Dataset columns: Unnamed: 0 (int64, 0 to 4.69k), program_id (string, 25 classes), project_id (string, lengths 13 to 16), source (string, lengths 5 to 7.62M), source_diag (float64), ground_truth (string, lengths 3 to 7.62M), path (float64).

Row shown: Unnamed: 0 = 200, program_id = CWE-787, project_id = CVE-2020-20276.

source (vulnerable revision of the uftpd FTP engine):
/* FTP engine * * Copyright (c) 2014-2019 Joachim Nilsson <troglobit@gmail.com> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "uftpd.h" #include <ctype.h> #include <arpa/ftp.h> #ifdef HAVE_SYS_TIME_H # include <sys/time.h> #endif typedef struct { char *command; void (*cb)(ctrl_t *ctr, char *arg); } ftp_cmd_t; static ftp_cmd_t supported[]; static void do_PORT(ctrl_t *ctrl, int pending); static void do_LIST(uev_t *w, void *arg, int events); static void do_RETR(uev_t *w, void *arg, int events); static void do_STOR(uev_t *w, void *arg, int events); static int is_cont(char *msg) { char *ptr; ptr = strchr(msg, '\r'); if (ptr) { ptr++; if (strchr(ptr, '\r')) return 1; } return 0; } static int send_msg(int sd, char *msg) { int n = 0; int l; if (!msg) { err: ERR(EINVAL, "Missing argument to send_msg()"); return 1; } l = strlen(msg); if (l <= 0) goto err; while (n < l) { int result = send(sd, msg + n, l, 0); if (result < 0) { ERR(errno, "Failed sending message to client"); return 1; } n += result; } DBG("Sent: %s%s", is_cont(msg) ? "\n" : "", msg); return 0; } /* * Receive message from client, split into command and argument */ static int recv_msg(int sd, char *msg, size_t len, char **cmd, char **argument) { char *ptr; ssize_t bytes; uint8_t *raw = (uint8_t *)msg; /* Clear for every new command. */ memset(msg, 0, len); /* Save one byte (-1) for NUL termination */ bytes = recv(sd, msg, len - 1, 0); if (bytes < 0) { if (EINTR == errno) return 1; if (ECONNRESET == errno) DBG("Connection reset by client."); else ERR(errno, "Failed reading client command"); return 1; } if (!bytes) { INFO("Client disconnected."); return 1; } if (raw[0] == 0xff) { char tmp[4]; char buf[20] = { 0 }; int i; i = recv(sd, &msg[bytes], len - bytes - 1, MSG_OOB | MSG_DONTWAIT); if (i > 0) bytes += i; for (i = 0; i < bytes; i++) { snprintf(tmp, sizeof(tmp), "%2X%s", raw[i], i + 1 < bytes ? 
" " : ""); strlcat(buf, tmp, sizeof(buf)); } strlcpy(msg, buf, len); *cmd = msg; *argument = NULL; DBG("Recv: [%s], %zd bytes", msg, bytes); return 0; } /* NUL terminate for strpbrk() */ msg[bytes] = 0; *cmd = msg; ptr = strpbrk(msg, " "); if (ptr) { *ptr = 0; ptr++; *argument = ptr; } else { *argument = NULL; ptr = msg; } ptr = strpbrk(ptr, "\r\n"); if (ptr) *ptr = 0; /* Convert command to std ftp upper case, issue #18 */ for (ptr = msg; *ptr; ++ptr) *ptr = toupper(*ptr); DBG("Recv: %s %s", *cmd, *argument ?: ""); return 0; } static int open_data_connection(ctrl_t *ctrl) { socklen_t len = sizeof(struct sockaddr); struct sockaddr_in sin; /* Previous PORT command from client */ if (ctrl->data_address[0]) { int rc; ctrl->data_sd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0); if (-1 == ctrl->data_sd) { ERR(errno, "Failed creating data socket"); return -1; } memset(&sin, 0, sizeof(sin)); sin.sin_family = AF_INET; sin.sin_port = htons(ctrl->data_port); inet_aton(ctrl->data_address, &(sin.sin_addr)); rc = connect(ctrl->data_sd, (struct sockaddr *)&sin, len); if (rc == -1 && EINPROGRESS != errno) { ERR(errno, "Failed connecting data socket to client"); close(ctrl->data_sd); ctrl->data_sd = -1; return -1; } DBG("Connected successfully to client's previously requested address:PORT %s:%d", ctrl->data_address, ctrl->data_port); return 0; } /* Previous PASV command, accept connect from client */ if (ctrl->data_listen_sd > 0) { const int const_int_1 = 1; int retries = 3; char client_ip[100]; retry: ctrl->data_sd = accept(ctrl->data_listen_sd, (struct sockaddr *)&sin, &len); if (-1 == ctrl->data_sd) { if (EAGAIN == errno && --retries) { sleep(1); goto retry; } ERR(errno, "Failed accepting connection from client"); return -1; } setsockopt(ctrl->data_sd, SOL_SOCKET, SO_KEEPALIVE, &const_int_1, sizeof(const_int_1)); set_nonblock(ctrl->data_sd); inet_ntop(AF_INET, &(sin.sin_addr), client_ip, INET_ADDRSTRLEN); DBG("Client PASV data connection from %s:%d", client_ip, ntohs(sin.sin_port)); close(ctrl->data_listen_sd); ctrl->data_listen_sd = -1; } return 0; } static int close_data_connection(ctrl_t *ctrl) { int ret = 0; DBG("Closing data connection ..."); /* PASV server listening socket */ if (ctrl->data_listen_sd > 0) { shutdown(ctrl->data_listen_sd, SHUT_RDWR); close(ctrl->data_listen_sd); ctrl->data_listen_sd = -1; ret++; } /* PASV client socket */ if (ctrl->data_sd > 0) { shutdown(ctrl->data_sd, SHUT_RDWR); close(ctrl->data_sd); ctrl->data_sd = -1; ret++; } /* PORT */ if (ctrl->data_address[0]) { ctrl->data_address[0] = 0; ctrl->data_port = 0; } return ret; } static int check_user_pass(ctrl_t *ctrl) { if (!ctrl->name[0]) return -1; if (!strcmp("anonymous", ctrl->name)) return 1; return 0; } static int do_abort(ctrl_t *ctrl) { if (ctrl->d || ctrl->d_num) { uev_io_stop(&ctrl->data_watcher); if (ctrl->d_num > 0) free(ctrl->d); ctrl->d_num = 0; ctrl->d = NULL; ctrl->i = 0; if (ctrl->file) free(ctrl->file); ctrl->file = NULL; } if (ctrl->file) { uev_io_stop(&ctrl->data_watcher); free(ctrl->file); ctrl->file = NULL; } if (ctrl->fp) { fclose(ctrl->fp); ctrl->fp = NULL; } ctrl->pending = 0; ctrl->offset = 0; return close_data_connection(ctrl); } static void handle_ABOR(ctrl_t *ctrl, char *arg) { DBG("Aborting any current transfer ..."); if (do_abort(ctrl)) send_msg(ctrl->sd, "426 Connection closed; transfer aborted.\r\n"); send_msg(ctrl->sd, "226 Closing data connection.\r\n"); } static void handle_USER(ctrl_t *ctrl, char *name) { if (ctrl->name[0]) { ctrl->name[0] = 0; ctrl->pass[0] = 0; } if (name) { 
strlcpy(ctrl->name, name, sizeof(ctrl->name)); if (check_user_pass(ctrl) == 1) { INFO("Guest logged in from %s", ctrl->clientaddr); send_msg(ctrl->sd, "230 Guest login OK, access restrictions apply.\r\n"); } else { send_msg(ctrl->sd, "331 Login OK, please enter password.\r\n"); } } else { send_msg(ctrl->sd, "530 You must input your name.\r\n"); } } static void handle_PASS(ctrl_t *ctrl, char *pass) { if (!ctrl->name[0]) { send_msg(ctrl->sd, "503 No username given.\r\n"); return; } strlcpy(ctrl->pass, pass, sizeof(ctrl->pass)); if (check_user_pass(ctrl) < 0) { LOG("User %s from %s, invalid password!", ctrl->name, ctrl->clientaddr); send_msg(ctrl->sd, "530 username or password is unacceptable\r\n"); return; } INFO("User %s login from %s", ctrl->name, ctrl->clientaddr); send_msg(ctrl->sd, "230 Guest login OK, access restrictions apply.\r\n"); } static void handle_SYST(ctrl_t *ctrl, char *arg) { char system[] = "215 UNIX Type: L8\r\n"; send_msg(ctrl->sd, system); } static void handle_TYPE(ctrl_t *ctrl, char *argument) { char type[24] = "200 Type set to I.\r\n"; char unknown[] = "501 Invalid argument to TYPE.\r\n"; if (!argument) argument = "Z"; switch (argument[0]) { case 'A': ctrl->type = TYPE_A; /* ASCII */ break; case 'I': ctrl->type = TYPE_I; /* IMAGE/BINARY */ break; default: send_msg(ctrl->sd, unknown); return; } type[16] = argument[0]; send_msg(ctrl->sd, type); } static void handle_PWD(ctrl_t *ctrl, char *arg) { char buf[sizeof(ctrl->cwd) + 10]; snprintf(buf, sizeof(buf), "257 \"%s\"\r\n", ctrl->cwd); send_msg(ctrl->sd, buf); } static void handle_CWD(ctrl_t *ctrl, char *path) { struct stat st; char *dir; if (!path) goto done; /* * Some FTP clients, most notably Chrome, use CWD to check if an * entry is a file or directory. */ dir = compose_abspath(ctrl, path); if (!dir || stat(dir, &st) || !S_ISDIR(st.st_mode)) { DBG("chrooted:%d, ctrl->cwd: %s, home:%s, dir:%s, len:%zd, dirlen:%zd", chrooted, ctrl->cwd, home, dir, strlen(home), strlen(dir)); send_msg(ctrl->sd, "550 No such directory.\r\n"); return; } if (!chrooted) { size_t len = strlen(home); DBG("non-chrooted CWD, home:%s, dir:%s, len:%zd, dirlen:%zd", home, dir, len, strlen(dir)); dir += len; } snprintf(ctrl->cwd, sizeof(ctrl->cwd), "%s", dir); if (ctrl->cwd[0] == 0) snprintf(ctrl->cwd, sizeof(ctrl->cwd), "/"); done: DBG("New CWD: '%s'", ctrl->cwd); send_msg(ctrl->sd, "250 OK\r\n"); } static void handle_CDUP(ctrl_t *ctrl, char *path) { handle_CWD(ctrl, ".."); } static void handle_PORT(ctrl_t *ctrl, char *str) { int a, b, c, d, e, f; char addr[INET_ADDRSTRLEN]; struct sockaddr_in sin; if (ctrl->data_sd > 0) { uev_io_stop(&ctrl->data_watcher); close(ctrl->data_sd); ctrl->data_sd = -1; } /* Convert PORT command's argument to IP address + port */ sscanf(str, "%d,%d,%d,%d,%d,%d", &a, &b, &c, &d, &e, &f); sprintf(addr, "%d.%d.%d.%d", a, b, c, d); /* Check IPv4 address using inet_aton(), throw away converted result */ if (!inet_aton(addr, &(sin.sin_addr))) { ERR(0, "Invalid address '%s' given to PORT command", addr); send_msg(ctrl->sd, "500 Illegal PORT command.\r\n"); return; } strlcpy(ctrl->data_address, addr, sizeof(ctrl->data_address)); ctrl->data_port = e * 256 + f; DBG("Client PORT command accepted for %s:%d", ctrl->data_address, ctrl->data_port); send_msg(ctrl->sd, "200 PORT command successful.\r\n"); } static void handle_EPRT(ctrl_t *ctrl, char *str) { send_msg(ctrl->sd, "502 Command not implemented.\r\n"); } static char *mode_to_str(mode_t m) { static char str[11]; snprintf(str, sizeof(str), "%c%c%c%c%c%c%c%c%c%c", S_ISDIR(m) ? 
'd' : '-', m & S_IRUSR ? 'r' : '-', m & S_IWUSR ? 'w' : '-', m & S_IXUSR ? 'x' : '-', m & S_IRGRP ? 'r' : '-', m & S_IWGRP ? 'w' : '-', m & S_IXGRP ? 'x' : '-', m & S_IROTH ? 'r' : '-', m & S_IWOTH ? 'w' : '-', m & S_IXOTH ? 'x' : '-'); return str; } static char *time_to_str(time_t mtime) { struct tm *t = localtime(&mtime); static char str[20]; setlocale(LC_TIME, "C"); strftime(str, sizeof(str), "%b %e %H:%M", t); return str; } static char *mlsd_time(time_t mtime) { struct tm *t = localtime(&mtime); static char str[20]; strftime(str, sizeof(str), "%Y%m%d%H%M%S", t); return str; } static const char *mlsd_type(char *name, int mode) { if (!strcmp(name, ".")) return "cdir"; if (!strcmp(name, "..")) return "pdir"; return S_ISDIR(mode) ? "dir" : "file"; } void mlsd_fact(char fact, char *buf, size_t len, char *name, char *perms, struct stat *st) { char size[20]; switch (fact) { case 'm': strlcat(buf, "modify=", len); strlcat(buf, mlsd_time(st->st_mtime), len); break; case 'p': strlcat(buf, "perm=", len); strlcat(buf, perms, len); break; case 't': strlcat(buf, "type=", len); strlcat(buf, mlsd_type(name, st->st_mode), len); break; case 's': if (S_ISDIR(st->st_mode)) return; snprintf(size, sizeof(size), "size=%" PRIu64, st->st_size); strlcat(buf, size, len); break; default: return; } strlcat(buf, ";", len); } static void mlsd_printf(ctrl_t *ctrl, char *buf, size_t len, char *path, char *name, struct stat *st) { char perms[10] = ""; int ro = !access(path, R_OK); int rw = !access(path, W_OK); if (S_ISDIR(st->st_mode)) { /* XXX: Verify 'e' by checking that we can CD to the 'name' */ if (ro) strlcat(perms, "le", sizeof(perms)); if (rw) strlcat(perms, "pc", sizeof(perms)); /* 'd' RMD, 'm' MKD */ } else { if (ro) strlcat(perms, "r", sizeof(perms)); if (rw) strlcat(perms, "w", sizeof(perms)); /* 'f' RNFR, 'd' DELE */ } memset(buf, 0, len); if (ctrl->d_num == -1 && (ctrl->list_mode & 0x0F) == 2) strlcat(buf, " ", len); for (int i = 0; ctrl->facts[i]; i++) mlsd_fact(ctrl->facts[i], buf, len, name, perms, st); strlcat(buf, " ", len); strlcat(buf, name, len); strlcat(buf, "\r\n", len); } static int list_printf(ctrl_t *ctrl, char *buf, size_t len, char *path, char *name) { int dirs; int mode = ctrl->list_mode; struct stat st; if (stat(path, &st)) return -1; dirs = mode & 0xF0; mode = mode & 0x0F; if (dirs && !S_ISDIR(st.st_mode)) return 1; if (!dirs && S_ISDIR(st.st_mode)) return 1; switch (mode) { case 3: /* MLSD */ /* fallthrough */ case 2: /* MLST */ mlsd_printf(ctrl, buf, len, path, name, &st); break; case 1: /* NLST */ snprintf(buf, len, "%s\r\n", name); break; case 0: /* LIST */ snprintf(buf, len, "%s 1 %5d %5d %12" PRIu64 " %s %s\r\n", mode_to_str(st.st_mode), 0, 0, (uint64_t)st.st_size, time_to_str(st.st_mtime), name); break; } return 0; } static void do_MLST(ctrl_t *ctrl) { size_t len = 0; char buf[512] = { 0 }; int sd = ctrl->sd; if (ctrl->data_sd != -1) sd = ctrl->data_sd; snprintf(buf, sizeof(buf), "250- Listing %s\r\n", ctrl->file); len = strlen(buf); if (list_printf(ctrl, &buf[len], sizeof(buf) - len, ctrl->file, basename(ctrl->file))) { do_abort(ctrl); send_msg(ctrl->sd, "550 No such file or directory.\r\n"); return; } strlcat(buf, "250 End.\r\n", sizeof(buf)); send_msg(sd, buf); } static void do_MLSD(ctrl_t *ctrl) { char buf[512] = { 0 }; if (list_printf(ctrl, buf, sizeof(buf), ctrl->file, basename(ctrl->file))) { do_abort(ctrl); send_msg(ctrl->sd, "550 No such file or directory.\r\n"); return; } send_msg(ctrl->data_sd, buf); send_msg(ctrl->sd, "226 Transfer complete.\r\n"); } static void 
do_LIST(uev_t *w, void *arg, int events) { ctrl_t *ctrl = (ctrl_t *)arg; struct timeval tv; ssize_t bytes; char buf[BUFFER_SIZE] = { 0 }; if (UEV_ERROR == events || UEV_HUP == events) { uev_io_start(w); return; } /* Reset inactivity timer. */ uev_timer_set(&ctrl->timeout_watcher, INACTIVITY_TIMER, 0); if (ctrl->d_num == -1) { if (ctrl->list_mode == 3) do_MLSD(ctrl); else do_MLST(ctrl); do_abort(ctrl); return; } gettimeofday(&tv, NULL); if (tv.tv_sec - ctrl->tv.tv_sec > 3) { DBG("Sending LIST entry %d of %d to %s ...", ctrl->i, ctrl->d_num, ctrl->clientaddr); ctrl->tv.tv_sec = tv.tv_sec; } ctrl->list_mode |= (ctrl->pending ? 0 : 0x80); while (ctrl->i < ctrl->d_num) { struct dirent *entry; char *name, *path; char cwd[PATH_MAX]; entry = ctrl->d[ctrl->i++]; name = entry->d_name; DBG("Found directory entry %s", name); if ((!strcmp(name, ".") || !strcmp(name, "..")) && ctrl->list_mode < 2) continue; snprintf(cwd, sizeof(cwd), "%s%s%s", ctrl->file, ctrl->file[strlen(ctrl->file) - 1] == '/' ? "" : "/", name); path = compose_path(ctrl, cwd); if (!path) { fail: LOGIT(LOG_INFO, errno, "Failed reading status for %s", path ? path : name); continue; } switch (list_printf(ctrl, buf, sizeof(buf), path, name)) { case -1: goto fail; case 1: continue; default: break; } DBG("LIST %s", buf); free(entry); bytes = send(ctrl->data_sd, buf, strlen(buf), 0); if (-1 == bytes) { if (ECONNRESET == errno) DBG("Connection reset by client."); else ERR(errno, "Failed sending file %s to client", ctrl->file); while (ctrl->i < ctrl->d_num) { struct dirent *entry = ctrl->d[ctrl->i++]; free(entry); } do_abort(ctrl); send_msg(ctrl->sd, "426 TCP connection was established but then broken!\r\n"); } return; } ctrl->list_mode &= 0x0F; /* Rewind and list files */ if (ctrl->pending == 0) { ctrl->pending++; ctrl->i = 0; return; } do_abort(ctrl); send_msg(ctrl->sd, "226 Transfer complete.\r\n"); } static void list(ctrl_t *ctrl, char *arg, int mode) { char *path; if (string_valid(arg)) { char *ptr, *quot; /* Check if client sends ls arguments ... */ ptr = arg; while (*ptr) { if (isspace(*ptr)) ptr++; if (*ptr == '-') { while (*ptr && !isspace(*ptr)) ptr++; } break; } /* Strip any "" from "<arg>" */ while ((quot = strchr(ptr, '"'))) { char *ptr2; ptr2 = strchr(&quot[1], '"'); if (ptr2) { memmove(ptr2, &ptr2[1], strlen(ptr2)); memmove(quot, &quot[1], strlen(quot)); } } arg = ptr; } if (mode >= 2) path = compose_abspath(ctrl, arg); else path = compose_path(ctrl, arg); if (!path) { send_msg(ctrl->sd, "550 No such file or directory.\r\n"); return; } ctrl->list_mode = mode; ctrl->file = strdup(arg ? arg : ""); ctrl->i = 0; ctrl->d_num = scandir(path, &ctrl->d, NULL, alphasort); if (ctrl->d_num == -1) { send_msg(ctrl->sd, "550 No such file or directory.\r\n"); DBG("Failed reading directory '%s': %s", path, strerror(errno)); return; } DBG("Reading directory %s ... 
%d number of entries", path, ctrl->d_num); if (ctrl->data_sd > -1) { send_msg(ctrl->sd, "125 Data connection already open; transfer starting.\r\n"); uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_LIST, ctrl, ctrl->data_sd, UEV_WRITE); return; } do_PORT(ctrl, 1); } static void handle_LIST(ctrl_t *ctrl, char *arg) { list(ctrl, arg, 0); } static void handle_NLST(ctrl_t *ctrl, char *arg) { list(ctrl, arg, 1); } static void handle_MLST(ctrl_t *ctrl, char *arg) { list(ctrl, arg, 2); } static void handle_MLSD(ctrl_t *ctrl, char *arg) { list(ctrl, arg, 3); } static void do_pasv_connection(uev_t *w, void *arg, int events) { ctrl_t *ctrl = (ctrl_t *)arg; int rc = 0; if (UEV_ERROR == events || UEV_HUP == events) { DBG("error on data_listen_sd ..."); uev_io_start(w); return; } DBG("Event on data_listen_sd ..."); uev_io_stop(&ctrl->data_watcher); if (open_data_connection(ctrl)) return; switch (ctrl->pending) { case 3: /* fall-through */ case 2: if (ctrl->offset) rc = fseek(ctrl->fp, ctrl->offset, SEEK_SET); if (rc) { do_abort(ctrl); send_msg(ctrl->sd, "551 Failed seeking to that position in file.\r\n"); return; } /* fall-through */ case 1: break; default: DBG("No pending command, waiting ..."); return; } switch (ctrl->pending) { case 3: /* STOR */ DBG("Pending STOR, starting ..."); uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_STOR, ctrl, ctrl->data_sd, UEV_READ); break; case 2: /* RETR */ DBG("Pending RETR, starting ..."); uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_RETR, ctrl, ctrl->data_sd, UEV_WRITE); break; case 1: /* LIST */ DBG("Pending LIST, starting ..."); uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_LIST, ctrl, ctrl->data_sd, UEV_WRITE); break; } if (ctrl->pending == 1 && ctrl->list_mode == 2) send_msg(ctrl->sd, "150 Opening ASCII mode data connection for MLSD.\r\n"); else send_msg(ctrl->sd, "150 Data connection accepted; transfer starting.\r\n"); ctrl->pending = 0; } static int do_PASV(ctrl_t *ctrl, char *arg, struct sockaddr *data, socklen_t *len) { struct sockaddr_in server; if (ctrl->data_sd > 0) { close(ctrl->data_sd); ctrl->data_sd = -1; } if (ctrl->data_listen_sd > 0) close(ctrl->data_listen_sd); ctrl->data_listen_sd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0); if (ctrl->data_listen_sd < 0) { ERR(errno, "Failed opening data server socket"); send_msg(ctrl->sd, "426 Internal server error.\r\n"); return 1; } memset(&server, 0, sizeof(server)); server.sin_family = AF_INET; server.sin_addr.s_addr = inet_addr(ctrl->serveraddr); server.sin_port = htons(0); if (bind(ctrl->data_listen_sd, (struct sockaddr *)&server, sizeof(server)) < 0) { ERR(errno, "Failed binding to client socket"); send_msg(ctrl->sd, "426 Internal server error.\r\n"); close(ctrl->data_listen_sd); ctrl->data_listen_sd = -1; return 1; } INFO("Data server port estabished. 
Waiting for client to connect ..."); if (listen(ctrl->data_listen_sd, 1) < 0) { ERR(errno, "Client data connection failure"); send_msg(ctrl->sd, "426 Internal server error.\r\n"); close(ctrl->data_listen_sd); ctrl->data_listen_sd = -1; return 1; } memset(data, 0, sizeof(*data)); if (-1 == getsockname(ctrl->data_listen_sd, data, len)) { ERR(errno, "Cannot determine our address, need it if client should connect to us"); close(ctrl->data_listen_sd); ctrl->data_listen_sd = -1; return 1; } uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_pasv_connection, ctrl, ctrl->data_listen_sd, UEV_READ); return 0; } static void handle_PASV(ctrl_t *ctrl, char *arg) { struct sockaddr_in data; socklen_t len = sizeof(data); char *msg, *p, buf[200]; int port; if (do_PASV(ctrl, arg, (struct sockaddr *)&data, &len)) return; /* Convert server IP address and port to comma separated list */ msg = strdup(ctrl->serveraddr); if (!msg) { send_msg(ctrl->sd, "426 Internal server error.\r\n"); exit(1); } p = msg; while ((p = strchr(p, '.'))) *p++ = ','; port = ntohs(data.sin_port); snprintf(buf, sizeof(buf), "227 Entering Passive Mode (%s,%d,%d)\r\n", msg, port / 256, port % 256); send_msg(ctrl->sd, buf); free(msg); } static void handle_EPSV(ctrl_t *ctrl, char *arg) { struct sockaddr_in data; socklen_t len = sizeof(data); char buf[200]; if (string_valid(arg) && string_case_compare(arg, "ALL")) { send_msg(ctrl->sd, "200 Command OK\r\n"); return; } if (do_PASV(ctrl, arg, (struct sockaddr *)&data, &len)) return; snprintf(buf, sizeof(buf), "229 Entering Extended Passive Mode (|||%d|)\r\n", ntohs(data.sin_port)); send_msg(ctrl->sd, buf); } static void do_RETR(uev_t *w, void *arg, int events) { ctrl_t *ctrl = (ctrl_t *)arg; struct timeval tv; ssize_t bytes; size_t num; char buf[BUFFER_SIZE]; if (UEV_ERROR == events || UEV_HUP == events) { DBG("error on data_sd ..."); uev_io_start(w); return; } if (!ctrl->fp) { DBG("no fp for RETR, bailing."); return; } num = fread(buf, sizeof(char), sizeof(buf), ctrl->fp); if (!num) { if (feof(ctrl->fp)) INFO("User %s from %s downloaded %s", ctrl->name, ctrl->clientaddr, ctrl->file); else if (ferror(ctrl->fp)) ERR(0, "Error while reading %s", ctrl->file); do_abort(ctrl); send_msg(ctrl->sd, "226 Transfer complete.\r\n"); return; } /* Reset inactivity timer. */ uev_timer_set(&ctrl->timeout_watcher, INACTIVITY_TIMER, 0); gettimeofday(&tv, NULL); if (tv.tv_sec - ctrl->tv.tv_sec > 3) { DBG("Sending %zd bytes of %s to %s ...", num, ctrl->file, ctrl->clientaddr); ctrl->tv.tv_sec = tv.tv_sec; } bytes = send(ctrl->data_sd, buf, num, 0); if (-1 == bytes) { if (ECONNRESET == errno) DBG("Connection reset by client."); else ERR(errno, "Failed sending file %s to client", ctrl->file); do_abort(ctrl); send_msg(ctrl->sd, "426 TCP connection was established but then broken!\r\n"); } } /* * Check if previous command was PORT, then connect to client and * transfer file/listing similar to what's done for PASV conns. 
*/ static void do_PORT(ctrl_t *ctrl, int pending) { if (!ctrl->data_address[0]) { /* Check if previous command was PASV */ if (ctrl->data_sd == -1 && ctrl->data_listen_sd == -1) { if (pending == 1 && ctrl->d_num == -1) do_MLST(ctrl); return; } ctrl->pending = pending; return; } if (open_data_connection(ctrl)) { do_abort(ctrl); send_msg(ctrl->sd, "425 TCP connection cannot be established.\r\n"); return; } if (pending != 1 || ctrl->list_mode != 2) send_msg(ctrl->sd, "150 Data connection opened; transfer starting.\r\n"); switch (pending) { case 3: uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_STOR, ctrl, ctrl->data_sd, UEV_READ); break; case 2: uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_RETR, ctrl, ctrl->data_sd, UEV_WRITE); break; case 1: uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_LIST, ctrl, ctrl->data_sd, UEV_WRITE); break; } ctrl->pending = 0; } static void handle_RETR(ctrl_t *ctrl, char *file) { FILE *fp; char *path; struct stat st; path = compose_abspath(ctrl, file); if (!path || stat(path, &st) || !S_ISREG(st.st_mode)) { send_msg(ctrl->sd, "550 Not a regular file.\r\n"); return; } fp = fopen(path, "rb"); if (!fp) { if (errno != ENOENT) ERR(errno, "Failed RETR %s for %s", path, ctrl->clientaddr); send_msg(ctrl->sd, "451 Trouble to RETR file.\r\n"); return; } ctrl->fp = fp; ctrl->file = strdup(file); if (ctrl->data_sd > -1) { if (ctrl->offset) { DBG("Previous REST %ld of file size %ld", ctrl->offset, st.st_size); if (fseek(fp, ctrl->offset, SEEK_SET)) { do_abort(ctrl); send_msg(ctrl->sd, "551 Failed seeking to that position in file.\r\n"); return; } } send_msg(ctrl->sd, "125 Data connection already open; transfer starting.\r\n"); uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_RETR, ctrl, ctrl->data_sd, UEV_WRITE); return; } do_PORT(ctrl, 2); } static void handle_MDTM(ctrl_t *ctrl, char *file) { struct stat st; struct tm *tm; char *path, *ptr; char *mtime = NULL; char buf[80]; /* Request to set mtime, ncftp does this */ ptr = strchr(file, ' '); if (ptr) { *ptr++ = 0; mtime = file; file = ptr; } path = compose_abspath(ctrl, file); if (!path || stat(path, &st) || !S_ISREG(st.st_mode)) { send_msg(ctrl->sd, "550 Not a regular file.\r\n"); return; } if (mtime) { struct timespec times[2] = { { 0, UTIME_OMIT }, { 0, 0 } }; struct tm tm; int rc; if (!strptime(mtime, "%Y%m%d%H%M%S", &tm)) { fail: send_msg(ctrl->sd, "550 Invalid time format\r\n"); return; } times[1].tv_sec = mktime(&tm); rc = utimensat(0, path, times, 0); if (rc) { ERR(errno, "Failed setting MTIME %s of %s", mtime, file); goto fail; } (void)stat(path, &st); } tm = gmtime(&st.st_mtime); strftime(buf, sizeof(buf), "213 %Y%m%d%H%M%S\r\n", tm); send_msg(ctrl->sd, buf); } static void do_STOR(uev_t *w, void *arg, int events) { ctrl_t *ctrl = (ctrl_t *)arg; struct timeval tv; ssize_t bytes; size_t num; char buf[BUFFER_SIZE]; if (UEV_ERROR == events || UEV_HUP == events) { DBG("error on data_sd ..."); uev_io_start(w); return; } if (!ctrl->fp) { DBG("no fp for STOR, bailing."); return; } /* Reset inactivity timer. 
*/ uev_timer_set(&ctrl->timeout_watcher, INACTIVITY_TIMER, 0); bytes = recv(ctrl->data_sd, buf, sizeof(buf), 0); if (bytes < 0) { if (ECONNRESET == errno) DBG("Connection reset by client."); else ERR(errno, "Failed receiving file %s from client", ctrl->file); do_abort(ctrl); send_msg(ctrl->sd, "426 TCP connection was established but then broken!\r\n"); return; } if (bytes == 0) { INFO("User %s at %s uploaded file %s", ctrl->name, ctrl->clientaddr, ctrl->file); do_abort(ctrl); send_msg(ctrl->sd, "226 Transfer complete.\r\n"); return; } gettimeofday(&tv, NULL); if (tv.tv_sec - ctrl->tv.tv_sec > 3) { DBG("Receiving %zd bytes of %s from %s ...", bytes, ctrl->file, ctrl->clientaddr); ctrl->tv.tv_sec = tv.tv_sec; } num = fwrite(buf, 1, bytes, ctrl->fp); if ((size_t)bytes != num) ERR(errno, "552 Disk full."); } static void handle_STOR(ctrl_t *ctrl, char *file) { FILE *fp = NULL; char *path; int rc = 0; path = compose_abspath(ctrl, file); if (!path) { INFO("Invalid path for %s: %m", file); goto fail; } DBG("Trying to write to %s ...", path); fp = fopen(path, "wb"); if (!fp) { /* If EACCESS client is trying to do something disallowed */ ERR(errno, "Failed writing %s", path); fail: send_msg(ctrl->sd, "451 Trouble storing file.\r\n"); do_abort(ctrl); return; } ctrl->fp = fp; ctrl->file = strdup(file); if (ctrl->data_sd > -1) { if (ctrl->offset) rc = fseek(fp, ctrl->offset, SEEK_SET); if (rc) { do_abort(ctrl); send_msg(ctrl->sd, "551 Failed seeking to that position in file.\r\n"); return; } send_msg(ctrl->sd, "125 Data connection already open; transfer starting.\r\n"); uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_STOR, ctrl, ctrl->data_sd, UEV_READ); return; } do_PORT(ctrl, 3); } static void handle_DELE(ctrl_t *ctrl, char *file) { char *path; path = compose_abspath(ctrl, file); if (!path) { ERR(errno, "Cannot find %s", file); goto fail; } if (remove(path)) { if (ENOENT == errno) fail: send_msg(ctrl->sd, "550 No such file or directory.\r\n"); else if (EPERM == errno) send_msg(ctrl->sd, "550 Not allowed to remove file or directory.\r\n"); else send_msg(ctrl->sd, "550 Unknown error.\r\n"); return; } send_msg(ctrl->sd, "200 Command OK\r\n"); } static void handle_MKD(ctrl_t *ctrl, char *arg) { char *path; path = compose_abspath(ctrl, arg); if (!path) { INFO("Invalid path for %s: %m", arg); goto fail; } if (mkdir(path, 0755)) { if (EPERM == errno) fail: send_msg(ctrl->sd, "550 Not allowed to create directory.\r\n"); else send_msg(ctrl->sd, "550 Unknown error.\r\n"); return; } send_msg(ctrl->sd, "200 Command OK\r\n"); } static void handle_RMD(ctrl_t *ctrl, char *arg) { handle_DELE(ctrl, arg); } static void handle_REST(ctrl_t *ctrl, char *arg) { const char *errstr; char buf[80]; if (!string_valid(arg)) { send_msg(ctrl->sd, "550 Invalid argument.\r\n"); return; } ctrl->offset = strtonum(arg, 0, INT64_MAX, &errstr); snprintf(buf, sizeof(buf), "350 Restarting at %ld. 
Send STOR or RETR to continue transfer.\r\n", ctrl->offset); send_msg(ctrl->sd, buf); } static size_t num_nl(char *file) { FILE *fp; char buf[80]; size_t len, num = 0; fp = fopen(file, "r"); if (!fp) return 0; do { char *ptr = buf; len = fread(buf, sizeof(char), sizeof(buf) - 1, fp); if (len > 0) { buf[len] = 0; while ((ptr = strchr(ptr, '\n'))) { ptr++; num++; } } } while (len > 0); fclose(fp); return num; } static void handle_SIZE(ctrl_t *ctrl, char *file) { char *path; char buf[80]; size_t extralen = 0; struct stat st; path = compose_abspath(ctrl, file); if (!path || stat(path, &st) || S_ISDIR(st.st_mode)) { send_msg(ctrl->sd, "550 No such file, or argument is a directory.\r\n"); return; } DBG("SIZE %s", path); if (ctrl->type == TYPE_A) extralen = num_nl(path); snprintf(buf, sizeof(buf), "213 %" PRIu64 "\r\n", (uint64_t)(st.st_size + extralen)); send_msg(ctrl->sd, buf); } /* No operation - used as session keepalive by clients. */ static void handle_NOOP(ctrl_t *ctrl, char *arg) { send_msg(ctrl->sd, "200 NOOP OK.\r\n"); } #if 0 static void handle_RNFR(ctrl_t *ctrl, char *arg) { } static void handle_RNTO(ctrl_t *ctrl, char *arg) { } #endif static void handle_QUIT(ctrl_t *ctrl, char *arg) { send_msg(ctrl->sd, "221 Goodbye.\r\n"); uev_exit(ctrl->ctx); } static void handle_CLNT(ctrl_t *ctrl, char *arg) { send_msg(ctrl->sd, "200 CLNT\r\n"); } static void handle_OPTS(ctrl_t *ctrl, char *arg) { /* OPTS MLST type;size;modify;perm; */ if (strstr(arg, "MLST")) { size_t i = 0; char *ptr; char buf[42] = "200 MLST OPTS "; char facts[10] = { 0 }; ptr = strtok(arg + 4, " \t;"); while (ptr && i < sizeof(facts) - 1) { if (!strcmp(ptr, "modify") || !strcmp(ptr, "perm") || !strcmp(ptr, "size") || !strcmp(ptr, "type")) { facts[i++] = ptr[0]; strlcat(buf, ptr, sizeof(buf)); strlcat(buf, ";", sizeof(buf)); } ptr = strtok(NULL, ";"); } strlcat(buf, "\r\n", sizeof(buf)); DBG("New MLSD facts: %s", facts); strlcpy(ctrl->facts, facts, sizeof(ctrl->facts)); send_msg(ctrl->sd, buf); } else send_msg(ctrl->sd, "200 UTF8 OPTS ON\r\n"); } static void handle_HELP(ctrl_t *ctrl, char *arg) { int i = 0; char buf[80]; ftp_cmd_t *cmd; if (string_valid(arg) && !string_compare(arg, "SITE")) { send_msg(ctrl->sd, "500 command HELP does not take any arguments on this server.\r\n"); return; } snprintf(ctrl->buf, ctrl->bufsz, "214-The following commands are recognized."); for (cmd = &supported[0]; cmd->command; cmd++, i++) { if (i % 14 == 0) strlcat(ctrl->buf, "\r\n", ctrl->bufsz); snprintf(buf, sizeof(buf), " %s", cmd->command); strlcat(ctrl->buf, buf, ctrl->bufsz); } snprintf(buf, sizeof(buf), "\r\n214 Help OK.\r\n"); strlcat(ctrl->buf, buf, ctrl->bufsz); send_msg(ctrl->sd, ctrl->buf); } static void handle_FEAT(ctrl_t *ctrl, char *arg) { snprintf(ctrl->buf, ctrl->bufsz, "211-Features:\r\n" " EPSV\r\n" " PASV\r\n" " SIZE\r\n" " UTF8\r\n" " REST STREAM\r\n" " MLST modify*;perm*;size*;type*;\r\n" "211 End\r\n"); send_msg(ctrl->sd, ctrl->buf); } static void handle_UNKNOWN(ctrl_t *ctrl, char *command) { char buf[128]; snprintf(buf, sizeof(buf), "500 command '%s' not recognized by server.\r\n", command); send_msg(ctrl->sd, buf); } #define COMMAND(NAME) { #NAME, handle_ ## NAME } static ftp_cmd_t supported[] = { COMMAND(ABOR), COMMAND(DELE), COMMAND(USER), COMMAND(PASS), COMMAND(SYST), COMMAND(TYPE), COMMAND(PORT), COMMAND(EPRT), COMMAND(RETR), COMMAND(MKD), COMMAND(RMD), COMMAND(REST), COMMAND(MDTM), COMMAND(PASV), COMMAND(EPSV), COMMAND(QUIT), COMMAND(LIST), COMMAND(NLST), COMMAND(MLST), COMMAND(MLSD), COMMAND(CLNT), COMMAND(OPTS), 
COMMAND(PWD), COMMAND(STOR), COMMAND(CWD), COMMAND(CDUP), COMMAND(SIZE), COMMAND(NOOP), COMMAND(HELP), COMMAND(FEAT), { NULL, NULL } }; static void child_exit(uev_t *w, void *arg, int events) { DBG("Child exiting ..."); uev_exit(w->ctx); } static void read_client_command(uev_t *w, void *arg, int events) { char *command, *argument; ctrl_t *ctrl = (ctrl_t *)arg; ftp_cmd_t *cmd; if (UEV_ERROR == events || UEV_HUP == events) { uev_io_start(w); return; } /* Reset inactivity timer. */ uev_timer_set(&ctrl->timeout_watcher, INACTIVITY_TIMER, 0); if (recv_msg(w->fd, ctrl->buf, ctrl->bufsz, &command, &argument)) { DBG("Short read, exiting."); uev_exit(ctrl->ctx); return; } if (!string_valid(command)) return; if (string_match(command, "FF F4")) { DBG("Ignoring IAC command, client should send ABOR as well."); return; } for (cmd = &supported[0]; cmd->command; cmd++) { if (string_compare(command, cmd->command)) { cmd->cb(ctrl, argument); return; } } handle_UNKNOWN(ctrl, command); } static void ftp_command(ctrl_t *ctrl) { uev_t sigterm_watcher; ctrl->bufsz = BUFFER_SIZE * sizeof(char); ctrl->buf = malloc(ctrl->bufsz); if (!ctrl->buf) { WARN(errno, "FTP session failed allocating buffer"); exit(1); } snprintf(ctrl->buf, ctrl->bufsz, "220 %s (%s) ready.\r\n", prognm, VERSION); send_msg(ctrl->sd, ctrl->buf); uev_signal_init(ctrl->ctx, &sigterm_watcher, child_exit, NULL, SIGTERM); uev_io_init(ctrl->ctx, &ctrl->io_watcher, read_client_command, ctrl, ctrl->sd, UEV_READ); uev_run(ctrl->ctx, 0); } int ftp_session(uev_ctx_t *ctx, int sd) { int pid = 0; ctrl_t *ctrl; socklen_t len; ctrl = new_session(ctx, sd, &pid); if (!ctrl) { if (pid < 0) { shutdown(sd, SHUT_RDWR); close(sd); } return pid; } len = sizeof(ctrl->server_sa); if (-1 == getsockname(sd, (struct sockaddr *)&ctrl->server_sa, &len)) { ERR(errno, "Cannot determine our address"); goto fail; } convert_address(&ctrl->server_sa, ctrl->serveraddr, sizeof(ctrl->serveraddr)); len = sizeof(ctrl->client_sa); if (-1 == getpeername(sd, (struct sockaddr *)&ctrl->client_sa, &len)) { ERR(errno, "Cannot determine client address"); goto fail; } convert_address(&ctrl->client_sa, ctrl->clientaddr, sizeof(ctrl->clientaddr)); ctrl->type = TYPE_A; ctrl->data_listen_sd = -1; ctrl->data_sd = -1; ctrl->name[0] = 0; ctrl->pass[0] = 0; ctrl->data_address[0] = 0; strlcpy(ctrl->facts, "mpst", sizeof(ctrl->facts)); INFO("Client connection from %s", ctrl->clientaddr); ftp_command(ctrl); DBG("Client exiting, bye"); exit(del_session(ctrl, 1)); fail: free(ctrl); shutdown(sd, SHUT_RDWR); close(sd); return -1; } /** * Local Variables: * indent-tabs-mode: t * c-file-style: "linux" * End: */
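The change between the source and ground_truth columns is in handle_PORT(): the vulnerable revision formats the client-supplied "a,b,c,d,e,f" tuple into the 16-byte addr[INET_ADDRSTRLEN] buffer with an unbounded sprintf(), so oversized numbers in a PORT argument write past the end of the buffer (CWE-787); the ground_truth column below uses snprintf(addr, sizeof(addr), ...) instead. The stand-alone sketch that follows is not part of uftpd — format_port_arg(), main(), and the test strings are illustrative — it only shows the bounded pattern and why a truncated result is then rejected by inet_aton().

/*
 * Minimal sketch of the CWE-787 fix in handle_PORT().
 * An oversized tuple such as "999999999,999999999,999999999,999999999,0,0"
 * needs ~40 bytes when formatted as dotted quad, far more than the
 * 16-byte addr buffer; sprintf() would write out of bounds, while
 * snprintf() truncates and inet_aton() rejects the malformed address.
 */
#include <arpa/inet.h>
#include <stdio.h>

static void format_port_arg(const char *str, char *addr, size_t addrlen)
{
	int a, b, c, d, e, f;

	if (sscanf(str, "%d,%d,%d,%d,%d,%d", &a, &b, &c, &d, &e, &f) != 6) {
		addr[0] = 0;
		return;
	}

	/* Vulnerable pattern (source column):
	 *     sprintf(addr, "%d.%d.%d.%d", a, b, c, d);   -- no bound, CWE-787
	 * Fixed pattern (ground_truth column): */
	snprintf(addr, addrlen, "%d.%d.%d.%d", a, b, c, d);
}

int main(void)
{
	char addr[INET_ADDRSTRLEN];
	struct in_addr in;

	/* Well-formed PORT argument: fits in 16 bytes and parses. */
	format_port_arg("192,168,1,2,19,136", addr, sizeof(addr));
	printf("%s -> %s\n", addr, inet_aton(addr, &in) ? "valid" : "rejected");

	/* Oversized tuple: truncated by snprintf(), then rejected. */
	format_port_arg("999999999,999999999,999999999,999999999,0,0",
			addr, sizeof(addr));
	printf("%s -> %s\n", addr, inet_aton(addr, &in) ? "valid" : "rejected");

	return 0;
}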
source_diag: null

ground_truth (patched revision of the same file):
/* FTP engine * * Copyright (c) 2014-2019 Joachim Nilsson <troglobit@gmail.com> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "uftpd.h" #include <ctype.h> #include <arpa/ftp.h> #ifdef HAVE_SYS_TIME_H # include <sys/time.h> #endif typedef struct { char *command; void (*cb)(ctrl_t *ctr, char *arg); } ftp_cmd_t; static ftp_cmd_t supported[]; static void do_PORT(ctrl_t *ctrl, int pending); static void do_LIST(uev_t *w, void *arg, int events); static void do_RETR(uev_t *w, void *arg, int events); static void do_STOR(uev_t *w, void *arg, int events); static int is_cont(char *msg) { char *ptr; ptr = strchr(msg, '\r'); if (ptr) { ptr++; if (strchr(ptr, '\r')) return 1; } return 0; } static int send_msg(int sd, char *msg) { int n = 0; int l; if (!msg) { err: ERR(EINVAL, "Missing argument to send_msg()"); return 1; } l = strlen(msg); if (l <= 0) goto err; while (n < l) { int result = send(sd, msg + n, l, 0); if (result < 0) { ERR(errno, "Failed sending message to client"); return 1; } n += result; } DBG("Sent: %s%s", is_cont(msg) ? "\n" : "", msg); return 0; } /* * Receive message from client, split into command and argument */ static int recv_msg(int sd, char *msg, size_t len, char **cmd, char **argument) { char *ptr; ssize_t bytes; uint8_t *raw = (uint8_t *)msg; /* Clear for every new command. */ memset(msg, 0, len); /* Save one byte (-1) for NUL termination */ bytes = recv(sd, msg, len - 1, 0); if (bytes < 0) { if (EINTR == errno) return 1; if (ECONNRESET == errno) DBG("Connection reset by client."); else ERR(errno, "Failed reading client command"); return 1; } if (!bytes) { INFO("Client disconnected."); return 1; } if (raw[0] == 0xff) { char tmp[4]; char buf[20] = { 0 }; int i; i = recv(sd, &msg[bytes], len - bytes - 1, MSG_OOB | MSG_DONTWAIT); if (i > 0) bytes += i; for (i = 0; i < bytes; i++) { snprintf(tmp, sizeof(tmp), "%2X%s", raw[i], i + 1 < bytes ? 
" " : ""); strlcat(buf, tmp, sizeof(buf)); } strlcpy(msg, buf, len); *cmd = msg; *argument = NULL; DBG("Recv: [%s], %zd bytes", msg, bytes); return 0; } /* NUL terminate for strpbrk() */ msg[bytes] = 0; *cmd = msg; ptr = strpbrk(msg, " "); if (ptr) { *ptr = 0; ptr++; *argument = ptr; } else { *argument = NULL; ptr = msg; } ptr = strpbrk(ptr, "\r\n"); if (ptr) *ptr = 0; /* Convert command to std ftp upper case, issue #18 */ for (ptr = msg; *ptr; ++ptr) *ptr = toupper(*ptr); DBG("Recv: %s %s", *cmd, *argument ?: ""); return 0; } static int open_data_connection(ctrl_t *ctrl) { socklen_t len = sizeof(struct sockaddr); struct sockaddr_in sin; /* Previous PORT command from client */ if (ctrl->data_address[0]) { int rc; ctrl->data_sd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0); if (-1 == ctrl->data_sd) { ERR(errno, "Failed creating data socket"); return -1; } memset(&sin, 0, sizeof(sin)); sin.sin_family = AF_INET; sin.sin_port = htons(ctrl->data_port); inet_aton(ctrl->data_address, &(sin.sin_addr)); rc = connect(ctrl->data_sd, (struct sockaddr *)&sin, len); if (rc == -1 && EINPROGRESS != errno) { ERR(errno, "Failed connecting data socket to client"); close(ctrl->data_sd); ctrl->data_sd = -1; return -1; } DBG("Connected successfully to client's previously requested address:PORT %s:%d", ctrl->data_address, ctrl->data_port); return 0; } /* Previous PASV command, accept connect from client */ if (ctrl->data_listen_sd > 0) { const int const_int_1 = 1; int retries = 3; char client_ip[100]; retry: ctrl->data_sd = accept(ctrl->data_listen_sd, (struct sockaddr *)&sin, &len); if (-1 == ctrl->data_sd) { if (EAGAIN == errno && --retries) { sleep(1); goto retry; } ERR(errno, "Failed accepting connection from client"); return -1; } setsockopt(ctrl->data_sd, SOL_SOCKET, SO_KEEPALIVE, &const_int_1, sizeof(const_int_1)); set_nonblock(ctrl->data_sd); inet_ntop(AF_INET, &(sin.sin_addr), client_ip, INET_ADDRSTRLEN); DBG("Client PASV data connection from %s:%d", client_ip, ntohs(sin.sin_port)); close(ctrl->data_listen_sd); ctrl->data_listen_sd = -1; } return 0; } static int close_data_connection(ctrl_t *ctrl) { int ret = 0; DBG("Closing data connection ..."); /* PASV server listening socket */ if (ctrl->data_listen_sd > 0) { shutdown(ctrl->data_listen_sd, SHUT_RDWR); close(ctrl->data_listen_sd); ctrl->data_listen_sd = -1; ret++; } /* PASV client socket */ if (ctrl->data_sd > 0) { shutdown(ctrl->data_sd, SHUT_RDWR); close(ctrl->data_sd); ctrl->data_sd = -1; ret++; } /* PORT */ if (ctrl->data_address[0]) { ctrl->data_address[0] = 0; ctrl->data_port = 0; } return ret; } static int check_user_pass(ctrl_t *ctrl) { if (!ctrl->name[0]) return -1; if (!strcmp("anonymous", ctrl->name)) return 1; return 0; } static int do_abort(ctrl_t *ctrl) { if (ctrl->d || ctrl->d_num) { uev_io_stop(&ctrl->data_watcher); if (ctrl->d_num > 0) free(ctrl->d); ctrl->d_num = 0; ctrl->d = NULL; ctrl->i = 0; if (ctrl->file) free(ctrl->file); ctrl->file = NULL; } if (ctrl->file) { uev_io_stop(&ctrl->data_watcher); free(ctrl->file); ctrl->file = NULL; } if (ctrl->fp) { fclose(ctrl->fp); ctrl->fp = NULL; } ctrl->pending = 0; ctrl->offset = 0; return close_data_connection(ctrl); } static void handle_ABOR(ctrl_t *ctrl, char *arg) { DBG("Aborting any current transfer ..."); if (do_abort(ctrl)) send_msg(ctrl->sd, "426 Connection closed; transfer aborted.\r\n"); send_msg(ctrl->sd, "226 Closing data connection.\r\n"); } static void handle_USER(ctrl_t *ctrl, char *name) { if (ctrl->name[0]) { ctrl->name[0] = 0; ctrl->pass[0] = 0; } if (name) { 
strlcpy(ctrl->name, name, sizeof(ctrl->name)); if (check_user_pass(ctrl) == 1) { INFO("Guest logged in from %s", ctrl->clientaddr); send_msg(ctrl->sd, "230 Guest login OK, access restrictions apply.\r\n"); } else { send_msg(ctrl->sd, "331 Login OK, please enter password.\r\n"); } } else { send_msg(ctrl->sd, "530 You must input your name.\r\n"); } } static void handle_PASS(ctrl_t *ctrl, char *pass) { if (!ctrl->name[0]) { send_msg(ctrl->sd, "503 No username given.\r\n"); return; } strlcpy(ctrl->pass, pass, sizeof(ctrl->pass)); if (check_user_pass(ctrl) < 0) { LOG("User %s from %s, invalid password!", ctrl->name, ctrl->clientaddr); send_msg(ctrl->sd, "530 username or password is unacceptable\r\n"); return; } INFO("User %s login from %s", ctrl->name, ctrl->clientaddr); send_msg(ctrl->sd, "230 Guest login OK, access restrictions apply.\r\n"); } static void handle_SYST(ctrl_t *ctrl, char *arg) { char system[] = "215 UNIX Type: L8\r\n"; send_msg(ctrl->sd, system); } static void handle_TYPE(ctrl_t *ctrl, char *argument) { char type[24] = "200 Type set to I.\r\n"; char unknown[] = "501 Invalid argument to TYPE.\r\n"; if (!argument) argument = "Z"; switch (argument[0]) { case 'A': ctrl->type = TYPE_A; /* ASCII */ break; case 'I': ctrl->type = TYPE_I; /* IMAGE/BINARY */ break; default: send_msg(ctrl->sd, unknown); return; } type[16] = argument[0]; send_msg(ctrl->sd, type); } static void handle_PWD(ctrl_t *ctrl, char *arg) { char buf[sizeof(ctrl->cwd) + 10]; snprintf(buf, sizeof(buf), "257 \"%s\"\r\n", ctrl->cwd); send_msg(ctrl->sd, buf); } static void handle_CWD(ctrl_t *ctrl, char *path) { struct stat st; char *dir; if (!path) goto done; /* * Some FTP clients, most notably Chrome, use CWD to check if an * entry is a file or directory. */ dir = compose_abspath(ctrl, path); if (!dir || stat(dir, &st) || !S_ISDIR(st.st_mode)) { DBG("chrooted:%d, ctrl->cwd: %s, home:%s, dir:%s, len:%zd, dirlen:%zd", chrooted, ctrl->cwd, home, dir, strlen(home), strlen(dir)); send_msg(ctrl->sd, "550 No such directory.\r\n"); return; } if (!chrooted) { size_t len = strlen(home); DBG("non-chrooted CWD, home:%s, dir:%s, len:%zd, dirlen:%zd", home, dir, len, strlen(dir)); dir += len; } snprintf(ctrl->cwd, sizeof(ctrl->cwd), "%s", dir); if (ctrl->cwd[0] == 0) snprintf(ctrl->cwd, sizeof(ctrl->cwd), "/"); done: DBG("New CWD: '%s'", ctrl->cwd); send_msg(ctrl->sd, "250 OK\r\n"); } static void handle_CDUP(ctrl_t *ctrl, char *path) { handle_CWD(ctrl, ".."); } static void handle_PORT(ctrl_t *ctrl, char *str) { int a, b, c, d, e, f; char addr[INET_ADDRSTRLEN]; struct sockaddr_in sin; if (ctrl->data_sd > 0) { uev_io_stop(&ctrl->data_watcher); close(ctrl->data_sd); ctrl->data_sd = -1; } /* Convert PORT command's argument to IP address + port */ sscanf(str, "%d,%d,%d,%d,%d,%d", &a, &b, &c, &d, &e, &f); snprintf(addr, sizeof(addr), "%d.%d.%d.%d", a, b, c, d); /* Check IPv4 address using inet_aton(), throw away converted result */ if (!inet_aton(addr, &(sin.sin_addr))) { ERR(0, "Invalid address '%s' given to PORT command", addr); send_msg(ctrl->sd, "500 Illegal PORT command.\r\n"); return; } strlcpy(ctrl->data_address, addr, sizeof(ctrl->data_address)); ctrl->data_port = e * 256 + f; DBG("Client PORT command accepted for %s:%d", ctrl->data_address, ctrl->data_port); send_msg(ctrl->sd, "200 PORT command successful.\r\n"); } static void handle_EPRT(ctrl_t *ctrl, char *str) { send_msg(ctrl->sd, "502 Command not implemented.\r\n"); } static char *mode_to_str(mode_t m) { static char str[11]; snprintf(str, sizeof(str), "%c%c%c%c%c%c%c%c%c%c", 
S_ISDIR(m) ? 'd' : '-', m & S_IRUSR ? 'r' : '-', m & S_IWUSR ? 'w' : '-', m & S_IXUSR ? 'x' : '-', m & S_IRGRP ? 'r' : '-', m & S_IWGRP ? 'w' : '-', m & S_IXGRP ? 'x' : '-', m & S_IROTH ? 'r' : '-', m & S_IWOTH ? 'w' : '-', m & S_IXOTH ? 'x' : '-'); return str; } static char *time_to_str(time_t mtime) { struct tm *t = localtime(&mtime); static char str[20]; setlocale(LC_TIME, "C"); strftime(str, sizeof(str), "%b %e %H:%M", t); return str; } static char *mlsd_time(time_t mtime) { struct tm *t = localtime(&mtime); static char str[20]; strftime(str, sizeof(str), "%Y%m%d%H%M%S", t); return str; } static const char *mlsd_type(char *name, int mode) { if (!strcmp(name, ".")) return "cdir"; if (!strcmp(name, "..")) return "pdir"; return S_ISDIR(mode) ? "dir" : "file"; } void mlsd_fact(char fact, char *buf, size_t len, char *name, char *perms, struct stat *st) { char size[20]; switch (fact) { case 'm': strlcat(buf, "modify=", len); strlcat(buf, mlsd_time(st->st_mtime), len); break; case 'p': strlcat(buf, "perm=", len); strlcat(buf, perms, len); break; case 't': strlcat(buf, "type=", len); strlcat(buf, mlsd_type(name, st->st_mode), len); break; case 's': if (S_ISDIR(st->st_mode)) return; snprintf(size, sizeof(size), "size=%" PRIu64, st->st_size); strlcat(buf, size, len); break; default: return; } strlcat(buf, ";", len); } static void mlsd_printf(ctrl_t *ctrl, char *buf, size_t len, char *path, char *name, struct stat *st) { char perms[10] = ""; int ro = !access(path, R_OK); int rw = !access(path, W_OK); if (S_ISDIR(st->st_mode)) { /* XXX: Verify 'e' by checking that we can CD to the 'name' */ if (ro) strlcat(perms, "le", sizeof(perms)); if (rw) strlcat(perms, "pc", sizeof(perms)); /* 'd' RMD, 'm' MKD */ } else { if (ro) strlcat(perms, "r", sizeof(perms)); if (rw) strlcat(perms, "w", sizeof(perms)); /* 'f' RNFR, 'd' DELE */ } memset(buf, 0, len); if (ctrl->d_num == -1 && (ctrl->list_mode & 0x0F) == 2) strlcat(buf, " ", len); for (int i = 0; ctrl->facts[i]; i++) mlsd_fact(ctrl->facts[i], buf, len, name, perms, st); strlcat(buf, " ", len); strlcat(buf, name, len); strlcat(buf, "\r\n", len); } static int list_printf(ctrl_t *ctrl, char *buf, size_t len, char *path, char *name) { int dirs; int mode = ctrl->list_mode; struct stat st; if (stat(path, &st)) return -1; dirs = mode & 0xF0; mode = mode & 0x0F; if (dirs && !S_ISDIR(st.st_mode)) return 1; if (!dirs && S_ISDIR(st.st_mode)) return 1; switch (mode) { case 3: /* MLSD */ /* fallthrough */ case 2: /* MLST */ mlsd_printf(ctrl, buf, len, path, name, &st); break; case 1: /* NLST */ snprintf(buf, len, "%s\r\n", name); break; case 0: /* LIST */ snprintf(buf, len, "%s 1 %5d %5d %12" PRIu64 " %s %s\r\n", mode_to_str(st.st_mode), 0, 0, (uint64_t)st.st_size, time_to_str(st.st_mtime), name); break; } return 0; } static void do_MLST(ctrl_t *ctrl) { size_t len = 0; char buf[512] = { 0 }; int sd = ctrl->sd; if (ctrl->data_sd != -1) sd = ctrl->data_sd; snprintf(buf, sizeof(buf), "250- Listing %s\r\n", ctrl->file); len = strlen(buf); if (list_printf(ctrl, &buf[len], sizeof(buf) - len, ctrl->file, basename(ctrl->file))) { do_abort(ctrl); send_msg(ctrl->sd, "550 No such file or directory.\r\n"); return; } strlcat(buf, "250 End.\r\n", sizeof(buf)); send_msg(sd, buf); } static void do_MLSD(ctrl_t *ctrl) { char buf[512] = { 0 }; if (list_printf(ctrl, buf, sizeof(buf), ctrl->file, basename(ctrl->file))) { do_abort(ctrl); send_msg(ctrl->sd, "550 No such file or directory.\r\n"); return; } send_msg(ctrl->data_sd, buf); send_msg(ctrl->sd, "226 Transfer complete.\r\n"); } 
static void do_LIST(uev_t *w, void *arg, int events) { ctrl_t *ctrl = (ctrl_t *)arg; struct timeval tv; ssize_t bytes; char buf[BUFFER_SIZE] = { 0 }; if (UEV_ERROR == events || UEV_HUP == events) { uev_io_start(w); return; } /* Reset inactivity timer. */ uev_timer_set(&ctrl->timeout_watcher, INACTIVITY_TIMER, 0); if (ctrl->d_num == -1) { if (ctrl->list_mode == 3) do_MLSD(ctrl); else do_MLST(ctrl); do_abort(ctrl); return; } gettimeofday(&tv, NULL); if (tv.tv_sec - ctrl->tv.tv_sec > 3) { DBG("Sending LIST entry %d of %d to %s ...", ctrl->i, ctrl->d_num, ctrl->clientaddr); ctrl->tv.tv_sec = tv.tv_sec; } ctrl->list_mode |= (ctrl->pending ? 0 : 0x80); while (ctrl->i < ctrl->d_num) { struct dirent *entry; char *name, *path; char cwd[PATH_MAX]; entry = ctrl->d[ctrl->i++]; name = entry->d_name; DBG("Found directory entry %s", name); if ((!strcmp(name, ".") || !strcmp(name, "..")) && ctrl->list_mode < 2) continue; snprintf(cwd, sizeof(cwd), "%s%s%s", ctrl->file, ctrl->file[strlen(ctrl->file) - 1] == '/' ? "" : "/", name); path = compose_path(ctrl, cwd); if (!path) { fail: LOGIT(LOG_INFO, errno, "Failed reading status for %s", path ? path : name); continue; } switch (list_printf(ctrl, buf, sizeof(buf), path, name)) { case -1: goto fail; case 1: continue; default: break; } DBG("LIST %s", buf); free(entry); bytes = send(ctrl->data_sd, buf, strlen(buf), 0); if (-1 == bytes) { if (ECONNRESET == errno) DBG("Connection reset by client."); else ERR(errno, "Failed sending file %s to client", ctrl->file); while (ctrl->i < ctrl->d_num) { struct dirent *entry = ctrl->d[ctrl->i++]; free(entry); } do_abort(ctrl); send_msg(ctrl->sd, "426 TCP connection was established but then broken!\r\n"); } return; } ctrl->list_mode &= 0x0F; /* Rewind and list files */ if (ctrl->pending == 0) { ctrl->pending++; ctrl->i = 0; return; } do_abort(ctrl); send_msg(ctrl->sd, "226 Transfer complete.\r\n"); } static void list(ctrl_t *ctrl, char *arg, int mode) { char *path; if (string_valid(arg)) { char *ptr, *quot; /* Check if client sends ls arguments ... */ ptr = arg; while (*ptr) { if (isspace(*ptr)) ptr++; if (*ptr == '-') { while (*ptr && !isspace(*ptr)) ptr++; } break; } /* Strip any "" from "<arg>" */ while ((quot = strchr(ptr, '"'))) { char *ptr2; ptr2 = strchr(&quot[1], '"'); if (ptr2) { memmove(ptr2, &ptr2[1], strlen(ptr2)); memmove(quot, &quot[1], strlen(quot)); } } arg = ptr; } if (mode >= 2) path = compose_abspath(ctrl, arg); else path = compose_path(ctrl, arg); if (!path) { send_msg(ctrl->sd, "550 No such file or directory.\r\n"); return; } ctrl->list_mode = mode; ctrl->file = strdup(arg ? arg : ""); ctrl->i = 0; ctrl->d_num = scandir(path, &ctrl->d, NULL, alphasort); if (ctrl->d_num == -1) { send_msg(ctrl->sd, "550 No such file or directory.\r\n"); DBG("Failed reading directory '%s': %s", path, strerror(errno)); return; } DBG("Reading directory %s ... 
%d number of entries", path, ctrl->d_num); if (ctrl->data_sd > -1) { send_msg(ctrl->sd, "125 Data connection already open; transfer starting.\r\n"); uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_LIST, ctrl, ctrl->data_sd, UEV_WRITE); return; } do_PORT(ctrl, 1); } static void handle_LIST(ctrl_t *ctrl, char *arg) { list(ctrl, arg, 0); } static void handle_NLST(ctrl_t *ctrl, char *arg) { list(ctrl, arg, 1); } static void handle_MLST(ctrl_t *ctrl, char *arg) { list(ctrl, arg, 2); } static void handle_MLSD(ctrl_t *ctrl, char *arg) { list(ctrl, arg, 3); } static void do_pasv_connection(uev_t *w, void *arg, int events) { ctrl_t *ctrl = (ctrl_t *)arg; int rc = 0; if (UEV_ERROR == events || UEV_HUP == events) { DBG("error on data_listen_sd ..."); uev_io_start(w); return; } DBG("Event on data_listen_sd ..."); uev_io_stop(&ctrl->data_watcher); if (open_data_connection(ctrl)) return; switch (ctrl->pending) { case 3: /* fall-through */ case 2: if (ctrl->offset) rc = fseek(ctrl->fp, ctrl->offset, SEEK_SET); if (rc) { do_abort(ctrl); send_msg(ctrl->sd, "551 Failed seeking to that position in file.\r\n"); return; } /* fall-through */ case 1: break; default: DBG("No pending command, waiting ..."); return; } switch (ctrl->pending) { case 3: /* STOR */ DBG("Pending STOR, starting ..."); uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_STOR, ctrl, ctrl->data_sd, UEV_READ); break; case 2: /* RETR */ DBG("Pending RETR, starting ..."); uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_RETR, ctrl, ctrl->data_sd, UEV_WRITE); break; case 1: /* LIST */ DBG("Pending LIST, starting ..."); uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_LIST, ctrl, ctrl->data_sd, UEV_WRITE); break; } if (ctrl->pending == 1 && ctrl->list_mode == 2) send_msg(ctrl->sd, "150 Opening ASCII mode data connection for MLSD.\r\n"); else send_msg(ctrl->sd, "150 Data connection accepted; transfer starting.\r\n"); ctrl->pending = 0; } static int do_PASV(ctrl_t *ctrl, char *arg, struct sockaddr *data, socklen_t *len) { struct sockaddr_in server; if (ctrl->data_sd > 0) { close(ctrl->data_sd); ctrl->data_sd = -1; } if (ctrl->data_listen_sd > 0) close(ctrl->data_listen_sd); ctrl->data_listen_sd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0); if (ctrl->data_listen_sd < 0) { ERR(errno, "Failed opening data server socket"); send_msg(ctrl->sd, "426 Internal server error.\r\n"); return 1; } memset(&server, 0, sizeof(server)); server.sin_family = AF_INET; server.sin_addr.s_addr = inet_addr(ctrl->serveraddr); server.sin_port = htons(0); if (bind(ctrl->data_listen_sd, (struct sockaddr *)&server, sizeof(server)) < 0) { ERR(errno, "Failed binding to client socket"); send_msg(ctrl->sd, "426 Internal server error.\r\n"); close(ctrl->data_listen_sd); ctrl->data_listen_sd = -1; return 1; } INFO("Data server port estabished. 
Waiting for client to connect ..."); if (listen(ctrl->data_listen_sd, 1) < 0) { ERR(errno, "Client data connection failure"); send_msg(ctrl->sd, "426 Internal server error.\r\n"); close(ctrl->data_listen_sd); ctrl->data_listen_sd = -1; return 1; } memset(data, 0, sizeof(*data)); if (-1 == getsockname(ctrl->data_listen_sd, data, len)) { ERR(errno, "Cannot determine our address, need it if client should connect to us"); close(ctrl->data_listen_sd); ctrl->data_listen_sd = -1; return 1; } uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_pasv_connection, ctrl, ctrl->data_listen_sd, UEV_READ); return 0; } static void handle_PASV(ctrl_t *ctrl, char *arg) { struct sockaddr_in data; socklen_t len = sizeof(data); char *msg, *p, buf[200]; int port; if (do_PASV(ctrl, arg, (struct sockaddr *)&data, &len)) return; /* Convert server IP address and port to comma separated list */ msg = strdup(ctrl->serveraddr); if (!msg) { send_msg(ctrl->sd, "426 Internal server error.\r\n"); exit(1); } p = msg; while ((p = strchr(p, '.'))) *p++ = ','; port = ntohs(data.sin_port); snprintf(buf, sizeof(buf), "227 Entering Passive Mode (%s,%d,%d)\r\n", msg, port / 256, port % 256); send_msg(ctrl->sd, buf); free(msg); } static void handle_EPSV(ctrl_t *ctrl, char *arg) { struct sockaddr_in data; socklen_t len = sizeof(data); char buf[200]; if (string_valid(arg) && string_case_compare(arg, "ALL")) { send_msg(ctrl->sd, "200 Command OK\r\n"); return; } if (do_PASV(ctrl, arg, (struct sockaddr *)&data, &len)) return; snprintf(buf, sizeof(buf), "229 Entering Extended Passive Mode (|||%d|)\r\n", ntohs(data.sin_port)); send_msg(ctrl->sd, buf); } static void do_RETR(uev_t *w, void *arg, int events) { ctrl_t *ctrl = (ctrl_t *)arg; struct timeval tv; ssize_t bytes; size_t num; char buf[BUFFER_SIZE]; if (UEV_ERROR == events || UEV_HUP == events) { DBG("error on data_sd ..."); uev_io_start(w); return; } if (!ctrl->fp) { DBG("no fp for RETR, bailing."); return; } num = fread(buf, sizeof(char), sizeof(buf), ctrl->fp); if (!num) { if (feof(ctrl->fp)) INFO("User %s from %s downloaded %s", ctrl->name, ctrl->clientaddr, ctrl->file); else if (ferror(ctrl->fp)) ERR(0, "Error while reading %s", ctrl->file); do_abort(ctrl); send_msg(ctrl->sd, "226 Transfer complete.\r\n"); return; } /* Reset inactivity timer. */ uev_timer_set(&ctrl->timeout_watcher, INACTIVITY_TIMER, 0); gettimeofday(&tv, NULL); if (tv.tv_sec - ctrl->tv.tv_sec > 3) { DBG("Sending %zd bytes of %s to %s ...", num, ctrl->file, ctrl->clientaddr); ctrl->tv.tv_sec = tv.tv_sec; } bytes = send(ctrl->data_sd, buf, num, 0); if (-1 == bytes) { if (ECONNRESET == errno) DBG("Connection reset by client."); else ERR(errno, "Failed sending file %s to client", ctrl->file); do_abort(ctrl); send_msg(ctrl->sd, "426 TCP connection was established but then broken!\r\n"); } } /* * Check if previous command was PORT, then connect to client and * transfer file/listing similar to what's done for PASV conns. 
*/ static void do_PORT(ctrl_t *ctrl, int pending) { if (!ctrl->data_address[0]) { /* Check if previous command was PASV */ if (ctrl->data_sd == -1 && ctrl->data_listen_sd == -1) { if (pending == 1 && ctrl->d_num == -1) do_MLST(ctrl); return; } ctrl->pending = pending; return; } if (open_data_connection(ctrl)) { do_abort(ctrl); send_msg(ctrl->sd, "425 TCP connection cannot be established.\r\n"); return; } if (pending != 1 || ctrl->list_mode != 2) send_msg(ctrl->sd, "150 Data connection opened; transfer starting.\r\n"); switch (pending) { case 3: uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_STOR, ctrl, ctrl->data_sd, UEV_READ); break; case 2: uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_RETR, ctrl, ctrl->data_sd, UEV_WRITE); break; case 1: uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_LIST, ctrl, ctrl->data_sd, UEV_WRITE); break; } ctrl->pending = 0; } static void handle_RETR(ctrl_t *ctrl, char *file) { FILE *fp; char *path; struct stat st; path = compose_abspath(ctrl, file); if (!path || stat(path, &st) || !S_ISREG(st.st_mode)) { send_msg(ctrl->sd, "550 Not a regular file.\r\n"); return; } fp = fopen(path, "rb"); if (!fp) { if (errno != ENOENT) ERR(errno, "Failed RETR %s for %s", path, ctrl->clientaddr); send_msg(ctrl->sd, "451 Trouble to RETR file.\r\n"); return; } ctrl->fp = fp; ctrl->file = strdup(file); if (ctrl->data_sd > -1) { if (ctrl->offset) { DBG("Previous REST %ld of file size %ld", ctrl->offset, st.st_size); if (fseek(fp, ctrl->offset, SEEK_SET)) { do_abort(ctrl); send_msg(ctrl->sd, "551 Failed seeking to that position in file.\r\n"); return; } } send_msg(ctrl->sd, "125 Data connection already open; transfer starting.\r\n"); uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_RETR, ctrl, ctrl->data_sd, UEV_WRITE); return; } do_PORT(ctrl, 2); } static void handle_MDTM(ctrl_t *ctrl, char *file) { struct stat st; struct tm *tm; char *path, *ptr; char *mtime = NULL; char buf[80]; /* Request to set mtime, ncftp does this */ ptr = strchr(file, ' '); if (ptr) { *ptr++ = 0; mtime = file; file = ptr; } path = compose_abspath(ctrl, file); if (!path || stat(path, &st) || !S_ISREG(st.st_mode)) { send_msg(ctrl->sd, "550 Not a regular file.\r\n"); return; } if (mtime) { struct timespec times[2] = { { 0, UTIME_OMIT }, { 0, 0 } }; struct tm tm; int rc; if (!strptime(mtime, "%Y%m%d%H%M%S", &tm)) { fail: send_msg(ctrl->sd, "550 Invalid time format\r\n"); return; } times[1].tv_sec = mktime(&tm); rc = utimensat(0, path, times, 0); if (rc) { ERR(errno, "Failed setting MTIME %s of %s", mtime, file); goto fail; } (void)stat(path, &st); } tm = gmtime(&st.st_mtime); strftime(buf, sizeof(buf), "213 %Y%m%d%H%M%S\r\n", tm); send_msg(ctrl->sd, buf); } static void do_STOR(uev_t *w, void *arg, int events) { ctrl_t *ctrl = (ctrl_t *)arg; struct timeval tv; ssize_t bytes; size_t num; char buf[BUFFER_SIZE]; if (UEV_ERROR == events || UEV_HUP == events) { DBG("error on data_sd ..."); uev_io_start(w); return; } if (!ctrl->fp) { DBG("no fp for STOR, bailing."); return; } /* Reset inactivity timer. 
*/ uev_timer_set(&ctrl->timeout_watcher, INACTIVITY_TIMER, 0); bytes = recv(ctrl->data_sd, buf, sizeof(buf), 0); if (bytes < 0) { if (ECONNRESET == errno) DBG("Connection reset by client."); else ERR(errno, "Failed receiving file %s from client", ctrl->file); do_abort(ctrl); send_msg(ctrl->sd, "426 TCP connection was established but then broken!\r\n"); return; } if (bytes == 0) { INFO("User %s at %s uploaded file %s", ctrl->name, ctrl->clientaddr, ctrl->file); do_abort(ctrl); send_msg(ctrl->sd, "226 Transfer complete.\r\n"); return; } gettimeofday(&tv, NULL); if (tv.tv_sec - ctrl->tv.tv_sec > 3) { DBG("Receiving %zd bytes of %s from %s ...", bytes, ctrl->file, ctrl->clientaddr); ctrl->tv.tv_sec = tv.tv_sec; } num = fwrite(buf, 1, bytes, ctrl->fp); if ((size_t)bytes != num) ERR(errno, "552 Disk full."); } static void handle_STOR(ctrl_t *ctrl, char *file) { FILE *fp = NULL; char *path; int rc = 0; path = compose_abspath(ctrl, file); if (!path) { INFO("Invalid path for %s: %m", file); goto fail; } DBG("Trying to write to %s ...", path); fp = fopen(path, "wb"); if (!fp) { /* If EACCESS client is trying to do something disallowed */ ERR(errno, "Failed writing %s", path); fail: send_msg(ctrl->sd, "451 Trouble storing file.\r\n"); do_abort(ctrl); return; } ctrl->fp = fp; ctrl->file = strdup(file); if (ctrl->data_sd > -1) { if (ctrl->offset) rc = fseek(fp, ctrl->offset, SEEK_SET); if (rc) { do_abort(ctrl); send_msg(ctrl->sd, "551 Failed seeking to that position in file.\r\n"); return; } send_msg(ctrl->sd, "125 Data connection already open; transfer starting.\r\n"); uev_io_init(ctrl->ctx, &ctrl->data_watcher, do_STOR, ctrl, ctrl->data_sd, UEV_READ); return; } do_PORT(ctrl, 3); } static void handle_DELE(ctrl_t *ctrl, char *file) { char *path; path = compose_abspath(ctrl, file); if (!path) { ERR(errno, "Cannot find %s", file); goto fail; } if (remove(path)) { if (ENOENT == errno) fail: send_msg(ctrl->sd, "550 No such file or directory.\r\n"); else if (EPERM == errno) send_msg(ctrl->sd, "550 Not allowed to remove file or directory.\r\n"); else send_msg(ctrl->sd, "550 Unknown error.\r\n"); return; } send_msg(ctrl->sd, "200 Command OK\r\n"); } static void handle_MKD(ctrl_t *ctrl, char *arg) { char *path; path = compose_abspath(ctrl, arg); if (!path) { INFO("Invalid path for %s: %m", arg); goto fail; } if (mkdir(path, 0755)) { if (EPERM == errno) fail: send_msg(ctrl->sd, "550 Not allowed to create directory.\r\n"); else send_msg(ctrl->sd, "550 Unknown error.\r\n"); return; } send_msg(ctrl->sd, "200 Command OK\r\n"); } static void handle_RMD(ctrl_t *ctrl, char *arg) { handle_DELE(ctrl, arg); } static void handle_REST(ctrl_t *ctrl, char *arg) { const char *errstr; char buf[80]; if (!string_valid(arg)) { send_msg(ctrl->sd, "550 Invalid argument.\r\n"); return; } ctrl->offset = strtonum(arg, 0, INT64_MAX, &errstr); snprintf(buf, sizeof(buf), "350 Restarting at %ld. 
Send STOR or RETR to continue transfer.\r\n", ctrl->offset); send_msg(ctrl->sd, buf); } static size_t num_nl(char *file) { FILE *fp; char buf[80]; size_t len, num = 0; fp = fopen(file, "r"); if (!fp) return 0; do { char *ptr = buf; len = fread(buf, sizeof(char), sizeof(buf) - 1, fp); if (len > 0) { buf[len] = 0; while ((ptr = strchr(ptr, '\n'))) { ptr++; num++; } } } while (len > 0); fclose(fp); return num; } static void handle_SIZE(ctrl_t *ctrl, char *file) { char *path; char buf[80]; size_t extralen = 0; struct stat st; path = compose_abspath(ctrl, file); if (!path || stat(path, &st) || S_ISDIR(st.st_mode)) { send_msg(ctrl->sd, "550 No such file, or argument is a directory.\r\n"); return; } DBG("SIZE %s", path); if (ctrl->type == TYPE_A) extralen = num_nl(path); snprintf(buf, sizeof(buf), "213 %" PRIu64 "\r\n", (uint64_t)(st.st_size + extralen)); send_msg(ctrl->sd, buf); } /* No operation - used as session keepalive by clients. */ static void handle_NOOP(ctrl_t *ctrl, char *arg) { send_msg(ctrl->sd, "200 NOOP OK.\r\n"); } #if 0 static void handle_RNFR(ctrl_t *ctrl, char *arg) { } static void handle_RNTO(ctrl_t *ctrl, char *arg) { } #endif static void handle_QUIT(ctrl_t *ctrl, char *arg) { send_msg(ctrl->sd, "221 Goodbye.\r\n"); uev_exit(ctrl->ctx); } static void handle_CLNT(ctrl_t *ctrl, char *arg) { send_msg(ctrl->sd, "200 CLNT\r\n"); } static void handle_OPTS(ctrl_t *ctrl, char *arg) { /* OPTS MLST type;size;modify;perm; */ if (strstr(arg, "MLST")) { size_t i = 0; char *ptr; char buf[42] = "200 MLST OPTS "; char facts[10] = { 0 }; ptr = strtok(arg + 4, " \t;"); while (ptr && i < sizeof(facts) - 1) { if (!strcmp(ptr, "modify") || !strcmp(ptr, "perm") || !strcmp(ptr, "size") || !strcmp(ptr, "type")) { facts[i++] = ptr[0]; strlcat(buf, ptr, sizeof(buf)); strlcat(buf, ";", sizeof(buf)); } ptr = strtok(NULL, ";"); } strlcat(buf, "\r\n", sizeof(buf)); DBG("New MLSD facts: %s", facts); strlcpy(ctrl->facts, facts, sizeof(ctrl->facts)); send_msg(ctrl->sd, buf); } else send_msg(ctrl->sd, "200 UTF8 OPTS ON\r\n"); } static void handle_HELP(ctrl_t *ctrl, char *arg) { int i = 0; char buf[80]; ftp_cmd_t *cmd; if (string_valid(arg) && !string_compare(arg, "SITE")) { send_msg(ctrl->sd, "500 command HELP does not take any arguments on this server.\r\n"); return; } snprintf(ctrl->buf, ctrl->bufsz, "214-The following commands are recognized."); for (cmd = &supported[0]; cmd->command; cmd++, i++) { if (i % 14 == 0) strlcat(ctrl->buf, "\r\n", ctrl->bufsz); snprintf(buf, sizeof(buf), " %s", cmd->command); strlcat(ctrl->buf, buf, ctrl->bufsz); } snprintf(buf, sizeof(buf), "\r\n214 Help OK.\r\n"); strlcat(ctrl->buf, buf, ctrl->bufsz); send_msg(ctrl->sd, ctrl->buf); } static void handle_FEAT(ctrl_t *ctrl, char *arg) { snprintf(ctrl->buf, ctrl->bufsz, "211-Features:\r\n" " EPSV\r\n" " PASV\r\n" " SIZE\r\n" " UTF8\r\n" " REST STREAM\r\n" " MLST modify*;perm*;size*;type*;\r\n" "211 End\r\n"); send_msg(ctrl->sd, ctrl->buf); } static void handle_UNKNOWN(ctrl_t *ctrl, char *command) { char buf[128]; snprintf(buf, sizeof(buf), "500 command '%s' not recognized by server.\r\n", command); send_msg(ctrl->sd, buf); } #define COMMAND(NAME) { #NAME, handle_ ## NAME } static ftp_cmd_t supported[] = { COMMAND(ABOR), COMMAND(DELE), COMMAND(USER), COMMAND(PASS), COMMAND(SYST), COMMAND(TYPE), COMMAND(PORT), COMMAND(EPRT), COMMAND(RETR), COMMAND(MKD), COMMAND(RMD), COMMAND(REST), COMMAND(MDTM), COMMAND(PASV), COMMAND(EPSV), COMMAND(QUIT), COMMAND(LIST), COMMAND(NLST), COMMAND(MLST), COMMAND(MLSD), COMMAND(CLNT), COMMAND(OPTS), 
COMMAND(PWD), COMMAND(STOR), COMMAND(CWD), COMMAND(CDUP), COMMAND(SIZE), COMMAND(NOOP), COMMAND(HELP), COMMAND(FEAT), { NULL, NULL } }; static void child_exit(uev_t *w, void *arg, int events) { DBG("Child exiting ..."); uev_exit(w->ctx); } static void read_client_command(uev_t *w, void *arg, int events) { char *command, *argument; ctrl_t *ctrl = (ctrl_t *)arg; ftp_cmd_t *cmd; if (UEV_ERROR == events || UEV_HUP == events) { uev_io_start(w); return; } /* Reset inactivity timer. */ uev_timer_set(&ctrl->timeout_watcher, INACTIVITY_TIMER, 0); if (recv_msg(w->fd, ctrl->buf, ctrl->bufsz, &command, &argument)) { DBG("Short read, exiting."); uev_exit(ctrl->ctx); return; } if (!string_valid(command)) return; if (string_match(command, "FF F4")) { DBG("Ignoring IAC command, client should send ABOR as well."); return; } for (cmd = &supported[0]; cmd->command; cmd++) { if (string_compare(command, cmd->command)) { cmd->cb(ctrl, argument); return; } } handle_UNKNOWN(ctrl, command); } static void ftp_command(ctrl_t *ctrl) { uev_t sigterm_watcher; ctrl->bufsz = BUFFER_SIZE * sizeof(char); ctrl->buf = malloc(ctrl->bufsz); if (!ctrl->buf) { WARN(errno, "FTP session failed allocating buffer"); exit(1); } snprintf(ctrl->buf, ctrl->bufsz, "220 %s (%s) ready.\r\n", prognm, VERSION); send_msg(ctrl->sd, ctrl->buf); uev_signal_init(ctrl->ctx, &sigterm_watcher, child_exit, NULL, SIGTERM); uev_io_init(ctrl->ctx, &ctrl->io_watcher, read_client_command, ctrl, ctrl->sd, UEV_READ); uev_run(ctrl->ctx, 0); } int ftp_session(uev_ctx_t *ctx, int sd) { int pid = 0; ctrl_t *ctrl; socklen_t len; ctrl = new_session(ctx, sd, &pid); if (!ctrl) { if (pid < 0) { shutdown(sd, SHUT_RDWR); close(sd); } return pid; } len = sizeof(ctrl->server_sa); if (-1 == getsockname(sd, (struct sockaddr *)&ctrl->server_sa, &len)) { ERR(errno, "Cannot determine our address"); goto fail; } convert_address(&ctrl->server_sa, ctrl->serveraddr, sizeof(ctrl->serveraddr)); len = sizeof(ctrl->client_sa); if (-1 == getpeername(sd, (struct sockaddr *)&ctrl->client_sa, &len)) { ERR(errno, "Cannot determine client address"); goto fail; } convert_address(&ctrl->client_sa, ctrl->clientaddr, sizeof(ctrl->clientaddr)); ctrl->type = TYPE_A; ctrl->data_listen_sd = -1; ctrl->data_sd = -1; ctrl->name[0] = 0; ctrl->pass[0] = 0; ctrl->data_address[0] = 0; strlcpy(ctrl->facts, "mpst", sizeof(ctrl->facts)); INFO("Client connection from %s", ctrl->clientaddr); ftp_command(ctrl); DBG("Client exiting, bye"); exit(del_session(ctrl, 1)); fail: free(ctrl); shutdown(sd, SHUT_RDWR); close(sd); return -1; } /** * Local Variables: * indent-tabs-mode: t * c-file-style: "linux" * End: */
null
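Note (not part of the dataset row above): the uftpd FTP engine in this record routes every received verb through a static table of {command, handler} pairs; see the supported[] array and the lookup loop in read_client_command() near the end of the code. The snippet below is a minimal, standalone sketch of that dispatch-table pattern. All demo_ names are hypothetical and it is not the project's actual API, only an illustration of the technique.

#include <stdio.h>
#include <string.h>

/* Hypothetical, minimal stand-in for the session context used above. */
typedef struct { int sd; } demo_ctrl_t;

typedef struct {
    const char *command;
    void (*cb)(demo_ctrl_t *ctrl, char *arg);
} demo_cmd_t;

static void demo_NOOP(demo_ctrl_t *ctrl, char *arg) { (void)ctrl; (void)arg; puts("200 NOOP OK."); }
static void demo_QUIT(demo_ctrl_t *ctrl, char *arg) { (void)ctrl; (void)arg; puts("221 Goodbye."); }

/* NULL-terminated command table, analogous to supported[] above. */
static demo_cmd_t demo_supported[] = {
    { "NOOP", demo_NOOP },
    { "QUIT", demo_QUIT },
    { NULL,   NULL      }
};

/* Look the verb up in the table and call its handler, the same shape of
 * loop that read_client_command() uses for the real command set. */
static void demo_dispatch(demo_ctrl_t *ctrl, const char *command, char *argument)
{
    demo_cmd_t *cmd;

    for (cmd = &demo_supported[0]; cmd->command; cmd++) {
        if (!strcmp(command, cmd->command)) {
            cmd->cb(ctrl, argument);
            return;
        }
    }
    printf("500 command '%s' not recognized by server.\r\n", command);
}

int main(void)
{
    demo_ctrl_t ctrl = { -1 };

    demo_dispatch(&ctrl, "NOOP", NULL);  /* known verb   */
    demo_dispatch(&ctrl, "FOO",  NULL);  /* unknown verb */
    demo_dispatch(&ctrl, "QUIT", NULL);
    return 0;
}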
201
CWE-787
CVE-2020-20740
Matt Davis (enferex): Original pdfresurrect, modified man page Francois Marier: Original man page, modified Makefile.in and modified configure.ac, potential security bug identifying (e.g., bad code detection). David Binderman: Finding a bug and offering a correction suggestion Ryan Schmidt: MacPorts Makefile work xambroz: Suggestions and Makefile enhancement. j0lamma: Identified a bug that can cause a corrupted buffer. capcorpscert0188, LeeSunWoo, ParkInChul: Identified a bug that can cause a corrupted buffer. rtfingc: Reported bugs regarding memory corruption and a leak. rwhitworth: Reported and helped isolate bugs. Created initial .gitignore. carter-yagemann: Reported and helped isolate a memory issue. rootup: Reported and helped isolate a memory issue. ZanderChang: Reported and identified bad code in kid page loading.
null
Matt Davis (enferex): Original pdfresurrect, modified man page Francois Marier: Original man page, modified Makefile.in and modified configure.ac, potential security bug identifying (e.g., bad code detection). David Binderman: Finding a bug and offering a correction suggestion Ryan Schmidt: MacPorts Makefile work xambroz: Suggestions and Makefile enhancement. j0lamma: Identified a bug that can cause a corrupted buffer. capcorpscert0188, LeeSunWoo, ParkInChul: Identified a bug that can cause a corrupted buffer. rtfingc: Reported bugs regarding memory corruption and a leak. rwhitworth: Reported and helped isolate bugs. Created initial .gitignore. carter-yagemann: Reported and helped isolate a memory issue. rootup: Reported and helped isolate a memory issue. ZanderChang: Reported and identified bad code in kid page loading. yifengchen-cc: Reported a header parsing bug.
null
202
CWE-787
CVE-2020-21050
/* * This file is derived from "stb_image.h" that is in public domain. * https://github.com/nothings/stb * * Hayaki Saito <saitoha@me.com> modified this and re-licensed * it under the MIT license. * * Copyright (c) 2014-2018 Hayaki Saito * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "config.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include "frame.h" #include "fromgif.h" /* * gif_context_t struct and start_xxx functions * * gif_context_t structure is our basic context used by all images, so it * contains all the IO context, plus some basic image information */ typedef struct { unsigned int img_x, img_y; int img_n, img_out_n; int buflen; unsigned char buffer_start[128]; unsigned char *img_buffer, *img_buffer_end; unsigned char *img_buffer_original; } gif_context_t; typedef struct { signed short prefix; unsigned char first; unsigned char suffix; } gif_lzw; typedef struct { int w, h; unsigned char *out; /* output buffer (always 4 components) */ int flags, bgindex, ratio, transparent, eflags; unsigned char pal[256][3]; unsigned char lpal[256][3]; gif_lzw codes[4096]; unsigned char *color_table; int parse, step; int lflags; int start_x, start_y; int max_x, max_y; int cur_x, cur_y; int line_size; int loop_count; int delay; int is_multiframe; int is_terminated; } gif_t; /* initialize a memory-decode context */ static unsigned char gif_get8(gif_context_t *s) { if (s->img_buffer < s->img_buffer_end) { return *s->img_buffer++; } return 0; } static int gif_get16le(gif_context_t *s) { int z = gif_get8(s); return z + (gif_get8(s) << 8); } static void gif_parse_colortable( gif_context_t /* in */ *s, unsigned char /* in */ pal[256][3], int /* in */ num_entries) { int i; for (i = 0; i < num_entries; ++i) { pal[i][2] = gif_get8(s); pal[i][1] = gif_get8(s); pal[i][0] = gif_get8(s); } } static SIXELSTATUS gif_load_header( gif_context_t /* in */ *s, gif_t /* in */ *g) { SIXELSTATUS status = SIXEL_FALSE; unsigned char version; if (gif_get8(s) != 'G') { goto end; } if (gif_get8(s) != 'I') { goto end; } if (gif_get8(s) != 'F') { goto end; } if (gif_get8(s) != '8') { goto end; } version = gif_get8(s); if (version != '7' && version != '9') { goto end; } if (gif_get8(s) != 'a') { goto end; } g->w = gif_get16le(s); g->h = gif_get16le(s); g->flags = gif_get8(s); g->bgindex = gif_get8(s); g->ratio = gif_get8(s); g->transparent = (-1); g->loop_count = (-1); if (g->flags & 0x80) { gif_parse_colortable(s, g->pal, 2 << (g->flags & 7)); } status = SIXEL_OK; end: return status; } 
static SIXELSTATUS gif_init_frame( sixel_frame_t /* in */ *frame, gif_t /* in */ *pg, unsigned char /* in */ *bgcolor, int /* in */ reqcolors, int /* in */ fuse_palette) { SIXELSTATUS status = SIXEL_OK; int i; int ncolors; frame->delay = pg->delay; ncolors = 2 << (pg->flags & 7); if (frame->palette == NULL) { frame->palette = (unsigned char *)sixel_allocator_malloc(frame->allocator, (size_t)(ncolors * 3)); } else if (frame->ncolors < ncolors) { sixel_allocator_free(frame->allocator, frame->palette); frame->palette = (unsigned char *)sixel_allocator_malloc(frame->allocator, (size_t)(ncolors * 3)); } if (frame->palette == NULL) { sixel_helper_set_additional_message( "gif_init_frame: sixel_allocator_malloc() failed."); status = SIXEL_BAD_ALLOCATION; goto end; } frame->ncolors = ncolors; if (frame->ncolors <= reqcolors && fuse_palette) { frame->pixelformat = SIXEL_PIXELFORMAT_PAL8; sixel_allocator_free(frame->allocator, frame->pixels); frame->pixels = (unsigned char *)sixel_allocator_malloc(frame->allocator, (size_t)(frame->width * frame->height)); if (frame->pixels == NULL) { sixel_helper_set_additional_message( "sixel_allocator_malloc() failed in gif_init_frame()."); status = SIXEL_BAD_ALLOCATION; goto end; } memcpy(frame->pixels, pg->out, (size_t)(frame->width * frame->height)); for (i = 0; i < frame->ncolors; ++i) { frame->palette[i * 3 + 0] = pg->color_table[i * 3 + 2]; frame->palette[i * 3 + 1] = pg->color_table[i * 3 + 1]; frame->palette[i * 3 + 2] = pg->color_table[i * 3 + 0]; } if (pg->lflags & 0x80) { if (pg->eflags & 0x01) { if (bgcolor) { frame->palette[pg->transparent * 3 + 0] = bgcolor[0]; frame->palette[pg->transparent * 3 + 1] = bgcolor[1]; frame->palette[pg->transparent * 3 + 2] = bgcolor[2]; } else { frame->transparent = pg->transparent; } } } else if (pg->flags & 0x80) { if (pg->eflags & 0x01) { if (bgcolor) { frame->palette[pg->transparent * 3 + 0] = bgcolor[0]; frame->palette[pg->transparent * 3 + 1] = bgcolor[1]; frame->palette[pg->transparent * 3 + 2] = bgcolor[2]; } else { frame->transparent = pg->transparent; } } } } else { frame->pixelformat = SIXEL_PIXELFORMAT_RGB888; frame->pixels = (unsigned char *)sixel_allocator_malloc(frame->allocator, (size_t)(pg->w * pg->h * 3)); if (frame->pixels == NULL) { sixel_helper_set_additional_message( "sixel_allocator_malloc() failed in gif_init_frame()."); status = SIXEL_BAD_ALLOCATION; goto end; } for (i = 0; i < pg->w * pg->h; ++i) { frame->pixels[i * 3 + 0] = pg->color_table[pg->out[i] * 3 + 2]; frame->pixels[i * 3 + 1] = pg->color_table[pg->out[i] * 3 + 1]; frame->pixels[i * 3 + 2] = pg->color_table[pg->out[i] * 3 + 0]; } } frame->multiframe = (pg->loop_count != (-1)); status = SIXEL_OK; end: return status; } static void gif_out_code( gif_t /* in */ *g, unsigned short /* in */ code ) { /* recurse to decode the prefixes, since the linked-list is backwards, and working backwards through an interleaved image would be nasty */ if (g->codes[code].prefix >= 0) { gif_out_code(g, (unsigned short)g->codes[code].prefix); } if (g->cur_y >= g->max_y) { return; } g->out[g->cur_x + g->cur_y] = g->codes[code].suffix; g->cur_x++; if (g->cur_x >= g->max_x) { g->cur_x = g->start_x; g->cur_y += g->step; while (g->cur_y >= g->max_y && g->parse > 0) { g->step = (1 << g->parse) * g->line_size; g->cur_y = g->start_y + (g->step >> 1); --g->parse; } } } static SIXELSTATUS gif_process_raster( gif_context_t /* in */ *s, gif_t /* in */ *g ) { SIXELSTATUS status = SIXEL_FALSE; unsigned char lzw_cs; signed int len, code; unsigned int first; signed int 
codesize, codemask, avail, oldcode, bits, valid_bits, clear; gif_lzw *p; lzw_cs = gif_get8(s); clear = 1 << lzw_cs; first = 1; codesize = lzw_cs + 1; codemask = (1 << codesize) - 1; bits = 0; valid_bits = 0; for (code = 0; code < clear; code++) { g->codes[code].prefix = -1; g->codes[code].first = (unsigned char) code; g->codes[code].suffix = (unsigned char) code; } /* support no starting clear code */ avail = clear + 2; oldcode = (-1); len = 0; for(;;) { if (valid_bits < codesize) { if (len == 0) { len = gif_get8(s); /* start new block */ if (len == 0) { return SIXEL_OK; } } --len; bits |= (signed int) gif_get8(s) << valid_bits; valid_bits += 8; } else { code = bits & codemask; bits >>= codesize; valid_bits -= codesize; /* @OPTIMIZE: is there some way we can accelerate the non-clear path? */ if (code == clear) { /* clear code */ codesize = lzw_cs + 1; codemask = (1 << codesize) - 1; avail = clear + 2; oldcode = -1; first = 0; } else if (code == clear + 1) { /* end of stream code */ s->img_buffer += len; while ((len = gif_get8(s)) > 0) { s->img_buffer += len; } return SIXEL_OK; } else if (code <= avail) { if (first) { sixel_helper_set_additional_message( "corrupt GIF (reason: no clear code)."); status = SIXEL_RUNTIME_ERROR; goto end; } if (oldcode >= 0) { if (avail < 4096) { p = &g->codes[avail++]; p->prefix = (signed short) oldcode; p->first = g->codes[oldcode].first; p->suffix = (code == avail) ? p->first : g->codes[code].first; } } else if (code == avail) { sixel_helper_set_additional_message( "corrupt GIF (reason: illegal code in raster)."); status = SIXEL_RUNTIME_ERROR; goto end; } gif_out_code(g, (unsigned short) code); if ((avail & codemask) == 0 && avail <= 0x0FFF) { codesize++; codemask = (1 << codesize) - 1; } oldcode = code; } else { sixel_helper_set_additional_message( "corrupt GIF (reason: illegal code in raster)."); status = SIXEL_RUNTIME_ERROR; goto end; } } } status = SIXEL_OK; end: return status; } /* this function is ported from stb_image.h */ static SIXELSTATUS gif_load_next( gif_context_t /* in */ *s, gif_t /* in */ *g, unsigned char /* in */ *bgcolor ) { SIXELSTATUS status = SIXEL_FALSE; unsigned char buffer[256]; int x; int y; int w; int h; int len; for (;;) { switch (gif_get8(s)) { case 0x2C: /* Image Descriptor */ x = gif_get16le(s); y = gif_get16le(s); w = gif_get16le(s); h = gif_get16le(s); if (((x + w) > (g->w)) || ((y + h) > (g->h))) { sixel_helper_set_additional_message( "corrupt GIF (reason: bad Image Descriptor)."); status = SIXEL_RUNTIME_ERROR; goto end; } g->line_size = g->w; g->start_x = x; g->start_y = y * g->line_size; g->max_x = g->start_x + w; g->max_y = g->start_y + h * g->line_size; g->cur_x = g->start_x; g->cur_y = g->start_y; g->lflags = gif_get8(s); if (g->lflags & 0x40) { g->step = 8 * g->line_size; /* first interlaced spacing */ g->parse = 3; } else { g->step = g->line_size; g->parse = 0; } if (g->lflags & 0x80) { gif_parse_colortable(s, g->lpal, 2 << (g->lflags & 7)); g->color_table = (unsigned char *) g->lpal; } else if (g->flags & 0x80) { if (g->transparent >= 0 && (g->eflags & 0x01)) { if (bgcolor) { g->pal[g->transparent][0] = bgcolor[2]; g->pal[g->transparent][1] = bgcolor[1]; g->pal[g->transparent][2] = bgcolor[0]; } } g->color_table = (unsigned char *)g->pal; } else { sixel_helper_set_additional_message( "corrupt GIF (reason: missing color table)."); status = SIXEL_RUNTIME_ERROR; goto end; } status = gif_process_raster(s, g); if (SIXEL_FAILED(status)) { goto end; } goto end; case 0x21: /* Comment Extension. 
*/ switch (gif_get8(s)) { case 0x01: /* Plain Text Extension */ break; case 0x21: /* Comment Extension */ break; case 0xF9: /* Graphic Control Extension */ len = gif_get8(s); /* block size */ if (len == 4) { g->eflags = gif_get8(s); g->delay = gif_get16le(s); /* delay */ g->transparent = gif_get8(s); } else { s->img_buffer += len; break; } break; case 0xFF: /* Application Extension */ len = gif_get8(s); /* block size */ if (s->img_buffer + len > s->img_buffer_end) { status = SIXEL_RUNTIME_ERROR; goto end; } memcpy(buffer, s->img_buffer, (size_t)len); s->img_buffer += len; buffer[len] = 0; if (len == 11 && strcmp((char *)buffer, "NETSCAPE2.0") == 0) { if (gif_get8(s) == 0x03) { /* loop count */ switch (gif_get8(s)) { case 0x00: g->loop_count = 1; break; case 0x01: g->loop_count = gif_get16le(s); break; default: g->loop_count = 1; break; } } } break; default: break; } while ((len = gif_get8(s)) != 0) { s->img_buffer += len; } break; case 0x3B: /* gif stream termination code */ g->is_terminated = 1; status = SIXEL_OK; goto end; default: sixel_helper_set_additional_message( "corrupt GIF (reason: unknown code)."); status = SIXEL_RUNTIME_ERROR; goto end; } } status = SIXEL_OK; end: return status; } typedef union _fn_pointer { sixel_load_image_function fn; void * p; } fn_pointer; SIXELSTATUS load_gif( unsigned char /* in */ *buffer, int /* in */ size, unsigned char /* in */ *bgcolor, int /* in */ reqcolors, int /* in */ fuse_palette, int /* in */ fstatic, int /* in */ loop_control, void /* in */ *fn_load, /* callback */ void /* in */ *context, /* private data for callback */ sixel_allocator_t /* in */ *allocator) /* allocator object */ { gif_context_t s; gif_t g; SIXELSTATUS status = SIXEL_FALSE; sixel_frame_t *frame; fn_pointer fnp; fnp.p = fn_load; g.out = NULL; status = sixel_frame_new(&frame, allocator); if (SIXEL_FAILED(status)) { goto end; } s.img_buffer = s.img_buffer_original = (unsigned char *)buffer; s.img_buffer_end = (unsigned char *)buffer + size; memset(&g, 0, sizeof(g)); status = gif_load_header(&s, &g); if (status != SIXEL_OK) { goto end; } g.out = (unsigned char *)sixel_allocator_malloc(allocator, (size_t)(g.w * g.h)); if (g.out == NULL) { sixel_helper_set_additional_message( "load_gif: sixel_allocator_malloc() failed."); status = SIXEL_BAD_ALLOCATION; goto end; } frame->loop_count = 0; for (;;) { /* per loop */ frame->frame_no = 0; s.img_buffer = s.img_buffer_original; status = gif_load_header(&s, &g); if (status != SIXEL_OK) { goto end; } g.is_terminated = 0; for (;;) { /* per frame */ status = gif_load_next(&s, &g, bgcolor); if (status != SIXEL_OK) { goto end; } if (g.is_terminated) { break; } frame->width = g.w; frame->height = g.h; status = gif_init_frame(frame, &g, bgcolor, reqcolors, fuse_palette); if (status != SIXEL_OK) { goto end; } status = fnp.fn(frame, context); if (status != SIXEL_OK) { goto end; } if (fstatic) { goto end; } ++frame->frame_no; } ++frame->loop_count; if (g.loop_count < 0) { break; } if (loop_control == SIXEL_LOOP_DISABLE || frame->frame_no == 1) { break; } if (loop_control == SIXEL_LOOP_AUTO) { if (frame->loop_count == g.loop_count) { break; } } } end: sixel_allocator_free(frame->allocator, g.out); sixel_frame_unref(frame); return status; } #if HAVE_TESTS static int test1(void) { int nret = EXIT_FAILURE; nret = EXIT_SUCCESS; return nret; } SIXELAPI int sixel_fromgif_tests_main(void) { int nret = EXIT_FAILURE; size_t i; typedef int (* testcase)(void); static testcase const testcases[] = { test1, }; for (i = 0; i < sizeof(testcases) / sizeof(testcase); 
++i) { nret = testcases[i](); if (nret != EXIT_SUCCESS) { goto error; } } nret = EXIT_SUCCESS; error: return nret; } #endif /* HAVE_TESTS */ /* emacs Local Variables: */ /* emacs mode: c */ /* emacs tab-width: 4 */ /* emacs indent-tabs-mode: nil */ /* emacs c-basic-offset: 4 */ /* emacs End: */ /* vim: set expandtab ts=4 sts=4 sw=4 : */ /* EOF */
null
/* * This file is derived from "stb_image.h" that is in public domain. * https://github.com/nothings/stb * * Hayaki Saito <saitoha@me.com> modified this and re-licensed * it under the MIT license. * * Copyright (c) 2014-2018 Hayaki Saito * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include "config.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include "frame.h" #include "fromgif.h" /* * gif_context_t struct and start_xxx functions * * gif_context_t structure is our basic context used by all images, so it * contains all the IO context, plus some basic image information */ typedef struct { unsigned int img_x, img_y; int img_n, img_out_n; int buflen; unsigned char buffer_start[128]; unsigned char *img_buffer, *img_buffer_end; unsigned char *img_buffer_original; } gif_context_t; typedef struct { signed short prefix; unsigned char first; unsigned char suffix; } gif_lzw; enum { gif_lzw_max_code_size = 12 }; typedef struct { int w, h; unsigned char *out; /* output buffer (always 4 components) */ int flags, bgindex, ratio, transparent, eflags; unsigned char pal[256][3]; unsigned char lpal[256][3]; gif_lzw codes[1 << gif_lzw_max_code_size]; unsigned char *color_table; int parse, step; int lflags; int start_x, start_y; int max_x, max_y; int cur_x, cur_y; int line_size; int loop_count; int delay; int is_multiframe; int is_terminated; } gif_t; /* initialize a memory-decode context */ static unsigned char gif_get8(gif_context_t *s) { if (s->img_buffer < s->img_buffer_end) { return *s->img_buffer++; } return 0; } static int gif_get16le(gif_context_t *s) { int z = gif_get8(s); return z + (gif_get8(s) << 8); } static void gif_parse_colortable( gif_context_t /* in */ *s, unsigned char /* in */ pal[256][3], int /* in */ num_entries) { int i; for (i = 0; i < num_entries; ++i) { pal[i][2] = gif_get8(s); pal[i][1] = gif_get8(s); pal[i][0] = gif_get8(s); } } static SIXELSTATUS gif_load_header( gif_context_t /* in */ *s, gif_t /* in */ *g) { SIXELSTATUS status = SIXEL_FALSE; unsigned char version; if (gif_get8(s) != 'G') { goto end; } if (gif_get8(s) != 'I') { goto end; } if (gif_get8(s) != 'F') { goto end; } if (gif_get8(s) != '8') { goto end; } version = gif_get8(s); if (version != '7' && version != '9') { goto end; } if (gif_get8(s) != 'a') { goto end; } g->w = gif_get16le(s); g->h = gif_get16le(s); g->flags = gif_get8(s); g->bgindex = gif_get8(s); g->ratio = gif_get8(s); g->transparent = (-1); g->loop_count = (-1); if (g->flags & 0x80) { gif_parse_colortable(s, g->pal, 2 << 
(g->flags & 7)); } status = SIXEL_OK; end: return status; } static SIXELSTATUS gif_init_frame( sixel_frame_t /* in */ *frame, gif_t /* in */ *pg, unsigned char /* in */ *bgcolor, int /* in */ reqcolors, int /* in */ fuse_palette) { SIXELSTATUS status = SIXEL_OK; int i; int ncolors; frame->delay = pg->delay; ncolors = 2 << (pg->flags & 7); if (frame->palette == NULL) { frame->palette = (unsigned char *)sixel_allocator_malloc(frame->allocator, (size_t)(ncolors * 3)); } else if (frame->ncolors < ncolors) { sixel_allocator_free(frame->allocator, frame->palette); frame->palette = (unsigned char *)sixel_allocator_malloc(frame->allocator, (size_t)(ncolors * 3)); } if (frame->palette == NULL) { sixel_helper_set_additional_message( "gif_init_frame: sixel_allocator_malloc() failed."); status = SIXEL_BAD_ALLOCATION; goto end; } frame->ncolors = ncolors; if (frame->ncolors <= reqcolors && fuse_palette) { frame->pixelformat = SIXEL_PIXELFORMAT_PAL8; sixel_allocator_free(frame->allocator, frame->pixels); frame->pixels = (unsigned char *)sixel_allocator_malloc(frame->allocator, (size_t)(frame->width * frame->height)); if (frame->pixels == NULL) { sixel_helper_set_additional_message( "sixel_allocator_malloc() failed in gif_init_frame()."); status = SIXEL_BAD_ALLOCATION; goto end; } memcpy(frame->pixels, pg->out, (size_t)(frame->width * frame->height)); for (i = 0; i < frame->ncolors; ++i) { frame->palette[i * 3 + 0] = pg->color_table[i * 3 + 2]; frame->palette[i * 3 + 1] = pg->color_table[i * 3 + 1]; frame->palette[i * 3 + 2] = pg->color_table[i * 3 + 0]; } if (pg->lflags & 0x80) { if (pg->eflags & 0x01) { if (bgcolor) { frame->palette[pg->transparent * 3 + 0] = bgcolor[0]; frame->palette[pg->transparent * 3 + 1] = bgcolor[1]; frame->palette[pg->transparent * 3 + 2] = bgcolor[2]; } else { frame->transparent = pg->transparent; } } } else if (pg->flags & 0x80) { if (pg->eflags & 0x01) { if (bgcolor) { frame->palette[pg->transparent * 3 + 0] = bgcolor[0]; frame->palette[pg->transparent * 3 + 1] = bgcolor[1]; frame->palette[pg->transparent * 3 + 2] = bgcolor[2]; } else { frame->transparent = pg->transparent; } } } } else { frame->pixelformat = SIXEL_PIXELFORMAT_RGB888; frame->pixels = (unsigned char *)sixel_allocator_malloc(frame->allocator, (size_t)(pg->w * pg->h * 3)); if (frame->pixels == NULL) { sixel_helper_set_additional_message( "sixel_allocator_malloc() failed in gif_init_frame()."); status = SIXEL_BAD_ALLOCATION; goto end; } for (i = 0; i < pg->w * pg->h; ++i) { frame->pixels[i * 3 + 0] = pg->color_table[pg->out[i] * 3 + 2]; frame->pixels[i * 3 + 1] = pg->color_table[pg->out[i] * 3 + 1]; frame->pixels[i * 3 + 2] = pg->color_table[pg->out[i] * 3 + 0]; } } frame->multiframe = (pg->loop_count != (-1)); status = SIXEL_OK; end: return status; } static void gif_out_code( gif_t /* in */ *g, unsigned short /* in */ code ) { /* recurse to decode the prefixes, since the linked-list is backwards, and working backwards through an interleaved image would be nasty */ if (g->codes[code].prefix >= 0) { gif_out_code(g, (unsigned short)g->codes[code].prefix); } if (g->cur_y >= g->max_y) { return; } g->out[g->cur_x + g->cur_y] = g->codes[code].suffix; g->cur_x++; if (g->cur_x >= g->max_x) { g->cur_x = g->start_x; g->cur_y += g->step; while (g->cur_y >= g->max_y && g->parse > 0) { g->step = (1 << g->parse) * g->line_size; g->cur_y = g->start_y + (g->step >> 1); --g->parse; } } } static SIXELSTATUS gif_process_raster( gif_context_t /* in */ *s, gif_t /* in */ *g ) { SIXELSTATUS status = SIXEL_FALSE; unsigned char lzw_cs; 
signed int len, code; unsigned int first; signed int codesize, codemask, avail, oldcode, bits, valid_bits, clear; gif_lzw *p; /* LZW Minimum Code Size */ lzw_cs = gif_get8(s); if (lzw_cs > gif_lzw_max_code_size) { sixel_helper_set_additional_message( "Unsupported GIF (LZW code size)"); status = SIXEL_RUNTIME_ERROR; goto end; } clear = 1 << lzw_cs; first = 1; codesize = lzw_cs + 1; codemask = (1 << codesize) - 1; bits = 0; valid_bits = 0; for (code = 0; code < clear; code++) { g->codes[code].prefix = -1; g->codes[code].first = (unsigned char) code; g->codes[code].suffix = (unsigned char) code; } /* support no starting clear code */ avail = clear + 2; oldcode = (-1); len = 0; for(;;) { if (valid_bits < codesize) { if (len == 0) { len = gif_get8(s); /* start new block */ if (len == 0) { return SIXEL_OK; } } --len; bits |= (signed int) gif_get8(s) << valid_bits; valid_bits += 8; } else { code = bits & codemask; bits >>= codesize; valid_bits -= codesize; /* @OPTIMIZE: is there some way we can accelerate the non-clear path? */ if (code == clear) { /* clear code */ codesize = lzw_cs + 1; codemask = (1 << codesize) - 1; avail = clear + 2; oldcode = -1; first = 0; } else if (code == clear + 1) { /* end of stream code */ s->img_buffer += len; while ((len = gif_get8(s)) > 0) { s->img_buffer += len; } return SIXEL_OK; } else if (code <= avail) { if (first) { sixel_helper_set_additional_message( "corrupt GIF (reason: no clear code)."); status = SIXEL_RUNTIME_ERROR; goto end; } if (oldcode >= 0) { if (avail < (1 << gif_lzw_max_code_size)) { p = &g->codes[avail++]; p->prefix = (signed short) oldcode; p->first = g->codes[oldcode].first; p->suffix = (code == avail) ? p->first : g->codes[code].first; } } else if (code == avail) { sixel_helper_set_additional_message( "corrupt GIF (reason: illegal code in raster)."); status = SIXEL_RUNTIME_ERROR; goto end; } gif_out_code(g, (unsigned short) code); if ((avail & codemask) == 0 && avail <= 0x0FFF) { codesize++; codemask = (1 << codesize) - 1; } oldcode = code; } else { sixel_helper_set_additional_message( "corrupt GIF (reason: illegal code in raster)."); status = SIXEL_RUNTIME_ERROR; goto end; } } } status = SIXEL_OK; end: return status; } /* this function is ported from stb_image.h */ static SIXELSTATUS gif_load_next( gif_context_t /* in */ *s, gif_t /* in */ *g, unsigned char /* in */ *bgcolor ) { SIXELSTATUS status = SIXEL_FALSE; unsigned char buffer[256]; int x; int y; int w; int h; int len; for (;;) { switch (gif_get8(s)) { case 0x2C: /* Image Descriptor */ x = gif_get16le(s); y = gif_get16le(s); w = gif_get16le(s); h = gif_get16le(s); if (((x + w) > (g->w)) || ((y + h) > (g->h))) { sixel_helper_set_additional_message( "corrupt GIF (reason: bad Image Descriptor)."); status = SIXEL_RUNTIME_ERROR; goto end; } g->line_size = g->w; g->start_x = x; g->start_y = y * g->line_size; g->max_x = g->start_x + w; g->max_y = g->start_y + h * g->line_size; g->cur_x = g->start_x; g->cur_y = g->start_y; g->lflags = gif_get8(s); if (g->lflags & 0x40) { g->step = 8 * g->line_size; /* first interlaced spacing */ g->parse = 3; } else { g->step = g->line_size; g->parse = 0; } if (g->lflags & 0x80) { gif_parse_colortable(s, g->lpal, 2 << (g->lflags & 7)); g->color_table = (unsigned char *) g->lpal; } else if (g->flags & 0x80) { if (g->transparent >= 0 && (g->eflags & 0x01)) { if (bgcolor) { g->pal[g->transparent][0] = bgcolor[2]; g->pal[g->transparent][1] = bgcolor[1]; g->pal[g->transparent][2] = bgcolor[0]; } } g->color_table = (unsigned char *)g->pal; } else { 
sixel_helper_set_additional_message( "corrupt GIF (reason: missing color table)."); status = SIXEL_RUNTIME_ERROR; goto end; } status = gif_process_raster(s, g); if (SIXEL_FAILED(status)) { goto end; } goto end; case 0x21: /* Comment Extension. */ switch (gif_get8(s)) { case 0x01: /* Plain Text Extension */ break; case 0x21: /* Comment Extension */ break; case 0xF9: /* Graphic Control Extension */ len = gif_get8(s); /* block size */ if (len == 4) { g->eflags = gif_get8(s); g->delay = gif_get16le(s); /* delay */ g->transparent = gif_get8(s); } else { s->img_buffer += len; break; } break; case 0xFF: /* Application Extension */ len = gif_get8(s); /* block size */ if (s->img_buffer + len > s->img_buffer_end) { status = SIXEL_RUNTIME_ERROR; goto end; } memcpy(buffer, s->img_buffer, (size_t)len); s->img_buffer += len; buffer[len] = 0; if (len == 11 && strcmp((char *)buffer, "NETSCAPE2.0") == 0) { if (gif_get8(s) == 0x03) { /* loop count */ switch (gif_get8(s)) { case 0x00: g->loop_count = 1; break; case 0x01: g->loop_count = gif_get16le(s); break; default: g->loop_count = 1; break; } } } break; default: break; } while ((len = gif_get8(s)) != 0) { s->img_buffer += len; } break; case 0x3B: /* gif stream termination code */ g->is_terminated = 1; status = SIXEL_OK; goto end; default: sixel_helper_set_additional_message( "corrupt GIF (reason: unknown code)."); status = SIXEL_RUNTIME_ERROR; goto end; } } status = SIXEL_OK; end: return status; } typedef union _fn_pointer { sixel_load_image_function fn; void * p; } fn_pointer; SIXELSTATUS load_gif( unsigned char /* in */ *buffer, int /* in */ size, unsigned char /* in */ *bgcolor, int /* in */ reqcolors, int /* in */ fuse_palette, int /* in */ fstatic, int /* in */ loop_control, void /* in */ *fn_load, /* callback */ void /* in */ *context, /* private data for callback */ sixel_allocator_t /* in */ *allocator) /* allocator object */ { gif_context_t s; gif_t g; SIXELSTATUS status = SIXEL_FALSE; sixel_frame_t *frame; fn_pointer fnp; fnp.p = fn_load; g.out = NULL; status = sixel_frame_new(&frame, allocator); if (SIXEL_FAILED(status)) { goto end; } s.img_buffer = s.img_buffer_original = (unsigned char *)buffer; s.img_buffer_end = (unsigned char *)buffer + size; memset(&g, 0, sizeof(g)); status = gif_load_header(&s, &g); if (status != SIXEL_OK) { goto end; } g.out = (unsigned char *)sixel_allocator_malloc(allocator, (size_t)(g.w * g.h)); if (g.out == NULL) { sixel_helper_set_additional_message( "load_gif: sixel_allocator_malloc() failed."); status = SIXEL_BAD_ALLOCATION; goto end; } frame->loop_count = 0; for (;;) { /* per loop */ frame->frame_no = 0; s.img_buffer = s.img_buffer_original; status = gif_load_header(&s, &g); if (status != SIXEL_OK) { goto end; } g.is_terminated = 0; for (;;) { /* per frame */ status = gif_load_next(&s, &g, bgcolor); if (status != SIXEL_OK) { goto end; } if (g.is_terminated) { break; } frame->width = g.w; frame->height = g.h; status = gif_init_frame(frame, &g, bgcolor, reqcolors, fuse_palette); if (status != SIXEL_OK) { goto end; } status = fnp.fn(frame, context); if (status != SIXEL_OK) { goto end; } if (fstatic) { goto end; } ++frame->frame_no; } ++frame->loop_count; if (g.loop_count < 0) { break; } if (loop_control == SIXEL_LOOP_DISABLE || frame->frame_no == 1) { break; } if (loop_control == SIXEL_LOOP_AUTO) { if (frame->loop_count == g.loop_count) { break; } } } end: sixel_allocator_free(frame->allocator, g.out); sixel_frame_unref(frame); return status; } #if HAVE_TESTS static int test1(void) { int nret = EXIT_FAILURE; nret = 
EXIT_SUCCESS; return nret; } SIXELAPI int sixel_fromgif_tests_main(void) { int nret = EXIT_FAILURE; size_t i; typedef int (* testcase)(void); static testcase const testcases[] = { test1, }; for (i = 0; i < sizeof(testcases) / sizeof(testcase); ++i) { nret = testcases[i](); if (nret != EXIT_SUCCESS) { goto error; } } nret = EXIT_SUCCESS; error: return nret; } #endif /* HAVE_TESTS */ /* emacs Local Variables: */ /* emacs mode: c */ /* emacs tab-width: 4 */ /* emacs indent-tabs-mode: nil */ /* emacs c-basic-offset: 4 */ /* emacs End: */ /* vim: set expandtab ts=4 sts=4 sw=4 : */ /* EOF */
null
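Note (not part of either dataset row above): the two copies of the libsixel GIF loader in this record differ mainly in that the second (ground_truth) copy introduces gif_lzw_max_code_size = 12, sizes the codes[] table as 1 << gif_lzw_max_code_size, and rejects any LZW minimum code size above that limit before it is fed into "clear = 1 << lzw_cs", whereas the first copy uses the byte from gif_get8() unchecked. The snippet below is a minimal, standalone sketch of that validation step under hypothetical demo_ names; it is not the library's code, only an illustration of the guard.

#include <stdio.h>

enum { demo_lzw_max_code_size = 12 };   /* same limit the patched copy uses */

/* Hypothetical validator: reject an LZW minimum code size that would make
 * "clear = 1 << lzw_cs", and the codes derived from it, index past a
 * (1 << 12)-entry code table -- the out-of-bounds write pattern (CWE-787)
 * the patched copy above guards against. */
static int demo_check_lzw_code_size(unsigned char lzw_cs)
{
    if (lzw_cs > demo_lzw_max_code_size)
        return -1;          /* corrupt or hostile input: refuse to decode */
    return 0;
}

int main(void)
{
    printf("lzw_cs=8  -> %s\n", demo_check_lzw_code_size(8)  == 0 ? "ok" : "rejected");
    printf("lzw_cs=13 -> %s\n", demo_check_lzw_code_size(13) == 0 ? "ok" : "rejected");
    return 0;
}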
203
CWE-787
CVE-2020-21674
/*- * Copyright (c) 2003-2011 Tim Kientzle * Copyright (c) 2011-2012 Michihiro NAKAJIMA * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "archive_platform.h" __FBSDID("$FreeBSD: head/lib/libarchive/archive_string.c 201095 2009-12-28 02:33:22Z kientzle $"); /* * Basic resizable string support, to simplify manipulating arbitrary-sized * strings while minimizing heap activity. * * In particular, the buffer used by a string object is only grown, it * never shrinks, so you can clear and reuse the same string object * without incurring additional memory allocations. */ #ifdef HAVE_ERRNO_H #include <errno.h> #endif #ifdef HAVE_ICONV_H #include <iconv.h> #endif #ifdef HAVE_LANGINFO_H #include <langinfo.h> #endif #ifdef HAVE_LOCALCHARSET_H #include <localcharset.h> #endif #ifdef HAVE_STDLIB_H #include <stdlib.h> #endif #ifdef HAVE_STRING_H #include <string.h> #endif #ifdef HAVE_WCHAR_H #include <wchar.h> #endif #if defined(_WIN32) && !defined(__CYGWIN__) #include <windows.h> #include <locale.h> #endif #include "archive_endian.h" #include "archive_private.h" #include "archive_string.h" #include "archive_string_composition.h" #if !defined(HAVE_WMEMCPY) && !defined(wmemcpy) #define wmemcpy(a,b,i) (wchar_t *)memcpy((a), (b), (i) * sizeof(wchar_t)) #endif #if !defined(HAVE_WMEMMOVE) && !defined(wmemmove) #define wmemmove(a,b,i) (wchar_t *)memmove((a), (b), (i) * sizeof(wchar_t)) #endif struct archive_string_conv { struct archive_string_conv *next; char *from_charset; char *to_charset; unsigned from_cp; unsigned to_cp; /* Set 1 if from_charset and to_charset are the same. */ int same; int flag; #define SCONV_TO_CHARSET 1 /* MBS is being converted to specified * charset. */ #define SCONV_FROM_CHARSET (1<<1) /* MBS is being converted from * specified charset. */ #define SCONV_BEST_EFFORT (1<<2) /* Copy at least ASCII code. */ #define SCONV_WIN_CP (1<<3) /* Use Windows API for converting * MBS. */ #define SCONV_UTF8_LIBARCHIVE_2 (1<<4) /* Incorrect UTF-8 made by libarchive * 2.x in the wrong assumption. */ #define SCONV_NORMALIZATION_C (1<<6) /* Need normalization to be Form C. * Before UTF-8 characters are actually * processed. */ #define SCONV_NORMALIZATION_D (1<<7) /* Need normalization to be Form D. * Before UTF-8 characters are actually * processed. * Currently this only for MAC OS X. 
*/ #define SCONV_TO_UTF8 (1<<8) /* "to charset" side is UTF-8. */ #define SCONV_FROM_UTF8 (1<<9) /* "from charset" side is UTF-8. */ #define SCONV_TO_UTF16BE (1<<10) /* "to charset" side is UTF-16BE. */ #define SCONV_FROM_UTF16BE (1<<11) /* "from charset" side is UTF-16BE. */ #define SCONV_TO_UTF16LE (1<<12) /* "to charset" side is UTF-16LE. */ #define SCONV_FROM_UTF16LE (1<<13) /* "from charset" side is UTF-16LE. */ #define SCONV_TO_UTF16 (SCONV_TO_UTF16BE | SCONV_TO_UTF16LE) #define SCONV_FROM_UTF16 (SCONV_FROM_UTF16BE | SCONV_FROM_UTF16LE) #if HAVE_ICONV iconv_t cd; iconv_t cd_w;/* Use at archive_mstring on * Windows. */ #endif /* A temporary buffer for normalization. */ struct archive_string utftmp; int (*converter[2])(struct archive_string *, const void *, size_t, struct archive_string_conv *); int nconverter; }; #define CP_C_LOCALE 0 /* "C" locale only for this file. */ #define CP_UTF16LE 1200 #define CP_UTF16BE 1201 #define IS_HIGH_SURROGATE_LA(uc) ((uc) >= 0xD800 && (uc) <= 0xDBFF) #define IS_LOW_SURROGATE_LA(uc) ((uc) >= 0xDC00 && (uc) <= 0xDFFF) #define IS_SURROGATE_PAIR_LA(uc) ((uc) >= 0xD800 && (uc) <= 0xDFFF) #define UNICODE_MAX 0x10FFFF #define UNICODE_R_CHAR 0xFFFD /* Replacement character. */ /* Set U+FFFD(Replacement character) in UTF-8. */ static const char utf8_replacement_char[] = {0xef, 0xbf, 0xbd}; static struct archive_string_conv *find_sconv_object(struct archive *, const char *, const char *); static void add_sconv_object(struct archive *, struct archive_string_conv *); static struct archive_string_conv *create_sconv_object(const char *, const char *, unsigned, int); static void free_sconv_object(struct archive_string_conv *); static struct archive_string_conv *get_sconv_object(struct archive *, const char *, const char *, int); static unsigned make_codepage_from_charset(const char *); static unsigned get_current_codepage(void); static unsigned get_current_oemcp(void); static size_t mbsnbytes(const void *, size_t); static size_t utf16nbytes(const void *, size_t); #if defined(_WIN32) && !defined(__CYGWIN__) static int archive_wstring_append_from_mbs_in_codepage( struct archive_wstring *, const char *, size_t, struct archive_string_conv *); static int archive_string_append_from_wcs_in_codepage(struct archive_string *, const wchar_t *, size_t, struct archive_string_conv *); static int is_big_endian(void); static int strncat_in_codepage(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int win_strncat_from_utf16be(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int win_strncat_from_utf16le(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int win_strncat_to_utf16be(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int win_strncat_to_utf16le(struct archive_string *, const void *, size_t, struct archive_string_conv *); #endif static int best_effort_strncat_from_utf16be(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int best_effort_strncat_from_utf16le(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int best_effort_strncat_to_utf16be(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int best_effort_strncat_to_utf16le(struct archive_string *, const void *, size_t, struct archive_string_conv *); #if defined(HAVE_ICONV) static int iconv_strncat_in_locale(struct archive_string *, const void *, size_t, struct archive_string_conv 
*); #endif static int best_effort_strncat_in_locale(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int _utf8_to_unicode(uint32_t *, const char *, size_t); static int utf8_to_unicode(uint32_t *, const char *, size_t); static inline uint32_t combine_surrogate_pair(uint32_t, uint32_t); static int cesu8_to_unicode(uint32_t *, const char *, size_t); static size_t unicode_to_utf8(char *, size_t, uint32_t); static int utf16_to_unicode(uint32_t *, const char *, size_t, int); static size_t unicode_to_utf16be(char *, size_t, uint32_t); static size_t unicode_to_utf16le(char *, size_t, uint32_t); static int strncat_from_utf8_libarchive2(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int strncat_from_utf8_to_utf8(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int archive_string_normalize_C(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int archive_string_normalize_D(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int archive_string_append_unicode(struct archive_string *, const void *, size_t, struct archive_string_conv *); static struct archive_string * archive_string_append(struct archive_string *as, const char *p, size_t s) { if (archive_string_ensure(as, as->length + s + 1) == NULL) return (NULL); if (s) memmove(as->s + as->length, p, s); as->length += s; as->s[as->length] = 0; return (as); } static struct archive_wstring * archive_wstring_append(struct archive_wstring *as, const wchar_t *p, size_t s) { if (archive_wstring_ensure(as, as->length + s + 1) == NULL) return (NULL); if (s) wmemmove(as->s + as->length, p, s); as->length += s; as->s[as->length] = 0; return (as); } struct archive_string * archive_array_append(struct archive_string *as, const char *p, size_t s) { return archive_string_append(as, p, s); } void archive_string_concat(struct archive_string *dest, struct archive_string *src) { if (archive_string_append(dest, src->s, src->length) == NULL) __archive_errx(1, "Out of memory"); } void archive_wstring_concat(struct archive_wstring *dest, struct archive_wstring *src) { if (archive_wstring_append(dest, src->s, src->length) == NULL) __archive_errx(1, "Out of memory"); } void archive_string_free(struct archive_string *as) { as->length = 0; as->buffer_length = 0; free(as->s); as->s = NULL; } void archive_wstring_free(struct archive_wstring *as) { as->length = 0; as->buffer_length = 0; free(as->s); as->s = NULL; } struct archive_wstring * archive_wstring_ensure(struct archive_wstring *as, size_t s) { return (struct archive_wstring *) archive_string_ensure((struct archive_string *)as, s * sizeof(wchar_t)); } /* Returns NULL on any allocation failure. */ struct archive_string * archive_string_ensure(struct archive_string *as, size_t s) { char *p; size_t new_length; /* If buffer is already big enough, don't reallocate. */ if (as->s && (s <= as->buffer_length)) return (as); /* * Growing the buffer at least exponentially ensures that * append operations are always linear in the number of * characters appended. Using a smaller growth rate for * larger buffers reduces memory waste somewhat at the cost of * a larger constant factor. */ if (as->buffer_length < 32) /* Start with a minimum 32-character buffer. */ new_length = 32; else if (as->buffer_length < 8192) /* Buffers under 8k are doubled for speed. 
*/ new_length = as->buffer_length + as->buffer_length; else { /* Buffers 8k and over grow by at least 25% each time. */ new_length = as->buffer_length + as->buffer_length / 4; /* Be safe: If size wraps, fail. */ if (new_length < as->buffer_length) { /* On failure, wipe the string and return NULL. */ archive_string_free(as); errno = ENOMEM;/* Make sure errno has ENOMEM. */ return (NULL); } } /* * The computation above is a lower limit to how much we'll * grow the buffer. In any case, we have to grow it enough to * hold the request. */ if (new_length < s) new_length = s; /* Now we can reallocate the buffer. */ p = (char *)realloc(as->s, new_length); if (p == NULL) { /* On failure, wipe the string and return NULL. */ archive_string_free(as); errno = ENOMEM;/* Make sure errno has ENOMEM. */ return (NULL); } as->s = p; as->buffer_length = new_length; return (as); } /* * TODO: See if there's a way to avoid scanning * the source string twice. Then test to see * if it actually helps (remember that we're almost * always called with pretty short arguments, so * such an optimization might not help). */ struct archive_string * archive_strncat(struct archive_string *as, const void *_p, size_t n) { size_t s; const char *p, *pp; p = (const char *)_p; /* Like strlen(p), except won't examine positions beyond p[n]. */ s = 0; pp = p; while (s < n && *pp) { pp++; s++; } if ((as = archive_string_append(as, p, s)) == NULL) __archive_errx(1, "Out of memory"); return (as); } struct archive_wstring * archive_wstrncat(struct archive_wstring *as, const wchar_t *p, size_t n) { size_t s; const wchar_t *pp; /* Like strlen(p), except won't examine positions beyond p[n]. */ s = 0; pp = p; while (s < n && *pp) { pp++; s++; } if ((as = archive_wstring_append(as, p, s)) == NULL) __archive_errx(1, "Out of memory"); return (as); } struct archive_string * archive_strcat(struct archive_string *as, const void *p) { /* strcat is just strncat without an effective limit. * Assert that we'll never get called with a source * string over 16MB. * TODO: Review all uses of strcat in the source * and try to replace them with strncat(). */ return archive_strncat(as, p, 0x1000000); } struct archive_wstring * archive_wstrcat(struct archive_wstring *as, const wchar_t *p) { /* Ditto. */ return archive_wstrncat(as, p, 0x1000000); } struct archive_string * archive_strappend_char(struct archive_string *as, char c) { if ((as = archive_string_append(as, &c, 1)) == NULL) __archive_errx(1, "Out of memory"); return (as); } struct archive_wstring * archive_wstrappend_wchar(struct archive_wstring *as, wchar_t c) { if ((as = archive_wstring_append(as, &c, 1)) == NULL) __archive_errx(1, "Out of memory"); return (as); } /* * Get the "current character set" name to use with iconv. * On FreeBSD, the empty character set name "" chooses * the correct character encoding for the current locale, * so this isn't necessary. * But iconv on Mac OS 10.6 doesn't seem to handle this correctly; * on that system, we have to explicitly call nl_langinfo() * to get the right name. Not sure about other platforms. * * NOTE: GNU libiconv does not recognize the character-set name * which some platform nl_langinfo(CODESET) returns, so we should * use locale_charset() instead of nl_langinfo(CODESET) for GNU libiconv. 
*/ static const char * default_iconv_charset(const char *charset) { if (charset != NULL && charset[0] != '\0') return charset; #if HAVE_LOCALE_CHARSET && !defined(__APPLE__) /* locale_charset() is broken on Mac OS */ return locale_charset(); #elif HAVE_NL_LANGINFO return nl_langinfo(CODESET); #else return ""; #endif } #if defined(_WIN32) && !defined(__CYGWIN__) /* * Convert MBS to WCS. * Note: returns -1 if conversion fails. */ int archive_wstring_append_from_mbs(struct archive_wstring *dest, const char *p, size_t len) { return archive_wstring_append_from_mbs_in_codepage(dest, p, len, NULL); } static int archive_wstring_append_from_mbs_in_codepage(struct archive_wstring *dest, const char *s, size_t length, struct archive_string_conv *sc) { int count, ret = 0; UINT from_cp; if (sc != NULL) from_cp = sc->from_cp; else from_cp = get_current_codepage(); if (from_cp == CP_C_LOCALE) { /* * "C" locale special processing. */ wchar_t *ws; const unsigned char *mp; if (NULL == archive_wstring_ensure(dest, dest->length + length + 1)) return (-1); ws = dest->s + dest->length; mp = (const unsigned char *)s; count = 0; while (count < (int)length && *mp) { *ws++ = (wchar_t)*mp++; count++; } } else if (sc != NULL && (sc->flag & (SCONV_NORMALIZATION_C | SCONV_NORMALIZATION_D))) { /* * Normalize UTF-8 and UTF-16BE and convert it directly * to UTF-16 as wchar_t. */ struct archive_string u16; int saved_flag = sc->flag;/* save current flag. */ if (is_big_endian()) sc->flag |= SCONV_TO_UTF16BE; else sc->flag |= SCONV_TO_UTF16LE; if (sc->flag & SCONV_FROM_UTF16) { /* * UTF-16BE/LE NFD ===> UTF-16 NFC * UTF-16BE/LE NFC ===> UTF-16 NFD */ count = (int)utf16nbytes(s, length); } else { /* * UTF-8 NFD ===> UTF-16 NFC * UTF-8 NFC ===> UTF-16 NFD */ count = (int)mbsnbytes(s, length); } u16.s = (char *)dest->s; u16.length = dest->length << 1;; u16.buffer_length = dest->buffer_length; if (sc->flag & SCONV_NORMALIZATION_C) ret = archive_string_normalize_C(&u16, s, count, sc); else ret = archive_string_normalize_D(&u16, s, count, sc); dest->s = (wchar_t *)u16.s; dest->length = u16.length >> 1; dest->buffer_length = u16.buffer_length; sc->flag = saved_flag;/* restore the saved flag. */ return (ret); } else if (sc != NULL && (sc->flag & SCONV_FROM_UTF16)) { count = (int)utf16nbytes(s, length); count >>= 1; /* to be WCS length */ /* Allocate memory for WCS. */ if (NULL == archive_wstring_ensure(dest, dest->length + count + 1)) return (-1); wmemcpy(dest->s + dest->length, (const wchar_t *)s, count); if ((sc->flag & SCONV_FROM_UTF16BE) && !is_big_endian()) { uint16_t *u16 = (uint16_t *)(dest->s + dest->length); int b; for (b = 0; b < count; b++) { uint16_t val = archive_le16dec(u16+b); archive_be16enc(u16+b, val); } } else if ((sc->flag & SCONV_FROM_UTF16LE) && is_big_endian()) { uint16_t *u16 = (uint16_t *)(dest->s + dest->length); int b; for (b = 0; b < count; b++) { uint16_t val = archive_be16dec(u16+b); archive_le16enc(u16+b, val); } } } else { DWORD mbflag; size_t buffsize; if (sc == NULL) mbflag = 0; else if (sc->flag & SCONV_FROM_CHARSET) { /* Do not trust the length which comes from * an archive file. */ length = mbsnbytes(s, length); mbflag = 0; } else mbflag = MB_PRECOMPOSED; buffsize = dest->length + length + 1; do { /* Allocate memory for WCS. */ if (NULL == archive_wstring_ensure(dest, buffsize)) return (-1); /* Convert MBS to WCS. 
*/ count = MultiByteToWideChar(from_cp, mbflag, s, (int)length, dest->s + dest->length, (int)(dest->buffer_length >> 1) -1); if (count == 0 && GetLastError() == ERROR_INSUFFICIENT_BUFFER) { /* Expand the WCS buffer. */ buffsize = dest->buffer_length << 1; continue; } if (count == 0 && length != 0) ret = -1; break; } while (1); } dest->length += count; dest->s[dest->length] = L'\0'; return (ret); } #else /* * Convert MBS to WCS. * Note: returns -1 if conversion fails. */ int archive_wstring_append_from_mbs(struct archive_wstring *dest, const char *p, size_t len) { size_t r; int ret_val = 0; /* * No single byte will be more than one wide character, * so this length estimate will always be big enough. */ // size_t wcs_length = len; size_t mbs_length = len; const char *mbs = p; wchar_t *wcs; #if HAVE_MBRTOWC mbstate_t shift_state; memset(&shift_state, 0, sizeof(shift_state)); #endif /* * As we decided to have wcs_length == mbs_length == len * we can use len here instead of wcs_length */ if (NULL == archive_wstring_ensure(dest, dest->length + len + 1)) return (-1); wcs = dest->s + dest->length; /* * We cannot use mbsrtowcs/mbstowcs here because those may convert * extra MBS when strlen(p) > len and one wide character consists of * multi bytes. */ while (*mbs && mbs_length > 0) { /* * The buffer we allocated is always big enough. * Keep this code path in a comment if we decide to choose * smaller wcs_length in the future */ /* if (wcs_length == 0) { dest->length = wcs - dest->s; dest->s[dest->length] = L'\0'; wcs_length = mbs_length; if (NULL == archive_wstring_ensure(dest, dest->length + wcs_length + 1)) return (-1); wcs = dest->s + dest->length; } */ #if HAVE_MBRTOWC r = mbrtowc(wcs, mbs, mbs_length, &shift_state); #else r = mbtowc(wcs, mbs, mbs_length); #endif if (r == (size_t)-1 || r == (size_t)-2) { ret_val = -1; break; } if (r == 0 || r > mbs_length) break; wcs++; // wcs_length--; mbs += r; mbs_length -= r; } dest->length = wcs - dest->s; dest->s[dest->length] = L'\0'; return (ret_val); } #endif #if defined(_WIN32) && !defined(__CYGWIN__) /* * WCS ==> MBS. * Note: returns -1 if conversion fails. * * Win32 builds use WideCharToMultiByte from the Windows API. * (Maybe Cygwin should too? WideCharToMultiByte will know a * lot more about local character encodings than the wcrtomb() * wrapper is going to know.) */ int archive_string_append_from_wcs(struct archive_string *as, const wchar_t *w, size_t len) { return archive_string_append_from_wcs_in_codepage(as, w, len, NULL); } static int archive_string_append_from_wcs_in_codepage(struct archive_string *as, const wchar_t *ws, size_t len, struct archive_string_conv *sc) { BOOL defchar_used, *dp; int count, ret = 0; UINT to_cp; int wslen = (int)len; if (sc != NULL) to_cp = sc->to_cp; else to_cp = get_current_codepage(); if (to_cp == CP_C_LOCALE) { /* * "C" locale special processing. 
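 *
 * For illustration (behaviour of the copy loop below): every wide
 * character at or below 0xFF is stored as that single byte, anything
 * higher is replaced by '?' and the whole call then reports failure;
 * e.g. L"abc" followed by U+00E9 becomes the bytes "abc\xE9", while a
 * string containing U+3042 produces a '?' and a -1 return value.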
*/ const wchar_t *wp = ws; char *p; if (NULL == archive_string_ensure(as, as->length + wslen +1)) return (-1); p = as->s + as->length; count = 0; defchar_used = 0; while (count < wslen && *wp) { if (*wp > 255) { *p++ = '?'; wp++; defchar_used = 1; } else *p++ = (char)*wp++; count++; } } else if (sc != NULL && (sc->flag & SCONV_TO_UTF16)) { uint16_t *u16; if (NULL == archive_string_ensure(as, as->length + len * 2 + 2)) return (-1); u16 = (uint16_t *)(as->s + as->length); count = 0; defchar_used = 0; if (sc->flag & SCONV_TO_UTF16BE) { while (count < (int)len && *ws) { archive_be16enc(u16+count, *ws); ws++; count++; } } else { while (count < (int)len && *ws) { archive_le16enc(u16+count, *ws); ws++; count++; } } count <<= 1; /* to be byte size */ } else { /* Make sure the MBS buffer has plenty to set. */ if (NULL == archive_string_ensure(as, as->length + len * 2 + 1)) return (-1); do { defchar_used = 0; if (to_cp == CP_UTF8 || sc == NULL) dp = NULL; else dp = &defchar_used; count = WideCharToMultiByte(to_cp, 0, ws, wslen, as->s + as->length, (int)as->buffer_length-1, NULL, dp); if (count == 0 && GetLastError() == ERROR_INSUFFICIENT_BUFFER) { /* Expand the MBS buffer and retry. */ if (NULL == archive_string_ensure(as, as->buffer_length + len)) return (-1); continue; } if (count == 0) ret = -1; break; } while (1); } as->length += count; as->s[as->length] = '\0'; return (defchar_used?-1:ret); } #elif defined(HAVE_WCTOMB) || defined(HAVE_WCRTOMB) /* * Translates a wide character string into current locale character set * and appends to the archive_string. Note: returns -1 if conversion * fails. */ int archive_string_append_from_wcs(struct archive_string *as, const wchar_t *w, size_t len) { /* We cannot use the standard wcstombs() here because it * cannot tell us how big the output buffer should be. So * I've built a loop around wcrtomb() or wctomb() that * converts a character at a time and resizes the string as * needed. We prefer wcrtomb() when it's available because * it's thread-safe. */ int n, ret_val = 0; char *p; char *end; #if HAVE_WCRTOMB mbstate_t shift_state; memset(&shift_state, 0, sizeof(shift_state)); #else /* Clear the shift state before starting. */ wctomb(NULL, L'\0'); #endif /* * Allocate buffer for MBS. * We need this allocation here since it is possible that * as->s is still NULL. */ if (archive_string_ensure(as, as->length + len + 1) == NULL) return (-1); p = as->s + as->length; end = as->s + as->buffer_length - MB_CUR_MAX -1; while (*w != L'\0' && len > 0) { if (p >= end) { as->length = p - as->s; as->s[as->length] = '\0'; /* Re-allocate buffer for MBS. */ if (archive_string_ensure(as, as->length + len * 2 + 1) == NULL) return (-1); p = as->s + as->length; end = as->s + as->buffer_length - MB_CUR_MAX -1; } #if HAVE_WCRTOMB n = wcrtomb(p, *w++, &shift_state); #else n = wctomb(p, *w++); #endif if (n == -1) { if (errno == EILSEQ) { /* Skip an illegal wide char. */ *p++ = '?'; ret_val = -1; } else { ret_val = -1; break; } } else p += n; len--; } as->length = p - as->s; as->s[as->length] = '\0'; return (ret_val); } #else /* HAVE_WCTOMB || HAVE_WCRTOMB */ /* * TODO: Test if __STDC_ISO_10646__ is defined. * Non-Windows uses ISO C wcrtomb() or wctomb() to perform the conversion * one character at a time. If a non-Windows platform doesn't have * either of these, fall back to the built-in UTF8 conversion. 
*/ int archive_string_append_from_wcs(struct archive_string *as, const wchar_t *w, size_t len) { (void)as;/* UNUSED */ (void)w;/* UNUSED */ (void)len;/* UNUSED */ errno = ENOSYS; return (-1); } #endif /* HAVE_WCTOMB || HAVE_WCRTOMB */ /* * Find a string conversion object by a pair of 'from' charset name * and 'to' charset name from an archive object. * Return NULL if not found. */ static struct archive_string_conv * find_sconv_object(struct archive *a, const char *fc, const char *tc) { struct archive_string_conv *sc; if (a == NULL) return (NULL); for (sc = a->sconv; sc != NULL; sc = sc->next) { if (strcmp(sc->from_charset, fc) == 0 && strcmp(sc->to_charset, tc) == 0) break; } return (sc); } /* * Register a string object to an archive object. */ static void add_sconv_object(struct archive *a, struct archive_string_conv *sc) { struct archive_string_conv **psc; /* Add a new sconv to sconv list. */ psc = &(a->sconv); while (*psc != NULL) psc = &((*psc)->next); *psc = sc; } static void add_converter(struct archive_string_conv *sc, int (*converter) (struct archive_string *, const void *, size_t, struct archive_string_conv *)) { if (sc == NULL || sc->nconverter >= 2) __archive_errx(1, "Programming error"); sc->converter[sc->nconverter++] = converter; } static void setup_converter(struct archive_string_conv *sc) { /* Reset. */ sc->nconverter = 0; /* * Perform special sequence for the incorrect UTF-8 filenames * made by libarchive2.x. */ if (sc->flag & SCONV_UTF8_LIBARCHIVE_2) { add_converter(sc, strncat_from_utf8_libarchive2); return; } /* * Convert a string to UTF-16BE/LE. */ if (sc->flag & SCONV_TO_UTF16) { /* * If the current locale is UTF-8, we can translate * a UTF-8 string into a UTF-16BE string. */ if (sc->flag & SCONV_FROM_UTF8) { add_converter(sc, archive_string_append_unicode); return; } #if defined(_WIN32) && !defined(__CYGWIN__) if (sc->flag & SCONV_WIN_CP) { if (sc->flag & SCONV_TO_UTF16BE) add_converter(sc, win_strncat_to_utf16be); else add_converter(sc, win_strncat_to_utf16le); return; } #endif #if defined(HAVE_ICONV) if (sc->cd != (iconv_t)-1) { add_converter(sc, iconv_strncat_in_locale); return; } #endif if (sc->flag & SCONV_BEST_EFFORT) { if (sc->flag & SCONV_TO_UTF16BE) add_converter(sc, best_effort_strncat_to_utf16be); else add_converter(sc, best_effort_strncat_to_utf16le); } else /* Make sure we have no converter. */ sc->nconverter = 0; return; } /* * Convert a string from UTF-16BE/LE. */ if (sc->flag & SCONV_FROM_UTF16) { /* * At least we should normalize a UTF-16BE string. */ if (sc->flag & SCONV_NORMALIZATION_D) add_converter(sc,archive_string_normalize_D); else if (sc->flag & SCONV_NORMALIZATION_C) add_converter(sc, archive_string_normalize_C); if (sc->flag & SCONV_TO_UTF8) { /* * If the current locale is UTF-8, we can translate * a UTF-16BE/LE string into a UTF-8 string directly. 
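 *
 * Reading of the code below, added for clarity (hedged): when one of
 * the normalization flags is set the normalizer itself already writes
 * the UTF-8 output, so archive_string_append_unicode() is added only
 * for the plain pass-through case; e.g. an object built for UTF-16BE
 * input, UTF-8 output and SCONV_NORMALIZATION_C ends up with
 * archive_string_normalize_C() as its only converter.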
*/ if (!(sc->flag & (SCONV_NORMALIZATION_D |SCONV_NORMALIZATION_C))) add_converter(sc, archive_string_append_unicode); return; } #if defined(_WIN32) && !defined(__CYGWIN__) if (sc->flag & SCONV_WIN_CP) { if (sc->flag & SCONV_FROM_UTF16BE) add_converter(sc, win_strncat_from_utf16be); else add_converter(sc, win_strncat_from_utf16le); return; } #endif #if defined(HAVE_ICONV) if (sc->cd != (iconv_t)-1) { add_converter(sc, iconv_strncat_in_locale); return; } #endif if ((sc->flag & (SCONV_BEST_EFFORT | SCONV_FROM_UTF16BE)) == (SCONV_BEST_EFFORT | SCONV_FROM_UTF16BE)) add_converter(sc, best_effort_strncat_from_utf16be); else if ((sc->flag & (SCONV_BEST_EFFORT | SCONV_FROM_UTF16LE)) == (SCONV_BEST_EFFORT | SCONV_FROM_UTF16LE)) add_converter(sc, best_effort_strncat_from_utf16le); else /* Make sure we have no converter. */ sc->nconverter = 0; return; } if (sc->flag & SCONV_FROM_UTF8) { /* * At least we should normalize a UTF-8 string. */ if (sc->flag & SCONV_NORMALIZATION_D) add_converter(sc,archive_string_normalize_D); else if (sc->flag & SCONV_NORMALIZATION_C) add_converter(sc, archive_string_normalize_C); /* * Copy UTF-8 string with a check of CESU-8. * Apparently, iconv does not check surrogate pairs in UTF-8 * when both from-charset and to-charset are UTF-8, and then * we use our UTF-8 copy code. */ if (sc->flag & SCONV_TO_UTF8) { /* * If the current locale is UTF-8, we can translate * a UTF-16BE string into a UTF-8 string directly. */ if (!(sc->flag & (SCONV_NORMALIZATION_D |SCONV_NORMALIZATION_C))) add_converter(sc, strncat_from_utf8_to_utf8); return; } } #if defined(_WIN32) && !defined(__CYGWIN__) /* * On Windows we can use Windows API for a string conversion. */ if (sc->flag & SCONV_WIN_CP) { add_converter(sc, strncat_in_codepage); return; } #endif #if HAVE_ICONV if (sc->cd != (iconv_t)-1) { add_converter(sc, iconv_strncat_in_locale); /* * iconv generally does not support UTF-8-MAC and so * we have to the output of iconv from NFC to NFD if * need. */ if ((sc->flag & SCONV_FROM_CHARSET) && (sc->flag & SCONV_TO_UTF8)) { if (sc->flag & SCONV_NORMALIZATION_D) add_converter(sc, archive_string_normalize_D); } return; } #endif /* * Try conversion in the best effort or no conversion. */ if ((sc->flag & SCONV_BEST_EFFORT) || sc->same) add_converter(sc, best_effort_strncat_in_locale); else /* Make sure we have no converter. */ sc->nconverter = 0; } /* * Return canonicalized charset-name but this supports just UTF-8, UTF-16BE * and CP932 which are referenced in create_sconv_object(). */ static const char * canonical_charset_name(const char *charset) { char cs[16]; char *p; const char *s; if (charset == NULL || charset[0] == '\0' || strlen(charset) > 15) return (charset); /* Copy name to uppercase. */ p = cs; s = charset; while (*s) { char c = *s++; if (c >= 'a' && c <= 'z') c -= 'a' - 'A'; *p++ = c; } *p++ = '\0'; if (strcmp(cs, "UTF-8") == 0 || strcmp(cs, "UTF8") == 0) return ("UTF-8"); if (strcmp(cs, "UTF-16BE") == 0 || strcmp(cs, "UTF16BE") == 0) return ("UTF-16BE"); if (strcmp(cs, "UTF-16LE") == 0 || strcmp(cs, "UTF16LE") == 0) return ("UTF-16LE"); if (strcmp(cs, "CP932") == 0) return ("CP932"); return (charset); } /* * Create a string conversion object. 
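 *
 * Usage sketch (illustrative, assuming a UTF-8 locale): callers go
 * through get_sconv_object(), which canonicalizes both names first,
 * roughly
 *
 *     sc = create_sconv_object("UTF-8", "UTF-16BE",
 *              current_codepage, SCONV_TO_CHARSET);
 *
 * after which the constructor derives SCONV_FROM_UTF8 and
 * SCONV_TO_UTF16BE from the names and finishes by calling
 * setup_converter().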
*/ static struct archive_string_conv * create_sconv_object(const char *fc, const char *tc, unsigned current_codepage, int flag) { struct archive_string_conv *sc; sc = calloc(1, sizeof(*sc)); if (sc == NULL) return (NULL); sc->next = NULL; sc->from_charset = strdup(fc); if (sc->from_charset == NULL) { free(sc); return (NULL); } sc->to_charset = strdup(tc); if (sc->to_charset == NULL) { free(sc->from_charset); free(sc); return (NULL); } archive_string_init(&sc->utftmp); if (flag & SCONV_TO_CHARSET) { /* * Convert characters from the current locale charset to * a specified charset. */ sc->from_cp = current_codepage; sc->to_cp = make_codepage_from_charset(tc); #if defined(_WIN32) && !defined(__CYGWIN__) if (IsValidCodePage(sc->to_cp)) flag |= SCONV_WIN_CP; #endif } else if (flag & SCONV_FROM_CHARSET) { /* * Convert characters from a specified charset to * the current locale charset. */ sc->to_cp = current_codepage; sc->from_cp = make_codepage_from_charset(fc); #if defined(_WIN32) && !defined(__CYGWIN__) if (IsValidCodePage(sc->from_cp)) flag |= SCONV_WIN_CP; #endif } /* * Check if "from charset" and "to charset" are the same. */ if (strcmp(fc, tc) == 0 || (sc->from_cp != (unsigned)-1 && sc->from_cp == sc->to_cp)) sc->same = 1; else sc->same = 0; /* * Mark if "from charset" or "to charset" are UTF-8 or UTF-16BE/LE. */ if (strcmp(tc, "UTF-8") == 0) flag |= SCONV_TO_UTF8; else if (strcmp(tc, "UTF-16BE") == 0) flag |= SCONV_TO_UTF16BE; else if (strcmp(tc, "UTF-16LE") == 0) flag |= SCONV_TO_UTF16LE; if (strcmp(fc, "UTF-8") == 0) flag |= SCONV_FROM_UTF8; else if (strcmp(fc, "UTF-16BE") == 0) flag |= SCONV_FROM_UTF16BE; else if (strcmp(fc, "UTF-16LE") == 0) flag |= SCONV_FROM_UTF16LE; #if defined(_WIN32) && !defined(__CYGWIN__) if (sc->to_cp == CP_UTF8) flag |= SCONV_TO_UTF8; else if (sc->to_cp == CP_UTF16BE) flag |= SCONV_TO_UTF16BE | SCONV_WIN_CP; else if (sc->to_cp == CP_UTF16LE) flag |= SCONV_TO_UTF16LE | SCONV_WIN_CP; if (sc->from_cp == CP_UTF8) flag |= SCONV_FROM_UTF8; else if (sc->from_cp == CP_UTF16BE) flag |= SCONV_FROM_UTF16BE | SCONV_WIN_CP; else if (sc->from_cp == CP_UTF16LE) flag |= SCONV_FROM_UTF16LE | SCONV_WIN_CP; #endif /* * Set a flag for Unicode NFD. Usually iconv cannot correctly * handle it. So we have to translate NFD characters to NFC ones * ourselves before iconv handles. Another reason is to prevent * that the same sight of two filenames, one is NFC and other * is NFD, would be in its directory. * On Mac OS X, although its filesystem layer automatically * convert filenames to NFD, it would be useful for filename * comparing to find out the same filenames that we normalize * that to be NFD ourselves. */ if ((flag & SCONV_FROM_CHARSET) && (flag & (SCONV_FROM_UTF16 | SCONV_FROM_UTF8))) { #if defined(__APPLE__) if (flag & SCONV_TO_UTF8) flag |= SCONV_NORMALIZATION_D; else #endif flag |= SCONV_NORMALIZATION_C; } #if defined(__APPLE__) /* * In case writing an archive file, make sure that a filename * going to be passed to iconv is a Unicode NFC string since * a filename in HFS Plus filesystem is a Unicode NFD one and * iconv cannot handle it with "UTF-8" charset. It is simpler * than a use of "UTF-8-MAC" charset. */ if ((flag & SCONV_TO_CHARSET) && (flag & (SCONV_FROM_UTF16 | SCONV_FROM_UTF8)) && !(flag & (SCONV_TO_UTF16 | SCONV_TO_UTF8))) flag |= SCONV_NORMALIZATION_C; /* * In case reading an archive file. make sure that a filename * will be passed to users is a Unicode NFD string in order to * correctly compare the filename with other one which comes * from HFS Plus filesystem. 
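 *
 * Worked example (added for illustration): "A with diaeresis" may be
 * stored precomposed as U+00C4 (NFC) or decomposed as U+0041 U+0308
 * (NFD); HFS Plus keeps the decomposed form, so a byte-wise comparison
 * of an NFC name read from an archive against a file name coming from
 * the filesystem would fail unless the archive side is normalized to
 * NFD first.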
*/ if ((flag & SCONV_FROM_CHARSET) && !(flag & (SCONV_FROM_UTF16 | SCONV_FROM_UTF8)) && (flag & SCONV_TO_UTF8)) flag |= SCONV_NORMALIZATION_D; #endif #if defined(HAVE_ICONV) sc->cd_w = (iconv_t)-1; /* * Create an iconv object. */ if (((flag & (SCONV_TO_UTF8 | SCONV_TO_UTF16)) && (flag & (SCONV_FROM_UTF8 | SCONV_FROM_UTF16))) || (flag & SCONV_WIN_CP)) { /* This case we won't use iconv. */ sc->cd = (iconv_t)-1; } else { sc->cd = iconv_open(tc, fc); if (sc->cd == (iconv_t)-1 && (sc->flag & SCONV_BEST_EFFORT)) { /* * Unfortunately, all of iconv implements do support * "CP932" character-set, so we should use "SJIS" * instead if iconv_open failed. */ if (strcmp(tc, "CP932") == 0) sc->cd = iconv_open("SJIS", fc); else if (strcmp(fc, "CP932") == 0) sc->cd = iconv_open(tc, "SJIS"); } #if defined(_WIN32) && !defined(__CYGWIN__) /* * archive_mstring on Windows directly convert multi-bytes * into archive_wstring in order not to depend on locale * so that you can do a I18N programming. This will be * used only in archive_mstring_copy_mbs_len_l so far. */ if (flag & SCONV_FROM_CHARSET) { sc->cd_w = iconv_open("UTF-8", fc); if (sc->cd_w == (iconv_t)-1 && (sc->flag & SCONV_BEST_EFFORT)) { if (strcmp(fc, "CP932") == 0) sc->cd_w = iconv_open("UTF-8", "SJIS"); } } #endif /* _WIN32 && !__CYGWIN__ */ } #endif /* HAVE_ICONV */ sc->flag = flag; /* * Set up converters. */ setup_converter(sc); return (sc); } /* * Free a string conversion object. */ static void free_sconv_object(struct archive_string_conv *sc) { free(sc->from_charset); free(sc->to_charset); archive_string_free(&sc->utftmp); #if HAVE_ICONV if (sc->cd != (iconv_t)-1) iconv_close(sc->cd); if (sc->cd_w != (iconv_t)-1) iconv_close(sc->cd_w); #endif free(sc); } #if defined(_WIN32) && !defined(__CYGWIN__) static unsigned my_atoi(const char *p) { unsigned cp; cp = 0; while (*p) { if (*p >= '0' && *p <= '9') cp = cp * 10 + (*p - '0'); else return (-1); p++; } return (cp); } /* * Translate Charset name (as used by iconv) into CodePage (as used by Windows) * Return -1 if failed. * * Note: This translation code may be insufficient. */ static struct charset { const char *name; unsigned cp; } charsets[] = { /* MUST BE SORTED! 
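 *
 * Note on the invariant, added for illustration: the lookup in
 * make_codepage_from_charset() is a binary search on the uppercased
 * name, so the entries must remain in ascending strcmp() order; a probe
 * for "SJIS" halves the range until it lands on {"SJIS", 932}, and an
 * entry filed out of order would simply never be found.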
*/ {"ASCII", 1252}, {"ASMO-708", 708}, {"BIG5", 950}, {"CHINESE", 936}, {"CP367", 1252}, {"CP819", 1252}, {"CP1025", 21025}, {"DOS-720", 720}, {"DOS-862", 862}, {"EUC-CN", 51936}, {"EUC-JP", 51932}, {"EUC-KR", 949}, {"EUCCN", 51936}, {"EUCJP", 51932}, {"EUCKR", 949}, {"GB18030", 54936}, {"GB2312", 936}, {"HEBREW", 1255}, {"HZ-GB-2312", 52936}, {"IBM273", 20273}, {"IBM277", 20277}, {"IBM278", 20278}, {"IBM280", 20280}, {"IBM284", 20284}, {"IBM285", 20285}, {"IBM290", 20290}, {"IBM297", 20297}, {"IBM367", 1252}, {"IBM420", 20420}, {"IBM423", 20423}, {"IBM424", 20424}, {"IBM819", 1252}, {"IBM871", 20871}, {"IBM880", 20880}, {"IBM905", 20905}, {"IBM924", 20924}, {"ISO-8859-1", 28591}, {"ISO-8859-13", 28603}, {"ISO-8859-15", 28605}, {"ISO-8859-2", 28592}, {"ISO-8859-3", 28593}, {"ISO-8859-4", 28594}, {"ISO-8859-5", 28595}, {"ISO-8859-6", 28596}, {"ISO-8859-7", 28597}, {"ISO-8859-8", 28598}, {"ISO-8859-9", 28599}, {"ISO8859-1", 28591}, {"ISO8859-13", 28603}, {"ISO8859-15", 28605}, {"ISO8859-2", 28592}, {"ISO8859-3", 28593}, {"ISO8859-4", 28594}, {"ISO8859-5", 28595}, {"ISO8859-6", 28596}, {"ISO8859-7", 28597}, {"ISO8859-8", 28598}, {"ISO8859-9", 28599}, {"JOHAB", 1361}, {"KOI8-R", 20866}, {"KOI8-U", 21866}, {"KS_C_5601-1987", 949}, {"LATIN1", 1252}, {"LATIN2", 28592}, {"MACINTOSH", 10000}, {"SHIFT-JIS", 932}, {"SHIFT_JIS", 932}, {"SJIS", 932}, {"US", 1252}, {"US-ASCII", 1252}, {"UTF-16", 1200}, {"UTF-16BE", 1201}, {"UTF-16LE", 1200}, {"UTF-8", CP_UTF8}, {"X-EUROPA", 29001}, {"X-MAC-ARABIC", 10004}, {"X-MAC-CE", 10029}, {"X-MAC-CHINESEIMP", 10008}, {"X-MAC-CHINESETRAD", 10002}, {"X-MAC-CROATIAN", 10082}, {"X-MAC-CYRILLIC", 10007}, {"X-MAC-GREEK", 10006}, {"X-MAC-HEBREW", 10005}, {"X-MAC-ICELANDIC", 10079}, {"X-MAC-JAPANESE", 10001}, {"X-MAC-KOREAN", 10003}, {"X-MAC-ROMANIAN", 10010}, {"X-MAC-THAI", 10021}, {"X-MAC-TURKISH", 10081}, {"X-MAC-UKRAINIAN", 10017}, }; static unsigned make_codepage_from_charset(const char *charset) { char cs[16]; char *p; unsigned cp; int a, b; if (charset == NULL || strlen(charset) > 15) return -1; /* Copy name to uppercase. */ p = cs; while (*charset) { char c = *charset++; if (c >= 'a' && c <= 'z') c -= 'a' - 'A'; *p++ = c; } *p++ = '\0'; cp = -1; /* Look it up in the table first, so that we can easily * override CP367, which we map to 1252 instead of 367. */ a = 0; b = sizeof(charsets)/sizeof(charsets[0]); while (b > a) { int c = (b + a) / 2; int r = strcmp(charsets[c].name, cs); if (r < 0) a = c + 1; else if (r > 0) b = c; else return charsets[c].cp; } /* If it's not in the table, try to parse it. */ switch (*cs) { case 'C': if (cs[1] == 'P' && cs[2] >= '0' && cs[2] <= '9') { cp = my_atoi(cs + 2); } else if (strcmp(cs, "CP_ACP") == 0) cp = get_current_codepage(); else if (strcmp(cs, "CP_OEMCP") == 0) cp = get_current_oemcp(); break; case 'I': if (cs[1] == 'B' && cs[2] == 'M' && cs[3] >= '0' && cs[3] <= '9') { cp = my_atoi(cs + 3); } break; case 'W': if (strncmp(cs, "WINDOWS-", 8) == 0) { cp = my_atoi(cs + 8); if (cp != 874 && (cp < 1250 || cp > 1258)) cp = -1;/* This may invalid code. */ } break; } return (cp); } /* * Return ANSI Code Page of current locale set by setlocale(). 
*/ static unsigned get_current_codepage(void) { char *locale, *p; unsigned cp; locale = setlocale(LC_CTYPE, NULL); if (locale == NULL) return (GetACP()); if (locale[0] == 'C' && locale[1] == '\0') return (CP_C_LOCALE); p = strrchr(locale, '.'); if (p == NULL) return (GetACP()); if (strcmp(p+1, "utf8") == 0) return CP_UTF8; cp = my_atoi(p+1); if ((int)cp <= 0) return (GetACP()); return (cp); } /* * Translation table between Locale Name and ACP/OEMCP. */ static struct { unsigned acp; unsigned ocp; const char *locale; } acp_ocp_map[] = { { 950, 950, "Chinese_Taiwan" }, { 936, 936, "Chinese_People's Republic of China" }, { 950, 950, "Chinese_Taiwan" }, { 1250, 852, "Czech_Czech Republic" }, { 1252, 850, "Danish_Denmark" }, { 1252, 850, "Dutch_Netherlands" }, { 1252, 850, "Dutch_Belgium" }, { 1252, 437, "English_United States" }, { 1252, 850, "English_Australia" }, { 1252, 850, "English_Canada" }, { 1252, 850, "English_New Zealand" }, { 1252, 850, "English_United Kingdom" }, { 1252, 437, "English_United States" }, { 1252, 850, "Finnish_Finland" }, { 1252, 850, "French_France" }, { 1252, 850, "French_Belgium" }, { 1252, 850, "French_Canada" }, { 1252, 850, "French_Switzerland" }, { 1252, 850, "German_Germany" }, { 1252, 850, "German_Austria" }, { 1252, 850, "German_Switzerland" }, { 1253, 737, "Greek_Greece" }, { 1250, 852, "Hungarian_Hungary" }, { 1252, 850, "Icelandic_Iceland" }, { 1252, 850, "Italian_Italy" }, { 1252, 850, "Italian_Switzerland" }, { 932, 932, "Japanese_Japan" }, { 949, 949, "Korean_Korea" }, { 1252, 850, "Norwegian (BokmOl)_Norway" }, { 1252, 850, "Norwegian (BokmOl)_Norway" }, { 1252, 850, "Norwegian-Nynorsk_Norway" }, { 1250, 852, "Polish_Poland" }, { 1252, 850, "Portuguese_Portugal" }, { 1252, 850, "Portuguese_Brazil" }, { 1251, 866, "Russian_Russia" }, { 1250, 852, "Slovak_Slovakia" }, { 1252, 850, "Spanish_Spain" }, { 1252, 850, "Spanish_Mexico" }, { 1252, 850, "Spanish_Spain" }, { 1252, 850, "Swedish_Sweden" }, { 1254, 857, "Turkish_Turkey" }, { 0, 0, NULL} }; /* * Return OEM Code Page of current locale set by setlocale(). */ static unsigned get_current_oemcp(void) { int i; char *locale, *p; size_t len; locale = setlocale(LC_CTYPE, NULL); if (locale == NULL) return (GetOEMCP()); if (locale[0] == 'C' && locale[1] == '\0') return (CP_C_LOCALE); p = strrchr(locale, '.'); if (p == NULL) return (GetOEMCP()); len = p - locale; for (i = 0; acp_ocp_map[i].acp; i++) { if (strncmp(acp_ocp_map[i].locale, locale, len) == 0) return (acp_ocp_map[i].ocp); } return (GetOEMCP()); } #else /* * POSIX platform does not use CodePage. */ static unsigned get_current_codepage(void) { return (-1);/* Unknown */ } static unsigned make_codepage_from_charset(const char *charset) { (void)charset; /* UNUSED */ return (-1);/* Unknown */ } static unsigned get_current_oemcp(void) { return (-1);/* Unknown */ } #endif /* defined(_WIN32) && !defined(__CYGWIN__) */ /* * Return a string conversion object. */ static struct archive_string_conv * get_sconv_object(struct archive *a, const char *fc, const char *tc, int flag) { struct archive_string_conv *sc; unsigned current_codepage; /* Check if we have made the sconv object. 
*/ sc = find_sconv_object(a, fc, tc); if (sc != NULL) return (sc); if (a == NULL) current_codepage = get_current_codepage(); else current_codepage = a->current_codepage; sc = create_sconv_object(canonical_charset_name(fc), canonical_charset_name(tc), current_codepage, flag); if (sc == NULL) { if (a != NULL) archive_set_error(a, ENOMEM, "Could not allocate memory for " "a string conversion object"); return (NULL); } /* * If there is no converter for current string conversion object, * we cannot handle this conversion. */ if (sc->nconverter == 0) { if (a != NULL) { #if HAVE_ICONV archive_set_error(a, ARCHIVE_ERRNO_MISC, "iconv_open failed : Cannot handle ``%s''", (flag & SCONV_TO_CHARSET)?tc:fc); #else archive_set_error(a, ARCHIVE_ERRNO_MISC, "A character-set conversion not fully supported " "on this platform"); #endif } /* Failed; free a sconv object. */ free_sconv_object(sc); return (NULL); } /* * Success! */ if (a != NULL) add_sconv_object(a, sc); return (sc); } static const char * get_current_charset(struct archive *a) { const char *cur_charset; if (a == NULL) cur_charset = default_iconv_charset(""); else { cur_charset = default_iconv_charset(a->current_code); if (a->current_code == NULL) { a->current_code = strdup(cur_charset); a->current_codepage = get_current_codepage(); a->current_oemcp = get_current_oemcp(); } } return (cur_charset); } /* * Make and Return a string conversion object. * Return NULL if the platform does not support the specified conversion * and best_effort is 0. * If best_effort is set, A string conversion object must be returned * unless memory allocation for the object fails, but the conversion * might fail when non-ASCII code is found. */ struct archive_string_conv * archive_string_conversion_to_charset(struct archive *a, const char *charset, int best_effort) { int flag = SCONV_TO_CHARSET; if (best_effort) flag |= SCONV_BEST_EFFORT; return (get_sconv_object(a, get_current_charset(a), charset, flag)); } struct archive_string_conv * archive_string_conversion_from_charset(struct archive *a, const char *charset, int best_effort) { int flag = SCONV_FROM_CHARSET; if (best_effort) flag |= SCONV_BEST_EFFORT; return (get_sconv_object(a, charset, get_current_charset(a), flag)); } /* * archive_string_default_conversion_*_archive() are provided for Windows * platform because other archiver application use CP_OEMCP for * MultiByteToWideChar() and WideCharToMultiByte() for the filenames * in tar or zip files. But mbstowcs/wcstombs(CRT) usually use CP_ACP * unless you use setlocale(LC_ALL, ".OCP")(specify CP_OEMCP). * So we should make a string conversion between CP_ACP and CP_OEMCP * for compatibility. */ #if defined(_WIN32) && !defined(__CYGWIN__) struct archive_string_conv * archive_string_default_conversion_for_read(struct archive *a) { const char *cur_charset = get_current_charset(a); char oemcp[16]; /* NOTE: a check of cur_charset is unneeded but we need * that get_current_charset() has been surely called at * this time whatever C compiler optimized. */ if (cur_charset != NULL && (a->current_codepage == CP_C_LOCALE || a->current_codepage == a->current_oemcp)) return (NULL);/* no conversion. */ _snprintf(oemcp, sizeof(oemcp)-1, "CP%d", a->current_oemcp); /* Make sure a null termination must be set. 
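 *
 * (Added note, hedged: _snprintf() does not NUL-terminate when the
 * formatted text fills the buffer, hence the explicit terminator below;
 * for a typical value such as 850 the buffer ends up holding "CP850".)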
*/ oemcp[sizeof(oemcp)-1] = '\0'; return (get_sconv_object(a, oemcp, cur_charset, SCONV_FROM_CHARSET)); } struct archive_string_conv * archive_string_default_conversion_for_write(struct archive *a) { const char *cur_charset = get_current_charset(a); char oemcp[16]; /* NOTE: a check of cur_charset is unneeded but we need * that get_current_charset() has been surely called at * this time whatever C compiler optimized. */ if (cur_charset != NULL && (a->current_codepage == CP_C_LOCALE || a->current_codepage == a->current_oemcp)) return (NULL);/* no conversion. */ _snprintf(oemcp, sizeof(oemcp)-1, "CP%d", a->current_oemcp); /* Make sure a null termination must be set. */ oemcp[sizeof(oemcp)-1] = '\0'; return (get_sconv_object(a, cur_charset, oemcp, SCONV_TO_CHARSET)); } #else struct archive_string_conv * archive_string_default_conversion_for_read(struct archive *a) { (void)a; /* UNUSED */ return (NULL); } struct archive_string_conv * archive_string_default_conversion_for_write(struct archive *a) { (void)a; /* UNUSED */ return (NULL); } #endif /* * Dispose of all character conversion objects in the archive object. */ void archive_string_conversion_free(struct archive *a) { struct archive_string_conv *sc; struct archive_string_conv *sc_next; for (sc = a->sconv; sc != NULL; sc = sc_next) { sc_next = sc->next; free_sconv_object(sc); } a->sconv = NULL; free(a->current_code); a->current_code = NULL; } /* * Return a conversion charset name. */ const char * archive_string_conversion_charset_name(struct archive_string_conv *sc) { if (sc->flag & SCONV_TO_CHARSET) return (sc->to_charset); else return (sc->from_charset); } /* * Change the behavior of a string conversion. */ void archive_string_conversion_set_opt(struct archive_string_conv *sc, int opt) { switch (opt) { /* * A filename in UTF-8 was made with libarchive 2.x in a wrong * assumption that wchar_t was Unicode. * This option enables simulating the assumption in order to read * that filename correctly. */ case SCONV_SET_OPT_UTF8_LIBARCHIVE2X: #if (defined(_WIN32) && !defined(__CYGWIN__)) \ || defined(__STDC_ISO_10646__) || defined(__APPLE__) /* * Nothing to do for it since wchar_t on these platforms * is really Unicode. */ (void)sc; /* UNUSED */ #else if ((sc->flag & SCONV_UTF8_LIBARCHIVE_2) == 0) { sc->flag |= SCONV_UTF8_LIBARCHIVE_2; /* Set up string converters. */ setup_converter(sc); } #endif break; case SCONV_SET_OPT_NORMALIZATION_C: if ((sc->flag & SCONV_NORMALIZATION_C) == 0) { sc->flag |= SCONV_NORMALIZATION_C; sc->flag &= ~SCONV_NORMALIZATION_D; /* Set up string converters. */ setup_converter(sc); } break; case SCONV_SET_OPT_NORMALIZATION_D: #if defined(HAVE_ICONV) /* * If iconv will take the string, do not change the * setting of the normalization. */ if (!(sc->flag & SCONV_WIN_CP) && (sc->flag & (SCONV_FROM_UTF16 | SCONV_FROM_UTF8)) && !(sc->flag & (SCONV_TO_UTF16 | SCONV_TO_UTF8))) break; #endif if ((sc->flag & SCONV_NORMALIZATION_D) == 0) { sc->flag |= SCONV_NORMALIZATION_D; sc->flag &= ~SCONV_NORMALIZATION_C; /* Set up string converters. */ setup_converter(sc); } break; default: break; } } /* * * Copy one archive_string to another in locale conversion. * * archive_strncat_l(); * archive_strncpy_l(); * */ static size_t mbsnbytes(const void *_p, size_t n) { size_t s; const char *p, *pp; if (_p == NULL) return (0); p = (const char *)_p; /* Like strlen(p), except won't examine positions beyond p[n]. 
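 *
 * For illustration: mbsnbytes("abc", 10) returns 3, stopping at the
 * NUL, and mbsnbytes("abcdef", 3) returns 3, stopping at the limit, so
 * the result is min(strlen(p), n) and no byte past p[n-1] is read.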
*/ s = 0; pp = p; while (s < n && *pp) { pp++; s++; } return (s); } static size_t utf16nbytes(const void *_p, size_t n) { size_t s; const char *p, *pp; if (_p == NULL) return (0); p = (const char *)_p; /* Like strlen(p), except won't examine positions beyond p[n]. */ s = 0; pp = p; n >>= 1; while (s < n && (pp[0] || pp[1])) { pp += 2; s++; } return (s<<1); } int archive_strncpy_l(struct archive_string *as, const void *_p, size_t n, struct archive_string_conv *sc) { as->length = 0; return (archive_strncat_l(as, _p, n, sc)); } int archive_strncat_l(struct archive_string *as, const void *_p, size_t n, struct archive_string_conv *sc) { const void *s; size_t length = 0; int i, r = 0, r2; if (_p != NULL && n > 0) { if (sc != NULL && (sc->flag & SCONV_FROM_UTF16)) length = utf16nbytes(_p, n); else length = mbsnbytes(_p, n); } /* We must allocate memory even if there is no data for conversion * or copy. This simulates archive_string_append behavior. */ if (length == 0) { int tn = 1; if (sc != NULL && (sc->flag & SCONV_TO_UTF16)) tn = 2; if (archive_string_ensure(as, as->length + tn) == NULL) return (-1); as->s[as->length] = 0; if (tn == 2) as->s[as->length+1] = 0; return (0); } /* * If sc is NULL, we just make a copy. */ if (sc == NULL) { if (archive_string_append(as, _p, length) == NULL) return (-1);/* No memory */ return (0); } s = _p; i = 0; if (sc->nconverter > 1) { sc->utftmp.length = 0; r2 = sc->converter[0](&(sc->utftmp), s, length, sc); if (r2 != 0 && errno == ENOMEM) return (r2); if (r > r2) r = r2; s = sc->utftmp.s; length = sc->utftmp.length; ++i; } r2 = sc->converter[i](as, s, length, sc); if (r > r2) r = r2; return (r); } #if HAVE_ICONV /* * Return -1 if conversion fails. */ static int iconv_strncat_in_locale(struct archive_string *as, const void *_p, size_t length, struct archive_string_conv *sc) { ICONV_CONST char *itp; size_t remaining; iconv_t cd; char *outp; size_t avail, bs; int return_value = 0; /* success */ int to_size, from_size; if (sc->flag & SCONV_TO_UTF16) to_size = 2; else to_size = 1; if (sc->flag & SCONV_FROM_UTF16) from_size = 2; else from_size = 1; if (archive_string_ensure(as, as->length + length*2+to_size) == NULL) return (-1); cd = sc->cd; itp = (char *)(uintptr_t)_p; remaining = length; outp = as->s + as->length; avail = as->buffer_length - as->length - to_size; while (remaining >= (size_t)from_size) { size_t result = iconv(cd, &itp, &remaining, &outp, &avail); if (result != (size_t)-1) break; /* Conversion completed. */ if (errno == EILSEQ || errno == EINVAL) { /* * If an output charset is UTF-8 or UTF-16BE/LE, * unknown character should be U+FFFD * (replacement character). */ if (sc->flag & (SCONV_TO_UTF8 | SCONV_TO_UTF16)) { size_t rbytes; if (sc->flag & SCONV_TO_UTF8) rbytes = sizeof(utf8_replacement_char); else rbytes = 2; if (avail < rbytes) { as->length = outp - as->s; bs = as->buffer_length + (remaining * to_size) + rbytes; if (NULL == archive_string_ensure(as, bs)) return (-1); outp = as->s + as->length; avail = as->buffer_length - as->length - to_size; } if (sc->flag & SCONV_TO_UTF8) memcpy(outp, utf8_replacement_char, sizeof(utf8_replacement_char)); else if (sc->flag & SCONV_TO_UTF16BE) archive_be16enc(outp, UNICODE_R_CHAR); else archive_le16enc(outp, UNICODE_R_CHAR); outp += rbytes; avail -= rbytes; } else { /* Skip the illegal input bytes. */ *outp++ = '?'; avail--; } itp += from_size; remaining -= from_size; return_value = -1; /* failure */ } else { /* E2BIG no output buffer, * Increase an output buffer. 
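 *
 * Added note on the retry (hedged reading of the loop): iconv() reports
 * E2BIG with itp and remaining left at the unconsumed input, so the
 * code below grows the archive_string, recomputes outp and avail, and
 * simply re-enters the loop; the call
 *
 *     iconv(cd, &itp, &remaining, &outp, &avail)
 *
 * is therefore repeated until the input is consumed or a real
 * conversion error stops it.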
*/ as->length = outp - as->s; bs = as->buffer_length + remaining * 2; if (NULL == archive_string_ensure(as, bs)) return (-1); outp = as->s + as->length; avail = as->buffer_length - as->length - to_size; } } as->length = outp - as->s; as->s[as->length] = 0; if (to_size == 2) as->s[as->length+1] = 0; return (return_value); } #endif /* HAVE_ICONV */ #if defined(_WIN32) && !defined(__CYGWIN__) /* * Translate a string from a some CodePage to an another CodePage by * Windows APIs, and copy the result. Return -1 if conversion fails. */ static int strncat_in_codepage(struct archive_string *as, const void *_p, size_t length, struct archive_string_conv *sc) { const char *s = (const char *)_p; struct archive_wstring aws; size_t l; int r, saved_flag; archive_string_init(&aws); saved_flag = sc->flag; sc->flag &= ~(SCONV_NORMALIZATION_D | SCONV_NORMALIZATION_C); r = archive_wstring_append_from_mbs_in_codepage(&aws, s, length, sc); sc->flag = saved_flag; if (r != 0) { archive_wstring_free(&aws); if (errno != ENOMEM) archive_string_append(as, s, length); return (-1); } l = as->length; r = archive_string_append_from_wcs_in_codepage( as, aws.s, aws.length, sc); if (r != 0 && errno != ENOMEM && l == as->length) archive_string_append(as, s, length); archive_wstring_free(&aws); return (r); } /* * Test whether MBS ==> WCS is okay. */ static int invalid_mbs(const void *_p, size_t n, struct archive_string_conv *sc) { const char *p = (const char *)_p; unsigned codepage; DWORD mbflag = MB_ERR_INVALID_CHARS; if (sc->flag & SCONV_FROM_CHARSET) codepage = sc->to_cp; else codepage = sc->from_cp; if (codepage == CP_C_LOCALE) return (0); if (codepage != CP_UTF8) mbflag |= MB_PRECOMPOSED; if (MultiByteToWideChar(codepage, mbflag, p, (int)n, NULL, 0) == 0) return (-1); /* Invalid */ return (0); /* Okay */ } #else /* * Test whether MBS ==> WCS is okay. */ static int invalid_mbs(const void *_p, size_t n, struct archive_string_conv *sc) { const char *p = (const char *)_p; size_t r; #if HAVE_MBRTOWC mbstate_t shift_state; memset(&shift_state, 0, sizeof(shift_state)); #else /* Clear the shift state before starting. */ mbtowc(NULL, NULL, 0); #endif while (n) { wchar_t wc; #if HAVE_MBRTOWC r = mbrtowc(&wc, p, n, &shift_state); #else r = mbtowc(&wc, p, n); #endif if (r == (size_t)-1 || r == (size_t)-2) return (-1);/* Invalid. */ if (r == 0) break; p += r; n -= r; } (void)sc; /* UNUSED */ return (0); /* All Okey. */ } #endif /* defined(_WIN32) && !defined(__CYGWIN__) */ /* * Basically returns -1 because we cannot make a conversion of charset * without iconv but in some cases this would return 0. * Returns 0 if all copied characters are ASCII. * Returns 0 if both from-locale and to-locale are the same and those * can be WCS with no error. */ static int best_effort_strncat_in_locale(struct archive_string *as, const void *_p, size_t length, struct archive_string_conv *sc) { size_t remaining; const uint8_t *itp; int return_value = 0; /* success */ /* * If both from-locale and to-locale is the same, this makes a copy. * And then this checks all copied MBS can be WCS if so returns 0. */ if (sc->same) { if (archive_string_append(as, _p, length) == NULL) return (-1);/* No memory */ return (invalid_mbs(_p, length, sc)); } /* * If a character is ASCII, this just copies it. If not, this * assigns '?' character instead but in UTF-8 locale this assigns * byte sequence 0xEF 0xBD 0xBD, which are code point U+FFFD, * a Replacement Character in Unicode. 
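 *
 * Worked example (illustrative): each input byte above 0x7F is replaced
 * on its own, so the two bytes 0xC3 0x84 (UTF-8 for U+00C4) come out as
 * two '?' characters, or as two copies of U+FFFD when the target is
 * UTF-8, and the function reports -1 in both cases.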
*/ remaining = length; itp = (const uint8_t *)_p; while (*itp && remaining > 0) { if (*itp > 127) { // Non-ASCII: Substitute with suitable replacement if (sc->flag & SCONV_TO_UTF8) { if (archive_string_append(as, utf8_replacement_char, sizeof(utf8_replacement_char)) == NULL) { __archive_errx(1, "Out of memory"); } } else { archive_strappend_char(as, '?'); } return_value = -1; } else { archive_strappend_char(as, *itp); } ++itp; } return (return_value); } /* * Unicode conversion functions. * - UTF-8 <===> UTF-8 in removing surrogate pairs. * - UTF-8 NFD ===> UTF-8 NFC in removing surrogate pairs. * - UTF-8 made by libarchive 2.x ===> UTF-8. * - UTF-16BE <===> UTF-8. * */ /* * Utility to convert a single UTF-8 sequence. * * Usually return used bytes, return used byte in negative value when * a unicode character is replaced with U+FFFD. * See also http://unicode.org/review/pr-121.html Public Review Issue #121 * Recommended Practice for Replacement Characters. */ static int _utf8_to_unicode(uint32_t *pwc, const char *s, size_t n) { static const char utf8_count[256] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 00 - 0F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 10 - 1F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 20 - 2F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 30 - 3F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 40 - 4F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 50 - 5F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 60 - 6F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 70 - 7F */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,/* 80 - 8F */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,/* 90 - 9F */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,/* A0 - AF */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,/* B0 - BF */ 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,/* C0 - CF */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,/* D0 - DF */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,/* E0 - EF */ 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* F0 - FF */ }; int ch, i; int cnt; uint32_t wc; /* Sanity check. */ if (n == 0) return (0); /* * Decode 1-4 bytes depending on the value of the first byte. */ ch = (unsigned char)*s; if (ch == 0) return (0); /* Standard: return 0 for end-of-string. */ cnt = utf8_count[ch]; /* Invalid sequence or there are not plenty bytes. */ if ((int)n < cnt) { cnt = (int)n; for (i = 1; i < cnt; i++) { if ((s[i] & 0xc0) != 0x80) { cnt = i; break; } } goto invalid_sequence; } /* Make a Unicode code point from a single UTF-8 sequence. */ switch (cnt) { case 1: /* 1 byte sequence. */ *pwc = ch & 0x7f; return (cnt); case 2: /* 2 bytes sequence. */ if ((s[1] & 0xc0) != 0x80) { cnt = 1; goto invalid_sequence; } *pwc = ((ch & 0x1f) << 6) | (s[1] & 0x3f); return (cnt); case 3: /* 3 bytes sequence. */ if ((s[1] & 0xc0) != 0x80) { cnt = 1; goto invalid_sequence; } if ((s[2] & 0xc0) != 0x80) { cnt = 2; goto invalid_sequence; } wc = ((ch & 0x0f) << 12) | ((s[1] & 0x3f) << 6) | (s[2] & 0x3f); if (wc < 0x800) goto invalid_sequence;/* Overlong sequence. */ break; case 4: /* 4 bytes sequence. */ if ((s[1] & 0xc0) != 0x80) { cnt = 1; goto invalid_sequence; } if ((s[2] & 0xc0) != 0x80) { cnt = 2; goto invalid_sequence; } if ((s[3] & 0xc0) != 0x80) { cnt = 3; goto invalid_sequence; } wc = ((ch & 0x07) << 18) | ((s[1] & 0x3f) << 12) | ((s[2] & 0x3f) << 6) | (s[3] & 0x3f); if (wc < 0x10000) goto invalid_sequence;/* Overlong sequence. */ break; default: /* Others are all invalid sequence. 
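 *
 * Recovery sketch, added for illustration: a lead byte that can never
 * start a valid sequence still gets a length guess so the caller can
 * skip past it, e.g. 0xC0/0xC1 consume up to 2 bytes, 0xF5-0xF7 up to
 * 4, 0xF8-0xFB up to 5 and 0xFC/0xFD up to 6; the span is trimmed at
 * the first byte that is not a 10xxxxxx continuation, U+FFFD is
 * reported, and the byte count is returned negated.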
*/ if (ch == 0xc0 || ch == 0xc1) cnt = 2; else if (ch >= 0xf5 && ch <= 0xf7) cnt = 4; else if (ch >= 0xf8 && ch <= 0xfb) cnt = 5; else if (ch == 0xfc || ch == 0xfd) cnt = 6; else cnt = 1; if ((int)n < cnt) cnt = (int)n; for (i = 1; i < cnt; i++) { if ((s[i] & 0xc0) != 0x80) { cnt = i; break; } } goto invalid_sequence; } /* The code point larger than 0x10FFFF is not legal * Unicode values. */ if (wc > UNICODE_MAX) goto invalid_sequence; /* Correctly gets a Unicode, returns used bytes. */ *pwc = wc; return (cnt); invalid_sequence: *pwc = UNICODE_R_CHAR;/* set the Replacement Character instead. */ return (cnt * -1); } static int utf8_to_unicode(uint32_t *pwc, const char *s, size_t n) { int cnt; cnt = _utf8_to_unicode(pwc, s, n); /* Any of Surrogate pair is not legal Unicode values. */ if (cnt == 3 && IS_SURROGATE_PAIR_LA(*pwc)) return (-3); return (cnt); } static inline uint32_t combine_surrogate_pair(uint32_t uc, uint32_t uc2) { uc -= 0xD800; uc *= 0x400; uc += uc2 - 0xDC00; uc += 0x10000; return (uc); } /* * Convert a single UTF-8/CESU-8 sequence to a Unicode code point in * removing surrogate pairs. * * CESU-8: The Compatibility Encoding Scheme for UTF-16. * * Usually return used bytes, return used byte in negative value when * a unicode character is replaced with U+FFFD. */ static int cesu8_to_unicode(uint32_t *pwc, const char *s, size_t n) { uint32_t wc = 0; int cnt; cnt = _utf8_to_unicode(&wc, s, n); if (cnt == 3 && IS_HIGH_SURROGATE_LA(wc)) { uint32_t wc2 = 0; if (n - 3 < 3) { /* Invalid byte sequence. */ goto invalid_sequence; } cnt = _utf8_to_unicode(&wc2, s+3, n-3); if (cnt != 3 || !IS_LOW_SURROGATE_LA(wc2)) { /* Invalid byte sequence. */ goto invalid_sequence; } wc = combine_surrogate_pair(wc, wc2); cnt = 6; } else if (cnt == 3 && IS_LOW_SURROGATE_LA(wc)) { /* Invalid byte sequence. */ goto invalid_sequence; } *pwc = wc; return (cnt); invalid_sequence: *pwc = UNICODE_R_CHAR;/* set the Replacement Character instead. */ if (cnt > 0) cnt *= -1; return (cnt); } /* * Convert a Unicode code point to a single UTF-8 sequence. * * NOTE:This function does not check if the Unicode is legal or not. * Please you definitely check it before calling this. */ static size_t unicode_to_utf8(char *p, size_t remaining, uint32_t uc) { char *_p = p; /* Invalid Unicode char maps to Replacement character */ if (uc > UNICODE_MAX) uc = UNICODE_R_CHAR; /* Translate code point to UTF8 */ if (uc <= 0x7f) { if (remaining == 0) return (0); *p++ = (char)uc; } else if (uc <= 0x7ff) { if (remaining < 2) return (0); *p++ = 0xc0 | ((uc >> 6) & 0x1f); *p++ = 0x80 | (uc & 0x3f); } else if (uc <= 0xffff) { if (remaining < 3) return (0); *p++ = 0xe0 | ((uc >> 12) & 0x0f); *p++ = 0x80 | ((uc >> 6) & 0x3f); *p++ = 0x80 | (uc & 0x3f); } else { if (remaining < 4) return (0); *p++ = 0xf0 | ((uc >> 18) & 0x07); *p++ = 0x80 | ((uc >> 12) & 0x3f); *p++ = 0x80 | ((uc >> 6) & 0x3f); *p++ = 0x80 | (uc & 0x3f); } return (p - _p); } static int utf16be_to_unicode(uint32_t *pwc, const char *s, size_t n) { return (utf16_to_unicode(pwc, s, n, 1)); } static int utf16le_to_unicode(uint32_t *pwc, const char *s, size_t n) { return (utf16_to_unicode(pwc, s, n, 0)); } static int utf16_to_unicode(uint32_t *pwc, const char *s, size_t n, int be) { const char *utf16 = s; unsigned uc; if (n == 0) return (0); if (n == 1) { /* set the Replacement Character instead. 
*/ *pwc = UNICODE_R_CHAR; return (-1); } if (be) uc = archive_be16dec(utf16); else uc = archive_le16dec(utf16); utf16 += 2; /* If this is a surrogate pair, assemble the full code point.*/ if (IS_HIGH_SURROGATE_LA(uc)) { unsigned uc2; if (n >= 4) { if (be) uc2 = archive_be16dec(utf16); else uc2 = archive_le16dec(utf16); } else uc2 = 0; if (IS_LOW_SURROGATE_LA(uc2)) { uc = combine_surrogate_pair(uc, uc2); utf16 += 2; } else { /* Undescribed code point should be U+FFFD * (replacement character). */ *pwc = UNICODE_R_CHAR; return (-2); } } /* * Surrogate pair values(0xd800 through 0xdfff) are only * used by UTF-16, so, after above calculation, the code * must not be surrogate values, and Unicode has no codes * larger than 0x10ffff. Thus, those are not legal Unicode * values. */ if (IS_SURROGATE_PAIR_LA(uc) || uc > UNICODE_MAX) { /* Undescribed code point should be U+FFFD * (replacement character). */ *pwc = UNICODE_R_CHAR; return (((int)(utf16 - s)) * -1); } *pwc = uc; return ((int)(utf16 - s)); } static size_t unicode_to_utf16be(char *p, size_t remaining, uint32_t uc) { char *utf16 = p; if (uc > 0xffff) { /* We have a code point that won't fit into a * wchar_t; convert it to a surrogate pair. */ if (remaining < 4) return (0); uc -= 0x10000; archive_be16enc(utf16, ((uc >> 10) & 0x3ff) + 0xD800); archive_be16enc(utf16+2, (uc & 0x3ff) + 0xDC00); return (4); } else { if (remaining < 2) return (0); archive_be16enc(utf16, uc); return (2); } } static size_t unicode_to_utf16le(char *p, size_t remaining, uint32_t uc) { char *utf16 = p; if (uc > 0xffff) { /* We have a code point that won't fit into a * wchar_t; convert it to a surrogate pair. */ if (remaining < 4) return (0); uc -= 0x10000; archive_le16enc(utf16, ((uc >> 10) & 0x3ff) + 0xD800); archive_le16enc(utf16+2, (uc & 0x3ff) + 0xDC00); return (4); } else { if (remaining < 2) return (0); archive_le16enc(utf16, uc); return (2); } } /* * Copy UTF-8 string in checking surrogate pair. * If any surrogate pair are found, it would be canonicalized. */ static int strncat_from_utf8_to_utf8(struct archive_string *as, const void *_p, size_t len, struct archive_string_conv *sc) { const char *s; char *p, *endp; int n, ret = 0; (void)sc; /* UNUSED */ if (archive_string_ensure(as, as->length + len + 1) == NULL) return (-1); s = (const char *)_p; p = as->s + as->length; endp = as->s + as->buffer_length -1; do { uint32_t uc; const char *ss = s; size_t w; /* * Forward byte sequence until a conversion of that is needed. */ while ((n = utf8_to_unicode(&uc, s, len)) > 0) { s += n; len -= n; } if (ss < s) { if (p + (s - ss) > endp) { as->length = p - as->s; if (archive_string_ensure(as, as->buffer_length + len + 1) == NULL) return (-1); p = as->s + as->length; endp = as->s + as->buffer_length -1; } memcpy(p, ss, s - ss); p += s - ss; } /* * If n is negative, current byte sequence needs a replacement. */ if (n < 0) { if (n == -3 && IS_SURROGATE_PAIR_LA(uc)) { /* Current byte sequence may be CESU-8. */ n = cesu8_to_unicode(&uc, s, len); } if (n < 0) { ret = -1; n *= -1;/* Use a replaced unicode character. */ } /* Rebuild UTF-8 byte sequence. 
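 *
 * Worked example (illustrative): the CESU-8 sequence ED A0 81 ED B0 80
 * encodes the surrogate pair D801/DC00; cesu8_to_unicode() above
 * recombines it via combine_surrogate_pair() into U+10400, and the
 * rebuild below emits the proper 4-byte UTF-8 form F0 90 90 80 in its
 * place.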
*/ while ((w = unicode_to_utf8(p, endp - p, uc)) == 0) { as->length = p - as->s; if (archive_string_ensure(as, as->buffer_length + len + 1) == NULL) return (-1); p = as->s + as->length; endp = as->s + as->buffer_length -1; } p += w; s += n; len -= n; } } while (n > 0); as->length = p - as->s; as->s[as->length] = '\0'; return (ret); } static int archive_string_append_unicode(struct archive_string *as, const void *_p, size_t len, struct archive_string_conv *sc) { const char *s; char *p, *endp; uint32_t uc; size_t w; int n, ret = 0, ts, tm; int (*parse)(uint32_t *, const char *, size_t); size_t (*unparse)(char *, size_t, uint32_t); if (sc->flag & SCONV_TO_UTF16BE) { unparse = unicode_to_utf16be; ts = 2; } else if (sc->flag & SCONV_TO_UTF16LE) { unparse = unicode_to_utf16le; ts = 2; } else if (sc->flag & SCONV_TO_UTF8) { unparse = unicode_to_utf8; ts = 1; } else { /* * This case is going to be converted to another * character-set through iconv. */ if (sc->flag & SCONV_FROM_UTF16BE) { unparse = unicode_to_utf16be; ts = 2; } else if (sc->flag & SCONV_FROM_UTF16LE) { unparse = unicode_to_utf16le; ts = 2; } else { unparse = unicode_to_utf8; ts = 1; } } if (sc->flag & SCONV_FROM_UTF16BE) { parse = utf16be_to_unicode; tm = 1; } else if (sc->flag & SCONV_FROM_UTF16LE) { parse = utf16le_to_unicode; tm = 1; } else { parse = cesu8_to_unicode; tm = ts; } if (archive_string_ensure(as, as->length + len * tm + ts) == NULL) return (-1); s = (const char *)_p; p = as->s + as->length; endp = as->s + as->buffer_length - ts; while ((n = parse(&uc, s, len)) != 0) { if (n < 0) { /* Use a replaced unicode character. */ n *= -1; ret = -1; } s += n; len -= n; while ((w = unparse(p, endp - p, uc)) == 0) { /* There is not enough output buffer so * we have to expand it. */ as->length = p - as->s; if (archive_string_ensure(as, as->buffer_length + len * tm + ts) == NULL) return (-1); p = as->s + as->length; endp = as->s + as->buffer_length - ts; } p += w; } as->length = p - as->s; as->s[as->length] = '\0'; if (ts == 2) as->s[as->length+1] = '\0'; return (ret); } /* * Following Constants for Hangul compositions this information comes from * Unicode Standard Annex #15 http://unicode.org/reports/tr15/ */ #define HC_SBASE 0xAC00 #define HC_LBASE 0x1100 #define HC_VBASE 0x1161 #define HC_TBASE 0x11A7 #define HC_LCOUNT 19 #define HC_VCOUNT 21 #define HC_TCOUNT 28 #define HC_NCOUNT (HC_VCOUNT * HC_TCOUNT) #define HC_SCOUNT (HC_LCOUNT * HC_NCOUNT) static uint32_t get_nfc(uint32_t uc, uint32_t uc2) { int t, b; t = 0; b = sizeof(u_composition_table)/sizeof(u_composition_table[0]) -1; while (b >= t) { int m = (t + b) / 2; if (u_composition_table[m].cp1 < uc) t = m + 1; else if (u_composition_table[m].cp1 > uc) b = m - 1; else if (u_composition_table[m].cp2 < uc2) t = m + 1; else if (u_composition_table[m].cp2 > uc2) b = m - 1; else return (u_composition_table[m].nfc); } return (0); } #define FDC_MAX 10 /* The maximum number of Following Decomposable * Characters. */ /* * Update first code point. */ #define UPDATE_UC(new_uc) do { \ uc = new_uc; \ ucptr = NULL; \ } while (0) /* * Replace first code point with second code point. 
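 *
 * (Used by the NFC loop when the current pair did not compose: uc is
 * flushed and uc2 becomes the new candidate. For illustration, with the
 * NFD input U+0041 U+0308 the pair does compose, get_nfc(0x0041,
 * 0x0308) yields 0x00C4, and UPDATE_UC() is taken instead.)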
*/ #define REPLACE_UC_WITH_UC2() do { \ uc = uc2; \ ucptr = uc2ptr; \ n = n2; \ } while (0) #define EXPAND_BUFFER() do { \ as->length = p - as->s; \ if (archive_string_ensure(as, \ as->buffer_length + len * tm + ts) == NULL)\ return (-1); \ p = as->s + as->length; \ endp = as->s + as->buffer_length - ts; \ } while (0) #define UNPARSE(p, endp, uc) do { \ while ((w = unparse(p, (endp) - (p), uc)) == 0) {\ EXPAND_BUFFER(); \ } \ p += w; \ } while (0) /* * Write first code point. * If the code point has not be changed from its original code, * this just copies it from its original buffer pointer. * If not, this converts it to UTF-8 byte sequence and copies it. */ #define WRITE_UC() do { \ if (ucptr) { \ if (p + n > endp) \ EXPAND_BUFFER(); \ switch (n) { \ case 4: \ *p++ = *ucptr++; \ /* FALL THROUGH */ \ case 3: \ *p++ = *ucptr++; \ /* FALL THROUGH */ \ case 2: \ *p++ = *ucptr++; \ /* FALL THROUGH */ \ case 1: \ *p++ = *ucptr; \ break; \ } \ ucptr = NULL; \ } else { \ UNPARSE(p, endp, uc); \ } \ } while (0) /* * Collect following decomposable code points. */ #define COLLECT_CPS(start) do { \ int _i; \ for (_i = start; _i < FDC_MAX ; _i++) { \ nx = parse(&ucx[_i], s, len); \ if (nx <= 0) \ break; \ cx = CCC(ucx[_i]); \ if (cl >= cx && cl != 228 && cx != 228)\ break; \ s += nx; \ len -= nx; \ cl = cx; \ ccx[_i] = cx; \ } \ if (_i >= FDC_MAX) { \ ret = -1; \ ucx_size = FDC_MAX; \ } else \ ucx_size = _i; \ } while (0) /* * Normalize UTF-8/UTF-16BE characters to Form C and copy the result. * * TODO: Convert composition exclusions, which are never converted * from NFC,NFD,NFKC and NFKD, to Form C. */ static int archive_string_normalize_C(struct archive_string *as, const void *_p, size_t len, struct archive_string_conv *sc) { const char *s = (const char *)_p; char *p, *endp; uint32_t uc, uc2; size_t w; int always_replace, n, n2, ret = 0, spair, ts, tm; int (*parse)(uint32_t *, const char *, size_t); size_t (*unparse)(char *, size_t, uint32_t); always_replace = 1; ts = 1;/* text size. */ if (sc->flag & SCONV_TO_UTF16BE) { unparse = unicode_to_utf16be; ts = 2; if (sc->flag & SCONV_FROM_UTF16BE) always_replace = 0; } else if (sc->flag & SCONV_TO_UTF16LE) { unparse = unicode_to_utf16le; ts = 2; if (sc->flag & SCONV_FROM_UTF16LE) always_replace = 0; } else if (sc->flag & SCONV_TO_UTF8) { unparse = unicode_to_utf8; if (sc->flag & SCONV_FROM_UTF8) always_replace = 0; } else { /* * This case is going to be converted to another * character-set through iconv. */ always_replace = 0; if (sc->flag & SCONV_FROM_UTF16BE) { unparse = unicode_to_utf16be; ts = 2; } else if (sc->flag & SCONV_FROM_UTF16LE) { unparse = unicode_to_utf16le; ts = 2; } else { unparse = unicode_to_utf8; } } if (sc->flag & SCONV_FROM_UTF16BE) { parse = utf16be_to_unicode; tm = 1; spair = 4;/* surrogate pair size in UTF-16. */ } else if (sc->flag & SCONV_FROM_UTF16LE) { parse = utf16le_to_unicode; tm = 1; spair = 4;/* surrogate pair size in UTF-16. */ } else { parse = cesu8_to_unicode; tm = ts; spair = 6;/* surrogate pair size in UTF-8. */ } if (archive_string_ensure(as, as->length + len * tm + ts) == NULL) return (-1); p = as->s + as->length; endp = as->s + as->buffer_length - ts; while ((n = parse(&uc, s, len)) != 0) { const char *ucptr, *uc2ptr; if (n < 0) { /* Use a replaced unicode character. */ UNPARSE(p, endp, uc); s += n*-1; len -= n*-1; ret = -1; continue; } else if (n == spair || always_replace) /* uc is converted from a surrogate pair. * this should be treated as a changed code. 
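 *
 * (Added note, hedged: leaving ucptr NULL makes WRITE_UC() re-encode uc
 * with the output encoder instead of copying the original input bytes,
 * which for a CESU-8 surrogate pair would otherwise re-emit the 6-byte
 * form rather than, e.g., the proper 4-byte UTF-8 sequence.)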
*/ ucptr = NULL; else ucptr = s; s += n; len -= n; /* Read second code point. */ while ((n2 = parse(&uc2, s, len)) > 0) { uint32_t ucx[FDC_MAX]; int ccx[FDC_MAX]; int cl, cx, i, nx, ucx_size; int LIndex,SIndex; uint32_t nfc; if (n2 == spair || always_replace) /* uc2 is converted from a surrogate pair. * this should be treated as a changed code. */ uc2ptr = NULL; else uc2ptr = s; s += n2; len -= n2; /* * If current second code point is out of decomposable * code points, finding compositions is unneeded. */ if (!IS_DECOMPOSABLE_BLOCK(uc2)) { WRITE_UC(); REPLACE_UC_WITH_UC2(); continue; } /* * Try to combine current code points. */ /* * We have to combine Hangul characters according to * http://uniicode.org/reports/tr15/#Hangul */ if (0 <= (LIndex = uc - HC_LBASE) && LIndex < HC_LCOUNT) { /* * Hangul Composition. * 1. Two current code points are L and V. */ int VIndex = uc2 - HC_VBASE; if (0 <= VIndex && VIndex < HC_VCOUNT) { /* Make syllable of form LV. */ UPDATE_UC(HC_SBASE + (LIndex * HC_VCOUNT + VIndex) * HC_TCOUNT); } else { WRITE_UC(); REPLACE_UC_WITH_UC2(); } continue; } else if (0 <= (SIndex = uc - HC_SBASE) && SIndex < HC_SCOUNT && (SIndex % HC_TCOUNT) == 0) { /* * Hangul Composition. * 2. Two current code points are LV and T. */ int TIndex = uc2 - HC_TBASE; if (0 < TIndex && TIndex < HC_TCOUNT) { /* Make syllable of form LVT. */ UPDATE_UC(uc + TIndex); } else { WRITE_UC(); REPLACE_UC_WITH_UC2(); } continue; } else if ((nfc = get_nfc(uc, uc2)) != 0) { /* A composition to current code points * is found. */ UPDATE_UC(nfc); continue; } else if ((cl = CCC(uc2)) == 0) { /* Clearly 'uc2' the second code point is not * a decomposable code. */ WRITE_UC(); REPLACE_UC_WITH_UC2(); continue; } /* * Collect following decomposable code points. */ cx = 0; ucx[0] = uc2; ccx[0] = cl; COLLECT_CPS(1); /* * Find a composed code in the collected code points. */ i = 1; while (i < ucx_size) { int j; if ((nfc = get_nfc(uc, ucx[i])) == 0) { i++; continue; } /* * nfc is composed of uc and ucx[i]. */ UPDATE_UC(nfc); /* * Remove ucx[i] by shifting * following code points. */ for (j = i; j+1 < ucx_size; j++) { ucx[j] = ucx[j+1]; ccx[j] = ccx[j+1]; } ucx_size --; /* * Collect following code points blocked * by ucx[i] the removed code point. */ if (ucx_size > 0 && i == ucx_size && nx > 0 && cx == cl) { cl = ccx[ucx_size-1]; COLLECT_CPS(ucx_size); } /* * Restart finding a composed code with * the updated uc from the top of the * collected code points. */ i = 0; } /* * Apparently the current code points are not * decomposed characters or already composed. */ WRITE_UC(); for (i = 0; i < ucx_size; i++) UNPARSE(p, endp, ucx[i]); /* * Flush out remaining canonical combining characters. */ if (nx > 0 && cx == cl && len > 0) { while ((nx = parse(&ucx[0], s, len)) > 0) { cx = CCC(ucx[0]); if (cl > cx) break; s += nx; len -= nx; cl = cx; UNPARSE(p, endp, ucx[0]); } } break; } if (n2 < 0) { WRITE_UC(); /* Use a replaced unicode character. */ UNPARSE(p, endp, uc2); s += n2*-1; len -= n2*-1; ret = -1; continue; } else if (n2 == 0) { WRITE_UC(); break; } } as->length = p - as->s; as->s[as->length] = '\0'; if (ts == 2) as->s[as->length+1] = '\0'; return (ret); } static int get_nfd(uint32_t *cp1, uint32_t *cp2, uint32_t uc) { int t, b; /* * These are not converted to NFD on Mac OS. */ if ((uc >= 0x2000 && uc <= 0x2FFF) || (uc >= 0xF900 && uc <= 0xFAFF) || (uc >= 0x2F800 && uc <= 0x2FAFF)) return (0); /* * Those code points are not converted to NFD on Mac OS. * I do not know the reason because it is undocumented. 
* NFC NFD * 1109A ==> 11099 110BA * 1109C ==> 1109B 110BA * 110AB ==> 110A5 110BA */ if (uc == 0x1109A || uc == 0x1109C || uc == 0x110AB) return (0); t = 0; b = sizeof(u_decomposition_table)/sizeof(u_decomposition_table[0]) -1; while (b >= t) { int m = (t + b) / 2; if (u_decomposition_table[m].nfc < uc) t = m + 1; else if (u_decomposition_table[m].nfc > uc) b = m - 1; else { *cp1 = u_decomposition_table[m].cp1; *cp2 = u_decomposition_table[m].cp2; return (1); } } return (0); } #define REPLACE_UC_WITH(cp) do { \ uc = cp; \ ucptr = NULL; \ } while (0) /* * Normalize UTF-8 characters to Form D and copy the result. */ static int archive_string_normalize_D(struct archive_string *as, const void *_p, size_t len, struct archive_string_conv *sc) { const char *s = (const char *)_p; char *p, *endp; uint32_t uc, uc2; size_t w; int always_replace, n, n2, ret = 0, spair, ts, tm; int (*parse)(uint32_t *, const char *, size_t); size_t (*unparse)(char *, size_t, uint32_t); always_replace = 1; ts = 1;/* text size. */ if (sc->flag & SCONV_TO_UTF16BE) { unparse = unicode_to_utf16be; ts = 2; if (sc->flag & SCONV_FROM_UTF16BE) always_replace = 0; } else if (sc->flag & SCONV_TO_UTF16LE) { unparse = unicode_to_utf16le; ts = 2; if (sc->flag & SCONV_FROM_UTF16LE) always_replace = 0; } else if (sc->flag & SCONV_TO_UTF8) { unparse = unicode_to_utf8; if (sc->flag & SCONV_FROM_UTF8) always_replace = 0; } else { /* * This case is going to be converted to another * character-set through iconv. */ always_replace = 0; if (sc->flag & SCONV_FROM_UTF16BE) { unparse = unicode_to_utf16be; ts = 2; } else if (sc->flag & SCONV_FROM_UTF16LE) { unparse = unicode_to_utf16le; ts = 2; } else { unparse = unicode_to_utf8; } } if (sc->flag & SCONV_FROM_UTF16BE) { parse = utf16be_to_unicode; tm = 1; spair = 4;/* surrogate pair size in UTF-16. */ } else if (sc->flag & SCONV_FROM_UTF16LE) { parse = utf16le_to_unicode; tm = 1; spair = 4;/* surrogate pair size in UTF-16. */ } else { parse = cesu8_to_unicode; tm = ts; spair = 6;/* surrogate pair size in UTF-8. */ } if (archive_string_ensure(as, as->length + len * tm + ts) == NULL) return (-1); p = as->s + as->length; endp = as->s + as->buffer_length - ts; while ((n = parse(&uc, s, len)) != 0) { const char *ucptr; uint32_t cp1, cp2; int SIndex; struct { uint32_t uc; int ccc; } fdc[FDC_MAX]; int fdi, fdj; int ccc; check_first_code: if (n < 0) { /* Use a replaced unicode character. */ UNPARSE(p, endp, uc); s += n*-1; len -= n*-1; ret = -1; continue; } else if (n == spair || always_replace) /* uc is converted from a surrogate pair. * this should be treated as a changed code. */ ucptr = NULL; else ucptr = s; s += n; len -= n; /* Hangul Decomposition. */ if ((SIndex = uc - HC_SBASE) >= 0 && SIndex < HC_SCOUNT) { int L = HC_LBASE + SIndex / HC_NCOUNT; int V = HC_VBASE + (SIndex % HC_NCOUNT) / HC_TCOUNT; int T = HC_TBASE + SIndex % HC_TCOUNT; REPLACE_UC_WITH(L); WRITE_UC(); REPLACE_UC_WITH(V); WRITE_UC(); if (T != HC_TBASE) { REPLACE_UC_WITH(T); WRITE_UC(); } continue; } if (IS_DECOMPOSABLE_BLOCK(uc) && CCC(uc) != 0) { WRITE_UC(); continue; } fdi = 0; while (get_nfd(&cp1, &cp2, uc) && fdi < FDC_MAX) { int k; for (k = fdi; k > 0; k--) fdc[k] = fdc[k-1]; fdc[0].ccc = CCC(cp2); fdc[0].uc = cp2; fdi++; REPLACE_UC_WITH(cp1); } /* Read following code points. 
*/ while ((n2 = parse(&uc2, s, len)) > 0 && (ccc = CCC(uc2)) != 0 && fdi < FDC_MAX) { int j, k; s += n2; len -= n2; for (j = 0; j < fdi; j++) { if (fdc[j].ccc > ccc) break; } if (j < fdi) { for (k = fdi; k > j; k--) fdc[k] = fdc[k-1]; fdc[j].ccc = ccc; fdc[j].uc = uc2; } else { fdc[fdi].ccc = ccc; fdc[fdi].uc = uc2; } fdi++; } WRITE_UC(); for (fdj = 0; fdj < fdi; fdj++) { REPLACE_UC_WITH(fdc[fdj].uc); WRITE_UC(); } if (n2 == 0) break; REPLACE_UC_WITH(uc2); n = n2; goto check_first_code; } as->length = p - as->s; as->s[as->length] = '\0'; if (ts == 2) as->s[as->length+1] = '\0'; return (ret); } /* * libarchive 2.x made incorrect UTF-8 strings in the wrong assumption * that WCS is Unicode. It is true for several platforms but some are false. * And then people who did not use UTF-8 locale on the non Unicode WCS * platform and made a tar file with libarchive(mostly bsdtar) 2.x. Those * now cannot get right filename from libarchive 3.x and later since we * fixed the wrong assumption and it is incompatible to older its versions. * So we provide special option, "compat-2x.x", for resolving it. * That option enable the string conversion of libarchive 2.x. * * Translates the wrong UTF-8 string made by libarchive 2.x into current * locale character set and appends to the archive_string. * Note: returns -1 if conversion fails. */ static int strncat_from_utf8_libarchive2(struct archive_string *as, const void *_p, size_t len, struct archive_string_conv *sc) { const char *s; int n; char *p; char *end; uint32_t unicode; #if HAVE_WCRTOMB mbstate_t shift_state; memset(&shift_state, 0, sizeof(shift_state)); #else /* Clear the shift state before starting. */ wctomb(NULL, L'\0'); #endif (void)sc; /* UNUSED */ /* * Allocate buffer for MBS. * We need this allocation here since it is possible that * as->s is still NULL. */ if (archive_string_ensure(as, as->length + len + 1) == NULL) return (-1); s = (const char *)_p; p = as->s + as->length; end = as->s + as->buffer_length - MB_CUR_MAX -1; while ((n = _utf8_to_unicode(&unicode, s, len)) != 0) { wchar_t wc; if (p >= end) { as->length = p - as->s; /* Re-allocate buffer for MBS. */ if (archive_string_ensure(as, as->length + len * 2 + 1) == NULL) return (-1); p = as->s + as->length; end = as->s + as->buffer_length - MB_CUR_MAX -1; } /* * As libarchive 2.x, translates the UTF-8 characters into * wide-characters in the assumption that WCS is Unicode. */ if (n < 0) { n *= -1; wc = L'?'; } else wc = (wchar_t)unicode; s += n; len -= n; /* * Translates the wide-character into the current locale MBS. */ #if HAVE_WCRTOMB n = (int)wcrtomb(p, wc, &shift_state); #else n = (int)wctomb(p, wc); #endif if (n == -1) return (-1); p += n; } as->length = p - as->s; as->s[as->length] = '\0'; return (0); } /* * Conversion functions between current locale dependent MBS and UTF-16BE. * strncat_from_utf16be() : UTF-16BE --> MBS * strncat_to_utf16be() : MBS --> UTF16BE */ #if defined(_WIN32) && !defined(__CYGWIN__) /* * Convert a UTF-16BE/LE string to current locale and copy the result. * Return -1 if conversion fails. */ static int win_strncat_from_utf16(struct archive_string *as, const void *_p, size_t bytes, struct archive_string_conv *sc, int be) { struct archive_string tmp; const char *u16; int ll; BOOL defchar; char *mbs; size_t mbs_size, b; int ret = 0; bytes &= ~1; if (archive_string_ensure(as, as->length + bytes +1) == NULL) return (-1); mbs = as->s + as->length; mbs_size = as->buffer_length - as->length -1; if (sc->to_cp == CP_C_LOCALE) { /* * "C" locale special process. 
*/ u16 = _p; ll = 0; for (b = 0; b < bytes; b += 2) { uint16_t val; if (be) val = archive_be16dec(u16+b); else val = archive_le16dec(u16+b); if (val > 255) { *mbs++ = '?'; ret = -1; } else *mbs++ = (char)(val&0xff); ll++; } as->length += ll; as->s[as->length] = '\0'; return (ret); } archive_string_init(&tmp); if (be) { if (is_big_endian()) { u16 = _p; } else { if (archive_string_ensure(&tmp, bytes+2) == NULL) return (-1); memcpy(tmp.s, _p, bytes); for (b = 0; b < bytes; b += 2) { uint16_t val = archive_be16dec(tmp.s+b); archive_le16enc(tmp.s+b, val); } u16 = tmp.s; } } else { if (!is_big_endian()) { u16 = _p; } else { if (archive_string_ensure(&tmp, bytes+2) == NULL) return (-1); memcpy(tmp.s, _p, bytes); for (b = 0; b < bytes; b += 2) { uint16_t val = archive_le16dec(tmp.s+b); archive_be16enc(tmp.s+b, val); } u16 = tmp.s; } } do { defchar = 0; ll = WideCharToMultiByte(sc->to_cp, 0, (LPCWSTR)u16, (int)bytes>>1, mbs, (int)mbs_size, NULL, &defchar); /* Exit loop if we succeeded */ if (ll != 0 || GetLastError() != ERROR_INSUFFICIENT_BUFFER) { break; } /* Else expand buffer and loop to try again. */ ll = WideCharToMultiByte(sc->to_cp, 0, (LPCWSTR)u16, (int)bytes, NULL, 0, NULL, NULL); if (archive_string_ensure(as, ll +1) == NULL) return (-1); mbs = as->s + as->length; mbs_size = as->buffer_length - as->length -1; } while (1); archive_string_free(&tmp); as->length += ll; as->s[as->length] = '\0'; if (ll == 0 || defchar) ret = -1; return (ret); } static int win_strncat_from_utf16be(struct archive_string *as, const void *_p, size_t bytes, struct archive_string_conv *sc) { return (win_strncat_from_utf16(as, _p, bytes, sc, 1)); } static int win_strncat_from_utf16le(struct archive_string *as, const void *_p, size_t bytes, struct archive_string_conv *sc) { return (win_strncat_from_utf16(as, _p, bytes, sc, 0)); } static int is_big_endian(void) { uint16_t d = 1; return (archive_be16dec(&d) == 1); } /* * Convert a current locale string to UTF-16BE/LE and copy the result. * Return -1 if conversion fails. */ static int win_strncat_to_utf16(struct archive_string *as16, const void *_p, size_t length, struct archive_string_conv *sc, int bigendian) { const char *s = (const char *)_p; char *u16; size_t count, avail; if (archive_string_ensure(as16, as16->length + (length + 1) * 2) == NULL) return (-1); u16 = as16->s + as16->length; avail = as16->buffer_length - 2; if (sc->from_cp == CP_C_LOCALE) { /* * "C" locale special process. 
*/ count = 0; while (count < length && *s) { if (bigendian) archive_be16enc(u16, *s); else archive_le16enc(u16, *s); u16 += 2; s++; count++; } as16->length += count << 1; as16->s[as16->length] = 0; as16->s[as16->length+1] = 0; return (0); } do { count = MultiByteToWideChar(sc->from_cp, MB_PRECOMPOSED, s, (int)length, (LPWSTR)u16, (int)avail>>1); /* Exit loop if we succeeded */ if (count != 0 || GetLastError() != ERROR_INSUFFICIENT_BUFFER) { break; } /* Expand buffer and try again */ count = MultiByteToWideChar(sc->from_cp, MB_PRECOMPOSED, s, (int)length, NULL, 0); if (archive_string_ensure(as16, (count +1) * 2) == NULL) return (-1); u16 = as16->s + as16->length; avail = as16->buffer_length - 2; } while (1); as16->length += count * 2; as16->s[as16->length] = 0; as16->s[as16->length+1] = 0; if (count == 0) return (-1); if (is_big_endian()) { if (!bigendian) { while (count > 0) { uint16_t v = archive_be16dec(u16); archive_le16enc(u16, v); u16 += 2; count--; } } } else { if (bigendian) { while (count > 0) { uint16_t v = archive_le16dec(u16); archive_be16enc(u16, v); u16 += 2; count--; } } } return (0); } static int win_strncat_to_utf16be(struct archive_string *as16, const void *_p, size_t length, struct archive_string_conv *sc) { return (win_strncat_to_utf16(as16, _p, length, sc, 1)); } static int win_strncat_to_utf16le(struct archive_string *as16, const void *_p, size_t length, struct archive_string_conv *sc) { return (win_strncat_to_utf16(as16, _p, length, sc, 0)); } #endif /* _WIN32 && !__CYGWIN__ */ /* * Do the best effort for conversions. * We cannot handle UTF-16BE character-set without such iconv, * but there is a chance if a string consists just ASCII code or * a current locale is UTF-8. */ /* * Convert a UTF-16BE string to current locale and copy the result. * Return -1 if conversion fails. */ static int best_effort_strncat_from_utf16(struct archive_string *as, const void *_p, size_t bytes, struct archive_string_conv *sc, int be) { const char *utf16 = (const char *)_p; char *mbs; uint32_t uc; int n, ret; (void)sc; /* UNUSED */ /* * Other case, we should do the best effort. * If all character are ASCII(<0x7f), we can convert it. * if not , we set a alternative character and return -1. */ ret = 0; if (archive_string_ensure(as, as->length + bytes +1) == NULL) return (-1); mbs = as->s + as->length; while ((n = utf16_to_unicode(&uc, utf16, bytes, be)) != 0) { if (n < 0) { n *= -1; ret = -1; } bytes -= n; utf16 += n; if (uc > 127) { /* We cannot handle it. */ *mbs++ = '?'; ret = -1; } else *mbs++ = (char)uc; } as->length = mbs - as->s; as->s[as->length] = '\0'; return (ret); } static int best_effort_strncat_from_utf16be(struct archive_string *as, const void *_p, size_t bytes, struct archive_string_conv *sc) { return (best_effort_strncat_from_utf16(as, _p, bytes, sc, 1)); } static int best_effort_strncat_from_utf16le(struct archive_string *as, const void *_p, size_t bytes, struct archive_string_conv *sc) { return (best_effort_strncat_from_utf16(as, _p, bytes, sc, 0)); } /* * Convert a current locale string to UTF-16BE/LE and copy the result. * Return -1 if conversion fails. */ static int best_effort_strncat_to_utf16(struct archive_string *as16, const void *_p, size_t length, struct archive_string_conv *sc, int bigendian) { const char *s = (const char *)_p; char *utf16; size_t remaining; int ret; (void)sc; /* UNUSED */ /* * Other case, we should do the best effort. * If all character are ASCII(<0x7f), we can convert it. * if not , we set a alternative character and return -1. 
*/ ret = 0; remaining = length; if (archive_string_ensure(as16, as16->length + (length + 1) * 2) == NULL) return (-1); utf16 = as16->s + as16->length; while (remaining--) { unsigned c = *s++; if (c > 127) { /* We cannot handle it. */ c = UNICODE_R_CHAR; ret = -1; } if (bigendian) archive_be16enc(utf16, c); else archive_le16enc(utf16, c); utf16 += 2; } as16->length = utf16 - as16->s; as16->s[as16->length] = 0; as16->s[as16->length+1] = 0; return (ret); } static int best_effort_strncat_to_utf16be(struct archive_string *as16, const void *_p, size_t length, struct archive_string_conv *sc) { return (best_effort_strncat_to_utf16(as16, _p, length, sc, 1)); } static int best_effort_strncat_to_utf16le(struct archive_string *as16, const void *_p, size_t length, struct archive_string_conv *sc) { return (best_effort_strncat_to_utf16(as16, _p, length, sc, 0)); } /* * Multistring operations. */ void archive_mstring_clean(struct archive_mstring *aes) { archive_wstring_free(&(aes->aes_wcs)); archive_string_free(&(aes->aes_mbs)); archive_string_free(&(aes->aes_utf8)); archive_string_free(&(aes->aes_mbs_in_locale)); aes->aes_set = 0; } void archive_mstring_copy(struct archive_mstring *dest, struct archive_mstring *src) { dest->aes_set = src->aes_set; archive_string_copy(&(dest->aes_mbs), &(src->aes_mbs)); archive_string_copy(&(dest->aes_utf8), &(src->aes_utf8)); archive_wstring_copy(&(dest->aes_wcs), &(src->aes_wcs)); } int archive_mstring_get_utf8(struct archive *a, struct archive_mstring *aes, const char **p) { struct archive_string_conv *sc; int r; /* If we already have a UTF8 form, return that immediately. */ if (aes->aes_set & AES_SET_UTF8) { *p = aes->aes_utf8.s; return (0); } *p = NULL; if (aes->aes_set & AES_SET_MBS) { sc = archive_string_conversion_to_charset(a, "UTF-8", 1); if (sc == NULL) return (-1);/* Couldn't allocate memory for sc. */ r = archive_strncpy_l(&(aes->aes_utf8), aes->aes_mbs.s, aes->aes_mbs.length, sc); if (a == NULL) free_sconv_object(sc); if (r == 0) { aes->aes_set |= AES_SET_UTF8; *p = aes->aes_utf8.s; return (0);/* success. */ } else return (-1);/* failure. */ } return (0);/* success. */ } int archive_mstring_get_mbs(struct archive *a, struct archive_mstring *aes, const char **p) { int r, ret = 0; (void)a; /* UNUSED */ /* If we already have an MBS form, return that immediately. */ if (aes->aes_set & AES_SET_MBS) { *p = aes->aes_mbs.s; return (ret); } *p = NULL; /* If there's a WCS form, try converting with the native locale. */ if (aes->aes_set & AES_SET_WCS) { archive_string_empty(&(aes->aes_mbs)); r = archive_string_append_from_wcs(&(aes->aes_mbs), aes->aes_wcs.s, aes->aes_wcs.length); *p = aes->aes_mbs.s; if (r == 0) { aes->aes_set |= AES_SET_MBS; return (ret); } else ret = -1; } /* * Only a UTF-8 form cannot avail because its conversion already * failed at archive_mstring_update_utf8(). */ return (ret); } int archive_mstring_get_wcs(struct archive *a, struct archive_mstring *aes, const wchar_t **wp) { int r, ret = 0; (void)a;/* UNUSED */ /* Return WCS form if we already have it. */ if (aes->aes_set & AES_SET_WCS) { *wp = aes->aes_wcs.s; return (ret); } *wp = NULL; /* Try converting MBS to WCS using native locale. */ if (aes->aes_set & AES_SET_MBS) { archive_wstring_empty(&(aes->aes_wcs)); r = archive_wstring_append_from_mbs(&(aes->aes_wcs), aes->aes_mbs.s, aes->aes_mbs.length); if (r == 0) { aes->aes_set |= AES_SET_WCS; *wp = aes->aes_wcs.s; } else ret = -1;/* failure. 
*/ } return (ret); } int archive_mstring_get_mbs_l(struct archive_mstring *aes, const char **p, size_t *length, struct archive_string_conv *sc) { int r, ret = 0; #if defined(_WIN32) && !defined(__CYGWIN__) /* * Internationalization programming on Windows must use Wide * characters because Windows platform cannot make locale UTF-8. */ if (sc != NULL && (aes->aes_set & AES_SET_WCS) != 0) { archive_string_empty(&(aes->aes_mbs_in_locale)); r = archive_string_append_from_wcs_in_codepage( &(aes->aes_mbs_in_locale), aes->aes_wcs.s, aes->aes_wcs.length, sc); if (r == 0) { *p = aes->aes_mbs_in_locale.s; if (length != NULL) *length = aes->aes_mbs_in_locale.length; return (0); } else if (errno == ENOMEM) return (-1); else ret = -1; } #endif /* If there is not an MBS form but is a WCS form, try converting * with the native locale to be used for translating it to specified * character-set. */ if ((aes->aes_set & AES_SET_MBS) == 0 && (aes->aes_set & AES_SET_WCS) != 0) { archive_string_empty(&(aes->aes_mbs)); r = archive_string_append_from_wcs(&(aes->aes_mbs), aes->aes_wcs.s, aes->aes_wcs.length); if (r == 0) aes->aes_set |= AES_SET_MBS; else if (errno == ENOMEM) return (-1); else ret = -1; } /* If we already have an MBS form, use it to be translated to * specified character-set. */ if (aes->aes_set & AES_SET_MBS) { if (sc == NULL) { /* Conversion is unneeded. */ *p = aes->aes_mbs.s; if (length != NULL) *length = aes->aes_mbs.length; return (0); } ret = archive_strncpy_l(&(aes->aes_mbs_in_locale), aes->aes_mbs.s, aes->aes_mbs.length, sc); *p = aes->aes_mbs_in_locale.s; if (length != NULL) *length = aes->aes_mbs_in_locale.length; } else { *p = NULL; if (length != NULL) *length = 0; } return (ret); } int archive_mstring_copy_mbs(struct archive_mstring *aes, const char *mbs) { if (mbs == NULL) { aes->aes_set = 0; return (0); } return (archive_mstring_copy_mbs_len(aes, mbs, strlen(mbs))); } int archive_mstring_copy_mbs_len(struct archive_mstring *aes, const char *mbs, size_t len) { if (mbs == NULL) { aes->aes_set = 0; return (0); } aes->aes_set = AES_SET_MBS; /* Only MBS form is set now. */ archive_strncpy(&(aes->aes_mbs), mbs, len); archive_string_empty(&(aes->aes_utf8)); archive_wstring_empty(&(aes->aes_wcs)); return (0); } int archive_mstring_copy_wcs(struct archive_mstring *aes, const wchar_t *wcs) { return archive_mstring_copy_wcs_len(aes, wcs, wcs == NULL ? 0 : wcslen(wcs)); } int archive_mstring_copy_utf8(struct archive_mstring *aes, const char *utf8) { if (utf8 == NULL) { aes->aes_set = 0; return (0); } aes->aes_set = AES_SET_UTF8; archive_string_empty(&(aes->aes_mbs)); archive_string_empty(&(aes->aes_wcs)); archive_strncpy(&(aes->aes_utf8), utf8, strlen(utf8)); return (int)strlen(utf8); } int archive_mstring_copy_wcs_len(struct archive_mstring *aes, const wchar_t *wcs, size_t len) { if (wcs == NULL) { aes->aes_set = 0; return (0); } aes->aes_set = AES_SET_WCS; /* Only WCS form set. */ archive_string_empty(&(aes->aes_mbs)); archive_string_empty(&(aes->aes_utf8)); archive_wstrncpy(&(aes->aes_wcs), wcs, len); return (0); } int archive_mstring_copy_mbs_len_l(struct archive_mstring *aes, const char *mbs, size_t len, struct archive_string_conv *sc) { int r; if (mbs == NULL) { aes->aes_set = 0; return (0); } archive_string_empty(&(aes->aes_mbs)); archive_wstring_empty(&(aes->aes_wcs)); archive_string_empty(&(aes->aes_utf8)); #if defined(_WIN32) && !defined(__CYGWIN__) /* * Internationalization programming on Windows must use Wide * characters because Windows platform cannot make locale UTF-8. 
*/ if (sc == NULL) { if (archive_string_append(&(aes->aes_mbs), mbs, mbsnbytes(mbs, len)) == NULL) { aes->aes_set = 0; r = -1; } else { aes->aes_set = AES_SET_MBS; r = 0; } #if defined(HAVE_ICONV) } else if (sc != NULL && sc->cd_w != (iconv_t)-1) { /* * This case happens only when MultiByteToWideChar() cannot * handle sc->from_cp, and we have to iconv in order to * translate character-set to wchar_t,UTF-16. */ iconv_t cd = sc->cd; unsigned from_cp; int flag; /* * Translate multi-bytes from some character-set to UTF-8. */ sc->cd = sc->cd_w; r = archive_strncpy_l(&(aes->aes_utf8), mbs, len, sc); sc->cd = cd; if (r != 0) { aes->aes_set = 0; return (r); } aes->aes_set = AES_SET_UTF8; /* * Append the UTF-8 string into wstring. */ flag = sc->flag; sc->flag &= ~(SCONV_NORMALIZATION_C | SCONV_TO_UTF16| SCONV_FROM_UTF16); from_cp = sc->from_cp; sc->from_cp = CP_UTF8; r = archive_wstring_append_from_mbs_in_codepage(&(aes->aes_wcs), aes->aes_utf8.s, aes->aes_utf8.length, sc); sc->flag = flag; sc->from_cp = from_cp; if (r == 0) aes->aes_set |= AES_SET_WCS; #endif } else { r = archive_wstring_append_from_mbs_in_codepage( &(aes->aes_wcs), mbs, len, sc); if (r == 0) aes->aes_set = AES_SET_WCS; else aes->aes_set = 0; } #else r = archive_strncpy_l(&(aes->aes_mbs), mbs, len, sc); if (r == 0) aes->aes_set = AES_SET_MBS; /* Only MBS form is set now. */ else aes->aes_set = 0; #endif return (r); } /* * The 'update' form tries to proactively update all forms of * this string (WCS and MBS) and returns an error if any of * them fail. This is used by the 'pax' handler, for instance, * to detect and report character-conversion failures early while * still allowing clients to get potentially useful values from * the more tolerant lazy conversions. (get_mbs and get_wcs will * strive to give the user something useful, so you can get hopefully * usable values even if some of the character conversions are failing.) */ int archive_mstring_update_utf8(struct archive *a, struct archive_mstring *aes, const char *utf8) { struct archive_string_conv *sc; int r; if (utf8 == NULL) { aes->aes_set = 0; return (0); /* Succeeded in clearing everything. */ } /* Save the UTF8 string. */ archive_strcpy(&(aes->aes_utf8), utf8); /* Empty the mbs and wcs strings. */ archive_string_empty(&(aes->aes_mbs)); archive_wstring_empty(&(aes->aes_wcs)); aes->aes_set = AES_SET_UTF8; /* Only UTF8 is set now. */ /* Try converting UTF-8 to MBS, return false on failure. */ sc = archive_string_conversion_from_charset(a, "UTF-8", 1); if (sc == NULL) return (-1);/* Couldn't allocate memory for sc. */ r = archive_strcpy_l(&(aes->aes_mbs), utf8, sc); if (a == NULL) free_sconv_object(sc); if (r != 0) return (-1); aes->aes_set = AES_SET_UTF8 | AES_SET_MBS; /* Both UTF8 and MBS set. */ /* Try converting MBS to WCS, return false on failure. */ if (archive_wstring_append_from_mbs(&(aes->aes_wcs), aes->aes_mbs.s, aes->aes_mbs.length)) return (-1); aes->aes_set = AES_SET_UTF8 | AES_SET_WCS | AES_SET_MBS; /* All conversions succeeded. */ return (0); }
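/*
 * Standalone reference sketch (not part of the libarchive sources): the
 * Hangul branches of archive_string_normalize_C() and archive_string_normalize_D()
 * above implement the algorithmic composition/decomposition from UAX #15.
 * The HC_* macros in the real code are assumed to carry the standard Unicode
 * constants used here; this snippet only shows the arithmetic in isolation.
 */
#include <stdint.h>
#include <stdio.h>

#define SBASE 0xAC00u	/* first precomposed Hangul syllable */
#define LBASE 0x1100u	/* first leading consonant (L) */
#define VBASE 0x1161u	/* first vowel (V) */
#define TBASE 0x11A7u	/* one before the first trailing consonant (T) */
#define LCOUNT 19u
#define VCOUNT 21u
#define TCOUNT 28u	/* 27 trailing consonants plus the "no trailing" slot */
#define NCOUNT (VCOUNT * TCOUNT)	/* 588 */
#define SCOUNT (LCOUNT * NCOUNT)	/* 11172 */

/*
 * Compose L+V into an LV syllable, or LV+T into an LVT syllable.
 * Returns 0 if the two code points do not compose.
 */
static uint32_t
hangul_compose(uint32_t a, uint32_t b)
{
	/* L + V ==> LV */
	if (a >= LBASE && a < LBASE + LCOUNT &&
	    b >= VBASE && b < VBASE + VCOUNT)
		return (SBASE + ((a - LBASE) * VCOUNT + (b - VBASE)) * TCOUNT);
	/* LV + T ==> LVT; an LV syllable has (S - SBASE) % TCOUNT == 0 */
	if (a >= SBASE && a < SBASE + SCOUNT && (a - SBASE) % TCOUNT == 0 &&
	    b > TBASE && b < TBASE + TCOUNT)
		return (a + (b - TBASE));
	return (0);
}

int
main(void)
{
	uint32_t lv, lvt;

	lv = hangul_compose(0x1112, 0x1161);	/* U+1112 + U+1161 ==> U+D558 */
	lvt = hangul_compose(lv, 0x11AB);	/* U+D558 + U+11AB ==> U+D55C */
	printf("L+V  -> U+%04X\n", (unsigned)lv);
	printf("LV+T -> U+%04X\n", (unsigned)lvt);
	return (0);
}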
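/*
 * Standalone reference sketch (not part of the libarchive sources): the
 * UTF-16 parsers used above and below (utf16_to_unicode() and the internal
 * combine_surrogate_pair() helper, whose definition is not shown in this
 * excerpt) presumably fold a surrogate pair into one scalar value with the
 * standard formula sketched here.  This is why "spair" is 4 bytes for
 * UTF-16 input and 6 bytes when the pair arrives CESU-8-encoded in the
 * normalization routines.  The function name below is illustrative only.
 */
#include <stdint.h>

static uint32_t
surrogates_to_scalar(uint32_t high, uint32_t low)
{
	/* high is in U+D800..U+DBFF, low is in U+DC00..U+DFFF */
	return (0x10000u + ((high - 0xD800u) << 10) + (low - 0xDC00u));
}
/* Example: 0xD83D, 0xDE00 ==> 0x1F600. */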
/*- * Copyright (c) 2003-2011 Tim Kientzle * Copyright (c) 2011-2012 Michihiro NAKAJIMA * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "archive_platform.h" __FBSDID("$FreeBSD: head/lib/libarchive/archive_string.c 201095 2009-12-28 02:33:22Z kientzle $"); /* * Basic resizable string support, to simplify manipulating arbitrary-sized * strings while minimizing heap activity. * * In particular, the buffer used by a string object is only grown, it * never shrinks, so you can clear and reuse the same string object * without incurring additional memory allocations. */ #ifdef HAVE_ERRNO_H #include <errno.h> #endif #ifdef HAVE_ICONV_H #include <iconv.h> #endif #ifdef HAVE_LANGINFO_H #include <langinfo.h> #endif #ifdef HAVE_LOCALCHARSET_H #include <localcharset.h> #endif #ifdef HAVE_STDLIB_H #include <stdlib.h> #endif #ifdef HAVE_STRING_H #include <string.h> #endif #ifdef HAVE_WCHAR_H #include <wchar.h> #endif #if defined(_WIN32) && !defined(__CYGWIN__) #include <windows.h> #include <locale.h> #endif #include "archive_endian.h" #include "archive_private.h" #include "archive_string.h" #include "archive_string_composition.h" #if !defined(HAVE_WMEMCPY) && !defined(wmemcpy) #define wmemcpy(a,b,i) (wchar_t *)memcpy((a), (b), (i) * sizeof(wchar_t)) #endif #if !defined(HAVE_WMEMMOVE) && !defined(wmemmove) #define wmemmove(a,b,i) (wchar_t *)memmove((a), (b), (i) * sizeof(wchar_t)) #endif #undef max #define max(a, b) ((a)>(b)?(a):(b)) struct archive_string_conv { struct archive_string_conv *next; char *from_charset; char *to_charset; unsigned from_cp; unsigned to_cp; /* Set 1 if from_charset and to_charset are the same. */ int same; int flag; #define SCONV_TO_CHARSET 1 /* MBS is being converted to specified * charset. */ #define SCONV_FROM_CHARSET (1<<1) /* MBS is being converted from * specified charset. */ #define SCONV_BEST_EFFORT (1<<2) /* Copy at least ASCII code. */ #define SCONV_WIN_CP (1<<3) /* Use Windows API for converting * MBS. */ #define SCONV_UTF8_LIBARCHIVE_2 (1<<4) /* Incorrect UTF-8 made by libarchive * 2.x in the wrong assumption. */ #define SCONV_NORMALIZATION_C (1<<6) /* Need normalization to be Form C. * Before UTF-8 characters are actually * processed. */ #define SCONV_NORMALIZATION_D (1<<7) /* Need normalization to be Form D. * Before UTF-8 characters are actually * processed. 
* Currently this only for MAC OS X. */ #define SCONV_TO_UTF8 (1<<8) /* "to charset" side is UTF-8. */ #define SCONV_FROM_UTF8 (1<<9) /* "from charset" side is UTF-8. */ #define SCONV_TO_UTF16BE (1<<10) /* "to charset" side is UTF-16BE. */ #define SCONV_FROM_UTF16BE (1<<11) /* "from charset" side is UTF-16BE. */ #define SCONV_TO_UTF16LE (1<<12) /* "to charset" side is UTF-16LE. */ #define SCONV_FROM_UTF16LE (1<<13) /* "from charset" side is UTF-16LE. */ #define SCONV_TO_UTF16 (SCONV_TO_UTF16BE | SCONV_TO_UTF16LE) #define SCONV_FROM_UTF16 (SCONV_FROM_UTF16BE | SCONV_FROM_UTF16LE) #if HAVE_ICONV iconv_t cd; iconv_t cd_w;/* Use at archive_mstring on * Windows. */ #endif /* A temporary buffer for normalization. */ struct archive_string utftmp; int (*converter[2])(struct archive_string *, const void *, size_t, struct archive_string_conv *); int nconverter; }; #define CP_C_LOCALE 0 /* "C" locale only for this file. */ #define CP_UTF16LE 1200 #define CP_UTF16BE 1201 #define IS_HIGH_SURROGATE_LA(uc) ((uc) >= 0xD800 && (uc) <= 0xDBFF) #define IS_LOW_SURROGATE_LA(uc) ((uc) >= 0xDC00 && (uc) <= 0xDFFF) #define IS_SURROGATE_PAIR_LA(uc) ((uc) >= 0xD800 && (uc) <= 0xDFFF) #define UNICODE_MAX 0x10FFFF #define UNICODE_R_CHAR 0xFFFD /* Replacement character. */ /* Set U+FFFD(Replacement character) in UTF-8. */ static const char utf8_replacement_char[] = {0xef, 0xbf, 0xbd}; static struct archive_string_conv *find_sconv_object(struct archive *, const char *, const char *); static void add_sconv_object(struct archive *, struct archive_string_conv *); static struct archive_string_conv *create_sconv_object(const char *, const char *, unsigned, int); static void free_sconv_object(struct archive_string_conv *); static struct archive_string_conv *get_sconv_object(struct archive *, const char *, const char *, int); static unsigned make_codepage_from_charset(const char *); static unsigned get_current_codepage(void); static unsigned get_current_oemcp(void); static size_t mbsnbytes(const void *, size_t); static size_t utf16nbytes(const void *, size_t); #if defined(_WIN32) && !defined(__CYGWIN__) static int archive_wstring_append_from_mbs_in_codepage( struct archive_wstring *, const char *, size_t, struct archive_string_conv *); static int archive_string_append_from_wcs_in_codepage(struct archive_string *, const wchar_t *, size_t, struct archive_string_conv *); static int is_big_endian(void); static int strncat_in_codepage(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int win_strncat_from_utf16be(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int win_strncat_from_utf16le(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int win_strncat_to_utf16be(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int win_strncat_to_utf16le(struct archive_string *, const void *, size_t, struct archive_string_conv *); #endif static int best_effort_strncat_from_utf16be(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int best_effort_strncat_from_utf16le(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int best_effort_strncat_to_utf16be(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int best_effort_strncat_to_utf16le(struct archive_string *, const void *, size_t, struct archive_string_conv *); #if defined(HAVE_ICONV) static int iconv_strncat_in_locale(struct archive_string *, const void *, 
size_t, struct archive_string_conv *); #endif static int best_effort_strncat_in_locale(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int _utf8_to_unicode(uint32_t *, const char *, size_t); static int utf8_to_unicode(uint32_t *, const char *, size_t); static inline uint32_t combine_surrogate_pair(uint32_t, uint32_t); static int cesu8_to_unicode(uint32_t *, const char *, size_t); static size_t unicode_to_utf8(char *, size_t, uint32_t); static int utf16_to_unicode(uint32_t *, const char *, size_t, int); static size_t unicode_to_utf16be(char *, size_t, uint32_t); static size_t unicode_to_utf16le(char *, size_t, uint32_t); static int strncat_from_utf8_libarchive2(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int strncat_from_utf8_to_utf8(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int archive_string_normalize_C(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int archive_string_normalize_D(struct archive_string *, const void *, size_t, struct archive_string_conv *); static int archive_string_append_unicode(struct archive_string *, const void *, size_t, struct archive_string_conv *); static struct archive_string * archive_string_append(struct archive_string *as, const char *p, size_t s) { if (archive_string_ensure(as, as->length + s + 1) == NULL) return (NULL); if (s) memmove(as->s + as->length, p, s); as->length += s; as->s[as->length] = 0; return (as); } static struct archive_wstring * archive_wstring_append(struct archive_wstring *as, const wchar_t *p, size_t s) { if (archive_wstring_ensure(as, as->length + s + 1) == NULL) return (NULL); if (s) wmemmove(as->s + as->length, p, s); as->length += s; as->s[as->length] = 0; return (as); } struct archive_string * archive_array_append(struct archive_string *as, const char *p, size_t s) { return archive_string_append(as, p, s); } void archive_string_concat(struct archive_string *dest, struct archive_string *src) { if (archive_string_append(dest, src->s, src->length) == NULL) __archive_errx(1, "Out of memory"); } void archive_wstring_concat(struct archive_wstring *dest, struct archive_wstring *src) { if (archive_wstring_append(dest, src->s, src->length) == NULL) __archive_errx(1, "Out of memory"); } void archive_string_free(struct archive_string *as) { as->length = 0; as->buffer_length = 0; free(as->s); as->s = NULL; } void archive_wstring_free(struct archive_wstring *as) { as->length = 0; as->buffer_length = 0; free(as->s); as->s = NULL; } struct archive_wstring * archive_wstring_ensure(struct archive_wstring *as, size_t s) { return (struct archive_wstring *) archive_string_ensure((struct archive_string *)as, s * sizeof(wchar_t)); } /* Returns NULL on any allocation failure. */ struct archive_string * archive_string_ensure(struct archive_string *as, size_t s) { char *p; size_t new_length; /* If buffer is already big enough, don't reallocate. */ if (as->s && (s <= as->buffer_length)) return (as); /* * Growing the buffer at least exponentially ensures that * append operations are always linear in the number of * characters appended. Using a smaller growth rate for * larger buffers reduces memory waste somewhat at the cost of * a larger constant factor. */ if (as->buffer_length < 32) /* Start with a minimum 32-character buffer. */ new_length = 32; else if (as->buffer_length < 8192) /* Buffers under 8k are doubled for speed. 
*/ new_length = as->buffer_length + as->buffer_length; else { /* Buffers 8k and over grow by at least 25% each time. */ new_length = as->buffer_length + as->buffer_length / 4; /* Be safe: If size wraps, fail. */ if (new_length < as->buffer_length) { /* On failure, wipe the string and return NULL. */ archive_string_free(as); errno = ENOMEM;/* Make sure errno has ENOMEM. */ return (NULL); } } /* * The computation above is a lower limit to how much we'll * grow the buffer. In any case, we have to grow it enough to * hold the request. */ if (new_length < s) new_length = s; /* Now we can reallocate the buffer. */ p = (char *)realloc(as->s, new_length); if (p == NULL) { /* On failure, wipe the string and return NULL. */ archive_string_free(as); errno = ENOMEM;/* Make sure errno has ENOMEM. */ return (NULL); } as->s = p; as->buffer_length = new_length; return (as); } /* * TODO: See if there's a way to avoid scanning * the source string twice. Then test to see * if it actually helps (remember that we're almost * always called with pretty short arguments, so * such an optimization might not help). */ struct archive_string * archive_strncat(struct archive_string *as, const void *_p, size_t n) { size_t s; const char *p, *pp; p = (const char *)_p; /* Like strlen(p), except won't examine positions beyond p[n]. */ s = 0; pp = p; while (s < n && *pp) { pp++; s++; } if ((as = archive_string_append(as, p, s)) == NULL) __archive_errx(1, "Out of memory"); return (as); } struct archive_wstring * archive_wstrncat(struct archive_wstring *as, const wchar_t *p, size_t n) { size_t s; const wchar_t *pp; /* Like strlen(p), except won't examine positions beyond p[n]. */ s = 0; pp = p; while (s < n && *pp) { pp++; s++; } if ((as = archive_wstring_append(as, p, s)) == NULL) __archive_errx(1, "Out of memory"); return (as); } struct archive_string * archive_strcat(struct archive_string *as, const void *p) { /* strcat is just strncat without an effective limit. * Assert that we'll never get called with a source * string over 16MB. * TODO: Review all uses of strcat in the source * and try to replace them with strncat(). */ return archive_strncat(as, p, 0x1000000); } struct archive_wstring * archive_wstrcat(struct archive_wstring *as, const wchar_t *p) { /* Ditto. */ return archive_wstrncat(as, p, 0x1000000); } struct archive_string * archive_strappend_char(struct archive_string *as, char c) { if ((as = archive_string_append(as, &c, 1)) == NULL) __archive_errx(1, "Out of memory"); return (as); } struct archive_wstring * archive_wstrappend_wchar(struct archive_wstring *as, wchar_t c) { if ((as = archive_wstring_append(as, &c, 1)) == NULL) __archive_errx(1, "Out of memory"); return (as); } /* * Get the "current character set" name to use with iconv. * On FreeBSD, the empty character set name "" chooses * the correct character encoding for the current locale, * so this isn't necessary. * But iconv on Mac OS 10.6 doesn't seem to handle this correctly; * on that system, we have to explicitly call nl_langinfo() * to get the right name. Not sure about other platforms. * * NOTE: GNU libiconv does not recognize the character-set name * which some platform nl_langinfo(CODESET) returns, so we should * use locale_charset() instead of nl_langinfo(CODESET) for GNU libiconv. 
*/ static const char * default_iconv_charset(const char *charset) { if (charset != NULL && charset[0] != '\0') return charset; #if HAVE_LOCALE_CHARSET && !defined(__APPLE__) /* locale_charset() is broken on Mac OS */ return locale_charset(); #elif HAVE_NL_LANGINFO return nl_langinfo(CODESET); #else return ""; #endif } #if defined(_WIN32) && !defined(__CYGWIN__) /* * Convert MBS to WCS. * Note: returns -1 if conversion fails. */ int archive_wstring_append_from_mbs(struct archive_wstring *dest, const char *p, size_t len) { return archive_wstring_append_from_mbs_in_codepage(dest, p, len, NULL); } static int archive_wstring_append_from_mbs_in_codepage(struct archive_wstring *dest, const char *s, size_t length, struct archive_string_conv *sc) { int count, ret = 0; UINT from_cp; if (sc != NULL) from_cp = sc->from_cp; else from_cp = get_current_codepage(); if (from_cp == CP_C_LOCALE) { /* * "C" locale special processing. */ wchar_t *ws; const unsigned char *mp; if (NULL == archive_wstring_ensure(dest, dest->length + length + 1)) return (-1); ws = dest->s + dest->length; mp = (const unsigned char *)s; count = 0; while (count < (int)length && *mp) { *ws++ = (wchar_t)*mp++; count++; } } else if (sc != NULL && (sc->flag & (SCONV_NORMALIZATION_C | SCONV_NORMALIZATION_D))) { /* * Normalize UTF-8 and UTF-16BE and convert it directly * to UTF-16 as wchar_t. */ struct archive_string u16; int saved_flag = sc->flag;/* save current flag. */ if (is_big_endian()) sc->flag |= SCONV_TO_UTF16BE; else sc->flag |= SCONV_TO_UTF16LE; if (sc->flag & SCONV_FROM_UTF16) { /* * UTF-16BE/LE NFD ===> UTF-16 NFC * UTF-16BE/LE NFC ===> UTF-16 NFD */ count = (int)utf16nbytes(s, length); } else { /* * UTF-8 NFD ===> UTF-16 NFC * UTF-8 NFC ===> UTF-16 NFD */ count = (int)mbsnbytes(s, length); } u16.s = (char *)dest->s; u16.length = dest->length << 1;; u16.buffer_length = dest->buffer_length; if (sc->flag & SCONV_NORMALIZATION_C) ret = archive_string_normalize_C(&u16, s, count, sc); else ret = archive_string_normalize_D(&u16, s, count, sc); dest->s = (wchar_t *)u16.s; dest->length = u16.length >> 1; dest->buffer_length = u16.buffer_length; sc->flag = saved_flag;/* restore the saved flag. */ return (ret); } else if (sc != NULL && (sc->flag & SCONV_FROM_UTF16)) { count = (int)utf16nbytes(s, length); count >>= 1; /* to be WCS length */ /* Allocate memory for WCS. */ if (NULL == archive_wstring_ensure(dest, dest->length + count + 1)) return (-1); wmemcpy(dest->s + dest->length, (const wchar_t *)s, count); if ((sc->flag & SCONV_FROM_UTF16BE) && !is_big_endian()) { uint16_t *u16 = (uint16_t *)(dest->s + dest->length); int b; for (b = 0; b < count; b++) { uint16_t val = archive_le16dec(u16+b); archive_be16enc(u16+b, val); } } else if ((sc->flag & SCONV_FROM_UTF16LE) && is_big_endian()) { uint16_t *u16 = (uint16_t *)(dest->s + dest->length); int b; for (b = 0; b < count; b++) { uint16_t val = archive_be16dec(u16+b); archive_le16enc(u16+b, val); } } } else { DWORD mbflag; size_t buffsize; if (sc == NULL) mbflag = 0; else if (sc->flag & SCONV_FROM_CHARSET) { /* Do not trust the length which comes from * an archive file. */ length = mbsnbytes(s, length); mbflag = 0; } else mbflag = MB_PRECOMPOSED; buffsize = dest->length + length + 1; do { /* Allocate memory for WCS. */ if (NULL == archive_wstring_ensure(dest, buffsize)) return (-1); /* Convert MBS to WCS. 
*/ count = MultiByteToWideChar(from_cp, mbflag, s, (int)length, dest->s + dest->length, (int)(dest->buffer_length >> 1) -1); if (count == 0 && GetLastError() == ERROR_INSUFFICIENT_BUFFER) { /* Expand the WCS buffer. */ buffsize = dest->buffer_length << 1; continue; } if (count == 0 && length != 0) ret = -1; break; } while (1); } dest->length += count; dest->s[dest->length] = L'\0'; return (ret); } #else /* * Convert MBS to WCS. * Note: returns -1 if conversion fails. */ int archive_wstring_append_from_mbs(struct archive_wstring *dest, const char *p, size_t len) { size_t r; int ret_val = 0; /* * No single byte will be more than one wide character, * so this length estimate will always be big enough. */ // size_t wcs_length = len; size_t mbs_length = len; const char *mbs = p; wchar_t *wcs; #if HAVE_MBRTOWC mbstate_t shift_state; memset(&shift_state, 0, sizeof(shift_state)); #endif /* * As we decided to have wcs_length == mbs_length == len * we can use len here instead of wcs_length */ if (NULL == archive_wstring_ensure(dest, dest->length + len + 1)) return (-1); wcs = dest->s + dest->length; /* * We cannot use mbsrtowcs/mbstowcs here because those may convert * extra MBS when strlen(p) > len and one wide character consists of * multi bytes. */ while (*mbs && mbs_length > 0) { /* * The buffer we allocated is always big enough. * Keep this code path in a comment if we decide to choose * smaller wcs_length in the future */ /* if (wcs_length == 0) { dest->length = wcs - dest->s; dest->s[dest->length] = L'\0'; wcs_length = mbs_length; if (NULL == archive_wstring_ensure(dest, dest->length + wcs_length + 1)) return (-1); wcs = dest->s + dest->length; } */ #if HAVE_MBRTOWC r = mbrtowc(wcs, mbs, mbs_length, &shift_state); #else r = mbtowc(wcs, mbs, mbs_length); #endif if (r == (size_t)-1 || r == (size_t)-2) { ret_val = -1; break; } if (r == 0 || r > mbs_length) break; wcs++; // wcs_length--; mbs += r; mbs_length -= r; } dest->length = wcs - dest->s; dest->s[dest->length] = L'\0'; return (ret_val); } #endif #if defined(_WIN32) && !defined(__CYGWIN__) /* * WCS ==> MBS. * Note: returns -1 if conversion fails. * * Win32 builds use WideCharToMultiByte from the Windows API. * (Maybe Cygwin should too? WideCharToMultiByte will know a * lot more about local character encodings than the wcrtomb() * wrapper is going to know.) */ int archive_string_append_from_wcs(struct archive_string *as, const wchar_t *w, size_t len) { return archive_string_append_from_wcs_in_codepage(as, w, len, NULL); } static int archive_string_append_from_wcs_in_codepage(struct archive_string *as, const wchar_t *ws, size_t len, struct archive_string_conv *sc) { BOOL defchar_used, *dp; int count, ret = 0; UINT to_cp; int wslen = (int)len; if (sc != NULL) to_cp = sc->to_cp; else to_cp = get_current_codepage(); if (to_cp == CP_C_LOCALE) { /* * "C" locale special processing. 
*/ const wchar_t *wp = ws; char *p; if (NULL == archive_string_ensure(as, as->length + wslen +1)) return (-1); p = as->s + as->length; count = 0; defchar_used = 0; while (count < wslen && *wp) { if (*wp > 255) { *p++ = '?'; wp++; defchar_used = 1; } else *p++ = (char)*wp++; count++; } } else if (sc != NULL && (sc->flag & SCONV_TO_UTF16)) { uint16_t *u16; if (NULL == archive_string_ensure(as, as->length + len * 2 + 2)) return (-1); u16 = (uint16_t *)(as->s + as->length); count = 0; defchar_used = 0; if (sc->flag & SCONV_TO_UTF16BE) { while (count < (int)len && *ws) { archive_be16enc(u16+count, *ws); ws++; count++; } } else { while (count < (int)len && *ws) { archive_le16enc(u16+count, *ws); ws++; count++; } } count <<= 1; /* to be byte size */ } else { /* Make sure the MBS buffer has plenty to set. */ if (NULL == archive_string_ensure(as, as->length + len * 2 + 1)) return (-1); do { defchar_used = 0; if (to_cp == CP_UTF8 || sc == NULL) dp = NULL; else dp = &defchar_used; count = WideCharToMultiByte(to_cp, 0, ws, wslen, as->s + as->length, (int)as->buffer_length-1, NULL, dp); if (count == 0 && GetLastError() == ERROR_INSUFFICIENT_BUFFER) { /* Expand the MBS buffer and retry. */ if (NULL == archive_string_ensure(as, as->buffer_length + len)) return (-1); continue; } if (count == 0) ret = -1; break; } while (1); } as->length += count; as->s[as->length] = '\0'; return (defchar_used?-1:ret); } #elif defined(HAVE_WCTOMB) || defined(HAVE_WCRTOMB) /* * Translates a wide character string into current locale character set * and appends to the archive_string. Note: returns -1 if conversion * fails. */ int archive_string_append_from_wcs(struct archive_string *as, const wchar_t *w, size_t len) { /* We cannot use the standard wcstombs() here because it * cannot tell us how big the output buffer should be. So * I've built a loop around wcrtomb() or wctomb() that * converts a character at a time and resizes the string as * needed. We prefer wcrtomb() when it's available because * it's thread-safe. */ int n, ret_val = 0; char *p; char *end; #if HAVE_WCRTOMB mbstate_t shift_state; memset(&shift_state, 0, sizeof(shift_state)); #else /* Clear the shift state before starting. */ wctomb(NULL, L'\0'); #endif /* * Allocate buffer for MBS. * We need this allocation here since it is possible that * as->s is still NULL. */ if (archive_string_ensure(as, as->length + len + 1) == NULL) return (-1); p = as->s + as->length; end = as->s + as->buffer_length - MB_CUR_MAX -1; while (*w != L'\0' && len > 0) { if (p >= end) { as->length = p - as->s; as->s[as->length] = '\0'; /* Re-allocate buffer for MBS. */ if (archive_string_ensure(as, as->length + max(len * 2, (size_t)MB_CUR_MAX) + 1) == NULL) return (-1); p = as->s + as->length; end = as->s + as->buffer_length - MB_CUR_MAX -1; } #if HAVE_WCRTOMB n = wcrtomb(p, *w++, &shift_state); #else n = wctomb(p, *w++); #endif if (n == -1) { if (errno == EILSEQ) { /* Skip an illegal wide char. */ *p++ = '?'; ret_val = -1; } else { ret_val = -1; break; } } else p += n; len--; } as->length = p - as->s; as->s[as->length] = '\0'; return (ret_val); } #else /* HAVE_WCTOMB || HAVE_WCRTOMB */ /* * TODO: Test if __STDC_ISO_10646__ is defined. * Non-Windows uses ISO C wcrtomb() or wctomb() to perform the conversion * one character at a time. If a non-Windows platform doesn't have * either of these, fall back to the built-in UTF8 conversion. 
*/ int archive_string_append_from_wcs(struct archive_string *as, const wchar_t *w, size_t len) { (void)as;/* UNUSED */ (void)w;/* UNUSED */ (void)len;/* UNUSED */ errno = ENOSYS; return (-1); } #endif /* HAVE_WCTOMB || HAVE_WCRTOMB */ /* * Find a string conversion object by a pair of 'from' charset name * and 'to' charset name from an archive object. * Return NULL if not found. */ static struct archive_string_conv * find_sconv_object(struct archive *a, const char *fc, const char *tc) { struct archive_string_conv *sc; if (a == NULL) return (NULL); for (sc = a->sconv; sc != NULL; sc = sc->next) { if (strcmp(sc->from_charset, fc) == 0 && strcmp(sc->to_charset, tc) == 0) break; } return (sc); } /* * Register a string object to an archive object. */ static void add_sconv_object(struct archive *a, struct archive_string_conv *sc) { struct archive_string_conv **psc; /* Add a new sconv to sconv list. */ psc = &(a->sconv); while (*psc != NULL) psc = &((*psc)->next); *psc = sc; } static void add_converter(struct archive_string_conv *sc, int (*converter) (struct archive_string *, const void *, size_t, struct archive_string_conv *)) { if (sc == NULL || sc->nconverter >= 2) __archive_errx(1, "Programming error"); sc->converter[sc->nconverter++] = converter; } static void setup_converter(struct archive_string_conv *sc) { /* Reset. */ sc->nconverter = 0; /* * Perform special sequence for the incorrect UTF-8 filenames * made by libarchive2.x. */ if (sc->flag & SCONV_UTF8_LIBARCHIVE_2) { add_converter(sc, strncat_from_utf8_libarchive2); return; } /* * Convert a string to UTF-16BE/LE. */ if (sc->flag & SCONV_TO_UTF16) { /* * If the current locale is UTF-8, we can translate * a UTF-8 string into a UTF-16BE string. */ if (sc->flag & SCONV_FROM_UTF8) { add_converter(sc, archive_string_append_unicode); return; } #if defined(_WIN32) && !defined(__CYGWIN__) if (sc->flag & SCONV_WIN_CP) { if (sc->flag & SCONV_TO_UTF16BE) add_converter(sc, win_strncat_to_utf16be); else add_converter(sc, win_strncat_to_utf16le); return; } #endif #if defined(HAVE_ICONV) if (sc->cd != (iconv_t)-1) { add_converter(sc, iconv_strncat_in_locale); return; } #endif if (sc->flag & SCONV_BEST_EFFORT) { if (sc->flag & SCONV_TO_UTF16BE) add_converter(sc, best_effort_strncat_to_utf16be); else add_converter(sc, best_effort_strncat_to_utf16le); } else /* Make sure we have no converter. */ sc->nconverter = 0; return; } /* * Convert a string from UTF-16BE/LE. */ if (sc->flag & SCONV_FROM_UTF16) { /* * At least we should normalize a UTF-16BE string. */ if (sc->flag & SCONV_NORMALIZATION_D) add_converter(sc,archive_string_normalize_D); else if (sc->flag & SCONV_NORMALIZATION_C) add_converter(sc, archive_string_normalize_C); if (sc->flag & SCONV_TO_UTF8) { /* * If the current locale is UTF-8, we can translate * a UTF-16BE/LE string into a UTF-8 string directly. 
*/ if (!(sc->flag & (SCONV_NORMALIZATION_D |SCONV_NORMALIZATION_C))) add_converter(sc, archive_string_append_unicode); return; } #if defined(_WIN32) && !defined(__CYGWIN__) if (sc->flag & SCONV_WIN_CP) { if (sc->flag & SCONV_FROM_UTF16BE) add_converter(sc, win_strncat_from_utf16be); else add_converter(sc, win_strncat_from_utf16le); return; } #endif #if defined(HAVE_ICONV) if (sc->cd != (iconv_t)-1) { add_converter(sc, iconv_strncat_in_locale); return; } #endif if ((sc->flag & (SCONV_BEST_EFFORT | SCONV_FROM_UTF16BE)) == (SCONV_BEST_EFFORT | SCONV_FROM_UTF16BE)) add_converter(sc, best_effort_strncat_from_utf16be); else if ((sc->flag & (SCONV_BEST_EFFORT | SCONV_FROM_UTF16LE)) == (SCONV_BEST_EFFORT | SCONV_FROM_UTF16LE)) add_converter(sc, best_effort_strncat_from_utf16le); else /* Make sure we have no converter. */ sc->nconverter = 0; return; } if (sc->flag & SCONV_FROM_UTF8) { /* * At least we should normalize a UTF-8 string. */ if (sc->flag & SCONV_NORMALIZATION_D) add_converter(sc,archive_string_normalize_D); else if (sc->flag & SCONV_NORMALIZATION_C) add_converter(sc, archive_string_normalize_C); /* * Copy UTF-8 string with a check of CESU-8. * Apparently, iconv does not check surrogate pairs in UTF-8 * when both from-charset and to-charset are UTF-8, and then * we use our UTF-8 copy code. */ if (sc->flag & SCONV_TO_UTF8) { /* * If the current locale is UTF-8, we can translate * a UTF-16BE string into a UTF-8 string directly. */ if (!(sc->flag & (SCONV_NORMALIZATION_D |SCONV_NORMALIZATION_C))) add_converter(sc, strncat_from_utf8_to_utf8); return; } } #if defined(_WIN32) && !defined(__CYGWIN__) /* * On Windows we can use Windows API for a string conversion. */ if (sc->flag & SCONV_WIN_CP) { add_converter(sc, strncat_in_codepage); return; } #endif #if HAVE_ICONV if (sc->cd != (iconv_t)-1) { add_converter(sc, iconv_strncat_in_locale); /* * iconv generally does not support UTF-8-MAC and so * we have to the output of iconv from NFC to NFD if * need. */ if ((sc->flag & SCONV_FROM_CHARSET) && (sc->flag & SCONV_TO_UTF8)) { if (sc->flag & SCONV_NORMALIZATION_D) add_converter(sc, archive_string_normalize_D); } return; } #endif /* * Try conversion in the best effort or no conversion. */ if ((sc->flag & SCONV_BEST_EFFORT) || sc->same) add_converter(sc, best_effort_strncat_in_locale); else /* Make sure we have no converter. */ sc->nconverter = 0; } /* * Return canonicalized charset-name but this supports just UTF-8, UTF-16BE * and CP932 which are referenced in create_sconv_object(). */ static const char * canonical_charset_name(const char *charset) { char cs[16]; char *p; const char *s; if (charset == NULL || charset[0] == '\0' || strlen(charset) > 15) return (charset); /* Copy name to uppercase. */ p = cs; s = charset; while (*s) { char c = *s++; if (c >= 'a' && c <= 'z') c -= 'a' - 'A'; *p++ = c; } *p++ = '\0'; if (strcmp(cs, "UTF-8") == 0 || strcmp(cs, "UTF8") == 0) return ("UTF-8"); if (strcmp(cs, "UTF-16BE") == 0 || strcmp(cs, "UTF16BE") == 0) return ("UTF-16BE"); if (strcmp(cs, "UTF-16LE") == 0 || strcmp(cs, "UTF16LE") == 0) return ("UTF-16LE"); if (strcmp(cs, "CP932") == 0) return ("CP932"); return (charset); } /* * Create a string conversion object. 
*/ static struct archive_string_conv * create_sconv_object(const char *fc, const char *tc, unsigned current_codepage, int flag) { struct archive_string_conv *sc; sc = calloc(1, sizeof(*sc)); if (sc == NULL) return (NULL); sc->next = NULL; sc->from_charset = strdup(fc); if (sc->from_charset == NULL) { free(sc); return (NULL); } sc->to_charset = strdup(tc); if (sc->to_charset == NULL) { free(sc->from_charset); free(sc); return (NULL); } archive_string_init(&sc->utftmp); if (flag & SCONV_TO_CHARSET) { /* * Convert characters from the current locale charset to * a specified charset. */ sc->from_cp = current_codepage; sc->to_cp = make_codepage_from_charset(tc); #if defined(_WIN32) && !defined(__CYGWIN__) if (IsValidCodePage(sc->to_cp)) flag |= SCONV_WIN_CP; #endif } else if (flag & SCONV_FROM_CHARSET) { /* * Convert characters from a specified charset to * the current locale charset. */ sc->to_cp = current_codepage; sc->from_cp = make_codepage_from_charset(fc); #if defined(_WIN32) && !defined(__CYGWIN__) if (IsValidCodePage(sc->from_cp)) flag |= SCONV_WIN_CP; #endif } /* * Check if "from charset" and "to charset" are the same. */ if (strcmp(fc, tc) == 0 || (sc->from_cp != (unsigned)-1 && sc->from_cp == sc->to_cp)) sc->same = 1; else sc->same = 0; /* * Mark if "from charset" or "to charset" are UTF-8 or UTF-16BE/LE. */ if (strcmp(tc, "UTF-8") == 0) flag |= SCONV_TO_UTF8; else if (strcmp(tc, "UTF-16BE") == 0) flag |= SCONV_TO_UTF16BE; else if (strcmp(tc, "UTF-16LE") == 0) flag |= SCONV_TO_UTF16LE; if (strcmp(fc, "UTF-8") == 0) flag |= SCONV_FROM_UTF8; else if (strcmp(fc, "UTF-16BE") == 0) flag |= SCONV_FROM_UTF16BE; else if (strcmp(fc, "UTF-16LE") == 0) flag |= SCONV_FROM_UTF16LE; #if defined(_WIN32) && !defined(__CYGWIN__) if (sc->to_cp == CP_UTF8) flag |= SCONV_TO_UTF8; else if (sc->to_cp == CP_UTF16BE) flag |= SCONV_TO_UTF16BE | SCONV_WIN_CP; else if (sc->to_cp == CP_UTF16LE) flag |= SCONV_TO_UTF16LE | SCONV_WIN_CP; if (sc->from_cp == CP_UTF8) flag |= SCONV_FROM_UTF8; else if (sc->from_cp == CP_UTF16BE) flag |= SCONV_FROM_UTF16BE | SCONV_WIN_CP; else if (sc->from_cp == CP_UTF16LE) flag |= SCONV_FROM_UTF16LE | SCONV_WIN_CP; #endif /* * Set a flag for Unicode NFD. Usually iconv cannot correctly * handle it. So we have to translate NFD characters to NFC ones * ourselves before iconv handles. Another reason is to prevent * that the same sight of two filenames, one is NFC and other * is NFD, would be in its directory. * On Mac OS X, although its filesystem layer automatically * convert filenames to NFD, it would be useful for filename * comparing to find out the same filenames that we normalize * that to be NFD ourselves. */ if ((flag & SCONV_FROM_CHARSET) && (flag & (SCONV_FROM_UTF16 | SCONV_FROM_UTF8))) { #if defined(__APPLE__) if (flag & SCONV_TO_UTF8) flag |= SCONV_NORMALIZATION_D; else #endif flag |= SCONV_NORMALIZATION_C; } #if defined(__APPLE__) /* * In case writing an archive file, make sure that a filename * going to be passed to iconv is a Unicode NFC string since * a filename in HFS Plus filesystem is a Unicode NFD one and * iconv cannot handle it with "UTF-8" charset. It is simpler * than a use of "UTF-8-MAC" charset. */ if ((flag & SCONV_TO_CHARSET) && (flag & (SCONV_FROM_UTF16 | SCONV_FROM_UTF8)) && !(flag & (SCONV_TO_UTF16 | SCONV_TO_UTF8))) flag |= SCONV_NORMALIZATION_C; /* * In case reading an archive file. make sure that a filename * will be passed to users is a Unicode NFD string in order to * correctly compare the filename with other one which comes * from HFS Plus filesystem. 
*/ if ((flag & SCONV_FROM_CHARSET) && !(flag & (SCONV_FROM_UTF16 | SCONV_FROM_UTF8)) && (flag & SCONV_TO_UTF8)) flag |= SCONV_NORMALIZATION_D; #endif #if defined(HAVE_ICONV) sc->cd_w = (iconv_t)-1; /* * Create an iconv object. */ if (((flag & (SCONV_TO_UTF8 | SCONV_TO_UTF16)) && (flag & (SCONV_FROM_UTF8 | SCONV_FROM_UTF16))) || (flag & SCONV_WIN_CP)) { /* This case we won't use iconv. */ sc->cd = (iconv_t)-1; } else { sc->cd = iconv_open(tc, fc); if (sc->cd == (iconv_t)-1 && (sc->flag & SCONV_BEST_EFFORT)) { /* * Unfortunately, all of iconv implements do support * "CP932" character-set, so we should use "SJIS" * instead if iconv_open failed. */ if (strcmp(tc, "CP932") == 0) sc->cd = iconv_open("SJIS", fc); else if (strcmp(fc, "CP932") == 0) sc->cd = iconv_open(tc, "SJIS"); } #if defined(_WIN32) && !defined(__CYGWIN__) /* * archive_mstring on Windows directly convert multi-bytes * into archive_wstring in order not to depend on locale * so that you can do a I18N programming. This will be * used only in archive_mstring_copy_mbs_len_l so far. */ if (flag & SCONV_FROM_CHARSET) { sc->cd_w = iconv_open("UTF-8", fc); if (sc->cd_w == (iconv_t)-1 && (sc->flag & SCONV_BEST_EFFORT)) { if (strcmp(fc, "CP932") == 0) sc->cd_w = iconv_open("UTF-8", "SJIS"); } } #endif /* _WIN32 && !__CYGWIN__ */ } #endif /* HAVE_ICONV */ sc->flag = flag; /* * Set up converters. */ setup_converter(sc); return (sc); } /* * Free a string conversion object. */ static void free_sconv_object(struct archive_string_conv *sc) { free(sc->from_charset); free(sc->to_charset); archive_string_free(&sc->utftmp); #if HAVE_ICONV if (sc->cd != (iconv_t)-1) iconv_close(sc->cd); if (sc->cd_w != (iconv_t)-1) iconv_close(sc->cd_w); #endif free(sc); } #if defined(_WIN32) && !defined(__CYGWIN__) static unsigned my_atoi(const char *p) { unsigned cp; cp = 0; while (*p) { if (*p >= '0' && *p <= '9') cp = cp * 10 + (*p - '0'); else return (-1); p++; } return (cp); } /* * Translate Charset name (as used by iconv) into CodePage (as used by Windows) * Return -1 if failed. * * Note: This translation code may be insufficient. */ static struct charset { const char *name; unsigned cp; } charsets[] = { /* MUST BE SORTED! 
*/ {"ASCII", 1252}, {"ASMO-708", 708}, {"BIG5", 950}, {"CHINESE", 936}, {"CP367", 1252}, {"CP819", 1252}, {"CP1025", 21025}, {"DOS-720", 720}, {"DOS-862", 862}, {"EUC-CN", 51936}, {"EUC-JP", 51932}, {"EUC-KR", 949}, {"EUCCN", 51936}, {"EUCJP", 51932}, {"EUCKR", 949}, {"GB18030", 54936}, {"GB2312", 936}, {"HEBREW", 1255}, {"HZ-GB-2312", 52936}, {"IBM273", 20273}, {"IBM277", 20277}, {"IBM278", 20278}, {"IBM280", 20280}, {"IBM284", 20284}, {"IBM285", 20285}, {"IBM290", 20290}, {"IBM297", 20297}, {"IBM367", 1252}, {"IBM420", 20420}, {"IBM423", 20423}, {"IBM424", 20424}, {"IBM819", 1252}, {"IBM871", 20871}, {"IBM880", 20880}, {"IBM905", 20905}, {"IBM924", 20924}, {"ISO-8859-1", 28591}, {"ISO-8859-13", 28603}, {"ISO-8859-15", 28605}, {"ISO-8859-2", 28592}, {"ISO-8859-3", 28593}, {"ISO-8859-4", 28594}, {"ISO-8859-5", 28595}, {"ISO-8859-6", 28596}, {"ISO-8859-7", 28597}, {"ISO-8859-8", 28598}, {"ISO-8859-9", 28599}, {"ISO8859-1", 28591}, {"ISO8859-13", 28603}, {"ISO8859-15", 28605}, {"ISO8859-2", 28592}, {"ISO8859-3", 28593}, {"ISO8859-4", 28594}, {"ISO8859-5", 28595}, {"ISO8859-6", 28596}, {"ISO8859-7", 28597}, {"ISO8859-8", 28598}, {"ISO8859-9", 28599}, {"JOHAB", 1361}, {"KOI8-R", 20866}, {"KOI8-U", 21866}, {"KS_C_5601-1987", 949}, {"LATIN1", 1252}, {"LATIN2", 28592}, {"MACINTOSH", 10000}, {"SHIFT-JIS", 932}, {"SHIFT_JIS", 932}, {"SJIS", 932}, {"US", 1252}, {"US-ASCII", 1252}, {"UTF-16", 1200}, {"UTF-16BE", 1201}, {"UTF-16LE", 1200}, {"UTF-8", CP_UTF8}, {"X-EUROPA", 29001}, {"X-MAC-ARABIC", 10004}, {"X-MAC-CE", 10029}, {"X-MAC-CHINESEIMP", 10008}, {"X-MAC-CHINESETRAD", 10002}, {"X-MAC-CROATIAN", 10082}, {"X-MAC-CYRILLIC", 10007}, {"X-MAC-GREEK", 10006}, {"X-MAC-HEBREW", 10005}, {"X-MAC-ICELANDIC", 10079}, {"X-MAC-JAPANESE", 10001}, {"X-MAC-KOREAN", 10003}, {"X-MAC-ROMANIAN", 10010}, {"X-MAC-THAI", 10021}, {"X-MAC-TURKISH", 10081}, {"X-MAC-UKRAINIAN", 10017}, }; static unsigned make_codepage_from_charset(const char *charset) { char cs[16]; char *p; unsigned cp; int a, b; if (charset == NULL || strlen(charset) > 15) return -1; /* Copy name to uppercase. */ p = cs; while (*charset) { char c = *charset++; if (c >= 'a' && c <= 'z') c -= 'a' - 'A'; *p++ = c; } *p++ = '\0'; cp = -1; /* Look it up in the table first, so that we can easily * override CP367, which we map to 1252 instead of 367. */ a = 0; b = sizeof(charsets)/sizeof(charsets[0]); while (b > a) { int c = (b + a) / 2; int r = strcmp(charsets[c].name, cs); if (r < 0) a = c + 1; else if (r > 0) b = c; else return charsets[c].cp; } /* If it's not in the table, try to parse it. */ switch (*cs) { case 'C': if (cs[1] == 'P' && cs[2] >= '0' && cs[2] <= '9') { cp = my_atoi(cs + 2); } else if (strcmp(cs, "CP_ACP") == 0) cp = get_current_codepage(); else if (strcmp(cs, "CP_OEMCP") == 0) cp = get_current_oemcp(); break; case 'I': if (cs[1] == 'B' && cs[2] == 'M' && cs[3] >= '0' && cs[3] <= '9') { cp = my_atoi(cs + 3); } break; case 'W': if (strncmp(cs, "WINDOWS-", 8) == 0) { cp = my_atoi(cs + 8); if (cp != 874 && (cp < 1250 || cp > 1258)) cp = -1;/* This may invalid code. */ } break; } return (cp); } /* * Return ANSI Code Page of current locale set by setlocale(). 
*/ static unsigned get_current_codepage(void) { char *locale, *p; unsigned cp; locale = setlocale(LC_CTYPE, NULL); if (locale == NULL) return (GetACP()); if (locale[0] == 'C' && locale[1] == '\0') return (CP_C_LOCALE); p = strrchr(locale, '.'); if (p == NULL) return (GetACP()); if (strcmp(p+1, "utf8") == 0) return CP_UTF8; cp = my_atoi(p+1); if ((int)cp <= 0) return (GetACP()); return (cp); } /* * Translation table between Locale Name and ACP/OEMCP. */ static struct { unsigned acp; unsigned ocp; const char *locale; } acp_ocp_map[] = { { 950, 950, "Chinese_Taiwan" }, { 936, 936, "Chinese_People's Republic of China" }, { 950, 950, "Chinese_Taiwan" }, { 1250, 852, "Czech_Czech Republic" }, { 1252, 850, "Danish_Denmark" }, { 1252, 850, "Dutch_Netherlands" }, { 1252, 850, "Dutch_Belgium" }, { 1252, 437, "English_United States" }, { 1252, 850, "English_Australia" }, { 1252, 850, "English_Canada" }, { 1252, 850, "English_New Zealand" }, { 1252, 850, "English_United Kingdom" }, { 1252, 437, "English_United States" }, { 1252, 850, "Finnish_Finland" }, { 1252, 850, "French_France" }, { 1252, 850, "French_Belgium" }, { 1252, 850, "French_Canada" }, { 1252, 850, "French_Switzerland" }, { 1252, 850, "German_Germany" }, { 1252, 850, "German_Austria" }, { 1252, 850, "German_Switzerland" }, { 1253, 737, "Greek_Greece" }, { 1250, 852, "Hungarian_Hungary" }, { 1252, 850, "Icelandic_Iceland" }, { 1252, 850, "Italian_Italy" }, { 1252, 850, "Italian_Switzerland" }, { 932, 932, "Japanese_Japan" }, { 949, 949, "Korean_Korea" }, { 1252, 850, "Norwegian (BokmOl)_Norway" }, { 1252, 850, "Norwegian (BokmOl)_Norway" }, { 1252, 850, "Norwegian-Nynorsk_Norway" }, { 1250, 852, "Polish_Poland" }, { 1252, 850, "Portuguese_Portugal" }, { 1252, 850, "Portuguese_Brazil" }, { 1251, 866, "Russian_Russia" }, { 1250, 852, "Slovak_Slovakia" }, { 1252, 850, "Spanish_Spain" }, { 1252, 850, "Spanish_Mexico" }, { 1252, 850, "Spanish_Spain" }, { 1252, 850, "Swedish_Sweden" }, { 1254, 857, "Turkish_Turkey" }, { 0, 0, NULL} }; /* * Return OEM Code Page of current locale set by setlocale(). */ static unsigned get_current_oemcp(void) { int i; char *locale, *p; size_t len; locale = setlocale(LC_CTYPE, NULL); if (locale == NULL) return (GetOEMCP()); if (locale[0] == 'C' && locale[1] == '\0') return (CP_C_LOCALE); p = strrchr(locale, '.'); if (p == NULL) return (GetOEMCP()); len = p - locale; for (i = 0; acp_ocp_map[i].acp; i++) { if (strncmp(acp_ocp_map[i].locale, locale, len) == 0) return (acp_ocp_map[i].ocp); } return (GetOEMCP()); } #else /* * POSIX platform does not use CodePage. */ static unsigned get_current_codepage(void) { return (-1);/* Unknown */ } static unsigned make_codepage_from_charset(const char *charset) { (void)charset; /* UNUSED */ return (-1);/* Unknown */ } static unsigned get_current_oemcp(void) { return (-1);/* Unknown */ } #endif /* defined(_WIN32) && !defined(__CYGWIN__) */ /* * Return a string conversion object. */ static struct archive_string_conv * get_sconv_object(struct archive *a, const char *fc, const char *tc, int flag) { struct archive_string_conv *sc; unsigned current_codepage; /* Check if we have made the sconv object. 
*/ sc = find_sconv_object(a, fc, tc); if (sc != NULL) return (sc); if (a == NULL) current_codepage = get_current_codepage(); else current_codepage = a->current_codepage; sc = create_sconv_object(canonical_charset_name(fc), canonical_charset_name(tc), current_codepage, flag); if (sc == NULL) { if (a != NULL) archive_set_error(a, ENOMEM, "Could not allocate memory for " "a string conversion object"); return (NULL); } /* * If there is no converter for current string conversion object, * we cannot handle this conversion. */ if (sc->nconverter == 0) { if (a != NULL) { #if HAVE_ICONV archive_set_error(a, ARCHIVE_ERRNO_MISC, "iconv_open failed : Cannot handle ``%s''", (flag & SCONV_TO_CHARSET)?tc:fc); #else archive_set_error(a, ARCHIVE_ERRNO_MISC, "A character-set conversion not fully supported " "on this platform"); #endif } /* Failed; free a sconv object. */ free_sconv_object(sc); return (NULL); } /* * Success! */ if (a != NULL) add_sconv_object(a, sc); return (sc); } static const char * get_current_charset(struct archive *a) { const char *cur_charset; if (a == NULL) cur_charset = default_iconv_charset(""); else { cur_charset = default_iconv_charset(a->current_code); if (a->current_code == NULL) { a->current_code = strdup(cur_charset); a->current_codepage = get_current_codepage(); a->current_oemcp = get_current_oemcp(); } } return (cur_charset); } /* * Make and Return a string conversion object. * Return NULL if the platform does not support the specified conversion * and best_effort is 0. * If best_effort is set, A string conversion object must be returned * unless memory allocation for the object fails, but the conversion * might fail when non-ASCII code is found. */ struct archive_string_conv * archive_string_conversion_to_charset(struct archive *a, const char *charset, int best_effort) { int flag = SCONV_TO_CHARSET; if (best_effort) flag |= SCONV_BEST_EFFORT; return (get_sconv_object(a, get_current_charset(a), charset, flag)); } struct archive_string_conv * archive_string_conversion_from_charset(struct archive *a, const char *charset, int best_effort) { int flag = SCONV_FROM_CHARSET; if (best_effort) flag |= SCONV_BEST_EFFORT; return (get_sconv_object(a, charset, get_current_charset(a), flag)); } /* * archive_string_default_conversion_*_archive() are provided for Windows * platform because other archiver application use CP_OEMCP for * MultiByteToWideChar() and WideCharToMultiByte() for the filenames * in tar or zip files. But mbstowcs/wcstombs(CRT) usually use CP_ACP * unless you use setlocale(LC_ALL, ".OCP")(specify CP_OEMCP). * So we should make a string conversion between CP_ACP and CP_OEMCP * for compatibility. */ #if defined(_WIN32) && !defined(__CYGWIN__) struct archive_string_conv * archive_string_default_conversion_for_read(struct archive *a) { const char *cur_charset = get_current_charset(a); char oemcp[16]; /* NOTE: a check of cur_charset is unneeded but we need * that get_current_charset() has been surely called at * this time whatever C compiler optimized. */ if (cur_charset != NULL && (a->current_codepage == CP_C_LOCALE || a->current_codepage == a->current_oemcp)) return (NULL);/* no conversion. */ _snprintf(oemcp, sizeof(oemcp)-1, "CP%d", a->current_oemcp); /* Make sure a null termination must be set. 
*/ oemcp[sizeof(oemcp)-1] = '\0'; return (get_sconv_object(a, oemcp, cur_charset, SCONV_FROM_CHARSET)); } struct archive_string_conv * archive_string_default_conversion_for_write(struct archive *a) { const char *cur_charset = get_current_charset(a); char oemcp[16]; /* NOTE: a check of cur_charset is unneeded but we need * that get_current_charset() has been surely called at * this time whatever C compiler optimized. */ if (cur_charset != NULL && (a->current_codepage == CP_C_LOCALE || a->current_codepage == a->current_oemcp)) return (NULL);/* no conversion. */ _snprintf(oemcp, sizeof(oemcp)-1, "CP%d", a->current_oemcp); /* Make sure a null termination must be set. */ oemcp[sizeof(oemcp)-1] = '\0'; return (get_sconv_object(a, cur_charset, oemcp, SCONV_TO_CHARSET)); } #else struct archive_string_conv * archive_string_default_conversion_for_read(struct archive *a) { (void)a; /* UNUSED */ return (NULL); } struct archive_string_conv * archive_string_default_conversion_for_write(struct archive *a) { (void)a; /* UNUSED */ return (NULL); } #endif /* * Dispose of all character conversion objects in the archive object. */ void archive_string_conversion_free(struct archive *a) { struct archive_string_conv *sc; struct archive_string_conv *sc_next; for (sc = a->sconv; sc != NULL; sc = sc_next) { sc_next = sc->next; free_sconv_object(sc); } a->sconv = NULL; free(a->current_code); a->current_code = NULL; } /* * Return a conversion charset name. */ const char * archive_string_conversion_charset_name(struct archive_string_conv *sc) { if (sc->flag & SCONV_TO_CHARSET) return (sc->to_charset); else return (sc->from_charset); } /* * Change the behavior of a string conversion. */ void archive_string_conversion_set_opt(struct archive_string_conv *sc, int opt) { switch (opt) { /* * A filename in UTF-8 was made with libarchive 2.x in a wrong * assumption that wchar_t was Unicode. * This option enables simulating the assumption in order to read * that filename correctly. */ case SCONV_SET_OPT_UTF8_LIBARCHIVE2X: #if (defined(_WIN32) && !defined(__CYGWIN__)) \ || defined(__STDC_ISO_10646__) || defined(__APPLE__) /* * Nothing to do for it since wchar_t on these platforms * is really Unicode. */ (void)sc; /* UNUSED */ #else if ((sc->flag & SCONV_UTF8_LIBARCHIVE_2) == 0) { sc->flag |= SCONV_UTF8_LIBARCHIVE_2; /* Set up string converters. */ setup_converter(sc); } #endif break; case SCONV_SET_OPT_NORMALIZATION_C: if ((sc->flag & SCONV_NORMALIZATION_C) == 0) { sc->flag |= SCONV_NORMALIZATION_C; sc->flag &= ~SCONV_NORMALIZATION_D; /* Set up string converters. */ setup_converter(sc); } break; case SCONV_SET_OPT_NORMALIZATION_D: #if defined(HAVE_ICONV) /* * If iconv will take the string, do not change the * setting of the normalization. */ if (!(sc->flag & SCONV_WIN_CP) && (sc->flag & (SCONV_FROM_UTF16 | SCONV_FROM_UTF8)) && !(sc->flag & (SCONV_TO_UTF16 | SCONV_TO_UTF8))) break; #endif if ((sc->flag & SCONV_NORMALIZATION_D) == 0) { sc->flag |= SCONV_NORMALIZATION_D; sc->flag &= ~SCONV_NORMALIZATION_C; /* Set up string converters. */ setup_converter(sc); } break; default: break; } } /* * * Copy one archive_string to another in locale conversion. * * archive_strncat_l(); * archive_strncpy_l(); * */ static size_t mbsnbytes(const void *_p, size_t n) { size_t s; const char *p, *pp; if (_p == NULL) return (0); p = (const char *)_p; /* Like strlen(p), except won't examine positions beyond p[n]. 
*/ s = 0; pp = p; while (s < n && *pp) { pp++; s++; } return (s); } static size_t utf16nbytes(const void *_p, size_t n) { size_t s; const char *p, *pp; if (_p == NULL) return (0); p = (const char *)_p; /* Like strlen(p), except won't examine positions beyond p[n]. */ s = 0; pp = p; n >>= 1; while (s < n && (pp[0] || pp[1])) { pp += 2; s++; } return (s<<1); } int archive_strncpy_l(struct archive_string *as, const void *_p, size_t n, struct archive_string_conv *sc) { as->length = 0; return (archive_strncat_l(as, _p, n, sc)); } int archive_strncat_l(struct archive_string *as, const void *_p, size_t n, struct archive_string_conv *sc) { const void *s; size_t length = 0; int i, r = 0, r2; if (_p != NULL && n > 0) { if (sc != NULL && (sc->flag & SCONV_FROM_UTF16)) length = utf16nbytes(_p, n); else length = mbsnbytes(_p, n); } /* We must allocate memory even if there is no data for conversion * or copy. This simulates archive_string_append behavior. */ if (length == 0) { int tn = 1; if (sc != NULL && (sc->flag & SCONV_TO_UTF16)) tn = 2; if (archive_string_ensure(as, as->length + tn) == NULL) return (-1); as->s[as->length] = 0; if (tn == 2) as->s[as->length+1] = 0; return (0); } /* * If sc is NULL, we just make a copy. */ if (sc == NULL) { if (archive_string_append(as, _p, length) == NULL) return (-1);/* No memory */ return (0); } s = _p; i = 0; if (sc->nconverter > 1) { sc->utftmp.length = 0; r2 = sc->converter[0](&(sc->utftmp), s, length, sc); if (r2 != 0 && errno == ENOMEM) return (r2); if (r > r2) r = r2; s = sc->utftmp.s; length = sc->utftmp.length; ++i; } r2 = sc->converter[i](as, s, length, sc); if (r > r2) r = r2; return (r); } #if HAVE_ICONV /* * Return -1 if conversion fails. */ static int iconv_strncat_in_locale(struct archive_string *as, const void *_p, size_t length, struct archive_string_conv *sc) { ICONV_CONST char *itp; size_t remaining; iconv_t cd; char *outp; size_t avail, bs; int return_value = 0; /* success */ int to_size, from_size; if (sc->flag & SCONV_TO_UTF16) to_size = 2; else to_size = 1; if (sc->flag & SCONV_FROM_UTF16) from_size = 2; else from_size = 1; if (archive_string_ensure(as, as->length + length*2+to_size) == NULL) return (-1); cd = sc->cd; itp = (char *)(uintptr_t)_p; remaining = length; outp = as->s + as->length; avail = as->buffer_length - as->length - to_size; while (remaining >= (size_t)from_size) { size_t result = iconv(cd, &itp, &remaining, &outp, &avail); if (result != (size_t)-1) break; /* Conversion completed. */ if (errno == EILSEQ || errno == EINVAL) { /* * If an output charset is UTF-8 or UTF-16BE/LE, * unknown character should be U+FFFD * (replacement character). */ if (sc->flag & (SCONV_TO_UTF8 | SCONV_TO_UTF16)) { size_t rbytes; if (sc->flag & SCONV_TO_UTF8) rbytes = sizeof(utf8_replacement_char); else rbytes = 2; if (avail < rbytes) { as->length = outp - as->s; bs = as->buffer_length + (remaining * to_size) + rbytes; if (NULL == archive_string_ensure(as, bs)) return (-1); outp = as->s + as->length; avail = as->buffer_length - as->length - to_size; } if (sc->flag & SCONV_TO_UTF8) memcpy(outp, utf8_replacement_char, sizeof(utf8_replacement_char)); else if (sc->flag & SCONV_TO_UTF16BE) archive_be16enc(outp, UNICODE_R_CHAR); else archive_le16enc(outp, UNICODE_R_CHAR); outp += rbytes; avail -= rbytes; } else { /* Skip the illegal input bytes. */ *outp++ = '?'; avail--; } itp += from_size; remaining -= from_size; return_value = -1; /* failure */ } else { /* E2BIG no output buffer, * Increase an output buffer. 
*/ as->length = outp - as->s; bs = as->buffer_length + remaining * 2; if (NULL == archive_string_ensure(as, bs)) return (-1); outp = as->s + as->length; avail = as->buffer_length - as->length - to_size; } } as->length = outp - as->s; as->s[as->length] = 0; if (to_size == 2) as->s[as->length+1] = 0; return (return_value); } #endif /* HAVE_ICONV */ #if defined(_WIN32) && !defined(__CYGWIN__) /* * Translate a string from a some CodePage to an another CodePage by * Windows APIs, and copy the result. Return -1 if conversion fails. */ static int strncat_in_codepage(struct archive_string *as, const void *_p, size_t length, struct archive_string_conv *sc) { const char *s = (const char *)_p; struct archive_wstring aws; size_t l; int r, saved_flag; archive_string_init(&aws); saved_flag = sc->flag; sc->flag &= ~(SCONV_NORMALIZATION_D | SCONV_NORMALIZATION_C); r = archive_wstring_append_from_mbs_in_codepage(&aws, s, length, sc); sc->flag = saved_flag; if (r != 0) { archive_wstring_free(&aws); if (errno != ENOMEM) archive_string_append(as, s, length); return (-1); } l = as->length; r = archive_string_append_from_wcs_in_codepage( as, aws.s, aws.length, sc); if (r != 0 && errno != ENOMEM && l == as->length) archive_string_append(as, s, length); archive_wstring_free(&aws); return (r); } /* * Test whether MBS ==> WCS is okay. */ static int invalid_mbs(const void *_p, size_t n, struct archive_string_conv *sc) { const char *p = (const char *)_p; unsigned codepage; DWORD mbflag = MB_ERR_INVALID_CHARS; if (sc->flag & SCONV_FROM_CHARSET) codepage = sc->to_cp; else codepage = sc->from_cp; if (codepage == CP_C_LOCALE) return (0); if (codepage != CP_UTF8) mbflag |= MB_PRECOMPOSED; if (MultiByteToWideChar(codepage, mbflag, p, (int)n, NULL, 0) == 0) return (-1); /* Invalid */ return (0); /* Okay */ } #else /* * Test whether MBS ==> WCS is okay. */ static int invalid_mbs(const void *_p, size_t n, struct archive_string_conv *sc) { const char *p = (const char *)_p; size_t r; #if HAVE_MBRTOWC mbstate_t shift_state; memset(&shift_state, 0, sizeof(shift_state)); #else /* Clear the shift state before starting. */ mbtowc(NULL, NULL, 0); #endif while (n) { wchar_t wc; #if HAVE_MBRTOWC r = mbrtowc(&wc, p, n, &shift_state); #else r = mbtowc(&wc, p, n); #endif if (r == (size_t)-1 || r == (size_t)-2) return (-1);/* Invalid. */ if (r == 0) break; p += r; n -= r; } (void)sc; /* UNUSED */ return (0); /* All Okey. */ } #endif /* defined(_WIN32) && !defined(__CYGWIN__) */ /* * Basically returns -1 because we cannot make a conversion of charset * without iconv but in some cases this would return 0. * Returns 0 if all copied characters are ASCII. * Returns 0 if both from-locale and to-locale are the same and those * can be WCS with no error. */ static int best_effort_strncat_in_locale(struct archive_string *as, const void *_p, size_t length, struct archive_string_conv *sc) { size_t remaining; const uint8_t *itp; int return_value = 0; /* success */ /* * If both from-locale and to-locale is the same, this makes a copy. * And then this checks all copied MBS can be WCS if so returns 0. */ if (sc->same) { if (archive_string_append(as, _p, length) == NULL) return (-1);/* No memory */ return (invalid_mbs(_p, length, sc)); } /* * If a character is ASCII, this just copies it. If not, this * assigns '?' character instead but in UTF-8 locale this assigns * byte sequence 0xEF 0xBD 0xBD, which are code point U+FFFD, * a Replacement Character in Unicode. 
*/ remaining = length; itp = (const uint8_t *)_p; while (*itp && remaining > 0) { if (*itp > 127) { // Non-ASCII: Substitute with suitable replacement if (sc->flag & SCONV_TO_UTF8) { if (archive_string_append(as, utf8_replacement_char, sizeof(utf8_replacement_char)) == NULL) { __archive_errx(1, "Out of memory"); } } else { archive_strappend_char(as, '?'); } return_value = -1; } else { archive_strappend_char(as, *itp); } ++itp; } return (return_value); } /* * Unicode conversion functions. * - UTF-8 <===> UTF-8 in removing surrogate pairs. * - UTF-8 NFD ===> UTF-8 NFC in removing surrogate pairs. * - UTF-8 made by libarchive 2.x ===> UTF-8. * - UTF-16BE <===> UTF-8. * */ /* * Utility to convert a single UTF-8 sequence. * * Usually return used bytes, return used byte in negative value when * a unicode character is replaced with U+FFFD. * See also http://unicode.org/review/pr-121.html Public Review Issue #121 * Recommended Practice for Replacement Characters. */ static int _utf8_to_unicode(uint32_t *pwc, const char *s, size_t n) { static const char utf8_count[256] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 00 - 0F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 10 - 1F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 20 - 2F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 30 - 3F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 40 - 4F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 50 - 5F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 60 - 6F */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,/* 70 - 7F */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,/* 80 - 8F */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,/* 90 - 9F */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,/* A0 - AF */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,/* B0 - BF */ 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,/* C0 - CF */ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,/* D0 - DF */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,/* E0 - EF */ 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* F0 - FF */ }; int ch, i; int cnt; uint32_t wc; /* Sanity check. */ if (n == 0) return (0); /* * Decode 1-4 bytes depending on the value of the first byte. */ ch = (unsigned char)*s; if (ch == 0) return (0); /* Standard: return 0 for end-of-string. */ cnt = utf8_count[ch]; /* Invalid sequence or there are not plenty bytes. */ if ((int)n < cnt) { cnt = (int)n; for (i = 1; i < cnt; i++) { if ((s[i] & 0xc0) != 0x80) { cnt = i; break; } } goto invalid_sequence; } /* Make a Unicode code point from a single UTF-8 sequence. */ switch (cnt) { case 1: /* 1 byte sequence. */ *pwc = ch & 0x7f; return (cnt); case 2: /* 2 bytes sequence. */ if ((s[1] & 0xc0) != 0x80) { cnt = 1; goto invalid_sequence; } *pwc = ((ch & 0x1f) << 6) | (s[1] & 0x3f); return (cnt); case 3: /* 3 bytes sequence. */ if ((s[1] & 0xc0) != 0x80) { cnt = 1; goto invalid_sequence; } if ((s[2] & 0xc0) != 0x80) { cnt = 2; goto invalid_sequence; } wc = ((ch & 0x0f) << 12) | ((s[1] & 0x3f) << 6) | (s[2] & 0x3f); if (wc < 0x800) goto invalid_sequence;/* Overlong sequence. */ break; case 4: /* 4 bytes sequence. */ if ((s[1] & 0xc0) != 0x80) { cnt = 1; goto invalid_sequence; } if ((s[2] & 0xc0) != 0x80) { cnt = 2; goto invalid_sequence; } if ((s[3] & 0xc0) != 0x80) { cnt = 3; goto invalid_sequence; } wc = ((ch & 0x07) << 18) | ((s[1] & 0x3f) << 12) | ((s[2] & 0x3f) << 6) | (s[3] & 0x3f); if (wc < 0x10000) goto invalid_sequence;/* Overlong sequence. */ break; default: /* Others are all invalid sequence. 
*/ if (ch == 0xc0 || ch == 0xc1) cnt = 2; else if (ch >= 0xf5 && ch <= 0xf7) cnt = 4; else if (ch >= 0xf8 && ch <= 0xfb) cnt = 5; else if (ch == 0xfc || ch == 0xfd) cnt = 6; else cnt = 1; if ((int)n < cnt) cnt = (int)n; for (i = 1; i < cnt; i++) { if ((s[i] & 0xc0) != 0x80) { cnt = i; break; } } goto invalid_sequence; } /* The code point larger than 0x10FFFF is not legal * Unicode values. */ if (wc > UNICODE_MAX) goto invalid_sequence; /* Correctly gets a Unicode, returns used bytes. */ *pwc = wc; return (cnt); invalid_sequence: *pwc = UNICODE_R_CHAR;/* set the Replacement Character instead. */ return (cnt * -1); } static int utf8_to_unicode(uint32_t *pwc, const char *s, size_t n) { int cnt; cnt = _utf8_to_unicode(pwc, s, n); /* Any of Surrogate pair is not legal Unicode values. */ if (cnt == 3 && IS_SURROGATE_PAIR_LA(*pwc)) return (-3); return (cnt); } static inline uint32_t combine_surrogate_pair(uint32_t uc, uint32_t uc2) { uc -= 0xD800; uc *= 0x400; uc += uc2 - 0xDC00; uc += 0x10000; return (uc); } /* * Convert a single UTF-8/CESU-8 sequence to a Unicode code point in * removing surrogate pairs. * * CESU-8: The Compatibility Encoding Scheme for UTF-16. * * Usually return used bytes, return used byte in negative value when * a unicode character is replaced with U+FFFD. */ static int cesu8_to_unicode(uint32_t *pwc, const char *s, size_t n) { uint32_t wc = 0; int cnt; cnt = _utf8_to_unicode(&wc, s, n); if (cnt == 3 && IS_HIGH_SURROGATE_LA(wc)) { uint32_t wc2 = 0; if (n - 3 < 3) { /* Invalid byte sequence. */ goto invalid_sequence; } cnt = _utf8_to_unicode(&wc2, s+3, n-3); if (cnt != 3 || !IS_LOW_SURROGATE_LA(wc2)) { /* Invalid byte sequence. */ goto invalid_sequence; } wc = combine_surrogate_pair(wc, wc2); cnt = 6; } else if (cnt == 3 && IS_LOW_SURROGATE_LA(wc)) { /* Invalid byte sequence. */ goto invalid_sequence; } *pwc = wc; return (cnt); invalid_sequence: *pwc = UNICODE_R_CHAR;/* set the Replacement Character instead. */ if (cnt > 0) cnt *= -1; return (cnt); } /* * Convert a Unicode code point to a single UTF-8 sequence. * * NOTE:This function does not check if the Unicode is legal or not. * Please you definitely check it before calling this. */ static size_t unicode_to_utf8(char *p, size_t remaining, uint32_t uc) { char *_p = p; /* Invalid Unicode char maps to Replacement character */ if (uc > UNICODE_MAX) uc = UNICODE_R_CHAR; /* Translate code point to UTF8 */ if (uc <= 0x7f) { if (remaining == 0) return (0); *p++ = (char)uc; } else if (uc <= 0x7ff) { if (remaining < 2) return (0); *p++ = 0xc0 | ((uc >> 6) & 0x1f); *p++ = 0x80 | (uc & 0x3f); } else if (uc <= 0xffff) { if (remaining < 3) return (0); *p++ = 0xe0 | ((uc >> 12) & 0x0f); *p++ = 0x80 | ((uc >> 6) & 0x3f); *p++ = 0x80 | (uc & 0x3f); } else { if (remaining < 4) return (0); *p++ = 0xf0 | ((uc >> 18) & 0x07); *p++ = 0x80 | ((uc >> 12) & 0x3f); *p++ = 0x80 | ((uc >> 6) & 0x3f); *p++ = 0x80 | (uc & 0x3f); } return (p - _p); } static int utf16be_to_unicode(uint32_t *pwc, const char *s, size_t n) { return (utf16_to_unicode(pwc, s, n, 1)); } static int utf16le_to_unicode(uint32_t *pwc, const char *s, size_t n) { return (utf16_to_unicode(pwc, s, n, 0)); } static int utf16_to_unicode(uint32_t *pwc, const char *s, size_t n, int be) { const char *utf16 = s; unsigned uc; if (n == 0) return (0); if (n == 1) { /* set the Replacement Character instead. 
*/ *pwc = UNICODE_R_CHAR; return (-1); } if (be) uc = archive_be16dec(utf16); else uc = archive_le16dec(utf16); utf16 += 2; /* If this is a surrogate pair, assemble the full code point.*/ if (IS_HIGH_SURROGATE_LA(uc)) { unsigned uc2; if (n >= 4) { if (be) uc2 = archive_be16dec(utf16); else uc2 = archive_le16dec(utf16); } else uc2 = 0; if (IS_LOW_SURROGATE_LA(uc2)) { uc = combine_surrogate_pair(uc, uc2); utf16 += 2; } else { /* Undescribed code point should be U+FFFD * (replacement character). */ *pwc = UNICODE_R_CHAR; return (-2); } } /* * Surrogate pair values(0xd800 through 0xdfff) are only * used by UTF-16, so, after above calculation, the code * must not be surrogate values, and Unicode has no codes * larger than 0x10ffff. Thus, those are not legal Unicode * values. */ if (IS_SURROGATE_PAIR_LA(uc) || uc > UNICODE_MAX) { /* Undescribed code point should be U+FFFD * (replacement character). */ *pwc = UNICODE_R_CHAR; return (((int)(utf16 - s)) * -1); } *pwc = uc; return ((int)(utf16 - s)); } static size_t unicode_to_utf16be(char *p, size_t remaining, uint32_t uc) { char *utf16 = p; if (uc > 0xffff) { /* We have a code point that won't fit into a * wchar_t; convert it to a surrogate pair. */ if (remaining < 4) return (0); uc -= 0x10000; archive_be16enc(utf16, ((uc >> 10) & 0x3ff) + 0xD800); archive_be16enc(utf16+2, (uc & 0x3ff) + 0xDC00); return (4); } else { if (remaining < 2) return (0); archive_be16enc(utf16, uc); return (2); } } static size_t unicode_to_utf16le(char *p, size_t remaining, uint32_t uc) { char *utf16 = p; if (uc > 0xffff) { /* We have a code point that won't fit into a * wchar_t; convert it to a surrogate pair. */ if (remaining < 4) return (0); uc -= 0x10000; archive_le16enc(utf16, ((uc >> 10) & 0x3ff) + 0xD800); archive_le16enc(utf16+2, (uc & 0x3ff) + 0xDC00); return (4); } else { if (remaining < 2) return (0); archive_le16enc(utf16, uc); return (2); } } /* * Copy UTF-8 string in checking surrogate pair. * If any surrogate pair are found, it would be canonicalized. */ static int strncat_from_utf8_to_utf8(struct archive_string *as, const void *_p, size_t len, struct archive_string_conv *sc) { const char *s; char *p, *endp; int n, ret = 0; (void)sc; /* UNUSED */ if (archive_string_ensure(as, as->length + len + 1) == NULL) return (-1); s = (const char *)_p; p = as->s + as->length; endp = as->s + as->buffer_length -1; do { uint32_t uc; const char *ss = s; size_t w; /* * Forward byte sequence until a conversion of that is needed. */ while ((n = utf8_to_unicode(&uc, s, len)) > 0) { s += n; len -= n; } if (ss < s) { if (p + (s - ss) > endp) { as->length = p - as->s; if (archive_string_ensure(as, as->buffer_length + len + 1) == NULL) return (-1); p = as->s + as->length; endp = as->s + as->buffer_length -1; } memcpy(p, ss, s - ss); p += s - ss; } /* * If n is negative, current byte sequence needs a replacement. */ if (n < 0) { if (n == -3 && IS_SURROGATE_PAIR_LA(uc)) { /* Current byte sequence may be CESU-8. */ n = cesu8_to_unicode(&uc, s, len); } if (n < 0) { ret = -1; n *= -1;/* Use a replaced unicode character. */ } /* Rebuild UTF-8 byte sequence. 
*/ while ((w = unicode_to_utf8(p, endp - p, uc)) == 0) { as->length = p - as->s; if (archive_string_ensure(as, as->buffer_length + len + 1) == NULL) return (-1); p = as->s + as->length; endp = as->s + as->buffer_length -1; } p += w; s += n; len -= n; } } while (n > 0); as->length = p - as->s; as->s[as->length] = '\0'; return (ret); } static int archive_string_append_unicode(struct archive_string *as, const void *_p, size_t len, struct archive_string_conv *sc) { const char *s; char *p, *endp; uint32_t uc; size_t w; int n, ret = 0, ts, tm; int (*parse)(uint32_t *, const char *, size_t); size_t (*unparse)(char *, size_t, uint32_t); if (sc->flag & SCONV_TO_UTF16BE) { unparse = unicode_to_utf16be; ts = 2; } else if (sc->flag & SCONV_TO_UTF16LE) { unparse = unicode_to_utf16le; ts = 2; } else if (sc->flag & SCONV_TO_UTF8) { unparse = unicode_to_utf8; ts = 1; } else { /* * This case is going to be converted to another * character-set through iconv. */ if (sc->flag & SCONV_FROM_UTF16BE) { unparse = unicode_to_utf16be; ts = 2; } else if (sc->flag & SCONV_FROM_UTF16LE) { unparse = unicode_to_utf16le; ts = 2; } else { unparse = unicode_to_utf8; ts = 1; } } if (sc->flag & SCONV_FROM_UTF16BE) { parse = utf16be_to_unicode; tm = 1; } else if (sc->flag & SCONV_FROM_UTF16LE) { parse = utf16le_to_unicode; tm = 1; } else { parse = cesu8_to_unicode; tm = ts; } if (archive_string_ensure(as, as->length + len * tm + ts) == NULL) return (-1); s = (const char *)_p; p = as->s + as->length; endp = as->s + as->buffer_length - ts; while ((n = parse(&uc, s, len)) != 0) { if (n < 0) { /* Use a replaced unicode character. */ n *= -1; ret = -1; } s += n; len -= n; while ((w = unparse(p, endp - p, uc)) == 0) { /* There is not enough output buffer so * we have to expand it. */ as->length = p - as->s; if (archive_string_ensure(as, as->buffer_length + len * tm + ts) == NULL) return (-1); p = as->s + as->length; endp = as->s + as->buffer_length - ts; } p += w; } as->length = p - as->s; as->s[as->length] = '\0'; if (ts == 2) as->s[as->length+1] = '\0'; return (ret); } /* * Following Constants for Hangul compositions this information comes from * Unicode Standard Annex #15 http://unicode.org/reports/tr15/ */ #define HC_SBASE 0xAC00 #define HC_LBASE 0x1100 #define HC_VBASE 0x1161 #define HC_TBASE 0x11A7 #define HC_LCOUNT 19 #define HC_VCOUNT 21 #define HC_TCOUNT 28 #define HC_NCOUNT (HC_VCOUNT * HC_TCOUNT) #define HC_SCOUNT (HC_LCOUNT * HC_NCOUNT) static uint32_t get_nfc(uint32_t uc, uint32_t uc2) { int t, b; t = 0; b = sizeof(u_composition_table)/sizeof(u_composition_table[0]) -1; while (b >= t) { int m = (t + b) / 2; if (u_composition_table[m].cp1 < uc) t = m + 1; else if (u_composition_table[m].cp1 > uc) b = m - 1; else if (u_composition_table[m].cp2 < uc2) t = m + 1; else if (u_composition_table[m].cp2 > uc2) b = m - 1; else return (u_composition_table[m].nfc); } return (0); } #define FDC_MAX 10 /* The maximum number of Following Decomposable * Characters. */ /* * Update first code point. */ #define UPDATE_UC(new_uc) do { \ uc = new_uc; \ ucptr = NULL; \ } while (0) /* * Replace first code point with second code point. 
*/ #define REPLACE_UC_WITH_UC2() do { \ uc = uc2; \ ucptr = uc2ptr; \ n = n2; \ } while (0) #define EXPAND_BUFFER() do { \ as->length = p - as->s; \ if (archive_string_ensure(as, \ as->buffer_length + len * tm + ts) == NULL)\ return (-1); \ p = as->s + as->length; \ endp = as->s + as->buffer_length - ts; \ } while (0) #define UNPARSE(p, endp, uc) do { \ while ((w = unparse(p, (endp) - (p), uc)) == 0) {\ EXPAND_BUFFER(); \ } \ p += w; \ } while (0) /* * Write first code point. * If the code point has not be changed from its original code, * this just copies it from its original buffer pointer. * If not, this converts it to UTF-8 byte sequence and copies it. */ #define WRITE_UC() do { \ if (ucptr) { \ if (p + n > endp) \ EXPAND_BUFFER(); \ switch (n) { \ case 4: \ *p++ = *ucptr++; \ /* FALL THROUGH */ \ case 3: \ *p++ = *ucptr++; \ /* FALL THROUGH */ \ case 2: \ *p++ = *ucptr++; \ /* FALL THROUGH */ \ case 1: \ *p++ = *ucptr; \ break; \ } \ ucptr = NULL; \ } else { \ UNPARSE(p, endp, uc); \ } \ } while (0) /* * Collect following decomposable code points. */ #define COLLECT_CPS(start) do { \ int _i; \ for (_i = start; _i < FDC_MAX ; _i++) { \ nx = parse(&ucx[_i], s, len); \ if (nx <= 0) \ break; \ cx = CCC(ucx[_i]); \ if (cl >= cx && cl != 228 && cx != 228)\ break; \ s += nx; \ len -= nx; \ cl = cx; \ ccx[_i] = cx; \ } \ if (_i >= FDC_MAX) { \ ret = -1; \ ucx_size = FDC_MAX; \ } else \ ucx_size = _i; \ } while (0) /* * Normalize UTF-8/UTF-16BE characters to Form C and copy the result. * * TODO: Convert composition exclusions, which are never converted * from NFC,NFD,NFKC and NFKD, to Form C. */ static int archive_string_normalize_C(struct archive_string *as, const void *_p, size_t len, struct archive_string_conv *sc) { const char *s = (const char *)_p; char *p, *endp; uint32_t uc, uc2; size_t w; int always_replace, n, n2, ret = 0, spair, ts, tm; int (*parse)(uint32_t *, const char *, size_t); size_t (*unparse)(char *, size_t, uint32_t); always_replace = 1; ts = 1;/* text size. */ if (sc->flag & SCONV_TO_UTF16BE) { unparse = unicode_to_utf16be; ts = 2; if (sc->flag & SCONV_FROM_UTF16BE) always_replace = 0; } else if (sc->flag & SCONV_TO_UTF16LE) { unparse = unicode_to_utf16le; ts = 2; if (sc->flag & SCONV_FROM_UTF16LE) always_replace = 0; } else if (sc->flag & SCONV_TO_UTF8) { unparse = unicode_to_utf8; if (sc->flag & SCONV_FROM_UTF8) always_replace = 0; } else { /* * This case is going to be converted to another * character-set through iconv. */ always_replace = 0; if (sc->flag & SCONV_FROM_UTF16BE) { unparse = unicode_to_utf16be; ts = 2; } else if (sc->flag & SCONV_FROM_UTF16LE) { unparse = unicode_to_utf16le; ts = 2; } else { unparse = unicode_to_utf8; } } if (sc->flag & SCONV_FROM_UTF16BE) { parse = utf16be_to_unicode; tm = 1; spair = 4;/* surrogate pair size in UTF-16. */ } else if (sc->flag & SCONV_FROM_UTF16LE) { parse = utf16le_to_unicode; tm = 1; spair = 4;/* surrogate pair size in UTF-16. */ } else { parse = cesu8_to_unicode; tm = ts; spair = 6;/* surrogate pair size in UTF-8. */ } if (archive_string_ensure(as, as->length + len * tm + ts) == NULL) return (-1); p = as->s + as->length; endp = as->s + as->buffer_length - ts; while ((n = parse(&uc, s, len)) != 0) { const char *ucptr, *uc2ptr; if (n < 0) { /* Use a replaced unicode character. */ UNPARSE(p, endp, uc); s += n*-1; len -= n*-1; ret = -1; continue; } else if (n == spair || always_replace) /* uc is converted from a surrogate pair. * this should be treated as a changed code. 
*/ ucptr = NULL; else ucptr = s; s += n; len -= n; /* Read second code point. */ while ((n2 = parse(&uc2, s, len)) > 0) { uint32_t ucx[FDC_MAX]; int ccx[FDC_MAX]; int cl, cx, i, nx, ucx_size; int LIndex,SIndex; uint32_t nfc; if (n2 == spair || always_replace) /* uc2 is converted from a surrogate pair. * this should be treated as a changed code. */ uc2ptr = NULL; else uc2ptr = s; s += n2; len -= n2; /* * If current second code point is out of decomposable * code points, finding compositions is unneeded. */ if (!IS_DECOMPOSABLE_BLOCK(uc2)) { WRITE_UC(); REPLACE_UC_WITH_UC2(); continue; } /* * Try to combine current code points. */ /* * We have to combine Hangul characters according to * http://uniicode.org/reports/tr15/#Hangul */ if (0 <= (LIndex = uc - HC_LBASE) && LIndex < HC_LCOUNT) { /* * Hangul Composition. * 1. Two current code points are L and V. */ int VIndex = uc2 - HC_VBASE; if (0 <= VIndex && VIndex < HC_VCOUNT) { /* Make syllable of form LV. */ UPDATE_UC(HC_SBASE + (LIndex * HC_VCOUNT + VIndex) * HC_TCOUNT); } else { WRITE_UC(); REPLACE_UC_WITH_UC2(); } continue; } else if (0 <= (SIndex = uc - HC_SBASE) && SIndex < HC_SCOUNT && (SIndex % HC_TCOUNT) == 0) { /* * Hangul Composition. * 2. Two current code points are LV and T. */ int TIndex = uc2 - HC_TBASE; if (0 < TIndex && TIndex < HC_TCOUNT) { /* Make syllable of form LVT. */ UPDATE_UC(uc + TIndex); } else { WRITE_UC(); REPLACE_UC_WITH_UC2(); } continue; } else if ((nfc = get_nfc(uc, uc2)) != 0) { /* A composition to current code points * is found. */ UPDATE_UC(nfc); continue; } else if ((cl = CCC(uc2)) == 0) { /* Clearly 'uc2' the second code point is not * a decomposable code. */ WRITE_UC(); REPLACE_UC_WITH_UC2(); continue; } /* * Collect following decomposable code points. */ cx = 0; ucx[0] = uc2; ccx[0] = cl; COLLECT_CPS(1); /* * Find a composed code in the collected code points. */ i = 1; while (i < ucx_size) { int j; if ((nfc = get_nfc(uc, ucx[i])) == 0) { i++; continue; } /* * nfc is composed of uc and ucx[i]. */ UPDATE_UC(nfc); /* * Remove ucx[i] by shifting * following code points. */ for (j = i; j+1 < ucx_size; j++) { ucx[j] = ucx[j+1]; ccx[j] = ccx[j+1]; } ucx_size --; /* * Collect following code points blocked * by ucx[i] the removed code point. */ if (ucx_size > 0 && i == ucx_size && nx > 0 && cx == cl) { cl = ccx[ucx_size-1]; COLLECT_CPS(ucx_size); } /* * Restart finding a composed code with * the updated uc from the top of the * collected code points. */ i = 0; } /* * Apparently the current code points are not * decomposed characters or already composed. */ WRITE_UC(); for (i = 0; i < ucx_size; i++) UNPARSE(p, endp, ucx[i]); /* * Flush out remaining canonical combining characters. */ if (nx > 0 && cx == cl && len > 0) { while ((nx = parse(&ucx[0], s, len)) > 0) { cx = CCC(ucx[0]); if (cl > cx) break; s += nx; len -= nx; cl = cx; UNPARSE(p, endp, ucx[0]); } } break; } if (n2 < 0) { WRITE_UC(); /* Use a replaced unicode character. */ UNPARSE(p, endp, uc2); s += n2*-1; len -= n2*-1; ret = -1; continue; } else if (n2 == 0) { WRITE_UC(); break; } } as->length = p - as->s; as->s[as->length] = '\0'; if (ts == 2) as->s[as->length+1] = '\0'; return (ret); } static int get_nfd(uint32_t *cp1, uint32_t *cp2, uint32_t uc) { int t, b; /* * These are not converted to NFD on Mac OS. */ if ((uc >= 0x2000 && uc <= 0x2FFF) || (uc >= 0xF900 && uc <= 0xFAFF) || (uc >= 0x2F800 && uc <= 0x2FAFF)) return (0); /* * Those code points are not converted to NFD on Mac OS. * I do not know the reason because it is undocumented. 
* NFC NFD * 1109A ==> 11099 110BA * 1109C ==> 1109B 110BA * 110AB ==> 110A5 110BA */ if (uc == 0x1109A || uc == 0x1109C || uc == 0x110AB) return (0); t = 0; b = sizeof(u_decomposition_table)/sizeof(u_decomposition_table[0]) -1; while (b >= t) { int m = (t + b) / 2; if (u_decomposition_table[m].nfc < uc) t = m + 1; else if (u_decomposition_table[m].nfc > uc) b = m - 1; else { *cp1 = u_decomposition_table[m].cp1; *cp2 = u_decomposition_table[m].cp2; return (1); } } return (0); } #define REPLACE_UC_WITH(cp) do { \ uc = cp; \ ucptr = NULL; \ } while (0) /* * Normalize UTF-8 characters to Form D and copy the result. */ static int archive_string_normalize_D(struct archive_string *as, const void *_p, size_t len, struct archive_string_conv *sc) { const char *s = (const char *)_p; char *p, *endp; uint32_t uc, uc2; size_t w; int always_replace, n, n2, ret = 0, spair, ts, tm; int (*parse)(uint32_t *, const char *, size_t); size_t (*unparse)(char *, size_t, uint32_t); always_replace = 1; ts = 1;/* text size. */ if (sc->flag & SCONV_TO_UTF16BE) { unparse = unicode_to_utf16be; ts = 2; if (sc->flag & SCONV_FROM_UTF16BE) always_replace = 0; } else if (sc->flag & SCONV_TO_UTF16LE) { unparse = unicode_to_utf16le; ts = 2; if (sc->flag & SCONV_FROM_UTF16LE) always_replace = 0; } else if (sc->flag & SCONV_TO_UTF8) { unparse = unicode_to_utf8; if (sc->flag & SCONV_FROM_UTF8) always_replace = 0; } else { /* * This case is going to be converted to another * character-set through iconv. */ always_replace = 0; if (sc->flag & SCONV_FROM_UTF16BE) { unparse = unicode_to_utf16be; ts = 2; } else if (sc->flag & SCONV_FROM_UTF16LE) { unparse = unicode_to_utf16le; ts = 2; } else { unparse = unicode_to_utf8; } } if (sc->flag & SCONV_FROM_UTF16BE) { parse = utf16be_to_unicode; tm = 1; spair = 4;/* surrogate pair size in UTF-16. */ } else if (sc->flag & SCONV_FROM_UTF16LE) { parse = utf16le_to_unicode; tm = 1; spair = 4;/* surrogate pair size in UTF-16. */ } else { parse = cesu8_to_unicode; tm = ts; spair = 6;/* surrogate pair size in UTF-8. */ } if (archive_string_ensure(as, as->length + len * tm + ts) == NULL) return (-1); p = as->s + as->length; endp = as->s + as->buffer_length - ts; while ((n = parse(&uc, s, len)) != 0) { const char *ucptr; uint32_t cp1, cp2; int SIndex; struct { uint32_t uc; int ccc; } fdc[FDC_MAX]; int fdi, fdj; int ccc; check_first_code: if (n < 0) { /* Use a replaced unicode character. */ UNPARSE(p, endp, uc); s += n*-1; len -= n*-1; ret = -1; continue; } else if (n == spair || always_replace) /* uc is converted from a surrogate pair. * this should be treated as a changed code. */ ucptr = NULL; else ucptr = s; s += n; len -= n; /* Hangul Decomposition. */ if ((SIndex = uc - HC_SBASE) >= 0 && SIndex < HC_SCOUNT) { int L = HC_LBASE + SIndex / HC_NCOUNT; int V = HC_VBASE + (SIndex % HC_NCOUNT) / HC_TCOUNT; int T = HC_TBASE + SIndex % HC_TCOUNT; REPLACE_UC_WITH(L); WRITE_UC(); REPLACE_UC_WITH(V); WRITE_UC(); if (T != HC_TBASE) { REPLACE_UC_WITH(T); WRITE_UC(); } continue; } if (IS_DECOMPOSABLE_BLOCK(uc) && CCC(uc) != 0) { WRITE_UC(); continue; } fdi = 0; while (get_nfd(&cp1, &cp2, uc) && fdi < FDC_MAX) { int k; for (k = fdi; k > 0; k--) fdc[k] = fdc[k-1]; fdc[0].ccc = CCC(cp2); fdc[0].uc = cp2; fdi++; REPLACE_UC_WITH(cp1); } /* Read following code points. 
*/ while ((n2 = parse(&uc2, s, len)) > 0 && (ccc = CCC(uc2)) != 0 && fdi < FDC_MAX) { int j, k; s += n2; len -= n2; for (j = 0; j < fdi; j++) { if (fdc[j].ccc > ccc) break; } if (j < fdi) { for (k = fdi; k > j; k--) fdc[k] = fdc[k-1]; fdc[j].ccc = ccc; fdc[j].uc = uc2; } else { fdc[fdi].ccc = ccc; fdc[fdi].uc = uc2; } fdi++; } WRITE_UC(); for (fdj = 0; fdj < fdi; fdj++) { REPLACE_UC_WITH(fdc[fdj].uc); WRITE_UC(); } if (n2 == 0) break; REPLACE_UC_WITH(uc2); n = n2; goto check_first_code; } as->length = p - as->s; as->s[as->length] = '\0'; if (ts == 2) as->s[as->length+1] = '\0'; return (ret); } /* * libarchive 2.x made incorrect UTF-8 strings in the wrong assumption * that WCS is Unicode. It is true for several platforms but some are false. * And then people who did not use UTF-8 locale on the non Unicode WCS * platform and made a tar file with libarchive(mostly bsdtar) 2.x. Those * now cannot get right filename from libarchive 3.x and later since we * fixed the wrong assumption and it is incompatible to older its versions. * So we provide special option, "compat-2x.x", for resolving it. * That option enable the string conversion of libarchive 2.x. * * Translates the wrong UTF-8 string made by libarchive 2.x into current * locale character set and appends to the archive_string. * Note: returns -1 if conversion fails. */ static int strncat_from_utf8_libarchive2(struct archive_string *as, const void *_p, size_t len, struct archive_string_conv *sc) { const char *s; int n; char *p; char *end; uint32_t unicode; #if HAVE_WCRTOMB mbstate_t shift_state; memset(&shift_state, 0, sizeof(shift_state)); #else /* Clear the shift state before starting. */ wctomb(NULL, L'\0'); #endif (void)sc; /* UNUSED */ /* * Allocate buffer for MBS. * We need this allocation here since it is possible that * as->s is still NULL. */ if (archive_string_ensure(as, as->length + len + 1) == NULL) return (-1); s = (const char *)_p; p = as->s + as->length; end = as->s + as->buffer_length - MB_CUR_MAX -1; while ((n = _utf8_to_unicode(&unicode, s, len)) != 0) { wchar_t wc; if (p >= end) { as->length = p - as->s; /* Re-allocate buffer for MBS. */ if (archive_string_ensure(as, as->length + max(len * 2, (size_t)MB_CUR_MAX) + 1) == NULL) return (-1); p = as->s + as->length; end = as->s + as->buffer_length - MB_CUR_MAX -1; } /* * As libarchive 2.x, translates the UTF-8 characters into * wide-characters in the assumption that WCS is Unicode. */ if (n < 0) { n *= -1; wc = L'?'; } else wc = (wchar_t)unicode; s += n; len -= n; /* * Translates the wide-character into the current locale MBS. */ #if HAVE_WCRTOMB n = (int)wcrtomb(p, wc, &shift_state); #else n = (int)wctomb(p, wc); #endif if (n == -1) return (-1); p += n; } as->length = p - as->s; as->s[as->length] = '\0'; return (0); } /* * Conversion functions between current locale dependent MBS and UTF-16BE. * strncat_from_utf16be() : UTF-16BE --> MBS * strncat_to_utf16be() : MBS --> UTF16BE */ #if defined(_WIN32) && !defined(__CYGWIN__) /* * Convert a UTF-16BE/LE string to current locale and copy the result. * Return -1 if conversion fails. 
*/ static int win_strncat_from_utf16(struct archive_string *as, const void *_p, size_t bytes, struct archive_string_conv *sc, int be) { struct archive_string tmp; const char *u16; int ll; BOOL defchar; char *mbs; size_t mbs_size, b; int ret = 0; bytes &= ~1; if (archive_string_ensure(as, as->length + bytes +1) == NULL) return (-1); mbs = as->s + as->length; mbs_size = as->buffer_length - as->length -1; if (sc->to_cp == CP_C_LOCALE) { /* * "C" locale special process. */ u16 = _p; ll = 0; for (b = 0; b < bytes; b += 2) { uint16_t val; if (be) val = archive_be16dec(u16+b); else val = archive_le16dec(u16+b); if (val > 255) { *mbs++ = '?'; ret = -1; } else *mbs++ = (char)(val&0xff); ll++; } as->length += ll; as->s[as->length] = '\0'; return (ret); } archive_string_init(&tmp); if (be) { if (is_big_endian()) { u16 = _p; } else { if (archive_string_ensure(&tmp, bytes+2) == NULL) return (-1); memcpy(tmp.s, _p, bytes); for (b = 0; b < bytes; b += 2) { uint16_t val = archive_be16dec(tmp.s+b); archive_le16enc(tmp.s+b, val); } u16 = tmp.s; } } else { if (!is_big_endian()) { u16 = _p; } else { if (archive_string_ensure(&tmp, bytes+2) == NULL) return (-1); memcpy(tmp.s, _p, bytes); for (b = 0; b < bytes; b += 2) { uint16_t val = archive_le16dec(tmp.s+b); archive_be16enc(tmp.s+b, val); } u16 = tmp.s; } } do { defchar = 0; ll = WideCharToMultiByte(sc->to_cp, 0, (LPCWSTR)u16, (int)bytes>>1, mbs, (int)mbs_size, NULL, &defchar); /* Exit loop if we succeeded */ if (ll != 0 || GetLastError() != ERROR_INSUFFICIENT_BUFFER) { break; } /* Else expand buffer and loop to try again. */ ll = WideCharToMultiByte(sc->to_cp, 0, (LPCWSTR)u16, (int)bytes, NULL, 0, NULL, NULL); if (archive_string_ensure(as, ll +1) == NULL) return (-1); mbs = as->s + as->length; mbs_size = as->buffer_length - as->length -1; } while (1); archive_string_free(&tmp); as->length += ll; as->s[as->length] = '\0'; if (ll == 0 || defchar) ret = -1; return (ret); } static int win_strncat_from_utf16be(struct archive_string *as, const void *_p, size_t bytes, struct archive_string_conv *sc) { return (win_strncat_from_utf16(as, _p, bytes, sc, 1)); } static int win_strncat_from_utf16le(struct archive_string *as, const void *_p, size_t bytes, struct archive_string_conv *sc) { return (win_strncat_from_utf16(as, _p, bytes, sc, 0)); } static int is_big_endian(void) { uint16_t d = 1; return (archive_be16dec(&d) == 1); } /* * Convert a current locale string to UTF-16BE/LE and copy the result. * Return -1 if conversion fails. */ static int win_strncat_to_utf16(struct archive_string *as16, const void *_p, size_t length, struct archive_string_conv *sc, int bigendian) { const char *s = (const char *)_p; char *u16; size_t count, avail; if (archive_string_ensure(as16, as16->length + (length + 1) * 2) == NULL) return (-1); u16 = as16->s + as16->length; avail = as16->buffer_length - 2; if (sc->from_cp == CP_C_LOCALE) { /* * "C" locale special process. 
*/ count = 0; while (count < length && *s) { if (bigendian) archive_be16enc(u16, *s); else archive_le16enc(u16, *s); u16 += 2; s++; count++; } as16->length += count << 1; as16->s[as16->length] = 0; as16->s[as16->length+1] = 0; return (0); } do { count = MultiByteToWideChar(sc->from_cp, MB_PRECOMPOSED, s, (int)length, (LPWSTR)u16, (int)avail>>1); /* Exit loop if we succeeded */ if (count != 0 || GetLastError() != ERROR_INSUFFICIENT_BUFFER) { break; } /* Expand buffer and try again */ count = MultiByteToWideChar(sc->from_cp, MB_PRECOMPOSED, s, (int)length, NULL, 0); if (archive_string_ensure(as16, (count +1) * 2) == NULL) return (-1); u16 = as16->s + as16->length; avail = as16->buffer_length - 2; } while (1); as16->length += count * 2; as16->s[as16->length] = 0; as16->s[as16->length+1] = 0; if (count == 0) return (-1); if (is_big_endian()) { if (!bigendian) { while (count > 0) { uint16_t v = archive_be16dec(u16); archive_le16enc(u16, v); u16 += 2; count--; } } } else { if (bigendian) { while (count > 0) { uint16_t v = archive_le16dec(u16); archive_be16enc(u16, v); u16 += 2; count--; } } } return (0); } static int win_strncat_to_utf16be(struct archive_string *as16, const void *_p, size_t length, struct archive_string_conv *sc) { return (win_strncat_to_utf16(as16, _p, length, sc, 1)); } static int win_strncat_to_utf16le(struct archive_string *as16, const void *_p, size_t length, struct archive_string_conv *sc) { return (win_strncat_to_utf16(as16, _p, length, sc, 0)); } #endif /* _WIN32 && !__CYGWIN__ */ /* * Do the best effort for conversions. * We cannot handle UTF-16BE character-set without such iconv, * but there is a chance if a string consists just ASCII code or * a current locale is UTF-8. */ /* * Convert a UTF-16BE string to current locale and copy the result. * Return -1 if conversion fails. */ static int best_effort_strncat_from_utf16(struct archive_string *as, const void *_p, size_t bytes, struct archive_string_conv *sc, int be) { const char *utf16 = (const char *)_p; char *mbs; uint32_t uc; int n, ret; (void)sc; /* UNUSED */ /* * Other case, we should do the best effort. * If all character are ASCII(<0x7f), we can convert it. * if not , we set a alternative character and return -1. */ ret = 0; if (archive_string_ensure(as, as->length + bytes +1) == NULL) return (-1); mbs = as->s + as->length; while ((n = utf16_to_unicode(&uc, utf16, bytes, be)) != 0) { if (n < 0) { n *= -1; ret = -1; } bytes -= n; utf16 += n; if (uc > 127) { /* We cannot handle it. */ *mbs++ = '?'; ret = -1; } else *mbs++ = (char)uc; } as->length = mbs - as->s; as->s[as->length] = '\0'; return (ret); } static int best_effort_strncat_from_utf16be(struct archive_string *as, const void *_p, size_t bytes, struct archive_string_conv *sc) { return (best_effort_strncat_from_utf16(as, _p, bytes, sc, 1)); } static int best_effort_strncat_from_utf16le(struct archive_string *as, const void *_p, size_t bytes, struct archive_string_conv *sc) { return (best_effort_strncat_from_utf16(as, _p, bytes, sc, 0)); } /* * Convert a current locale string to UTF-16BE/LE and copy the result. * Return -1 if conversion fails. */ static int best_effort_strncat_to_utf16(struct archive_string *as16, const void *_p, size_t length, struct archive_string_conv *sc, int bigendian) { const char *s = (const char *)_p; char *utf16; size_t remaining; int ret; (void)sc; /* UNUSED */ /* * Other case, we should do the best effort. * If all character are ASCII(<0x7f), we can convert it. * if not , we set a alternative character and return -1. 
*/ ret = 0; remaining = length; if (archive_string_ensure(as16, as16->length + (length + 1) * 2) == NULL) return (-1); utf16 = as16->s + as16->length; while (remaining--) { unsigned c = *s++; if (c > 127) { /* We cannot handle it. */ c = UNICODE_R_CHAR; ret = -1; } if (bigendian) archive_be16enc(utf16, c); else archive_le16enc(utf16, c); utf16 += 2; } as16->length = utf16 - as16->s; as16->s[as16->length] = 0; as16->s[as16->length+1] = 0; return (ret); } static int best_effort_strncat_to_utf16be(struct archive_string *as16, const void *_p, size_t length, struct archive_string_conv *sc) { return (best_effort_strncat_to_utf16(as16, _p, length, sc, 1)); } static int best_effort_strncat_to_utf16le(struct archive_string *as16, const void *_p, size_t length, struct archive_string_conv *sc) { return (best_effort_strncat_to_utf16(as16, _p, length, sc, 0)); } /* * Multistring operations. */ void archive_mstring_clean(struct archive_mstring *aes) { archive_wstring_free(&(aes->aes_wcs)); archive_string_free(&(aes->aes_mbs)); archive_string_free(&(aes->aes_utf8)); archive_string_free(&(aes->aes_mbs_in_locale)); aes->aes_set = 0; } void archive_mstring_copy(struct archive_mstring *dest, struct archive_mstring *src) { dest->aes_set = src->aes_set; archive_string_copy(&(dest->aes_mbs), &(src->aes_mbs)); archive_string_copy(&(dest->aes_utf8), &(src->aes_utf8)); archive_wstring_copy(&(dest->aes_wcs), &(src->aes_wcs)); } int archive_mstring_get_utf8(struct archive *a, struct archive_mstring *aes, const char **p) { struct archive_string_conv *sc; int r; /* If we already have a UTF8 form, return that immediately. */ if (aes->aes_set & AES_SET_UTF8) { *p = aes->aes_utf8.s; return (0); } *p = NULL; if (aes->aes_set & AES_SET_MBS) { sc = archive_string_conversion_to_charset(a, "UTF-8", 1); if (sc == NULL) return (-1);/* Couldn't allocate memory for sc. */ r = archive_strncpy_l(&(aes->aes_utf8), aes->aes_mbs.s, aes->aes_mbs.length, sc); if (a == NULL) free_sconv_object(sc); if (r == 0) { aes->aes_set |= AES_SET_UTF8; *p = aes->aes_utf8.s; return (0);/* success. */ } else return (-1);/* failure. */ } return (0);/* success. */ } int archive_mstring_get_mbs(struct archive *a, struct archive_mstring *aes, const char **p) { int r, ret = 0; (void)a; /* UNUSED */ /* If we already have an MBS form, return that immediately. */ if (aes->aes_set & AES_SET_MBS) { *p = aes->aes_mbs.s; return (ret); } *p = NULL; /* If there's a WCS form, try converting with the native locale. */ if (aes->aes_set & AES_SET_WCS) { archive_string_empty(&(aes->aes_mbs)); r = archive_string_append_from_wcs(&(aes->aes_mbs), aes->aes_wcs.s, aes->aes_wcs.length); *p = aes->aes_mbs.s; if (r == 0) { aes->aes_set |= AES_SET_MBS; return (ret); } else ret = -1; } /* * Only a UTF-8 form cannot avail because its conversion already * failed at archive_mstring_update_utf8(). */ return (ret); } int archive_mstring_get_wcs(struct archive *a, struct archive_mstring *aes, const wchar_t **wp) { int r, ret = 0; (void)a;/* UNUSED */ /* Return WCS form if we already have it. */ if (aes->aes_set & AES_SET_WCS) { *wp = aes->aes_wcs.s; return (ret); } *wp = NULL; /* Try converting MBS to WCS using native locale. */ if (aes->aes_set & AES_SET_MBS) { archive_wstring_empty(&(aes->aes_wcs)); r = archive_wstring_append_from_mbs(&(aes->aes_wcs), aes->aes_mbs.s, aes->aes_mbs.length); if (r == 0) { aes->aes_set |= AES_SET_WCS; *wp = aes->aes_wcs.s; } else ret = -1;/* failure. 
*/ } return (ret); } int archive_mstring_get_mbs_l(struct archive_mstring *aes, const char **p, size_t *length, struct archive_string_conv *sc) { int r, ret = 0; #if defined(_WIN32) && !defined(__CYGWIN__) /* * Internationalization programming on Windows must use Wide * characters because Windows platform cannot make locale UTF-8. */ if (sc != NULL && (aes->aes_set & AES_SET_WCS) != 0) { archive_string_empty(&(aes->aes_mbs_in_locale)); r = archive_string_append_from_wcs_in_codepage( &(aes->aes_mbs_in_locale), aes->aes_wcs.s, aes->aes_wcs.length, sc); if (r == 0) { *p = aes->aes_mbs_in_locale.s; if (length != NULL) *length = aes->aes_mbs_in_locale.length; return (0); } else if (errno == ENOMEM) return (-1); else ret = -1; } #endif /* If there is not an MBS form but is a WCS form, try converting * with the native locale to be used for translating it to specified * character-set. */ if ((aes->aes_set & AES_SET_MBS) == 0 && (aes->aes_set & AES_SET_WCS) != 0) { archive_string_empty(&(aes->aes_mbs)); r = archive_string_append_from_wcs(&(aes->aes_mbs), aes->aes_wcs.s, aes->aes_wcs.length); if (r == 0) aes->aes_set |= AES_SET_MBS; else if (errno == ENOMEM) return (-1); else ret = -1; } /* If we already have an MBS form, use it to be translated to * specified character-set. */ if (aes->aes_set & AES_SET_MBS) { if (sc == NULL) { /* Conversion is unneeded. */ *p = aes->aes_mbs.s; if (length != NULL) *length = aes->aes_mbs.length; return (0); } ret = archive_strncpy_l(&(aes->aes_mbs_in_locale), aes->aes_mbs.s, aes->aes_mbs.length, sc); *p = aes->aes_mbs_in_locale.s; if (length != NULL) *length = aes->aes_mbs_in_locale.length; } else { *p = NULL; if (length != NULL) *length = 0; } return (ret); } int archive_mstring_copy_mbs(struct archive_mstring *aes, const char *mbs) { if (mbs == NULL) { aes->aes_set = 0; return (0); } return (archive_mstring_copy_mbs_len(aes, mbs, strlen(mbs))); } int archive_mstring_copy_mbs_len(struct archive_mstring *aes, const char *mbs, size_t len) { if (mbs == NULL) { aes->aes_set = 0; return (0); } aes->aes_set = AES_SET_MBS; /* Only MBS form is set now. */ archive_strncpy(&(aes->aes_mbs), mbs, len); archive_string_empty(&(aes->aes_utf8)); archive_wstring_empty(&(aes->aes_wcs)); return (0); } int archive_mstring_copy_wcs(struct archive_mstring *aes, const wchar_t *wcs) { return archive_mstring_copy_wcs_len(aes, wcs, wcs == NULL ? 0 : wcslen(wcs)); } int archive_mstring_copy_utf8(struct archive_mstring *aes, const char *utf8) { if (utf8 == NULL) { aes->aes_set = 0; return (0); } aes->aes_set = AES_SET_UTF8; archive_string_empty(&(aes->aes_mbs)); archive_string_empty(&(aes->aes_wcs)); archive_strncpy(&(aes->aes_utf8), utf8, strlen(utf8)); return (int)strlen(utf8); } int archive_mstring_copy_wcs_len(struct archive_mstring *aes, const wchar_t *wcs, size_t len) { if (wcs == NULL) { aes->aes_set = 0; return (0); } aes->aes_set = AES_SET_WCS; /* Only WCS form set. */ archive_string_empty(&(aes->aes_mbs)); archive_string_empty(&(aes->aes_utf8)); archive_wstrncpy(&(aes->aes_wcs), wcs, len); return (0); } int archive_mstring_copy_mbs_len_l(struct archive_mstring *aes, const char *mbs, size_t len, struct archive_string_conv *sc) { int r; if (mbs == NULL) { aes->aes_set = 0; return (0); } archive_string_empty(&(aes->aes_mbs)); archive_wstring_empty(&(aes->aes_wcs)); archive_string_empty(&(aes->aes_utf8)); #if defined(_WIN32) && !defined(__CYGWIN__) /* * Internationalization programming on Windows must use Wide * characters because Windows platform cannot make locale UTF-8. 
*/ if (sc == NULL) { if (archive_string_append(&(aes->aes_mbs), mbs, mbsnbytes(mbs, len)) == NULL) { aes->aes_set = 0; r = -1; } else { aes->aes_set = AES_SET_MBS; r = 0; } #if defined(HAVE_ICONV) } else if (sc != NULL && sc->cd_w != (iconv_t)-1) { /* * This case happens only when MultiByteToWideChar() cannot * handle sc->from_cp, and we have to iconv in order to * translate character-set to wchar_t,UTF-16. */ iconv_t cd = sc->cd; unsigned from_cp; int flag; /* * Translate multi-bytes from some character-set to UTF-8. */ sc->cd = sc->cd_w; r = archive_strncpy_l(&(aes->aes_utf8), mbs, len, sc); sc->cd = cd; if (r != 0) { aes->aes_set = 0; return (r); } aes->aes_set = AES_SET_UTF8; /* * Append the UTF-8 string into wstring. */ flag = sc->flag; sc->flag &= ~(SCONV_NORMALIZATION_C | SCONV_TO_UTF16| SCONV_FROM_UTF16); from_cp = sc->from_cp; sc->from_cp = CP_UTF8; r = archive_wstring_append_from_mbs_in_codepage(&(aes->aes_wcs), aes->aes_utf8.s, aes->aes_utf8.length, sc); sc->flag = flag; sc->from_cp = from_cp; if (r == 0) aes->aes_set |= AES_SET_WCS; #endif } else { r = archive_wstring_append_from_mbs_in_codepage( &(aes->aes_wcs), mbs, len, sc); if (r == 0) aes->aes_set = AES_SET_WCS; else aes->aes_set = 0; } #else r = archive_strncpy_l(&(aes->aes_mbs), mbs, len, sc); if (r == 0) aes->aes_set = AES_SET_MBS; /* Only MBS form is set now. */ else aes->aes_set = 0; #endif return (r); } /* * The 'update' form tries to proactively update all forms of * this string (WCS and MBS) and returns an error if any of * them fail. This is used by the 'pax' handler, for instance, * to detect and report character-conversion failures early while * still allowing clients to get potentially useful values from * the more tolerant lazy conversions. (get_mbs and get_wcs will * strive to give the user something useful, so you can get hopefully * usable values even if some of the character conversions are failing.) */ int archive_mstring_update_utf8(struct archive *a, struct archive_mstring *aes, const char *utf8) { struct archive_string_conv *sc; int r; if (utf8 == NULL) { aes->aes_set = 0; return (0); /* Succeeded in clearing everything. */ } /* Save the UTF8 string. */ archive_strcpy(&(aes->aes_utf8), utf8); /* Empty the mbs and wcs strings. */ archive_string_empty(&(aes->aes_mbs)); archive_wstring_empty(&(aes->aes_wcs)); aes->aes_set = AES_SET_UTF8; /* Only UTF8 is set now. */ /* Try converting UTF-8 to MBS, return false on failure. */ sc = archive_string_conversion_from_charset(a, "UTF-8", 1); if (sc == NULL) return (-1);/* Couldn't allocate memory for sc. */ r = archive_strcpy_l(&(aes->aes_mbs), utf8, sc); if (a == NULL) free_sconv_object(sc); if (r != 0) return (-1); aes->aes_set = AES_SET_UTF8 | AES_SET_MBS; /* Both UTF8 and MBS set. */ /* Try converting MBS to WCS, return false on failure. */ if (archive_wstring_append_from_mbs(&(aes->aes_wcs), aes->aes_mbs.s, aes->aes_mbs.length)) return (-1); aes->aes_set = AES_SET_UTF8 | AES_SET_WCS | AES_SET_MBS; /* All conversions succeeded. */ return (0); }
null
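A minimal sketch, assuming nothing beyond what the best_effort_strncat_to_utf16() code in the record above shows: every input byte becomes one 2-byte UTF-16 code unit (bytes above 127 are substituted with a replacement character), and the output is terminated with a 2-byte NUL, so the destination must be grown by (length + 1) * 2 bytes before the first write. The helper name and the std::vector container below are illustrative, not libarchive's API.

#include <cstddef>
#include <vector>

static const char16_t kReplacementChar = 0xFFFD; // stand-in for UNICODE_R_CHAR

// Append 'length' bytes of 's' to 'out' as UTF-16LE; returns 0 on success,
// -1 if any byte could not be represented and was replaced.
int append_bytes_as_utf16le(std::vector<unsigned char>& out,
                            const unsigned char* s, std::size_t length)
{
    int ret = 0;
    // Same capacity rule as the archive_string_ensure() call above:
    // 'length' code units of 2 bytes each, plus a 2-byte terminator.
    out.reserve(out.size() + (length + 1) * 2);

    for (std::size_t i = 0; i < length; ++i) {
        char16_t c = s[i];
        if (c > 127) {            // cannot handle it: substitute
            c = kReplacementChar;
            ret = -1;
        }
        out.push_back(static_cast<unsigned char>(c & 0xFF));        // low byte
        out.push_back(static_cast<unsigned char>((c >> 8) & 0xFF)); // high byte
    }
    out.push_back(0);             // 2-byte NUL terminator
    out.push_back(0);
    return ret;
}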
204
CWE-787
CVE-2020-23907
/** * @file src/bin2llvmir/optimizations/decoder/ir_modifications.cpp * @brief Decode input binary into LLVM IR. * @copyright (c) 2017 Avast Software, licensed under the MIT license */ #include "retdec/bin2llvmir/optimizations/decoder/decoder.h" using namespace retdec::utils; using namespace llvm; namespace retdec { namespace bin2llvmir { llvm::CallInst* Decoder::transformToCall( llvm::CallInst* pseudo, llvm::Function* callee) { auto* c = CallInst::Create(callee); c->insertAfter(pseudo); if (auto* retObj = getCallReturnObject()) { auto* cc = cast<Instruction>( IrModifier::convertValueToTypeAfter(c, retObj->getValueType(), c)); auto* s = new StoreInst(cc, retObj); s->insertAfter(cc); } return c; } llvm::CallInst* Decoder::transformToCondCall( llvm::CallInst* pseudo, llvm::Value* cond, llvm::Function* callee, llvm::BasicBlock* falseBb) { auto* oldBb = pseudo->getParent(); auto* newBb = oldBb->splitBasicBlock(pseudo); // We do NOT want to name or give address to this block. auto* oldTerm = oldBb->getTerminator(); BranchInst::Create(newBb, falseBb, cond, oldTerm); oldTerm->eraseFromParent(); auto* newTerm = newBb->getTerminator(); BranchInst::Create(falseBb, newTerm); newTerm->eraseFromParent(); auto* c = CallInst::Create(callee); c->insertAfter(pseudo); return c; } llvm::ReturnInst* Decoder::transformToReturn(llvm::CallInst* pseudo) { auto* term = pseudo->getParent()->getTerminator(); assert(pseudo->getNextNode() == term); auto* r = ReturnInst::Create( pseudo->getModule()->getContext(), UndefValue::get(pseudo->getFunction()->getReturnType()), term); term->eraseFromParent(); return r; } llvm::BranchInst* Decoder::transformToBranch( llvm::CallInst* pseudo, llvm::BasicBlock* branchee) { auto* term = pseudo->getParent()->getTerminator(); assert(pseudo->getNextNode() == term); auto* br = BranchInst::Create(branchee, term); term->eraseFromParent(); return br; } llvm::BranchInst* Decoder::transformToCondBranch( llvm::CallInst* pseudo, llvm::Value* cond, llvm::BasicBlock* trueBb, llvm::BasicBlock* falseBb) { auto* term = pseudo->getParent()->getTerminator(); assert(pseudo->getNextNode() == term); auto* br = BranchInst::Create(trueBb, falseBb, cond, term); term->eraseFromParent(); return br; } llvm::SwitchInst* Decoder::transformToSwitch( llvm::CallInst* pseudo, llvm::Value* val, llvm::BasicBlock* defaultBb, const std::vector<llvm::BasicBlock*>& cases) { unsigned numCases = 0; for (auto* c : cases) { if (c != defaultBb) { ++numCases; } } // If we do not do this, this can happen: // "Instruction does not dominate all uses" auto* insn = dyn_cast<Instruction>(val); if (insn && insn->getType()) { auto* gv = new GlobalVariable( *insn->getModule(), insn->getType(), false, GlobalValue::ExternalLinkage, nullptr); auto* s = new StoreInst(insn, gv); s->insertAfter(insn); val = new LoadInst(gv, "", pseudo); } auto* term = pseudo->getParent()->getTerminator(); assert(pseudo->getNextNode() == term); auto* intType = cast<IntegerType>(val->getType()); auto* sw = SwitchInst::Create(val, defaultBb, numCases, term); unsigned cntr = 0; for (auto& c : cases) { if (c != defaultBb) { sw->addCase(ConstantInt::get(intType, cntr), c); } ++cntr; } term->eraseFromParent(); return sw; } /** * TODO: We should get registers based on the ABI the function is using, * not the same register for all calls on an architecture. 
*/ llvm::GlobalVariable* Decoder::getCallReturnObject() { if (_config->getConfig().architecture.isX86_32()) { return _abi->getRegister(X86_REG_EAX); } else if (_config->getConfig().architecture.isX86_64()) { return _abi->getRegister(X86_REG_RAX); } else if (_config->getConfig().architecture.isMipsOrPic32()) { return _abi->getRegister(MIPS_REG_V0); } else if (_config->getConfig().architecture.isPpc()) { return _abi->getRegister(PPC_REG_R3); } else if (_config->getConfig().architecture.isArm32OrThumb()) { return _abi->getRegister(ARM_REG_R0); } else if (_config->getConfig().architecture.isArm64()) { return _config->getLlvmRegister("r0"); } assert(false); return nullptr; } /** * Primary: try to create function for \p addr target and fill \p tFnc with * the result. If successful, \p tBb is also filled. * Secondary: if function not created, try to create BB for \p addr target and * fill \p tBb with the result. */ void Decoder::getOrCreateCallTarget( utils::Address addr, llvm::Function*& tFnc, llvm::BasicBlock*& tBb) { tBb = nullptr; tFnc = nullptr; if (auto* f = getFunctionAtAddress(addr)) { tFnc = f; tBb = tFnc->empty() ? nullptr : &tFnc->front(); LOG << "\t\t\t\t" << "F: getFunctionAtAddress() @ " << addr << std::endl; } else if (auto* f = splitFunctionOn(addr)) { tFnc = f; tBb = tFnc->empty() ? nullptr : &tFnc->front(); LOG << "\t\t\t\t" << "F: splitFunctionOn() @ " << addr << std::endl; } else if (auto* bb = getBasicBlockAtAddress(addr)) { tBb = bb; LOG << "\t\t\t\t" << "F: getBasicBlockAtAddress() @ " << addr << std::endl; } else if (getBasicBlockContainingAddress(addr)) { // Nothing - we are not splitting BBs here. LOG << "\t\t\t\t" << "F: getBasicBlockContainingAddress() @ " << addr << std::endl; } else if (getFunctionContainingAddress(addr)) { auto* bb = getBasicBlockBeforeAddress(addr); assert(bb); tBb = createBasicBlock(addr, bb->getParent(), bb); LOG << "\t\t\t\t" << "F: getFunctionContainingAddress() @ " << addr << std::endl; } else { tFnc = createFunction(addr); tBb = tFnc && !tFnc->empty() ? &tFnc->front() : nullptr; LOG << "\t\t\t\t" << "F: createFunction() @ " << addr << std::endl; } } /** * */ void Decoder::getOrCreateBranchTarget( utils::Address addr, llvm::BasicBlock*& tBb, llvm::Function*& tFnc, llvm::Instruction* from) { tBb = nullptr; tFnc = nullptr; auto* fromFnc = from->getFunction(); if (auto* bb = getBasicBlockAtAddress(addr)) { tBb = bb; LOG << "\t\t\t\t" << "B: getBasicBlockAtAddress() @ " << addr << std::endl; } else if (getBasicBlockContainingAddress(addr)) { auto ai = AsmInstruction(_module, addr); if (ai.isInvalid()) { // Target in existing block, but not at existing instruction. // Something is wrong, nothing we can do. LOG << "\t\t\t\t" << "B: invalid ASM @ " << addr << std::endl; return; } else if (ai.getFunction() == fromFnc) { tBb = ai.makeStart(); addBasicBlock(addr, tBb); LOG << "\t\t\t\t" << "B: addBasicBlock @ " << addr << std::endl; } else { // Target at existing instruction, but in different function. // Do not split existing block in other functions here. LOG << "\t\t\t\t" << "B: ASM in diff fnc @ " << addr << std::endl; return; } } // Function without BBs (e.g. import declarations). 
else if (auto* targetFnc = getFunctionAtAddress(addr)) { tFnc = targetFnc; LOG << "\t\t\t\t" << "B: getFunctionAtAddress() @ " << addr << std::endl; } else if (auto* bb = getBasicBlockBeforeAddress(addr)) { tBb = createBasicBlock(addr, bb->getParent(), bb); LOG << "\t\t\t\t" << "B: getBasicBlockBeforeAddress() @ " << addr << std::endl; } else { tFnc = createFunction(addr); tBb = tFnc && !tFnc->empty() ? &tFnc->front() : nullptr; LOG << "\t\t\t\t" << "B: default @ " << addr << std::endl; } if (tBb && tBb->getPrevNode() == nullptr) { tFnc = tBb->getParent(); } if (tBb && tBb->getParent() == fromFnc) { return; } if (tFnc) { return; } LOG << "\t\t\t\t" << "B: splitFunctionOn @ " << addr << std::endl; tFnc = splitFunctionOn(addr); tBb = tFnc && !tFnc->empty() ? &tFnc->front() : tBb; } /** * \return \c True if it is allowed to split function on basic block \p bb. */ bool Decoder::canSplitFunctionOn(llvm::BasicBlock* bb) { for (auto* u : bb->users()) { // All users must be unconditional branch instructions. // auto* br = dyn_cast<BranchInst>(u); if (br == nullptr || br->isConditional()) { LOG << "\t\t\t\t\t\t" << "!CAN : user not uncond for " << llvmObjToString(u) << ", user = " << llvmObjToString(br) << std::endl; return false; } // Branch can not come from istruction right before basic block. // This expects that such branches were created // TODO: if // AsmInstruction brAsm(br); AsmInstruction bbAsm(bb); if (brAsm.getEndAddress() == bbAsm.getAddress()) { LOG << "\t\t\t\t\t\t" << "branch from ASM insn right before: " << brAsm.getAddress() << " -> " << bbAsm.getAddress() << std::endl; return false; } // BB must be true branch in all users. // // if (br->getSuccessor(0) != bb) // { // return false; // } } return true; } /** * \return \c True if it is allowed to split function on basic block \p bb. * * TODO: * The problem here is, that function may became unsplittable after it was * split. What then? Merge them back together and transform calls to JUMP_OUTs? * Or defer splits/calls/etc only after basic decoding of all functions is done? * E.g. * fnc1(): * ... * b lab_in_2 * ... * * fnc2(): (nothing decoded yet) * ... * // should not be split here, but it can, because flow from fnc2() * // start does not exist yet. * lab_in_2: * ... 
* fnc2 end */ bool Decoder::canSplitFunctionOn( utils::Address addr, llvm::BasicBlock* splitBb, std::set<llvm::BasicBlock*>& newFncStarts) { newFncStarts.insert(splitBb); auto* f = splitBb->getParent(); auto fAddr = getFunctionAddress(f); auto fSzIt = _fnc2sz.find(f); if (fSzIt != _fnc2sz.end()) { if (fAddr <= addr && addr < (fAddr+fSzIt->second)) { LOG << "\t\t\t\t\t" << "!CAN S: addr cond @ " << addr << std::endl; return false; } } std::set<Address> fncStarts; fncStarts.insert(fAddr); fncStarts.insert(addr); LOG << "\t\t\t\t\t" << "CAN S: split @ " << fAddr << std::endl; LOG << "\t\t\t\t\t" << "CAN S: split @ " << addr << std::endl; bool changed = true; while (changed) { changed = false; for (BasicBlock& b : *f) { // Address bAddr = getBasicBlockAddress(&b); Address bAddr; // TODO: shitty BasicBlock* bPrev = &b; while (bAddr.isUndefined() && bPrev) { bAddr = getBasicBlockAddress(bPrev); bPrev = bPrev->getPrevNode(); } if (bAddr.isUndefined()) { continue; } auto up = fncStarts.upper_bound(bAddr); --up; Address bFnc = *up; for (auto* p : predecessors(&b)) { // Address pAddr = getBasicBlockAddress(p); Address pAddr; // TODO: shitty BasicBlock* pPrev = p; while (pAddr.isUndefined() && pPrev) { pAddr = getBasicBlockAddress(pPrev); pPrev = pPrev->getPrevNode(); } if (pAddr.isUndefined()) { continue; } auto up = fncStarts.upper_bound(pAddr); --up; Address pFnc = *up; if (bFnc != pFnc) { if (!canSplitFunctionOn(&b)) { return false; } changed |= newFncStarts.insert(&b).second; changed |= fncStarts.insert(bAddr).second; LOG << "\t\t\t\t\t" << "CAN S: split @ " << bAddr << std::endl; } } } } return true; } /** * This can create new BB at \p addr even if it then cannot split function * on this new BB. Is this desirable behavior? */ llvm::Function* Decoder::splitFunctionOn(utils::Address addr) { if (auto* bb = getBasicBlockAtAddress(addr)) { LOG << "\t\t\t\t" << "S: splitFunctionOn @ " << addr << std::endl; return bb->getPrevNode() ? splitFunctionOn(addr, bb) : bb->getParent(); } // There is an instruction at address, but not BB -> do not split // existing blocks to create functions. 
// else if (auto ai = AsmInstruction(_module, addr)) { if (ai.isInvalid()) { LOG << "\t\t\t\t" << "S: invalid ASM @ " << addr << std::endl; return nullptr; } else { LOG << "\t\t\t\t" << "S: ASM @ " << addr << std::endl; return nullptr; } } else if (getFunctionContainingAddress(addr)) { LOG << "\t\t\t\t" << "S: getFunctionContainingAddress() @ " << addr << std::endl; auto* before = getBasicBlockBeforeAddress(addr); assert(before); auto* newBb = createBasicBlock(addr, before->getParent(), before); return splitFunctionOn(addr, newBb); } else { LOG << "\t\t\t\t" << "S: createFunction() @ " << addr << std::endl; return createFunction(addr); } } llvm::Function* Decoder::splitFunctionOn( utils::Address addr, llvm::BasicBlock* splitOnBb) { LOG << "\t\t\t\t" << "S: splitFunctionOn @ " << addr << " on " << splitOnBb->getName().str() << std::endl; if (splitOnBb->getPrevNode() == nullptr) { LOG << "\t\t\t\t" << "S: BB first @ " << addr << std::endl; return splitOnBb->getParent(); } std::set<BasicBlock*> newFncStarts; if (!canSplitFunctionOn(addr, splitOnBb, newFncStarts)) { LOG << "\t\t\t\t" << "S: !canSplitFunctionOn() @ " << addr << std::endl; return nullptr; } llvm::Function* ret = nullptr; std::set<Function*> newFncs; for (auto* splitBb : newFncStarts) { Address splitAddr = getBasicBlockAddress(splitBb); LOG << "\t\t\t\t" << "S: splitting @ " << splitAddr << " on " << splitBb->getName().str() << std::endl; std::string name = _names->getPreferredNameForAddress(splitAddr); if (name.empty()) { name = names::generateFunctionName(splitAddr, _config->getConfig().isIda()); } Function* oldFnc = splitBb->getParent(); Function* newFnc = Function::Create( FunctionType::get(oldFnc->getReturnType(), false), oldFnc->getLinkage(), name); oldFnc->getParent()->getFunctionList().insertAfter( oldFnc->getIterator(), newFnc); addFunction(splitAddr, newFnc); newFnc->getBasicBlockList().splice( newFnc->begin(), oldFnc->getBasicBlockList(), splitBb->getIterator(), oldFnc->getBasicBlockList().end()); newFncs.insert(oldFnc); newFncs.insert(newFnc); if (splitOnBb == splitBb) { ret = newFnc; } } assert(ret); for (Function* f : newFncs) for (BasicBlock& b : *f) { auto* br = dyn_cast<BranchInst>(b.getTerminator()); if (br && (br->getSuccessor(0)->getParent() != br->getFunction() || br->getSuccessor(0)->getPrevNode() == nullptr)) { auto* callee = br->getSuccessor(0)->getParent(); auto* c = CallInst::Create(callee, "", br); if (auto* retObj = getCallReturnObject()) { auto* cc = cast<Instruction>( IrModifier::convertValueToTypeAfter(c, retObj->getValueType(), c)); auto* s = new StoreInst(cc, retObj); s->insertAfter(cc); } ReturnInst::Create( br->getModule()->getContext(), UndefValue::get(br->getFunction()->getReturnType()), br); br->eraseFromParent(); } // Test. for (auto* s : successors(&b)) { if (b.getParent() != s->getParent()) { dumpModuleToFile(_module, _config->getOutputDirectory()); } assert(b.getParent() == s->getParent()); } } return ret; } } // namespace bin2llvmir } // namespace retdec
null
/** * @file src/bin2llvmir/optimizations/decoder/ir_modifications.cpp * @brief Decode input binary into LLVM IR. * @copyright (c) 2017 Avast Software, licensed under the MIT license */ #include "retdec/bin2llvmir/optimizations/decoder/decoder.h" using namespace retdec::utils; using namespace llvm; namespace retdec { namespace bin2llvmir { llvm::CallInst* Decoder::transformToCall( llvm::CallInst* pseudo, llvm::Function* callee) { auto* c = CallInst::Create(callee); c->insertAfter(pseudo); if (auto* retObj = getCallReturnObject()) { auto* cc = cast<Instruction>( IrModifier::convertValueToTypeAfter(c, retObj->getValueType(), c)); auto* s = new StoreInst(cc, retObj); s->insertAfter(cc); } return c; } llvm::CallInst* Decoder::transformToCondCall( llvm::CallInst* pseudo, llvm::Value* cond, llvm::Function* callee, llvm::BasicBlock* falseBb) { auto* oldBb = pseudo->getParent(); auto* newBb = oldBb->splitBasicBlock(pseudo); // We do NOT want to name or give address to this block. auto* oldTerm = oldBb->getTerminator(); BranchInst::Create(newBb, falseBb, cond, oldTerm); oldTerm->eraseFromParent(); auto* newTerm = newBb->getTerminator(); BranchInst::Create(falseBb, newTerm); newTerm->eraseFromParent(); auto* c = CallInst::Create(callee); c->insertAfter(pseudo); return c; } llvm::ReturnInst* Decoder::transformToReturn(llvm::CallInst* pseudo) { auto* term = pseudo->getParent()->getTerminator(); assert(pseudo->getNextNode() == term); auto* r = ReturnInst::Create( pseudo->getModule()->getContext(), UndefValue::get(pseudo->getFunction()->getReturnType()), term); term->eraseFromParent(); return r; } llvm::BranchInst* Decoder::transformToBranch( llvm::CallInst* pseudo, llvm::BasicBlock* branchee) { auto* term = pseudo->getParent()->getTerminator(); assert(pseudo->getNextNode() == term); auto* br = BranchInst::Create(branchee, term); term->eraseFromParent(); return br; } llvm::BranchInst* Decoder::transformToCondBranch( llvm::CallInst* pseudo, llvm::Value* cond, llvm::BasicBlock* trueBb, llvm::BasicBlock* falseBb) { auto* term = pseudo->getParent()->getTerminator(); assert(pseudo->getNextNode() == term); auto* br = BranchInst::Create(trueBb, falseBb, cond, term); term->eraseFromParent(); return br; } llvm::SwitchInst* Decoder::transformToSwitch( llvm::CallInst* pseudo, llvm::Value* val, llvm::BasicBlock* defaultBb, const std::vector<llvm::BasicBlock*>& cases) { unsigned numCases = 0; for (auto* c : cases) { if (c != defaultBb) { ++numCases; } } // If we do not do this, this can happen: // "Instruction does not dominate all uses" auto* insn = dyn_cast<Instruction>(val); if (insn && insn->getType()) { auto* gv = new GlobalVariable( *insn->getModule(), insn->getType(), false, GlobalValue::ExternalLinkage, nullptr); auto* s = new StoreInst(insn, gv); s->insertAfter(insn); val = new LoadInst(gv, "", pseudo); } auto* term = pseudo->getParent()->getTerminator(); assert(pseudo->getNextNode() == term); auto* intType = cast<IntegerType>(val->getType()); auto* sw = SwitchInst::Create(val, defaultBb, numCases, term); unsigned cntr = 0; for (auto& c : cases) { if (c != defaultBb) { sw->addCase(ConstantInt::get(intType, cntr), c); } ++cntr; } term->eraseFromParent(); return sw; } /** * TODO: We should get registers based on the ABI the function is using, * not the same register for all calls on an architecture. 
*/ llvm::GlobalVariable* Decoder::getCallReturnObject() { if (_config->getConfig().architecture.isX86_32()) { return _abi->getRegister(X86_REG_EAX); } else if (_config->getConfig().architecture.isX86_64()) { return _abi->getRegister(X86_REG_RAX); } else if (_config->getConfig().architecture.isMipsOrPic32()) { return _abi->getRegister(MIPS_REG_V0); } else if (_config->getConfig().architecture.isPpc()) { return _abi->getRegister(PPC_REG_R3); } else if (_config->getConfig().architecture.isArm32OrThumb()) { return _abi->getRegister(ARM_REG_R0); } else if (_config->getConfig().architecture.isArm64()) { return _config->getLlvmRegister("r0"); } assert(false); return nullptr; } /** * Primary: try to create function for \p addr target and fill \p tFnc with * the result. If successful, \p tBb is also filled. * Secondary: if function not created, try to create BB for \p addr target and * fill \p tBb with the result. */ void Decoder::getOrCreateCallTarget( utils::Address addr, llvm::Function*& tFnc, llvm::BasicBlock*& tBb) { tBb = nullptr; tFnc = nullptr; if (auto* f = getFunctionAtAddress(addr)) { tFnc = f; tBb = tFnc->empty() ? nullptr : &tFnc->front(); LOG << "\t\t\t\t" << "F: getFunctionAtAddress() @ " << addr << std::endl; } else if (auto* f = splitFunctionOn(addr)) { tFnc = f; tBb = tFnc->empty() ? nullptr : &tFnc->front(); LOG << "\t\t\t\t" << "F: splitFunctionOn() @ " << addr << std::endl; } else if (auto* bb = getBasicBlockAtAddress(addr)) { tBb = bb; LOG << "\t\t\t\t" << "F: getBasicBlockAtAddress() @ " << addr << std::endl; } else if (getBasicBlockContainingAddress(addr)) { // Nothing - we are not splitting BBs here. LOG << "\t\t\t\t" << "F: getBasicBlockContainingAddress() @ " << addr << std::endl; } else if (getFunctionContainingAddress(addr)) { auto* bb = getBasicBlockBeforeAddress(addr); assert(bb); tBb = createBasicBlock(addr, bb->getParent(), bb); LOG << "\t\t\t\t" << "F: getFunctionContainingAddress() @ " << addr << std::endl; } else { tFnc = createFunction(addr); tBb = tFnc && !tFnc->empty() ? &tFnc->front() : nullptr; LOG << "\t\t\t\t" << "F: createFunction() @ " << addr << std::endl; } } /** * */ void Decoder::getOrCreateBranchTarget( utils::Address addr, llvm::BasicBlock*& tBb, llvm::Function*& tFnc, llvm::Instruction* from) { tBb = nullptr; tFnc = nullptr; auto* fromFnc = from->getFunction(); if (auto* bb = getBasicBlockAtAddress(addr)) { tBb = bb; LOG << "\t\t\t\t" << "B: getBasicBlockAtAddress() @ " << addr << std::endl; } else if (getBasicBlockContainingAddress(addr)) { auto ai = AsmInstruction(_module, addr); if (ai.isInvalid()) { // Target in existing block, but not at existing instruction. // Something is wrong, nothing we can do. LOG << "\t\t\t\t" << "B: invalid ASM @ " << addr << std::endl; return; } else if (ai.getFunction() == fromFnc) { tBb = ai.makeStart(); addBasicBlock(addr, tBb); LOG << "\t\t\t\t" << "B: addBasicBlock @ " << addr << std::endl; } else { // Target at existing instruction, but in different function. // Do not split existing block in other functions here. LOG << "\t\t\t\t" << "B: ASM in diff fnc @ " << addr << std::endl; return; } } // Function without BBs (e.g. import declarations). 
else if (auto* targetFnc = getFunctionAtAddress(addr)) { tFnc = targetFnc; LOG << "\t\t\t\t" << "B: getFunctionAtAddress() @ " << addr << std::endl; } else if (auto* bb = getBasicBlockBeforeAddress(addr)) { tBb = createBasicBlock(addr, bb->getParent(), bb); LOG << "\t\t\t\t" << "B: getBasicBlockBeforeAddress() @ " << addr << std::endl; } else { tFnc = createFunction(addr); tBb = tFnc && !tFnc->empty() ? &tFnc->front() : nullptr; LOG << "\t\t\t\t" << "B: default @ " << addr << std::endl; } if (tBb && tBb->getPrevNode() == nullptr) { tFnc = tBb->getParent(); } if (tBb && tBb->getParent() == fromFnc) { return; } if (tFnc) { return; } LOG << "\t\t\t\t" << "B: splitFunctionOn @ " << addr << std::endl; tFnc = splitFunctionOn(addr); tBb = tFnc && !tFnc->empty() ? &tFnc->front() : tBb; } /** * \return \c True if it is allowed to split function on basic block \p bb. */ bool Decoder::canSplitFunctionOn(llvm::BasicBlock* bb) { for (auto* u : bb->users()) { // All users must be unconditional branch instructions. // auto* br = dyn_cast<BranchInst>(u); if (br == nullptr || br->isConditional()) { LOG << "\t\t\t\t\t\t" << "!CAN : user not uncond for " << llvmObjToString(u) << ", user = " << llvmObjToString(br) << std::endl; return false; } // Branch can not come from istruction right before basic block. // This expects that such branches were created // TODO: if // AsmInstruction brAsm(br); AsmInstruction bbAsm(bb); if (brAsm.getEndAddress() == bbAsm.getAddress()) { LOG << "\t\t\t\t\t\t" << "branch from ASM insn right before: " << brAsm.getAddress() << " -> " << bbAsm.getAddress() << std::endl; return false; } // BB must be true branch in all users. // // if (br->getSuccessor(0) != bb) // { // return false; // } } return true; } /** * \return \c True if it is allowed to split function on basic block \p bb. * * TODO: * The problem here is, that function may became unsplittable after it was * split. What then? Merge them back together and transform calls to JUMP_OUTs? * Or defer splits/calls/etc only after basic decoding of all functions is done? * E.g. * fnc1(): * ... * b lab_in_2 * ... * * fnc2(): (nothing decoded yet) * ... * // should not be split here, but it can, because flow from fnc2() * // start does not exist yet. * lab_in_2: * ... 
* fnc2 end */ bool Decoder::canSplitFunctionOn( utils::Address addr, llvm::BasicBlock* splitBb, std::set<llvm::BasicBlock*>& newFncStarts) { newFncStarts.insert(splitBb); auto* f = splitBb->getParent(); auto fAddr = getFunctionAddress(f); auto fSzIt = _fnc2sz.find(f); if (fSzIt != _fnc2sz.end()) { if (fAddr <= addr && addr < (fAddr+fSzIt->second)) { LOG << "\t\t\t\t\t" << "!CAN S: addr cond @ " << addr << std::endl; return false; } } std::set<Address> fncStarts; fncStarts.insert(fAddr); fncStarts.insert(addr); LOG << "\t\t\t\t\t" << "CAN S: split @ " << fAddr << std::endl; LOG << "\t\t\t\t\t" << "CAN S: split @ " << addr << std::endl; bool changed = true; while (changed) { changed = false; for (BasicBlock& b : *f) { // Address bAddr = getBasicBlockAddress(&b); Address bAddr; // TODO: shitty BasicBlock* bPrev = &b; while (bAddr.isUndefined() && bPrev) { bAddr = getBasicBlockAddress(bPrev); bPrev = bPrev->getPrevNode(); } if (bAddr.isUndefined()) { continue; } auto up = fncStarts.upper_bound(bAddr); if (up == fncStarts.begin()) { return false; } --up; Address bFnc = *up; for (auto* p : predecessors(&b)) { // Address pAddr = getBasicBlockAddress(p); Address pAddr; // TODO: shitty BasicBlock* pPrev = p; while (pAddr.isUndefined() && pPrev) { pAddr = getBasicBlockAddress(pPrev); pPrev = pPrev->getPrevNode(); } if (pAddr.isUndefined()) { continue; } auto up = fncStarts.upper_bound(pAddr); if (up == fncStarts.begin()) { return false; } --up; Address pFnc = *up; if (bFnc != pFnc) { if (!canSplitFunctionOn(&b)) { return false; } changed |= newFncStarts.insert(&b).second; changed |= fncStarts.insert(bAddr).second; LOG << "\t\t\t\t\t" << "CAN S: split @ " << bAddr << std::endl; } } } } return true; } /** * This can create new BB at \p addr even if it then cannot split function * on this new BB. Is this desirable behavior? */ llvm::Function* Decoder::splitFunctionOn(utils::Address addr) { if (auto* bb = getBasicBlockAtAddress(addr)) { LOG << "\t\t\t\t" << "S: splitFunctionOn @ " << addr << std::endl; return bb->getPrevNode() ? splitFunctionOn(addr, bb) : bb->getParent(); } // There is an instruction at address, but not BB -> do not split // existing blocks to create functions. 
// else if (auto ai = AsmInstruction(_module, addr)) { if (ai.isInvalid()) { LOG << "\t\t\t\t" << "S: invalid ASM @ " << addr << std::endl; return nullptr; } else { LOG << "\t\t\t\t" << "S: ASM @ " << addr << std::endl; return nullptr; } } else if (getFunctionContainingAddress(addr)) { LOG << "\t\t\t\t" << "S: getFunctionContainingAddress() @ " << addr << std::endl; auto* before = getBasicBlockBeforeAddress(addr); assert(before); auto* newBb = createBasicBlock(addr, before->getParent(), before); return splitFunctionOn(addr, newBb); } else { LOG << "\t\t\t\t" << "S: createFunction() @ " << addr << std::endl; return createFunction(addr); } } llvm::Function* Decoder::splitFunctionOn( utils::Address addr, llvm::BasicBlock* splitOnBb) { LOG << "\t\t\t\t" << "S: splitFunctionOn @ " << addr << " on " << splitOnBb->getName().str() << std::endl; if (splitOnBb->getPrevNode() == nullptr) { LOG << "\t\t\t\t" << "S: BB first @ " << addr << std::endl; return splitOnBb->getParent(); } std::set<BasicBlock*> newFncStarts; if (!canSplitFunctionOn(addr, splitOnBb, newFncStarts)) { LOG << "\t\t\t\t" << "S: !canSplitFunctionOn() @ " << addr << std::endl; return nullptr; } llvm::Function* ret = nullptr; std::set<Function*> newFncs; for (auto* splitBb : newFncStarts) { Address splitAddr = getBasicBlockAddress(splitBb); LOG << "\t\t\t\t" << "S: splitting @ " << splitAddr << " on " << splitBb->getName().str() << std::endl; std::string name = _names->getPreferredNameForAddress(splitAddr); if (name.empty()) { name = names::generateFunctionName(splitAddr, _config->getConfig().isIda()); } Function* oldFnc = splitBb->getParent(); Function* newFnc = Function::Create( FunctionType::get(oldFnc->getReturnType(), false), oldFnc->getLinkage(), name); oldFnc->getParent()->getFunctionList().insertAfter( oldFnc->getIterator(), newFnc); addFunction(splitAddr, newFnc); newFnc->getBasicBlockList().splice( newFnc->begin(), oldFnc->getBasicBlockList(), splitBb->getIterator(), oldFnc->getBasicBlockList().end()); newFncs.insert(oldFnc); newFncs.insert(newFnc); if (splitOnBb == splitBb) { ret = newFnc; } } assert(ret); for (Function* f : newFncs) for (BasicBlock& b : *f) { auto* br = dyn_cast<BranchInst>(b.getTerminator()); if (br && (br->getSuccessor(0)->getParent() != br->getFunction() || br->getSuccessor(0)->getPrevNode() == nullptr)) { auto* callee = br->getSuccessor(0)->getParent(); auto* c = CallInst::Create(callee, "", br); if (auto* retObj = getCallReturnObject()) { auto* cc = cast<Instruction>( IrModifier::convertValueToTypeAfter(c, retObj->getValueType(), c)); auto* s = new StoreInst(cc, retObj); s->insertAfter(cc); } ReturnInst::Create( br->getModule()->getContext(), UndefValue::get(br->getFunction()->getReturnType()), br); br->eraseFromParent(); } // Test. for (auto* s : successors(&b)) { if (b.getParent() != s->getParent()) { dumpModuleToFile(_module, _config->getOutputDirectory()); } assert(b.getParent() == s->getParent()); } } return ret; } } // namespace bin2llvmir } // namespace retdec
null
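The two ir_modifications.cpp listings in the record above differ in one place: the patched Decoder::canSplitFunctionOn() checks whether std::set::upper_bound() returned begin() before decrementing the iterator. A minimal sketch of that guard, using names of my own choosing rather than retdec's:

#include <cstdint>
#include <optional>
#include <set>

using Address = std::uint64_t;

// Find the greatest recorded function start <= addr, or nothing if addr
// precedes every start. upper_bound() returns begin() when addr is smaller
// than all elements, and decrementing begin() is undefined behavior, so the
// guard added in the fixed version bails out first.
std::optional<Address> owningFunctionStart(const std::set<Address>& fncStarts,
                                           Address addr)
{
    auto up = fncStarts.upper_bound(addr);
    if (up == fncStarts.begin())
        return std::nullopt;   // nothing at or below addr
    --up;                      // now safe: points at the last start <= addr
    return *up;
}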
205
CWE-787
CVE-2020-24753
/* CBOR-to-JSON translation utility */ #include "rtjsonsrc/osrtjson.h" #include "rtcborsrc/osrtcbor.h" #include "rtxsrc/rtxCharStr.h" #include "rtxsrc/rtxContext.h" #include "rtxsrc/rtxFile.h" #include "rtxsrc/rtxHexDump.h" #include <stdio.h> #ifndef _NO_INT64_SUPPORT #define OSUINTTYPE OSUINT64 #define OSINTTYPE OSINT64 #define rtCborDecUInt rtCborDecUInt64 #define rtCborDecInt rtCborDecInt64 #else #define OSUINTTYPE OSUINT32 #define OSINTTYPE OSINT32 #define rtCborDecUInt rtCborDecUInt32 #define rtCborDecInt rtCborDecInt32 #endif static int cborTagNotSupp (OSCTXT* pctxt, OSOCTET tag) { char numbuf[10]; char errtext[80]; rtxUIntToCharStr (tag, numbuf, sizeof(numbuf), 0); rtxStrJoin (errtext, sizeof(errtext), "CBOR tag ", numbuf, 0, 0, 0); rtxErrAddStrParm (pctxt, errtext); return RTERR_NOTSUPP; } static int cborElemNameToJson (OSCTXT* pCborCtxt, OSCTXT* pJsonCtxt) { char* pElemName = 0; OSOCTET ub; int ret; /* Read byte from stream */ ret = rtxReadBytes (pCborCtxt, &ub, 1); if (0 != ret) return LOG_RTERR (pCborCtxt, ret); /* Decode element name (note: only string type is currently supported) */ ret = rtCborDecDynUTF8Str (pCborCtxt, ub, &pElemName); if (0 != ret) return LOG_RTERR (pCborCtxt, ret); /* Encode map element name as string */ ret = rtJsonEncStringValue (pJsonCtxt, (const OSUTF8CHAR*)pElemName); rtxMemFreePtr (pCborCtxt, pElemName); if (0 != ret) return LOG_RTERR (pJsonCtxt, ret); OSRTSAFEPUTCHAR (pJsonCtxt, ':'); return 0; } static int cbor2json (OSCTXT* pCborCtxt, OSCTXT* pJsonCtxt) { int ret = 0; OSOCTET tag, ub; /* Read byte from stream */ ret = rtxReadBytes (pCborCtxt, &ub, 1); if (0 != ret) return LOG_RTERR (pCborCtxt, ret); tag = ub >> 5; /* Switch on tag value */ switch (tag) { case OSRTCBOR_UINT: { OSUINTTYPE value; ret = rtCborDecUInt (pCborCtxt, ub, &value); if (0 != ret) return LOG_RTERR (pCborCtxt, ret); /* Encode JSON */ #ifndef _NO_INT64_SUPPORT ret = rtJsonEncUInt64Value (pJsonCtxt, value); #else ret = rtJsonEncUIntValue (pJsonCtxt, value); #endif if (0 != ret) return LOG_RTERR (pJsonCtxt, ret); break; } case OSRTCBOR_NEGINT: { OSINTTYPE value; ret = rtCborDecInt (pCborCtxt, ub, &value); if (0 != ret) return LOG_RTERR (pCborCtxt, ret); /* Encode JSON */ #ifndef _NO_INT64_SUPPORT ret = rtJsonEncInt64Value (pJsonCtxt, value); #else ret = rtJsonEncIntValue (pJsonCtxt, value); #endif if (0 != ret) return LOG_RTERR (pJsonCtxt, ret); break; } case OSRTCBOR_BYTESTR: { OSDynOctStr64 byteStr; ret = rtCborDecDynByteStr (pCborCtxt, ub, &byteStr); if (0 != ret) return LOG_RTERR (pCborCtxt, ret); /* Encode JSON */ ret = rtJsonEncHexStr (pJsonCtxt, byteStr.numocts, byteStr.data); rtxMemFreePtr (pCborCtxt, byteStr.data); if (0 != ret) return LOG_RTERR (pJsonCtxt, ret); break; } case OSRTCBOR_UTF8STR: { OSUTF8CHAR* utf8str; ret = rtCborDecDynUTF8Str (pCborCtxt, ub, (char**)&utf8str); ret = rtJsonEncStringValue (pJsonCtxt, utf8str); rtxMemFreePtr (pCborCtxt, utf8str); if (0 != ret) return LOG_RTERR (pJsonCtxt, ret); break; } case OSRTCBOR_ARRAY: case OSRTCBOR_MAP: { OSOCTET len = ub & 0x1F; char startChar = (tag == OSRTCBOR_ARRAY) ? '[' : '{'; char endChar = (tag == OSRTCBOR_ARRAY) ? 
']' : '}'; OSRTSAFEPUTCHAR (pJsonCtxt, startChar); if (len == OSRTCBOR_INDEF) { OSBOOL first = TRUE; for (;;) { if (OSRTCBOR_MATCHEOC (pCborCtxt)) { pCborCtxt->buffer.byteIndex++; break; } if (!first) OSRTSAFEPUTCHAR (pJsonCtxt, ','); else first = FALSE; /* If map, decode object name */ if (tag == OSRTCBOR_MAP) { ret = cborElemNameToJson (pCborCtxt, pJsonCtxt); } /* Make recursive call */ if (0 == ret) ret = cbor2json (pCborCtxt, pJsonCtxt); if (0 != ret) { OSCTXT* pctxt = (rtxErrGetErrorCnt(pJsonCtxt) > 0) ? pJsonCtxt : pCborCtxt; return LOG_RTERR (pctxt, ret); } } } else { /* definite length */ OSSIZE nitems; /* Decode tag and number of items */ ret = rtCborDecSize (pCborCtxt, len, &nitems); if (0 == ret) { OSSIZE i; /* Loop to decode array items */ for (i = 0; i < nitems; i++) { if (0 != i) OSRTSAFEPUTCHAR (pJsonCtxt, ','); /* If map, decode object name */ if (tag == OSRTCBOR_MAP) { ret = cborElemNameToJson (pCborCtxt, pJsonCtxt); } /* Make recursive call */ if (0 == ret) ret = cbor2json (pCborCtxt, pJsonCtxt); if (0 != ret) { OSCTXT* pctxt = (rtxErrGetErrorCnt(pJsonCtxt) > 0) ? pJsonCtxt : pCborCtxt; return LOG_RTERR (pctxt, ret); } } } } OSRTSAFEPUTCHAR (pJsonCtxt, endChar); break; } case OSRTCBOR_FLOAT: if (tag == OSRTCBOR_FALSEENC || tag == OSRTCBOR_TRUEENC) { OSBOOL boolval = (ub == OSRTCBOR_TRUEENC) ? TRUE : FALSE; ret = rtJsonEncBoolValue (pJsonCtxt, boolval); if (0 != ret) return LOG_RTERR (pJsonCtxt, ret); } else if (tag == OSRTCBOR_FLT16ENC || tag == OSRTCBOR_FLT32ENC || tag == OSRTCBOR_FLT64ENC) { OSDOUBLE fltval; ret = rtCborDecFloat (pCborCtxt, ub, &fltval); if (0 != ret) return LOG_RTERR (pCborCtxt, ret); /* Encode JSON */ ret = rtJsonEncDoubleValue (pJsonCtxt, fltval, 0); if (0 != ret) return LOG_RTERR (pJsonCtxt, ret); } else { ret = cborTagNotSupp (pCborCtxt, tag); } break; default: ret = cborTagNotSupp (pCborCtxt, tag); } return ret; } int main (int argc, char** argv) { OSCTXT jsonCtxt, cborCtxt; OSOCTET* pMsgBuf = 0; size_t msglen; OSBOOL verbose = FALSE; const char* filename = "message.cbor"; const char* outfname = "message.json"; int ret; /* Process command line arguments */ if (argc > 1) { int i; for (i = 1; i < argc; i++) { if (!strcmp (argv[i], "-v")) verbose = TRUE; else if (!strcmp (argv[i], "-i")) filename = argv[++i]; else if (!strcmp (argv[i], "-o")) outfname = argv[++i]; else { printf ("usage: cbor2json [-v] [-i <filename>] [-o filename]\n"); printf (" -v verbose mode: print trace info\n"); printf (" -i <filename> read CBOR msg from <filename>\n"); printf (" -o <filename> write JSON data to <filename>\n"); return 1; } } } /* Initialize context structures */ ret = rtxInitContext (&jsonCtxt); if (ret != 0) { rtxErrPrint (&jsonCtxt); return ret; } rtxErrInit(); /* rtxSetDiag (&jsonCtxt, verbose); */ ret = rtxInitContext (&cborCtxt); if (ret != 0) { rtxErrPrint (&cborCtxt); return ret; } /* rtxSetDiag (&cborCtxt, verbose); */ /* Create file input stream */ #if 0 /* Streaming not supported in open source version ret = rtxStreamFileCreateReader (&jsonCtxt, filename); */ #else /* Read input file into memory buffer */ ret = rtxFileReadBinary (&cborCtxt, filename, &pMsgBuf, &msglen); if (0 == ret) { ret = rtxInitContextBuffer (&cborCtxt, pMsgBuf, msglen); } #endif if (0 != ret) { rtxErrPrint (&jsonCtxt); rtxFreeContext (&jsonCtxt); rtxFreeContext (&cborCtxt); return ret; } /* Init JSON output buffer */ ret = rtxInitContextBuffer (&jsonCtxt, 0, 0); if (0 != ret) { rtxErrPrint (&jsonCtxt); rtxFreeContext (&jsonCtxt); rtxFreeContext (&cborCtxt); return ret; } /* Invoke 
the translation function */ ret = cbor2json (&cborCtxt, &jsonCtxt); if (0 == ret && cborCtxt.level != 0) ret = LOG_RTERR (&cborCtxt, RTERR_UNBAL); if (0 == ret && 0 != outfname) { /* Write encoded JSON data to output file */ OSRTSAFEPUTCHAR (&jsonCtxt, '\0'); /* null terminate buffer */ int fileret = rtxFileWriteText (outfname, (const char*)jsonCtxt.buffer.data); if (0 != fileret) { printf ("unable to write message data to '%s', status = %d\n", outfname, fileret); } } if (0 != ret) { rtxErrPrint (&jsonCtxt); rtxErrPrint (&cborCtxt); } rtxFreeContext (&jsonCtxt); rtxFreeContext (&cborCtxt); return ret; }
null
/* CBOR-to-JSON translation utility */ #include "rtjsonsrc/osrtjson.h" #include "rtcborsrc/osrtcbor.h" #include "rtxsrc/rtxCharStr.h" #include "rtxsrc/rtxContext.h" #include "rtxsrc/rtxFile.h" #include "rtxsrc/rtxHexDump.h" #include <stdio.h> #ifndef _NO_INT64_SUPPORT #define OSUINTTYPE OSUINT64 #define OSINTTYPE OSINT64 #define rtCborDecUInt rtCborDecUInt64 #define rtCborDecInt rtCborDecInt64 #else #define OSUINTTYPE OSUINT32 #define OSINTTYPE OSINT32 #define rtCborDecUInt rtCborDecUInt32 #define rtCborDecInt rtCborDecInt32 #endif static int cborTagNotSupp (OSCTXT* pctxt, OSOCTET tag) { char numbuf[10]; char errtext[80]; rtxUIntToCharStr (tag, numbuf, sizeof(numbuf), 0); rtxStrJoin (errtext, sizeof(errtext), "CBOR tag ", numbuf, 0, 0, 0); rtxErrAddStrParm (pctxt, errtext); return RTERR_NOTSUPP; } static int cborElemNameToJson (OSCTXT* pCborCtxt, OSCTXT* pJsonCtxt) { char* pElemName = 0; OSOCTET ub; int ret; /* Read byte from stream */ ret = rtxReadBytes (pCborCtxt, &ub, 1); if (0 != ret) return LOG_RTERR (pCborCtxt, ret); /* Decode element name (note: only string type is currently supported) */ ret = rtCborDecDynUTF8Str (pCborCtxt, ub, &pElemName); if (0 != ret) return LOG_RTERR (pCborCtxt, ret); /* Encode map element name as string */ ret = rtJsonEncStringValue (pJsonCtxt, (const OSUTF8CHAR*)pElemName); rtxMemFreePtr (pCborCtxt, pElemName); if (0 != ret) return LOG_RTERR (pJsonCtxt, ret); OSRTSAFEPUTCHAR (pJsonCtxt, ':'); return 0; } static int cbor2json (OSCTXT* pCborCtxt, OSCTXT* pJsonCtxt) { int ret = 0; OSOCTET tag, ub; /* Read byte from stream */ ret = rtxReadBytes (pCborCtxt, &ub, 1); if (0 != ret) return LOG_RTERR (pCborCtxt, ret); tag = ub >> 5; /* Switch on tag value */ switch (tag) { case OSRTCBOR_UINT: { OSUINTTYPE value; ret = rtCborDecUInt (pCborCtxt, ub, &value); if (0 != ret) return LOG_RTERR (pCborCtxt, ret); /* Encode JSON */ #ifndef _NO_INT64_SUPPORT ret = rtJsonEncUInt64Value (pJsonCtxt, value); #else ret = rtJsonEncUIntValue (pJsonCtxt, value); #endif if (0 != ret) return LOG_RTERR (pJsonCtxt, ret); break; } case OSRTCBOR_NEGINT: { OSINTTYPE value; ret = rtCborDecInt (pCborCtxt, ub, &value); if (0 != ret) return LOG_RTERR (pCborCtxt, ret); /* Encode JSON */ #ifndef _NO_INT64_SUPPORT ret = rtJsonEncInt64Value (pJsonCtxt, value); #else ret = rtJsonEncIntValue (pJsonCtxt, value); #endif if (0 != ret) return LOG_RTERR (pJsonCtxt, ret); break; } case OSRTCBOR_BYTESTR: { OSDynOctStr64 byteStr; ret = rtCborDecDynByteStr (pCborCtxt, ub, &byteStr); if (0 != ret) return LOG_RTERR (pCborCtxt, ret); /* Encode JSON */ ret = rtJsonEncHexStr (pJsonCtxt, byteStr.numocts, byteStr.data); rtxMemFreePtr (pCborCtxt, byteStr.data); if (0 != ret) return LOG_RTERR (pJsonCtxt, ret); break; } case OSRTCBOR_UTF8STR: { OSUTF8CHAR* utf8str; ret = rtCborDecDynUTF8Str (pCborCtxt, ub, (char**)&utf8str); if (0 != ret) return LOG_RTERR (pCborCtxt, ret); ret = rtJsonEncStringValue (pJsonCtxt, utf8str); rtxMemFreePtr (pCborCtxt, utf8str); if (0 != ret) return LOG_RTERR (pJsonCtxt, ret); break; } case OSRTCBOR_ARRAY: case OSRTCBOR_MAP: { OSOCTET len = ub & 0x1F; char startChar = (tag == OSRTCBOR_ARRAY) ? '[' : '{'; char endChar = (tag == OSRTCBOR_ARRAY) ? 
']' : '}'; OSRTSAFEPUTCHAR (pJsonCtxt, startChar); if (len == OSRTCBOR_INDEF) { OSBOOL first = TRUE; for (;;) { if (OSRTCBOR_MATCHEOC (pCborCtxt)) { pCborCtxt->buffer.byteIndex++; break; } if (!first) OSRTSAFEPUTCHAR (pJsonCtxt, ','); else first = FALSE; /* If map, decode object name */ if (tag == OSRTCBOR_MAP) { ret = cborElemNameToJson (pCborCtxt, pJsonCtxt); } /* Make recursive call */ if (0 == ret) ret = cbor2json (pCborCtxt, pJsonCtxt); if (0 != ret) { OSCTXT* pctxt = (rtxErrGetErrorCnt(pJsonCtxt) > 0) ? pJsonCtxt : pCborCtxt; return LOG_RTERR (pctxt, ret); } } } else { /* definite length */ OSSIZE nitems; /* Decode tag and number of items */ ret = rtCborDecSize (pCborCtxt, len, &nitems); if (0 == ret) { OSSIZE i; /* Loop to decode array items */ for (i = 0; i < nitems; i++) { if (0 != i) OSRTSAFEPUTCHAR (pJsonCtxt, ','); /* If map, decode object name */ if (tag == OSRTCBOR_MAP) { ret = cborElemNameToJson (pCborCtxt, pJsonCtxt); } /* Make recursive call */ if (0 == ret) ret = cbor2json (pCborCtxt, pJsonCtxt); if (0 != ret) { OSCTXT* pctxt = (rtxErrGetErrorCnt(pJsonCtxt) > 0) ? pJsonCtxt : pCborCtxt; return LOG_RTERR (pctxt, ret); } } } } OSRTSAFEPUTCHAR (pJsonCtxt, endChar); break; } case OSRTCBOR_FLOAT: if (tag == OSRTCBOR_FALSEENC || tag == OSRTCBOR_TRUEENC) { OSBOOL boolval = (ub == OSRTCBOR_TRUEENC) ? TRUE : FALSE; ret = rtJsonEncBoolValue (pJsonCtxt, boolval); if (0 != ret) return LOG_RTERR (pJsonCtxt, ret); } else if (tag == OSRTCBOR_FLT16ENC || tag == OSRTCBOR_FLT32ENC || tag == OSRTCBOR_FLT64ENC) { OSDOUBLE fltval; ret = rtCborDecFloat (pCborCtxt, ub, &fltval); if (0 != ret) return LOG_RTERR (pCborCtxt, ret); /* Encode JSON */ ret = rtJsonEncDoubleValue (pJsonCtxt, fltval, 0); if (0 != ret) return LOG_RTERR (pJsonCtxt, ret); } else { ret = cborTagNotSupp (pCborCtxt, tag); } break; default: ret = cborTagNotSupp (pCborCtxt, tag); } return ret; } int main (int argc, char** argv) { OSCTXT jsonCtxt, cborCtxt; OSOCTET* pMsgBuf = 0; size_t msglen; OSBOOL verbose = FALSE; const char* filename = "message.cbor"; const char* outfname = "message.json"; int ret; /* Process command line arguments */ if (argc > 1) { int i; for (i = 1; i < argc; i++) { if (!strcmp (argv[i], "-v")) verbose = TRUE; else if (!strcmp (argv[i], "-i")) filename = argv[++i]; else if (!strcmp (argv[i], "-o")) outfname = argv[++i]; else { printf ("usage: cbor2json [-v] [-i <filename>] [-o filename]\n"); printf (" -v verbose mode: print trace info\n"); printf (" -i <filename> read CBOR msg from <filename>\n"); printf (" -o <filename> write JSON data to <filename>\n"); return 1; } } } /* Initialize context structures */ ret = rtxInitContext (&jsonCtxt); if (ret != 0) { rtxErrPrint (&jsonCtxt); return ret; } rtxErrInit(); /* rtxSetDiag (&jsonCtxt, verbose); */ ret = rtxInitContext (&cborCtxt); if (ret != 0) { rtxErrPrint (&cborCtxt); return ret; } /* rtxSetDiag (&cborCtxt, verbose); */ /* Create file input stream */ #if 0 /* Streaming not supported in open source version ret = rtxStreamFileCreateReader (&jsonCtxt, filename); */ #else /* Read input file into memory buffer */ ret = rtxFileReadBinary (&cborCtxt, filename, &pMsgBuf, &msglen); if (0 == ret) { ret = rtxInitContextBuffer (&cborCtxt, pMsgBuf, msglen); } #endif if (0 != ret) { rtxErrPrint (&jsonCtxt); rtxFreeContext (&jsonCtxt); rtxFreeContext (&cborCtxt); return ret; } /* Init JSON output buffer */ ret = rtxInitContextBuffer (&jsonCtxt, 0, 0); if (0 != ret) { rtxErrPrint (&jsonCtxt); rtxFreeContext (&jsonCtxt); rtxFreeContext (&cborCtxt); return ret; } /* Invoke 
the translation function */ ret = cbor2json (&cborCtxt, &jsonCtxt); if (0 == ret && cborCtxt.level != 0) ret = LOG_RTERR (&cborCtxt, RTERR_UNBAL); if (0 == ret && 0 != outfname) { /* Write encoded JSON data to output file */ OSRTSAFEPUTCHAR (&jsonCtxt, '\0'); /* null terminate buffer */ int fileret = rtxFileWriteText (outfname, (const char*)jsonCtxt.buffer.data); if (0 != fileret) { printf ("unable to write message data to '%s', status = %d\n", outfname, fileret); } } if (0 != ret) { rtxErrPrint (&jsonCtxt); rtxErrPrint (&cborCtxt); } rtxFreeContext (&jsonCtxt); rtxFreeContext (&cborCtxt); return ret; }
null
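The cbor2json pair above also differs in a single spot: in the OSRTCBOR_UTF8STR case the patched version tests the return code of rtCborDecDynUTF8Str() before passing the decoded pointer to rtJsonEncStringValue(), while the vulnerable version uses the pointer unconditionally. A minimal self-contained sketch of that pattern, with placeholder functions standing in for the run-time calls:

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Placeholder decoder: fails on empty input and leaves *out untouched,
// otherwise hands back a heap-allocated copy the caller must free.
static int decode_utf8_string(const char* in, char** out)
{
    if (in == nullptr || *in == '\0')
        return -1;
    std::size_t n = std::strlen(in) + 1;
    char* copy = static_cast<char*>(std::malloc(n));
    if (copy == nullptr)
        return -1;
    std::memcpy(copy, in, n);
    *out = copy;
    return 0;
}

static int utf8_item_to_json(const char* in)
{
    char* str = nullptr;
    int ret = decode_utf8_string(in, &str);
    if (ret != 0)      // the check the patched OSRTCBOR_UTF8STR case adds
        return ret;    // never hand an unset pointer to the encoder

    std::printf("\"%s\"", str);  // stand-in for rtJsonEncStringValue()
    std::free(str);
    return 0;
}

int main()
{
    return utf8_item_to_json("hello") == 0 ? 0 : 1;
}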
206
CWE-787
CVE-2020-24870
/* -*- C++ -*- * Copyright 2019-2020 LibRaw LLC (info@libraw.org) * LibRaw uses code from dcraw.c -- Dave Coffin's raw photo decoder, dcraw.c is copyright 1997-2018 by Dave Coffin, dcoffin a cybercom o net. LibRaw do not use RESTRICTED code from dcraw.c LibRaw is free software; you can redistribute it and/or modify it under the terms of the one of two licenses as you choose: 1. GNU LESSER GENERAL PUBLIC LICENSE version 2.1 (See file LICENSE.LGPL provided in LibRaw distribution archive for details). 2. COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 (See file LICENSE.CDDL provided in LibRaw distribution archive for details). */ #include "../../internal/dcraw_defs.h" #include "../../internal/libraw_cameraids.h" // clang-format on static const struct { const int CorpId; const char *CorpName; } CorpTable[] = { {LIBRAW_CAMERAMAKER_Agfa, "AgfaPhoto"}, {LIBRAW_CAMERAMAKER_Apple, "Apple"}, {LIBRAW_CAMERAMAKER_Broadcom, "Broadcom"}, {LIBRAW_CAMERAMAKER_Canon, "Canon"}, {LIBRAW_CAMERAMAKER_Casio, "Casio"}, {LIBRAW_CAMERAMAKER_CINE, "CINE"}, {LIBRAW_CAMERAMAKER_Epson, "Epson"}, {LIBRAW_CAMERAMAKER_Fujifilm, "Fujifilm"}, {LIBRAW_CAMERAMAKER_Mamiya, "Mamiya"}, {LIBRAW_CAMERAMAKER_Motorola, "Motorola"}, {LIBRAW_CAMERAMAKER_Kodak, "Kodak"}, {LIBRAW_CAMERAMAKER_Konica, "Konica"}, {LIBRAW_CAMERAMAKER_Minolta, "Minolta"}, {LIBRAW_CAMERAMAKER_Leica, "Leica"}, {LIBRAW_CAMERAMAKER_Nikon, "Nikon"}, {LIBRAW_CAMERAMAKER_Nokia, "Nokia"}, {LIBRAW_CAMERAMAKER_Olympus, "Olympus"}, {LIBRAW_CAMERAMAKER_Ricoh, "Ricoh"}, {LIBRAW_CAMERAMAKER_Pentax, "Pentax"}, {LIBRAW_CAMERAMAKER_PhaseOne, "Phase One"}, {LIBRAW_CAMERAMAKER_PhaseOne, "PhaseOne"}, {LIBRAW_CAMERAMAKER_Samsung, "Samsung"}, {LIBRAW_CAMERAMAKER_Sigma, "Sigma"}, {LIBRAW_CAMERAMAKER_Sinar, "Sinar"}, {LIBRAW_CAMERAMAKER_Sony, "Sony"}, {LIBRAW_CAMERAMAKER_YI, "YI"}, // add corp. 
names below {LIBRAW_CAMERAMAKER_Alcatel, "Alcatel"}, {LIBRAW_CAMERAMAKER_Aptina, "Aptina"}, {LIBRAW_CAMERAMAKER_AVT, "AVT"}, {LIBRAW_CAMERAMAKER_Baumer, "Baumer"}, {LIBRAW_CAMERAMAKER_Clauss, "Clauss"}, {LIBRAW_CAMERAMAKER_Contax, "Contax"}, {LIBRAW_CAMERAMAKER_Creative, "Creative"}, {LIBRAW_CAMERAMAKER_DJI, "DJI"}, {LIBRAW_CAMERAMAKER_Foculus, "Foculus"}, {LIBRAW_CAMERAMAKER_Generic, "Generic"}, {LIBRAW_CAMERAMAKER_Gione, "Gione"}, {LIBRAW_CAMERAMAKER_GITUP, "GITUP"}, {LIBRAW_CAMERAMAKER_Hasselblad, "Hasselblad"}, {LIBRAW_CAMERAMAKER_HTC, "HTC"}, {LIBRAW_CAMERAMAKER_I_Mobile, "I_Mobile"}, {LIBRAW_CAMERAMAKER_Imacon, "Imacon"}, {LIBRAW_CAMERAMAKER_JK_Imaging, "JK Imaging"}, // Kodak {LIBRAW_CAMERAMAKER_Leaf, "Leaf"}, {LIBRAW_CAMERAMAKER_Lenovo, "Lenovo"}, {LIBRAW_CAMERAMAKER_LG, "LG"}, {LIBRAW_CAMERAMAKER_Logitech, "Logitech"}, {LIBRAW_CAMERAMAKER_Matrix, "Matrix"}, {LIBRAW_CAMERAMAKER_Meizu, "Meizu"}, {LIBRAW_CAMERAMAKER_Micron, "Micron"}, {LIBRAW_CAMERAMAKER_NGM, "NGM"}, {LIBRAW_CAMERAMAKER_OmniVison, "OmniVison"}, {LIBRAW_CAMERAMAKER_Panasonic, "Panasonic"}, {LIBRAW_CAMERAMAKER_Photron, "Photron"}, {LIBRAW_CAMERAMAKER_Pixelink, "Pixelink"}, {LIBRAW_CAMERAMAKER_Polaroid, "Polaroid"}, {LIBRAW_CAMERAMAKER_Rollei, "Rollei"}, {LIBRAW_CAMERAMAKER_RoverShot, "RoverShot"}, {LIBRAW_CAMERAMAKER_SMaL, "SMaL"}, {LIBRAW_CAMERAMAKER_ST_Micro, "ST Micro"}, {LIBRAW_CAMERAMAKER_THL, "THL"}, {LIBRAW_CAMERAMAKER_Xiaomi, "Xiaomi"}, {LIBRAW_CAMERAMAKER_XIAOYI, "Xiayi"}, {LIBRAW_CAMERAMAKER_Yuneec, "Yuneec"}, {LIBRAW_CAMERAMAKER_DXO, "DxO"}, {LIBRAW_CAMERAMAKER_RED, "Red"}, {LIBRAW_CAMERAMAKER_PhotoControl, "Photo Control"}, {LIBRAW_CAMERAMAKER_Google, "Google"}, {LIBRAW_CAMERAMAKER_GoPro, "GoPro"}, {LIBRAW_CAMERAMAKER_Parrot, "Parrot"}, {LIBRAW_CAMERAMAKER_Zeiss, "Zeiss"} }; // clang-format on int LibRaw::setMakeFromIndex(unsigned makei) { if (makei <= LIBRAW_CAMERAMAKER_Unknown || makei >= LIBRAW_CAMERAMAKER_TheLastOne) return 0; for (int i = 0; i < int(sizeof CorpTable / sizeof *CorpTable); i++) if ((unsigned)CorpTable[i].CorpId == makei) { strcpy(normalized_make, CorpTable[i].CorpName); maker_index = makei; return 1; } return 0; } const char *LibRaw::cameramakeridx2maker(unsigned maker) { for (int i = 0; i < int(sizeof CorpTable / sizeof *CorpTable); i++) if((unsigned)CorpTable[i].CorpId == maker) return CorpTable[i].CorpName; return 0; } void LibRaw::fixupArri() { struct alist_t { const char *a_model; const char *a_software; ushort a_width,a_height; int a_black; unsigned a_filters; float a_aspect; } alist[] = { {"ALEXA65", "Alexa65 XT", 6560 ,3100, 256,0x49494949,1.f}, {"ALEXALF", "Alexa LF Plus W", 3840 ,2160, 256,0x49494949,1.0f }, {"ALEXALF", "Alexa LF Plus W", 4448 ,1856, 256,0x49494949,0.75f }, {"ALEXALF", "Alexa LF Plus W", 4448 ,3096, 256,0x49494949,1.f }, {"ALEXA", "Alexa Plus 4:3 SXT", 2880 ,1620, 256,0x61616161,.75f}, {"ALEXA", "Alexa Plus 4:3 SXT", 3168 ,1782, 256,0x61616161,0.75f}, {"ALEXA", "Alexa Plus 4:3 SXT", 3424 ,2202, 256,0x61616161,1.f}, {"ALEXA", "Alexa Plus 4:3 SXT", 2592 ,2160, 256,0x61616161,1.12f}, {"ALEXA", "Alexa Plus 4:3 XT", 2592 ,2160, 256,0x61616161,1.12f}, {"ALEXA", "Alexa Plus 4:3 XT", 2880 ,2160, 256,0x61616161,1.f}, {"ALEXA", "Alexa Plus 4:3 XT", 2880 ,1620, 256,0x61616161,0.75f}, {"ALEXA", "Alexa Plus 4:3 XT", 3424 ,2202, 256,0x61616161,1.f}, }; for(int i = 0; i < int(sizeof(alist)/sizeof(alist[0])); i++) if(!strncasecmp(model,alist[i].a_model,strlen(alist[i].a_model)) && software && !strncasecmp(software,alist[i].a_software,strlen(alist[i].a_software)) && width == 
alist[i].a_width && height == alist[i].a_height) { filters = alist[i].a_filters; black = alist[i].a_black; pixel_aspect = alist[i].a_aspect; strcpy(model,software); software[0]=0; return; } } /* Identify which camera created this file, and set global variables accordingly. */ void LibRaw::identify() { // clang-format off static const ushort canon[][11] = { // raw_width, raw_height, left_margin, top_margin, width_decrement, // height_decrement, mask01, mask03, mask11, // mask13, CFA_filters. { 1944, 1416, 0, 0, 48, 0 }, // 00 "PowerShot Pro90 IS" { 2144, 1560, 4, 8, 52, 2, 0, 0, 0, 25 }, // 01 "PowerShot S30", "PowerShot G1" { 2224, 1456, 48, 6, 0, 2 }, // 02 "EOS D30" { 2376, 1728, 12, 6, 52, 2 }, // 03 "PowerShot G2", "PowerShot S40", "PowerShot G3", "PowerShot S45" { 2672, 1968, 12, 6, 44, 2 }, // 04 "PowerShot G5", "PowerShot S50", "PowerShot S60" { 3152, 2068, 64, 12, 0, 0, 16 }, // 05 "EOS D60", "EOS 10D", "EOS 300D" { 3160, 2344, 44, 12, 4, 4 }, // 06 "PowerShot G6", "PowerShot S70" { 3344, 2484, 4, 6, 52, 6 }, // 07 "PowerShot Pro1" { 3516, 2328, 42, 14, 0, 0 }, // 08 "EOS 350D" { 3596, 2360, 74, 12, 0, 0 }, // 09 "EOS-1D Mark II", "EOS 20D", "EOS-1D Mark II N", "EOS 30D" { 3744, 2784, 52, 12, 8, 12 }, // 10 "PowerShot G11", "PowerShot S90", "PowerShot G12", "PowerShot S95" { 3944, 2622, 30, 18, 6, 2 }, // 11 "EOS 40D" { 3948, 2622, 42, 18, 0, 2 }, // 12 "EOS 400D", "EOS 1000D" { 3984, 2622, 76, 20, 0, 2, 14 }, // 13 "EOS-1D Mark III" { 4032, 2656, 112, 44, 10, 0 }, // 14 APS-C crop mode: "EOS 6D Mark II"??, "EOS RP" { 4104, 3048, 48, 12, 24, 12 }, // 15 "PowerShot G9" { 4116, 2178, 4, 2, 0, 0 }, // 16 ?? { 4152, 2772, 192, 12, 0, 0 }, // 17 "PowerShot SX1 IS" { 4160, 3124, 104, 11, 8, 65 }, // 18 "PowerShot S100 (new)", "PowerShot S100V", "PowerShot G15", "PowerShot S110 (new)" { 4176, 3062, 96, 17, 8, 0, 0, 16, 0, 7, 0x49 }, // 19 "PowerShot SX50 HS" { 4192, 3062, 96, 17, 24, 0, 0, 16, 0, 0, 0x49 }, // 20 "PowerShot G16", "PowerShot S120" { 4312, 2876, 22, 18, 0, 2 }, // 21 "EOS 450D" { 4352, 2850, 144, 46, 0, 0 }, // 22 APS-C crop mode: "EOS R" { 4352, 2874, 62, 18, 0, 0 }, // 23 "EOS 1100D" { 4476, 2954, 90, 34, 0, 0 }, // 24 "EOS 5D" { 4480, 3348, 12, 10, 36, 12, 0, 0, 0, 18, 0x49 }, // 25 "PowerShot G10" { 4480, 3366, 80, 50, 0, 0 }, // 26 "PowerShot G1 X Mark II" { 4496, 3366, 80, 50, 12, 0 }, // 27 "PowerShot G1 X" { 4768, 3516, 96, 16, 0, 0, 0, 16 }, // 28 "PowerShot SX60 HS" { 4832, 3204, 62, 26, 0, 0 }, // 29 "EOS 500D" { 4832, 3228, 62, 51, 0, 0 }, // 30 "EOS 50D" { 5108, 3349, 98, 13, 0, 0 }, // 31 "EOS-1Ds Mark II" { 5120, 3318, 142, 45, 62, 0 }, // 32 "EOS-1D Mark IV" { 5280, 3528, 72, 52, 0, 0 }, // 33 "EOS M10", "EOS 650D", "EOS 700D", "EOS M", "EOS 100D", "EOS M2" { 5344, 3516, 142, 51, 0, 0 }, // 34 "EOS 550D", "EOS 600D", "EOS 60D", "EOS 1200D", "EOS 1300D", "EOS 3000D" { 5344, 3584, 126, 100, 0, 2 }, // 35 "EOS-1D X", "EOS-1D C" { 5344, 3950, 98, 18, 0, 0, 0, 24, 0, 0 }, // 36 "PowerShot SX70 HS" { 5360, 3516, 158, 51, 0, 0 }, // 37 "EOS 7D" { 5568, 3708, 72, 38, 0, 0 }, // 38; "EOS 7D Mark II", "EOS 6D", "EOS 70D", "EOS-1D X MARK II" { 5632, 3710, 96, 17, 0, 0, 0, 16, 0, 0, 0x49 }, // 39 "PowerShot G7 X", "PowerShot G3 X", "PowerShot G9 X", "PowerShot G5 X", "PowerShot G7 X Mark II", "PowerShot G9 X Mark II" { 5712, 3774, 62, 20, 10, 2 }, // 40 "EOS-1Ds Mark III" { 5792, 3804, 158, 51, 0, 0 }, // 41 "EOS 5D Mark II" { 5920, 3950, 122, 80, 2, 0 }, // 42 "EOS 5D Mark III" { 6096, 4051, 76, 35, 0, 0 }, // 43 "EOS 1500D" { 6096, 4056, 72, 34, 0, 0 }, // 44 "EOS 
M3", "EOS 760D", "EOS 750D" { 6288, 4056, 264, 36, 0, 0 }, // 45 "EOS M5", "EOS M100", "EOS M6", "PowerShot G1 X Mark III", "EOS 80D", "EOS 800D", "EOS 77D", "EOS 200D", "EOS 250D", "EOS M50" { 6384, 4224, 120, 44, 0, 0 }, // 46 "EOS 6D Mark II", "EOS RP" { 6880, 4544, 136, 42, 0, 0 }, // 47 "EOS 5D Mark IV" { 6888, 4546, 146, 48, 0, 0 }, // 48 "EOS R" { 7128, 4732, 144, 72, 0, 0 }, // 49 "EOS M6 II", "EOS 90D" { 8896, 5920, 160, 64, 0, 0 }, // 50 "EOS 5DS", "EOS 5DS R" }; static const libraw_custom_camera_t const_table[] = { { 786432, 1024, 768, 0, 0, 0, 0, 0, 0x94, 0, 0, "AVT", "F-080C" }, { 1447680, 1392, 1040, 0, 0, 0, 0, 0, 0x94, 0, 0, "AVT", "F-145C" }, { 1920000, 1600, 1200, 0, 0, 0, 0, 0, 0x94, 0, 0, "AVT", "F-201C" }, { 5067304, 2588, 1958, 0, 0, 0, 0, 0, 0x94, 0, 0, "AVT", "F-510C" }, { 5067316, 2588, 1958, 0, 0, 0, 0, 0, 0x94, 0, 0, "AVT", "F-510C", 12 }, { 10134608, 2588, 1958, 0, 0, 0, 0, 9, 0x94, 0, 0, "AVT", "F-510C" }, { 10134620, 2588, 1958, 0, 0, 0, 0, 9, 0x94, 0, 0, "AVT", "F-510C", 12 }, { 16157136, 3272, 2469, 0, 0, 0, 0, 9, 0x94, 0, 0, "AVT", "F-810C" }, { 15980544, 3264, 2448, 0, 0, 0, 0, 8, 0x61, 0, 1, "AgfaPhoto", "DC-833m" }, { 9631728, 2532, 1902, 0, 0, 0, 0, 96, 0x61, 0, 0, "Alcatel", "5035D" }, { 31850496, 4608, 3456, 0, 0, 0, 0, 0, 0x94, 0, 0, "GITUP", "GIT2 4:3" }, { 23887872, 4608, 2592, 0, 0, 0, 0, 0, 0x94, 0, 0, "GITUP", "GIT2 16:9" }, { 32257024, 4624, 3488, 8, 2, 16, 2, 0, 0x94, 0, 0, "GITUP", "GIT2P 4:3" }, { 24192768, 4624, 2616, 8, 2, 16, 2, 0, 0x94, 0, 0, "GITUP", "GIT2P 16:9" }, { 18016000, 4000, 2252, 0, 0, 0, 0, 0, 0x94, 0, 0, "GITUP", "G3DUO 16:9" }, // {24000000, 4000, 3000, 0, 0, 0, 0, 0, 0x94, 0, 0, "GITUP", // "G3DUO 4:3"}, // Conflict w/ Samsung WB550 // Android Raw dumps id start // File Size in bytes Horizontal Res Vertical Flag then bayer order eg // 0x16 bbgr 0x94 rggb { 1540857, 2688, 1520, 0, 0, 0, 0, 1, 0x61, 0, 0, "Samsung", "S3" }, { 2658304, 1212, 1096, 0, 0, 0, 0, 1, 0x16, 0, 0, "LG", "G3FrontMipi" }, { 2842624, 1296, 1096, 0, 0, 0, 0, 1, 0x16, 0, 0, "LG", "G3FrontQCOM" }, { 2969600, 1976, 1200, 0, 0, 0, 0, 1, 0x16, 0, 0, "Xiaomi", "MI3wMipi" }, { 3170304, 1976, 1200, 0, 0, 0, 0, 1, 0x16, 0, 0, "Xiaomi", "MI3wQCOM" }, { 3763584, 1584, 1184, 0, 0, 0, 0, 96, 0x61, 0, 0, "I_Mobile", "I_StyleQ6" }, { 5107712, 2688, 1520, 0, 0, 0, 0, 1, 0x61, 0, 0, "OmniVisi", "UltraPixel1" }, { 5382640, 2688, 1520, 0, 0, 0, 0, 1, 0x61, 0, 0, "OmniVisi", "UltraPixel2" }, { 5664912, 2688, 1520, 0, 0, 0, 0, 1, 0x61, 0, 0, "OmniVisi", "4688" }, { 5664912, 2688, 1520, 0, 0, 0, 0, 1, 0x61, 0, 0, "OmniVisi", "4688" }, { 5364240, 2688, 1520, 0, 0, 0, 0, 1, 0x61, 0, 0, "OmniVisi", "4688" }, { 6299648, 2592, 1944, 0, 0, 0, 0, 1, 0x16, 0, 0, "OmniVisi", "OV5648" }, { 6721536, 2592, 1944, 0, 0, 0, 0, 0, 0x16, 0, 0, "OmniVisi", "OV56482" }, { 6746112, 2592, 1944, 0, 0, 0, 0, 0, 0x16, 0, 0, "HTC", "OneSV" }, { 9631728, 2532, 1902, 0, 0, 0, 0, 96, 0x61, 0, 0, "Sony", "5mp" }, { 9830400, 2560, 1920, 0, 0, 0, 0, 96, 0x61, 0, 0, "NGM", "ForwardArt" }, { 10186752, 3264, 2448, 0, 0, 0, 0, 1, 0x94, 0, 0, "Sony", "IMX219-mipi 8mp" }, { 10223360, 2608, 1944, 0, 0, 0, 0, 96, 0x16, 0, 0, "Sony", "IMX" }, { 10782464, 3282, 2448, 0, 0, 0, 0, 0, 0x16, 0, 0, "HTC", "MyTouch4GSlide" }, { 10788864, 3282, 2448, 0, 0, 0, 0, 0, 0x16, 0, 0, "Xperia", "L" }, { 15967488, 3264, 2446, 0, 0, 0, 0, 96, 0x16, 0, 0, "OmniVison", "OV8850" }, { 16224256, 4208, 3082, 0, 0, 0, 0, 1, 0x16, 0, 0, "LG", "G3MipiL" }, { 16424960, 4208, 3120, 0, 0, 0, 0, 1, 0x16, 0, 0, "IMX135", "MipiL" }, { 17326080, 
4164, 3120, 0, 0, 0, 0, 1, 0x16, 0, 0, "LG", "G3LQCom" }, { 17522688, 4212, 3120, 0, 0, 0, 0, 0, 0x16, 0, 0, "Sony", "IMX135-QCOM" }, { 19906560, 4608, 3456, 0, 0, 0, 0, 1, 0x16, 0, 0, "Gione", "E7mipi" }, { 19976192, 5312, 2988, 0, 0, 0, 0, 1, 0x16, 0, 0, "LG", "G4" }, { 20389888, 4632, 3480, 0, 0, 0, 0, 1, 0x16, 0, 0, "Xiaomi", "RedmiNote3Pro" }, { 20500480, 4656, 3496, 0, 0, 0, 0, 1, 0x94, 0, 0, "Sony", "IMX298-mipi 16mp" }, { 21233664, 4608, 3456, 0, 0, 0, 0, 1, 0x16, 0, 0, "Gione", "E7qcom" }, { 26023936, 4192, 3104, 0, 0, 0, 0, 96, 0x94, 0, 0, "THL", "5000" }, { 26257920, 4208, 3120, 0, 0, 0, 0, 96, 0x94, 0, 0, "Sony", "IMX214" }, { 26357760, 4224, 3120, 0, 0, 0, 0, 96, 0x61, 0, 0, "OV", "13860" }, { 41312256, 5248, 3936, 0, 0, 0, 0, 96, 0x61, 0, 0, "Meizu", "MX4" }, { 42923008, 5344, 4016, 0, 0, 0, 0, 96, 0x61, 0, 0, "Sony", "IMX230" }, // Android Raw dumps id end { 20137344, 3664, 2748, 0, 0, 0, 0, 0x40, 0x49, 0, 0, "Aptina", "MT9J003", 0xffff }, { 2868726, 1384, 1036, 0, 0, 0, 0, 64, 0x49, 0, 8, "Baumer", "TXG14", 1078 }, { 5298000, 2400, 1766, 12, 12, 44, 2, 40, 0x94, 0, 2, "Canon", "PowerShot SD300" }, // chdk hack { 6553440, 2664, 1968, 4, 4, 44, 4, 40, 0x94, 0, 2, "Canon", "PowerShot A460" }, // chdk hack { 6573120, 2672, 1968, 12, 8, 44, 0, 40, 0x94, 0, 2, "Canon", "PowerShot A610" }, // chdk hack { 6653280, 2672, 1992, 10, 6, 42, 2, 40, 0x94, 0, 2, "Canon", "PowerShot A530" }, // chdk hack { 7710960, 2888, 2136, 44, 8, 4, 0, 40, 0x94, 0, 2, "Canon", "PowerShot S3 IS" }, // chdk hack { 9219600, 3152, 2340, 36, 12, 4, 0, 40, 0x94, 0, 2, "Canon", "PowerShot A620" }, // chdk hack { 9243240, 3152, 2346, 12, 7, 44, 13, 40, 0x49, 0, 2, "Canon", "PowerShot A470" }, // chdk hack { 10341600, 3336, 2480, 6, 5, 32, 3, 40, 0x94, 0, 2, "Canon", "PowerShot A720 IS" }, // chdk hack { 10383120, 3344, 2484, 12, 6, 44, 6, 40, 0x94, 0, 2, "Canon", "PowerShot A630" }, // chdk hack { 12945240, 3736, 2772, 12, 6, 52, 6, 40, 0x94, 0, 2, "Canon", "PowerShot A640" }, // chdk hack { 15636240, 4104, 3048, 48, 12, 24, 12, 40, 0x94, 0, 2, "Canon", "PowerShot A650" }, // chdk hack { 15467760, 3720, 2772, 6, 12, 30, 0, 40, 0x94, 0, 2, "Canon", "PowerShot SX110 IS" }, // chdk hack { 15534576, 3728, 2778, 12, 9, 44, 9, 40, 0x94, 0, 2, "Canon", "PowerShot SX120 IS" }, // chdk hack { 18653760, 4080, 3048, 24, 12, 24, 12, 40, 0x94, 0, 2, "Canon", "PowerShot SX20 IS" }, // chdk hack { 18763488, 4104, 3048, 10, 22, 82, 22, 8, 0x49, 0, 0, "Canon", "PowerShot D10" }, // ? chdk hack ? 
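      // Note: each row follows the libraw_custom_camera_t layout used when the
      // table is matched against the file size further below -- roughly
      //   { fsize, raw_width, raw_height, left_margin, top_margin,
      //     right_margin, bottom_margin, load_flags, CFA_byte, max, flags,
      //     make, model [, data_offset] }.
      // The CFA byte is replicated into the full filter pattern
      // (filters = 0x1010101U * cf), and the bit depth falls out of the size:
      // e.g. for the "PowerShot D10" row above,
      //   18763488 * 8 / (4104 * 3048) = 12 bits per packed Bayer sample.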
{ 19131120, 4168, 3060, 92, 16, 4, 1, 40, 0x94, 0, 2, "Canon", "PowerShot SX220 HS" }, // chdk hack { 21936096, 4464, 3276, 25, 10, 73, 12, 40, 0x16, 0, 2, "Canon", "PowerShot SX30 IS" }, // chdk hack { 24724224, 4704, 3504, 8, 16, 56, 8, 40, 0x49, 0, 2, "Canon", "PowerShot A3300 IS" }, // chdk hack { 30858240, 5248, 3920, 8, 16, 56, 16, 40, 0x94, 0, 2, "Canon", "IXUS 160" }, // chdk hack { 1976352, 1632, 1211, 0, 2, 0, 1, 0, 0x94, 0, 1, "Casio", "QV-2000UX" }, { 3217760, 2080, 1547, 0, 0, 10, 1, 0, 0x94, 0, 1, "Casio", "QV-3*00EX" }, { 6218368, 2585, 1924, 0, 0, 9, 0, 0, 0x94, 0, 1, "Casio", "QV-5700" }, { 7816704, 2867, 2181, 0, 0, 34, 36, 0, 0x16, 0, 1, "Casio", "EX-Z60" }, { 2937856, 1621, 1208, 0, 0, 1, 0, 0, 0x94, 7, 13, "Casio", "EX-S20" }, { 4948608, 2090, 1578, 0, 0, 32, 34, 0, 0x94, 7, 1, "Casio", "EX-S100" }, { 6054400, 2346, 1720, 2, 0, 32, 0, 0, 0x94, 7, 1, "Casio", "QV-R41" }, { 7426656, 2568, 1928, 0, 0, 0, 0, 0, 0x94, 0, 1, "Casio", "EX-P505" }, { 7530816, 2602, 1929, 0, 0, 22, 0, 0, 0x94, 7, 1, "Casio", "QV-R51" }, { 7542528, 2602, 1932, 0, 0, 32, 0, 0, 0x94, 7, 1, "Casio", "EX-Z50" }, { 7562048, 2602, 1937, 0, 0, 25, 0, 0, 0x16, 7, 1, "Casio", "EX-Z500" }, { 7753344, 2602, 1986, 0, 0, 32, 26, 0, 0x94, 7, 1, "Casio", "EX-Z55" }, { 9313536, 2858, 2172, 0, 0, 14, 30, 0, 0x94, 7, 1, "Casio", "EX-P600" }, { 10834368, 3114, 2319, 0, 0, 27, 0, 0, 0x94, 0, 1, "Casio", "EX-Z750" }, { 10843712, 3114, 2321, 0, 0, 25, 0, 0, 0x94, 0, 1, "Casio", "EX-Z75" }, { 10979200, 3114, 2350, 0, 0, 32, 32, 0, 0x94, 7, 1, "Casio", "EX-P700" }, { 12310144, 3285, 2498, 0, 0, 6, 30, 0, 0x94, 0, 1, "Casio", "EX-Z850" }, { 12489984, 3328, 2502, 0, 0, 47, 35, 0, 0x94, 0, 1, "Casio", "EX-Z8" }, { 15499264, 3754, 2752, 0, 0, 82, 0, 0, 0x94, 0, 1, "Casio", "EX-Z1050" }, { 18702336, 4096, 3044, 0, 0, 24, 0, 80, 0x94, 7, 1, "Casio", "EX-ZR100" }, { 7684000, 2260, 1700, 0, 0, 0, 0, 13, 0x94, 0, 1, "Casio", "QV-4000" }, { 787456, 1024, 769, 0, 1, 0, 0, 0, 0x49, 0, 0, "Creative", "PC-CAM 600" }, { 28829184, 4384, 3288, 0, 0, 0, 0, 36, 0x61, 0, 0, "DJI" }, { 15151104, 4608, 3288, 0, 0, 0, 0, 0, 0x94, 0, 0, "Matrix" }, { 3840000, 1600, 1200, 0, 0, 0, 0, 65, 0x49, 0, 0, "Foculus", "531C" }, { 307200, 640, 480, 0, 0, 0, 0, 0, 0x94, 0, 0, "Generic" }, { 62464, 256, 244, 1, 1, 6, 1, 0, 0x8d, 0, 0, "Kodak", "DC20" }, { 124928, 512, 244, 1, 1, 10, 1, 0, 0x8d, 0, 0, "Kodak", "DC20" }, { 1652736, 1536, 1076, 0, 52, 0, 0, 0, 0x61, 0, 0, "Kodak", "DCS200" }, { 4159302, 2338, 1779, 1, 33, 1, 2, 0, 0x94, 0, 0, "Kodak", "C330" }, { 4162462, 2338, 1779, 1, 33, 1, 2, 0, 0x94, 0, 0, "Kodak", "C330", 3160 }, { 2247168, 1232, 912, 0, 0, 16, 0, 0, 0x00, 0, 0, "Kodak", "C330" }, { 3370752, 1232, 912, 0, 0, 16, 0, 0, 0x00, 0, 0, "Kodak", "C330" }, { 6163328, 2864, 2152, 0, 0, 0, 0, 0, 0x94, 0, 0, "Kodak", "C603" }, { 6166488, 2864, 2152, 0, 0, 0, 0, 0, 0x94, 0, 0, "Kodak", "C603", 3160 }, { 460800, 640, 480, 0, 0, 0, 0, 0, 0x00, 0, 0, "Kodak", "C603" }, { 9116448, 2848, 2134, 0, 0, 0, 0, 0, 0x00, 0, 0, "Kodak", "C603" }, { 12241200, 4040, 3030, 2, 0, 0, 13, 0, 0x49, 0, 0, "Kodak", "12MP" }, { 12272756, 4040, 3030, 2, 0, 0, 13, 0, 0x49, 0, 0, "Kodak", "12MP", 31556 }, { 18000000, 4000, 3000, 0, 0, 0, 0, 0, 0x00, 0, 0, "Kodak", "12MP" }, { 614400, 640, 480, 0, 3, 0, 0, 64, 0x94, 0, 0, "Kodak", "KAI-0340" }, { 15360000, 3200, 2400, 0, 0, 0, 0, 96, 0x16, 0, 0, "Lenovo", "A820" }, { 3884928, 1608, 1207, 0, 0, 0, 0, 96, 0x16, 0, 0, "Micron", "2010", 3212 }, { 1138688, 1534, 986, 0, 0, 0, 0, 0, 0x61, 0, 0, "Minolta", "RD175", 513 }, { 
1581060, 1305, 969, 0, 0, 18, 6, 6, 0x1e, 4, 1, "Nikon", "E900" }, // "diag raw" hack { 2465792, 1638, 1204, 0, 0, 22, 1, 6, 0x4b, 5, 1, "Nikon", "E950" }, // "diag raw" hack; possibly also Nikon E700, E800, E775; // Olympus C-2020Z { 2940928, 1616, 1213, 0, 0, 0, 7, 30, 0x94, 0, 1, "Nikon", "E2100" }, // "diag raw" hack; also Nikon E2500 { 4771840, 2064, 1541, 0, 0, 0, 1, 6, 0xe1, 0, 1, "Nikon", "E990" }, // "diag raw" hack; possibly also Nikon E880, E885, E995; // Olympus C-3030Z { 4775936, 2064, 1542, 0, 0, 0, 0, 30, 0x94, 0, 1, "Nikon", "E3700" }, // "diag raw" hack; Nikon E3100, E3200, E3500; // Pentax "Optio 33WR"; possibly also Olympus C-740UZ { 5865472, 2288, 1709, 0, 0, 0, 1, 6, 0xb4, 0, 1, "Nikon", "E4500" }, // "diag raw" hack; possibly also Olympus C-4040Z { 5869568, 2288, 1710, 0, 0, 0, 0, 6, 0x16, 0, 1, "Nikon", "E4300" }, // "diag raw" hack; also Minolta "DiMAGE Z2" { 7438336, 2576, 1925, 0, 0, 0, 1, 6, 0xb4, 0, 1, "Nikon", "E5000" }, // also Nikon E5700 { 8998912, 2832, 2118, 0, 0, 0, 0, 30, 0x94, 7, 1, "Nikon", "COOLPIX S6" }, // "diag raw" hack { 5939200, 2304, 1718, 0, 0, 0, 0, 30, 0x16, 0, 0, "Olympus", "C-770UZ" }, // possibly also Olympus C-4100Z, C-765UZ { 3178560, 2064, 1540, 0, 0, 0, 0, 0, 0x94, 0, 1, "Pentax", "Optio S V1.01" }, { 4841984, 2090, 1544, 0, 0, 22, 0, 0, 0x94, 7, 1, "Pentax", "Optio S" }, { 6114240, 2346, 1737, 0, 0, 22, 0, 0, 0x94, 7, 1, "Pentax", "Optio S4" }, { 10702848, 3072, 2322, 0, 0, 0, 21, 30, 0x94, 0, 1, "Pentax", "Optio 750Z" }, { 4147200, 1920, 1080, 0, 0, 0, 0, 0, 0x49, 0, 0, "Photron", "BC2-HD" }, { 4151666, 1920, 1080, 0, 0, 0, 0, 0, 0x49, 0, 0, "Photron", "BC2-HD", 8 }, { 13248000, 2208, 3000, 0, 0, 0, 0, 13, 0x61, 0, 0, "Pixelink", "A782" }, { 6291456, 2048, 1536, 0, 0, 0, 0, 96, 0x61, 0, 0, "RoverShot", "3320AF" }, { 311696, 644, 484, 0, 0, 0, 0, 0, 0x16, 0, 8, "ST Micro", "STV680 VGA" }, { 16098048, 3288, 2448, 0, 0, 24, 0, 9, 0x94, 0, 1, "Samsung", "S85" }, // hack { 16215552, 3312, 2448, 0, 0, 48, 0, 9, 0x94, 0, 1, "Samsung", "S85" }, // hack { 20487168, 3648, 2808, 0, 0, 0, 0, 13, 0x94, 5, 1, "Samsung", "WB550" }, { 24000000, 4000, 3000, 0, 0, 0, 0, 13, 0x94, 5, 1, "Samsung", "WB550" }, { 12582980, 3072, 2048, 0, 0, 0, 0, 33, 0x61, 0, 0, "Sinar", "", 68 }, // Sinarback 23; same res. 
as Leaf Volare & Cantare { 33292868, 4080, 4080, 0, 0, 0, 0, 33, 0x61, 0, 0, "Sinar", "", 68 }, // Sinarback 44 { 44390468, 4080, 5440, 0, 0, 0, 0, 33, 0x61, 0, 0, "Sinar", "", 68 }, // Sinarback 54 { 1409024, 1376, 1024, 0, 0, 1, 0, 0, 0x49, 0, 0, "Sony", "XCD-SX910CR" }, { 2818048, 1376, 1024, 0, 0, 1, 0, 97, 0x49, 0, 0, "Sony", "XCD-SX910CR" }, }; libraw_custom_camera_t table[64 + sizeof(const_table) / sizeof(const_table[0])]; // clang-format on char head[64] = {0}, *cp; int hlen, fsize, flen, zero_fsize = 1, i, c; struct jhead jh; unsigned camera_count = parse_custom_cameras(64, table, imgdata.params.custom_camera_strings); for (int q = 0; q < int(sizeof(const_table) / sizeof(const_table[0])); q++) memmove(&table[q + camera_count], &const_table[q], sizeof(const_table[0])); camera_count += sizeof(const_table) / sizeof(const_table[0]); tiff_flip = flip = filters = UINT_MAX; /* unknown */ raw_height = raw_width = fuji_width = fuji_layout = cr2_slice[0] = 0; maximum = height = width = top_margin = left_margin = 0; cdesc[0] = desc[0] = artist[0] = make[0] = model[0] = model2[0] = 0; iso_speed = shutter = aperture = focal_len = 0; unique_id = 0ULL; tiff_nifds = 0; is_NikonTransfer = 0; is_Sony = 0; is_pana_raw = 0; maker_index = LIBRAW_CAMERAMAKER_Unknown; is_4K_RAFdata = 0; FujiCropMode = 0; is_PentaxRicohMakernotes = 0; normalized_model[0] = 0; normalized_make[0] = 0; CM_found = 0; memset(tiff_ifd, 0, sizeof tiff_ifd); libraw_internal_data.unpacker_data.crx_track_selected = -1; libraw_internal_data.unpacker_data.CR3_CTMDtag = 0; imgdata.makernotes.hasselblad.nIFD_CM[0] = imgdata.makernotes.hasselblad.nIFD_CM[1] = -1; imgdata.makernotes.kodak.ISOCalibrationGain = 1.0f; imCommon.CameraTemperature = imCommon.SensorTemperature = imCommon.SensorTemperature2 = imCommon.LensTemperature = imCommon.AmbientTemperature = imCommon.BatteryTemperature = imCommon.exifAmbientTemperature = -1000.0f; imgdata.color.ExifColorSpace = LIBRAW_COLORSPACE_Unknown; for (i = 0; i < LIBRAW_IFD_MAXCOUNT; i++) { tiff_ifd[i].dng_color[0].illuminant = tiff_ifd[i].dng_color[1].illuminant = 0xffff; for (int c = 0; c < 4; c++) tiff_ifd[i].dng_levels.analogbalance[c] = 1.0f; } memset(gpsdata, 0, sizeof gpsdata); memset(cblack, 0, sizeof cblack); memset(white, 0, sizeof white); memset(mask, 0, sizeof mask); thumb_offset = thumb_length = thumb_width = thumb_height = 0; load_raw = thumb_load_raw = 0; write_thumb = &LibRaw::jpeg_thumb; data_offset = meta_offset = meta_length = tiff_bps = tiff_compress = 0; kodak_cbpp = zero_after_ff = dng_version = load_flags = 0; timestamp = shot_order = tiff_samples = black = is_foveon = 0; mix_green = profile_length = data_error = zero_is_bad = 0; pixel_aspect = is_raw = raw_color = 1; tile_width = tile_length = 0; metadata_blocks = 0; for (i = 0; i < 4; i++) { cam_mul[i] = i == 1; pre_mul[i] = i < 3; FORC3 cmatrix[c][i] = 0; FORC3 rgb_cam[c][i] = c == i; } colors = 3; for (i = 0; i < 0x10000; i++) curve[i] = i; order = get2(); hlen = get4(); fseek(ifp, 0, SEEK_SET); if (fread(head, 1, 64, ifp) < 64) throw LIBRAW_EXCEPTION_IO_CORRUPT; libraw_internal_data.unpacker_data.lenRAFData = libraw_internal_data.unpacker_data.posRAFData = 0; fseek(ifp, 0, SEEK_END); flen = fsize = ftell(ifp); if ((cp = (char *)memmem(head, 32, (char *)"MMMM", 4)) || (cp = (char *)memmem(head, 32, (char *)"IIII", 4))) { parse_phase_one(cp - head); if (cp - head && parse_tiff(0)) apply_tiff(); } else if (order == 0x4949 || order == 0x4d4d) { if (!memcmp(head + 6, "HEAPCCDR", 8)) { data_offset = hlen; parse_ciff(hlen, 
flen - hlen, 0); load_raw = &LibRaw::canon_load_raw; } else if (parse_tiff(0)) apply_tiff(); } else if (!memcmp(head, "\xff\xd8\xff\xe1", 4) && !memcmp(head + 6, "Exif", 4)) { fseek(ifp, 4, SEEK_SET); data_offset = 4 + get2(); fseek(ifp, data_offset, SEEK_SET); if (fgetc(ifp) != 0xff) parse_tiff(12); thumb_offset = 0; } else if (!memcmp(head + 25, "ARECOYK", 7)) // 'KYOCERA' right-to-left { strcpy(make, "Contax"); strcpy(model, "N Digital"); parse_kyocera(); } else if (!strcmp(head, "PXN")) { strcpy(make, "Logitech"); strcpy(model, "Fotoman Pixtura"); } else if (!strcmp(head, "qktk")) { strcpy(make, "Apple"); strcpy(model, "QuickTake 100"); load_raw = &LibRaw::quicktake_100_load_raw; } else if (!strcmp(head, "qktn")) { strcpy(make, "Apple"); strcpy(model, "QuickTake 150"); load_raw = &LibRaw::kodak_radc_load_raw; } else if (!memcmp(head, "FUJIFILM", 8)) { memcpy(imFuji.SerialSignature, head + 0x10, 0x0c); imFuji.SerialSignature[0x0c] = 0; strncpy(model, head + 0x1c, 0x20); model[0x20] = 0; memcpy(model2, head + 0x3c, 4); model2[4] = 0; strcpy(imFuji.RAFVersion, model2); fseek(ifp, 84, SEEK_SET); thumb_offset = get4(); thumb_length = get4(); fseek(ifp, 92, SEEK_SET); parse_fuji(get4()); if (thumb_offset > 120) { fseek(ifp, 120, SEEK_SET); is_raw += (i = get4()) ? 1 : 0; if (is_raw == 2 && shot_select) parse_fuji(i); } load_raw = &LibRaw::unpacked_load_raw; fseek(ifp, 100 + 28 * (shot_select > 0), SEEK_SET); parse_tiff(data_offset = get4()); parse_tiff(thumb_offset + 12); apply_tiff(); } else if (!memcmp(head, "RIFF", 4)) { fseek(ifp, 0, SEEK_SET); parse_riff(); } else if (!memcmp(head + 4, "ftypqt ", 9)) { fseek(ifp, 0, SEEK_SET); parse_qt(fsize); is_raw = 0; } else if (!memcmp(head, "\0\001\0\001\0@", 6)) { fseek(ifp, 6, SEEK_SET); fread(make, 1, 8, ifp); fread(model, 1, 8, ifp); fread(model2, 1, 16, ifp); data_offset = get2(); get2(); raw_width = get2(); raw_height = get2(); load_raw = &LibRaw::nokia_load_raw; filters = 0x61616161; } else if (!memcmp(head, "NOKIARAW", 8)) { strcpy(make, "NOKIA"); order = 0x4949; fseek(ifp, 300, SEEK_SET); data_offset = get4(); i = get4(); // bytes count width = get2(); height = get2(); // Data integrity check if (width < 1 || width > 16000 || height < 1 || height > 16000 || i < (width * height) || i > (2 * width * height)) throw LIBRAW_EXCEPTION_IO_CORRUPT; switch (tiff_bps = i * 8 / (width * height)) { case 8: load_raw = &LibRaw::eight_bit_load_raw; break; case 10: load_raw = &LibRaw::nokia_load_raw; break; case 0: throw LIBRAW_EXCEPTION_IO_CORRUPT; break; } raw_height = height + (top_margin = i / (width * tiff_bps / 8) - height); mask[0][3] = 1; filters = 0x61616161; } else if (!memcmp(head, "ARRI", 4)) { order = 0x4949; fseek(ifp, 20, SEEK_SET); width = get4(); height = get4(); strcpy(make, "ARRI"); fseek(ifp, 668, SEEK_SET); fread(model, 1, 64, ifp); model[63] = 0; fseek(ifp, 760, SEEK_SET); fread(software, 1, 64, ifp); if((unsigned char)software[0] == 0xff) software[0] = 0; software[63] = 0; data_offset = 4096; load_raw = &LibRaw::packed_load_raw; load_flags = 88; filters = 0x61616161; fixupArri(); } else if (!memcmp(head, "XPDS", 4)) { order = 0x4949; fseek(ifp, 0x800, SEEK_SET); fread(make, 1, 41, ifp); raw_height = get2(); raw_width = get2(); fseek(ifp, 56, SEEK_CUR); fread(model, 1, 30, ifp); data_offset = 0x10000; load_raw = &LibRaw::canon_rmf_load_raw; gamma_curve(0, 12.25, 1, 1023); } else if (!memcmp(head + 4, "RED1", 4)) { strcpy(make, "Red"); strcpy(model, "One"); parse_redcine(); load_raw = &LibRaw::redcine_load_raw; gamma_curve(1 / 2.4, 
12.92, 1, 4095); filters = 0x49494949; } else if (!memcmp(head, "DSC-Image", 9)) parse_rollei(); else if (!memcmp(head, "PWAD", 4)) parse_sinar_ia(); else if (!memcmp(head, "\0MRM", 4)) parse_minolta(0); else if (!memcmp(head, "FOVb", 4)) { parse_x3f(); /* Does nothing if USE_X3FTOOLS is not defined */ } else if (!memcmp(head, "CI", 2)) parse_cine(); #ifdef USE_6BY9RPI else if (!memcmp(head, "BRCM", 4)) { fseek(ifp, 0, SEEK_SET); strcpy(make, "RaspberryPi"); strcpy(model, "Pi"); parse_raspberrypi(); } #endif else if (!memcmp(head + 4, "ftypcrx ", 8)) { int err; unsigned long long szAtomList; short nesting = -1; short nTrack = -1; short TrackType; char AtomNameStack[128]; strcpy(make, "Canon"); szAtomList = ifp->size(); err = parseCR3(0ULL, szAtomList, nesting, AtomNameStack, nTrack, TrackType); if ((err == 0 || err == -14) && nTrack >= 0) // no error, or too deep nesting selectCRXTrack(nTrack); } if (make[0] == 0) for (zero_fsize = i = 0; i < (int)camera_count; i++) if (fsize == (int)table[i].fsize) { strcpy(make, table[i].t_make); strcpy(model, table[i].t_model); flip = table[i].flags >> 2; zero_is_bad = table[i].flags & 2; data_offset = table[i].offset == 0xffff ? 0 : table[i].offset; raw_width = table[i].rw; raw_height = table[i].rh; left_margin = table[i].lm; top_margin = table[i].tm; width = raw_width - left_margin - table[i].rm; height = raw_height - top_margin - table[i].bm; filters = 0x1010101U * table[i].cf; colors = 4 - !((filters & filters >> 1) & 0x5555); load_flags = table[i].lf & 0xff; if (table[i].lf & 0x100) /* Monochrome sensor dump */ { colors = 1; filters = 0; } switch (tiff_bps = (fsize - data_offset) * 8 / (raw_width * raw_height)) { case 6: load_raw = &LibRaw::minolta_rd175_load_raw; ilm.CameraMount = LIBRAW_MOUNT_Minolta_A; break; case 8: load_raw = &LibRaw::eight_bit_load_raw; break; case 10: if ((fsize - data_offset) / raw_height * 3 >= raw_width * 4) { load_raw = &LibRaw::android_loose_load_raw; break; } else if (load_flags & 1) { load_raw = &LibRaw::android_tight_load_raw; break; } case 12: load_flags |= 128; load_raw = &LibRaw::packed_load_raw; break; case 16: order = 0x4949 | 0x404 * (load_flags & 1); tiff_bps -= load_flags >> 4; tiff_bps -= load_flags = load_flags >> 1 & 7; load_raw = table[i].offset == 0xffff ? 
&LibRaw::unpacked_load_raw_reversed : &LibRaw::unpacked_load_raw; } maximum = (1 << tiff_bps) - (1 << table[i].max); break; } if (zero_fsize) fsize = 0; if (make[0] == 0) parse_smal(0, flen); if (make[0] == 0) { parse_jpeg(0); #ifdef USE_6BY9RPI if (!(strncmp(model, "ov", 2) && strncmp(model, "RP_", 3))) { //Assume that this isn't a raw unless the header can be found is_raw = 0; if (!strncasecmp(model, "RP_imx", 6)) { const long offsets[] = { //IMX219 offsets 10270208, //8MPix 3280x2464 2678784, //1920x1080 2628608, //1640x1232 1963008, //1640x922 1233920, //1280x720 445440, //640x480 -1 //Marker for end of table }; int offset_idx; for (offset_idx = 0; offsets[offset_idx] != -1; offset_idx++) { if (!fseek(ifp, -offsets[offset_idx], SEEK_END) && fread(head, 1, 32, ifp) && !strncmp(head, "BRCM", 4)) { fseek(ifp, -32, SEEK_CUR); strcpy(make, "SonyRPF"); parse_raspberrypi(); break; } } } else if (!strncasecmp(model, "RP_OV", 5) || !strncasecmp(model, "ov5647", 6)) { const long offsets[] = { 6404096, //5MPix 2592x1944 2717696, //1920x1080 1625600, //1296x972 1233920, //1296x730 445440, //640x480 -1 //Marker for end of table }; int offset_idx; for (offset_idx = 0; offsets[offset_idx] != -1; offset_idx++) { if (!fseek(ifp, -offsets[offset_idx], SEEK_END) && fread(head, 1, 32, ifp) && !strncmp(head, "BRCM", 4)) { fseek(ifp, -32, SEEK_CUR); strcpy(make, "OmniVision"); width = raw_width; //Defaults raw_width = 2611; filters = 0x16161616; parse_raspberrypi(); break; } } } }// else is_raw = 0; #else fseek(ifp, 0, SEEK_END); int sz = ftell(ifp); if (!strncmp(model, "RP_imx219", 9) && sz >= 0x9cb600 && !fseek(ifp, -0x9cb600, SEEK_END) && fread(head, 1, 0x20, ifp) && !strncmp(head, "BRCM", 4)) { strcpy(make, "Broadcom"); strcpy(model, "RPi IMX219"); if (raw_height > raw_width) flip = 5; data_offset = ftell(ifp) + 0x8000 - 0x20; parse_broadcom(); black = 66; maximum = 0x3ff; load_raw = &LibRaw::broadcom_load_raw; thumb_offset = 0; thumb_length = sz - 0x9cb600 - 1; } else if (!(strncmp(model, "ov5647", 6) && strncmp(model, "RP_OV5647", 9)) && sz >= 0x61b800 && !fseek(ifp, -0x61b800, SEEK_END) && fread(head, 1, 0x20, ifp) && !strncmp(head, "BRCM", 4)) { strcpy(make, "Broadcom"); if (!strncmp(model, "ov5647", 6)) strcpy(model, "RPi OV5647 v.1"); else strcpy(model, "RPi OV5647 v.2"); if (raw_height > raw_width) flip = 5; data_offset = ftell(ifp) + 0x8000 - 0x20; parse_broadcom(); black = 16; maximum = 0x3ff; load_raw = &LibRaw::broadcom_load_raw; thumb_offset = 0; thumb_length = sz - 0x61b800 - 1; } else is_raw = 0; #endif } // make sure strings are terminated desc[511] = artist[63] = make[63] = model[63] = model2[63] = 0; for (i = 0; i < int(sizeof CorpTable / sizeof *CorpTable); i++) { if (strcasestr(make, CorpTable[i].CorpName)) { /* Simplify company names */ maker_index = CorpTable[i].CorpId; strcpy(make, CorpTable[i].CorpName); } } if ((makeIs(LIBRAW_CAMERAMAKER_Kodak) || makeIs(LIBRAW_CAMERAMAKER_Leica)) && ((cp = strcasestr(model, " DIGITAL CAMERA")) || (cp = strstr(model, "FILE VERSION")))) { *cp = 0; } else if (makeIs(LIBRAW_CAMERAMAKER_Ricoh) && !strncasecmp(model, "PENTAX", 6)) { maker_index = LIBRAW_CAMERAMAKER_Pentax; strcpy(make, "Pentax"); } else if (makeIs(LIBRAW_CAMERAMAKER_JK_Imaging) && !strncasecmp(model, "Kodak", 5)) { maker_index = LIBRAW_CAMERAMAKER_Kodak; strcpy(make, "Kodak"); } remove_trailing_spaces(make, sizeof(make)); remove_trailing_spaces(model, sizeof(model)); i = strbuflen(make); /* Remove make from model */ if (!strncasecmp(model, make, i) && model[i++] == ' ') memmove(model, 
model + i, 64 - i); if (makeIs(LIBRAW_CAMERAMAKER_Fujifilm) && !strncmp(model, "FinePix", 7)) { memmove(model, model + 7, strlen(model) - 6); if (model[0] == ' ') { memmove(model, model + 1, strlen(model)); } } else if ((makeIs(LIBRAW_CAMERAMAKER_Kodak) || makeIs(LIBRAW_CAMERAMAKER_Konica)) && !strncmp(model, "Digital Camera ", 15)) { memmove(model, model + 15, strlen(model) - 14); } desc[511] = artist[63] = make[63] = model[63] = model2[63] = 0; if (!is_raw) goto notraw; if (!height) height = raw_height; if (!width) width = raw_width; identify_finetune_pentax(); if (dng_version) { if (filters == UINT_MAX) filters = 0; if (!filters) colors = tiff_samples; switch (tiff_compress) { case 0: // Compression not set, assuming uncompressed case 1: #ifdef USE_DNGSDK // Uncompressed float if (load_raw != &LibRaw::float_dng_load_raw_placeholder) #endif load_raw = &LibRaw::packed_dng_load_raw; break; case 7: load_raw = &LibRaw::lossless_dng_load_raw; break; case 8: load_raw = &LibRaw::deflate_dng_load_raw; break; #ifdef USE_GPRSDK case 9: load_raw = &LibRaw::vc5_dng_load_raw_placeholder; break; #endif case 34892: load_raw = &LibRaw::lossy_dng_load_raw; break; default: load_raw = 0; } GetNormalizedModel(); if (makeIs(LIBRAW_CAMERAMAKER_Olympus) && (OlyID == OlyID_STYLUS_1) && // don't use normalized_model below, it is 'Stylus 1' (strchr(model+6, 's') || strchr(model+6, 'S'))) { width -= 16; } goto dng_skip; } if (makeIs(LIBRAW_CAMERAMAKER_Canon) && !fsize && tiff_bps != 15) { bool fromtable = false; if (!load_raw) load_raw = &LibRaw::lossless_jpeg_load_raw; for (i = 0; i < int(sizeof canon / sizeof *canon); i++) if (raw_width == canon[i][0] && raw_height == canon[i][1]) { width = raw_width - (left_margin = canon[i][2]); height = raw_height - (top_margin = canon[i][3]); width -= canon[i][4]; height -= canon[i][5]; mask[0][1] = canon[i][6]; mask[0][3] = -canon[i][7]; mask[1][1] = canon[i][8]; mask[1][3] = -canon[i][9]; if (canon[i][10]) filters = canon[i][10] * 0x01010101U; fromtable = true; } if ((unique_id | 0x20000ULL) == 0x2720000ULL) // "PowerShot G11", "PowerShot S90": 0x2700000, 0x2720000 // possibly "PowerShot SX120 IS" (if not chdk hack?): 0x2710000 { left_margin = 8; top_margin = 16; } if(!fromtable && imgdata.makernotes.canon.AverageBlackLevel) // not known, but metadata known { FORC4 cblack[c] = imgdata.makernotes.canon.ChannelBlackLevel[c]; black = cblack[4] = cblack[5] = 0; // Prevent automatic BL calculation mask[0][3] = 1; mask[0][1] = 2; if(imgdata.makernotes.canon.SensorWidth == raw_width && imgdata.makernotes.canon.SensorHeight == raw_height) { left_margin = (imgdata.makernotes.canon.SensorLeftBorder+1) & 0xfffe; // round to 2 width = imgdata.makernotes.canon.SensorRightBorder - left_margin; top_margin = (imgdata.makernotes.canon.SensorTopBorder +1) & 0xfffe; height = imgdata.makernotes.canon.SensorBottomBorder - top_margin; } } } identify_finetune_by_filesize(fsize); if (!strcmp(model, "KAI-0340") && find_green(16, 16, 3840, 5120) < 25) { height = 480; top_margin = filters = 0; strcpy(model, "C603"); } GetNormalizedModel(); identify_finetune_dcr(head, fsize, flen); /* Early reject for damaged images */ if (!load_raw || height < 22 || width < 22 || (tiff_bps > 16 && (load_raw != &LibRaw::deflate_dng_load_raw && load_raw != &LibRaw::float_dng_load_raw_placeholder)) || tiff_samples > 4 || colors > 4 || colors < 1 /* alloc in unpack() may be fooled by size adjust */ || ((int)width + (int)left_margin > 65535) || ((int)height + (int)top_margin > 65535)) { is_raw = 0; 
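    /* Either no loader was selected or the frame failed the basic sanity
       limits above: at least 22x22 pixels, no more than 4 samples/colors,
       bit depth capped at 16 unless the deflate/float DNG loaders are in
       use, and width/height plus margins must fit in 16 bits (otherwise the
       allocation in unpack() could be fooled).  is_raw has been cleared;
       report identify progress and bail out. */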
    RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY, 1, 2);
    return;
  }

  if (!model[0])
  {
    sprintf(model, "%dx%d", width, height);
    strcpy(normalized_model, model);
  }

  if (!(imgdata.params.raw_processing_options &
        LIBRAW_PROCESSING_ZEROFILTERS_FOR_MONOCHROMETIFFS) &&
      (filters == UINT_MAX)) // Default dcraw behaviour
    filters = 0x94949494;
  else if (filters == UINT_MAX)
  {
    if (tiff_nifds > 0 && tiff_samples == 1)
    {
      colors = 1;
      filters = 0;
    }
    else
      filters = 0x94949494;
  }

  if (thumb_offset && !thumb_height)
  {
    fseek(ifp, thumb_offset, SEEK_SET);
    if (ljpeg_start(&jh, 1))
    {
      thumb_width = jh.wide;
      thumb_height = jh.high;
    }
  }

dng_skip:
  if (dng_version)
    identify_process_dng_fields();

  /* Early reject for damaged images again (after dng fields processing) */
  if (!load_raw || height < 22 || width < 22 ||
      (tiff_bps > 16 &&
       (load_raw != &LibRaw::deflate_dng_load_raw &&
        load_raw != &LibRaw::float_dng_load_raw_placeholder)) ||
      tiff_samples > 4 || colors > 4 || colors < 1)
  {
    is_raw = 0;
    RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY, 1, 2);
    return;
  }

  { // Check cam_mul range
    int cmul_ok = 1;
    FORCC if (cam_mul[c] <= 0.001f) cmul_ok = 0;

    if (cmul_ok)
    {
      double cmin = cam_mul[0], cmax;
      double cnorm[4];
      FORCC cmin = MIN(cmin, cam_mul[c]);
      FORCC cnorm[c] = cam_mul[c] / cmin;
      cmax = cmin = cnorm[0];
      FORCC
      {
        cmin = MIN(cmin, cnorm[c]);
        cmax = MAX(cmax, cnorm[c]);
      }
      if (cmin <= 0.01f || cmax > 100.f)
        cmul_ok = false;
    }
    if (!cmul_ok)
    {
      if (cam_mul[0] > 0)
        cam_mul[0] = 0;
      cam_mul[3] = 0;
    }
  }

  if ((use_camera_matrix & (((use_camera_wb || dng_version) ? 0 : 1) | 0x2)) &&
      cmatrix[0][0] > 0.125)
  {
    memcpy(rgb_cam, cmatrix, sizeof cmatrix);
    raw_color = 0;
  }

  if (raw_color && !CM_found)
    CM_found = adobe_coeff(maker_index, normalized_model);
  else if ((imgdata.color.cam_xyz[0][0] < 0.01) && !CM_found)
    CM_found = adobe_coeff(maker_index, normalized_model, 1);

  if (load_raw == &LibRaw::kodak_radc_load_raw)
    if ((raw_color) && !CM_found)
      CM_found = adobe_coeff(LIBRAW_CAMERAMAKER_Apple, "Quicktake");

  if ((maker_index != LIBRAW_CAMERAMAKER_Unknown) && normalized_model[0])
    SetStandardIlluminants(maker_index, normalized_model);

  // Clear erroneous fuji_width if not set through parse_fuji or for DNG
  if (fuji_width && !dng_version &&
      !(imgdata.process_warnings & LIBRAW_WARN_PARSEFUJI_PROCESSED))
    fuji_width = 0;

  if (fuji_width)
  {
    fuji_width = width >> !fuji_layout;
    filters = fuji_width & 1 ? 0x94949494 : 0x49494949;
    width = (height >> fuji_layout) + fuji_width;
    height = width - 1;
    pixel_aspect = 1;
  }
  else
  {
    if (raw_height < height)
      raw_height = height;
    if (raw_width < width)
      raw_width = width;
  }
  if (!tiff_bps)
    tiff_bps = 12;
  if (!maximum)
  {
    maximum = (1 << tiff_bps) - 1;
    if (maximum < 0x10000 && curve[maximum] > 0 &&
        load_raw == &LibRaw::sony_arw2_load_raw)
      maximum = curve[maximum];
  }
  if (maximum > 0xffff)
    maximum = 0xffff;

  if (!load_raw || height < 22 || width < 22 ||
      (tiff_bps > 16 &&
       (load_raw != &LibRaw::deflate_dng_load_raw &&
        load_raw != &LibRaw::float_dng_load_raw_placeholder)) ||
      tiff_samples > 6 || colors > 4)
    is_raw = 0;

  if (raw_width < 22 || raw_width > 64000 || raw_height < 22 ||
      pixel_aspect < 0.1 || pixel_aspect > 10. || raw_height > 64000)
    is_raw = 0;
#ifdef NO_JASPER
  if (load_raw == &LibRaw::redcine_load_raw)
  {
    is_raw = 0;
    imgdata.process_warnings |= LIBRAW_WARN_NO_JASPER;
  }
#endif
#ifdef NO_JPEG
  if (load_raw == &LibRaw::kodak_jpeg_load_raw ||
      load_raw == &LibRaw::lossy_dng_load_raw)
  {
    is_raw = 0;
    imgdata.process_warnings |= LIBRAW_WARN_NO_JPEGLIB;
  }
#endif
  if (!cdesc[0])
    strcpy(cdesc, colors == 3 ?
"RGBG" : "GMCY"); if (!raw_height) raw_height = height; if (!raw_width) raw_width = width; if (filters > 999 && colors == 3) filters |= ((filters >> 2 & 0x22222222) | (filters << 2 & 0x88888888)) & filters << 1; notraw: if (flip == (int)UINT_MAX) flip = tiff_flip; if (flip == (int)UINT_MAX) flip = 0; // Convert from degrees to bit-field if needed if (flip > 89 || flip < -89) { switch ((flip + 3600) % 360) { case 270: flip = 5; break; case 180: flip = 3; break; case 90: flip = 6; break; } } if (pana_bpp) imgdata.color.raw_bps = pana_bpp; else if ((load_raw == &LibRaw::phase_one_load_raw) || (load_raw == &LibRaw::phase_one_load_raw_c)) imgdata.color.raw_bps = ph1.format; else imgdata.color.raw_bps = tiff_bps; RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY, 1, 2); } void LibRaw::identify_process_dng_fields() { if (!dng_version) return; int c; { /* copy DNG data from per-IFD field to color.dng */ int iifd = find_ifd_by_offset(data_offset); int pifd = find_ifd_by_offset(thumb_offset); #define CFAROUND(value, filters) \ filters ? (filters >= 1000 ? ((value + 1) / 2) * 2 : ((value + 5) / 6) * 6) \ : value #define IFDCOLORINDEX(ifd, subset, bit) \ (tiff_ifd[ifd].dng_color[subset].parsedfields & bit) \ ? ifd \ : ((tiff_ifd[0].dng_color[subset].parsedfields & bit) ? 0 : -1) #define IFDLEVELINDEX(ifd, bit) \ (tiff_ifd[ifd].dng_levels.parsedfields & bit) \ ? ifd \ : ((tiff_ifd[0].dng_levels.parsedfields & bit) ? 0 : -1) #define COPYARR(to, from) memmove(&to, &from, sizeof(from)) if (iifd < (int)tiff_nifds && iifd >= 0) { int sidx; // Per field, not per structure if (!(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_DONT_CHECK_DNG_ILLUMINANT)) { int illidx[2], cmidx[2], calidx[2], abidx; for (int i = 0; i < 2; i++) { illidx[i] = IFDCOLORINDEX(iifd, i, LIBRAW_DNGFM_ILLUMINANT); cmidx[i] = IFDCOLORINDEX(iifd, i, LIBRAW_DNGFM_COLORMATRIX); calidx[i] = IFDCOLORINDEX(iifd, i, LIBRAW_DNGFM_CALIBRATION); } abidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_ANALOGBALANCE); // Data found, all in same ifd, illuminants are inited if (illidx[0] >= 0 && illidx[0] < (int)tiff_nifds && illidx[0] == illidx[1] && illidx[0] == cmidx[0] && illidx[0] == cmidx[1] && tiff_ifd[illidx[0]].dng_color[0].illuminant > 0 && tiff_ifd[illidx[0]].dng_color[1].illuminant > 0) { sidx = illidx[0]; // => selected IFD double cc[4][4], cm[4][3], cam_xyz[4][3]; // CM -> Color Matrix // CC -> Camera calibration for (int j = 0; j < 4; j++) for (int i = 0; i < 4; i++) cc[j][i] = i == j; int colidx = -1; // IS D65 here? 
for (int i = 0; i < 2; i++) { if (tiff_ifd[sidx].dng_color[i].illuminant == LIBRAW_WBI_D65) { colidx = i; break; } } // Other daylight-type ill if (colidx < 0) for (int i = 0; i < 2; i++) { int ill = tiff_ifd[sidx].dng_color[i].illuminant; if (ill == LIBRAW_WBI_Daylight || ill == LIBRAW_WBI_D55 || ill == LIBRAW_WBI_D75 || ill == LIBRAW_WBI_D50 || ill == LIBRAW_WBI_Flash) { colidx = i; break; } } if (colidx >= 0) // Selected { // Init camera matrix from DNG FORCC for (int j = 0; j < 3; j++) cm[c][j] = tiff_ifd[sidx].dng_color[colidx].colormatrix[c][j]; if (calidx[colidx] == sidx) { for (int i = 0; i < colors; i++) FORCC cc[i][c] = tiff_ifd[sidx].dng_color[colidx].calibration[i][c]; } if (abidx == sidx) for (int i = 0; i < colors; i++) FORCC cc[i][c] *= tiff_ifd[sidx].dng_levels.analogbalance[i]; int j; FORCC for (int i = 0; i < 3; i++) for (cam_xyz[c][i] = j = 0; j < colors; j++) cam_xyz[c][i] += cc[c][j] * cm[j][i]; // add AsShotXY later * xyz[i]; cam_xyz_coeff(cmatrix, cam_xyz); } } } bool noFujiDNGCrop = makeIs(LIBRAW_CAMERAMAKER_Fujifilm) && (!strcmp(normalized_model, "S3Pro") || !strcmp(normalized_model, "S5Pro") || !strcmp(normalized_model, "S2Pro")); if (!noFujiDNGCrop && (imgdata.params.raw_processing_options &LIBRAW_PROCESSING_USE_DNG_DEFAULT_CROP)) { sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_CROPORIGIN); int sidx2 = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_CROPSIZE); if (sidx >= 0 && sidx == sidx2 && tiff_ifd[sidx].dng_levels.default_crop[2] > 0 && tiff_ifd[sidx].dng_levels.default_crop[3] > 0) { int lm = tiff_ifd[sidx].dng_levels.default_crop[0]; int lmm = CFAROUND(lm, filters); int tm = tiff_ifd[sidx].dng_levels.default_crop[1]; int tmm = CFAROUND(tm, filters); int ww = tiff_ifd[sidx].dng_levels.default_crop[2]; int hh = tiff_ifd[sidx].dng_levels.default_crop[3]; if (lmm > lm) ww -= (lmm - lm); if (tmm > tm) hh -= (tmm - tm); if (left_margin + lm + ww <= raw_width && top_margin + tm + hh <= raw_height) { left_margin += lmm; top_margin += tmm; width = ww; height = hh; } } } if (!(imgdata.color.dng_color[0].parsedfields & LIBRAW_DNGFM_FORWARDMATRIX)) // Not set already (Leica makernotes) { sidx = IFDCOLORINDEX(iifd, 0, LIBRAW_DNGFM_FORWARDMATRIX); if (sidx >= 0) COPYARR(imgdata.color.dng_color[0].forwardmatrix, tiff_ifd[sidx].dng_color[0].forwardmatrix); } if (!(imgdata.color.dng_color[1].parsedfields & LIBRAW_DNGFM_FORWARDMATRIX)) // Not set already (Leica makernotes) { sidx = IFDCOLORINDEX(iifd, 1, LIBRAW_DNGFM_FORWARDMATRIX); if (sidx >= 0) COPYARR(imgdata.color.dng_color[1].forwardmatrix, tiff_ifd[sidx].dng_color[1].forwardmatrix); } for (int ss = 0; ss < 2; ss++) { sidx = IFDCOLORINDEX(iifd, ss, LIBRAW_DNGFM_COLORMATRIX); if (sidx >= 0) COPYARR(imgdata.color.dng_color[ss].colormatrix, tiff_ifd[sidx].dng_color[ss].colormatrix); sidx = IFDCOLORINDEX(iifd, ss, LIBRAW_DNGFM_CALIBRATION); if (sidx >= 0) COPYARR(imgdata.color.dng_color[ss].calibration, tiff_ifd[sidx].dng_color[ss].calibration); sidx = IFDCOLORINDEX(iifd, ss, LIBRAW_DNGFM_ILLUMINANT); if (sidx >= 0) imgdata.color.dng_color[ss].illuminant = tiff_ifd[sidx].dng_color[ss].illuminant; } // Levels sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_ANALOGBALANCE); if (sidx >= 0) COPYARR(imgdata.color.dng_levels.analogbalance, tiff_ifd[sidx].dng_levels.analogbalance); sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_BASELINEEXPOSURE); if (sidx >= 0) imgdata.color.dng_levels.baseline_exposure = tiff_ifd[sidx].dng_levels.baseline_exposure; sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_WHITE); if (sidx >= 0 && tiff_ifd[sidx].dng_levels.dng_whitelevel[0]) 
COPYARR(imgdata.color.dng_levels.dng_whitelevel, tiff_ifd[sidx].dng_levels.dng_whitelevel); else if (tiff_ifd[iifd].sample_format <= 2 && tiff_ifd[iifd].bps > 0 && tiff_ifd[iifd].bps < 32) FORC4 imgdata.color.dng_levels.dng_whitelevel[c] = (1 << tiff_ifd[iifd].bps) - 1; sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_ASSHOTNEUTRAL); if (sidx >= 0) { COPYARR(imgdata.color.dng_levels.asshotneutral, tiff_ifd[sidx].dng_levels.asshotneutral); if (imgdata.color.dng_levels.asshotneutral[0]) { cam_mul[3] = 0; FORCC if (fabs(imgdata.color.dng_levels.asshotneutral[c]) > 0.0001) cam_mul[c] = 1 / imgdata.color.dng_levels.asshotneutral[c]; } } sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_BLACK); if (sidx >= 0) { imgdata.color.dng_levels.dng_fblack = tiff_ifd[sidx].dng_levels.dng_fblack; imgdata.color.dng_levels.dng_black = tiff_ifd[sidx].dng_levels.dng_black; COPYARR(imgdata.color.dng_levels.dng_cblack, tiff_ifd[sidx].dng_levels.dng_cblack); COPYARR(imgdata.color.dng_levels.dng_fcblack, tiff_ifd[sidx].dng_levels.dng_fcblack); } if (pifd >= 0) { sidx = IFDLEVELINDEX(pifd, LIBRAW_DNGFM_PREVIEWCS); if (sidx >= 0) imgdata.color.dng_levels.preview_colorspace = tiff_ifd[sidx].dng_levels.preview_colorspace; } sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_OPCODE2); if (sidx >= 0) meta_offset = tiff_ifd[sidx].opcode2_offset; sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_LINTABLE); INT64 linoff = -1; int linlen = 0; if (sidx >= 0) { linoff = tiff_ifd[sidx].lineartable_offset; linlen = tiff_ifd[sidx].lineartable_len; } if (linoff >= 0 && linlen > 0) { INT64 pos = ftell(ifp); fseek(ifp, linoff, SEEK_SET); linear_table(linlen); fseek(ifp, pos, SEEK_SET); } // Need to add curve too } /* Copy DNG black level to LibRaw's */ if (load_raw == &LibRaw::lossy_dng_load_raw) { maximum = 0xffff; FORC4 imgdata.color.linear_max[c] = imgdata.color.dng_levels.dng_whitelevel[c] = 0xffff; } else { maximum = imgdata.color.dng_levels.dng_whitelevel[0]; } black = imgdata.color.dng_levels.dng_black; if (tiff_samples == 2 && imgdata.color.dng_levels.dng_cblack[4] * imgdata.color.dng_levels.dng_cblack[5] * tiff_samples == imgdata.color.dng_levels.dng_cblack[LIBRAW_CBLACK_SIZE - 1]) { unsigned ff = filters; if (filters > 999 && colors == 3) filters |= ((filters >> 2 & 0x22222222) | (filters << 2 & 0x88888888)) & filters << 1; /* Special case, Fuji SuperCCD dng */ int csum[4] = { 0,0,0,0 }, ccount[4] = { 0,0,0,0 }; int i = 6 + shot_select; for (unsigned row = 0; row < imgdata.color.dng_levels.dng_cblack[4]; row++) for (unsigned col = 0; col < imgdata.color.dng_levels.dng_cblack[5]; col++) { csum[FC(row, col)] += imgdata.color.dng_levels.dng_cblack[i]; ccount[FC(row, col)]++; i += tiff_samples; } for (int c = 0; c < 4; c++) if (ccount[c]) imgdata.color.dng_levels.dng_cblack[c] += csum[c] / ccount[c]; imgdata.color.dng_levels.dng_cblack[4] = imgdata.color.dng_levels.dng_cblack[5] = 0; filters = ff; } else if (tiff_samples > 2 && tiff_samples <= 4 && imgdata.color.dng_levels.dng_cblack[4] * imgdata.color.dng_levels.dng_cblack[5] * tiff_samples == imgdata.color.dng_levels.dng_cblack[LIBRAW_CBLACK_SIZE - 1]) { /* Special case, per_channel blacks in RepeatDim, average for per-channel */ int csum[4] = { 0,0,0,0 }, ccount[4] = { 0,0,0,0 }; int i = 6; for (unsigned row = 0; row < imgdata.color.dng_levels.dng_cblack[4]; row++) for (unsigned col = 0; col < imgdata.color.dng_levels.dng_cblack[5]; col++) for (unsigned c = 0; c < tiff_samples; c++) { csum[c] += imgdata.color.dng_levels.dng_cblack[i]; ccount[c]++; i++; } for (int c = 0; c < 4; c++) if (ccount[c]) 
imgdata.color.dng_levels.dng_cblack[c] += csum[c] / ccount[c]; imgdata.color.dng_levels.dng_cblack[4] = imgdata.color.dng_levels.dng_cblack[5] = 0; } memmove(cblack, imgdata.color.dng_levels.dng_cblack, sizeof(cblack)); if (iifd < (int)tiff_nifds && iifd >= 0) { int sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_LINEARRESPONSELIMIT); if (sidx >= 0) { imgdata.color.dng_levels.LinearResponseLimit = tiff_ifd[sidx].dng_levels.LinearResponseLimit; if (imgdata.color.dng_levels.LinearResponseLimit > 0.1 && imgdata.color.dng_levels.LinearResponseLimit <= 1.0) { // And approx promote it to linear_max: int bl4 = 0, bl64 = 0; for (int chan = 0; chan < colors && chan < 4; chan++) bl4 += cblack[chan]; bl4 /= LIM(colors, 1, 4); if (cblack[4] * cblack[5] > 0) { unsigned cnt = 0; for (unsigned c = 0; c < 4096 && c < cblack[4] * cblack[5]; c++) { bl64 += cblack[c + 6]; cnt++; } bl64 /= LIM(cnt, 1, 4096); } int rblack = black + bl4 + bl64; for (int chan = 0; chan < colors && chan < 4; chan++) imgdata.color.linear_max[chan] = (maximum - rblack) * imgdata.color.dng_levels.LinearResponseLimit + rblack; } } } } } void LibRaw::identify_finetune_pentax() { if (makeIs(LIBRAW_CAMERAMAKER_Pentax) || makeIs(LIBRAW_CAMERAMAKER_Samsung)) { if (height == 2624 && width == 3936) // Pentax K10D, Samsung GX10; { height = 2616; width = 3896; } if (height == 3136 && width == 4864) // Pentax K20D, Samsung GX20; { height = 3124; width = 4688; filters = 0x16161616; } } if (makeIs(LIBRAW_CAMERAMAKER_Pentax)) { if ((width == 4352) && ((unique_id == PentaxID_K_r) || (unique_id == PentaxID_K_x))) { width = 4309; filters = 0x16161616; } if ((width >= 4960) && ((unique_id == PentaxID_K_5) || (unique_id == PentaxID_K_5_II) || (unique_id == PentaxID_K_5_II_s))) { left_margin = 10; width = 4950; filters = 0x16161616; } if ((width == 6080) && (unique_id == PentaxID_K_70)) { height = 4016; top_margin = 32; width = 6020; left_margin = 60; } if ((width == 4736) && (unique_id == PentaxID_K_7)) { height = 3122; width = 4684; filters = 0x16161616; top_margin = 2; } if ((width == 6080) && (unique_id == PentaxID_K_3_II)) { left_margin = 4; width = 6040; } if ((width == 6112) && (unique_id == PentaxID_KP)) { // From DNG, maybe too strict left_margin = 54; top_margin = 28; width = 6028; height = raw_height - top_margin; } if ((width == 6080) && (unique_id == PentaxID_K_3)) { left_margin = 4; width = 6040; } if ((width == 7424) && (unique_id == PentaxID_645D)) { height = 5502; width = 7328; filters = 0x61616161; top_margin = 29; left_margin = 48; } } else if (makeIs(LIBRAW_CAMERAMAKER_Ricoh) && (height == 3014) && (width == 4096)) // Ricoh GX200 width = 4014; } void LibRaw::identify_finetune_by_filesize(int fsize) { if (fsize == 4771840) { // hack Nikon 3mpix: E880, E885, E990, E995; // Olympus C-3030Z if (!timestamp && nikon_e995()) strcpy(model, "E995"); } else if (fsize == 2940928) { // hack Nikon 2mpix: E2100, E2500 if (!timestamp && !nikon_e2100()) strcpy(model, "E2500"); } else if (fsize == 4775936) { // hack Nikon 3mpix: E3100, E3200, E3500, E3700; // Pentax "Optio 33WR"; // Olympus C-740UZ if (!timestamp) nikon_3700(); } else if (fsize == 5869568) { // hack Nikon 4mpix: E4300; // hack Minolta "DiMAGE Z2" if (!timestamp && minolta_z2()) { maker_index = LIBRAW_CAMERAMAKER_Minolta; strcpy(make, "Minolta"); strcpy(model, "DiMAGE Z2"); } } } void LibRaw::identify_finetune_dcr(char head[64], int fsize, int flen) { static const short pana[][6] = { // raw_width, raw_height, left_margin, top_margin, width_increment, // height_increment {3130, 1743, 4, 0, 
-6, 0}, /* 00 */ {3130, 2055, 4, 0, -6, 0}, /* 01 */ {3130, 2319, 4, 0, -6, 0}, /* 02 DMC-FZ8 */ {3170, 2103, 18, 0, -42, 20}, /* 03 */ {3170, 2367, 18, 13, -42, -21}, /* 04 */ {3177, 2367, 0, 0, -1, 0}, /* 05 DMC-L1 */ {3304, 2458, 0, 0, -1, 0}, /* 06 DMC-FZ30 */ {3330, 2463, 9, 0, -5, 0}, /* 07 DMC-FZ18 */ {3330, 2479, 9, 0, -17, 4}, /* 08 */ {3370, 1899, 15, 0, -44, 20}, /* 09 */ {3370, 2235, 15, 0, -44, 20}, /* 10 */ {3370, 2511, 15, 10, -44, -21}, /* 11 */ {3690, 2751, 3, 0, -8, -3}, /* 12 DMC-FZ50 */ {3710, 2751, 0, 0, -3, 0}, /* 13 DMC-L10 */ {3724, 2450, 0, 0, 0, -2}, /* 14 */ {3770, 2487, 17, 0, -44, 19}, /* 15 */ {3770, 2799, 17, 15, -44, -19}, /* 16 */ {3880, 2170, 6, 0, -6, 0}, /* 17 DMC-LX1 */ {4060, 3018, 0, 0, 0, -2}, /* 18 DMC-FZ35, DMC-FZ38 */ {4290, 2391, 3, 0, -8, -1}, /* 19 DMC-LX2 */ {4330, 2439, 17, 15, -44, -19}, /* 20 "D-LUX 3" */ {4508, 2962, 0, 0, -3, -4}, /* 21 */ {4508, 3330, 0, 0, -3, -6}, /* 22 */ {10480, 7794, 0, 0, -2, 0}, /* 23: G9 in high-res */ }; int i,c; struct jhead jh; if (makeIs(LIBRAW_CAMERAMAKER_Canon) && !tiff_flip && imCanon.MakernotesFlip) { tiff_flip = imCanon.MakernotesFlip; } else if (makeIs(LIBRAW_CAMERAMAKER_Nikon)) { if (!load_raw) load_raw = &LibRaw::packed_load_raw; if (model[0] == 'E') // Nikon E8800, E8700, E8400, E5700, E5400, E5000, // others are diag hacks? load_flags |= !data_offset << 2 | 2; } /* Set parameters based on camera name (for non-DNG files). */ /* Always 512 for arw2_load_raw */ else if (makeIs(LIBRAW_CAMERAMAKER_Sony) && (raw_width > 3888) && !black && !cblack[0]) { black = (load_raw == &LibRaw::sony_arw2_load_raw) ? 512 : (128 << (tiff_bps - 12)); } if (is_foveon) { if (height * 2 < width) pixel_aspect = 0.5; if (height > width) pixel_aspect = 2; filters = 0; } else if (makeIs(LIBRAW_CAMERAMAKER_Pentax)) { if ((unique_id == PentaxID_K_1) || (unique_id == PentaxID_K_1_Mark_II)) { top_margin = 18; height = raw_height - top_margin; if (raw_width == 7392) { left_margin = 6; width = 7376; } } else if (unique_id == PentaxID_Optio_S_V101) { // (fsize == 3178560) cam_mul[0] *= 4; cam_mul[2] *= 4; } else if (unique_id == PentaxID_Optio_33WR) { // (fsize == 4775936) flip = 1; filters = 0x16161616; } else if (unique_id == PentaxID_staristD) { load_raw = &LibRaw::unpacked_load_raw; data_error = -1; } else if (unique_id == PentaxID_staristDS) { height -= 2; } } else if (makeIs(LIBRAW_CAMERAMAKER_Canon)) { if (tiff_bps == 15) { // Canon sRAW if (width == 3344) width = 3272; else if (width == 3872) width = 3866; if (height > width) { SWAP(height, width); SWAP(raw_height, raw_width); } if (width == 7200 && height == 3888) { // Canon EOS 5DS (R); raw_width = width = 6480; raw_height = height = 4320; } filters = 0; tiff_samples = colors = 3; load_raw = &LibRaw::canon_sraw_load_raw; } if (!strcmp(normalized_model, "PowerShot 600")) { height = 613; width = 854; raw_width = 896; colors = 4; filters = 0xe1e4e1e4; load_raw = &LibRaw::canon_600_load_raw; } else if (!strcmp(normalized_model, "PowerShot A5") || !strcmp(normalized_model, "PowerShot A5 Zoom")) { height = 773; width = 960; raw_width = 992; pixel_aspect = 256 / 235.0; filters = 0x1e4e1e4e; goto canon_a5; } else if (!strcmp(normalized_model, "PowerShot A50")) { height = 968; width = 1290; raw_width = 1320; filters = 0x1b4e4b1e; goto canon_a5; } else if (!strcmp(normalized_model, "PowerShot Pro70")) { height = 1024; width = 1552; filters = 0x1e4b4e1b; canon_a5: colors = 4; tiff_bps = 10; load_raw = &LibRaw::packed_load_raw; load_flags = 40; } else if (!strcmp(normalized_model, 
"PowerShot Pro90 IS") || !strcmp(normalized_model, "PowerShot G1")) { colors = 4; filters = 0xb4b4b4b4; } else if (!strcmp(normalized_model, "PowerShot A610")) { // chdk hack if (canon_s2is()) strcpy(model + 10, "S2 IS"); // chdk hack } else if (!strcmp(normalized_model, "PowerShot SX220 HS")) { // chdk hack mask[1][3] = -4; top_margin = 16; left_margin = 92; } else if (!strcmp(normalized_model, "PowerShot S120")) { // chdk hack raw_width = 4192; raw_height = 3062; width = 4022; height = 3016; mask[0][0] = top_margin = 31; mask[0][2] = top_margin + height; left_margin = 120; mask[0][1] = 23; mask[0][3] = 72; } else if (!strcmp(normalized_model, "PowerShot G16")) { mask[0][0] = 0; mask[0][2] = 80; mask[0][1] = 0; mask[0][3] = 16; top_margin = 29; left_margin = 120; width = raw_width - left_margin - 48; height = raw_height - top_margin - 14; } else if (!strcmp(normalized_model, "PowerShot SX50 HS")) { top_margin = 17; } } else if (makeIs(LIBRAW_CAMERAMAKER_Nikon)) { if (!strcmp(model, "D1")) { imgdata.other.analogbalance[0] = cam_mul[0]; imgdata.other.analogbalance[2] = cam_mul[2]; imgdata.other.analogbalance[1] = imgdata.other.analogbalance[3] = cam_mul[1]; cam_mul[0] = cam_mul[1] = cam_mul[2] = 1.0f; } else if (!strcmp(model, "D1X")) { width -= 4; pixel_aspect = 0.5; } else if (!strcmp(model, "D40X") || !strcmp(model, "D60") || !strcmp(model, "D80") || !strcmp(model, "D3000")) { height -= 3; width -= 4; } else if (!strcmp(model, "D3") || !strcmp(model, "D3S") || !strcmp(model, "D700")) { width -= 4; left_margin = 2; } else if (!strcmp(model, "D3100")) { width -= 28; left_margin = 6; } else if (!strcmp(model, "D5000") || !strcmp(model, "D90")) { width -= 42; } else if (!strcmp(model, "D5100") || !strcmp(model, "D7000") || !strcmp(model, "COOLPIX A")) { width -= 44; } else if (!strcmp(model, "D3200") || !strcmp(model, "D600") || !strcmp(model, "D610") || !strncmp(model, "D800", 4)) // Nikons: D800, D800E { width -= 46; } else if (!strcmp(model, "D4") || !strcmp(model, "Df")) { width -= 52; left_margin = 2; } else if (!strcmp(model, "D500")) { // Empty - to avoid width-1 below } else if (!strncmp(model, "D40", 3) || !strncmp(model, "D50", 3) || !strncmp(model, "D70", 3)) { width--; } else if (!strcmp(model, "D100")) { if (load_flags) // compressed NEF raw_width = (width += 3) + 3; } else if (!strcmp(model, "D200")) { left_margin = 1; width -= 4; filters = 0x94949494; } else if (!strncmp(model, "D2H", 3)) // Nikons: D2H, D2Hs { left_margin = 6; width -= 14; } else if (!strncmp(model, "D2X", 3)) // Nikons: D2X, D2Xs { if (width == 3264) // in-camera Hi-speed crop: On width -= 32; else width -= 8; } else if (!strncmp(model, "D300", 4)) // Nikons: D300, D300s { width -= 32; } else if (raw_width == 4032) // Nikon "COOLPIX P7700", "COOLPIX P7800", // "COOLPIX P330", "COOLPIX P340" { if (!strcmp(normalized_model, "COOLPIX P7700")) { maximum = 65504; load_flags = 0; } else if (!strcmp(normalized_model, "COOLPIX P7800")) { maximum = 65504; load_flags = 0; } else if (!strcmp(model, "COOLPIX P340")) { load_flags = 0; } } else if (!strncmp(model, "COOLPIX P", 9) && raw_width != 4032) // Nikon "COOLPIX P1000", "COOLPIX P6000", // "COOLPIX P7000", "COOLPIX P7100" { load_flags = 24; filters = 0x94949494; /* the following 'if' is most probably obsolete, because we now read black * level from metadata */ if ((model[9] == '7') && /* P7000, P7100 */ ((iso_speed >= 400) || (iso_speed == 0)) && !strstr(software, "V1.2")) /* v. 
1.2 seen for P7000 only */ black = 255; } else if (!strncmp(model, "COOLPIX B700", 12)) { load_flags = 24; } else if (!strncmp(model, "1 ", 2)) // Nikons: "1 AW1", "1 J1", "1 J2", "1 J3", "1 J4", // "1 J5", "1 S1", "1 S2", "1 V1", "1 V2", "1 V3" { height -= 2; } else if (fsize == 1581060) // hack Nikon 1mpix: E900 { simple_coeff(3); pre_mul[0] = 1.2085; pre_mul[1] = 1.0943; pre_mul[3] = 1.1103; } else if ((fsize == 4771840) && // hack Nikon 3mpix: E880, E885, E990 strcmp(model, "E995")) // but not E995 { filters = 0xb4b4b4b4; simple_coeff(3); pre_mul[0] = 1.196; pre_mul[1] = 1.246; pre_mul[2] = 1.018; } else if ((fsize == 4775936) && // hack Nikon 3mpix: E3100, E3200, E3500 (atoi(model + 1) < 3700)) // but not E3700; { filters = 0x49494949; } else if (fsize == 5869568) // hack Nikon 4mpix: E4300; { load_flags = 6; } else if (!strcmp(model, "E2500")) { height -= 2; load_flags = 6; colors = 4; filters = 0x4b4b4b4b; } } else if (makeIs(LIBRAW_CAMERAMAKER_Olympus)) { if (OlyID == OlyID_C_740UZ) { // (fsize == 4775936) i = find_green(12, 32, 1188864, 3576832); c = find_green(12, 32, 2383920, 2387016); if (abs(i) < abs(c)) { SWAP(i, c); load_flags = 24; } if (i < 0) filters = 0x61616161; } else if (OlyID == OlyID_C_770UZ) { height = 1718; width = 2304; filters = 0x16161616; load_raw = &LibRaw::packed_load_raw; load_flags = 30; } else { height += height & 1; if (exif_cfa) filters = exif_cfa; if (width == 4100) // Olympus E-PL2, E-PL1, E-P2, E-P1, E-620, E-600, E-5, E-30; width -= 4; if (width == 4080) // Olympus E-PM1, E-PL3, E-P3; width -= 24; if (width == 10400) // Olympus PEN-F, E-M1-II, E-M1-III, E-M1X width -= 12; if (width == 8200) // E-M1-III in 50Mp mode, E-M1X width -= 30; if (width == 9280) { // Olympus E-M5 Mark II; width -= 6; height -= 6; } if (load_raw == &LibRaw::unpacked_load_raw) load_flags = 4; tiff_bps = 12; if ((OlyID == OlyID_E_300) || (OlyID == OlyID_E_500)) { width -= 20; if (load_raw == &LibRaw::unpacked_load_raw) { maximum = 0xfc3; memset(cblack, 0, sizeof cblack); } } else if (OlyID == OlyID_STYLUS_1) { width -= 16; maximum = 0xfff; } else if (OlyID == OlyID_E_330) { width -= 30; if (load_raw == &LibRaw::unpacked_load_raw) maximum = 0xf79; } else if (OlyID == OlyID_SP_550UZ) { thumb_length = flen - (thumb_offset = 0xa39800); thumb_height = 480; thumb_width = 640; } else if (OlyID == OlyID_TG_4) { width -= 16; } else if ((OlyID == OlyID_TG_5) || (OlyID == OlyID_TG_6)) { width -= 26; } } } else if (makeIs(LIBRAW_CAMERAMAKER_RoverShot) && (fsize == 6291456)) { // RoverShot 3320AF fseek(ifp, 0x300000, SEEK_SET); if ((order = guess_byte_order(0x10000)) == 0x4d4d) { height -= (top_margin = 16); width -= (left_margin = 28); maximum = 0xf5c0; strcpy(make, "ISG"); model[0] = 0; } } else if (makeIs(LIBRAW_CAMERAMAKER_Fujifilm)) { if (!strcmp(model, "S2Pro")) { height = 2144; width = 2880; flip = 6; } else if (load_raw != &LibRaw::packed_load_raw && strncmp(model, "X-", 2) && filters >= 1000) // Bayer and not an X-model maximum = (is_raw == 2 && shot_select) ? 0x2f00 : 0x3e00; if (FujiCropMode == 1) { // FF crop on GFX width = raw_width; height = raw_height; } else if (FujiCropMode == 4) { /* electronic shutter, high speed mode (1.25x crop) */ height = raw_height; } top_margin = (raw_height >= height) ? (raw_height - height) >> 2 << 1 : 0; left_margin = (raw_width >= width) ? 
(raw_width - width) >> 2 << 1 : 0; if (!strcmp(model, "X-T3") || !strcmp(model, "X-T4") || !strcmp(model, "X100V") || !strcmp(model, "X-T30") || !strcmp(model, "X-Pro3")) { top_margin = 0; if (FujiCropMode == 0) { top_margin = 6; height = 4170; left_margin = 0; width = 6246; } else if (FujiCropMode == 4) { /* electronic shutter, high speed mode (1.25x crop) */ left_margin = 624; width = 5004; } } if (width == 2848 || // Fujifilm X-S1, X10, XF1 width == 3664) // Fujifilm "HS10 HS11" filters = 0x16161616; if (width == 4032 || // Fujifilm X20, X30, XQ1, XQ2 width == 4952) // Fujifilm X-A1, X-A2, X-E1, X-M1, X-Pro1 left_margin = 0; if (width == 3328 && (width -= 66)) // Fujifilm F550EXR, F600EXR, F770EXR, F800EXR, F900EXR, // HS20EXR, HS30EXR, HS33EXR, HS50EXR left_margin = 34; if (width == 4936) // Fujifilm X-E2S, X-E2, X-T10, X-T1, X100S, X100T, X70 left_margin = 4; if (width == 6032) // Fujifilm X100F, X-T2, X-T20, X-Pro2, X-H1, X-E3 left_margin = 0; if (!strcmp(normalized_model, "DBP for GX680")) { /* 7712 2752 -> 5504 3856 */ /* width = 688; height = 30848; raw_width = 688; raw_height = 30848; */ raw_width = 5504; raw_height = 3856; left_margin = 32; top_margin = 8; width = raw_width - left_margin - 32; height = raw_height - top_margin - 8; load_raw = &LibRaw::unpacked_load_raw_FujiDBP; // maximum = 0x0fff; filters = 0x16161616; load_flags = 0; flip = 6; } if (!strcmp(model, "HS50EXR") || !strcmp(model, "F900EXR")) { width += 2; left_margin = 0; filters = 0x16161616; } if (!strncmp(model, "GFX 50", 6)) { left_margin = 0; top_margin = 0; } if (!strncmp(model, "GFX 100", 7)) { left_margin = 0; width = raw_width - 146; height = raw_height - (top_margin = 2); if (tiff_bps == 16) maximum = 0xffff; } if (!strcmp(normalized_model, "S5100")) { height -= (top_margin = 6); } if (fuji_layout) raw_width *= is_raw; if (filters == 9) FORC(36) ((char *)xtrans)[c] = xtrans_abs[(c / 6 + top_margin) % 6][(c + left_margin) % 6]; } else if (makeIs(LIBRAW_CAMERAMAKER_Konica)) { if (!strcmp(model, "KD-400Z")) { height = 1712; width = 2312; raw_width = 2336; goto konica_400z; } else if (!strcmp(model, "KD-510Z")) { goto konica_510z; } } else if (makeIs(LIBRAW_CAMERAMAKER_Minolta)) { if (fsize == 5869568) { // hack Minolta "DiMAGE Z2" load_flags = 30; } if (!load_raw && (maximum = 0xfff)) { load_raw = &LibRaw::unpacked_load_raw; } if (!strncmp(model, "DiMAGE A", 8)) // Minolta "DiMAGE A1", "DiMAGE A2", "DiMAGE A200" { if (!strcmp(model, "DiMAGE A200")) filters = 0x49494949; tiff_bps = 12; load_raw = &LibRaw::packed_load_raw; } else if (!strncmp(normalized_model, "DG-", 3)) { load_raw = &LibRaw::packed_load_raw; } else if (!strncmp(model, "DiMAGE G", 8)) // hack Minolta "DiMAGE G400", "DiMAGE G500", // "DiMAGE G530", "DiMAGE G600" { if (model[8] == '4') // DiMAGE G400 { height = 1716; width = 2304; } else if (model[8] == '5') // DiMAGE G500 / G530 { konica_510z: height = 1956; width = 2607; raw_width = 2624; } else if (model[8] == '6') // DiMAGE G600 { height = 2136; width = 2848; } data_offset += 14; filters = 0x61616161; konica_400z: load_raw = &LibRaw::unpacked_load_raw; maximum = 0x3df; order = 0x4d4d; } } else if (makeIs(LIBRAW_CAMERAMAKER_Samsung)) { if (raw_width == 4704) // Samsung NX100, NX10, NX11, { height -= top_margin = 8; width -= 2 * (left_margin = 8); load_flags = 32; } else if (!strcmp(model, "NX3000")) // Samsung NX3000; raw_width: 5600 { top_margin = 38; left_margin = 92; width = 5456; height = 3634; filters = 0x61616161; colors = 3; } else if (raw_height == 3714) // Samsung NX2000, NX300M, 
NX300, NX30, EK-GN120 { height -= top_margin = 18; left_margin = raw_width - (width = 5536); if (raw_width != 5600) left_margin = top_margin = 0; filters = 0x61616161; colors = 3; } else if (raw_width == 5632) // Samsung NX1000, NX200, NX20, NX210 { order = 0x4949; height = 3694; top_margin = 2; width = 5574 - (left_margin = 32 + tiff_bps); if (tiff_bps == 12) load_flags = 80; } else if (raw_width == 5664) // Samsung "NX mini" { height -= top_margin = 17; left_margin = 96; width = 5544; filters = 0x49494949; } else if (raw_width == 6496) // Samsung NX1, NX500 { filters = 0x61616161; if (!black && !cblack[0] && !cblack[1] && !cblack[2] && !cblack[3]) black = 1 << (tiff_bps - 7); } else if (!strcmp(model, "EX1")) // Samsung EX1; raw_width: 3688 { order = 0x4949; height -= 20; top_margin = 2; if ((width -= 6) > 3682) { height -= 10; width -= 46; top_margin = 8; } } else if (!strcmp(model, "WB2000")) // Samsung WB2000; raw_width: 3728 { order = 0x4949; height -= 3; top_margin = 2; if ((width -= 10) > 3718) { height -= 28; width -= 56; top_margin = 8; } } else if (!strcmp(model, "WB550")) // Samsung WB550; raw_width: 4000 { order = 0x4949; } else if (!strcmp(model, "EX2F")) // Samsung EX2F; raw_width: 4176 { height = 3030; width = 4040; top_margin = 15; left_margin = 24; order = 0x4949; filters = 0x49494949; load_raw = &LibRaw::unpacked_load_raw; } } else if (makeIs(LIBRAW_CAMERAMAKER_ST_Micro) && !strcmp(model, "STV680 VGA")) { black = 16; } else if (!strcmp(model, "N95")) { height = raw_height - (top_margin = 2); } else if (!strcmp(model, "640x480")) { gamma_curve(0.45, 4.5, 1, 255); } else if (makeIs(LIBRAW_CAMERAMAKER_Hasselblad)) { if (load_raw == &LibRaw::lossless_jpeg_load_raw) load_raw = &LibRaw::hasselblad_load_raw; if ((imHassy.SensorCode == 4) && !strncmp(model, "V96C", 4)) { // Hasselblad V96C strcpy(model, "V96C"); strcpy(normalized_model, model); height -= (top_margin = 6); width -= (left_margin = 3) + 7; filters = 0x61616161; } else if ((imHassy.SensorCode == 9) && imHassy.uncropped) { // various Hasselblad '-39' height = 5444; width = 7248; top_margin = 4; left_margin = 7; filters = 0x61616161; } else if ((imHassy.SensorCode == 13) && imHassy.uncropped) { // Hasselblad H4D-40, H5D-40 height -= 84; width -= 82; top_margin = 4; left_margin = 41; filters = 0x61616161; } else if ((imHassy.SensorCode == 11) && imHassy.uncropped) { // Hasselblad H5D-50 height -= 84; width -= 82; top_margin = 4; left_margin = 41; filters = 0x61616161; } else if ((imHassy.SensorCode == 15) && !imHassy.SensorSubCode && // Hasselblad H5D-50c imHassy.uncropped) { left_margin = 52; top_margin = 100; width = 8272; height = 6200; black = 256; } else if ((imHassy.SensorCode == 15) && (imHassy.SensorSubCode == 2) && // various Hasselblad X1D cameras imHassy.uncropped) { top_margin = 96; height -= 96; left_margin = 48; width -= 106; maximum = 0xffff; tiff_bps = 16; } else if ((imHassy.SensorCode == 12) && imHassy.uncropped) { // Hasselblad H4D-60 if (black > 500) { // (imHassy.format == LIBRAW_HF_FFF) top_margin = 12; left_margin = 44; width = 8956; height = 6708; memset(cblack, 0, sizeof(cblack)); black = 512; } else { // (imHassy.format == LIBRAW_HF_3FR) top_margin = 8; left_margin = 40; width = 8964; height = 6716; black += load_flags = 256; maximum = 0x8101; } } else if ((imHassy.SensorCode == 17) && imHassy.uncropped) { // Hasselblad H6D-100c, A6D-100c left_margin = 64; width = 11608; top_margin = 108; height = raw_height - top_margin; } if (tiff_samples > 1) { is_raw = tiff_samples + 1; if (!shot_select && 
!half_size) filters = 0; } } else if (makeIs(LIBRAW_CAMERAMAKER_Sinar)) { if (!load_raw) load_raw = &LibRaw::unpacked_load_raw; if (is_raw > 1 && !shot_select) filters = 0; maximum = 0x3fff; } if (load_raw == &LibRaw::sinar_4shot_load_raw) { if (is_raw > 1 && !shot_select) filters = 0; } else if (makeIs(LIBRAW_CAMERAMAKER_Leaf)) { maximum = 0x3fff; fseek(ifp, data_offset, SEEK_SET); if (ljpeg_start(&jh, 1) && jh.bits == 15) maximum = 0x1fff; if (tiff_samples > 1) filters = 0; if (tiff_samples > 1 || tile_length < raw_height) { load_raw = &LibRaw::leaf_hdr_load_raw; raw_width = tile_width; } if ((width | height) == 2048) { if (tiff_samples == 1) { filters = 1; strcpy(cdesc, "RBTG"); strcpy(model, "CatchLight"); strcpy(normalized_model, model); top_margin = 8; left_margin = 18; height = 2032; width = 2016; } else { strcpy(model, "DCB2"); strcpy(normalized_model, model); top_margin = 10; left_margin = 16; height = 2028; width = 2022; } } else if (width + height == 3144 + 2060) { if (!model[0]) { strcpy(model, "Cantare"); strcpy(normalized_model, model); } if (width > height) { top_margin = 6; left_margin = 32; height = 2048; width = 3072; filters = 0x61616161; } else { left_margin = 6; top_margin = 32; width = 2048; height = 3072; filters = 0x16161616; } if (!cam_mul[0] || model[0] == 'V') filters = 0; else is_raw = tiff_samples; } else if (width == 2116) // Leaf "Valeo 6" { strcpy(model, "Valeo 6"); strcpy(normalized_model, model); height -= 2 * (top_margin = 30); width -= 2 * (left_margin = 55); filters = 0x49494949; } else if (width == 3171) // Leaf "Valeo 6" { strcpy(model, "Valeo 6"); strcpy(normalized_model, model); height -= 2 * (top_margin = 24); width -= 2 * (left_margin = 24); filters = 0x16161616; } } else if (makeIs(LIBRAW_CAMERAMAKER_Panasonic)) { if (raw_width > 0 && ((flen - data_offset) / (raw_width * 8 / 7) == raw_height)) load_raw = &LibRaw::panasonic_load_raw; if (!load_raw) { load_raw = &LibRaw::unpacked_load_raw; load_flags = 4; } zero_is_bad = 1; if ((height += 12) > raw_height) height = raw_height; for (i = 0; i < int(sizeof pana / sizeof *pana); i++) if (raw_width == pana[i][0] && raw_height == pana[i][1]) { left_margin = pana[i][2]; top_margin = pana[i][3]; width += pana[i][4]; height += pana[i][5]; } if (!tiff_bps && pana_bpp >= 12 && pana_bpp <= 14) tiff_bps = pana_bpp; filters = 0x01010101U * (uchar) "\x94\x61\x49\x16"[((filters - 1) ^ (left_margin & 1) ^ (top_margin << 1)) & 3]; } else if (makeIs(LIBRAW_CAMERAMAKER_Contax) && !strcmp(model, "N Digital")) { height = 2047; width = 3072; filters = 0x61616161; data_offset = 0x1a00; load_raw = &LibRaw::packed_load_raw; } else if (makeIs(LIBRAW_CAMERAMAKER_Sony)) { if (!strcmp(model, "DSC-F828")) { // Sony DSC-F828 width = 3288; left_margin = 5; mask[1][3] = -17; data_offset = 862144; load_raw = &LibRaw::sony_load_raw; filters = 0x9c9c9c9c; colors = 4; strcpy(cdesc, "RGBE"); } else if (!strcmp(model, "DSC-V3")) { // Sony DSC-V3 width = 3109; left_margin = 59; mask[0][1] = 9; data_offset = 787392; load_raw = &LibRaw::sony_load_raw; } else if (raw_width == 3984) { // Sony DSC-R1; width = 3925; order = 0x4d4d; } else if (raw_width == 4288) { // Sony ILCE-7S, ILCE-7SM2, DSLR-A700, DSLR-A500; width -= 32; } else if (raw_width == 4600) { // Sony DSLR-A290, DSLR-A350, DSLR-A380; if (!strcmp(model, "DSLR-A350")) height -= 4; black = 0; } else if (raw_width == 4928) { // Sony DSLR-A580, NEX-C3, SLT-A35, DSC-HX99, SLT-A55, // NEX-5N, SLT-A37, SLT-A57, NEX-F3, NEX-6, NEX-5R, NEX-3N, NEX-5T; if (height < 3280) width -= 8; } else if 
(raw_width == 5504) { // Sony ILCE-3000, SLT-A58, DSC-RX100M3, ILCE-QX1, // DSC-RX10M4, DSC-RX100M6, DSC-RX100, DSC-RX100M2, DSC-RX10, // ILCE-5000, DSC-RX100M4, DSC-RX10M2, DSC-RX10M3, // DSC-RX100M5, DSC-RX100M5A; width -= height > 3664 ? 8 : 32; } else if (raw_width == 6048) { // Sony SLT-A65, DSC-RX1, SLT-A77, DSC-RX1, ILCA-77M2, // ILCE-7M3, NEX-7, SLT-A99, ILCE-7, DSC-RX1R, ILCE-6000, // ILCE-5100, ILCE-7M2, ILCA-68, ILCE-6300, ILCE-9, // ILCE-6500, ILCE-6400; width -= 24; if (strstr(normalized_model, "RX1") || strstr(normalized_model, "A99")) width -= 6; } else if (raw_width == 7392) { // Sony ILCE-7R; width -= 30; } else if (raw_width == 8000) { // Sony ILCE-7RM2, ILCE-7RM2, ILCE-7RM3, DSC-RX1RM2, ILCA-99M2; width -= 32; } else if (raw_width == 9600) { // Sony ILCE-7RM4 width -= 32; } else if (!strcmp(model, "DSLR-A100")) { if (width == 3880) { height--; width = ++raw_width; } else { height -= 4; width -= 4; order = 0x4d4d; load_flags = 2; } filters = 0x61616161; } } else if (!strcmp(model, "PIXL")) { height -= top_margin = 4; width -= left_margin = 32; gamma_curve(0, 7, 1, 255); } else if (makeIs(LIBRAW_CAMERAMAKER_Kodak)) { if (!strncasecmp(model, "EasyShare", 9)) { data_offset = data_offset < 0x15000 ? 0x15000 : 0x17000; load_raw = &LibRaw::packed_load_raw; } else if (!strcmp(model, "C603") || !strcmp(model, "C330") || !strcmp(model, "12MP")) { order = 0x4949; if (filters && data_offset) { fseek(ifp, data_offset < 4096 ? 168 : 5252, SEEK_SET); read_shorts(curve, 256); } else gamma_curve(0, 3.875, 1, 255); load_raw = filters ? &LibRaw::eight_bit_load_raw : strcmp(model, "C330") ? &LibRaw::kodak_c603_load_raw : &LibRaw::kodak_c330_load_raw; load_flags = tiff_bps > 16; tiff_bps = 8; } else { if (!strncmp(model, "NC2000", 6) || !strncmp(model, "EOSDCS", 6) || !strncmp(model, "DCS4", 4)) { width -= 4; left_margin = 2; } else if (!strcmp(model, "DCS660M")) { black = 214; } else if (!strcmp(model, "EOS D2000C")) { filters = 0x61616161; if (!black) black = curve[200]; } if (filters == UINT_MAX) filters = 0x61616161; if (!strcmp(model + 4, "20X")) strcpy(cdesc, "MYCY"); if (!strcmp(model, "DC25")) { data_offset = 15424; } if (!strncmp(model, "DC2", 3)) { raw_height = 2 + (height = 242); if (!strncmp(model, "DC290", 5)) iso_speed = 100; if (!strncmp(model, "DC280", 5)) iso_speed = 70; if (flen < 100000) { raw_width = 256; width = 249; pixel_aspect = (4.0 * height) / (3.0 * width); } else { raw_width = 512; width = 501; pixel_aspect = (493.0 * height) / (373.0 * width); } top_margin = left_margin = 1; colors = 4; filters = 0x8d8d8d8d; simple_coeff(1); pre_mul[1] = 1.179; pre_mul[2] = 1.209; pre_mul[3] = 1.036; load_raw = &LibRaw::eight_bit_load_raw; } else if (!strcmp(model, "DC40")) { height = 512; width = 768; data_offset = 1152; load_raw = &LibRaw::kodak_radc_load_raw; tiff_bps = 12; FORC4 cam_mul[c] = 1.0f; } else if (!strcmp(model, "DC50")) { height = 512; width = 768; iso_speed = 84; data_offset = 19712; load_raw = &LibRaw::kodak_radc_load_raw; FORC4 cam_mul[c] = 1.0f; } else if (!strcmp(model, "DC120")) { raw_height = height = 976; raw_width = width = 848; iso_speed = 160; pixel_aspect = height / 0.75 / width; load_raw = tiff_compress == 7 ? 
&LibRaw::kodak_jpeg_load_raw : &LibRaw::kodak_dc120_load_raw; } else if (!strcmp(model, "DCS200")) { thumb_height = 128; thumb_width = 192; thumb_offset = 6144; thumb_misc = 360; iso_speed = 140; write_thumb = &LibRaw::layer_thumb; black = 17; } } } else if (makeIs(LIBRAW_CAMERAMAKER_Logitech) && !strcmp(model, "Fotoman Pixtura")) { height = 512; width = 768; data_offset = 3632; load_raw = &LibRaw::kodak_radc_load_raw; filters = 0x61616161; simple_coeff(2); } else if (makeIs(LIBRAW_CAMERAMAKER_Apple) && !strncmp(model, "QuickTake", 9)) { if (head[5]) strcpy(model + 10, "200"); fseek(ifp, 544, SEEK_SET); height = get2(); width = get2(); data_offset = (get4(), get2()) == 30 ? 738 : 736; if (height > width) { SWAP(height, width); fseek(ifp, data_offset - 6, SEEK_SET); flip = ~get2() & 3 ? 5 : 6; } filters = 0x61616161; } else if (makeIs(LIBRAW_CAMERAMAKER_Rollei) && !load_raw) { switch (raw_width) { case 1316: // Rollei d530flex height = 1030; width = 1300; top_margin = 1; left_margin = 6; break; case 2568: height = 1960; width = 2560; top_margin = 2; left_margin = 8; } filters = 0x16161616; load_raw = &LibRaw::rollei_load_raw; } else if (!strcmp(model, "GRAS-50S5C")) { height = 2048; width = 2440; load_raw = &LibRaw::unpacked_load_raw; data_offset = 0; filters = 0x49494949; order = 0x4949; maximum = 0xfffC; } else if (!strcmp(model, "BB-500CL")) { height = 2058; width = 2448; load_raw = &LibRaw::unpacked_load_raw; data_offset = 0; filters = 0x94949494; order = 0x4949; maximum = 0x3fff; } else if (!strcmp(model, "BB-500GE")) { height = 2058; width = 2456; load_raw = &LibRaw::unpacked_load_raw; data_offset = 0; filters = 0x94949494; order = 0x4949; maximum = 0x3fff; } else if (!strcmp(model, "SVS625CL")) { height = 2050; width = 2448; load_raw = &LibRaw::unpacked_load_raw; data_offset = 0; filters = 0x94949494; order = 0x4949; maximum = 0x0fff; } }
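// Note on the fixed-geometry entries above (GRAS-50S5C, BB-500CL, BB-500GE,
// SVS625CL): each one hardcodes the sensor dimensions, sets data_offset to 0,
// forces little-endian byte order (0x4949) and reads the frame with
// unpacked_load_raw; the entries differ only in the Bayer pattern encoded in
// `filters` and in `maximum`, the sensor's saturation (clipping) level.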
/* -*- C++ -*- * Copyright 2019-2020 LibRaw LLC (info@libraw.org) * LibRaw uses code from dcraw.c -- Dave Coffin's raw photo decoder, dcraw.c is copyright 1997-2018 by Dave Coffin, dcoffin a cybercom o net. LibRaw do not use RESTRICTED code from dcraw.c LibRaw is free software; you can redistribute it and/or modify it under the terms of the one of two licenses as you choose: 1. GNU LESSER GENERAL PUBLIC LICENSE version 2.1 (See file LICENSE.LGPL provided in LibRaw distribution archive for details). 2. COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 (See file LICENSE.CDDL provided in LibRaw distribution archive for details). */ #include "../../internal/dcraw_defs.h" #include "../../internal/libraw_cameraids.h" // clang-format on static const struct { const int CorpId; const char *CorpName; } CorpTable[] = { {LIBRAW_CAMERAMAKER_Agfa, "AgfaPhoto"}, {LIBRAW_CAMERAMAKER_Apple, "Apple"}, {LIBRAW_CAMERAMAKER_Broadcom, "Broadcom"}, {LIBRAW_CAMERAMAKER_Canon, "Canon"}, {LIBRAW_CAMERAMAKER_Casio, "Casio"}, {LIBRAW_CAMERAMAKER_CINE, "CINE"}, {LIBRAW_CAMERAMAKER_Epson, "Epson"}, {LIBRAW_CAMERAMAKER_Fujifilm, "Fujifilm"}, {LIBRAW_CAMERAMAKER_Mamiya, "Mamiya"}, {LIBRAW_CAMERAMAKER_Motorola, "Motorola"}, {LIBRAW_CAMERAMAKER_Kodak, "Kodak"}, {LIBRAW_CAMERAMAKER_Konica, "Konica"}, {LIBRAW_CAMERAMAKER_Minolta, "Minolta"}, {LIBRAW_CAMERAMAKER_Leica, "Leica"}, {LIBRAW_CAMERAMAKER_Nikon, "Nikon"}, {LIBRAW_CAMERAMAKER_Nokia, "Nokia"}, {LIBRAW_CAMERAMAKER_Olympus, "Olympus"}, {LIBRAW_CAMERAMAKER_Ricoh, "Ricoh"}, {LIBRAW_CAMERAMAKER_Pentax, "Pentax"}, {LIBRAW_CAMERAMAKER_PhaseOne, "Phase One"}, {LIBRAW_CAMERAMAKER_PhaseOne, "PhaseOne"}, {LIBRAW_CAMERAMAKER_Samsung, "Samsung"}, {LIBRAW_CAMERAMAKER_Sigma, "Sigma"}, {LIBRAW_CAMERAMAKER_Sinar, "Sinar"}, {LIBRAW_CAMERAMAKER_Sony, "Sony"}, {LIBRAW_CAMERAMAKER_YI, "YI"}, // add corp. 
names below {LIBRAW_CAMERAMAKER_Alcatel, "Alcatel"}, {LIBRAW_CAMERAMAKER_Aptina, "Aptina"}, {LIBRAW_CAMERAMAKER_AVT, "AVT"}, {LIBRAW_CAMERAMAKER_Baumer, "Baumer"}, {LIBRAW_CAMERAMAKER_Clauss, "Clauss"}, {LIBRAW_CAMERAMAKER_Contax, "Contax"}, {LIBRAW_CAMERAMAKER_Creative, "Creative"}, {LIBRAW_CAMERAMAKER_DJI, "DJI"}, {LIBRAW_CAMERAMAKER_Foculus, "Foculus"}, {LIBRAW_CAMERAMAKER_Generic, "Generic"}, {LIBRAW_CAMERAMAKER_Gione, "Gione"}, {LIBRAW_CAMERAMAKER_GITUP, "GITUP"}, {LIBRAW_CAMERAMAKER_Hasselblad, "Hasselblad"}, {LIBRAW_CAMERAMAKER_HTC, "HTC"}, {LIBRAW_CAMERAMAKER_I_Mobile, "I_Mobile"}, {LIBRAW_CAMERAMAKER_Imacon, "Imacon"}, {LIBRAW_CAMERAMAKER_JK_Imaging, "JK Imaging"}, // Kodak {LIBRAW_CAMERAMAKER_Leaf, "Leaf"}, {LIBRAW_CAMERAMAKER_Lenovo, "Lenovo"}, {LIBRAW_CAMERAMAKER_LG, "LG"}, {LIBRAW_CAMERAMAKER_Logitech, "Logitech"}, {LIBRAW_CAMERAMAKER_Matrix, "Matrix"}, {LIBRAW_CAMERAMAKER_Meizu, "Meizu"}, {LIBRAW_CAMERAMAKER_Micron, "Micron"}, {LIBRAW_CAMERAMAKER_NGM, "NGM"}, {LIBRAW_CAMERAMAKER_OmniVison, "OmniVison"}, {LIBRAW_CAMERAMAKER_Panasonic, "Panasonic"}, {LIBRAW_CAMERAMAKER_Photron, "Photron"}, {LIBRAW_CAMERAMAKER_Pixelink, "Pixelink"}, {LIBRAW_CAMERAMAKER_Polaroid, "Polaroid"}, {LIBRAW_CAMERAMAKER_Rollei, "Rollei"}, {LIBRAW_CAMERAMAKER_RoverShot, "RoverShot"}, {LIBRAW_CAMERAMAKER_SMaL, "SMaL"}, {LIBRAW_CAMERAMAKER_ST_Micro, "ST Micro"}, {LIBRAW_CAMERAMAKER_THL, "THL"}, {LIBRAW_CAMERAMAKER_Xiaomi, "Xiaomi"}, {LIBRAW_CAMERAMAKER_XIAOYI, "Xiayi"}, {LIBRAW_CAMERAMAKER_Yuneec, "Yuneec"}, {LIBRAW_CAMERAMAKER_DXO, "DxO"}, {LIBRAW_CAMERAMAKER_RED, "Red"}, {LIBRAW_CAMERAMAKER_PhotoControl, "Photo Control"}, {LIBRAW_CAMERAMAKER_Google, "Google"}, {LIBRAW_CAMERAMAKER_GoPro, "GoPro"}, {LIBRAW_CAMERAMAKER_Parrot, "Parrot"}, {LIBRAW_CAMERAMAKER_Zeiss, "Zeiss"} }; // clang-format on int LibRaw::setMakeFromIndex(unsigned makei) { if (makei <= LIBRAW_CAMERAMAKER_Unknown || makei >= LIBRAW_CAMERAMAKER_TheLastOne) return 0; for (int i = 0; i < int(sizeof CorpTable / sizeof *CorpTable); i++) if ((unsigned)CorpTable[i].CorpId == makei) { strcpy(normalized_make, CorpTable[i].CorpName); maker_index = makei; return 1; } return 0; } const char *LibRaw::cameramakeridx2maker(unsigned maker) { for (int i = 0; i < int(sizeof CorpTable / sizeof *CorpTable); i++) if((unsigned)CorpTable[i].CorpId == maker) return CorpTable[i].CorpName; return 0; } void LibRaw::fixupArri() { struct alist_t { const char *a_model; const char *a_software; ushort a_width,a_height; int a_black; unsigned a_filters; float a_aspect; } alist[] = { {"ALEXA65", "Alexa65 XT", 6560 ,3100, 256,0x49494949,1.f}, {"ALEXALF", "Alexa LF Plus W", 3840 ,2160, 256,0x49494949,1.0f }, {"ALEXALF", "Alexa LF Plus W", 4448 ,1856, 256,0x49494949,0.75f }, {"ALEXALF", "Alexa LF Plus W", 4448 ,3096, 256,0x49494949,1.f }, {"ALEXA", "Alexa Plus 4:3 SXT", 2880 ,1620, 256,0x61616161,.75f}, {"ALEXA", "Alexa Plus 4:3 SXT", 3168 ,1782, 256,0x61616161,0.75f}, {"ALEXA", "Alexa Plus 4:3 SXT", 3424 ,2202, 256,0x61616161,1.f}, {"ALEXA", "Alexa Plus 4:3 SXT", 2592 ,2160, 256,0x61616161,1.12f}, {"ALEXA", "Alexa Plus 4:3 XT", 2592 ,2160, 256,0x61616161,1.12f}, {"ALEXA", "Alexa Plus 4:3 XT", 2880 ,2160, 256,0x61616161,1.f}, {"ALEXA", "Alexa Plus 4:3 XT", 2880 ,1620, 256,0x61616161,0.75f}, {"ALEXA", "Alexa Plus 4:3 XT", 3424 ,2202, 256,0x61616161,1.f}, }; for(int i = 0; i < int(sizeof(alist)/sizeof(alist[0])); i++) if(!strncasecmp(model,alist[i].a_model,strlen(alist[i].a_model)) && software && !strncasecmp(software,alist[i].a_software,strlen(alist[i].a_software)) && width == 
alist[i].a_width && height == alist[i].a_height) { filters = alist[i].a_filters; black = alist[i].a_black; pixel_aspect = alist[i].a_aspect; strcpy(model,software); software[0]=0; return; } } /* Identify which camera created this file, and set global variables accordingly. */ void LibRaw::identify() { // clang-format off static const ushort canon[][11] = { // raw_width, raw_height, left_margin, top_margin, width_decrement, // height_decrement, mask01, mask03, mask11, // mask13, CFA_filters. { 1944, 1416, 0, 0, 48, 0 }, // 00 "PowerShot Pro90 IS" { 2144, 1560, 4, 8, 52, 2, 0, 0, 0, 25 }, // 01 "PowerShot S30", "PowerShot G1" { 2224, 1456, 48, 6, 0, 2 }, // 02 "EOS D30" { 2376, 1728, 12, 6, 52, 2 }, // 03 "PowerShot G2", "PowerShot S40", "PowerShot G3", "PowerShot S45" { 2672, 1968, 12, 6, 44, 2 }, // 04 "PowerShot G5", "PowerShot S50", "PowerShot S60" { 3152, 2068, 64, 12, 0, 0, 16 }, // 05 "EOS D60", "EOS 10D", "EOS 300D" { 3160, 2344, 44, 12, 4, 4 }, // 06 "PowerShot G6", "PowerShot S70" { 3344, 2484, 4, 6, 52, 6 }, // 07 "PowerShot Pro1" { 3516, 2328, 42, 14, 0, 0 }, // 08 "EOS 350D" { 3596, 2360, 74, 12, 0, 0 }, // 09 "EOS-1D Mark II", "EOS 20D", "EOS-1D Mark II N", "EOS 30D" { 3744, 2784, 52, 12, 8, 12 }, // 10 "PowerShot G11", "PowerShot S90", "PowerShot G12", "PowerShot S95" { 3944, 2622, 30, 18, 6, 2 }, // 11 "EOS 40D" { 3948, 2622, 42, 18, 0, 2 }, // 12 "EOS 400D", "EOS 1000D" { 3984, 2622, 76, 20, 0, 2, 14 }, // 13 "EOS-1D Mark III" { 4032, 2656, 112, 44, 10, 0 }, // 14 APS-C crop mode: "EOS 6D Mark II"??, "EOS RP" { 4104, 3048, 48, 12, 24, 12 }, // 15 "PowerShot G9" { 4116, 2178, 4, 2, 0, 0 }, // 16 ?? { 4152, 2772, 192, 12, 0, 0 }, // 17 "PowerShot SX1 IS" { 4160, 3124, 104, 11, 8, 65 }, // 18 "PowerShot S100 (new)", "PowerShot S100V", "PowerShot G15", "PowerShot S110 (new)" { 4176, 3062, 96, 17, 8, 0, 0, 16, 0, 7, 0x49 }, // 19 "PowerShot SX50 HS" { 4192, 3062, 96, 17, 24, 0, 0, 16, 0, 0, 0x49 }, // 20 "PowerShot G16", "PowerShot S120" { 4312, 2876, 22, 18, 0, 2 }, // 21 "EOS 450D" { 4352, 2850, 144, 46, 0, 0 }, // 22 APS-C crop mode: "EOS R" { 4352, 2874, 62, 18, 0, 0 }, // 23 "EOS 1100D" { 4476, 2954, 90, 34, 0, 0 }, // 24 "EOS 5D" { 4480, 3348, 12, 10, 36, 12, 0, 0, 0, 18, 0x49 }, // 25 "PowerShot G10" { 4480, 3366, 80, 50, 0, 0 }, // 26 "PowerShot G1 X Mark II" { 4496, 3366, 80, 50, 12, 0 }, // 27 "PowerShot G1 X" { 4768, 3516, 96, 16, 0, 0, 0, 16 }, // 28 "PowerShot SX60 HS" { 4832, 3204, 62, 26, 0, 0 }, // 29 "EOS 500D" { 4832, 3228, 62, 51, 0, 0 }, // 30 "EOS 50D" { 5108, 3349, 98, 13, 0, 0 }, // 31 "EOS-1Ds Mark II" { 5120, 3318, 142, 45, 62, 0 }, // 32 "EOS-1D Mark IV" { 5280, 3528, 72, 52, 0, 0 }, // 33 "EOS M10", "EOS 650D", "EOS 700D", "EOS M", "EOS 100D", "EOS M2" { 5344, 3516, 142, 51, 0, 0 }, // 34 "EOS 550D", "EOS 600D", "EOS 60D", "EOS 1200D", "EOS 1300D", "EOS 3000D" { 5344, 3584, 126, 100, 0, 2 }, // 35 "EOS-1D X", "EOS-1D C" { 5344, 3950, 98, 18, 0, 0, 0, 24, 0, 0 }, // 36 "PowerShot SX70 HS" { 5360, 3516, 158, 51, 0, 0 }, // 37 "EOS 7D" { 5568, 3708, 72, 38, 0, 0 }, // 38; "EOS 7D Mark II", "EOS 6D", "EOS 70D", "EOS-1D X MARK II" { 5632, 3710, 96, 17, 0, 0, 0, 16, 0, 0, 0x49 }, // 39 "PowerShot G7 X", "PowerShot G3 X", "PowerShot G9 X", "PowerShot G5 X", "PowerShot G7 X Mark II", "PowerShot G9 X Mark II" { 5712, 3774, 62, 20, 10, 2 }, // 40 "EOS-1Ds Mark III" { 5792, 3804, 158, 51, 0, 0 }, // 41 "EOS 5D Mark II" { 5920, 3950, 122, 80, 2, 0 }, // 42 "EOS 5D Mark III" { 6096, 4051, 76, 35, 0, 0 }, // 43 "EOS 1500D" { 6096, 4056, 72, 34, 0, 0 }, // 44 "EOS 
M3", "EOS 760D", "EOS 750D" { 6288, 4056, 264, 36, 0, 0 }, // 45 "EOS M5", "EOS M100", "EOS M6", "PowerShot G1 X Mark III", "EOS 80D", "EOS 800D", "EOS 77D", "EOS 200D", "EOS 250D", "EOS M50" { 6384, 4224, 120, 44, 0, 0 }, // 46 "EOS 6D Mark II", "EOS RP" { 6880, 4544, 136, 42, 0, 0 }, // 47 "EOS 5D Mark IV" { 6888, 4546, 146, 48, 0, 0 }, // 48 "EOS R" { 7128, 4732, 144, 72, 0, 0 }, // 49 "EOS M6 II", "EOS 90D" { 8896, 5920, 160, 64, 0, 0 }, // 50 "EOS 5DS", "EOS 5DS R" }; static const libraw_custom_camera_t const_table[] = { { 786432, 1024, 768, 0, 0, 0, 0, 0, 0x94, 0, 0, "AVT", "F-080C" }, { 1447680, 1392, 1040, 0, 0, 0, 0, 0, 0x94, 0, 0, "AVT", "F-145C" }, { 1920000, 1600, 1200, 0, 0, 0, 0, 0, 0x94, 0, 0, "AVT", "F-201C" }, { 5067304, 2588, 1958, 0, 0, 0, 0, 0, 0x94, 0, 0, "AVT", "F-510C" }, { 5067316, 2588, 1958, 0, 0, 0, 0, 0, 0x94, 0, 0, "AVT", "F-510C", 12 }, { 10134608, 2588, 1958, 0, 0, 0, 0, 9, 0x94, 0, 0, "AVT", "F-510C" }, { 10134620, 2588, 1958, 0, 0, 0, 0, 9, 0x94, 0, 0, "AVT", "F-510C", 12 }, { 16157136, 3272, 2469, 0, 0, 0, 0, 9, 0x94, 0, 0, "AVT", "F-810C" }, { 15980544, 3264, 2448, 0, 0, 0, 0, 8, 0x61, 0, 1, "AgfaPhoto", "DC-833m" }, { 9631728, 2532, 1902, 0, 0, 0, 0, 96, 0x61, 0, 0, "Alcatel", "5035D" }, { 31850496, 4608, 3456, 0, 0, 0, 0, 0, 0x94, 0, 0, "GITUP", "GIT2 4:3" }, { 23887872, 4608, 2592, 0, 0, 0, 0, 0, 0x94, 0, 0, "GITUP", "GIT2 16:9" }, { 32257024, 4624, 3488, 8, 2, 16, 2, 0, 0x94, 0, 0, "GITUP", "GIT2P 4:3" }, { 24192768, 4624, 2616, 8, 2, 16, 2, 0, 0x94, 0, 0, "GITUP", "GIT2P 16:9" }, { 18016000, 4000, 2252, 0, 0, 0, 0, 0, 0x94, 0, 0, "GITUP", "G3DUO 16:9" }, // {24000000, 4000, 3000, 0, 0, 0, 0, 0, 0x94, 0, 0, "GITUP", // "G3DUO 4:3"}, // Conflict w/ Samsung WB550 // Android Raw dumps id start // File Size in bytes Horizontal Res Vertical Flag then bayer order eg // 0x16 bbgr 0x94 rggb { 1540857, 2688, 1520, 0, 0, 0, 0, 1, 0x61, 0, 0, "Samsung", "S3" }, { 2658304, 1212, 1096, 0, 0, 0, 0, 1, 0x16, 0, 0, "LG", "G3FrontMipi" }, { 2842624, 1296, 1096, 0, 0, 0, 0, 1, 0x16, 0, 0, "LG", "G3FrontQCOM" }, { 2969600, 1976, 1200, 0, 0, 0, 0, 1, 0x16, 0, 0, "Xiaomi", "MI3wMipi" }, { 3170304, 1976, 1200, 0, 0, 0, 0, 1, 0x16, 0, 0, "Xiaomi", "MI3wQCOM" }, { 3763584, 1584, 1184, 0, 0, 0, 0, 96, 0x61, 0, 0, "I_Mobile", "I_StyleQ6" }, { 5107712, 2688, 1520, 0, 0, 0, 0, 1, 0x61, 0, 0, "OmniVisi", "UltraPixel1" }, { 5382640, 2688, 1520, 0, 0, 0, 0, 1, 0x61, 0, 0, "OmniVisi", "UltraPixel2" }, { 5664912, 2688, 1520, 0, 0, 0, 0, 1, 0x61, 0, 0, "OmniVisi", "4688" }, { 5664912, 2688, 1520, 0, 0, 0, 0, 1, 0x61, 0, 0, "OmniVisi", "4688" }, { 5364240, 2688, 1520, 0, 0, 0, 0, 1, 0x61, 0, 0, "OmniVisi", "4688" }, { 6299648, 2592, 1944, 0, 0, 0, 0, 1, 0x16, 0, 0, "OmniVisi", "OV5648" }, { 6721536, 2592, 1944, 0, 0, 0, 0, 0, 0x16, 0, 0, "OmniVisi", "OV56482" }, { 6746112, 2592, 1944, 0, 0, 0, 0, 0, 0x16, 0, 0, "HTC", "OneSV" }, { 9631728, 2532, 1902, 0, 0, 0, 0, 96, 0x61, 0, 0, "Sony", "5mp" }, { 9830400, 2560, 1920, 0, 0, 0, 0, 96, 0x61, 0, 0, "NGM", "ForwardArt" }, { 10186752, 3264, 2448, 0, 0, 0, 0, 1, 0x94, 0, 0, "Sony", "IMX219-mipi 8mp" }, { 10223360, 2608, 1944, 0, 0, 0, 0, 96, 0x16, 0, 0, "Sony", "IMX" }, { 10782464, 3282, 2448, 0, 0, 0, 0, 0, 0x16, 0, 0, "HTC", "MyTouch4GSlide" }, { 10788864, 3282, 2448, 0, 0, 0, 0, 0, 0x16, 0, 0, "Xperia", "L" }, { 15967488, 3264, 2446, 0, 0, 0, 0, 96, 0x16, 0, 0, "OmniVison", "OV8850" }, { 16224256, 4208, 3082, 0, 0, 0, 0, 1, 0x16, 0, 0, "LG", "G3MipiL" }, { 16424960, 4208, 3120, 0, 0, 0, 0, 1, 0x16, 0, 0, "IMX135", "MipiL" }, { 17326080, 
4164, 3120, 0, 0, 0, 0, 1, 0x16, 0, 0, "LG", "G3LQCom" }, { 17522688, 4212, 3120, 0, 0, 0, 0, 0, 0x16, 0, 0, "Sony", "IMX135-QCOM" }, { 19906560, 4608, 3456, 0, 0, 0, 0, 1, 0x16, 0, 0, "Gione", "E7mipi" }, { 19976192, 5312, 2988, 0, 0, 0, 0, 1, 0x16, 0, 0, "LG", "G4" }, { 20389888, 4632, 3480, 0, 0, 0, 0, 1, 0x16, 0, 0, "Xiaomi", "RedmiNote3Pro" }, { 20500480, 4656, 3496, 0, 0, 0, 0, 1, 0x94, 0, 0, "Sony", "IMX298-mipi 16mp" }, { 21233664, 4608, 3456, 0, 0, 0, 0, 1, 0x16, 0, 0, "Gione", "E7qcom" }, { 26023936, 4192, 3104, 0, 0, 0, 0, 96, 0x94, 0, 0, "THL", "5000" }, { 26257920, 4208, 3120, 0, 0, 0, 0, 96, 0x94, 0, 0, "Sony", "IMX214" }, { 26357760, 4224, 3120, 0, 0, 0, 0, 96, 0x61, 0, 0, "OV", "13860" }, { 41312256, 5248, 3936, 0, 0, 0, 0, 96, 0x61, 0, 0, "Meizu", "MX4" }, { 42923008, 5344, 4016, 0, 0, 0, 0, 96, 0x61, 0, 0, "Sony", "IMX230" }, // Android Raw dumps id end { 20137344, 3664, 2748, 0, 0, 0, 0, 0x40, 0x49, 0, 0, "Aptina", "MT9J003", 0xffff }, { 2868726, 1384, 1036, 0, 0, 0, 0, 64, 0x49, 0, 8, "Baumer", "TXG14", 1078 }, { 5298000, 2400, 1766, 12, 12, 44, 2, 40, 0x94, 0, 2, "Canon", "PowerShot SD300" }, // chdk hack { 6553440, 2664, 1968, 4, 4, 44, 4, 40, 0x94, 0, 2, "Canon", "PowerShot A460" }, // chdk hack { 6573120, 2672, 1968, 12, 8, 44, 0, 40, 0x94, 0, 2, "Canon", "PowerShot A610" }, // chdk hack { 6653280, 2672, 1992, 10, 6, 42, 2, 40, 0x94, 0, 2, "Canon", "PowerShot A530" }, // chdk hack { 7710960, 2888, 2136, 44, 8, 4, 0, 40, 0x94, 0, 2, "Canon", "PowerShot S3 IS" }, // chdk hack { 9219600, 3152, 2340, 36, 12, 4, 0, 40, 0x94, 0, 2, "Canon", "PowerShot A620" }, // chdk hack { 9243240, 3152, 2346, 12, 7, 44, 13, 40, 0x49, 0, 2, "Canon", "PowerShot A470" }, // chdk hack { 10341600, 3336, 2480, 6, 5, 32, 3, 40, 0x94, 0, 2, "Canon", "PowerShot A720 IS" }, // chdk hack { 10383120, 3344, 2484, 12, 6, 44, 6, 40, 0x94, 0, 2, "Canon", "PowerShot A630" }, // chdk hack { 12945240, 3736, 2772, 12, 6, 52, 6, 40, 0x94, 0, 2, "Canon", "PowerShot A640" }, // chdk hack { 15636240, 4104, 3048, 48, 12, 24, 12, 40, 0x94, 0, 2, "Canon", "PowerShot A650" }, // chdk hack { 15467760, 3720, 2772, 6, 12, 30, 0, 40, 0x94, 0, 2, "Canon", "PowerShot SX110 IS" }, // chdk hack { 15534576, 3728, 2778, 12, 9, 44, 9, 40, 0x94, 0, 2, "Canon", "PowerShot SX120 IS" }, // chdk hack { 18653760, 4080, 3048, 24, 12, 24, 12, 40, 0x94, 0, 2, "Canon", "PowerShot SX20 IS" }, // chdk hack { 18763488, 4104, 3048, 10, 22, 82, 22, 8, 0x49, 0, 0, "Canon", "PowerShot D10" }, // ? chdk hack ? 
{ 19131120, 4168, 3060, 92, 16, 4, 1, 40, 0x94, 0, 2, "Canon", "PowerShot SX220 HS" }, // chdk hack { 21936096, 4464, 3276, 25, 10, 73, 12, 40, 0x16, 0, 2, "Canon", "PowerShot SX30 IS" }, // chdk hack { 24724224, 4704, 3504, 8, 16, 56, 8, 40, 0x49, 0, 2, "Canon", "PowerShot A3300 IS" }, // chdk hack { 30858240, 5248, 3920, 8, 16, 56, 16, 40, 0x94, 0, 2, "Canon", "IXUS 160" }, // chdk hack { 1976352, 1632, 1211, 0, 2, 0, 1, 0, 0x94, 0, 1, "Casio", "QV-2000UX" }, { 3217760, 2080, 1547, 0, 0, 10, 1, 0, 0x94, 0, 1, "Casio", "QV-3*00EX" }, { 6218368, 2585, 1924, 0, 0, 9, 0, 0, 0x94, 0, 1, "Casio", "QV-5700" }, { 7816704, 2867, 2181, 0, 0, 34, 36, 0, 0x16, 0, 1, "Casio", "EX-Z60" }, { 2937856, 1621, 1208, 0, 0, 1, 0, 0, 0x94, 7, 13, "Casio", "EX-S20" }, { 4948608, 2090, 1578, 0, 0, 32, 34, 0, 0x94, 7, 1, "Casio", "EX-S100" }, { 6054400, 2346, 1720, 2, 0, 32, 0, 0, 0x94, 7, 1, "Casio", "QV-R41" }, { 7426656, 2568, 1928, 0, 0, 0, 0, 0, 0x94, 0, 1, "Casio", "EX-P505" }, { 7530816, 2602, 1929, 0, 0, 22, 0, 0, 0x94, 7, 1, "Casio", "QV-R51" }, { 7542528, 2602, 1932, 0, 0, 32, 0, 0, 0x94, 7, 1, "Casio", "EX-Z50" }, { 7562048, 2602, 1937, 0, 0, 25, 0, 0, 0x16, 7, 1, "Casio", "EX-Z500" }, { 7753344, 2602, 1986, 0, 0, 32, 26, 0, 0x94, 7, 1, "Casio", "EX-Z55" }, { 9313536, 2858, 2172, 0, 0, 14, 30, 0, 0x94, 7, 1, "Casio", "EX-P600" }, { 10834368, 3114, 2319, 0, 0, 27, 0, 0, 0x94, 0, 1, "Casio", "EX-Z750" }, { 10843712, 3114, 2321, 0, 0, 25, 0, 0, 0x94, 0, 1, "Casio", "EX-Z75" }, { 10979200, 3114, 2350, 0, 0, 32, 32, 0, 0x94, 7, 1, "Casio", "EX-P700" }, { 12310144, 3285, 2498, 0, 0, 6, 30, 0, 0x94, 0, 1, "Casio", "EX-Z850" }, { 12489984, 3328, 2502, 0, 0, 47, 35, 0, 0x94, 0, 1, "Casio", "EX-Z8" }, { 15499264, 3754, 2752, 0, 0, 82, 0, 0, 0x94, 0, 1, "Casio", "EX-Z1050" }, { 18702336, 4096, 3044, 0, 0, 24, 0, 80, 0x94, 7, 1, "Casio", "EX-ZR100" }, { 7684000, 2260, 1700, 0, 0, 0, 0, 13, 0x94, 0, 1, "Casio", "QV-4000" }, { 787456, 1024, 769, 0, 1, 0, 0, 0, 0x49, 0, 0, "Creative", "PC-CAM 600" }, { 28829184, 4384, 3288, 0, 0, 0, 0, 36, 0x61, 0, 0, "DJI" }, { 15151104, 4608, 3288, 0, 0, 0, 0, 0, 0x94, 0, 0, "Matrix" }, { 3840000, 1600, 1200, 0, 0, 0, 0, 65, 0x49, 0, 0, "Foculus", "531C" }, { 307200, 640, 480, 0, 0, 0, 0, 0, 0x94, 0, 0, "Generic" }, { 62464, 256, 244, 1, 1, 6, 1, 0, 0x8d, 0, 0, "Kodak", "DC20" }, { 124928, 512, 244, 1, 1, 10, 1, 0, 0x8d, 0, 0, "Kodak", "DC20" }, { 1652736, 1536, 1076, 0, 52, 0, 0, 0, 0x61, 0, 0, "Kodak", "DCS200" }, { 4159302, 2338, 1779, 1, 33, 1, 2, 0, 0x94, 0, 0, "Kodak", "C330" }, { 4162462, 2338, 1779, 1, 33, 1, 2, 0, 0x94, 0, 0, "Kodak", "C330", 3160 }, { 2247168, 1232, 912, 0, 0, 16, 0, 0, 0x00, 0, 0, "Kodak", "C330" }, { 3370752, 1232, 912, 0, 0, 16, 0, 0, 0x00, 0, 0, "Kodak", "C330" }, { 6163328, 2864, 2152, 0, 0, 0, 0, 0, 0x94, 0, 0, "Kodak", "C603" }, { 6166488, 2864, 2152, 0, 0, 0, 0, 0, 0x94, 0, 0, "Kodak", "C603", 3160 }, { 460800, 640, 480, 0, 0, 0, 0, 0, 0x00, 0, 0, "Kodak", "C603" }, { 9116448, 2848, 2134, 0, 0, 0, 0, 0, 0x00, 0, 0, "Kodak", "C603" }, { 12241200, 4040, 3030, 2, 0, 0, 13, 0, 0x49, 0, 0, "Kodak", "12MP" }, { 12272756, 4040, 3030, 2, 0, 0, 13, 0, 0x49, 0, 0, "Kodak", "12MP", 31556 }, { 18000000, 4000, 3000, 0, 0, 0, 0, 0, 0x00, 0, 0, "Kodak", "12MP" }, { 614400, 640, 480, 0, 3, 0, 0, 64, 0x94, 0, 0, "Kodak", "KAI-0340" }, { 15360000, 3200, 2400, 0, 0, 0, 0, 96, 0x16, 0, 0, "Lenovo", "A820" }, { 3884928, 1608, 1207, 0, 0, 0, 0, 96, 0x16, 0, 0, "Micron", "2010", 3212 }, { 1138688, 1534, 986, 0, 0, 0, 0, 0, 0x61, 0, 0, "Minolta", "RD175", 513 }, { 
1581060, 1305, 969, 0, 0, 18, 6, 6, 0x1e, 4, 1, "Nikon", "E900" }, // "diag raw" hack { 2465792, 1638, 1204, 0, 0, 22, 1, 6, 0x4b, 5, 1, "Nikon", "E950" }, // "diag raw" hack; possibly also Nikon E700, E800, E775; // Olympus C-2020Z { 2940928, 1616, 1213, 0, 0, 0, 7, 30, 0x94, 0, 1, "Nikon", "E2100" }, // "diag raw" hack; also Nikon E2500 { 4771840, 2064, 1541, 0, 0, 0, 1, 6, 0xe1, 0, 1, "Nikon", "E990" }, // "diag raw" hack; possibly also Nikon E880, E885, E995; // Olympus C-3030Z { 4775936, 2064, 1542, 0, 0, 0, 0, 30, 0x94, 0, 1, "Nikon", "E3700" }, // "diag raw" hack; Nikon E3100, E3200, E3500; // Pentax "Optio 33WR"; possibly also Olympus C-740UZ { 5865472, 2288, 1709, 0, 0, 0, 1, 6, 0xb4, 0, 1, "Nikon", "E4500" }, // "diag raw" hack; possibly also Olympus C-4040Z { 5869568, 2288, 1710, 0, 0, 0, 0, 6, 0x16, 0, 1, "Nikon", "E4300" }, // "diag raw" hack; also Minolta "DiMAGE Z2" { 7438336, 2576, 1925, 0, 0, 0, 1, 6, 0xb4, 0, 1, "Nikon", "E5000" }, // also Nikon E5700 { 8998912, 2832, 2118, 0, 0, 0, 0, 30, 0x94, 7, 1, "Nikon", "COOLPIX S6" }, // "diag raw" hack { 5939200, 2304, 1718, 0, 0, 0, 0, 30, 0x16, 0, 0, "Olympus", "C-770UZ" }, // possibly also Olympus C-4100Z, C-765UZ { 3178560, 2064, 1540, 0, 0, 0, 0, 0, 0x94, 0, 1, "Pentax", "Optio S V1.01" }, { 4841984, 2090, 1544, 0, 0, 22, 0, 0, 0x94, 7, 1, "Pentax", "Optio S" }, { 6114240, 2346, 1737, 0, 0, 22, 0, 0, 0x94, 7, 1, "Pentax", "Optio S4" }, { 10702848, 3072, 2322, 0, 0, 0, 21, 30, 0x94, 0, 1, "Pentax", "Optio 750Z" }, { 4147200, 1920, 1080, 0, 0, 0, 0, 0, 0x49, 0, 0, "Photron", "BC2-HD" }, { 4151666, 1920, 1080, 0, 0, 0, 0, 0, 0x49, 0, 0, "Photron", "BC2-HD", 8 }, { 13248000, 2208, 3000, 0, 0, 0, 0, 13, 0x61, 0, 0, "Pixelink", "A782" }, { 6291456, 2048, 1536, 0, 0, 0, 0, 96, 0x61, 0, 0, "RoverShot", "3320AF" }, { 311696, 644, 484, 0, 0, 0, 0, 0, 0x16, 0, 8, "ST Micro", "STV680 VGA" }, { 16098048, 3288, 2448, 0, 0, 24, 0, 9, 0x94, 0, 1, "Samsung", "S85" }, // hack { 16215552, 3312, 2448, 0, 0, 48, 0, 9, 0x94, 0, 1, "Samsung", "S85" }, // hack { 20487168, 3648, 2808, 0, 0, 0, 0, 13, 0x94, 5, 1, "Samsung", "WB550" }, { 24000000, 4000, 3000, 0, 0, 0, 0, 13, 0x94, 5, 1, "Samsung", "WB550" }, { 12582980, 3072, 2048, 0, 0, 0, 0, 33, 0x61, 0, 0, "Sinar", "", 68 }, // Sinarback 23; same res. 
as Leaf Volare & Cantare { 33292868, 4080, 4080, 0, 0, 0, 0, 33, 0x61, 0, 0, "Sinar", "", 68 }, // Sinarback 44 { 44390468, 4080, 5440, 0, 0, 0, 0, 33, 0x61, 0, 0, "Sinar", "", 68 }, // Sinarback 54 { 1409024, 1376, 1024, 0, 0, 1, 0, 0, 0x49, 0, 0, "Sony", "XCD-SX910CR" }, { 2818048, 1376, 1024, 0, 0, 1, 0, 97, 0x49, 0, 0, "Sony", "XCD-SX910CR" }, }; libraw_custom_camera_t table[64 + sizeof(const_table) / sizeof(const_table[0])]; // clang-format on char head[64] = {0}, *cp; int hlen, fsize, flen, zero_fsize = 1, i, c; struct jhead jh; unsigned camera_count = parse_custom_cameras(64, table, imgdata.params.custom_camera_strings); for (int q = 0; q < int(sizeof(const_table) / sizeof(const_table[0])); q++) memmove(&table[q + camera_count], &const_table[q], sizeof(const_table[0])); camera_count += sizeof(const_table) / sizeof(const_table[0]); tiff_flip = flip = filters = UINT_MAX; /* unknown */ raw_height = raw_width = fuji_width = fuji_layout = cr2_slice[0] = 0; maximum = height = width = top_margin = left_margin = 0; cdesc[0] = desc[0] = artist[0] = make[0] = model[0] = model2[0] = 0; iso_speed = shutter = aperture = focal_len = 0; unique_id = 0ULL; tiff_nifds = 0; is_NikonTransfer = 0; is_Sony = 0; is_pana_raw = 0; maker_index = LIBRAW_CAMERAMAKER_Unknown; is_4K_RAFdata = 0; FujiCropMode = 0; is_PentaxRicohMakernotes = 0; normalized_model[0] = 0; normalized_make[0] = 0; CM_found = 0; memset(tiff_ifd, 0, sizeof tiff_ifd); libraw_internal_data.unpacker_data.crx_track_selected = -1; libraw_internal_data.unpacker_data.CR3_CTMDtag = 0; imgdata.makernotes.hasselblad.nIFD_CM[0] = imgdata.makernotes.hasselblad.nIFD_CM[1] = -1; imgdata.makernotes.kodak.ISOCalibrationGain = 1.0f; imCommon.CameraTemperature = imCommon.SensorTemperature = imCommon.SensorTemperature2 = imCommon.LensTemperature = imCommon.AmbientTemperature = imCommon.BatteryTemperature = imCommon.exifAmbientTemperature = -1000.0f; imgdata.color.ExifColorSpace = LIBRAW_COLORSPACE_Unknown; for (i = 0; i < LIBRAW_IFD_MAXCOUNT; i++) { tiff_ifd[i].dng_color[0].illuminant = tiff_ifd[i].dng_color[1].illuminant = 0xffff; for (int c = 0; c < 4; c++) tiff_ifd[i].dng_levels.analogbalance[c] = 1.0f; } memset(gpsdata, 0, sizeof gpsdata); memset(cblack, 0, sizeof cblack); memset(white, 0, sizeof white); memset(mask, 0, sizeof mask); thumb_offset = thumb_length = thumb_width = thumb_height = 0; load_raw = thumb_load_raw = 0; write_thumb = &LibRaw::jpeg_thumb; data_offset = meta_offset = meta_length = tiff_bps = tiff_compress = 0; kodak_cbpp = zero_after_ff = dng_version = load_flags = 0; timestamp = shot_order = tiff_samples = black = is_foveon = 0; mix_green = profile_length = data_error = zero_is_bad = 0; pixel_aspect = is_raw = raw_color = 1; tile_width = tile_length = 0; metadata_blocks = 0; for (i = 0; i < 4; i++) { cam_mul[i] = i == 1; pre_mul[i] = i < 3; FORC3 cmatrix[c][i] = 0; FORC3 rgb_cam[c][i] = c == i; } colors = 3; for (i = 0; i < 0x10000; i++) curve[i] = i; order = get2(); hlen = get4(); fseek(ifp, 0, SEEK_SET); if (fread(head, 1, 64, ifp) < 64) throw LIBRAW_EXCEPTION_IO_CORRUPT; libraw_internal_data.unpacker_data.lenRAFData = libraw_internal_data.unpacker_data.posRAFData = 0; fseek(ifp, 0, SEEK_END); flen = fsize = ftell(ifp); if ((cp = (char *)memmem(head, 32, (char *)"MMMM", 4)) || (cp = (char *)memmem(head, 32, (char *)"IIII", 4))) { parse_phase_one(cp - head); if (cp - head && parse_tiff(0)) apply_tiff(); } else if (order == 0x4949 || order == 0x4d4d) { if (!memcmp(head + 6, "HEAPCCDR", 8)) { data_offset = hlen; parse_ciff(hlen, 
flen - hlen, 0); load_raw = &LibRaw::canon_load_raw; } else if (parse_tiff(0)) apply_tiff(); } else if (!memcmp(head, "\xff\xd8\xff\xe1", 4) && !memcmp(head + 6, "Exif", 4)) { fseek(ifp, 4, SEEK_SET); data_offset = 4 + get2(); fseek(ifp, data_offset, SEEK_SET); if (fgetc(ifp) != 0xff) parse_tiff(12); thumb_offset = 0; } else if (!memcmp(head + 25, "ARECOYK", 7)) // 'KYOCERA' right-to-left { strcpy(make, "Contax"); strcpy(model, "N Digital"); parse_kyocera(); } else if (!strcmp(head, "PXN")) { strcpy(make, "Logitech"); strcpy(model, "Fotoman Pixtura"); } else if (!strcmp(head, "qktk")) { strcpy(make, "Apple"); strcpy(model, "QuickTake 100"); load_raw = &LibRaw::quicktake_100_load_raw; } else if (!strcmp(head, "qktn")) { strcpy(make, "Apple"); strcpy(model, "QuickTake 150"); load_raw = &LibRaw::kodak_radc_load_raw; } else if (!memcmp(head, "FUJIFILM", 8)) { memcpy(imFuji.SerialSignature, head + 0x10, 0x0c); imFuji.SerialSignature[0x0c] = 0; strncpy(model, head + 0x1c, 0x20); model[0x20] = 0; memcpy(model2, head + 0x3c, 4); model2[4] = 0; strcpy(imFuji.RAFVersion, model2); fseek(ifp, 84, SEEK_SET); thumb_offset = get4(); thumb_length = get4(); fseek(ifp, 92, SEEK_SET); parse_fuji(get4()); if (thumb_offset > 120) { fseek(ifp, 120, SEEK_SET); is_raw += (i = get4()) ? 1 : 0; if (is_raw == 2 && shot_select) parse_fuji(i); } load_raw = &LibRaw::unpacked_load_raw; fseek(ifp, 100 + 28 * (shot_select > 0), SEEK_SET); parse_tiff(data_offset = get4()); parse_tiff(thumb_offset + 12); apply_tiff(); } else if (!memcmp(head, "RIFF", 4)) { fseek(ifp, 0, SEEK_SET); parse_riff(); } else if (!memcmp(head + 4, "ftypqt ", 9)) { fseek(ifp, 0, SEEK_SET); parse_qt(fsize); is_raw = 0; } else if (!memcmp(head, "\0\001\0\001\0@", 6)) { fseek(ifp, 6, SEEK_SET); fread(make, 1, 8, ifp); fread(model, 1, 8, ifp); fread(model2, 1, 16, ifp); data_offset = get2(); get2(); raw_width = get2(); raw_height = get2(); load_raw = &LibRaw::nokia_load_raw; filters = 0x61616161; } else if (!memcmp(head, "NOKIARAW", 8)) { strcpy(make, "NOKIA"); order = 0x4949; fseek(ifp, 300, SEEK_SET); data_offset = get4(); i = get4(); // bytes count width = get2(); height = get2(); // Data integrity check if (width < 1 || width > 16000 || height < 1 || height > 16000 || i < (width * height) || i > (2 * width * height)) throw LIBRAW_EXCEPTION_IO_CORRUPT; switch (tiff_bps = i * 8 / (width * height)) { case 8: load_raw = &LibRaw::eight_bit_load_raw; break; case 10: load_raw = &LibRaw::nokia_load_raw; break; case 0: throw LIBRAW_EXCEPTION_IO_CORRUPT; break; } raw_height = height + (top_margin = i / (width * tiff_bps / 8) - height); mask[0][3] = 1; filters = 0x61616161; } else if (!memcmp(head, "ARRI", 4)) { order = 0x4949; fseek(ifp, 20, SEEK_SET); width = get4(); height = get4(); strcpy(make, "ARRI"); fseek(ifp, 668, SEEK_SET); fread(model, 1, 64, ifp); model[63] = 0; fseek(ifp, 760, SEEK_SET); fread(software, 1, 64, ifp); if((unsigned char)software[0] == 0xff) software[0] = 0; software[63] = 0; data_offset = 4096; load_raw = &LibRaw::packed_load_raw; load_flags = 88; filters = 0x61616161; fixupArri(); } else if (!memcmp(head, "XPDS", 4)) { order = 0x4949; fseek(ifp, 0x800, SEEK_SET); fread(make, 1, 41, ifp); raw_height = get2(); raw_width = get2(); fseek(ifp, 56, SEEK_CUR); fread(model, 1, 30, ifp); data_offset = 0x10000; load_raw = &LibRaw::canon_rmf_load_raw; gamma_curve(0, 12.25, 1, 1023); } else if (!memcmp(head + 4, "RED1", 4)) { strcpy(make, "Red"); strcpy(model, "One"); parse_redcine(); load_raw = &LibRaw::redcine_load_raw; gamma_curve(1 / 2.4, 
12.92, 1, 4095); filters = 0x49494949; } else if (!memcmp(head, "DSC-Image", 9)) parse_rollei(); else if (!memcmp(head, "PWAD", 4)) parse_sinar_ia(); else if (!memcmp(head, "\0MRM", 4)) parse_minolta(0); else if (!memcmp(head, "FOVb", 4)) { parse_x3f(); /* Does nothing if USE_X3FTOOLS is not defined */ } else if (!memcmp(head, "CI", 2)) parse_cine(); #ifdef USE_6BY9RPI else if (!memcmp(head, "BRCM", 4)) { fseek(ifp, 0, SEEK_SET); strcpy(make, "RaspberryPi"); strcpy(model, "Pi"); parse_raspberrypi(); } #endif else if (!memcmp(head + 4, "ftypcrx ", 8)) { int err; unsigned long long szAtomList; short nesting = -1; short nTrack = -1; short TrackType; char AtomNameStack[128]; strcpy(make, "Canon"); szAtomList = ifp->size(); err = parseCR3(0ULL, szAtomList, nesting, AtomNameStack, nTrack, TrackType); if ((err == 0 || err == -14) && nTrack >= 0) // no error, or too deep nesting selectCRXTrack(nTrack); } if (make[0] == 0) for (zero_fsize = i = 0; i < (int)camera_count; i++) if (fsize == (int)table[i].fsize) { strcpy(make, table[i].t_make); strcpy(model, table[i].t_model); flip = table[i].flags >> 2; zero_is_bad = table[i].flags & 2; data_offset = table[i].offset == 0xffff ? 0 : table[i].offset; raw_width = table[i].rw; raw_height = table[i].rh; left_margin = table[i].lm; top_margin = table[i].tm; width = raw_width - left_margin - table[i].rm; height = raw_height - top_margin - table[i].bm; filters = 0x1010101U * table[i].cf; colors = 4 - !((filters & filters >> 1) & 0x5555); load_flags = table[i].lf & 0xff; if (table[i].lf & 0x100) /* Monochrome sensor dump */ { colors = 1; filters = 0; } switch (tiff_bps = (fsize - data_offset) * 8 / (raw_width * raw_height)) { case 6: load_raw = &LibRaw::minolta_rd175_load_raw; ilm.CameraMount = LIBRAW_MOUNT_Minolta_A; break; case 8: load_raw = &LibRaw::eight_bit_load_raw; break; case 10: if ((fsize - data_offset) / raw_height * 3 >= raw_width * 4) { load_raw = &LibRaw::android_loose_load_raw; break; } else if (load_flags & 1) { load_raw = &LibRaw::android_tight_load_raw; break; } case 12: load_flags |= 128; load_raw = &LibRaw::packed_load_raw; break; case 16: order = 0x4949 | 0x404 * (load_flags & 1); tiff_bps -= load_flags >> 4; tiff_bps -= load_flags = load_flags >> 1 & 7; load_raw = table[i].offset == 0xffff ? 
&LibRaw::unpacked_load_raw_reversed : &LibRaw::unpacked_load_raw; } maximum = (1 << tiff_bps) - (1 << table[i].max); break; } if (zero_fsize) fsize = 0; if (make[0] == 0) parse_smal(0, flen); if (make[0] == 0) { parse_jpeg(0); #ifdef USE_6BY9RPI if (!(strncmp(model, "ov", 2) && strncmp(model, "RP_", 3))) { //Assume that this isn't a raw unless the header can be found is_raw = 0; if (!strncasecmp(model, "RP_imx", 6)) { const long offsets[] = { //IMX219 offsets 10270208, //8MPix 3280x2464 2678784, //1920x1080 2628608, //1640x1232 1963008, //1640x922 1233920, //1280x720 445440, //640x480 -1 //Marker for end of table }; int offset_idx; for (offset_idx = 0; offsets[offset_idx] != -1; offset_idx++) { if (!fseek(ifp, -offsets[offset_idx], SEEK_END) && fread(head, 1, 32, ifp) && !strncmp(head, "BRCM", 4)) { fseek(ifp, -32, SEEK_CUR); strcpy(make, "SonyRPF"); parse_raspberrypi(); break; } } } else if (!strncasecmp(model, "RP_OV", 5) || !strncasecmp(model, "ov5647", 6)) { const long offsets[] = { 6404096, //5MPix 2592x1944 2717696, //1920x1080 1625600, //1296x972 1233920, //1296x730 445440, //640x480 -1 //Marker for end of table }; int offset_idx; for (offset_idx = 0; offsets[offset_idx] != -1; offset_idx++) { if (!fseek(ifp, -offsets[offset_idx], SEEK_END) && fread(head, 1, 32, ifp) && !strncmp(head, "BRCM", 4)) { fseek(ifp, -32, SEEK_CUR); strcpy(make, "OmniVision"); width = raw_width; //Defaults raw_width = 2611; filters = 0x16161616; parse_raspberrypi(); break; } } } }// else is_raw = 0; #else fseek(ifp, 0, SEEK_END); int sz = ftell(ifp); if (!strncmp(model, "RP_imx219", 9) && sz >= 0x9cb600 && !fseek(ifp, -0x9cb600, SEEK_END) && fread(head, 1, 0x20, ifp) && !strncmp(head, "BRCM", 4)) { strcpy(make, "Broadcom"); strcpy(model, "RPi IMX219"); if (raw_height > raw_width) flip = 5; data_offset = ftell(ifp) + 0x8000 - 0x20; parse_broadcom(); black = 66; maximum = 0x3ff; load_raw = &LibRaw::broadcom_load_raw; thumb_offset = 0; thumb_length = sz - 0x9cb600 - 1; } else if (!(strncmp(model, "ov5647", 6) && strncmp(model, "RP_OV5647", 9)) && sz >= 0x61b800 && !fseek(ifp, -0x61b800, SEEK_END) && fread(head, 1, 0x20, ifp) && !strncmp(head, "BRCM", 4)) { strcpy(make, "Broadcom"); if (!strncmp(model, "ov5647", 6)) strcpy(model, "RPi OV5647 v.1"); else strcpy(model, "RPi OV5647 v.2"); if (raw_height > raw_width) flip = 5; data_offset = ftell(ifp) + 0x8000 - 0x20; parse_broadcom(); black = 16; maximum = 0x3ff; load_raw = &LibRaw::broadcom_load_raw; thumb_offset = 0; thumb_length = sz - 0x61b800 - 1; } else is_raw = 0; #endif } // make sure strings are terminated desc[511] = artist[63] = make[63] = model[63] = model2[63] = 0; for (i = 0; i < int(sizeof CorpTable / sizeof *CorpTable); i++) { if (strcasestr(make, CorpTable[i].CorpName)) { /* Simplify company names */ maker_index = CorpTable[i].CorpId; strcpy(make, CorpTable[i].CorpName); } } if ((makeIs(LIBRAW_CAMERAMAKER_Kodak) || makeIs(LIBRAW_CAMERAMAKER_Leica)) && ((cp = strcasestr(model, " DIGITAL CAMERA")) || (cp = strstr(model, "FILE VERSION")))) { *cp = 0; } else if (makeIs(LIBRAW_CAMERAMAKER_Ricoh) && !strncasecmp(model, "PENTAX", 6)) { maker_index = LIBRAW_CAMERAMAKER_Pentax; strcpy(make, "Pentax"); } else if (makeIs(LIBRAW_CAMERAMAKER_JK_Imaging) && !strncasecmp(model, "Kodak", 5)) { maker_index = LIBRAW_CAMERAMAKER_Kodak; strcpy(make, "Kodak"); } remove_trailing_spaces(make, sizeof(make)); remove_trailing_spaces(model, sizeof(model)); i = strbuflen(make); /* Remove make from model */ if (!strncasecmp(model, make, i) && model[i++] == ' ') memmove(model, 
model + i, 64 - i); if (makeIs(LIBRAW_CAMERAMAKER_Fujifilm) && !strncmp(model, "FinePix", 7)) { memmove(model, model + 7, strlen(model) - 6); if (model[0] == ' ') { memmove(model, model + 1, strlen(model)); } } else if ((makeIs(LIBRAW_CAMERAMAKER_Kodak) || makeIs(LIBRAW_CAMERAMAKER_Konica)) && !strncmp(model, "Digital Camera ", 15)) { memmove(model, model + 15, strlen(model) - 14); } desc[511] = artist[63] = make[63] = model[63] = model2[63] = 0; if (!is_raw) goto notraw; if (!height) height = raw_height; if (!width) width = raw_width; identify_finetune_pentax(); if (dng_version) { if (filters == UINT_MAX) filters = 0; if (!filters) colors = tiff_samples; switch (tiff_compress) { case 0: // Compression not set, assuming uncompressed case 1: #ifdef USE_DNGSDK // Uncompressed float if (load_raw != &LibRaw::float_dng_load_raw_placeholder) #endif load_raw = &LibRaw::packed_dng_load_raw; break; case 7: load_raw = &LibRaw::lossless_dng_load_raw; break; case 8: load_raw = &LibRaw::deflate_dng_load_raw; break; #ifdef USE_GPRSDK case 9: load_raw = &LibRaw::vc5_dng_load_raw_placeholder; break; #endif case 34892: load_raw = &LibRaw::lossy_dng_load_raw; break; default: load_raw = 0; } GetNormalizedModel(); if (makeIs(LIBRAW_CAMERAMAKER_Olympus) && (OlyID == OlyID_STYLUS_1) && // don't use normalized_model below, it is 'Stylus 1' (strchr(model+6, 's') || strchr(model+6, 'S'))) { width -= 16; } goto dng_skip; } if (makeIs(LIBRAW_CAMERAMAKER_Canon) && !fsize && tiff_bps != 15) { bool fromtable = false; if (!load_raw) load_raw = &LibRaw::lossless_jpeg_load_raw; for (i = 0; i < int(sizeof canon / sizeof *canon); i++) if (raw_width == canon[i][0] && raw_height == canon[i][1]) { width = raw_width - (left_margin = canon[i][2]); height = raw_height - (top_margin = canon[i][3]); width -= canon[i][4]; height -= canon[i][5]; mask[0][1] = canon[i][6]; mask[0][3] = -canon[i][7]; mask[1][1] = canon[i][8]; mask[1][3] = -canon[i][9]; if (canon[i][10]) filters = canon[i][10] * 0x01010101U; fromtable = true; } if ((unique_id | 0x20000ULL) == 0x2720000ULL) // "PowerShot G11", "PowerShot S90": 0x2700000, 0x2720000 // possibly "PowerShot SX120 IS" (if not chdk hack?): 0x2710000 { left_margin = 8; top_margin = 16; } if(!fromtable && imgdata.makernotes.canon.AverageBlackLevel) // not known, but metadata known { FORC4 cblack[c] = imgdata.makernotes.canon.ChannelBlackLevel[c]; black = cblack[4] = cblack[5] = 0; // Prevent automatic BL calculation mask[0][3] = 1; mask[0][1] = 2; if(imgdata.makernotes.canon.SensorWidth == raw_width && imgdata.makernotes.canon.SensorHeight == raw_height) { left_margin = (imgdata.makernotes.canon.SensorLeftBorder+1) & 0xfffe; // round to 2 width = imgdata.makernotes.canon.SensorRightBorder - left_margin; top_margin = (imgdata.makernotes.canon.SensorTopBorder +1) & 0xfffe; height = imgdata.makernotes.canon.SensorBottomBorder - top_margin; } } } identify_finetune_by_filesize(fsize); if (!strcmp(model, "KAI-0340") && find_green(16, 16, 3840, 5120) < 25) { height = 480; top_margin = filters = 0; strcpy(model, "C603"); } GetNormalizedModel(); identify_finetune_dcr(head, fsize, flen); /* Early reject for damaged images */ if (!load_raw || height < 22 || width < 22 || (tiff_bps > 16 && (load_raw != &LibRaw::deflate_dng_load_raw && load_raw != &LibRaw::float_dng_load_raw_placeholder)) || tiff_samples > 4 || colors > 4 || colors < 1 /* alloc in unpack() may be fooled by size adjust */ || ((int)width + (int)left_margin > 65535) || ((int)height + (int)top_margin > 65535)) { is_raw = 0; 
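// Early reject: the checks above drop files with no loader, implausibly small
// frames (under 22 pixels per side), bit depths above 16 for anything but the
// float/deflate DNG loaders, and more than four samples or colors. The
// width + left_margin and height + top_margin limits of 65535 appear to keep
// the full sensor geometry within 16-bit range, so later size adjustments and
// the allocation in unpack() (see the comment in the condition) are not fooled
// into undersized buffers.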
RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY, 1, 2); return; } if (!model[0]) { sprintf(model, "%dx%d", width, height); strcpy(normalized_model, model); } if (!(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_ZEROFILTERS_FOR_MONOCHROMETIFFS) && (filters == UINT_MAX)) // Default dcraw behaviour filters = 0x94949494; else if (filters == UINT_MAX) { if (tiff_nifds > 0 && tiff_samples == 1) { colors = 1; filters = 0; } else filters = 0x94949494; } if (thumb_offset && !thumb_height) { fseek(ifp, thumb_offset, SEEK_SET); if (ljpeg_start(&jh, 1)) { thumb_width = jh.wide; thumb_height = jh.high; } } dng_skip: if (dng_version) identify_process_dng_fields(); /* Early reject for damaged images again (after dng fields processing) */ if (!load_raw || height < 22 || width < 22 || (tiff_bps > 16 && (load_raw != &LibRaw::deflate_dng_load_raw && load_raw != &LibRaw::float_dng_load_raw_placeholder)) || tiff_samples > 4 || colors > 4 || colors < 1) { is_raw = 0; RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY, 1, 2); return; } { // Check cam_mul range int cmul_ok = 1; FORCC if (cam_mul[c] <= 0.001f) cmul_ok = 0; ; if (cmul_ok) { double cmin = cam_mul[0], cmax; double cnorm[4]; FORCC cmin = MIN(cmin, cam_mul[c]); FORCC cnorm[c] = cam_mul[c] / cmin; cmax = cmin = cnorm[0]; FORCC { cmin = MIN(cmin, cnorm[c]); cmax = MIN(cmax, cnorm[c]); } if (cmin <= 0.01f || cmax > 100.f) cmul_ok = false; } if (!cmul_ok) { if (cam_mul[0] > 0) cam_mul[0] = 0; cam_mul[3] = 0; } } if ((use_camera_matrix & (((use_camera_wb || dng_version)?0:1) | 0x2)) && cmatrix[0][0] > 0.125) { memcpy(rgb_cam, cmatrix, sizeof cmatrix); raw_color = 0; } if (raw_color && !CM_found) CM_found = adobe_coeff(maker_index, normalized_model); else if ((imgdata.color.cam_xyz[0][0] < 0.01) && !CM_found) CM_found = adobe_coeff(maker_index, normalized_model, 1); if (load_raw == &LibRaw::kodak_radc_load_raw) if ((raw_color) && !CM_found) CM_found = adobe_coeff(LIBRAW_CAMERAMAKER_Apple, "Quicktake"); if ((maker_index != LIBRAW_CAMERAMAKER_Unknown) && normalized_model[0]) SetStandardIlluminants (maker_index, normalized_model); // Clear erorneus fuji_width if not set through parse_fuji or for DNG if (fuji_width && !dng_version && !(imgdata.process_warnings & LIBRAW_WARN_PARSEFUJI_PROCESSED)) fuji_width = 0; if (fuji_width) { fuji_width = width >> !fuji_layout; filters = fuji_width & 1 ? 0x94949494 : 0x49494949; width = (height >> fuji_layout) + fuji_width; height = width - 1; pixel_aspect = 1; } else { if (raw_height < height) raw_height = height; if (raw_width < width) raw_width = width; } if (!tiff_bps) tiff_bps = 12; if (!maximum) { maximum = (1 << tiff_bps) - 1; if (maximum < 0x10000 && curve[maximum] > 0 && load_raw == &LibRaw::sony_arw2_load_raw) maximum = curve[maximum]; } if (maximum > 0xffff) maximum = 0xffff; if (!load_raw || height < 22 || width < 22 || (tiff_bps > 16 && (load_raw != &LibRaw::deflate_dng_load_raw && load_raw != &LibRaw::float_dng_load_raw_placeholder)) || tiff_samples > 6 || colors > 4) is_raw = 0; if (raw_width < 22 || raw_width > 64000 || raw_height < 22 || pixel_aspect < 0.1 || pixel_aspect > 10. || raw_height > 64000) is_raw = 0; #ifdef NO_JASPER if (load_raw == &LibRaw::redcine_load_raw) { is_raw = 0; imgdata.process_warnings |= LIBRAW_WARN_NO_JASPER; } #endif #ifdef NO_JPEG if (load_raw == &LibRaw::kodak_jpeg_load_raw || load_raw == &LibRaw::lossy_dng_load_raw) { is_raw = 0; imgdata.process_warnings |= LIBRAW_WARN_NO_JPEGLIB; } #endif if (!cdesc[0]) strcpy(cdesc, colors == 3 ? 
"RGBG" : "GMCY"); if (!raw_height) raw_height = height; if (!raw_width) raw_width = width; if (filters > 999 && colors == 3) filters |= ((filters >> 2 & 0x22222222) | (filters << 2 & 0x88888888)) & filters << 1; notraw: if (flip == (int)UINT_MAX) flip = tiff_flip; if (flip == (int)UINT_MAX) flip = 0; // Convert from degrees to bit-field if needed if (flip > 89 || flip < -89) { switch ((flip + 3600) % 360) { case 270: flip = 5; break; case 180: flip = 3; break; case 90: flip = 6; break; } } if (pana_bpp) imgdata.color.raw_bps = pana_bpp; else if ((load_raw == &LibRaw::phase_one_load_raw) || (load_raw == &LibRaw::phase_one_load_raw_c)) imgdata.color.raw_bps = ph1.format; else imgdata.color.raw_bps = tiff_bps; RUN_CALLBACK(LIBRAW_PROGRESS_IDENTIFY, 1, 2); } void LibRaw::identify_process_dng_fields() { if (!dng_version) return; int c; { /* copy DNG data from per-IFD field to color.dng */ int iifd = find_ifd_by_offset(data_offset); int pifd = find_ifd_by_offset(thumb_offset); #define CFAROUND(value, filters) \ filters ? (filters >= 1000 ? ((value + 1) / 2) * 2 : ((value + 5) / 6) * 6) \ : value #define IFDCOLORINDEX(ifd, subset, bit) \ (tiff_ifd[ifd].dng_color[subset].parsedfields & bit) \ ? ifd \ : ((tiff_ifd[0].dng_color[subset].parsedfields & bit) ? 0 : -1) #define IFDLEVELINDEX(ifd, bit) \ (tiff_ifd[ifd].dng_levels.parsedfields & bit) \ ? ifd \ : ((tiff_ifd[0].dng_levels.parsedfields & bit) ? 0 : -1) #define COPYARR(to, from) memmove(&to, &from, sizeof(from)) if (iifd < (int)tiff_nifds && iifd >= 0) { int sidx; // Per field, not per structure if (!(imgdata.params.raw_processing_options & LIBRAW_PROCESSING_DONT_CHECK_DNG_ILLUMINANT)) { int illidx[2], cmidx[2], calidx[2], abidx; for (int i = 0; i < 2; i++) { illidx[i] = IFDCOLORINDEX(iifd, i, LIBRAW_DNGFM_ILLUMINANT); cmidx[i] = IFDCOLORINDEX(iifd, i, LIBRAW_DNGFM_COLORMATRIX); calidx[i] = IFDCOLORINDEX(iifd, i, LIBRAW_DNGFM_CALIBRATION); } abidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_ANALOGBALANCE); // Data found, all in same ifd, illuminants are inited if (illidx[0] >= 0 && illidx[0] < (int)tiff_nifds && illidx[0] == illidx[1] && illidx[0] == cmidx[0] && illidx[0] == cmidx[1] && tiff_ifd[illidx[0]].dng_color[0].illuminant > 0 && tiff_ifd[illidx[0]].dng_color[1].illuminant > 0) { sidx = illidx[0]; // => selected IFD double cc[4][4], cm[4][3], cam_xyz[4][3]; // CM -> Color Matrix // CC -> Camera calibration for (int j = 0; j < 4; j++) for (int i = 0; i < 4; i++) cc[j][i] = i == j; int colidx = -1; // IS D65 here? 
for (int i = 0; i < 2; i++) { if (tiff_ifd[sidx].dng_color[i].illuminant == LIBRAW_WBI_D65) { colidx = i; break; } } // Other daylight-type ill if (colidx < 0) for (int i = 0; i < 2; i++) { int ill = tiff_ifd[sidx].dng_color[i].illuminant; if (ill == LIBRAW_WBI_Daylight || ill == LIBRAW_WBI_D55 || ill == LIBRAW_WBI_D75 || ill == LIBRAW_WBI_D50 || ill == LIBRAW_WBI_Flash) { colidx = i; break; } } if (colidx >= 0) // Selected { // Init camera matrix from DNG FORCC for (int j = 0; j < 3; j++) cm[c][j] = tiff_ifd[sidx].dng_color[colidx].colormatrix[c][j]; if (calidx[colidx] == sidx) { for (int i = 0; i < colors && i < 4; i++) FORCC cc[i][c] = tiff_ifd[sidx].dng_color[colidx].calibration[i][c]; } if (abidx == sidx) for (int i = 0; i < colors && i < 4; i++) FORCC cc[i][c] *= tiff_ifd[sidx].dng_levels.analogbalance[i]; int j; FORCC for (int i = 0; i < 3; i++) for (cam_xyz[c][i] = j = 0; j < colors && j < 4; j++) cam_xyz[c][i] += cc[c][j] * cm[j][i]; // add AsShotXY later * xyz[i]; cam_xyz_coeff(cmatrix, cam_xyz); } } } bool noFujiDNGCrop = makeIs(LIBRAW_CAMERAMAKER_Fujifilm) && (!strcmp(normalized_model, "S3Pro") || !strcmp(normalized_model, "S5Pro") || !strcmp(normalized_model, "S2Pro")); if (!noFujiDNGCrop && (imgdata.params.raw_processing_options &LIBRAW_PROCESSING_USE_DNG_DEFAULT_CROP)) { sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_CROPORIGIN); int sidx2 = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_CROPSIZE); if (sidx >= 0 && sidx == sidx2 && tiff_ifd[sidx].dng_levels.default_crop[2] > 0 && tiff_ifd[sidx].dng_levels.default_crop[3] > 0) { int lm = tiff_ifd[sidx].dng_levels.default_crop[0]; int lmm = CFAROUND(lm, filters); int tm = tiff_ifd[sidx].dng_levels.default_crop[1]; int tmm = CFAROUND(tm, filters); int ww = tiff_ifd[sidx].dng_levels.default_crop[2]; int hh = tiff_ifd[sidx].dng_levels.default_crop[3]; if (lmm > lm) ww -= (lmm - lm); if (tmm > tm) hh -= (tmm - tm); if (left_margin + lm + ww <= raw_width && top_margin + tm + hh <= raw_height) { left_margin += lmm; top_margin += tmm; width = ww; height = hh; } } } if (!(imgdata.color.dng_color[0].parsedfields & LIBRAW_DNGFM_FORWARDMATRIX)) // Not set already (Leica makernotes) { sidx = IFDCOLORINDEX(iifd, 0, LIBRAW_DNGFM_FORWARDMATRIX); if (sidx >= 0) COPYARR(imgdata.color.dng_color[0].forwardmatrix, tiff_ifd[sidx].dng_color[0].forwardmatrix); } if (!(imgdata.color.dng_color[1].parsedfields & LIBRAW_DNGFM_FORWARDMATRIX)) // Not set already (Leica makernotes) { sidx = IFDCOLORINDEX(iifd, 1, LIBRAW_DNGFM_FORWARDMATRIX); if (sidx >= 0) COPYARR(imgdata.color.dng_color[1].forwardmatrix, tiff_ifd[sidx].dng_color[1].forwardmatrix); } for (int ss = 0; ss < 2; ss++) { sidx = IFDCOLORINDEX(iifd, ss, LIBRAW_DNGFM_COLORMATRIX); if (sidx >= 0) COPYARR(imgdata.color.dng_color[ss].colormatrix, tiff_ifd[sidx].dng_color[ss].colormatrix); sidx = IFDCOLORINDEX(iifd, ss, LIBRAW_DNGFM_CALIBRATION); if (sidx >= 0) COPYARR(imgdata.color.dng_color[ss].calibration, tiff_ifd[sidx].dng_color[ss].calibration); sidx = IFDCOLORINDEX(iifd, ss, LIBRAW_DNGFM_ILLUMINANT); if (sidx >= 0) imgdata.color.dng_color[ss].illuminant = tiff_ifd[sidx].dng_color[ss].illuminant; } // Levels sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_ANALOGBALANCE); if (sidx >= 0) COPYARR(imgdata.color.dng_levels.analogbalance, tiff_ifd[sidx].dng_levels.analogbalance); sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_BASELINEEXPOSURE); if (sidx >= 0) imgdata.color.dng_levels.baseline_exposure = tiff_ifd[sidx].dng_levels.baseline_exposure; sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_WHITE); if (sidx >= 0 && 
tiff_ifd[sidx].dng_levels.dng_whitelevel[0]) COPYARR(imgdata.color.dng_levels.dng_whitelevel, tiff_ifd[sidx].dng_levels.dng_whitelevel); else if (tiff_ifd[iifd].sample_format <= 2 && tiff_ifd[iifd].bps > 0 && tiff_ifd[iifd].bps < 32) FORC4 imgdata.color.dng_levels.dng_whitelevel[c] = (1 << tiff_ifd[iifd].bps) - 1; sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_ASSHOTNEUTRAL); if (sidx >= 0) { COPYARR(imgdata.color.dng_levels.asshotneutral, tiff_ifd[sidx].dng_levels.asshotneutral); if (imgdata.color.dng_levels.asshotneutral[0]) { cam_mul[3] = 0; FORCC if (fabs(imgdata.color.dng_levels.asshotneutral[c]) > 0.0001) cam_mul[c] = 1 / imgdata.color.dng_levels.asshotneutral[c]; } } sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_BLACK); if (sidx >= 0) { imgdata.color.dng_levels.dng_fblack = tiff_ifd[sidx].dng_levels.dng_fblack; imgdata.color.dng_levels.dng_black = tiff_ifd[sidx].dng_levels.dng_black; COPYARR(imgdata.color.dng_levels.dng_cblack, tiff_ifd[sidx].dng_levels.dng_cblack); COPYARR(imgdata.color.dng_levels.dng_fcblack, tiff_ifd[sidx].dng_levels.dng_fcblack); } if (pifd >= 0) { sidx = IFDLEVELINDEX(pifd, LIBRAW_DNGFM_PREVIEWCS); if (sidx >= 0) imgdata.color.dng_levels.preview_colorspace = tiff_ifd[sidx].dng_levels.preview_colorspace; } sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_OPCODE2); if (sidx >= 0) meta_offset = tiff_ifd[sidx].opcode2_offset; sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_LINTABLE); INT64 linoff = -1; int linlen = 0; if (sidx >= 0) { linoff = tiff_ifd[sidx].lineartable_offset; linlen = tiff_ifd[sidx].lineartable_len; } if (linoff >= 0 && linlen > 0) { INT64 pos = ftell(ifp); fseek(ifp, linoff, SEEK_SET); linear_table(linlen); fseek(ifp, pos, SEEK_SET); } // Need to add curve too } /* Copy DNG black level to LibRaw's */ if (load_raw == &LibRaw::lossy_dng_load_raw) { maximum = 0xffff; FORC4 imgdata.color.linear_max[c] = imgdata.color.dng_levels.dng_whitelevel[c] = 0xffff; } else { maximum = imgdata.color.dng_levels.dng_whitelevel[0]; } black = imgdata.color.dng_levels.dng_black; if (tiff_samples == 2 && imgdata.color.dng_levels.dng_cblack[4] * imgdata.color.dng_levels.dng_cblack[5] * tiff_samples == imgdata.color.dng_levels.dng_cblack[LIBRAW_CBLACK_SIZE - 1]) { unsigned ff = filters; if (filters > 999 && colors == 3) filters |= ((filters >> 2 & 0x22222222) | (filters << 2 & 0x88888888)) & filters << 1; /* Special case, Fuji SuperCCD dng */ int csum[4] = { 0,0,0,0 }, ccount[4] = { 0,0,0,0 }; int i = 6 + shot_select; for (unsigned row = 0; row < imgdata.color.dng_levels.dng_cblack[4]; row++) for (unsigned col = 0; col < imgdata.color.dng_levels.dng_cblack[5]; col++) { csum[FC(row, col)] += imgdata.color.dng_levels.dng_cblack[i]; ccount[FC(row, col)]++; i += tiff_samples; } for (int c = 0; c < 4; c++) if (ccount[c]) imgdata.color.dng_levels.dng_cblack[c] += csum[c] / ccount[c]; imgdata.color.dng_levels.dng_cblack[4] = imgdata.color.dng_levels.dng_cblack[5] = 0; filters = ff; } else if (tiff_samples > 2 && tiff_samples <= 4 && imgdata.color.dng_levels.dng_cblack[4] * imgdata.color.dng_levels.dng_cblack[5] * tiff_samples == imgdata.color.dng_levels.dng_cblack[LIBRAW_CBLACK_SIZE - 1]) { /* Special case, per_channel blacks in RepeatDim, average for per-channel */ int csum[4] = { 0,0,0,0 }, ccount[4] = { 0,0,0,0 }; int i = 6; for (unsigned row = 0; row < imgdata.color.dng_levels.dng_cblack[4]; row++) for (unsigned col = 0; col < imgdata.color.dng_levels.dng_cblack[5]; col++) for (unsigned c = 0; c < tiff_samples && c < 4; c++) { csum[c] += imgdata.color.dng_levels.dng_cblack[i]; ccount[c]++; i++; } for 
(int c = 0; c < 4; c++) if (ccount[c]) imgdata.color.dng_levels.dng_cblack[c] += csum[c] / ccount[c]; imgdata.color.dng_levels.dng_cblack[4] = imgdata.color.dng_levels.dng_cblack[5] = 0; } memmove(cblack, imgdata.color.dng_levels.dng_cblack, sizeof(cblack)); if (iifd < (int)tiff_nifds && iifd >= 0) { int sidx = IFDLEVELINDEX(iifd, LIBRAW_DNGFM_LINEARRESPONSELIMIT); if (sidx >= 0) { imgdata.color.dng_levels.LinearResponseLimit = tiff_ifd[sidx].dng_levels.LinearResponseLimit; if (imgdata.color.dng_levels.LinearResponseLimit > 0.1 && imgdata.color.dng_levels.LinearResponseLimit <= 1.0) { // And approx promote it to linear_max: int bl4 = 0, bl64 = 0; for (int chan = 0; chan < colors && chan < 4; chan++) bl4 += cblack[chan]; bl4 /= LIM(colors, 1, 4); if (cblack[4] * cblack[5] > 0) { unsigned cnt = 0; for (unsigned c = 0; c < 4096 && c < cblack[4] * cblack[5]; c++) { bl64 += cblack[c + 6]; cnt++; } bl64 /= LIM(cnt, 1, 4096); } int rblack = black + bl4 + bl64; for (int chan = 0; chan < colors && chan < 4; chan++) imgdata.color.linear_max[chan] = (maximum - rblack) * imgdata.color.dng_levels.LinearResponseLimit + rblack; } } } } } void LibRaw::identify_finetune_pentax() { if (makeIs(LIBRAW_CAMERAMAKER_Pentax) || makeIs(LIBRAW_CAMERAMAKER_Samsung)) { if (height == 2624 && width == 3936) // Pentax K10D, Samsung GX10; { height = 2616; width = 3896; } if (height == 3136 && width == 4864) // Pentax K20D, Samsung GX20; { height = 3124; width = 4688; filters = 0x16161616; } } if (makeIs(LIBRAW_CAMERAMAKER_Pentax)) { if ((width == 4352) && ((unique_id == PentaxID_K_r) || (unique_id == PentaxID_K_x))) { width = 4309; filters = 0x16161616; } if ((width >= 4960) && ((unique_id == PentaxID_K_5) || (unique_id == PentaxID_K_5_II) || (unique_id == PentaxID_K_5_II_s))) { left_margin = 10; width = 4950; filters = 0x16161616; } if ((width == 6080) && (unique_id == PentaxID_K_70)) { height = 4016; top_margin = 32; width = 6020; left_margin = 60; } if ((width == 4736) && (unique_id == PentaxID_K_7)) { height = 3122; width = 4684; filters = 0x16161616; top_margin = 2; } if ((width == 6080) && (unique_id == PentaxID_K_3_II)) { left_margin = 4; width = 6040; } if ((width == 6112) && (unique_id == PentaxID_KP)) { // From DNG, maybe too strict left_margin = 54; top_margin = 28; width = 6028; height = raw_height - top_margin; } if ((width == 6080) && (unique_id == PentaxID_K_3)) { left_margin = 4; width = 6040; } if ((width == 7424) && (unique_id == PentaxID_645D)) { height = 5502; width = 7328; filters = 0x61616161; top_margin = 29; left_margin = 48; } } else if (makeIs(LIBRAW_CAMERAMAKER_Ricoh) && (height == 3014) && (width == 4096)) // Ricoh GX200 width = 4014; } void LibRaw::identify_finetune_by_filesize(int fsize) { if (fsize == 4771840) { // hack Nikon 3mpix: E880, E885, E990, E995; // Olympus C-3030Z if (!timestamp && nikon_e995()) strcpy(model, "E995"); } else if (fsize == 2940928) { // hack Nikon 2mpix: E2100, E2500 if (!timestamp && !nikon_e2100()) strcpy(model, "E2500"); } else if (fsize == 4775936) { // hack Nikon 3mpix: E3100, E3200, E3500, E3700; // Pentax "Optio 33WR"; // Olympus C-740UZ if (!timestamp) nikon_3700(); } else if (fsize == 5869568) { // hack Nikon 4mpix: E4300; // hack Minolta "DiMAGE Z2" if (!timestamp && minolta_z2()) { maker_index = LIBRAW_CAMERAMAKER_Minolta; strcpy(make, "Minolta"); strcpy(model, "DiMAGE Z2"); } } } void LibRaw::identify_finetune_dcr(char head[64], int fsize, int flen) { static const short pana[][6] = { // raw_width, raw_height, left_margin, top_margin, width_increment, // 
height_increment {3130, 1743, 4, 0, -6, 0}, /* 00 */ {3130, 2055, 4, 0, -6, 0}, /* 01 */ {3130, 2319, 4, 0, -6, 0}, /* 02 DMC-FZ8 */ {3170, 2103, 18, 0, -42, 20}, /* 03 */ {3170, 2367, 18, 13, -42, -21}, /* 04 */ {3177, 2367, 0, 0, -1, 0}, /* 05 DMC-L1 */ {3304, 2458, 0, 0, -1, 0}, /* 06 DMC-FZ30 */ {3330, 2463, 9, 0, -5, 0}, /* 07 DMC-FZ18 */ {3330, 2479, 9, 0, -17, 4}, /* 08 */ {3370, 1899, 15, 0, -44, 20}, /* 09 */ {3370, 2235, 15, 0, -44, 20}, /* 10 */ {3370, 2511, 15, 10, -44, -21}, /* 11 */ {3690, 2751, 3, 0, -8, -3}, /* 12 DMC-FZ50 */ {3710, 2751, 0, 0, -3, 0}, /* 13 DMC-L10 */ {3724, 2450, 0, 0, 0, -2}, /* 14 */ {3770, 2487, 17, 0, -44, 19}, /* 15 */ {3770, 2799, 17, 15, -44, -19}, /* 16 */ {3880, 2170, 6, 0, -6, 0}, /* 17 DMC-LX1 */ {4060, 3018, 0, 0, 0, -2}, /* 18 DMC-FZ35, DMC-FZ38 */ {4290, 2391, 3, 0, -8, -1}, /* 19 DMC-LX2 */ {4330, 2439, 17, 15, -44, -19}, /* 20 "D-LUX 3" */ {4508, 2962, 0, 0, -3, -4}, /* 21 */ {4508, 3330, 0, 0, -3, -6}, /* 22 */ {10480, 7794, 0, 0, -2, 0}, /* 23: G9 in high-res */ }; int i,c; struct jhead jh; if (makeIs(LIBRAW_CAMERAMAKER_Canon) && !tiff_flip && imCanon.MakernotesFlip) { tiff_flip = imCanon.MakernotesFlip; } else if (makeIs(LIBRAW_CAMERAMAKER_Nikon)) { if (!load_raw) load_raw = &LibRaw::packed_load_raw; if (model[0] == 'E') // Nikon E8800, E8700, E8400, E5700, E5400, E5000, // others are diag hacks? load_flags |= !data_offset << 2 | 2; } /* Set parameters based on camera name (for non-DNG files). */ /* Always 512 for arw2_load_raw */ else if (makeIs(LIBRAW_CAMERAMAKER_Sony) && (raw_width > 3888) && !black && !cblack[0]) { black = (load_raw == &LibRaw::sony_arw2_load_raw) ? 512 : (128 << (tiff_bps - 12)); } if (is_foveon) { if (height * 2 < width) pixel_aspect = 0.5; if (height > width) pixel_aspect = 2; filters = 0; } else if (makeIs(LIBRAW_CAMERAMAKER_Pentax)) { if ((unique_id == PentaxID_K_1) || (unique_id == PentaxID_K_1_Mark_II)) { top_margin = 18; height = raw_height - top_margin; if (raw_width == 7392) { left_margin = 6; width = 7376; } } else if (unique_id == PentaxID_Optio_S_V101) { // (fsize == 3178560) cam_mul[0] *= 4; cam_mul[2] *= 4; } else if (unique_id == PentaxID_Optio_33WR) { // (fsize == 4775936) flip = 1; filters = 0x16161616; } else if (unique_id == PentaxID_staristD) { load_raw = &LibRaw::unpacked_load_raw; data_error = -1; } else if (unique_id == PentaxID_staristDS) { height -= 2; } } else if (makeIs(LIBRAW_CAMERAMAKER_Canon)) { if (tiff_bps == 15) { // Canon sRAW if (width == 3344) width = 3272; else if (width == 3872) width = 3866; if (height > width) { SWAP(height, width); SWAP(raw_height, raw_width); } if (width == 7200 && height == 3888) { // Canon EOS 5DS (R); raw_width = width = 6480; raw_height = height = 4320; } filters = 0; tiff_samples = colors = 3; load_raw = &LibRaw::canon_sraw_load_raw; } if (!strcmp(normalized_model, "PowerShot 600")) { height = 613; width = 854; raw_width = 896; colors = 4; filters = 0xe1e4e1e4; load_raw = &LibRaw::canon_600_load_raw; } else if (!strcmp(normalized_model, "PowerShot A5") || !strcmp(normalized_model, "PowerShot A5 Zoom")) { height = 773; width = 960; raw_width = 992; pixel_aspect = 256 / 235.0; filters = 0x1e4e1e4e; goto canon_a5; } else if (!strcmp(normalized_model, "PowerShot A50")) { height = 968; width = 1290; raw_width = 1320; filters = 0x1b4e4b1e; goto canon_a5; } else if (!strcmp(normalized_model, "PowerShot Pro70")) { height = 1024; width = 1552; filters = 0x1e4b4e1b; canon_a5: colors = 4; tiff_bps = 10; load_raw = &LibRaw::packed_load_raw; load_flags = 40; } else 
if (!strcmp(normalized_model, "PowerShot Pro90 IS") || !strcmp(normalized_model, "PowerShot G1")) { colors = 4; filters = 0xb4b4b4b4; } else if (!strcmp(normalized_model, "PowerShot A610")) { // chdk hack if (canon_s2is()) strcpy(model + 10, "S2 IS"); // chdk hack } else if (!strcmp(normalized_model, "PowerShot SX220 HS")) { // chdk hack mask[1][3] = -4; top_margin = 16; left_margin = 92; } else if (!strcmp(normalized_model, "PowerShot S120")) { // chdk hack raw_width = 4192; raw_height = 3062; width = 4022; height = 3016; mask[0][0] = top_margin = 31; mask[0][2] = top_margin + height; left_margin = 120; mask[0][1] = 23; mask[0][3] = 72; } else if (!strcmp(normalized_model, "PowerShot G16")) { mask[0][0] = 0; mask[0][2] = 80; mask[0][1] = 0; mask[0][3] = 16; top_margin = 29; left_margin = 120; width = raw_width - left_margin - 48; height = raw_height - top_margin - 14; } else if (!strcmp(normalized_model, "PowerShot SX50 HS")) { top_margin = 17; } } else if (makeIs(LIBRAW_CAMERAMAKER_Nikon)) { if (!strcmp(model, "D1")) { imgdata.other.analogbalance[0] = cam_mul[0]; imgdata.other.analogbalance[2] = cam_mul[2]; imgdata.other.analogbalance[1] = imgdata.other.analogbalance[3] = cam_mul[1]; cam_mul[0] = cam_mul[1] = cam_mul[2] = 1.0f; } else if (!strcmp(model, "D1X")) { width -= 4; pixel_aspect = 0.5; } else if (!strcmp(model, "D40X") || !strcmp(model, "D60") || !strcmp(model, "D80") || !strcmp(model, "D3000")) { height -= 3; width -= 4; } else if (!strcmp(model, "D3") || !strcmp(model, "D3S") || !strcmp(model, "D700")) { width -= 4; left_margin = 2; } else if (!strcmp(model, "D3100")) { width -= 28; left_margin = 6; } else if (!strcmp(model, "D5000") || !strcmp(model, "D90")) { width -= 42; } else if (!strcmp(model, "D5100") || !strcmp(model, "D7000") || !strcmp(model, "COOLPIX A")) { width -= 44; } else if (!strcmp(model, "D3200") || !strcmp(model, "D600") || !strcmp(model, "D610") || !strncmp(model, "D800", 4)) // Nikons: D800, D800E { width -= 46; } else if (!strcmp(model, "D4") || !strcmp(model, "Df")) { width -= 52; left_margin = 2; } else if (!strcmp(model, "D500")) { // Empty - to avoid width-1 below } else if (!strncmp(model, "D40", 3) || !strncmp(model, "D50", 3) || !strncmp(model, "D70", 3)) { width--; } else if (!strcmp(model, "D100")) { if (load_flags) // compressed NEF raw_width = (width += 3) + 3; } else if (!strcmp(model, "D200")) { left_margin = 1; width -= 4; filters = 0x94949494; } else if (!strncmp(model, "D2H", 3)) // Nikons: D2H, D2Hs { left_margin = 6; width -= 14; } else if (!strncmp(model, "D2X", 3)) // Nikons: D2X, D2Xs { if (width == 3264) // in-camera Hi-speed crop: On width -= 32; else width -= 8; } else if (!strncmp(model, "D300", 4)) // Nikons: D300, D300s { width -= 32; } else if (raw_width == 4032) // Nikon "COOLPIX P7700", "COOLPIX P7800", // "COOLPIX P330", "COOLPIX P340" { if (!strcmp(normalized_model, "COOLPIX P7700")) { maximum = 65504; load_flags = 0; } else if (!strcmp(normalized_model, "COOLPIX P7800")) { maximum = 65504; load_flags = 0; } else if (!strcmp(model, "COOLPIX P340")) { load_flags = 0; } } else if (!strncmp(model, "COOLPIX P", 9) && raw_width != 4032) // Nikon "COOLPIX P1000", "COOLPIX P6000", // "COOLPIX P7000", "COOLPIX P7100" { load_flags = 24; filters = 0x94949494; /* the following 'if' is most probably obsolete, because we now read black * level from metadata */ if ((model[9] == '7') && /* P7000, P7100 */ ((iso_speed >= 400) || (iso_speed == 0)) && !strstr(software, "V1.2")) /* v. 
1.2 seen for P7000 only */ black = 255; } else if (!strncmp(model, "COOLPIX B700", 12)) { load_flags = 24; } else if (!strncmp(model, "1 ", 2)) // Nikons: "1 AW1", "1 J1", "1 J2", "1 J3", "1 J4", // "1 J5", "1 S1", "1 S2", "1 V1", "1 V2", "1 V3" { height -= 2; } else if (fsize == 1581060) // hack Nikon 1mpix: E900 { simple_coeff(3); pre_mul[0] = 1.2085; pre_mul[1] = 1.0943; pre_mul[3] = 1.1103; } else if ((fsize == 4771840) && // hack Nikon 3mpix: E880, E885, E990 strcmp(model, "E995")) // but not E995 { filters = 0xb4b4b4b4; simple_coeff(3); pre_mul[0] = 1.196; pre_mul[1] = 1.246; pre_mul[2] = 1.018; } else if ((fsize == 4775936) && // hack Nikon 3mpix: E3100, E3200, E3500 (atoi(model + 1) < 3700)) // but not E3700; { filters = 0x49494949; } else if (fsize == 5869568) // hack Nikon 4mpix: E4300; { load_flags = 6; } else if (!strcmp(model, "E2500")) { height -= 2; load_flags = 6; colors = 4; filters = 0x4b4b4b4b; } } else if (makeIs(LIBRAW_CAMERAMAKER_Olympus)) { if (OlyID == OlyID_C_740UZ) { // (fsize == 4775936) i = find_green(12, 32, 1188864, 3576832); c = find_green(12, 32, 2383920, 2387016); if (abs(i) < abs(c)) { SWAP(i, c); load_flags = 24; } if (i < 0) filters = 0x61616161; } else if (OlyID == OlyID_C_770UZ) { height = 1718; width = 2304; filters = 0x16161616; load_raw = &LibRaw::packed_load_raw; load_flags = 30; } else { height += height & 1; if (exif_cfa) filters = exif_cfa; if (width == 4100) // Olympus E-PL2, E-PL1, E-P2, E-P1, E-620, E-600, E-5, E-30; width -= 4; if (width == 4080) // Olympus E-PM1, E-PL3, E-P3; width -= 24; if (width == 10400) // Olympus PEN-F, E-M1-II, E-M1-III, E-M1X width -= 12; if (width == 8200) // E-M1-III in 50Mp mode, E-M1X width -= 30; if (width == 9280) { // Olympus E-M5 Mark II; width -= 6; height -= 6; } if (load_raw == &LibRaw::unpacked_load_raw) load_flags = 4; tiff_bps = 12; if ((OlyID == OlyID_E_300) || (OlyID == OlyID_E_500)) { width -= 20; if (load_raw == &LibRaw::unpacked_load_raw) { maximum = 0xfc3; memset(cblack, 0, sizeof cblack); } } else if (OlyID == OlyID_STYLUS_1) { width -= 16; maximum = 0xfff; } else if (OlyID == OlyID_E_330) { width -= 30; if (load_raw == &LibRaw::unpacked_load_raw) maximum = 0xf79; } else if (OlyID == OlyID_SP_550UZ) { thumb_length = flen - (thumb_offset = 0xa39800); thumb_height = 480; thumb_width = 640; } else if (OlyID == OlyID_TG_4) { width -= 16; } else if ((OlyID == OlyID_TG_5) || (OlyID == OlyID_TG_6)) { width -= 26; } } } else if (makeIs(LIBRAW_CAMERAMAKER_RoverShot) && (fsize == 6291456)) { // RoverShot 3320AF fseek(ifp, 0x300000, SEEK_SET); if ((order = guess_byte_order(0x10000)) == 0x4d4d) { height -= (top_margin = 16); width -= (left_margin = 28); maximum = 0xf5c0; strcpy(make, "ISG"); model[0] = 0; } } else if (makeIs(LIBRAW_CAMERAMAKER_Fujifilm)) { if (!strcmp(model, "S2Pro")) { height = 2144; width = 2880; flip = 6; } else if (load_raw != &LibRaw::packed_load_raw && strncmp(model, "X-", 2) && filters >= 1000) // Bayer and not an X-model maximum = (is_raw == 2 && shot_select) ? 0x2f00 : 0x3e00; if (FujiCropMode == 1) { // FF crop on GFX width = raw_width; height = raw_height; } else if (FujiCropMode == 4) { /* electronic shutter, high speed mode (1.25x crop) */ height = raw_height; } top_margin = (raw_height >= height) ? (raw_height - height) >> 2 << 1 : 0; left_margin = (raw_width >= width) ? 
(raw_width - width) >> 2 << 1 : 0; if (!strcmp(model, "X-T3") || !strcmp(model, "X-T4") || !strcmp(model, "X100V") || !strcmp(model, "X-T30") || !strcmp(model, "X-Pro3")) { top_margin = 0; if (FujiCropMode == 0) { top_margin = 6; height = 4170; left_margin = 0; width = 6246; } else if (FujiCropMode == 4) { /* electronic shutter, high speed mode (1.25x crop) */ left_margin = 624; width = 5004; } } if (width == 2848 || // Fujifilm X-S1, X10, XF1 width == 3664) // Fujifilm "HS10 HS11" filters = 0x16161616; if (width == 4032 || // Fujifilm X20, X30, XQ1, XQ2 width == 4952) // Fujifilm X-A1, X-A2, X-E1, X-M1, X-Pro1 left_margin = 0; if (width == 3328 && (width -= 66)) // Fujifilm F550EXR, F600EXR, F770EXR, F800EXR, F900EXR, // HS20EXR, HS30EXR, HS33EXR, HS50EXR left_margin = 34; if (width == 4936) // Fujifilm X-E2S, X-E2, X-T10, X-T1, X100S, X100T, X70 left_margin = 4; if (width == 6032) // Fujifilm X100F, X-T2, X-T20, X-Pro2, X-H1, X-E3 left_margin = 0; if (!strcmp(normalized_model, "DBP for GX680")) { /* 7712 2752 -> 5504 3856 */ /* width = 688; height = 30848; raw_width = 688; raw_height = 30848; */ raw_width = 5504; raw_height = 3856; left_margin = 32; top_margin = 8; width = raw_width - left_margin - 32; height = raw_height - top_margin - 8; load_raw = &LibRaw::unpacked_load_raw_FujiDBP; // maximum = 0x0fff; filters = 0x16161616; load_flags = 0; flip = 6; } if (!strcmp(model, "HS50EXR") || !strcmp(model, "F900EXR")) { width += 2; left_margin = 0; filters = 0x16161616; } if (!strncmp(model, "GFX 50", 6)) { left_margin = 0; top_margin = 0; } if (!strncmp(model, "GFX 100", 7)) { left_margin = 0; width = raw_width - 146; height = raw_height - (top_margin = 2); if (tiff_bps == 16) maximum = 0xffff; } if (!strcmp(normalized_model, "S5100")) { height -= (top_margin = 6); } if (fuji_layout) raw_width *= is_raw; if (filters == 9) FORC(36) ((char *)xtrans)[c] = xtrans_abs[(c / 6 + top_margin) % 6][(c + left_margin) % 6]; } else if (makeIs(LIBRAW_CAMERAMAKER_Konica)) { if (!strcmp(model, "KD-400Z")) { height = 1712; width = 2312; raw_width = 2336; goto konica_400z; } else if (!strcmp(model, "KD-510Z")) { goto konica_510z; } } else if (makeIs(LIBRAW_CAMERAMAKER_Minolta)) { if (fsize == 5869568) { // hack Minolta "DiMAGE Z2" load_flags = 30; } if (!load_raw && (maximum = 0xfff)) { load_raw = &LibRaw::unpacked_load_raw; } if (!strncmp(model, "DiMAGE A", 8)) // Minolta "DiMAGE A1", "DiMAGE A2", "DiMAGE A200" { if (!strcmp(model, "DiMAGE A200")) filters = 0x49494949; tiff_bps = 12; load_raw = &LibRaw::packed_load_raw; } else if (!strncmp(normalized_model, "DG-", 3)) { load_raw = &LibRaw::packed_load_raw; } else if (!strncmp(model, "DiMAGE G", 8)) // hack Minolta "DiMAGE G400", "DiMAGE G500", // "DiMAGE G530", "DiMAGE G600" { if (model[8] == '4') // DiMAGE G400 { height = 1716; width = 2304; } else if (model[8] == '5') // DiMAGE G500 / G530 { konica_510z: height = 1956; width = 2607; raw_width = 2624; } else if (model[8] == '6') // DiMAGE G600 { height = 2136; width = 2848; } data_offset += 14; filters = 0x61616161; konica_400z: load_raw = &LibRaw::unpacked_load_raw; maximum = 0x3df; order = 0x4d4d; } } else if (makeIs(LIBRAW_CAMERAMAKER_Samsung)) { if (raw_width == 4704) // Samsung NX100, NX10, NX11, { height -= top_margin = 8; width -= 2 * (left_margin = 8); load_flags = 32; } else if (!strcmp(model, "NX3000")) // Samsung NX3000; raw_width: 5600 { top_margin = 38; left_margin = 92; width = 5456; height = 3634; filters = 0x61616161; colors = 3; } else if (raw_height == 3714) // Samsung NX2000, NX300M, 
NX300, NX30, EK-GN120 { height -= top_margin = 18; left_margin = raw_width - (width = 5536); if (raw_width != 5600) left_margin = top_margin = 0; filters = 0x61616161; colors = 3; } else if (raw_width == 5632) // Samsung NX1000, NX200, NX20, NX210 { order = 0x4949; height = 3694; top_margin = 2; width = 5574 - (left_margin = 32 + tiff_bps); if (tiff_bps == 12) load_flags = 80; } else if (raw_width == 5664) // Samsung "NX mini" { height -= top_margin = 17; left_margin = 96; width = 5544; filters = 0x49494949; } else if (raw_width == 6496) // Samsung NX1, NX500 { filters = 0x61616161; if (!black && !cblack[0] && !cblack[1] && !cblack[2] && !cblack[3]) black = 1 << (tiff_bps - 7); } else if (!strcmp(model, "EX1")) // Samsung EX1; raw_width: 3688 { order = 0x4949; height -= 20; top_margin = 2; if ((width -= 6) > 3682) { height -= 10; width -= 46; top_margin = 8; } } else if (!strcmp(model, "WB2000")) // Samsung WB2000; raw_width: 3728 { order = 0x4949; height -= 3; top_margin = 2; if ((width -= 10) > 3718) { height -= 28; width -= 56; top_margin = 8; } } else if (!strcmp(model, "WB550")) // Samsung WB550; raw_width: 4000 { order = 0x4949; } else if (!strcmp(model, "EX2F")) // Samsung EX2F; raw_width: 4176 { height = 3030; width = 4040; top_margin = 15; left_margin = 24; order = 0x4949; filters = 0x49494949; load_raw = &LibRaw::unpacked_load_raw; } } else if (makeIs(LIBRAW_CAMERAMAKER_ST_Micro) && !strcmp(model, "STV680 VGA")) { black = 16; } else if (!strcmp(model, "N95")) { height = raw_height - (top_margin = 2); } else if (!strcmp(model, "640x480")) { gamma_curve(0.45, 4.5, 1, 255); } else if (makeIs(LIBRAW_CAMERAMAKER_Hasselblad)) { if (load_raw == &LibRaw::lossless_jpeg_load_raw) load_raw = &LibRaw::hasselblad_load_raw; if ((imHassy.SensorCode == 4) && !strncmp(model, "V96C", 4)) { // Hasselblad V96C strcpy(model, "V96C"); strcpy(normalized_model, model); height -= (top_margin = 6); width -= (left_margin = 3) + 7; filters = 0x61616161; } else if ((imHassy.SensorCode == 9) && imHassy.uncropped) { // various Hasselblad '-39' height = 5444; width = 7248; top_margin = 4; left_margin = 7; filters = 0x61616161; } else if ((imHassy.SensorCode == 13) && imHassy.uncropped) { // Hasselblad H4D-40, H5D-40 height -= 84; width -= 82; top_margin = 4; left_margin = 41; filters = 0x61616161; } else if ((imHassy.SensorCode == 11) && imHassy.uncropped) { // Hasselblad H5D-50 height -= 84; width -= 82; top_margin = 4; left_margin = 41; filters = 0x61616161; } else if ((imHassy.SensorCode == 15) && !imHassy.SensorSubCode && // Hasselblad H5D-50c imHassy.uncropped) { left_margin = 52; top_margin = 100; width = 8272; height = 6200; black = 256; } else if ((imHassy.SensorCode == 15) && (imHassy.SensorSubCode == 2) && // various Hasselblad X1D cameras imHassy.uncropped) { top_margin = 96; height -= 96; left_margin = 48; width -= 106; maximum = 0xffff; tiff_bps = 16; } else if ((imHassy.SensorCode == 12) && imHassy.uncropped) { // Hasselblad H4D-60 if (black > 500) { // (imHassy.format == LIBRAW_HF_FFF) top_margin = 12; left_margin = 44; width = 8956; height = 6708; memset(cblack, 0, sizeof(cblack)); black = 512; } else { // (imHassy.format == LIBRAW_HF_3FR) top_margin = 8; left_margin = 40; width = 8964; height = 6716; black += load_flags = 256; maximum = 0x8101; } } else if ((imHassy.SensorCode == 17) && imHassy.uncropped) { // Hasselblad H6D-100c, A6D-100c left_margin = 64; width = 11608; top_margin = 108; height = raw_height - top_margin; } if (tiff_samples > 1) { is_raw = tiff_samples + 1; if (!shot_select && 
!half_size) filters = 0; } } else if (makeIs(LIBRAW_CAMERAMAKER_Sinar)) { if (!load_raw) load_raw = &LibRaw::unpacked_load_raw; if (is_raw > 1 && !shot_select) filters = 0; maximum = 0x3fff; } if (load_raw == &LibRaw::sinar_4shot_load_raw) { if (is_raw > 1 && !shot_select) filters = 0; } else if (makeIs(LIBRAW_CAMERAMAKER_Leaf)) { maximum = 0x3fff; fseek(ifp, data_offset, SEEK_SET); if (ljpeg_start(&jh, 1) && jh.bits == 15) maximum = 0x1fff; if (tiff_samples > 1) filters = 0; if (tiff_samples > 1 || tile_length < raw_height) { load_raw = &LibRaw::leaf_hdr_load_raw; raw_width = tile_width; } if ((width | height) == 2048) { if (tiff_samples == 1) { filters = 1; strcpy(cdesc, "RBTG"); strcpy(model, "CatchLight"); strcpy(normalized_model, model); top_margin = 8; left_margin = 18; height = 2032; width = 2016; } else { strcpy(model, "DCB2"); strcpy(normalized_model, model); top_margin = 10; left_margin = 16; height = 2028; width = 2022; } } else if (width + height == 3144 + 2060) { if (!model[0]) { strcpy(model, "Cantare"); strcpy(normalized_model, model); } if (width > height) { top_margin = 6; left_margin = 32; height = 2048; width = 3072; filters = 0x61616161; } else { left_margin = 6; top_margin = 32; width = 2048; height = 3072; filters = 0x16161616; } if (!cam_mul[0] || model[0] == 'V') filters = 0; else is_raw = tiff_samples; } else if (width == 2116) // Leaf "Valeo 6" { strcpy(model, "Valeo 6"); strcpy(normalized_model, model); height -= 2 * (top_margin = 30); width -= 2 * (left_margin = 55); filters = 0x49494949; } else if (width == 3171) // Leaf "Valeo 6" { strcpy(model, "Valeo 6"); strcpy(normalized_model, model); height -= 2 * (top_margin = 24); width -= 2 * (left_margin = 24); filters = 0x16161616; } } else if (makeIs(LIBRAW_CAMERAMAKER_Panasonic)) { if (raw_width > 0 && ((flen - data_offset) / (raw_width * 8 / 7) == raw_height)) load_raw = &LibRaw::panasonic_load_raw; if (!load_raw) { load_raw = &LibRaw::unpacked_load_raw; load_flags = 4; } zero_is_bad = 1; if ((height += 12) > raw_height) height = raw_height; for (i = 0; i < int(sizeof pana / sizeof *pana); i++) if (raw_width == pana[i][0] && raw_height == pana[i][1]) { left_margin = pana[i][2]; top_margin = pana[i][3]; width += pana[i][4]; height += pana[i][5]; } if (!tiff_bps && pana_bpp >= 12 && pana_bpp <= 14) tiff_bps = pana_bpp; filters = 0x01010101U * (uchar) "\x94\x61\x49\x16"[((filters - 1) ^ (left_margin & 1) ^ (top_margin << 1)) & 3]; } else if (makeIs(LIBRAW_CAMERAMAKER_Contax) && !strcmp(model, "N Digital")) { height = 2047; width = 3072; filters = 0x61616161; data_offset = 0x1a00; load_raw = &LibRaw::packed_load_raw; } else if (makeIs(LIBRAW_CAMERAMAKER_Sony)) { if (!strcmp(model, "DSC-F828")) { // Sony DSC-F828 width = 3288; left_margin = 5; mask[1][3] = -17; data_offset = 862144; load_raw = &LibRaw::sony_load_raw; filters = 0x9c9c9c9c; colors = 4; strcpy(cdesc, "RGBE"); } else if (!strcmp(model, "DSC-V3")) { // Sony DSC-V3 width = 3109; left_margin = 59; mask[0][1] = 9; data_offset = 787392; load_raw = &LibRaw::sony_load_raw; } else if (raw_width == 3984) { // Sony DSC-R1; width = 3925; order = 0x4d4d; } else if (raw_width == 4288) { // Sony ILCE-7S, ILCE-7SM2, DSLR-A700, DSLR-A500; width -= 32; } else if (raw_width == 4600) { // Sony DSLR-A290, DSLR-A350, DSLR-A380; if (!strcmp(model, "DSLR-A350")) height -= 4; black = 0; } else if (raw_width == 4928) { // Sony DSLR-A580, NEX-C3, SLT-A35, DSC-HX99, SLT-A55, // NEX-5N, SLT-A37, SLT-A57, NEX-F3, NEX-6, NEX-5R, NEX-3N, NEX-5T; if (height < 3280) width -= 8; } else if 
(raw_width == 5504) { // Sony ILCE-3000, SLT-A58, DSC-RX100M3, ILCE-QX1, // DSC-RX10M4, DSC-RX100M6, DSC-RX100, DSC-RX100M2, DSC-RX10, // ILCE-5000, DSC-RX100M4, DSC-RX10M2, DSC-RX10M3, // DSC-RX100M5, DSC-RX100M5A; width -= height > 3664 ? 8 : 32; } else if (raw_width == 6048) { // Sony SLT-A65, DSC-RX1, SLT-A77, DSC-RX1, ILCA-77M2, // ILCE-7M3, NEX-7, SLT-A99, ILCE-7, DSC-RX1R, ILCE-6000, // ILCE-5100, ILCE-7M2, ILCA-68, ILCE-6300, ILCE-9, // ILCE-6500, ILCE-6400; width -= 24; if (strstr(normalized_model, "RX1") || strstr(normalized_model, "A99")) width -= 6; } else if (raw_width == 7392) { // Sony ILCE-7R; width -= 30; } else if (raw_width == 8000) { // Sony ILCE-7RM2, ILCE-7RM2, ILCE-7RM3, DSC-RX1RM2, ILCA-99M2; width -= 32; } else if (raw_width == 9600) { // Sony ILCE-7RM4 width -= 32; } else if (!strcmp(model, "DSLR-A100")) { if (width == 3880) { height--; width = ++raw_width; } else { height -= 4; width -= 4; order = 0x4d4d; load_flags = 2; } filters = 0x61616161; } } else if (!strcmp(model, "PIXL")) { height -= top_margin = 4; width -= left_margin = 32; gamma_curve(0, 7, 1, 255); } else if (makeIs(LIBRAW_CAMERAMAKER_Kodak)) { if (!strncasecmp(model, "EasyShare", 9)) { data_offset = data_offset < 0x15000 ? 0x15000 : 0x17000; load_raw = &LibRaw::packed_load_raw; } else if (!strcmp(model, "C603") || !strcmp(model, "C330") || !strcmp(model, "12MP")) { order = 0x4949; if (filters && data_offset) { fseek(ifp, data_offset < 4096 ? 168 : 5252, SEEK_SET); read_shorts(curve, 256); } else gamma_curve(0, 3.875, 1, 255); load_raw = filters ? &LibRaw::eight_bit_load_raw : strcmp(model, "C330") ? &LibRaw::kodak_c603_load_raw : &LibRaw::kodak_c330_load_raw; load_flags = tiff_bps > 16; tiff_bps = 8; } else { if (!strncmp(model, "NC2000", 6) || !strncmp(model, "EOSDCS", 6) || !strncmp(model, "DCS4", 4)) { width -= 4; left_margin = 2; } else if (!strcmp(model, "DCS660M")) { black = 214; } else if (!strcmp(model, "EOS D2000C")) { filters = 0x61616161; if (!black) black = curve[200]; } if (filters == UINT_MAX) filters = 0x61616161; if (!strcmp(model + 4, "20X")) strcpy(cdesc, "MYCY"); if (!strcmp(model, "DC25")) { data_offset = 15424; } if (!strncmp(model, "DC2", 3)) { raw_height = 2 + (height = 242); if (!strncmp(model, "DC290", 5)) iso_speed = 100; if (!strncmp(model, "DC280", 5)) iso_speed = 70; if (flen < 100000) { raw_width = 256; width = 249; pixel_aspect = (4.0 * height) / (3.0 * width); } else { raw_width = 512; width = 501; pixel_aspect = (493.0 * height) / (373.0 * width); } top_margin = left_margin = 1; colors = 4; filters = 0x8d8d8d8d; simple_coeff(1); pre_mul[1] = 1.179; pre_mul[2] = 1.209; pre_mul[3] = 1.036; load_raw = &LibRaw::eight_bit_load_raw; } else if (!strcmp(model, "DC40")) { height = 512; width = 768; data_offset = 1152; load_raw = &LibRaw::kodak_radc_load_raw; tiff_bps = 12; FORC4 cam_mul[c] = 1.0f; } else if (!strcmp(model, "DC50")) { height = 512; width = 768; iso_speed = 84; data_offset = 19712; load_raw = &LibRaw::kodak_radc_load_raw; FORC4 cam_mul[c] = 1.0f; } else if (!strcmp(model, "DC120")) { raw_height = height = 976; raw_width = width = 848; iso_speed = 160; pixel_aspect = height / 0.75 / width; load_raw = tiff_compress == 7 ? 
&LibRaw::kodak_jpeg_load_raw : &LibRaw::kodak_dc120_load_raw; } else if (!strcmp(model, "DCS200")) { thumb_height = 128; thumb_width = 192; thumb_offset = 6144; thumb_misc = 360; iso_speed = 140; write_thumb = &LibRaw::layer_thumb; black = 17; } } } else if (makeIs(LIBRAW_CAMERAMAKER_Logitech) && !strcmp(model, "Fotoman Pixtura")) { height = 512; width = 768; data_offset = 3632; load_raw = &LibRaw::kodak_radc_load_raw; filters = 0x61616161; simple_coeff(2); } else if (makeIs(LIBRAW_CAMERAMAKER_Apple) && !strncmp(model, "QuickTake", 9)) { if (head[5]) strcpy(model + 10, "200"); fseek(ifp, 544, SEEK_SET); height = get2(); width = get2(); data_offset = (get4(), get2()) == 30 ? 738 : 736; if (height > width) { SWAP(height, width); fseek(ifp, data_offset - 6, SEEK_SET); flip = ~get2() & 3 ? 5 : 6; } filters = 0x61616161; } else if (makeIs(LIBRAW_CAMERAMAKER_Rollei) && !load_raw) { switch (raw_width) { case 1316: // Rollei d530flex height = 1030; width = 1300; top_margin = 1; left_margin = 6; break; case 2568: height = 1960; width = 2560; top_margin = 2; left_margin = 8; } filters = 0x16161616; load_raw = &LibRaw::rollei_load_raw; } else if (!strcmp(model, "GRAS-50S5C")) { height = 2048; width = 2440; load_raw = &LibRaw::unpacked_load_raw; data_offset = 0; filters = 0x49494949; order = 0x4949; maximum = 0xfffC; } else if (!strcmp(model, "BB-500CL")) { height = 2058; width = 2448; load_raw = &LibRaw::unpacked_load_raw; data_offset = 0; filters = 0x94949494; order = 0x4949; maximum = 0x3fff; } else if (!strcmp(model, "BB-500GE")) { height = 2058; width = 2456; load_raw = &LibRaw::unpacked_load_raw; data_offset = 0; filters = 0x94949494; order = 0x4949; maximum = 0x3fff; } else if (!strcmp(model, "SVS625CL")) { height = 2050; width = 2448; load_raw = &LibRaw::unpacked_load_raw; data_offset = 0; filters = 0x94949494; order = 0x4949; maximum = 0x0fff; } }
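Note on the ground-truth LibRaw cell above: when a DNG carries a RepeatDim black-level pattern (dng_cblack[4] rows by dng_cblack[5] columns of values starting at index 6), the code folds that pattern into four per-channel black values by averaging, then zeroes the pattern dimensions. The following is a minimal, self-contained sketch of that averaging step only; it is written in Java for illustration, the class and method names are invented here, and it is not part of LibRaw's API.

final class DngBlackSketch {
    // Average a RepeatDim black pattern into per-channel blacks, mirroring the
    // csum/ccount loops in the cell above. Assumed layout of cblack:
    // [0..3] per-channel blacks, [4] rows, [5] cols, [6..] pattern values
    // (at least rows * cols * samples of them).
    static void foldRepeatDimBlacks(int[] cblack, int samples) {
        int rows = cblack[4], cols = cblack[5];
        long[] csum = new long[4];
        long[] ccount = new long[4];
        int i = 6;
        for (int row = 0; row < rows; row++)
            for (int col = 0; col < cols; col++)
                for (int c = 0; c < samples && c < 4; c++) {
                    csum[c] += cblack[i++];
                    ccount[c]++;
                }
        for (int c = 0; c < 4; c++)
            if (ccount[c] > 0)
                cblack[c] += (int) (csum[c] / ccount[c]);
        cblack[4] = cblack[5] = 0; // pattern consumed; only per-channel values remain
    }
}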
null
207
CWE-787
CVE-2020-25021
/* * Copyright (C) 2016 Southern Storm Software, Pty Ltd. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ package com.southernstorm.noise.protocol; import java.security.InvalidAlgorithmParameterException; import java.security.InvalidKeyException; import java.security.NoSuchAlgorithmException; import java.util.Arrays; import javax.crypto.BadPaddingException; import javax.crypto.Cipher; import javax.crypto.IllegalBlockSizeException; import javax.crypto.NoSuchPaddingException; import javax.crypto.ShortBufferException; import javax.crypto.spec.IvParameterSpec; import javax.crypto.spec.SecretKeySpec; import com.southernstorm.noise.crypto.GHASH; /** * Emulates the "AESGCM" cipher for Noise using the "AES/CTR/NoPadding" * transformation from JCA/JCE. * * This class is used on platforms that don't have "AES/GCM/NoPadding", * but which do have the older "AES/CTR/NoPadding". */ class AESGCMOnCtrCipherState implements CipherState { private Cipher cipher; private SecretKeySpec keySpec; private long n; private byte[] iv; private byte[] hashKey; private GHASH ghash; /** * Constructs a new cipher state for the "AESGCM" algorithm. * * @throws NoSuchAlgorithmException The system does not have a * provider for this algorithm. */ public AESGCMOnCtrCipherState() throws NoSuchAlgorithmException { try { cipher = Cipher.getInstance("AES/CTR/NoPadding"); } catch (NoSuchPaddingException e) { // AES/CTR is available, but not the unpadded version? Huh? throw new NoSuchAlgorithmException("AES/CTR/NoPadding not available", e); } keySpec = null; n = 0; iv = new byte [16]; hashKey = new byte [16]; ghash = new GHASH(); // Try to set a 256-bit key on the cipher. Some JCE's are // configured to disallow 256-bit AES if an extra policy // file has not been installed. try { SecretKeySpec spec = new SecretKeySpec(new byte [32], "AES"); IvParameterSpec params = new IvParameterSpec(iv); cipher.init(Cipher.ENCRYPT_MODE, spec, params); } catch (InvalidKeyException e) { throw new NoSuchAlgorithmException("AES/CTR/NoPadding does not support 256-bit keys", e); } catch (InvalidAlgorithmParameterException e) { throw new NoSuchAlgorithmException("AES/CTR/NoPadding does not support 256-bit keys", e); } } @Override public void destroy() { // There doesn't seem to be a standard API to clean out a Cipher. // So we instead set the key and IV to all-zeroes to hopefully // destroy the sensitive data in the cipher instance. 
ghash.destroy(); Noise.destroy(hashKey); Noise.destroy(iv); keySpec = new SecretKeySpec(new byte [32], "AES"); IvParameterSpec params = new IvParameterSpec(iv); try { cipher.init(Cipher.ENCRYPT_MODE, keySpec, params); } catch (InvalidKeyException e) { // Shouldn't happen. } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. } } @Override public String getCipherName() { return "AESGCM"; } @Override public int getKeyLength() { return 32; } @Override public int getMACLength() { return keySpec != null ? 16 : 0; } @Override public void initializeKey(byte[] key, int offset) { // Set the encryption key. keySpec = new SecretKeySpec(key, offset, 32, "AES"); // Generate the hashing key by encrypting a block of zeroes. Arrays.fill(iv, (byte)0); Arrays.fill(hashKey, (byte)0); try { cipher.init(Cipher.ENCRYPT_MODE, keySpec, new IvParameterSpec(iv)); } catch (InvalidKeyException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. throw new IllegalStateException(e); } try { int result = cipher.update(hashKey, 0, 16, hashKey, 0); cipher.doFinal(hashKey, result); } catch (ShortBufferException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (IllegalBlockSizeException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (BadPaddingException e) { // Shouldn't happen. throw new IllegalStateException(e); } ghash.reset(hashKey, 0); // Reset the nonce. n = 0; } @Override public boolean hasKey() { return keySpec != null; } /** * Set up to encrypt or decrypt the next packet. * * @param ad The associated data for the packet. */ private void setup(byte[] ad) throws InvalidKeyException, InvalidAlgorithmParameterException { // Check for nonce wrap-around. if (n == -1L) throw new IllegalStateException("Nonce has wrapped around"); // Format the counter/IV block for AES/CTR/NoPadding. iv[0] = 0; iv[1] = 0; iv[2] = 0; iv[3] = 0; iv[4] = (byte)(n >> 56); iv[5] = (byte)(n >> 48); iv[6] = (byte)(n >> 40); iv[7] = (byte)(n >> 32); iv[8] = (byte)(n >> 24); iv[9] = (byte)(n >> 16); iv[10] = (byte)(n >> 8); iv[11] = (byte)n; iv[12] = 0; iv[13] = 0; iv[14] = 0; iv[15] = 1; ++n; // Initialize the CTR mode cipher with the key and IV. cipher.init(Cipher.ENCRYPT_MODE, keySpec, new IvParameterSpec(iv)); // Encrypt a block of zeroes to generate the hash key to XOR // the GHASH tag with at the end of the encrypt/decrypt operation. Arrays.fill(hashKey, (byte)0); try { cipher.update(hashKey, 0, 16, hashKey, 0); } catch (ShortBufferException e) { // Shouldn't happen. throw new IllegalStateException(e); } // Initialize the GHASH with the associated data value. ghash.reset(); if (ad != null) { ghash.update(ad, 0, ad.length); ghash.pad(); } } @Override public int encryptWithAd(byte[] ad, byte[] plaintext, int plaintextOffset, byte[] ciphertext, int ciphertextOffset, int length) throws ShortBufferException { int space; if (ciphertextOffset > ciphertext.length) space = 0; else space = ciphertext.length - ciphertextOffset; if (keySpec == null) { // The key is not set yet - return the plaintext as-is. 
if (length > space) throw new ShortBufferException(); if (plaintext != ciphertext || plaintextOffset != ciphertextOffset) System.arraycopy(plaintext, plaintextOffset, ciphertext, ciphertextOffset, length); return length; } if (space < 16 || length > (space - 16)) throw new ShortBufferException(); try { setup(ad); int result = cipher.update(plaintext, plaintextOffset, length, ciphertext, ciphertextOffset); cipher.doFinal(ciphertext, ciphertextOffset + result); } catch (InvalidKeyException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (IllegalBlockSizeException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (BadPaddingException e) { // Shouldn't happen. throw new IllegalStateException(e); } ghash.update(ciphertext, ciphertextOffset, length); ghash.pad(ad != null ? ad.length : 0, length); ghash.finish(ciphertext, ciphertextOffset + length, 16); for (int index = 0; index < 16; ++index) ciphertext[ciphertextOffset + length + index] ^= hashKey[index]; return length + 16; } @Override public int decryptWithAd(byte[] ad, byte[] ciphertext, int ciphertextOffset, byte[] plaintext, int plaintextOffset, int length) throws ShortBufferException, BadPaddingException { int space; if (ciphertextOffset > ciphertext.length) space = 0; else space = ciphertext.length - ciphertextOffset; if (length > space) throw new ShortBufferException(); if (plaintextOffset > plaintext.length) space = 0; else space = plaintext.length - plaintextOffset; if (keySpec == null) { // The key is not set yet - return the ciphertext as-is. if (length > space) throw new ShortBufferException(); if (plaintext != ciphertext || plaintextOffset != ciphertextOffset) System.arraycopy(ciphertext, ciphertextOffset, plaintext, plaintextOffset, length); return length; } if (length < 16) Noise.throwBadTagException(); int dataLen = length - 16; if (dataLen > space) throw new ShortBufferException(); try { setup(ad); } catch (InvalidKeyException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. throw new IllegalStateException(e); } ghash.update(ciphertext, ciphertextOffset, dataLen); ghash.pad(ad != null ? ad.length : 0, dataLen); ghash.finish(iv, 0, 16); int temp = 0; for (int index = 0; index < 16; ++index) temp |= (hashKey[index] ^ iv[index] ^ ciphertext[ciphertextOffset + dataLen + index]); if ((temp & 0xFF) != 0) Noise.throwBadTagException(); try { int result = cipher.update(ciphertext, ciphertextOffset, dataLen, plaintext, plaintextOffset); cipher.doFinal(plaintext, plaintextOffset + result); } catch (IllegalBlockSizeException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (BadPaddingException e) { // Shouldn't happen. throw new IllegalStateException(e); } return dataLen; } @Override public CipherState fork(byte[] key, int offset) { CipherState cipher; try { cipher = new AESGCMOnCtrCipherState(); } catch (NoSuchAlgorithmException e) { // Shouldn't happen. return null; } cipher.initializeKey(key, offset); return cipher; } @Override public void setNonce(long nonce) { n = nonce; } }
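In the vulnerable source cell above, encryptWithAd only clamps space to 0 when ciphertextOffset exceeds ciphertext.length; negative or oversized values of length, plaintextOffset, and ciphertextOffset are passed straight through to cipher.update, System.arraycopy, and the 16-byte tag write, which is the missing bounds checking (CWE-787) this row records. The patched version in the ground-truth field below rejects such arguments up front. A minimal sketch of that check, pulled out as a stand-alone helper (the class and method names here are illustrative, not part of the library):

final class BoundsCheckSketch {
    // Mirrors the argument validation added to encryptWithAd in the fixed version:
    // reject negative or out-of-range offsets/length before computing output space.
    static int checkedOutputSpace(byte[] plaintext, int plaintextOffset,
                                  byte[] ciphertext, int ciphertextOffset, int length) {
        if (ciphertextOffset < 0 || ciphertextOffset > ciphertext.length)
            throw new IllegalArgumentException();
        if (length < 0 || plaintextOffset < 0 || plaintextOffset > plaintext.length)
            throw new IllegalArgumentException();
        // Room left in the output buffer; the caller still needs length + 16 bytes
        // here to hold the ciphertext plus the 16-byte tag.
        return ciphertext.length - ciphertextOffset;
    }
}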
null
/* * Copyright (C) 2016 Southern Storm Software, Pty Ltd. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ package com.southernstorm.noise.protocol; import java.security.InvalidAlgorithmParameterException; import java.security.InvalidKeyException; import java.security.NoSuchAlgorithmException; import java.util.Arrays; import javax.crypto.BadPaddingException; import javax.crypto.Cipher; import javax.crypto.IllegalBlockSizeException; import javax.crypto.NoSuchPaddingException; import javax.crypto.ShortBufferException; import javax.crypto.spec.IvParameterSpec; import javax.crypto.spec.SecretKeySpec; import com.southernstorm.noise.crypto.GHASH; /** * Emulates the "AESGCM" cipher for Noise using the "AES/CTR/NoPadding" * transformation from JCA/JCE. * * This class is used on platforms that don't have "AES/GCM/NoPadding", * but which do have the older "AES/CTR/NoPadding". */ class AESGCMOnCtrCipherState implements CipherState { private Cipher cipher; private SecretKeySpec keySpec; private long n; private byte[] iv; private byte[] hashKey; private GHASH ghash; /** * Constructs a new cipher state for the "AESGCM" algorithm. * * @throws NoSuchAlgorithmException The system does not have a * provider for this algorithm. */ public AESGCMOnCtrCipherState() throws NoSuchAlgorithmException { try { cipher = Cipher.getInstance("AES/CTR/NoPadding"); } catch (NoSuchPaddingException e) { // AES/CTR is available, but not the unpadded version? Huh? throw new NoSuchAlgorithmException("AES/CTR/NoPadding not available", e); } keySpec = null; n = 0; iv = new byte [16]; hashKey = new byte [16]; ghash = new GHASH(); // Try to set a 256-bit key on the cipher. Some JCE's are // configured to disallow 256-bit AES if an extra policy // file has not been installed. try { SecretKeySpec spec = new SecretKeySpec(new byte [32], "AES"); IvParameterSpec params = new IvParameterSpec(iv); cipher.init(Cipher.ENCRYPT_MODE, spec, params); } catch (InvalidKeyException e) { throw new NoSuchAlgorithmException("AES/CTR/NoPadding does not support 256-bit keys", e); } catch (InvalidAlgorithmParameterException e) { throw new NoSuchAlgorithmException("AES/CTR/NoPadding does not support 256-bit keys", e); } } @Override public void destroy() { // There doesn't seem to be a standard API to clean out a Cipher. // So we instead set the key and IV to all-zeroes to hopefully // destroy the sensitive data in the cipher instance. 
ghash.destroy(); Noise.destroy(hashKey); Noise.destroy(iv); keySpec = new SecretKeySpec(new byte [32], "AES"); IvParameterSpec params = new IvParameterSpec(iv); try { cipher.init(Cipher.ENCRYPT_MODE, keySpec, params); } catch (InvalidKeyException e) { // Shouldn't happen. } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. } } @Override public String getCipherName() { return "AESGCM"; } @Override public int getKeyLength() { return 32; } @Override public int getMACLength() { return keySpec != null ? 16 : 0; } @Override public void initializeKey(byte[] key, int offset) { // Set the encryption key. keySpec = new SecretKeySpec(key, offset, 32, "AES"); // Generate the hashing key by encrypting a block of zeroes. Arrays.fill(iv, (byte)0); Arrays.fill(hashKey, (byte)0); try { cipher.init(Cipher.ENCRYPT_MODE, keySpec, new IvParameterSpec(iv)); } catch (InvalidKeyException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. throw new IllegalStateException(e); } try { int result = cipher.update(hashKey, 0, 16, hashKey, 0); cipher.doFinal(hashKey, result); } catch (ShortBufferException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (IllegalBlockSizeException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (BadPaddingException e) { // Shouldn't happen. throw new IllegalStateException(e); } ghash.reset(hashKey, 0); // Reset the nonce. n = 0; } @Override public boolean hasKey() { return keySpec != null; } /** * Set up to encrypt or decrypt the next packet. * * @param ad The associated data for the packet. */ private void setup(byte[] ad) throws InvalidKeyException, InvalidAlgorithmParameterException { // Check for nonce wrap-around. if (n == -1L) throw new IllegalStateException("Nonce has wrapped around"); // Format the counter/IV block for AES/CTR/NoPadding. iv[0] = 0; iv[1] = 0; iv[2] = 0; iv[3] = 0; iv[4] = (byte)(n >> 56); iv[5] = (byte)(n >> 48); iv[6] = (byte)(n >> 40); iv[7] = (byte)(n >> 32); iv[8] = (byte)(n >> 24); iv[9] = (byte)(n >> 16); iv[10] = (byte)(n >> 8); iv[11] = (byte)n; iv[12] = 0; iv[13] = 0; iv[14] = 0; iv[15] = 1; ++n; // Initialize the CTR mode cipher with the key and IV. cipher.init(Cipher.ENCRYPT_MODE, keySpec, new IvParameterSpec(iv)); // Encrypt a block of zeroes to generate the hash key to XOR // the GHASH tag with at the end of the encrypt/decrypt operation. Arrays.fill(hashKey, (byte)0); try { cipher.update(hashKey, 0, 16, hashKey, 0); } catch (ShortBufferException e) { // Shouldn't happen. throw new IllegalStateException(e); } // Initialize the GHASH with the associated data value. ghash.reset(); if (ad != null) { ghash.update(ad, 0, ad.length); ghash.pad(); } } @Override public int encryptWithAd(byte[] ad, byte[] plaintext, int plaintextOffset, byte[] ciphertext, int ciphertextOffset, int length) throws ShortBufferException { int space; if (ciphertextOffset < 0 || ciphertextOffset > ciphertext.length) throw new IllegalArgumentException(); if (length < 0 || plaintextOffset < 0 || plaintextOffset > plaintext.length) throw new IllegalArgumentException(); space = ciphertext.length - ciphertextOffset; if (keySpec == null) { // The key is not set yet - return the plaintext as-is. 
if (length > space) throw new ShortBufferException(); if (plaintext != ciphertext || plaintextOffset != ciphertextOffset) System.arraycopy(plaintext, plaintextOffset, ciphertext, ciphertextOffset, length); return length; } if (space < 16 || length > (space - 16)) throw new ShortBufferException(); try { setup(ad); int result = cipher.update(plaintext, plaintextOffset, length, ciphertext, ciphertextOffset); cipher.doFinal(ciphertext, ciphertextOffset + result); } catch (InvalidKeyException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (IllegalBlockSizeException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (BadPaddingException e) { // Shouldn't happen. throw new IllegalStateException(e); } ghash.update(ciphertext, ciphertextOffset, length); ghash.pad(ad != null ? ad.length : 0, length); ghash.finish(ciphertext, ciphertextOffset + length, 16); for (int index = 0; index < 16; ++index) ciphertext[ciphertextOffset + length + index] ^= hashKey[index]; return length + 16; } @Override public int decryptWithAd(byte[] ad, byte[] ciphertext, int ciphertextOffset, byte[] plaintext, int plaintextOffset, int length) throws ShortBufferException, BadPaddingException { int space; if (ciphertextOffset < 0 || ciphertextOffset > ciphertext.length) throw new IllegalArgumentException(); else space = ciphertext.length - ciphertextOffset; if (length > space) throw new ShortBufferException(); if (length < 0 || plaintextOffset < 0 || plaintextOffset > plaintext.length) throw new IllegalArgumentException(); space = plaintext.length - plaintextOffset; if (keySpec == null) { // The key is not set yet - return the ciphertext as-is. if (length > space) throw new ShortBufferException(); if (plaintext != ciphertext || plaintextOffset != ciphertextOffset) System.arraycopy(ciphertext, ciphertextOffset, plaintext, plaintextOffset, length); return length; } if (length < 16) Noise.throwBadTagException(); int dataLen = length - 16; if (dataLen > space) throw new ShortBufferException(); try { setup(ad); } catch (InvalidKeyException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. throw new IllegalStateException(e); } ghash.update(ciphertext, ciphertextOffset, dataLen); ghash.pad(ad != null ? ad.length : 0, dataLen); ghash.finish(iv, 0, 16); int temp = 0; for (int index = 0; index < 16; ++index) temp |= (hashKey[index] ^ iv[index] ^ ciphertext[ciphertextOffset + dataLen + index]); if ((temp & 0xFF) != 0) Noise.throwBadTagException(); try { int result = cipher.update(ciphertext, ciphertextOffset, dataLen, plaintext, plaintextOffset); cipher.doFinal(plaintext, plaintextOffset + result); } catch (IllegalBlockSizeException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (BadPaddingException e) { // Shouldn't happen. throw new IllegalStateException(e); } return dataLen; } @Override public CipherState fork(byte[] key, int offset) { CipherState cipher; try { cipher = new AESGCMOnCtrCipherState(); } catch (NoSuchAlgorithmException e) { // Shouldn't happen. return null; } cipher.initializeKey(key, offset); return cipher; } @Override public void setNonce(long nonce) { n = nonce; } }
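Both the vulnerable and the patched versions emulate AES-GCM on top of AES/CTR/NoPadding: setup() encrypts a block of zeroes at counter value 1 to obtain the value the GHASH output is XORed with, subsequent CTR keystream blocks encrypt the payload, and decryptWithAd recomputes GHASH over the associated data and ciphertext and compares against the received tag before releasing any plaintext. A small self-contained sketch of that comparison step, assuming the same 16-byte tag layout (the names below are illustrative and not the library's API):

final class TagCheckSketch {
    // Recompute-and-compare step used by decryptWithAd: the expected tag is
    // GHASH(ad, ciphertext) XOR E_k(counter block 1). Differences are OR-ed
    // together so the check does not return early on the first mismatching byte.
    static boolean tagMatches(byte[] encryptedCounterBlock, byte[] ghashOutput,
                              byte[] ciphertext, int tagOffset) {
        int diff = 0;
        for (int i = 0; i < 16; i++)
            diff |= (encryptedCounterBlock[i] ^ ghashOutput[i]
                     ^ ciphertext[tagOffset + i]);
        return (diff & 0xFF) == 0;
    }
}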
null
208
CWE-787
CVE-2020-25022
/* * Copyright (C) 2016 Southern Storm Software, Pty Ltd. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ package com.southernstorm.noise.protocol; import java.security.InvalidAlgorithmParameterException; import java.security.InvalidKeyException; import java.security.NoSuchAlgorithmException; import java.util.Arrays; import javax.crypto.BadPaddingException; import javax.crypto.Cipher; import javax.crypto.IllegalBlockSizeException; import javax.crypto.NoSuchPaddingException; import javax.crypto.ShortBufferException; import javax.crypto.spec.IvParameterSpec; import javax.crypto.spec.SecretKeySpec; import com.southernstorm.noise.crypto.GHASH; /** * Emulates the "AESGCM" cipher for Noise using the "AES/CTR/NoPadding" * transformation from JCA/JCE. * * This class is used on platforms that don't have "AES/GCM/NoPadding", * but which do have the older "AES/CTR/NoPadding". */ class AESGCMOnCtrCipherState implements CipherState { private Cipher cipher; private SecretKeySpec keySpec; private long n; private byte[] iv; private byte[] hashKey; private GHASH ghash; /** * Constructs a new cipher state for the "AESGCM" algorithm. * * @throws NoSuchAlgorithmException The system does not have a * provider for this algorithm. */ public AESGCMOnCtrCipherState() throws NoSuchAlgorithmException { try { cipher = Cipher.getInstance("AES/CTR/NoPadding"); } catch (NoSuchPaddingException e) { // AES/CTR is available, but not the unpadded version? Huh? throw new NoSuchAlgorithmException("AES/CTR/NoPadding not available", e); } keySpec = null; n = 0; iv = new byte [16]; hashKey = new byte [16]; ghash = new GHASH(); // Try to set a 256-bit key on the cipher. Some JCE's are // configured to disallow 256-bit AES if an extra policy // file has not been installed. try { SecretKeySpec spec = new SecretKeySpec(new byte [32], "AES"); IvParameterSpec params = new IvParameterSpec(iv); cipher.init(Cipher.ENCRYPT_MODE, spec, params); } catch (InvalidKeyException e) { throw new NoSuchAlgorithmException("AES/CTR/NoPadding does not support 256-bit keys", e); } catch (InvalidAlgorithmParameterException e) { throw new NoSuchAlgorithmException("AES/CTR/NoPadding does not support 256-bit keys", e); } } @Override public void destroy() { // There doesn't seem to be a standard API to clean out a Cipher. // So we instead set the key and IV to all-zeroes to hopefully // destroy the sensitive data in the cipher instance. 
ghash.destroy(); Noise.destroy(hashKey); Noise.destroy(iv); keySpec = new SecretKeySpec(new byte [32], "AES"); IvParameterSpec params = new IvParameterSpec(iv); try { cipher.init(Cipher.ENCRYPT_MODE, keySpec, params); } catch (InvalidKeyException e) { // Shouldn't happen. } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. } } @Override public String getCipherName() { return "AESGCM"; } @Override public int getKeyLength() { return 32; } @Override public int getMACLength() { return keySpec != null ? 16 : 0; } @Override public void initializeKey(byte[] key, int offset) { // Set the encryption key. keySpec = new SecretKeySpec(key, offset, 32, "AES"); // Generate the hashing key by encrypting a block of zeroes. Arrays.fill(iv, (byte)0); Arrays.fill(hashKey, (byte)0); try { cipher.init(Cipher.ENCRYPT_MODE, keySpec, new IvParameterSpec(iv)); } catch (InvalidKeyException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. throw new IllegalStateException(e); } try { int result = cipher.update(hashKey, 0, 16, hashKey, 0); cipher.doFinal(hashKey, result); } catch (ShortBufferException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (IllegalBlockSizeException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (BadPaddingException e) { // Shouldn't happen. throw new IllegalStateException(e); } ghash.reset(hashKey, 0); // Reset the nonce. n = 0; } @Override public boolean hasKey() { return keySpec != null; } /** * Set up to encrypt or decrypt the next packet. * * @param ad The associated data for the packet. */ private void setup(byte[] ad) throws InvalidKeyException, InvalidAlgorithmParameterException { // Check for nonce wrap-around. if (n == -1L) throw new IllegalStateException("Nonce has wrapped around"); // Format the counter/IV block for AES/CTR/NoPadding. iv[0] = 0; iv[1] = 0; iv[2] = 0; iv[3] = 0; iv[4] = (byte)(n >> 56); iv[5] = (byte)(n >> 48); iv[6] = (byte)(n >> 40); iv[7] = (byte)(n >> 32); iv[8] = (byte)(n >> 24); iv[9] = (byte)(n >> 16); iv[10] = (byte)(n >> 8); iv[11] = (byte)n; iv[12] = 0; iv[13] = 0; iv[14] = 0; iv[15] = 1; ++n; // Initialize the CTR mode cipher with the key and IV. cipher.init(Cipher.ENCRYPT_MODE, keySpec, new IvParameterSpec(iv)); // Encrypt a block of zeroes to generate the hash key to XOR // the GHASH tag with at the end of the encrypt/decrypt operation. Arrays.fill(hashKey, (byte)0); try { cipher.update(hashKey, 0, 16, hashKey, 0); } catch (ShortBufferException e) { // Shouldn't happen. throw new IllegalStateException(e); } // Initialize the GHASH with the associated data value. ghash.reset(); if (ad != null) { ghash.update(ad, 0, ad.length); ghash.pad(); } } @Override public int encryptWithAd(byte[] ad, byte[] plaintext, int plaintextOffset, byte[] ciphertext, int ciphertextOffset, int length) throws ShortBufferException { int space; if (ciphertextOffset > ciphertext.length) space = 0; else space = ciphertext.length - ciphertextOffset; if (keySpec == null) { // The key is not set yet - return the plaintext as-is. 
if (length > space) throw new ShortBufferException(); if (plaintext != ciphertext || plaintextOffset != ciphertextOffset) System.arraycopy(plaintext, plaintextOffset, ciphertext, ciphertextOffset, length); return length; } if (space < 16 || length > (space - 16)) throw new ShortBufferException(); try { setup(ad); int result = cipher.update(plaintext, plaintextOffset, length, ciphertext, ciphertextOffset); cipher.doFinal(ciphertext, ciphertextOffset + result); } catch (InvalidKeyException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (IllegalBlockSizeException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (BadPaddingException e) { // Shouldn't happen. throw new IllegalStateException(e); } ghash.update(ciphertext, ciphertextOffset, length); ghash.pad(ad != null ? ad.length : 0, length); ghash.finish(ciphertext, ciphertextOffset + length, 16); for (int index = 0; index < 16; ++index) ciphertext[ciphertextOffset + length + index] ^= hashKey[index]; return length + 16; } @Override public int decryptWithAd(byte[] ad, byte[] ciphertext, int ciphertextOffset, byte[] plaintext, int plaintextOffset, int length) throws ShortBufferException, BadPaddingException { int space; if (ciphertextOffset > ciphertext.length) space = 0; else space = ciphertext.length - ciphertextOffset; if (length > space) throw new ShortBufferException(); if (plaintextOffset > plaintext.length) space = 0; else space = plaintext.length - plaintextOffset; if (keySpec == null) { // The key is not set yet - return the ciphertext as-is. if (length > space) throw new ShortBufferException(); if (plaintext != ciphertext || plaintextOffset != ciphertextOffset) System.arraycopy(ciphertext, ciphertextOffset, plaintext, plaintextOffset, length); return length; } if (length < 16) Noise.throwBadTagException(); int dataLen = length - 16; if (dataLen > space) throw new ShortBufferException(); try { setup(ad); } catch (InvalidKeyException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. throw new IllegalStateException(e); } ghash.update(ciphertext, ciphertextOffset, dataLen); ghash.pad(ad != null ? ad.length : 0, dataLen); ghash.finish(iv, 0, 16); int temp = 0; for (int index = 0; index < 16; ++index) temp |= (hashKey[index] ^ iv[index] ^ ciphertext[ciphertextOffset + dataLen + index]); if ((temp & 0xFF) != 0) Noise.throwBadTagException(); try { int result = cipher.update(ciphertext, ciphertextOffset, dataLen, plaintext, plaintextOffset); cipher.doFinal(plaintext, plaintextOffset + result); } catch (IllegalBlockSizeException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (BadPaddingException e) { // Shouldn't happen. throw new IllegalStateException(e); } return dataLen; } @Override public CipherState fork(byte[] key, int offset) { CipherState cipher; try { cipher = new AESGCMOnCtrCipherState(); } catch (NoSuchAlgorithmException e) { // Shouldn't happen. return null; } cipher.initializeKey(key, offset); return cipher; } @Override public void setNonce(long nonce) { n = nonce; } }
null
/* * Copyright (C) 2016 Southern Storm Software, Pty Ltd. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ package com.southernstorm.noise.protocol; import java.security.InvalidAlgorithmParameterException; import java.security.InvalidKeyException; import java.security.NoSuchAlgorithmException; import java.util.Arrays; import javax.crypto.BadPaddingException; import javax.crypto.Cipher; import javax.crypto.IllegalBlockSizeException; import javax.crypto.NoSuchPaddingException; import javax.crypto.ShortBufferException; import javax.crypto.spec.IvParameterSpec; import javax.crypto.spec.SecretKeySpec; import com.southernstorm.noise.crypto.GHASH; /** * Emulates the "AESGCM" cipher for Noise using the "AES/CTR/NoPadding" * transformation from JCA/JCE. * * This class is used on platforms that don't have "AES/GCM/NoPadding", * but which do have the older "AES/CTR/NoPadding". */ class AESGCMOnCtrCipherState implements CipherState { private Cipher cipher; private SecretKeySpec keySpec; private long n; private byte[] iv; private byte[] hashKey; private GHASH ghash; /** * Constructs a new cipher state for the "AESGCM" algorithm. * * @throws NoSuchAlgorithmException The system does not have a * provider for this algorithm. */ public AESGCMOnCtrCipherState() throws NoSuchAlgorithmException { try { cipher = Cipher.getInstance("AES/CTR/NoPadding"); } catch (NoSuchPaddingException e) { // AES/CTR is available, but not the unpadded version? Huh? throw new NoSuchAlgorithmException("AES/CTR/NoPadding not available", e); } keySpec = null; n = 0; iv = new byte [16]; hashKey = new byte [16]; ghash = new GHASH(); // Try to set a 256-bit key on the cipher. Some JCE's are // configured to disallow 256-bit AES if an extra policy // file has not been installed. try { SecretKeySpec spec = new SecretKeySpec(new byte [32], "AES"); IvParameterSpec params = new IvParameterSpec(iv); cipher.init(Cipher.ENCRYPT_MODE, spec, params); } catch (InvalidKeyException e) { throw new NoSuchAlgorithmException("AES/CTR/NoPadding does not support 256-bit keys", e); } catch (InvalidAlgorithmParameterException e) { throw new NoSuchAlgorithmException("AES/CTR/NoPadding does not support 256-bit keys", e); } } @Override public void destroy() { // There doesn't seem to be a standard API to clean out a Cipher. // So we instead set the key and IV to all-zeroes to hopefully // destroy the sensitive data in the cipher instance. 
ghash.destroy(); Noise.destroy(hashKey); Noise.destroy(iv); keySpec = new SecretKeySpec(new byte [32], "AES"); IvParameterSpec params = new IvParameterSpec(iv); try { cipher.init(Cipher.ENCRYPT_MODE, keySpec, params); } catch (InvalidKeyException e) { // Shouldn't happen. } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. } } @Override public String getCipherName() { return "AESGCM"; } @Override public int getKeyLength() { return 32; } @Override public int getMACLength() { return keySpec != null ? 16 : 0; } @Override public void initializeKey(byte[] key, int offset) { // Set the encryption key. keySpec = new SecretKeySpec(key, offset, 32, "AES"); // Generate the hashing key by encrypting a block of zeroes. Arrays.fill(iv, (byte)0); Arrays.fill(hashKey, (byte)0); try { cipher.init(Cipher.ENCRYPT_MODE, keySpec, new IvParameterSpec(iv)); } catch (InvalidKeyException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. throw new IllegalStateException(e); } try { int result = cipher.update(hashKey, 0, 16, hashKey, 0); cipher.doFinal(hashKey, result); } catch (ShortBufferException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (IllegalBlockSizeException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (BadPaddingException e) { // Shouldn't happen. throw new IllegalStateException(e); } ghash.reset(hashKey, 0); // Reset the nonce. n = 0; } @Override public boolean hasKey() { return keySpec != null; } /** * Set up to encrypt or decrypt the next packet. * * @param ad The associated data for the packet. */ private void setup(byte[] ad) throws InvalidKeyException, InvalidAlgorithmParameterException { // Check for nonce wrap-around. if (n == -1L) throw new IllegalStateException("Nonce has wrapped around"); // Format the counter/IV block for AES/CTR/NoPadding. iv[0] = 0; iv[1] = 0; iv[2] = 0; iv[3] = 0; iv[4] = (byte)(n >> 56); iv[5] = (byte)(n >> 48); iv[6] = (byte)(n >> 40); iv[7] = (byte)(n >> 32); iv[8] = (byte)(n >> 24); iv[9] = (byte)(n >> 16); iv[10] = (byte)(n >> 8); iv[11] = (byte)n; iv[12] = 0; iv[13] = 0; iv[14] = 0; iv[15] = 1; ++n; // Initialize the CTR mode cipher with the key and IV. cipher.init(Cipher.ENCRYPT_MODE, keySpec, new IvParameterSpec(iv)); // Encrypt a block of zeroes to generate the hash key to XOR // the GHASH tag with at the end of the encrypt/decrypt operation. Arrays.fill(hashKey, (byte)0); try { cipher.update(hashKey, 0, 16, hashKey, 0); } catch (ShortBufferException e) { // Shouldn't happen. throw new IllegalStateException(e); } // Initialize the GHASH with the associated data value. ghash.reset(); if (ad != null) { ghash.update(ad, 0, ad.length); ghash.pad(); } } @Override public int encryptWithAd(byte[] ad, byte[] plaintext, int plaintextOffset, byte[] ciphertext, int ciphertextOffset, int length) throws ShortBufferException { int space; if (ciphertextOffset < 0 || ciphertextOffset > ciphertext.length) throw new IllegalArgumentException(); if (length < 0 || plaintextOffset < 0 || plaintextOffset > plaintext.length) throw new IllegalArgumentException(); space = ciphertext.length - ciphertextOffset; if (keySpec == null) { // The key is not set yet - return the plaintext as-is. 
if (length > space) throw new ShortBufferException(); if (plaintext != ciphertext || plaintextOffset != ciphertextOffset) System.arraycopy(plaintext, plaintextOffset, ciphertext, ciphertextOffset, length); return length; } if (space < 16 || length > (space - 16)) throw new ShortBufferException(); try { setup(ad); int result = cipher.update(plaintext, plaintextOffset, length, ciphertext, ciphertextOffset); cipher.doFinal(ciphertext, ciphertextOffset + result); } catch (InvalidKeyException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (IllegalBlockSizeException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (BadPaddingException e) { // Shouldn't happen. throw new IllegalStateException(e); } ghash.update(ciphertext, ciphertextOffset, length); ghash.pad(ad != null ? ad.length : 0, length); ghash.finish(ciphertext, ciphertextOffset + length, 16); for (int index = 0; index < 16; ++index) ciphertext[ciphertextOffset + length + index] ^= hashKey[index]; return length + 16; } @Override public int decryptWithAd(byte[] ad, byte[] ciphertext, int ciphertextOffset, byte[] plaintext, int plaintextOffset, int length) throws ShortBufferException, BadPaddingException { int space; if (ciphertextOffset < 0 || ciphertextOffset > ciphertext.length) throw new IllegalArgumentException(); else space = ciphertext.length - ciphertextOffset; if (length > space) throw new ShortBufferException(); if (length < 0 || plaintextOffset < 0 || plaintextOffset > plaintext.length) throw new IllegalArgumentException(); space = plaintext.length - plaintextOffset; if (keySpec == null) { // The key is not set yet - return the ciphertext as-is. if (length > space) throw new ShortBufferException(); if (plaintext != ciphertext || plaintextOffset != ciphertextOffset) System.arraycopy(ciphertext, ciphertextOffset, plaintext, plaintextOffset, length); return length; } if (length < 16) Noise.throwBadTagException(); int dataLen = length - 16; if (dataLen > space) throw new ShortBufferException(); try { setup(ad); } catch (InvalidKeyException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. throw new IllegalStateException(e); } ghash.update(ciphertext, ciphertextOffset, dataLen); ghash.pad(ad != null ? ad.length : 0, dataLen); ghash.finish(iv, 0, 16); int temp = 0; for (int index = 0; index < 16; ++index) temp |= (hashKey[index] ^ iv[index] ^ ciphertext[ciphertextOffset + dataLen + index]); if ((temp & 0xFF) != 0) Noise.throwBadTagException(); try { int result = cipher.update(ciphertext, ciphertextOffset, dataLen, plaintext, plaintextOffset); cipher.doFinal(plaintext, plaintextOffset + result); } catch (IllegalBlockSizeException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (BadPaddingException e) { // Shouldn't happen. throw new IllegalStateException(e); } return dataLen; } @Override public CipherState fork(byte[] key, int offset) { CipherState cipher; try { cipher = new AESGCMOnCtrCipherState(); } catch (NoSuchAlgorithmException e) { // Shouldn't happen. return null; } cipher.initializeKey(key, offset); return cipher; } @Override public void setNonce(long nonce) { n = nonce; } }
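The fixed file above replaces that pattern with up-front argument validation: encryptWithAd() and decryptWithAd() now throw IllegalArgumentException for negative or out-of-range offsets and lengths before any space arithmetic is done. A condensed sketch of that validation follows; the class and method names are illustrative, only the checks mirror the patched code.

// Illustrative sketch of the up-front checks added in the fixed methods above.
public class CheckedSpaceSketch {

    static int checkedSpace(byte[] buf, int offset, int length) {
        if (offset < 0 || offset > buf.length)
            throw new IllegalArgumentException("offset out of range");
        if (length < 0)
            throw new IllegalArgumentException("negative length");
        return buf.length - offset;   // now guaranteed to be a real byte count
    }

    public static void main(String[] args) {
        byte[] out = new byte[32];
        System.out.println(checkedSpace(out, 0, 16));   // 32
        try {
            checkedSpace(out, -16, 16);                 // rejected before any copying
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}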
null
209
CWE-787
CVE-2020-25023
/* * Copyright (C) 2016 Southern Storm Software, Pty Ltd. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ package com.southernstorm.noise.protocol; import java.security.InvalidAlgorithmParameterException; import java.security.InvalidKeyException; import java.security.NoSuchAlgorithmException; import java.util.Arrays; import javax.crypto.BadPaddingException; import javax.crypto.Cipher; import javax.crypto.IllegalBlockSizeException; import javax.crypto.NoSuchPaddingException; import javax.crypto.ShortBufferException; import javax.crypto.spec.IvParameterSpec; import javax.crypto.spec.SecretKeySpec; import com.southernstorm.noise.crypto.GHASH; /** * Emulates the "AESGCM" cipher for Noise using the "AES/CTR/NoPadding" * transformation from JCA/JCE. * * This class is used on platforms that don't have "AES/GCM/NoPadding", * but which do have the older "AES/CTR/NoPadding". */ class AESGCMOnCtrCipherState implements CipherState { private Cipher cipher; private SecretKeySpec keySpec; private long n; private byte[] iv; private byte[] hashKey; private GHASH ghash; /** * Constructs a new cipher state for the "AESGCM" algorithm. * * @throws NoSuchAlgorithmException The system does not have a * provider for this algorithm. */ public AESGCMOnCtrCipherState() throws NoSuchAlgorithmException { try { cipher = Cipher.getInstance("AES/CTR/NoPadding"); } catch (NoSuchPaddingException e) { // AES/CTR is available, but not the unpadded version? Huh? throw new NoSuchAlgorithmException("AES/CTR/NoPadding not available", e); } keySpec = null; n = 0; iv = new byte [16]; hashKey = new byte [16]; ghash = new GHASH(); // Try to set a 256-bit key on the cipher. Some JCE's are // configured to disallow 256-bit AES if an extra policy // file has not been installed. try { SecretKeySpec spec = new SecretKeySpec(new byte [32], "AES"); IvParameterSpec params = new IvParameterSpec(iv); cipher.init(Cipher.ENCRYPT_MODE, spec, params); } catch (InvalidKeyException e) { throw new NoSuchAlgorithmException("AES/CTR/NoPadding does not support 256-bit keys", e); } catch (InvalidAlgorithmParameterException e) { throw new NoSuchAlgorithmException("AES/CTR/NoPadding does not support 256-bit keys", e); } } @Override public void destroy() { // There doesn't seem to be a standard API to clean out a Cipher. // So we instead set the key and IV to all-zeroes to hopefully // destroy the sensitive data in the cipher instance. 
ghash.destroy(); Noise.destroy(hashKey); Noise.destroy(iv); keySpec = new SecretKeySpec(new byte [32], "AES"); IvParameterSpec params = new IvParameterSpec(iv); try { cipher.init(Cipher.ENCRYPT_MODE, keySpec, params); } catch (InvalidKeyException e) { // Shouldn't happen. } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. } } @Override public String getCipherName() { return "AESGCM"; } @Override public int getKeyLength() { return 32; } @Override public int getMACLength() { return keySpec != null ? 16 : 0; } @Override public void initializeKey(byte[] key, int offset) { // Set the encryption key. keySpec = new SecretKeySpec(key, offset, 32, "AES"); // Generate the hashing key by encrypting a block of zeroes. Arrays.fill(iv, (byte)0); Arrays.fill(hashKey, (byte)0); try { cipher.init(Cipher.ENCRYPT_MODE, keySpec, new IvParameterSpec(iv)); } catch (InvalidKeyException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. throw new IllegalStateException(e); } try { int result = cipher.update(hashKey, 0, 16, hashKey, 0); cipher.doFinal(hashKey, result); } catch (ShortBufferException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (IllegalBlockSizeException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (BadPaddingException e) { // Shouldn't happen. throw new IllegalStateException(e); } ghash.reset(hashKey, 0); // Reset the nonce. n = 0; } @Override public boolean hasKey() { return keySpec != null; } /** * Set up to encrypt or decrypt the next packet. * * @param ad The associated data for the packet. */ private void setup(byte[] ad) throws InvalidKeyException, InvalidAlgorithmParameterException { // Check for nonce wrap-around. if (n == -1L) throw new IllegalStateException("Nonce has wrapped around"); // Format the counter/IV block for AES/CTR/NoPadding. iv[0] = 0; iv[1] = 0; iv[2] = 0; iv[3] = 0; iv[4] = (byte)(n >> 56); iv[5] = (byte)(n >> 48); iv[6] = (byte)(n >> 40); iv[7] = (byte)(n >> 32); iv[8] = (byte)(n >> 24); iv[9] = (byte)(n >> 16); iv[10] = (byte)(n >> 8); iv[11] = (byte)n; iv[12] = 0; iv[13] = 0; iv[14] = 0; iv[15] = 1; ++n; // Initialize the CTR mode cipher with the key and IV. cipher.init(Cipher.ENCRYPT_MODE, keySpec, new IvParameterSpec(iv)); // Encrypt a block of zeroes to generate the hash key to XOR // the GHASH tag with at the end of the encrypt/decrypt operation. Arrays.fill(hashKey, (byte)0); try { cipher.update(hashKey, 0, 16, hashKey, 0); } catch (ShortBufferException e) { // Shouldn't happen. throw new IllegalStateException(e); } // Initialize the GHASH with the associated data value. ghash.reset(); if (ad != null) { ghash.update(ad, 0, ad.length); ghash.pad(); } } @Override public int encryptWithAd(byte[] ad, byte[] plaintext, int plaintextOffset, byte[] ciphertext, int ciphertextOffset, int length) throws ShortBufferException { int space; if (ciphertextOffset > ciphertext.length) space = 0; else space = ciphertext.length - ciphertextOffset; if (keySpec == null) { // The key is not set yet - return the plaintext as-is. 
if (length > space) throw new ShortBufferException(); if (plaintext != ciphertext || plaintextOffset != ciphertextOffset) System.arraycopy(plaintext, plaintextOffset, ciphertext, ciphertextOffset, length); return length; } if (space < 16 || length > (space - 16)) throw new ShortBufferException(); try { setup(ad); int result = cipher.update(plaintext, plaintextOffset, length, ciphertext, ciphertextOffset); cipher.doFinal(ciphertext, ciphertextOffset + result); } catch (InvalidKeyException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (IllegalBlockSizeException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (BadPaddingException e) { // Shouldn't happen. throw new IllegalStateException(e); } ghash.update(ciphertext, ciphertextOffset, length); ghash.pad(ad != null ? ad.length : 0, length); ghash.finish(ciphertext, ciphertextOffset + length, 16); for (int index = 0; index < 16; ++index) ciphertext[ciphertextOffset + length + index] ^= hashKey[index]; return length + 16; } @Override public int decryptWithAd(byte[] ad, byte[] ciphertext, int ciphertextOffset, byte[] plaintext, int plaintextOffset, int length) throws ShortBufferException, BadPaddingException { int space; if (ciphertextOffset > ciphertext.length) space = 0; else space = ciphertext.length - ciphertextOffset; if (length > space) throw new ShortBufferException(); if (plaintextOffset > plaintext.length) space = 0; else space = plaintext.length - plaintextOffset; if (keySpec == null) { // The key is not set yet - return the ciphertext as-is. if (length > space) throw new ShortBufferException(); if (plaintext != ciphertext || plaintextOffset != ciphertextOffset) System.arraycopy(ciphertext, ciphertextOffset, plaintext, plaintextOffset, length); return length; } if (length < 16) Noise.throwBadTagException(); int dataLen = length - 16; if (dataLen > space) throw new ShortBufferException(); try { setup(ad); } catch (InvalidKeyException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. throw new IllegalStateException(e); } ghash.update(ciphertext, ciphertextOffset, dataLen); ghash.pad(ad != null ? ad.length : 0, dataLen); ghash.finish(iv, 0, 16); int temp = 0; for (int index = 0; index < 16; ++index) temp |= (hashKey[index] ^ iv[index] ^ ciphertext[ciphertextOffset + dataLen + index]); if ((temp & 0xFF) != 0) Noise.throwBadTagException(); try { int result = cipher.update(ciphertext, ciphertextOffset, dataLen, plaintext, plaintextOffset); cipher.doFinal(plaintext, plaintextOffset + result); } catch (IllegalBlockSizeException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (BadPaddingException e) { // Shouldn't happen. throw new IllegalStateException(e); } return dataLen; } @Override public CipherState fork(byte[] key, int offset) { CipherState cipher; try { cipher = new AESGCMOnCtrCipherState(); } catch (NoSuchAlgorithmException e) { // Shouldn't happen. return null; } cipher.initializeKey(key, offset); return cipher; } @Override public void setNonce(long nonce) { n = nonce; } }
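decryptWithAd() in the file above verifies the 16-byte tag by OR-ing together the byte-wise differences between the computed and received tags and branching only once on the accumulated value, so the comparison time does not depend on where the first mismatch occurs. A small stand-alone sketch of that comparison pattern is shown below; names are illustrative and the sketch is simplified to two plain arrays rather than the hashKey/GHASH combination used above.

// Constant-time 16-byte tag comparison in the style of decryptWithAd() above.
public class TagCompareSketch {

    static boolean tagsEqual(byte[] expected, byte[] received, int receivedOffset) {
        int diff = 0;
        for (int i = 0; i < 16; ++i)
            diff |= expected[i] ^ received[receivedOffset + i];
        return (diff & 0xFF) == 0;   // single data-independent branch at the end
    }

    public static void main(String[] args) {
        byte[] tag = new byte[16];
        byte[] packet = new byte[32];                      // 16 data bytes + 16 tag bytes, all zero
        System.out.println(tagsEqual(tag, packet, 16));    // true
        packet[16] ^= 1;                                   // flip one tag bit
        System.out.println(tagsEqual(tag, packet, 16));    // false
    }
}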
null
/* * Copyright (C) 2016 Southern Storm Software, Pty Ltd. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ package com.southernstorm.noise.protocol; import java.security.InvalidAlgorithmParameterException; import java.security.InvalidKeyException; import java.security.NoSuchAlgorithmException; import java.util.Arrays; import javax.crypto.BadPaddingException; import javax.crypto.Cipher; import javax.crypto.IllegalBlockSizeException; import javax.crypto.NoSuchPaddingException; import javax.crypto.ShortBufferException; import javax.crypto.spec.IvParameterSpec; import javax.crypto.spec.SecretKeySpec; import com.southernstorm.noise.crypto.GHASH; /** * Emulates the "AESGCM" cipher for Noise using the "AES/CTR/NoPadding" * transformation from JCA/JCE. * * This class is used on platforms that don't have "AES/GCM/NoPadding", * but which do have the older "AES/CTR/NoPadding". */ class AESGCMOnCtrCipherState implements CipherState { private Cipher cipher; private SecretKeySpec keySpec; private long n; private byte[] iv; private byte[] hashKey; private GHASH ghash; /** * Constructs a new cipher state for the "AESGCM" algorithm. * * @throws NoSuchAlgorithmException The system does not have a * provider for this algorithm. */ public AESGCMOnCtrCipherState() throws NoSuchAlgorithmException { try { cipher = Cipher.getInstance("AES/CTR/NoPadding"); } catch (NoSuchPaddingException e) { // AES/CTR is available, but not the unpadded version? Huh? throw new NoSuchAlgorithmException("AES/CTR/NoPadding not available", e); } keySpec = null; n = 0; iv = new byte [16]; hashKey = new byte [16]; ghash = new GHASH(); // Try to set a 256-bit key on the cipher. Some JCE's are // configured to disallow 256-bit AES if an extra policy // file has not been installed. try { SecretKeySpec spec = new SecretKeySpec(new byte [32], "AES"); IvParameterSpec params = new IvParameterSpec(iv); cipher.init(Cipher.ENCRYPT_MODE, spec, params); } catch (InvalidKeyException e) { throw new NoSuchAlgorithmException("AES/CTR/NoPadding does not support 256-bit keys", e); } catch (InvalidAlgorithmParameterException e) { throw new NoSuchAlgorithmException("AES/CTR/NoPadding does not support 256-bit keys", e); } } @Override public void destroy() { // There doesn't seem to be a standard API to clean out a Cipher. // So we instead set the key and IV to all-zeroes to hopefully // destroy the sensitive data in the cipher instance. 
ghash.destroy(); Noise.destroy(hashKey); Noise.destroy(iv); keySpec = new SecretKeySpec(new byte [32], "AES"); IvParameterSpec params = new IvParameterSpec(iv); try { cipher.init(Cipher.ENCRYPT_MODE, keySpec, params); } catch (InvalidKeyException e) { // Shouldn't happen. } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. } } @Override public String getCipherName() { return "AESGCM"; } @Override public int getKeyLength() { return 32; } @Override public int getMACLength() { return keySpec != null ? 16 : 0; } @Override public void initializeKey(byte[] key, int offset) { // Set the encryption key. keySpec = new SecretKeySpec(key, offset, 32, "AES"); // Generate the hashing key by encrypting a block of zeroes. Arrays.fill(iv, (byte)0); Arrays.fill(hashKey, (byte)0); try { cipher.init(Cipher.ENCRYPT_MODE, keySpec, new IvParameterSpec(iv)); } catch (InvalidKeyException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. throw new IllegalStateException(e); } try { int result = cipher.update(hashKey, 0, 16, hashKey, 0); cipher.doFinal(hashKey, result); } catch (ShortBufferException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (IllegalBlockSizeException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (BadPaddingException e) { // Shouldn't happen. throw new IllegalStateException(e); } ghash.reset(hashKey, 0); // Reset the nonce. n = 0; } @Override public boolean hasKey() { return keySpec != null; } /** * Set up to encrypt or decrypt the next packet. * * @param ad The associated data for the packet. */ private void setup(byte[] ad) throws InvalidKeyException, InvalidAlgorithmParameterException { // Check for nonce wrap-around. if (n == -1L) throw new IllegalStateException("Nonce has wrapped around"); // Format the counter/IV block for AES/CTR/NoPadding. iv[0] = 0; iv[1] = 0; iv[2] = 0; iv[3] = 0; iv[4] = (byte)(n >> 56); iv[5] = (byte)(n >> 48); iv[6] = (byte)(n >> 40); iv[7] = (byte)(n >> 32); iv[8] = (byte)(n >> 24); iv[9] = (byte)(n >> 16); iv[10] = (byte)(n >> 8); iv[11] = (byte)n; iv[12] = 0; iv[13] = 0; iv[14] = 0; iv[15] = 1; ++n; // Initialize the CTR mode cipher with the key and IV. cipher.init(Cipher.ENCRYPT_MODE, keySpec, new IvParameterSpec(iv)); // Encrypt a block of zeroes to generate the hash key to XOR // the GHASH tag with at the end of the encrypt/decrypt operation. Arrays.fill(hashKey, (byte)0); try { cipher.update(hashKey, 0, 16, hashKey, 0); } catch (ShortBufferException e) { // Shouldn't happen. throw new IllegalStateException(e); } // Initialize the GHASH with the associated data value. ghash.reset(); if (ad != null) { ghash.update(ad, 0, ad.length); ghash.pad(); } } @Override public int encryptWithAd(byte[] ad, byte[] plaintext, int plaintextOffset, byte[] ciphertext, int ciphertextOffset, int length) throws ShortBufferException { int space; if (ciphertextOffset < 0 || ciphertextOffset > ciphertext.length) throw new IllegalArgumentException(); if (length < 0 || plaintextOffset < 0 || plaintextOffset > plaintext.length) throw new IllegalArgumentException(); space = ciphertext.length - ciphertextOffset; if (keySpec == null) { // The key is not set yet - return the plaintext as-is. 
if (length > space) throw new ShortBufferException(); if (plaintext != ciphertext || plaintextOffset != ciphertextOffset) System.arraycopy(plaintext, plaintextOffset, ciphertext, ciphertextOffset, length); return length; } if (space < 16 || length > (space - 16)) throw new ShortBufferException(); try { setup(ad); int result = cipher.update(plaintext, plaintextOffset, length, ciphertext, ciphertextOffset); cipher.doFinal(ciphertext, ciphertextOffset + result); } catch (InvalidKeyException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (IllegalBlockSizeException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (BadPaddingException e) { // Shouldn't happen. throw new IllegalStateException(e); } ghash.update(ciphertext, ciphertextOffset, length); ghash.pad(ad != null ? ad.length : 0, length); ghash.finish(ciphertext, ciphertextOffset + length, 16); for (int index = 0; index < 16; ++index) ciphertext[ciphertextOffset + length + index] ^= hashKey[index]; return length + 16; } @Override public int decryptWithAd(byte[] ad, byte[] ciphertext, int ciphertextOffset, byte[] plaintext, int plaintextOffset, int length) throws ShortBufferException, BadPaddingException { int space; if (ciphertextOffset < 0 || ciphertextOffset > ciphertext.length) throw new IllegalArgumentException(); else space = ciphertext.length - ciphertextOffset; if (length > space) throw new ShortBufferException(); if (length < 0 || plaintextOffset < 0 || plaintextOffset > plaintext.length) throw new IllegalArgumentException(); space = plaintext.length - plaintextOffset; if (keySpec == null) { // The key is not set yet - return the ciphertext as-is. if (length > space) throw new ShortBufferException(); if (plaintext != ciphertext || plaintextOffset != ciphertextOffset) System.arraycopy(ciphertext, ciphertextOffset, plaintext, plaintextOffset, length); return length; } if (length < 16) Noise.throwBadTagException(); int dataLen = length - 16; if (dataLen > space) throw new ShortBufferException(); try { setup(ad); } catch (InvalidKeyException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (InvalidAlgorithmParameterException e) { // Shouldn't happen. throw new IllegalStateException(e); } ghash.update(ciphertext, ciphertextOffset, dataLen); ghash.pad(ad != null ? ad.length : 0, dataLen); ghash.finish(iv, 0, 16); int temp = 0; for (int index = 0; index < 16; ++index) temp |= (hashKey[index] ^ iv[index] ^ ciphertext[ciphertextOffset + dataLen + index]); if ((temp & 0xFF) != 0) Noise.throwBadTagException(); try { int result = cipher.update(ciphertext, ciphertextOffset, dataLen, plaintext, plaintextOffset); cipher.doFinal(plaintext, plaintextOffset + result); } catch (IllegalBlockSizeException e) { // Shouldn't happen. throw new IllegalStateException(e); } catch (BadPaddingException e) { // Shouldn't happen. throw new IllegalStateException(e); } return dataLen; } @Override public CipherState fork(byte[] key, int offset) { CipherState cipher; try { cipher = new AESGCMOnCtrCipherState(); } catch (NoSuchAlgorithmException e) { // Shouldn't happen. return null; } cipher.initializeKey(key, offset); return cipher; } @Override public void setNonce(long nonce) { n = nonce; } }
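setup() in the files above formats the AES/CTR counter block by hand: four zero bytes, the 64-bit packet nonce in big-endian order, and a 32-bit block counter initialised to 1, which is the counter-block layout the GCM emulation relies on. A compact sketch of the same layout follows; it uses ByteBuffer purely for illustration and the method name is not from the dataset files.

// Builds the 16-byte counter block in the same layout as setup() above.
import java.nio.ByteBuffer;

public class CounterBlockSketch {

    static byte[] counterBlock(long nonce) {
        ByteBuffer block = ByteBuffer.allocate(16);   // big-endian by default
        block.putInt(0);        // bytes 0..3: fixed zero prefix
        block.putLong(nonce);   // bytes 4..11: packet nonce, big-endian
        block.putInt(1);        // bytes 12..15: initial block counter
        return block.array();
    }

    public static void main(String[] args) {
        for (byte b : counterBlock(2L))
            System.out.printf("%02x ", b);
        System.out.println();   // 00 00 00 00 00 00 00 00 00 00 00 02 00 00 00 01
    }
}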
null
210
CWE-787
CVE-2020-26208
//-------------------------------------------------------------------------- // Program to pull the information out of various types of EXIF digital // camera files and show it in a reasonably consistent way // // This module handles basic Jpeg file handling // // Matthias Wandel //-------------------------------------------------------------------------- #include "jhead.h" // Storage for simplified info extracted from file. ImageInfo_t ImageInfo; static Section_t * Sections = NULL; static int SectionsAllocated; static int SectionsRead; static int HaveAll; #define PSEUDO_IMAGE_MARKER 0x123; // Extra value. //-------------------------------------------------------------------------- // Get 16 bits motorola order (always) for jpeg header stuff. //-------------------------------------------------------------------------- static int Get16m(const void * Short) { return (((uchar *)Short)[0] << 8) | ((uchar *)Short)[1]; } //-------------------------------------------------------------------------- // Process a COM marker. // We want to print out the marker contents as legible text; // we must guard against random junk and varying newline representations. //-------------------------------------------------------------------------- static void process_COM (const uchar * Data, int length) { int ch; char Comment[MAX_COMMENT_SIZE+1]; int nch; int a; nch = 0; if (length > MAX_COMMENT_SIZE) length = MAX_COMMENT_SIZE; // Truncate if it won't fit in our structure. for (a=2;a<length;a++){ ch = Data[a]; if (ch == '\r' && a < length-1 && Data[a+1] == '\n') continue; // Remove cr followed by lf. if (ch >= 32 || ch == '\n' || ch == '\t'){ Comment[nch++] = (char)ch; }else{ Comment[nch++] = '?'; } } Comment[nch] = '\0'; // Null terminate if (ShowTags){ printf("COM marker comment: %s\n",Comment); } strcpy(ImageInfo.Comments,Comment); } //-------------------------------------------------------------------------- // Process a SOFn marker. This is useful for the image dimensions //-------------------------------------------------------------------------- static void process_SOFn (const uchar * Data, int marker) { int data_precision, num_components; data_precision = Data[2]; ImageInfo.Height = Get16m(Data+3); ImageInfo.Width = Get16m(Data+5); num_components = Data[7]; if (num_components == 3){ ImageInfo.IsColor = 1; }else{ ImageInfo.IsColor = 0; } ImageInfo.Process = marker; if (ShowTags){ printf("JPEG image is %uw * %uh, %d color components, %d bits per sample\n", ImageInfo.Width, ImageInfo.Height, num_components, data_precision); } } //-------------------------------------------------------------------------- // Check sections array to see if it needs to be increased in size. 
//-------------------------------------------------------------------------- static void CheckSectionsAllocated(void) { if (SectionsRead > SectionsAllocated){ ErrFatal("allocation screwup"); } if (SectionsRead >= SectionsAllocated){ SectionsAllocated += SectionsAllocated/2; Sections = (Section_t *)realloc(Sections, sizeof(Section_t)*SectionsAllocated); if (Sections == NULL){ ErrFatal("could not allocate data for entire image"); } } } //-------------------------------------------------------------------------- // Parse the marker stream until SOS or EOI is seen; //-------------------------------------------------------------------------- int ReadJpegSections (FILE * infile, ReadMode_t ReadMode) { int a; int HaveCom = FALSE; a = fgetc(infile); if (a != 0xff || fgetc(infile) != M_SOI){ return FALSE; } ImageInfo.JfifHeader.XDensity = ImageInfo.JfifHeader.YDensity = 300; ImageInfo.JfifHeader.ResolutionUnits = 1; for(;;){ int itemlen; int prev; int marker = 0; int ll,lh, got; uchar * Data; CheckSectionsAllocated(); prev = 0; for (a=0;;a++){ marker = fgetc(infile); if (marker != 0xff && prev == 0xff) break; if (marker == EOF){ ErrFatal("Unexpected end of file"); } prev = marker; } if (a > 10){ ErrNonfatal("Extraneous %d padding bytes before section %02X",a-1,marker); } Sections[SectionsRead].Type = marker; // Read the length of the section. lh = fgetc(infile); ll = fgetc(infile); if (lh == EOF || ll == EOF){ ErrFatal("Unexpected end of file"); } itemlen = (lh << 8) | ll; if (itemlen < 2){ ErrFatal("invalid marker"); } Sections[SectionsRead].Size = itemlen; Data = (uchar *)malloc(itemlen); if (Data == NULL){ ErrFatal("Could not allocate memory"); } Sections[SectionsRead].Data = Data; // Store first two pre-read bytes. Data[0] = (uchar)lh; Data[1] = (uchar)ll; got = fread(Data+2, 1, itemlen-2, infile); // Read the whole section. if (got != itemlen-2){ ErrFatal("Premature end of file?"); } SectionsRead += 1; switch(marker){ case M_SOS: // stop before hitting compressed data // If reading entire image is requested, read the rest of the data. if (ReadMode & READ_IMAGE){ int cp, ep, size; // Determine how much file is left. cp = ftell(infile); fseek(infile, 0, SEEK_END); ep = ftell(infile); fseek(infile, cp, SEEK_SET); size = ep-cp; Data = (uchar *)malloc(size); if (Data == NULL){ ErrFatal("could not allocate data for entire image"); } got = fread(Data, 1, size, infile); if (got != size){ ErrFatal("could not read the rest of the image"); } CheckSectionsAllocated(); Sections[SectionsRead].Data = Data; Sections[SectionsRead].Size = size; Sections[SectionsRead].Type = PSEUDO_IMAGE_MARKER; SectionsRead ++; HaveAll = 1; } return TRUE; case M_DQT: // Use for jpeg quality guessing process_DQT(Data, itemlen); break; case M_DHT: // Use for jpeg quality guessing process_DHT(Data, itemlen); break; case M_EOI: // in case it's a tables-only JPEG stream fprintf(stderr,"No image in jpeg!\n"); return FALSE; case M_COM: // Comment section if (HaveCom || ((ReadMode & READ_METADATA) == 0)){ // Discard this section. free(Sections[--SectionsRead].Data); }else{ process_COM(Data, itemlen); HaveCom = TRUE; } break; case M_JFIF: // Regular jpegs always have this tag, exif images have the exif // marker instead, althogh ACDsee will write images with both markers. // this program will re-create this marker on absence of exif marker. // hence no need to keep the copy from the file. 
if (itemlen < 16){ fprintf(stderr,"Jfif header too short\n"); goto ignore; } if (memcmp(Data+2, "JFIF\0",5)){ fprintf(stderr,"Header missing JFIF marker\n"); } ImageInfo.JfifHeader.Present = TRUE; ImageInfo.JfifHeader.ResolutionUnits = Data[9]; ImageInfo.JfifHeader.XDensity = (Data[10]<<8) | Data[11]; ImageInfo.JfifHeader.YDensity = (Data[12]<<8) | Data[13]; if (ShowTags){ printf("JFIF SOI marker: Units: %d ",ImageInfo.JfifHeader.ResolutionUnits); switch(ImageInfo.JfifHeader.ResolutionUnits){ case 0: printf("(aspect ratio)"); break; case 1: printf("(dots per inch)"); break; case 2: printf("(dots per cm)"); break; default: printf("(unknown)"); break; } printf(" X-density=%d Y-density=%d\n",ImageInfo.JfifHeader.XDensity, ImageInfo.JfifHeader.YDensity); if (Data[14] || Data[15]){ fprintf(stderr,"Ignoring jfif header thumbnail\n"); } } ignore: free(Sections[--SectionsRead].Data); break; case M_EXIF: // There can be different section using the same marker. if (ReadMode & READ_METADATA){ if (memcmp(Data+2, "Exif", 4) == 0){ process_EXIF(Data, itemlen); break; }else if (memcmp(Data+2, "http:", 5) == 0){ Sections[SectionsRead-1].Type = M_XMP; // Change tag for internal purposes. if (ShowTags){ printf("Image contains XMP section, %d bytes long\n", itemlen); if (ShowTags){ ShowXmp(Sections[SectionsRead-1]); } } break; } } // Oterwise, discard this section. free(Sections[--SectionsRead].Data); break; case M_IPTC: if (ReadMode & READ_METADATA){ if (ShowTags){ printf("Image contains IPTC section, %d bytes long\n", itemlen); } // Note: We just store the IPTC section. Its relatively straightforward // and we don't act on any part of it, so just display it at parse time. }else{ free(Sections[--SectionsRead].Data); } break; case M_SOF0: case M_SOF1: case M_SOF2: case M_SOF3: case M_SOF5: case M_SOF6: case M_SOF7: case M_SOF9: case M_SOF10: case M_SOF11: case M_SOF13: case M_SOF14: case M_SOF15: if (itemlen < 8){ fprintf(stderr,"Section too short\n"); break; } process_SOFn(Data, marker); break; default: // Skip any other sections. if (ShowTags){ printf("Jpeg section marker 0x%02x size %d\n",marker, itemlen); } break; } } return TRUE; } //-------------------------------------------------------------------------- // Discard read data. //-------------------------------------------------------------------------- void DiscardData(void) { int a; for (a=0;a<SectionsRead;a++){ free(Sections[a].Data); } memset(&ImageInfo, 0, sizeof(ImageInfo)); SectionsRead = 0; HaveAll = 0; } //-------------------------------------------------------------------------- // Read image data. //-------------------------------------------------------------------------- int ReadJpegFile(const char * FileName, ReadMode_t ReadMode) { FILE * infile; int ret; infile = fopen(FileName, "rb"); // Unix ignores 'b', windows needs it. if (infile == NULL) { fprintf(stderr, "can't open '%s'\n", FileName); return FALSE; } // Scan the JPEG headers. ret = ReadJpegSections(infile, ReadMode); if (!ret){ if (ReadMode == READ_ANY){ // Process any files mode. Ignore the fact that it's not // a jpeg file. 
ret = TRUE; }else{ fprintf(stderr,"Not JPEG: %s\n",FileName); } } fclose(infile); if (ret == FALSE){ DiscardData(); } return ret; } //-------------------------------------------------------------------------- // Replace or remove exif thumbnail //-------------------------------------------------------------------------- int SaveThumbnail(char * ThumbFileName) { FILE * ThumbnailFile; if (ImageInfo.ThumbnailOffset == 0 || ImageInfo.ThumbnailSize == 0){ fprintf(stderr,"Image contains no thumbnail\n"); return FALSE; } if (strcmp(ThumbFileName, "-") == 0){ // A filename of '-' indicates thumbnail goes to stdout. // This doesn't make much sense under Windows, so this feature is unix only. ThumbnailFile = stdout; }else{ ThumbnailFile = fopen(ThumbFileName,"wb"); } if (ThumbnailFile){ uchar * ThumbnailPointer; Section_t * ExifSection; ExifSection = FindSection(M_EXIF); ThumbnailPointer = ExifSection->Data+ImageInfo.ThumbnailOffset+8; fwrite(ThumbnailPointer, ImageInfo.ThumbnailSize ,1, ThumbnailFile); fclose(ThumbnailFile); return TRUE; }else{ ErrFatal("Could not write thumbnail file"); return FALSE; } } //-------------------------------------------------------------------------- // Replace or remove exif thumbnail //-------------------------------------------------------------------------- int ReplaceThumbnail(const char * ThumbFileName) { FILE * ThumbnailFile; int ThumbLen, NewExifSize; Section_t * ExifSection; uchar * ThumbnailPointer; if (ImageInfo.ThumbnailOffset == 0 || ImageInfo.ThumbnailAtEnd == FALSE){ if (ThumbFileName == NULL){ // Delete of nonexistent thumbnail (not even pointers present) // No action, no error. return FALSE; } // Adding or removing of thumbnail is not possible - that would require rearranging // of the exif header, which is risky, and jhad doesn't know how to do. fprintf(stderr,"Image contains no thumbnail to replace - add is not possible\n"); return FALSE; } if (ThumbFileName){ ThumbnailFile = fopen(ThumbFileName,"rb"); if (ThumbnailFile == NULL){ noread: ErrFatal("Could not read thumbnail file"); return FALSE; } // get length fseek(ThumbnailFile, 0, SEEK_END); ThumbLen = ftell(ThumbnailFile); fseek(ThumbnailFile, 0, SEEK_SET); if (ThumbLen + ImageInfo.ThumbnailOffset > 0x10000-20){ ErrFatal("Thumbnail is too large to insert into exif header"); } }else{ if (ImageInfo.ThumbnailSize == 0){ return FALSE; } ThumbLen = 0; ThumbnailFile = NULL; } ExifSection = FindSection(M_EXIF); NewExifSize = ImageInfo.ThumbnailOffset+8+ThumbLen; ExifSection->Data = (uchar *)realloc(ExifSection->Data, NewExifSize); ThumbnailPointer = ExifSection->Data+ImageInfo.ThumbnailOffset+8; if (ThumbnailFile){ if (fread(ThumbnailPointer, 1, ThumbLen, ThumbnailFile) != ThumbLen){ goto noread; } fclose(ThumbnailFile); } ImageInfo.ThumbnailSize = ThumbLen; Put32u(ExifSection->Data+ImageInfo.ThumbnailSizeOffset+8, ThumbLen); ExifSection->Data[0] = (uchar)(NewExifSize >> 8); ExifSection->Data[1] = (uchar)NewExifSize; ExifSection->Size = NewExifSize; return TRUE; } //-------------------------------------------------------------------------- // Discard everything but the exif and comment sections. 
//-------------------------------------------------------------------------- void DiscardAllButExif(void) { Section_t ExifKeeper; Section_t CommentKeeper; Section_t IptcKeeper; Section_t XmpKeeper; int a; memset(&ExifKeeper, 0, sizeof(ExifKeeper)); memset(&CommentKeeper, 0, sizeof(CommentKeeper)); memset(&IptcKeeper, 0, sizeof(IptcKeeper)); memset(&XmpKeeper, 0, sizeof(IptcKeeper)); for (a=0;a<SectionsRead;a++){ if (Sections[a].Type == M_EXIF && ExifKeeper.Type == 0){ ExifKeeper = Sections[a]; }else if (Sections[a].Type == M_XMP && XmpKeeper.Type == 0){ XmpKeeper = Sections[a]; }else if (Sections[a].Type == M_COM && CommentKeeper.Type == 0){ CommentKeeper = Sections[a]; }else if (Sections[a].Type == M_IPTC && IptcKeeper.Type == 0){ IptcKeeper = Sections[a]; }else{ free(Sections[a].Data); } } SectionsRead = 0; if (ExifKeeper.Type){ CheckSectionsAllocated(); Sections[SectionsRead++] = ExifKeeper; } if (CommentKeeper.Type){ CheckSectionsAllocated(); Sections[SectionsRead++] = CommentKeeper; } if (IptcKeeper.Type){ CheckSectionsAllocated(); Sections[SectionsRead++] = IptcKeeper; } if (XmpKeeper.Type){ CheckSectionsAllocated(); Sections[SectionsRead++] = XmpKeeper; } } //-------------------------------------------------------------------------- // Write image data back to disk. //-------------------------------------------------------------------------- void WriteJpegFile(const char * FileName) { FILE * outfile; int a; if (!HaveAll){ ErrFatal("Can't write back - didn't read all"); } outfile = fopen(FileName,"wb"); if (outfile == NULL){ ErrFatal("Could not open file for write"); } // Initial static jpeg marker. fputc(0xff,outfile); fputc(0xd8,outfile); if (Sections[0].Type != M_EXIF && Sections[0].Type != M_JFIF){ // The image must start with an exif or jfif marker. If we threw those away, create one. static uchar JfifHead[18] = { 0xff, M_JFIF, 0x00, 0x10, 'J' , 'F' , 'I' , 'F' , 0x00, 0x01, 0x01, 0x01, 0x01, 0x2C, 0x01, 0x2C, 0x00, 0x00 }; if (ImageInfo.ResolutionUnit == 2 || ImageInfo.ResolutionUnit == 3){ // Use the exif resolution info to fill out the jfif header. // Usually, for exif images, there's no jfif header, so if wediscard // the exif header, use info from the exif header for the jfif header. 
ImageInfo.JfifHeader.ResolutionUnits = (char)(ImageInfo.ResolutionUnit-1); // Jfif is 1 and 2, Exif is 2 and 3 for In and cm respecively ImageInfo.JfifHeader.XDensity = (int)ImageInfo.xResolution; ImageInfo.JfifHeader.YDensity = (int)ImageInfo.yResolution; } JfifHead[11] = ImageInfo.JfifHeader.ResolutionUnits; JfifHead[12] = (uchar)(ImageInfo.JfifHeader.XDensity >> 8); JfifHead[13] = (uchar)ImageInfo.JfifHeader.XDensity; JfifHead[14] = (uchar)(ImageInfo.JfifHeader.YDensity >> 8); JfifHead[15] = (uchar)ImageInfo.JfifHeader.YDensity; fwrite(JfifHead, 18, 1, outfile); // use the values from the exif data for the jfif header, if we have found values if (ImageInfo.ResolutionUnit != 0) { // JFIF.ResolutionUnit is {1,2}, EXIF.ResolutionUnit is {2,3} JfifHead[11] = (uchar)ImageInfo.ResolutionUnit - 1; } if (ImageInfo.xResolution > 0.0 && ImageInfo.yResolution > 0.0) { JfifHead[12] = (uchar)((int)ImageInfo.xResolution>>8); JfifHead[13] = (uchar)((int)ImageInfo.xResolution); JfifHead[14] = (uchar)((int)ImageInfo.yResolution>>8); JfifHead[15] = (uchar)((int)ImageInfo.yResolution); } } // Write all the misc sections for (a=0;a<SectionsRead-1;a++){ fputc(0xff,outfile); fputc((unsigned char)Sections[a].Type, outfile); fwrite(Sections[a].Data, Sections[a].Size, 1, outfile); } // Write the remaining image data. fwrite(Sections[a].Data, Sections[a].Size, 1, outfile); fclose(outfile); } //-------------------------------------------------------------------------- // Check if image has exif header. //-------------------------------------------------------------------------- Section_t * FindSection(int SectionType) { int a; for (a=0;a<SectionsRead;a++){ if (Sections[a].Type == SectionType){ return &Sections[a]; } } // Could not be found. return NULL; } //-------------------------------------------------------------------------- // Remove a certain type of section. //-------------------------------------------------------------------------- int RemoveSectionType(int SectionType) { int a; int retval = FALSE; for (a=0;a<SectionsRead-1;a++){ if (Sections[a].Type == SectionType){ // Free up this section free (Sections[a].Data); // Move succeding sections back by one to close space in array. memmove(Sections+a, Sections+a+1, sizeof(Section_t) * (SectionsRead-a)); SectionsRead -= 1; a -= 1; retval = TRUE; } } return retval; } //-------------------------------------------------------------------------- // Remove sectons not part of image and not exif or comment sections. //-------------------------------------------------------------------------- int RemoveUnknownSections(void) { int a; int Modified = FALSE; for (a=0;a<SectionsRead-1;){ switch(Sections[a].Type){ case M_SOF0: case M_SOF1: case M_SOF2: case M_SOF3: case M_SOF5: case M_SOF6: case M_SOF7: case M_SOF9: case M_SOF10: case M_SOF11: case M_SOF13: case M_SOF14: case M_SOF15: case M_SOI: case M_EOI: case M_SOS: case M_JFIF: case M_EXIF: case M_XMP: case M_COM: case M_DQT: case M_DHT: case M_DRI: case M_IPTC: // keep. a++; break; default: // Unknown. Delete. free (Sections[a].Data); // Move succeding sections back by one to close space in array. 
memmove(Sections+a, Sections+a+1, sizeof(Section_t) * (SectionsRead-a)); SectionsRead -= 1; Modified = TRUE; } } return Modified; } //-------------------------------------------------------------------------- // Add a section (assume it doesn't already exist) - used for // adding comment sections and exif sections //-------------------------------------------------------------------------- Section_t * CreateSection(int SectionType, unsigned char * Data, int Size) { Section_t * NewSection; int a; int NewIndex; NewIndex = 0; // Figure out where to put the comment section. if (SectionType == M_EXIF){ // Exif alwas goes first! }else{ for (;NewIndex < 3;NewIndex++){ // Maximum fourth position (just for the heck of it) if (Sections[NewIndex].Type == M_JFIF) continue; // Put it after Jfif if (Sections[NewIndex].Type == M_EXIF) continue; // Put it after Exif break; } } if (SectionsRead < NewIndex){ ErrFatal("Too few sections!"); } CheckSectionsAllocated(); for (a=SectionsRead;a>NewIndex;a--){ Sections[a] = Sections[a-1]; } SectionsRead += 1; NewSection = Sections+NewIndex; NewSection->Type = SectionType; NewSection->Size = Size; NewSection->Data = Data; return NewSection; } //-------------------------------------------------------------------------- // Initialisation. //-------------------------------------------------------------------------- void ResetJpgfile(void) { if (Sections == NULL){ Sections = (Section_t *)malloc(sizeof(Section_t)*5); SectionsAllocated = 5; } SectionsRead = 0; HaveAll = 0; }
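ReadJpegSections() above reads each JPEG segment by taking the two bytes after the marker as a big-endian length that includes the length field itself, rejecting values below 2 and then allocating and reading itemlen bytes (the patched version further down allocates 20 spare bytes on top of that). The length arithmetic is sketched below in a small stand-alone example; the names are illustrative and the sketch mirrors only the arithmetic, not jhead's file I/O.

// Big-endian segment length handling in the style of Get16m()/ReadJpegSections() above.
public class JpegSegmentSketch {

    // Returns the payload size of a segment, or -1 if the length field is malformed.
    static int payloadLength(int lengthHigh, int lengthLow) {
        int itemlen = (lengthHigh << 8) | lengthLow;   // big-endian 16-bit length
        if (itemlen < 2)
            return -1;          // the length field must at least cover itself
        return itemlen - 2;     // bytes still to be read from the file
    }

    public static void main(String[] args) {
        System.out.println(payloadLength(0x00, 0x10));  // 14 payload bytes (e.g. a JFIF header)
        System.out.println(payloadLength(0x00, 0x01));  // -1: invalid marker length
    }
}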
null
//-------------------------------------------------------------------------- // Program to pull the information out of various types of EXIF digital // camera files and show it in a reasonably consistent way // // This module handles basic Jpeg file handling // // Matthias Wandel //-------------------------------------------------------------------------- #include "jhead.h" // Storage for simplified info extracted from file. ImageInfo_t ImageInfo; static Section_t * Sections = NULL; static int SectionsAllocated; static int SectionsRead; static int HaveAll; #define PSEUDO_IMAGE_MARKER 0x123; // Extra value. //-------------------------------------------------------------------------- // Get 16 bits motorola order (always) for jpeg header stuff. //-------------------------------------------------------------------------- static int Get16m(const void * Short) { return (((uchar *)Short)[0] << 8) | ((uchar *)Short)[1]; } //-------------------------------------------------------------------------- // Process a COM marker. // We want to print out the marker contents as legible text; // we must guard against random junk and varying newline representations. //-------------------------------------------------------------------------- static void process_COM (const uchar * Data, int length) { int ch; char Comment[MAX_COMMENT_SIZE+1]; int nch; int a; nch = 0; if (length > MAX_COMMENT_SIZE) length = MAX_COMMENT_SIZE; // Truncate if it won't fit in our structure. for (a=2;a<length;a++){ ch = Data[a]; if (ch == '\r' && a < length-1 && Data[a+1] == '\n') continue; // Remove cr followed by lf. if (ch >= 32 || ch == '\n' || ch == '\t'){ Comment[nch++] = (char)ch; }else{ Comment[nch++] = '?'; } } Comment[nch] = '\0'; // Null terminate if (ShowTags){ printf("COM marker comment: %s\n",Comment); } strcpy(ImageInfo.Comments,Comment); } //-------------------------------------------------------------------------- // Process a SOFn marker. This is useful for the image dimensions //-------------------------------------------------------------------------- static void process_SOFn (const uchar * Data, int marker) { int data_precision, num_components; data_precision = Data[2]; ImageInfo.Height = Get16m(Data+3); ImageInfo.Width = Get16m(Data+5); num_components = Data[7]; if (num_components == 3){ ImageInfo.IsColor = 1; }else{ ImageInfo.IsColor = 0; } ImageInfo.Process = marker; if (ShowTags){ printf("JPEG image is %uw * %uh, %d color components, %d bits per sample\n", ImageInfo.Width, ImageInfo.Height, num_components, data_precision); } } //-------------------------------------------------------------------------- // Check sections array to see if it needs to be increased in size. 
//-------------------------------------------------------------------------- static void CheckSectionsAllocated(void) { if (SectionsRead > SectionsAllocated){ ErrFatal("allocation screwup"); } if (SectionsRead >= SectionsAllocated){ SectionsAllocated += SectionsAllocated/2; Sections = (Section_t *)realloc(Sections, sizeof(Section_t)*SectionsAllocated); if (Sections == NULL){ ErrFatal("could not allocate data for entire image"); } } } //-------------------------------------------------------------------------- // Parse the marker stream until SOS or EOI is seen; //-------------------------------------------------------------------------- int ReadJpegSections (FILE * infile, ReadMode_t ReadMode) { int a; int HaveCom = FALSE; a = fgetc(infile); if (a != 0xff || fgetc(infile) != M_SOI){ return FALSE; } ImageInfo.JfifHeader.XDensity = ImageInfo.JfifHeader.YDensity = 300; ImageInfo.JfifHeader.ResolutionUnits = 1; for(;;){ int itemlen; int prev; int marker = 0; int ll,lh, got; uchar * Data; CheckSectionsAllocated(); prev = 0; for (a=0;;a++){ marker = fgetc(infile); if (marker != 0xff && prev == 0xff) break; if (marker == EOF){ ErrFatal("Unexpected end of file"); } prev = marker; } if (a > 10){ ErrNonfatal("Extraneous %d padding bytes before section %02X",a-1,marker); } Sections[SectionsRead].Type = marker; // Read the length of the section. lh = fgetc(infile); ll = fgetc(infile); if (lh == EOF || ll == EOF){ ErrFatal("Unexpected end of file"); } itemlen = (lh << 8) | ll; if (itemlen < 2){ ErrFatal("invalid marker"); } Sections[SectionsRead].Size = itemlen; // Allocate an extra 20 bytes more than needed, because sometimes when reading structures, // if the section erroneously ends before short structures that should be there, that can trip // memory checkers in combination with fuzzers. Data = (uchar *)malloc(itemlen+20); if (Data == NULL){ ErrFatal("Could not allocate memory"); } Sections[SectionsRead].Data = Data; // Store first two pre-read bytes. Data[0] = (uchar)lh; Data[1] = (uchar)ll; got = fread(Data+2, 1, itemlen-2, infile); // Read the whole section. if (got != itemlen-2){ ErrFatal("Premature end of file?"); } SectionsRead += 1; switch(marker){ case M_SOS: // stop before hitting compressed data // If reading entire image is requested, read the rest of the data. if (ReadMode & READ_IMAGE){ int cp, ep, size; // Determine how much file is left. cp = ftell(infile); fseek(infile, 0, SEEK_END); ep = ftell(infile); fseek(infile, cp, SEEK_SET); size = ep-cp; Data = (uchar *)malloc(size); if (Data == NULL){ ErrFatal("could not allocate data for entire image"); } got = fread(Data, 1, size, infile); if (got != size){ ErrFatal("could not read the rest of the image"); } CheckSectionsAllocated(); Sections[SectionsRead].Data = Data; Sections[SectionsRead].Size = size; Sections[SectionsRead].Type = PSEUDO_IMAGE_MARKER; SectionsRead ++; HaveAll = 1; } return TRUE; case M_DQT: // Use for jpeg quality guessing process_DQT(Data, itemlen); break; case M_DHT: // Use for jpeg quality guessing process_DHT(Data, itemlen); break; case M_EOI: // in case it's a tables-only JPEG stream fprintf(stderr,"No image in jpeg!\n"); return FALSE; case M_COM: // Comment section if (HaveCom || ((ReadMode & READ_METADATA) == 0)){ // Discard this section. free(Sections[--SectionsRead].Data); }else{ process_COM(Data, itemlen); HaveCom = TRUE; } break; case M_JFIF: // Regular jpegs always have this tag, exif images have the exif // marker instead, althogh ACDsee will write images with both markers. 
// this program will re-create this marker on absence of exif marker. // hence no need to keep the copy from the file. if (itemlen < 16){ fprintf(stderr,"Jfif header too short\n"); goto ignore; } if (memcmp(Data+2, "JFIF\0",5)){ fprintf(stderr,"Header missing JFIF marker\n"); } ImageInfo.JfifHeader.Present = TRUE; ImageInfo.JfifHeader.ResolutionUnits = Data[9]; ImageInfo.JfifHeader.XDensity = (Data[10]<<8) | Data[11]; ImageInfo.JfifHeader.YDensity = (Data[12]<<8) | Data[13]; if (ShowTags){ printf("JFIF SOI marker: Units: %d ",ImageInfo.JfifHeader.ResolutionUnits); switch(ImageInfo.JfifHeader.ResolutionUnits){ case 0: printf("(aspect ratio)"); break; case 1: printf("(dots per inch)"); break; case 2: printf("(dots per cm)"); break; default: printf("(unknown)"); break; } printf(" X-density=%d Y-density=%d\n",ImageInfo.JfifHeader.XDensity, ImageInfo.JfifHeader.YDensity); if (Data[14] || Data[15]){ fprintf(stderr,"Ignoring jfif header thumbnail\n"); } } ignore: free(Sections[--SectionsRead].Data); break; case M_EXIF: // There can be different section using the same marker. if (ReadMode & READ_METADATA){ if (memcmp(Data+2, "Exif", 4) == 0){ process_EXIF(Data, itemlen); break; }else if (memcmp(Data+2, "http:", 5) == 0){ Sections[SectionsRead-1].Type = M_XMP; // Change tag for internal purposes. if (ShowTags){ printf("Image contains XMP section, %d bytes long\n", itemlen); if (ShowTags){ ShowXmp(Sections[SectionsRead-1]); } } break; } } // Oterwise, discard this section. free(Sections[--SectionsRead].Data); break; case M_IPTC: if (ReadMode & READ_METADATA){ if (ShowTags){ printf("Image contains IPTC section, %d bytes long\n", itemlen); } // Note: We just store the IPTC section. Its relatively straightforward // and we don't act on any part of it, so just display it at parse time. }else{ free(Sections[--SectionsRead].Data); } break; case M_SOF0: case M_SOF1: case M_SOF2: case M_SOF3: case M_SOF5: case M_SOF6: case M_SOF7: case M_SOF9: case M_SOF10: case M_SOF11: case M_SOF13: case M_SOF14: case M_SOF15: if (itemlen < 8){ fprintf(stderr,"Section too short\n"); break; } process_SOFn(Data, marker); break; default: // Skip any other sections. if (ShowTags){ printf("Jpeg section marker 0x%02x size %d\n",marker, itemlen); } break; } } return TRUE; } //-------------------------------------------------------------------------- // Discard read data. //-------------------------------------------------------------------------- void DiscardData(void) { int a; for (a=0;a<SectionsRead;a++){ free(Sections[a].Data); } memset(&ImageInfo, 0, sizeof(ImageInfo)); SectionsRead = 0; HaveAll = 0; } //-------------------------------------------------------------------------- // Read image data. //-------------------------------------------------------------------------- int ReadJpegFile(const char * FileName, ReadMode_t ReadMode) { FILE * infile; int ret; infile = fopen(FileName, "rb"); // Unix ignores 'b', windows needs it. if (infile == NULL) { fprintf(stderr, "can't open '%s'\n", FileName); return FALSE; } // Scan the JPEG headers. ret = ReadJpegSections(infile, ReadMode); if (!ret){ if (ReadMode == READ_ANY){ // Process any files mode. Ignore the fact that it's not // a jpeg file. 
ret = TRUE; }else{ fprintf(stderr,"Not JPEG: %s\n",FileName); } } fclose(infile); if (ret == FALSE){ DiscardData(); } return ret; } //-------------------------------------------------------------------------- // Replace or remove exif thumbnail //-------------------------------------------------------------------------- int SaveThumbnail(char * ThumbFileName) { FILE * ThumbnailFile; if (ImageInfo.ThumbnailOffset == 0 || ImageInfo.ThumbnailSize == 0){ fprintf(stderr,"Image contains no thumbnail\n"); return FALSE; } if (strcmp(ThumbFileName, "-") == 0){ // A filename of '-' indicates thumbnail goes to stdout. // This doesn't make much sense under Windows, so this feature is unix only. ThumbnailFile = stdout; }else{ ThumbnailFile = fopen(ThumbFileName,"wb"); } if (ThumbnailFile){ uchar * ThumbnailPointer; Section_t * ExifSection; ExifSection = FindSection(M_EXIF); ThumbnailPointer = ExifSection->Data+ImageInfo.ThumbnailOffset+8; fwrite(ThumbnailPointer, ImageInfo.ThumbnailSize ,1, ThumbnailFile); fclose(ThumbnailFile); return TRUE; }else{ ErrFatal("Could not write thumbnail file"); return FALSE; } } //-------------------------------------------------------------------------- // Replace or remove exif thumbnail //-------------------------------------------------------------------------- int ReplaceThumbnail(const char * ThumbFileName) { FILE * ThumbnailFile; int ThumbLen, NewExifSize; Section_t * ExifSection; uchar * ThumbnailPointer; if (ImageInfo.ThumbnailOffset == 0 || ImageInfo.ThumbnailAtEnd == FALSE){ if (ThumbFileName == NULL){ // Delete of nonexistent thumbnail (not even pointers present) // No action, no error. return FALSE; } // Adding or removing of thumbnail is not possible - that would require rearranging // of the exif header, which is risky, and jhad doesn't know how to do. fprintf(stderr,"Image contains no thumbnail to replace - add is not possible\n"); return FALSE; } if (ThumbFileName){ ThumbnailFile = fopen(ThumbFileName,"rb"); if (ThumbnailFile == NULL){ noread: ErrFatal("Could not read thumbnail file"); return FALSE; } // get length fseek(ThumbnailFile, 0, SEEK_END); ThumbLen = ftell(ThumbnailFile); fseek(ThumbnailFile, 0, SEEK_SET); if (ThumbLen + ImageInfo.ThumbnailOffset > 0x10000-20){ ErrFatal("Thumbnail is too large to insert into exif header"); } }else{ if (ImageInfo.ThumbnailSize == 0){ return FALSE; } ThumbLen = 0; ThumbnailFile = NULL; } ExifSection = FindSection(M_EXIF); NewExifSize = ImageInfo.ThumbnailOffset+8+ThumbLen; ExifSection->Data = (uchar *)realloc(ExifSection->Data, NewExifSize); ThumbnailPointer = ExifSection->Data+ImageInfo.ThumbnailOffset+8; if (ThumbnailFile){ if (fread(ThumbnailPointer, 1, ThumbLen, ThumbnailFile) != ThumbLen){ goto noread; } fclose(ThumbnailFile); } ImageInfo.ThumbnailSize = ThumbLen; Put32u(ExifSection->Data+ImageInfo.ThumbnailSizeOffset+8, ThumbLen); ExifSection->Data[0] = (uchar)(NewExifSize >> 8); ExifSection->Data[1] = (uchar)NewExifSize; ExifSection->Size = NewExifSize; return TRUE; } //-------------------------------------------------------------------------- // Discard everything but the exif and comment sections. 
//-------------------------------------------------------------------------- void DiscardAllButExif(void) { Section_t ExifKeeper; Section_t CommentKeeper; Section_t IptcKeeper; Section_t XmpKeeper; int a; memset(&ExifKeeper, 0, sizeof(ExifKeeper)); memset(&CommentKeeper, 0, sizeof(CommentKeeper)); memset(&IptcKeeper, 0, sizeof(IptcKeeper)); memset(&XmpKeeper, 0, sizeof(IptcKeeper)); for (a=0;a<SectionsRead;a++){ if (Sections[a].Type == M_EXIF && ExifKeeper.Type == 0){ ExifKeeper = Sections[a]; }else if (Sections[a].Type == M_XMP && XmpKeeper.Type == 0){ XmpKeeper = Sections[a]; }else if (Sections[a].Type == M_COM && CommentKeeper.Type == 0){ CommentKeeper = Sections[a]; }else if (Sections[a].Type == M_IPTC && IptcKeeper.Type == 0){ IptcKeeper = Sections[a]; }else{ free(Sections[a].Data); } } SectionsRead = 0; if (ExifKeeper.Type){ CheckSectionsAllocated(); Sections[SectionsRead++] = ExifKeeper; } if (CommentKeeper.Type){ CheckSectionsAllocated(); Sections[SectionsRead++] = CommentKeeper; } if (IptcKeeper.Type){ CheckSectionsAllocated(); Sections[SectionsRead++] = IptcKeeper; } if (XmpKeeper.Type){ CheckSectionsAllocated(); Sections[SectionsRead++] = XmpKeeper; } } //-------------------------------------------------------------------------- // Write image data back to disk. //-------------------------------------------------------------------------- void WriteJpegFile(const char * FileName) { FILE * outfile; int a; if (!HaveAll){ ErrFatal("Can't write back - didn't read all"); } outfile = fopen(FileName,"wb"); if (outfile == NULL){ ErrFatal("Could not open file for write"); } // Initial static jpeg marker. fputc(0xff,outfile); fputc(0xd8,outfile); if (Sections[0].Type != M_EXIF && Sections[0].Type != M_JFIF){ // The image must start with an exif or jfif marker. If we threw those away, create one. static uchar JfifHead[18] = { 0xff, M_JFIF, 0x00, 0x10, 'J' , 'F' , 'I' , 'F' , 0x00, 0x01, 0x01, 0x01, 0x01, 0x2C, 0x01, 0x2C, 0x00, 0x00 }; if (ImageInfo.ResolutionUnit == 2 || ImageInfo.ResolutionUnit == 3){ // Use the exif resolution info to fill out the jfif header. // Usually, for exif images, there's no jfif header, so if wediscard // the exif header, use info from the exif header for the jfif header. 
ImageInfo.JfifHeader.ResolutionUnits = (char)(ImageInfo.ResolutionUnit-1); // Jfif is 1 and 2, Exif is 2 and 3 for In and cm respecively ImageInfo.JfifHeader.XDensity = (int)ImageInfo.xResolution; ImageInfo.JfifHeader.YDensity = (int)ImageInfo.yResolution; } JfifHead[11] = ImageInfo.JfifHeader.ResolutionUnits; JfifHead[12] = (uchar)(ImageInfo.JfifHeader.XDensity >> 8); JfifHead[13] = (uchar)ImageInfo.JfifHeader.XDensity; JfifHead[14] = (uchar)(ImageInfo.JfifHeader.YDensity >> 8); JfifHead[15] = (uchar)ImageInfo.JfifHeader.YDensity; fwrite(JfifHead, 18, 1, outfile); // use the values from the exif data for the jfif header, if we have found values if (ImageInfo.ResolutionUnit != 0) { // JFIF.ResolutionUnit is {1,2}, EXIF.ResolutionUnit is {2,3} JfifHead[11] = (uchar)ImageInfo.ResolutionUnit - 1; } if (ImageInfo.xResolution > 0.0 && ImageInfo.yResolution > 0.0) { JfifHead[12] = (uchar)((int)ImageInfo.xResolution>>8); JfifHead[13] = (uchar)((int)ImageInfo.xResolution); JfifHead[14] = (uchar)((int)ImageInfo.yResolution>>8); JfifHead[15] = (uchar)((int)ImageInfo.yResolution); } } // Write all the misc sections for (a=0;a<SectionsRead-1;a++){ fputc(0xff,outfile); fputc((unsigned char)Sections[a].Type, outfile); fwrite(Sections[a].Data, Sections[a].Size, 1, outfile); } // Write the remaining image data. fwrite(Sections[a].Data, Sections[a].Size, 1, outfile); fclose(outfile); } //-------------------------------------------------------------------------- // Check if image has exif header. //-------------------------------------------------------------------------- Section_t * FindSection(int SectionType) { int a; for (a=0;a<SectionsRead;a++){ if (Sections[a].Type == SectionType){ return &Sections[a]; } } // Could not be found. return NULL; } //-------------------------------------------------------------------------- // Remove a certain type of section. //-------------------------------------------------------------------------- int RemoveSectionType(int SectionType) { int a; int retval = FALSE; for (a=0;a<SectionsRead-1;a++){ if (Sections[a].Type == SectionType){ // Free up this section free (Sections[a].Data); // Move succeding sections back by one to close space in array. memmove(Sections+a, Sections+a+1, sizeof(Section_t) * (SectionsRead-a)); SectionsRead -= 1; a -= 1; retval = TRUE; } } return retval; } //-------------------------------------------------------------------------- // Remove sectons not part of image and not exif or comment sections. //-------------------------------------------------------------------------- int RemoveUnknownSections(void) { int a; int Modified = FALSE; for (a=0;a<SectionsRead-1;){ switch(Sections[a].Type){ case M_SOF0: case M_SOF1: case M_SOF2: case M_SOF3: case M_SOF5: case M_SOF6: case M_SOF7: case M_SOF9: case M_SOF10: case M_SOF11: case M_SOF13: case M_SOF14: case M_SOF15: case M_SOI: case M_EOI: case M_SOS: case M_JFIF: case M_EXIF: case M_XMP: case M_COM: case M_DQT: case M_DHT: case M_DRI: case M_IPTC: // keep. a++; break; default: // Unknown. Delete. free (Sections[a].Data); // Move succeding sections back by one to close space in array. 
memmove(Sections+a, Sections+a+1, sizeof(Section_t) * (SectionsRead-a)); SectionsRead -= 1; Modified = TRUE; } } return Modified; } //-------------------------------------------------------------------------- // Add a section (assume it doesn't already exist) - used for // adding comment sections and exif sections //-------------------------------------------------------------------------- Section_t * CreateSection(int SectionType, unsigned char * Data, int Size) { Section_t * NewSection; int a; int NewIndex; NewIndex = 0; // Figure out where to put the comment section. if (SectionType == M_EXIF){ // Exif alwas goes first! }else{ for (;NewIndex < 3;NewIndex++){ // Maximum fourth position (just for the heck of it) if (Sections[NewIndex].Type == M_JFIF) continue; // Put it after Jfif if (Sections[NewIndex].Type == M_EXIF) continue; // Put it after Exif break; } } if (SectionsRead < NewIndex){ ErrFatal("Too few sections!"); } CheckSectionsAllocated(); for (a=SectionsRead;a>NewIndex;a--){ Sections[a] = Sections[a-1]; } SectionsRead += 1; NewSection = Sections+NewIndex; NewSection->Type = SectionType; NewSection->Size = Size; NewSection->Data = Data; return NewSection; } //-------------------------------------------------------------------------- // Initialisation. //-------------------------------------------------------------------------- void ResetJpgfile(void) { if (Sections == NULL){ Sections = (Section_t *)malloc(sizeof(Section_t)*5); SectionsAllocated = 5; } SectionsRead = 0; HaveAll = 0; }
null
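Note on the preceding record: its fixed-code field is a JPEG marker-stream parser (ReadJpegSections and helpers) in which every marker segment carries a two-byte big-endian length that counts the length bytes themselves, so any value below 2 is rejected before the payload is read. The following is only an illustrative, standalone sketch of that length handling; the function and variable names are invented here and do not come from the record.

```c
#include <stdio.h>
#include <stdlib.h>

/* Illustrative sketch: read one JPEG marker segment from `fp`.
 * The two length bytes are big-endian and include themselves, so a valid
 * segment length is always >= 2 and the payload is (len - 2) bytes. */
static unsigned char *read_segment(FILE *fp, int *out_len)
{
    int hi = fgetc(fp);
    int lo = fgetc(fp);
    if (hi == EOF || lo == EOF)
        return NULL;                      /* truncated file */

    int len = (hi << 8) | lo;
    if (len < 2)
        return NULL;                      /* invalid marker length */

    unsigned char *data = malloc((size_t)len);
    if (data == NULL)
        return NULL;

    data[0] = (unsigned char)hi;          /* keep the pre-read length bytes */
    data[1] = (unsigned char)lo;
    if (fread(data + 2, 1, (size_t)(len - 2), fp) != (size_t)(len - 2)) {
        free(data);                       /* premature end of file */
        return NULL;
    }
    *out_len = len;
    return data;
}
```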
211
CWE-787
CVE-2020-26570
/* * PKCS15 emulation layer for Oberthur card. * * Copyright (C) 2010, Viktor Tarasov <vtarasov@opentrust.com> * Copyright (C) 2005, Andrea Frigido <andrea@frisoft.it> * Copyright (C) 2005, Sirio Capizzi <graaf@virgilio.it> * Copyright (C) 2004, Antonino Iacono <ant_iacono@tin.it> * Copyright (C) 2003, Olaf Kirch <okir@suse.de> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <stdlib.h> #include <string.h> #include <stdio.h> #include "../common/compat_strlcpy.h" #include "pkcs15.h" #include "log.h" #include "asn1.h" #include "internal.h" #ifdef ENABLE_OPENSSL #include <openssl/bio.h> #include <openssl/crypto.h> #include <openssl/x509.h> #include <openssl/x509v3.h> #endif #define OBERTHUR_ATTR_MODIFIABLE 0x0001 #define OBERTHUR_ATTR_TRUSTED 0x0002 #define OBERTHUR_ATTR_LOCAL 0x0004 #define OBERTHUR_ATTR_ENCRYPT 0x0008 #define OBERTHUR_ATTR_DECRYPT 0x0010 #define OBERTHUR_ATTR_SIGN 0x0020 #define OBERTHUR_ATTR_VERIFY 0x0040 #define OBERTHUR_ATTR_RSIGN 0x0080 #define OBERTHUR_ATTR_RVERIFY 0x0100 #define OBERTHUR_ATTR_WRAP 0x0200 #define OBERTHUR_ATTR_UNWRAP 0x0400 #define OBERTHUR_ATTR_DERIVE 0x0800 #define USAGE_PRV_ENC (SC_PKCS15_PRKEY_USAGE_ENCRYPT | SC_PKCS15_PRKEY_USAGE_DECRYPT |\ SC_PKCS15_PRKEY_USAGE_WRAP | SC_PKCS15_PRKEY_USAGE_UNWRAP) #define USAGE_PRV_AUT SC_PKCS15_PRKEY_USAGE_SIGN #define USAGE_PRV_SIGN (SC_PKCS15_PRKEY_USAGE_SIGN | SC_PKCS15_PRKEY_USAGE_NONREPUDIATION) #define USAGE_PUB_ENC (SC_PKCS15_PRKEY_USAGE_ENCRYPT | SC_PKCS15_PRKEY_USAGE_WRAP) #define USAGE_PUB_AUT SC_PKCS15_PRKEY_USAGE_VERIFY #define USAGE_PUB_SIGN (SC_PKCS15_PRKEY_USAGE_VERIFY | SC_PKCS15_PRKEY_USAGE_VERIFYRECOVER) #define PIN_DOMAIN_LABEL "SCM" const unsigned char PinDomainID[3] = {0x53, 0x43, 0x4D}; #define AWP_PIN_DF "3F005011" #define AWP_TOKEN_INFO "3F0050111000" #define AWP_PUK_FILE "3F0050112000" #define AWP_CONTAINERS_MS "3F0050113000" #define AWP_OBJECTS_LIST_PUB "3F0050114000" #define AWP_OBJECTS_LIST_PRV "3F0050115000" #define AWP_OBJECTS_DF_PUB "3F0050119001" #define AWP_OBJECTS_DF_PRV "3F0050119002" #define AWP_BASE_RSA_PRV "3F00501190023000" #define AWP_BASE_RSA_PUB "3F00501190011000" #define AWP_BASE_CERTIFICATE "3F00501190012000" #define BASE_ID_PUB_RSA 0x10 #define BASE_ID_CERT 0x20 #define BASE_ID_PRV_RSA 0x30 #define BASE_ID_PRV_DES 0x40 #define BASE_ID_PUB_DATA 0x50 #define BASE_ID_PRV_DATA 0x60 #define BASE_ID_PUB_DES 0x70 static int sc_pkcs15emu_oberthur_add_prvkey(struct sc_pkcs15_card *, unsigned, unsigned); static int sc_pkcs15emu_oberthur_add_pubkey(struct sc_pkcs15_card *, unsigned, unsigned); static int sc_pkcs15emu_oberthur_add_cert(struct sc_pkcs15_card *, unsigned); static int sc_pkcs15emu_oberthur_add_data(struct sc_pkcs15_card *, unsigned, unsigned, int); static int sc_oberthur_parse_tokeninfo (struct sc_pkcs15_card *, unsigned char *, size_t, int); 
static int sc_oberthur_parse_containers (struct sc_pkcs15_card *, unsigned char *, size_t, int); static int sc_oberthur_parse_publicinfo (struct sc_pkcs15_card *, unsigned char *, size_t, int); static int sc_oberthur_parse_privateinfo (struct sc_pkcs15_card *, unsigned char *, size_t, int); static int sc_awp_parse_df(struct sc_pkcs15_card *, struct sc_pkcs15_df *); static void sc_awp_clear(struct sc_pkcs15_card *); struct crypto_container { unsigned id_pub; unsigned id_prv; unsigned id_cert; }; struct container { char uuid[37]; struct crypto_container exchange; struct crypto_container sign; struct container *next; struct container *prev; }; struct container *Containers = NULL; static struct { const char *name; const char *path; unsigned char *content; size_t len; int (*parser)(struct sc_pkcs15_card *, unsigned char *, size_t, int); int postpone_allowed; } oberthur_infos[] = { /* Never change the following order */ { "Token info", AWP_TOKEN_INFO, NULL, 0, sc_oberthur_parse_tokeninfo, 0}, { "Containers MS", AWP_CONTAINERS_MS, NULL, 0, sc_oberthur_parse_containers, 0}, { "Public objects list", AWP_OBJECTS_LIST_PUB, NULL, 0, sc_oberthur_parse_publicinfo, 0}, { "Private objects list", AWP_OBJECTS_LIST_PRV, NULL, 0, sc_oberthur_parse_privateinfo, 1}, { NULL, NULL, NULL, 0, NULL, 0} }; static unsigned sc_oberthur_decode_usage(unsigned flags) { unsigned ret = 0; if (flags & OBERTHUR_ATTR_ENCRYPT) ret |= SC_PKCS15_PRKEY_USAGE_ENCRYPT; if (flags & OBERTHUR_ATTR_DECRYPT) ret |= SC_PKCS15_PRKEY_USAGE_DECRYPT; if (flags & OBERTHUR_ATTR_SIGN) ret |= SC_PKCS15_PRKEY_USAGE_SIGN; if (flags & OBERTHUR_ATTR_RSIGN) ret |= SC_PKCS15_PRKEY_USAGE_SIGNRECOVER; if (flags & OBERTHUR_ATTR_WRAP) ret |= SC_PKCS15_PRKEY_USAGE_WRAP; if (flags & OBERTHUR_ATTR_UNWRAP) ret |= SC_PKCS15_PRKEY_USAGE_UNWRAP; if (flags & OBERTHUR_ATTR_VERIFY) ret |= SC_PKCS15_PRKEY_USAGE_VERIFY; if (flags & OBERTHUR_ATTR_RVERIFY) ret |= SC_PKCS15_PRKEY_USAGE_VERIFYRECOVER; if (flags & OBERTHUR_ATTR_DERIVE) ret |= SC_PKCS15_PRKEY_USAGE_DERIVE; return ret; } static int sc_oberthur_get_friends (unsigned int id, struct crypto_container *ccont) { struct container *cont; for (cont = Containers; cont; cont = cont->next) { if (cont->exchange.id_pub == id || cont->exchange.id_prv == id || cont->exchange.id_cert == id) { if (ccont) memcpy(ccont, &cont->exchange, sizeof(struct crypto_container)); break; } if (cont->sign.id_pub == id || cont->sign.id_prv == id || cont->sign.id_cert == id) { if (ccont) memcpy(ccont, &cont->sign, sizeof(struct crypto_container)); break; } } return cont ? 
0 : SC_ERROR_TEMPLATE_NOT_FOUND; } static int sc_oberthur_get_certificate_authority(struct sc_pkcs15_der *der, int *out_authority) { #ifdef ENABLE_OPENSSL X509 *x; BUF_MEM buf_mem; BIO *bio = NULL; BASIC_CONSTRAINTS *bs = NULL; if (!der) return SC_ERROR_INVALID_ARGUMENTS; buf_mem.data = malloc(der->len); if (!buf_mem.data) return SC_ERROR_OUT_OF_MEMORY; memcpy(buf_mem.data, der->value, der->len); buf_mem.max = buf_mem.length = der->len; bio = BIO_new(BIO_s_mem()); if (!bio) { free(buf_mem.data); return SC_ERROR_OUT_OF_MEMORY; } BIO_set_mem_buf(bio, &buf_mem, BIO_NOCLOSE); x = d2i_X509_bio(bio, 0); BIO_free(bio); if (!x) return SC_ERROR_INVALID_DATA; bs = (BASIC_CONSTRAINTS *)X509_get_ext_d2i(x, NID_basic_constraints, NULL, NULL); if (out_authority) *out_authority = (bs && bs->ca); X509_free(x); return SC_SUCCESS; #else return SC_ERROR_NOT_SUPPORTED; #endif } static int sc_oberthur_read_file(struct sc_pkcs15_card *p15card, const char *in_path, unsigned char **out, size_t *out_len, int verify_pin) { struct sc_context *ctx = p15card->card->ctx; struct sc_card *card = p15card->card; struct sc_file *file = NULL; struct sc_path path; size_t sz; int rv; LOG_FUNC_CALLED(ctx); if (!in_path || !out || !out_len) LOG_TEST_RET(ctx, SC_ERROR_INVALID_ARGUMENTS, "Cannot read oberthur file"); sc_log(ctx, "read file '%s'; verify_pin:%i", in_path, verify_pin); *out = NULL; *out_len = 0; sc_format_path(in_path, &path); rv = sc_select_file(card, &path, &file); if (rv != SC_SUCCESS) { sc_file_free(file); LOG_TEST_RET(ctx, rv, "Cannot select oberthur file to read"); } if (file->ef_structure == SC_FILE_EF_TRANSPARENT) sz = file->size; else sz = (file->record_length + 2) * file->record_count; *out = calloc(sz, 1); if (*out == NULL) { sc_file_free(file); LOG_TEST_RET(ctx, SC_ERROR_OUT_OF_MEMORY, "Cannot read oberthur file"); } if (file->ef_structure == SC_FILE_EF_TRANSPARENT) { rv = sc_read_binary(card, 0, *out, sz, 0); } else { int rec; int offs = 0; int rec_len = file->record_length; for (rec = 1; ; rec++) { rv = sc_read_record(card, rec, *out + offs + 2, rec_len, SC_RECORD_BY_REC_NR); if (rv == SC_ERROR_RECORD_NOT_FOUND) { rv = 0; break; } else if (rv < 0) { break; } rec_len = rv; *(*out + offs) = 'R'; *(*out + offs + 1) = rv; offs += rv + 2; } sz = offs; } sc_log(ctx, "read oberthur file result %i", rv); if (verify_pin && rv == SC_ERROR_SECURITY_STATUS_NOT_SATISFIED) { struct sc_pkcs15_object *objs[0x10], *pin_obj = NULL; const struct sc_acl_entry *acl = sc_file_get_acl_entry(file, SC_AC_OP_READ); int ii; rv = sc_pkcs15_get_objects(p15card, SC_PKCS15_TYPE_AUTH_PIN, objs, 0x10); if (rv != SC_SUCCESS) { sc_file_free(file); LOG_TEST_RET(ctx, rv, "Cannot read oberthur file: get AUTH objects error"); } for (ii=0; ii<rv; ii++) { struct sc_pkcs15_auth_info *auth_info = (struct sc_pkcs15_auth_info *) objs[ii]->data; sc_log(ctx, "compare PIN/ACL refs:%i/%i, method:%i/%i", auth_info->attrs.pin.reference, acl->key_ref, auth_info->auth_method, acl->method); if (auth_info->attrs.pin.reference == (int)acl->key_ref && auth_info->auth_method == (unsigned)acl->method) { pin_obj = objs[ii]; break; } } if (!pin_obj || !pin_obj->content.value) { rv = SC_ERROR_SECURITY_STATUS_NOT_SATISFIED; } else { rv = sc_pkcs15_verify_pin(p15card, pin_obj, pin_obj->content.value, pin_obj->content.len); if (!rv) rv = sc_oberthur_read_file(p15card, in_path, out, out_len, 0); } }; sc_file_free(file); if (rv < 0) { free(*out); *out = NULL; *out_len = 0; } *out_len = sz; LOG_FUNC_RETURN(ctx, rv); } static int sc_oberthur_parse_tokeninfo (struct 
sc_pkcs15_card *p15card, unsigned char *buff, size_t len, int postpone_allowed) { struct sc_context *ctx = p15card->card->ctx; char label[0x21]; unsigned flags; int ii; LOG_FUNC_CALLED(ctx); if (!buff || len < 0x24) LOG_TEST_RET(ctx, SC_ERROR_INVALID_ARGUMENTS, "Cannot parse token info"); memset(label, 0, sizeof(label)); memcpy(label, buff, 0x20); ii = 0x20; while (*(label + --ii)==' ' && ii) ; *(label + ii + 1) = '\0'; flags = *(buff + 0x22) * 0x100 + *(buff + 0x23); set_string(&p15card->tokeninfo->label, label); set_string(&p15card->tokeninfo->manufacturer_id, "Oberthur/OpenSC"); if (flags & 0x01) p15card->tokeninfo->flags |= SC_PKCS15_TOKEN_PRN_GENERATION; sc_log(ctx, "label %s", p15card->tokeninfo->label); sc_log(ctx, "manufacturer_id %s", p15card->tokeninfo->manufacturer_id); LOG_FUNC_RETURN(ctx, SC_SUCCESS); } static int sc_oberthur_parse_containers (struct sc_pkcs15_card *p15card, unsigned char *buff, size_t len, int postpone_allowed) { struct sc_context *ctx = p15card->card->ctx; size_t offs; LOG_FUNC_CALLED(ctx); while (Containers) { struct container *next = Containers->next; free (Containers); Containers = next; } for (offs=0; offs < len;) { struct container *cont; unsigned char *ptr = buff + offs + 2; sc_log(ctx, "parse contaniers offs:%"SC_FORMAT_LEN_SIZE_T"u, len:%"SC_FORMAT_LEN_SIZE_T"u", offs, len); if (*(buff + offs) != 'R') return SC_ERROR_INVALID_DATA; cont = (struct container *)calloc(sizeof(struct container), 1); if (!cont) return SC_ERROR_OUT_OF_MEMORY; cont->exchange.id_pub = *ptr * 0x100 + *(ptr + 1); ptr += 2; cont->exchange.id_prv = *ptr * 0x100 + *(ptr + 1); ptr += 2; cont->exchange.id_cert = *ptr * 0x100 + *(ptr + 1); ptr += 2; cont->sign.id_pub = *ptr * 0x100 + *(ptr + 1); ptr += 2; cont->sign.id_prv = *ptr * 0x100 + *(ptr + 1); ptr += 2; cont->sign.id_cert = *ptr * 0x100 + *(ptr + 1); ptr += 2; memcpy(cont->uuid, ptr + 2, 36); sc_log(ctx, "UUID: %s; 0x%X, 0x%X, 0x%X", cont->uuid, cont->exchange.id_pub, cont->exchange.id_prv, cont->exchange.id_cert); if (!Containers) { Containers = cont; } else { cont->next = Containers; Containers->prev = (void *)cont; Containers = cont; } offs += *(buff + offs + 1) + 2; } LOG_FUNC_RETURN(ctx, SC_SUCCESS); } static int sc_oberthur_parse_publicinfo (struct sc_pkcs15_card *p15card, unsigned char *buff, size_t len, int postpone_allowed) { struct sc_context *ctx = p15card->card->ctx; size_t ii; int rv; LOG_FUNC_CALLED(ctx); for (ii=0; ii<len; ii+=5) { unsigned int file_id, size; if(*(buff+ii) != 0xFF) continue; file_id = 0x100 * *(buff+ii + 1) + *(buff+ii + 2); size = 0x100 * *(buff+ii + 3) + *(buff+ii + 4); sc_log(ctx, "add public object(file-id:%04X,size:%X)", file_id, size); switch (*(buff+ii + 1)) { case BASE_ID_PUB_RSA : rv = sc_pkcs15emu_oberthur_add_pubkey(p15card, file_id, size); LOG_TEST_RET(ctx, rv, "Cannot parse public key info"); break; case BASE_ID_CERT : rv = sc_pkcs15emu_oberthur_add_cert(p15card, file_id); LOG_TEST_RET(ctx, rv, "Cannot parse certificate info"); break; case BASE_ID_PUB_DES : break; case BASE_ID_PUB_DATA : rv = sc_pkcs15emu_oberthur_add_data(p15card, file_id, size, 0); LOG_TEST_RET(ctx, rv, "Cannot parse data info"); break; default: LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Public object parse error"); } } LOG_FUNC_RETURN(ctx, SC_SUCCESS); } static int sc_oberthur_parse_privateinfo (struct sc_pkcs15_card *p15card, unsigned char *buff, size_t len, int postpone_allowed) { struct sc_context *ctx = p15card->card->ctx; size_t ii; int rv; int no_more_private_keys = 0, no_more_private_data = 0; 
LOG_FUNC_CALLED(ctx); for (ii=0; ii<len; ii+=5) { unsigned int file_id, size; if(*(buff+ii) != 0xFF) continue; file_id = 0x100 * *(buff+ii + 1) + *(buff+ii + 2); size = 0x100 * *(buff+ii + 3) + *(buff+ii + 4); sc_log(ctx, "add private object (file-id:%04X, size:%X)", file_id, size); switch (*(buff+ii + 1)) { case BASE_ID_PRV_RSA : if (no_more_private_keys) break; rv = sc_pkcs15emu_oberthur_add_prvkey(p15card, file_id, size); if (rv == SC_ERROR_SECURITY_STATUS_NOT_SATISFIED && postpone_allowed) { struct sc_path path; sc_log(ctx, "postpone adding of the private keys"); sc_format_path("5011A5A5", &path); rv = sc_pkcs15_add_df(p15card, SC_PKCS15_PRKDF, &path); LOG_TEST_RET(ctx, rv, "Add PrkDF error"); no_more_private_keys = 1; } LOG_TEST_RET(ctx, rv, "Cannot parse private key info"); break; case BASE_ID_PRV_DES : break; case BASE_ID_PRV_DATA : sc_log(ctx, "*(buff+ii + 1):%X", *(buff+ii + 1)); if (no_more_private_data) break; rv = sc_pkcs15emu_oberthur_add_data(p15card, file_id, size, 1); if (rv == SC_ERROR_SECURITY_STATUS_NOT_SATISFIED && postpone_allowed) { struct sc_path path; sc_log(ctx, "postpone adding of the private data"); sc_format_path("5011A6A6", &path); rv = sc_pkcs15_add_df(p15card, SC_PKCS15_DODF, &path); LOG_TEST_RET(ctx, rv, "Add DODF error"); no_more_private_data = 1; } LOG_TEST_RET(ctx, rv, "Cannot parse private data info"); break; default: LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Private object parse error"); } } LOG_FUNC_RETURN(ctx, SC_SUCCESS); } /* Public key info: * flags:2, * CN(len:2,value:<variable length>), * ID(len:2,value:(SHA1 value)), * StartDate(Ascii:8) * EndDate(Ascii:8) * ??(0x00:2) */ static int sc_pkcs15emu_oberthur_add_pubkey(struct sc_pkcs15_card *p15card, unsigned int file_id, unsigned int size) { struct sc_context *ctx = p15card->card->ctx; struct sc_pkcs15_pubkey_info key_info; struct sc_pkcs15_object key_obj; char ch_tmp[0x100]; unsigned char *info_blob; size_t len, info_len, offs; unsigned flags; int rv; LOG_FUNC_CALLED(ctx); sc_log(ctx, "public key(file-id:%04X,size:%X)", file_id, size); memset(&key_info, 0, sizeof(key_info)); memset(&key_obj, 0, sizeof(key_obj)); snprintf(ch_tmp, sizeof(ch_tmp), "%s%04X", AWP_OBJECTS_DF_PUB, file_id | 0x100); rv = sc_oberthur_read_file(p15card, ch_tmp, &info_blob, &info_len, 1); LOG_TEST_RET(ctx, rv, "Failed to add public key: read oberthur file error"); /* Flags */ offs = 2; if (offs > info_len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add public key: no 'tag'"); flags = *(info_blob + 0) * 0x100 + *(info_blob + 1); key_info.usage = sc_oberthur_decode_usage(flags); if (flags & OBERTHUR_ATTR_MODIFIABLE) key_obj.flags = SC_PKCS15_CO_FLAG_MODIFIABLE; sc_log(ctx, "Public key key-usage:%04X", key_info.usage); /* Label */ if (offs + 2 > info_len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add public key: no 'Label'"); len = *(info_blob + offs + 1) + *(info_blob + offs) * 0x100; if (len) { if (len > sizeof(key_obj.label) - 1) len = sizeof(key_obj.label) - 1; memcpy(key_obj.label, info_blob + offs + 2, len); } offs += 2 + len; /* ID */ if (offs > info_len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add public key: no 'ID'"); len = *(info_blob + offs + 1) + *(info_blob + offs) * 0x100; if (!len || len > sizeof(key_info.id.value)) LOG_TEST_RET(ctx, SC_ERROR_INVALID_DATA, "Failed to add public key: invalid 'ID' length"); memcpy(key_info.id.value, info_blob + offs + 2, len); key_info.id.len = len; /* Ignore Start/End dates */ snprintf(ch_tmp, sizeof(ch_tmp), 
"%s%04X", AWP_OBJECTS_DF_PUB, file_id); sc_format_path(ch_tmp, &key_info.path); key_info.native = 1; key_info.key_reference = file_id & 0xFF; key_info.modulus_length = size; rv = sc_pkcs15emu_add_rsa_pubkey(p15card, &key_obj, &key_info); LOG_FUNC_RETURN(ctx, rv); } /* Certificate info: * flags:2, * Label(len:2,value:), * ID(len:2,value:(SHA1 value)), * Subject in ASN.1(len:2,value:) * Issuer in ASN.1(len:2,value:) * Serial encoded in LV or ASN.1 FIXME */ static int sc_pkcs15emu_oberthur_add_cert(struct sc_pkcs15_card *p15card, unsigned int file_id) { struct sc_context *ctx = p15card->card->ctx; struct sc_pkcs15_cert_info cinfo; struct sc_pkcs15_object cobj; unsigned char *info_blob, *cert_blob; size_t info_len, cert_len, len, offs; unsigned flags; int rv; char ch_tmp[0x20]; LOG_FUNC_CALLED(ctx); sc_log(ctx, "add certificate(file-id:%04X)", file_id); memset(&cinfo, 0, sizeof(cinfo)); memset(&cobj, 0, sizeof(cobj)); snprintf(ch_tmp, sizeof(ch_tmp), "%s%04X", AWP_OBJECTS_DF_PUB, file_id | 0x100); rv = sc_oberthur_read_file(p15card, ch_tmp, &info_blob, &info_len, 1); LOG_TEST_RET(ctx, rv, "Failed to add certificate: read oberthur file error"); if (info_len < 2) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add certificate: no 'tag'"); flags = *(info_blob + 0) * 0x100 + *(info_blob + 1); offs = 2; /* Label */ if (offs + 2 > info_len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add certificate: no 'CN'"); len = *(info_blob + offs + 1) + *(info_blob + offs) * 0x100; if (len) { if (len > sizeof(cobj.label) - 1) len = sizeof(cobj.label) - 1; memcpy(cobj.label, info_blob + offs + 2, len); } offs += 2 + len; /* ID */ if (offs > info_len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add certificate: no 'ID'"); len = *(info_blob + offs + 1) + *(info_blob + offs) * 0x100; if (len > sizeof(cinfo.id.value)) LOG_TEST_RET(ctx, SC_ERROR_INVALID_DATA, "Failed to add certificate: invalid 'ID' length"); memcpy(cinfo.id.value, info_blob + offs + 2, len); cinfo.id.len = len; /* Ignore subject, issuer and serial */ snprintf(ch_tmp, sizeof(ch_tmp), "%s%04X", AWP_OBJECTS_DF_PUB, file_id); sc_format_path(ch_tmp, &cinfo.path); rv = sc_oberthur_read_file(p15card, ch_tmp, &cert_blob, &cert_len, 1); LOG_TEST_RET(ctx, rv, "Failed to add certificate: read certificate error"); cinfo.value.value = cert_blob; cinfo.value.len = cert_len; rv = sc_oberthur_get_certificate_authority(&cinfo.value, &cinfo.authority); LOG_TEST_RET(ctx, rv, "Failed to add certificate: get certificate attributes error"); if (flags & OBERTHUR_ATTR_MODIFIABLE) cobj.flags |= SC_PKCS15_CO_FLAG_MODIFIABLE; rv = sc_pkcs15emu_add_x509_cert(p15card, &cobj, &cinfo); LOG_FUNC_RETURN(p15card->card->ctx, rv); } /* Private key info: * flags:2, * CN(len:2,value:), * ID(len:2,value:(SHA1 value)), * StartDate(Ascii:8) * EndDate(Ascii:8) * Subject in ASN.1(len:2,value:) * modulus(value:) * exponent(length:1, value:3) */ static int sc_pkcs15emu_oberthur_add_prvkey(struct sc_pkcs15_card *p15card, unsigned int file_id, unsigned int size) { struct sc_context *ctx = p15card->card->ctx; struct sc_pkcs15_prkey_info kinfo; struct sc_pkcs15_object kobj; struct crypto_container ccont; unsigned char *info_blob = NULL; size_t info_len = 0; unsigned flags; size_t offs, len; char ch_tmp[0x100]; int rv; LOG_FUNC_CALLED(ctx); sc_log(ctx, "add private key(file-id:%04X,size:%04X)", file_id, size); memset(&kinfo, 0, sizeof(kinfo)); memset(&kobj, 0, sizeof(kobj)); memset(&ccont, 0, sizeof(ccont)); rv = sc_oberthur_get_friends (file_id, 
&ccont); LOG_TEST_RET(ctx, rv, "Failed to add private key: get friends error"); if (ccont.id_cert) { struct sc_pkcs15_object *objs[32]; int ii; sc_log(ctx, "friend certificate %04X", ccont.id_cert); rv = sc_pkcs15_get_objects(p15card, SC_PKCS15_TYPE_CERT_X509, objs, 32); LOG_TEST_RET(ctx, rv, "Failed to add private key: get certificates error"); for (ii=0; ii<rv; ii++) { struct sc_pkcs15_cert_info *cert = (struct sc_pkcs15_cert_info *)objs[ii]->data; struct sc_path path = cert->path; unsigned int id = path.value[path.len - 2] * 0x100 + path.value[path.len - 1]; if (id == ccont.id_cert) { strlcpy(kobj.label, objs[ii]->label, sizeof(kobj.label)); break; } } if (ii == rv) LOG_TEST_RET(ctx, SC_ERROR_INCONSISTENT_PROFILE, "Failed to add private key: friend not found"); } snprintf(ch_tmp, sizeof(ch_tmp), "%s%04X", AWP_OBJECTS_DF_PRV, file_id | 0x100); rv = sc_oberthur_read_file(p15card, ch_tmp, &info_blob, &info_len, 1); LOG_TEST_RET(ctx, rv, "Failed to add private key: read oberthur file error"); if (info_len < 2) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add private key: no 'tag'"); flags = *(info_blob + 0) * 0x100 + *(info_blob + 1); offs = 2; /* CN */ if (offs > info_len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add private key: no 'CN'"); len = *(info_blob + offs + 1) + *(info_blob + offs) * 0x100; if (len && !strlen(kobj.label)) { if (len > sizeof(kobj.label) - 1) len = sizeof(kobj.label) - 1; strncpy(kobj.label, (char *)(info_blob + offs + 2), len); } offs += 2 + len; /* ID */ if (offs > info_len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add private key: no 'ID'"); len = *(info_blob + offs + 1) + *(info_blob + offs) * 0x100; if (!len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add private key: zero length ID"); else if (len > sizeof(kinfo.id.value)) LOG_TEST_RET(ctx, SC_ERROR_INVALID_DATA, "Failed to add private key: invalid ID length"); memcpy(kinfo.id.value, info_blob + offs + 2, len); kinfo.id.len = len; offs += 2 + len; /* Ignore Start/End dates */ offs += 16; /* Subject encoded in ASN1 */ if (offs > info_len) return SC_ERROR_UNKNOWN_DATA_RECEIVED; len = *(info_blob + offs + 1) + *(info_blob + offs) * 0x100; if (len) { kinfo.subject.value = malloc(len); if (!kinfo.subject.value) LOG_TEST_RET(ctx, SC_ERROR_OUT_OF_MEMORY, "Failed to add private key: memory allocation error"); kinfo.subject.len = len; memcpy(kinfo.subject.value, info_blob + offs + 2, len); } /* Modulus and exponent are ignored */ snprintf(ch_tmp, sizeof(ch_tmp), "%s%04X", AWP_OBJECTS_DF_PRV, file_id); sc_format_path(ch_tmp, &kinfo.path); sc_log(ctx, "Private key info path %s", ch_tmp); kinfo.modulus_length = size; kinfo.native = 1; kinfo.key_reference = file_id & 0xFF; kinfo.usage = sc_oberthur_decode_usage(flags); kobj.flags = SC_PKCS15_CO_FLAG_PRIVATE; if (flags & OBERTHUR_ATTR_MODIFIABLE) kobj.flags |= SC_PKCS15_CO_FLAG_MODIFIABLE; kobj.auth_id.len = sizeof(PinDomainID) > sizeof(kobj.auth_id.value) ? 
sizeof(kobj.auth_id.value) : sizeof(PinDomainID); memcpy(kobj.auth_id.value, PinDomainID, kobj.auth_id.len); sc_log(ctx, "Parsed private key(reference:%i,usage:%X,flags:%X)", kinfo.key_reference, kinfo.usage, kobj.flags); rv = sc_pkcs15emu_add_rsa_prkey(p15card, &kobj, &kinfo); LOG_FUNC_RETURN(ctx, rv); } static int sc_pkcs15emu_oberthur_add_data(struct sc_pkcs15_card *p15card, unsigned int file_id, unsigned int size, int private) { struct sc_context *ctx = p15card->card->ctx; struct sc_pkcs15_data_info dinfo; struct sc_pkcs15_object dobj; unsigned flags; unsigned char *info_blob = NULL, *label = NULL, *app = NULL, *oid = NULL; size_t info_len, label_len, app_len, oid_len, offs; char ch_tmp[0x100]; int rv; SC_FUNC_CALLED(ctx, SC_LOG_DEBUG_VERBOSE); sc_log(ctx, "Add data(file-id:%04X,size:%i,is-private:%i)", file_id, size, private); memset(&dinfo, 0, sizeof(dinfo)); memset(&dobj, 0, sizeof(dobj)); snprintf(ch_tmp, sizeof(ch_tmp), "%s%04X", private ? AWP_OBJECTS_DF_PRV : AWP_OBJECTS_DF_PUB, file_id | 0x100); rv = sc_oberthur_read_file(p15card, ch_tmp, &info_blob, &info_len, 1); LOG_TEST_RET(ctx, rv, "Failed to add data: read oberthur file error"); if (info_len < 2) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add certificate: no 'tag'"); flags = *(info_blob + 0) * 0x100 + *(info_blob + 1); offs = 2; /* Label */ if (offs > info_len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add data: no 'label'"); label = info_blob + offs + 2; label_len = *(info_blob + offs + 1) + *(info_blob + offs) * 0x100; if (label_len > sizeof(dobj.label) - 1) label_len = sizeof(dobj.label) - 1; offs += 2 + *(info_blob + offs + 1); /* Application */ if (offs > info_len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add data: no 'application'"); app = info_blob + offs + 2; app_len = *(info_blob + offs + 1) + *(info_blob + offs) * 0x100; if (app_len > sizeof(dinfo.app_label) - 1) app_len = sizeof(dinfo.app_label) - 1; offs += 2 + app_len; /* OID encode like DER(ASN.1(oid)) */ if (offs > info_len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add data: no 'OID'"); oid_len = *(info_blob + offs + 1) + *(info_blob + offs) * 0x100; if (oid_len) { oid = info_blob + offs + 2; if (*oid != 0x06 || (*(oid + 1) != oid_len - 2)) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add data: invalid 'OID' format"); oid += 2; oid_len -= 2; } snprintf(ch_tmp, sizeof(ch_tmp), "%s%04X", private ? AWP_OBJECTS_DF_PRV : AWP_OBJECTS_DF_PUB, file_id); sc_format_path(ch_tmp, &dinfo.path); memcpy(dobj.label, label, label_len); memcpy(dinfo.app_label, app, app_len); if (oid_len) sc_asn1_decode_object_id(oid, oid_len, &dinfo.app_oid); if (flags & OBERTHUR_ATTR_MODIFIABLE) dobj.flags |= SC_PKCS15_CO_FLAG_MODIFIABLE; if (private) { dobj.auth_id.len = sizeof(PinDomainID) > sizeof(dobj.auth_id.value) ? 
sizeof(dobj.auth_id.value) : sizeof(PinDomainID); memcpy(dobj.auth_id.value, PinDomainID, dobj.auth_id.len); dobj.flags |= SC_PKCS15_CO_FLAG_PRIVATE; } rv = sc_pkcs15emu_add_data_object(p15card, &dobj, &dinfo); LOG_FUNC_RETURN(p15card->card->ctx, rv); } static int sc_pkcs15emu_oberthur_init(struct sc_pkcs15_card * p15card) { struct sc_context *ctx = p15card->card->ctx; struct sc_pkcs15_auth_info auth_info; struct sc_pkcs15_object obj; struct sc_card *card = p15card->card; struct sc_path path; int rv, ii, tries_left; char serial[0x10]; unsigned char sopin_reference = 0x04; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); sc_bin_to_hex(card->serialnr.value, card->serialnr.len, serial, sizeof(serial), 0); set_string(&p15card->tokeninfo->serial_number, serial); p15card->ops.parse_df = sc_awp_parse_df; p15card->ops.clear = sc_awp_clear; sc_log(ctx, "Oberthur init: serial %s", p15card->tokeninfo->serial_number); sc_format_path(AWP_PIN_DF, &path); rv = sc_select_file(card, &path, NULL); LOG_TEST_RET(ctx, rv, "Oberthur init failed: cannot select PIN dir"); tries_left = -1; rv = sc_verify(card, SC_AC_CHV, sopin_reference, (unsigned char *)"", 0, &tries_left); if (rv && rv != SC_ERROR_PIN_CODE_INCORRECT) { sopin_reference = 0x84; rv = sc_verify(card, SC_AC_CHV, sopin_reference, (unsigned char *)"", 0, &tries_left); } if (rv && rv != SC_ERROR_PIN_CODE_INCORRECT) LOG_TEST_RET(ctx, rv, "Invalid state of SO-PIN"); /* add PIN */ memset(&auth_info, 0, sizeof(auth_info)); memset(&obj, 0, sizeof(obj)); auth_info.auth_type = SC_PKCS15_PIN_AUTH_TYPE_PIN; auth_info.auth_method = SC_AC_CHV; auth_info.auth_id.len = 1; auth_info.auth_id.value[0] = 0xFF; auth_info.attrs.pin.min_length = 4; auth_info.attrs.pin.max_length = 64; auth_info.attrs.pin.stored_length = 64; auth_info.attrs.pin.type = SC_PKCS15_PIN_TYPE_ASCII_NUMERIC; auth_info.attrs.pin.reference = sopin_reference; auth_info.attrs.pin.pad_char = 0xFF; auth_info.attrs.pin.flags = SC_PKCS15_PIN_FLAG_CASE_SENSITIVE | SC_PKCS15_PIN_FLAG_INITIALIZED | SC_PKCS15_PIN_FLAG_NEEDS_PADDING | SC_PKCS15_PIN_FLAG_SO_PIN; auth_info.tries_left = tries_left; auth_info.logged_in = SC_PIN_STATE_UNKNOWN; strncpy(obj.label, "SO PIN", SC_PKCS15_MAX_LABEL_SIZE-1); obj.flags = SC_PKCS15_CO_FLAG_MODIFIABLE | SC_PKCS15_CO_FLAG_PRIVATE; sc_log(ctx, "Add PIN(%s,auth_id:%s,reference:%i)", obj.label, sc_pkcs15_print_id(&auth_info.auth_id), auth_info.attrs.pin.reference); rv = sc_pkcs15emu_add_pin_obj(p15card, &obj, &auth_info); LOG_TEST_RET(ctx, rv, "Oberthur init failed: cannot add PIN object"); tries_left = -1; rv = sc_verify(card, SC_AC_CHV, 0x81, (unsigned char *)"", 0, &tries_left); if (rv == SC_ERROR_PIN_CODE_INCORRECT) { /* add PIN */ memset(&auth_info, 0, sizeof(auth_info)); memset(&obj, 0, sizeof(obj)); auth_info.auth_id.len = sizeof(PinDomainID) > sizeof(auth_info.auth_id.value) ? 
sizeof(auth_info.auth_id.value) : sizeof(PinDomainID); memcpy(auth_info.auth_id.value, PinDomainID, auth_info.auth_id.len); auth_info.auth_method = SC_AC_CHV; auth_info.attrs.pin.min_length = 4; auth_info.attrs.pin.max_length = 64; auth_info.attrs.pin.stored_length = 64; auth_info.attrs.pin.type = SC_PKCS15_PIN_TYPE_ASCII_NUMERIC; auth_info.attrs.pin.reference = 0x81; auth_info.attrs.pin.pad_char = 0xFF; auth_info.attrs.pin.flags = SC_PKCS15_PIN_FLAG_CASE_SENSITIVE | SC_PKCS15_PIN_FLAG_INITIALIZED | SC_PKCS15_PIN_FLAG_NEEDS_PADDING | SC_PKCS15_PIN_FLAG_LOCAL; auth_info.tries_left = tries_left; strncpy(obj.label, PIN_DOMAIN_LABEL, SC_PKCS15_MAX_LABEL_SIZE-1); obj.flags = SC_PKCS15_CO_FLAG_MODIFIABLE | SC_PKCS15_CO_FLAG_PRIVATE; if (sopin_reference == 0x84) { /* * auth_pin_reset_oberthur_style() in card-oberthur.c * always uses PUK with reference 0x84 for * unblocking of User PIN */ obj.auth_id.len = 1; obj.auth_id.value[0] = 0xFF; } sc_format_path(AWP_PIN_DF, &auth_info.path); auth_info.path.type = SC_PATH_TYPE_PATH; sc_log(ctx, "Add PIN(%s,auth_id:%s,reference:%i)", obj.label, sc_pkcs15_print_id(&auth_info.auth_id), auth_info.attrs.pin.reference); rv = sc_pkcs15emu_add_pin_obj(p15card, &obj, &auth_info); LOG_TEST_RET(ctx, rv, "Oberthur init failed: cannot add PIN object"); } else if (rv != SC_ERROR_DATA_OBJECT_NOT_FOUND) { LOG_TEST_RET(ctx, rv, "Oberthur init failed: cannot verify PIN"); } for (ii=0; oberthur_infos[ii].name; ii++) { sc_log(ctx, "Oberthur init: read %s file", oberthur_infos[ii].name); rv = sc_oberthur_read_file(p15card, oberthur_infos[ii].path, &oberthur_infos[ii].content, &oberthur_infos[ii].len, 1); LOG_TEST_RET(ctx, rv, "Oberthur init failed: read oberthur file error"); sc_log(ctx, "Oberthur init: parse %s file, content length %"SC_FORMAT_LEN_SIZE_T"u", oberthur_infos[ii].name, oberthur_infos[ii].len); rv = oberthur_infos[ii].parser(p15card, oberthur_infos[ii].content, oberthur_infos[ii].len, oberthur_infos[ii].postpone_allowed); LOG_TEST_RET(ctx, rv, "Oberthur init failed: parse error"); } LOG_FUNC_RETURN(ctx, SC_SUCCESS); } static int oberthur_detect_card(struct sc_pkcs15_card * p15card) { struct sc_card *card = p15card->card; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); if (p15card->card->type != SC_CARD_TYPE_OBERTHUR_64K) LOG_FUNC_RETURN(p15card->card->ctx, SC_ERROR_WRONG_CARD); LOG_FUNC_RETURN(p15card->card->ctx, SC_SUCCESS); } int sc_pkcs15emu_oberthur_init_ex(struct sc_pkcs15_card * p15card, struct sc_aid *aid) { int rv; LOG_FUNC_CALLED(p15card->card->ctx); rv = oberthur_detect_card(p15card); if (!rv) rv = sc_pkcs15emu_oberthur_init(p15card); LOG_FUNC_RETURN(p15card->card->ctx, rv); } static int sc_awp_parse_df(struct sc_pkcs15_card *p15card, struct sc_pkcs15_df *df) { struct sc_context *ctx = p15card->card->ctx; unsigned char *buf = NULL; size_t buf_len; int rv; LOG_FUNC_CALLED(ctx); if (df->type != SC_PKCS15_PRKDF && df->type != SC_PKCS15_DODF) LOG_FUNC_RETURN(ctx, SC_ERROR_NOT_SUPPORTED); if (df->enumerated) LOG_FUNC_RETURN(ctx, SC_SUCCESS); rv = sc_oberthur_read_file(p15card, AWP_OBJECTS_LIST_PRV, &buf, &buf_len, 1); LOG_TEST_RET(ctx, rv, "Parse DF: read private objects info failed"); rv = sc_oberthur_parse_privateinfo(p15card, buf, buf_len, 0); if (buf) free(buf); if (rv == SC_ERROR_SECURITY_STATUS_NOT_SATISFIED) LOG_FUNC_RETURN(ctx, SC_SUCCESS); LOG_TEST_RET(ctx, rv, "Parse DF: private info parse error"); df->enumerated = 1; LOG_FUNC_RETURN(ctx, rv); } static void sc_awp_clear(struct sc_pkcs15_card *p15card) { LOG_FUNC_CALLED(p15card->card->ctx); }
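The source field above (record 211, CVE-2020-26570) parses Oberthur attribute blobs as a sequence of two-byte big-endian length/value fields (for example the Label and ID fields, computed as `*(blob + offs) * 0x100 + *(blob + offs + 1)`). Below is a hedged, self-contained sketch of reading one such field with an explicit end-of-blob check before copying; `parse_lv` and its parameters are illustrative names, not functions from the record.

```c
#include <stddef.h>
#include <string.h>

/* Illustrative sketch: read one length/value field from an attribute blob.
 * The length is two bytes, big-endian (hi * 0x100 + lo).  Returns the number
 * of bytes consumed, or 0 if the field does not fit inside the blob. */
static size_t parse_lv(const unsigned char *blob, size_t blob_len, size_t offs,
                       unsigned char *out, size_t out_size, size_t *out_len)
{
    if (offs + 2 > blob_len)
        return 0;                              /* no room for the length bytes */

    size_t len = (size_t)blob[offs] * 0x100 + blob[offs + 1];
    if (offs + 2 + len > blob_len)
        return 0;                              /* value would run past the blob */

    if (len > out_size)
        len = out_size;                        /* clamp to the caller's buffer */
    memcpy(out, blob + offs + 2, len);
    *out_len = len;
    return 2 + len;
}
```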
null
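The ground_truth field that follows differs from the source above mainly in sc_oberthur_read_file: the per-record loop is bounded by `file->record_count` and the record counters become `size_t`, so the loop can never write more records than the `(record_length + 2) * record_count` buffer was sized for. The code below is only a minimal sketch of that bounded pattern; `read_record_fn` and `read_all_records` are hypothetical names, not OpenSC APIs.

```c
#include <stddef.h>

/* Hypothetical callback standing in for the card I/O: reads record `rec`
 * into `dst`, writing at most `max_len` bytes, and returns the byte count
 * (or <= 0 when there is no such record). */
typedef int (*read_record_fn)(size_t rec, unsigned char *dst, size_t max_len);

/* Illustrative sketch of a record-read loop that never writes past the
 * buffer it was given: `out` is sized for `record_count` records of at most
 * `record_length` bytes each, plus a 2-byte header per record. */
static size_t read_all_records(unsigned char *out, size_t record_length,
                               size_t record_count, read_record_fn read_record)
{
    size_t offs = 0;

    for (size_t rec = 1; ; rec++) {
        if (rec > record_count)               /* stop once the buffer is used up */
            break;

        int got = read_record(rec, out + offs + 2, record_length);
        if (got <= 0)                         /* no more records or read error */
            break;

        out[offs] = 'R';                      /* 2-byte per-record header */
        out[offs + 1] = (unsigned char)got;
        offs += (size_t)got + 2;
    }
    return offs;                              /* total bytes written */
}
```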
/* * PKCS15 emulation layer for Oberthur card. * * Copyright (C) 2010, Viktor Tarasov <vtarasov@opentrust.com> * Copyright (C) 2005, Andrea Frigido <andrea@frisoft.it> * Copyright (C) 2005, Sirio Capizzi <graaf@virgilio.it> * Copyright (C) 2004, Antonino Iacono <ant_iacono@tin.it> * Copyright (C) 2003, Olaf Kirch <okir@suse.de> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <stdlib.h> #include <string.h> #include <stdio.h> #include "../common/compat_strlcpy.h" #include "pkcs15.h" #include "log.h" #include "asn1.h" #include "internal.h" #ifdef ENABLE_OPENSSL #include <openssl/bio.h> #include <openssl/crypto.h> #include <openssl/x509.h> #include <openssl/x509v3.h> #endif #define OBERTHUR_ATTR_MODIFIABLE 0x0001 #define OBERTHUR_ATTR_TRUSTED 0x0002 #define OBERTHUR_ATTR_LOCAL 0x0004 #define OBERTHUR_ATTR_ENCRYPT 0x0008 #define OBERTHUR_ATTR_DECRYPT 0x0010 #define OBERTHUR_ATTR_SIGN 0x0020 #define OBERTHUR_ATTR_VERIFY 0x0040 #define OBERTHUR_ATTR_RSIGN 0x0080 #define OBERTHUR_ATTR_RVERIFY 0x0100 #define OBERTHUR_ATTR_WRAP 0x0200 #define OBERTHUR_ATTR_UNWRAP 0x0400 #define OBERTHUR_ATTR_DERIVE 0x0800 #define USAGE_PRV_ENC (SC_PKCS15_PRKEY_USAGE_ENCRYPT | SC_PKCS15_PRKEY_USAGE_DECRYPT |\ SC_PKCS15_PRKEY_USAGE_WRAP | SC_PKCS15_PRKEY_USAGE_UNWRAP) #define USAGE_PRV_AUT SC_PKCS15_PRKEY_USAGE_SIGN #define USAGE_PRV_SIGN (SC_PKCS15_PRKEY_USAGE_SIGN | SC_PKCS15_PRKEY_USAGE_NONREPUDIATION) #define USAGE_PUB_ENC (SC_PKCS15_PRKEY_USAGE_ENCRYPT | SC_PKCS15_PRKEY_USAGE_WRAP) #define USAGE_PUB_AUT SC_PKCS15_PRKEY_USAGE_VERIFY #define USAGE_PUB_SIGN (SC_PKCS15_PRKEY_USAGE_VERIFY | SC_PKCS15_PRKEY_USAGE_VERIFYRECOVER) #define PIN_DOMAIN_LABEL "SCM" const unsigned char PinDomainID[3] = {0x53, 0x43, 0x4D}; #define AWP_PIN_DF "3F005011" #define AWP_TOKEN_INFO "3F0050111000" #define AWP_PUK_FILE "3F0050112000" #define AWP_CONTAINERS_MS "3F0050113000" #define AWP_OBJECTS_LIST_PUB "3F0050114000" #define AWP_OBJECTS_LIST_PRV "3F0050115000" #define AWP_OBJECTS_DF_PUB "3F0050119001" #define AWP_OBJECTS_DF_PRV "3F0050119002" #define AWP_BASE_RSA_PRV "3F00501190023000" #define AWP_BASE_RSA_PUB "3F00501190011000" #define AWP_BASE_CERTIFICATE "3F00501190012000" #define BASE_ID_PUB_RSA 0x10 #define BASE_ID_CERT 0x20 #define BASE_ID_PRV_RSA 0x30 #define BASE_ID_PRV_DES 0x40 #define BASE_ID_PUB_DATA 0x50 #define BASE_ID_PRV_DATA 0x60 #define BASE_ID_PUB_DES 0x70 static int sc_pkcs15emu_oberthur_add_prvkey(struct sc_pkcs15_card *, unsigned, unsigned); static int sc_pkcs15emu_oberthur_add_pubkey(struct sc_pkcs15_card *, unsigned, unsigned); static int sc_pkcs15emu_oberthur_add_cert(struct sc_pkcs15_card *, unsigned); static int sc_pkcs15emu_oberthur_add_data(struct sc_pkcs15_card *, unsigned, unsigned, int); static int sc_oberthur_parse_tokeninfo (struct sc_pkcs15_card *, unsigned char *, size_t, int); 
static int sc_oberthur_parse_containers (struct sc_pkcs15_card *, unsigned char *, size_t, int); static int sc_oberthur_parse_publicinfo (struct sc_pkcs15_card *, unsigned char *, size_t, int); static int sc_oberthur_parse_privateinfo (struct sc_pkcs15_card *, unsigned char *, size_t, int); static int sc_awp_parse_df(struct sc_pkcs15_card *, struct sc_pkcs15_df *); static void sc_awp_clear(struct sc_pkcs15_card *); struct crypto_container { unsigned id_pub; unsigned id_prv; unsigned id_cert; }; struct container { char uuid[37]; struct crypto_container exchange; struct crypto_container sign; struct container *next; struct container *prev; }; struct container *Containers = NULL; static struct { const char *name; const char *path; unsigned char *content; size_t len; int (*parser)(struct sc_pkcs15_card *, unsigned char *, size_t, int); int postpone_allowed; } oberthur_infos[] = { /* Never change the following order */ { "Token info", AWP_TOKEN_INFO, NULL, 0, sc_oberthur_parse_tokeninfo, 0}, { "Containers MS", AWP_CONTAINERS_MS, NULL, 0, sc_oberthur_parse_containers, 0}, { "Public objects list", AWP_OBJECTS_LIST_PUB, NULL, 0, sc_oberthur_parse_publicinfo, 0}, { "Private objects list", AWP_OBJECTS_LIST_PRV, NULL, 0, sc_oberthur_parse_privateinfo, 1}, { NULL, NULL, NULL, 0, NULL, 0} }; static unsigned sc_oberthur_decode_usage(unsigned flags) { unsigned ret = 0; if (flags & OBERTHUR_ATTR_ENCRYPT) ret |= SC_PKCS15_PRKEY_USAGE_ENCRYPT; if (flags & OBERTHUR_ATTR_DECRYPT) ret |= SC_PKCS15_PRKEY_USAGE_DECRYPT; if (flags & OBERTHUR_ATTR_SIGN) ret |= SC_PKCS15_PRKEY_USAGE_SIGN; if (flags & OBERTHUR_ATTR_RSIGN) ret |= SC_PKCS15_PRKEY_USAGE_SIGNRECOVER; if (flags & OBERTHUR_ATTR_WRAP) ret |= SC_PKCS15_PRKEY_USAGE_WRAP; if (flags & OBERTHUR_ATTR_UNWRAP) ret |= SC_PKCS15_PRKEY_USAGE_UNWRAP; if (flags & OBERTHUR_ATTR_VERIFY) ret |= SC_PKCS15_PRKEY_USAGE_VERIFY; if (flags & OBERTHUR_ATTR_RVERIFY) ret |= SC_PKCS15_PRKEY_USAGE_VERIFYRECOVER; if (flags & OBERTHUR_ATTR_DERIVE) ret |= SC_PKCS15_PRKEY_USAGE_DERIVE; return ret; } static int sc_oberthur_get_friends (unsigned int id, struct crypto_container *ccont) { struct container *cont; for (cont = Containers; cont; cont = cont->next) { if (cont->exchange.id_pub == id || cont->exchange.id_prv == id || cont->exchange.id_cert == id) { if (ccont) memcpy(ccont, &cont->exchange, sizeof(struct crypto_container)); break; } if (cont->sign.id_pub == id || cont->sign.id_prv == id || cont->sign.id_cert == id) { if (ccont) memcpy(ccont, &cont->sign, sizeof(struct crypto_container)); break; } } return cont ? 
0 : SC_ERROR_TEMPLATE_NOT_FOUND; } static int sc_oberthur_get_certificate_authority(struct sc_pkcs15_der *der, int *out_authority) { #ifdef ENABLE_OPENSSL X509 *x; BUF_MEM buf_mem; BIO *bio = NULL; BASIC_CONSTRAINTS *bs = NULL; if (!der) return SC_ERROR_INVALID_ARGUMENTS; buf_mem.data = malloc(der->len); if (!buf_mem.data) return SC_ERROR_OUT_OF_MEMORY; memcpy(buf_mem.data, der->value, der->len); buf_mem.max = buf_mem.length = der->len; bio = BIO_new(BIO_s_mem()); if (!bio) { free(buf_mem.data); return SC_ERROR_OUT_OF_MEMORY; } BIO_set_mem_buf(bio, &buf_mem, BIO_NOCLOSE); x = d2i_X509_bio(bio, 0); BIO_free(bio); if (!x) return SC_ERROR_INVALID_DATA; bs = (BASIC_CONSTRAINTS *)X509_get_ext_d2i(x, NID_basic_constraints, NULL, NULL); if (out_authority) *out_authority = (bs && bs->ca); X509_free(x); return SC_SUCCESS; #else return SC_ERROR_NOT_SUPPORTED; #endif } static int sc_oberthur_read_file(struct sc_pkcs15_card *p15card, const char *in_path, unsigned char **out, size_t *out_len, int verify_pin) { struct sc_context *ctx = p15card->card->ctx; struct sc_card *card = p15card->card; struct sc_file *file = NULL; struct sc_path path; size_t sz; int rv; LOG_FUNC_CALLED(ctx); if (!in_path || !out || !out_len) LOG_TEST_RET(ctx, SC_ERROR_INVALID_ARGUMENTS, "Cannot read oberthur file"); sc_log(ctx, "read file '%s'; verify_pin:%i", in_path, verify_pin); *out = NULL; *out_len = 0; sc_format_path(in_path, &path); rv = sc_select_file(card, &path, &file); if (rv != SC_SUCCESS) { sc_file_free(file); LOG_TEST_RET(ctx, rv, "Cannot select oberthur file to read"); } if (file->ef_structure == SC_FILE_EF_TRANSPARENT) sz = file->size; else sz = (file->record_length + 2) * file->record_count; *out = calloc(sz, 1); if (*out == NULL) { sc_file_free(file); LOG_TEST_RET(ctx, SC_ERROR_OUT_OF_MEMORY, "Cannot read oberthur file"); } if (file->ef_structure == SC_FILE_EF_TRANSPARENT) { rv = sc_read_binary(card, 0, *out, sz, 0); } else { size_t rec; size_t offs = 0; size_t rec_len = file->record_length; for (rec = 1; ; rec++) { if (rec > file->record_count) { rv = 0; break; } rv = sc_read_record(card, rec, *out + offs + 2, rec_len, SC_RECORD_BY_REC_NR); if (rv == SC_ERROR_RECORD_NOT_FOUND) { rv = 0; break; } else if (rv < 0) { break; } rec_len = rv; *(*out + offs) = 'R'; *(*out + offs + 1) = rv; offs += rv + 2; } sz = offs; } sc_log(ctx, "read oberthur file result %i", rv); if (verify_pin && rv == SC_ERROR_SECURITY_STATUS_NOT_SATISFIED) { struct sc_pkcs15_object *objs[0x10], *pin_obj = NULL; const struct sc_acl_entry *acl = sc_file_get_acl_entry(file, SC_AC_OP_READ); int ii; rv = sc_pkcs15_get_objects(p15card, SC_PKCS15_TYPE_AUTH_PIN, objs, 0x10); if (rv != SC_SUCCESS) { sc_file_free(file); LOG_TEST_RET(ctx, rv, "Cannot read oberthur file: get AUTH objects error"); } for (ii=0; ii<rv; ii++) { struct sc_pkcs15_auth_info *auth_info = (struct sc_pkcs15_auth_info *) objs[ii]->data; sc_log(ctx, "compare PIN/ACL refs:%i/%i, method:%i/%i", auth_info->attrs.pin.reference, acl->key_ref, auth_info->auth_method, acl->method); if (auth_info->attrs.pin.reference == (int)acl->key_ref && auth_info->auth_method == (unsigned)acl->method) { pin_obj = objs[ii]; break; } } if (!pin_obj || !pin_obj->content.value) { rv = SC_ERROR_SECURITY_STATUS_NOT_SATISFIED; } else { rv = sc_pkcs15_verify_pin(p15card, pin_obj, pin_obj->content.value, pin_obj->content.len); if (!rv) rv = sc_oberthur_read_file(p15card, in_path, out, out_len, 0); } }; sc_file_free(file); if (rv < 0) { free(*out); *out = NULL; *out_len = 0; } *out_len = sz; LOG_FUNC_RETURN(ctx, 
rv); } static int sc_oberthur_parse_tokeninfo (struct sc_pkcs15_card *p15card, unsigned char *buff, size_t len, int postpone_allowed) { struct sc_context *ctx = p15card->card->ctx; char label[0x21]; unsigned flags; int ii; LOG_FUNC_CALLED(ctx); if (!buff || len < 0x24) LOG_TEST_RET(ctx, SC_ERROR_INVALID_ARGUMENTS, "Cannot parse token info"); memset(label, 0, sizeof(label)); memcpy(label, buff, 0x20); ii = 0x20; while (*(label + --ii)==' ' && ii) ; *(label + ii + 1) = '\0'; flags = *(buff + 0x22) * 0x100 + *(buff + 0x23); set_string(&p15card->tokeninfo->label, label); set_string(&p15card->tokeninfo->manufacturer_id, "Oberthur/OpenSC"); if (flags & 0x01) p15card->tokeninfo->flags |= SC_PKCS15_TOKEN_PRN_GENERATION; sc_log(ctx, "label %s", p15card->tokeninfo->label); sc_log(ctx, "manufacturer_id %s", p15card->tokeninfo->manufacturer_id); LOG_FUNC_RETURN(ctx, SC_SUCCESS); } static int sc_oberthur_parse_containers (struct sc_pkcs15_card *p15card, unsigned char *buff, size_t len, int postpone_allowed) { struct sc_context *ctx = p15card->card->ctx; size_t offs; LOG_FUNC_CALLED(ctx); while (Containers) { struct container *next = Containers->next; free (Containers); Containers = next; } for (offs=0; offs < len;) { struct container *cont; unsigned char *ptr = buff + offs + 2; sc_log(ctx, "parse contaniers offs:%"SC_FORMAT_LEN_SIZE_T"u, len:%"SC_FORMAT_LEN_SIZE_T"u", offs, len); if (*(buff + offs) != 'R') return SC_ERROR_INVALID_DATA; cont = (struct container *)calloc(sizeof(struct container), 1); if (!cont) return SC_ERROR_OUT_OF_MEMORY; cont->exchange.id_pub = *ptr * 0x100 + *(ptr + 1); ptr += 2; cont->exchange.id_prv = *ptr * 0x100 + *(ptr + 1); ptr += 2; cont->exchange.id_cert = *ptr * 0x100 + *(ptr + 1); ptr += 2; cont->sign.id_pub = *ptr * 0x100 + *(ptr + 1); ptr += 2; cont->sign.id_prv = *ptr * 0x100 + *(ptr + 1); ptr += 2; cont->sign.id_cert = *ptr * 0x100 + *(ptr + 1); ptr += 2; memcpy(cont->uuid, ptr + 2, 36); sc_log(ctx, "UUID: %s; 0x%X, 0x%X, 0x%X", cont->uuid, cont->exchange.id_pub, cont->exchange.id_prv, cont->exchange.id_cert); if (!Containers) { Containers = cont; } else { cont->next = Containers; Containers->prev = (void *)cont; Containers = cont; } offs += *(buff + offs + 1) + 2; } LOG_FUNC_RETURN(ctx, SC_SUCCESS); } static int sc_oberthur_parse_publicinfo (struct sc_pkcs15_card *p15card, unsigned char *buff, size_t len, int postpone_allowed) { struct sc_context *ctx = p15card->card->ctx; size_t ii; int rv; LOG_FUNC_CALLED(ctx); for (ii=0; ii<len; ii+=5) { unsigned int file_id, size; if(*(buff+ii) != 0xFF) continue; file_id = 0x100 * *(buff+ii + 1) + *(buff+ii + 2); size = 0x100 * *(buff+ii + 3) + *(buff+ii + 4); sc_log(ctx, "add public object(file-id:%04X,size:%X)", file_id, size); switch (*(buff+ii + 1)) { case BASE_ID_PUB_RSA : rv = sc_pkcs15emu_oberthur_add_pubkey(p15card, file_id, size); LOG_TEST_RET(ctx, rv, "Cannot parse public key info"); break; case BASE_ID_CERT : rv = sc_pkcs15emu_oberthur_add_cert(p15card, file_id); LOG_TEST_RET(ctx, rv, "Cannot parse certificate info"); break; case BASE_ID_PUB_DES : break; case BASE_ID_PUB_DATA : rv = sc_pkcs15emu_oberthur_add_data(p15card, file_id, size, 0); LOG_TEST_RET(ctx, rv, "Cannot parse data info"); break; default: LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Public object parse error"); } } LOG_FUNC_RETURN(ctx, SC_SUCCESS); } static int sc_oberthur_parse_privateinfo (struct sc_pkcs15_card *p15card, unsigned char *buff, size_t len, int postpone_allowed) { struct sc_context *ctx = p15card->card->ctx; size_t ii; int rv; int 
no_more_private_keys = 0, no_more_private_data = 0; LOG_FUNC_CALLED(ctx); for (ii=0; ii<len; ii+=5) { unsigned int file_id, size; if(*(buff+ii) != 0xFF) continue; file_id = 0x100 * *(buff+ii + 1) + *(buff+ii + 2); size = 0x100 * *(buff+ii + 3) + *(buff+ii + 4); sc_log(ctx, "add private object (file-id:%04X, size:%X)", file_id, size); switch (*(buff+ii + 1)) { case BASE_ID_PRV_RSA : if (no_more_private_keys) break; rv = sc_pkcs15emu_oberthur_add_prvkey(p15card, file_id, size); if (rv == SC_ERROR_SECURITY_STATUS_NOT_SATISFIED && postpone_allowed) { struct sc_path path; sc_log(ctx, "postpone adding of the private keys"); sc_format_path("5011A5A5", &path); rv = sc_pkcs15_add_df(p15card, SC_PKCS15_PRKDF, &path); LOG_TEST_RET(ctx, rv, "Add PrkDF error"); no_more_private_keys = 1; } LOG_TEST_RET(ctx, rv, "Cannot parse private key info"); break; case BASE_ID_PRV_DES : break; case BASE_ID_PRV_DATA : sc_log(ctx, "*(buff+ii + 1):%X", *(buff+ii + 1)); if (no_more_private_data) break; rv = sc_pkcs15emu_oberthur_add_data(p15card, file_id, size, 1); if (rv == SC_ERROR_SECURITY_STATUS_NOT_SATISFIED && postpone_allowed) { struct sc_path path; sc_log(ctx, "postpone adding of the private data"); sc_format_path("5011A6A6", &path); rv = sc_pkcs15_add_df(p15card, SC_PKCS15_DODF, &path); LOG_TEST_RET(ctx, rv, "Add DODF error"); no_more_private_data = 1; } LOG_TEST_RET(ctx, rv, "Cannot parse private data info"); break; default: LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Private object parse error"); } } LOG_FUNC_RETURN(ctx, SC_SUCCESS); } /* Public key info: * flags:2, * CN(len:2,value:<variable length>), * ID(len:2,value:(SHA1 value)), * StartDate(Ascii:8) * EndDate(Ascii:8) * ??(0x00:2) */ static int sc_pkcs15emu_oberthur_add_pubkey(struct sc_pkcs15_card *p15card, unsigned int file_id, unsigned int size) { struct sc_context *ctx = p15card->card->ctx; struct sc_pkcs15_pubkey_info key_info; struct sc_pkcs15_object key_obj; char ch_tmp[0x100]; unsigned char *info_blob; size_t len, info_len, offs; unsigned flags; int rv; LOG_FUNC_CALLED(ctx); sc_log(ctx, "public key(file-id:%04X,size:%X)", file_id, size); memset(&key_info, 0, sizeof(key_info)); memset(&key_obj, 0, sizeof(key_obj)); snprintf(ch_tmp, sizeof(ch_tmp), "%s%04X", AWP_OBJECTS_DF_PUB, file_id | 0x100); rv = sc_oberthur_read_file(p15card, ch_tmp, &info_blob, &info_len, 1); LOG_TEST_RET(ctx, rv, "Failed to add public key: read oberthur file error"); /* Flags */ offs = 2; if (offs > info_len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add public key: no 'tag'"); flags = *(info_blob + 0) * 0x100 + *(info_blob + 1); key_info.usage = sc_oberthur_decode_usage(flags); if (flags & OBERTHUR_ATTR_MODIFIABLE) key_obj.flags = SC_PKCS15_CO_FLAG_MODIFIABLE; sc_log(ctx, "Public key key-usage:%04X", key_info.usage); /* Label */ if (offs + 2 > info_len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add public key: no 'Label'"); len = *(info_blob + offs + 1) + *(info_blob + offs) * 0x100; if (len) { if (len > sizeof(key_obj.label) - 1) len = sizeof(key_obj.label) - 1; memcpy(key_obj.label, info_blob + offs + 2, len); } offs += 2 + len; /* ID */ if (offs > info_len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add public key: no 'ID'"); len = *(info_blob + offs + 1) + *(info_blob + offs) * 0x100; if (!len || len > sizeof(key_info.id.value)) LOG_TEST_RET(ctx, SC_ERROR_INVALID_DATA, "Failed to add public key: invalid 'ID' length"); memcpy(key_info.id.value, info_blob + offs + 2, len); key_info.id.len = len; /* Ignore 
Start/End dates */ snprintf(ch_tmp, sizeof(ch_tmp), "%s%04X", AWP_OBJECTS_DF_PUB, file_id); sc_format_path(ch_tmp, &key_info.path); key_info.native = 1; key_info.key_reference = file_id & 0xFF; key_info.modulus_length = size; rv = sc_pkcs15emu_add_rsa_pubkey(p15card, &key_obj, &key_info); LOG_FUNC_RETURN(ctx, rv); } /* Certificate info: * flags:2, * Label(len:2,value:), * ID(len:2,value:(SHA1 value)), * Subject in ASN.1(len:2,value:) * Issuer in ASN.1(len:2,value:) * Serial encoded in LV or ASN.1 FIXME */ static int sc_pkcs15emu_oberthur_add_cert(struct sc_pkcs15_card *p15card, unsigned int file_id) { struct sc_context *ctx = p15card->card->ctx; struct sc_pkcs15_cert_info cinfo; struct sc_pkcs15_object cobj; unsigned char *info_blob, *cert_blob; size_t info_len, cert_len, len, offs; unsigned flags; int rv; char ch_tmp[0x20]; LOG_FUNC_CALLED(ctx); sc_log(ctx, "add certificate(file-id:%04X)", file_id); memset(&cinfo, 0, sizeof(cinfo)); memset(&cobj, 0, sizeof(cobj)); snprintf(ch_tmp, sizeof(ch_tmp), "%s%04X", AWP_OBJECTS_DF_PUB, file_id | 0x100); rv = sc_oberthur_read_file(p15card, ch_tmp, &info_blob, &info_len, 1); LOG_TEST_RET(ctx, rv, "Failed to add certificate: read oberthur file error"); if (info_len < 2) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add certificate: no 'tag'"); flags = *(info_blob + 0) * 0x100 + *(info_blob + 1); offs = 2; /* Label */ if (offs + 2 > info_len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add certificate: no 'CN'"); len = *(info_blob + offs + 1) + *(info_blob + offs) * 0x100; if (len) { if (len > sizeof(cobj.label) - 1) len = sizeof(cobj.label) - 1; memcpy(cobj.label, info_blob + offs + 2, len); } offs += 2 + len; /* ID */ if (offs > info_len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add certificate: no 'ID'"); len = *(info_blob + offs + 1) + *(info_blob + offs) * 0x100; if (len > sizeof(cinfo.id.value)) LOG_TEST_RET(ctx, SC_ERROR_INVALID_DATA, "Failed to add certificate: invalid 'ID' length"); memcpy(cinfo.id.value, info_blob + offs + 2, len); cinfo.id.len = len; /* Ignore subject, issuer and serial */ snprintf(ch_tmp, sizeof(ch_tmp), "%s%04X", AWP_OBJECTS_DF_PUB, file_id); sc_format_path(ch_tmp, &cinfo.path); rv = sc_oberthur_read_file(p15card, ch_tmp, &cert_blob, &cert_len, 1); LOG_TEST_RET(ctx, rv, "Failed to add certificate: read certificate error"); cinfo.value.value = cert_blob; cinfo.value.len = cert_len; rv = sc_oberthur_get_certificate_authority(&cinfo.value, &cinfo.authority); LOG_TEST_RET(ctx, rv, "Failed to add certificate: get certificate attributes error"); if (flags & OBERTHUR_ATTR_MODIFIABLE) cobj.flags |= SC_PKCS15_CO_FLAG_MODIFIABLE; rv = sc_pkcs15emu_add_x509_cert(p15card, &cobj, &cinfo); LOG_FUNC_RETURN(p15card->card->ctx, rv); } /* Private key info: * flags:2, * CN(len:2,value:), * ID(len:2,value:(SHA1 value)), * StartDate(Ascii:8) * EndDate(Ascii:8) * Subject in ASN.1(len:2,value:) * modulus(value:) * exponent(length:1, value:3) */ static int sc_pkcs15emu_oberthur_add_prvkey(struct sc_pkcs15_card *p15card, unsigned int file_id, unsigned int size) { struct sc_context *ctx = p15card->card->ctx; struct sc_pkcs15_prkey_info kinfo; struct sc_pkcs15_object kobj; struct crypto_container ccont; unsigned char *info_blob = NULL; size_t info_len = 0; unsigned flags; size_t offs, len; char ch_tmp[0x100]; int rv; LOG_FUNC_CALLED(ctx); sc_log(ctx, "add private key(file-id:%04X,size:%04X)", file_id, size); memset(&kinfo, 0, sizeof(kinfo)); memset(&kobj, 0, sizeof(kobj)); memset(&ccont, 0, 
sizeof(ccont)); rv = sc_oberthur_get_friends (file_id, &ccont); LOG_TEST_RET(ctx, rv, "Failed to add private key: get friends error"); if (ccont.id_cert) { struct sc_pkcs15_object *objs[32]; int ii; sc_log(ctx, "friend certificate %04X", ccont.id_cert); rv = sc_pkcs15_get_objects(p15card, SC_PKCS15_TYPE_CERT_X509, objs, 32); LOG_TEST_RET(ctx, rv, "Failed to add private key: get certificates error"); for (ii=0; ii<rv; ii++) { struct sc_pkcs15_cert_info *cert = (struct sc_pkcs15_cert_info *)objs[ii]->data; struct sc_path path = cert->path; unsigned int id = path.value[path.len - 2] * 0x100 + path.value[path.len - 1]; if (id == ccont.id_cert) { strlcpy(kobj.label, objs[ii]->label, sizeof(kobj.label)); break; } } if (ii == rv) LOG_TEST_RET(ctx, SC_ERROR_INCONSISTENT_PROFILE, "Failed to add private key: friend not found"); } snprintf(ch_tmp, sizeof(ch_tmp), "%s%04X", AWP_OBJECTS_DF_PRV, file_id | 0x100); rv = sc_oberthur_read_file(p15card, ch_tmp, &info_blob, &info_len, 1); LOG_TEST_RET(ctx, rv, "Failed to add private key: read oberthur file error"); if (info_len < 2) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add private key: no 'tag'"); flags = *(info_blob + 0) * 0x100 + *(info_blob + 1); offs = 2; /* CN */ if (offs > info_len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add private key: no 'CN'"); len = *(info_blob + offs + 1) + *(info_blob + offs) * 0x100; if (len && !strlen(kobj.label)) { if (len > sizeof(kobj.label) - 1) len = sizeof(kobj.label) - 1; strncpy(kobj.label, (char *)(info_blob + offs + 2), len); } offs += 2 + len; /* ID */ if (offs > info_len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add private key: no 'ID'"); len = *(info_blob + offs + 1) + *(info_blob + offs) * 0x100; if (!len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add private key: zero length ID"); else if (len > sizeof(kinfo.id.value)) LOG_TEST_RET(ctx, SC_ERROR_INVALID_DATA, "Failed to add private key: invalid ID length"); memcpy(kinfo.id.value, info_blob + offs + 2, len); kinfo.id.len = len; offs += 2 + len; /* Ignore Start/End dates */ offs += 16; /* Subject encoded in ASN1 */ if (offs > info_len) return SC_ERROR_UNKNOWN_DATA_RECEIVED; len = *(info_blob + offs + 1) + *(info_blob + offs) * 0x100; if (len) { kinfo.subject.value = malloc(len); if (!kinfo.subject.value) LOG_TEST_RET(ctx, SC_ERROR_OUT_OF_MEMORY, "Failed to add private key: memory allocation error"); kinfo.subject.len = len; memcpy(kinfo.subject.value, info_blob + offs + 2, len); } /* Modulus and exponent are ignored */ snprintf(ch_tmp, sizeof(ch_tmp), "%s%04X", AWP_OBJECTS_DF_PRV, file_id); sc_format_path(ch_tmp, &kinfo.path); sc_log(ctx, "Private key info path %s", ch_tmp); kinfo.modulus_length = size; kinfo.native = 1; kinfo.key_reference = file_id & 0xFF; kinfo.usage = sc_oberthur_decode_usage(flags); kobj.flags = SC_PKCS15_CO_FLAG_PRIVATE; if (flags & OBERTHUR_ATTR_MODIFIABLE) kobj.flags |= SC_PKCS15_CO_FLAG_MODIFIABLE; kobj.auth_id.len = sizeof(PinDomainID) > sizeof(kobj.auth_id.value) ? 
sizeof(kobj.auth_id.value) : sizeof(PinDomainID); memcpy(kobj.auth_id.value, PinDomainID, kobj.auth_id.len); sc_log(ctx, "Parsed private key(reference:%i,usage:%X,flags:%X)", kinfo.key_reference, kinfo.usage, kobj.flags); rv = sc_pkcs15emu_add_rsa_prkey(p15card, &kobj, &kinfo); LOG_FUNC_RETURN(ctx, rv); } static int sc_pkcs15emu_oberthur_add_data(struct sc_pkcs15_card *p15card, unsigned int file_id, unsigned int size, int private) { struct sc_context *ctx = p15card->card->ctx; struct sc_pkcs15_data_info dinfo; struct sc_pkcs15_object dobj; unsigned flags; unsigned char *info_blob = NULL, *label = NULL, *app = NULL, *oid = NULL; size_t info_len, label_len, app_len, oid_len, offs; char ch_tmp[0x100]; int rv; SC_FUNC_CALLED(ctx, SC_LOG_DEBUG_VERBOSE); sc_log(ctx, "Add data(file-id:%04X,size:%i,is-private:%i)", file_id, size, private); memset(&dinfo, 0, sizeof(dinfo)); memset(&dobj, 0, sizeof(dobj)); snprintf(ch_tmp, sizeof(ch_tmp), "%s%04X", private ? AWP_OBJECTS_DF_PRV : AWP_OBJECTS_DF_PUB, file_id | 0x100); rv = sc_oberthur_read_file(p15card, ch_tmp, &info_blob, &info_len, 1); LOG_TEST_RET(ctx, rv, "Failed to add data: read oberthur file error"); if (info_len < 2) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add certificate: no 'tag'"); flags = *(info_blob + 0) * 0x100 + *(info_blob + 1); offs = 2; /* Label */ if (offs > info_len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add data: no 'label'"); label = info_blob + offs + 2; label_len = *(info_blob + offs + 1) + *(info_blob + offs) * 0x100; if (label_len > sizeof(dobj.label) - 1) label_len = sizeof(dobj.label) - 1; offs += 2 + *(info_blob + offs + 1); /* Application */ if (offs > info_len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add data: no 'application'"); app = info_blob + offs + 2; app_len = *(info_blob + offs + 1) + *(info_blob + offs) * 0x100; if (app_len > sizeof(dinfo.app_label) - 1) app_len = sizeof(dinfo.app_label) - 1; offs += 2 + app_len; /* OID encode like DER(ASN.1(oid)) */ if (offs > info_len) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add data: no 'OID'"); oid_len = *(info_blob + offs + 1) + *(info_blob + offs) * 0x100; if (oid_len) { oid = info_blob + offs + 2; if (*oid != 0x06 || (*(oid + 1) != oid_len - 2)) LOG_TEST_RET(ctx, SC_ERROR_UNKNOWN_DATA_RECEIVED, "Failed to add data: invalid 'OID' format"); oid += 2; oid_len -= 2; } snprintf(ch_tmp, sizeof(ch_tmp), "%s%04X", private ? AWP_OBJECTS_DF_PRV : AWP_OBJECTS_DF_PUB, file_id); sc_format_path(ch_tmp, &dinfo.path); memcpy(dobj.label, label, label_len); memcpy(dinfo.app_label, app, app_len); if (oid_len) sc_asn1_decode_object_id(oid, oid_len, &dinfo.app_oid); if (flags & OBERTHUR_ATTR_MODIFIABLE) dobj.flags |= SC_PKCS15_CO_FLAG_MODIFIABLE; if (private) { dobj.auth_id.len = sizeof(PinDomainID) > sizeof(dobj.auth_id.value) ? 
sizeof(dobj.auth_id.value) : sizeof(PinDomainID); memcpy(dobj.auth_id.value, PinDomainID, dobj.auth_id.len); dobj.flags |= SC_PKCS15_CO_FLAG_PRIVATE; } rv = sc_pkcs15emu_add_data_object(p15card, &dobj, &dinfo); LOG_FUNC_RETURN(p15card->card->ctx, rv); } static int sc_pkcs15emu_oberthur_init(struct sc_pkcs15_card * p15card) { struct sc_context *ctx = p15card->card->ctx; struct sc_pkcs15_auth_info auth_info; struct sc_pkcs15_object obj; struct sc_card *card = p15card->card; struct sc_path path; int rv, ii, tries_left; char serial[0x10]; unsigned char sopin_reference = 0x04; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); sc_bin_to_hex(card->serialnr.value, card->serialnr.len, serial, sizeof(serial), 0); set_string(&p15card->tokeninfo->serial_number, serial); p15card->ops.parse_df = sc_awp_parse_df; p15card->ops.clear = sc_awp_clear; sc_log(ctx, "Oberthur init: serial %s", p15card->tokeninfo->serial_number); sc_format_path(AWP_PIN_DF, &path); rv = sc_select_file(card, &path, NULL); LOG_TEST_RET(ctx, rv, "Oberthur init failed: cannot select PIN dir"); tries_left = -1; rv = sc_verify(card, SC_AC_CHV, sopin_reference, (unsigned char *)"", 0, &tries_left); if (rv && rv != SC_ERROR_PIN_CODE_INCORRECT) { sopin_reference = 0x84; rv = sc_verify(card, SC_AC_CHV, sopin_reference, (unsigned char *)"", 0, &tries_left); } if (rv && rv != SC_ERROR_PIN_CODE_INCORRECT) LOG_TEST_RET(ctx, rv, "Invalid state of SO-PIN"); /* add PIN */ memset(&auth_info, 0, sizeof(auth_info)); memset(&obj, 0, sizeof(obj)); auth_info.auth_type = SC_PKCS15_PIN_AUTH_TYPE_PIN; auth_info.auth_method = SC_AC_CHV; auth_info.auth_id.len = 1; auth_info.auth_id.value[0] = 0xFF; auth_info.attrs.pin.min_length = 4; auth_info.attrs.pin.max_length = 64; auth_info.attrs.pin.stored_length = 64; auth_info.attrs.pin.type = SC_PKCS15_PIN_TYPE_ASCII_NUMERIC; auth_info.attrs.pin.reference = sopin_reference; auth_info.attrs.pin.pad_char = 0xFF; auth_info.attrs.pin.flags = SC_PKCS15_PIN_FLAG_CASE_SENSITIVE | SC_PKCS15_PIN_FLAG_INITIALIZED | SC_PKCS15_PIN_FLAG_NEEDS_PADDING | SC_PKCS15_PIN_FLAG_SO_PIN; auth_info.tries_left = tries_left; auth_info.logged_in = SC_PIN_STATE_UNKNOWN; strncpy(obj.label, "SO PIN", SC_PKCS15_MAX_LABEL_SIZE-1); obj.flags = SC_PKCS15_CO_FLAG_MODIFIABLE | SC_PKCS15_CO_FLAG_PRIVATE; sc_log(ctx, "Add PIN(%s,auth_id:%s,reference:%i)", obj.label, sc_pkcs15_print_id(&auth_info.auth_id), auth_info.attrs.pin.reference); rv = sc_pkcs15emu_add_pin_obj(p15card, &obj, &auth_info); LOG_TEST_RET(ctx, rv, "Oberthur init failed: cannot add PIN object"); tries_left = -1; rv = sc_verify(card, SC_AC_CHV, 0x81, (unsigned char *)"", 0, &tries_left); if (rv == SC_ERROR_PIN_CODE_INCORRECT) { /* add PIN */ memset(&auth_info, 0, sizeof(auth_info)); memset(&obj, 0, sizeof(obj)); auth_info.auth_id.len = sizeof(PinDomainID) > sizeof(auth_info.auth_id.value) ? 
sizeof(auth_info.auth_id.value) : sizeof(PinDomainID); memcpy(auth_info.auth_id.value, PinDomainID, auth_info.auth_id.len); auth_info.auth_method = SC_AC_CHV; auth_info.attrs.pin.min_length = 4; auth_info.attrs.pin.max_length = 64; auth_info.attrs.pin.stored_length = 64; auth_info.attrs.pin.type = SC_PKCS15_PIN_TYPE_ASCII_NUMERIC; auth_info.attrs.pin.reference = 0x81; auth_info.attrs.pin.pad_char = 0xFF; auth_info.attrs.pin.flags = SC_PKCS15_PIN_FLAG_CASE_SENSITIVE | SC_PKCS15_PIN_FLAG_INITIALIZED | SC_PKCS15_PIN_FLAG_NEEDS_PADDING | SC_PKCS15_PIN_FLAG_LOCAL; auth_info.tries_left = tries_left; strncpy(obj.label, PIN_DOMAIN_LABEL, SC_PKCS15_MAX_LABEL_SIZE-1); obj.flags = SC_PKCS15_CO_FLAG_MODIFIABLE | SC_PKCS15_CO_FLAG_PRIVATE; if (sopin_reference == 0x84) { /* * auth_pin_reset_oberthur_style() in card-oberthur.c * always uses PUK with reference 0x84 for * unblocking of User PIN */ obj.auth_id.len = 1; obj.auth_id.value[0] = 0xFF; } sc_format_path(AWP_PIN_DF, &auth_info.path); auth_info.path.type = SC_PATH_TYPE_PATH; sc_log(ctx, "Add PIN(%s,auth_id:%s,reference:%i)", obj.label, sc_pkcs15_print_id(&auth_info.auth_id), auth_info.attrs.pin.reference); rv = sc_pkcs15emu_add_pin_obj(p15card, &obj, &auth_info); LOG_TEST_RET(ctx, rv, "Oberthur init failed: cannot add PIN object"); } else if (rv != SC_ERROR_DATA_OBJECT_NOT_FOUND) { LOG_TEST_RET(ctx, rv, "Oberthur init failed: cannot verify PIN"); } for (ii=0; oberthur_infos[ii].name; ii++) { sc_log(ctx, "Oberthur init: read %s file", oberthur_infos[ii].name); rv = sc_oberthur_read_file(p15card, oberthur_infos[ii].path, &oberthur_infos[ii].content, &oberthur_infos[ii].len, 1); LOG_TEST_RET(ctx, rv, "Oberthur init failed: read oberthur file error"); sc_log(ctx, "Oberthur init: parse %s file, content length %"SC_FORMAT_LEN_SIZE_T"u", oberthur_infos[ii].name, oberthur_infos[ii].len); rv = oberthur_infos[ii].parser(p15card, oberthur_infos[ii].content, oberthur_infos[ii].len, oberthur_infos[ii].postpone_allowed); LOG_TEST_RET(ctx, rv, "Oberthur init failed: parse error"); } LOG_FUNC_RETURN(ctx, SC_SUCCESS); } static int oberthur_detect_card(struct sc_pkcs15_card * p15card) { struct sc_card *card = p15card->card; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); if (p15card->card->type != SC_CARD_TYPE_OBERTHUR_64K) LOG_FUNC_RETURN(p15card->card->ctx, SC_ERROR_WRONG_CARD); LOG_FUNC_RETURN(p15card->card->ctx, SC_SUCCESS); } int sc_pkcs15emu_oberthur_init_ex(struct sc_pkcs15_card * p15card, struct sc_aid *aid) { int rv; LOG_FUNC_CALLED(p15card->card->ctx); rv = oberthur_detect_card(p15card); if (!rv) rv = sc_pkcs15emu_oberthur_init(p15card); LOG_FUNC_RETURN(p15card->card->ctx, rv); } static int sc_awp_parse_df(struct sc_pkcs15_card *p15card, struct sc_pkcs15_df *df) { struct sc_context *ctx = p15card->card->ctx; unsigned char *buf = NULL; size_t buf_len; int rv; LOG_FUNC_CALLED(ctx); if (df->type != SC_PKCS15_PRKDF && df->type != SC_PKCS15_DODF) LOG_FUNC_RETURN(ctx, SC_ERROR_NOT_SUPPORTED); if (df->enumerated) LOG_FUNC_RETURN(ctx, SC_SUCCESS); rv = sc_oberthur_read_file(p15card, AWP_OBJECTS_LIST_PRV, &buf, &buf_len, 1); LOG_TEST_RET(ctx, rv, "Parse DF: read private objects info failed"); rv = sc_oberthur_parse_privateinfo(p15card, buf, buf_len, 0); if (buf) free(buf); if (rv == SC_ERROR_SECURITY_STATUS_NOT_SATISFIED) LOG_FUNC_RETURN(ctx, SC_SUCCESS); LOG_TEST_RET(ctx, rv, "Parse DF: private info parse error"); df->enumerated = 1; LOG_FUNC_RETURN(ctx, rv); } static void sc_awp_clear(struct sc_pkcs15_card *p15card) { LOG_FUNC_CALLED(p15card->card->ctx); }
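The Oberthur attribute blobs parsed above (labels, IDs, subjects) are sequences of length-prefixed fields: two big-endian length bytes followed by the value, with the running offset checked against the blob length before each read and the value clamped to a fixed-size destination. The following is a minimal standalone sketch of that pattern in C; the function and variable names are invented for illustration and are not OpenSC API.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/*
 * Hypothetical helper (not OpenSC code): walk a blob of
 * length-prefixed fields (2-byte big-endian length followed by the
 * value) and print each value, clamping it into a fixed-size label
 * the way the parser above clamps into key_obj.label.  Returns 0 on
 * success, -1 if an offset or length would run past the blob.
 */
static int walk_lv_fields(const unsigned char *blob, size_t blob_len)
{
        size_t offs = 0;

        while (offs < blob_len) {
                char label[33];
                size_t len;

                /* Need two bytes for the length field itself. */
                if (offs + 2 > blob_len)
                        return -1;
                len = (size_t)blob[offs] * 0x100 + blob[offs + 1];

                /* The value must fit in what remains of the blob. */
                if (len > blob_len - offs - 2)
                        return -1;

                /* Clamp into the fixed-size destination before copying. */
                memset(label, 0, sizeof(label));
                memcpy(label, blob + offs + 2,
                       len < sizeof(label) - 1 ? len : sizeof(label) - 1);
                printf("field: '%s' (%zu bytes)\n", label, len);

                offs += 2 + len;
        }
        return 0;
}

The essential points are that the two length bytes themselves are validated against the remaining input, the decoded length is validated before the memcpy(), and the copy into the fixed label is clamped, which is the intent of the offset checks such as `if (offs + 2 > info_len)` in the functions above.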
null
212
CWE-787
CVE-2020-26572
/* * card-tcos.c: Support for TCOS cards * * Copyright (C) 2011 Peter Koch <pk@opensc-project.org> * Copyright (C) 2002 g10 Code GmbH * Copyright (C) 2001 Juha Yrjölä <juha.yrjola@iki.fi> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #if HAVE_CONFIG_H #include "config.h" #endif #include <string.h> #include <ctype.h> #include <time.h> #include <stdlib.h> #include "internal.h" #include "asn1.h" #include "cardctl.h" static const struct sc_atr_table tcos_atrs[] = { /* Infineon SLE44 */ { "3B:BA:13:00:81:31:86:5D:00:64:05:0A:02:01:31:80:90:00:8B", NULL, NULL, SC_CARD_TYPE_TCOS_V2, 0, NULL }, /* Infineon SLE66S */ { "3B:BA:14:00:81:31:86:5D:00:64:05:14:02:02:31:80:90:00:91", NULL, NULL, SC_CARD_TYPE_TCOS_V2, 0, NULL }, /* Infineon SLE66CX320P */ { "3B:BA:96:00:81:31:86:5D:00:64:05:60:02:03:31:80:90:00:66", NULL, NULL, SC_CARD_TYPE_TCOS_V2, 0, NULL }, /* Infineon SLE66CX322P */ { "3B:BA:96:00:81:31:86:5D:00:64:05:7B:02:03:31:80:90:00:7D", NULL, NULL, SC_CARD_TYPE_TCOS_V2, 0, NULL }, /* Philips P5CT072 */ { "3B:BF:96:00:81:31:FE:5D:00:64:04:11:03:01:31:C0:73:F7:01:D0:00:90:00:7D", NULL, NULL, SC_CARD_TYPE_TCOS_V3, 0, NULL }, { "3B:BF:96:00:81:31:FE:5D:00:64:04:11:04:0F:31:C0:73:F7:01:D0:00:90:00:74", NULL, NULL, SC_CARD_TYPE_TCOS_V3, 0, NULL }, /* Philips P5CT080 */ { "3B:BF:B6:00:81:31:FE:5D:00:64:04:28:03:02:31:C0:73:F7:01:D0:00:90:00:67", NULL, NULL, SC_CARD_TYPE_TCOS_V3, 0, NULL }, { NULL, NULL, NULL, 0, 0, NULL } }; static struct sc_card_operations tcos_ops; static struct sc_card_driver tcos_drv = { "TCOS 3.0", "tcos", &tcos_ops, NULL, 0, NULL }; static const struct sc_card_operations *iso_ops = NULL; typedef struct tcos_data_st { unsigned int pad_flags; unsigned int next_sign; } tcos_data; static int tcos_finish(sc_card_t *card) { free(card->drv_data); return 0; } static int tcos_match_card(sc_card_t *card) { int i; i = _sc_match_atr(card, tcos_atrs, &card->type); if (i < 0) return 0; return 1; } static int tcos_init(sc_card_t *card) { unsigned long flags; tcos_data *data = malloc(sizeof(tcos_data)); if (!data) return SC_ERROR_OUT_OF_MEMORY; card->name = "TCOS"; card->drv_data = (void *)data; card->cla = 0x00; flags = SC_ALGORITHM_RSA_RAW; flags |= SC_ALGORITHM_RSA_PAD_PKCS1; flags |= SC_ALGORITHM_RSA_HASH_NONE; _sc_card_add_rsa_alg(card, 512, flags, 0); _sc_card_add_rsa_alg(card, 768, flags, 0); _sc_card_add_rsa_alg(card, 1024, flags, 0); if (card->type == SC_CARD_TYPE_TCOS_V3) { card->caps |= SC_CARD_CAP_APDU_EXT; _sc_card_add_rsa_alg(card, 1280, flags, 0); _sc_card_add_rsa_alg(card, 1536, flags, 0); _sc_card_add_rsa_alg(card, 1792, flags, 0); _sc_card_add_rsa_alg(card, 2048, flags, 0); } return 0; } /* Hmmm, I don't know what to do. It seems that the ACL design of OpenSC should be enhanced to allow for the command based security attributes of TCOS. FIXME: This just allows to create a very basic file. 
*/ static int tcos_construct_fci(const sc_file_t *file, u8 *out, size_t *outlen) { u8 *p = out; u8 buf[64]; size_t n; /* FIXME: possible buffer overflow */ *p++ = 0x6F; /* FCI */ p++; /* File size */ buf[0] = (file->size >> 8) & 0xFF; buf[1] = file->size & 0xFF; sc_asn1_put_tag(0x81, buf, 2, p, 16, &p); /* File descriptor */ n = 0; buf[n] = file->shareable ? 0x40 : 0; switch (file->type) { case SC_FILE_TYPE_WORKING_EF: break; case SC_FILE_TYPE_DF: buf[0] |= 0x38; break; default: return SC_ERROR_NOT_SUPPORTED; } buf[n++] |= file->ef_structure & 7; if ( (file->ef_structure & 7) > 1) { /* record structured file */ buf[n++] = 0x41; /* indicate 3rd byte */ buf[n++] = file->record_length; } sc_asn1_put_tag(0x82, buf, n, p, 8, &p); /* File identifier */ buf[0] = (file->id >> 8) & 0xFF; buf[1] = file->id & 0xFF; sc_asn1_put_tag(0x83, buf, 2, p, 16, &p); /* Directory name */ if (file->type == SC_FILE_TYPE_DF) { if (file->namelen) { sc_asn1_put_tag(0x84, file->name, file->namelen, p, 16, &p); } else { /* TCOS needs one, so we use a faked one */ snprintf ((char *) buf, sizeof(buf)-1, "foo-%lu", (unsigned long) time (NULL)); sc_asn1_put_tag(0x84, buf, strlen ((char *) buf), p, 16, &p); } } /* File descriptor extension */ if (file->prop_attr_len && file->prop_attr) { n = file->prop_attr_len; memcpy(buf, file->prop_attr, n); } else { n = 0; buf[n++] = 0x01; /* not invalidated, permanent */ if (file->type == SC_FILE_TYPE_WORKING_EF) buf[n++] = 0x00; /* generic data file */ } sc_asn1_put_tag(0x85, buf, n, p, 16, &p); /* Security attributes */ if (file->sec_attr_len && file->sec_attr) { memcpy(buf, file->sec_attr, file->sec_attr_len); n = file->sec_attr_len; } else { /* no attributes given - fall back to default one */ memcpy (buf+ 0, "\xa4\x00\x00\x00\xff\xff", 6); /* select */ memcpy (buf+ 6, "\xb0\x00\x00\x00\xff\xff", 6); /* read bin */ memcpy (buf+12, "\xd6\x00\x00\x00\xff\xff", 6); /* upd bin */ memcpy (buf+18, "\x60\x00\x00\x00\xff\xff", 6); /* admin grp*/ n = 24; } sc_asn1_put_tag(0x86, buf, n, p, sizeof (buf), &p); /* fixup length of FCI */ out[1] = p - out - 2; *outlen = p - out; return 0; } static int tcos_create_file(sc_card_t *card, sc_file_t *file) { int r; size_t len; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE]; sc_apdu_t apdu; len = SC_MAX_APDU_BUFFER_SIZE; r = tcos_construct_fci(file, sbuf, &len); LOG_TEST_RET(card->ctx, r, "tcos_construct_fci() failed"); sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0xE0, 0x00, 0x00); apdu.cla |= 0x80; /* this is an proprietary extension */ apdu.lc = len; apdu.datalen = len; apdu.data = sbuf; r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); return sc_check_sw(card, apdu.sw1, apdu.sw2); } static unsigned int map_operations (int commandbyte) { unsigned int op = (unsigned int)-1; switch ( (commandbyte & 0xfe) ) { case 0xe2: /* append record */ op = SC_AC_OP_UPDATE; break; case 0x24: /* change password */ op = SC_AC_OP_UPDATE; break; case 0xe0: /* create */ op = SC_AC_OP_CREATE; break; case 0xe4: /* delete */ op = SC_AC_OP_DELETE; break; case 0xe8: /* exclude sfi */ op = SC_AC_OP_WRITE; break; case 0x82: /* external auth */ op = SC_AC_OP_READ; break; case 0xe6: /* include sfi */ op = SC_AC_OP_WRITE; break; case 0x88: /* internal auth */ op = SC_AC_OP_READ; break; case 0x04: /* invalidate */ op = SC_AC_OP_INVALIDATE; break; case 0x2a: /* perform sec. 
op */ op = SC_AC_OP_SELECT; break; case 0xb0: /* read binary */ op = SC_AC_OP_READ; break; case 0xb2: /* read record */ op = SC_AC_OP_READ; break; case 0x44: /* rehabilitate */ op = SC_AC_OP_REHABILITATE; break; case 0xa4: /* select */ op = SC_AC_OP_SELECT; break; case 0xee: /* set permanent */ op = SC_AC_OP_CREATE; break; case 0x2c: /* unblock password */op = SC_AC_OP_WRITE; break; case 0xd6: /* update binary */ op = SC_AC_OP_WRITE; break; case 0xdc: /* update record */ op = SC_AC_OP_WRITE; break; case 0x20: /* verify password */ op = SC_AC_OP_SELECT; break; case 0x60: /* admin group */ op = SC_AC_OP_CREATE; break; } return op; } /* Hmmm, I don't know what to do. It seems that the ACL design of OpenSC should be enhanced to allow for the command based security attributes of TCOS. FIXME: This just allows to create a very basic file. */ static void parse_sec_attr(sc_card_t *card, sc_file_t *file, const u8 *buf, size_t len) { unsigned int op; /* list directory is not covered by ACLs - so always add an entry */ sc_file_add_acl_entry (file, SC_AC_OP_LIST_FILES, SC_AC_NONE, SC_AC_KEY_REF_NONE); /* FIXME: check for what LOCK is used */ sc_file_add_acl_entry (file, SC_AC_OP_LOCK, SC_AC_NONE, SC_AC_KEY_REF_NONE); for (; len >= 6; len -= 6, buf += 6) { /* FIXME: temporary hacks */ if (!memcmp(buf, "\xa4\x00\x00\x00\xff\xff", 6)) {/* select */ sc_file_add_acl_entry (file, SC_AC_OP_SELECT, SC_AC_NONE, SC_AC_KEY_REF_NONE); } else if (!memcmp(buf, "\xb0\x00\x00\x00\xff\xff", 6)) {/*read*/ sc_file_add_acl_entry (file, SC_AC_OP_READ, SC_AC_NONE, SC_AC_KEY_REF_NONE); } else if (!memcmp(buf, "\xd6\x00\x00\x00\xff\xff", 6)) {/*upd*/ sc_file_add_acl_entry (file, SC_AC_OP_UPDATE, SC_AC_NONE, SC_AC_KEY_REF_NONE); } else if (!memcmp(buf, "\x60\x00\x00\x00\xff\xff", 6)) {/*adm */ sc_file_add_acl_entry (file, SC_AC_OP_WRITE, SC_AC_NONE, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry (file, SC_AC_OP_CREATE, SC_AC_NONE, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry (file, SC_AC_OP_INVALIDATE, SC_AC_NONE, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry (file, SC_AC_OP_REHABILITATE, SC_AC_NONE, SC_AC_KEY_REF_NONE); } else { /* the first byte tells use the command or the command group. 
We have to mask bit 0 because this one distinguish between AND/OR combination of PINs*/ op = map_operations (buf[0]); if (op == (unsigned int)-1) { sc_log(card->ctx, "Unknown security command byte %02x\n", buf[0]); continue; } if (!buf[1]) sc_file_add_acl_entry (file, op, SC_AC_NONE, SC_AC_KEY_REF_NONE); else sc_file_add_acl_entry (file, op, SC_AC_CHV, buf[1]); if (!buf[2] && !buf[3]) sc_file_add_acl_entry (file, op, SC_AC_NONE, SC_AC_KEY_REF_NONE); else sc_file_add_acl_entry (file, op, SC_AC_TERM, (buf[2]<<8)|buf[3]); } } } static int tcos_select_file(sc_card_t *card, const sc_path_t *in_path, sc_file_t **file_out) { sc_context_t *ctx; sc_apdu_t apdu; sc_file_t *file=NULL; u8 buf[SC_MAX_APDU_BUFFER_SIZE], pathbuf[SC_MAX_PATH_SIZE], *path = pathbuf; int r, pathlen; assert(card != NULL && in_path != NULL); ctx=card->ctx; memcpy(path, in_path->value, in_path->len); pathlen = in_path->len; sc_format_apdu(card, &apdu, SC_APDU_CASE_4_SHORT, 0xA4, 0, 0x04); switch (in_path->type) { case SC_PATH_TYPE_FILE_ID: if (pathlen != 2) return SC_ERROR_INVALID_ARGUMENTS; /* fall through */ case SC_PATH_TYPE_FROM_CURRENT: apdu.p1 = 9; break; case SC_PATH_TYPE_DF_NAME: apdu.p1 = 4; break; case SC_PATH_TYPE_PATH: apdu.p1 = 8; if (pathlen >= 2 && memcmp(path, "\x3F\x00", 2) == 0) path += 2, pathlen -= 2; if (pathlen == 0) apdu.p1 = 0; break; case SC_PATH_TYPE_PARENT: apdu.p1 = 3; pathlen = 0; break; default: SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_INVALID_ARGUMENTS); } if( pathlen == 0 ) apdu.cse = SC_APDU_CASE_2_SHORT; apdu.lc = pathlen; apdu.data = path; apdu.datalen = pathlen; if (file_out != NULL) { apdu.resp = buf; apdu.resplen = sizeof(buf); apdu.le = 256; } else { apdu.resplen = 0; apdu.le = 0; apdu.p2 = 0x0C; apdu.cse = (pathlen == 0) ? SC_APDU_CASE_1 : SC_APDU_CASE_3_SHORT; } r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(ctx, r, "APDU transmit failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); if (r || file_out == NULL) SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_VERBOSE, r); if (apdu.resplen < 1 || apdu.resp[0] != 0x62) { sc_log(ctx, "received invalid template %02X\n", apdu.resp[0]); SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_UNKNOWN_DATA_RECEIVED); } file = sc_file_new(); if (file == NULL) LOG_FUNC_RETURN(ctx, SC_ERROR_OUT_OF_MEMORY); *file_out = file; file->path = *in_path; iso_ops->process_fci(card, file, apdu.resp, apdu.resplen); parse_sec_attr(card, file, file->sec_attr, file->sec_attr_len); return 0; } static int tcos_list_files(sc_card_t *card, u8 *buf, size_t buflen) { sc_context_t *ctx; sc_apdu_t apdu; u8 rbuf[SC_MAX_APDU_BUFFER_SIZE], p1; int r, count = 0; assert(card != NULL); ctx = card->ctx; for (p1=1; p1<=2; p1++) { sc_format_apdu(card, &apdu, SC_APDU_CASE_2_SHORT, 0xAA, p1, 0); apdu.cla = 0x80; apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = 256; r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(ctx, r, "APDU transmit failed"); if (apdu.sw1==0x6A && (apdu.sw2==0x82 || apdu.sw2==0x88)) continue; r = sc_check_sw(card, apdu.sw1, apdu.sw2); LOG_TEST_RET(ctx, r, "List Dir failed"); if (apdu.resplen > buflen) return SC_ERROR_BUFFER_TOO_SMALL; sc_log(ctx, "got %"SC_FORMAT_LEN_SIZE_T"u %s-FileIDs\n", apdu.resplen / 2, p1 == 1 ? 
"DF" : "EF"); memcpy(buf, apdu.resp, apdu.resplen); buf += apdu.resplen; buflen -= apdu.resplen; count += apdu.resplen; } return count; } static int tcos_delete_file(sc_card_t *card, const sc_path_t *path) { int r; u8 sbuf[2]; sc_apdu_t apdu; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); if (path->type != SC_PATH_TYPE_FILE_ID && path->len != 2) { sc_log(card->ctx, "File type has to be SC_PATH_TYPE_FILE_ID\n"); LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_ARGUMENTS); } sbuf[0] = path->value[0]; sbuf[1] = path->value[1]; sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0xE4, 0x00, 0x00); apdu.cla |= 0x80; apdu.lc = 2; apdu.datalen = 2; apdu.data = sbuf; r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); return sc_check_sw(card, apdu.sw1, apdu.sw2); } static int tcos_set_security_env(sc_card_t *card, const sc_security_env_t *env, int se_num) { sc_context_t *ctx; sc_apdu_t apdu; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE], *p; int r, default_key, tcos3; tcos_data *data; assert(card != NULL && env != NULL); ctx = card->ctx; tcos3=(card->type==SC_CARD_TYPE_TCOS_V3); data=(tcos_data *)card->drv_data; if (se_num || (env->operation!=SC_SEC_OPERATION_DECIPHER && env->operation!=SC_SEC_OPERATION_SIGN)) { LOG_FUNC_RETURN(ctx, SC_ERROR_INVALID_ARGUMENTS); } if(!(env->flags & SC_SEC_ENV_KEY_REF_PRESENT)) sc_log(ctx, "No Key-Reference in SecEnvironment\n"); else sc_log(ctx, "Key-Reference %02X (len=%"SC_FORMAT_LEN_SIZE_T"u)\n", env->key_ref[0], env->key_ref_len); /* Key-Reference 0x80 ?? */ default_key= !(env->flags & SC_SEC_ENV_KEY_REF_PRESENT) || (env->key_ref_len==1 && env->key_ref[0]==0x80); sc_log(ctx, "TCOS3:%d PKCS1:%d\n", tcos3, !!(env->algorithm_flags & SC_ALGORITHM_RSA_PAD_PKCS1)); data->pad_flags = env->algorithm_flags; data->next_sign = default_key; sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0x22, tcos3 ? 0x41 : 0xC1, 0xB8); p = sbuf; if (env->flags & SC_SEC_ENV_KEY_REF_PRESENT) { *p++ = (env->flags & SC_SEC_ENV_KEY_REF_SYMMETRIC) ? 0x83 : 0x84; *p++ = env->key_ref_len; memcpy(p, env->key_ref, env->key_ref_len); p += env->key_ref_len; } apdu.data = sbuf; apdu.lc = apdu.datalen = (p - sbuf); r=sc_transmit_apdu(card, &apdu); if (r) { sc_log(ctx, "%s: APDU transmit failed", sc_strerror(r)); return r; } if (apdu.sw1==0x6A && (apdu.sw2==0x81 || apdu.sw2==0x88)) { sc_log(ctx, "Detected Signature-Only key\n"); if (env->operation==SC_SEC_OPERATION_SIGN && default_key) return SC_SUCCESS; } SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_VERBOSE, sc_check_sw(card, apdu.sw1, apdu.sw2)); } static int tcos_restore_security_env(sc_card_t *card, int se_num) { return 0; } static int tcos_compute_signature(sc_card_t *card, const u8 * data, size_t datalen, u8 * out, size_t outlen) { size_t i, dlen=datalen; sc_apdu_t apdu; u8 rbuf[SC_MAX_APDU_BUFFER_SIZE]; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE]; int tcos3, r; assert(card != NULL && data != NULL && out != NULL); tcos3=(card->type==SC_CARD_TYPE_TCOS_V3); // We can sign (key length / 8) bytes if (datalen > 256) SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_INVALID_ARGUMENTS); if(((tcos_data *)card->drv_data)->next_sign) { if(datalen>48) { sc_log(card->ctx, "Data to be signed is too long (TCOS supports max. 48 bytes)\n"); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_INVALID_ARGUMENTS); } sc_format_apdu(card, &apdu, SC_APDU_CASE_4_SHORT, 0x2A, 0x9E, 0x9A); memcpy(sbuf, data, datalen); dlen=datalen; } else { int keylen= tcos3 ? 256 : 128; sc_format_apdu(card, &apdu, keylen>255 ? 
SC_APDU_CASE_4_EXT : SC_APDU_CASE_4_SHORT, 0x2A,0x80,0x86); for(i=0; i<sizeof(sbuf);++i) sbuf[i]=0xff; sbuf[0]=0x02; sbuf[1]=0x00; sbuf[2]=0x01; sbuf[keylen-datalen]=0x00; memcpy(sbuf+keylen-datalen+1, data, datalen); dlen=keylen+1; } apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = tcos3 ? 256 : 128; apdu.data = sbuf; apdu.lc = apdu.datalen = dlen; r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); if (tcos3 && apdu.p1==0x80 && apdu.sw1==0x6A && apdu.sw2==0x87) { int keylen=128; sc_format_apdu(card, &apdu, SC_APDU_CASE_4_SHORT, 0x2A,0x80,0x86); for(i=0; i<sizeof(sbuf);++i) sbuf[i]=0xff; sbuf[0]=0x02; sbuf[1]=0x00; sbuf[2]=0x01; sbuf[keylen-datalen]=0x00; memcpy(sbuf+keylen-datalen+1, data, datalen); dlen=keylen+1; apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = 128; apdu.data = sbuf; apdu.lc = apdu.datalen = dlen; r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); } if (apdu.sw1==0x90 && apdu.sw2==0x00) { size_t len = apdu.resplen>outlen ? outlen : apdu.resplen; memcpy(out, apdu.resp, len); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, len); } SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, sc_check_sw(card, apdu.sw1, apdu.sw2)); } static int tcos_decipher(sc_card_t *card, const u8 * crgram, size_t crgram_len, u8 * out, size_t outlen) { sc_context_t *ctx; sc_apdu_t apdu; u8 rbuf[SC_MAX_APDU_BUFFER_SIZE]; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE]; tcos_data *data; int tcos3, r; assert(card != NULL && crgram != NULL && out != NULL); ctx = card->ctx; tcos3=(card->type==SC_CARD_TYPE_TCOS_V3); data=(tcos_data *)card->drv_data; LOG_FUNC_CALLED(ctx); sc_log(ctx, "TCOS3:%d PKCS1:%d\n",tcos3, !!(data->pad_flags & SC_ALGORITHM_RSA_PAD_PKCS1)); sc_format_apdu(card, &apdu, crgram_len>255 ? SC_APDU_CASE_4_EXT : SC_APDU_CASE_4_SHORT, 0x2A, 0x80, 0x86); apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = crgram_len; apdu.data = sbuf; apdu.lc = apdu.datalen = crgram_len+1; sbuf[0] = tcos3 ? 0x00 : ((data->pad_flags & SC_ALGORITHM_RSA_PAD_PKCS1) ? 0x81 : 0x02); memcpy(sbuf+1, crgram, crgram_len); r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); if (apdu.sw1==0x90 && apdu.sw2==0x00) { size_t len= (apdu.resplen>outlen) ? outlen : apdu.resplen; unsigned int offset=0; if(tcos3 && (data->pad_flags & SC_ALGORITHM_RSA_PAD_PKCS1) && apdu.resp[0]==0 && apdu.resp[1]==2) { offset=2; while(offset<len && apdu.resp[offset]!=0) ++offset; offset=(offset<len-1) ? offset+1 : 0; } memcpy(out, apdu.resp+offset, len-offset); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, len-offset); } SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, sc_check_sw(card, apdu.sw1, apdu.sw2)); } /* Issue the SET PERMANENT command. With ENABLE_NULLPIN set the NullPIN method will be activated, otherwise the permanent operation will be done on the active file. 
*/ static int tcos_setperm(sc_card_t *card, int enable_nullpin) { int r; sc_apdu_t apdu; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); sc_format_apdu(card, &apdu, SC_APDU_CASE_1, 0xEE, 0x00, 0x00); apdu.cla |= 0x80; apdu.lc = 0; apdu.datalen = 0; apdu.data = NULL; r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); return sc_check_sw(card, apdu.sw1, apdu.sw2); } static int tcos_get_serialnr(sc_card_t *card, sc_serial_number_t *serial) { int r; if (!serial) return SC_ERROR_INVALID_ARGUMENTS; /* see if we have cached serial number */ if (card->serialnr.len) { memcpy(serial, &card->serialnr, sizeof(*serial)); return SC_SUCCESS; } card->serialnr.len = sizeof card->serialnr.value; r = sc_parse_ef_gdo(card, card->serialnr.value, &card->serialnr.len, NULL, 0); if (r < 0) { card->serialnr.len = 0; return r; } /* copy and return serial number */ memcpy(serial, &card->serialnr, sizeof(*serial)); return SC_SUCCESS; } static int tcos_card_ctl(sc_card_t *card, unsigned long cmd, void *ptr) { switch (cmd) { case SC_CARDCTL_TCOS_SETPERM: return tcos_setperm(card, !!ptr); case SC_CARDCTL_GET_SERIALNR: return tcos_get_serialnr(card, (sc_serial_number_t *)ptr); } return SC_ERROR_NOT_SUPPORTED; } struct sc_card_driver * sc_get_tcos_driver(void) { struct sc_card_driver *iso_drv = sc_get_iso7816_driver(); if (iso_ops == NULL) iso_ops = iso_drv->ops; tcos_ops = *iso_drv->ops; tcos_ops.match_card = tcos_match_card; tcos_ops.init = tcos_init; tcos_ops.finish = tcos_finish; tcos_ops.create_file = tcos_create_file; tcos_ops.set_security_env = tcos_set_security_env; tcos_ops.select_file = tcos_select_file; tcos_ops.list_files = tcos_list_files; tcos_ops.delete_file = tcos_delete_file; tcos_ops.compute_signature = tcos_compute_signature; tcos_ops.decipher = tcos_decipher; tcos_ops.restore_security_env = tcos_restore_security_env; tcos_ops.card_ctl = tcos_card_ctl; return &tcos_drv; }
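The tcos_construct_fci() routine in this entry carries the comment "FIXME: possible buffer overflow": it appends tag/length/value records to the caller's buffer without tracking how much room remains. As an illustration only, and not the OpenSC sc_asn1_put_tag() interface, a space-checked short-form TLV append could look like the sketch below; all names are invented.

#include <stddef.h>
#include <string.h>

/*
 * Hypothetical helper (not OpenSC code): append a short-form
 * ASN.1-style TLV (tag, 1-byte length, value) at *pos, refusing to
 * write past end.  Returns 0 on success, -1 if the value needs a
 * long-form length or the output buffer has no room left.
 */
static int put_tlv(unsigned char tag, const unsigned char *val, size_t len,
                   unsigned char **pos, const unsigned char *end)
{
        if (len > 0x7f)                         /* short form only */
                return -1;
        if (end < *pos || (size_t)(end - *pos) < 2 + len)
                return -1;                      /* tag + length + value */

        *(*pos)++ = tag;
        *(*pos)++ = (unsigned char)len;
        memcpy(*pos, val, len);
        *pos += len;
        return 0;
}

A caller would set pos to the start of the output buffer and end to one past its last byte, so a failed append can be reported (for example as SC_ERROR_BUFFER_TOO_SMALL) instead of silently overrunning the FCI buffer.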
null
/* * card-tcos.c: Support for TCOS cards * * Copyright (C) 2011 Peter Koch <pk@opensc-project.org> * Copyright (C) 2002 g10 Code GmbH * Copyright (C) 2001 Juha Yrjölä <juha.yrjola@iki.fi> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #if HAVE_CONFIG_H #include "config.h" #endif #include <string.h> #include <ctype.h> #include <time.h> #include <stdlib.h> #include "internal.h" #include "asn1.h" #include "cardctl.h" static const struct sc_atr_table tcos_atrs[] = { /* Infineon SLE44 */ { "3B:BA:13:00:81:31:86:5D:00:64:05:0A:02:01:31:80:90:00:8B", NULL, NULL, SC_CARD_TYPE_TCOS_V2, 0, NULL }, /* Infineon SLE66S */ { "3B:BA:14:00:81:31:86:5D:00:64:05:14:02:02:31:80:90:00:91", NULL, NULL, SC_CARD_TYPE_TCOS_V2, 0, NULL }, /* Infineon SLE66CX320P */ { "3B:BA:96:00:81:31:86:5D:00:64:05:60:02:03:31:80:90:00:66", NULL, NULL, SC_CARD_TYPE_TCOS_V2, 0, NULL }, /* Infineon SLE66CX322P */ { "3B:BA:96:00:81:31:86:5D:00:64:05:7B:02:03:31:80:90:00:7D", NULL, NULL, SC_CARD_TYPE_TCOS_V2, 0, NULL }, /* Philips P5CT072 */ { "3B:BF:96:00:81:31:FE:5D:00:64:04:11:03:01:31:C0:73:F7:01:D0:00:90:00:7D", NULL, NULL, SC_CARD_TYPE_TCOS_V3, 0, NULL }, { "3B:BF:96:00:81:31:FE:5D:00:64:04:11:04:0F:31:C0:73:F7:01:D0:00:90:00:74", NULL, NULL, SC_CARD_TYPE_TCOS_V3, 0, NULL }, /* Philips P5CT080 */ { "3B:BF:B6:00:81:31:FE:5D:00:64:04:28:03:02:31:C0:73:F7:01:D0:00:90:00:67", NULL, NULL, SC_CARD_TYPE_TCOS_V3, 0, NULL }, { NULL, NULL, NULL, 0, 0, NULL } }; static struct sc_card_operations tcos_ops; static struct sc_card_driver tcos_drv = { "TCOS 3.0", "tcos", &tcos_ops, NULL, 0, NULL }; static const struct sc_card_operations *iso_ops = NULL; typedef struct tcos_data_st { unsigned int pad_flags; unsigned int next_sign; } tcos_data; static int tcos_finish(sc_card_t *card) { free(card->drv_data); return 0; } static int tcos_match_card(sc_card_t *card) { int i; i = _sc_match_atr(card, tcos_atrs, &card->type); if (i < 0) return 0; return 1; } static int tcos_init(sc_card_t *card) { unsigned long flags; tcos_data *data = malloc(sizeof(tcos_data)); if (!data) return SC_ERROR_OUT_OF_MEMORY; card->name = "TCOS"; card->drv_data = (void *)data; card->cla = 0x00; flags = SC_ALGORITHM_RSA_RAW; flags |= SC_ALGORITHM_RSA_PAD_PKCS1; flags |= SC_ALGORITHM_RSA_HASH_NONE; _sc_card_add_rsa_alg(card, 512, flags, 0); _sc_card_add_rsa_alg(card, 768, flags, 0); _sc_card_add_rsa_alg(card, 1024, flags, 0); if (card->type == SC_CARD_TYPE_TCOS_V3) { card->caps |= SC_CARD_CAP_APDU_EXT; _sc_card_add_rsa_alg(card, 1280, flags, 0); _sc_card_add_rsa_alg(card, 1536, flags, 0); _sc_card_add_rsa_alg(card, 1792, flags, 0); _sc_card_add_rsa_alg(card, 2048, flags, 0); } return 0; } /* Hmmm, I don't know what to do. It seems that the ACL design of OpenSC should be enhanced to allow for the command based security attributes of TCOS. FIXME: This just allows to create a very basic file. 
*/ static int tcos_construct_fci(const sc_file_t *file, u8 *out, size_t *outlen) { u8 *p = out; u8 buf[64]; size_t n; /* FIXME: possible buffer overflow */ *p++ = 0x6F; /* FCI */ p++; /* File size */ buf[0] = (file->size >> 8) & 0xFF; buf[1] = file->size & 0xFF; sc_asn1_put_tag(0x81, buf, 2, p, 16, &p); /* File descriptor */ n = 0; buf[n] = file->shareable ? 0x40 : 0; switch (file->type) { case SC_FILE_TYPE_WORKING_EF: break; case SC_FILE_TYPE_DF: buf[0] |= 0x38; break; default: return SC_ERROR_NOT_SUPPORTED; } buf[n++] |= file->ef_structure & 7; if ( (file->ef_structure & 7) > 1) { /* record structured file */ buf[n++] = 0x41; /* indicate 3rd byte */ buf[n++] = file->record_length; } sc_asn1_put_tag(0x82, buf, n, p, 8, &p); /* File identifier */ buf[0] = (file->id >> 8) & 0xFF; buf[1] = file->id & 0xFF; sc_asn1_put_tag(0x83, buf, 2, p, 16, &p); /* Directory name */ if (file->type == SC_FILE_TYPE_DF) { if (file->namelen) { sc_asn1_put_tag(0x84, file->name, file->namelen, p, 16, &p); } else { /* TCOS needs one, so we use a faked one */ snprintf ((char *) buf, sizeof(buf)-1, "foo-%lu", (unsigned long) time (NULL)); sc_asn1_put_tag(0x84, buf, strlen ((char *) buf), p, 16, &p); } } /* File descriptor extension */ if (file->prop_attr_len && file->prop_attr) { n = file->prop_attr_len; memcpy(buf, file->prop_attr, n); } else { n = 0; buf[n++] = 0x01; /* not invalidated, permanent */ if (file->type == SC_FILE_TYPE_WORKING_EF) buf[n++] = 0x00; /* generic data file */ } sc_asn1_put_tag(0x85, buf, n, p, 16, &p); /* Security attributes */ if (file->sec_attr_len && file->sec_attr) { memcpy(buf, file->sec_attr, file->sec_attr_len); n = file->sec_attr_len; } else { /* no attributes given - fall back to default one */ memcpy (buf+ 0, "\xa4\x00\x00\x00\xff\xff", 6); /* select */ memcpy (buf+ 6, "\xb0\x00\x00\x00\xff\xff", 6); /* read bin */ memcpy (buf+12, "\xd6\x00\x00\x00\xff\xff", 6); /* upd bin */ memcpy (buf+18, "\x60\x00\x00\x00\xff\xff", 6); /* admin grp*/ n = 24; } sc_asn1_put_tag(0x86, buf, n, p, sizeof (buf), &p); /* fixup length of FCI */ out[1] = p - out - 2; *outlen = p - out; return 0; } static int tcos_create_file(sc_card_t *card, sc_file_t *file) { int r; size_t len; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE]; sc_apdu_t apdu; len = SC_MAX_APDU_BUFFER_SIZE; r = tcos_construct_fci(file, sbuf, &len); LOG_TEST_RET(card->ctx, r, "tcos_construct_fci() failed"); sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0xE0, 0x00, 0x00); apdu.cla |= 0x80; /* this is an proprietary extension */ apdu.lc = len; apdu.datalen = len; apdu.data = sbuf; r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); return sc_check_sw(card, apdu.sw1, apdu.sw2); } static unsigned int map_operations (int commandbyte) { unsigned int op = (unsigned int)-1; switch ( (commandbyte & 0xfe) ) { case 0xe2: /* append record */ op = SC_AC_OP_UPDATE; break; case 0x24: /* change password */ op = SC_AC_OP_UPDATE; break; case 0xe0: /* create */ op = SC_AC_OP_CREATE; break; case 0xe4: /* delete */ op = SC_AC_OP_DELETE; break; case 0xe8: /* exclude sfi */ op = SC_AC_OP_WRITE; break; case 0x82: /* external auth */ op = SC_AC_OP_READ; break; case 0xe6: /* include sfi */ op = SC_AC_OP_WRITE; break; case 0x88: /* internal auth */ op = SC_AC_OP_READ; break; case 0x04: /* invalidate */ op = SC_AC_OP_INVALIDATE; break; case 0x2a: /* perform sec. 
op */ op = SC_AC_OP_SELECT; break; case 0xb0: /* read binary */ op = SC_AC_OP_READ; break; case 0xb2: /* read record */ op = SC_AC_OP_READ; break; case 0x44: /* rehabilitate */ op = SC_AC_OP_REHABILITATE; break; case 0xa4: /* select */ op = SC_AC_OP_SELECT; break; case 0xee: /* set permanent */ op = SC_AC_OP_CREATE; break; case 0x2c: /* unblock password */op = SC_AC_OP_WRITE; break; case 0xd6: /* update binary */ op = SC_AC_OP_WRITE; break; case 0xdc: /* update record */ op = SC_AC_OP_WRITE; break; case 0x20: /* verify password */ op = SC_AC_OP_SELECT; break; case 0x60: /* admin group */ op = SC_AC_OP_CREATE; break; } return op; } /* Hmmm, I don't know what to do. It seems that the ACL design of OpenSC should be enhanced to allow for the command based security attributes of TCOS. FIXME: This just allows to create a very basic file. */ static void parse_sec_attr(sc_card_t *card, sc_file_t *file, const u8 *buf, size_t len) { unsigned int op; /* list directory is not covered by ACLs - so always add an entry */ sc_file_add_acl_entry (file, SC_AC_OP_LIST_FILES, SC_AC_NONE, SC_AC_KEY_REF_NONE); /* FIXME: check for what LOCK is used */ sc_file_add_acl_entry (file, SC_AC_OP_LOCK, SC_AC_NONE, SC_AC_KEY_REF_NONE); for (; len >= 6; len -= 6, buf += 6) { /* FIXME: temporary hacks */ if (!memcmp(buf, "\xa4\x00\x00\x00\xff\xff", 6)) {/* select */ sc_file_add_acl_entry (file, SC_AC_OP_SELECT, SC_AC_NONE, SC_AC_KEY_REF_NONE); } else if (!memcmp(buf, "\xb0\x00\x00\x00\xff\xff", 6)) {/*read*/ sc_file_add_acl_entry (file, SC_AC_OP_READ, SC_AC_NONE, SC_AC_KEY_REF_NONE); } else if (!memcmp(buf, "\xd6\x00\x00\x00\xff\xff", 6)) {/*upd*/ sc_file_add_acl_entry (file, SC_AC_OP_UPDATE, SC_AC_NONE, SC_AC_KEY_REF_NONE); } else if (!memcmp(buf, "\x60\x00\x00\x00\xff\xff", 6)) {/*adm */ sc_file_add_acl_entry (file, SC_AC_OP_WRITE, SC_AC_NONE, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry (file, SC_AC_OP_CREATE, SC_AC_NONE, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry (file, SC_AC_OP_INVALIDATE, SC_AC_NONE, SC_AC_KEY_REF_NONE); sc_file_add_acl_entry (file, SC_AC_OP_REHABILITATE, SC_AC_NONE, SC_AC_KEY_REF_NONE); } else { /* the first byte tells use the command or the command group. 
We have to mask bit 0 because this one distinguish between AND/OR combination of PINs*/ op = map_operations (buf[0]); if (op == (unsigned int)-1) { sc_log(card->ctx, "Unknown security command byte %02x\n", buf[0]); continue; } if (!buf[1]) sc_file_add_acl_entry (file, op, SC_AC_NONE, SC_AC_KEY_REF_NONE); else sc_file_add_acl_entry (file, op, SC_AC_CHV, buf[1]); if (!buf[2] && !buf[3]) sc_file_add_acl_entry (file, op, SC_AC_NONE, SC_AC_KEY_REF_NONE); else sc_file_add_acl_entry (file, op, SC_AC_TERM, (buf[2]<<8)|buf[3]); } } } static int tcos_select_file(sc_card_t *card, const sc_path_t *in_path, sc_file_t **file_out) { sc_context_t *ctx; sc_apdu_t apdu; sc_file_t *file=NULL; u8 buf[SC_MAX_APDU_BUFFER_SIZE], pathbuf[SC_MAX_PATH_SIZE], *path = pathbuf; int r, pathlen; assert(card != NULL && in_path != NULL); ctx=card->ctx; memcpy(path, in_path->value, in_path->len); pathlen = in_path->len; sc_format_apdu(card, &apdu, SC_APDU_CASE_4_SHORT, 0xA4, 0, 0x04); switch (in_path->type) { case SC_PATH_TYPE_FILE_ID: if (pathlen != 2) return SC_ERROR_INVALID_ARGUMENTS; /* fall through */ case SC_PATH_TYPE_FROM_CURRENT: apdu.p1 = 9; break; case SC_PATH_TYPE_DF_NAME: apdu.p1 = 4; break; case SC_PATH_TYPE_PATH: apdu.p1 = 8; if (pathlen >= 2 && memcmp(path, "\x3F\x00", 2) == 0) path += 2, pathlen -= 2; if (pathlen == 0) apdu.p1 = 0; break; case SC_PATH_TYPE_PARENT: apdu.p1 = 3; pathlen = 0; break; default: SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_INVALID_ARGUMENTS); } if( pathlen == 0 ) apdu.cse = SC_APDU_CASE_2_SHORT; apdu.lc = pathlen; apdu.data = path; apdu.datalen = pathlen; if (file_out != NULL) { apdu.resp = buf; apdu.resplen = sizeof(buf); apdu.le = 256; } else { apdu.resplen = 0; apdu.le = 0; apdu.p2 = 0x0C; apdu.cse = (pathlen == 0) ? SC_APDU_CASE_1 : SC_APDU_CASE_3_SHORT; } r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(ctx, r, "APDU transmit failed"); r = sc_check_sw(card, apdu.sw1, apdu.sw2); if (r || file_out == NULL) SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_VERBOSE, r); if (apdu.resplen < 1 || apdu.resp[0] != 0x62) { sc_log(ctx, "received invalid template %02X\n", apdu.resp[0]); SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_UNKNOWN_DATA_RECEIVED); } file = sc_file_new(); if (file == NULL) LOG_FUNC_RETURN(ctx, SC_ERROR_OUT_OF_MEMORY); *file_out = file; file->path = *in_path; iso_ops->process_fci(card, file, apdu.resp, apdu.resplen); parse_sec_attr(card, file, file->sec_attr, file->sec_attr_len); return 0; } static int tcos_list_files(sc_card_t *card, u8 *buf, size_t buflen) { sc_context_t *ctx; sc_apdu_t apdu; u8 rbuf[SC_MAX_APDU_BUFFER_SIZE], p1; int r, count = 0; assert(card != NULL); ctx = card->ctx; for (p1=1; p1<=2; p1++) { sc_format_apdu(card, &apdu, SC_APDU_CASE_2_SHORT, 0xAA, p1, 0); apdu.cla = 0x80; apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = 256; r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(ctx, r, "APDU transmit failed"); if (apdu.sw1==0x6A && (apdu.sw2==0x82 || apdu.sw2==0x88)) continue; r = sc_check_sw(card, apdu.sw1, apdu.sw2); LOG_TEST_RET(ctx, r, "List Dir failed"); if (apdu.resplen > buflen) return SC_ERROR_BUFFER_TOO_SMALL; sc_log(ctx, "got %"SC_FORMAT_LEN_SIZE_T"u %s-FileIDs\n", apdu.resplen / 2, p1 == 1 ? 
"DF" : "EF"); memcpy(buf, apdu.resp, apdu.resplen); buf += apdu.resplen; buflen -= apdu.resplen; count += apdu.resplen; } return count; } static int tcos_delete_file(sc_card_t *card, const sc_path_t *path) { int r; u8 sbuf[2]; sc_apdu_t apdu; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); if (path->type != SC_PATH_TYPE_FILE_ID && path->len != 2) { sc_log(card->ctx, "File type has to be SC_PATH_TYPE_FILE_ID\n"); LOG_FUNC_RETURN(card->ctx, SC_ERROR_INVALID_ARGUMENTS); } sbuf[0] = path->value[0]; sbuf[1] = path->value[1]; sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0xE4, 0x00, 0x00); apdu.cla |= 0x80; apdu.lc = 2; apdu.datalen = 2; apdu.data = sbuf; r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); return sc_check_sw(card, apdu.sw1, apdu.sw2); } static int tcos_set_security_env(sc_card_t *card, const sc_security_env_t *env, int se_num) { sc_context_t *ctx; sc_apdu_t apdu; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE], *p; int r, default_key, tcos3; tcos_data *data; assert(card != NULL && env != NULL); ctx = card->ctx; tcos3=(card->type==SC_CARD_TYPE_TCOS_V3); data=(tcos_data *)card->drv_data; if (se_num || (env->operation!=SC_SEC_OPERATION_DECIPHER && env->operation!=SC_SEC_OPERATION_SIGN)) { LOG_FUNC_RETURN(ctx, SC_ERROR_INVALID_ARGUMENTS); } if(!(env->flags & SC_SEC_ENV_KEY_REF_PRESENT)) sc_log(ctx, "No Key-Reference in SecEnvironment\n"); else sc_log(ctx, "Key-Reference %02X (len=%"SC_FORMAT_LEN_SIZE_T"u)\n", env->key_ref[0], env->key_ref_len); /* Key-Reference 0x80 ?? */ default_key= !(env->flags & SC_SEC_ENV_KEY_REF_PRESENT) || (env->key_ref_len==1 && env->key_ref[0]==0x80); sc_log(ctx, "TCOS3:%d PKCS1:%d\n", tcos3, !!(env->algorithm_flags & SC_ALGORITHM_RSA_PAD_PKCS1)); data->pad_flags = env->algorithm_flags; data->next_sign = default_key; sc_format_apdu(card, &apdu, SC_APDU_CASE_3_SHORT, 0x22, tcos3 ? 0x41 : 0xC1, 0xB8); p = sbuf; if (env->flags & SC_SEC_ENV_KEY_REF_PRESENT) { *p++ = (env->flags & SC_SEC_ENV_KEY_REF_SYMMETRIC) ? 0x83 : 0x84; *p++ = env->key_ref_len; memcpy(p, env->key_ref, env->key_ref_len); p += env->key_ref_len; } apdu.data = sbuf; apdu.lc = apdu.datalen = (p - sbuf); r=sc_transmit_apdu(card, &apdu); if (r) { sc_log(ctx, "%s: APDU transmit failed", sc_strerror(r)); return r; } if (apdu.sw1==0x6A && (apdu.sw2==0x81 || apdu.sw2==0x88)) { sc_log(ctx, "Detected Signature-Only key\n"); if (env->operation==SC_SEC_OPERATION_SIGN && default_key) return SC_SUCCESS; } SC_FUNC_RETURN(ctx, SC_LOG_DEBUG_VERBOSE, sc_check_sw(card, apdu.sw1, apdu.sw2)); } static int tcos_restore_security_env(sc_card_t *card, int se_num) { return 0; } static int tcos_compute_signature(sc_card_t *card, const u8 * data, size_t datalen, u8 * out, size_t outlen) { size_t i, dlen=datalen; sc_apdu_t apdu; u8 rbuf[SC_MAX_APDU_BUFFER_SIZE]; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE]; int tcos3, r; assert(card != NULL && data != NULL && out != NULL); tcos3=(card->type==SC_CARD_TYPE_TCOS_V3); // We can sign (key length / 8) bytes if (datalen > 256) SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_INVALID_ARGUMENTS); if(((tcos_data *)card->drv_data)->next_sign) { if(datalen>48) { sc_log(card->ctx, "Data to be signed is too long (TCOS supports max. 48 bytes)\n"); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, SC_ERROR_INVALID_ARGUMENTS); } sc_format_apdu(card, &apdu, SC_APDU_CASE_4_SHORT, 0x2A, 0x9E, 0x9A); memcpy(sbuf, data, datalen); dlen=datalen; } else { int keylen= tcos3 ? 256 : 128; sc_format_apdu(card, &apdu, keylen>255 ? 
SC_APDU_CASE_4_EXT : SC_APDU_CASE_4_SHORT, 0x2A,0x80,0x86); for(i=0; i<sizeof(sbuf);++i) sbuf[i]=0xff; sbuf[0]=0x02; sbuf[1]=0x00; sbuf[2]=0x01; sbuf[keylen-datalen]=0x00; memcpy(sbuf+keylen-datalen+1, data, datalen); dlen=keylen+1; } apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = tcos3 ? 256 : 128; apdu.data = sbuf; apdu.lc = apdu.datalen = dlen; r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); if (tcos3 && apdu.p1==0x80 && apdu.sw1==0x6A && apdu.sw2==0x87) { int keylen=128; sc_format_apdu(card, &apdu, SC_APDU_CASE_4_SHORT, 0x2A,0x80,0x86); for(i=0; i<sizeof(sbuf);++i) sbuf[i]=0xff; sbuf[0]=0x02; sbuf[1]=0x00; sbuf[2]=0x01; sbuf[keylen-datalen]=0x00; memcpy(sbuf+keylen-datalen+1, data, datalen); dlen=keylen+1; apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = 128; apdu.data = sbuf; apdu.lc = apdu.datalen = dlen; r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); } if (apdu.sw1==0x90 && apdu.sw2==0x00) { size_t len = apdu.resplen>outlen ? outlen : apdu.resplen; memcpy(out, apdu.resp, len); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, len); } SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, sc_check_sw(card, apdu.sw1, apdu.sw2)); } static int tcos_decipher(sc_card_t *card, const u8 * crgram, size_t crgram_len, u8 * out, size_t outlen) { sc_context_t *ctx; sc_apdu_t apdu; u8 rbuf[SC_MAX_APDU_BUFFER_SIZE]; u8 sbuf[SC_MAX_APDU_BUFFER_SIZE]; tcos_data *data; int tcos3, r; assert(card != NULL && crgram != NULL && out != NULL); ctx = card->ctx; tcos3=(card->type==SC_CARD_TYPE_TCOS_V3); data=(tcos_data *)card->drv_data; LOG_FUNC_CALLED(ctx); sc_log(ctx, "TCOS3:%d PKCS1:%d\n",tcos3, !!(data->pad_flags & SC_ALGORITHM_RSA_PAD_PKCS1)); sc_format_apdu(card, &apdu, crgram_len>255 ? SC_APDU_CASE_4_EXT : SC_APDU_CASE_4_SHORT, 0x2A, 0x80, 0x86); apdu.resp = rbuf; apdu.resplen = sizeof(rbuf); apdu.le = crgram_len; apdu.data = sbuf; apdu.lc = apdu.datalen = crgram_len+1; sbuf[0] = tcos3 ? 0x00 : ((data->pad_flags & SC_ALGORITHM_RSA_PAD_PKCS1) ? 0x81 : 0x02); if (sizeof sbuf - 1 < crgram_len) return SC_ERROR_INVALID_ARGUMENTS; memcpy(sbuf+1, crgram, crgram_len); r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); if (apdu.sw1==0x90 && apdu.sw2==0x00) { size_t len= (apdu.resplen>outlen) ? outlen : apdu.resplen; unsigned int offset=0; if(tcos3 && (data->pad_flags & SC_ALGORITHM_RSA_PAD_PKCS1) && apdu.resp[0]==0 && apdu.resp[1]==2) { offset=2; while(offset<len && apdu.resp[offset]!=0) ++offset; offset=(offset<len-1) ? offset+1 : 0; } memcpy(out, apdu.resp+offset, len-offset); SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, len-offset); } SC_FUNC_RETURN(card->ctx, SC_LOG_DEBUG_VERBOSE, sc_check_sw(card, apdu.sw1, apdu.sw2)); } /* Issue the SET PERMANENT command. With ENABLE_NULLPIN set the NullPIN method will be activated, otherwise the permanent operation will be done on the active file. 
*/ static int tcos_setperm(sc_card_t *card, int enable_nullpin) { int r; sc_apdu_t apdu; SC_FUNC_CALLED(card->ctx, SC_LOG_DEBUG_VERBOSE); sc_format_apdu(card, &apdu, SC_APDU_CASE_1, 0xEE, 0x00, 0x00); apdu.cla |= 0x80; apdu.lc = 0; apdu.datalen = 0; apdu.data = NULL; r = sc_transmit_apdu(card, &apdu); LOG_TEST_RET(card->ctx, r, "APDU transmit failed"); return sc_check_sw(card, apdu.sw1, apdu.sw2); } static int tcos_get_serialnr(sc_card_t *card, sc_serial_number_t *serial) { int r; if (!serial) return SC_ERROR_INVALID_ARGUMENTS; /* see if we have cached serial number */ if (card->serialnr.len) { memcpy(serial, &card->serialnr, sizeof(*serial)); return SC_SUCCESS; } card->serialnr.len = sizeof card->serialnr.value; r = sc_parse_ef_gdo(card, card->serialnr.value, &card->serialnr.len, NULL, 0); if (r < 0) { card->serialnr.len = 0; return r; } /* copy and return serial number */ memcpy(serial, &card->serialnr, sizeof(*serial)); return SC_SUCCESS; } static int tcos_card_ctl(sc_card_t *card, unsigned long cmd, void *ptr) { switch (cmd) { case SC_CARDCTL_TCOS_SETPERM: return tcos_setperm(card, !!ptr); case SC_CARDCTL_GET_SERIALNR: return tcos_get_serialnr(card, (sc_serial_number_t *)ptr); } return SC_ERROR_NOT_SUPPORTED; } struct sc_card_driver * sc_get_tcos_driver(void) { struct sc_card_driver *iso_drv = sc_get_iso7816_driver(); if (iso_ops == NULL) iso_ops = iso_drv->ops; tcos_ops = *iso_drv->ops; tcos_ops.match_card = tcos_match_card; tcos_ops.init = tcos_init; tcos_ops.finish = tcos_finish; tcos_ops.create_file = tcos_create_file; tcos_ops.set_security_env = tcos_set_security_env; tcos_ops.select_file = tcos_select_file; tcos_ops.list_files = tcos_list_files; tcos_ops.delete_file = tcos_delete_file; tcos_ops.compute_signature = tcos_compute_signature; tcos_ops.decipher = tcos_decipher; tcos_ops.restore_security_env = tcos_restore_security_env; tcos_ops.card_ctl = tcos_card_ctl; return &tcos_drv; }
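The visible difference between the source and ground-truth copies of card-tcos.c in this entry is in tcos_decipher(): the patched version adds `if (sizeof sbuf - 1 < crgram_len) return SC_ERROR_INVALID_ARGUMENTS;` before staging the cryptogram, rejecting input that would not fit in the fixed APDU buffer together with the leading padding byte. That is the usual CWE-787 remediation of validating an externally supplied length against the destination size before the write. A minimal, self-contained restatement with invented names:

#include <stddef.h>
#include <string.h>

/*
 * Hypothetical sketch (not OpenSC code): one padding byte plus a
 * caller-supplied cryptogram are staged in a fixed buffer, so the
 * caller-supplied length must be checked against buf_size - 1 before
 * the memcpy(), exactly as the ground-truth tcos_decipher() does
 * with sbuf.  Returns 0 on success, -1 if the input would overflow.
 */
static int stage_cryptogram(unsigned char pad_byte,
                            const unsigned char *crgram, size_t crgram_len,
                            unsigned char *buf, size_t buf_size)
{
        if (buf_size < 1 || buf_size - 1 < crgram_len)
                return -1;      /* would overflow: reject, do not truncate */

        buf[0] = pad_byte;
        memcpy(buf + 1, crgram, crgram_len);
        return 0;
}

Called with a local `unsigned char sbuf[SC_MAX_APDU_BUFFER_SIZE]` and `sizeof(sbuf)`, the guard converts a potential out-of-bounds write into an error return.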
null
213
CWE-787
CVE-2020-27347
/* $OpenBSD$ */ /* * Copyright (c) 2007 Nicholas Marriott <nicholas.marriott@gmail.com> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER * IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <sys/types.h> #include <netinet/in.h> #include <ctype.h> #include <resolv.h> #include <stdlib.h> #include <string.h> #include <time.h> #include "tmux.h" /* * Based on the description by Paul Williams at: * * https://vt100.net/emu/dec_ansi_parser * * With the following changes: * * - 7-bit only. * * - Support for UTF-8. * * - OSC (but not APC) may be terminated by \007 as well as ST. * * - A state for APC similar to OSC. Some terminals appear to use this to set * the title. * * - A state for the screen \033k...\033\\ sequence to rename a window. This is * pretty stupid but not supporting it is more trouble than it is worth. * * - Special handling for ESC inside a DCS to allow arbitrary byte sequences to * be passed to the underlying terminals. */ /* Input parser cell. */ struct input_cell { struct grid_cell cell; int set; int g0set; /* 1 if ACS */ int g1set; /* 1 if ACS */ }; /* Input parser argument. */ struct input_param { enum { INPUT_MISSING, INPUT_NUMBER, INPUT_STRING } type; union { int num; char *str; }; }; /* Input parser context. */ struct input_ctx { struct window_pane *wp; struct bufferevent *event; struct screen_write_ctx ctx; struct input_cell cell; struct input_cell old_cell; u_int old_cx; u_int old_cy; int old_mode; u_char interm_buf[4]; size_t interm_len; u_char param_buf[64]; size_t param_len; #define INPUT_BUF_START 32 #define INPUT_BUF_LIMIT 1048576 u_char *input_buf; size_t input_len; size_t input_space; enum { INPUT_END_ST, INPUT_END_BEL } input_end; struct input_param param_list[24]; u_int param_list_len; struct utf8_data utf8data; int utf8started; int ch; int last; int flags; #define INPUT_DISCARD 0x1 const struct input_state *state; struct event timer; /* * All input received since we were last in the ground state. Sent to * control clients on connection. */ struct evbuffer *since_ground; }; /* Helper functions. */ struct input_transition; static int input_split(struct input_ctx *); static int input_get(struct input_ctx *, u_int, int, int); static void printflike(2, 3) input_reply(struct input_ctx *, const char *, ...); static void input_set_state(struct input_ctx *, const struct input_transition *); static void input_reset_cell(struct input_ctx *); static void input_osc_4(struct input_ctx *, const char *); static void input_osc_10(struct input_ctx *, const char *); static void input_osc_11(struct input_ctx *, const char *); static void input_osc_52(struct input_ctx *, const char *); static void input_osc_104(struct input_ctx *, const char *); /* Transition entry/exit handlers. 
*/ static void input_clear(struct input_ctx *); static void input_ground(struct input_ctx *); static void input_enter_dcs(struct input_ctx *); static void input_enter_osc(struct input_ctx *); static void input_exit_osc(struct input_ctx *); static void input_enter_apc(struct input_ctx *); static void input_exit_apc(struct input_ctx *); static void input_enter_rename(struct input_ctx *); static void input_exit_rename(struct input_ctx *); /* Input state handlers. */ static int input_print(struct input_ctx *); static int input_intermediate(struct input_ctx *); static int input_parameter(struct input_ctx *); static int input_input(struct input_ctx *); static int input_c0_dispatch(struct input_ctx *); static int input_esc_dispatch(struct input_ctx *); static int input_csi_dispatch(struct input_ctx *); static void input_csi_dispatch_rm(struct input_ctx *); static void input_csi_dispatch_rm_private(struct input_ctx *); static void input_csi_dispatch_sm(struct input_ctx *); static void input_csi_dispatch_sm_private(struct input_ctx *); static void input_csi_dispatch_winops(struct input_ctx *); static void input_csi_dispatch_sgr_256(struct input_ctx *, int, u_int *); static void input_csi_dispatch_sgr_rgb(struct input_ctx *, int, u_int *); static void input_csi_dispatch_sgr(struct input_ctx *); static int input_dcs_dispatch(struct input_ctx *); static int input_top_bit_set(struct input_ctx *); static int input_end_bel(struct input_ctx *); /* Command table comparison function. */ static int input_table_compare(const void *, const void *); /* Command table entry. */ struct input_table_entry { int ch; const char *interm; int type; }; /* Escape commands. */ enum input_esc_type { INPUT_ESC_DECALN, INPUT_ESC_DECKPAM, INPUT_ESC_DECKPNM, INPUT_ESC_DECRC, INPUT_ESC_DECSC, INPUT_ESC_HTS, INPUT_ESC_IND, INPUT_ESC_NEL, INPUT_ESC_RI, INPUT_ESC_RIS, INPUT_ESC_SCSG0_OFF, INPUT_ESC_SCSG0_ON, INPUT_ESC_SCSG1_OFF, INPUT_ESC_SCSG1_ON, INPUT_ESC_ST, }; /* Escape command table. */ static const struct input_table_entry input_esc_table[] = { { '0', "(", INPUT_ESC_SCSG0_ON }, { '0', ")", INPUT_ESC_SCSG1_ON }, { '7', "", INPUT_ESC_DECSC }, { '8', "", INPUT_ESC_DECRC }, { '8', "#", INPUT_ESC_DECALN }, { '=', "", INPUT_ESC_DECKPAM }, { '>', "", INPUT_ESC_DECKPNM }, { 'B', "(", INPUT_ESC_SCSG0_OFF }, { 'B', ")", INPUT_ESC_SCSG1_OFF }, { 'D', "", INPUT_ESC_IND }, { 'E', "", INPUT_ESC_NEL }, { 'H', "", INPUT_ESC_HTS }, { 'M', "", INPUT_ESC_RI }, { '\\', "", INPUT_ESC_ST }, { 'c', "", INPUT_ESC_RIS }, }; /* Control (CSI) commands. */ enum input_csi_type { INPUT_CSI_CBT, INPUT_CSI_CNL, INPUT_CSI_CPL, INPUT_CSI_CUB, INPUT_CSI_CUD, INPUT_CSI_CUF, INPUT_CSI_CUP, INPUT_CSI_CUU, INPUT_CSI_DA, INPUT_CSI_DA_TWO, INPUT_CSI_DCH, INPUT_CSI_DECSCUSR, INPUT_CSI_DECSTBM, INPUT_CSI_DL, INPUT_CSI_DSR, INPUT_CSI_ECH, INPUT_CSI_ED, INPUT_CSI_EL, INPUT_CSI_HPA, INPUT_CSI_ICH, INPUT_CSI_IL, INPUT_CSI_MODOFF, INPUT_CSI_MODSET, INPUT_CSI_RCP, INPUT_CSI_REP, INPUT_CSI_RM, INPUT_CSI_RM_PRIVATE, INPUT_CSI_SCP, INPUT_CSI_SD, INPUT_CSI_SGR, INPUT_CSI_SM, INPUT_CSI_SM_PRIVATE, INPUT_CSI_SU, INPUT_CSI_TBC, INPUT_CSI_VPA, INPUT_CSI_WINOPS, INPUT_CSI_XDA, }; /* Control (CSI) command table. 
*/ static const struct input_table_entry input_csi_table[] = { { '@', "", INPUT_CSI_ICH }, { 'A', "", INPUT_CSI_CUU }, { 'B', "", INPUT_CSI_CUD }, { 'C', "", INPUT_CSI_CUF }, { 'D', "", INPUT_CSI_CUB }, { 'E', "", INPUT_CSI_CNL }, { 'F', "", INPUT_CSI_CPL }, { 'G', "", INPUT_CSI_HPA }, { 'H', "", INPUT_CSI_CUP }, { 'J', "", INPUT_CSI_ED }, { 'K', "", INPUT_CSI_EL }, { 'L', "", INPUT_CSI_IL }, { 'M', "", INPUT_CSI_DL }, { 'P', "", INPUT_CSI_DCH }, { 'S', "", INPUT_CSI_SU }, { 'T', "", INPUT_CSI_SD }, { 'X', "", INPUT_CSI_ECH }, { 'Z', "", INPUT_CSI_CBT }, { '`', "", INPUT_CSI_HPA }, { 'b', "", INPUT_CSI_REP }, { 'c', "", INPUT_CSI_DA }, { 'c', ">", INPUT_CSI_DA_TWO }, { 'd', "", INPUT_CSI_VPA }, { 'f', "", INPUT_CSI_CUP }, { 'g', "", INPUT_CSI_TBC }, { 'h', "", INPUT_CSI_SM }, { 'h', "?", INPUT_CSI_SM_PRIVATE }, { 'l', "", INPUT_CSI_RM }, { 'l', "?", INPUT_CSI_RM_PRIVATE }, { 'm', "", INPUT_CSI_SGR }, { 'm', ">", INPUT_CSI_MODSET }, { 'n', "", INPUT_CSI_DSR }, { 'n', ">", INPUT_CSI_MODOFF }, { 'q', " ", INPUT_CSI_DECSCUSR }, { 'q', ">", INPUT_CSI_XDA }, { 'r', "", INPUT_CSI_DECSTBM }, { 's', "", INPUT_CSI_SCP }, { 't', "", INPUT_CSI_WINOPS }, { 'u', "", INPUT_CSI_RCP }, }; /* Input transition. */ struct input_transition { int first; int last; int (*handler)(struct input_ctx *); const struct input_state *state; }; /* Input state. */ struct input_state { const char *name; void (*enter)(struct input_ctx *); void (*exit)(struct input_ctx *); const struct input_transition *transitions; }; /* State transitions available from all states. */ #define INPUT_STATE_ANYWHERE \ { 0x18, 0x18, input_c0_dispatch, &input_state_ground }, \ { 0x1a, 0x1a, input_c0_dispatch, &input_state_ground }, \ { 0x1b, 0x1b, NULL, &input_state_esc_enter } /* Forward declarations of state tables. */ static const struct input_transition input_state_ground_table[]; static const struct input_transition input_state_esc_enter_table[]; static const struct input_transition input_state_esc_intermediate_table[]; static const struct input_transition input_state_csi_enter_table[]; static const struct input_transition input_state_csi_parameter_table[]; static const struct input_transition input_state_csi_intermediate_table[]; static const struct input_transition input_state_csi_ignore_table[]; static const struct input_transition input_state_dcs_enter_table[]; static const struct input_transition input_state_dcs_parameter_table[]; static const struct input_transition input_state_dcs_intermediate_table[]; static const struct input_transition input_state_dcs_handler_table[]; static const struct input_transition input_state_dcs_escape_table[]; static const struct input_transition input_state_dcs_ignore_table[]; static const struct input_transition input_state_osc_string_table[]; static const struct input_transition input_state_apc_string_table[]; static const struct input_transition input_state_rename_string_table[]; static const struct input_transition input_state_consume_st_table[]; /* ground state definition. */ static const struct input_state input_state_ground = { "ground", input_ground, NULL, input_state_ground_table }; /* esc_enter state definition. */ static const struct input_state input_state_esc_enter = { "esc_enter", input_clear, NULL, input_state_esc_enter_table }; /* esc_intermediate state definition. */ static const struct input_state input_state_esc_intermediate = { "esc_intermediate", NULL, NULL, input_state_esc_intermediate_table }; /* csi_enter state definition. 
*/ static const struct input_state input_state_csi_enter = { "csi_enter", input_clear, NULL, input_state_csi_enter_table }; /* csi_parameter state definition. */ static const struct input_state input_state_csi_parameter = { "csi_parameter", NULL, NULL, input_state_csi_parameter_table }; /* csi_intermediate state definition. */ static const struct input_state input_state_csi_intermediate = { "csi_intermediate", NULL, NULL, input_state_csi_intermediate_table }; /* csi_ignore state definition. */ static const struct input_state input_state_csi_ignore = { "csi_ignore", NULL, NULL, input_state_csi_ignore_table }; /* dcs_enter state definition. */ static const struct input_state input_state_dcs_enter = { "dcs_enter", input_enter_dcs, NULL, input_state_dcs_enter_table }; /* dcs_parameter state definition. */ static const struct input_state input_state_dcs_parameter = { "dcs_parameter", NULL, NULL, input_state_dcs_parameter_table }; /* dcs_intermediate state definition. */ static const struct input_state input_state_dcs_intermediate = { "dcs_intermediate", NULL, NULL, input_state_dcs_intermediate_table }; /* dcs_handler state definition. */ static const struct input_state input_state_dcs_handler = { "dcs_handler", NULL, NULL, input_state_dcs_handler_table }; /* dcs_escape state definition. */ static const struct input_state input_state_dcs_escape = { "dcs_escape", NULL, NULL, input_state_dcs_escape_table }; /* dcs_ignore state definition. */ static const struct input_state input_state_dcs_ignore = { "dcs_ignore", NULL, NULL, input_state_dcs_ignore_table }; /* osc_string state definition. */ static const struct input_state input_state_osc_string = { "osc_string", input_enter_osc, input_exit_osc, input_state_osc_string_table }; /* apc_string state definition. */ static const struct input_state input_state_apc_string = { "apc_string", input_enter_apc, input_exit_apc, input_state_apc_string_table }; /* rename_string state definition. */ static const struct input_state input_state_rename_string = { "rename_string", input_enter_rename, input_exit_rename, input_state_rename_string_table }; /* consume_st state definition. */ static const struct input_state input_state_consume_st = { "consume_st", input_enter_rename, NULL, /* rename also waits for ST */ input_state_consume_st_table }; /* ground state table. */ static const struct input_transition input_state_ground_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, input_c0_dispatch, NULL }, { 0x19, 0x19, input_c0_dispatch, NULL }, { 0x1c, 0x1f, input_c0_dispatch, NULL }, { 0x20, 0x7e, input_print, NULL }, { 0x7f, 0x7f, NULL, NULL }, { 0x80, 0xff, input_top_bit_set, NULL }, { -1, -1, NULL, NULL } }; /* esc_enter state table. 
*/ static const struct input_transition input_state_esc_enter_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, input_c0_dispatch, NULL }, { 0x19, 0x19, input_c0_dispatch, NULL }, { 0x1c, 0x1f, input_c0_dispatch, NULL }, { 0x20, 0x2f, input_intermediate, &input_state_esc_intermediate }, { 0x30, 0x4f, input_esc_dispatch, &input_state_ground }, { 0x50, 0x50, NULL, &input_state_dcs_enter }, { 0x51, 0x57, input_esc_dispatch, &input_state_ground }, { 0x58, 0x58, NULL, &input_state_consume_st }, { 0x59, 0x59, input_esc_dispatch, &input_state_ground }, { 0x5a, 0x5a, input_esc_dispatch, &input_state_ground }, { 0x5b, 0x5b, NULL, &input_state_csi_enter }, { 0x5c, 0x5c, input_esc_dispatch, &input_state_ground }, { 0x5d, 0x5d, NULL, &input_state_osc_string }, { 0x5e, 0x5e, NULL, &input_state_consume_st }, { 0x5f, 0x5f, NULL, &input_state_apc_string }, { 0x60, 0x6a, input_esc_dispatch, &input_state_ground }, { 0x6b, 0x6b, NULL, &input_state_rename_string }, { 0x6c, 0x7e, input_esc_dispatch, &input_state_ground }, { 0x7f, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* esc_intermediate state table. */ static const struct input_transition input_state_esc_intermediate_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, input_c0_dispatch, NULL }, { 0x19, 0x19, input_c0_dispatch, NULL }, { 0x1c, 0x1f, input_c0_dispatch, NULL }, { 0x20, 0x2f, input_intermediate, NULL }, { 0x30, 0x7e, input_esc_dispatch, &input_state_ground }, { 0x7f, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* csi_enter state table. */ static const struct input_transition input_state_csi_enter_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, input_c0_dispatch, NULL }, { 0x19, 0x19, input_c0_dispatch, NULL }, { 0x1c, 0x1f, input_c0_dispatch, NULL }, { 0x20, 0x2f, input_intermediate, &input_state_csi_intermediate }, { 0x30, 0x39, input_parameter, &input_state_csi_parameter }, { 0x3a, 0x3a, input_parameter, &input_state_csi_parameter }, { 0x3b, 0x3b, input_parameter, &input_state_csi_parameter }, { 0x3c, 0x3f, input_intermediate, &input_state_csi_parameter }, { 0x40, 0x7e, input_csi_dispatch, &input_state_ground }, { 0x7f, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* csi_parameter state table. */ static const struct input_transition input_state_csi_parameter_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, input_c0_dispatch, NULL }, { 0x19, 0x19, input_c0_dispatch, NULL }, { 0x1c, 0x1f, input_c0_dispatch, NULL }, { 0x20, 0x2f, input_intermediate, &input_state_csi_intermediate }, { 0x30, 0x39, input_parameter, NULL }, { 0x3a, 0x3a, input_parameter, NULL }, { 0x3b, 0x3b, input_parameter, NULL }, { 0x3c, 0x3f, NULL, &input_state_csi_ignore }, { 0x40, 0x7e, input_csi_dispatch, &input_state_ground }, { 0x7f, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* csi_intermediate state table. */ static const struct input_transition input_state_csi_intermediate_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, input_c0_dispatch, NULL }, { 0x19, 0x19, input_c0_dispatch, NULL }, { 0x1c, 0x1f, input_c0_dispatch, NULL }, { 0x20, 0x2f, input_intermediate, NULL }, { 0x30, 0x3f, NULL, &input_state_csi_ignore }, { 0x40, 0x7e, input_csi_dispatch, &input_state_ground }, { 0x7f, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* csi_ignore state table. 
*/ static const struct input_transition input_state_csi_ignore_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, input_c0_dispatch, NULL }, { 0x19, 0x19, input_c0_dispatch, NULL }, { 0x1c, 0x1f, input_c0_dispatch, NULL }, { 0x20, 0x3f, NULL, NULL }, { 0x40, 0x7e, NULL, &input_state_ground }, { 0x7f, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* dcs_enter state table. */ static const struct input_transition input_state_dcs_enter_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, NULL, NULL }, { 0x19, 0x19, NULL, NULL }, { 0x1c, 0x1f, NULL, NULL }, { 0x20, 0x2f, input_intermediate, &input_state_dcs_intermediate }, { 0x30, 0x39, input_parameter, &input_state_dcs_parameter }, { 0x3a, 0x3a, NULL, &input_state_dcs_ignore }, { 0x3b, 0x3b, input_parameter, &input_state_dcs_parameter }, { 0x3c, 0x3f, input_intermediate, &input_state_dcs_parameter }, { 0x40, 0x7e, input_input, &input_state_dcs_handler }, { 0x7f, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* dcs_parameter state table. */ static const struct input_transition input_state_dcs_parameter_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, NULL, NULL }, { 0x19, 0x19, NULL, NULL }, { 0x1c, 0x1f, NULL, NULL }, { 0x20, 0x2f, input_intermediate, &input_state_dcs_intermediate }, { 0x30, 0x39, input_parameter, NULL }, { 0x3a, 0x3a, NULL, &input_state_dcs_ignore }, { 0x3b, 0x3b, input_parameter, NULL }, { 0x3c, 0x3f, NULL, &input_state_dcs_ignore }, { 0x40, 0x7e, input_input, &input_state_dcs_handler }, { 0x7f, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* dcs_intermediate state table. */ static const struct input_transition input_state_dcs_intermediate_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, NULL, NULL }, { 0x19, 0x19, NULL, NULL }, { 0x1c, 0x1f, NULL, NULL }, { 0x20, 0x2f, input_intermediate, NULL }, { 0x30, 0x3f, NULL, &input_state_dcs_ignore }, { 0x40, 0x7e, input_input, &input_state_dcs_handler }, { 0x7f, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* dcs_handler state table. */ static const struct input_transition input_state_dcs_handler_table[] = { /* No INPUT_STATE_ANYWHERE */ { 0x00, 0x1a, input_input, NULL }, { 0x1b, 0x1b, NULL, &input_state_dcs_escape }, { 0x1c, 0xff, input_input, NULL }, { -1, -1, NULL, NULL } }; /* dcs_escape state table. */ static const struct input_transition input_state_dcs_escape_table[] = { /* No INPUT_STATE_ANYWHERE */ { 0x00, 0x5b, input_input, &input_state_dcs_handler }, { 0x5c, 0x5c, input_dcs_dispatch, &input_state_ground }, { 0x5d, 0xff, input_input, &input_state_dcs_handler }, { -1, -1, NULL, NULL } }; /* dcs_ignore state table. */ static const struct input_transition input_state_dcs_ignore_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, NULL, NULL }, { 0x19, 0x19, NULL, NULL }, { 0x1c, 0x1f, NULL, NULL }, { 0x20, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* osc_string state table. */ static const struct input_transition input_state_osc_string_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x06, NULL, NULL }, { 0x07, 0x07, input_end_bel, &input_state_ground }, { 0x08, 0x17, NULL, NULL }, { 0x19, 0x19, NULL, NULL }, { 0x1c, 0x1f, NULL, NULL }, { 0x20, 0xff, input_input, NULL }, { -1, -1, NULL, NULL } }; /* apc_string state table. */ static const struct input_transition input_state_apc_string_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, NULL, NULL }, { 0x19, 0x19, NULL, NULL }, { 0x1c, 0x1f, NULL, NULL }, { 0x20, 0xff, input_input, NULL }, { -1, -1, NULL, NULL } }; /* rename_string state table. 
*/ static const struct input_transition input_state_rename_string_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, NULL, NULL }, { 0x19, 0x19, NULL, NULL }, { 0x1c, 0x1f, NULL, NULL }, { 0x20, 0xff, input_input, NULL }, { -1, -1, NULL, NULL } }; /* consume_st state table. */ static const struct input_transition input_state_consume_st_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, NULL, NULL }, { 0x19, 0x19, NULL, NULL }, { 0x1c, 0x1f, NULL, NULL }, { 0x20, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* Input table compare. */ static int input_table_compare(const void *key, const void *value) { const struct input_ctx *ictx = key; const struct input_table_entry *entry = value; if (ictx->ch != entry->ch) return (ictx->ch - entry->ch); return (strcmp(ictx->interm_buf, entry->interm)); } /* * Timer - if this expires then have been waiting for a terminator for too * long, so reset to ground. */ static void input_timer_callback(__unused int fd, __unused short events, void *arg) { struct input_ctx *ictx = arg; log_debug("%s: %s expired" , __func__, ictx->state->name); input_reset(ictx, 0); } /* Start the timer. */ static void input_start_timer(struct input_ctx *ictx) { struct timeval tv = { .tv_sec = 5, .tv_usec = 0 }; event_del(&ictx->timer); event_add(&ictx->timer, &tv); } /* Reset cell state to default. */ static void input_reset_cell(struct input_ctx *ictx) { memcpy(&ictx->cell.cell, &grid_default_cell, sizeof ictx->cell.cell); ictx->cell.set = 0; ictx->cell.g0set = ictx->cell.g1set = 0; memcpy(&ictx->old_cell, &ictx->cell, sizeof ictx->old_cell); ictx->old_cx = 0; ictx->old_cy = 0; } /* Save screen state. */ static void input_save_state(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; struct screen *s = sctx->s; memcpy(&ictx->old_cell, &ictx->cell, sizeof ictx->old_cell); ictx->old_cx = s->cx; ictx->old_cy = s->cy; ictx->old_mode = s->mode; } /* Restore screen state. */ static void input_restore_state(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; memcpy(&ictx->cell, &ictx->old_cell, sizeof ictx->cell); if (ictx->old_mode & MODE_ORIGIN) screen_write_mode_set(sctx, MODE_ORIGIN); else screen_write_mode_clear(sctx, MODE_ORIGIN); screen_write_cursormove(sctx, ictx->old_cx, ictx->old_cy, 0); } /* Initialise input parser. */ struct input_ctx * input_init(struct window_pane *wp, struct bufferevent *bev) { struct input_ctx *ictx; ictx = xcalloc(1, sizeof *ictx); ictx->wp = wp; ictx->event = bev; ictx->input_space = INPUT_BUF_START; ictx->input_buf = xmalloc(INPUT_BUF_START); ictx->since_ground = evbuffer_new(); if (ictx->since_ground == NULL) fatalx("out of memory"); evtimer_set(&ictx->timer, input_timer_callback, ictx); input_reset(ictx, 0); return (ictx); } /* Destroy input parser. */ void input_free(struct input_ctx *ictx) { u_int i; for (i = 0; i < ictx->param_list_len; i++) { if (ictx->param_list[i].type == INPUT_STRING) free(ictx->param_list[i].str); } event_del(&ictx->timer); free(ictx->input_buf); evbuffer_free(ictx->since_ground); free(ictx); } /* Reset input state and clear screen. */ void input_reset(struct input_ctx *ictx, int clear) { struct screen_write_ctx *sctx = &ictx->ctx; struct window_pane *wp = ictx->wp; input_reset_cell(ictx); if (clear && wp != NULL) { if (TAILQ_EMPTY(&wp->modes)) screen_write_start_pane(sctx, wp, &wp->base); else screen_write_start(sctx, &wp->base); screen_write_reset(sctx); screen_write_stop(sctx); } input_clear(ictx); ictx->last = -1; ictx->state = &input_state_ground; ictx->flags = 0; } /* Return pending data. 
*/ struct evbuffer * input_pending(struct input_ctx *ictx) { return (ictx->since_ground); } /* Change input state. */ static void input_set_state(struct input_ctx *ictx, const struct input_transition *itr) { if (ictx->state->exit != NULL) ictx->state->exit(ictx); ictx->state = itr->state; if (ictx->state->enter != NULL) ictx->state->enter(ictx); } /* Parse data. */ static void input_parse(struct input_ctx *ictx, u_char *buf, size_t len) { struct screen_write_ctx *sctx = &ictx->ctx; const struct input_state *state = NULL; const struct input_transition *itr = NULL; size_t off = 0; /* Parse the input. */ while (off < len) { ictx->ch = buf[off++]; /* Find the transition. */ if (ictx->state != state || itr == NULL || ictx->ch < itr->first || ictx->ch > itr->last) { itr = ictx->state->transitions; while (itr->first != -1 && itr->last != -1) { if (ictx->ch >= itr->first && ictx->ch <= itr->last) break; itr++; } if (itr->first == -1 || itr->last == -1) { /* No transition? Eh? */ fatalx("no transition from state"); } } state = ictx->state; /* * Any state except print stops the current collection. This is * an optimization to avoid checking if the attributes have * changed for every character. It will stop unnecessarily for * sequences that don't make a terminal change, but they should * be the minority. */ if (itr->handler != input_print) screen_write_collect_end(sctx); /* * Execute the handler, if any. Don't switch state if it * returns non-zero. */ if (itr->handler != NULL && itr->handler(ictx) != 0) continue; /* And switch state, if necessary. */ if (itr->state != NULL) input_set_state(ictx, itr); /* If not in ground state, save input. */ if (ictx->state != &input_state_ground) evbuffer_add(ictx->since_ground, &ictx->ch, 1); } } /* Parse input from pane. */ void input_parse_pane(struct window_pane *wp) { void *new_data; size_t new_size; new_data = window_pane_get_new_data(wp, &wp->offset, &new_size); input_parse_buffer(wp, new_data, new_size); window_pane_update_used_data(wp, &wp->offset, new_size); } /* Parse given input. */ void input_parse_buffer(struct window_pane *wp, u_char *buf, size_t len) { struct input_ctx *ictx = wp->ictx; struct screen_write_ctx *sctx = &ictx->ctx; if (len == 0) return; window_update_activity(wp->window); wp->flags |= PANE_CHANGED; /* NULL wp if there is a mode set as don't want to update the tty. */ if (TAILQ_EMPTY(&wp->modes)) screen_write_start_pane(sctx, wp, &wp->base); else screen_write_start(sctx, &wp->base); log_debug("%s: %%%u %s, %zu bytes: %.*s", __func__, wp->id, ictx->state->name, len, (int)len, buf); input_parse(ictx, buf, len); screen_write_stop(sctx); } /* Parse given input for screen. */ void input_parse_screen(struct input_ctx *ictx, struct screen *s, screen_write_init_ctx_cb cb, void *arg, u_char *buf, size_t len) { struct screen_write_ctx *sctx = &ictx->ctx; if (len == 0) return; screen_write_start_callback(sctx, s, cb, arg); input_parse(ictx, buf, len); screen_write_stop(sctx); } /* Split the parameter list (if any). 
*/ static int input_split(struct input_ctx *ictx) { const char *errstr; char *ptr, *out; struct input_param *ip; u_int i; for (i = 0; i < ictx->param_list_len; i++) { if (ictx->param_list[i].type == INPUT_STRING) free(ictx->param_list[i].str); } ictx->param_list_len = 0; if (ictx->param_len == 0) return (0); ip = &ictx->param_list[0]; ptr = ictx->param_buf; while ((out = strsep(&ptr, ";")) != NULL) { if (*out == '\0') ip->type = INPUT_MISSING; else { if (strchr(out, ':') != NULL) { ip->type = INPUT_STRING; ip->str = xstrdup(out); } else { ip->type = INPUT_NUMBER; ip->num = strtonum(out, 0, INT_MAX, &errstr); if (errstr != NULL) return (-1); } } ip = &ictx->param_list[++ictx->param_list_len]; if (ictx->param_list_len == nitems(ictx->param_list)) return (-1); } for (i = 0; i < ictx->param_list_len; i++) { ip = &ictx->param_list[i]; if (ip->type == INPUT_MISSING) log_debug("parameter %u: missing", i); else if (ip->type == INPUT_STRING) log_debug("parameter %u: string %s", i, ip->str); else if (ip->type == INPUT_NUMBER) log_debug("parameter %u: number %d", i, ip->num); } return (0); } /* Get an argument or return default value. */ static int input_get(struct input_ctx *ictx, u_int validx, int minval, int defval) { struct input_param *ip; int retval; if (validx >= ictx->param_list_len) return (defval); ip = &ictx->param_list[validx]; if (ip->type == INPUT_MISSING) return (defval); if (ip->type == INPUT_STRING) return (-1); retval = ip->num; if (retval < minval) return (minval); return (retval); } /* Reply to terminal query. */ static void input_reply(struct input_ctx *ictx, const char *fmt, ...) { struct bufferevent *bev = ictx->event; va_list ap; char *reply; va_start(ap, fmt); xvasprintf(&reply, fmt, ap); va_end(ap); bufferevent_write(bev, reply, strlen(reply)); free(reply); } /* Clear saved state. */ static void input_clear(struct input_ctx *ictx) { event_del(&ictx->timer); *ictx->interm_buf = '\0'; ictx->interm_len = 0; *ictx->param_buf = '\0'; ictx->param_len = 0; *ictx->input_buf = '\0'; ictx->input_len = 0; ictx->input_end = INPUT_END_ST; ictx->flags &= ~INPUT_DISCARD; } /* Reset for ground state. */ static void input_ground(struct input_ctx *ictx) { event_del(&ictx->timer); evbuffer_drain(ictx->since_ground, EVBUFFER_LENGTH(ictx->since_ground)); if (ictx->input_space > INPUT_BUF_START) { ictx->input_space = INPUT_BUF_START; ictx->input_buf = xrealloc(ictx->input_buf, INPUT_BUF_START); } } /* Output this character to the screen. */ static int input_print(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; int set; ictx->utf8started = 0; /* can't be valid UTF-8 */ set = ictx->cell.set == 0 ? ictx->cell.g0set : ictx->cell.g1set; if (set == 1) ictx->cell.cell.attr |= GRID_ATTR_CHARSET; else ictx->cell.cell.attr &= ~GRID_ATTR_CHARSET; utf8_set(&ictx->cell.cell.data, ictx->ch); screen_write_collect_add(sctx, &ictx->cell.cell); ictx->last = ictx->ch; ictx->cell.cell.attr &= ~GRID_ATTR_CHARSET; return (0); } /* Collect intermediate string. */ static int input_intermediate(struct input_ctx *ictx) { if (ictx->interm_len == (sizeof ictx->interm_buf) - 1) ictx->flags |= INPUT_DISCARD; else { ictx->interm_buf[ictx->interm_len++] = ictx->ch; ictx->interm_buf[ictx->interm_len] = '\0'; } return (0); } /* Collect parameter string. 
*/ static int input_parameter(struct input_ctx *ictx) { if (ictx->param_len == (sizeof ictx->param_buf) - 1) ictx->flags |= INPUT_DISCARD; else { ictx->param_buf[ictx->param_len++] = ictx->ch; ictx->param_buf[ictx->param_len] = '\0'; } return (0); } /* Collect input string. */ static int input_input(struct input_ctx *ictx) { size_t available; available = ictx->input_space; while (ictx->input_len + 1 >= available) { available *= 2; if (available > INPUT_BUF_LIMIT) { ictx->flags |= INPUT_DISCARD; return (0); } ictx->input_buf = xrealloc(ictx->input_buf, available); ictx->input_space = available; } ictx->input_buf[ictx->input_len++] = ictx->ch; ictx->input_buf[ictx->input_len] = '\0'; return (0); } /* Execute C0 control sequence. */ static int input_c0_dispatch(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; struct window_pane *wp = ictx->wp; struct screen *s = sctx->s; ictx->utf8started = 0; /* can't be valid UTF-8 */ log_debug("%s: '%c'", __func__, ictx->ch); switch (ictx->ch) { case '\000': /* NUL */ break; case '\007': /* BEL */ if (wp != NULL) alerts_queue(wp->window, WINDOW_BELL); break; case '\010': /* BS */ screen_write_backspace(sctx); break; case '\011': /* HT */ /* Don't tab beyond the end of the line. */ if (s->cx >= screen_size_x(s) - 1) break; /* Find the next tab point, or use the last column if none. */ do { s->cx++; if (bit_test(s->tabs, s->cx)) break; } while (s->cx < screen_size_x(s) - 1); break; case '\012': /* LF */ case '\013': /* VT */ case '\014': /* FF */ screen_write_linefeed(sctx, 0, ictx->cell.cell.bg); if (s->mode & MODE_CRLF) screen_write_carriagereturn(sctx); break; case '\015': /* CR */ screen_write_carriagereturn(sctx); break; case '\016': /* SO */ ictx->cell.set = 1; break; case '\017': /* SI */ ictx->cell.set = 0; break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } ictx->last = -1; return (0); } /* Execute escape sequence. 
*/ static int input_esc_dispatch(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; struct window_pane *wp = ictx->wp; struct screen *s = sctx->s; struct input_table_entry *entry; if (ictx->flags & INPUT_DISCARD) return (0); log_debug("%s: '%c', %s", __func__, ictx->ch, ictx->interm_buf); entry = bsearch(ictx, input_esc_table, nitems(input_esc_table), sizeof input_esc_table[0], input_table_compare); if (entry == NULL) { log_debug("%s: unknown '%c'", __func__, ictx->ch); return (0); } switch (entry->type) { case INPUT_ESC_RIS: if (wp != NULL) window_pane_reset_palette(wp); input_reset_cell(ictx); screen_write_reset(sctx); break; case INPUT_ESC_IND: screen_write_linefeed(sctx, 0, ictx->cell.cell.bg); break; case INPUT_ESC_NEL: screen_write_carriagereturn(sctx); screen_write_linefeed(sctx, 0, ictx->cell.cell.bg); break; case INPUT_ESC_HTS: if (s->cx < screen_size_x(s)) bit_set(s->tabs, s->cx); break; case INPUT_ESC_RI: screen_write_reverseindex(sctx, ictx->cell.cell.bg); break; case INPUT_ESC_DECKPAM: screen_write_mode_set(sctx, MODE_KKEYPAD); break; case INPUT_ESC_DECKPNM: screen_write_mode_clear(sctx, MODE_KKEYPAD); break; case INPUT_ESC_DECSC: input_save_state(ictx); break; case INPUT_ESC_DECRC: input_restore_state(ictx); break; case INPUT_ESC_DECALN: screen_write_alignmenttest(sctx); break; case INPUT_ESC_SCSG0_ON: ictx->cell.g0set = 1; break; case INPUT_ESC_SCSG0_OFF: ictx->cell.g0set = 0; break; case INPUT_ESC_SCSG1_ON: ictx->cell.g1set = 1; break; case INPUT_ESC_SCSG1_OFF: ictx->cell.g1set = 0; break; case INPUT_ESC_ST: /* ST terminates OSC but the state transition already did it. */ break; } ictx->last = -1; return (0); } /* Execute control sequence. */ static int input_csi_dispatch(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; struct screen *s = sctx->s; struct input_table_entry *entry; int i, n, m; u_int cx, bg = ictx->cell.cell.bg; if (ictx->flags & INPUT_DISCARD) return (0); log_debug("%s: '%c' \"%s\" \"%s\"", __func__, ictx->ch, ictx->interm_buf, ictx->param_buf); if (input_split(ictx) != 0) return (0); entry = bsearch(ictx, input_csi_table, nitems(input_csi_table), sizeof input_csi_table[0], input_table_compare); if (entry == NULL) { log_debug("%s: unknown '%c'", __func__, ictx->ch); return (0); } switch (entry->type) { case INPUT_CSI_CBT: /* Find the previous tab point, n times. 
*/ cx = s->cx; if (cx > screen_size_x(s) - 1) cx = screen_size_x(s) - 1; n = input_get(ictx, 0, 1, 1); if (n == -1) break; while (cx > 0 && n-- > 0) { do cx--; while (cx > 0 && !bit_test(s->tabs, cx)); } s->cx = cx; break; case INPUT_CSI_CUB: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_cursorleft(sctx, n); break; case INPUT_CSI_CUD: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_cursordown(sctx, n); break; case INPUT_CSI_CUF: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_cursorright(sctx, n); break; case INPUT_CSI_CUP: n = input_get(ictx, 0, 1, 1); m = input_get(ictx, 1, 1, 1); if (n != -1 && m != -1) screen_write_cursormove(sctx, m - 1, n - 1, 1); break; case INPUT_CSI_MODSET: n = input_get(ictx, 0, 0, 0); m = input_get(ictx, 1, 0, 0); if (n == 0 || (n == 4 && m == 0)) screen_write_mode_clear(sctx, MODE_KEXTENDED); else if (n == 4 && (m == 1 || m == 2)) screen_write_mode_set(sctx, MODE_KEXTENDED); break; case INPUT_CSI_MODOFF: n = input_get(ictx, 0, 0, 0); if (n == 4) screen_write_mode_clear(sctx, MODE_KEXTENDED); break; case INPUT_CSI_WINOPS: input_csi_dispatch_winops(ictx); break; case INPUT_CSI_CUU: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_cursorup(sctx, n); break; case INPUT_CSI_CNL: n = input_get(ictx, 0, 1, 1); if (n != -1) { screen_write_carriagereturn(sctx); screen_write_cursordown(sctx, n); } break; case INPUT_CSI_CPL: n = input_get(ictx, 0, 1, 1); if (n != -1) { screen_write_carriagereturn(sctx); screen_write_cursorup(sctx, n); } break; case INPUT_CSI_DA: switch (input_get(ictx, 0, 0, 0)) { case -1: break; case 0: input_reply(ictx, "\033[?1;2c"); break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } break; case INPUT_CSI_DA_TWO: switch (input_get(ictx, 0, 0, 0)) { case -1: break; case 0: input_reply(ictx, "\033[>84;0;0c"); break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } break; case INPUT_CSI_ECH: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_clearcharacter(sctx, n, bg); break; case INPUT_CSI_DCH: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_deletecharacter(sctx, n, bg); break; case INPUT_CSI_DECSTBM: n = input_get(ictx, 0, 1, 1); m = input_get(ictx, 1, 1, screen_size_y(s)); if (n != -1 && m != -1) screen_write_scrollregion(sctx, n - 1, m - 1); break; case INPUT_CSI_DL: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_deleteline(sctx, n, bg); break; case INPUT_CSI_DSR: switch (input_get(ictx, 0, 0, 0)) { case -1: break; case 5: input_reply(ictx, "\033[0n"); break; case 6: input_reply(ictx, "\033[%u;%uR", s->cy + 1, s->cx + 1); break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } break; case INPUT_CSI_ED: switch (input_get(ictx, 0, 0, 0)) { case -1: break; case 0: screen_write_clearendofscreen(sctx, bg); break; case 1: screen_write_clearstartofscreen(sctx, bg); break; case 2: screen_write_clearscreen(sctx, bg); break; case 3: if (input_get(ictx, 1, 0, 0) == 0) { /* * Linux console extension to clear history * (for example before locking the screen). 
*/ screen_write_clearhistory(sctx); } break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } break; case INPUT_CSI_EL: switch (input_get(ictx, 0, 0, 0)) { case -1: break; case 0: screen_write_clearendofline(sctx, bg); break; case 1: screen_write_clearstartofline(sctx, bg); break; case 2: screen_write_clearline(sctx, bg); break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } break; case INPUT_CSI_HPA: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_cursormove(sctx, n - 1, -1, 1); break; case INPUT_CSI_ICH: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_insertcharacter(sctx, n, bg); break; case INPUT_CSI_IL: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_insertline(sctx, n, bg); break; case INPUT_CSI_REP: n = input_get(ictx, 0, 1, 1); if (n == -1) break; if (ictx->last == -1) break; ictx->ch = ictx->last; for (i = 0; i < n; i++) input_print(ictx); break; case INPUT_CSI_RCP: input_restore_state(ictx); break; case INPUT_CSI_RM: input_csi_dispatch_rm(ictx); break; case INPUT_CSI_RM_PRIVATE: input_csi_dispatch_rm_private(ictx); break; case INPUT_CSI_SCP: input_save_state(ictx); break; case INPUT_CSI_SGR: input_csi_dispatch_sgr(ictx); break; case INPUT_CSI_SM: input_csi_dispatch_sm(ictx); break; case INPUT_CSI_SM_PRIVATE: input_csi_dispatch_sm_private(ictx); break; case INPUT_CSI_SU: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_scrollup(sctx, n, bg); break; case INPUT_CSI_SD: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_scrolldown(sctx, n, bg); break; case INPUT_CSI_TBC: switch (input_get(ictx, 0, 0, 0)) { case -1: break; case 0: if (s->cx < screen_size_x(s)) bit_clear(s->tabs, s->cx); break; case 3: bit_nclear(s->tabs, 0, screen_size_x(s) - 1); break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } break; case INPUT_CSI_VPA: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_cursormove(sctx, -1, n - 1, 1); break; case INPUT_CSI_DECSCUSR: n = input_get(ictx, 0, 0, 0); if (n != -1) screen_set_cursor_style(s, n); break; case INPUT_CSI_XDA: n = input_get(ictx, 0, 0, 0); if (n == 0) input_reply(ictx, "\033P>|tmux %s\033\\", getversion()); break; } ictx->last = -1; return (0); } /* Handle CSI RM. */ static void input_csi_dispatch_rm(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; u_int i; for (i = 0; i < ictx->param_list_len; i++) { switch (input_get(ictx, i, 0, -1)) { case -1: break; case 4: /* IRM */ screen_write_mode_clear(sctx, MODE_INSERT); break; case 34: screen_write_mode_set(sctx, MODE_BLINKING); break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } } } /* Handle CSI private RM. 
*/ static void input_csi_dispatch_rm_private(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; struct grid_cell *gc = &ictx->cell.cell; u_int i; for (i = 0; i < ictx->param_list_len; i++) { switch (input_get(ictx, i, 0, -1)) { case -1: break; case 1: /* DECCKM */ screen_write_mode_clear(sctx, MODE_KCURSOR); break; case 3: /* DECCOLM */ screen_write_cursormove(sctx, 0, 0, 1); screen_write_clearscreen(sctx, gc->bg); break; case 6: /* DECOM */ screen_write_mode_clear(sctx, MODE_ORIGIN); screen_write_cursormove(sctx, 0, 0, 1); break; case 7: /* DECAWM */ screen_write_mode_clear(sctx, MODE_WRAP); break; case 12: screen_write_mode_clear(sctx, MODE_BLINKING); break; case 25: /* TCEM */ screen_write_mode_clear(sctx, MODE_CURSOR); break; case 1000: case 1001: case 1002: case 1003: screen_write_mode_clear(sctx, ALL_MOUSE_MODES); break; case 1004: screen_write_mode_clear(sctx, MODE_FOCUSON); break; case 1005: screen_write_mode_clear(sctx, MODE_MOUSE_UTF8); break; case 1006: screen_write_mode_clear(sctx, MODE_MOUSE_SGR); break; case 47: case 1047: screen_write_alternateoff(sctx, gc, 0); break; case 1049: screen_write_alternateoff(sctx, gc, 1); break; case 2004: screen_write_mode_clear(sctx, MODE_BRACKETPASTE); break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } } } /* Handle CSI SM. */ static void input_csi_dispatch_sm(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; u_int i; for (i = 0; i < ictx->param_list_len; i++) { switch (input_get(ictx, i, 0, -1)) { case -1: break; case 4: /* IRM */ screen_write_mode_set(sctx, MODE_INSERT); break; case 34: screen_write_mode_clear(sctx, MODE_BLINKING); break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } } } /* Handle CSI private SM. */ static void input_csi_dispatch_sm_private(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; struct window_pane *wp = ictx->wp; struct grid_cell *gc = &ictx->cell.cell; u_int i; for (i = 0; i < ictx->param_list_len; i++) { switch (input_get(ictx, i, 0, -1)) { case -1: break; case 1: /* DECCKM */ screen_write_mode_set(sctx, MODE_KCURSOR); break; case 3: /* DECCOLM */ screen_write_cursormove(sctx, 0, 0, 1); screen_write_clearscreen(sctx, ictx->cell.cell.bg); break; case 6: /* DECOM */ screen_write_mode_set(sctx, MODE_ORIGIN); screen_write_cursormove(sctx, 0, 0, 1); break; case 7: /* DECAWM */ screen_write_mode_set(sctx, MODE_WRAP); break; case 12: screen_write_mode_set(sctx, MODE_BLINKING); break; case 25: /* TCEM */ screen_write_mode_set(sctx, MODE_CURSOR); break; case 1000: screen_write_mode_clear(sctx, ALL_MOUSE_MODES); screen_write_mode_set(sctx, MODE_MOUSE_STANDARD); break; case 1002: screen_write_mode_clear(sctx, ALL_MOUSE_MODES); screen_write_mode_set(sctx, MODE_MOUSE_BUTTON); break; case 1003: screen_write_mode_clear(sctx, ALL_MOUSE_MODES); screen_write_mode_set(sctx, MODE_MOUSE_ALL); break; case 1004: if (sctx->s->mode & MODE_FOCUSON) break; screen_write_mode_set(sctx, MODE_FOCUSON); if (wp != NULL) wp->flags |= PANE_FOCUSPUSH; /* force update */ break; case 1005: screen_write_mode_set(sctx, MODE_MOUSE_UTF8); break; case 1006: screen_write_mode_set(sctx, MODE_MOUSE_SGR); break; case 47: case 1047: screen_write_alternateon(sctx, gc, 0); break; case 1049: screen_write_alternateon(sctx, gc, 1); break; case 2004: screen_write_mode_set(sctx, MODE_BRACKETPASTE); break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } } } /* Handle CSI window operations. 
*/ static void input_csi_dispatch_winops(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; struct screen *s = sctx->s; struct window_pane *wp = ictx->wp; u_int x = screen_size_x(s), y = screen_size_y(s); int n, m; m = 0; while ((n = input_get(ictx, m, 0, -1)) != -1) { switch (n) { case 1: case 2: case 5: case 6: case 7: case 11: case 13: case 14: case 19: case 20: case 21: case 24: break; case 3: case 4: case 8: m++; if (input_get(ictx, m, 0, -1) == -1) return; /* FALLTHROUGH */ case 9: case 10: m++; if (input_get(ictx, m, 0, -1) == -1) return; break; case 22: m++; switch (input_get(ictx, m, 0, -1)) { case -1: return; case 0: case 2: screen_push_title(sctx->s); break; } break; case 23: m++; switch (input_get(ictx, m, 0, -1)) { case -1: return; case 0: case 2: screen_pop_title(sctx->s); if (wp != NULL) { notify_pane("pane-title-changed", wp); server_redraw_window_borders(wp->window); server_status_window(wp->window); } break; } break; case 18: input_reply(ictx, "\033[8;%u;%ut", x, y); break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } m++; } } /* Helper for 256 colour SGR. */ static int input_csi_dispatch_sgr_256_do(struct input_ctx *ictx, int fgbg, int c) { struct grid_cell *gc = &ictx->cell.cell; if (c == -1 || c > 255) { if (fgbg == 38) gc->fg = 8; else if (fgbg == 48) gc->bg = 8; } else { if (fgbg == 38) gc->fg = c | COLOUR_FLAG_256; else if (fgbg == 48) gc->bg = c | COLOUR_FLAG_256; else if (fgbg == 58) gc->us = c | COLOUR_FLAG_256; } return (1); } /* Handle CSI SGR for 256 colours. */ static void input_csi_dispatch_sgr_256(struct input_ctx *ictx, int fgbg, u_int *i) { int c; c = input_get(ictx, (*i) + 1, 0, -1); if (input_csi_dispatch_sgr_256_do(ictx, fgbg, c)) (*i)++; } /* Helper for RGB colour SGR. */ static int input_csi_dispatch_sgr_rgb_do(struct input_ctx *ictx, int fgbg, int r, int g, int b) { struct grid_cell *gc = &ictx->cell.cell; if (r == -1 || r > 255) return (0); if (g == -1 || g > 255) return (0); if (b == -1 || b > 255) return (0); if (fgbg == 38) gc->fg = colour_join_rgb(r, g, b); else if (fgbg == 48) gc->bg = colour_join_rgb(r, g, b); else if (fgbg == 58) gc->us = colour_join_rgb(r, g, b); return (1); } /* Handle CSI SGR for RGB colours. */ static void input_csi_dispatch_sgr_rgb(struct input_ctx *ictx, int fgbg, u_int *i) { int r, g, b; r = input_get(ictx, (*i) + 1, 0, -1); g = input_get(ictx, (*i) + 2, 0, -1); b = input_get(ictx, (*i) + 3, 0, -1); if (input_csi_dispatch_sgr_rgb_do(ictx, fgbg, r, g, b)) (*i) += 3; } /* Handle CSI SGR with a ISO parameter. 
*/ static void input_csi_dispatch_sgr_colon(struct input_ctx *ictx, u_int i) { struct grid_cell *gc = &ictx->cell.cell; char *s = ictx->param_list[i].str, *copy, *ptr, *out; int p[8]; u_int n; const char *errstr; for (n = 0; n < nitems(p); n++) p[n] = -1; n = 0; ptr = copy = xstrdup(s); while ((out = strsep(&ptr, ":")) != NULL) { if (*out != '\0') { p[n++] = strtonum(out, 0, INT_MAX, &errstr); if (errstr != NULL || n == nitems(p)) { free(copy); return; } } else n++; log_debug("%s: %u = %d", __func__, n - 1, p[n - 1]); } free(copy); if (n == 0) return; if (p[0] == 4) { if (n != 2) return; switch (p[1]) { case 0: gc->attr &= ~GRID_ATTR_ALL_UNDERSCORE; break; case 1: gc->attr &= ~GRID_ATTR_ALL_UNDERSCORE; gc->attr |= GRID_ATTR_UNDERSCORE; break; case 2: gc->attr &= ~GRID_ATTR_ALL_UNDERSCORE; gc->attr |= GRID_ATTR_UNDERSCORE_2; break; case 3: gc->attr &= ~GRID_ATTR_ALL_UNDERSCORE; gc->attr |= GRID_ATTR_UNDERSCORE_3; break; case 4: gc->attr &= ~GRID_ATTR_ALL_UNDERSCORE; gc->attr |= GRID_ATTR_UNDERSCORE_4; break; case 5: gc->attr &= ~GRID_ATTR_ALL_UNDERSCORE; gc->attr |= GRID_ATTR_UNDERSCORE_5; break; } return; } if (n < 2 || (p[0] != 38 && p[0] != 48 && p[0] != 58)) return; switch (p[1]) { case 2: if (n < 3) break; if (n == 5) i = 2; else i = 3; if (n < i + 3) break; input_csi_dispatch_sgr_rgb_do(ictx, p[0], p[i], p[i + 1], p[i + 2]); break; case 5: if (n < 3) break; input_csi_dispatch_sgr_256_do(ictx, p[0], p[2]); break; } } /* Handle CSI SGR. */ static void input_csi_dispatch_sgr(struct input_ctx *ictx) { struct grid_cell *gc = &ictx->cell.cell; u_int i; int n; if (ictx->param_list_len == 0) { memcpy(gc, &grid_default_cell, sizeof *gc); return; } for (i = 0; i < ictx->param_list_len; i++) { if (ictx->param_list[i].type == INPUT_STRING) { input_csi_dispatch_sgr_colon(ictx, i); continue; } n = input_get(ictx, i, 0, 0); if (n == -1) continue; if (n == 38 || n == 48 || n == 58) { i++; switch (input_get(ictx, i, 0, -1)) { case 2: input_csi_dispatch_sgr_rgb(ictx, n, &i); break; case 5: input_csi_dispatch_sgr_256(ictx, n, &i); break; } continue; } switch (n) { case 0: memcpy(gc, &grid_default_cell, sizeof *gc); break; case 1: gc->attr |= GRID_ATTR_BRIGHT; break; case 2: gc->attr |= GRID_ATTR_DIM; break; case 3: gc->attr |= GRID_ATTR_ITALICS; break; case 4: gc->attr &= ~GRID_ATTR_ALL_UNDERSCORE; gc->attr |= GRID_ATTR_UNDERSCORE; break; case 5: gc->attr |= GRID_ATTR_BLINK; break; case 7: gc->attr |= GRID_ATTR_REVERSE; break; case 8: gc->attr |= GRID_ATTR_HIDDEN; break; case 9: gc->attr |= GRID_ATTR_STRIKETHROUGH; break; case 22: gc->attr &= ~(GRID_ATTR_BRIGHT|GRID_ATTR_DIM); break; case 23: gc->attr &= ~GRID_ATTR_ITALICS; break; case 24: gc->attr &= ~GRID_ATTR_ALL_UNDERSCORE; break; case 25: gc->attr &= ~GRID_ATTR_BLINK; break; case 27: gc->attr &= ~GRID_ATTR_REVERSE; break; case 28: gc->attr &= ~GRID_ATTR_HIDDEN; break; case 29: gc->attr &= ~GRID_ATTR_STRIKETHROUGH; break; case 30: case 31: case 32: case 33: case 34: case 35: case 36: case 37: gc->fg = n - 30; break; case 39: gc->fg = 8; break; case 40: case 41: case 42: case 43: case 44: case 45: case 46: case 47: gc->bg = n - 40; break; case 49: gc->bg = 8; break; case 53: gc->attr |= GRID_ATTR_OVERLINE; break; case 55: gc->attr &= ~GRID_ATTR_OVERLINE; break; case 59: gc->us = 0; break; case 90: case 91: case 92: case 93: case 94: case 95: case 96: case 97: gc->fg = n; break; case 100: case 101: case 102: case 103: case 104: case 105: case 106: case 107: gc->bg = n - 10; break; } } } /* End of input with BEL. 
*/ static int input_end_bel(struct input_ctx *ictx) { log_debug("%s", __func__); ictx->input_end = INPUT_END_BEL; return (0); } /* DCS string started. */ static void input_enter_dcs(struct input_ctx *ictx) { log_debug("%s", __func__); input_clear(ictx); input_start_timer(ictx); ictx->last = -1; } /* DCS terminator (ST) received. */ static int input_dcs_dispatch(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; u_char *buf = ictx->input_buf; size_t len = ictx->input_len; const char prefix[] = "tmux;"; const u_int prefixlen = (sizeof prefix) - 1; if (ictx->flags & INPUT_DISCARD) return (0); log_debug("%s: \"%s\"", __func__, buf); if (len >= prefixlen && strncmp(buf, prefix, prefixlen) == 0) screen_write_rawstring(sctx, buf + prefixlen, len - prefixlen); return (0); } /* OSC string started. */ static void input_enter_osc(struct input_ctx *ictx) { log_debug("%s", __func__); input_clear(ictx); input_start_timer(ictx); ictx->last = -1; } /* OSC terminator (ST) received. */ static void input_exit_osc(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; struct window_pane *wp = ictx->wp; u_char *p = ictx->input_buf; u_int option; if (ictx->flags & INPUT_DISCARD) return; if (ictx->input_len < 1 || *p < '0' || *p > '9') return; log_debug("%s: \"%s\" (end %s)", __func__, p, ictx->input_end == INPUT_END_ST ? "ST" : "BEL"); option = 0; while (*p >= '0' && *p <= '9') option = option * 10 + *p++ - '0'; if (*p == ';') p++; switch (option) { case 0: case 2: if (screen_set_title(sctx->s, p) && wp != NULL) { notify_pane("pane-title-changed", wp); server_redraw_window_borders(wp->window); server_status_window(wp->window); } break; case 4: input_osc_4(ictx, p); break; case 7: if (utf8_isvalid(p)) { screen_set_path(sctx->s, p); if (wp != NULL) { server_redraw_window_borders(wp->window); server_status_window(wp->window); } } break; case 10: input_osc_10(ictx, p); break; case 11: input_osc_11(ictx, p); break; case 12: if (utf8_isvalid(p) && *p != '?') /* ? is colour request */ screen_set_cursor_colour(sctx->s, p); break; case 52: input_osc_52(ictx, p); break; case 104: input_osc_104(ictx, p); break; case 112: if (*p == '\0') /* no arguments allowed */ screen_set_cursor_colour(sctx->s, ""); break; default: log_debug("%s: unknown '%u'", __func__, option); break; } } /* APC string started. */ static void input_enter_apc(struct input_ctx *ictx) { log_debug("%s", __func__); input_clear(ictx); input_start_timer(ictx); ictx->last = -1; } /* APC terminator (ST) received. */ static void input_exit_apc(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; struct window_pane *wp = ictx->wp; if (ictx->flags & INPUT_DISCARD) return; log_debug("%s: \"%s\"", __func__, ictx->input_buf); if (screen_set_title(sctx->s, ictx->input_buf) && wp != NULL) { notify_pane("pane-title-changed", wp); server_redraw_window_borders(wp->window); server_status_window(wp->window); } } /* Rename string started. */ static void input_enter_rename(struct input_ctx *ictx) { log_debug("%s", __func__); input_clear(ictx); input_start_timer(ictx); ictx->last = -1; } /* Rename terminator (ST) received. 
*/ static void input_exit_rename(struct input_ctx *ictx) { struct window_pane *wp = ictx->wp; struct options_entry *o; if (wp == NULL) return; if (ictx->flags & INPUT_DISCARD) return; if (!options_get_number(ictx->wp->options, "allow-rename")) return; log_debug("%s: \"%s\"", __func__, ictx->input_buf); if (!utf8_isvalid(ictx->input_buf)) return; if (ictx->input_len == 0) { o = options_get_only(wp->window->options, "automatic-rename"); if (o != NULL) options_remove_or_default(o, -1, NULL); return; } window_set_name(wp->window, ictx->input_buf); options_set_number(wp->window->options, "automatic-rename", 0); server_redraw_window_borders(wp->window); server_status_window(wp->window); } /* Open UTF-8 character. */ static int input_top_bit_set(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; struct utf8_data *ud = &ictx->utf8data; ictx->last = -1; if (!ictx->utf8started) { if (utf8_open(ud, ictx->ch) != UTF8_MORE) return (0); ictx->utf8started = 1; return (0); } switch (utf8_append(ud, ictx->ch)) { case UTF8_MORE: return (0); case UTF8_ERROR: ictx->utf8started = 0; return (0); case UTF8_DONE: break; } ictx->utf8started = 0; log_debug("%s %hhu '%*s' (width %hhu)", __func__, ud->size, (int)ud->size, ud->data, ud->width); utf8_copy(&ictx->cell.cell.data, ud); screen_write_collect_add(sctx, &ictx->cell.cell); return (0); } /* Parse colour from OSC. */ static int input_osc_parse_colour(const char *p, u_int *r, u_int *g, u_int *b) { u_int rsize, gsize, bsize; const char *cp, *s = p; if (sscanf(p, "rgb:%x/%x/%x", r, g, b) != 3) return (0); p += 4; cp = strchr(p, '/'); rsize = cp - p; if (rsize == 1) (*r) = (*r) | ((*r) << 4); else if (rsize == 3) (*r) >>= 4; else if (rsize == 4) (*r) >>= 8; else if (rsize != 2) return (0); p = cp + 1; cp = strchr(p, '/'); gsize = cp - p; if (gsize == 1) (*g) = (*g) | ((*g) << 4); else if (gsize == 3) (*g) >>= 4; else if (gsize == 4) (*g) >>= 8; else if (gsize != 2) return (0); bsize = strlen(cp + 1); if (bsize == 1) (*b) = (*b) | ((*b) << 4); else if (bsize == 3) (*b) >>= 4; else if (bsize == 4) (*b) >>= 8; else if (bsize != 2) return (0); log_debug("%s: %s = %02x%02x%02x", __func__, s, *r, *g, *b); return (1); } /* Reply to a colour request. */ static void input_osc_colour_reply(struct input_ctx *ictx, u_int n, int c) { u_char r, g, b; const char *end; if (c == 8 || (~c & COLOUR_FLAG_RGB)) return; colour_split_rgb(c, &r, &g, &b); if (ictx->input_end == INPUT_END_BEL) end = "\007"; else end = "\033\\"; input_reply(ictx, "\033]%u;rgb:%02hhx/%02hhx/%02hhx%s", n, r, g, b, end); } /* Handle the OSC 4 sequence for setting (multiple) palette entries. */ static void input_osc_4(struct input_ctx *ictx, const char *p) { struct window_pane *wp = ictx->wp; char *copy, *s, *next = NULL; long idx; u_int r, g, b; if (wp == NULL) return; copy = s = xstrdup(p); while (s != NULL && *s != '\0') { idx = strtol(s, &next, 10); if (*next++ != ';') goto bad; if (idx < 0 || idx >= 0x100) goto bad; s = strsep(&next, ";"); if (!input_osc_parse_colour(s, &r, &g, &b)) { s = next; continue; } window_pane_set_palette(wp, idx, colour_join_rgb(r, g, b)); s = next; } free(copy); return; bad: log_debug("bad OSC 4: %s", p); free(copy); } /* Handle the OSC 10 sequence for setting and querying foreground colour. 
*/ static void input_osc_10(struct input_ctx *ictx, const char *p) { struct window_pane *wp = ictx->wp; struct grid_cell defaults; u_int r, g, b; if (wp == NULL) return; if (strcmp(p, "?") == 0) { tty_default_colours(&defaults, wp); input_osc_colour_reply(ictx, 10, defaults.fg); return; } if (!input_osc_parse_colour(p, &r, &g, &b)) goto bad; wp->fg = colour_join_rgb(r, g, b); wp->flags |= (PANE_REDRAW|PANE_STYLECHANGED); return; bad: log_debug("bad OSC 10: %s", p); } /* Handle the OSC 11 sequence for setting and querying background colour. */ static void input_osc_11(struct input_ctx *ictx, const char *p) { struct window_pane *wp = ictx->wp; struct grid_cell defaults; u_int r, g, b; if (wp == NULL) return; if (strcmp(p, "?") == 0) { tty_default_colours(&defaults, wp); input_osc_colour_reply(ictx, 11, defaults.bg); return; } if (!input_osc_parse_colour(p, &r, &g, &b)) goto bad; wp->bg = colour_join_rgb(r, g, b); wp->flags |= (PANE_REDRAW|PANE_STYLECHANGED); return; bad: log_debug("bad OSC 11: %s", p); } /* Handle the OSC 52 sequence for setting the clipboard. */ static void input_osc_52(struct input_ctx *ictx, const char *p) { struct window_pane *wp = ictx->wp; char *end; const char *buf; size_t len; u_char *out; int outlen, state; struct screen_write_ctx ctx; struct paste_buffer *pb; if (wp == NULL) return; state = options_get_number(global_options, "set-clipboard"); if (state != 2) return; if ((end = strchr(p, ';')) == NULL) return; end++; if (*end == '\0') return; log_debug("%s: %s", __func__, end); if (strcmp(end, "?") == 0) { if ((pb = paste_get_top(NULL)) != NULL) { buf = paste_buffer_data(pb, &len); outlen = 4 * ((len + 2) / 3) + 1; out = xmalloc(outlen); if ((outlen = b64_ntop(buf, len, out, outlen)) == -1) { free(out); return; } } else { outlen = 0; out = NULL; } bufferevent_write(ictx->event, "\033]52;;", 6); if (outlen != 0) bufferevent_write(ictx->event, out, outlen); if (ictx->input_end == INPUT_END_BEL) bufferevent_write(ictx->event, "\007", 1); else bufferevent_write(ictx->event, "\033\\", 2); free(out); return; } len = (strlen(end) / 4) * 3; if (len == 0) return; out = xmalloc(len); if ((outlen = b64_pton(end, out, len)) == -1) { free(out); return; } screen_write_start_pane(&ctx, wp, NULL); screen_write_setselection(&ctx, out, outlen); screen_write_stop(&ctx); notify_pane("pane-set-clipboard", wp); paste_add(NULL, out, outlen); } /* Handle the OSC 104 sequence for unsetting (multiple) palette entries. */ static void input_osc_104(struct input_ctx *ictx, const char *p) { struct window_pane *wp = ictx->wp; char *copy, *s; long idx; if (wp == NULL) return; if (*p == '\0') { window_pane_reset_palette(wp); return; } copy = s = xstrdup(p); while (*s != '\0') { idx = strtol(s, &s, 10); if (*s != '\0' && *s != ';') goto bad; if (idx < 0 || idx >= 0x100) goto bad; window_pane_unset_palette(wp, idx); if (*s == ';') s++; } free(copy); return; bad: log_debug("bad OSC 104: %s", p); free(copy); }
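This record (CVE-2020-27347, CWE-787) centres on input_csi_dispatch_sgr_colon above, which tokenises a colon-separated SGR parameter into the fixed array int p[8]; with enough empty sub-parameters the running index can pass the end of the array, so a later write lands out of bounds. The following is a minimal, generic sketch of the defensive pattern for this class of bug: check the index against the array size before every write or skip. The helper split_params is hypothetical; this is not tmux's code nor its actual patch.

/* Generic sketch: bounds-checked collection of colon-separated numbers
 * into a fixed-size array. Hypothetical helper, not tmux code or its patch. */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NPARAMS 8

static int
split_params(const char *s, int p[NPARAMS])
{
	char	*copy, *ptr, *tok;
	int	 n = 0, i;

	for (i = 0; i < NPARAMS; i++)
		p[i] = -1;

	copy = ptr = strdup(s);
	if (copy == NULL)
		return (-1);

	while ((tok = strsep(&ptr, ":")) != NULL) {
		if (n == NPARAMS) {		/* check BEFORE writing or skipping */
			free(copy);
			return (-1);
		}
		if (*tok != '\0')
			p[n] = atoi(tok);	/* empty token keeps the -1 default */
		n++;
	}
	free(copy);
	return (n);
}

int
main(void)
{
	int p[NPARAMS], n;

	n = split_params("4:3", p);
	printf("n=%d p[0]=%d p[1]=%d\n", n, p[0], p[1]);	/* n=2 p[0]=4 p[1]=3 */

	/* Oversized input is rejected instead of writing past p[]. */
	n = split_params("1:2:3:4:5:6:7:8:9", p);
	printf("overflow attempt -> %d\n", n);			/* -1 */
	return 0;
}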
null
/* $OpenBSD$ */ /* * Copyright (c) 2007 Nicholas Marriott <nicholas.marriott@gmail.com> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER * IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <sys/types.h> #include <netinet/in.h> #include <ctype.h> #include <resolv.h> #include <stdlib.h> #include <string.h> #include <time.h> #include "tmux.h" /* * Based on the description by Paul Williams at: * * https://vt100.net/emu/dec_ansi_parser * * With the following changes: * * - 7-bit only. * * - Support for UTF-8. * * - OSC (but not APC) may be terminated by \007 as well as ST. * * - A state for APC similar to OSC. Some terminals appear to use this to set * the title. * * - A state for the screen \033k...\033\\ sequence to rename a window. This is * pretty stupid but not supporting it is more trouble than it is worth. * * - Special handling for ESC inside a DCS to allow arbitrary byte sequences to * be passed to the underlying terminals. */ /* Input parser cell. */ struct input_cell { struct grid_cell cell; int set; int g0set; /* 1 if ACS */ int g1set; /* 1 if ACS */ }; /* Input parser argument. */ struct input_param { enum { INPUT_MISSING, INPUT_NUMBER, INPUT_STRING } type; union { int num; char *str; }; }; /* Input parser context. */ struct input_ctx { struct window_pane *wp; struct bufferevent *event; struct screen_write_ctx ctx; struct input_cell cell; struct input_cell old_cell; u_int old_cx; u_int old_cy; int old_mode; u_char interm_buf[4]; size_t interm_len; u_char param_buf[64]; size_t param_len; #define INPUT_BUF_START 32 #define INPUT_BUF_LIMIT 1048576 u_char *input_buf; size_t input_len; size_t input_space; enum { INPUT_END_ST, INPUT_END_BEL } input_end; struct input_param param_list[24]; u_int param_list_len; struct utf8_data utf8data; int utf8started; int ch; int last; int flags; #define INPUT_DISCARD 0x1 const struct input_state *state; struct event timer; /* * All input received since we were last in the ground state. Sent to * control clients on connection. */ struct evbuffer *since_ground; }; /* Helper functions. */ struct input_transition; static int input_split(struct input_ctx *); static int input_get(struct input_ctx *, u_int, int, int); static void printflike(2, 3) input_reply(struct input_ctx *, const char *, ...); static void input_set_state(struct input_ctx *, const struct input_transition *); static void input_reset_cell(struct input_ctx *); static void input_osc_4(struct input_ctx *, const char *); static void input_osc_10(struct input_ctx *, const char *); static void input_osc_11(struct input_ctx *, const char *); static void input_osc_52(struct input_ctx *, const char *); static void input_osc_104(struct input_ctx *, const char *); /* Transition entry/exit handlers. 
*/ static void input_clear(struct input_ctx *); static void input_ground(struct input_ctx *); static void input_enter_dcs(struct input_ctx *); static void input_enter_osc(struct input_ctx *); static void input_exit_osc(struct input_ctx *); static void input_enter_apc(struct input_ctx *); static void input_exit_apc(struct input_ctx *); static void input_enter_rename(struct input_ctx *); static void input_exit_rename(struct input_ctx *); /* Input state handlers. */ static int input_print(struct input_ctx *); static int input_intermediate(struct input_ctx *); static int input_parameter(struct input_ctx *); static int input_input(struct input_ctx *); static int input_c0_dispatch(struct input_ctx *); static int input_esc_dispatch(struct input_ctx *); static int input_csi_dispatch(struct input_ctx *); static void input_csi_dispatch_rm(struct input_ctx *); static void input_csi_dispatch_rm_private(struct input_ctx *); static void input_csi_dispatch_sm(struct input_ctx *); static void input_csi_dispatch_sm_private(struct input_ctx *); static void input_csi_dispatch_winops(struct input_ctx *); static void input_csi_dispatch_sgr_256(struct input_ctx *, int, u_int *); static void input_csi_dispatch_sgr_rgb(struct input_ctx *, int, u_int *); static void input_csi_dispatch_sgr(struct input_ctx *); static int input_dcs_dispatch(struct input_ctx *); static int input_top_bit_set(struct input_ctx *); static int input_end_bel(struct input_ctx *); /* Command table comparison function. */ static int input_table_compare(const void *, const void *); /* Command table entry. */ struct input_table_entry { int ch; const char *interm; int type; }; /* Escape commands. */ enum input_esc_type { INPUT_ESC_DECALN, INPUT_ESC_DECKPAM, INPUT_ESC_DECKPNM, INPUT_ESC_DECRC, INPUT_ESC_DECSC, INPUT_ESC_HTS, INPUT_ESC_IND, INPUT_ESC_NEL, INPUT_ESC_RI, INPUT_ESC_RIS, INPUT_ESC_SCSG0_OFF, INPUT_ESC_SCSG0_ON, INPUT_ESC_SCSG1_OFF, INPUT_ESC_SCSG1_ON, INPUT_ESC_ST, }; /* Escape command table. */ static const struct input_table_entry input_esc_table[] = { { '0', "(", INPUT_ESC_SCSG0_ON }, { '0', ")", INPUT_ESC_SCSG1_ON }, { '7', "", INPUT_ESC_DECSC }, { '8', "", INPUT_ESC_DECRC }, { '8', "#", INPUT_ESC_DECALN }, { '=', "", INPUT_ESC_DECKPAM }, { '>', "", INPUT_ESC_DECKPNM }, { 'B', "(", INPUT_ESC_SCSG0_OFF }, { 'B', ")", INPUT_ESC_SCSG1_OFF }, { 'D', "", INPUT_ESC_IND }, { 'E', "", INPUT_ESC_NEL }, { 'H', "", INPUT_ESC_HTS }, { 'M', "", INPUT_ESC_RI }, { '\\', "", INPUT_ESC_ST }, { 'c', "", INPUT_ESC_RIS }, }; /* Control (CSI) commands. */ enum input_csi_type { INPUT_CSI_CBT, INPUT_CSI_CNL, INPUT_CSI_CPL, INPUT_CSI_CUB, INPUT_CSI_CUD, INPUT_CSI_CUF, INPUT_CSI_CUP, INPUT_CSI_CUU, INPUT_CSI_DA, INPUT_CSI_DA_TWO, INPUT_CSI_DCH, INPUT_CSI_DECSCUSR, INPUT_CSI_DECSTBM, INPUT_CSI_DL, INPUT_CSI_DSR, INPUT_CSI_ECH, INPUT_CSI_ED, INPUT_CSI_EL, INPUT_CSI_HPA, INPUT_CSI_ICH, INPUT_CSI_IL, INPUT_CSI_MODOFF, INPUT_CSI_MODSET, INPUT_CSI_RCP, INPUT_CSI_REP, INPUT_CSI_RM, INPUT_CSI_RM_PRIVATE, INPUT_CSI_SCP, INPUT_CSI_SD, INPUT_CSI_SGR, INPUT_CSI_SM, INPUT_CSI_SM_PRIVATE, INPUT_CSI_SU, INPUT_CSI_TBC, INPUT_CSI_VPA, INPUT_CSI_WINOPS, INPUT_CSI_XDA, }; /* Control (CSI) command table. 
*/ static const struct input_table_entry input_csi_table[] = { { '@', "", INPUT_CSI_ICH }, { 'A', "", INPUT_CSI_CUU }, { 'B', "", INPUT_CSI_CUD }, { 'C', "", INPUT_CSI_CUF }, { 'D', "", INPUT_CSI_CUB }, { 'E', "", INPUT_CSI_CNL }, { 'F', "", INPUT_CSI_CPL }, { 'G', "", INPUT_CSI_HPA }, { 'H', "", INPUT_CSI_CUP }, { 'J', "", INPUT_CSI_ED }, { 'K', "", INPUT_CSI_EL }, { 'L', "", INPUT_CSI_IL }, { 'M', "", INPUT_CSI_DL }, { 'P', "", INPUT_CSI_DCH }, { 'S', "", INPUT_CSI_SU }, { 'T', "", INPUT_CSI_SD }, { 'X', "", INPUT_CSI_ECH }, { 'Z', "", INPUT_CSI_CBT }, { '`', "", INPUT_CSI_HPA }, { 'b', "", INPUT_CSI_REP }, { 'c', "", INPUT_CSI_DA }, { 'c', ">", INPUT_CSI_DA_TWO }, { 'd', "", INPUT_CSI_VPA }, { 'f', "", INPUT_CSI_CUP }, { 'g', "", INPUT_CSI_TBC }, { 'h', "", INPUT_CSI_SM }, { 'h', "?", INPUT_CSI_SM_PRIVATE }, { 'l', "", INPUT_CSI_RM }, { 'l', "?", INPUT_CSI_RM_PRIVATE }, { 'm', "", INPUT_CSI_SGR }, { 'm', ">", INPUT_CSI_MODSET }, { 'n', "", INPUT_CSI_DSR }, { 'n', ">", INPUT_CSI_MODOFF }, { 'q', " ", INPUT_CSI_DECSCUSR }, { 'q', ">", INPUT_CSI_XDA }, { 'r', "", INPUT_CSI_DECSTBM }, { 's', "", INPUT_CSI_SCP }, { 't', "", INPUT_CSI_WINOPS }, { 'u', "", INPUT_CSI_RCP }, }; /* Input transition. */ struct input_transition { int first; int last; int (*handler)(struct input_ctx *); const struct input_state *state; }; /* Input state. */ struct input_state { const char *name; void (*enter)(struct input_ctx *); void (*exit)(struct input_ctx *); const struct input_transition *transitions; }; /* State transitions available from all states. */ #define INPUT_STATE_ANYWHERE \ { 0x18, 0x18, input_c0_dispatch, &input_state_ground }, \ { 0x1a, 0x1a, input_c0_dispatch, &input_state_ground }, \ { 0x1b, 0x1b, NULL, &input_state_esc_enter } /* Forward declarations of state tables. */ static const struct input_transition input_state_ground_table[]; static const struct input_transition input_state_esc_enter_table[]; static const struct input_transition input_state_esc_intermediate_table[]; static const struct input_transition input_state_csi_enter_table[]; static const struct input_transition input_state_csi_parameter_table[]; static const struct input_transition input_state_csi_intermediate_table[]; static const struct input_transition input_state_csi_ignore_table[]; static const struct input_transition input_state_dcs_enter_table[]; static const struct input_transition input_state_dcs_parameter_table[]; static const struct input_transition input_state_dcs_intermediate_table[]; static const struct input_transition input_state_dcs_handler_table[]; static const struct input_transition input_state_dcs_escape_table[]; static const struct input_transition input_state_dcs_ignore_table[]; static const struct input_transition input_state_osc_string_table[]; static const struct input_transition input_state_apc_string_table[]; static const struct input_transition input_state_rename_string_table[]; static const struct input_transition input_state_consume_st_table[]; /* ground state definition. */ static const struct input_state input_state_ground = { "ground", input_ground, NULL, input_state_ground_table }; /* esc_enter state definition. */ static const struct input_state input_state_esc_enter = { "esc_enter", input_clear, NULL, input_state_esc_enter_table }; /* esc_intermediate state definition. */ static const struct input_state input_state_esc_intermediate = { "esc_intermediate", NULL, NULL, input_state_esc_intermediate_table }; /* csi_enter state definition. 
*/ static const struct input_state input_state_csi_enter = { "csi_enter", input_clear, NULL, input_state_csi_enter_table }; /* csi_parameter state definition. */ static const struct input_state input_state_csi_parameter = { "csi_parameter", NULL, NULL, input_state_csi_parameter_table }; /* csi_intermediate state definition. */ static const struct input_state input_state_csi_intermediate = { "csi_intermediate", NULL, NULL, input_state_csi_intermediate_table }; /* csi_ignore state definition. */ static const struct input_state input_state_csi_ignore = { "csi_ignore", NULL, NULL, input_state_csi_ignore_table }; /* dcs_enter state definition. */ static const struct input_state input_state_dcs_enter = { "dcs_enter", input_enter_dcs, NULL, input_state_dcs_enter_table }; /* dcs_parameter state definition. */ static const struct input_state input_state_dcs_parameter = { "dcs_parameter", NULL, NULL, input_state_dcs_parameter_table }; /* dcs_intermediate state definition. */ static const struct input_state input_state_dcs_intermediate = { "dcs_intermediate", NULL, NULL, input_state_dcs_intermediate_table }; /* dcs_handler state definition. */ static const struct input_state input_state_dcs_handler = { "dcs_handler", NULL, NULL, input_state_dcs_handler_table }; /* dcs_escape state definition. */ static const struct input_state input_state_dcs_escape = { "dcs_escape", NULL, NULL, input_state_dcs_escape_table }; /* dcs_ignore state definition. */ static const struct input_state input_state_dcs_ignore = { "dcs_ignore", NULL, NULL, input_state_dcs_ignore_table }; /* osc_string state definition. */ static const struct input_state input_state_osc_string = { "osc_string", input_enter_osc, input_exit_osc, input_state_osc_string_table }; /* apc_string state definition. */ static const struct input_state input_state_apc_string = { "apc_string", input_enter_apc, input_exit_apc, input_state_apc_string_table }; /* rename_string state definition. */ static const struct input_state input_state_rename_string = { "rename_string", input_enter_rename, input_exit_rename, input_state_rename_string_table }; /* consume_st state definition. */ static const struct input_state input_state_consume_st = { "consume_st", input_enter_rename, NULL, /* rename also waits for ST */ input_state_consume_st_table }; /* ground state table. */ static const struct input_transition input_state_ground_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, input_c0_dispatch, NULL }, { 0x19, 0x19, input_c0_dispatch, NULL }, { 0x1c, 0x1f, input_c0_dispatch, NULL }, { 0x20, 0x7e, input_print, NULL }, { 0x7f, 0x7f, NULL, NULL }, { 0x80, 0xff, input_top_bit_set, NULL }, { -1, -1, NULL, NULL } }; /* esc_enter state table. 
*/ static const struct input_transition input_state_esc_enter_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, input_c0_dispatch, NULL }, { 0x19, 0x19, input_c0_dispatch, NULL }, { 0x1c, 0x1f, input_c0_dispatch, NULL }, { 0x20, 0x2f, input_intermediate, &input_state_esc_intermediate }, { 0x30, 0x4f, input_esc_dispatch, &input_state_ground }, { 0x50, 0x50, NULL, &input_state_dcs_enter }, { 0x51, 0x57, input_esc_dispatch, &input_state_ground }, { 0x58, 0x58, NULL, &input_state_consume_st }, { 0x59, 0x59, input_esc_dispatch, &input_state_ground }, { 0x5a, 0x5a, input_esc_dispatch, &input_state_ground }, { 0x5b, 0x5b, NULL, &input_state_csi_enter }, { 0x5c, 0x5c, input_esc_dispatch, &input_state_ground }, { 0x5d, 0x5d, NULL, &input_state_osc_string }, { 0x5e, 0x5e, NULL, &input_state_consume_st }, { 0x5f, 0x5f, NULL, &input_state_apc_string }, { 0x60, 0x6a, input_esc_dispatch, &input_state_ground }, { 0x6b, 0x6b, NULL, &input_state_rename_string }, { 0x6c, 0x7e, input_esc_dispatch, &input_state_ground }, { 0x7f, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* esc_intermediate state table. */ static const struct input_transition input_state_esc_intermediate_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, input_c0_dispatch, NULL }, { 0x19, 0x19, input_c0_dispatch, NULL }, { 0x1c, 0x1f, input_c0_dispatch, NULL }, { 0x20, 0x2f, input_intermediate, NULL }, { 0x30, 0x7e, input_esc_dispatch, &input_state_ground }, { 0x7f, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* csi_enter state table. */ static const struct input_transition input_state_csi_enter_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, input_c0_dispatch, NULL }, { 0x19, 0x19, input_c0_dispatch, NULL }, { 0x1c, 0x1f, input_c0_dispatch, NULL }, { 0x20, 0x2f, input_intermediate, &input_state_csi_intermediate }, { 0x30, 0x39, input_parameter, &input_state_csi_parameter }, { 0x3a, 0x3a, input_parameter, &input_state_csi_parameter }, { 0x3b, 0x3b, input_parameter, &input_state_csi_parameter }, { 0x3c, 0x3f, input_intermediate, &input_state_csi_parameter }, { 0x40, 0x7e, input_csi_dispatch, &input_state_ground }, { 0x7f, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* csi_parameter state table. */ static const struct input_transition input_state_csi_parameter_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, input_c0_dispatch, NULL }, { 0x19, 0x19, input_c0_dispatch, NULL }, { 0x1c, 0x1f, input_c0_dispatch, NULL }, { 0x20, 0x2f, input_intermediate, &input_state_csi_intermediate }, { 0x30, 0x39, input_parameter, NULL }, { 0x3a, 0x3a, input_parameter, NULL }, { 0x3b, 0x3b, input_parameter, NULL }, { 0x3c, 0x3f, NULL, &input_state_csi_ignore }, { 0x40, 0x7e, input_csi_dispatch, &input_state_ground }, { 0x7f, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* csi_intermediate state table. */ static const struct input_transition input_state_csi_intermediate_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, input_c0_dispatch, NULL }, { 0x19, 0x19, input_c0_dispatch, NULL }, { 0x1c, 0x1f, input_c0_dispatch, NULL }, { 0x20, 0x2f, input_intermediate, NULL }, { 0x30, 0x3f, NULL, &input_state_csi_ignore }, { 0x40, 0x7e, input_csi_dispatch, &input_state_ground }, { 0x7f, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* csi_ignore state table. 
*/ static const struct input_transition input_state_csi_ignore_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, input_c0_dispatch, NULL }, { 0x19, 0x19, input_c0_dispatch, NULL }, { 0x1c, 0x1f, input_c0_dispatch, NULL }, { 0x20, 0x3f, NULL, NULL }, { 0x40, 0x7e, NULL, &input_state_ground }, { 0x7f, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* dcs_enter state table. */ static const struct input_transition input_state_dcs_enter_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, NULL, NULL }, { 0x19, 0x19, NULL, NULL }, { 0x1c, 0x1f, NULL, NULL }, { 0x20, 0x2f, input_intermediate, &input_state_dcs_intermediate }, { 0x30, 0x39, input_parameter, &input_state_dcs_parameter }, { 0x3a, 0x3a, NULL, &input_state_dcs_ignore }, { 0x3b, 0x3b, input_parameter, &input_state_dcs_parameter }, { 0x3c, 0x3f, input_intermediate, &input_state_dcs_parameter }, { 0x40, 0x7e, input_input, &input_state_dcs_handler }, { 0x7f, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* dcs_parameter state table. */ static const struct input_transition input_state_dcs_parameter_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, NULL, NULL }, { 0x19, 0x19, NULL, NULL }, { 0x1c, 0x1f, NULL, NULL }, { 0x20, 0x2f, input_intermediate, &input_state_dcs_intermediate }, { 0x30, 0x39, input_parameter, NULL }, { 0x3a, 0x3a, NULL, &input_state_dcs_ignore }, { 0x3b, 0x3b, input_parameter, NULL }, { 0x3c, 0x3f, NULL, &input_state_dcs_ignore }, { 0x40, 0x7e, input_input, &input_state_dcs_handler }, { 0x7f, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* dcs_intermediate state table. */ static const struct input_transition input_state_dcs_intermediate_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, NULL, NULL }, { 0x19, 0x19, NULL, NULL }, { 0x1c, 0x1f, NULL, NULL }, { 0x20, 0x2f, input_intermediate, NULL }, { 0x30, 0x3f, NULL, &input_state_dcs_ignore }, { 0x40, 0x7e, input_input, &input_state_dcs_handler }, { 0x7f, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* dcs_handler state table. */ static const struct input_transition input_state_dcs_handler_table[] = { /* No INPUT_STATE_ANYWHERE */ { 0x00, 0x1a, input_input, NULL }, { 0x1b, 0x1b, NULL, &input_state_dcs_escape }, { 0x1c, 0xff, input_input, NULL }, { -1, -1, NULL, NULL } }; /* dcs_escape state table. */ static const struct input_transition input_state_dcs_escape_table[] = { /* No INPUT_STATE_ANYWHERE */ { 0x00, 0x5b, input_input, &input_state_dcs_handler }, { 0x5c, 0x5c, input_dcs_dispatch, &input_state_ground }, { 0x5d, 0xff, input_input, &input_state_dcs_handler }, { -1, -1, NULL, NULL } }; /* dcs_ignore state table. */ static const struct input_transition input_state_dcs_ignore_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, NULL, NULL }, { 0x19, 0x19, NULL, NULL }, { 0x1c, 0x1f, NULL, NULL }, { 0x20, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* osc_string state table. */ static const struct input_transition input_state_osc_string_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x06, NULL, NULL }, { 0x07, 0x07, input_end_bel, &input_state_ground }, { 0x08, 0x17, NULL, NULL }, { 0x19, 0x19, NULL, NULL }, { 0x1c, 0x1f, NULL, NULL }, { 0x20, 0xff, input_input, NULL }, { -1, -1, NULL, NULL } }; /* apc_string state table. */ static const struct input_transition input_state_apc_string_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, NULL, NULL }, { 0x19, 0x19, NULL, NULL }, { 0x1c, 0x1f, NULL, NULL }, { 0x20, 0xff, input_input, NULL }, { -1, -1, NULL, NULL } }; /* rename_string state table. 
*/ static const struct input_transition input_state_rename_string_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, NULL, NULL }, { 0x19, 0x19, NULL, NULL }, { 0x1c, 0x1f, NULL, NULL }, { 0x20, 0xff, input_input, NULL }, { -1, -1, NULL, NULL } }; /* consume_st state table. */ static const struct input_transition input_state_consume_st_table[] = { INPUT_STATE_ANYWHERE, { 0x00, 0x17, NULL, NULL }, { 0x19, 0x19, NULL, NULL }, { 0x1c, 0x1f, NULL, NULL }, { 0x20, 0xff, NULL, NULL }, { -1, -1, NULL, NULL } }; /* Input table compare. */ static int input_table_compare(const void *key, const void *value) { const struct input_ctx *ictx = key; const struct input_table_entry *entry = value; if (ictx->ch != entry->ch) return (ictx->ch - entry->ch); return (strcmp(ictx->interm_buf, entry->interm)); } /* * Timer - if this expires then have been waiting for a terminator for too * long, so reset to ground. */ static void input_timer_callback(__unused int fd, __unused short events, void *arg) { struct input_ctx *ictx = arg; log_debug("%s: %s expired" , __func__, ictx->state->name); input_reset(ictx, 0); } /* Start the timer. */ static void input_start_timer(struct input_ctx *ictx) { struct timeval tv = { .tv_sec = 5, .tv_usec = 0 }; event_del(&ictx->timer); event_add(&ictx->timer, &tv); } /* Reset cell state to default. */ static void input_reset_cell(struct input_ctx *ictx) { memcpy(&ictx->cell.cell, &grid_default_cell, sizeof ictx->cell.cell); ictx->cell.set = 0; ictx->cell.g0set = ictx->cell.g1set = 0; memcpy(&ictx->old_cell, &ictx->cell, sizeof ictx->old_cell); ictx->old_cx = 0; ictx->old_cy = 0; } /* Save screen state. */ static void input_save_state(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; struct screen *s = sctx->s; memcpy(&ictx->old_cell, &ictx->cell, sizeof ictx->old_cell); ictx->old_cx = s->cx; ictx->old_cy = s->cy; ictx->old_mode = s->mode; } /* Restore screen state. */ static void input_restore_state(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; memcpy(&ictx->cell, &ictx->old_cell, sizeof ictx->cell); if (ictx->old_mode & MODE_ORIGIN) screen_write_mode_set(sctx, MODE_ORIGIN); else screen_write_mode_clear(sctx, MODE_ORIGIN); screen_write_cursormove(sctx, ictx->old_cx, ictx->old_cy, 0); } /* Initialise input parser. */ struct input_ctx * input_init(struct window_pane *wp, struct bufferevent *bev) { struct input_ctx *ictx; ictx = xcalloc(1, sizeof *ictx); ictx->wp = wp; ictx->event = bev; ictx->input_space = INPUT_BUF_START; ictx->input_buf = xmalloc(INPUT_BUF_START); ictx->since_ground = evbuffer_new(); if (ictx->since_ground == NULL) fatalx("out of memory"); evtimer_set(&ictx->timer, input_timer_callback, ictx); input_reset(ictx, 0); return (ictx); } /* Destroy input parser. */ void input_free(struct input_ctx *ictx) { u_int i; for (i = 0; i < ictx->param_list_len; i++) { if (ictx->param_list[i].type == INPUT_STRING) free(ictx->param_list[i].str); } event_del(&ictx->timer); free(ictx->input_buf); evbuffer_free(ictx->since_ground); free(ictx); } /* Reset input state and clear screen. */ void input_reset(struct input_ctx *ictx, int clear) { struct screen_write_ctx *sctx = &ictx->ctx; struct window_pane *wp = ictx->wp; input_reset_cell(ictx); if (clear && wp != NULL) { if (TAILQ_EMPTY(&wp->modes)) screen_write_start_pane(sctx, wp, &wp->base); else screen_write_start(sctx, &wp->base); screen_write_reset(sctx); screen_write_stop(sctx); } input_clear(ictx); ictx->last = -1; ictx->state = &input_state_ground; ictx->flags = 0; } /* Return pending data. 
*/ struct evbuffer * input_pending(struct input_ctx *ictx) { return (ictx->since_ground); } /* Change input state. */ static void input_set_state(struct input_ctx *ictx, const struct input_transition *itr) { if (ictx->state->exit != NULL) ictx->state->exit(ictx); ictx->state = itr->state; if (ictx->state->enter != NULL) ictx->state->enter(ictx); } /* Parse data. */ static void input_parse(struct input_ctx *ictx, u_char *buf, size_t len) { struct screen_write_ctx *sctx = &ictx->ctx; const struct input_state *state = NULL; const struct input_transition *itr = NULL; size_t off = 0; /* Parse the input. */ while (off < len) { ictx->ch = buf[off++]; /* Find the transition. */ if (ictx->state != state || itr == NULL || ictx->ch < itr->first || ictx->ch > itr->last) { itr = ictx->state->transitions; while (itr->first != -1 && itr->last != -1) { if (ictx->ch >= itr->first && ictx->ch <= itr->last) break; itr++; } if (itr->first == -1 || itr->last == -1) { /* No transition? Eh? */ fatalx("no transition from state"); } } state = ictx->state; /* * Any state except print stops the current collection. This is * an optimization to avoid checking if the attributes have * changed for every character. It will stop unnecessarily for * sequences that don't make a terminal change, but they should * be the minority. */ if (itr->handler != input_print) screen_write_collect_end(sctx); /* * Execute the handler, if any. Don't switch state if it * returns non-zero. */ if (itr->handler != NULL && itr->handler(ictx) != 0) continue; /* And switch state, if necessary. */ if (itr->state != NULL) input_set_state(ictx, itr); /* If not in ground state, save input. */ if (ictx->state != &input_state_ground) evbuffer_add(ictx->since_ground, &ictx->ch, 1); } } /* Parse input from pane. */ void input_parse_pane(struct window_pane *wp) { void *new_data; size_t new_size; new_data = window_pane_get_new_data(wp, &wp->offset, &new_size); input_parse_buffer(wp, new_data, new_size); window_pane_update_used_data(wp, &wp->offset, new_size); } /* Parse given input. */ void input_parse_buffer(struct window_pane *wp, u_char *buf, size_t len) { struct input_ctx *ictx = wp->ictx; struct screen_write_ctx *sctx = &ictx->ctx; if (len == 0) return; window_update_activity(wp->window); wp->flags |= PANE_CHANGED; /* NULL wp if there is a mode set as don't want to update the tty. */ if (TAILQ_EMPTY(&wp->modes)) screen_write_start_pane(sctx, wp, &wp->base); else screen_write_start(sctx, &wp->base); log_debug("%s: %%%u %s, %zu bytes: %.*s", __func__, wp->id, ictx->state->name, len, (int)len, buf); input_parse(ictx, buf, len); screen_write_stop(sctx); } /* Parse given input for screen. */ void input_parse_screen(struct input_ctx *ictx, struct screen *s, screen_write_init_ctx_cb cb, void *arg, u_char *buf, size_t len) { struct screen_write_ctx *sctx = &ictx->ctx; if (len == 0) return; screen_write_start_callback(sctx, s, cb, arg); input_parse(ictx, buf, len); screen_write_stop(sctx); } /* Split the parameter list (if any). 
*/ static int input_split(struct input_ctx *ictx) { const char *errstr; char *ptr, *out; struct input_param *ip; u_int i; for (i = 0; i < ictx->param_list_len; i++) { if (ictx->param_list[i].type == INPUT_STRING) free(ictx->param_list[i].str); } ictx->param_list_len = 0; if (ictx->param_len == 0) return (0); ip = &ictx->param_list[0]; ptr = ictx->param_buf; while ((out = strsep(&ptr, ";")) != NULL) { if (*out == '\0') ip->type = INPUT_MISSING; else { if (strchr(out, ':') != NULL) { ip->type = INPUT_STRING; ip->str = xstrdup(out); } else { ip->type = INPUT_NUMBER; ip->num = strtonum(out, 0, INT_MAX, &errstr); if (errstr != NULL) return (-1); } } ip = &ictx->param_list[++ictx->param_list_len]; if (ictx->param_list_len == nitems(ictx->param_list)) return (-1); } for (i = 0; i < ictx->param_list_len; i++) { ip = &ictx->param_list[i]; if (ip->type == INPUT_MISSING) log_debug("parameter %u: missing", i); else if (ip->type == INPUT_STRING) log_debug("parameter %u: string %s", i, ip->str); else if (ip->type == INPUT_NUMBER) log_debug("parameter %u: number %d", i, ip->num); } return (0); } /* Get an argument or return default value. */ static int input_get(struct input_ctx *ictx, u_int validx, int minval, int defval) { struct input_param *ip; int retval; if (validx >= ictx->param_list_len) return (defval); ip = &ictx->param_list[validx]; if (ip->type == INPUT_MISSING) return (defval); if (ip->type == INPUT_STRING) return (-1); retval = ip->num; if (retval < minval) return (minval); return (retval); } /* Reply to terminal query. */ static void input_reply(struct input_ctx *ictx, const char *fmt, ...) { struct bufferevent *bev = ictx->event; va_list ap; char *reply; va_start(ap, fmt); xvasprintf(&reply, fmt, ap); va_end(ap); bufferevent_write(bev, reply, strlen(reply)); free(reply); } /* Clear saved state. */ static void input_clear(struct input_ctx *ictx) { event_del(&ictx->timer); *ictx->interm_buf = '\0'; ictx->interm_len = 0; *ictx->param_buf = '\0'; ictx->param_len = 0; *ictx->input_buf = '\0'; ictx->input_len = 0; ictx->input_end = INPUT_END_ST; ictx->flags &= ~INPUT_DISCARD; } /* Reset for ground state. */ static void input_ground(struct input_ctx *ictx) { event_del(&ictx->timer); evbuffer_drain(ictx->since_ground, EVBUFFER_LENGTH(ictx->since_ground)); if (ictx->input_space > INPUT_BUF_START) { ictx->input_space = INPUT_BUF_START; ictx->input_buf = xrealloc(ictx->input_buf, INPUT_BUF_START); } } /* Output this character to the screen. */ static int input_print(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; int set; ictx->utf8started = 0; /* can't be valid UTF-8 */ set = ictx->cell.set == 0 ? ictx->cell.g0set : ictx->cell.g1set; if (set == 1) ictx->cell.cell.attr |= GRID_ATTR_CHARSET; else ictx->cell.cell.attr &= ~GRID_ATTR_CHARSET; utf8_set(&ictx->cell.cell.data, ictx->ch); screen_write_collect_add(sctx, &ictx->cell.cell); ictx->last = ictx->ch; ictx->cell.cell.attr &= ~GRID_ATTR_CHARSET; return (0); } /* Collect intermediate string. */ static int input_intermediate(struct input_ctx *ictx) { if (ictx->interm_len == (sizeof ictx->interm_buf) - 1) ictx->flags |= INPUT_DISCARD; else { ictx->interm_buf[ictx->interm_len++] = ictx->ch; ictx->interm_buf[ictx->interm_len] = '\0'; } return (0); } /* Collect parameter string. 
*/ static int input_parameter(struct input_ctx *ictx) { if (ictx->param_len == (sizeof ictx->param_buf) - 1) ictx->flags |= INPUT_DISCARD; else { ictx->param_buf[ictx->param_len++] = ictx->ch; ictx->param_buf[ictx->param_len] = '\0'; } return (0); } /* Collect input string. */ static int input_input(struct input_ctx *ictx) { size_t available; available = ictx->input_space; while (ictx->input_len + 1 >= available) { available *= 2; if (available > INPUT_BUF_LIMIT) { ictx->flags |= INPUT_DISCARD; return (0); } ictx->input_buf = xrealloc(ictx->input_buf, available); ictx->input_space = available; } ictx->input_buf[ictx->input_len++] = ictx->ch; ictx->input_buf[ictx->input_len] = '\0'; return (0); } /* Execute C0 control sequence. */ static int input_c0_dispatch(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; struct window_pane *wp = ictx->wp; struct screen *s = sctx->s; ictx->utf8started = 0; /* can't be valid UTF-8 */ log_debug("%s: '%c'", __func__, ictx->ch); switch (ictx->ch) { case '\000': /* NUL */ break; case '\007': /* BEL */ if (wp != NULL) alerts_queue(wp->window, WINDOW_BELL); break; case '\010': /* BS */ screen_write_backspace(sctx); break; case '\011': /* HT */ /* Don't tab beyond the end of the line. */ if (s->cx >= screen_size_x(s) - 1) break; /* Find the next tab point, or use the last column if none. */ do { s->cx++; if (bit_test(s->tabs, s->cx)) break; } while (s->cx < screen_size_x(s) - 1); break; case '\012': /* LF */ case '\013': /* VT */ case '\014': /* FF */ screen_write_linefeed(sctx, 0, ictx->cell.cell.bg); if (s->mode & MODE_CRLF) screen_write_carriagereturn(sctx); break; case '\015': /* CR */ screen_write_carriagereturn(sctx); break; case '\016': /* SO */ ictx->cell.set = 1; break; case '\017': /* SI */ ictx->cell.set = 0; break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } ictx->last = -1; return (0); } /* Execute escape sequence. 
*/ static int input_esc_dispatch(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; struct window_pane *wp = ictx->wp; struct screen *s = sctx->s; struct input_table_entry *entry; if (ictx->flags & INPUT_DISCARD) return (0); log_debug("%s: '%c', %s", __func__, ictx->ch, ictx->interm_buf); entry = bsearch(ictx, input_esc_table, nitems(input_esc_table), sizeof input_esc_table[0], input_table_compare); if (entry == NULL) { log_debug("%s: unknown '%c'", __func__, ictx->ch); return (0); } switch (entry->type) { case INPUT_ESC_RIS: if (wp != NULL) window_pane_reset_palette(wp); input_reset_cell(ictx); screen_write_reset(sctx); break; case INPUT_ESC_IND: screen_write_linefeed(sctx, 0, ictx->cell.cell.bg); break; case INPUT_ESC_NEL: screen_write_carriagereturn(sctx); screen_write_linefeed(sctx, 0, ictx->cell.cell.bg); break; case INPUT_ESC_HTS: if (s->cx < screen_size_x(s)) bit_set(s->tabs, s->cx); break; case INPUT_ESC_RI: screen_write_reverseindex(sctx, ictx->cell.cell.bg); break; case INPUT_ESC_DECKPAM: screen_write_mode_set(sctx, MODE_KKEYPAD); break; case INPUT_ESC_DECKPNM: screen_write_mode_clear(sctx, MODE_KKEYPAD); break; case INPUT_ESC_DECSC: input_save_state(ictx); break; case INPUT_ESC_DECRC: input_restore_state(ictx); break; case INPUT_ESC_DECALN: screen_write_alignmenttest(sctx); break; case INPUT_ESC_SCSG0_ON: ictx->cell.g0set = 1; break; case INPUT_ESC_SCSG0_OFF: ictx->cell.g0set = 0; break; case INPUT_ESC_SCSG1_ON: ictx->cell.g1set = 1; break; case INPUT_ESC_SCSG1_OFF: ictx->cell.g1set = 0; break; case INPUT_ESC_ST: /* ST terminates OSC but the state transition already did it. */ break; } ictx->last = -1; return (0); } /* Execute control sequence. */ static int input_csi_dispatch(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; struct screen *s = sctx->s; struct input_table_entry *entry; int i, n, m; u_int cx, bg = ictx->cell.cell.bg; if (ictx->flags & INPUT_DISCARD) return (0); log_debug("%s: '%c' \"%s\" \"%s\"", __func__, ictx->ch, ictx->interm_buf, ictx->param_buf); if (input_split(ictx) != 0) return (0); entry = bsearch(ictx, input_csi_table, nitems(input_csi_table), sizeof input_csi_table[0], input_table_compare); if (entry == NULL) { log_debug("%s: unknown '%c'", __func__, ictx->ch); return (0); } switch (entry->type) { case INPUT_CSI_CBT: /* Find the previous tab point, n times. 
*/ cx = s->cx; if (cx > screen_size_x(s) - 1) cx = screen_size_x(s) - 1; n = input_get(ictx, 0, 1, 1); if (n == -1) break; while (cx > 0 && n-- > 0) { do cx--; while (cx > 0 && !bit_test(s->tabs, cx)); } s->cx = cx; break; case INPUT_CSI_CUB: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_cursorleft(sctx, n); break; case INPUT_CSI_CUD: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_cursordown(sctx, n); break; case INPUT_CSI_CUF: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_cursorright(sctx, n); break; case INPUT_CSI_CUP: n = input_get(ictx, 0, 1, 1); m = input_get(ictx, 1, 1, 1); if (n != -1 && m != -1) screen_write_cursormove(sctx, m - 1, n - 1, 1); break; case INPUT_CSI_MODSET: n = input_get(ictx, 0, 0, 0); m = input_get(ictx, 1, 0, 0); if (n == 0 || (n == 4 && m == 0)) screen_write_mode_clear(sctx, MODE_KEXTENDED); else if (n == 4 && (m == 1 || m == 2)) screen_write_mode_set(sctx, MODE_KEXTENDED); break; case INPUT_CSI_MODOFF: n = input_get(ictx, 0, 0, 0); if (n == 4) screen_write_mode_clear(sctx, MODE_KEXTENDED); break; case INPUT_CSI_WINOPS: input_csi_dispatch_winops(ictx); break; case INPUT_CSI_CUU: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_cursorup(sctx, n); break; case INPUT_CSI_CNL: n = input_get(ictx, 0, 1, 1); if (n != -1) { screen_write_carriagereturn(sctx); screen_write_cursordown(sctx, n); } break; case INPUT_CSI_CPL: n = input_get(ictx, 0, 1, 1); if (n != -1) { screen_write_carriagereturn(sctx); screen_write_cursorup(sctx, n); } break; case INPUT_CSI_DA: switch (input_get(ictx, 0, 0, 0)) { case -1: break; case 0: input_reply(ictx, "\033[?1;2c"); break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } break; case INPUT_CSI_DA_TWO: switch (input_get(ictx, 0, 0, 0)) { case -1: break; case 0: input_reply(ictx, "\033[>84;0;0c"); break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } break; case INPUT_CSI_ECH: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_clearcharacter(sctx, n, bg); break; case INPUT_CSI_DCH: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_deletecharacter(sctx, n, bg); break; case INPUT_CSI_DECSTBM: n = input_get(ictx, 0, 1, 1); m = input_get(ictx, 1, 1, screen_size_y(s)); if (n != -1 && m != -1) screen_write_scrollregion(sctx, n - 1, m - 1); break; case INPUT_CSI_DL: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_deleteline(sctx, n, bg); break; case INPUT_CSI_DSR: switch (input_get(ictx, 0, 0, 0)) { case -1: break; case 5: input_reply(ictx, "\033[0n"); break; case 6: input_reply(ictx, "\033[%u;%uR", s->cy + 1, s->cx + 1); break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } break; case INPUT_CSI_ED: switch (input_get(ictx, 0, 0, 0)) { case -1: break; case 0: screen_write_clearendofscreen(sctx, bg); break; case 1: screen_write_clearstartofscreen(sctx, bg); break; case 2: screen_write_clearscreen(sctx, bg); break; case 3: if (input_get(ictx, 1, 0, 0) == 0) { /* * Linux console extension to clear history * (for example before locking the screen). 
*/ screen_write_clearhistory(sctx); } break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } break; case INPUT_CSI_EL: switch (input_get(ictx, 0, 0, 0)) { case -1: break; case 0: screen_write_clearendofline(sctx, bg); break; case 1: screen_write_clearstartofline(sctx, bg); break; case 2: screen_write_clearline(sctx, bg); break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } break; case INPUT_CSI_HPA: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_cursormove(sctx, n - 1, -1, 1); break; case INPUT_CSI_ICH: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_insertcharacter(sctx, n, bg); break; case INPUT_CSI_IL: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_insertline(sctx, n, bg); break; case INPUT_CSI_REP: n = input_get(ictx, 0, 1, 1); if (n == -1) break; if (ictx->last == -1) break; ictx->ch = ictx->last; for (i = 0; i < n; i++) input_print(ictx); break; case INPUT_CSI_RCP: input_restore_state(ictx); break; case INPUT_CSI_RM: input_csi_dispatch_rm(ictx); break; case INPUT_CSI_RM_PRIVATE: input_csi_dispatch_rm_private(ictx); break; case INPUT_CSI_SCP: input_save_state(ictx); break; case INPUT_CSI_SGR: input_csi_dispatch_sgr(ictx); break; case INPUT_CSI_SM: input_csi_dispatch_sm(ictx); break; case INPUT_CSI_SM_PRIVATE: input_csi_dispatch_sm_private(ictx); break; case INPUT_CSI_SU: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_scrollup(sctx, n, bg); break; case INPUT_CSI_SD: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_scrolldown(sctx, n, bg); break; case INPUT_CSI_TBC: switch (input_get(ictx, 0, 0, 0)) { case -1: break; case 0: if (s->cx < screen_size_x(s)) bit_clear(s->tabs, s->cx); break; case 3: bit_nclear(s->tabs, 0, screen_size_x(s) - 1); break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } break; case INPUT_CSI_VPA: n = input_get(ictx, 0, 1, 1); if (n != -1) screen_write_cursormove(sctx, -1, n - 1, 1); break; case INPUT_CSI_DECSCUSR: n = input_get(ictx, 0, 0, 0); if (n != -1) screen_set_cursor_style(s, n); break; case INPUT_CSI_XDA: n = input_get(ictx, 0, 0, 0); if (n == 0) input_reply(ictx, "\033P>|tmux %s\033\\", getversion()); break; } ictx->last = -1; return (0); } /* Handle CSI RM. */ static void input_csi_dispatch_rm(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; u_int i; for (i = 0; i < ictx->param_list_len; i++) { switch (input_get(ictx, i, 0, -1)) { case -1: break; case 4: /* IRM */ screen_write_mode_clear(sctx, MODE_INSERT); break; case 34: screen_write_mode_set(sctx, MODE_BLINKING); break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } } } /* Handle CSI private RM. 
*/ static void input_csi_dispatch_rm_private(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; struct grid_cell *gc = &ictx->cell.cell; u_int i; for (i = 0; i < ictx->param_list_len; i++) { switch (input_get(ictx, i, 0, -1)) { case -1: break; case 1: /* DECCKM */ screen_write_mode_clear(sctx, MODE_KCURSOR); break; case 3: /* DECCOLM */ screen_write_cursormove(sctx, 0, 0, 1); screen_write_clearscreen(sctx, gc->bg); break; case 6: /* DECOM */ screen_write_mode_clear(sctx, MODE_ORIGIN); screen_write_cursormove(sctx, 0, 0, 1); break; case 7: /* DECAWM */ screen_write_mode_clear(sctx, MODE_WRAP); break; case 12: screen_write_mode_clear(sctx, MODE_BLINKING); break; case 25: /* TCEM */ screen_write_mode_clear(sctx, MODE_CURSOR); break; case 1000: case 1001: case 1002: case 1003: screen_write_mode_clear(sctx, ALL_MOUSE_MODES); break; case 1004: screen_write_mode_clear(sctx, MODE_FOCUSON); break; case 1005: screen_write_mode_clear(sctx, MODE_MOUSE_UTF8); break; case 1006: screen_write_mode_clear(sctx, MODE_MOUSE_SGR); break; case 47: case 1047: screen_write_alternateoff(sctx, gc, 0); break; case 1049: screen_write_alternateoff(sctx, gc, 1); break; case 2004: screen_write_mode_clear(sctx, MODE_BRACKETPASTE); break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } } } /* Handle CSI SM. */ static void input_csi_dispatch_sm(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; u_int i; for (i = 0; i < ictx->param_list_len; i++) { switch (input_get(ictx, i, 0, -1)) { case -1: break; case 4: /* IRM */ screen_write_mode_set(sctx, MODE_INSERT); break; case 34: screen_write_mode_clear(sctx, MODE_BLINKING); break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } } } /* Handle CSI private SM. */ static void input_csi_dispatch_sm_private(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; struct window_pane *wp = ictx->wp; struct grid_cell *gc = &ictx->cell.cell; u_int i; for (i = 0; i < ictx->param_list_len; i++) { switch (input_get(ictx, i, 0, -1)) { case -1: break; case 1: /* DECCKM */ screen_write_mode_set(sctx, MODE_KCURSOR); break; case 3: /* DECCOLM */ screen_write_cursormove(sctx, 0, 0, 1); screen_write_clearscreen(sctx, ictx->cell.cell.bg); break; case 6: /* DECOM */ screen_write_mode_set(sctx, MODE_ORIGIN); screen_write_cursormove(sctx, 0, 0, 1); break; case 7: /* DECAWM */ screen_write_mode_set(sctx, MODE_WRAP); break; case 12: screen_write_mode_set(sctx, MODE_BLINKING); break; case 25: /* TCEM */ screen_write_mode_set(sctx, MODE_CURSOR); break; case 1000: screen_write_mode_clear(sctx, ALL_MOUSE_MODES); screen_write_mode_set(sctx, MODE_MOUSE_STANDARD); break; case 1002: screen_write_mode_clear(sctx, ALL_MOUSE_MODES); screen_write_mode_set(sctx, MODE_MOUSE_BUTTON); break; case 1003: screen_write_mode_clear(sctx, ALL_MOUSE_MODES); screen_write_mode_set(sctx, MODE_MOUSE_ALL); break; case 1004: if (sctx->s->mode & MODE_FOCUSON) break; screen_write_mode_set(sctx, MODE_FOCUSON); if (wp != NULL) wp->flags |= PANE_FOCUSPUSH; /* force update */ break; case 1005: screen_write_mode_set(sctx, MODE_MOUSE_UTF8); break; case 1006: screen_write_mode_set(sctx, MODE_MOUSE_SGR); break; case 47: case 1047: screen_write_alternateon(sctx, gc, 0); break; case 1049: screen_write_alternateon(sctx, gc, 1); break; case 2004: screen_write_mode_set(sctx, MODE_BRACKETPASTE); break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } } } /* Handle CSI window operations. 
*/ static void input_csi_dispatch_winops(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; struct screen *s = sctx->s; struct window_pane *wp = ictx->wp; u_int x = screen_size_x(s), y = screen_size_y(s); int n, m; m = 0; while ((n = input_get(ictx, m, 0, -1)) != -1) { switch (n) { case 1: case 2: case 5: case 6: case 7: case 11: case 13: case 14: case 19: case 20: case 21: case 24: break; case 3: case 4: case 8: m++; if (input_get(ictx, m, 0, -1) == -1) return; /* FALLTHROUGH */ case 9: case 10: m++; if (input_get(ictx, m, 0, -1) == -1) return; break; case 22: m++; switch (input_get(ictx, m, 0, -1)) { case -1: return; case 0: case 2: screen_push_title(sctx->s); break; } break; case 23: m++; switch (input_get(ictx, m, 0, -1)) { case -1: return; case 0: case 2: screen_pop_title(sctx->s); if (wp != NULL) { notify_pane("pane-title-changed", wp); server_redraw_window_borders(wp->window); server_status_window(wp->window); } break; } break; case 18: input_reply(ictx, "\033[8;%u;%ut", x, y); break; default: log_debug("%s: unknown '%c'", __func__, ictx->ch); break; } m++; } } /* Helper for 256 colour SGR. */ static int input_csi_dispatch_sgr_256_do(struct input_ctx *ictx, int fgbg, int c) { struct grid_cell *gc = &ictx->cell.cell; if (c == -1 || c > 255) { if (fgbg == 38) gc->fg = 8; else if (fgbg == 48) gc->bg = 8; } else { if (fgbg == 38) gc->fg = c | COLOUR_FLAG_256; else if (fgbg == 48) gc->bg = c | COLOUR_FLAG_256; else if (fgbg == 58) gc->us = c | COLOUR_FLAG_256; } return (1); } /* Handle CSI SGR for 256 colours. */ static void input_csi_dispatch_sgr_256(struct input_ctx *ictx, int fgbg, u_int *i) { int c; c = input_get(ictx, (*i) + 1, 0, -1); if (input_csi_dispatch_sgr_256_do(ictx, fgbg, c)) (*i)++; } /* Helper for RGB colour SGR. */ static int input_csi_dispatch_sgr_rgb_do(struct input_ctx *ictx, int fgbg, int r, int g, int b) { struct grid_cell *gc = &ictx->cell.cell; if (r == -1 || r > 255) return (0); if (g == -1 || g > 255) return (0); if (b == -1 || b > 255) return (0); if (fgbg == 38) gc->fg = colour_join_rgb(r, g, b); else if (fgbg == 48) gc->bg = colour_join_rgb(r, g, b); else if (fgbg == 58) gc->us = colour_join_rgb(r, g, b); return (1); } /* Handle CSI SGR for RGB colours. */ static void input_csi_dispatch_sgr_rgb(struct input_ctx *ictx, int fgbg, u_int *i) { int r, g, b; r = input_get(ictx, (*i) + 1, 0, -1); g = input_get(ictx, (*i) + 2, 0, -1); b = input_get(ictx, (*i) + 3, 0, -1); if (input_csi_dispatch_sgr_rgb_do(ictx, fgbg, r, g, b)) (*i) += 3; } /* Handle CSI SGR with a ISO parameter. 
*/ static void input_csi_dispatch_sgr_colon(struct input_ctx *ictx, u_int i) { struct grid_cell *gc = &ictx->cell.cell; char *s = ictx->param_list[i].str, *copy, *ptr, *out; int p[8]; u_int n; const char *errstr; for (n = 0; n < nitems(p); n++) p[n] = -1; n = 0; ptr = copy = xstrdup(s); while ((out = strsep(&ptr, ":")) != NULL) { if (*out != '\0') { p[n++] = strtonum(out, 0, INT_MAX, &errstr); if (errstr != NULL || n == nitems(p)) { free(copy); return; } } else { n++; if (n == nitems(p)) { free(copy); return; } } log_debug("%s: %u = %d", __func__, n - 1, p[n - 1]); } free(copy); if (n == 0) return; if (p[0] == 4) { if (n != 2) return; switch (p[1]) { case 0: gc->attr &= ~GRID_ATTR_ALL_UNDERSCORE; break; case 1: gc->attr &= ~GRID_ATTR_ALL_UNDERSCORE; gc->attr |= GRID_ATTR_UNDERSCORE; break; case 2: gc->attr &= ~GRID_ATTR_ALL_UNDERSCORE; gc->attr |= GRID_ATTR_UNDERSCORE_2; break; case 3: gc->attr &= ~GRID_ATTR_ALL_UNDERSCORE; gc->attr |= GRID_ATTR_UNDERSCORE_3; break; case 4: gc->attr &= ~GRID_ATTR_ALL_UNDERSCORE; gc->attr |= GRID_ATTR_UNDERSCORE_4; break; case 5: gc->attr &= ~GRID_ATTR_ALL_UNDERSCORE; gc->attr |= GRID_ATTR_UNDERSCORE_5; break; } return; } if (n < 2 || (p[0] != 38 && p[0] != 48 && p[0] != 58)) return; switch (p[1]) { case 2: if (n < 3) break; if (n == 5) i = 2; else i = 3; if (n < i + 3) break; input_csi_dispatch_sgr_rgb_do(ictx, p[0], p[i], p[i + 1], p[i + 2]); break; case 5: if (n < 3) break; input_csi_dispatch_sgr_256_do(ictx, p[0], p[2]); break; } } /* Handle CSI SGR. */ static void input_csi_dispatch_sgr(struct input_ctx *ictx) { struct grid_cell *gc = &ictx->cell.cell; u_int i; int n; if (ictx->param_list_len == 0) { memcpy(gc, &grid_default_cell, sizeof *gc); return; } for (i = 0; i < ictx->param_list_len; i++) { if (ictx->param_list[i].type == INPUT_STRING) { input_csi_dispatch_sgr_colon(ictx, i); continue; } n = input_get(ictx, i, 0, 0); if (n == -1) continue; if (n == 38 || n == 48 || n == 58) { i++; switch (input_get(ictx, i, 0, -1)) { case 2: input_csi_dispatch_sgr_rgb(ictx, n, &i); break; case 5: input_csi_dispatch_sgr_256(ictx, n, &i); break; } continue; } switch (n) { case 0: memcpy(gc, &grid_default_cell, sizeof *gc); break; case 1: gc->attr |= GRID_ATTR_BRIGHT; break; case 2: gc->attr |= GRID_ATTR_DIM; break; case 3: gc->attr |= GRID_ATTR_ITALICS; break; case 4: gc->attr &= ~GRID_ATTR_ALL_UNDERSCORE; gc->attr |= GRID_ATTR_UNDERSCORE; break; case 5: gc->attr |= GRID_ATTR_BLINK; break; case 7: gc->attr |= GRID_ATTR_REVERSE; break; case 8: gc->attr |= GRID_ATTR_HIDDEN; break; case 9: gc->attr |= GRID_ATTR_STRIKETHROUGH; break; case 22: gc->attr &= ~(GRID_ATTR_BRIGHT|GRID_ATTR_DIM); break; case 23: gc->attr &= ~GRID_ATTR_ITALICS; break; case 24: gc->attr &= ~GRID_ATTR_ALL_UNDERSCORE; break; case 25: gc->attr &= ~GRID_ATTR_BLINK; break; case 27: gc->attr &= ~GRID_ATTR_REVERSE; break; case 28: gc->attr &= ~GRID_ATTR_HIDDEN; break; case 29: gc->attr &= ~GRID_ATTR_STRIKETHROUGH; break; case 30: case 31: case 32: case 33: case 34: case 35: case 36: case 37: gc->fg = n - 30; break; case 39: gc->fg = 8; break; case 40: case 41: case 42: case 43: case 44: case 45: case 46: case 47: gc->bg = n - 40; break; case 49: gc->bg = 8; break; case 53: gc->attr |= GRID_ATTR_OVERLINE; break; case 55: gc->attr &= ~GRID_ATTR_OVERLINE; break; case 59: gc->us = 0; break; case 90: case 91: case 92: case 93: case 94: case 95: case 96: case 97: gc->fg = n; break; case 100: case 101: case 102: case 103: case 104: case 105: case 106: case 107: gc->bg = n - 10; break; } } } /* End of input 
with BEL. */ static int input_end_bel(struct input_ctx *ictx) { log_debug("%s", __func__); ictx->input_end = INPUT_END_BEL; return (0); } /* DCS string started. */ static void input_enter_dcs(struct input_ctx *ictx) { log_debug("%s", __func__); input_clear(ictx); input_start_timer(ictx); ictx->last = -1; } /* DCS terminator (ST) received. */ static int input_dcs_dispatch(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; u_char *buf = ictx->input_buf; size_t len = ictx->input_len; const char prefix[] = "tmux;"; const u_int prefixlen = (sizeof prefix) - 1; if (ictx->flags & INPUT_DISCARD) return (0); log_debug("%s: \"%s\"", __func__, buf); if (len >= prefixlen && strncmp(buf, prefix, prefixlen) == 0) screen_write_rawstring(sctx, buf + prefixlen, len - prefixlen); return (0); } /* OSC string started. */ static void input_enter_osc(struct input_ctx *ictx) { log_debug("%s", __func__); input_clear(ictx); input_start_timer(ictx); ictx->last = -1; } /* OSC terminator (ST) received. */ static void input_exit_osc(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; struct window_pane *wp = ictx->wp; u_char *p = ictx->input_buf; u_int option; if (ictx->flags & INPUT_DISCARD) return; if (ictx->input_len < 1 || *p < '0' || *p > '9') return; log_debug("%s: \"%s\" (end %s)", __func__, p, ictx->input_end == INPUT_END_ST ? "ST" : "BEL"); option = 0; while (*p >= '0' && *p <= '9') option = option * 10 + *p++ - '0'; if (*p == ';') p++; switch (option) { case 0: case 2: if (screen_set_title(sctx->s, p) && wp != NULL) { notify_pane("pane-title-changed", wp); server_redraw_window_borders(wp->window); server_status_window(wp->window); } break; case 4: input_osc_4(ictx, p); break; case 7: if (utf8_isvalid(p)) { screen_set_path(sctx->s, p); if (wp != NULL) { server_redraw_window_borders(wp->window); server_status_window(wp->window); } } break; case 10: input_osc_10(ictx, p); break; case 11: input_osc_11(ictx, p); break; case 12: if (utf8_isvalid(p) && *p != '?') /* ? is colour request */ screen_set_cursor_colour(sctx->s, p); break; case 52: input_osc_52(ictx, p); break; case 104: input_osc_104(ictx, p); break; case 112: if (*p == '\0') /* no arguments allowed */ screen_set_cursor_colour(sctx->s, ""); break; default: log_debug("%s: unknown '%u'", __func__, option); break; } } /* APC string started. */ static void input_enter_apc(struct input_ctx *ictx) { log_debug("%s", __func__); input_clear(ictx); input_start_timer(ictx); ictx->last = -1; } /* APC terminator (ST) received. */ static void input_exit_apc(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; struct window_pane *wp = ictx->wp; if (ictx->flags & INPUT_DISCARD) return; log_debug("%s: \"%s\"", __func__, ictx->input_buf); if (screen_set_title(sctx->s, ictx->input_buf) && wp != NULL) { notify_pane("pane-title-changed", wp); server_redraw_window_borders(wp->window); server_status_window(wp->window); } } /* Rename string started. */ static void input_enter_rename(struct input_ctx *ictx) { log_debug("%s", __func__); input_clear(ictx); input_start_timer(ictx); ictx->last = -1; } /* Rename terminator (ST) received. 
*/ static void input_exit_rename(struct input_ctx *ictx) { struct window_pane *wp = ictx->wp; struct options_entry *o; if (wp == NULL) return; if (ictx->flags & INPUT_DISCARD) return; if (!options_get_number(ictx->wp->options, "allow-rename")) return; log_debug("%s: \"%s\"", __func__, ictx->input_buf); if (!utf8_isvalid(ictx->input_buf)) return; if (ictx->input_len == 0) { o = options_get_only(wp->window->options, "automatic-rename"); if (o != NULL) options_remove_or_default(o, -1, NULL); return; } window_set_name(wp->window, ictx->input_buf); options_set_number(wp->window->options, "automatic-rename", 0); server_redraw_window_borders(wp->window); server_status_window(wp->window); } /* Open UTF-8 character. */ static int input_top_bit_set(struct input_ctx *ictx) { struct screen_write_ctx *sctx = &ictx->ctx; struct utf8_data *ud = &ictx->utf8data; ictx->last = -1; if (!ictx->utf8started) { if (utf8_open(ud, ictx->ch) != UTF8_MORE) return (0); ictx->utf8started = 1; return (0); } switch (utf8_append(ud, ictx->ch)) { case UTF8_MORE: return (0); case UTF8_ERROR: ictx->utf8started = 0; return (0); case UTF8_DONE: break; } ictx->utf8started = 0; log_debug("%s %hhu '%*s' (width %hhu)", __func__, ud->size, (int)ud->size, ud->data, ud->width); utf8_copy(&ictx->cell.cell.data, ud); screen_write_collect_add(sctx, &ictx->cell.cell); return (0); } /* Parse colour from OSC. */ static int input_osc_parse_colour(const char *p, u_int *r, u_int *g, u_int *b) { u_int rsize, gsize, bsize; const char *cp, *s = p; if (sscanf(p, "rgb:%x/%x/%x", r, g, b) != 3) return (0); p += 4; cp = strchr(p, '/'); rsize = cp - p; if (rsize == 1) (*r) = (*r) | ((*r) << 4); else if (rsize == 3) (*r) >>= 4; else if (rsize == 4) (*r) >>= 8; else if (rsize != 2) return (0); p = cp + 1; cp = strchr(p, '/'); gsize = cp - p; if (gsize == 1) (*g) = (*g) | ((*g) << 4); else if (gsize == 3) (*g) >>= 4; else if (gsize == 4) (*g) >>= 8; else if (gsize != 2) return (0); bsize = strlen(cp + 1); if (bsize == 1) (*b) = (*b) | ((*b) << 4); else if (bsize == 3) (*b) >>= 4; else if (bsize == 4) (*b) >>= 8; else if (bsize != 2) return (0); log_debug("%s: %s = %02x%02x%02x", __func__, s, *r, *g, *b); return (1); } /* Reply to a colour request. */ static void input_osc_colour_reply(struct input_ctx *ictx, u_int n, int c) { u_char r, g, b; const char *end; if (c == 8 || (~c & COLOUR_FLAG_RGB)) return; colour_split_rgb(c, &r, &g, &b); if (ictx->input_end == INPUT_END_BEL) end = "\007"; else end = "\033\\"; input_reply(ictx, "\033]%u;rgb:%02hhx/%02hhx/%02hhx%s", n, r, g, b, end); } /* Handle the OSC 4 sequence for setting (multiple) palette entries. */ static void input_osc_4(struct input_ctx *ictx, const char *p) { struct window_pane *wp = ictx->wp; char *copy, *s, *next = NULL; long idx; u_int r, g, b; if (wp == NULL) return; copy = s = xstrdup(p); while (s != NULL && *s != '\0') { idx = strtol(s, &next, 10); if (*next++ != ';') goto bad; if (idx < 0 || idx >= 0x100) goto bad; s = strsep(&next, ";"); if (!input_osc_parse_colour(s, &r, &g, &b)) { s = next; continue; } window_pane_set_palette(wp, idx, colour_join_rgb(r, g, b)); s = next; } free(copy); return; bad: log_debug("bad OSC 4: %s", p); free(copy); } /* Handle the OSC 10 sequence for setting and querying foreground colour. 
*/ static void input_osc_10(struct input_ctx *ictx, const char *p) { struct window_pane *wp = ictx->wp; struct grid_cell defaults; u_int r, g, b; if (wp == NULL) return; if (strcmp(p, "?") == 0) { tty_default_colours(&defaults, wp); input_osc_colour_reply(ictx, 10, defaults.fg); return; } if (!input_osc_parse_colour(p, &r, &g, &b)) goto bad; wp->fg = colour_join_rgb(r, g, b); wp->flags |= (PANE_REDRAW|PANE_STYLECHANGED); return; bad: log_debug("bad OSC 10: %s", p); } /* Handle the OSC 11 sequence for setting and querying background colour. */ static void input_osc_11(struct input_ctx *ictx, const char *p) { struct window_pane *wp = ictx->wp; struct grid_cell defaults; u_int r, g, b; if (wp == NULL) return; if (strcmp(p, "?") == 0) { tty_default_colours(&defaults, wp); input_osc_colour_reply(ictx, 11, defaults.bg); return; } if (!input_osc_parse_colour(p, &r, &g, &b)) goto bad; wp->bg = colour_join_rgb(r, g, b); wp->flags |= (PANE_REDRAW|PANE_STYLECHANGED); return; bad: log_debug("bad OSC 11: %s", p); } /* Handle the OSC 52 sequence for setting the clipboard. */ static void input_osc_52(struct input_ctx *ictx, const char *p) { struct window_pane *wp = ictx->wp; char *end; const char *buf; size_t len; u_char *out; int outlen, state; struct screen_write_ctx ctx; struct paste_buffer *pb; if (wp == NULL) return; state = options_get_number(global_options, "set-clipboard"); if (state != 2) return; if ((end = strchr(p, ';')) == NULL) return; end++; if (*end == '\0') return; log_debug("%s: %s", __func__, end); if (strcmp(end, "?") == 0) { if ((pb = paste_get_top(NULL)) != NULL) { buf = paste_buffer_data(pb, &len); outlen = 4 * ((len + 2) / 3) + 1; out = xmalloc(outlen); if ((outlen = b64_ntop(buf, len, out, outlen)) == -1) { free(out); return; } } else { outlen = 0; out = NULL; } bufferevent_write(ictx->event, "\033]52;;", 6); if (outlen != 0) bufferevent_write(ictx->event, out, outlen); if (ictx->input_end == INPUT_END_BEL) bufferevent_write(ictx->event, "\007", 1); else bufferevent_write(ictx->event, "\033\\", 2); free(out); return; } len = (strlen(end) / 4) * 3; if (len == 0) return; out = xmalloc(len); if ((outlen = b64_pton(end, out, len)) == -1) { free(out); return; } screen_write_start_pane(&ctx, wp, NULL); screen_write_setselection(&ctx, out, outlen); screen_write_stop(&ctx); notify_pane("pane-set-clipboard", wp); paste_add(NULL, out, outlen); } /* Handle the OSC 104 sequence for unsetting (multiple) palette entries. */ static void input_osc_104(struct input_ctx *ictx, const char *p) { struct window_pane *wp = ictx->wp; char *copy, *s; long idx; if (wp == NULL) return; if (*p == '\0') { window_pane_reset_palette(wp); return; } copy = s = xstrdup(p); while (*s != '\0') { idx = strtol(s, &s, 10); if (*s != '\0' && *s != ';') goto bad; if (idx < 0 || idx >= 0x100) goto bad; window_pane_unset_palette(wp, idx); if (*s == ';') s++; } free(copy); return; bad: log_debug("bad OSC 104: %s", p); free(copy); }
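input_osc_parse_colour() above accepts 1-, 2-, 3- or 4-hex-digit channels in the rgb:R/G/B form and normalizes each to 8 bits: one digit is duplicated into both nibbles, three digits are shifted right by 4, four digits by 8, and any other width is rejected. A standalone sketch of just that scaling rule, with a few worked values (an illustration, not code taken from the record):

#include <assert.h>
#include <stdio.h>

/*
 * Scale one parsed hex channel of 'ndigits' width to 8 bits, mirroring
 * the rsize/gsize/bsize handling in input_osc_parse_colour().
 * Returns -1 for widths the parser rejects.
 */
static int scale_channel(unsigned int v, int ndigits)
{
	switch (ndigits) {
	case 1: return (v | (v << 4));	/* 0xf    -> 0xff */
	case 2: return (v);		/* 0x80   -> 0x80 */
	case 3: return (v >> 4);	/* 0xfff  -> 0xff */
	case 4: return (v >> 8);	/* 0x1234 -> 0x12 */
	default: return (-1);
	}
}

int main(void)
{
	assert(scale_channel(0x8, 1) == 0x88);
	assert(scale_channel(0x80, 2) == 0x80);
	assert(scale_channel(0xfff, 3) == 0xff);
	assert(scale_channel(0x1234, 4) == 0x12);
	printf("ok\n");
	return (0);
}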
null
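The tmux input.c excerpt above accepts OSC colour specs of the form rgb:R/G/B, where each component carries 1 to 4 hex digits, and input_osc_parse_colour() rescales every component to 8 bits before use. A minimal standalone C sketch of that rescaling rule follows; scale_component() and the main() driver are hypothetical names added here purely for illustration, not part of the tmux source.

/* Sketch of the component scaling visible in input_osc_parse_colour() above:
 * an X11-style "rgb:R/G/B" spec allows 1-4 hex digits per component, and the
 * parser normalizes each value to 8 bits depending on how many digits were
 * given. scale_component() is a hypothetical helper for illustration only. */
#include <stdio.h>

static int scale_component(unsigned v, int ndigits)
{
    switch (ndigits) {
    case 1: return v | (v << 4);  /* 0xA    -> 0xAA */
    case 2: return v;             /* 0xAB   -> 0xAB */
    case 3: return v >> 4;        /* 0xABC  -> 0xAB */
    case 4: return v >> 8;        /* 0xABCD -> 0xAB */
    default: return -1;           /* other widths are rejected, as in the parser */
    }
}

int main(void)
{
    printf("%02x %02x %02x %02x\n",
           scale_component(0xA, 1), scale_component(0xAB, 2),
           scale_component(0xABC, 3), scale_component(0xABCD, 4));
    return 0;
}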
214
CWE-787
CVE-2020-28248
#include "./PngImg.h" #include "./PngStructs.h" #include <png.h> #include <string.h> #include <memory> #include <algorithm> using namespace std; struct BufPtr { const char* ptr; size_t len; }; /// void readFromBuf(png_structp pngPtr, png_bytep data, png_size_t length) { BufPtr* bufPtr = (BufPtr*)png_get_io_ptr(pngPtr); memcpy((char*)data, bufPtr->ptr, length); bufPtr->ptr += length; bufPtr->len -= length; } /// PngImg::PngImg(const char* buf, const size_t bufLen) : data_(nullptr) { memset(&info_, 0, sizeof(info_)); PngReadStruct rs; if(rs.Valid()) { BufPtr bufPtr = {buf, bufLen}; png_set_read_fn(rs.pngPtr, (png_voidp)&bufPtr, readFromBuf); ReadInfo_(rs); InitStorage_(); png_read_image(rs.pngPtr, &rowPtrs_[0]); } } /// PngImg::~PngImg() { if(data_) delete [] data_; } /// void PngImg::ReadInfo_(PngReadStruct& rs) { png_read_info(rs.pngPtr, rs.infoPtr); info_.width = png_get_image_width(rs.pngPtr, rs.infoPtr); info_.height = png_get_image_height(rs.pngPtr, rs.infoPtr); info_.bit_depth = png_get_bit_depth(rs.pngPtr, rs.infoPtr); info_.color_type = png_get_color_type(rs.pngPtr, rs.infoPtr); info_.interlace_type = png_get_interlace_type(rs.pngPtr, rs.infoPtr); info_.compression_type = png_get_compression_type(rs.pngPtr, rs.infoPtr); info_.filter_type = png_get_filter_type(rs.pngPtr, rs.infoPtr); info_.rowbytes = png_get_rowbytes(rs.pngPtr, rs.infoPtr); info_.pxlsize = info_.rowbytes / info_.width; } /// void PngImg::InitStorage_() { rowPtrs_.resize(info_.height, nullptr); data_ = new png_byte[info_.height * info_.rowbytes]; for(size_t i = 0; i < info_.height; ++i) { rowPtrs_[i] = data_ + i * info_.rowbytes; } } /// unique_ptr<Pxl> PngImg::Get(png_uint_32 x, png_uint_32 y) const { if(x >= info_.width || y >= info_.height) { error_ = "Out of the bounds"; return nullptr; } png_bytep p = rowPtrs_[y] + info_.pxlsize * x; unique_ptr<Pxl> pPxl(new Pxl{0, 0, 0, 0}); pPxl->r = p[0]; pPxl->g = p[1]; pPxl->b = p[2]; pPxl->a = info_.pxlsize > 3 ? 
p[3] : 255; return pPxl; } /// bool PngImg::Fill(png_uint_32 offsetX, png_uint_32 offsetY, png_uint_32 width, png_uint_32 height, const Pxl& pxl) { if(!InBounds_(offsetX, offsetY, width, height)) { error_ = "Out of the bounds"; return false; } for(size_t i = 0; i < height; ++i) { for(size_t j = 0; j < width; ++j) { Set_(offsetX + j, offsetY + i, pxl); } } return true; } /// void PngImg::Set_(png_uint_32 x, png_uint_32 y, const Pxl& pxl) { png_bytep p = rowPtrs_[y] + info_.pxlsize * x; p[0] = pxl.r; p[1] = pxl.g; p[2] = pxl.b; if(info_.pxlsize > 3) { p[3] = pxl.a; } } /// bool PngImg::Crop(png_uint_32 offsetX, png_uint_32 offsetY, png_uint_32 width, png_uint_32 height) { if(!InBounds_(offsetX, offsetY, width, height)) { error_ = "Out of the bounds"; return false; } for(size_t i = 0; i < height; ++i) { rowPtrs_[i] = rowPtrs_[i + offsetY] + offsetX * info_.pxlsize; } rowPtrs_.resize(height); info_.width = width; info_.height = height; info_.rowbytes = info_.pxlsize * width; return true; } /// bool PngImg::InBounds_(png_uint_32 offsetX, png_uint_32 offsetY, png_uint_32 width, png_uint_32 height) const { return width != 0 && height != 0 && width <= info_.width && height <= info_.height && offsetX < info_.width && offsetY < info_.height && offsetX + width <= info_.width && offsetY + height <= info_.height; } /// void PngImg::SetSize(png_uint_32 width, png_uint_32 height) { const ImgInfo oldInfo = info_; const unique_ptr<png_byte[]> oldData{data_}; const vector<png_bytep> oldRowPtrs{rowPtrs_}; info_.width = width; info_.height = height; info_.rowbytes = info_.pxlsize * width; InitStorage_(); memset(data_, 0, info_.height * info_.rowbytes); CopyRows_(oldRowPtrs, min(height, oldInfo.height), min(oldInfo.rowbytes, info_.rowbytes)); } /// void PngImg::Insert(const PngImg& img, png_uint_32 offsetX, png_uint_32 offsetY) { if(info_.pxlsize == img.info_.pxlsize) { CopyRows_(img.rowPtrs_, img.info_.height, img.info_.rowbytes, offsetX, offsetY); } else { CopyPxlByPxl_(img, offsetX, offsetY); } } /// void PngImg::CopyPxlByPxl_(const PngImg& img, png_uint_32 offsetX, png_uint_32 offsetY) { for(size_t x = 0; x < img.info_.width; ++x) { for(size_t y = 0; y < img.info_.height; ++y) { Set_(offsetX + x, offsetY + y, *img.Get(x, y)); } } } /// void PngImg::CopyRows_(const vector<png_bytep>& rowPtrs, const size_t numRows, const size_t rowLen, png_uint_32 offsetX, png_uint_32 offsetY) { for(size_t y = 0; y < numRows; ++y) { memcpy(rowPtrs_[y + offsetY] + offsetX * info_.pxlsize, rowPtrs[y], rowLen); } } /// void PngImg::RotateRight() { Rotate_([](const Point& p, const ImgInfo& img) { return Point{img.height - p.y - 1, p.x}; }); } /// void PngImg::RotateLeft() { Rotate_([](const Point& p, const ImgInfo& img) { return Point{p.y, img.width - p.x - 1}; }); } /// void PngImg::Rotate_(function<Point(const Point&, const ImgInfo&)> moveFn) { const ImgInfo oldInfo = info_; const unique_ptr<png_byte[]> oldData{data_}; const vector<png_bytep> oldRowPtrs{rowPtrs_}; info_.width = oldInfo.height; info_.height = oldInfo.width; info_.rowbytes = info_.pxlsize * info_.width; InitStorage_(); for(size_t x = 0; x < oldInfo.width; ++x) { for(size_t y = 0; y < oldInfo.height; ++y) { auto newPoint = moveFn({x, y}, oldInfo); png_bytep p = oldRowPtrs[y] + oldInfo.pxlsize * x; png_bytep newP = rowPtrs_[newPoint.y] + info_.pxlsize * newPoint.x; copy(p, p + info_.pxlsize, newP); } } } /// bool PngImg::Write(const string& file) { auto fileClose = [](FILE* fp){ if(fp) fclose(fp); }; unique_ptr<FILE, decltype(fileClose)> fp(fopen(file.c_str(), 
"wb"), fileClose); if(!fp) { error_ = "Can't open file for writing"; return false; } PngWriteStruct pws; if(!pws.Valid()) { error_ = "Can't create png structs"; return false; } if(setjmp(png_jmpbuf(pws.pngPtr))) { error_ = "Can't write file"; return false; } png_init_io(pws.pngPtr, fp.get()); png_set_IHDR(pws.pngPtr, pws.infoPtr, info_.width, info_.height, info_.bit_depth, info_.color_type, info_.interlace_type, info_.compression_type, info_.filter_type ); png_set_rows(pws.pngPtr, pws.infoPtr, &rowPtrs_[0]); png_write_png(pws.pngPtr, pws.infoPtr, PNG_TRANSFORM_IDENTITY, NULL); return true; }
null
#include "./PngImg.h" #include "./PngStructs.h" #include <png.h> #include <string.h> #include <memory> #include <algorithm> using namespace std; struct BufPtr { const char* ptr; size_t len; }; /// void readFromBuf(png_structp pngPtr, png_bytep data, png_size_t length) { BufPtr* bufPtr = (BufPtr*)png_get_io_ptr(pngPtr); memcpy((char*)data, bufPtr->ptr, length); bufPtr->ptr += length; bufPtr->len -= length; } /// PngImg::PngImg(const char* buf, const size_t bufLen) : data_(nullptr) { memset(&info_, 0, sizeof(info_)); PngReadStruct rs; if(rs.Valid()) { BufPtr bufPtr = {buf, bufLen}; png_set_read_fn(rs.pngPtr, (png_voidp)&bufPtr, readFromBuf); ReadInfo_(rs); InitStorage_(); png_read_image(rs.pngPtr, &rowPtrs_[0]); } } /// PngImg::~PngImg() { if(data_) delete [] data_; } /// void PngImg::ReadInfo_(PngReadStruct& rs) { png_read_info(rs.pngPtr, rs.infoPtr); info_.width = png_get_image_width(rs.pngPtr, rs.infoPtr); info_.height = png_get_image_height(rs.pngPtr, rs.infoPtr); info_.bit_depth = png_get_bit_depth(rs.pngPtr, rs.infoPtr); info_.color_type = png_get_color_type(rs.pngPtr, rs.infoPtr); info_.interlace_type = png_get_interlace_type(rs.pngPtr, rs.infoPtr); info_.compression_type = png_get_compression_type(rs.pngPtr, rs.infoPtr); info_.filter_type = png_get_filter_type(rs.pngPtr, rs.infoPtr); info_.rowbytes = png_get_rowbytes(rs.pngPtr, rs.infoPtr); info_.pxlsize = info_.rowbytes / info_.width; } /// void PngImg::InitStorage_() { rowPtrs_.resize(info_.height, nullptr); // Extend height and rowbytes from uint32_t to size_t to avoid multiplication overflow when size_t is larger size_t h = info_.height; size_t rb = info_.rowbytes; // We need to make sure that info_.height * info_.rowbytes will not overflow size_t // Unfotunately, there's no simple and portable way to do this in C++ // For integer division of positive numbers a * b > c <==> a > c / b holds if (h > std::numeric_limits<size_t>::max() / rb) { // TODO Propagate this exception to JS, and test it throw std::runtime_error("Image is too large to allocate single buffer"); } data_ = new png_byte[h * rb]; for(size_t i = 0; i < info_.height; ++i) { rowPtrs_[i] = data_ + i * rb; } } /// unique_ptr<Pxl> PngImg::Get(png_uint_32 x, png_uint_32 y) const { if(x >= info_.width || y >= info_.height) { error_ = "Out of the bounds"; return nullptr; } png_bytep p = rowPtrs_[y] + info_.pxlsize * x; unique_ptr<Pxl> pPxl(new Pxl{0, 0, 0, 0}); pPxl->r = p[0]; pPxl->g = p[1]; pPxl->b = p[2]; pPxl->a = info_.pxlsize > 3 ? 
p[3] : 255; return pPxl; } /// bool PngImg::Fill(png_uint_32 offsetX, png_uint_32 offsetY, png_uint_32 width, png_uint_32 height, const Pxl& pxl) { if(!InBounds_(offsetX, offsetY, width, height)) { error_ = "Out of the bounds"; return false; } for(size_t i = 0; i < height; ++i) { for(size_t j = 0; j < width; ++j) { Set_(offsetX + j, offsetY + i, pxl); } } return true; } /// void PngImg::Set_(png_uint_32 x, png_uint_32 y, const Pxl& pxl) { png_bytep p = rowPtrs_[y] + info_.pxlsize * x; p[0] = pxl.r; p[1] = pxl.g; p[2] = pxl.b; if(info_.pxlsize > 3) { p[3] = pxl.a; } } /// bool PngImg::Crop(png_uint_32 offsetX, png_uint_32 offsetY, png_uint_32 width, png_uint_32 height) { if(!InBounds_(offsetX, offsetY, width, height)) { error_ = "Out of the bounds"; return false; } for(size_t i = 0; i < height; ++i) { rowPtrs_[i] = rowPtrs_[i + offsetY] + offsetX * info_.pxlsize; } rowPtrs_.resize(height); info_.width = width; info_.height = height; info_.rowbytes = info_.pxlsize * width; return true; } /// bool PngImg::InBounds_(png_uint_32 offsetX, png_uint_32 offsetY, png_uint_32 width, png_uint_32 height) const { return width != 0 && height != 0 && width <= info_.width && height <= info_.height && offsetX < info_.width && offsetY < info_.height && offsetX + width <= info_.width && offsetY + height <= info_.height; } /// void PngImg::SetSize(png_uint_32 width, png_uint_32 height) { const ImgInfo oldInfo = info_; const unique_ptr<png_byte[]> oldData{data_}; const vector<png_bytep> oldRowPtrs{rowPtrs_}; info_.width = width; info_.height = height; info_.rowbytes = info_.pxlsize * width; InitStorage_(); memset(data_, 0, info_.height * info_.rowbytes); CopyRows_(oldRowPtrs, min(height, oldInfo.height), min(oldInfo.rowbytes, info_.rowbytes)); } /// void PngImg::Insert(const PngImg& img, png_uint_32 offsetX, png_uint_32 offsetY) { if(info_.pxlsize == img.info_.pxlsize) { CopyRows_(img.rowPtrs_, img.info_.height, img.info_.rowbytes, offsetX, offsetY); } else { CopyPxlByPxl_(img, offsetX, offsetY); } } /// void PngImg::CopyPxlByPxl_(const PngImg& img, png_uint_32 offsetX, png_uint_32 offsetY) { for(size_t x = 0; x < img.info_.width; ++x) { for(size_t y = 0; y < img.info_.height; ++y) { Set_(offsetX + x, offsetY + y, *img.Get(x, y)); } } } /// void PngImg::CopyRows_(const vector<png_bytep>& rowPtrs, const size_t numRows, const size_t rowLen, png_uint_32 offsetX, png_uint_32 offsetY) { for(size_t y = 0; y < numRows; ++y) { memcpy(rowPtrs_[y + offsetY] + offsetX * info_.pxlsize, rowPtrs[y], rowLen); } } /// void PngImg::RotateRight() { Rotate_([](const Point& p, const ImgInfo& img) { return Point{img.height - p.y - 1, p.x}; }); } /// void PngImg::RotateLeft() { Rotate_([](const Point& p, const ImgInfo& img) { return Point{p.y, img.width - p.x - 1}; }); } /// void PngImg::Rotate_(function<Point(const Point&, const ImgInfo&)> moveFn) { const ImgInfo oldInfo = info_; const unique_ptr<png_byte[]> oldData{data_}; const vector<png_bytep> oldRowPtrs{rowPtrs_}; info_.width = oldInfo.height; info_.height = oldInfo.width; info_.rowbytes = info_.pxlsize * info_.width; InitStorage_(); for(size_t x = 0; x < oldInfo.width; ++x) { for(size_t y = 0; y < oldInfo.height; ++y) { auto newPoint = moveFn({x, y}, oldInfo); png_bytep p = oldRowPtrs[y] + oldInfo.pxlsize * x; png_bytep newP = rowPtrs_[newPoint.y] + info_.pxlsize * newPoint.x; copy(p, p + info_.pxlsize, newP); } } } /// bool PngImg::Write(const string& file) { auto fileClose = [](FILE* fp){ if(fp) fclose(fp); }; unique_ptr<FILE, decltype(fileClose)> fp(fopen(file.c_str(), 
"wb"), fileClose); if(!fp) { error_ = "Can't open file for writing"; return false; } PngWriteStruct pws; if(!pws.Valid()) { error_ = "Can't create png structs"; return false; } if(setjmp(png_jmpbuf(pws.pngPtr))) { error_ = "Can't write file"; return false; } png_init_io(pws.pngPtr, fp.get()); png_set_IHDR(pws.pngPtr, pws.infoPtr, info_.width, info_.height, info_.bit_depth, info_.color_type, info_.interlace_type, info_.compression_type, info_.filter_type ); png_set_rows(pws.pngPtr, pws.infoPtr, &rowPtrs_[0]); png_write_png(pws.pngPtr, pws.infoPtr, PNG_TRANSFORM_IDENTITY, NULL); return true; }
null
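The CVE-2020-28248 fix above differs from the vulnerable png-img source only in InitStorage_(): it widens info_.height and info_.rowbytes to size_t and rejects images whose height * rowbytes product would overflow before calling new png_byte[...]. A minimal C sketch of that guard, assuming a plain malloc-based allocator is acceptable; checked_image_alloc() is a hypothetical name used only for illustration.

/* Sketch of the overflow guard the fix adds before allocating
 * height * rowbytes bytes: refuse the image when the product cannot be
 * represented in size_t instead of allocating an undersized buffer. */
#include <stdint.h>
#include <stdlib.h>

static void *checked_image_alloc(size_t height, size_t rowbytes)
{
    if (height == 0 || rowbytes == 0)
        return NULL;                  /* nothing sensible to allocate */
    if (height > SIZE_MAX / rowbytes)
        return NULL;                  /* height * rowbytes would overflow size_t */
    return malloc(height * rowbytes); /* product is now known to be representable */
}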
215
CWE-787
CVE-2020-28371
/* Copyright (c) 2008-2015, Avian Contributors Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. There is NO WARRANTY for this software. See license.txt for details. */ package java.io; public class FileOutputStream extends OutputStream { // static { // System.loadLibrary("natives"); // } private int fd; public FileOutputStream(FileDescriptor fd) { this.fd = fd.value; } public FileOutputStream(String path) throws IOException { this(path, false); } public FileOutputStream(String path, boolean append) throws IOException { fd = open(path, append); } public FileOutputStream(File file) throws IOException { this(file.getPath()); } private static native int open(String path, boolean append) throws IOException; private static native void write(int fd, int c) throws IOException; private static native void write(int fd, byte[] b, int offset, int length) throws IOException; private static native void close(int fd) throws IOException; public void write(int c) throws IOException { write(fd, c); } public void write(byte[] b, int offset, int length) throws IOException { if (b == null) { throw new NullPointerException(); } if (offset < 0 || offset + length > b.length) { throw new ArrayIndexOutOfBoundsException(); } write(fd, b, offset, length); } public void close() throws IOException { if (fd != -1) { close(fd); fd = -1; } } }
null
/* Copyright (c) 2008-2015, Avian Contributors Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. There is NO WARRANTY for this software. See license.txt for details. */ package java.io; public class FileOutputStream extends OutputStream { // static { // System.loadLibrary("natives"); // } private int fd; public FileOutputStream(FileDescriptor fd) { this.fd = fd.value; } public FileOutputStream(String path) throws IOException { this(path, false); } public FileOutputStream(String path, boolean append) throws IOException { fd = open(path, append); } public FileOutputStream(File file) throws IOException { this(file.getPath()); } private static native int open(String path, boolean append) throws IOException; private static native void write(int fd, int c) throws IOException; private static native void write(int fd, byte[] b, int offset, int length) throws IOException; private static native void close(int fd) throws IOException; public void write(int c) throws IOException { write(fd, c); } public void write(byte[] b, int offset, int length) throws IOException { if (b == null) { throw new NullPointerException(); } if (offset < 0 || length < 0 || length > b.length || offset > b.length - length) { throw new ArrayIndexOutOfBoundsException(); } write(fd, b, offset, length); } public void close() throws IOException { if (fd != -1) { close(fd); fd = -1; } } }
null
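The CVE-2020-28371 fix above replaces the bounds check `offset < 0 || offset + length > b.length`, whose addition can wrap around for large arguments, with a form that never performs the addition. A minimal C sketch of the same overflow-safe pattern; range_in_bounds() is a hypothetical name used only for illustration.

/* Sketch of the overflow-safe bounds check used in the fix: instead of
 * computing offset + length (which can overflow), validate each operand and
 * then compare offset against len - length, which is already known to be
 * non-negative. */
#include <stdbool.h>

static bool range_in_bounds(long offset, long length, long len)
{
    if (offset < 0 || length < 0)
        return false;              /* negative values are always invalid */
    if (length > len)
        return false;              /* more bytes than the buffer holds */
    return offset <= len - length; /* same as offset + length <= len, without overflow */
}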
216
CWE-787
CVE-2020-29367
/********************************************************************* Blosc - Blocked Shuffling and Compression Library Author: Francesc Alted <francesc@blosc.org> Creation date: 2009-05-20 See LICENSE.txt for details about copyright and rights to use. **********************************************************************/ #include <stdio.h> #include <stdlib.h> #include <errno.h> #include <string.h> #include <sys/types.h> #include <assert.h> #include "blosc2.h" #include "blosc-private.h" #include "blosc2-common.h" #if defined(USING_CMAKE) #include "config.h" #endif /* USING_CMAKE */ #include "context.h" #include "shuffle.h" #include "delta.h" #include "trunc-prec.h" #include "blosclz.h" #include "btune.h" #if defined(HAVE_LZ4) #include "lz4.h" #include "lz4hc.h" #ifdef HAVE_IPP #include <ipps.h> #include <ippdc.h> #endif #endif /* HAVE_LZ4 */ #if defined(HAVE_LIZARD) #include "lizard_compress.h" #include "lizard_decompress.h" #endif /* HAVE_LIZARD */ #if defined(HAVE_SNAPPY) #include "snappy-c.h" #endif /* HAVE_SNAPPY */ #if defined(HAVE_MINIZ) #include "miniz.c" #elif defined(HAVE_ZLIB) #include "zlib.h" #endif /* HAVE_MINIZ */ #if defined(HAVE_ZSTD) #include "zstd.h" #include "zstd_errors.h" // #include "cover.h" // for experimenting with fast cover training for building dicts #include "zdict.h" #endif /* HAVE_ZSTD */ #if defined(_WIN32) && !defined(__MINGW32__) #include <windows.h> #include <malloc.h> /* stdint.h only available in VS2010 (VC++ 16.0) and newer */ #if defined(_MSC_VER) && _MSC_VER < 1600 #include "win32/stdint-windows.h" #else #include <stdint.h> #endif #include <process.h> #define getpid _getpid #else #include <unistd.h> #endif /* _WIN32 */ #if defined(_WIN32) && !defined(__GNUC__) #include "win32/pthread.c" #endif /* Synchronization variables */ /* Global context for non-contextual API */ static blosc2_context* g_global_context; static pthread_mutex_t global_comp_mutex; static int g_compressor = BLOSC_BLOSCLZ; static int g_delta = 0; /* the compressor to use by default */ static int g_nthreads = 1; static int32_t g_force_blocksize = 0; static int g_initlib = 0; static blosc2_schunk* g_schunk = NULL; /* the pointer to super-chunk */ // Forward declarations int init_threadpool(blosc2_context *context); int release_threadpool(blosc2_context *context); /* Macros for synchronization */ /* Wait until all threads are initialized */ #ifdef BLOSC_POSIX_BARRIERS #define WAIT_INIT(RET_VAL, CONTEXT_PTR) \ rc = pthread_barrier_wait(&(CONTEXT_PTR)->barr_init); \ if (rc != 0 && rc != PTHREAD_BARRIER_SERIAL_THREAD) { \ printf("Could not wait on barrier (init): %d\n", rc); \ return((RET_VAL)); \ } #else #define WAIT_INIT(RET_VAL, CONTEXT_PTR) \ pthread_mutex_lock(&(CONTEXT_PTR)->count_threads_mutex); \ if ((CONTEXT_PTR)->count_threads < (CONTEXT_PTR)->nthreads) { \ (CONTEXT_PTR)->count_threads++; \ pthread_cond_wait(&(CONTEXT_PTR)->count_threads_cv, \ &(CONTEXT_PTR)->count_threads_mutex); \ } \ else { \ pthread_cond_broadcast(&(CONTEXT_PTR)->count_threads_cv); \ } \ pthread_mutex_unlock(&(CONTEXT_PTR)->count_threads_mutex); #endif /* Wait for all threads to finish */ #ifdef BLOSC_POSIX_BARRIERS #define WAIT_FINISH(RET_VAL, CONTEXT_PTR) \ rc = pthread_barrier_wait(&(CONTEXT_PTR)->barr_finish); \ if (rc != 0 && rc != PTHREAD_BARRIER_SERIAL_THREAD) { \ printf("Could not wait on barrier (finish)\n"); \ return((RET_VAL)); \ } #else #define WAIT_FINISH(RET_VAL, CONTEXT_PTR) \ pthread_mutex_lock(&(CONTEXT_PTR)->count_threads_mutex); \ if ((CONTEXT_PTR)->count_threads > 0) { \ 
(CONTEXT_PTR)->count_threads--; \ pthread_cond_wait(&(CONTEXT_PTR)->count_threads_cv, \ &(CONTEXT_PTR)->count_threads_mutex); \ } \ else { \ pthread_cond_broadcast(&(CONTEXT_PTR)->count_threads_cv); \ } \ pthread_mutex_unlock(&(CONTEXT_PTR)->count_threads_mutex); #endif /* global variable to change threading backend from Blosc-managed to caller-managed */ static blosc_threads_callback threads_callback = 0; static void *threads_callback_data = 0; /* non-threadsafe function should be called before any other Blosc function in order to change how threads are managed */ void blosc_set_threads_callback(blosc_threads_callback callback, void *callback_data) { threads_callback = callback; threads_callback_data = callback_data; } /* A function for aligned malloc that is portable */ static uint8_t* my_malloc(size_t size) { void* block = NULL; int res = 0; /* Do an alignment to 32 bytes because AVX2 is supported */ #if defined(_WIN32) /* A (void *) cast needed for avoiding a warning with MINGW :-/ */ block = (void *)_aligned_malloc(size, 32); #elif _POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600 /* Platform does have an implementation of posix_memalign */ res = posix_memalign(&block, 32, size); #else block = malloc(size); #endif /* _WIN32 */ if (block == NULL || res != 0) { printf("Error allocating memory!"); return NULL; } return (uint8_t*)block; } /* Release memory booked by my_malloc */ static void my_free(void* block) { #if defined(_WIN32) _aligned_free(block); #else free(block); #endif /* _WIN32 */ } /* * Conversion routines between compressor and compression libraries */ /* Return the library code associated with the compressor name */ static int compname_to_clibcode(const char* compname) { if (strcmp(compname, BLOSC_BLOSCLZ_COMPNAME) == 0) return BLOSC_BLOSCLZ_LIB; if (strcmp(compname, BLOSC_LZ4_COMPNAME) == 0) return BLOSC_LZ4_LIB; if (strcmp(compname, BLOSC_LZ4HC_COMPNAME) == 0) return BLOSC_LZ4_LIB; if (strcmp(compname, BLOSC_LIZARD_COMPNAME) == 0) return BLOSC_LIZARD_LIB; if (strcmp(compname, BLOSC_SNAPPY_COMPNAME) == 0) return BLOSC_SNAPPY_LIB; if (strcmp(compname, BLOSC_ZLIB_COMPNAME) == 0) return BLOSC_ZLIB_LIB; if (strcmp(compname, BLOSC_ZSTD_COMPNAME) == 0) return BLOSC_ZSTD_LIB; return -1; } /* Return the library name associated with the compressor code */ static const char* clibcode_to_clibname(int clibcode) { if (clibcode == BLOSC_BLOSCLZ_LIB) return BLOSC_BLOSCLZ_LIBNAME; if (clibcode == BLOSC_LZ4_LIB) return BLOSC_LZ4_LIBNAME; if (clibcode == BLOSC_LIZARD_LIB) return BLOSC_LIZARD_LIBNAME; if (clibcode == BLOSC_SNAPPY_LIB) return BLOSC_SNAPPY_LIBNAME; if (clibcode == BLOSC_ZLIB_LIB) return BLOSC_ZLIB_LIBNAME; if (clibcode == BLOSC_ZSTD_LIB) return BLOSC_ZSTD_LIBNAME; return NULL; /* should never happen */ } /* * Conversion routines between compressor names and compressor codes */ /* Get the compressor name associated with the compressor code */ int blosc_compcode_to_compname(int compcode, const char** compname) { int code = -1; /* -1 means non-existent compressor code */ const char* name = NULL; /* Map the compressor code */ if (compcode == BLOSC_BLOSCLZ) name = BLOSC_BLOSCLZ_COMPNAME; else if (compcode == BLOSC_LZ4) name = BLOSC_LZ4_COMPNAME; else if (compcode == BLOSC_LZ4HC) name = BLOSC_LZ4HC_COMPNAME; else if (compcode == BLOSC_LIZARD) name = BLOSC_LIZARD_COMPNAME; else if (compcode == BLOSC_SNAPPY) name = BLOSC_SNAPPY_COMPNAME; else if (compcode == BLOSC_ZLIB) name = BLOSC_ZLIB_COMPNAME; else if (compcode == BLOSC_ZSTD) name = BLOSC_ZSTD_COMPNAME; *compname = name; /* Guess 
if there is support for this code */ if (compcode == BLOSC_BLOSCLZ) code = BLOSC_BLOSCLZ; #if defined(HAVE_LZ4) else if (compcode == BLOSC_LZ4) code = BLOSC_LZ4; else if (compcode == BLOSC_LZ4HC) code = BLOSC_LZ4HC; #endif /* HAVE_LZ4 */ #if defined(HAVE_LIZARD) else if (compcode == BLOSC_LIZARD) code = BLOSC_LIZARD; #endif /* HAVE_LIZARD */ #if defined(HAVE_SNAPPY) else if (compcode == BLOSC_SNAPPY) code = BLOSC_SNAPPY; #endif /* HAVE_SNAPPY */ #if defined(HAVE_ZLIB) else if (compcode == BLOSC_ZLIB) code = BLOSC_ZLIB; #endif /* HAVE_ZLIB */ #if defined(HAVE_ZSTD) else if (compcode == BLOSC_ZSTD) code = BLOSC_ZSTD; #endif /* HAVE_ZSTD */ return code; } /* Get the compressor code for the compressor name. -1 if it is not available */ int blosc_compname_to_compcode(const char* compname) { int code = -1; /* -1 means non-existent compressor code */ if (strcmp(compname, BLOSC_BLOSCLZ_COMPNAME) == 0) { code = BLOSC_BLOSCLZ; } #if defined(HAVE_LZ4) else if (strcmp(compname, BLOSC_LZ4_COMPNAME) == 0) { code = BLOSC_LZ4; } else if (strcmp(compname, BLOSC_LZ4HC_COMPNAME) == 0) { code = BLOSC_LZ4HC; } #endif /* HAVE_LZ4 */ #if defined(HAVE_LIZARD) else if (strcmp(compname, BLOSC_LIZARD_COMPNAME) == 0) { code = BLOSC_LIZARD; } #endif /* HAVE_LIZARD */ #if defined(HAVE_SNAPPY) else if (strcmp(compname, BLOSC_SNAPPY_COMPNAME) == 0) { code = BLOSC_SNAPPY; } #endif /* HAVE_SNAPPY */ #if defined(HAVE_ZLIB) else if (strcmp(compname, BLOSC_ZLIB_COMPNAME) == 0) { code = BLOSC_ZLIB; } #endif /* HAVE_ZLIB */ #if defined(HAVE_ZSTD) else if (strcmp(compname, BLOSC_ZSTD_COMPNAME) == 0) { code = BLOSC_ZSTD; } #endif /* HAVE_ZSTD */ return code; } #if defined(HAVE_LZ4) static int lz4_wrap_compress(const char* input, size_t input_length, char* output, size_t maxout, int accel, void* hash_table) { BLOSC_UNUSED_PARAM(accel); int cbytes; #ifdef HAVE_IPP if (hash_table == NULL) { return -1; // the hash table should always be initialized } int outlen = (int)maxout; int inlen = (int)input_length; // I have not found any function that uses `accel` like in `LZ4_compress_fast`, but // the IPP LZ4Safe call does a pretty good job on compressing well, so let's use it IppStatus status = ippsEncodeLZ4Safe_8u((const Ipp8u*)input, &inlen, (Ipp8u*)output, &outlen, (Ipp8u*)hash_table); if (status == ippStsDstSizeLessExpected) { return 0; // we cannot compress in required outlen } else if (status != ippStsNoErr) { return -1; // an unexpected error happened } cbytes = outlen; #else BLOSC_UNUSED_PARAM(hash_table); accel = 1; // deactivate acceleration to match IPP behaviour cbytes = LZ4_compress_fast(input, output, (int)input_length, (int)maxout, accel); #endif return cbytes; } static int lz4hc_wrap_compress(const char* input, size_t input_length, char* output, size_t maxout, int clevel) { int cbytes; if (input_length > (size_t)(UINT32_C(2) << 30)) return -1; /* input larger than 2 GB is not supported */ /* clevel for lz4hc goes up to 12, at least in LZ4 1.7.5 * but levels larger than 9 do not buy much compression. 
*/ cbytes = LZ4_compress_HC(input, output, (int)input_length, (int)maxout, clevel); return cbytes; } static int lz4_wrap_decompress(const char* input, size_t compressed_length, char* output, size_t maxout) { int nbytes; #ifdef HAVE_IPP int outlen = (int)maxout; int inlen = (int)compressed_length; IppStatus status; status = ippsDecodeLZ4_8u((const Ipp8u*)input, inlen, (Ipp8u*)output, &outlen); //status = ippsDecodeLZ4Dict_8u((const Ipp8u*)input, &inlen, (Ipp8u*)output, 0, &outlen, NULL, 1 << 16); nbytes = (status == ippStsNoErr) ? outlen : -outlen; #else nbytes = LZ4_decompress_safe(input, output, (int)compressed_length, (int)maxout); #endif if (nbytes != (int)maxout) { return 0; } return (int)maxout; } #endif /* HAVE_LZ4 */ #if defined(HAVE_LIZARD) static int lizard_wrap_compress(const char* input, size_t input_length, char* output, size_t maxout, int clevel) { int cbytes; cbytes = Lizard_compress(input, output, (int)input_length, (int)maxout, clevel); return cbytes; } static int lizard_wrap_decompress(const char* input, size_t compressed_length, char* output, size_t maxout) { int dbytes; dbytes = Lizard_decompress_safe(input, output, (int)compressed_length, (int)maxout); if (dbytes < 0) { return 0; } return dbytes; } #endif /* HAVE_LIZARD */ #if defined(HAVE_SNAPPY) static int snappy_wrap_compress(const char* input, size_t input_length, char* output, size_t maxout) { snappy_status status; size_t cl = maxout; status = snappy_compress(input, input_length, output, &cl); if (status != SNAPPY_OK) { return 0; } return (int)cl; } static int snappy_wrap_decompress(const char* input, size_t compressed_length, char* output, size_t maxout) { snappy_status status; size_t ul = maxout; status = snappy_uncompress(input, compressed_length, output, &ul); if (status != SNAPPY_OK) { return 0; } return (int)ul; } #endif /* HAVE_SNAPPY */ #if defined(HAVE_ZLIB) /* zlib is not very respectful with sharing name space with others. Fortunately, its names do not collide with those already in blosc. */ static int zlib_wrap_compress(const char* input, size_t input_length, char* output, size_t maxout, int clevel) { int status; uLongf cl = (uLongf)maxout; status = compress2( (Bytef*)output, &cl, (Bytef*)input, (uLong)input_length, clevel); if (status != Z_OK) { return 0; } return (int)cl; } static int zlib_wrap_decompress(const char* input, size_t compressed_length, char* output, size_t maxout) { int status; uLongf ul = (uLongf)maxout; status = uncompress( (Bytef*)output, &ul, (Bytef*)input, (uLong)compressed_length); if (status != Z_OK) { return 0; } return (int)ul; } #endif /* HAVE_ZLIB */ #if defined(HAVE_ZSTD) static int zstd_wrap_compress(struct thread_context* thread_context, const char* input, size_t input_length, char* output, size_t maxout, int clevel) { size_t code; blosc2_context* context = thread_context->parent_context; clevel = (clevel < 9) ? 
clevel * 2 - 1 : ZSTD_maxCLevel(); /* Make the level 8 close enough to maxCLevel */ if (clevel == 8) clevel = ZSTD_maxCLevel() - 2; if (thread_context->zstd_cctx == NULL) { thread_context->zstd_cctx = ZSTD_createCCtx(); } if (context->use_dict) { assert(context->dict_cdict != NULL); code = ZSTD_compress_usingCDict( thread_context->zstd_cctx, (void*)output, maxout, (void*)input, input_length, context->dict_cdict); } else { code = ZSTD_compressCCtx(thread_context->zstd_cctx, (void*)output, maxout, (void*)input, input_length, clevel); } if (ZSTD_isError(code) != ZSTD_error_no_error) { // Do not print anything because blosc will just memcpy this buffer // fprintf(stderr, "Error in ZSTD compression: '%s'. Giving up.\n", // ZDICT_getErrorName(code)); return 0; } return (int)code; } static int zstd_wrap_decompress(struct thread_context* thread_context, const char* input, size_t compressed_length, char* output, size_t maxout) { size_t code; blosc2_context* context = thread_context->parent_context; if (thread_context->zstd_dctx == NULL) { thread_context->zstd_dctx = ZSTD_createDCtx(); } if (context->use_dict) { assert(context->dict_ddict != NULL); code = ZSTD_decompress_usingDDict( thread_context->zstd_dctx, (void*)output, maxout, (void*)input, compressed_length, context->dict_ddict); } else { code = ZSTD_decompressDCtx(thread_context->zstd_dctx, (void*)output, maxout, (void*)input, compressed_length); } if (ZSTD_isError(code) != ZSTD_error_no_error) { fprintf(stderr, "Error in ZSTD decompression: '%s'. Giving up.\n", ZDICT_getErrorName(code)); return 0; } return (int)code; } #endif /* HAVE_ZSTD */ /* Compute acceleration for blosclz */ static int get_accel(const blosc2_context* context) { int clevel = context->clevel; if (context->compcode == BLOSC_LZ4) { /* This acceleration setting based on discussions held in: * https://groups.google.com/forum/#!topic/lz4c/zosy90P8MQw */ return (10 - clevel); } else if (context->compcode == BLOSC_LIZARD) { /* Lizard currently accepts clevels from 10 to 49 */ switch (clevel) { case 1 : return 10; case 2 : return 10; case 3 : return 10; case 4 : return 10; case 5 : return 20; case 6 : return 20; case 7 : return 20; case 8 : return 41; case 9 : return 41; default : break; } } return 1; } int do_nothing(int8_t filter, char cmode) { if (cmode == 'c') { return (filter == BLOSC_NOFILTER); } else { // TRUNC_PREC do not have to be applied during decompression return ((filter == BLOSC_NOFILTER) || (filter == BLOSC_TRUNC_PREC)); } } int next_filter(const uint8_t* filters, int current_filter, char cmode) { for (int i = current_filter - 1; i >= 0; i--) { if (!do_nothing(filters[i], cmode)) { return filters[i]; } } return BLOSC_NOFILTER; } int last_filter(const uint8_t* filters, char cmode) { int last_index = -1; for (int i = BLOSC2_MAX_FILTERS - 1; i >= 0; i--) { if (!do_nothing(filters[i], cmode)) { last_index = i; } } return last_index; } uint8_t* pipeline_c(struct thread_context* thread_context, const int32_t bsize, const uint8_t* src, const int32_t offset, uint8_t* dest, uint8_t* tmp, uint8_t* tmp2) { blosc2_context* context = thread_context->parent_context; uint8_t* _src = (uint8_t*)src + offset; uint8_t* _tmp = tmp; uint8_t* _dest = dest; int32_t typesize = context->typesize; uint8_t* filters = context->filters; uint8_t* filters_meta = context->filters_meta; bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; /* Prefilter function */ if (context->prefilter != NULL) { // Create new prefilter parameters for this block (must be private for each thread) 
blosc2_prefilter_params pparams; memcpy(&pparams, context->pparams, sizeof(pparams)); pparams.out = _dest; pparams.out_size = (size_t)bsize; pparams.out_typesize = typesize; pparams.out_offset = offset; pparams.tid = thread_context->tid; pparams.ttmp = thread_context->tmp; pparams.ttmp_nbytes = thread_context->tmp_nbytes; pparams.ctx = context; if (context->prefilter(&pparams) != 0) { fprintf(stderr, "Execution of prefilter function failed\n"); return NULL; } if (memcpyed) { // No more filters are required return _dest; } // Cycle buffers _src = _dest; _dest = _tmp; _tmp = _src; } /* Process the filter pipeline */ for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) { switch (filters[i]) { case BLOSC_SHUFFLE: for (int j = 0; j <= filters_meta[i]; j++) { shuffle(typesize, bsize, _src, _dest); // Cycle filters when required if (j < filters_meta[i]) { _src = _dest; _dest = _tmp; _tmp = _src; } } break; case BLOSC_BITSHUFFLE: bitshuffle(typesize, bsize, _src, _dest, tmp2); break; case BLOSC_DELTA: delta_encoder(src, offset, bsize, typesize, _src, _dest); break; case BLOSC_TRUNC_PREC: truncate_precision(filters_meta[i], typesize, bsize, _src, _dest); break; default: if (filters[i] != BLOSC_NOFILTER) { fprintf(stderr, "Filter %d not handled during compression\n", filters[i]); return NULL; } } // Cycle buffers when required if (filters[i] != BLOSC_NOFILTER) { _src = _dest; _dest = _tmp; _tmp = _src; } } return _src; } // Optimized version for detecting runs. It compares 8 bytes values wherever possible. static bool get_run(const uint8_t* ip, const uint8_t* ip_bound) { uint8_t x = *ip; int64_t value, value2; /* Broadcast the value for every byte in a 64-bit register */ memset(&value, x, 8); while (ip < (ip_bound - 8)) { #if defined(BLOSC_STRICT_ALIGN) memcpy(&value2, ref, 8); #else value2 = *(int64_t*)ip; #endif if (value != value2) { // Values differ. We don't have a run. return false; } else { ip += 8; } } /* Look into the remainder */ while ((ip < ip_bound) && (*ip == x)) ip++; return ip == ip_bound ? 
true : false; } /* Shuffle & compress a single block */ static int blosc_c(struct thread_context* thread_context, int32_t bsize, int32_t leftoverblock, int32_t ntbytes, int32_t maxbytes, const uint8_t* src, const int32_t offset, uint8_t* dest, uint8_t* tmp, uint8_t* tmp2) { blosc2_context* context = thread_context->parent_context; int dont_split = (context->header_flags & 0x10) >> 4; int dict_training = context->use_dict && context->dict_cdict == NULL; int32_t j, neblock, nstreams; int32_t cbytes; /* number of compressed bytes in split */ int32_t ctbytes = 0; /* number of compressed bytes in block */ int64_t maxout; int32_t typesize = context->typesize; const char* compname; int accel; const uint8_t* _src; uint8_t *_tmp = tmp, *_tmp2 = tmp2; uint8_t *_tmp3 = thread_context->tmp4; int last_filter_index = last_filter(context->filters, 'c'); bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; if (last_filter_index >= 0 || context->prefilter != NULL) { /* Apply the filter pipeline just for the prefilter */ if (memcpyed && context->prefilter != NULL) { // We only need the prefilter output _src = pipeline_c(thread_context, bsize, src, offset, dest, _tmp2, _tmp3); if (_src == NULL) { return -9; // signals a problem with the filter pipeline } return bsize; } /* Apply regular filter pipeline */ _src = pipeline_c(thread_context, bsize, src, offset, _tmp, _tmp2, _tmp3); if (_src == NULL) { return -9; // signals a problem with the filter pipeline } } else { _src = src + offset; } assert(context->clevel > 0); /* Calculate acceleration for different compressors */ accel = get_accel(context); /* The number of compressed data streams for this block */ if (!dont_split && !leftoverblock && !dict_training) { nstreams = (int32_t)typesize; } else { nstreams = 1; } neblock = bsize / nstreams; for (j = 0; j < nstreams; j++) { if (!dict_training) { dest += sizeof(int32_t); ntbytes += sizeof(int32_t); ctbytes += sizeof(int32_t); } // See if we have a run here const uint8_t* ip = (uint8_t*)_src + j * neblock; const uint8_t* ipbound = (uint8_t*)_src + (j + 1) * neblock; if (get_run(ip, ipbound)) { // A run. Encode the repeated byte as a negative length in the length of the split. 
int32_t value = _src[j * neblock]; _sw32(dest - 4, -value); continue; } maxout = neblock; #if defined(HAVE_SNAPPY) if (context->compcode == BLOSC_SNAPPY) { maxout = (int32_t)snappy_max_compressed_length((size_t)neblock); } #endif /* HAVE_SNAPPY */ if (ntbytes + maxout > maxbytes) { /* avoid buffer * overrun */ maxout = (int64_t)maxbytes - (int64_t)ntbytes; if (maxout <= 0) { return 0; /* non-compressible block */ } } if (dict_training) { // We are in the build dict state, so don't compress // TODO: copy only a percentage for sampling memcpy(dest, _src + j * neblock, (unsigned int)neblock); cbytes = (int32_t)neblock; } else if (context->compcode == BLOSC_BLOSCLZ) { cbytes = blosclz_compress(context->clevel, _src + j * neblock, (int)neblock, dest, (int)maxout); } #if defined(HAVE_LZ4) else if (context->compcode == BLOSC_LZ4) { void *hash_table = NULL; #ifdef HAVE_IPP hash_table = (void*)thread_context->lz4_hash_table; #endif cbytes = lz4_wrap_compress((char*)_src + j * neblock, (size_t)neblock, (char*)dest, (size_t)maxout, accel, hash_table); } else if (context->compcode == BLOSC_LZ4HC) { cbytes = lz4hc_wrap_compress((char*)_src + j * neblock, (size_t)neblock, (char*)dest, (size_t)maxout, context->clevel); } #endif /* HAVE_LZ4 */ #if defined(HAVE_LIZARD) else if (context->compcode == BLOSC_LIZARD) { cbytes = lizard_wrap_compress((char*)_src + j * neblock, (size_t)neblock, (char*)dest, (size_t)maxout, accel); } #endif /* HAVE_LIZARD */ #if defined(HAVE_SNAPPY) else if (context->compcode == BLOSC_SNAPPY) { cbytes = snappy_wrap_compress((char*)_src + j * neblock, (size_t)neblock, (char*)dest, (size_t)maxout); } #endif /* HAVE_SNAPPY */ #if defined(HAVE_ZLIB) else if (context->compcode == BLOSC_ZLIB) { cbytes = zlib_wrap_compress((char*)_src + j * neblock, (size_t)neblock, (char*)dest, (size_t)maxout, context->clevel); } #endif /* HAVE_ZLIB */ #if defined(HAVE_ZSTD) else if (context->compcode == BLOSC_ZSTD) { cbytes = zstd_wrap_compress(thread_context, (char*)_src + j * neblock, (size_t)neblock, (char*)dest, (size_t)maxout, context->clevel); } #endif /* HAVE_ZSTD */ else { blosc_compcode_to_compname(context->compcode, &compname); fprintf(stderr, "Blosc has not been compiled with '%s' ", compname); fprintf(stderr, "compression support. Please use one having it."); return -5; /* signals no compression support */ } if (cbytes > maxout) { /* Buffer overrun caused by compression (should never happen) */ return -1; } if (cbytes < 0) { /* cbytes should never be negative */ return -2; } if (!dict_training) { if (cbytes == 0 || cbytes == neblock) { /* The compressor has been unable to compress data at all. */ /* Before doing the copy, check that we are not running into a buffer overflow. 
*/ if ((ntbytes + neblock) > maxbytes) { return 0; /* Non-compressible data */ } memcpy(dest, _src + j * neblock, (unsigned int)neblock); cbytes = neblock; } _sw32(dest - 4, cbytes); } dest += cbytes; ntbytes += cbytes; ctbytes += cbytes; } /* Closes j < nstreams */ //printf("c%d", ctbytes); return ctbytes; } /* Process the filter pipeline (decompression mode) */ int pipeline_d(blosc2_context* context, const int32_t bsize, uint8_t* dest, const int32_t offset, uint8_t* src, uint8_t* tmp, uint8_t* tmp2, int last_filter_index) { int32_t typesize = context->typesize; uint8_t* filters = context->filters; uint8_t* filters_meta = context->filters_meta; uint8_t* _src = src; uint8_t* _dest = tmp; uint8_t* _tmp = tmp2; int errcode = 0; for (int i = BLOSC2_MAX_FILTERS - 1; i >= 0; i--) { // Delta filter requires the whole chunk ready int last_copy_filter = (last_filter_index == i) || (next_filter(filters, i, 'd') == BLOSC_DELTA); if (last_copy_filter) { _dest = dest + offset; } switch (filters[i]) { case BLOSC_SHUFFLE: for (int j = 0; j <= filters_meta[i]; j++) { unshuffle(typesize, bsize, _src, _dest); // Cycle filters when required if (j < filters_meta[i]) { _src = _dest; _dest = _tmp; _tmp = _src; } // Check whether we have to copy the intermediate _dest buffer to final destination if (last_copy_filter && (filters_meta[i] % 2) == 1 && j == filters_meta[i]) { memcpy(dest + offset, _dest, (unsigned int)bsize); } } break; case BLOSC_BITSHUFFLE: bitunshuffle(typesize, bsize, _src, _dest, _tmp, context->src[0]); break; case BLOSC_DELTA: if (context->nthreads == 1) { /* Serial mode */ delta_decoder(dest, offset, bsize, typesize, _dest); } else { /* Force the thread in charge of the block 0 to go first */ pthread_mutex_lock(&context->delta_mutex); if (context->dref_not_init) { if (offset != 0) { pthread_cond_wait(&context->delta_cv, &context->delta_mutex); } else { delta_decoder(dest, offset, bsize, typesize, _dest); context->dref_not_init = 0; pthread_cond_broadcast(&context->delta_cv); } } pthread_mutex_unlock(&context->delta_mutex); if (offset != 0) { delta_decoder(dest, offset, bsize, typesize, _dest); } } break; case BLOSC_TRUNC_PREC: // TRUNC_PREC filter does not need to be undone break; default: if (filters[i] != BLOSC_NOFILTER) { fprintf(stderr, "Filter %d not handled during decompression\n", filters[i]); errcode = -1; } } if (last_filter_index == i) { return errcode; } // Cycle buffers when required if ((filters[i] != BLOSC_NOFILTER) && (filters[i] != BLOSC_TRUNC_PREC)) { _src = _dest; _dest = _tmp; _tmp = _src; } } return errcode; } /* Decompress & unshuffle a single block */ static int blosc_d( struct thread_context* thread_context, int32_t bsize, int32_t leftoverblock, const uint8_t* src, int32_t srcsize, int32_t src_offset, uint8_t* dest, int32_t dest_offset, uint8_t* tmp, uint8_t* tmp2) { blosc2_context* context = thread_context->parent_context; uint8_t* filters = context->filters; uint8_t *tmp3 = thread_context->tmp4; int32_t compformat = (context->header_flags & 0xe0) >> 5; int dont_split = (context->header_flags & 0x10) >> 4; //uint8_t blosc_version_format = src[0]; int nstreams; int32_t neblock; int32_t nbytes; /* number of decompressed bytes in split */ int32_t cbytes; /* number of compressed bytes in split */ int32_t ctbytes = 0; /* number of compressed bytes in block */ int32_t ntbytes = 0; /* number of uncompressed bytes in block */ uint8_t* _dest; int32_t typesize = context->typesize; int32_t nblock = dest_offset / context->blocksize; const char* compname; if 
(context->block_maskout != NULL && context->block_maskout[nblock]) { // Do not decompress, but act as if we successfully decompressed everything return bsize; } if (src_offset <= 0 || src_offset >= srcsize) { /* Invalid block src offset encountered */ return -1; } src += src_offset; srcsize -= src_offset; int last_filter_index = last_filter(filters, 'd'); if ((last_filter_index >= 0) && (next_filter(filters, BLOSC2_MAX_FILTERS, 'd') != BLOSC_DELTA)) { // We are making use of some filter, so use a temp for destination _dest = tmp; } else { // If no filters, or only DELTA in pipeline _dest = dest + dest_offset; } /* The number of compressed data streams for this block */ if (!dont_split && !leftoverblock && !context->use_dict) { // We don't want to split when in a training dict state nstreams = (int32_t)typesize; } else { nstreams = 1; } neblock = bsize / nstreams; for (int j = 0; j < nstreams; j++) { if (srcsize < sizeof(int32_t)) { /* Not enough input to read compressed size */ return -1; } srcsize -= sizeof(int32_t); cbytes = sw32_(src); /* amount of compressed bytes */ if (cbytes > 0) { if (srcsize < cbytes) { /* Not enough input to read compressed bytes */ return -1; } srcsize -= cbytes; } src += sizeof(int32_t); ctbytes += (int32_t)sizeof(int32_t); /* Uncompress */ if (cbytes <= 0) { // A run if (cbytes < -255) { // Runs can only encode a byte return -2; } uint8_t value = -cbytes; memset(_dest, value, (unsigned int)neblock); nbytes = neblock; cbytes = 0; // everything is encoded in the cbytes token } else if (cbytes == neblock) { memcpy(_dest, src, (unsigned int)neblock); nbytes = (int32_t)neblock; } else { if (compformat == BLOSC_BLOSCLZ_FORMAT) { nbytes = blosclz_decompress(src, cbytes, _dest, (int)neblock); } #if defined(HAVE_LZ4) else if (compformat == BLOSC_LZ4_FORMAT) { nbytes = lz4_wrap_decompress((char*)src, (size_t)cbytes, (char*)_dest, (size_t)neblock); } #endif /* HAVE_LZ4 */ #if defined(HAVE_LIZARD) else if (compformat == BLOSC_LIZARD_FORMAT) { nbytes = lizard_wrap_decompress((char*)src, (size_t)cbytes, (char*)_dest, (size_t)neblock); } #endif /* HAVE_LIZARD */ #if defined(HAVE_SNAPPY) else if (compformat == BLOSC_SNAPPY_FORMAT) { nbytes = snappy_wrap_decompress((char*)src, (size_t)cbytes, (char*)_dest, (size_t)neblock); } #endif /* HAVE_SNAPPY */ #if defined(HAVE_ZLIB) else if (compformat == BLOSC_ZLIB_FORMAT) { nbytes = zlib_wrap_decompress((char*)src, (size_t)cbytes, (char*)_dest, (size_t)neblock); } #endif /* HAVE_ZLIB */ #if defined(HAVE_ZSTD) else if (compformat == BLOSC_ZSTD_FORMAT) { nbytes = zstd_wrap_decompress(thread_context, (char*)src, (size_t)cbytes, (char*)_dest, (size_t)neblock); } #endif /* HAVE_ZSTD */ else { compname = clibcode_to_clibname(compformat); fprintf(stderr, "Blosc has not been compiled with decompression " "support for '%s' format. 
", compname); fprintf(stderr, "Please recompile for adding this support.\n"); return -5; /* signals no decompression support */ } /* Check that decompressed bytes number is correct */ if (nbytes != neblock) { return -2; } } src += cbytes; ctbytes += cbytes; _dest += nbytes; ntbytes += nbytes; } /* Closes j < nstreams */ if (last_filter_index >= 0) { int errcode = pipeline_d(context, bsize, dest, dest_offset, tmp, tmp2, tmp3, last_filter_index); if (errcode < 0) return errcode; } /* Return the number of uncompressed bytes */ return (int)ntbytes; } /* Serial version for compression/decompression */ static int serial_blosc(struct thread_context* thread_context) { blosc2_context* context = thread_context->parent_context; int32_t j, bsize, leftoverblock; int32_t cbytes; int32_t ntbytes = (int32_t)context->output_bytes; int32_t* bstarts = context->bstarts; uint8_t* tmp = thread_context->tmp; uint8_t* tmp2 = thread_context->tmp2; int dict_training = context->use_dict && (context->dict_cdict == NULL); bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; for (j = 0; j < context->nblocks; j++) { if (context->do_compress && !memcpyed && !dict_training) { _sw32(bstarts + j, ntbytes); } bsize = context->blocksize; leftoverblock = 0; if ((j == context->nblocks - 1) && (context->leftover > 0)) { bsize = context->leftover; leftoverblock = 1; } if (context->do_compress) { if (memcpyed && !context->prefilter) { /* We want to memcpy only */ memcpy(context->dest + BLOSC_MAX_OVERHEAD + j * context->blocksize, context->src + j * context->blocksize, (unsigned int)bsize); cbytes = (int32_t)bsize; } else { /* Regular compression */ cbytes = blosc_c(thread_context, bsize, leftoverblock, ntbytes, context->destsize, context->src, j * context->blocksize, context->dest + ntbytes, tmp, tmp2); if (cbytes == 0) { ntbytes = 0; /* uncompressible data */ break; } } } else { if (memcpyed) { // Check that sizes in header are compatible, otherwise there is a header corruption int32_t csize = sw32_(context->src + 12); /* compressed buffer size */ if (context->sourcesize + BLOSC_MAX_OVERHEAD != csize) { return -1; } if (context->srcsize < BLOSC_MAX_OVERHEAD + (j * context->blocksize) + bsize) { /* Not enough input to copy block */ return -1; } memcpy(context->dest + j * context->blocksize, context->src + BLOSC_MAX_OVERHEAD + j * context->blocksize, (unsigned int)bsize); cbytes = (int32_t)bsize; } else { /* Regular decompression */ cbytes = blosc_d(thread_context, bsize, leftoverblock, context->src, context->srcsize, sw32_(bstarts + j), context->dest, j * context->blocksize, tmp, tmp2); } } if (cbytes < 0) { ntbytes = cbytes; /* error in blosc_c or blosc_d */ break; } ntbytes += cbytes; } return ntbytes; } static void t_blosc_do_job(void *ctxt); /* Threaded version for compression/decompression */ static int parallel_blosc(blosc2_context* context) { #ifdef BLOSC_POSIX_BARRIERS int rc; #endif /* Set sentinels */ context->thread_giveup_code = 1; context->thread_nblock = -1; if (threads_callback) { threads_callback(threads_callback_data, t_blosc_do_job, context->nthreads, sizeof(struct thread_context), (void*) context->thread_contexts); } else { /* Synchronization point for all threads (wait for initialization) */ WAIT_INIT(-1, context); /* Synchronization point for all threads (wait for finalization) */ WAIT_FINISH(-1, context); } if (context->thread_giveup_code <= 0) { /* Compression/decompression gave up. Return error code. 
*/ return context->thread_giveup_code; } /* Return the total bytes (de-)compressed in threads */ return (int)context->output_bytes; } /* initialize a thread_context that has already been allocated */ static void init_thread_context(struct thread_context* thread_context, blosc2_context* context, int32_t tid) { int32_t ebsize; thread_context->parent_context = context; thread_context->tid = tid; ebsize = context->blocksize + context->typesize * (int32_t)sizeof(int32_t); thread_context->tmp_nbytes = (size_t)3 * context->blocksize + ebsize; thread_context->tmp = my_malloc(thread_context->tmp_nbytes); thread_context->tmp2 = thread_context->tmp + context->blocksize; thread_context->tmp3 = thread_context->tmp + context->blocksize + ebsize; thread_context->tmp4 = thread_context->tmp + 2 * context->blocksize + ebsize; thread_context->tmp_blocksize = context->blocksize; #if defined(HAVE_ZSTD) thread_context->zstd_cctx = NULL; thread_context->zstd_dctx = NULL; #endif /* Create the hash table for LZ4 in case we are using IPP */ #ifdef HAVE_IPP IppStatus status; int inlen = thread_context->tmp_blocksize > 0 ? thread_context->tmp_blocksize : 1 << 16; int hash_size = 0; status = ippsEncodeLZ4HashTableGetSize_8u(&hash_size); if (status != ippStsNoErr) { fprintf(stderr, "Error in ippsEncodeLZ4HashTableGetSize_8u"); } Ipp8u *hash_table = ippsMalloc_8u(hash_size); status = ippsEncodeLZ4HashTableInit_8u(hash_table, inlen); if (status != ippStsNoErr) { fprintf(stderr, "Error in ippsEncodeLZ4HashTableInit_8u"); } thread_context->lz4_hash_table = hash_table; #endif } static struct thread_context* create_thread_context(blosc2_context* context, int32_t tid) { struct thread_context* thread_context; thread_context = (struct thread_context*)my_malloc(sizeof(struct thread_context)); init_thread_context(thread_context, context, tid); return thread_context; } /* free members of thread_context, but not thread_context itself */ static void destroy_thread_context(struct thread_context* thread_context) { my_free(thread_context->tmp); #if defined(HAVE_ZSTD) if (thread_context->zstd_cctx != NULL) { ZSTD_freeCCtx(thread_context->zstd_cctx); } if (thread_context->zstd_dctx != NULL) { ZSTD_freeDCtx(thread_context->zstd_dctx); } #endif #ifdef HAVE_IPP if (thread_context->lz4_hash_table != NULL) { ippsFree(thread_context->lz4_hash_table); } #endif } void free_thread_context(struct thread_context* thread_context) { destroy_thread_context(thread_context); my_free(thread_context); } int check_nthreads(blosc2_context* context) { if (context->nthreads <= 0) { fprintf(stderr, "Error. nthreads must be a positive integer"); return -1; } if (context->new_nthreads != context->nthreads) { if (context->nthreads > 1) { release_threadpool(context); } context->nthreads = context->new_nthreads; } if (context->new_nthreads > 1 && context->threads_started == 0) { init_threadpool(context); } return context->nthreads; } /* Do the compression or decompression of the buffer depending on the global params. 
*/ static int do_job(blosc2_context* context) { int32_t ntbytes; /* Set sentinels */ context->dref_not_init = 1; /* Check whether we need to restart threads */ check_nthreads(context); /* Run the serial version when nthreads is 1 or when the buffers are not larger than blocksize */ if (context->nthreads == 1 || (context->sourcesize / context->blocksize) <= 1) { /* The context for this 'thread' has no been initialized yet */ if (context->serial_context == NULL) { context->serial_context = create_thread_context(context, 0); } else if (context->blocksize != context->serial_context->tmp_blocksize) { free_thread_context(context->serial_context); context->serial_context = create_thread_context(context, 0); } ntbytes = serial_blosc(context->serial_context); } else { ntbytes = parallel_blosc(context); } return ntbytes; } /* Convert filter pipeline to filter flags */ static uint8_t filters_to_flags(const uint8_t* filters) { uint8_t flags = 0; for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) { switch (filters[i]) { case BLOSC_SHUFFLE: flags |= BLOSC_DOSHUFFLE; break; case BLOSC_BITSHUFFLE: flags |= BLOSC_DOBITSHUFFLE; break; case BLOSC_DELTA: flags |= BLOSC_DODELTA; break; default : break; } } return flags; } /* Convert filter flags to filter pipeline */ static void flags_to_filters(const uint8_t flags, uint8_t* filters) { /* Initialize the filter pipeline */ memset(filters, 0, BLOSC2_MAX_FILTERS); /* Fill the filter pipeline */ if (flags & BLOSC_DOSHUFFLE) filters[BLOSC2_MAX_FILTERS - 1] = BLOSC_SHUFFLE; if (flags & BLOSC_DOBITSHUFFLE) filters[BLOSC2_MAX_FILTERS - 1] = BLOSC_BITSHUFFLE; if (flags & BLOSC_DODELTA) filters[BLOSC2_MAX_FILTERS - 2] = BLOSC_DELTA; } static int initialize_context_compression( blosc2_context* context, const void* src, int32_t srcsize, void* dest, int32_t destsize, int clevel, uint8_t const *filters, uint8_t const *filters_meta, int32_t typesize, int compressor, int32_t blocksize, int new_nthreads, int nthreads, blosc2_schunk* schunk) { /* Set parameters */ context->do_compress = 1; context->src = (const uint8_t*)src; context->srcsize = srcsize; context->dest = (uint8_t*)dest; context->output_bytes = 0; context->destsize = destsize; context->sourcesize = srcsize; context->typesize = (int32_t)typesize; context->filter_flags = filters_to_flags(filters); for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) { context->filters[i] = filters[i]; context->filters_meta[i] = filters_meta[i]; } context->compcode = compressor; context->nthreads = nthreads; context->new_nthreads = new_nthreads; context->end_threads = 0; context->clevel = clevel; context->schunk = schunk; /* Tune some compression parameters */ context->blocksize = (int32_t)blocksize; if (context->btune != NULL) { btune_next_cparams(context); } else { btune_next_blocksize(context); } char* envvar = getenv("BLOSC_WARN"); int warnlvl = 0; if (envvar != NULL) { warnlvl = strtol(envvar, NULL, 10); } /* Check buffer size limits */ if (srcsize > BLOSC_MAX_BUFFERSIZE) { if (warnlvl > 0) { fprintf(stderr, "Input buffer size cannot exceed %d bytes\n", BLOSC_MAX_BUFFERSIZE); } return 0; } if (destsize < BLOSC_MAX_OVERHEAD) { if (warnlvl > 0) { fprintf(stderr, "Output buffer size should be larger than %d bytes\n", BLOSC_MAX_OVERHEAD); } return 0; } if (destsize < BLOSC_MAX_OVERHEAD) { if (warnlvl > 0) { fprintf(stderr, "Output buffer size should be larger than %d bytes\n", BLOSC_MAX_OVERHEAD); } return -2; } if (destsize < BLOSC_MAX_OVERHEAD) { fprintf(stderr, "Output buffer size should be larger than %d bytes\n", BLOSC_MAX_OVERHEAD); return 
-1; } /* Compression level */ if (clevel < 0 || clevel > 9) { /* If clevel not in 0..9, print an error */ fprintf(stderr, "`clevel` parameter must be between 0 and 9!\n"); return -10; } /* Check typesize limits */ if (context->typesize > BLOSC_MAX_TYPESIZE) { /* If typesize is too large, treat buffer as an 1-byte stream. */ context->typesize = 1; } /* Compute number of blocks in buffer */ context->nblocks = context->sourcesize / context->blocksize; context->leftover = context->sourcesize % context->blocksize; context->nblocks = (context->leftover > 0) ? (context->nblocks + 1) : context->nblocks; return 1; } /* Get filter flags from header flags */ static uint8_t get_filter_flags(const uint8_t header_flags, const int32_t typesize) { uint8_t flags = 0; if ((header_flags & BLOSC_DOSHUFFLE) && (typesize > 1)) { flags |= BLOSC_DOSHUFFLE; } if (header_flags & BLOSC_DOBITSHUFFLE) { flags |= BLOSC_DOBITSHUFFLE; } if (header_flags & BLOSC_DODELTA) { flags |= BLOSC_DODELTA; } if (header_flags & BLOSC_MEMCPYED) { flags |= BLOSC_MEMCPYED; } return flags; } static int initialize_context_decompression(blosc2_context* context, const void* src, int32_t srcsize, void* dest, int32_t destsize) { uint8_t blosc2_flags = 0; int32_t cbytes; int32_t bstarts_offset; int32_t bstarts_end; context->do_compress = 0; context->src = (const uint8_t*)src; context->srcsize = srcsize; context->dest = (uint8_t*)dest; context->destsize = destsize; context->output_bytes = 0; context->end_threads = 0; if (context->srcsize < BLOSC_MIN_HEADER_LENGTH) { /* Not enough input to read minimum header */ return -1; } context->header_flags = context->src[2]; context->typesize = context->src[3]; context->sourcesize = sw32_(context->src + 4); context->blocksize = sw32_(context->src + 8); cbytes = sw32_(context->src + 12); // Some checks for malformed headers if (context->blocksize <= 0 || context->blocksize > destsize || context->typesize <= 0 || context->typesize > BLOSC_MAX_TYPESIZE || cbytes > srcsize) { return -1; } /* Check that we have enough space to decompress */ if (context->sourcesize > (int32_t)destsize) { return -1; } /* Total blocks */ context->nblocks = context->sourcesize / context->blocksize; context->leftover = context->sourcesize % context->blocksize; context->nblocks = (context->leftover > 0) ? context->nblocks + 1 : context->nblocks; if (context->block_maskout != NULL && context->block_maskout_nitems != context->nblocks) { fprintf(stderr, "The number of items in block_maskout (%d) must match the number" " of blocks in chunk (%d)", context->block_maskout_nitems, context->nblocks); return -2; } if ((context->header_flags & BLOSC_DOSHUFFLE) && (context->header_flags & BLOSC_DOBITSHUFFLE)) { /* Extended header */ if (context->srcsize < BLOSC_EXTENDED_HEADER_LENGTH) { /* Not enough input to read extended header */ return -1; } uint8_t* filters = (uint8_t*)(context->src + BLOSC_MIN_HEADER_LENGTH); uint8_t* filters_meta = filters + 8; uint8_t header_version = context->src[0]; // The number of filters depends on the version of the header // (we need to read less because filters where not initialized to zero in blosc2 alpha series) int max_filters = (header_version == BLOSC2_VERSION_FORMAT_ALPHA) ? 
5 : BLOSC2_MAX_FILTERS; for (int i = 0; i < max_filters; i++) { context->filters[i] = filters[i]; context->filters_meta[i] = filters_meta[i]; } context->filter_flags = filters_to_flags(filters); bstarts_offset = BLOSC_EXTENDED_HEADER_LENGTH; blosc2_flags = context->src[0x1F]; } else { /* Regular (Blosc1) header */ context->filter_flags = get_filter_flags(context->header_flags, context->typesize); flags_to_filters(context->header_flags, context->filters); bstarts_offset = BLOSC_MIN_HEADER_LENGTH; } context->bstarts = (int32_t*)(context->src + bstarts_offset); bstarts_end = bstarts_offset + (context->nblocks * sizeof(int32_t)); if (srcsize < bstarts_end) { /* Not enough input to read entire `bstarts` section */ return -1; } srcsize -= bstarts_end; /* Read optional dictionary if flag set */ if (blosc2_flags & BLOSC2_USEDICT) { #if defined(HAVE_ZSTD) context->use_dict = 1; if (context->dict_ddict != NULL) { // Free the existing dictionary (probably from another chunk) ZSTD_freeDDict(context->dict_ddict); } // The trained dictionary is after the bstarts block if (srcsize < sizeof(int32_t)) { /* Not enough input to size of dictionary */ return -1; } srcsize -= sizeof(int32_t); context->dict_size = (size_t)sw32_(context->src + bstarts_end); if (context->dict_size <= 0 || context->dict_size > BLOSC2_MAXDICTSIZE) { /* Dictionary size is smaller than minimum or larger than maximum allowed */ return -1; } if (srcsize < (int32_t)context->dict_size) { /* Not enough input to read entire dictionary */ return -1; } srcsize -= context->dict_size; context->dict_buffer = (void*)(context->src + bstarts_end + sizeof(int32_t)); context->dict_ddict = ZSTD_createDDict(context->dict_buffer, context->dict_size); #endif // HAVE_ZSTD } return 0; } static int write_compression_header(blosc2_context* context, bool extended_header) { int32_t compformat; int dont_split; int dict_training = context->use_dict && (context->dict_cdict == NULL); // Set the whole header to zeros so that the reserved values are zeroed if (extended_header) { memset(context->dest, 0, BLOSC_EXTENDED_HEADER_LENGTH); } else { memset(context->dest, 0, BLOSC_MIN_HEADER_LENGTH); } /* Write version header for this block */ context->dest[0] = BLOSC_VERSION_FORMAT; /* Write compressor format */ compformat = -1; switch (context->compcode) { case BLOSC_BLOSCLZ: compformat = BLOSC_BLOSCLZ_FORMAT; context->dest[1] = BLOSC_BLOSCLZ_VERSION_FORMAT; break; #if defined(HAVE_LZ4) case BLOSC_LZ4: compformat = BLOSC_LZ4_FORMAT; context->dest[1] = BLOSC_LZ4_VERSION_FORMAT; break; case BLOSC_LZ4HC: compformat = BLOSC_LZ4HC_FORMAT; context->dest[1] = BLOSC_LZ4HC_VERSION_FORMAT; break; #endif /* HAVE_LZ4 */ #if defined(HAVE_LIZARD) case BLOSC_LIZARD: compformat = BLOSC_LIZARD_FORMAT; context->dest[1] = BLOSC_LIZARD_VERSION_FORMAT; break; #endif /* HAVE_LIZARD */ #if defined(HAVE_SNAPPY) case BLOSC_SNAPPY: compformat = BLOSC_SNAPPY_FORMAT; context->dest[1] = BLOSC_SNAPPY_VERSION_FORMAT; break; #endif /* HAVE_SNAPPY */ #if defined(HAVE_ZLIB) case BLOSC_ZLIB: compformat = BLOSC_ZLIB_FORMAT; context->dest[1] = BLOSC_ZLIB_VERSION_FORMAT; break; #endif /* HAVE_ZLIB */ #if defined(HAVE_ZSTD) case BLOSC_ZSTD: compformat = BLOSC_ZSTD_FORMAT; context->dest[1] = BLOSC_ZSTD_VERSION_FORMAT; break; #endif /* HAVE_ZSTD */ default: { const char* compname; compname = clibcode_to_clibname(compformat); fprintf(stderr, "Blosc has not been compiled with '%s' ", compname); fprintf(stderr, "compression support. 
Please use one having it."); return -5; /* signals no compression support */ break; } } if (context->clevel == 0) { /* Compression level 0 means buffer to be memcpy'ed */ context->header_flags |= (uint8_t)BLOSC_MEMCPYED; } if (context->sourcesize < BLOSC_MIN_BUFFERSIZE) { /* Buffer is too small. Try memcpy'ing. */ context->header_flags |= (uint8_t)BLOSC_MEMCPYED; } bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; context->dest[2] = 0; /* zeroes flags */ context->dest[3] = (uint8_t)context->typesize; _sw32(context->dest + 4, (int32_t)context->sourcesize); _sw32(context->dest + 8, (int32_t)context->blocksize); if (extended_header) { /* Mark that we are handling an extended header */ context->header_flags |= (BLOSC_DOSHUFFLE | BLOSC_DOBITSHUFFLE); /* Store filter pipeline info at the end of the header */ uint8_t *filters = context->dest + BLOSC_MIN_HEADER_LENGTH; uint8_t *filters_meta = filters + 8; for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) { filters[i] = context->filters[i]; filters_meta[i] = context->filters_meta[i]; } uint8_t* blosc2_flags = context->dest + 0x1F; *blosc2_flags = 0; // zeroes flags *blosc2_flags |= is_little_endian() ? 0 : BLOSC2_BIGENDIAN; // endianness if (dict_training || memcpyed) { context->bstarts = NULL; context->output_bytes = BLOSC_EXTENDED_HEADER_LENGTH; } else { context->bstarts = (int32_t*)(context->dest + BLOSC_EXTENDED_HEADER_LENGTH); context->output_bytes = BLOSC_EXTENDED_HEADER_LENGTH + sizeof(int32_t) * context->nblocks; } if (context->use_dict) { *blosc2_flags |= BLOSC2_USEDICT; } } else { // Regular header if (memcpyed) { context->bstarts = NULL; context->output_bytes = BLOSC_MIN_HEADER_LENGTH; } else { context->bstarts = (int32_t *) (context->dest + BLOSC_MIN_HEADER_LENGTH); context->output_bytes = BLOSC_MIN_HEADER_LENGTH + sizeof(int32_t) * context->nblocks; } } // when memcpyed bit is set, there is no point in dealing with others if (!memcpyed) { if (context->filter_flags & BLOSC_DOSHUFFLE) { /* Byte-shuffle is active */ context->header_flags |= BLOSC_DOSHUFFLE; } if (context->filter_flags & BLOSC_DOBITSHUFFLE) { /* Bit-shuffle is active */ context->header_flags |= BLOSC_DOBITSHUFFLE; } if (context->filter_flags & BLOSC_DODELTA) { /* Delta is active */ context->header_flags |= BLOSC_DODELTA; } dont_split = !split_block(context, context->typesize, context->blocksize, extended_header); context->header_flags |= dont_split << 4; /* dont_split is in bit 4 */ context->header_flags |= compformat << 5; /* codec starts at bit 5 */ } // store header flags in dest context->dest[2] = context->header_flags; return 1; } int blosc_compress_context(blosc2_context* context) { int ntbytes = 0; blosc_timestamp_t last, current; bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; blosc_set_timestamp(&last); if (!memcpyed) { /* Do the actual compression */ ntbytes = do_job(context); if (ntbytes < 0) { return -1; } if (ntbytes == 0) { // Try out with a memcpy later on (last chance for fitting src buffer in dest). context->header_flags |= (uint8_t)BLOSC_MEMCPYED; memcpyed = true; } } if (memcpyed) { if (context->sourcesize + BLOSC_MAX_OVERHEAD > context->destsize) { /* We are exceeding maximum output size */ ntbytes = 0; } else { context->output_bytes = BLOSC_MAX_OVERHEAD; ntbytes = do_job(context); if (ntbytes < 0) { return -1; } // Success! 
update the memcpy bit in header context->dest[2] = context->header_flags; // and clear the memcpy bit in context (for next reuse) context->header_flags &= ~(uint8_t)BLOSC_MEMCPYED; } } /* Set the number of compressed bytes in header */ _sw32(context->dest + 12, ntbytes); /* Set the number of bytes in dest buffer (might be useful for btune) */ context->destsize = ntbytes; assert(ntbytes <= context->destsize); if (context->btune != NULL) { blosc_set_timestamp(&current); double ctime = blosc_elapsed_secs(last, current); btune_update(context, ctime); } return ntbytes; } /* The public secure routine for compression with context. */ int blosc2_compress_ctx(blosc2_context* context, const void* src, int32_t srcsize, void* dest, int32_t destsize) { int error, cbytes; if (context->do_compress != 1) { fprintf(stderr, "Context is not meant for compression. Giving up.\n"); return -10; } error = initialize_context_compression( context, src, srcsize, dest, destsize, context->clevel, context->filters, context->filters_meta, context->typesize, context->compcode, context->blocksize, context->new_nthreads, context->nthreads, context->schunk); if (error <= 0) { return error; } /* Write the extended header */ error = write_compression_header(context, true); if (error < 0) { return error; } cbytes = blosc_compress_context(context); if (cbytes < 0) { return cbytes; } if (context->use_dict && context->dict_cdict == NULL) { if (context->compcode != BLOSC_ZSTD) { const char* compname; compname = clibcode_to_clibname(context->compcode); fprintf(stderr, "Codec %s does not support dicts. Giving up.\n", compname); return -20; } #ifdef HAVE_ZSTD // Build the dictionary out of the filters outcome and compress with it int32_t dict_maxsize = BLOSC2_MAXDICTSIZE; // Do not make the dict more than 5% larger than uncompressed buffer if (dict_maxsize > srcsize / 20) { dict_maxsize = srcsize / 20; } void* samples_buffer = context->dest + BLOSC_EXTENDED_HEADER_LENGTH; unsigned nblocks = 8; // the minimum that accepts zstd as of 1.4.0 unsigned sample_fraction = 1; // 1 allows to use most of the chunk for training size_t sample_size = context->sourcesize / nblocks / sample_fraction; // Populate the samples sizes for training the dictionary size_t* samples_sizes = malloc(nblocks * sizeof(void*)); for (size_t i = 0; i < nblocks; i++) { samples_sizes[i] = sample_size; } // Train from samples void* dict_buffer = malloc(dict_maxsize); size_t dict_actual_size = ZDICT_trainFromBuffer(dict_buffer, dict_maxsize, samples_buffer, samples_sizes, nblocks); // TODO: experiment with parameters of low-level fast cover algorithm // Note that this API is still unstable. See: https://github.com/facebook/zstd/issues/1599 // ZDICT_fastCover_params_t fast_cover_params; // memset(&fast_cover_params, 0, sizeof(fast_cover_params)); // fast_cover_params.d = nblocks; // fast_cover_params.steps = 4; // fast_cover_params.zParams.compressionLevel = context->clevel; //size_t dict_actual_size = ZDICT_optimizeTrainFromBuffer_fastCover(dict_buffer, dict_maxsize, samples_buffer, samples_sizes, nblocks, &fast_cover_params); if (ZDICT_isError(dict_actual_size) != ZSTD_error_no_error) { fprintf(stderr, "Error in ZDICT_trainFromBuffer(): '%s'." 
" Giving up.\n", ZDICT_getErrorName(dict_actual_size)); return -20; } assert(dict_actual_size > 0); free(samples_sizes); // Update bytes counter and pointers to bstarts for the new compressed buffer context->bstarts = (int32_t*)(context->dest + BLOSC_EXTENDED_HEADER_LENGTH); context->output_bytes = BLOSC_EXTENDED_HEADER_LENGTH + sizeof(int32_t) * context->nblocks; /* Write the size of trained dict at the end of bstarts */ _sw32(context->dest + context->output_bytes, (int32_t)dict_actual_size); context->output_bytes += sizeof(int32_t); /* Write the trained dict afterwards */ context->dict_buffer = context->dest + context->output_bytes; memcpy(context->dict_buffer, dict_buffer, (unsigned int)dict_actual_size); context->dict_cdict = ZSTD_createCDict(dict_buffer, dict_actual_size, 1); // TODO: use get_accel() free(dict_buffer); // the dictionary is copied in the header now context->output_bytes += (int32_t)dict_actual_size; context->dict_size = dict_actual_size; /* Compress with dict */ cbytes = blosc_compress_context(context); // Invalidate the dictionary for compressing other chunks using the same context context->dict_buffer = NULL; ZSTD_freeCDict(context->dict_cdict); context->dict_cdict = NULL; #endif // HAVE_ZSTD } return cbytes; } void build_filters(const int doshuffle, const int delta, const size_t typesize, uint8_t* filters) { /* Fill the end part of the filter pipeline */ if ((doshuffle == BLOSC_SHUFFLE) && (typesize > 1)) filters[BLOSC2_MAX_FILTERS - 1] = BLOSC_SHUFFLE; if (doshuffle == BLOSC_BITSHUFFLE) filters[BLOSC2_MAX_FILTERS - 1] = BLOSC_BITSHUFFLE; if (delta) filters[BLOSC2_MAX_FILTERS - 2] = BLOSC_DELTA; } /* The public secure routine for compression. */ int blosc2_compress(int clevel, int doshuffle, int32_t typesize, const void* src, int32_t srcsize, void* dest, int32_t destsize) { int error; int result; char* envvar; /* Check whether the library should be initialized */ if (!g_initlib) blosc_init(); /* Check for a BLOSC_CLEVEL environment variable */ envvar = getenv("BLOSC_CLEVEL"); if (envvar != NULL) { long value; value = strtol(envvar, NULL, 10); if ((value != EINVAL) && (value >= 0)) { clevel = (int)value; } } /* Check for a BLOSC_SHUFFLE environment variable */ envvar = getenv("BLOSC_SHUFFLE"); if (envvar != NULL) { if (strcmp(envvar, "NOSHUFFLE") == 0) { doshuffle = BLOSC_NOSHUFFLE; } if (strcmp(envvar, "SHUFFLE") == 0) { doshuffle = BLOSC_SHUFFLE; } if (strcmp(envvar, "BITSHUFFLE") == 0) { doshuffle = BLOSC_BITSHUFFLE; } } /* Check for a BLOSC_DELTA environment variable */ envvar = getenv("BLOSC_DELTA"); if (envvar != NULL) { if (strcmp(envvar, "1") == 0) { blosc_set_delta(1); } else { blosc_set_delta(0); } } /* Check for a BLOSC_TYPESIZE environment variable */ envvar = getenv("BLOSC_TYPESIZE"); if (envvar != NULL) { long value; value = strtol(envvar, NULL, 10); if ((value != EINVAL) && (value > 0)) { typesize = (size_t)value; } } /* Check for a BLOSC_COMPRESSOR environment variable */ envvar = getenv("BLOSC_COMPRESSOR"); if (envvar != NULL) { result = blosc_set_compressor(envvar); if (result < 0) { return result; } } /* Check for a BLOSC_COMPRESSOR environment variable */ envvar = getenv("BLOSC_BLOCKSIZE"); if (envvar != NULL) { long blocksize; blocksize = strtol(envvar, NULL, 10); if ((blocksize != EINVAL) && (blocksize > 0)) { blosc_set_blocksize((size_t)blocksize); } } /* Check for a BLOSC_NTHREADS environment variable */ envvar = getenv("BLOSC_NTHREADS"); if (envvar != NULL) { long nthreads; nthreads = strtol(envvar, NULL, 10); if ((nthreads != EINVAL) && 
(nthreads > 0)) { result = blosc_set_nthreads((int)nthreads); if (result < 0) { return result; } } } /* Check for a BLOSC_NOLOCK environment variable. It is important that this should be the last env var so that it can take the previous ones into account */ envvar = getenv("BLOSC_NOLOCK"); if (envvar != NULL) { // TODO: here is the only place that returns an extended header from // a blosc_compress() call. This should probably be fixed. const char *compname; blosc2_context *cctx; blosc2_cparams cparams = BLOSC2_CPARAMS_DEFAULTS; blosc_compcode_to_compname(g_compressor, &compname); /* Create a context for compression */ build_filters(doshuffle, g_delta, typesize, cparams.filters); // TODO: cparams can be shared in a multithreaded environment. do a copy! cparams.typesize = (uint8_t)typesize; cparams.compcode = (uint8_t)g_compressor; cparams.clevel = (uint8_t)clevel; cparams.nthreads = (uint8_t)g_nthreads; cctx = blosc2_create_cctx(cparams); /* Do the actual compression */ result = blosc2_compress_ctx(cctx, src, srcsize, dest, destsize); /* Release context resources */ blosc2_free_ctx(cctx); return result; } pthread_mutex_lock(&global_comp_mutex); /* Initialize a context compression */ uint8_t* filters = calloc(1, BLOSC2_MAX_FILTERS); uint8_t* filters_meta = calloc(1, BLOSC2_MAX_FILTERS); build_filters(doshuffle, g_delta, typesize, filters); error = initialize_context_compression( g_global_context, src, srcsize, dest, destsize, clevel, filters, filters_meta, (int32_t)typesize, g_compressor, g_force_blocksize, g_nthreads, g_nthreads, g_schunk); free(filters); free(filters_meta); if (error <= 0) { pthread_mutex_unlock(&global_comp_mutex); return error; } /* Write chunk header without extended header (Blosc1 compatibility mode) */ error = write_compression_header(g_global_context, false); if (error < 0) { pthread_mutex_unlock(&global_comp_mutex); return error; } result = blosc_compress_context(g_global_context); pthread_mutex_unlock(&global_comp_mutex); return result; } /* The public routine for compression. */ int blosc_compress(int clevel, int doshuffle, size_t typesize, size_t nbytes, const void* src, void* dest, size_t destsize) { return blosc2_compress(clevel, doshuffle, (int32_t)typesize, src, (int32_t)nbytes, dest, (int32_t)destsize); } int blosc_run_decompression_with_context(blosc2_context* context, const void* src, int32_t srcsize, void* dest, int32_t destsize) { int32_t ntbytes; uint8_t* _src = (uint8_t*)src; uint8_t version; int error; if (srcsize <= 0) { /* Invalid argument */ return -1; } version = _src[0]; /* blosc format version */ if (version > BLOSC_VERSION_FORMAT) { /* Version from future */ return -1; } error = initialize_context_decompression(context, src, srcsize, dest, destsize); if (error < 0) { return error; } /* Check whether this buffer is memcpy'ed */ bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; if (memcpyed) { // Check that sizes in header are compatible, otherwise there is a header corruption ntbytes = context->sourcesize; int32_t cbytes = sw32_(_src + 12); /* compressed buffer size */ if (ntbytes + BLOSC_MAX_OVERHEAD != cbytes) { return -1; } // Check that we have enough space in destination for the copy operation if (destsize < ntbytes) { return -1; } memcpy(dest, _src + BLOSC_MAX_OVERHEAD, (unsigned int)ntbytes); } else { /* Do the actual decompression */ ntbytes = do_job(context); if (ntbytes < 0) { return -1; } } assert(ntbytes <= (int32_t)destsize); return ntbytes; } /* The public secure routine for decompression with context. 
*/ int blosc2_decompress_ctx(blosc2_context* context, const void* src, int32_t srcsize, void* dest, int32_t destsize) { int result; if (context->do_compress != 0) { fprintf(stderr, "Context is not meant for decompression. Giving up.\n"); return -10; } result = blosc_run_decompression_with_context(context, src, srcsize, dest, destsize); // Reset a possible block_maskout if (context->block_maskout != NULL) { free(context->block_maskout); context->block_maskout = NULL; } context->block_maskout_nitems = 0; return result; } /* The public secure routine for decompression. */ int blosc2_decompress(const void* src, int32_t srcsize, void* dest, int32_t destsize) { int result; char* envvar; long nthreads; blosc2_context *dctx; blosc2_dparams dparams = BLOSC2_DPARAMS_DEFAULTS; /* Check whether the library should be initialized */ if (!g_initlib) blosc_init(); /* Check for a BLOSC_NTHREADS environment variable */ envvar = getenv("BLOSC_NTHREADS"); if (envvar != NULL) { nthreads = strtol(envvar, NULL, 10); if ((nthreads != EINVAL) && (nthreads > 0)) { result = blosc_set_nthreads((int)nthreads); if (result < 0) { return result; } } } /* Check for a BLOSC_NOLOCK environment variable. It is important that this should be the last env var so that it can take the previous ones into account */ envvar = getenv("BLOSC_NOLOCK"); if (envvar != NULL) { dparams.nthreads = g_nthreads; dctx = blosc2_create_dctx(dparams); result = blosc2_decompress_ctx(dctx, src, srcsize, dest, destsize); blosc2_free_ctx(dctx); return result; } pthread_mutex_lock(&global_comp_mutex); result = blosc_run_decompression_with_context( g_global_context, src, srcsize, dest, destsize); pthread_mutex_unlock(&global_comp_mutex); return result; } /* The public routine for decompression. */ int blosc_decompress(const void* src, void* dest, size_t destsize) { return blosc2_decompress(src, INT32_MAX, dest, (int32_t)destsize); } /* Specific routine optimized for decompression a small number of items out of a compressed chunk. This does not use threads because it would affect negatively to performance. 
*/ int _blosc_getitem(blosc2_context* context, const void* src, int32_t srcsize, int start, int nitems, void* dest) { uint8_t* _src = NULL; /* current pos for source buffer */ uint8_t flags; /* flags for header */ int32_t ntbytes = 0; /* the number of uncompressed bytes */ int32_t nblocks; /* number of total blocks in buffer */ int32_t leftover; /* extra bytes at end of buffer */ int32_t* bstarts; /* start pointers for each block */ int32_t typesize, blocksize, nbytes; int32_t bsize, bsize2, ebsize, leftoverblock; int32_t cbytes; int32_t startb, stopb; int32_t stop = start + nitems; int j; if (srcsize < BLOSC_MIN_HEADER_LENGTH) { /* Not enough input to parse Blosc1 header */ return -1; } _src = (uint8_t*)(src); /* Read the header block */ flags = _src[2]; /* flags */ bool memcpyed = flags & (uint8_t)BLOSC_MEMCPYED; typesize = (int32_t)_src[3]; /* typesize */ nbytes = sw32_(_src + 4); /* buffer size */ blocksize = sw32_(_src + 8); /* block size */ cbytes = sw32_(_src + 12); /* compressed buffer size */ ebsize = blocksize + typesize * (int32_t)sizeof(int32_t); if ((context->header_flags & BLOSC_DOSHUFFLE) && (context->header_flags & BLOSC_DOBITSHUFFLE)) { /* Extended header */ if (srcsize < BLOSC_EXTENDED_HEADER_LENGTH) { /* Not enough input to parse Blosc2 header */ return -1; } uint8_t* filters = _src + BLOSC_MIN_HEADER_LENGTH; uint8_t* filters_meta = filters + 8; for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) { context->filters[i] = filters[i]; context->filters_meta[i] = filters_meta[i]; } bstarts = (int32_t*)(_src + BLOSC_EXTENDED_HEADER_LENGTH); } else { /* Minimal header */ flags_to_filters(flags, context->filters); bstarts = (int32_t*)(_src + BLOSC_MIN_HEADER_LENGTH); } // Some checks for malformed buffers if (blocksize <= 0 || blocksize > nbytes || typesize <= 0 || typesize > BLOSC_MAX_TYPESIZE) { return -1; } /* Compute some params */ /* Total blocks */ nblocks = nbytes / blocksize; leftover = nbytes % blocksize; nblocks = (leftover > 0) ? 
nblocks + 1 : nblocks; /* Check region boundaries */ if ((start < 0) || (start * typesize > nbytes)) { fprintf(stderr, "`start` out of bounds"); return -1; } if ((stop < 0) || (stop * typesize > nbytes)) { fprintf(stderr, "`start`+`nitems` out of bounds"); return -1; } if (_src + srcsize < (uint8_t *)(bstarts + nblocks)) { /* Not enough input to read all `bstarts` */ return -1; } for (j = 0; j < nblocks; j++) { bsize = blocksize; leftoverblock = 0; if ((j == nblocks - 1) && (leftover > 0)) { bsize = leftover; leftoverblock = 1; } /* Compute start & stop for each block */ startb = start * (int)typesize - j * (int)blocksize; stopb = stop * (int)typesize - j * (int)blocksize; if ((startb >= (int)blocksize) || (stopb <= 0)) { continue; } if (startb < 0) { startb = 0; } if (stopb > (int)blocksize) { stopb = (int)blocksize; } bsize2 = stopb - startb; /* Do the actual data copy */ if (memcpyed) { // Check that sizes in header are compatible, otherwise there is a header corruption if (nbytes + BLOSC_MAX_OVERHEAD != cbytes) { return -1; } if (srcsize < BLOSC_MAX_OVERHEAD + j * blocksize + startb + bsize2) { /* Not enough input to copy data */ return -1; } memcpy((uint8_t*)dest + ntbytes, (uint8_t*)src + BLOSC_MAX_OVERHEAD + j * blocksize + startb, (unsigned int)bsize2); cbytes = (int)bsize2; } else { struct thread_context* scontext = context->serial_context; /* Resize the temporaries in serial context if needed */ if (blocksize != scontext->tmp_blocksize) { my_free(scontext->tmp); scontext->tmp_nbytes = (size_t)3 * context->blocksize + ebsize; scontext->tmp = my_malloc(scontext->tmp_nbytes); scontext->tmp2 = scontext->tmp + blocksize; scontext->tmp3 = scontext->tmp + blocksize + ebsize; scontext->tmp4 = scontext->tmp + 2 * blocksize + ebsize; scontext->tmp_blocksize = (int32_t)blocksize; } // Regular decompression. Put results in tmp2. // If the block is aligned and the worst case fits in destination, let's avoid a copy bool get_single_block = ((startb == 0) && (bsize == nitems * typesize)); uint8_t* tmp2 = get_single_block ? dest : scontext->tmp2; cbytes = blosc_d(context->serial_context, bsize, leftoverblock, src, srcsize, sw32_(bstarts + j), tmp2, 0, scontext->tmp, scontext->tmp3); if (cbytes < 0) { ntbytes = cbytes; break; } if (!get_single_block) { /* Copy to destination */ memcpy((uint8_t *) dest + ntbytes, tmp2 + startb, (unsigned int) bsize2); } cbytes = (int)bsize2; } ntbytes += cbytes; } return ntbytes; } /* Specific routine optimized for decompression a small number of items out of a compressed chunk. Public non-contextual API. 
*/ int blosc_getitem(const void* src, int start, int nitems, void* dest) { uint8_t* _src = (uint8_t*)(src); blosc2_context context; int result; uint8_t version = _src[0]; /* blosc format version */ if (version > BLOSC_VERSION_FORMAT) { /* Version from future */ return -1; } /* Minimally populate the context */ memset(&context, 0, sizeof(blosc2_context)); context.src = src; context.dest = dest; context.typesize = (uint8_t)_src[3]; context.blocksize = sw32_(_src + 8); context.header_flags = *(_src + 2); context.filter_flags = get_filter_flags(context.header_flags, context.typesize); context.schunk = g_schunk; context.nthreads = 1; // force a serial decompression; fixes #95 context.serial_context = create_thread_context(&context, 0); /* Call the actual getitem function */ result = _blosc_getitem(&context, src, INT32_MAX, start, nitems, dest); /* Release resources */ free_thread_context(context.serial_context); return result; } int blosc2_getitem_ctx(blosc2_context* context, const void* src, int32_t srcsize, int start, int nitems, void* dest) { uint8_t* _src = (uint8_t*)(src); int result; /* Minimally populate the context */ context->typesize = (uint8_t)_src[3]; context->blocksize = sw32_(_src + 8); context->header_flags = *(_src + 2); context->filter_flags = get_filter_flags(*(_src + 2), context->typesize); if (context->serial_context == NULL) { context->serial_context = create_thread_context(context, 0); } /* Call the actual getitem function */ result = _blosc_getitem(context, src, srcsize, start, nitems, dest); return result; } /* execute single compression/decompression job for a single thread_context */ static void t_blosc_do_job(void *ctxt) { struct thread_context* thcontext = (struct thread_context*)ctxt; blosc2_context* context = thcontext->parent_context; int32_t cbytes; int32_t ntdest; int32_t tblocks; /* number of blocks per thread */ int32_t tblock; /* limit block on a thread */ int32_t nblock_; /* private copy of nblock */ int32_t bsize; int32_t leftoverblock; /* Parameters for threads */ int32_t blocksize; int32_t ebsize; int32_t srcsize; bool compress = context->do_compress != 0; int32_t maxbytes; int32_t nblocks; int32_t leftover; int32_t leftover2; int32_t* bstarts; const uint8_t* src; uint8_t* dest; uint8_t* tmp; uint8_t* tmp2; uint8_t* tmp3; /* Get parameters for this thread before entering the main loop */ blocksize = context->blocksize; ebsize = blocksize + context->typesize * sizeof(int32_t); maxbytes = context->destsize; nblocks = context->nblocks; leftover = context->leftover; bstarts = context->bstarts; src = context->src; srcsize = context->srcsize; dest = context->dest; /* Resize the temporaries if needed */ if (blocksize != thcontext->tmp_blocksize) { my_free(thcontext->tmp); thcontext->tmp_nbytes = (size_t)3 * context->blocksize + ebsize; thcontext->tmp = my_malloc(thcontext->tmp_nbytes); thcontext->tmp2 = thcontext->tmp + blocksize; thcontext->tmp3 = thcontext->tmp + blocksize + ebsize; thcontext->tmp4 = thcontext->tmp + 2 * blocksize + ebsize; thcontext->tmp_blocksize = blocksize; } tmp = thcontext->tmp; tmp2 = thcontext->tmp2; tmp3 = thcontext->tmp3; // Determine whether we can do a static distribution of workload among different threads bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; bool static_schedule = (!compress || memcpyed) && context->block_maskout == NULL; if (static_schedule) { /* Blocks per thread */ tblocks = nblocks / context->nthreads; leftover2 = nblocks % context->nthreads; tblocks = (leftover2 > 0) ? 
tblocks + 1 : tblocks; nblock_ = thcontext->tid * tblocks; tblock = nblock_ + tblocks; if (tblock > nblocks) { tblock = nblocks; } } else { // Use dynamic schedule via a queue. Get the next block. pthread_mutex_lock(&context->count_mutex); context->thread_nblock++; nblock_ = context->thread_nblock; pthread_mutex_unlock(&context->count_mutex); tblock = nblocks; } /* Loop over blocks */ leftoverblock = 0; while ((nblock_ < tblock) && (context->thread_giveup_code > 0)) { bsize = blocksize; if (nblock_ == (nblocks - 1) && (leftover > 0)) { bsize = leftover; leftoverblock = 1; } if (compress) { if (memcpyed) { if (!context->prefilter) { /* We want to memcpy only */ memcpy(dest + BLOSC_MAX_OVERHEAD + nblock_ * blocksize, src + nblock_ * blocksize, (unsigned int) bsize); cbytes = (int32_t) bsize; } else { /* Only the prefilter has to be executed, and this is done in blosc_c(). * However, no further actions are needed, so we can put the result * directly in dest. */ cbytes = blosc_c(thcontext, bsize, leftoverblock, 0, ebsize, src, nblock_ * blocksize, dest + BLOSC_MAX_OVERHEAD + nblock_ * blocksize, tmp, tmp3); } } else { /* Regular compression */ cbytes = blosc_c(thcontext, bsize, leftoverblock, 0, ebsize, src, nblock_ * blocksize, tmp2, tmp, tmp3); } } else { if (memcpyed) { /* We want to memcpy only */ if (srcsize < BLOSC_MAX_OVERHEAD + (nblock_ * blocksize) + bsize) { /* Not enough input to copy data */ cbytes = -1; } else { memcpy(dest + nblock_ * blocksize, src + BLOSC_MAX_OVERHEAD + nblock_ * blocksize, (unsigned int)bsize); cbytes = (int32_t)bsize; } } else { if (srcsize < (int32_t)(BLOSC_MAX_OVERHEAD + (sizeof(int32_t) * nblocks))) { /* Not enough input to read all `bstarts` */ cbytes = -1; } else { cbytes = blosc_d(thcontext, bsize, leftoverblock, src, srcsize, sw32_(bstarts + nblock_), dest, nblock_ * blocksize, tmp, tmp2); } } } /* Check whether current thread has to giveup */ if (context->thread_giveup_code <= 0) { break; } /* Check results for the compressed/decompressed block */ if (cbytes < 0) { /* compr/decompr failure */ /* Set giveup_code error */ pthread_mutex_lock(&context->count_mutex); context->thread_giveup_code = cbytes; pthread_mutex_unlock(&context->count_mutex); break; } if (compress && !memcpyed) { /* Start critical section */ pthread_mutex_lock(&context->count_mutex); ntdest = context->output_bytes; // Note: do not use a typical local dict_training variable here // because it is probably cached from previous calls if the number of // threads does not change (the usual thing). 
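      /* bstarts[] records, per block, the offset of its compressed data inside
         dest.  While a dictionary is still being trained (use_dict set but no
         cdict yet), offsets are deliberately not stored: the first pass only
         produces training samples, and the chunk is recompressed afterwards
         with the trained dictionary (see blosc2_compress_ctx). */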
if (!(context->use_dict && context->dict_cdict == NULL)) { _sw32(bstarts + nblock_, (int32_t) ntdest); } if ((cbytes == 0) || (ntdest + cbytes > maxbytes)) { context->thread_giveup_code = 0; /* uncompressible buf */ pthread_mutex_unlock(&context->count_mutex); break; } context->thread_nblock++; nblock_ = context->thread_nblock; context->output_bytes += cbytes; pthread_mutex_unlock(&context->count_mutex); /* End of critical section */ /* Copy the compressed buffer to destination */ memcpy(dest + ntdest, tmp2, (unsigned int) cbytes); } else if (static_schedule) { nblock_++; } else { pthread_mutex_lock(&context->count_mutex); context->thread_nblock++; nblock_ = context->thread_nblock; context->output_bytes += cbytes; pthread_mutex_unlock(&context->count_mutex); } } /* closes while (nblock_) */ if (static_schedule) { context->output_bytes = context->sourcesize; if (compress) { context->output_bytes += BLOSC_MAX_OVERHEAD; } } } /* Decompress & unshuffle several blocks in a single thread */ static void* t_blosc(void* ctxt) { struct thread_context* thcontext = (struct thread_context*)ctxt; blosc2_context* context = thcontext->parent_context; #ifdef BLOSC_POSIX_BARRIERS int rc; #endif while (1) { /* Synchronization point for all threads (wait for initialization) */ WAIT_INIT(NULL, context); if (context->end_threads) { break; } t_blosc_do_job(ctxt); /* Meeting point for all threads (wait for finalization) */ WAIT_FINISH(NULL, context); } /* Cleanup our working space and context */ free_thread_context(thcontext); return (NULL); } int init_threadpool(blosc2_context *context) { int32_t tid; int rc2; /* Initialize mutex and condition variable objects */ pthread_mutex_init(&context->count_mutex, NULL); pthread_mutex_init(&context->delta_mutex, NULL); pthread_cond_init(&context->delta_cv, NULL); /* Set context thread sentinels */ context->thread_giveup_code = 1; context->thread_nblock = -1; /* Barrier initialization */ #ifdef BLOSC_POSIX_BARRIERS pthread_barrier_init(&context->barr_init, NULL, context->nthreads + 1); pthread_barrier_init(&context->barr_finish, NULL, context->nthreads + 1); #else pthread_mutex_init(&context->count_threads_mutex, NULL); pthread_cond_init(&context->count_threads_cv, NULL); context->count_threads = 0; /* Reset threads counter */ #endif if (threads_callback) { /* Create thread contexts to store data for callback threads */ context->thread_contexts = (struct thread_context *)my_malloc( context->nthreads * sizeof(struct thread_context)); for (tid = 0; tid < context->nthreads; tid++) init_thread_context(context->thread_contexts + tid, context, tid); } else { #if !defined(_WIN32) /* Initialize and set thread detached attribute */ pthread_attr_init(&context->ct_attr); pthread_attr_setdetachstate(&context->ct_attr, PTHREAD_CREATE_JOINABLE); #endif /* Make space for thread handlers */ context->threads = (pthread_t*)my_malloc( context->nthreads * sizeof(pthread_t)); /* Finally, create the threads */ for (tid = 0; tid < context->nthreads; tid++) { /* Create a thread context (will destroy when finished) */ struct thread_context *thread_context = create_thread_context(context, tid); #if !defined(_WIN32) rc2 = pthread_create(&context->threads[tid], &context->ct_attr, t_blosc, (void*)thread_context); #else rc2 = pthread_create(&context->threads[tid], NULL, t_blosc, (void *)thread_context); #endif if (rc2) { fprintf(stderr, "ERROR; return code from pthread_create() is %d\n", rc2); fprintf(stderr, "\tError detail: %s\n", strerror(rc2)); return (-1); } } } /* We have now started/initialized 
the threads */ context->threads_started = context->nthreads; context->new_nthreads = context->nthreads; return (0); } int blosc_get_nthreads(void) { return g_nthreads; } int blosc_set_nthreads(int nthreads_new) { int ret = g_nthreads; /* the previous number of threads */ /* Check whether the library should be initialized */ if (!g_initlib) blosc_init(); if (nthreads_new != ret) { g_nthreads = nthreads_new; g_global_context->new_nthreads = nthreads_new; check_nthreads(g_global_context); } return ret; } const char* blosc_get_compressor(void) { const char* compname; blosc_compcode_to_compname(g_compressor, &compname); return compname; } int blosc_set_compressor(const char* compname) { int code = blosc_compname_to_compcode(compname); g_compressor = code; /* Check whether the library should be initialized */ if (!g_initlib) blosc_init(); return code; } void blosc_set_delta(int dodelta) { g_delta = dodelta; /* Check whether the library should be initialized */ if (!g_initlib) blosc_init(); } const char* blosc_list_compressors(void) { static int compressors_list_done = 0; static char ret[256]; if (compressors_list_done) return ret; ret[0] = '\0'; strcat(ret, BLOSC_BLOSCLZ_COMPNAME); #if defined(HAVE_LZ4) strcat(ret, ","); strcat(ret, BLOSC_LZ4_COMPNAME); strcat(ret, ","); strcat(ret, BLOSC_LZ4HC_COMPNAME); #endif /* HAVE_LZ4 */ #if defined(HAVE_LIZARD) strcat(ret, ","); strcat(ret, BLOSC_LIZARD_COMPNAME); #endif /* HAVE_LIZARD */ #if defined(HAVE_SNAPPY) strcat(ret, ","); strcat(ret, BLOSC_SNAPPY_COMPNAME); #endif /* HAVE_SNAPPY */ #if defined(HAVE_ZLIB) strcat(ret, ","); strcat(ret, BLOSC_ZLIB_COMPNAME); #endif /* HAVE_ZLIB */ #if defined(HAVE_ZSTD) strcat(ret, ","); strcat(ret, BLOSC_ZSTD_COMPNAME); #endif /* HAVE_ZSTD */ compressors_list_done = 1; return ret; } const char* blosc_get_version_string(void) { return BLOSC_VERSION_STRING; } int blosc_get_complib_info(const char* compname, char** complib, char** version) { int clibcode; const char* clibname; const char* clibversion = "unknown"; #if (defined(HAVE_LZ4) && defined(LZ4_VERSION_MAJOR)) || \ (defined(HAVE_LIZARD) && defined(LIZARD_VERSION_MAJOR)) || \ (defined(HAVE_SNAPPY) && defined(SNAPPY_VERSION)) || \ (defined(HAVE_ZSTD) && defined(ZSTD_VERSION_MAJOR)) char sbuffer[256]; #endif clibcode = compname_to_clibcode(compname); clibname = clibcode_to_clibname(clibcode); /* complib version */ if (clibcode == BLOSC_BLOSCLZ_LIB) { clibversion = BLOSCLZ_VERSION_STRING; } #if defined(HAVE_LZ4) else if (clibcode == BLOSC_LZ4_LIB) { #if defined(LZ4_VERSION_MAJOR) sprintf(sbuffer, "%d.%d.%d", LZ4_VERSION_MAJOR, LZ4_VERSION_MINOR, LZ4_VERSION_RELEASE); clibversion = sbuffer; #endif /* LZ4_VERSION_MAJOR */ } #endif /* HAVE_LZ4 */ #if defined(HAVE_LIZARD) else if (clibcode == BLOSC_LIZARD_LIB) { sprintf(sbuffer, "%d.%d.%d", LIZARD_VERSION_MAJOR, LIZARD_VERSION_MINOR, LIZARD_VERSION_RELEASE); clibversion = sbuffer; } #endif /* HAVE_LIZARD */ #if defined(HAVE_SNAPPY) else if (clibcode == BLOSC_SNAPPY_LIB) { #if defined(SNAPPY_VERSION) sprintf(sbuffer, "%d.%d.%d", SNAPPY_MAJOR, SNAPPY_MINOR, SNAPPY_PATCHLEVEL); clibversion = sbuffer; #endif /* SNAPPY_VERSION */ } #endif /* HAVE_SNAPPY */ #if defined(HAVE_ZLIB) else if (clibcode == BLOSC_ZLIB_LIB) { clibversion = ZLIB_VERSION; } #endif /* HAVE_ZLIB */ #if defined(HAVE_ZSTD) else if (clibcode == BLOSC_ZSTD_LIB) { sprintf(sbuffer, "%d.%d.%d", ZSTD_VERSION_MAJOR, ZSTD_VERSION_MINOR, ZSTD_VERSION_RELEASE); clibversion = sbuffer; } #endif /* HAVE_ZSTD */ #ifdef _MSC_VER *complib = _strdup(clibname); *version = 
_strdup(clibversion); #else *complib = strdup(clibname); *version = strdup(clibversion); #endif return clibcode; } /* Return `nbytes`, `cbytes` and `blocksize` from a compressed buffer. */ void blosc_cbuffer_sizes(const void* cbuffer, size_t* nbytes, size_t* cbytes, size_t* blocksize) { uint8_t* _src = (uint8_t*)(cbuffer); /* current pos for source buffer */ uint8_t version = _src[0]; /* blosc format version */ if (version > BLOSC_VERSION_FORMAT) { /* Version from future */ *nbytes = *blocksize = *cbytes = 0; return; } /* Read the interesting values */ *nbytes = (size_t)sw32_(_src + 4); /* uncompressed buffer size */ *blocksize = (size_t)sw32_(_src + 8); /* block size */ *cbytes = (size_t)sw32_(_src + 12); /* compressed buffer size */ } int blosc_cbuffer_validate(const void* cbuffer, size_t cbytes, size_t* nbytes) { size_t header_cbytes, header_blocksize; if (cbytes < BLOSC_MIN_HEADER_LENGTH) { /* Compressed data should contain enough space for header */ *nbytes = 0; return -1; } blosc_cbuffer_sizes(cbuffer, nbytes, &header_cbytes, &header_blocksize); if (header_cbytes != cbytes) { /* Compressed size from header does not match `cbytes` */ *nbytes = 0; return -1; } if (*nbytes > BLOSC_MAX_BUFFERSIZE) { /* Uncompressed size is larger than allowed */ return -1; } return 0; } /* Return `typesize` and `flags` from a compressed buffer. */ void blosc_cbuffer_metainfo(const void* cbuffer, size_t* typesize, int* flags) { uint8_t* _src = (uint8_t*)(cbuffer); /* current pos for source buffer */ uint8_t version = _src[0]; /* blosc format version */ if (version > BLOSC_VERSION_FORMAT) { /* Version from future */ *flags = 0; *typesize = 0; return; } /* Read the interesting values */ *flags = (int)_src[2]; /* flags */ *typesize = (size_t)_src[3]; /* typesize */ } /* Return version information from a compressed buffer. */ void blosc_cbuffer_versions(const void* cbuffer, int* version, int* versionlz) { uint8_t* _src = (uint8_t*)(cbuffer); /* current pos for source buffer */ /* Read the version info */ *version = (int)_src[0]; /* blosc format version */ *versionlz = (int)_src[1]; /* Lempel-Ziv compressor format version */ } /* Return the compressor library/format used in a compressed buffer. */ const char* blosc_cbuffer_complib(const void* cbuffer) { uint8_t* _src = (uint8_t*)(cbuffer); /* current pos for source buffer */ int clibcode; const char* complib; /* Read the compressor format/library info */ clibcode = (_src[2] & 0xe0) >> 5; complib = clibcode_to_clibname(clibcode); return complib; } /* Get the internal blocksize to be used during compression. 0 means that an automatic blocksize is computed internally. */ int blosc_get_blocksize(void) { return (int)g_force_blocksize; } /* Force the use of a specific blocksize. If 0, an automatic blocksize will be used (the default). */ void blosc_set_blocksize(size_t size) { g_force_blocksize = (int32_t)size; } /* Set pointer to super-chunk. If NULL, no super-chunk will be reachable (the default). 
*/ void blosc_set_schunk(blosc2_schunk* schunk) { g_schunk = schunk; g_global_context->schunk = schunk; } void blosc_init(void) { /* Return if Blosc is already initialized */ if (g_initlib) return; pthread_mutex_init(&global_comp_mutex, NULL); /* Create a global context */ g_global_context = (blosc2_context*)my_malloc(sizeof(blosc2_context)); memset(g_global_context, 0, sizeof(blosc2_context)); g_global_context->nthreads = g_nthreads; g_global_context->new_nthreads = g_nthreads; g_initlib = 1; } void blosc_destroy(void) { /* Return if Blosc is not initialized */ if (!g_initlib) return; g_initlib = 0; release_threadpool(g_global_context); if (g_global_context->serial_context != NULL) { free_thread_context(g_global_context->serial_context); } my_free(g_global_context); pthread_mutex_destroy(&global_comp_mutex); } int release_threadpool(blosc2_context *context) { int32_t t; void* status; int rc; if (context->threads_started > 0) { if (threads_callback) { /* free context data for user-managed threads */ for (t=0; t<context->threads_started; t++) destroy_thread_context(context->thread_contexts + t); my_free(context->thread_contexts); } else { /* Tell all existing threads to finish */ context->end_threads = 1; WAIT_INIT(-1, context); /* Join exiting threads */ for (t = 0; t < context->threads_started; t++) { rc = pthread_join(context->threads[t], &status); if (rc) { fprintf(stderr, "ERROR; return code from pthread_join() is %d\n", rc); fprintf(stderr, "\tError detail: %s\n", strerror(rc)); } } /* Thread attributes */ #if !defined(_WIN32) pthread_attr_destroy(&context->ct_attr); #endif /* Release thread handlers */ my_free(context->threads); } /* Release mutex and condition variable objects */ pthread_mutex_destroy(&context->count_mutex); pthread_mutex_destroy(&context->delta_mutex); pthread_cond_destroy(&context->delta_cv); /* Barriers */ #ifdef BLOSC_POSIX_BARRIERS pthread_barrier_destroy(&context->barr_init); pthread_barrier_destroy(&context->barr_finish); #else pthread_mutex_destroy(&context->count_threads_mutex); pthread_cond_destroy(&context->count_threads_cv); context->count_threads = 0; /* Reset threads counter */ #endif /* Reset flags and counters */ context->end_threads = 0; context->threads_started = 0; } return 0; } int blosc_free_resources(void) { /* Return if Blosc is not initialized */ if (!g_initlib) return -1; return release_threadpool(g_global_context); } /* Contexts */ /* Create a context for compression */ blosc2_context* blosc2_create_cctx(blosc2_cparams cparams) { blosc2_context* context = (blosc2_context*)my_malloc(sizeof(blosc2_context)); /* Populate the context, using zeros as default values */ memset(context, 0, sizeof(blosc2_context)); context->do_compress = 1; /* meant for compression */ context->compcode = cparams.compcode; context->clevel = cparams.clevel; context->use_dict = cparams.use_dict; context->typesize = cparams.typesize; for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) { context->filters[i] = cparams.filters[i]; context->filters_meta[i] = cparams.filters_meta[i]; } context->nthreads = cparams.nthreads; context->new_nthreads = context->nthreads; context->blocksize = cparams.blocksize; context->threads_started = 0; context->schunk = cparams.schunk; if (cparams.prefilter != NULL) { context->prefilter = cparams.prefilter; context->pparams = (blosc2_prefilter_params*)my_malloc(sizeof(blosc2_prefilter_params)); memcpy(context->pparams, cparams.pparams, sizeof(blosc2_prefilter_params)); } return context; } /* Create a context for decompression */ blosc2_context* 
blosc2_create_dctx(blosc2_dparams dparams) { blosc2_context* context = (blosc2_context*)my_malloc(sizeof(blosc2_context)); /* Populate the context, using zeros as default values */ memset(context, 0, sizeof(blosc2_context)); context->do_compress = 0; /* Meant for decompression */ context->nthreads = dparams.nthreads; context->new_nthreads = context->nthreads; context->threads_started = 0; context->block_maskout = NULL; context->block_maskout_nitems = 0; context->schunk = dparams.schunk; return context; } void blosc2_free_ctx(blosc2_context* context) { release_threadpool(context); if (context->serial_context != NULL) { free_thread_context(context->serial_context); } if (context->dict_cdict != NULL) { #ifdef HAVE_ZSTD ZSTD_freeCDict(context->dict_cdict); #endif } if (context->dict_ddict != NULL) { #ifdef HAVE_ZSTD ZSTD_freeDDict(context->dict_ddict); #endif } if (context->btune != NULL) { btune_free(context); } if (context->prefilter != NULL) { my_free(context->pparams); } if (context->block_maskout != NULL) { free(context->block_maskout); } my_free(context); } /* Set a maskout in decompression context */ int blosc2_set_maskout(blosc2_context *ctx, bool *maskout, int nblocks) { if (ctx->block_maskout != NULL) { // Get rid of a possible mask here free(ctx->block_maskout); } bool *maskout_ = malloc(nblocks); memcpy(maskout_, maskout, nblocks); ctx->block_maskout = maskout_; ctx->block_maskout_nitems = nblocks; return 0; }
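/*
 * Illustrative sketch (not part of the library): partial reads via the
 * getitem path implemented above.  blosc_getitem() decompresses only the
 * blocks overlapping the requested [start, start + nitems) item range, so a
 * full-chunk decompression is avoided.  The guard macro and the function
 * name example_getitem are hypothetical; `chunk` is assumed to hold a chunk
 * produced by blosc_compress()/blosc2_compress() from at least a few hundred
 * items with typesize == sizeof(int32_t).
 */
#ifdef BLOSC2_USAGE_EXAMPLE
static int example_getitem(const void *chunk) {
  int32_t out[16];
  /* Fetch 16 items starting at item 100; the return value is the number of
     bytes copied into `out`, or a negative value on error. */
  int nbytes = blosc_getitem(chunk, 100, 16, out);
  return (nbytes == (int)(16 * sizeof(int32_t))) ? 0 : -1;
}
#endif  /* BLOSC2_USAGE_EXAMPLE */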
/********************************************************************* Blosc - Blocked Shuffling and Compression Library Author: Francesc Alted <francesc@blosc.org> Creation date: 2009-05-20 See LICENSE.txt for details about copyright and rights to use. **********************************************************************/ #include <stdio.h> #include <stdlib.h> #include <errno.h> #include <string.h> #include <sys/types.h> #include <assert.h> #include "blosc2.h" #include "blosc-private.h" #include "blosc2-common.h" #if defined(USING_CMAKE) #include "config.h" #endif /* USING_CMAKE */ #include "context.h" #include "shuffle.h" #include "delta.h" #include "trunc-prec.h" #include "blosclz.h" #include "btune.h" #if defined(HAVE_LZ4) #include "lz4.h" #include "lz4hc.h" #ifdef HAVE_IPP #include <ipps.h> #include <ippdc.h> #endif #endif /* HAVE_LZ4 */ #if defined(HAVE_LIZARD) #include "lizard_compress.h" #include "lizard_decompress.h" #endif /* HAVE_LIZARD */ #if defined(HAVE_SNAPPY) #include "snappy-c.h" #endif /* HAVE_SNAPPY */ #if defined(HAVE_MINIZ) #include "miniz.c" #elif defined(HAVE_ZLIB) #include "zlib.h" #endif /* HAVE_MINIZ */ #if defined(HAVE_ZSTD) #include "zstd.h" #include "zstd_errors.h" // #include "cover.h" // for experimenting with fast cover training for building dicts #include "zdict.h" #endif /* HAVE_ZSTD */ #if defined(_WIN32) && !defined(__MINGW32__) #include <windows.h> #include <malloc.h> /* stdint.h only available in VS2010 (VC++ 16.0) and newer */ #if defined(_MSC_VER) && _MSC_VER < 1600 #include "win32/stdint-windows.h" #else #include <stdint.h> #endif #include <process.h> #define getpid _getpid #else #include <unistd.h> #endif /* _WIN32 */ #if defined(_WIN32) && !defined(__GNUC__) #include "win32/pthread.c" #endif /* Synchronization variables */ /* Global context for non-contextual API */ static blosc2_context* g_global_context; static pthread_mutex_t global_comp_mutex; static int g_compressor = BLOSC_BLOSCLZ; static int g_delta = 0; /* the compressor to use by default */ static int g_nthreads = 1; static int32_t g_force_blocksize = 0; static int g_initlib = 0; static blosc2_schunk* g_schunk = NULL; /* the pointer to super-chunk */ // Forward declarations int init_threadpool(blosc2_context *context); int release_threadpool(blosc2_context *context); /* Macros for synchronization */ /* Wait until all threads are initialized */ #ifdef BLOSC_POSIX_BARRIERS #define WAIT_INIT(RET_VAL, CONTEXT_PTR) \ rc = pthread_barrier_wait(&(CONTEXT_PTR)->barr_init); \ if (rc != 0 && rc != PTHREAD_BARRIER_SERIAL_THREAD) { \ printf("Could not wait on barrier (init): %d\n", rc); \ return((RET_VAL)); \ } #else #define WAIT_INIT(RET_VAL, CONTEXT_PTR) \ pthread_mutex_lock(&(CONTEXT_PTR)->count_threads_mutex); \ if ((CONTEXT_PTR)->count_threads < (CONTEXT_PTR)->nthreads) { \ (CONTEXT_PTR)->count_threads++; \ pthread_cond_wait(&(CONTEXT_PTR)->count_threads_cv, \ &(CONTEXT_PTR)->count_threads_mutex); \ } \ else { \ pthread_cond_broadcast(&(CONTEXT_PTR)->count_threads_cv); \ } \ pthread_mutex_unlock(&(CONTEXT_PTR)->count_threads_mutex); #endif /* Wait for all threads to finish */ #ifdef BLOSC_POSIX_BARRIERS #define WAIT_FINISH(RET_VAL, CONTEXT_PTR) \ rc = pthread_barrier_wait(&(CONTEXT_PTR)->barr_finish); \ if (rc != 0 && rc != PTHREAD_BARRIER_SERIAL_THREAD) { \ printf("Could not wait on barrier (finish)\n"); \ return((RET_VAL)); \ } #else #define WAIT_FINISH(RET_VAL, CONTEXT_PTR) \ pthread_mutex_lock(&(CONTEXT_PTR)->count_threads_mutex); \ if ((CONTEXT_PTR)->count_threads > 0) { \ 
(CONTEXT_PTR)->count_threads--; \ pthread_cond_wait(&(CONTEXT_PTR)->count_threads_cv, \ &(CONTEXT_PTR)->count_threads_mutex); \ } \ else { \ pthread_cond_broadcast(&(CONTEXT_PTR)->count_threads_cv); \ } \ pthread_mutex_unlock(&(CONTEXT_PTR)->count_threads_mutex); #endif /* global variable to change threading backend from Blosc-managed to caller-managed */ static blosc_threads_callback threads_callback = 0; static void *threads_callback_data = 0; /* non-threadsafe function should be called before any other Blosc function in order to change how threads are managed */ void blosc_set_threads_callback(blosc_threads_callback callback, void *callback_data) { threads_callback = callback; threads_callback_data = callback_data; } /* A function for aligned malloc that is portable */ static uint8_t* my_malloc(size_t size) { void* block = NULL; int res = 0; /* Do an alignment to 32 bytes because AVX2 is supported */ #if defined(_WIN32) /* A (void *) cast needed for avoiding a warning with MINGW :-/ */ block = (void *)_aligned_malloc(size, 32); #elif _POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600 /* Platform does have an implementation of posix_memalign */ res = posix_memalign(&block, 32, size); #else block = malloc(size); #endif /* _WIN32 */ if (block == NULL || res != 0) { printf("Error allocating memory!"); return NULL; } return (uint8_t*)block; } /* Release memory booked by my_malloc */ static void my_free(void* block) { #if defined(_WIN32) _aligned_free(block); #else free(block); #endif /* _WIN32 */ } /* * Conversion routines between compressor and compression libraries */ /* Return the library code associated with the compressor name */ static int compname_to_clibcode(const char* compname) { if (strcmp(compname, BLOSC_BLOSCLZ_COMPNAME) == 0) return BLOSC_BLOSCLZ_LIB; if (strcmp(compname, BLOSC_LZ4_COMPNAME) == 0) return BLOSC_LZ4_LIB; if (strcmp(compname, BLOSC_LZ4HC_COMPNAME) == 0) return BLOSC_LZ4_LIB; if (strcmp(compname, BLOSC_LIZARD_COMPNAME) == 0) return BLOSC_LIZARD_LIB; if (strcmp(compname, BLOSC_SNAPPY_COMPNAME) == 0) return BLOSC_SNAPPY_LIB; if (strcmp(compname, BLOSC_ZLIB_COMPNAME) == 0) return BLOSC_ZLIB_LIB; if (strcmp(compname, BLOSC_ZSTD_COMPNAME) == 0) return BLOSC_ZSTD_LIB; return -1; } /* Return the library name associated with the compressor code */ static const char* clibcode_to_clibname(int clibcode) { if (clibcode == BLOSC_BLOSCLZ_LIB) return BLOSC_BLOSCLZ_LIBNAME; if (clibcode == BLOSC_LZ4_LIB) return BLOSC_LZ4_LIBNAME; if (clibcode == BLOSC_LIZARD_LIB) return BLOSC_LIZARD_LIBNAME; if (clibcode == BLOSC_SNAPPY_LIB) return BLOSC_SNAPPY_LIBNAME; if (clibcode == BLOSC_ZLIB_LIB) return BLOSC_ZLIB_LIBNAME; if (clibcode == BLOSC_ZSTD_LIB) return BLOSC_ZSTD_LIBNAME; return NULL; /* should never happen */ } /* * Conversion routines between compressor names and compressor codes */ /* Get the compressor name associated with the compressor code */ int blosc_compcode_to_compname(int compcode, const char** compname) { int code = -1; /* -1 means non-existent compressor code */ const char* name = NULL; /* Map the compressor code */ if (compcode == BLOSC_BLOSCLZ) name = BLOSC_BLOSCLZ_COMPNAME; else if (compcode == BLOSC_LZ4) name = BLOSC_LZ4_COMPNAME; else if (compcode == BLOSC_LZ4HC) name = BLOSC_LZ4HC_COMPNAME; else if (compcode == BLOSC_LIZARD) name = BLOSC_LIZARD_COMPNAME; else if (compcode == BLOSC_SNAPPY) name = BLOSC_SNAPPY_COMPNAME; else if (compcode == BLOSC_ZLIB) name = BLOSC_ZLIB_COMPNAME; else if (compcode == BLOSC_ZSTD) name = BLOSC_ZSTD_COMPNAME; *compname = name; /* Guess 
if there is support for this code */ if (compcode == BLOSC_BLOSCLZ) code = BLOSC_BLOSCLZ; #if defined(HAVE_LZ4) else if (compcode == BLOSC_LZ4) code = BLOSC_LZ4; else if (compcode == BLOSC_LZ4HC) code = BLOSC_LZ4HC; #endif /* HAVE_LZ4 */ #if defined(HAVE_LIZARD) else if (compcode == BLOSC_LIZARD) code = BLOSC_LIZARD; #endif /* HAVE_LIZARD */ #if defined(HAVE_SNAPPY) else if (compcode == BLOSC_SNAPPY) code = BLOSC_SNAPPY; #endif /* HAVE_SNAPPY */ #if defined(HAVE_ZLIB) else if (compcode == BLOSC_ZLIB) code = BLOSC_ZLIB; #endif /* HAVE_ZLIB */ #if defined(HAVE_ZSTD) else if (compcode == BLOSC_ZSTD) code = BLOSC_ZSTD; #endif /* HAVE_ZSTD */ return code; } /* Get the compressor code for the compressor name. -1 if it is not available */ int blosc_compname_to_compcode(const char* compname) { int code = -1; /* -1 means non-existent compressor code */ if (strcmp(compname, BLOSC_BLOSCLZ_COMPNAME) == 0) { code = BLOSC_BLOSCLZ; } #if defined(HAVE_LZ4) else if (strcmp(compname, BLOSC_LZ4_COMPNAME) == 0) { code = BLOSC_LZ4; } else if (strcmp(compname, BLOSC_LZ4HC_COMPNAME) == 0) { code = BLOSC_LZ4HC; } #endif /* HAVE_LZ4 */ #if defined(HAVE_LIZARD) else if (strcmp(compname, BLOSC_LIZARD_COMPNAME) == 0) { code = BLOSC_LIZARD; } #endif /* HAVE_LIZARD */ #if defined(HAVE_SNAPPY) else if (strcmp(compname, BLOSC_SNAPPY_COMPNAME) == 0) { code = BLOSC_SNAPPY; } #endif /* HAVE_SNAPPY */ #if defined(HAVE_ZLIB) else if (strcmp(compname, BLOSC_ZLIB_COMPNAME) == 0) { code = BLOSC_ZLIB; } #endif /* HAVE_ZLIB */ #if defined(HAVE_ZSTD) else if (strcmp(compname, BLOSC_ZSTD_COMPNAME) == 0) { code = BLOSC_ZSTD; } #endif /* HAVE_ZSTD */ return code; } #if defined(HAVE_LZ4) static int lz4_wrap_compress(const char* input, size_t input_length, char* output, size_t maxout, int accel, void* hash_table) { BLOSC_UNUSED_PARAM(accel); int cbytes; #ifdef HAVE_IPP if (hash_table == NULL) { return -1; // the hash table should always be initialized } int outlen = (int)maxout; int inlen = (int)input_length; // I have not found any function that uses `accel` like in `LZ4_compress_fast`, but // the IPP LZ4Safe call does a pretty good job on compressing well, so let's use it IppStatus status = ippsEncodeLZ4Safe_8u((const Ipp8u*)input, &inlen, (Ipp8u*)output, &outlen, (Ipp8u*)hash_table); if (status == ippStsDstSizeLessExpected) { return 0; // we cannot compress in required outlen } else if (status != ippStsNoErr) { return -1; // an unexpected error happened } cbytes = outlen; #else BLOSC_UNUSED_PARAM(hash_table); accel = 1; // deactivate acceleration to match IPP behaviour cbytes = LZ4_compress_fast(input, output, (int)input_length, (int)maxout, accel); #endif return cbytes; } static int lz4hc_wrap_compress(const char* input, size_t input_length, char* output, size_t maxout, int clevel) { int cbytes; if (input_length > (size_t)(UINT32_C(2) << 30)) return -1; /* input larger than 2 GB is not supported */ /* clevel for lz4hc goes up to 12, at least in LZ4 1.7.5 * but levels larger than 9 do not buy much compression. 
*/ cbytes = LZ4_compress_HC(input, output, (int)input_length, (int)maxout, clevel); return cbytes; } static int lz4_wrap_decompress(const char* input, size_t compressed_length, char* output, size_t maxout) { int nbytes; #ifdef HAVE_IPP int outlen = (int)maxout; int inlen = (int)compressed_length; IppStatus status; status = ippsDecodeLZ4_8u((const Ipp8u*)input, inlen, (Ipp8u*)output, &outlen); //status = ippsDecodeLZ4Dict_8u((const Ipp8u*)input, &inlen, (Ipp8u*)output, 0, &outlen, NULL, 1 << 16); nbytes = (status == ippStsNoErr) ? outlen : -outlen; #else nbytes = LZ4_decompress_safe(input, output, (int)compressed_length, (int)maxout); #endif if (nbytes != (int)maxout) { return 0; } return (int)maxout; } #endif /* HAVE_LZ4 */ #if defined(HAVE_LIZARD) static int lizard_wrap_compress(const char* input, size_t input_length, char* output, size_t maxout, int clevel) { int cbytes; cbytes = Lizard_compress(input, output, (int)input_length, (int)maxout, clevel); return cbytes; } static int lizard_wrap_decompress(const char* input, size_t compressed_length, char* output, size_t maxout) { int dbytes; dbytes = Lizard_decompress_safe(input, output, (int)compressed_length, (int)maxout); if (dbytes < 0) { return 0; } return dbytes; } #endif /* HAVE_LIZARD */ #if defined(HAVE_SNAPPY) static int snappy_wrap_compress(const char* input, size_t input_length, char* output, size_t maxout) { snappy_status status; size_t cl = maxout; status = snappy_compress(input, input_length, output, &cl); if (status != SNAPPY_OK) { return 0; } return (int)cl; } static int snappy_wrap_decompress(const char* input, size_t compressed_length, char* output, size_t maxout) { snappy_status status; size_t ul = maxout; status = snappy_uncompress(input, compressed_length, output, &ul); if (status != SNAPPY_OK) { return 0; } return (int)ul; } #endif /* HAVE_SNAPPY */ #if defined(HAVE_ZLIB) /* zlib is not very respectful with sharing name space with others. Fortunately, its names do not collide with those already in blosc. */ static int zlib_wrap_compress(const char* input, size_t input_length, char* output, size_t maxout, int clevel) { int status; uLongf cl = (uLongf)maxout; status = compress2( (Bytef*)output, &cl, (Bytef*)input, (uLong)input_length, clevel); if (status != Z_OK) { return 0; } return (int)cl; } static int zlib_wrap_decompress(const char* input, size_t compressed_length, char* output, size_t maxout) { int status; uLongf ul = (uLongf)maxout; status = uncompress( (Bytef*)output, &ul, (Bytef*)input, (uLong)compressed_length); if (status != Z_OK) { return 0; } return (int)ul; } #endif /* HAVE_ZLIB */ #if defined(HAVE_ZSTD) static int zstd_wrap_compress(struct thread_context* thread_context, const char* input, size_t input_length, char* output, size_t maxout, int clevel) { size_t code; blosc2_context* context = thread_context->parent_context; clevel = (clevel < 9) ? 
clevel * 2 - 1 : ZSTD_maxCLevel(); /* Make the level 8 close enough to maxCLevel */ if (clevel == 8) clevel = ZSTD_maxCLevel() - 2; if (thread_context->zstd_cctx == NULL) { thread_context->zstd_cctx = ZSTD_createCCtx(); } if (context->use_dict) { assert(context->dict_cdict != NULL); code = ZSTD_compress_usingCDict( thread_context->zstd_cctx, (void*)output, maxout, (void*)input, input_length, context->dict_cdict); } else { code = ZSTD_compressCCtx(thread_context->zstd_cctx, (void*)output, maxout, (void*)input, input_length, clevel); } if (ZSTD_isError(code) != ZSTD_error_no_error) { // Do not print anything because blosc will just memcpy this buffer // fprintf(stderr, "Error in ZSTD compression: '%s'. Giving up.\n", // ZDICT_getErrorName(code)); return 0; } return (int)code; } static int zstd_wrap_decompress(struct thread_context* thread_context, const char* input, size_t compressed_length, char* output, size_t maxout) { size_t code; blosc2_context* context = thread_context->parent_context; if (thread_context->zstd_dctx == NULL) { thread_context->zstd_dctx = ZSTD_createDCtx(); } if (context->use_dict) { assert(context->dict_ddict != NULL); code = ZSTD_decompress_usingDDict( thread_context->zstd_dctx, (void*)output, maxout, (void*)input, compressed_length, context->dict_ddict); } else { code = ZSTD_decompressDCtx(thread_context->zstd_dctx, (void*)output, maxout, (void*)input, compressed_length); } if (ZSTD_isError(code) != ZSTD_error_no_error) { fprintf(stderr, "Error in ZSTD decompression: '%s'. Giving up.\n", ZDICT_getErrorName(code)); return 0; } return (int)code; } #endif /* HAVE_ZSTD */ /* Compute acceleration for blosclz */ static int get_accel(const blosc2_context* context) { int clevel = context->clevel; if (context->compcode == BLOSC_LZ4) { /* This acceleration setting based on discussions held in: * https://groups.google.com/forum/#!topic/lz4c/zosy90P8MQw */ return (10 - clevel); } else if (context->compcode == BLOSC_LIZARD) { /* Lizard currently accepts clevels from 10 to 49 */ switch (clevel) { case 1 : return 10; case 2 : return 10; case 3 : return 10; case 4 : return 10; case 5 : return 20; case 6 : return 20; case 7 : return 20; case 8 : return 41; case 9 : return 41; default : break; } } return 1; } int do_nothing(int8_t filter, char cmode) { if (cmode == 'c') { return (filter == BLOSC_NOFILTER); } else { // TRUNC_PREC do not have to be applied during decompression return ((filter == BLOSC_NOFILTER) || (filter == BLOSC_TRUNC_PREC)); } } int next_filter(const uint8_t* filters, int current_filter, char cmode) { for (int i = current_filter - 1; i >= 0; i--) { if (!do_nothing(filters[i], cmode)) { return filters[i]; } } return BLOSC_NOFILTER; } int last_filter(const uint8_t* filters, char cmode) { int last_index = -1; for (int i = BLOSC2_MAX_FILTERS - 1; i >= 0; i--) { if (!do_nothing(filters[i], cmode)) { last_index = i; } } return last_index; } uint8_t* pipeline_c(struct thread_context* thread_context, const int32_t bsize, const uint8_t* src, const int32_t offset, uint8_t* dest, uint8_t* tmp, uint8_t* tmp2) { blosc2_context* context = thread_context->parent_context; uint8_t* _src = (uint8_t*)src + offset; uint8_t* _tmp = tmp; uint8_t* _dest = dest; int32_t typesize = context->typesize; uint8_t* filters = context->filters; uint8_t* filters_meta = context->filters_meta; bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; /* Prefilter function */ if (context->prefilter != NULL) { // Create new prefilter parameters for this block (must be private for each thread) 
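    // The prefilter is a user-supplied callback that generates the data to be
    // filtered/compressed for this block.  The private pparams copy filled in
    // below hands it the output buffer and its size, the typesize, the offset
    // of the block inside the chunk, the thread id, a per-thread scratch
    // buffer and the owning context; it must return 0 on success, and any
    // non-zero return aborts compression of the block (see the check below).
    // Illustrative only: a prefilter that zero-fills its output could just do
    //   memset(pparams->out, 0, pparams->out_size); return 0;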
blosc2_prefilter_params pparams;
    memcpy(&pparams, context->pparams, sizeof(pparams));
    pparams.out = _dest;
    pparams.out_size = (size_t)bsize;
    pparams.out_typesize = typesize;
    pparams.out_offset = offset;
    pparams.tid = thread_context->tid;
    pparams.ttmp = thread_context->tmp;
    pparams.ttmp_nbytes = thread_context->tmp_nbytes;
    pparams.ctx = context;

    if (context->prefilter(&pparams) != 0) {
      fprintf(stderr, "Execution of prefilter function failed\n");
      return NULL;
    }
    if (memcpyed) {
      // No more filters are required
      return _dest;
    }
    // Cycle buffers
    _src = _dest;
    _dest = _tmp;
    _tmp = _src;
  }

  /* Process the filter pipeline */
  for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) {
    switch (filters[i]) {
      case BLOSC_SHUFFLE:
        for (int j = 0; j <= filters_meta[i]; j++) {
          shuffle(typesize, bsize, _src, _dest);
          // Cycle filters when required
          if (j < filters_meta[i]) {
            _src = _dest;
            _dest = _tmp;
            _tmp = _src;
          }
        }
        break;
      case BLOSC_BITSHUFFLE:
        bitshuffle(typesize, bsize, _src, _dest, tmp2);
        break;
      case BLOSC_DELTA:
        delta_encoder(src, offset, bsize, typesize, _src, _dest);
        break;
      case BLOSC_TRUNC_PREC:
        truncate_precision(filters_meta[i], typesize, bsize, _src, _dest);
        break;
      default:
        if (filters[i] != BLOSC_NOFILTER) {
          fprintf(stderr, "Filter %d not handled during compression\n", filters[i]);
          return NULL;
        }
    }
    // Cycle buffers when required
    if (filters[i] != BLOSC_NOFILTER) {
      _src = _dest;
      _dest = _tmp;
      _tmp = _src;
    }
  }
  return _src;
}

// Optimized version for detecting runs. It compares 8 bytes values wherever possible.
static bool get_run(const uint8_t* ip, const uint8_t* ip_bound) {
  uint8_t x = *ip;
  int64_t value, value2;
  /* Broadcast the value for every byte in a 64-bit register */
  memset(&value, x, 8);
  while (ip < (ip_bound - 8)) {
#if defined(BLOSC_STRICT_ALIGN)
    memcpy(&value2, ip, 8);
#else
    value2 = *(int64_t*)ip;
#endif
    if (value != value2) {
      // Values differ. We don't have a run.
      return false;
    }
    else {
      ip += 8;
    }
  }
  /* Look into the remainder */
  while ((ip < ip_bound) && (*ip == x)) ip++;
  return ip == ip_bound ?
true : false; } /* Shuffle & compress a single block */ static int blosc_c(struct thread_context* thread_context, int32_t bsize, int32_t leftoverblock, int32_t ntbytes, int32_t destsize, const uint8_t* src, const int32_t offset, uint8_t* dest, uint8_t* tmp, uint8_t* tmp2) { blosc2_context* context = thread_context->parent_context; int dont_split = (context->header_flags & 0x10) >> 4; int dict_training = context->use_dict && context->dict_cdict == NULL; int32_t j, neblock, nstreams; int32_t cbytes; /* number of compressed bytes in split */ int32_t ctbytes = 0; /* number of compressed bytes in block */ int64_t maxout; int32_t typesize = context->typesize; const char* compname; int accel; const uint8_t* _src; uint8_t *_tmp = tmp, *_tmp2 = tmp2; uint8_t *_tmp3 = thread_context->tmp4; int last_filter_index = last_filter(context->filters, 'c'); bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; if (last_filter_index >= 0 || context->prefilter != NULL) { /* Apply the filter pipeline just for the prefilter */ if (memcpyed && context->prefilter != NULL) { // We only need the prefilter output _src = pipeline_c(thread_context, bsize, src, offset, dest, _tmp2, _tmp3); if (_src == NULL) { return -9; // signals a problem with the filter pipeline } return bsize; } /* Apply regular filter pipeline */ _src = pipeline_c(thread_context, bsize, src, offset, _tmp, _tmp2, _tmp3); if (_src == NULL) { return -9; // signals a problem with the filter pipeline } } else { _src = src + offset; } assert(context->clevel > 0); /* Calculate acceleration for different compressors */ accel = get_accel(context); /* The number of compressed data streams for this block */ if (!dont_split && !leftoverblock && !dict_training) { nstreams = (int32_t)typesize; } else { nstreams = 1; } neblock = bsize / nstreams; for (j = 0; j < nstreams; j++) { if (!dict_training) { dest += sizeof(int32_t); ntbytes += sizeof(int32_t); ctbytes += sizeof(int32_t); } // See if we have a run here const uint8_t* ip = (uint8_t*)_src + j * neblock; const uint8_t* ipbound = (uint8_t*)_src + (j + 1) * neblock; if (get_run(ip, ipbound)) { // A run. Encode the repeated byte as a negative length in the length of the split. 
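      // When every byte of the split repeats, the split is stored as a "run":
      // instead of compressed payload, the 4-byte length slot written just
      // before `dest` gets the negated byte value (so it is <= 0 and >= -255).
      // The decompressor in blosc_d() recognizes this token and memset()s the
      // whole split, which is why no payload bytes are emitted here.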
int32_t value = _src[j * neblock]; if (ntbytes > destsize) { /* Not enough space to write out compressed block size */ return -1; } _sw32(dest - 4, -value); continue; } maxout = neblock; #if defined(HAVE_SNAPPY) if (context->compcode == BLOSC_SNAPPY) { maxout = (int32_t)snappy_max_compressed_length((size_t)neblock); } #endif /* HAVE_SNAPPY */ if (ntbytes + maxout > destsize) { /* avoid buffer * overrun */ maxout = (int64_t)destsize - (int64_t)ntbytes; if (maxout <= 0) { return 0; /* non-compressible block */ } } if (dict_training) { // We are in the build dict state, so don't compress // TODO: copy only a percentage for sampling memcpy(dest, _src + j * neblock, (unsigned int)neblock); cbytes = (int32_t)neblock; } else if (context->compcode == BLOSC_BLOSCLZ) { cbytes = blosclz_compress(context->clevel, _src + j * neblock, (int)neblock, dest, (int)maxout); } #if defined(HAVE_LZ4) else if (context->compcode == BLOSC_LZ4) { void *hash_table = NULL; #ifdef HAVE_IPP hash_table = (void*)thread_context->lz4_hash_table; #endif cbytes = lz4_wrap_compress((char*)_src + j * neblock, (size_t)neblock, (char*)dest, (size_t)maxout, accel, hash_table); } else if (context->compcode == BLOSC_LZ4HC) { cbytes = lz4hc_wrap_compress((char*)_src + j * neblock, (size_t)neblock, (char*)dest, (size_t)maxout, context->clevel); } #endif /* HAVE_LZ4 */ #if defined(HAVE_LIZARD) else if (context->compcode == BLOSC_LIZARD) { cbytes = lizard_wrap_compress((char*)_src + j * neblock, (size_t)neblock, (char*)dest, (size_t)maxout, accel); } #endif /* HAVE_LIZARD */ #if defined(HAVE_SNAPPY) else if (context->compcode == BLOSC_SNAPPY) { cbytes = snappy_wrap_compress((char*)_src + j * neblock, (size_t)neblock, (char*)dest, (size_t)maxout); } #endif /* HAVE_SNAPPY */ #if defined(HAVE_ZLIB) else if (context->compcode == BLOSC_ZLIB) { cbytes = zlib_wrap_compress((char*)_src + j * neblock, (size_t)neblock, (char*)dest, (size_t)maxout, context->clevel); } #endif /* HAVE_ZLIB */ #if defined(HAVE_ZSTD) else if (context->compcode == BLOSC_ZSTD) { cbytes = zstd_wrap_compress(thread_context, (char*)_src + j * neblock, (size_t)neblock, (char*)dest, (size_t)maxout, context->clevel); } #endif /* HAVE_ZSTD */ else { blosc_compcode_to_compname(context->compcode, &compname); fprintf(stderr, "Blosc has not been compiled with '%s' ", compname); fprintf(stderr, "compression support. Please use one having it."); return -5; /* signals no compression support */ } if (cbytes > maxout) { /* Buffer overrun caused by compression (should never happen) */ return -1; } if (cbytes < 0) { /* cbytes should never be negative */ return -2; } if (!dict_training) { if (cbytes == 0 || cbytes == neblock) { /* The compressor has been unable to compress data at all. */ /* Before doing the copy, check that we are not running into a buffer overflow. 
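         In that case the split is stored verbatim and cbytes is set to
         neblock, which is the marker blosc_d() uses to know that it should
         simply memcpy the split back instead of calling a decompressor.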
*/ if ((ntbytes + neblock) > destsize) { return 0; /* Non-compressible data */ } memcpy(dest, _src + j * neblock, (unsigned int)neblock); cbytes = neblock; } _sw32(dest - 4, cbytes); } dest += cbytes; ntbytes += cbytes; ctbytes += cbytes; } /* Closes j < nstreams */ //printf("c%d", ctbytes); return ctbytes; } /* Process the filter pipeline (decompression mode) */ int pipeline_d(blosc2_context* context, const int32_t bsize, uint8_t* dest, const int32_t offset, uint8_t* src, uint8_t* tmp, uint8_t* tmp2, int last_filter_index) { int32_t typesize = context->typesize; uint8_t* filters = context->filters; uint8_t* filters_meta = context->filters_meta; uint8_t* _src = src; uint8_t* _dest = tmp; uint8_t* _tmp = tmp2; int errcode = 0; for (int i = BLOSC2_MAX_FILTERS - 1; i >= 0; i--) { // Delta filter requires the whole chunk ready int last_copy_filter = (last_filter_index == i) || (next_filter(filters, i, 'd') == BLOSC_DELTA); if (last_copy_filter) { _dest = dest + offset; } switch (filters[i]) { case BLOSC_SHUFFLE: for (int j = 0; j <= filters_meta[i]; j++) { unshuffle(typesize, bsize, _src, _dest); // Cycle filters when required if (j < filters_meta[i]) { _src = _dest; _dest = _tmp; _tmp = _src; } // Check whether we have to copy the intermediate _dest buffer to final destination if (last_copy_filter && (filters_meta[i] % 2) == 1 && j == filters_meta[i]) { memcpy(dest + offset, _dest, (unsigned int)bsize); } } break; case BLOSC_BITSHUFFLE: bitunshuffle(typesize, bsize, _src, _dest, _tmp, context->src[0]); break; case BLOSC_DELTA: if (context->nthreads == 1) { /* Serial mode */ delta_decoder(dest, offset, bsize, typesize, _dest); } else { /* Force the thread in charge of the block 0 to go first */ pthread_mutex_lock(&context->delta_mutex); if (context->dref_not_init) { if (offset != 0) { pthread_cond_wait(&context->delta_cv, &context->delta_mutex); } else { delta_decoder(dest, offset, bsize, typesize, _dest); context->dref_not_init = 0; pthread_cond_broadcast(&context->delta_cv); } } pthread_mutex_unlock(&context->delta_mutex); if (offset != 0) { delta_decoder(dest, offset, bsize, typesize, _dest); } } break; case BLOSC_TRUNC_PREC: // TRUNC_PREC filter does not need to be undone break; default: if (filters[i] != BLOSC_NOFILTER) { fprintf(stderr, "Filter %d not handled during decompression\n", filters[i]); errcode = -1; } } if (last_filter_index == i) { return errcode; } // Cycle buffers when required if ((filters[i] != BLOSC_NOFILTER) && (filters[i] != BLOSC_TRUNC_PREC)) { _src = _dest; _dest = _tmp; _tmp = _src; } } return errcode; } /* Decompress & unshuffle a single block */ static int blosc_d( struct thread_context* thread_context, int32_t bsize, int32_t leftoverblock, const uint8_t* src, int32_t srcsize, int32_t src_offset, uint8_t* dest, int32_t dest_offset, uint8_t* tmp, uint8_t* tmp2) { blosc2_context* context = thread_context->parent_context; uint8_t* filters = context->filters; uint8_t *tmp3 = thread_context->tmp4; int32_t compformat = (context->header_flags & 0xe0) >> 5; int dont_split = (context->header_flags & 0x10) >> 4; //uint8_t blosc_version_format = src[0]; int nstreams; int32_t neblock; int32_t nbytes; /* number of decompressed bytes in split */ int32_t cbytes; /* number of compressed bytes in split */ int32_t ctbytes = 0; /* number of compressed bytes in block */ int32_t ntbytes = 0; /* number of uncompressed bytes in block */ uint8_t* _dest; int32_t typesize = context->typesize; int32_t nblock = dest_offset / context->blocksize; const char* compname; if 
(context->block_maskout != NULL && context->block_maskout[nblock]) { // Do not decompress, but act as if we successfully decompressed everything return bsize; } if (src_offset <= 0 || src_offset >= srcsize) { /* Invalid block src offset encountered */ return -1; } src += src_offset; srcsize -= src_offset; int last_filter_index = last_filter(filters, 'd'); if ((last_filter_index >= 0) && (next_filter(filters, BLOSC2_MAX_FILTERS, 'd') != BLOSC_DELTA)) { // We are making use of some filter, so use a temp for destination _dest = tmp; } else { // If no filters, or only DELTA in pipeline _dest = dest + dest_offset; } /* The number of compressed data streams for this block */ if (!dont_split && !leftoverblock && !context->use_dict) { // We don't want to split when in a training dict state nstreams = (int32_t)typesize; } else { nstreams = 1; } neblock = bsize / nstreams; for (int j = 0; j < nstreams; j++) { if (srcsize < sizeof(int32_t)) { /* Not enough input to read compressed size */ return -1; } srcsize -= sizeof(int32_t); cbytes = sw32_(src); /* amount of compressed bytes */ if (cbytes > 0) { if (srcsize < cbytes) { /* Not enough input to read compressed bytes */ return -1; } srcsize -= cbytes; } src += sizeof(int32_t); ctbytes += (int32_t)sizeof(int32_t); /* Uncompress */ if (cbytes <= 0) { // A run if (cbytes < -255) { // Runs can only encode a byte return -2; } uint8_t value = -cbytes; memset(_dest, value, (unsigned int)neblock); nbytes = neblock; cbytes = 0; // everything is encoded in the cbytes token } else if (cbytes == neblock) { memcpy(_dest, src, (unsigned int)neblock); nbytes = (int32_t)neblock; } else { if (compformat == BLOSC_BLOSCLZ_FORMAT) { nbytes = blosclz_decompress(src, cbytes, _dest, (int)neblock); } #if defined(HAVE_LZ4) else if (compformat == BLOSC_LZ4_FORMAT) { nbytes = lz4_wrap_decompress((char*)src, (size_t)cbytes, (char*)_dest, (size_t)neblock); } #endif /* HAVE_LZ4 */ #if defined(HAVE_LIZARD) else if (compformat == BLOSC_LIZARD_FORMAT) { nbytes = lizard_wrap_decompress((char*)src, (size_t)cbytes, (char*)_dest, (size_t)neblock); } #endif /* HAVE_LIZARD */ #if defined(HAVE_SNAPPY) else if (compformat == BLOSC_SNAPPY_FORMAT) { nbytes = snappy_wrap_decompress((char*)src, (size_t)cbytes, (char*)_dest, (size_t)neblock); } #endif /* HAVE_SNAPPY */ #if defined(HAVE_ZLIB) else if (compformat == BLOSC_ZLIB_FORMAT) { nbytes = zlib_wrap_decompress((char*)src, (size_t)cbytes, (char*)_dest, (size_t)neblock); } #endif /* HAVE_ZLIB */ #if defined(HAVE_ZSTD) else if (compformat == BLOSC_ZSTD_FORMAT) { nbytes = zstd_wrap_decompress(thread_context, (char*)src, (size_t)cbytes, (char*)_dest, (size_t)neblock); } #endif /* HAVE_ZSTD */ else { compname = clibcode_to_clibname(compformat); fprintf(stderr, "Blosc has not been compiled with decompression " "support for '%s' format. 
", compname); fprintf(stderr, "Please recompile for adding this support.\n"); return -5; /* signals no decompression support */ } /* Check that decompressed bytes number is correct */ if (nbytes != neblock) { return -2; } } src += cbytes; ctbytes += cbytes; _dest += nbytes; ntbytes += nbytes; } /* Closes j < nstreams */ if (last_filter_index >= 0) { int errcode = pipeline_d(context, bsize, dest, dest_offset, tmp, tmp2, tmp3, last_filter_index); if (errcode < 0) return errcode; } /* Return the number of uncompressed bytes */ return (int)ntbytes; } /* Serial version for compression/decompression */ static int serial_blosc(struct thread_context* thread_context) { blosc2_context* context = thread_context->parent_context; int32_t j, bsize, leftoverblock; int32_t cbytes; int32_t ntbytes = (int32_t)context->output_bytes; int32_t* bstarts = context->bstarts; uint8_t* tmp = thread_context->tmp; uint8_t* tmp2 = thread_context->tmp2; int dict_training = context->use_dict && (context->dict_cdict == NULL); bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; for (j = 0; j < context->nblocks; j++) { if (context->do_compress && !memcpyed && !dict_training) { _sw32(bstarts + j, ntbytes); } bsize = context->blocksize; leftoverblock = 0; if ((j == context->nblocks - 1) && (context->leftover > 0)) { bsize = context->leftover; leftoverblock = 1; } if (context->do_compress) { if (memcpyed && !context->prefilter) { /* We want to memcpy only */ memcpy(context->dest + BLOSC_MAX_OVERHEAD + j * context->blocksize, context->src + j * context->blocksize, (unsigned int)bsize); cbytes = (int32_t)bsize; } else { /* Regular compression */ cbytes = blosc_c(thread_context, bsize, leftoverblock, ntbytes, context->destsize, context->src, j * context->blocksize, context->dest + ntbytes, tmp, tmp2); if (cbytes == 0) { ntbytes = 0; /* uncompressible data */ break; } } } else { if (memcpyed) { // Check that sizes in header are compatible, otherwise there is a header corruption int32_t csize = sw32_(context->src + 12); /* compressed buffer size */ if (context->sourcesize + BLOSC_MAX_OVERHEAD != csize) { return -1; } if (context->srcsize < BLOSC_MAX_OVERHEAD + (j * context->blocksize) + bsize) { /* Not enough input to copy block */ return -1; } memcpy(context->dest + j * context->blocksize, context->src + BLOSC_MAX_OVERHEAD + j * context->blocksize, (unsigned int)bsize); cbytes = (int32_t)bsize; } else { /* Regular decompression */ cbytes = blosc_d(thread_context, bsize, leftoverblock, context->src, context->srcsize, sw32_(bstarts + j), context->dest, j * context->blocksize, tmp, tmp2); } } if (cbytes < 0) { ntbytes = cbytes; /* error in blosc_c or blosc_d */ break; } ntbytes += cbytes; } return ntbytes; } static void t_blosc_do_job(void *ctxt); /* Threaded version for compression/decompression */ static int parallel_blosc(blosc2_context* context) { #ifdef BLOSC_POSIX_BARRIERS int rc; #endif /* Set sentinels */ context->thread_giveup_code = 1; context->thread_nblock = -1; if (threads_callback) { threads_callback(threads_callback_data, t_blosc_do_job, context->nthreads, sizeof(struct thread_context), (void*) context->thread_contexts); } else { /* Synchronization point for all threads (wait for initialization) */ WAIT_INIT(-1, context); /* Synchronization point for all threads (wait for finalization) */ WAIT_FINISH(-1, context); } if (context->thread_giveup_code <= 0) { /* Compression/decompression gave up. Return error code. 
*/ return context->thread_giveup_code; } /* Return the total bytes (de-)compressed in threads */ return (int)context->output_bytes; } /* initialize a thread_context that has already been allocated */ static void init_thread_context(struct thread_context* thread_context, blosc2_context* context, int32_t tid) { int32_t ebsize; thread_context->parent_context = context; thread_context->tid = tid; ebsize = context->blocksize + context->typesize * (int32_t)sizeof(int32_t); thread_context->tmp_nbytes = (size_t)3 * context->blocksize + ebsize; thread_context->tmp = my_malloc(thread_context->tmp_nbytes); thread_context->tmp2 = thread_context->tmp + context->blocksize; thread_context->tmp3 = thread_context->tmp + context->blocksize + ebsize; thread_context->tmp4 = thread_context->tmp + 2 * context->blocksize + ebsize; thread_context->tmp_blocksize = context->blocksize; #if defined(HAVE_ZSTD) thread_context->zstd_cctx = NULL; thread_context->zstd_dctx = NULL; #endif /* Create the hash table for LZ4 in case we are using IPP */ #ifdef HAVE_IPP IppStatus status; int inlen = thread_context->tmp_blocksize > 0 ? thread_context->tmp_blocksize : 1 << 16; int hash_size = 0; status = ippsEncodeLZ4HashTableGetSize_8u(&hash_size); if (status != ippStsNoErr) { fprintf(stderr, "Error in ippsEncodeLZ4HashTableGetSize_8u"); } Ipp8u *hash_table = ippsMalloc_8u(hash_size); status = ippsEncodeLZ4HashTableInit_8u(hash_table, inlen); if (status != ippStsNoErr) { fprintf(stderr, "Error in ippsEncodeLZ4HashTableInit_8u"); } thread_context->lz4_hash_table = hash_table; #endif } static struct thread_context* create_thread_context(blosc2_context* context, int32_t tid) { struct thread_context* thread_context; thread_context = (struct thread_context*)my_malloc(sizeof(struct thread_context)); init_thread_context(thread_context, context, tid); return thread_context; } /* free members of thread_context, but not thread_context itself */ static void destroy_thread_context(struct thread_context* thread_context) { my_free(thread_context->tmp); #if defined(HAVE_ZSTD) if (thread_context->zstd_cctx != NULL) { ZSTD_freeCCtx(thread_context->zstd_cctx); } if (thread_context->zstd_dctx != NULL) { ZSTD_freeDCtx(thread_context->zstd_dctx); } #endif #ifdef HAVE_IPP if (thread_context->lz4_hash_table != NULL) { ippsFree(thread_context->lz4_hash_table); } #endif } void free_thread_context(struct thread_context* thread_context) { destroy_thread_context(thread_context); my_free(thread_context); } int check_nthreads(blosc2_context* context) { if (context->nthreads <= 0) { fprintf(stderr, "Error. nthreads must be a positive integer"); return -1; } if (context->new_nthreads != context->nthreads) { if (context->nthreads > 1) { release_threadpool(context); } context->nthreads = context->new_nthreads; } if (context->new_nthreads > 1 && context->threads_started == 0) { init_threadpool(context); } return context->nthreads; } /* Do the compression or decompression of the buffer depending on the global params. 
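   The serial path is taken when nthreads == 1 or when the buffer holds at
   most one block; in that case a per-context serial thread context is
   (re)created whenever the blocksize changes.  Otherwise the work is handed
   to the thread pool via parallel_blosc().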
*/ static int do_job(blosc2_context* context) { int32_t ntbytes; /* Set sentinels */ context->dref_not_init = 1; /* Check whether we need to restart threads */ check_nthreads(context); /* Run the serial version when nthreads is 1 or when the buffers are not larger than blocksize */ if (context->nthreads == 1 || (context->sourcesize / context->blocksize) <= 1) { /* The context for this 'thread' has no been initialized yet */ if (context->serial_context == NULL) { context->serial_context = create_thread_context(context, 0); } else if (context->blocksize != context->serial_context->tmp_blocksize) { free_thread_context(context->serial_context); context->serial_context = create_thread_context(context, 0); } ntbytes = serial_blosc(context->serial_context); } else { ntbytes = parallel_blosc(context); } return ntbytes; } /* Convert filter pipeline to filter flags */ static uint8_t filters_to_flags(const uint8_t* filters) { uint8_t flags = 0; for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) { switch (filters[i]) { case BLOSC_SHUFFLE: flags |= BLOSC_DOSHUFFLE; break; case BLOSC_BITSHUFFLE: flags |= BLOSC_DOBITSHUFFLE; break; case BLOSC_DELTA: flags |= BLOSC_DODELTA; break; default : break; } } return flags; } /* Convert filter flags to filter pipeline */ static void flags_to_filters(const uint8_t flags, uint8_t* filters) { /* Initialize the filter pipeline */ memset(filters, 0, BLOSC2_MAX_FILTERS); /* Fill the filter pipeline */ if (flags & BLOSC_DOSHUFFLE) filters[BLOSC2_MAX_FILTERS - 1] = BLOSC_SHUFFLE; if (flags & BLOSC_DOBITSHUFFLE) filters[BLOSC2_MAX_FILTERS - 1] = BLOSC_BITSHUFFLE; if (flags & BLOSC_DODELTA) filters[BLOSC2_MAX_FILTERS - 2] = BLOSC_DELTA; } static int initialize_context_compression( blosc2_context* context, const void* src, int32_t srcsize, void* dest, int32_t destsize, int clevel, uint8_t const *filters, uint8_t const *filters_meta, int32_t typesize, int compressor, int32_t blocksize, int new_nthreads, int nthreads, blosc2_schunk* schunk) { /* Set parameters */ context->do_compress = 1; context->src = (const uint8_t*)src; context->srcsize = srcsize; context->dest = (uint8_t*)dest; context->output_bytes = 0; context->destsize = destsize; context->sourcesize = srcsize; context->typesize = (int32_t)typesize; context->filter_flags = filters_to_flags(filters); for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) { context->filters[i] = filters[i]; context->filters_meta[i] = filters_meta[i]; } context->compcode = compressor; context->nthreads = nthreads; context->new_nthreads = new_nthreads; context->end_threads = 0; context->clevel = clevel; context->schunk = schunk; /* Tune some compression parameters */ context->blocksize = (int32_t)blocksize; if (context->btune != NULL) { btune_next_cparams(context); } else { btune_next_blocksize(context); } char* envvar = getenv("BLOSC_WARN"); int warnlvl = 0; if (envvar != NULL) { warnlvl = strtol(envvar, NULL, 10); } /* Check buffer size limits */ if (srcsize > BLOSC_MAX_BUFFERSIZE) { if (warnlvl > 0) { fprintf(stderr, "Input buffer size cannot exceed %d bytes\n", BLOSC_MAX_BUFFERSIZE); } return 0; } if (destsize < BLOSC_MAX_OVERHEAD) { if (warnlvl > 0) { fprintf(stderr, "Output buffer size should be larger than %d bytes\n", BLOSC_MAX_OVERHEAD); } return 0; } if (destsize < BLOSC_MAX_OVERHEAD) { if (warnlvl > 0) { fprintf(stderr, "Output buffer size should be larger than %d bytes\n", BLOSC_MAX_OVERHEAD); } return -2; } if (destsize < BLOSC_MAX_OVERHEAD) { fprintf(stderr, "Output buffer size should be larger than %d bytes\n", BLOSC_MAX_OVERHEAD); return 
-1; } /* Compression level */ if (clevel < 0 || clevel > 9) { /* If clevel not in 0..9, print an error */ fprintf(stderr, "`clevel` parameter must be between 0 and 9!\n"); return -10; } /* Check typesize limits */ if (context->typesize > BLOSC_MAX_TYPESIZE) { /* If typesize is too large, treat buffer as an 1-byte stream. */ context->typesize = 1; } /* Compute number of blocks in buffer */ context->nblocks = context->sourcesize / context->blocksize; context->leftover = context->sourcesize % context->blocksize; context->nblocks = (context->leftover > 0) ? (context->nblocks + 1) : context->nblocks; return 1; } /* Get filter flags from header flags */ static uint8_t get_filter_flags(const uint8_t header_flags, const int32_t typesize) { uint8_t flags = 0; if ((header_flags & BLOSC_DOSHUFFLE) && (typesize > 1)) { flags |= BLOSC_DOSHUFFLE; } if (header_flags & BLOSC_DOBITSHUFFLE) { flags |= BLOSC_DOBITSHUFFLE; } if (header_flags & BLOSC_DODELTA) { flags |= BLOSC_DODELTA; } if (header_flags & BLOSC_MEMCPYED) { flags |= BLOSC_MEMCPYED; } return flags; } static int initialize_context_decompression(blosc2_context* context, const void* src, int32_t srcsize, void* dest, int32_t destsize) { uint8_t blosc2_flags = 0; int32_t cbytes; int32_t bstarts_offset; int32_t bstarts_end; context->do_compress = 0; context->src = (const uint8_t*)src; context->srcsize = srcsize; context->dest = (uint8_t*)dest; context->destsize = destsize; context->output_bytes = 0; context->end_threads = 0; if (context->srcsize < BLOSC_MIN_HEADER_LENGTH) { /* Not enough input to read minimum header */ return -1; } context->header_flags = context->src[2]; context->typesize = context->src[3]; context->sourcesize = sw32_(context->src + 4); context->blocksize = sw32_(context->src + 8); cbytes = sw32_(context->src + 12); // Some checks for malformed headers if (context->blocksize <= 0 || context->blocksize > destsize || context->typesize <= 0 || context->typesize > BLOSC_MAX_TYPESIZE || cbytes > srcsize) { return -1; } /* Check that we have enough space to decompress */ if (context->sourcesize > (int32_t)destsize) { return -1; } /* Total blocks */ context->nblocks = context->sourcesize / context->blocksize; context->leftover = context->sourcesize % context->blocksize; context->nblocks = (context->leftover > 0) ? context->nblocks + 1 : context->nblocks; if (context->block_maskout != NULL && context->block_maskout_nitems != context->nblocks) { fprintf(stderr, "The number of items in block_maskout (%d) must match the number" " of blocks in chunk (%d)", context->block_maskout_nitems, context->nblocks); return -2; } if ((context->header_flags & BLOSC_DOSHUFFLE) && (context->header_flags & BLOSC_DOBITSHUFFLE)) { /* Extended header */ if (context->srcsize < BLOSC_EXTENDED_HEADER_LENGTH) { /* Not enough input to read extended header */ return -1; } uint8_t* filters = (uint8_t*)(context->src + BLOSC_MIN_HEADER_LENGTH); uint8_t* filters_meta = filters + 8; uint8_t header_version = context->src[0]; // The number of filters depends on the version of the header // (we need to read less because filters where not initialized to zero in blosc2 alpha series) int max_filters = (header_version == BLOSC2_VERSION_FORMAT_ALPHA) ? 
5 : BLOSC2_MAX_FILTERS; for (int i = 0; i < max_filters; i++) { context->filters[i] = filters[i]; context->filters_meta[i] = filters_meta[i]; } context->filter_flags = filters_to_flags(filters); bstarts_offset = BLOSC_EXTENDED_HEADER_LENGTH; blosc2_flags = context->src[0x1F]; } else { /* Regular (Blosc1) header */ context->filter_flags = get_filter_flags(context->header_flags, context->typesize); flags_to_filters(context->header_flags, context->filters); bstarts_offset = BLOSC_MIN_HEADER_LENGTH; } context->bstarts = (int32_t*)(context->src + bstarts_offset); bstarts_end = bstarts_offset + (context->nblocks * sizeof(int32_t)); if (srcsize < bstarts_end) { /* Not enough input to read entire `bstarts` section */ return -1; } srcsize -= bstarts_end; /* Read optional dictionary if flag set */ if (blosc2_flags & BLOSC2_USEDICT) { #if defined(HAVE_ZSTD) context->use_dict = 1; if (context->dict_ddict != NULL) { // Free the existing dictionary (probably from another chunk) ZSTD_freeDDict(context->dict_ddict); } // The trained dictionary is after the bstarts block if (srcsize < sizeof(int32_t)) { /* Not enough input to size of dictionary */ return -1; } srcsize -= sizeof(int32_t); context->dict_size = (size_t)sw32_(context->src + bstarts_end); if (context->dict_size <= 0 || context->dict_size > BLOSC2_MAXDICTSIZE) { /* Dictionary size is smaller than minimum or larger than maximum allowed */ return -1; } if (srcsize < (int32_t)context->dict_size) { /* Not enough input to read entire dictionary */ return -1; } srcsize -= context->dict_size; context->dict_buffer = (void*)(context->src + bstarts_end + sizeof(int32_t)); context->dict_ddict = ZSTD_createDDict(context->dict_buffer, context->dict_size); #endif // HAVE_ZSTD } return 0; } static int write_compression_header(blosc2_context* context, bool extended_header) { int32_t compformat; int dont_split; int dict_training = context->use_dict && (context->dict_cdict == NULL); // Set the whole header to zeros so that the reserved values are zeroed if (extended_header) { memset(context->dest, 0, BLOSC_EXTENDED_HEADER_LENGTH); } else { memset(context->dest, 0, BLOSC_MIN_HEADER_LENGTH); } /* Write version header for this block */ context->dest[0] = BLOSC_VERSION_FORMAT; /* Write compressor format */ compformat = -1; switch (context->compcode) { case BLOSC_BLOSCLZ: compformat = BLOSC_BLOSCLZ_FORMAT; context->dest[1] = BLOSC_BLOSCLZ_VERSION_FORMAT; break; #if defined(HAVE_LZ4) case BLOSC_LZ4: compformat = BLOSC_LZ4_FORMAT; context->dest[1] = BLOSC_LZ4_VERSION_FORMAT; break; case BLOSC_LZ4HC: compformat = BLOSC_LZ4HC_FORMAT; context->dest[1] = BLOSC_LZ4HC_VERSION_FORMAT; break; #endif /* HAVE_LZ4 */ #if defined(HAVE_LIZARD) case BLOSC_LIZARD: compformat = BLOSC_LIZARD_FORMAT; context->dest[1] = BLOSC_LIZARD_VERSION_FORMAT; break; #endif /* HAVE_LIZARD */ #if defined(HAVE_SNAPPY) case BLOSC_SNAPPY: compformat = BLOSC_SNAPPY_FORMAT; context->dest[1] = BLOSC_SNAPPY_VERSION_FORMAT; break; #endif /* HAVE_SNAPPY */ #if defined(HAVE_ZLIB) case BLOSC_ZLIB: compformat = BLOSC_ZLIB_FORMAT; context->dest[1] = BLOSC_ZLIB_VERSION_FORMAT; break; #endif /* HAVE_ZLIB */ #if defined(HAVE_ZSTD) case BLOSC_ZSTD: compformat = BLOSC_ZSTD_FORMAT; context->dest[1] = BLOSC_ZSTD_VERSION_FORMAT; break; #endif /* HAVE_ZSTD */ default: { const char* compname; compname = clibcode_to_clibname(compformat); fprintf(stderr, "Blosc has not been compiled with '%s' ", compname); fprintf(stderr, "compression support. 
Please use one having it."); return -5; /* signals no compression support */ break; } } if (context->clevel == 0) { /* Compression level 0 means buffer to be memcpy'ed */ context->header_flags |= (uint8_t)BLOSC_MEMCPYED; } if (context->sourcesize < BLOSC_MIN_BUFFERSIZE) { /* Buffer is too small. Try memcpy'ing. */ context->header_flags |= (uint8_t)BLOSC_MEMCPYED; } bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; context->dest[2] = 0; /* zeroes flags */ context->dest[3] = (uint8_t)context->typesize; _sw32(context->dest + 4, (int32_t)context->sourcesize); _sw32(context->dest + 8, (int32_t)context->blocksize); if (extended_header) { /* Mark that we are handling an extended header */ context->header_flags |= (BLOSC_DOSHUFFLE | BLOSC_DOBITSHUFFLE); /* Store filter pipeline info at the end of the header */ uint8_t *filters = context->dest + BLOSC_MIN_HEADER_LENGTH; uint8_t *filters_meta = filters + 8; for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) { filters[i] = context->filters[i]; filters_meta[i] = context->filters_meta[i]; } uint8_t* blosc2_flags = context->dest + 0x1F; *blosc2_flags = 0; // zeroes flags *blosc2_flags |= is_little_endian() ? 0 : BLOSC2_BIGENDIAN; // endianness if (dict_training || memcpyed) { context->bstarts = NULL; context->output_bytes = BLOSC_EXTENDED_HEADER_LENGTH; } else { context->bstarts = (int32_t*)(context->dest + BLOSC_EXTENDED_HEADER_LENGTH); context->output_bytes = BLOSC_EXTENDED_HEADER_LENGTH + sizeof(int32_t) * context->nblocks; } if (context->use_dict) { *blosc2_flags |= BLOSC2_USEDICT; } } else { // Regular header if (memcpyed) { context->bstarts = NULL; context->output_bytes = BLOSC_MIN_HEADER_LENGTH; } else { context->bstarts = (int32_t *) (context->dest + BLOSC_MIN_HEADER_LENGTH); context->output_bytes = BLOSC_MIN_HEADER_LENGTH + sizeof(int32_t) * context->nblocks; } } // when memcpyed bit is set, there is no point in dealing with others if (!memcpyed) { if (context->filter_flags & BLOSC_DOSHUFFLE) { /* Byte-shuffle is active */ context->header_flags |= BLOSC_DOSHUFFLE; } if (context->filter_flags & BLOSC_DOBITSHUFFLE) { /* Bit-shuffle is active */ context->header_flags |= BLOSC_DOBITSHUFFLE; } if (context->filter_flags & BLOSC_DODELTA) { /* Delta is active */ context->header_flags |= BLOSC_DODELTA; } dont_split = !split_block(context, context->typesize, context->blocksize, extended_header); context->header_flags |= dont_split << 4; /* dont_split is in bit 4 */ context->header_flags |= compformat << 5; /* codec starts at bit 5 */ } // store header flags in dest context->dest[2] = context->header_flags; return 1; } int blosc_compress_context(blosc2_context* context) { int ntbytes = 0; blosc_timestamp_t last, current; bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; blosc_set_timestamp(&last); if (!memcpyed) { /* Do the actual compression */ ntbytes = do_job(context); if (ntbytes < 0) { return -1; } if (ntbytes == 0) { // Try out with a memcpy later on (last chance for fitting src buffer in dest). context->header_flags |= (uint8_t)BLOSC_MEMCPYED; memcpyed = true; } } if (memcpyed) { if (context->sourcesize + BLOSC_MAX_OVERHEAD > context->destsize) { /* We are exceeding maximum output size */ ntbytes = 0; } else { context->output_bytes = BLOSC_MAX_OVERHEAD; ntbytes = do_job(context); if (ntbytes < 0) { return -1; } // Success! 
update the memcpy bit in header context->dest[2] = context->header_flags; // and clear the memcpy bit in context (for next reuse) context->header_flags &= ~(uint8_t)BLOSC_MEMCPYED; } } /* Set the number of compressed bytes in header */ _sw32(context->dest + 12, ntbytes); /* Set the number of bytes in dest buffer (might be useful for btune) */ context->destsize = ntbytes; assert(ntbytes <= context->destsize); if (context->btune != NULL) { blosc_set_timestamp(&current); double ctime = blosc_elapsed_secs(last, current); btune_update(context, ctime); } return ntbytes; } /* The public secure routine for compression with context. */ int blosc2_compress_ctx(blosc2_context* context, const void* src, int32_t srcsize, void* dest, int32_t destsize) { int error, cbytes; if (context->do_compress != 1) { fprintf(stderr, "Context is not meant for compression. Giving up.\n"); return -10; } error = initialize_context_compression( context, src, srcsize, dest, destsize, context->clevel, context->filters, context->filters_meta, context->typesize, context->compcode, context->blocksize, context->new_nthreads, context->nthreads, context->schunk); if (error <= 0) { return error; } /* Write the extended header */ error = write_compression_header(context, true); if (error < 0) { return error; } cbytes = blosc_compress_context(context); if (cbytes < 0) { return cbytes; } if (context->use_dict && context->dict_cdict == NULL) { if (context->compcode != BLOSC_ZSTD) { const char* compname; compname = clibcode_to_clibname(context->compcode); fprintf(stderr, "Codec %s does not support dicts. Giving up.\n", compname); return -20; } #ifdef HAVE_ZSTD // Build the dictionary out of the filters outcome and compress with it int32_t dict_maxsize = BLOSC2_MAXDICTSIZE; // Do not make the dict more than 5% larger than uncompressed buffer if (dict_maxsize > srcsize / 20) { dict_maxsize = srcsize / 20; } void* samples_buffer = context->dest + BLOSC_EXTENDED_HEADER_LENGTH; unsigned nblocks = 8; // the minimum that accepts zstd as of 1.4.0 unsigned sample_fraction = 1; // 1 allows to use most of the chunk for training size_t sample_size = context->sourcesize / nblocks / sample_fraction; // Populate the samples sizes for training the dictionary size_t* samples_sizes = malloc(nblocks * sizeof(void*)); for (size_t i = 0; i < nblocks; i++) { samples_sizes[i] = sample_size; } // Train from samples void* dict_buffer = malloc(dict_maxsize); size_t dict_actual_size = ZDICT_trainFromBuffer(dict_buffer, dict_maxsize, samples_buffer, samples_sizes, nblocks); // TODO: experiment with parameters of low-level fast cover algorithm // Note that this API is still unstable. See: https://github.com/facebook/zstd/issues/1599 // ZDICT_fastCover_params_t fast_cover_params; // memset(&fast_cover_params, 0, sizeof(fast_cover_params)); // fast_cover_params.d = nblocks; // fast_cover_params.steps = 4; // fast_cover_params.zParams.compressionLevel = context->clevel; //size_t dict_actual_size = ZDICT_optimizeTrainFromBuffer_fastCover(dict_buffer, dict_maxsize, samples_buffer, samples_sizes, nblocks, &fast_cover_params); if (ZDICT_isError(dict_actual_size) != ZSTD_error_no_error) { fprintf(stderr, "Error in ZDICT_trainFromBuffer(): '%s'." 
" Giving up.\n", ZDICT_getErrorName(dict_actual_size)); return -20; } assert(dict_actual_size > 0); free(samples_sizes); // Update bytes counter and pointers to bstarts for the new compressed buffer context->bstarts = (int32_t*)(context->dest + BLOSC_EXTENDED_HEADER_LENGTH); context->output_bytes = BLOSC_EXTENDED_HEADER_LENGTH + sizeof(int32_t) * context->nblocks; /* Write the size of trained dict at the end of bstarts */ _sw32(context->dest + context->output_bytes, (int32_t)dict_actual_size); context->output_bytes += sizeof(int32_t); /* Write the trained dict afterwards */ context->dict_buffer = context->dest + context->output_bytes; memcpy(context->dict_buffer, dict_buffer, (unsigned int)dict_actual_size); context->dict_cdict = ZSTD_createCDict(dict_buffer, dict_actual_size, 1); // TODO: use get_accel() free(dict_buffer); // the dictionary is copied in the header now context->output_bytes += (int32_t)dict_actual_size; context->dict_size = dict_actual_size; /* Compress with dict */ cbytes = blosc_compress_context(context); // Invalidate the dictionary for compressing other chunks using the same context context->dict_buffer = NULL; ZSTD_freeCDict(context->dict_cdict); context->dict_cdict = NULL; #endif // HAVE_ZSTD } return cbytes; } void build_filters(const int doshuffle, const int delta, const size_t typesize, uint8_t* filters) { /* Fill the end part of the filter pipeline */ if ((doshuffle == BLOSC_SHUFFLE) && (typesize > 1)) filters[BLOSC2_MAX_FILTERS - 1] = BLOSC_SHUFFLE; if (doshuffle == BLOSC_BITSHUFFLE) filters[BLOSC2_MAX_FILTERS - 1] = BLOSC_BITSHUFFLE; if (delta) filters[BLOSC2_MAX_FILTERS - 2] = BLOSC_DELTA; } /* The public secure routine for compression. */ int blosc2_compress(int clevel, int doshuffle, int32_t typesize, const void* src, int32_t srcsize, void* dest, int32_t destsize) { int error; int result; char* envvar; /* Check whether the library should be initialized */ if (!g_initlib) blosc_init(); /* Check for a BLOSC_CLEVEL environment variable */ envvar = getenv("BLOSC_CLEVEL"); if (envvar != NULL) { long value; value = strtol(envvar, NULL, 10); if ((value != EINVAL) && (value >= 0)) { clevel = (int)value; } } /* Check for a BLOSC_SHUFFLE environment variable */ envvar = getenv("BLOSC_SHUFFLE"); if (envvar != NULL) { if (strcmp(envvar, "NOSHUFFLE") == 0) { doshuffle = BLOSC_NOSHUFFLE; } if (strcmp(envvar, "SHUFFLE") == 0) { doshuffle = BLOSC_SHUFFLE; } if (strcmp(envvar, "BITSHUFFLE") == 0) { doshuffle = BLOSC_BITSHUFFLE; } } /* Check for a BLOSC_DELTA environment variable */ envvar = getenv("BLOSC_DELTA"); if (envvar != NULL) { if (strcmp(envvar, "1") == 0) { blosc_set_delta(1); } else { blosc_set_delta(0); } } /* Check for a BLOSC_TYPESIZE environment variable */ envvar = getenv("BLOSC_TYPESIZE"); if (envvar != NULL) { long value; value = strtol(envvar, NULL, 10); if ((value != EINVAL) && (value > 0)) { typesize = (size_t)value; } } /* Check for a BLOSC_COMPRESSOR environment variable */ envvar = getenv("BLOSC_COMPRESSOR"); if (envvar != NULL) { result = blosc_set_compressor(envvar); if (result < 0) { return result; } } /* Check for a BLOSC_COMPRESSOR environment variable */ envvar = getenv("BLOSC_BLOCKSIZE"); if (envvar != NULL) { long blocksize; blocksize = strtol(envvar, NULL, 10); if ((blocksize != EINVAL) && (blocksize > 0)) { blosc_set_blocksize((size_t)blocksize); } } /* Check for a BLOSC_NTHREADS environment variable */ envvar = getenv("BLOSC_NTHREADS"); if (envvar != NULL) { long nthreads; nthreads = strtol(envvar, NULL, 10); if ((nthreads != EINVAL) && 
(nthreads > 0)) { result = blosc_set_nthreads((int)nthreads); if (result < 0) { return result; } } } /* Check for a BLOSC_NOLOCK environment variable. It is important that this should be the last env var so that it can take the previous ones into account */ envvar = getenv("BLOSC_NOLOCK"); if (envvar != NULL) { // TODO: here is the only place that returns an extended header from // a blosc_compress() call. This should probably be fixed. const char *compname; blosc2_context *cctx; blosc2_cparams cparams = BLOSC2_CPARAMS_DEFAULTS; blosc_compcode_to_compname(g_compressor, &compname); /* Create a context for compression */ build_filters(doshuffle, g_delta, typesize, cparams.filters); // TODO: cparams can be shared in a multithreaded environment. do a copy! cparams.typesize = (uint8_t)typesize; cparams.compcode = (uint8_t)g_compressor; cparams.clevel = (uint8_t)clevel; cparams.nthreads = (uint8_t)g_nthreads; cctx = blosc2_create_cctx(cparams); /* Do the actual compression */ result = blosc2_compress_ctx(cctx, src, srcsize, dest, destsize); /* Release context resources */ blosc2_free_ctx(cctx); return result; } pthread_mutex_lock(&global_comp_mutex); /* Initialize a context compression */ uint8_t* filters = calloc(1, BLOSC2_MAX_FILTERS); uint8_t* filters_meta = calloc(1, BLOSC2_MAX_FILTERS); build_filters(doshuffle, g_delta, typesize, filters); error = initialize_context_compression( g_global_context, src, srcsize, dest, destsize, clevel, filters, filters_meta, (int32_t)typesize, g_compressor, g_force_blocksize, g_nthreads, g_nthreads, g_schunk); free(filters); free(filters_meta); if (error <= 0) { pthread_mutex_unlock(&global_comp_mutex); return error; } /* Write chunk header without extended header (Blosc1 compatibility mode) */ error = write_compression_header(g_global_context, false); if (error < 0) { pthread_mutex_unlock(&global_comp_mutex); return error; } result = blosc_compress_context(g_global_context); pthread_mutex_unlock(&global_comp_mutex); return result; } /* The public routine for compression. */ int blosc_compress(int clevel, int doshuffle, size_t typesize, size_t nbytes, const void* src, void* dest, size_t destsize) { return blosc2_compress(clevel, doshuffle, (int32_t)typesize, src, (int32_t)nbytes, dest, (int32_t)destsize); } int blosc_run_decompression_with_context(blosc2_context* context, const void* src, int32_t srcsize, void* dest, int32_t destsize) { int32_t ntbytes; uint8_t* _src = (uint8_t*)src; uint8_t version; int error; if (srcsize <= 0) { /* Invalid argument */ return -1; } version = _src[0]; /* blosc format version */ if (version > BLOSC_VERSION_FORMAT) { /* Version from future */ return -1; } error = initialize_context_decompression(context, src, srcsize, dest, destsize); if (error < 0) { return error; } /* Check whether this buffer is memcpy'ed */ bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; if (memcpyed) { // Check that sizes in header are compatible, otherwise there is a header corruption ntbytes = context->sourcesize; int32_t cbytes = sw32_(_src + 12); /* compressed buffer size */ if (ntbytes + BLOSC_MAX_OVERHEAD != cbytes) { return -1; } // Check that we have enough space in destination for the copy operation if (destsize < ntbytes) { return -1; } memcpy(dest, _src + BLOSC_MAX_OVERHEAD, (unsigned int)ntbytes); } else { /* Do the actual decompression */ ntbytes = do_job(context); if (ntbytes < 0) { return -1; } } assert(ntbytes <= (int32_t)destsize); return ntbytes; } /* The public secure routine for decompression with context. 
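   For illustration only (buffer names are placeholders, not part of this
   file), a typical call sequence with a dedicated context looks like:

     blosc2_dparams dparams = BLOSC2_DPARAMS_DEFAULTS;
     blosc2_context *dctx = blosc2_create_dctx(dparams);
     int nbytes = blosc2_decompress_ctx(dctx, cbuf, cbuf_size, outbuf, outbuf_size);
     blosc2_free_ctx(dctx);

   A negative return value signals an error; otherwise it is the number of
   decompressed bytes.  Any block maskout previously set on the context is
   consumed and reset by this call.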
*/
int blosc2_decompress_ctx(blosc2_context* context, const void* src, int32_t srcsize,
                          void* dest, int32_t destsize) {
  int result;

  if (context->do_compress != 0) {
    fprintf(stderr, "Context is not meant for decompression. Giving up.\n");
    return -10;
  }

  result = blosc_run_decompression_with_context(context, src, srcsize, dest, destsize);

  // Reset a possible block_maskout
  if (context->block_maskout != NULL) {
    free(context->block_maskout);
    context->block_maskout = NULL;
  }
  context->block_maskout_nitems = 0;

  return result;
}

/* The public secure routine for decompression. */
int blosc2_decompress(const void* src, int32_t srcsize, void* dest, int32_t destsize) {
  int result;
  char* envvar;
  long nthreads;
  blosc2_context *dctx;
  blosc2_dparams dparams = BLOSC2_DPARAMS_DEFAULTS;

  /* Check whether the library should be initialized */
  if (!g_initlib) blosc_init();

  /* Check for a BLOSC_NTHREADS environment variable */
  envvar = getenv("BLOSC_NTHREADS");
  if (envvar != NULL) {
    nthreads = strtol(envvar, NULL, 10);
    if ((nthreads != EINVAL) && (nthreads > 0)) {
      result = blosc_set_nthreads((int)nthreads);
      if (result < 0) { return result; }
    }
  }

  /* Check for a BLOSC_NOLOCK environment variable.  It is important
     that this should be the last env var so that it can take the
     previous ones into account */
  envvar = getenv("BLOSC_NOLOCK");
  if (envvar != NULL) {
    dparams.nthreads = g_nthreads;
    dctx = blosc2_create_dctx(dparams);
    result = blosc2_decompress_ctx(dctx, src, srcsize, dest, destsize);
    blosc2_free_ctx(dctx);
    return result;
  }

  pthread_mutex_lock(&global_comp_mutex);
  result = blosc_run_decompression_with_context(
          g_global_context, src, srcsize, dest, destsize);
  pthread_mutex_unlock(&global_comp_mutex);

  return result;
}

/* The public routine for decompression. */
int blosc_decompress(const void* src, void* dest, size_t destsize) {
  return blosc2_decompress(src, INT32_MAX, dest, (int32_t)destsize);
}

/* Specific routine optimized for decompressing a small number of items out of
   a compressed chunk.  This does not use threads because it would negatively
   affect performance.
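   `start` and `nitems` are expressed in items of `typesize` bytes (the
   typesize is read from the chunk header).  Blocks that do not overlap the
   requested [start, start + nitems) range are skipped; overlapping blocks are
   decompressed serially, either straight into `dest` when a whole aligned
   block is requested or into a temporary from which the requested slice is
   copied.  The return value is the number of bytes copied into `dest`, or a
   negative error code.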
*/ int _blosc_getitem(blosc2_context* context, const void* src, int32_t srcsize, int start, int nitems, void* dest) { uint8_t* _src = NULL; /* current pos for source buffer */ uint8_t flags; /* flags for header */ int32_t ntbytes = 0; /* the number of uncompressed bytes */ int32_t nblocks; /* number of total blocks in buffer */ int32_t leftover; /* extra bytes at end of buffer */ int32_t* bstarts; /* start pointers for each block */ int32_t typesize, blocksize, nbytes; int32_t bsize, bsize2, ebsize, leftoverblock; int32_t cbytes; int32_t startb, stopb; int32_t stop = start + nitems; int j; if (srcsize < BLOSC_MIN_HEADER_LENGTH) { /* Not enough input to parse Blosc1 header */ return -1; } _src = (uint8_t*)(src); /* Read the header block */ flags = _src[2]; /* flags */ bool memcpyed = flags & (uint8_t)BLOSC_MEMCPYED; typesize = (int32_t)_src[3]; /* typesize */ nbytes = sw32_(_src + 4); /* buffer size */ blocksize = sw32_(_src + 8); /* block size */ cbytes = sw32_(_src + 12); /* compressed buffer size */ ebsize = blocksize + typesize * (int32_t)sizeof(int32_t); if ((context->header_flags & BLOSC_DOSHUFFLE) && (context->header_flags & BLOSC_DOBITSHUFFLE)) { /* Extended header */ if (srcsize < BLOSC_EXTENDED_HEADER_LENGTH) { /* Not enough input to parse Blosc2 header */ return -1; } uint8_t* filters = _src + BLOSC_MIN_HEADER_LENGTH; uint8_t* filters_meta = filters + 8; for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) { context->filters[i] = filters[i]; context->filters_meta[i] = filters_meta[i]; } bstarts = (int32_t*)(_src + BLOSC_EXTENDED_HEADER_LENGTH); } else { /* Minimal header */ flags_to_filters(flags, context->filters); bstarts = (int32_t*)(_src + BLOSC_MIN_HEADER_LENGTH); } // Some checks for malformed buffers if (blocksize <= 0 || blocksize > nbytes || typesize <= 0 || typesize > BLOSC_MAX_TYPESIZE) { return -1; } /* Compute some params */ /* Total blocks */ nblocks = nbytes / blocksize; leftover = nbytes % blocksize; nblocks = (leftover > 0) ? 
nblocks + 1 : nblocks; /* Check region boundaries */ if ((start < 0) || (start * typesize > nbytes)) { fprintf(stderr, "`start` out of bounds"); return -1; } if ((stop < 0) || (stop * typesize > nbytes)) { fprintf(stderr, "`start`+`nitems` out of bounds"); return -1; } if (_src + srcsize < (uint8_t *)(bstarts + nblocks)) { /* Not enough input to read all `bstarts` */ return -1; } for (j = 0; j < nblocks; j++) { bsize = blocksize; leftoverblock = 0; if ((j == nblocks - 1) && (leftover > 0)) { bsize = leftover; leftoverblock = 1; } /* Compute start & stop for each block */ startb = start * (int)typesize - j * (int)blocksize; stopb = stop * (int)typesize - j * (int)blocksize; if ((startb >= (int)blocksize) || (stopb <= 0)) { continue; } if (startb < 0) { startb = 0; } if (stopb > (int)blocksize) { stopb = (int)blocksize; } bsize2 = stopb - startb; /* Do the actual data copy */ if (memcpyed) { // Check that sizes in header are compatible, otherwise there is a header corruption if (nbytes + BLOSC_MAX_OVERHEAD != cbytes) { return -1; } if (srcsize < BLOSC_MAX_OVERHEAD + j * blocksize + startb + bsize2) { /* Not enough input to copy data */ return -1; } memcpy((uint8_t*)dest + ntbytes, (uint8_t*)src + BLOSC_MAX_OVERHEAD + j * blocksize + startb, (unsigned int)bsize2); cbytes = (int)bsize2; } else { struct thread_context* scontext = context->serial_context; /* Resize the temporaries in serial context if needed */ if (blocksize != scontext->tmp_blocksize) { my_free(scontext->tmp); scontext->tmp_nbytes = (size_t)3 * context->blocksize + ebsize; scontext->tmp = my_malloc(scontext->tmp_nbytes); scontext->tmp2 = scontext->tmp + blocksize; scontext->tmp3 = scontext->tmp + blocksize + ebsize; scontext->tmp4 = scontext->tmp + 2 * blocksize + ebsize; scontext->tmp_blocksize = (int32_t)blocksize; } // Regular decompression. Put results in tmp2. // If the block is aligned and the worst case fits in destination, let's avoid a copy bool get_single_block = ((startb == 0) && (bsize == nitems * typesize)); uint8_t* tmp2 = get_single_block ? dest : scontext->tmp2; cbytes = blosc_d(context->serial_context, bsize, leftoverblock, src, srcsize, sw32_(bstarts + j), tmp2, 0, scontext->tmp, scontext->tmp3); if (cbytes < 0) { ntbytes = cbytes; break; } if (!get_single_block) { /* Copy to destination */ memcpy((uint8_t *) dest + ntbytes, tmp2 + startb, (unsigned int) bsize2); } cbytes = (int)bsize2; } ntbytes += cbytes; } return ntbytes; } /* Specific routine optimized for decompression a small number of items out of a compressed chunk. Public non-contextual API. 
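   For illustration only (buffer names are placeholders, not part of this
   file): pulling items 10..19 out of a compressed chunk of doubles would
   look like

     double out[10];
     int nbytes = blosc_getitem(cbuf, 10, 10, out);

   where nbytes is the number of bytes written into out, or a negative value
   on error.  Typesize and blocksize are taken from the chunk header, so no
   prior configuration is needed.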
*/ int blosc_getitem(const void* src, int start, int nitems, void* dest) { uint8_t* _src = (uint8_t*)(src); blosc2_context context; int result; uint8_t version = _src[0]; /* blosc format version */ if (version > BLOSC_VERSION_FORMAT) { /* Version from future */ return -1; } /* Minimally populate the context */ memset(&context, 0, sizeof(blosc2_context)); context.src = src; context.dest = dest; context.typesize = (uint8_t)_src[3]; context.blocksize = sw32_(_src + 8); context.header_flags = *(_src + 2); context.filter_flags = get_filter_flags(context.header_flags, context.typesize); context.schunk = g_schunk; context.nthreads = 1; // force a serial decompression; fixes #95 context.serial_context = create_thread_context(&context, 0); /* Call the actual getitem function */ result = _blosc_getitem(&context, src, INT32_MAX, start, nitems, dest); /* Release resources */ free_thread_context(context.serial_context); return result; } int blosc2_getitem_ctx(blosc2_context* context, const void* src, int32_t srcsize, int start, int nitems, void* dest) { uint8_t* _src = (uint8_t*)(src); int result; /* Minimally populate the context */ context->typesize = (uint8_t)_src[3]; context->blocksize = sw32_(_src + 8); context->header_flags = *(_src + 2); context->filter_flags = get_filter_flags(*(_src + 2), context->typesize); if (context->serial_context == NULL) { context->serial_context = create_thread_context(context, 0); } /* Call the actual getitem function */ result = _blosc_getitem(context, src, srcsize, start, nitems, dest); return result; } /* execute single compression/decompression job for a single thread_context */ static void t_blosc_do_job(void *ctxt) { struct thread_context* thcontext = (struct thread_context*)ctxt; blosc2_context* context = thcontext->parent_context; int32_t cbytes; int32_t ntdest; int32_t tblocks; /* number of blocks per thread */ int32_t tblock; /* limit block on a thread */ int32_t nblock_; /* private copy of nblock */ int32_t bsize; int32_t leftoverblock; /* Parameters for threads */ int32_t blocksize; int32_t ebsize; int32_t srcsize; bool compress = context->do_compress != 0; int32_t maxbytes; int32_t nblocks; int32_t leftover; int32_t leftover2; int32_t* bstarts; const uint8_t* src; uint8_t* dest; uint8_t* tmp; uint8_t* tmp2; uint8_t* tmp3; /* Get parameters for this thread before entering the main loop */ blocksize = context->blocksize; ebsize = blocksize + context->typesize * sizeof(int32_t); maxbytes = context->destsize; nblocks = context->nblocks; leftover = context->leftover; bstarts = context->bstarts; src = context->src; srcsize = context->srcsize; dest = context->dest; /* Resize the temporaries if needed */ if (blocksize != thcontext->tmp_blocksize) { my_free(thcontext->tmp); thcontext->tmp_nbytes = (size_t)3 * context->blocksize + ebsize; thcontext->tmp = my_malloc(thcontext->tmp_nbytes); thcontext->tmp2 = thcontext->tmp + blocksize; thcontext->tmp3 = thcontext->tmp + blocksize + ebsize; thcontext->tmp4 = thcontext->tmp + 2 * blocksize + ebsize; thcontext->tmp_blocksize = blocksize; } tmp = thcontext->tmp; tmp2 = thcontext->tmp2; tmp3 = thcontext->tmp3; // Determine whether we can do a static distribution of workload among different threads bool memcpyed = context->header_flags & (uint8_t)BLOSC_MEMCPYED; bool static_schedule = (!compress || memcpyed) && context->block_maskout == NULL; if (static_schedule) { /* Blocks per thread */ tblocks = nblocks / context->nthreads; leftover2 = nblocks % context->nthreads; tblocks = (leftover2 > 0) ? 
tblocks + 1 : tblocks; nblock_ = thcontext->tid * tblocks; tblock = nblock_ + tblocks; if (tblock > nblocks) { tblock = nblocks; } } else { // Use dynamic schedule via a queue. Get the next block. pthread_mutex_lock(&context->count_mutex); context->thread_nblock++; nblock_ = context->thread_nblock; pthread_mutex_unlock(&context->count_mutex); tblock = nblocks; } /* Loop over blocks */ leftoverblock = 0; while ((nblock_ < tblock) && (context->thread_giveup_code > 0)) { bsize = blocksize; if (nblock_ == (nblocks - 1) && (leftover > 0)) { bsize = leftover; leftoverblock = 1; } if (compress) { if (memcpyed) { if (!context->prefilter) { /* We want to memcpy only */ memcpy(dest + BLOSC_MAX_OVERHEAD + nblock_ * blocksize, src + nblock_ * blocksize, (unsigned int) bsize); cbytes = (int32_t) bsize; } else { /* Only the prefilter has to be executed, and this is done in blosc_c(). * However, no further actions are needed, so we can put the result * directly in dest. */ cbytes = blosc_c(thcontext, bsize, leftoverblock, 0, ebsize, src, nblock_ * blocksize, dest + BLOSC_MAX_OVERHEAD + nblock_ * blocksize, tmp, tmp3); } } else { /* Regular compression */ cbytes = blosc_c(thcontext, bsize, leftoverblock, 0, ebsize, src, nblock_ * blocksize, tmp2, tmp, tmp3); } } else { if (memcpyed) { /* We want to memcpy only */ if (srcsize < BLOSC_MAX_OVERHEAD + (nblock_ * blocksize) + bsize) { /* Not enough input to copy data */ cbytes = -1; } else { memcpy(dest + nblock_ * blocksize, src + BLOSC_MAX_OVERHEAD + nblock_ * blocksize, (unsigned int)bsize); cbytes = (int32_t)bsize; } } else { if (srcsize < (int32_t)(BLOSC_MAX_OVERHEAD + (sizeof(int32_t) * nblocks))) { /* Not enough input to read all `bstarts` */ cbytes = -1; } else { cbytes = blosc_d(thcontext, bsize, leftoverblock, src, srcsize, sw32_(bstarts + nblock_), dest, nblock_ * blocksize, tmp, tmp2); } } } /* Check whether current thread has to giveup */ if (context->thread_giveup_code <= 0) { break; } /* Check results for the compressed/decompressed block */ if (cbytes < 0) { /* compr/decompr failure */ /* Set giveup_code error */ pthread_mutex_lock(&context->count_mutex); context->thread_giveup_code = cbytes; pthread_mutex_unlock(&context->count_mutex); break; } if (compress && !memcpyed) { /* Start critical section */ pthread_mutex_lock(&context->count_mutex); ntdest = context->output_bytes; // Note: do not use a typical local dict_training variable here // because it is probably cached from previous calls if the number of // threads does not change (the usual thing). 
if (!(context->use_dict && context->dict_cdict == NULL)) { _sw32(bstarts + nblock_, (int32_t) ntdest); } if ((cbytes == 0) || (ntdest + cbytes > maxbytes)) { context->thread_giveup_code = 0; /* uncompressible buf */ pthread_mutex_unlock(&context->count_mutex); break; } context->thread_nblock++; nblock_ = context->thread_nblock; context->output_bytes += cbytes; pthread_mutex_unlock(&context->count_mutex); /* End of critical section */ /* Copy the compressed buffer to destination */ memcpy(dest + ntdest, tmp2, (unsigned int) cbytes); } else if (static_schedule) { nblock_++; } else { pthread_mutex_lock(&context->count_mutex); context->thread_nblock++; nblock_ = context->thread_nblock; context->output_bytes += cbytes; pthread_mutex_unlock(&context->count_mutex); } } /* closes while (nblock_) */ if (static_schedule) { context->output_bytes = context->sourcesize; if (compress) { context->output_bytes += BLOSC_MAX_OVERHEAD; } } } /* Decompress & unshuffle several blocks in a single thread */ static void* t_blosc(void* ctxt) { struct thread_context* thcontext = (struct thread_context*)ctxt; blosc2_context* context = thcontext->parent_context; #ifdef BLOSC_POSIX_BARRIERS int rc; #endif while (1) { /* Synchronization point for all threads (wait for initialization) */ WAIT_INIT(NULL, context); if (context->end_threads) { break; } t_blosc_do_job(ctxt); /* Meeting point for all threads (wait for finalization) */ WAIT_FINISH(NULL, context); } /* Cleanup our working space and context */ free_thread_context(thcontext); return (NULL); } int init_threadpool(blosc2_context *context) { int32_t tid; int rc2; /* Initialize mutex and condition variable objects */ pthread_mutex_init(&context->count_mutex, NULL); pthread_mutex_init(&context->delta_mutex, NULL); pthread_cond_init(&context->delta_cv, NULL); /* Set context thread sentinels */ context->thread_giveup_code = 1; context->thread_nblock = -1; /* Barrier initialization */ #ifdef BLOSC_POSIX_BARRIERS pthread_barrier_init(&context->barr_init, NULL, context->nthreads + 1); pthread_barrier_init(&context->barr_finish, NULL, context->nthreads + 1); #else pthread_mutex_init(&context->count_threads_mutex, NULL); pthread_cond_init(&context->count_threads_cv, NULL); context->count_threads = 0; /* Reset threads counter */ #endif if (threads_callback) { /* Create thread contexts to store data for callback threads */ context->thread_contexts = (struct thread_context *)my_malloc( context->nthreads * sizeof(struct thread_context)); for (tid = 0; tid < context->nthreads; tid++) init_thread_context(context->thread_contexts + tid, context, tid); } else { #if !defined(_WIN32) /* Initialize and set thread detached attribute */ pthread_attr_init(&context->ct_attr); pthread_attr_setdetachstate(&context->ct_attr, PTHREAD_CREATE_JOINABLE); #endif /* Make space for thread handlers */ context->threads = (pthread_t*)my_malloc( context->nthreads * sizeof(pthread_t)); /* Finally, create the threads */ for (tid = 0; tid < context->nthreads; tid++) { /* Create a thread context (will destroy when finished) */ struct thread_context *thread_context = create_thread_context(context, tid); #if !defined(_WIN32) rc2 = pthread_create(&context->threads[tid], &context->ct_attr, t_blosc, (void*)thread_context); #else rc2 = pthread_create(&context->threads[tid], NULL, t_blosc, (void *)thread_context); #endif if (rc2) { fprintf(stderr, "ERROR; return code from pthread_create() is %d\n", rc2); fprintf(stderr, "\tError detail: %s\n", strerror(rc2)); return (-1); } } } /* We have now started/initialized 
the threads */ context->threads_started = context->nthreads; context->new_nthreads = context->nthreads; return (0); } int blosc_get_nthreads(void) { return g_nthreads; } int blosc_set_nthreads(int nthreads_new) { int ret = g_nthreads; /* the previous number of threads */ /* Check whether the library should be initialized */ if (!g_initlib) blosc_init(); if (nthreads_new != ret) { g_nthreads = nthreads_new; g_global_context->new_nthreads = nthreads_new; check_nthreads(g_global_context); } return ret; } const char* blosc_get_compressor(void) { const char* compname; blosc_compcode_to_compname(g_compressor, &compname); return compname; } int blosc_set_compressor(const char* compname) { int code = blosc_compname_to_compcode(compname); g_compressor = code; /* Check whether the library should be initialized */ if (!g_initlib) blosc_init(); return code; } void blosc_set_delta(int dodelta) { g_delta = dodelta; /* Check whether the library should be initialized */ if (!g_initlib) blosc_init(); } const char* blosc_list_compressors(void) { static int compressors_list_done = 0; static char ret[256]; if (compressors_list_done) return ret; ret[0] = '\0'; strcat(ret, BLOSC_BLOSCLZ_COMPNAME); #if defined(HAVE_LZ4) strcat(ret, ","); strcat(ret, BLOSC_LZ4_COMPNAME); strcat(ret, ","); strcat(ret, BLOSC_LZ4HC_COMPNAME); #endif /* HAVE_LZ4 */ #if defined(HAVE_LIZARD) strcat(ret, ","); strcat(ret, BLOSC_LIZARD_COMPNAME); #endif /* HAVE_LIZARD */ #if defined(HAVE_SNAPPY) strcat(ret, ","); strcat(ret, BLOSC_SNAPPY_COMPNAME); #endif /* HAVE_SNAPPY */ #if defined(HAVE_ZLIB) strcat(ret, ","); strcat(ret, BLOSC_ZLIB_COMPNAME); #endif /* HAVE_ZLIB */ #if defined(HAVE_ZSTD) strcat(ret, ","); strcat(ret, BLOSC_ZSTD_COMPNAME); #endif /* HAVE_ZSTD */ compressors_list_done = 1; return ret; } const char* blosc_get_version_string(void) { return BLOSC_VERSION_STRING; } int blosc_get_complib_info(const char* compname, char** complib, char** version) { int clibcode; const char* clibname; const char* clibversion = "unknown"; #if (defined(HAVE_LZ4) && defined(LZ4_VERSION_MAJOR)) || \ (defined(HAVE_LIZARD) && defined(LIZARD_VERSION_MAJOR)) || \ (defined(HAVE_SNAPPY) && defined(SNAPPY_VERSION)) || \ (defined(HAVE_ZSTD) && defined(ZSTD_VERSION_MAJOR)) char sbuffer[256]; #endif clibcode = compname_to_clibcode(compname); clibname = clibcode_to_clibname(clibcode); /* complib version */ if (clibcode == BLOSC_BLOSCLZ_LIB) { clibversion = BLOSCLZ_VERSION_STRING; } #if defined(HAVE_LZ4) else if (clibcode == BLOSC_LZ4_LIB) { #if defined(LZ4_VERSION_MAJOR) sprintf(sbuffer, "%d.%d.%d", LZ4_VERSION_MAJOR, LZ4_VERSION_MINOR, LZ4_VERSION_RELEASE); clibversion = sbuffer; #endif /* LZ4_VERSION_MAJOR */ } #endif /* HAVE_LZ4 */ #if defined(HAVE_LIZARD) else if (clibcode == BLOSC_LIZARD_LIB) { sprintf(sbuffer, "%d.%d.%d", LIZARD_VERSION_MAJOR, LIZARD_VERSION_MINOR, LIZARD_VERSION_RELEASE); clibversion = sbuffer; } #endif /* HAVE_LIZARD */ #if defined(HAVE_SNAPPY) else if (clibcode == BLOSC_SNAPPY_LIB) { #if defined(SNAPPY_VERSION) sprintf(sbuffer, "%d.%d.%d", SNAPPY_MAJOR, SNAPPY_MINOR, SNAPPY_PATCHLEVEL); clibversion = sbuffer; #endif /* SNAPPY_VERSION */ } #endif /* HAVE_SNAPPY */ #if defined(HAVE_ZLIB) else if (clibcode == BLOSC_ZLIB_LIB) { clibversion = ZLIB_VERSION; } #endif /* HAVE_ZLIB */ #if defined(HAVE_ZSTD) else if (clibcode == BLOSC_ZSTD_LIB) { sprintf(sbuffer, "%d.%d.%d", ZSTD_VERSION_MAJOR, ZSTD_VERSION_MINOR, ZSTD_VERSION_RELEASE); clibversion = sbuffer; } #endif /* HAVE_ZSTD */ #ifdef _MSC_VER *complib = _strdup(clibname); *version = 
_strdup(clibversion); #else *complib = strdup(clibname); *version = strdup(clibversion); #endif return clibcode; } /* Return `nbytes`, `cbytes` and `blocksize` from a compressed buffer. */ void blosc_cbuffer_sizes(const void* cbuffer, size_t* nbytes, size_t* cbytes, size_t* blocksize) { uint8_t* _src = (uint8_t*)(cbuffer); /* current pos for source buffer */ uint8_t version = _src[0]; /* blosc format version */ if (version > BLOSC_VERSION_FORMAT) { /* Version from future */ *nbytes = *blocksize = *cbytes = 0; return; } /* Read the interesting values */ *nbytes = (size_t)sw32_(_src + 4); /* uncompressed buffer size */ *blocksize = (size_t)sw32_(_src + 8); /* block size */ *cbytes = (size_t)sw32_(_src + 12); /* compressed buffer size */ } int blosc_cbuffer_validate(const void* cbuffer, size_t cbytes, size_t* nbytes) { size_t header_cbytes, header_blocksize; if (cbytes < BLOSC_MIN_HEADER_LENGTH) { /* Compressed data should contain enough space for header */ *nbytes = 0; return -1; } blosc_cbuffer_sizes(cbuffer, nbytes, &header_cbytes, &header_blocksize); if (header_cbytes != cbytes) { /* Compressed size from header does not match `cbytes` */ *nbytes = 0; return -1; } if (*nbytes > BLOSC_MAX_BUFFERSIZE) { /* Uncompressed size is larger than allowed */ return -1; } return 0; } /* Return `typesize` and `flags` from a compressed buffer. */ void blosc_cbuffer_metainfo(const void* cbuffer, size_t* typesize, int* flags) { uint8_t* _src = (uint8_t*)(cbuffer); /* current pos for source buffer */ uint8_t version = _src[0]; /* blosc format version */ if (version > BLOSC_VERSION_FORMAT) { /* Version from future */ *flags = 0; *typesize = 0; return; } /* Read the interesting values */ *flags = (int)_src[2]; /* flags */ *typesize = (size_t)_src[3]; /* typesize */ } /* Return version information from a compressed buffer. */ void blosc_cbuffer_versions(const void* cbuffer, int* version, int* versionlz) { uint8_t* _src = (uint8_t*)(cbuffer); /* current pos for source buffer */ /* Read the version info */ *version = (int)_src[0]; /* blosc format version */ *versionlz = (int)_src[1]; /* Lempel-Ziv compressor format version */ } /* Return the compressor library/format used in a compressed buffer. */ const char* blosc_cbuffer_complib(const void* cbuffer) { uint8_t* _src = (uint8_t*)(cbuffer); /* current pos for source buffer */ int clibcode; const char* complib; /* Read the compressor format/library info */ clibcode = (_src[2] & 0xe0) >> 5; complib = clibcode_to_clibname(clibcode); return complib; } /* Get the internal blocksize to be used during compression. 0 means that an automatic blocksize is computed internally. */ int blosc_get_blocksize(void) { return (int)g_force_blocksize; } /* Force the use of a specific blocksize. If 0, an automatic blocksize will be used (the default). */ void blosc_set_blocksize(size_t size) { g_force_blocksize = (int32_t)size; } /* Set pointer to super-chunk. If NULL, no super-chunk will be reachable (the default). 
*/ void blosc_set_schunk(blosc2_schunk* schunk) { g_schunk = schunk; g_global_context->schunk = schunk; } void blosc_init(void) { /* Return if Blosc is already initialized */ if (g_initlib) return; pthread_mutex_init(&global_comp_mutex, NULL); /* Create a global context */ g_global_context = (blosc2_context*)my_malloc(sizeof(blosc2_context)); memset(g_global_context, 0, sizeof(blosc2_context)); g_global_context->nthreads = g_nthreads; g_global_context->new_nthreads = g_nthreads; g_initlib = 1; } void blosc_destroy(void) { /* Return if Blosc is not initialized */ if (!g_initlib) return; g_initlib = 0; release_threadpool(g_global_context); if (g_global_context->serial_context != NULL) { free_thread_context(g_global_context->serial_context); } my_free(g_global_context); pthread_mutex_destroy(&global_comp_mutex); } int release_threadpool(blosc2_context *context) { int32_t t; void* status; int rc; if (context->threads_started > 0) { if (threads_callback) { /* free context data for user-managed threads */ for (t=0; t<context->threads_started; t++) destroy_thread_context(context->thread_contexts + t); my_free(context->thread_contexts); } else { /* Tell all existing threads to finish */ context->end_threads = 1; WAIT_INIT(-1, context); /* Join exiting threads */ for (t = 0; t < context->threads_started; t++) { rc = pthread_join(context->threads[t], &status); if (rc) { fprintf(stderr, "ERROR; return code from pthread_join() is %d\n", rc); fprintf(stderr, "\tError detail: %s\n", strerror(rc)); } } /* Thread attributes */ #if !defined(_WIN32) pthread_attr_destroy(&context->ct_attr); #endif /* Release thread handlers */ my_free(context->threads); } /* Release mutex and condition variable objects */ pthread_mutex_destroy(&context->count_mutex); pthread_mutex_destroy(&context->delta_mutex); pthread_cond_destroy(&context->delta_cv); /* Barriers */ #ifdef BLOSC_POSIX_BARRIERS pthread_barrier_destroy(&context->barr_init); pthread_barrier_destroy(&context->barr_finish); #else pthread_mutex_destroy(&context->count_threads_mutex); pthread_cond_destroy(&context->count_threads_cv); context->count_threads = 0; /* Reset threads counter */ #endif /* Reset flags and counters */ context->end_threads = 0; context->threads_started = 0; } return 0; } int blosc_free_resources(void) { /* Return if Blosc is not initialized */ if (!g_initlib) return -1; return release_threadpool(g_global_context); } /* Contexts */ /* Create a context for compression */ blosc2_context* blosc2_create_cctx(blosc2_cparams cparams) { blosc2_context* context = (blosc2_context*)my_malloc(sizeof(blosc2_context)); /* Populate the context, using zeros as default values */ memset(context, 0, sizeof(blosc2_context)); context->do_compress = 1; /* meant for compression */ context->compcode = cparams.compcode; context->clevel = cparams.clevel; context->use_dict = cparams.use_dict; context->typesize = cparams.typesize; for (int i = 0; i < BLOSC2_MAX_FILTERS; i++) { context->filters[i] = cparams.filters[i]; context->filters_meta[i] = cparams.filters_meta[i]; } context->nthreads = cparams.nthreads; context->new_nthreads = context->nthreads; context->blocksize = cparams.blocksize; context->threads_started = 0; context->schunk = cparams.schunk; if (cparams.prefilter != NULL) { context->prefilter = cparams.prefilter; context->pparams = (blosc2_prefilter_params*)my_malloc(sizeof(blosc2_prefilter_params)); memcpy(context->pparams, cparams.pparams, sizeof(blosc2_prefilter_params)); } return context; } /* Create a context for decompression */ blosc2_context* 
blosc2_create_dctx(blosc2_dparams dparams) { blosc2_context* context = (blosc2_context*)my_malloc(sizeof(blosc2_context)); /* Populate the context, using zeros as default values */ memset(context, 0, sizeof(blosc2_context)); context->do_compress = 0; /* Meant for decompression */ context->nthreads = dparams.nthreads; context->new_nthreads = context->nthreads; context->threads_started = 0; context->block_maskout = NULL; context->block_maskout_nitems = 0; context->schunk = dparams.schunk; return context; } void blosc2_free_ctx(blosc2_context* context) { release_threadpool(context); if (context->serial_context != NULL) { free_thread_context(context->serial_context); } if (context->dict_cdict != NULL) { #ifdef HAVE_ZSTD ZSTD_freeCDict(context->dict_cdict); #endif } if (context->dict_ddict != NULL) { #ifdef HAVE_ZSTD ZSTD_freeDDict(context->dict_ddict); #endif } if (context->btune != NULL) { btune_free(context); } if (context->prefilter != NULL) { my_free(context->pparams); } if (context->block_maskout != NULL) { free(context->block_maskout); } my_free(context); } /* Set a maskout in decompression context */ int blosc2_set_maskout(blosc2_context *ctx, bool *maskout, int nblocks) { if (ctx->block_maskout != NULL) { // Get rid of a possible mask here free(ctx->block_maskout); } bool *maskout_ = malloc(nblocks); memcpy(maskout_, maskout, nblocks); ctx->block_maskout = maskout_; ctx->block_maskout_nitems = nblocks; return 0; }
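The c-blosc2 decompression code in the record above guards every raw block copy with an explicit input-size check (for example `if (srcsize < BLOSC_MAX_OVERHEAD + j * blocksize + startb + bsize2) return -1;`) and verifies that the whole `bstarts` index array fits inside the source buffer before indexing it. A minimal sketch of that validate-before-copy pattern, with hypothetical names and a placeholder header length (not the library's real constants):

#include <stdint.h>
#include <string.h>

/* Placeholder standing in for BLOSC_MAX_OVERHEAD; an assumption for this sketch only. */
#define SKETCH_HEADER_LEN 16

/* Prove that the requested region of the compressed stream lies inside
 * `srcsize` bytes before copying it, instead of trusting header fields. */
static int copy_stored_block(uint8_t *dst, const uint8_t *src, int64_t srcsize,
                             int64_t block, int64_t blocksize,
                             int64_t startb, int64_t nbytes)
{
    int64_t off = SKETCH_HEADER_LEN + block * blocksize + startb;

    if (nbytes < 0 || off < 0 || srcsize < off + nbytes)
        return -1;              /* not enough input: fail instead of overreading */

    memcpy(dst, src + off, (size_t)nbytes);
    return (int)nbytes;
}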
null
217
CWE-787
CVE-2020-35963
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ /* Fluent Bit * ========== * Copyright (C) 2019-2020 The Fluent Bit Authors * Copyright (C) 2015-2018 Treasure Data Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <fluent-bit/flb_info.h> #include <fluent-bit/flb_mem.h> #include <fluent-bit/flb_log.h> #include <fluent-bit/flb_gzip.h> #include <miniz/miniz.h> #define FLB_GZIP_HEADER_OFFSET 10 typedef enum { FTEXT = 1, FHCRC = 2, FEXTRA = 4, FNAME = 8, FCOMMENT = 16 } flb_tinf_gzip_flag; static unsigned int read_le16(const unsigned char *p) { return ((unsigned int) p[0]) | ((unsigned int) p[1] << 8); } static unsigned int read_le32(const unsigned char *p) { return ((unsigned int) p[0]) | ((unsigned int) p[1] << 8) | ((unsigned int) p[2] << 16) | ((unsigned int) p[3] << 24); } static inline void gzip_header(void *buf) { uint8_t *p; /* GZip Magic bytes */ p = buf; *p++ = 0x1F; *p++ = 0x8B; *p++ = 8; *p++ = 0; *p++ = 0; *p++ = 0; *p++ = 0; *p++ = 0; *p++ = 0; *p++ = 0xFF; } int flb_gzip_compress(void *in_data, size_t in_len, void **out_data, size_t *out_len) { int flush; int status; int footer_start; uint8_t *pb; size_t out_size; void *out_buf; z_stream strm; mz_ulong crc; out_size = in_len + 32; out_buf = flb_malloc(out_size); if (!out_buf) { flb_errno(); flb_error("[gzip] could not allocate outgoing buffer"); return -1; } /* Initialize streaming buffer context */ memset(&strm, '\0', sizeof(strm)); strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL; strm.next_in = in_data; strm.avail_in = in_len; strm.total_out = 0; /* Deflate mode */ deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -Z_DEFAULT_WINDOW_BITS, 9, Z_DEFAULT_STRATEGY); /* * Miniz don't support GZip format directly, instead we will: * * - append manual GZip magic bytes * - deflate raw content * - append manual CRC32 data */ gzip_header(out_buf); /* Header offset */ pb = (uint8_t *) out_buf + FLB_GZIP_HEADER_OFFSET; flush = Z_NO_FLUSH; while (1) { strm.next_out = pb + strm.total_out; strm.avail_out = out_size - (pb - (uint8_t *) out_buf); if (strm.avail_in == 0) { flush = Z_FINISH; } status = deflate(&strm, flush); if (status == Z_STREAM_END) { break; } else if (status != Z_OK) { deflateEnd(&strm); return -1; } } if (deflateEnd(&strm) != Z_OK) { flb_free(out_buf); return -1; } *out_len = strm.total_out; /* Construct the gzip checksum (CRC32 footer) */ footer_start = FLB_GZIP_HEADER_OFFSET + *out_len; pb = (uint8_t *) out_buf + footer_start; crc = mz_crc32(MZ_CRC32_INIT, in_data, in_len); *pb++ = crc & 0xFF; *pb++ = (crc >> 8) & 0xFF; *pb++ = (crc >> 16) & 0xFF; *pb++ = (crc >> 24) & 0xFF; *pb++ = in_len & 0xFF; *pb++ = (in_len >> 8) & 0xFF; *pb++ = (in_len >> 16) & 0xFF; *pb++ = (in_len >> 24) & 0xFF; /* Set the real buffer size for the caller */ *out_len += FLB_GZIP_HEADER_OFFSET + 8; *out_data = out_buf; return 0; } /* Uncompress (inflate) GZip data */ int flb_gzip_uncompress(void *in_data, size_t in_len, void **out_data, size_t *out_len) { int status; uint8_t *p; void *out_buf; size_t 
out_size = 0; void *zip_data; size_t zip_len; unsigned char flg; unsigned int xlen, hcrc; unsigned int dlen, crc; mz_ulong crc_out; mz_stream stream; const unsigned char *start; /* Minimal length: header + crc32 */ if (in_len < 18) { flb_error("[gzip] unexpected content length"); return -1; } /* Magic bytes */ p = in_data; if (p[0] != 0x1F || p[1] != 0x8B) { flb_error("[gzip] invalid magic bytes"); return -1; } if (p[2] != 8) { flb_error("[gzip] invalid method"); return -1; } /* Flag byte */ flg = p[3]; /* Reserved bits */ if (flg & 0xE0) { flb_error("[gzip] invalid flag"); return -1; } /* Skip base header of 10 bytes */ start = p + FLB_GZIP_HEADER_OFFSET; /* Skip extra data if present */ if (flg & FEXTRA) { xlen = read_le16(start); if (xlen > in_len - 12) { flb_error("[gzip] invalid gzip data"); return -1; } start += xlen + 2; } /* Skip file name if present */ if (flg & FNAME) { do { if (start - p >= in_len) { flb_error("[gzip] invalid gzip data (FNAME)"); return -1; } } while (*start++); } /* Skip file comment if present */ if (flg & FCOMMENT) { do { if (start - p >= in_len) { flb_error("[gzip] invalid gzip data (FCOMMENT)"); return -1; } } while (*start++); } /* Check header crc if present */ if (flg & FHCRC) { if (start - p > in_len - 2) { flb_error("[gzip] invalid gzip data (FHRC)"); return -1; } hcrc = read_le16(start); crc = mz_crc32(MZ_CRC32_INIT, p, start - p) & 0x0000FFFF; if (hcrc != crc) { flb_error("[gzip] invalid gzip header CRC"); return -1; } start += 2; } /* Get decompressed length */ dlen = read_le32(&p[in_len - 4]); /* Get CRC32 checksum of original data */ crc = read_le32(&p[in_len - 8]); /* Decompress data */ if ((p + in_len) - p < 8) { flb_error("[gzip] invalid gzip CRC32 checksum"); return -1; } /* Allocate outgoing buffer */ out_buf = flb_malloc(dlen); if (!out_buf) { flb_errno(); return -1; } out_size = dlen; /* Map zip content */ zip_data = (uint8_t *) start; zip_len = (p + in_len) - start - 8; memset(&stream, 0, sizeof(stream)); stream.next_in = zip_data; stream.avail_in = zip_len; stream.next_out = out_buf; stream.avail_out = out_size; status = mz_inflateInit2(&stream, -Z_DEFAULT_WINDOW_BITS); if (status != MZ_OK) { flb_free(out_buf); return -1; } status = mz_inflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_inflateEnd(&stream); flb_free(out_buf); return -1; } if (stream.total_out != dlen) { mz_inflateEnd(&stream); flb_free(out_buf); flb_error("[gzip] invalid gzip data size"); return -1; } /* terminate the stream, it's not longer required */ mz_inflateEnd(&stream); /* Validate message CRC vs inflated data CRC */ crc_out = mz_crc32(MZ_CRC32_INIT, out_buf, dlen); if (crc_out != crc) { flb_free(out_buf); flb_error("[gzip] invalid GZip checksum (CRC32)"); return -1; } /* set the uncompressed data */ *out_len = dlen; *out_data = out_buf; return 0; }
null
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ /* Fluent Bit * ========== * Copyright (C) 2019-2020 The Fluent Bit Authors * Copyright (C) 2015-2018 Treasure Data Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <fluent-bit/flb_info.h> #include <fluent-bit/flb_mem.h> #include <fluent-bit/flb_log.h> #include <fluent-bit/flb_gzip.h> #include <miniz/miniz.h> #define FLB_GZIP_HEADER_OFFSET 10 typedef enum { FTEXT = 1, FHCRC = 2, FEXTRA = 4, FNAME = 8, FCOMMENT = 16 } flb_tinf_gzip_flag; static unsigned int read_le16(const unsigned char *p) { return ((unsigned int) p[0]) | ((unsigned int) p[1] << 8); } static unsigned int read_le32(const unsigned char *p) { return ((unsigned int) p[0]) | ((unsigned int) p[1] << 8) | ((unsigned int) p[2] << 16) | ((unsigned int) p[3] << 24); } static inline void gzip_header(void *buf) { uint8_t *p; /* GZip Magic bytes */ p = buf; *p++ = 0x1F; *p++ = 0x8B; *p++ = 8; *p++ = 0; *p++ = 0; *p++ = 0; *p++ = 0; *p++ = 0; *p++ = 0; *p++ = 0xFF; } int flb_gzip_compress(void *in_data, size_t in_len, void **out_data, size_t *out_len) { int flush; int status; int footer_start; uint8_t *pb; size_t out_size; void *out_buf; z_stream strm; mz_ulong crc; /* * GZIP relies on an algorithm with worst-case expansion * of 5 bytes per 32KB data. This means we need to create a variable * length output, that depends on the input length. * See RFC 1951 for details. 
*/ int max_input_expansion = ((int)(in_len / 32000) + 1) * 5; /* * Max compressed size is equal to sum of: * 10 byte header * 8 byte foot * max input expansion * size of input */ out_size = 10 + 8 + max_input_expansion + in_len; out_buf = flb_malloc(out_size); if (!out_buf) { flb_errno(); flb_error("[gzip] could not allocate outgoing buffer"); return -1; } /* Initialize streaming buffer context */ memset(&strm, '\0', sizeof(strm)); strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL; strm.next_in = in_data; strm.avail_in = in_len; strm.total_out = 0; /* Deflate mode */ deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -Z_DEFAULT_WINDOW_BITS, 9, Z_DEFAULT_STRATEGY); /* * Miniz don't support GZip format directly, instead we will: * * - append manual GZip magic bytes * - deflate raw content * - append manual CRC32 data */ gzip_header(out_buf); /* Header offset */ pb = (uint8_t *) out_buf + FLB_GZIP_HEADER_OFFSET; flush = Z_NO_FLUSH; while (1) { strm.next_out = pb + strm.total_out; strm.avail_out = out_size - (pb - (uint8_t *) out_buf); if (strm.avail_in == 0) { flush = Z_FINISH; } status = deflate(&strm, flush); if (status == Z_STREAM_END) { break; } else if (status != Z_OK) { deflateEnd(&strm); return -1; } } if (deflateEnd(&strm) != Z_OK) { flb_free(out_buf); return -1; } *out_len = strm.total_out; /* Construct the gzip checksum (CRC32 footer) */ footer_start = FLB_GZIP_HEADER_OFFSET + *out_len; pb = (uint8_t *) out_buf + footer_start; crc = mz_crc32(MZ_CRC32_INIT, in_data, in_len); *pb++ = crc & 0xFF; *pb++ = (crc >> 8) & 0xFF; *pb++ = (crc >> 16) & 0xFF; *pb++ = (crc >> 24) & 0xFF; *pb++ = in_len & 0xFF; *pb++ = (in_len >> 8) & 0xFF; *pb++ = (in_len >> 16) & 0xFF; *pb++ = (in_len >> 24) & 0xFF; /* Set the real buffer size for the caller */ *out_len += FLB_GZIP_HEADER_OFFSET + 8; *out_data = out_buf; return 0; } /* Uncompress (inflate) GZip data */ int flb_gzip_uncompress(void *in_data, size_t in_len, void **out_data, size_t *out_len) { int status; uint8_t *p; void *out_buf; size_t out_size = 0; void *zip_data; size_t zip_len; unsigned char flg; unsigned int xlen, hcrc; unsigned int dlen, crc; mz_ulong crc_out; mz_stream stream; const unsigned char *start; /* Minimal length: header + crc32 */ if (in_len < 18) { flb_error("[gzip] unexpected content length"); return -1; } /* Magic bytes */ p = in_data; if (p[0] != 0x1F || p[1] != 0x8B) { flb_error("[gzip] invalid magic bytes"); return -1; } if (p[2] != 8) { flb_error("[gzip] invalid method"); return -1; } /* Flag byte */ flg = p[3]; /* Reserved bits */ if (flg & 0xE0) { flb_error("[gzip] invalid flag"); return -1; } /* Skip base header of 10 bytes */ start = p + FLB_GZIP_HEADER_OFFSET; /* Skip extra data if present */ if (flg & FEXTRA) { xlen = read_le16(start); if (xlen > in_len - 12) { flb_error("[gzip] invalid gzip data"); return -1; } start += xlen + 2; } /* Skip file name if present */ if (flg & FNAME) { do { if (start - p >= in_len) { flb_error("[gzip] invalid gzip data (FNAME)"); return -1; } } while (*start++); } /* Skip file comment if present */ if (flg & FCOMMENT) { do { if (start - p >= in_len) { flb_error("[gzip] invalid gzip data (FCOMMENT)"); return -1; } } while (*start++); } /* Check header crc if present */ if (flg & FHCRC) { if (start - p > in_len - 2) { flb_error("[gzip] invalid gzip data (FHRC)"); return -1; } hcrc = read_le16(start); crc = mz_crc32(MZ_CRC32_INIT, p, start - p) & 0x0000FFFF; if (hcrc != crc) { flb_error("[gzip] invalid gzip header CRC"); return -1; } start += 2; } /* Get decompressed 
length */ dlen = read_le32(&p[in_len - 4]); /* Get CRC32 checksum of original data */ crc = read_le32(&p[in_len - 8]); /* Decompress data */ if ((p + in_len) - p < 8) { flb_error("[gzip] invalid gzip CRC32 checksum"); return -1; } /* Allocate outgoing buffer */ out_buf = flb_malloc(dlen); if (!out_buf) { flb_errno(); return -1; } out_size = dlen; /* Map zip content */ zip_data = (uint8_t *) start; zip_len = (p + in_len) - start - 8; memset(&stream, 0, sizeof(stream)); stream.next_in = zip_data; stream.avail_in = zip_len; stream.next_out = out_buf; stream.avail_out = out_size; status = mz_inflateInit2(&stream, -Z_DEFAULT_WINDOW_BITS); if (status != MZ_OK) { flb_free(out_buf); return -1; } status = mz_inflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_inflateEnd(&stream); flb_free(out_buf); return -1; } if (stream.total_out != dlen) { mz_inflateEnd(&stream); flb_free(out_buf); flb_error("[gzip] invalid gzip data size"); return -1; } /* terminate the stream, it's not longer required */ mz_inflateEnd(&stream); /* Validate message CRC vs inflated data CRC */ crc_out = mz_crc32(MZ_CRC32_INIT, out_buf, dlen); if (crc_out != crc) { flb_free(out_buf); flb_error("[gzip] invalid GZip checksum (CRC32)"); return -1; } /* set the uncompressed data */ *out_len = dlen; *out_data = out_buf; return 0; }
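Record 217 (CWE-787, CVE-2020-35963): the only functional difference between the vulnerable `flb_gzip_compress` and the ground-truth version above is the output allocation. The original used a fixed `out_size = in_len + 32`, which deflate can exceed on incompressible input and then write past the heap buffer; the patch sizes the buffer for the worst case of 5 bytes of expansion per started 32 KB block (per RFC 1951) plus the 10-byte gzip header and 8-byte footer. A minimal sketch of that sizing rule, using a hypothetical helper name:

#include <stddef.h>

/* Worst-case gzip output size for `in_len` input bytes:
 * 10-byte header + 8-byte footer + raw-deflate expansion of at most
 * 5 bytes per started 32 KB block, mirroring the formula in the patch. */
static size_t gzip_worst_case(size_t in_len)
{
    size_t blocks = in_len / 32000 + 1;   /* same 32000-byte granularity as the fix */
    return 10 + 8 + blocks * 5 + in_len;
}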
null
218
CWE-787
CVE-2020-35964
/* * Vividas VIV format Demuxer * Copyright (c) 2012 Krzysztof Klinikowski * Copyright (c) 2010 Andrzej Szombierski * based on vivparse Copyright (c) 2007 Måns Rullgård * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * @brief Vividas VIV (.viv) file demuxer * @author Andrzej Szombierski [qq at kuku eu org] (2010-07) * @sa http://wiki.multimedia.cx/index.php?title=Vividas_VIV */ #include "libavutil/intreadwrite.h" #include "avio_internal.h" #include "avformat.h" #include "internal.h" #define MAX_AUDIO_SUBPACKETS 100 typedef struct VIV_SB_block { int size, n_packets; int64_t byte_offset; int64_t packet_offset; } VIV_SB_block; typedef struct VIV_SB_entry { int size, flag; } VIV_SB_entry; typedef struct VIV_AudioSubpacket { int start, pcm_bytes; } VIV_AudioSubpacket; typedef struct VividasDemuxContext { int n_sb_blocks; VIV_SB_block *sb_blocks; int num_audio; uint32_t sb_key; int64_t sb_offset; int current_sb, current_sb_entry; uint8_t *sb_buf; AVIOContext *sb_pb; int n_sb_entries; VIV_SB_entry *sb_entries; int n_audio_subpackets; int current_audio_subpacket; int64_t audio_sample; VIV_AudioSubpacket audio_subpackets[MAX_AUDIO_SUBPACKETS]; } VividasDemuxContext; static int viv_probe(const AVProbeData *p) { if (memcmp(p->buf, "vividas03", 9)) return 0; return AVPROBE_SCORE_MAX; } static const uint8_t keybits[32] = { 20, 52, 111, 10, 27, 71, 142, 53, 82, 138, 1, 78, 86, 121, 183, 85, 105, 152, 39, 140, 172, 11, 64, 144, 155, 6, 71, 163, 186, 49, 126, 43, }; static uint32_t decode_key(uint8_t *buf) { uint32_t key = 0; for (int i = 0; i < 32; i++) { unsigned p = keybits[i]; key |= ((buf[p] >> ((i*5+3)&7)) & 1u) << i; } return key; } static void put_v(uint8_t *p, unsigned v) { if (v>>28) *p++ = ((v>>28)&0x7f)|0x80; if (v>>21) *p++ = ((v>>21)&0x7f)|0x80; if (v>>14) *p++ = ((v>>14)&0x7f)|0x80; if (v>>7) *p++ = ((v>>7)&0x7f)|0x80; } static unsigned recover_key(unsigned char sample[4], unsigned expected_size) { unsigned char plaintext[8] = { 'S', 'B' }; put_v(plaintext+2, expected_size); return AV_RL32(sample) ^ AV_RL32(plaintext); } static void xor_block(void *p1, void *p2, unsigned size, int key, unsigned *key_ptr) { unsigned *d1 = p1; unsigned *d2 = p2; unsigned k = *key_ptr; size >>= 2; while (size > 0) { *d2 = *d1 ^ (HAVE_BIGENDIAN ? 
av_bswap32(k) : k); k += key; d1++; d2++; size--; } *key_ptr = k; } static void decode_block(uint8_t *src, uint8_t *dest, unsigned size, uint32_t key, uint32_t *key_ptr, int align) { unsigned s = size; char tmp[4]; int a2; if (!size) return; align &= 3; a2 = (4 - align) & 3; if (align) { uint32_t tmpkey = *key_ptr - key; if (a2 > s) { a2 = s; avpriv_request_sample(NULL, "tiny aligned block"); } memcpy(tmp + align, src, a2); xor_block(tmp, tmp, 4, key, &tmpkey); memcpy(dest, tmp + align, a2); s -= a2; } if (s >= 4) { xor_block(src + a2, dest + a2, s & ~3, key, key_ptr); s &= 3; } if (s) { size -= s; memcpy(tmp, src + size, s); xor_block(&tmp, &tmp, 4, key, key_ptr); memcpy(dest + size, tmp, s); } } static uint32_t get_v(uint8_t *p, int len) { uint32_t v = 0; const uint8_t *end = p + len; do { if (p >= end || v >= UINT_MAX / 128 - *p) return v; v <<= 7; v += *p & 0x7f; } while (*p++ & 0x80); return v; } static uint8_t *read_vblock(AVIOContext *src, uint32_t *size, uint32_t key, uint32_t *k2, int align) { uint8_t tmp[4]; uint8_t *buf; unsigned n; if (avio_read(src, tmp, 4) != 4) return NULL; decode_block(tmp, tmp, 4, key, k2, align); n = get_v(tmp, 4); if (n < 4) return NULL; buf = av_malloc(n); if (!buf) return NULL; *size = n; n -= 4; memcpy(buf, tmp, 4); if (avio_read(src, buf + 4, n) == n) { decode_block(buf + 4, buf + 4, n, key, k2, align); } else { av_free(buf); buf = NULL; } return buf; } static uint8_t *read_sb_block(AVIOContext *src, unsigned *size, uint32_t *key, unsigned expected_size) { uint8_t *buf; uint8_t ibuf[8], sbuf[8]; uint32_t k2; unsigned n; if (avio_read(src, ibuf, 8) < 8) return NULL; k2 = *key; decode_block(ibuf, sbuf, 8, *key, &k2, 0); n = get_v(sbuf+2, 6); if (sbuf[0] != 'S' || sbuf[1] != 'B' || (expected_size>0 && n != expected_size)) { uint32_t tmpkey = recover_key(ibuf, expected_size); k2 = tmpkey; decode_block(ibuf, sbuf, 8, tmpkey, &k2, 0); n = get_v(sbuf+2, 6); if (sbuf[0] != 'S' || sbuf[1] != 'B' || expected_size != n) return NULL; *key = tmpkey; } if (n < 8) return NULL; buf = av_malloc(n); if (!buf) return NULL; memcpy(buf, sbuf, 8); *size = n; n -= 8; if (avio_read(src, buf+8, n) < n) { av_free(buf); return NULL; } decode_block(buf + 8, buf + 8, n, *key, &k2, 0); return buf; } static int track_header(VividasDemuxContext *viv, AVFormatContext *s, uint8_t *buf, int size) { int i, j, ret; int64_t off; int val_1; int num_video; AVIOContext pb0, *pb = &pb0; ffio_init_context(pb, buf, size, 0, NULL, NULL, NULL, NULL); ffio_read_varlen(pb); // track_header_len avio_r8(pb); // '1' val_1 = ffio_read_varlen(pb); for (i=0;i<val_1;i++) { int c = avio_r8(pb); if (avio_feof(pb)) return AVERROR_EOF; for (j=0;j<c;j++) { if (avio_feof(pb)) return AVERROR_EOF; avio_r8(pb); // val_3 avio_r8(pb); // val_4 } } avio_r8(pb); // num_streams off = avio_tell(pb); off += ffio_read_varlen(pb); // val_5 avio_r8(pb); // '2' num_video = avio_r8(pb); avio_seek(pb, off, SEEK_SET); if (num_video != 1) { av_log(s, AV_LOG_ERROR, "number of video tracks %d is not 1\n", num_video); return AVERROR_PATCHWELCOME; } for (i = 0; i < num_video; i++) { AVStream *st = avformat_new_stream(s, NULL); int num, den; if (!st) return AVERROR(ENOMEM); st->id = i; st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; st->codecpar->codec_id = AV_CODEC_ID_VP6; off = avio_tell(pb); off += ffio_read_varlen(pb); avio_r8(pb); // '3' avio_r8(pb); // val_7 num = avio_rl32(pb); // frame_time den = avio_rl32(pb); // time_base avpriv_set_pts_info(st, 64, num, den); st->nb_frames = avio_rl32(pb); // n frames st->codecpar->width = 
avio_rl16(pb); // width st->codecpar->height = avio_rl16(pb); // height avio_r8(pb); // val_8 avio_rl32(pb); // val_9 avio_seek(pb, off, SEEK_SET); } off = avio_tell(pb); off += ffio_read_varlen(pb); // val_10 avio_r8(pb); // '4' viv->num_audio = avio_r8(pb); avio_seek(pb, off, SEEK_SET); if (viv->num_audio != 1) av_log(s, AV_LOG_WARNING, "number of audio tracks %d is not 1\n", viv->num_audio); for(i=0;i<viv->num_audio;i++) { int q; AVStream *st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); st->id = num_video + i; st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; st->codecpar->codec_id = AV_CODEC_ID_VORBIS; off = avio_tell(pb); off += ffio_read_varlen(pb); // length avio_r8(pb); // '5' avio_r8(pb); //codec_id avio_rl16(pb); //codec_subid st->codecpar->channels = avio_rl16(pb); // channels st->codecpar->sample_rate = avio_rl32(pb); // sample_rate avio_seek(pb, 10, SEEK_CUR); // data_1 q = avio_r8(pb); avio_seek(pb, q, SEEK_CUR); // data_2 avio_r8(pb); // zeropad if (avio_tell(pb) < off) { int num_data; int xd_size = 0; int data_len[256]; int offset = 1; uint8_t *p; ffio_read_varlen(pb); // val_13 avio_r8(pb); // '19' ffio_read_varlen(pb); // len_3 num_data = avio_r8(pb); for (j = 0; j < num_data; j++) { uint64_t len = ffio_read_varlen(pb); if (len > INT_MAX/2 - xd_size) { return AVERROR_INVALIDDATA; } data_len[j] = len; xd_size += len; } ret = ff_alloc_extradata(st->codecpar, 64 + xd_size + xd_size / 255); if (ret < 0) return ret; p = st->codecpar->extradata; p[0] = 2; for (j = 0; j < num_data - 1; j++) { unsigned delta = av_xiphlacing(&p[offset], data_len[j]); if (delta > data_len[j]) { return AVERROR_INVALIDDATA; } offset += delta; } for (j = 0; j < num_data; j++) { int ret = avio_read(pb, &p[offset], data_len[j]); if (ret < data_len[j]) { st->codecpar->extradata_size = 0; av_freep(&st->codecpar->extradata); break; } offset += data_len[j]; } if (offset < st->codecpar->extradata_size) st->codecpar->extradata_size = offset; } } return 0; } static int track_index(VividasDemuxContext *viv, AVFormatContext *s, uint8_t *buf, unsigned size) { int64_t off; int64_t poff; int maxnp=0; AVIOContext pb0, *pb = &pb0; int i; int64_t filesize = avio_size(s->pb); uint64_t n_sb_blocks_tmp; ffio_init_context(pb, buf, size, 0, NULL, NULL, NULL, NULL); ffio_read_varlen(pb); // track_index_len avio_r8(pb); // 'c' n_sb_blocks_tmp = ffio_read_varlen(pb); if (n_sb_blocks_tmp > size / 2) return AVERROR_INVALIDDATA; viv->sb_blocks = av_calloc(n_sb_blocks_tmp, sizeof(*viv->sb_blocks)); if (!viv->sb_blocks) { return AVERROR(ENOMEM); } viv->n_sb_blocks = n_sb_blocks_tmp; off = 0; poff = 0; for (i = 0; i < viv->n_sb_blocks; i++) { uint64_t size_tmp = ffio_read_varlen(pb); uint64_t n_packets_tmp = ffio_read_varlen(pb); if (size_tmp > INT_MAX || n_packets_tmp > INT_MAX) return AVERROR_INVALIDDATA; viv->sb_blocks[i].byte_offset = off; viv->sb_blocks[i].packet_offset = poff; viv->sb_blocks[i].size = size_tmp; viv->sb_blocks[i].n_packets = n_packets_tmp; off += viv->sb_blocks[i].size; poff += viv->sb_blocks[i].n_packets; if (maxnp < viv->sb_blocks[i].n_packets) maxnp = viv->sb_blocks[i].n_packets; } if (filesize > 0 && poff > filesize) return AVERROR_INVALIDDATA; viv->sb_entries = av_calloc(maxnp, sizeof(VIV_SB_entry)); if (!viv->sb_entries) return AVERROR(ENOMEM); return 0; } static void load_sb_block(AVFormatContext *s, VividasDemuxContext *viv, unsigned expected_size) { uint32_t size = 0; int i; AVIOContext *pb = 0; if (viv->sb_pb) { av_free(viv->sb_pb); viv->sb_pb = NULL; } if (viv->sb_buf) 
av_free(viv->sb_buf); viv->sb_buf = read_sb_block(s->pb, &size, &viv->sb_key, expected_size); if (!viv->sb_buf) { return; } pb = avio_alloc_context(viv->sb_buf, size, 0, NULL, NULL, NULL, NULL); if (!pb) return; viv->sb_pb = pb; avio_r8(pb); // 'S' avio_r8(pb); // 'B' ffio_read_varlen(pb); // size avio_r8(pb); // junk ffio_read_varlen(pb); // first packet viv->n_sb_entries = viv->sb_blocks[viv->current_sb].n_packets; for (i = 0; i < viv->n_sb_entries; i++) { viv->sb_entries[i].size = ffio_read_varlen(pb); viv->sb_entries[i].flag = avio_r8(pb); } ffio_read_varlen(pb); avio_r8(pb); viv->current_sb_entry = 0; } static int viv_read_header(AVFormatContext *s) { VividasDemuxContext *viv = s->priv_data; AVIOContext *pb = s->pb; int64_t header_end; int num_tracks; uint32_t key, k2; uint32_t v; uint8_t keybuffer[187]; uint32_t b22_size = 0; uint32_t b22_key = 0; uint8_t *buf = 0; int ret; avio_skip(pb, 9); header_end = avio_tell(pb); header_end += ffio_read_varlen(pb); num_tracks = avio_r8(pb); if (num_tracks != 1) { av_log(s, AV_LOG_ERROR, "number of tracks %d is not 1\n", num_tracks); return AVERROR(EINVAL); } v = avio_r8(pb); avio_seek(pb, v, SEEK_CUR); avio_read(pb, keybuffer, 187); key = decode_key(keybuffer); viv->sb_key = key; avio_rl32(pb); for (;;) { int64_t here = avio_tell(pb); int block_len, block_type; if (here >= header_end) break; block_len = ffio_read_varlen(pb); if (avio_feof(pb) || block_len <= 0) return AVERROR_INVALIDDATA; block_type = avio_r8(pb); if (block_type == 22) { avio_read(pb, keybuffer, 187); b22_key = decode_key(keybuffer); b22_size = avio_rl32(pb); } avio_seek(pb, here + block_len, SEEK_SET); } if (b22_size) { k2 = b22_key; buf = read_vblock(pb, &v, b22_key, &k2, 0); if (!buf) return AVERROR(EIO); av_free(buf); } k2 = key; buf = read_vblock(pb, &v, key, &k2, 0); if (!buf) return AVERROR(EIO); ret = track_header(viv, s, buf, v); av_free(buf); if (ret < 0) return ret; buf = read_vblock(pb, &v, key, &k2, v); if (!buf) return AVERROR(EIO); ret = track_index(viv, s, buf, v); av_free(buf); if (ret < 0) goto fail; viv->sb_offset = avio_tell(pb); if (viv->n_sb_blocks > 0) { viv->current_sb = 0; load_sb_block(s, viv, viv->sb_blocks[0].size); } else { viv->current_sb = -1; } return 0; fail: av_freep(&viv->sb_blocks); return ret; } static int viv_read_packet(AVFormatContext *s, AVPacket *pkt) { VividasDemuxContext *viv = s->priv_data; AVIOContext *pb; int64_t off; int ret; if (!viv->sb_pb) return AVERROR(EIO); if (avio_feof(viv->sb_pb)) return AVERROR_EOF; if (viv->current_audio_subpacket < viv->n_audio_subpackets) { AVStream *astream; int size = viv->audio_subpackets[viv->current_audio_subpacket+1].start - viv->audio_subpackets[viv->current_audio_subpacket].start; pb = viv->sb_pb; ret = av_get_packet(pb, pkt, size); if (ret < 0) return ret; pkt->pos += viv->sb_offset + viv->sb_blocks[viv->current_sb].byte_offset; pkt->stream_index = 1; astream = s->streams[pkt->stream_index]; pkt->pts = av_rescale_q(viv->audio_sample, av_make_q(1, astream->codecpar->sample_rate), astream->time_base); viv->audio_sample += viv->audio_subpackets[viv->current_audio_subpacket].pcm_bytes / 2 / astream->codecpar->channels; pkt->flags |= AV_PKT_FLAG_KEY; viv->current_audio_subpacket++; return 0; } if (viv->current_sb_entry >= viv->n_sb_entries) { if (viv->current_sb+1 >= viv->n_sb_blocks) return AVERROR(EIO); viv->current_sb++; load_sb_block(s, viv, 0); viv->current_sb_entry = 0; } pb = viv->sb_pb; if (!pb) return AVERROR(EIO); off = avio_tell(pb); if (viv->current_sb_entry >= viv->n_sb_entries) 
return AVERROR_INVALIDDATA; off += viv->sb_entries[viv->current_sb_entry].size; if (viv->sb_entries[viv->current_sb_entry].flag == 0) { uint64_t v_size = ffio_read_varlen(pb); if (!viv->num_audio) return AVERROR_INVALIDDATA; ffio_read_varlen(pb); if (v_size > INT_MAX || !v_size) return AVERROR_INVALIDDATA; ret = av_get_packet(pb, pkt, v_size); if (ret < 0) return ret; pkt->pos += viv->sb_offset + viv->sb_blocks[viv->current_sb].byte_offset; pkt->pts = viv->sb_blocks[viv->current_sb].packet_offset + viv->current_sb_entry; pkt->flags |= (pkt->data[0]&0x80)?0:AV_PKT_FLAG_KEY; pkt->stream_index = 0; for (int i = 0; i < MAX_AUDIO_SUBPACKETS - 1; i++) { int start, pcm_bytes; start = ffio_read_varlen(pb); pcm_bytes = ffio_read_varlen(pb); if (i > 0 && start == 0) break; viv->n_audio_subpackets = i + 1; viv->audio_subpackets[i].start = start; viv->audio_subpackets[i].pcm_bytes = pcm_bytes; } viv->audio_subpackets[viv->n_audio_subpackets].start = (int)(off - avio_tell(pb)); viv->current_audio_subpacket = 0; } else { uint64_t v_size = ffio_read_varlen(pb); if (v_size > INT_MAX || !v_size) return AVERROR_INVALIDDATA; ret = av_get_packet(pb, pkt, v_size); if (ret < 0) return ret; pkt->pos += viv->sb_offset + viv->sb_blocks[viv->current_sb].byte_offset; pkt->pts = viv->sb_blocks[viv->current_sb].packet_offset + viv->current_sb_entry; pkt->flags |= (pkt->data[0] & 0x80) ? 0 : AV_PKT_FLAG_KEY; pkt->stream_index = 0; } viv->current_sb_entry++; return 0; } static int viv_read_close(AVFormatContext *s) { VividasDemuxContext *viv = s->priv_data; av_freep(&viv->sb_pb); av_freep(&viv->sb_buf); av_freep(&viv->sb_blocks); av_freep(&viv->sb_entries); return 0; } static int viv_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) { VividasDemuxContext *viv = s->priv_data; int64_t frame; if (stream_index == 0) frame = timestamp; else frame = av_rescale_q(timestamp, s->streams[0]->time_base, s->streams[stream_index]->time_base); for (int i = 0; i < viv->n_sb_blocks; i++) { if (frame >= viv->sb_blocks[i].packet_offset && frame < viv->sb_blocks[i].packet_offset + viv->sb_blocks[i].n_packets) { // flush audio packet queue viv->current_audio_subpacket = 0; viv->n_audio_subpackets = 0; viv->current_sb = i; // seek to ith sb block avio_seek(s->pb, viv->sb_offset + viv->sb_blocks[i].byte_offset, SEEK_SET); // load the block load_sb_block(s, viv, 0); // most problematic part: guess audio offset viv->audio_sample = av_rescale_q(viv->sb_blocks[i].packet_offset, av_make_q(s->streams[1]->codecpar->sample_rate, 1), av_inv_q(s->streams[0]->time_base)); // hand-tuned 1.s a/v offset viv->audio_sample += s->streams[1]->codecpar->sample_rate; viv->current_sb_entry = 0; return 1; } } return 0; } AVInputFormat ff_vividas_demuxer = { .name = "vividas", .long_name = NULL_IF_CONFIG_SMALL("Vividas VIV"), .priv_data_size = sizeof(VividasDemuxContext), .read_probe = viv_probe, .read_header = viv_read_header, .read_packet = viv_read_packet, .read_close = viv_read_close, .read_seek = viv_read_seek, };
null
/* * Vividas VIV format Demuxer * Copyright (c) 2012 Krzysztof Klinikowski * Copyright (c) 2010 Andrzej Szombierski * based on vivparse Copyright (c) 2007 Måns Rullgård * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * @brief Vividas VIV (.viv) file demuxer * @author Andrzej Szombierski [qq at kuku eu org] (2010-07) * @sa http://wiki.multimedia.cx/index.php?title=Vividas_VIV */ #include "libavutil/avassert.h" #include "libavutil/intreadwrite.h" #include "avio_internal.h" #include "avformat.h" #include "internal.h" #define MAX_AUDIO_SUBPACKETS 100 typedef struct VIV_SB_block { int size, n_packets; int64_t byte_offset; int64_t packet_offset; } VIV_SB_block; typedef struct VIV_SB_entry { int size, flag; } VIV_SB_entry; typedef struct VIV_AudioSubpacket { int start, pcm_bytes; } VIV_AudioSubpacket; typedef struct VividasDemuxContext { int n_sb_blocks; VIV_SB_block *sb_blocks; int num_audio; uint32_t sb_key; int64_t sb_offset; int current_sb, current_sb_entry; uint8_t *sb_buf; AVIOContext *sb_pb; int n_sb_entries; VIV_SB_entry *sb_entries; int n_audio_subpackets; int current_audio_subpacket; int64_t audio_sample; VIV_AudioSubpacket audio_subpackets[MAX_AUDIO_SUBPACKETS]; } VividasDemuxContext; static int viv_probe(const AVProbeData *p) { if (memcmp(p->buf, "vividas03", 9)) return 0; return AVPROBE_SCORE_MAX; } static const uint8_t keybits[32] = { 20, 52, 111, 10, 27, 71, 142, 53, 82, 138, 1, 78, 86, 121, 183, 85, 105, 152, 39, 140, 172, 11, 64, 144, 155, 6, 71, 163, 186, 49, 126, 43, }; static uint32_t decode_key(uint8_t *buf) { uint32_t key = 0; for (int i = 0; i < 32; i++) { unsigned p = keybits[i]; key |= ((buf[p] >> ((i*5+3)&7)) & 1u) << i; } return key; } static void put_v(uint8_t *p, unsigned v) { if (v>>28) *p++ = ((v>>28)&0x7f)|0x80; if (v>>21) *p++ = ((v>>21)&0x7f)|0x80; if (v>>14) *p++ = ((v>>14)&0x7f)|0x80; if (v>>7) *p++ = ((v>>7)&0x7f)|0x80; } static unsigned recover_key(unsigned char sample[4], unsigned expected_size) { unsigned char plaintext[8] = { 'S', 'B' }; put_v(plaintext+2, expected_size); return AV_RL32(sample) ^ AV_RL32(plaintext); } static void xor_block(void *p1, void *p2, unsigned size, int key, unsigned *key_ptr) { unsigned *d1 = p1; unsigned *d2 = p2; unsigned k = *key_ptr; size >>= 2; while (size > 0) { *d2 = *d1 ^ (HAVE_BIGENDIAN ? 
av_bswap32(k) : k); k += key; d1++; d2++; size--; } *key_ptr = k; } static void decode_block(uint8_t *src, uint8_t *dest, unsigned size, uint32_t key, uint32_t *key_ptr, int align) { unsigned s = size; char tmp[4]; int a2; if (!size) return; align &= 3; a2 = (4 - align) & 3; if (align) { uint32_t tmpkey = *key_ptr - key; if (a2 > s) { a2 = s; avpriv_request_sample(NULL, "tiny aligned block"); } memcpy(tmp + align, src, a2); xor_block(tmp, tmp, 4, key, &tmpkey); memcpy(dest, tmp + align, a2); s -= a2; } if (s >= 4) { xor_block(src + a2, dest + a2, s & ~3, key, key_ptr); s &= 3; } if (s) { size -= s; memcpy(tmp, src + size, s); xor_block(&tmp, &tmp, 4, key, key_ptr); memcpy(dest + size, tmp, s); } } static uint32_t get_v(uint8_t *p, int len) { uint32_t v = 0; const uint8_t *end = p + len; do { if (p >= end || v >= UINT_MAX / 128 - *p) return v; v <<= 7; v += *p & 0x7f; } while (*p++ & 0x80); return v; } static uint8_t *read_vblock(AVIOContext *src, uint32_t *size, uint32_t key, uint32_t *k2, int align) { uint8_t tmp[4]; uint8_t *buf; unsigned n; if (avio_read(src, tmp, 4) != 4) return NULL; decode_block(tmp, tmp, 4, key, k2, align); n = get_v(tmp, 4); if (n < 4) return NULL; buf = av_malloc(n); if (!buf) return NULL; *size = n; n -= 4; memcpy(buf, tmp, 4); if (avio_read(src, buf + 4, n) == n) { decode_block(buf + 4, buf + 4, n, key, k2, align); } else { av_free(buf); buf = NULL; } return buf; } static uint8_t *read_sb_block(AVIOContext *src, unsigned *size, uint32_t *key, unsigned expected_size) { uint8_t *buf; uint8_t ibuf[8], sbuf[8]; uint32_t k2; unsigned n; if (avio_read(src, ibuf, 8) < 8) return NULL; k2 = *key; decode_block(ibuf, sbuf, 8, *key, &k2, 0); n = get_v(sbuf+2, 6); if (sbuf[0] != 'S' || sbuf[1] != 'B' || (expected_size>0 && n != expected_size)) { uint32_t tmpkey = recover_key(ibuf, expected_size); k2 = tmpkey; decode_block(ibuf, sbuf, 8, tmpkey, &k2, 0); n = get_v(sbuf+2, 6); if (sbuf[0] != 'S' || sbuf[1] != 'B' || expected_size != n) return NULL; *key = tmpkey; } if (n < 8) return NULL; buf = av_malloc(n); if (!buf) return NULL; memcpy(buf, sbuf, 8); *size = n; n -= 8; if (avio_read(src, buf+8, n) < n) { av_free(buf); return NULL; } decode_block(buf + 8, buf + 8, n, *key, &k2, 0); return buf; } static int track_header(VividasDemuxContext *viv, AVFormatContext *s, uint8_t *buf, int size) { int i, j, ret; int64_t off; int val_1; int num_video; AVIOContext pb0, *pb = &pb0; ffio_init_context(pb, buf, size, 0, NULL, NULL, NULL, NULL); ffio_read_varlen(pb); // track_header_len avio_r8(pb); // '1' val_1 = ffio_read_varlen(pb); for (i=0;i<val_1;i++) { int c = avio_r8(pb); if (avio_feof(pb)) return AVERROR_EOF; for (j=0;j<c;j++) { if (avio_feof(pb)) return AVERROR_EOF; avio_r8(pb); // val_3 avio_r8(pb); // val_4 } } avio_r8(pb); // num_streams off = avio_tell(pb); off += ffio_read_varlen(pb); // val_5 avio_r8(pb); // '2' num_video = avio_r8(pb); avio_seek(pb, off, SEEK_SET); if (num_video != 1) { av_log(s, AV_LOG_ERROR, "number of video tracks %d is not 1\n", num_video); return AVERROR_PATCHWELCOME; } for (i = 0; i < num_video; i++) { AVStream *st = avformat_new_stream(s, NULL); int num, den; if (!st) return AVERROR(ENOMEM); st->id = i; st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; st->codecpar->codec_id = AV_CODEC_ID_VP6; off = avio_tell(pb); off += ffio_read_varlen(pb); avio_r8(pb); // '3' avio_r8(pb); // val_7 num = avio_rl32(pb); // frame_time den = avio_rl32(pb); // time_base avpriv_set_pts_info(st, 64, num, den); st->nb_frames = avio_rl32(pb); // n frames st->codecpar->width = 
avio_rl16(pb); // width st->codecpar->height = avio_rl16(pb); // height avio_r8(pb); // val_8 avio_rl32(pb); // val_9 avio_seek(pb, off, SEEK_SET); } off = avio_tell(pb); off += ffio_read_varlen(pb); // val_10 avio_r8(pb); // '4' viv->num_audio = avio_r8(pb); avio_seek(pb, off, SEEK_SET); if (viv->num_audio != 1) av_log(s, AV_LOG_WARNING, "number of audio tracks %d is not 1\n", viv->num_audio); for(i=0;i<viv->num_audio;i++) { int q; AVStream *st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); st->id = num_video + i; st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; st->codecpar->codec_id = AV_CODEC_ID_VORBIS; off = avio_tell(pb); off += ffio_read_varlen(pb); // length avio_r8(pb); // '5' avio_r8(pb); //codec_id avio_rl16(pb); //codec_subid st->codecpar->channels = avio_rl16(pb); // channels st->codecpar->sample_rate = avio_rl32(pb); // sample_rate avio_seek(pb, 10, SEEK_CUR); // data_1 q = avio_r8(pb); avio_seek(pb, q, SEEK_CUR); // data_2 avio_r8(pb); // zeropad if (avio_tell(pb) < off) { int num_data; int xd_size = 1; int data_len[256]; int offset = 1; uint8_t *p; ffio_read_varlen(pb); // val_13 avio_r8(pb); // '19' ffio_read_varlen(pb); // len_3 num_data = avio_r8(pb); for (j = 0; j < num_data; j++) { uint64_t len = ffio_read_varlen(pb); if (len > INT_MAX/2 - xd_size) { return AVERROR_INVALIDDATA; } data_len[j] = len; xd_size += len + 1 + len/255; } ret = ff_alloc_extradata(st->codecpar, xd_size); if (ret < 0) return ret; p = st->codecpar->extradata; p[0] = 2; for (j = 0; j < num_data - 1; j++) { unsigned delta = av_xiphlacing(&p[offset], data_len[j]); av_assert0(delta <= xd_size - offset); offset += delta; } for (j = 0; j < num_data; j++) { int ret = avio_read(pb, &p[offset], data_len[j]); if (ret < data_len[j]) { st->codecpar->extradata_size = 0; av_freep(&st->codecpar->extradata); break; } av_assert0(data_len[j] <= xd_size - offset); offset += data_len[j]; } if (offset < st->codecpar->extradata_size) st->codecpar->extradata_size = offset; } } return 0; } static int track_index(VividasDemuxContext *viv, AVFormatContext *s, uint8_t *buf, unsigned size) { int64_t off; int64_t poff; int maxnp=0; AVIOContext pb0, *pb = &pb0; int i; int64_t filesize = avio_size(s->pb); uint64_t n_sb_blocks_tmp; ffio_init_context(pb, buf, size, 0, NULL, NULL, NULL, NULL); ffio_read_varlen(pb); // track_index_len avio_r8(pb); // 'c' n_sb_blocks_tmp = ffio_read_varlen(pb); if (n_sb_blocks_tmp > size / 2) return AVERROR_INVALIDDATA; viv->sb_blocks = av_calloc(n_sb_blocks_tmp, sizeof(*viv->sb_blocks)); if (!viv->sb_blocks) { return AVERROR(ENOMEM); } viv->n_sb_blocks = n_sb_blocks_tmp; off = 0; poff = 0; for (i = 0; i < viv->n_sb_blocks; i++) { uint64_t size_tmp = ffio_read_varlen(pb); uint64_t n_packets_tmp = ffio_read_varlen(pb); if (size_tmp > INT_MAX || n_packets_tmp > INT_MAX) return AVERROR_INVALIDDATA; viv->sb_blocks[i].byte_offset = off; viv->sb_blocks[i].packet_offset = poff; viv->sb_blocks[i].size = size_tmp; viv->sb_blocks[i].n_packets = n_packets_tmp; off += viv->sb_blocks[i].size; poff += viv->sb_blocks[i].n_packets; if (maxnp < viv->sb_blocks[i].n_packets) maxnp = viv->sb_blocks[i].n_packets; } if (filesize > 0 && poff > filesize) return AVERROR_INVALIDDATA; viv->sb_entries = av_calloc(maxnp, sizeof(VIV_SB_entry)); if (!viv->sb_entries) return AVERROR(ENOMEM); return 0; } static void load_sb_block(AVFormatContext *s, VividasDemuxContext *viv, unsigned expected_size) { uint32_t size = 0; int i; AVIOContext *pb = 0; if (viv->sb_pb) { av_free(viv->sb_pb); viv->sb_pb = NULL; } if 
(viv->sb_buf) av_free(viv->sb_buf); viv->sb_buf = read_sb_block(s->pb, &size, &viv->sb_key, expected_size); if (!viv->sb_buf) { return; } pb = avio_alloc_context(viv->sb_buf, size, 0, NULL, NULL, NULL, NULL); if (!pb) return; viv->sb_pb = pb; avio_r8(pb); // 'S' avio_r8(pb); // 'B' ffio_read_varlen(pb); // size avio_r8(pb); // junk ffio_read_varlen(pb); // first packet viv->n_sb_entries = viv->sb_blocks[viv->current_sb].n_packets; for (i = 0; i < viv->n_sb_entries; i++) { viv->sb_entries[i].size = ffio_read_varlen(pb); viv->sb_entries[i].flag = avio_r8(pb); } ffio_read_varlen(pb); avio_r8(pb); viv->current_sb_entry = 0; } static int viv_read_header(AVFormatContext *s) { VividasDemuxContext *viv = s->priv_data; AVIOContext *pb = s->pb; int64_t header_end; int num_tracks; uint32_t key, k2; uint32_t v; uint8_t keybuffer[187]; uint32_t b22_size = 0; uint32_t b22_key = 0; uint8_t *buf = 0; int ret; avio_skip(pb, 9); header_end = avio_tell(pb); header_end += ffio_read_varlen(pb); num_tracks = avio_r8(pb); if (num_tracks != 1) { av_log(s, AV_LOG_ERROR, "number of tracks %d is not 1\n", num_tracks); return AVERROR(EINVAL); } v = avio_r8(pb); avio_seek(pb, v, SEEK_CUR); avio_read(pb, keybuffer, 187); key = decode_key(keybuffer); viv->sb_key = key; avio_rl32(pb); for (;;) { int64_t here = avio_tell(pb); int block_len, block_type; if (here >= header_end) break; block_len = ffio_read_varlen(pb); if (avio_feof(pb) || block_len <= 0) return AVERROR_INVALIDDATA; block_type = avio_r8(pb); if (block_type == 22) { avio_read(pb, keybuffer, 187); b22_key = decode_key(keybuffer); b22_size = avio_rl32(pb); } avio_seek(pb, here + block_len, SEEK_SET); } if (b22_size) { k2 = b22_key; buf = read_vblock(pb, &v, b22_key, &k2, 0); if (!buf) return AVERROR(EIO); av_free(buf); } k2 = key; buf = read_vblock(pb, &v, key, &k2, 0); if (!buf) return AVERROR(EIO); ret = track_header(viv, s, buf, v); av_free(buf); if (ret < 0) return ret; buf = read_vblock(pb, &v, key, &k2, v); if (!buf) return AVERROR(EIO); ret = track_index(viv, s, buf, v); av_free(buf); if (ret < 0) goto fail; viv->sb_offset = avio_tell(pb); if (viv->n_sb_blocks > 0) { viv->current_sb = 0; load_sb_block(s, viv, viv->sb_blocks[0].size); } else { viv->current_sb = -1; } return 0; fail: av_freep(&viv->sb_blocks); return ret; } static int viv_read_packet(AVFormatContext *s, AVPacket *pkt) { VividasDemuxContext *viv = s->priv_data; AVIOContext *pb; int64_t off; int ret; if (!viv->sb_pb) return AVERROR(EIO); if (avio_feof(viv->sb_pb)) return AVERROR_EOF; if (viv->current_audio_subpacket < viv->n_audio_subpackets) { AVStream *astream; int size = viv->audio_subpackets[viv->current_audio_subpacket+1].start - viv->audio_subpackets[viv->current_audio_subpacket].start; pb = viv->sb_pb; ret = av_get_packet(pb, pkt, size); if (ret < 0) return ret; pkt->pos += viv->sb_offset + viv->sb_blocks[viv->current_sb].byte_offset; pkt->stream_index = 1; astream = s->streams[pkt->stream_index]; pkt->pts = av_rescale_q(viv->audio_sample, av_make_q(1, astream->codecpar->sample_rate), astream->time_base); viv->audio_sample += viv->audio_subpackets[viv->current_audio_subpacket].pcm_bytes / 2 / astream->codecpar->channels; pkt->flags |= AV_PKT_FLAG_KEY; viv->current_audio_subpacket++; return 0; } if (viv->current_sb_entry >= viv->n_sb_entries) { if (viv->current_sb+1 >= viv->n_sb_blocks) return AVERROR(EIO); viv->current_sb++; load_sb_block(s, viv, 0); viv->current_sb_entry = 0; } pb = viv->sb_pb; if (!pb) return AVERROR(EIO); off = avio_tell(pb); if (viv->current_sb_entry >= 
viv->n_sb_entries) return AVERROR_INVALIDDATA; off += viv->sb_entries[viv->current_sb_entry].size; if (viv->sb_entries[viv->current_sb_entry].flag == 0) { uint64_t v_size = ffio_read_varlen(pb); if (!viv->num_audio) return AVERROR_INVALIDDATA; ffio_read_varlen(pb); if (v_size > INT_MAX || !v_size) return AVERROR_INVALIDDATA; ret = av_get_packet(pb, pkt, v_size); if (ret < 0) return ret; pkt->pos += viv->sb_offset + viv->sb_blocks[viv->current_sb].byte_offset; pkt->pts = viv->sb_blocks[viv->current_sb].packet_offset + viv->current_sb_entry; pkt->flags |= (pkt->data[0]&0x80)?0:AV_PKT_FLAG_KEY; pkt->stream_index = 0; for (int i = 0; i < MAX_AUDIO_SUBPACKETS - 1; i++) { int start, pcm_bytes; start = ffio_read_varlen(pb); pcm_bytes = ffio_read_varlen(pb); if (i > 0 && start == 0) break; viv->n_audio_subpackets = i + 1; viv->audio_subpackets[i].start = start; viv->audio_subpackets[i].pcm_bytes = pcm_bytes; } viv->audio_subpackets[viv->n_audio_subpackets].start = (int)(off - avio_tell(pb)); viv->current_audio_subpacket = 0; } else { uint64_t v_size = ffio_read_varlen(pb); if (v_size > INT_MAX || !v_size) return AVERROR_INVALIDDATA; ret = av_get_packet(pb, pkt, v_size); if (ret < 0) return ret; pkt->pos += viv->sb_offset + viv->sb_blocks[viv->current_sb].byte_offset; pkt->pts = viv->sb_blocks[viv->current_sb].packet_offset + viv->current_sb_entry; pkt->flags |= (pkt->data[0] & 0x80) ? 0 : AV_PKT_FLAG_KEY; pkt->stream_index = 0; } viv->current_sb_entry++; return 0; } static int viv_read_close(AVFormatContext *s) { VividasDemuxContext *viv = s->priv_data; av_freep(&viv->sb_pb); av_freep(&viv->sb_buf); av_freep(&viv->sb_blocks); av_freep(&viv->sb_entries); return 0; } static int viv_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags) { VividasDemuxContext *viv = s->priv_data; int64_t frame; if (stream_index == 0) frame = timestamp; else frame = av_rescale_q(timestamp, s->streams[0]->time_base, s->streams[stream_index]->time_base); for (int i = 0; i < viv->n_sb_blocks; i++) { if (frame >= viv->sb_blocks[i].packet_offset && frame < viv->sb_blocks[i].packet_offset + viv->sb_blocks[i].n_packets) { // flush audio packet queue viv->current_audio_subpacket = 0; viv->n_audio_subpackets = 0; viv->current_sb = i; // seek to ith sb block avio_seek(s->pb, viv->sb_offset + viv->sb_blocks[i].byte_offset, SEEK_SET); // load the block load_sb_block(s, viv, 0); // most problematic part: guess audio offset viv->audio_sample = av_rescale_q(viv->sb_blocks[i].packet_offset, av_make_q(s->streams[1]->codecpar->sample_rate, 1), av_inv_q(s->streams[0]->time_base)); // hand-tuned 1.s a/v offset viv->audio_sample += s->streams[1]->codecpar->sample_rate; viv->current_sb_entry = 0; return 1; } } return 0; } AVInputFormat ff_vividas_demuxer = { .name = "vividas", .long_name = NULL_IF_CONFIG_SMALL("Vividas VIV"), .priv_data_size = sizeof(VividasDemuxContext), .read_probe = viv_probe, .read_header = viv_read_header, .read_packet = viv_read_packet, .read_close = viv_read_close, .read_seek = viv_read_seek, };
null
219
CWE-787
CVE-2020-35965
/* * OpenEXR (.exr) image decoder * Copyright (c) 2006 Industrial Light & Magic, a division of Lucas Digital Ltd. LLC * Copyright (c) 2009 Jimmy Christensen * * B44/B44A, Tile, UINT32 added by Jokyo Images support by CNC - French National Center for Cinema * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * OpenEXR decoder * @author Jimmy Christensen * * For more information on the OpenEXR format, visit: * http://openexr.com/ * * exr_half2float() is credited to Aaftab Munshi, Dan Ginsburg, Dave Shreiner. */ #include <float.h> #include <zlib.h> #include "libavutil/avassert.h" #include "libavutil/common.h" #include "libavutil/imgutils.h" #include "libavutil/intfloat.h" #include "libavutil/avstring.h" #include "libavutil/opt.h" #include "libavutil/color_utils.h" #include "avcodec.h" #include "bytestream.h" #if HAVE_BIGENDIAN #include "bswapdsp.h" #endif #include "exrdsp.h" #include "get_bits.h" #include "internal.h" #include "mathops.h" #include "thread.h" enum ExrCompr { EXR_RAW, EXR_RLE, EXR_ZIP1, EXR_ZIP16, EXR_PIZ, EXR_PXR24, EXR_B44, EXR_B44A, EXR_DWA, EXR_DWB, EXR_UNKN, }; enum ExrPixelType { EXR_UINT, EXR_HALF, EXR_FLOAT, EXR_UNKNOWN, }; enum ExrTileLevelMode { EXR_TILE_LEVEL_ONE, EXR_TILE_LEVEL_MIPMAP, EXR_TILE_LEVEL_RIPMAP, EXR_TILE_LEVEL_UNKNOWN, }; enum ExrTileLevelRound { EXR_TILE_ROUND_UP, EXR_TILE_ROUND_DOWN, EXR_TILE_ROUND_UNKNOWN, }; typedef struct EXRChannel { int xsub, ysub; enum ExrPixelType pixel_type; } EXRChannel; typedef struct EXRTileAttribute { int32_t xSize; int32_t ySize; enum ExrTileLevelMode level_mode; enum ExrTileLevelRound level_round; } EXRTileAttribute; typedef struct EXRThreadData { uint8_t *uncompressed_data; int uncompressed_size; uint8_t *tmp; int tmp_size; uint8_t *bitmap; uint16_t *lut; int ysize, xsize; int channel_line_size; } EXRThreadData; typedef struct EXRContext { AVClass *class; AVFrame *picture; AVCodecContext *avctx; ExrDSPContext dsp; #if HAVE_BIGENDIAN BswapDSPContext bbdsp; #endif enum ExrCompr compression; enum ExrPixelType pixel_type; int channel_offsets[4]; // 0 = red, 1 = green, 2 = blue and 3 = alpha const AVPixFmtDescriptor *desc; int w, h; int32_t xmax, xmin; int32_t ymax, ymin; uint32_t xdelta, ydelta; int scan_lines_per_block; EXRTileAttribute tile_attr; /* header data attribute of tile */ int is_tile; /* 0 if scanline, 1 if tile */ int is_luma;/* 1 if there is an Y plane */ GetByteContext gb; const uint8_t *buf; int buf_size; EXRChannel *channels; int nb_channels; int current_channel_offset; EXRThreadData *thread_data; const char *layer; enum AVColorTransferCharacteristic apply_trc_type; float gamma; union av_intfloat32 gamma_table[65536]; } EXRContext; /* -15 stored using a single precision bias of 127 */ #define HALF_FLOAT_MIN_BIASED_EXP_AS_SINGLE_FP_EXP 0x38000000 /* max exponent value in single precision that will be converted * to 
Inf or Nan when stored as a half-float */ #define HALF_FLOAT_MAX_BIASED_EXP_AS_SINGLE_FP_EXP 0x47800000 /* 255 is the max exponent biased value */ #define FLOAT_MAX_BIASED_EXP (0xFF << 23) #define HALF_FLOAT_MAX_BIASED_EXP (0x1F << 10) /** * Convert a half float as a uint16_t into a full float. * * @param hf half float as uint16_t * * @return float value */ static union av_intfloat32 exr_half2float(uint16_t hf) { unsigned int sign = (unsigned int) (hf >> 15); unsigned int mantissa = (unsigned int) (hf & ((1 << 10) - 1)); unsigned int exp = (unsigned int) (hf & HALF_FLOAT_MAX_BIASED_EXP); union av_intfloat32 f; if (exp == HALF_FLOAT_MAX_BIASED_EXP) { // we have a half-float NaN or Inf // half-float NaNs will be converted to a single precision NaN // half-float Infs will be converted to a single precision Inf exp = FLOAT_MAX_BIASED_EXP; if (mantissa) mantissa = (1 << 23) - 1; // set all bits to indicate a NaN } else if (exp == 0x0) { // convert half-float zero/denorm to single precision value if (mantissa) { mantissa <<= 1; exp = HALF_FLOAT_MIN_BIASED_EXP_AS_SINGLE_FP_EXP; // check for leading 1 in denorm mantissa while (!(mantissa & (1 << 10))) { // for every leading 0, decrement single precision exponent by 1 // and shift half-float mantissa value to the left mantissa <<= 1; exp -= (1 << 23); } // clamp the mantissa to 10 bits mantissa &= ((1 << 10) - 1); // shift left to generate single-precision mantissa of 23 bits mantissa <<= 13; } } else { // shift left to generate single-precision mantissa of 23 bits mantissa <<= 13; // generate single precision biased exponent value exp = (exp << 13) + HALF_FLOAT_MIN_BIASED_EXP_AS_SINGLE_FP_EXP; } f.i = (sign << 31) | exp | mantissa; return f; } static int zip_uncompress(EXRContext *s, const uint8_t *src, int compressed_size, int uncompressed_size, EXRThreadData *td) { unsigned long dest_len = uncompressed_size; if (uncompress(td->tmp, &dest_len, src, compressed_size) != Z_OK || dest_len != uncompressed_size) return AVERROR_INVALIDDATA; av_assert1(uncompressed_size % 2 == 0); s->dsp.predictor(td->tmp, uncompressed_size); s->dsp.reorder_pixels(td->uncompressed_data, td->tmp, uncompressed_size); return 0; } static int rle_uncompress(EXRContext *ctx, const uint8_t *src, int compressed_size, int uncompressed_size, EXRThreadData *td) { uint8_t *d = td->tmp; const int8_t *s = src; int ssize = compressed_size; int dsize = uncompressed_size; uint8_t *dend = d + dsize; int count; while (ssize > 0) { count = *s++; if (count < 0) { count = -count; if ((dsize -= count) < 0 || (ssize -= count + 1) < 0) return AVERROR_INVALIDDATA; while (count--) *d++ = *s++; } else { count++; if ((dsize -= count) < 0 || (ssize -= 2) < 0) return AVERROR_INVALIDDATA; while (count--) *d++ = *s; s++; } } if (dend != d) return AVERROR_INVALIDDATA; av_assert1(uncompressed_size % 2 == 0); ctx->dsp.predictor(td->tmp, uncompressed_size); ctx->dsp.reorder_pixels(td->uncompressed_data, td->tmp, uncompressed_size); return 0; } #define USHORT_RANGE (1 << 16) #define BITMAP_SIZE (1 << 13) static uint16_t reverse_lut(const uint8_t *bitmap, uint16_t *lut) { int i, k = 0; for (i = 0; i < USHORT_RANGE; i++) if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i; i = k - 1; memset(lut + k, 0, (USHORT_RANGE - k) * 2); return i; } static void apply_lut(const uint16_t *lut, uint16_t *dst, int dsize) { int i; for (i = 0; i < dsize; ++i) dst[i] = lut[dst[i]]; } #define HUF_ENCBITS 16 // literal (value) bit length #define HUF_DECBITS 14 // decoding bit size (>= 8) #define HUF_ENCSIZE ((1 << 
HUF_ENCBITS) + 1) // encoding table size #define HUF_DECSIZE (1 << HUF_DECBITS) // decoding table size #define HUF_DECMASK (HUF_DECSIZE - 1) typedef struct HufDec { int len; int lit; int *p; } HufDec; static void huf_canonical_code_table(uint64_t *hcode) { uint64_t c, n[59] = { 0 }; int i; for (i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1; c = 0; for (i = 58; i > 0; --i) { uint64_t nc = ((c + n[i]) >> 1); n[i] = c; c = nc; } for (i = 0; i < HUF_ENCSIZE; ++i) { int l = hcode[i]; if (l > 0) hcode[i] = l | (n[l]++ << 6); } } #define SHORT_ZEROCODE_RUN 59 #define LONG_ZEROCODE_RUN 63 #define SHORTEST_LONG_RUN (2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN) #define LONGEST_LONG_RUN (255 + SHORTEST_LONG_RUN) static int huf_unpack_enc_table(GetByteContext *gb, int32_t im, int32_t iM, uint64_t *hcode) { GetBitContext gbit; int ret = init_get_bits8(&gbit, gb->buffer, bytestream2_get_bytes_left(gb)); if (ret < 0) return ret; for (; im <= iM; im++) { uint64_t l = hcode[im] = get_bits(&gbit, 6); if (l == LONG_ZEROCODE_RUN) { int zerun = get_bits(&gbit, 8) + SHORTEST_LONG_RUN; if (im + zerun > iM + 1) return AVERROR_INVALIDDATA; while (zerun--) hcode[im++] = 0; im--; } else if (l >= SHORT_ZEROCODE_RUN) { int zerun = l - SHORT_ZEROCODE_RUN + 2; if (im + zerun > iM + 1) return AVERROR_INVALIDDATA; while (zerun--) hcode[im++] = 0; im--; } } bytestream2_skip(gb, (get_bits_count(&gbit) + 7) / 8); huf_canonical_code_table(hcode); return 0; } static int huf_build_dec_table(const uint64_t *hcode, int im, int iM, HufDec *hdecod) { for (; im <= iM; im++) { uint64_t c = hcode[im] >> 6; int i, l = hcode[im] & 63; if (c >> l) return AVERROR_INVALIDDATA; if (l > HUF_DECBITS) { HufDec *pl = hdecod + (c >> (l - HUF_DECBITS)); if (pl->len) return AVERROR_INVALIDDATA; pl->lit++; pl->p = av_realloc(pl->p, pl->lit * sizeof(int)); if (!pl->p) return AVERROR(ENOMEM); pl->p[pl->lit - 1] = im; } else if (l) { HufDec *pl = hdecod + (c << (HUF_DECBITS - l)); for (i = 1 << (HUF_DECBITS - l); i > 0; i--, pl++) { if (pl->len || pl->p) return AVERROR_INVALIDDATA; pl->len = l; pl->lit = im; } } } return 0; } #define get_char(c, lc, gb) \ { \ c = (c << 8) | bytestream2_get_byte(gb); \ lc += 8; \ } #define get_code(po, rlc, c, lc, gb, out, oe, outb) \ { \ if (po == rlc) { \ if (lc < 8) \ get_char(c, lc, gb); \ lc -= 8; \ \ cs = c >> lc; \ \ if (out + cs > oe || out == outb) \ return AVERROR_INVALIDDATA; \ \ s = out[-1]; \ \ while (cs-- > 0) \ *out++ = s; \ } else if (out < oe) { \ *out++ = po; \ } else { \ return AVERROR_INVALIDDATA; \ } \ } static int huf_decode(const uint64_t *hcode, const HufDec *hdecod, GetByteContext *gb, int nbits, int rlc, int no, uint16_t *out) { uint64_t c = 0; uint16_t *outb = out; uint16_t *oe = out + no; const uint8_t *ie = gb->buffer + (nbits + 7) / 8; // input byte size uint8_t cs; uint16_t s; int i, lc = 0; while (gb->buffer < ie) { get_char(c, lc, gb); while (lc >= HUF_DECBITS) { const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK]; if (pl.len) { lc -= pl.len; get_code(pl.lit, rlc, c, lc, gb, out, oe, outb); } else { int j; if (!pl.p) return AVERROR_INVALIDDATA; for (j = 0; j < pl.lit; j++) { int l = hcode[pl.p[j]] & 63; while (lc < l && bytestream2_get_bytes_left(gb) > 0) get_char(c, lc, gb); if (lc >= l) { if ((hcode[pl.p[j]] >> 6) == ((c >> (lc - l)) & ((1LL << l) - 1))) { lc -= l; get_code(pl.p[j], rlc, c, lc, gb, out, oe, outb); break; } } } if (j == pl.lit) return AVERROR_INVALIDDATA; } } } i = (8 - nbits) & 7; c >>= i; lc -= i; while (lc > 0) { const HufDec pl = hdecod[(c << 
(HUF_DECBITS - lc)) & HUF_DECMASK]; if (pl.len && lc >= pl.len) { lc -= pl.len; get_code(pl.lit, rlc, c, lc, gb, out, oe, outb); } else { return AVERROR_INVALIDDATA; } } if (out - outb != no) return AVERROR_INVALIDDATA; return 0; } static int huf_uncompress(GetByteContext *gb, uint16_t *dst, int dst_size) { int32_t src_size, im, iM; uint32_t nBits; uint64_t *freq; HufDec *hdec; int ret, i; src_size = bytestream2_get_le32(gb); im = bytestream2_get_le32(gb); iM = bytestream2_get_le32(gb); bytestream2_skip(gb, 4); nBits = bytestream2_get_le32(gb); if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE || src_size < 0) return AVERROR_INVALIDDATA; bytestream2_skip(gb, 4); freq = av_mallocz_array(HUF_ENCSIZE, sizeof(*freq)); hdec = av_mallocz_array(HUF_DECSIZE, sizeof(*hdec)); if (!freq || !hdec) { ret = AVERROR(ENOMEM); goto fail; } if ((ret = huf_unpack_enc_table(gb, im, iM, freq)) < 0) goto fail; if (nBits > 8 * bytestream2_get_bytes_left(gb)) { ret = AVERROR_INVALIDDATA; goto fail; } if ((ret = huf_build_dec_table(freq, im, iM, hdec)) < 0) goto fail; ret = huf_decode(freq, hdec, gb, nBits, iM, dst_size, dst); fail: for (i = 0; i < HUF_DECSIZE; i++) if (hdec) av_freep(&hdec[i].p); av_free(freq); av_free(hdec); return ret; } static inline void wdec14(uint16_t l, uint16_t h, uint16_t *a, uint16_t *b) { int16_t ls = l; int16_t hs = h; int hi = hs; int ai = ls + (hi & 1) + (hi >> 1); int16_t as = ai; int16_t bs = ai - hi; *a = as; *b = bs; } #define NBITS 16 #define A_OFFSET (1 << (NBITS - 1)) #define MOD_MASK ((1 << NBITS) - 1) static inline void wdec16(uint16_t l, uint16_t h, uint16_t *a, uint16_t *b) { int m = l; int d = h; int bb = (m - (d >> 1)) & MOD_MASK; int aa = (d + bb - A_OFFSET) & MOD_MASK; *b = bb; *a = aa; } static void wav_decode(uint16_t *in, int nx, int ox, int ny, int oy, uint16_t mx) { int w14 = (mx < (1 << 14)); int n = (nx > ny) ? 
ny : nx; int p = 1; int p2; while (p <= n) p <<= 1; p >>= 1; p2 = p; p >>= 1; while (p >= 1) { uint16_t *py = in; uint16_t *ey = in + oy * (ny - p2); uint16_t i00, i01, i10, i11; int oy1 = oy * p; int oy2 = oy * p2; int ox1 = ox * p; int ox2 = ox * p2; for (; py <= ey; py += oy2) { uint16_t *px = py; uint16_t *ex = py + ox * (nx - p2); for (; px <= ex; px += ox2) { uint16_t *p01 = px + ox1; uint16_t *p10 = px + oy1; uint16_t *p11 = p10 + ox1; if (w14) { wdec14(*px, *p10, &i00, &i10); wdec14(*p01, *p11, &i01, &i11); wdec14(i00, i01, px, p01); wdec14(i10, i11, p10, p11); } else { wdec16(*px, *p10, &i00, &i10); wdec16(*p01, *p11, &i01, &i11); wdec16(i00, i01, px, p01); wdec16(i10, i11, p10, p11); } } if (nx & p) { uint16_t *p10 = px + oy1; if (w14) wdec14(*px, *p10, &i00, p10); else wdec16(*px, *p10, &i00, p10); *px = i00; } } if (ny & p) { uint16_t *px = py; uint16_t *ex = py + ox * (nx - p2); for (; px <= ex; px += ox2) { uint16_t *p01 = px + ox1; if (w14) wdec14(*px, *p01, &i00, p01); else wdec16(*px, *p01, &i00, p01); *px = i00; } } p2 = p; p >>= 1; } } static int piz_uncompress(EXRContext *s, const uint8_t *src, int ssize, int dsize, EXRThreadData *td) { GetByteContext gb; uint16_t maxval, min_non_zero, max_non_zero; uint16_t *ptr; uint16_t *tmp = (uint16_t *)td->tmp; uint16_t *out; uint16_t *in; int ret, i, j; int pixel_half_size;/* 1 for half, 2 for float and uint32 */ EXRChannel *channel; int tmp_offset; if (!td->bitmap) td->bitmap = av_malloc(BITMAP_SIZE); if (!td->lut) td->lut = av_malloc(1 << 17); if (!td->bitmap || !td->lut) { av_freep(&td->bitmap); av_freep(&td->lut); return AVERROR(ENOMEM); } bytestream2_init(&gb, src, ssize); min_non_zero = bytestream2_get_le16(&gb); max_non_zero = bytestream2_get_le16(&gb); if (max_non_zero >= BITMAP_SIZE) return AVERROR_INVALIDDATA; memset(td->bitmap, 0, FFMIN(min_non_zero, BITMAP_SIZE)); if (min_non_zero <= max_non_zero) bytestream2_get_buffer(&gb, td->bitmap + min_non_zero, max_non_zero - min_non_zero + 1); memset(td->bitmap + max_non_zero + 1, 0, BITMAP_SIZE - max_non_zero - 1); maxval = reverse_lut(td->bitmap, td->lut); ret = huf_uncompress(&gb, tmp, dsize / sizeof(uint16_t)); if (ret) return ret; ptr = tmp; for (i = 0; i < s->nb_channels; i++) { channel = &s->channels[i]; if (channel->pixel_type == EXR_HALF) pixel_half_size = 1; else pixel_half_size = 2; for (j = 0; j < pixel_half_size; j++) wav_decode(ptr + j, td->xsize, pixel_half_size, td->ysize, td->xsize * pixel_half_size, maxval); ptr += td->xsize * td->ysize * pixel_half_size; } apply_lut(td->lut, tmp, dsize / sizeof(uint16_t)); out = (uint16_t *)td->uncompressed_data; for (i = 0; i < td->ysize; i++) { tmp_offset = 0; for (j = 0; j < s->nb_channels; j++) { channel = &s->channels[j]; if (channel->pixel_type == EXR_HALF) pixel_half_size = 1; else pixel_half_size = 2; in = tmp + tmp_offset * td->xsize * td->ysize + i * td->xsize * pixel_half_size; tmp_offset += pixel_half_size; #if HAVE_BIGENDIAN s->bbdsp.bswap16_buf(out, in, td->xsize * pixel_half_size); #else memcpy(out, in, td->xsize * 2 * pixel_half_size); #endif out += td->xsize * pixel_half_size; } } return 0; } static int pxr24_uncompress(EXRContext *s, const uint8_t *src, int compressed_size, int uncompressed_size, EXRThreadData *td) { unsigned long dest_len, expected_len = 0; const uint8_t *in = td->tmp; uint8_t *out; int c, i, j; for (i = 0; i < s->nb_channels; i++) { if (s->channels[i].pixel_type == EXR_FLOAT) { expected_len += (td->xsize * td->ysize * 3);/* PRX 24 store float in 24 bit instead of 32 */ } else if 
(s->channels[i].pixel_type == EXR_HALF) { expected_len += (td->xsize * td->ysize * 2); } else {//UINT 32 expected_len += (td->xsize * td->ysize * 4); } } dest_len = expected_len; if (uncompress(td->tmp, &dest_len, src, compressed_size) != Z_OK) { return AVERROR_INVALIDDATA; } else if (dest_len != expected_len) { return AVERROR_INVALIDDATA; } out = td->uncompressed_data; for (i = 0; i < td->ysize; i++) for (c = 0; c < s->nb_channels; c++) { EXRChannel *channel = &s->channels[c]; const uint8_t *ptr[4]; uint32_t pixel = 0; switch (channel->pixel_type) { case EXR_FLOAT: ptr[0] = in; ptr[1] = ptr[0] + td->xsize; ptr[2] = ptr[1] + td->xsize; in = ptr[2] + td->xsize; for (j = 0; j < td->xsize; ++j) { uint32_t diff = ((unsigned)*(ptr[0]++) << 24) | (*(ptr[1]++) << 16) | (*(ptr[2]++) << 8); pixel += diff; bytestream_put_le32(&out, pixel); } break; case EXR_HALF: ptr[0] = in; ptr[1] = ptr[0] + td->xsize; in = ptr[1] + td->xsize; for (j = 0; j < td->xsize; j++) { uint32_t diff = (*(ptr[0]++) << 8) | *(ptr[1]++); pixel += diff; bytestream_put_le16(&out, pixel); } break; case EXR_UINT: ptr[0] = in; ptr[1] = ptr[0] + s->xdelta; ptr[2] = ptr[1] + s->xdelta; ptr[3] = ptr[2] + s->xdelta; in = ptr[3] + s->xdelta; for (j = 0; j < s->xdelta; ++j) { uint32_t diff = ((uint32_t)*(ptr[0]++) << 24) | (*(ptr[1]++) << 16) | (*(ptr[2]++) << 8 ) | (*(ptr[3]++)); pixel += diff; bytestream_put_le32(&out, pixel); } break; default: return AVERROR_INVALIDDATA; } } return 0; } static void unpack_14(const uint8_t b[14], uint16_t s[16]) { unsigned short shift = (b[ 2] >> 2) & 15; unsigned short bias = (0x20 << shift); int i; s[ 0] = (b[0] << 8) | b[1]; s[ 4] = s[ 0] + ((((b[ 2] << 4) | (b[ 3] >> 4)) & 0x3f) << shift) - bias; s[ 8] = s[ 4] + ((((b[ 3] << 2) | (b[ 4] >> 6)) & 0x3f) << shift) - bias; s[12] = s[ 8] + ((b[ 4] & 0x3f) << shift) - bias; s[ 1] = s[ 0] + ((b[ 5] >> 2) << shift) - bias; s[ 5] = s[ 4] + ((((b[ 5] << 4) | (b[ 6] >> 4)) & 0x3f) << shift) - bias; s[ 9] = s[ 8] + ((((b[ 6] << 2) | (b[ 7] >> 6)) & 0x3f) << shift) - bias; s[13] = s[12] + ((b[ 7] & 0x3f) << shift) - bias; s[ 2] = s[ 1] + ((b[ 8] >> 2) << shift) - bias; s[ 6] = s[ 5] + ((((b[ 8] << 4) | (b[ 9] >> 4)) & 0x3f) << shift) - bias; s[10] = s[ 9] + ((((b[ 9] << 2) | (b[10] >> 6)) & 0x3f) << shift) - bias; s[14] = s[13] + ((b[10] & 0x3f) << shift) - bias; s[ 3] = s[ 2] + ((b[11] >> 2) << shift) - bias; s[ 7] = s[ 6] + ((((b[11] << 4) | (b[12] >> 4)) & 0x3f) << shift) - bias; s[11] = s[10] + ((((b[12] << 2) | (b[13] >> 6)) & 0x3f) << shift) - bias; s[15] = s[14] + ((b[13] & 0x3f) << shift) - bias; for (i = 0; i < 16; ++i) { if (s[i] & 0x8000) s[i] &= 0x7fff; else s[i] = ~s[i]; } } static void unpack_3(const uint8_t b[3], uint16_t s[16]) { int i; s[0] = (b[0] << 8) | b[1]; if (s[0] & 0x8000) s[0] &= 0x7fff; else s[0] = ~s[0]; for (i = 1; i < 16; i++) s[i] = s[0]; } static int b44_uncompress(EXRContext *s, const uint8_t *src, int compressed_size, int uncompressed_size, EXRThreadData *td) { const int8_t *sr = src; int stay_to_uncompress = compressed_size; int nb_b44_block_w, nb_b44_block_h; int index_tl_x, index_tl_y, index_out, index_tmp; uint16_t tmp_buffer[16]; /* B44 use 4x4 half float pixel */ int c, iY, iX, y, x; int target_channel_offset = 0; /* calc B44 block count */ nb_b44_block_w = td->xsize / 4; if ((td->xsize % 4) != 0) nb_b44_block_w++; nb_b44_block_h = td->ysize / 4; if ((td->ysize % 4) != 0) nb_b44_block_h++; for (c = 0; c < s->nb_channels; c++) { if (s->channels[c].pixel_type == EXR_HALF) {/* B44 only compress half float data */ for (iY = 
0; iY < nb_b44_block_h; iY++) { for (iX = 0; iX < nb_b44_block_w; iX++) {/* For each B44 block */ if (stay_to_uncompress < 3) { av_log(s, AV_LOG_ERROR, "Not enough data for B44A block: %d", stay_to_uncompress); return AVERROR_INVALIDDATA; } if (src[compressed_size - stay_to_uncompress + 2] == 0xfc) { /* B44A block */ unpack_3(sr, tmp_buffer); sr += 3; stay_to_uncompress -= 3; } else {/* B44 Block */ if (stay_to_uncompress < 14) { av_log(s, AV_LOG_ERROR, "Not enough data for B44 block: %d", stay_to_uncompress); return AVERROR_INVALIDDATA; } unpack_14(sr, tmp_buffer); sr += 14; stay_to_uncompress -= 14; } /* copy data to uncompress buffer (B44 block can exceed target resolution)*/ index_tl_x = iX * 4; index_tl_y = iY * 4; for (y = index_tl_y; y < FFMIN(index_tl_y + 4, td->ysize); y++) { for (x = index_tl_x; x < FFMIN(index_tl_x + 4, td->xsize); x++) { index_out = target_channel_offset * td->xsize + y * td->channel_line_size + 2 * x; index_tmp = (y-index_tl_y) * 4 + (x-index_tl_x); td->uncompressed_data[index_out] = tmp_buffer[index_tmp] & 0xff; td->uncompressed_data[index_out + 1] = tmp_buffer[index_tmp] >> 8; } } } } target_channel_offset += 2; } else {/* Float or UINT 32 channel */ if (stay_to_uncompress < td->ysize * td->xsize * 4) { av_log(s, AV_LOG_ERROR, "Not enough data for uncompress channel: %d", stay_to_uncompress); return AVERROR_INVALIDDATA; } for (y = 0; y < td->ysize; y++) { index_out = target_channel_offset * td->xsize + y * td->channel_line_size; memcpy(&td->uncompressed_data[index_out], sr, td->xsize * 4); sr += td->xsize * 4; } target_channel_offset += 4; stay_to_uncompress -= td->ysize * td->xsize * 4; } } return 0; } static int decode_block(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr) { EXRContext *s = avctx->priv_data; AVFrame *const p = s->picture; EXRThreadData *td = &s->thread_data[threadnr]; const uint8_t *channel_buffer[4] = { 0 }; const uint8_t *buf = s->buf; uint64_t line_offset, uncompressed_size; uint8_t *ptr; uint32_t data_size; int line, col = 0; uint64_t tile_x, tile_y, tile_level_x, tile_level_y; const uint8_t *src; int step = s->desc->flags & AV_PIX_FMT_FLAG_FLOAT ? 
4 : 2 * s->desc->nb_components; int bxmin = 0, axmax = 0, window_xoffset = 0; int window_xmin, window_xmax, window_ymin, window_ymax; int data_xoffset, data_yoffset, data_window_offset, xsize, ysize; int i, x, buf_size = s->buf_size; int c, rgb_channel_count; float one_gamma = 1.0f / s->gamma; avpriv_trc_function trc_func = avpriv_get_trc_function_from_trc(s->apply_trc_type); int ret; line_offset = AV_RL64(s->gb.buffer + jobnr * 8); if (s->is_tile) { if (buf_size < 20 || line_offset > buf_size - 20) return AVERROR_INVALIDDATA; src = buf + line_offset + 20; tile_x = AV_RL32(src - 20); tile_y = AV_RL32(src - 16); tile_level_x = AV_RL32(src - 12); tile_level_y = AV_RL32(src - 8); data_size = AV_RL32(src - 4); if (data_size <= 0 || data_size > buf_size - line_offset - 20) return AVERROR_INVALIDDATA; if (tile_level_x || tile_level_y) { /* tile level, is not the full res level */ avpriv_report_missing_feature(s->avctx, "Subres tile before full res tile"); return AVERROR_PATCHWELCOME; } line = s->ymin + s->tile_attr.ySize * tile_y; col = s->tile_attr.xSize * tile_x; if (line < s->ymin || line > s->ymax || s->xmin + col < s->xmin || s->xmin + col > s->xmax) return AVERROR_INVALIDDATA; td->ysize = FFMIN(s->tile_attr.ySize, s->ydelta - tile_y * s->tile_attr.ySize); td->xsize = FFMIN(s->tile_attr.xSize, s->xdelta - tile_x * s->tile_attr.xSize); if (td->xsize * (uint64_t)s->current_channel_offset > INT_MAX) return AVERROR_INVALIDDATA; td->channel_line_size = td->xsize * s->current_channel_offset;/* uncompress size of one line */ uncompressed_size = td->channel_line_size * (uint64_t)td->ysize;/* uncompress size of the block */ } else { if (buf_size < 8 || line_offset > buf_size - 8) return AVERROR_INVALIDDATA; src = buf + line_offset + 8; line = AV_RL32(src - 8); if (line < s->ymin || line > s->ymax) return AVERROR_INVALIDDATA; data_size = AV_RL32(src - 4); if (data_size <= 0 || data_size > buf_size - line_offset - 8) return AVERROR_INVALIDDATA; td->ysize = FFMIN(s->scan_lines_per_block, s->ymax - line + 1); /* s->ydelta - line ?? 
*/ td->xsize = s->xdelta; if (td->xsize * (uint64_t)s->current_channel_offset > INT_MAX) return AVERROR_INVALIDDATA; td->channel_line_size = td->xsize * s->current_channel_offset;/* uncompress size of one line */ uncompressed_size = td->channel_line_size * (uint64_t)td->ysize;/* uncompress size of the block */ if ((s->compression == EXR_RAW && (data_size != uncompressed_size || line_offset > buf_size - uncompressed_size)) || (s->compression != EXR_RAW && (data_size > uncompressed_size || line_offset > buf_size - data_size))) { return AVERROR_INVALIDDATA; } } window_xmin = FFMIN(avctx->width, FFMAX(0, s->xmin + col)); window_xmax = FFMIN(avctx->width, FFMAX(0, s->xmin + col + td->xsize)); window_ymin = FFMIN(avctx->height, FFMAX(0, line )); window_ymax = FFMIN(avctx->height, FFMAX(0, line + td->ysize)); xsize = window_xmax - window_xmin; ysize = window_ymax - window_ymin; /* tile or scanline not visible skip decoding */ if (xsize <= 0 || ysize <= 0) return 0; /* is the first tile or is a scanline */ if(col == 0) { window_xmin = 0; /* pixels to add at the left of the display window */ window_xoffset = FFMAX(0, s->xmin); /* bytes to add at the left of the display window */ bxmin = window_xoffset * step; } /* is the last tile or is a scanline */ if(col + td->xsize == s->xdelta) { window_xmax = avctx->width; /* bytes to add at the right of the display window */ axmax = FFMAX(0, (avctx->width - (s->xmax + 1))) * step; } if (data_size < uncompressed_size || s->is_tile) { /* td->tmp is use for tile reorganization */ av_fast_padded_malloc(&td->tmp, &td->tmp_size, uncompressed_size); if (!td->tmp) return AVERROR(ENOMEM); } if (data_size < uncompressed_size) { av_fast_padded_malloc(&td->uncompressed_data, &td->uncompressed_size, uncompressed_size + 64);/* Force 64 padding for AVX2 reorder_pixels dst */ if (!td->uncompressed_data) return AVERROR(ENOMEM); ret = AVERROR_INVALIDDATA; switch (s->compression) { case EXR_ZIP1: case EXR_ZIP16: ret = zip_uncompress(s, src, data_size, uncompressed_size, td); break; case EXR_PIZ: ret = piz_uncompress(s, src, data_size, uncompressed_size, td); break; case EXR_PXR24: ret = pxr24_uncompress(s, src, data_size, uncompressed_size, td); break; case EXR_RLE: ret = rle_uncompress(s, src, data_size, uncompressed_size, td); break; case EXR_B44: case EXR_B44A: ret = b44_uncompress(s, src, data_size, uncompressed_size, td); break; } if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "decode_block() failed.\n"); return ret; } src = td->uncompressed_data; } /* offsets to crop data outside display window */ data_xoffset = FFABS(FFMIN(0, s->xmin + col)) * (s->pixel_type == EXR_HALF ? 2 : 4); data_yoffset = FFABS(FFMIN(0, line)); data_window_offset = (data_yoffset * td->channel_line_size) + data_xoffset; if (!s->is_luma) { channel_buffer[0] = src + (td->xsize * s->channel_offsets[0]) + data_window_offset; channel_buffer[1] = src + (td->xsize * s->channel_offsets[1]) + data_window_offset; channel_buffer[2] = src + (td->xsize * s->channel_offsets[2]) + data_window_offset; rgb_channel_count = 3; } else { /* put y data in the first channel_buffer */ channel_buffer[0] = src + (td->xsize * s->channel_offsets[1]) + data_window_offset; rgb_channel_count = 1; } if (s->channel_offsets[3] >= 0) channel_buffer[3] = src + (td->xsize * s->channel_offsets[3]) + data_window_offset; if (s->desc->flags & AV_PIX_FMT_FLAG_FLOAT) { /* todo: change this when a floating point pixel format with luma with alpha is implemented */ int channel_count = s->channel_offsets[3] >= 0 ? 
4 : rgb_channel_count; if (s->is_luma) { channel_buffer[1] = channel_buffer[0]; channel_buffer[2] = channel_buffer[0]; } for (c = 0; c < channel_count; c++) { int plane = s->desc->comp[c].plane; ptr = p->data[plane] + window_ymin * p->linesize[plane] + (window_xmin * 4); for (i = 0; i < ysize; i++, ptr += p->linesize[plane]) { const uint8_t *src; union av_intfloat32 *ptr_x; src = channel_buffer[c]; ptr_x = (union av_intfloat32 *)ptr; // Zero out the start if xmin is not 0 memset(ptr_x, 0, bxmin); ptr_x += window_xoffset; if (s->pixel_type == EXR_FLOAT) { // 32-bit union av_intfloat32 t; if (trc_func && c < 3) { for (x = 0; x < xsize; x++) { t.i = bytestream_get_le32(&src); t.f = trc_func(t.f); *ptr_x++ = t; } } else { for (x = 0; x < xsize; x++) { t.i = bytestream_get_le32(&src); if (t.f > 0.0f && c < 3) /* avoid negative values */ t.f = powf(t.f, one_gamma); *ptr_x++ = t; } } } else if (s->pixel_type == EXR_HALF) { // 16-bit if (c < 3 || !trc_func) { for (x = 0; x < xsize; x++) { *ptr_x++ = s->gamma_table[bytestream_get_le16(&src)]; } } else { for (x = 0; x < xsize; x++) { *ptr_x++ = exr_half2float(bytestream_get_le16(&src));; } } } // Zero out the end if xmax+1 is not w memset(ptr_x, 0, axmax); channel_buffer[c] += td->channel_line_size; } } } else { av_assert1(s->pixel_type == EXR_UINT); ptr = p->data[0] + window_ymin * p->linesize[0] + (window_xmin * s->desc->nb_components * 2); for (i = 0; i < ysize; i++, ptr += p->linesize[0]) { const uint8_t * a; const uint8_t *rgb[3]; uint16_t *ptr_x; for (c = 0; c < rgb_channel_count; c++) { rgb[c] = channel_buffer[c]; } if (channel_buffer[3]) a = channel_buffer[3]; ptr_x = (uint16_t *) ptr; // Zero out the start if xmin is not 0 memset(ptr_x, 0, bxmin); ptr_x += window_xoffset * s->desc->nb_components; for (x = 0; x < xsize; x++) { for (c = 0; c < rgb_channel_count; c++) { *ptr_x++ = bytestream_get_le32(&rgb[c]) >> 16; } if (channel_buffer[3]) *ptr_x++ = bytestream_get_le32(&a) >> 16; } // Zero out the end if xmax+1 is not w memset(ptr_x, 0, axmax); channel_buffer[0] += td->channel_line_size; channel_buffer[1] += td->channel_line_size; channel_buffer[2] += td->channel_line_size; if (channel_buffer[3]) channel_buffer[3] += td->channel_line_size; } } return 0; } /** * Check if the variable name corresponds to its data type. 
* * @param s the EXRContext * @param value_name name of the variable to check * @param value_type type of the variable to check * @param minimum_length minimum length of the variable data * * @return bytes to read containing variable data * -1 if variable is not found * 0 if buffer ended prematurely */ static int check_header_variable(EXRContext *s, const char *value_name, const char *value_type, unsigned int minimum_length) { int var_size = -1; if (bytestream2_get_bytes_left(&s->gb) >= minimum_length && !strcmp(s->gb.buffer, value_name)) { // found value_name, jump to value_type (null terminated strings) s->gb.buffer += strlen(value_name) + 1; if (!strcmp(s->gb.buffer, value_type)) { s->gb.buffer += strlen(value_type) + 1; var_size = bytestream2_get_le32(&s->gb); // don't go read past boundaries if (var_size > bytestream2_get_bytes_left(&s->gb)) var_size = 0; } else { // value_type not found, reset the buffer s->gb.buffer -= strlen(value_name) + 1; av_log(s->avctx, AV_LOG_WARNING, "Unknown data type %s for header variable %s.\n", value_type, value_name); } } return var_size; } static int decode_header(EXRContext *s, AVFrame *frame) { AVDictionary *metadata = NULL; int magic_number, version, i, flags, sar = 0; int layer_match = 0; int ret; int dup_channels = 0; s->current_channel_offset = 0; s->xmin = ~0; s->xmax = ~0; s->ymin = ~0; s->ymax = ~0; s->xdelta = ~0; s->ydelta = ~0; s->channel_offsets[0] = -1; s->channel_offsets[1] = -1; s->channel_offsets[2] = -1; s->channel_offsets[3] = -1; s->pixel_type = EXR_UNKNOWN; s->compression = EXR_UNKN; s->nb_channels = 0; s->w = 0; s->h = 0; s->tile_attr.xSize = -1; s->tile_attr.ySize = -1; s->is_tile = 0; s->is_luma = 0; if (bytestream2_get_bytes_left(&s->gb) < 10) { av_log(s->avctx, AV_LOG_ERROR, "Header too short to parse.\n"); return AVERROR_INVALIDDATA; } magic_number = bytestream2_get_le32(&s->gb); if (magic_number != 20000630) { /* As per documentation of OpenEXR, it is supposed to be * int 20000630 little-endian */ av_log(s->avctx, AV_LOG_ERROR, "Wrong magic number %d.\n", magic_number); return AVERROR_INVALIDDATA; } version = bytestream2_get_byte(&s->gb); if (version != 2) { avpriv_report_missing_feature(s->avctx, "Version %d", version); return AVERROR_PATCHWELCOME; } flags = bytestream2_get_le24(&s->gb); if (flags & 0x02) s->is_tile = 1; if (flags & 0x08) { avpriv_report_missing_feature(s->avctx, "deep data"); return AVERROR_PATCHWELCOME; } if (flags & 0x10) { avpriv_report_missing_feature(s->avctx, "multipart"); return AVERROR_PATCHWELCOME; } // Parse the header while (bytestream2_get_bytes_left(&s->gb) > 0 && *s->gb.buffer) { int var_size; if ((var_size = check_header_variable(s, "channels", "chlist", 38)) >= 0) { GetByteContext ch_gb; if (!var_size) { ret = AVERROR_INVALIDDATA; goto fail; } bytestream2_init(&ch_gb, s->gb.buffer, var_size); while (bytestream2_get_bytes_left(&ch_gb) >= 19) { EXRChannel *channel; enum ExrPixelType current_pixel_type; int channel_index = -1; int xsub, ysub; if (strcmp(s->layer, "") != 0) { if (strncmp(ch_gb.buffer, s->layer, strlen(s->layer)) == 0) { layer_match = 1; av_log(s->avctx, AV_LOG_INFO, "Channel match layer : %s.\n", ch_gb.buffer); ch_gb.buffer += strlen(s->layer); if (*ch_gb.buffer == '.') ch_gb.buffer++; /* skip dot if not given */ } else { layer_match = 0; av_log(s->avctx, AV_LOG_INFO, "Channel doesn't match layer : %s.\n", ch_gb.buffer); } } else { layer_match = 1; } if (layer_match) { /* only search channel if the layer match is valid */ if (!av_strcasecmp(ch_gb.buffer, "R") || 
!av_strcasecmp(ch_gb.buffer, "X") || !av_strcasecmp(ch_gb.buffer, "U")) { channel_index = 0; s->is_luma = 0; } else if (!av_strcasecmp(ch_gb.buffer, "G") || !av_strcasecmp(ch_gb.buffer, "V")) { channel_index = 1; s->is_luma = 0; } else if (!av_strcasecmp(ch_gb.buffer, "Y")) { channel_index = 1; s->is_luma = 1; } else if (!av_strcasecmp(ch_gb.buffer, "B") || !av_strcasecmp(ch_gb.buffer, "Z") || !av_strcasecmp(ch_gb.buffer, "W")) { channel_index = 2; s->is_luma = 0; } else if (!av_strcasecmp(ch_gb.buffer, "A")) { channel_index = 3; } else { av_log(s->avctx, AV_LOG_WARNING, "Unsupported channel %.256s.\n", ch_gb.buffer); } } /* skip until you get a 0 */ while (bytestream2_get_bytes_left(&ch_gb) > 0 && bytestream2_get_byte(&ch_gb)) continue; if (bytestream2_get_bytes_left(&ch_gb) < 4) { av_log(s->avctx, AV_LOG_ERROR, "Incomplete header.\n"); ret = AVERROR_INVALIDDATA; goto fail; } current_pixel_type = bytestream2_get_le32(&ch_gb); if (current_pixel_type >= EXR_UNKNOWN) { avpriv_report_missing_feature(s->avctx, "Pixel type %d", current_pixel_type); ret = AVERROR_PATCHWELCOME; goto fail; } bytestream2_skip(&ch_gb, 4); xsub = bytestream2_get_le32(&ch_gb); ysub = bytestream2_get_le32(&ch_gb); if (xsub != 1 || ysub != 1) { avpriv_report_missing_feature(s->avctx, "Subsampling %dx%d", xsub, ysub); ret = AVERROR_PATCHWELCOME; goto fail; } if (channel_index >= 0 && s->channel_offsets[channel_index] == -1) { /* channel has not been previously assigned */ if (s->pixel_type != EXR_UNKNOWN && s->pixel_type != current_pixel_type) { av_log(s->avctx, AV_LOG_ERROR, "RGB channels not of the same depth.\n"); ret = AVERROR_INVALIDDATA; goto fail; } s->pixel_type = current_pixel_type; s->channel_offsets[channel_index] = s->current_channel_offset; } else if (channel_index >= 0) { av_log(s->avctx, AV_LOG_WARNING, "Multiple channels with index %d.\n", channel_index); if (++dup_channels > 10) { ret = AVERROR_INVALIDDATA; goto fail; } } s->channels = av_realloc(s->channels, ++s->nb_channels * sizeof(EXRChannel)); if (!s->channels) { ret = AVERROR(ENOMEM); goto fail; } channel = &s->channels[s->nb_channels - 1]; channel->pixel_type = current_pixel_type; channel->xsub = xsub; channel->ysub = ysub; if (current_pixel_type == EXR_HALF) { s->current_channel_offset += 2; } else {/* Float or UINT32 */ s->current_channel_offset += 4; } } /* Check if all channels are set with an offset or if the channels * are causing an overflow */ if (!s->is_luma) {/* if we expected to have at least 3 channels */ if (FFMIN3(s->channel_offsets[0], s->channel_offsets[1], s->channel_offsets[2]) < 0) { if (s->channel_offsets[0] < 0) av_log(s->avctx, AV_LOG_ERROR, "Missing red channel.\n"); if (s->channel_offsets[1] < 0) av_log(s->avctx, AV_LOG_ERROR, "Missing green channel.\n"); if (s->channel_offsets[2] < 0) av_log(s->avctx, AV_LOG_ERROR, "Missing blue channel.\n"); ret = AVERROR_INVALIDDATA; goto fail; } } // skip one last byte and update main gb s->gb.buffer = ch_gb.buffer + 1; continue; } else if ((var_size = check_header_variable(s, "dataWindow", "box2i", 31)) >= 0) { int xmin, ymin, xmax, ymax; if (!var_size) { ret = AVERROR_INVALIDDATA; goto fail; } xmin = bytestream2_get_le32(&s->gb); ymin = bytestream2_get_le32(&s->gb); xmax = bytestream2_get_le32(&s->gb); ymax = bytestream2_get_le32(&s->gb); if (xmin > xmax || ymin > ymax || (unsigned)xmax - xmin >= INT_MAX || (unsigned)ymax - ymin >= INT_MAX) { ret = AVERROR_INVALIDDATA; goto fail; } s->xmin = xmin; s->xmax = xmax; s->ymin = ymin; s->ymax = ymax; s->xdelta = (s->xmax - s->xmin) + 1; 
s->ydelta = (s->ymax - s->ymin) + 1; continue; } else if ((var_size = check_header_variable(s, "displayWindow", "box2i", 34)) >= 0) { if (!var_size) { ret = AVERROR_INVALIDDATA; goto fail; } bytestream2_skip(&s->gb, 8); s->w = bytestream2_get_le32(&s->gb) + 1; s->h = bytestream2_get_le32(&s->gb) + 1; continue; } else if ((var_size = check_header_variable(s, "lineOrder", "lineOrder", 25)) >= 0) { int line_order; if (!var_size) { ret = AVERROR_INVALIDDATA; goto fail; } line_order = bytestream2_get_byte(&s->gb); av_log(s->avctx, AV_LOG_DEBUG, "line order: %d.\n", line_order); if (line_order > 2) { av_log(s->avctx, AV_LOG_ERROR, "Unknown line order.\n"); ret = AVERROR_INVALIDDATA; goto fail; } continue; } else if ((var_size = check_header_variable(s, "pixelAspectRatio", "float", 31)) >= 0) { if (!var_size) { ret = AVERROR_INVALIDDATA; goto fail; } sar = bytestream2_get_le32(&s->gb); continue; } else if ((var_size = check_header_variable(s, "compression", "compression", 29)) >= 0) { if (!var_size) { ret = AVERROR_INVALIDDATA; goto fail; } if (s->compression == EXR_UNKN) s->compression = bytestream2_get_byte(&s->gb); else av_log(s->avctx, AV_LOG_WARNING, "Found more than one compression attribute.\n"); continue; } else if ((var_size = check_header_variable(s, "tiles", "tiledesc", 22)) >= 0) { char tileLevel; if (!s->is_tile) av_log(s->avctx, AV_LOG_WARNING, "Found tile attribute and scanline flags. Exr will be interpreted as scanline.\n"); s->tile_attr.xSize = bytestream2_get_le32(&s->gb); s->tile_attr.ySize = bytestream2_get_le32(&s->gb); tileLevel = bytestream2_get_byte(&s->gb); s->tile_attr.level_mode = tileLevel & 0x0f; s->tile_attr.level_round = (tileLevel >> 4) & 0x0f; if (s->tile_attr.level_mode >= EXR_TILE_LEVEL_UNKNOWN) { avpriv_report_missing_feature(s->avctx, "Tile level mode %d", s->tile_attr.level_mode); ret = AVERROR_PATCHWELCOME; goto fail; } if (s->tile_attr.level_round >= EXR_TILE_ROUND_UNKNOWN) { avpriv_report_missing_feature(s->avctx, "Tile level round %d", s->tile_attr.level_round); ret = AVERROR_PATCHWELCOME; goto fail; } continue; } else if ((var_size = check_header_variable(s, "writer", "string", 1)) >= 0) { uint8_t key[256] = { 0 }; bytestream2_get_buffer(&s->gb, key, FFMIN(sizeof(key) - 1, var_size)); av_dict_set(&metadata, "writer", key, 0); continue; } // Check if there are enough bytes for a header if (bytestream2_get_bytes_left(&s->gb) <= 9) { av_log(s->avctx, AV_LOG_ERROR, "Incomplete header\n"); ret = AVERROR_INVALIDDATA; goto fail; } // Process unknown variables for (i = 0; i < 2; i++) // value_name and value_type while (bytestream2_get_byte(&s->gb) != 0); // Skip variable length bytestream2_skip(&s->gb, bytestream2_get_le32(&s->gb)); } ff_set_sar(s->avctx, av_d2q(av_int2float(sar), 255)); if (s->compression == EXR_UNKN) { av_log(s->avctx, AV_LOG_ERROR, "Missing compression attribute.\n"); ret = AVERROR_INVALIDDATA; goto fail; } if (s->is_tile) { if (s->tile_attr.xSize < 1 || s->tile_attr.ySize < 1) { av_log(s->avctx, AV_LOG_ERROR, "Invalid tile attribute.\n"); ret = AVERROR_INVALIDDATA; goto fail; } } if (bytestream2_get_bytes_left(&s->gb) <= 0) { av_log(s->avctx, AV_LOG_ERROR, "Incomplete frame.\n"); ret = AVERROR_INVALIDDATA; goto fail; } frame->metadata = metadata; // aaand we are done bytestream2_skip(&s->gb, 1); return 0; fail: av_dict_free(&metadata); return ret; } static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { EXRContext *s = avctx->priv_data; ThreadFrame frame = { .f = data }; AVFrame *picture = data; uint8_t 
*ptr; int i, y, ret, ymax; int planes; int out_line_size; int nb_blocks; /* nb scanline or nb tile */ uint64_t start_offset_table; uint64_t start_next_scanline; PutByteContext offset_table_writer; bytestream2_init(&s->gb, avpkt->data, avpkt->size); if ((ret = decode_header(s, picture)) < 0) return ret; switch (s->pixel_type) { case EXR_FLOAT: case EXR_HALF: if (s->channel_offsets[3] >= 0) { if (!s->is_luma) { avctx->pix_fmt = AV_PIX_FMT_GBRAPF32; } else { /* todo: change this when a floating point pixel format with luma with alpha is implemented */ avctx->pix_fmt = AV_PIX_FMT_GBRAPF32; } } else { if (!s->is_luma) { avctx->pix_fmt = AV_PIX_FMT_GBRPF32; } else { avctx->pix_fmt = AV_PIX_FMT_GRAYF32; } } break; case EXR_UINT: if (s->channel_offsets[3] >= 0) { if (!s->is_luma) { avctx->pix_fmt = AV_PIX_FMT_RGBA64; } else { avctx->pix_fmt = AV_PIX_FMT_YA16; } } else { if (!s->is_luma) { avctx->pix_fmt = AV_PIX_FMT_RGB48; } else { avctx->pix_fmt = AV_PIX_FMT_GRAY16; } } break; default: av_log(avctx, AV_LOG_ERROR, "Missing channel list.\n"); return AVERROR_INVALIDDATA; } if (s->apply_trc_type != AVCOL_TRC_UNSPECIFIED) avctx->color_trc = s->apply_trc_type; switch (s->compression) { case EXR_RAW: case EXR_RLE: case EXR_ZIP1: s->scan_lines_per_block = 1; break; case EXR_PXR24: case EXR_ZIP16: s->scan_lines_per_block = 16; break; case EXR_PIZ: case EXR_B44: case EXR_B44A: s->scan_lines_per_block = 32; break; default: avpriv_report_missing_feature(avctx, "Compression %d", s->compression); return AVERROR_PATCHWELCOME; } /* Verify the xmin, xmax, ymin and ymax before setting the actual image size. * It's possible for the data window can larger or outside the display window */ if (s->xmin > s->xmax || s->ymin > s->ymax || s->ydelta == 0xFFFFFFFF || s->xdelta == 0xFFFFFFFF) { av_log(avctx, AV_LOG_ERROR, "Wrong or missing size information.\n"); return AVERROR_INVALIDDATA; } if ((ret = ff_set_dimensions(avctx, s->w, s->h)) < 0) return ret; s->desc = av_pix_fmt_desc_get(avctx->pix_fmt); if (!s->desc) return AVERROR_INVALIDDATA; if (s->desc->flags & AV_PIX_FMT_FLAG_FLOAT) { planes = s->desc->nb_components; out_line_size = avctx->width * 4; } else { planes = 1; out_line_size = avctx->width * 2 * s->desc->nb_components; } if (s->is_tile) { nb_blocks = ((s->xdelta + s->tile_attr.xSize - 1) / s->tile_attr.xSize) * ((s->ydelta + s->tile_attr.ySize - 1) / s->tile_attr.ySize); } else { /* scanline */ nb_blocks = (s->ydelta + s->scan_lines_per_block - 1) / s->scan_lines_per_block; } if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0) return ret; if (bytestream2_get_bytes_left(&s->gb)/8 < nb_blocks) return AVERROR_INVALIDDATA; // check offset table and recreate it if need if (!s->is_tile && bytestream2_peek_le64(&s->gb) == 0) { av_log(s->avctx, AV_LOG_DEBUG, "recreating invalid scanline offset table\n"); start_offset_table = bytestream2_tell(&s->gb); start_next_scanline = start_offset_table + nb_blocks * 8; bytestream2_init_writer(&offset_table_writer, &avpkt->data[start_offset_table], nb_blocks * 8); for (y = 0; y < nb_blocks; y++) { /* write offset of prev scanline in offset table */ bytestream2_put_le64(&offset_table_writer, start_next_scanline); /* get len of next scanline */ bytestream2_seek(&s->gb, start_next_scanline + 4, SEEK_SET);/* skip line number */ start_next_scanline += (bytestream2_get_le32(&s->gb) + 8); } bytestream2_seek(&s->gb, start_offset_table, SEEK_SET); } // save pointer we are going to use in decode_block s->buf = avpkt->data; s->buf_size = avpkt->size; // Zero out the start if ymin is not 0 
for (i = 0; i < planes; i++) { ptr = picture->data[i]; for (y = 0; y < FFMIN(s->ymin, s->h); y++) { memset(ptr, 0, out_line_size); ptr += picture->linesize[i]; } } s->picture = picture; avctx->execute2(avctx, decode_block, s->thread_data, NULL, nb_blocks); ymax = FFMAX(0, s->ymax + 1); // Zero out the end if ymax+1 is not h for (i = 0; i < planes; i++) { ptr = picture->data[i] + (ymax * picture->linesize[i]); for (y = ymax; y < avctx->height; y++) { memset(ptr, 0, out_line_size); ptr += picture->linesize[i]; } } picture->pict_type = AV_PICTURE_TYPE_I; *got_frame = 1; return avpkt->size; } static av_cold int decode_init(AVCodecContext *avctx) { EXRContext *s = avctx->priv_data; uint32_t i; union av_intfloat32 t; float one_gamma = 1.0f / s->gamma; avpriv_trc_function trc_func = NULL; s->avctx = avctx; ff_exrdsp_init(&s->dsp); #if HAVE_BIGENDIAN ff_bswapdsp_init(&s->bbdsp); #endif trc_func = avpriv_get_trc_function_from_trc(s->apply_trc_type); if (trc_func) { for (i = 0; i < 65536; ++i) { t = exr_half2float(i); t.f = trc_func(t.f); s->gamma_table[i] = t; } } else { if (one_gamma > 0.9999f && one_gamma < 1.0001f) { for (i = 0; i < 65536; ++i) { s->gamma_table[i] = exr_half2float(i); } } else { for (i = 0; i < 65536; ++i) { t = exr_half2float(i); /* If negative value we reuse half value */ if (t.f <= 0.0f) { s->gamma_table[i] = t; } else { t.f = powf(t.f, one_gamma); s->gamma_table[i] = t; } } } } // allocate thread data, used for non EXR_RAW compression types s->thread_data = av_mallocz_array(avctx->thread_count, sizeof(EXRThreadData)); if (!s->thread_data) return AVERROR_INVALIDDATA; return 0; } static av_cold int decode_end(AVCodecContext *avctx) { EXRContext *s = avctx->priv_data; int i; for (i = 0; i < avctx->thread_count; i++) { EXRThreadData *td = &s->thread_data[i]; av_freep(&td->uncompressed_data); av_freep(&td->tmp); av_freep(&td->bitmap); av_freep(&td->lut); } av_freep(&s->thread_data); av_freep(&s->channels); return 0; } #define OFFSET(x) offsetof(EXRContext, x) #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM static const AVOption options[] = { { "layer", "Set the decoding layer", OFFSET(layer), AV_OPT_TYPE_STRING, { .str = "" }, 0, 0, VD }, { "gamma", "Set the float gamma value when decoding", OFFSET(gamma), AV_OPT_TYPE_FLOAT, { .dbl = 1.0f }, 0.001, FLT_MAX, VD }, // XXX: Note the abuse of the enum using AVCOL_TRC_UNSPECIFIED to subsume the existing gamma option { "apply_trc", "color transfer characteristics to apply to EXR linear input", OFFSET(apply_trc_type), AV_OPT_TYPE_INT, {.i64 = AVCOL_TRC_UNSPECIFIED }, 1, AVCOL_TRC_NB-1, VD, "apply_trc_type"}, { "bt709", "BT.709", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT709 }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "gamma", "gamma", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_UNSPECIFIED }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "gamma22", "BT.470 M", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_GAMMA22 }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "gamma28", "BT.470 BG", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_GAMMA28 }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "smpte170m", "SMPTE 170 M", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_SMPTE170M }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "smpte240m", "SMPTE 240 M", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_SMPTE240M }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "linear", "Linear", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_LINEAR }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "log", "Log", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_LOG }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { 
"log_sqrt", "Log square root", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_LOG_SQRT }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "iec61966_2_4", "IEC 61966-2-4", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_IEC61966_2_4 }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "bt1361", "BT.1361", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT1361_ECG }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "iec61966_2_1", "IEC 61966-2-1", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_IEC61966_2_1 }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "bt2020_10bit", "BT.2020 - 10 bit", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT2020_10 }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "bt2020_12bit", "BT.2020 - 12 bit", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT2020_12 }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "smpte2084", "SMPTE ST 2084", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_SMPTEST2084 }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "smpte428_1", "SMPTE ST 428-1", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_SMPTEST428_1 }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { NULL }, }; static const AVClass exr_class = { .class_name = "EXR", .item_name = av_default_item_name, .option = options, .version = LIBAVUTIL_VERSION_INT, }; AVCodec ff_exr_decoder = { .name = "exr", .long_name = NULL_IF_CONFIG_SMALL("OpenEXR image"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_EXR, .priv_data_size = sizeof(EXRContext), .init = decode_init, .close = decode_end, .decode = decode_frame, .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS, .priv_class = &exr_class, };
null
/* * OpenEXR (.exr) image decoder * Copyright (c) 2006 Industrial Light & Magic, a division of Lucas Digital Ltd. LLC * Copyright (c) 2009 Jimmy Christensen * * B44/B44A, Tile, UINT32 added by Jokyo Images support by CNC - French National Center for Cinema * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * OpenEXR decoder * @author Jimmy Christensen * * For more information on the OpenEXR format, visit: * http://openexr.com/ * * exr_half2float() is credited to Aaftab Munshi, Dan Ginsburg, Dave Shreiner. */ #include <float.h> #include <zlib.h> #include "libavutil/avassert.h" #include "libavutil/common.h" #include "libavutil/imgutils.h" #include "libavutil/intfloat.h" #include "libavutil/avstring.h" #include "libavutil/opt.h" #include "libavutil/color_utils.h" #include "avcodec.h" #include "bytestream.h" #if HAVE_BIGENDIAN #include "bswapdsp.h" #endif #include "exrdsp.h" #include "get_bits.h" #include "internal.h" #include "mathops.h" #include "thread.h" enum ExrCompr { EXR_RAW, EXR_RLE, EXR_ZIP1, EXR_ZIP16, EXR_PIZ, EXR_PXR24, EXR_B44, EXR_B44A, EXR_DWA, EXR_DWB, EXR_UNKN, }; enum ExrPixelType { EXR_UINT, EXR_HALF, EXR_FLOAT, EXR_UNKNOWN, }; enum ExrTileLevelMode { EXR_TILE_LEVEL_ONE, EXR_TILE_LEVEL_MIPMAP, EXR_TILE_LEVEL_RIPMAP, EXR_TILE_LEVEL_UNKNOWN, }; enum ExrTileLevelRound { EXR_TILE_ROUND_UP, EXR_TILE_ROUND_DOWN, EXR_TILE_ROUND_UNKNOWN, }; typedef struct EXRChannel { int xsub, ysub; enum ExrPixelType pixel_type; } EXRChannel; typedef struct EXRTileAttribute { int32_t xSize; int32_t ySize; enum ExrTileLevelMode level_mode; enum ExrTileLevelRound level_round; } EXRTileAttribute; typedef struct EXRThreadData { uint8_t *uncompressed_data; int uncompressed_size; uint8_t *tmp; int tmp_size; uint8_t *bitmap; uint16_t *lut; int ysize, xsize; int channel_line_size; } EXRThreadData; typedef struct EXRContext { AVClass *class; AVFrame *picture; AVCodecContext *avctx; ExrDSPContext dsp; #if HAVE_BIGENDIAN BswapDSPContext bbdsp; #endif enum ExrCompr compression; enum ExrPixelType pixel_type; int channel_offsets[4]; // 0 = red, 1 = green, 2 = blue and 3 = alpha const AVPixFmtDescriptor *desc; int w, h; int32_t xmax, xmin; int32_t ymax, ymin; uint32_t xdelta, ydelta; int scan_lines_per_block; EXRTileAttribute tile_attr; /* header data attribute of tile */ int is_tile; /* 0 if scanline, 1 if tile */ int is_luma;/* 1 if there is an Y plane */ GetByteContext gb; const uint8_t *buf; int buf_size; EXRChannel *channels; int nb_channels; int current_channel_offset; EXRThreadData *thread_data; const char *layer; enum AVColorTransferCharacteristic apply_trc_type; float gamma; union av_intfloat32 gamma_table[65536]; } EXRContext; /* -15 stored using a single precision bias of 127 */ #define HALF_FLOAT_MIN_BIASED_EXP_AS_SINGLE_FP_EXP 0x38000000 /* max exponent value in single precision that will be converted * to 
Inf or Nan when stored as a half-float */ #define HALF_FLOAT_MAX_BIASED_EXP_AS_SINGLE_FP_EXP 0x47800000 /* 255 is the max exponent biased value */ #define FLOAT_MAX_BIASED_EXP (0xFF << 23) #define HALF_FLOAT_MAX_BIASED_EXP (0x1F << 10) /** * Convert a half float as a uint16_t into a full float. * * @param hf half float as uint16_t * * @return float value */ static union av_intfloat32 exr_half2float(uint16_t hf) { unsigned int sign = (unsigned int) (hf >> 15); unsigned int mantissa = (unsigned int) (hf & ((1 << 10) - 1)); unsigned int exp = (unsigned int) (hf & HALF_FLOAT_MAX_BIASED_EXP); union av_intfloat32 f; if (exp == HALF_FLOAT_MAX_BIASED_EXP) { // we have a half-float NaN or Inf // half-float NaNs will be converted to a single precision NaN // half-float Infs will be converted to a single precision Inf exp = FLOAT_MAX_BIASED_EXP; if (mantissa) mantissa = (1 << 23) - 1; // set all bits to indicate a NaN } else if (exp == 0x0) { // convert half-float zero/denorm to single precision value if (mantissa) { mantissa <<= 1; exp = HALF_FLOAT_MIN_BIASED_EXP_AS_SINGLE_FP_EXP; // check for leading 1 in denorm mantissa while (!(mantissa & (1 << 10))) { // for every leading 0, decrement single precision exponent by 1 // and shift half-float mantissa value to the left mantissa <<= 1; exp -= (1 << 23); } // clamp the mantissa to 10 bits mantissa &= ((1 << 10) - 1); // shift left to generate single-precision mantissa of 23 bits mantissa <<= 13; } } else { // shift left to generate single-precision mantissa of 23 bits mantissa <<= 13; // generate single precision biased exponent value exp = (exp << 13) + HALF_FLOAT_MIN_BIASED_EXP_AS_SINGLE_FP_EXP; } f.i = (sign << 31) | exp | mantissa; return f; } static int zip_uncompress(EXRContext *s, const uint8_t *src, int compressed_size, int uncompressed_size, EXRThreadData *td) { unsigned long dest_len = uncompressed_size; if (uncompress(td->tmp, &dest_len, src, compressed_size) != Z_OK || dest_len != uncompressed_size) return AVERROR_INVALIDDATA; av_assert1(uncompressed_size % 2 == 0); s->dsp.predictor(td->tmp, uncompressed_size); s->dsp.reorder_pixels(td->uncompressed_data, td->tmp, uncompressed_size); return 0; } static int rle_uncompress(EXRContext *ctx, const uint8_t *src, int compressed_size, int uncompressed_size, EXRThreadData *td) { uint8_t *d = td->tmp; const int8_t *s = src; int ssize = compressed_size; int dsize = uncompressed_size; uint8_t *dend = d + dsize; int count; while (ssize > 0) { count = *s++; if (count < 0) { count = -count; if ((dsize -= count) < 0 || (ssize -= count + 1) < 0) return AVERROR_INVALIDDATA; while (count--) *d++ = *s++; } else { count++; if ((dsize -= count) < 0 || (ssize -= 2) < 0) return AVERROR_INVALIDDATA; while (count--) *d++ = *s; s++; } } if (dend != d) return AVERROR_INVALIDDATA; av_assert1(uncompressed_size % 2 == 0); ctx->dsp.predictor(td->tmp, uncompressed_size); ctx->dsp.reorder_pixels(td->uncompressed_data, td->tmp, uncompressed_size); return 0; } #define USHORT_RANGE (1 << 16) #define BITMAP_SIZE (1 << 13) static uint16_t reverse_lut(const uint8_t *bitmap, uint16_t *lut) { int i, k = 0; for (i = 0; i < USHORT_RANGE; i++) if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i; i = k - 1; memset(lut + k, 0, (USHORT_RANGE - k) * 2); return i; } static void apply_lut(const uint16_t *lut, uint16_t *dst, int dsize) { int i; for (i = 0; i < dsize; ++i) dst[i] = lut[dst[i]]; } #define HUF_ENCBITS 16 // literal (value) bit length #define HUF_DECBITS 14 // decoding bit size (>= 8) #define HUF_ENCSIZE ((1 << 
HUF_ENCBITS) + 1) // encoding table size #define HUF_DECSIZE (1 << HUF_DECBITS) // decoding table size #define HUF_DECMASK (HUF_DECSIZE - 1) typedef struct HufDec { int len; int lit; int *p; } HufDec; static void huf_canonical_code_table(uint64_t *hcode) { uint64_t c, n[59] = { 0 }; int i; for (i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1; c = 0; for (i = 58; i > 0; --i) { uint64_t nc = ((c + n[i]) >> 1); n[i] = c; c = nc; } for (i = 0; i < HUF_ENCSIZE; ++i) { int l = hcode[i]; if (l > 0) hcode[i] = l | (n[l]++ << 6); } } #define SHORT_ZEROCODE_RUN 59 #define LONG_ZEROCODE_RUN 63 #define SHORTEST_LONG_RUN (2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN) #define LONGEST_LONG_RUN (255 + SHORTEST_LONG_RUN) static int huf_unpack_enc_table(GetByteContext *gb, int32_t im, int32_t iM, uint64_t *hcode) { GetBitContext gbit; int ret = init_get_bits8(&gbit, gb->buffer, bytestream2_get_bytes_left(gb)); if (ret < 0) return ret; for (; im <= iM; im++) { uint64_t l = hcode[im] = get_bits(&gbit, 6); if (l == LONG_ZEROCODE_RUN) { int zerun = get_bits(&gbit, 8) + SHORTEST_LONG_RUN; if (im + zerun > iM + 1) return AVERROR_INVALIDDATA; while (zerun--) hcode[im++] = 0; im--; } else if (l >= SHORT_ZEROCODE_RUN) { int zerun = l - SHORT_ZEROCODE_RUN + 2; if (im + zerun > iM + 1) return AVERROR_INVALIDDATA; while (zerun--) hcode[im++] = 0; im--; } } bytestream2_skip(gb, (get_bits_count(&gbit) + 7) / 8); huf_canonical_code_table(hcode); return 0; } static int huf_build_dec_table(const uint64_t *hcode, int im, int iM, HufDec *hdecod) { for (; im <= iM; im++) { uint64_t c = hcode[im] >> 6; int i, l = hcode[im] & 63; if (c >> l) return AVERROR_INVALIDDATA; if (l > HUF_DECBITS) { HufDec *pl = hdecod + (c >> (l - HUF_DECBITS)); if (pl->len) return AVERROR_INVALIDDATA; pl->lit++; pl->p = av_realloc(pl->p, pl->lit * sizeof(int)); if (!pl->p) return AVERROR(ENOMEM); pl->p[pl->lit - 1] = im; } else if (l) { HufDec *pl = hdecod + (c << (HUF_DECBITS - l)); for (i = 1 << (HUF_DECBITS - l); i > 0; i--, pl++) { if (pl->len || pl->p) return AVERROR_INVALIDDATA; pl->len = l; pl->lit = im; } } } return 0; } #define get_char(c, lc, gb) \ { \ c = (c << 8) | bytestream2_get_byte(gb); \ lc += 8; \ } #define get_code(po, rlc, c, lc, gb, out, oe, outb) \ { \ if (po == rlc) { \ if (lc < 8) \ get_char(c, lc, gb); \ lc -= 8; \ \ cs = c >> lc; \ \ if (out + cs > oe || out == outb) \ return AVERROR_INVALIDDATA; \ \ s = out[-1]; \ \ while (cs-- > 0) \ *out++ = s; \ } else if (out < oe) { \ *out++ = po; \ } else { \ return AVERROR_INVALIDDATA; \ } \ } static int huf_decode(const uint64_t *hcode, const HufDec *hdecod, GetByteContext *gb, int nbits, int rlc, int no, uint16_t *out) { uint64_t c = 0; uint16_t *outb = out; uint16_t *oe = out + no; const uint8_t *ie = gb->buffer + (nbits + 7) / 8; // input byte size uint8_t cs; uint16_t s; int i, lc = 0; while (gb->buffer < ie) { get_char(c, lc, gb); while (lc >= HUF_DECBITS) { const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK]; if (pl.len) { lc -= pl.len; get_code(pl.lit, rlc, c, lc, gb, out, oe, outb); } else { int j; if (!pl.p) return AVERROR_INVALIDDATA; for (j = 0; j < pl.lit; j++) { int l = hcode[pl.p[j]] & 63; while (lc < l && bytestream2_get_bytes_left(gb) > 0) get_char(c, lc, gb); if (lc >= l) { if ((hcode[pl.p[j]] >> 6) == ((c >> (lc - l)) & ((1LL << l) - 1))) { lc -= l; get_code(pl.p[j], rlc, c, lc, gb, out, oe, outb); break; } } } if (j == pl.lit) return AVERROR_INVALIDDATA; } } } i = (8 - nbits) & 7; c >>= i; lc -= i; while (lc > 0) { const HufDec pl = hdecod[(c << 
(HUF_DECBITS - lc)) & HUF_DECMASK]; if (pl.len && lc >= pl.len) { lc -= pl.len; get_code(pl.lit, rlc, c, lc, gb, out, oe, outb); } else { return AVERROR_INVALIDDATA; } } if (out - outb != no) return AVERROR_INVALIDDATA; return 0; } static int huf_uncompress(GetByteContext *gb, uint16_t *dst, int dst_size) { int32_t src_size, im, iM; uint32_t nBits; uint64_t *freq; HufDec *hdec; int ret, i; src_size = bytestream2_get_le32(gb); im = bytestream2_get_le32(gb); iM = bytestream2_get_le32(gb); bytestream2_skip(gb, 4); nBits = bytestream2_get_le32(gb); if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE || src_size < 0) return AVERROR_INVALIDDATA; bytestream2_skip(gb, 4); freq = av_mallocz_array(HUF_ENCSIZE, sizeof(*freq)); hdec = av_mallocz_array(HUF_DECSIZE, sizeof(*hdec)); if (!freq || !hdec) { ret = AVERROR(ENOMEM); goto fail; } if ((ret = huf_unpack_enc_table(gb, im, iM, freq)) < 0) goto fail; if (nBits > 8 * bytestream2_get_bytes_left(gb)) { ret = AVERROR_INVALIDDATA; goto fail; } if ((ret = huf_build_dec_table(freq, im, iM, hdec)) < 0) goto fail; ret = huf_decode(freq, hdec, gb, nBits, iM, dst_size, dst); fail: for (i = 0; i < HUF_DECSIZE; i++) if (hdec) av_freep(&hdec[i].p); av_free(freq); av_free(hdec); return ret; } static inline void wdec14(uint16_t l, uint16_t h, uint16_t *a, uint16_t *b) { int16_t ls = l; int16_t hs = h; int hi = hs; int ai = ls + (hi & 1) + (hi >> 1); int16_t as = ai; int16_t bs = ai - hi; *a = as; *b = bs; } #define NBITS 16 #define A_OFFSET (1 << (NBITS - 1)) #define MOD_MASK ((1 << NBITS) - 1) static inline void wdec16(uint16_t l, uint16_t h, uint16_t *a, uint16_t *b) { int m = l; int d = h; int bb = (m - (d >> 1)) & MOD_MASK; int aa = (d + bb - A_OFFSET) & MOD_MASK; *b = bb; *a = aa; } static void wav_decode(uint16_t *in, int nx, int ox, int ny, int oy, uint16_t mx) { int w14 = (mx < (1 << 14)); int n = (nx > ny) ? 
ny : nx; int p = 1; int p2; while (p <= n) p <<= 1; p >>= 1; p2 = p; p >>= 1; while (p >= 1) { uint16_t *py = in; uint16_t *ey = in + oy * (ny - p2); uint16_t i00, i01, i10, i11; int oy1 = oy * p; int oy2 = oy * p2; int ox1 = ox * p; int ox2 = ox * p2; for (; py <= ey; py += oy2) { uint16_t *px = py; uint16_t *ex = py + ox * (nx - p2); for (; px <= ex; px += ox2) { uint16_t *p01 = px + ox1; uint16_t *p10 = px + oy1; uint16_t *p11 = p10 + ox1; if (w14) { wdec14(*px, *p10, &i00, &i10); wdec14(*p01, *p11, &i01, &i11); wdec14(i00, i01, px, p01); wdec14(i10, i11, p10, p11); } else { wdec16(*px, *p10, &i00, &i10); wdec16(*p01, *p11, &i01, &i11); wdec16(i00, i01, px, p01); wdec16(i10, i11, p10, p11); } } if (nx & p) { uint16_t *p10 = px + oy1; if (w14) wdec14(*px, *p10, &i00, p10); else wdec16(*px, *p10, &i00, p10); *px = i00; } } if (ny & p) { uint16_t *px = py; uint16_t *ex = py + ox * (nx - p2); for (; px <= ex; px += ox2) { uint16_t *p01 = px + ox1; if (w14) wdec14(*px, *p01, &i00, p01); else wdec16(*px, *p01, &i00, p01); *px = i00; } } p2 = p; p >>= 1; } } static int piz_uncompress(EXRContext *s, const uint8_t *src, int ssize, int dsize, EXRThreadData *td) { GetByteContext gb; uint16_t maxval, min_non_zero, max_non_zero; uint16_t *ptr; uint16_t *tmp = (uint16_t *)td->tmp; uint16_t *out; uint16_t *in; int ret, i, j; int pixel_half_size;/* 1 for half, 2 for float and uint32 */ EXRChannel *channel; int tmp_offset; if (!td->bitmap) td->bitmap = av_malloc(BITMAP_SIZE); if (!td->lut) td->lut = av_malloc(1 << 17); if (!td->bitmap || !td->lut) { av_freep(&td->bitmap); av_freep(&td->lut); return AVERROR(ENOMEM); } bytestream2_init(&gb, src, ssize); min_non_zero = bytestream2_get_le16(&gb); max_non_zero = bytestream2_get_le16(&gb); if (max_non_zero >= BITMAP_SIZE) return AVERROR_INVALIDDATA; memset(td->bitmap, 0, FFMIN(min_non_zero, BITMAP_SIZE)); if (min_non_zero <= max_non_zero) bytestream2_get_buffer(&gb, td->bitmap + min_non_zero, max_non_zero - min_non_zero + 1); memset(td->bitmap + max_non_zero + 1, 0, BITMAP_SIZE - max_non_zero - 1); maxval = reverse_lut(td->bitmap, td->lut); ret = huf_uncompress(&gb, tmp, dsize / sizeof(uint16_t)); if (ret) return ret; ptr = tmp; for (i = 0; i < s->nb_channels; i++) { channel = &s->channels[i]; if (channel->pixel_type == EXR_HALF) pixel_half_size = 1; else pixel_half_size = 2; for (j = 0; j < pixel_half_size; j++) wav_decode(ptr + j, td->xsize, pixel_half_size, td->ysize, td->xsize * pixel_half_size, maxval); ptr += td->xsize * td->ysize * pixel_half_size; } apply_lut(td->lut, tmp, dsize / sizeof(uint16_t)); out = (uint16_t *)td->uncompressed_data; for (i = 0; i < td->ysize; i++) { tmp_offset = 0; for (j = 0; j < s->nb_channels; j++) { channel = &s->channels[j]; if (channel->pixel_type == EXR_HALF) pixel_half_size = 1; else pixel_half_size = 2; in = tmp + tmp_offset * td->xsize * td->ysize + i * td->xsize * pixel_half_size; tmp_offset += pixel_half_size; #if HAVE_BIGENDIAN s->bbdsp.bswap16_buf(out, in, td->xsize * pixel_half_size); #else memcpy(out, in, td->xsize * 2 * pixel_half_size); #endif out += td->xsize * pixel_half_size; } } return 0; } static int pxr24_uncompress(EXRContext *s, const uint8_t *src, int compressed_size, int uncompressed_size, EXRThreadData *td) { unsigned long dest_len, expected_len = 0; const uint8_t *in = td->tmp; uint8_t *out; int c, i, j; for (i = 0; i < s->nb_channels; i++) { if (s->channels[i].pixel_type == EXR_FLOAT) { expected_len += (td->xsize * td->ysize * 3);/* PRX 24 store float in 24 bit instead of 32 */ } else if 
(s->channels[i].pixel_type == EXR_HALF) { expected_len += (td->xsize * td->ysize * 2); } else {//UINT 32 expected_len += (td->xsize * td->ysize * 4); } } dest_len = expected_len; if (uncompress(td->tmp, &dest_len, src, compressed_size) != Z_OK) { return AVERROR_INVALIDDATA; } else if (dest_len != expected_len) { return AVERROR_INVALIDDATA; } out = td->uncompressed_data; for (i = 0; i < td->ysize; i++) for (c = 0; c < s->nb_channels; c++) { EXRChannel *channel = &s->channels[c]; const uint8_t *ptr[4]; uint32_t pixel = 0; switch (channel->pixel_type) { case EXR_FLOAT: ptr[0] = in; ptr[1] = ptr[0] + td->xsize; ptr[2] = ptr[1] + td->xsize; in = ptr[2] + td->xsize; for (j = 0; j < td->xsize; ++j) { uint32_t diff = ((unsigned)*(ptr[0]++) << 24) | (*(ptr[1]++) << 16) | (*(ptr[2]++) << 8); pixel += diff; bytestream_put_le32(&out, pixel); } break; case EXR_HALF: ptr[0] = in; ptr[1] = ptr[0] + td->xsize; in = ptr[1] + td->xsize; for (j = 0; j < td->xsize; j++) { uint32_t diff = (*(ptr[0]++) << 8) | *(ptr[1]++); pixel += diff; bytestream_put_le16(&out, pixel); } break; case EXR_UINT: ptr[0] = in; ptr[1] = ptr[0] + s->xdelta; ptr[2] = ptr[1] + s->xdelta; ptr[3] = ptr[2] + s->xdelta; in = ptr[3] + s->xdelta; for (j = 0; j < s->xdelta; ++j) { uint32_t diff = ((uint32_t)*(ptr[0]++) << 24) | (*(ptr[1]++) << 16) | (*(ptr[2]++) << 8 ) | (*(ptr[3]++)); pixel += diff; bytestream_put_le32(&out, pixel); } break; default: return AVERROR_INVALIDDATA; } } return 0; } static void unpack_14(const uint8_t b[14], uint16_t s[16]) { unsigned short shift = (b[ 2] >> 2) & 15; unsigned short bias = (0x20 << shift); int i; s[ 0] = (b[0] << 8) | b[1]; s[ 4] = s[ 0] + ((((b[ 2] << 4) | (b[ 3] >> 4)) & 0x3f) << shift) - bias; s[ 8] = s[ 4] + ((((b[ 3] << 2) | (b[ 4] >> 6)) & 0x3f) << shift) - bias; s[12] = s[ 8] + ((b[ 4] & 0x3f) << shift) - bias; s[ 1] = s[ 0] + ((b[ 5] >> 2) << shift) - bias; s[ 5] = s[ 4] + ((((b[ 5] << 4) | (b[ 6] >> 4)) & 0x3f) << shift) - bias; s[ 9] = s[ 8] + ((((b[ 6] << 2) | (b[ 7] >> 6)) & 0x3f) << shift) - bias; s[13] = s[12] + ((b[ 7] & 0x3f) << shift) - bias; s[ 2] = s[ 1] + ((b[ 8] >> 2) << shift) - bias; s[ 6] = s[ 5] + ((((b[ 8] << 4) | (b[ 9] >> 4)) & 0x3f) << shift) - bias; s[10] = s[ 9] + ((((b[ 9] << 2) | (b[10] >> 6)) & 0x3f) << shift) - bias; s[14] = s[13] + ((b[10] & 0x3f) << shift) - bias; s[ 3] = s[ 2] + ((b[11] >> 2) << shift) - bias; s[ 7] = s[ 6] + ((((b[11] << 4) | (b[12] >> 4)) & 0x3f) << shift) - bias; s[11] = s[10] + ((((b[12] << 2) | (b[13] >> 6)) & 0x3f) << shift) - bias; s[15] = s[14] + ((b[13] & 0x3f) << shift) - bias; for (i = 0; i < 16; ++i) { if (s[i] & 0x8000) s[i] &= 0x7fff; else s[i] = ~s[i]; } } static void unpack_3(const uint8_t b[3], uint16_t s[16]) { int i; s[0] = (b[0] << 8) | b[1]; if (s[0] & 0x8000) s[0] &= 0x7fff; else s[0] = ~s[0]; for (i = 1; i < 16; i++) s[i] = s[0]; } static int b44_uncompress(EXRContext *s, const uint8_t *src, int compressed_size, int uncompressed_size, EXRThreadData *td) { const int8_t *sr = src; int stay_to_uncompress = compressed_size; int nb_b44_block_w, nb_b44_block_h; int index_tl_x, index_tl_y, index_out, index_tmp; uint16_t tmp_buffer[16]; /* B44 use 4x4 half float pixel */ int c, iY, iX, y, x; int target_channel_offset = 0; /* calc B44 block count */ nb_b44_block_w = td->xsize / 4; if ((td->xsize % 4) != 0) nb_b44_block_w++; nb_b44_block_h = td->ysize / 4; if ((td->ysize % 4) != 0) nb_b44_block_h++; for (c = 0; c < s->nb_channels; c++) { if (s->channels[c].pixel_type == EXR_HALF) {/* B44 only compress half float data */ for (iY = 
0; iY < nb_b44_block_h; iY++) { for (iX = 0; iX < nb_b44_block_w; iX++) {/* For each B44 block */ if (stay_to_uncompress < 3) { av_log(s, AV_LOG_ERROR, "Not enough data for B44A block: %d", stay_to_uncompress); return AVERROR_INVALIDDATA; } if (src[compressed_size - stay_to_uncompress + 2] == 0xfc) { /* B44A block */ unpack_3(sr, tmp_buffer); sr += 3; stay_to_uncompress -= 3; } else {/* B44 Block */ if (stay_to_uncompress < 14) { av_log(s, AV_LOG_ERROR, "Not enough data for B44 block: %d", stay_to_uncompress); return AVERROR_INVALIDDATA; } unpack_14(sr, tmp_buffer); sr += 14; stay_to_uncompress -= 14; } /* copy data to uncompress buffer (B44 block can exceed target resolution)*/ index_tl_x = iX * 4; index_tl_y = iY * 4; for (y = index_tl_y; y < FFMIN(index_tl_y + 4, td->ysize); y++) { for (x = index_tl_x; x < FFMIN(index_tl_x + 4, td->xsize); x++) { index_out = target_channel_offset * td->xsize + y * td->channel_line_size + 2 * x; index_tmp = (y-index_tl_y) * 4 + (x-index_tl_x); td->uncompressed_data[index_out] = tmp_buffer[index_tmp] & 0xff; td->uncompressed_data[index_out + 1] = tmp_buffer[index_tmp] >> 8; } } } } target_channel_offset += 2; } else {/* Float or UINT 32 channel */ if (stay_to_uncompress < td->ysize * td->xsize * 4) { av_log(s, AV_LOG_ERROR, "Not enough data for uncompress channel: %d", stay_to_uncompress); return AVERROR_INVALIDDATA; } for (y = 0; y < td->ysize; y++) { index_out = target_channel_offset * td->xsize + y * td->channel_line_size; memcpy(&td->uncompressed_data[index_out], sr, td->xsize * 4); sr += td->xsize * 4; } target_channel_offset += 4; stay_to_uncompress -= td->ysize * td->xsize * 4; } } return 0; } static int decode_block(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr) { EXRContext *s = avctx->priv_data; AVFrame *const p = s->picture; EXRThreadData *td = &s->thread_data[threadnr]; const uint8_t *channel_buffer[4] = { 0 }; const uint8_t *buf = s->buf; uint64_t line_offset, uncompressed_size; uint8_t *ptr; uint32_t data_size; int line, col = 0; uint64_t tile_x, tile_y, tile_level_x, tile_level_y; const uint8_t *src; int step = s->desc->flags & AV_PIX_FMT_FLAG_FLOAT ? 
4 : 2 * s->desc->nb_components; int bxmin = 0, axmax = 0, window_xoffset = 0; int window_xmin, window_xmax, window_ymin, window_ymax; int data_xoffset, data_yoffset, data_window_offset, xsize, ysize; int i, x, buf_size = s->buf_size; int c, rgb_channel_count; float one_gamma = 1.0f / s->gamma; avpriv_trc_function trc_func = avpriv_get_trc_function_from_trc(s->apply_trc_type); int ret; line_offset = AV_RL64(s->gb.buffer + jobnr * 8); if (s->is_tile) { if (buf_size < 20 || line_offset > buf_size - 20) return AVERROR_INVALIDDATA; src = buf + line_offset + 20; tile_x = AV_RL32(src - 20); tile_y = AV_RL32(src - 16); tile_level_x = AV_RL32(src - 12); tile_level_y = AV_RL32(src - 8); data_size = AV_RL32(src - 4); if (data_size <= 0 || data_size > buf_size - line_offset - 20) return AVERROR_INVALIDDATA; if (tile_level_x || tile_level_y) { /* tile level, is not the full res level */ avpriv_report_missing_feature(s->avctx, "Subres tile before full res tile"); return AVERROR_PATCHWELCOME; } line = s->ymin + s->tile_attr.ySize * tile_y; col = s->tile_attr.xSize * tile_x; if (line < s->ymin || line > s->ymax || s->xmin + col < s->xmin || s->xmin + col > s->xmax) return AVERROR_INVALIDDATA; td->ysize = FFMIN(s->tile_attr.ySize, s->ydelta - tile_y * s->tile_attr.ySize); td->xsize = FFMIN(s->tile_attr.xSize, s->xdelta - tile_x * s->tile_attr.xSize); if (td->xsize * (uint64_t)s->current_channel_offset > INT_MAX) return AVERROR_INVALIDDATA; td->channel_line_size = td->xsize * s->current_channel_offset;/* uncompress size of one line */ uncompressed_size = td->channel_line_size * (uint64_t)td->ysize;/* uncompress size of the block */ } else { if (buf_size < 8 || line_offset > buf_size - 8) return AVERROR_INVALIDDATA; src = buf + line_offset + 8; line = AV_RL32(src - 8); if (line < s->ymin || line > s->ymax) return AVERROR_INVALIDDATA; data_size = AV_RL32(src - 4); if (data_size <= 0 || data_size > buf_size - line_offset - 8) return AVERROR_INVALIDDATA; td->ysize = FFMIN(s->scan_lines_per_block, s->ymax - line + 1); /* s->ydelta - line ?? 
*/ td->xsize = s->xdelta; if (td->xsize * (uint64_t)s->current_channel_offset > INT_MAX) return AVERROR_INVALIDDATA; td->channel_line_size = td->xsize * s->current_channel_offset;/* uncompress size of one line */ uncompressed_size = td->channel_line_size * (uint64_t)td->ysize;/* uncompress size of the block */ if ((s->compression == EXR_RAW && (data_size != uncompressed_size || line_offset > buf_size - uncompressed_size)) || (s->compression != EXR_RAW && (data_size > uncompressed_size || line_offset > buf_size - data_size))) { return AVERROR_INVALIDDATA; } } window_xmin = FFMIN(avctx->width, FFMAX(0, s->xmin + col)); window_xmax = FFMIN(avctx->width, FFMAX(0, s->xmin + col + td->xsize)); window_ymin = FFMIN(avctx->height, FFMAX(0, line )); window_ymax = FFMIN(avctx->height, FFMAX(0, line + td->ysize)); xsize = window_xmax - window_xmin; ysize = window_ymax - window_ymin; /* tile or scanline not visible skip decoding */ if (xsize <= 0 || ysize <= 0) return 0; /* is the first tile or is a scanline */ if(col == 0) { window_xmin = 0; /* pixels to add at the left of the display window */ window_xoffset = FFMAX(0, s->xmin); /* bytes to add at the left of the display window */ bxmin = window_xoffset * step; } /* is the last tile or is a scanline */ if(col + td->xsize == s->xdelta) { window_xmax = avctx->width; /* bytes to add at the right of the display window */ axmax = FFMAX(0, (avctx->width - (s->xmax + 1))) * step; } if (data_size < uncompressed_size || s->is_tile) { /* td->tmp is use for tile reorganization */ av_fast_padded_malloc(&td->tmp, &td->tmp_size, uncompressed_size); if (!td->tmp) return AVERROR(ENOMEM); } if (data_size < uncompressed_size) { av_fast_padded_malloc(&td->uncompressed_data, &td->uncompressed_size, uncompressed_size + 64);/* Force 64 padding for AVX2 reorder_pixels dst */ if (!td->uncompressed_data) return AVERROR(ENOMEM); ret = AVERROR_INVALIDDATA; switch (s->compression) { case EXR_ZIP1: case EXR_ZIP16: ret = zip_uncompress(s, src, data_size, uncompressed_size, td); break; case EXR_PIZ: ret = piz_uncompress(s, src, data_size, uncompressed_size, td); break; case EXR_PXR24: ret = pxr24_uncompress(s, src, data_size, uncompressed_size, td); break; case EXR_RLE: ret = rle_uncompress(s, src, data_size, uncompressed_size, td); break; case EXR_B44: case EXR_B44A: ret = b44_uncompress(s, src, data_size, uncompressed_size, td); break; } if (ret < 0) { av_log(avctx, AV_LOG_ERROR, "decode_block() failed.\n"); return ret; } src = td->uncompressed_data; } /* offsets to crop data outside display window */ data_xoffset = FFABS(FFMIN(0, s->xmin + col)) * (s->pixel_type == EXR_HALF ? 2 : 4); data_yoffset = FFABS(FFMIN(0, line)); data_window_offset = (data_yoffset * td->channel_line_size) + data_xoffset; if (!s->is_luma) { channel_buffer[0] = src + (td->xsize * s->channel_offsets[0]) + data_window_offset; channel_buffer[1] = src + (td->xsize * s->channel_offsets[1]) + data_window_offset; channel_buffer[2] = src + (td->xsize * s->channel_offsets[2]) + data_window_offset; rgb_channel_count = 3; } else { /* put y data in the first channel_buffer */ channel_buffer[0] = src + (td->xsize * s->channel_offsets[1]) + data_window_offset; rgb_channel_count = 1; } if (s->channel_offsets[3] >= 0) channel_buffer[3] = src + (td->xsize * s->channel_offsets[3]) + data_window_offset; if (s->desc->flags & AV_PIX_FMT_FLAG_FLOAT) { /* todo: change this when a floating point pixel format with luma with alpha is implemented */ int channel_count = s->channel_offsets[3] >= 0 ? 
4 : rgb_channel_count; if (s->is_luma) { channel_buffer[1] = channel_buffer[0]; channel_buffer[2] = channel_buffer[0]; } for (c = 0; c < channel_count; c++) { int plane = s->desc->comp[c].plane; ptr = p->data[plane] + window_ymin * p->linesize[plane] + (window_xmin * 4); for (i = 0; i < ysize; i++, ptr += p->linesize[plane]) { const uint8_t *src; union av_intfloat32 *ptr_x; src = channel_buffer[c]; ptr_x = (union av_intfloat32 *)ptr; // Zero out the start if xmin is not 0 memset(ptr_x, 0, bxmin); ptr_x += window_xoffset; if (s->pixel_type == EXR_FLOAT) { // 32-bit union av_intfloat32 t; if (trc_func && c < 3) { for (x = 0; x < xsize; x++) { t.i = bytestream_get_le32(&src); t.f = trc_func(t.f); *ptr_x++ = t; } } else { for (x = 0; x < xsize; x++) { t.i = bytestream_get_le32(&src); if (t.f > 0.0f && c < 3) /* avoid negative values */ t.f = powf(t.f, one_gamma); *ptr_x++ = t; } } } else if (s->pixel_type == EXR_HALF) { // 16-bit if (c < 3 || !trc_func) { for (x = 0; x < xsize; x++) { *ptr_x++ = s->gamma_table[bytestream_get_le16(&src)]; } } else { for (x = 0; x < xsize; x++) { *ptr_x++ = exr_half2float(bytestream_get_le16(&src));; } } } // Zero out the end if xmax+1 is not w memset(ptr_x, 0, axmax); channel_buffer[c] += td->channel_line_size; } } } else { av_assert1(s->pixel_type == EXR_UINT); ptr = p->data[0] + window_ymin * p->linesize[0] + (window_xmin * s->desc->nb_components * 2); for (i = 0; i < ysize; i++, ptr += p->linesize[0]) { const uint8_t * a; const uint8_t *rgb[3]; uint16_t *ptr_x; for (c = 0; c < rgb_channel_count; c++) { rgb[c] = channel_buffer[c]; } if (channel_buffer[3]) a = channel_buffer[3]; ptr_x = (uint16_t *) ptr; // Zero out the start if xmin is not 0 memset(ptr_x, 0, bxmin); ptr_x += window_xoffset * s->desc->nb_components; for (x = 0; x < xsize; x++) { for (c = 0; c < rgb_channel_count; c++) { *ptr_x++ = bytestream_get_le32(&rgb[c]) >> 16; } if (channel_buffer[3]) *ptr_x++ = bytestream_get_le32(&a) >> 16; } // Zero out the end if xmax+1 is not w memset(ptr_x, 0, axmax); channel_buffer[0] += td->channel_line_size; channel_buffer[1] += td->channel_line_size; channel_buffer[2] += td->channel_line_size; if (channel_buffer[3]) channel_buffer[3] += td->channel_line_size; } } return 0; } /** * Check if the variable name corresponds to its data type. 
* * @param s the EXRContext * @param value_name name of the variable to check * @param value_type type of the variable to check * @param minimum_length minimum length of the variable data * * @return bytes to read containing variable data * -1 if variable is not found * 0 if buffer ended prematurely */ static int check_header_variable(EXRContext *s, const char *value_name, const char *value_type, unsigned int minimum_length) { int var_size = -1; if (bytestream2_get_bytes_left(&s->gb) >= minimum_length && !strcmp(s->gb.buffer, value_name)) { // found value_name, jump to value_type (null terminated strings) s->gb.buffer += strlen(value_name) + 1; if (!strcmp(s->gb.buffer, value_type)) { s->gb.buffer += strlen(value_type) + 1; var_size = bytestream2_get_le32(&s->gb); // don't go read past boundaries if (var_size > bytestream2_get_bytes_left(&s->gb)) var_size = 0; } else { // value_type not found, reset the buffer s->gb.buffer -= strlen(value_name) + 1; av_log(s->avctx, AV_LOG_WARNING, "Unknown data type %s for header variable %s.\n", value_type, value_name); } } return var_size; } static int decode_header(EXRContext *s, AVFrame *frame) { AVDictionary *metadata = NULL; int magic_number, version, i, flags, sar = 0; int layer_match = 0; int ret; int dup_channels = 0; s->current_channel_offset = 0; s->xmin = ~0; s->xmax = ~0; s->ymin = ~0; s->ymax = ~0; s->xdelta = ~0; s->ydelta = ~0; s->channel_offsets[0] = -1; s->channel_offsets[1] = -1; s->channel_offsets[2] = -1; s->channel_offsets[3] = -1; s->pixel_type = EXR_UNKNOWN; s->compression = EXR_UNKN; s->nb_channels = 0; s->w = 0; s->h = 0; s->tile_attr.xSize = -1; s->tile_attr.ySize = -1; s->is_tile = 0; s->is_luma = 0; if (bytestream2_get_bytes_left(&s->gb) < 10) { av_log(s->avctx, AV_LOG_ERROR, "Header too short to parse.\n"); return AVERROR_INVALIDDATA; } magic_number = bytestream2_get_le32(&s->gb); if (magic_number != 20000630) { /* As per documentation of OpenEXR, it is supposed to be * int 20000630 little-endian */ av_log(s->avctx, AV_LOG_ERROR, "Wrong magic number %d.\n", magic_number); return AVERROR_INVALIDDATA; } version = bytestream2_get_byte(&s->gb); if (version != 2) { avpriv_report_missing_feature(s->avctx, "Version %d", version); return AVERROR_PATCHWELCOME; } flags = bytestream2_get_le24(&s->gb); if (flags & 0x02) s->is_tile = 1; if (flags & 0x08) { avpriv_report_missing_feature(s->avctx, "deep data"); return AVERROR_PATCHWELCOME; } if (flags & 0x10) { avpriv_report_missing_feature(s->avctx, "multipart"); return AVERROR_PATCHWELCOME; } // Parse the header while (bytestream2_get_bytes_left(&s->gb) > 0 && *s->gb.buffer) { int var_size; if ((var_size = check_header_variable(s, "channels", "chlist", 38)) >= 0) { GetByteContext ch_gb; if (!var_size) { ret = AVERROR_INVALIDDATA; goto fail; } bytestream2_init(&ch_gb, s->gb.buffer, var_size); while (bytestream2_get_bytes_left(&ch_gb) >= 19) { EXRChannel *channel; enum ExrPixelType current_pixel_type; int channel_index = -1; int xsub, ysub; if (strcmp(s->layer, "") != 0) { if (strncmp(ch_gb.buffer, s->layer, strlen(s->layer)) == 0) { layer_match = 1; av_log(s->avctx, AV_LOG_INFO, "Channel match layer : %s.\n", ch_gb.buffer); ch_gb.buffer += strlen(s->layer); if (*ch_gb.buffer == '.') ch_gb.buffer++; /* skip dot if not given */ } else { layer_match = 0; av_log(s->avctx, AV_LOG_INFO, "Channel doesn't match layer : %s.\n", ch_gb.buffer); } } else { layer_match = 1; } if (layer_match) { /* only search channel if the layer match is valid */ if (!av_strcasecmp(ch_gb.buffer, "R") || 
!av_strcasecmp(ch_gb.buffer, "X") || !av_strcasecmp(ch_gb.buffer, "U")) { channel_index = 0; s->is_luma = 0; } else if (!av_strcasecmp(ch_gb.buffer, "G") || !av_strcasecmp(ch_gb.buffer, "V")) { channel_index = 1; s->is_luma = 0; } else if (!av_strcasecmp(ch_gb.buffer, "Y")) { channel_index = 1; s->is_luma = 1; } else if (!av_strcasecmp(ch_gb.buffer, "B") || !av_strcasecmp(ch_gb.buffer, "Z") || !av_strcasecmp(ch_gb.buffer, "W")) { channel_index = 2; s->is_luma = 0; } else if (!av_strcasecmp(ch_gb.buffer, "A")) { channel_index = 3; } else { av_log(s->avctx, AV_LOG_WARNING, "Unsupported channel %.256s.\n", ch_gb.buffer); } } /* skip until you get a 0 */ while (bytestream2_get_bytes_left(&ch_gb) > 0 && bytestream2_get_byte(&ch_gb)) continue; if (bytestream2_get_bytes_left(&ch_gb) < 4) { av_log(s->avctx, AV_LOG_ERROR, "Incomplete header.\n"); ret = AVERROR_INVALIDDATA; goto fail; } current_pixel_type = bytestream2_get_le32(&ch_gb); if (current_pixel_type >= EXR_UNKNOWN) { avpriv_report_missing_feature(s->avctx, "Pixel type %d", current_pixel_type); ret = AVERROR_PATCHWELCOME; goto fail; } bytestream2_skip(&ch_gb, 4); xsub = bytestream2_get_le32(&ch_gb); ysub = bytestream2_get_le32(&ch_gb); if (xsub != 1 || ysub != 1) { avpriv_report_missing_feature(s->avctx, "Subsampling %dx%d", xsub, ysub); ret = AVERROR_PATCHWELCOME; goto fail; } if (channel_index >= 0 && s->channel_offsets[channel_index] == -1) { /* channel has not been previously assigned */ if (s->pixel_type != EXR_UNKNOWN && s->pixel_type != current_pixel_type) { av_log(s->avctx, AV_LOG_ERROR, "RGB channels not of the same depth.\n"); ret = AVERROR_INVALIDDATA; goto fail; } s->pixel_type = current_pixel_type; s->channel_offsets[channel_index] = s->current_channel_offset; } else if (channel_index >= 0) { av_log(s->avctx, AV_LOG_WARNING, "Multiple channels with index %d.\n", channel_index); if (++dup_channels > 10) { ret = AVERROR_INVALIDDATA; goto fail; } } s->channels = av_realloc(s->channels, ++s->nb_channels * sizeof(EXRChannel)); if (!s->channels) { ret = AVERROR(ENOMEM); goto fail; } channel = &s->channels[s->nb_channels - 1]; channel->pixel_type = current_pixel_type; channel->xsub = xsub; channel->ysub = ysub; if (current_pixel_type == EXR_HALF) { s->current_channel_offset += 2; } else {/* Float or UINT32 */ s->current_channel_offset += 4; } } /* Check if all channels are set with an offset or if the channels * are causing an overflow */ if (!s->is_luma) {/* if we expected to have at least 3 channels */ if (FFMIN3(s->channel_offsets[0], s->channel_offsets[1], s->channel_offsets[2]) < 0) { if (s->channel_offsets[0] < 0) av_log(s->avctx, AV_LOG_ERROR, "Missing red channel.\n"); if (s->channel_offsets[1] < 0) av_log(s->avctx, AV_LOG_ERROR, "Missing green channel.\n"); if (s->channel_offsets[2] < 0) av_log(s->avctx, AV_LOG_ERROR, "Missing blue channel.\n"); ret = AVERROR_INVALIDDATA; goto fail; } } // skip one last byte and update main gb s->gb.buffer = ch_gb.buffer + 1; continue; } else if ((var_size = check_header_variable(s, "dataWindow", "box2i", 31)) >= 0) { int xmin, ymin, xmax, ymax; if (!var_size) { ret = AVERROR_INVALIDDATA; goto fail; } xmin = bytestream2_get_le32(&s->gb); ymin = bytestream2_get_le32(&s->gb); xmax = bytestream2_get_le32(&s->gb); ymax = bytestream2_get_le32(&s->gb); if (xmin > xmax || ymin > ymax || (unsigned)xmax - xmin >= INT_MAX || (unsigned)ymax - ymin >= INT_MAX) { ret = AVERROR_INVALIDDATA; goto fail; } s->xmin = xmin; s->xmax = xmax; s->ymin = ymin; s->ymax = ymax; s->xdelta = (s->xmax - s->xmin) + 1; 
s->ydelta = (s->ymax - s->ymin) + 1; continue; } else if ((var_size = check_header_variable(s, "displayWindow", "box2i", 34)) >= 0) { if (!var_size) { ret = AVERROR_INVALIDDATA; goto fail; } bytestream2_skip(&s->gb, 8); s->w = bytestream2_get_le32(&s->gb) + 1; s->h = bytestream2_get_le32(&s->gb) + 1; continue; } else if ((var_size = check_header_variable(s, "lineOrder", "lineOrder", 25)) >= 0) { int line_order; if (!var_size) { ret = AVERROR_INVALIDDATA; goto fail; } line_order = bytestream2_get_byte(&s->gb); av_log(s->avctx, AV_LOG_DEBUG, "line order: %d.\n", line_order); if (line_order > 2) { av_log(s->avctx, AV_LOG_ERROR, "Unknown line order.\n"); ret = AVERROR_INVALIDDATA; goto fail; } continue; } else if ((var_size = check_header_variable(s, "pixelAspectRatio", "float", 31)) >= 0) { if (!var_size) { ret = AVERROR_INVALIDDATA; goto fail; } sar = bytestream2_get_le32(&s->gb); continue; } else if ((var_size = check_header_variable(s, "compression", "compression", 29)) >= 0) { if (!var_size) { ret = AVERROR_INVALIDDATA; goto fail; } if (s->compression == EXR_UNKN) s->compression = bytestream2_get_byte(&s->gb); else av_log(s->avctx, AV_LOG_WARNING, "Found more than one compression attribute.\n"); continue; } else if ((var_size = check_header_variable(s, "tiles", "tiledesc", 22)) >= 0) { char tileLevel; if (!s->is_tile) av_log(s->avctx, AV_LOG_WARNING, "Found tile attribute and scanline flags. Exr will be interpreted as scanline.\n"); s->tile_attr.xSize = bytestream2_get_le32(&s->gb); s->tile_attr.ySize = bytestream2_get_le32(&s->gb); tileLevel = bytestream2_get_byte(&s->gb); s->tile_attr.level_mode = tileLevel & 0x0f; s->tile_attr.level_round = (tileLevel >> 4) & 0x0f; if (s->tile_attr.level_mode >= EXR_TILE_LEVEL_UNKNOWN) { avpriv_report_missing_feature(s->avctx, "Tile level mode %d", s->tile_attr.level_mode); ret = AVERROR_PATCHWELCOME; goto fail; } if (s->tile_attr.level_round >= EXR_TILE_ROUND_UNKNOWN) { avpriv_report_missing_feature(s->avctx, "Tile level round %d", s->tile_attr.level_round); ret = AVERROR_PATCHWELCOME; goto fail; } continue; } else if ((var_size = check_header_variable(s, "writer", "string", 1)) >= 0) { uint8_t key[256] = { 0 }; bytestream2_get_buffer(&s->gb, key, FFMIN(sizeof(key) - 1, var_size)); av_dict_set(&metadata, "writer", key, 0); continue; } // Check if there are enough bytes for a header if (bytestream2_get_bytes_left(&s->gb) <= 9) { av_log(s->avctx, AV_LOG_ERROR, "Incomplete header\n"); ret = AVERROR_INVALIDDATA; goto fail; } // Process unknown variables for (i = 0; i < 2; i++) // value_name and value_type while (bytestream2_get_byte(&s->gb) != 0); // Skip variable length bytestream2_skip(&s->gb, bytestream2_get_le32(&s->gb)); } ff_set_sar(s->avctx, av_d2q(av_int2float(sar), 255)); if (s->compression == EXR_UNKN) { av_log(s->avctx, AV_LOG_ERROR, "Missing compression attribute.\n"); ret = AVERROR_INVALIDDATA; goto fail; } if (s->is_tile) { if (s->tile_attr.xSize < 1 || s->tile_attr.ySize < 1) { av_log(s->avctx, AV_LOG_ERROR, "Invalid tile attribute.\n"); ret = AVERROR_INVALIDDATA; goto fail; } } if (bytestream2_get_bytes_left(&s->gb) <= 0) { av_log(s->avctx, AV_LOG_ERROR, "Incomplete frame.\n"); ret = AVERROR_INVALIDDATA; goto fail; } frame->metadata = metadata; // aaand we are done bytestream2_skip(&s->gb, 1); return 0; fail: av_dict_free(&metadata); return ret; } static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { EXRContext *s = avctx->priv_data; ThreadFrame frame = { .f = data }; AVFrame *picture = data; uint8_t 
*ptr; int i, y, ret, ymax; int planes; int out_line_size; int nb_blocks; /* nb scanline or nb tile */ uint64_t start_offset_table; uint64_t start_next_scanline; PutByteContext offset_table_writer; bytestream2_init(&s->gb, avpkt->data, avpkt->size); if ((ret = decode_header(s, picture)) < 0) return ret; switch (s->pixel_type) { case EXR_FLOAT: case EXR_HALF: if (s->channel_offsets[3] >= 0) { if (!s->is_luma) { avctx->pix_fmt = AV_PIX_FMT_GBRAPF32; } else { /* todo: change this when a floating point pixel format with luma with alpha is implemented */ avctx->pix_fmt = AV_PIX_FMT_GBRAPF32; } } else { if (!s->is_luma) { avctx->pix_fmt = AV_PIX_FMT_GBRPF32; } else { avctx->pix_fmt = AV_PIX_FMT_GRAYF32; } } break; case EXR_UINT: if (s->channel_offsets[3] >= 0) { if (!s->is_luma) { avctx->pix_fmt = AV_PIX_FMT_RGBA64; } else { avctx->pix_fmt = AV_PIX_FMT_YA16; } } else { if (!s->is_luma) { avctx->pix_fmt = AV_PIX_FMT_RGB48; } else { avctx->pix_fmt = AV_PIX_FMT_GRAY16; } } break; default: av_log(avctx, AV_LOG_ERROR, "Missing channel list.\n"); return AVERROR_INVALIDDATA; } if (s->apply_trc_type != AVCOL_TRC_UNSPECIFIED) avctx->color_trc = s->apply_trc_type; switch (s->compression) { case EXR_RAW: case EXR_RLE: case EXR_ZIP1: s->scan_lines_per_block = 1; break; case EXR_PXR24: case EXR_ZIP16: s->scan_lines_per_block = 16; break; case EXR_PIZ: case EXR_B44: case EXR_B44A: s->scan_lines_per_block = 32; break; default: avpriv_report_missing_feature(avctx, "Compression %d", s->compression); return AVERROR_PATCHWELCOME; } /* Verify the xmin, xmax, ymin and ymax before setting the actual image size. * It's possible for the data window can larger or outside the display window */ if (s->xmin > s->xmax || s->ymin > s->ymax || s->ydelta == 0xFFFFFFFF || s->xdelta == 0xFFFFFFFF) { av_log(avctx, AV_LOG_ERROR, "Wrong or missing size information.\n"); return AVERROR_INVALIDDATA; } if ((ret = ff_set_dimensions(avctx, s->w, s->h)) < 0) return ret; s->desc = av_pix_fmt_desc_get(avctx->pix_fmt); if (!s->desc) return AVERROR_INVALIDDATA; if (s->desc->flags & AV_PIX_FMT_FLAG_FLOAT) { planes = s->desc->nb_components; out_line_size = avctx->width * 4; } else { planes = 1; out_line_size = avctx->width * 2 * s->desc->nb_components; } if (s->is_tile) { nb_blocks = ((s->xdelta + s->tile_attr.xSize - 1) / s->tile_attr.xSize) * ((s->ydelta + s->tile_attr.ySize - 1) / s->tile_attr.ySize); } else { /* scanline */ nb_blocks = (s->ydelta + s->scan_lines_per_block - 1) / s->scan_lines_per_block; } if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0) return ret; if (bytestream2_get_bytes_left(&s->gb)/8 < nb_blocks) return AVERROR_INVALIDDATA; // check offset table and recreate it if need if (!s->is_tile && bytestream2_peek_le64(&s->gb) == 0) { av_log(s->avctx, AV_LOG_DEBUG, "recreating invalid scanline offset table\n"); start_offset_table = bytestream2_tell(&s->gb); start_next_scanline = start_offset_table + nb_blocks * 8; bytestream2_init_writer(&offset_table_writer, &avpkt->data[start_offset_table], nb_blocks * 8); for (y = 0; y < nb_blocks; y++) { /* write offset of prev scanline in offset table */ bytestream2_put_le64(&offset_table_writer, start_next_scanline); /* get len of next scanline */ bytestream2_seek(&s->gb, start_next_scanline + 4, SEEK_SET);/* skip line number */ start_next_scanline += (bytestream2_get_le32(&s->gb) + 8); } bytestream2_seek(&s->gb, start_offset_table, SEEK_SET); } // save pointer we are going to use in decode_block s->buf = avpkt->data; s->buf_size = avpkt->size; // Zero out the start if ymin is not 0 
for (i = 0; i < planes; i++) { ptr = picture->data[i]; for (y = 0; y < FFMIN(s->ymin, s->h); y++) { memset(ptr, 0, out_line_size); ptr += picture->linesize[i]; } } s->picture = picture; avctx->execute2(avctx, decode_block, s->thread_data, NULL, nb_blocks); ymax = FFMAX(0, s->ymax + 1); // Zero out the end if ymax+1 is not h if (ymax < avctx->height) for (i = 0; i < planes; i++) { ptr = picture->data[i] + (ymax * picture->linesize[i]); for (y = ymax; y < avctx->height; y++) { memset(ptr, 0, out_line_size); ptr += picture->linesize[i]; } } picture->pict_type = AV_PICTURE_TYPE_I; *got_frame = 1; return avpkt->size; } static av_cold int decode_init(AVCodecContext *avctx) { EXRContext *s = avctx->priv_data; uint32_t i; union av_intfloat32 t; float one_gamma = 1.0f / s->gamma; avpriv_trc_function trc_func = NULL; s->avctx = avctx; ff_exrdsp_init(&s->dsp); #if HAVE_BIGENDIAN ff_bswapdsp_init(&s->bbdsp); #endif trc_func = avpriv_get_trc_function_from_trc(s->apply_trc_type); if (trc_func) { for (i = 0; i < 65536; ++i) { t = exr_half2float(i); t.f = trc_func(t.f); s->gamma_table[i] = t; } } else { if (one_gamma > 0.9999f && one_gamma < 1.0001f) { for (i = 0; i < 65536; ++i) { s->gamma_table[i] = exr_half2float(i); } } else { for (i = 0; i < 65536; ++i) { t = exr_half2float(i); /* If negative value we reuse half value */ if (t.f <= 0.0f) { s->gamma_table[i] = t; } else { t.f = powf(t.f, one_gamma); s->gamma_table[i] = t; } } } } // allocate thread data, used for non EXR_RAW compression types s->thread_data = av_mallocz_array(avctx->thread_count, sizeof(EXRThreadData)); if (!s->thread_data) return AVERROR_INVALIDDATA; return 0; } static av_cold int decode_end(AVCodecContext *avctx) { EXRContext *s = avctx->priv_data; int i; for (i = 0; i < avctx->thread_count; i++) { EXRThreadData *td = &s->thread_data[i]; av_freep(&td->uncompressed_data); av_freep(&td->tmp); av_freep(&td->bitmap); av_freep(&td->lut); } av_freep(&s->thread_data); av_freep(&s->channels); return 0; } #define OFFSET(x) offsetof(EXRContext, x) #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM static const AVOption options[] = { { "layer", "Set the decoding layer", OFFSET(layer), AV_OPT_TYPE_STRING, { .str = "" }, 0, 0, VD }, { "gamma", "Set the float gamma value when decoding", OFFSET(gamma), AV_OPT_TYPE_FLOAT, { .dbl = 1.0f }, 0.001, FLT_MAX, VD }, // XXX: Note the abuse of the enum using AVCOL_TRC_UNSPECIFIED to subsume the existing gamma option { "apply_trc", "color transfer characteristics to apply to EXR linear input", OFFSET(apply_trc_type), AV_OPT_TYPE_INT, {.i64 = AVCOL_TRC_UNSPECIFIED }, 1, AVCOL_TRC_NB-1, VD, "apply_trc_type"}, { "bt709", "BT.709", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT709 }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "gamma", "gamma", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_UNSPECIFIED }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "gamma22", "BT.470 M", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_GAMMA22 }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "gamma28", "BT.470 BG", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_GAMMA28 }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "smpte170m", "SMPTE 170 M", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_SMPTE170M }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "smpte240m", "SMPTE 240 M", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_SMPTE240M }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "linear", "Linear", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_LINEAR }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "log", "Log", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_LOG }, INT_MIN, INT_MAX, 
VD, "apply_trc_type"}, { "log_sqrt", "Log square root", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_LOG_SQRT }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "iec61966_2_4", "IEC 61966-2-4", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_IEC61966_2_4 }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "bt1361", "BT.1361", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT1361_ECG }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "iec61966_2_1", "IEC 61966-2-1", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_IEC61966_2_1 }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "bt2020_10bit", "BT.2020 - 10 bit", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT2020_10 }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "bt2020_12bit", "BT.2020 - 12 bit", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_BT2020_12 }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "smpte2084", "SMPTE ST 2084", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_SMPTEST2084 }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { "smpte428_1", "SMPTE ST 428-1", 0, AV_OPT_TYPE_CONST, {.i64 = AVCOL_TRC_SMPTEST428_1 }, INT_MIN, INT_MAX, VD, "apply_trc_type"}, { NULL }, }; static const AVClass exr_class = { .class_name = "EXR", .item_name = av_default_item_name, .option = options, .version = LIBAVUTIL_VERSION_INT, }; AVCodec ff_exr_decoder = { .name = "exr", .long_name = NULL_IF_CONFIG_SMALL("OpenEXR image"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_EXR, .priv_data_size = sizeof(EXRContext), .init = decode_init, .close = decode_end, .decode = decode_frame, .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS, .priv_class = &exr_class, };
null
220
CWE-787
CVE-2020-35979
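This row is labeled CWE-787, i.e. an out-of-bounds write: a write that lands past the end of the intended buffer. The sketch below is only a generic illustration of that defect class and of the length check that prevents it — it is not the actual patch for CVE-2020-35979, and every name and size in it is hypothetical.

#include <stdio.h>
#include <string.h>

#define PAYLOAD_MAX 16 /* hypothetical fixed-size destination */

/* Unchecked variant: any len larger than the real size of dst
 * writes past the buffer (CWE-787). */
static void copy_unchecked(unsigned char *dst, const unsigned char *src, size_t len)
{
    memcpy(dst, src, len); /* len is never compared to the destination capacity */
}

/* Checked variant: validate the length against the destination capacity first. */
static int copy_checked(unsigned char *dst, size_t dst_size,
                        const unsigned char *src, size_t len)
{
    if (len > dst_size)
        return -1; /* reject instead of overflowing */
    memcpy(dst, src, len);
    return 0;
}

int main(void)
{
    unsigned char payload[PAYLOAD_MAX];
    unsigned char input[64] = { 0 };

    /* An attacker-controlled length must be validated before the write. */
    size_t claimed_len = 64;
    if (copy_checked(payload, sizeof(payload), input, claimed_len) < 0)
        fprintf(stderr, "rejected oversized write (%zu > %zu)\n",
                claimed_len, (size_t)sizeof(payload));

    (void)copy_unchecked; /* shown for contrast only; never call it with untrusted len */
    return 0;
}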
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2012 * All rights reserved * * This file is part of GPAC / Media Tools sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/internal/media_dev.h> #include <gpac/base_coding.h> #include <gpac/mpeg4_odf.h> #include <gpac/constants.h> #include <gpac/maths.h> #include <gpac/internal/ietf_dev.h> #ifndef GPAC_DISABLE_ISOM void gf_media_get_sample_average_infos(GF_ISOFile *file, u32 Track, u32 *avgSize, u32 *MaxSize, u32 *TimeDelta, u32 *maxCTSDelta, u32 *const_duration, u32 *bandwidth) { u32 i, count, ts_diff; u64 prevTS, tdelta; Double bw; GF_ISOSample *samp; *avgSize = *MaxSize = 0; *TimeDelta = 0; *maxCTSDelta = 0; bw = 0; prevTS = 0; tdelta = 0; count = gf_isom_get_sample_count(file, Track); if (!count) return; *const_duration = 0; for (i=0; i<count; i++) { samp = gf_isom_get_sample_info(file, Track, i+1, NULL, NULL); if (!samp) break; //get the size *avgSize += samp->dataLength; if (*MaxSize < samp->dataLength) *MaxSize = samp->dataLength; ts_diff = (u32) (samp->DTS+samp->CTS_Offset - prevTS); //get the time tdelta += ts_diff; if (i==1) { *const_duration = ts_diff; } else if ( (i<count-1) && (*const_duration != ts_diff) ) { *const_duration = 0; } prevTS = samp->DTS+samp->CTS_Offset; bw += 8*samp->dataLength; //get the CTS delta if ((samp->CTS_Offset>=0) && ((u32)samp->CTS_Offset > *maxCTSDelta)) *maxCTSDelta = samp->CTS_Offset; gf_isom_sample_del(&samp); } if (count>1) *TimeDelta = (u32) (tdelta/ (count-1) ); else *TimeDelta = (u32) tdelta; *avgSize /= count; bw *= gf_isom_get_media_timescale(file, Track); bw /= (s64) gf_isom_get_media_duration(file, Track); bw /= 1000; (*bandwidth) = (u32) (bw+0.5); //delta is NOT an average, we need to know exactly how many bits are //needed to encode CTS-DTS for ANY samples } #ifndef GPAC_DISABLE_ISOM_HINTING /*RTP track hinter*/ struct __tag_isom_hinter { GF_ISOFile *file; /*IDs are kept for mp4 hint sample building*/ u32 TrackNum, TrackID, HintTrack, HintID; /*current Hint sample and associated RTP time*/ u32 HintSample, RTPTime; /*track has composition time offset*/ Bool has_ctts; /*remember if first SL packet in RTP packet is RAP*/ u8 SampleIsRAP; u32 base_offset_in_sample; u32 OrigTimeScale; /*rtp builder*/ GP_RTPPacketizer *rtp_p; u32 bandwidth, nb_chan; /*NALU size for H264/AVC*/ u32 avc_nalu_size; /*stats*/ u32 TotalSample, CurrentSample; }; /* offset for group ID for hint tracks in SimpleAV mode when all media data is copied to the hint track (no use interleaving hint and original in this case) this offset is applied internally by the track hinter. 
Thus you shouldn't specify a GroupID >= OFFSET_HINT_GROUP_ID if you want the lib to perform efficient interleaving in any cases (referenced or copied media) */ #define OFFSET_HINT_GROUP_ID 0x8000 void InitSL_RTP(GF_SLConfig *slc) { memset(slc, 0, sizeof(GF_SLConfig)); slc->tag = GF_ODF_SLC_TAG; slc->useTimestampsFlag = 1; slc->timestampLength = 32; } void InitSL_NULL(GF_SLConfig *slc) { memset(slc, 0, sizeof(GF_SLConfig)); slc->tag = GF_ODF_SLC_TAG; slc->predefined = 0x01; } void MP4T_OnPacketDone(void *cbk, GF_RTPHeader *header) { u8 disposable; GF_RTPHinter *tkHint = (GF_RTPHinter *)cbk; if (!tkHint || !tkHint->HintSample) return; assert(header->TimeStamp == tkHint->RTPTime); disposable = 0; if (tkHint->avc_nalu_size) { disposable = tkHint->rtp_p->avc_non_idr ? 1 : 0; } /*for all other, assume that CTS=DTS means B-frame -> disposable*/ else if (tkHint->has_ctts && (tkHint->rtp_p->sl_header.compositionTimeStamp==tkHint->rtp_p->sl_header.decodingTimeStamp)) { disposable = 1; } gf_isom_rtp_packet_set_flags(tkHint->file, tkHint->HintTrack, 0, 0, header->Marker, disposable, 0); } void MP4T_OnDataRef(void *cbk, u32 payload_size, u32 offset_from_orig) { GF_RTPHinter *tkHint = (GF_RTPHinter *)cbk; if (!tkHint || !payload_size) return; /*add reference*/ gf_isom_hint_sample_data(tkHint->file, tkHint->HintTrack, tkHint->TrackID, tkHint->CurrentSample, (u16) payload_size, offset_from_orig + tkHint->base_offset_in_sample, NULL, 0); } void MP4T_OnData(void *cbk, u8 *data, u32 data_size, Bool is_header) { u8 at_begin; GF_RTPHinter *tkHint = (GF_RTPHinter *)cbk; if (!data_size) return; at_begin = is_header ? 1 : 0; if (data_size <= 14) { gf_isom_hint_direct_data(tkHint->file, tkHint->HintTrack, data, data_size, at_begin); } else { gf_isom_hint_sample_data(tkHint->file, tkHint->HintTrack, tkHint->HintID, 0, (u16) data_size, 0, data, at_begin); } } void MP4T_OnNewPacket(void *cbk, GF_RTPHeader *header) { s32 res; GF_RTPHinter *tkHint = (GF_RTPHinter *)cbk; if (!tkHint) return; res = (s32) (tkHint->rtp_p->sl_header.compositionTimeStamp - tkHint->rtp_p->sl_header.decodingTimeStamp); assert( !res || tkHint->has_ctts); /*do we need a new sample*/ if (!tkHint->HintSample || (tkHint->RTPTime != header->TimeStamp)) { /*close current sample*/ if (tkHint->HintSample) gf_isom_end_hint_sample(tkHint->file, tkHint->HintTrack, tkHint->SampleIsRAP); /*start new sample: We use DTS as the sampling instant (RTP TS) to make sure all packets are sent in order*/ gf_isom_begin_hint_sample(tkHint->file, tkHint->HintTrack, 1, header->TimeStamp-res); tkHint->HintSample ++; tkHint->RTPTime = header->TimeStamp; tkHint->SampleIsRAP = tkHint->rtp_p->sl_config.hasRandomAccessUnitsOnlyFlag ? 
1 : tkHint->rtp_p->sl_header.randomAccessPointFlag; } /*create an RTP Packet with the appropriated marker flag - note: the flags are temp ones, they are set when the full packet is signaled (to handle multi AUs per RTP)*/ gf_isom_rtp_packet_begin(tkHint->file, tkHint->HintTrack, 0, 0, 0, header->Marker, header->PayloadType, 0, 0, header->SequenceNumber); /*Add the delta TS to make sure RTP TS is indeed the CTS (sampling time)*/ if (res) gf_isom_rtp_packet_set_offset(tkHint->file, tkHint->HintTrack, res); } GF_EXPORT GF_RTPHinter *gf_hinter_track_new(GF_ISOFile *file, u32 TrackNum, u32 Path_MTU, u32 max_ptime, u32 default_rtp_rate, u32 flags, u8 PayloadID, Bool copy_media, u32 InterleaveGroupID, u8 InterleaveGroupPriority, GF_Err *e) { GF_SLConfig my_sl; u32 descIndex, MinSize, MaxSize, avgTS, streamType, codecid, const_dur, nb_ch, maxDTSDelta; u8 OfficialPayloadID; u32 TrackMediaSubType, TrackMediaType, hintType, nbEdts, required_rate, force_dts_delta, avc_nalu_size, PL_ID, bandwidth, IV_length, KI_length; const char *url, *urn; char *mpeg4mode; Bool is_crypted, has_mpeg4_mapping; GF_RTPHinter *tmp; GF_ESD *esd; *e = GF_BAD_PARAM; if (!file || !TrackNum || !gf_isom_get_track_id(file, TrackNum)) return NULL; if (!gf_isom_get_sample_count(file, TrackNum)) { *e = GF_OK; return NULL; } *e = GF_NOT_SUPPORTED; nbEdts = gf_isom_get_edits_count(file, TrackNum); if (nbEdts>1) { u64 et, sd, mt; GF_ISOEditType em; gf_isom_get_edit(file, TrackNum, 1, &et, &sd, &mt, &em); if ((nbEdts>2) || (em!=GF_ISOM_EDIT_EMPTY)) { GF_LOG(GF_LOG_ERROR, GF_LOG_RTP, ("[rtp hinter] Cannot hint track whith EditList\n")); return NULL; } } if (nbEdts) gf_isom_remove_edits(file, TrackNum); if (!gf_isom_is_track_enabled(file, TrackNum)) return NULL; /*by default NO PL signaled*/ PL_ID = 0; OfficialPayloadID = 0; force_dts_delta = 0; streamType = 0; mpeg4mode = NULL; required_rate = 0; is_crypted = 0; IV_length = KI_length = 0; codecid = 0; nb_ch = 0; avc_nalu_size = 0; has_mpeg4_mapping = 1; const_dur = 0; bandwidth=0; TrackMediaType = gf_isom_get_media_type(file, TrackNum); /*for max compatibility with QT*/ if (!default_rtp_rate) default_rtp_rate = 90000; /*timed-text is a bit special, we support multiple stream descriptions & co*/ if ( (TrackMediaType==GF_ISOM_MEDIA_TEXT) || (TrackMediaType==GF_ISOM_MEDIA_SUBT)) { hintType = GF_RTP_PAYT_3GPP_TEXT; codecid = GF_CODECID_TEXT_MPEG4; streamType = GF_STREAM_TEXT; /*fixme - this works cos there's only one PL for text in mpeg4 at the current time*/ PL_ID = 0x10; } else { if (gf_isom_get_sample_description_count(file, TrackNum) > 1) return NULL; TrackMediaSubType = gf_isom_get_media_subtype(file, TrackNum, 1); switch (TrackMediaSubType) { case GF_ISOM_SUBTYPE_MPEG4_CRYP: is_crypted = 1; case GF_ISOM_SUBTYPE_MPEG4: esd = gf_isom_get_esd(file, TrackNum, 1); hintType = GF_RTP_PAYT_MPEG4; if (esd && esd->decoderConfig) { streamType = esd->decoderConfig->streamType; codecid = esd->decoderConfig->objectTypeIndication; if (esd->URLString) hintType = 0; /*AAC*/ if ((streamType==GF_STREAM_AUDIO) && esd->decoderConfig->decoderSpecificInfo && esd->decoderConfig->decoderSpecificInfo->data /*(nb: we use mpeg4 for MPEG-2 AAC)*/ && ((codecid==GF_CODECID_AAC_MPEG4) || (codecid==GF_CODECID_AAC_MPEG2_MP) || (codecid==GF_CODECID_AAC_MPEG2_LCP) || (codecid==GF_CODECID_AAC_MPEG2_SSRP)) ) { u32 sample_rate; GF_M4ADecSpecInfo a_cfg; gf_m4a_get_config(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, &a_cfg); nb_ch = a_cfg.nb_chan; sample_rate = 
a_cfg.base_sr; PL_ID = a_cfg.audioPL; switch (a_cfg.base_object_type) { case GF_M4A_AAC_MAIN: case GF_M4A_AAC_LC: if (flags & GP_RTP_PCK_USE_LATM_AAC) { hintType = GF_RTP_PAYT_LATM; break; } case GF_M4A_AAC_SBR: case GF_M4A_AAC_PS: case GF_M4A_AAC_LTP: case GF_M4A_AAC_SCALABLE: case GF_M4A_ER_AAC_LC: case GF_M4A_ER_AAC_LTP: case GF_M4A_ER_AAC_SCALABLE: mpeg4mode = "AAC"; break; case GF_M4A_CELP: case GF_M4A_ER_CELP: mpeg4mode = "CELP"; break; } required_rate = sample_rate; } /*MPEG1/2 audio*/ else if ((streamType==GF_STREAM_AUDIO) && ((codecid==GF_CODECID_MPEG2_PART3) || (codecid==GF_CODECID_MPEG_AUDIO))) { GF_ISOSample *samp = NULL; if (!is_crypted) samp = gf_isom_get_sample(file, TrackNum, 1, NULL); if (samp && (samp->dataLength>3)) { u32 hdr = GF_4CC((u32)samp->data[0], (u8)samp->data[1], (u8)samp->data[2], (u8)samp->data[3]); nb_ch = gf_mp3_num_channels(hdr); hintType = GF_RTP_PAYT_MPEG12_AUDIO; /*use official RTP/AVP payload type*/ OfficialPayloadID = 14; required_rate = 90000; } /*encrypted MP3 must be sent through MPEG-4 generic to signal all ISMACryp stuff*/ else { u32 sample_rate; gf_isom_get_audio_info(file, TrackNum, 1, &sample_rate, &nb_ch, NULL); required_rate = sample_rate; } if (samp) gf_isom_sample_del(&samp); } /*QCELP audio*/ else if ((streamType==GF_STREAM_AUDIO) && (codecid==GF_CODECID_QCELP)) { hintType = GF_RTP_PAYT_QCELP; OfficialPayloadID = 12; required_rate = 8000; streamType = GF_STREAM_AUDIO; nb_ch = 1; } /*EVRC/SVM audio*/ else if ((streamType==GF_STREAM_AUDIO) && ((codecid==GF_CODECID_EVRC) || (codecid==GF_CODECID_SMV)) ) { hintType = GF_RTP_PAYT_EVRC_SMV; required_rate = 8000; streamType = GF_STREAM_AUDIO; nb_ch = 1; } /*visual streams*/ else if (streamType==GF_STREAM_VISUAL) { if ((codecid==GF_CODECID_MPEG4_PART2) && esd->decoderConfig->decoderSpecificInfo) { GF_M4VDecSpecInfo dsi; gf_m4v_get_config(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, &dsi); PL_ID = dsi.VideoPL; } /*MPEG1/2 video*/ if ( ((codecid>=GF_CODECID_MPEG2_SIMPLE) && (codecid<=GF_CODECID_MPEG2_422)) || (codecid==GF_CODECID_MPEG1)) { if (!is_crypted) { hintType = GF_RTP_PAYT_MPEG12_VIDEO; OfficialPayloadID = 32; } } /*for ISMA*/ if (is_crypted) { /*that's another pain with ISMACryp, even if no B-frames the DTS is signaled...*/ if (codecid==GF_CODECID_MPEG4_PART2) force_dts_delta = 22; else if ((codecid==GF_CODECID_AVC) || (codecid==GF_CODECID_SVC)) { flags &= ~GP_RTP_PCK_USE_MULTI; force_dts_delta = 22; } flags |= GP_RTP_PCK_SIGNAL_RAP | GP_RTP_PCK_SIGNAL_TS; } required_rate = default_rtp_rate; } /*systems streams*/ else if (gf_isom_has_sync_shadows(file, TrackNum) || gf_isom_has_sample_dependency(file, TrackNum)) { flags |= GP_RTP_PCK_SYSTEMS_CAROUSEL; } gf_odf_desc_del((GF_Descriptor*)esd); } break; case GF_ISOM_SUBTYPE_3GP_H263: hintType = GF_RTP_PAYT_H263; required_rate = 90000; streamType = GF_STREAM_VISUAL; OfficialPayloadID = 34; /*not 100% compliant (short header is missing) but should still work*/ codecid = GF_CODECID_MPEG4_PART2; PL_ID = 0x01; break; case GF_ISOM_SUBTYPE_3GP_AMR: required_rate = 8000; hintType = GF_RTP_PAYT_AMR; streamType = GF_STREAM_AUDIO; has_mpeg4_mapping = 0; nb_ch = 1; break; case GF_ISOM_SUBTYPE_3GP_AMR_WB: required_rate = 16000; hintType = GF_RTP_PAYT_AMR_WB; streamType = GF_STREAM_AUDIO; has_mpeg4_mapping = 0; nb_ch = 1; break; case GF_ISOM_SUBTYPE_AVC_H264: case GF_ISOM_SUBTYPE_AVC2_H264: case GF_ISOM_SUBTYPE_AVC3_H264: case GF_ISOM_SUBTYPE_AVC4_H264: case GF_ISOM_SUBTYPE_SVC_H264: case 
GF_ISOM_SUBTYPE_MVC_H264: { GF_AVCConfig *avcc = gf_isom_avc_config_get(file, TrackNum, 1); GF_AVCConfig *svcc = gf_isom_svc_config_get(file, TrackNum, 1); GF_AVCConfig *mvcc = gf_isom_mvc_config_get(file, TrackNum, 1); if (!avcc && !svcc && !mvcc) { *e = GF_NON_COMPLIANT_BITSTREAM; return NULL; } required_rate = 90000; /* "90 kHz clock rate MUST be used"*/ hintType = GF_RTP_PAYT_H264_AVC; if (TrackMediaSubType==GF_ISOM_SUBTYPE_SVC_H264) hintType = GF_RTP_PAYT_H264_SVC; else if (TrackMediaSubType==GF_ISOM_SUBTYPE_MVC_H264) hintType = GF_RTP_PAYT_H264_SVC; streamType = GF_STREAM_VISUAL; avc_nalu_size = avcc ? avcc->nal_unit_size : svcc ? svcc->nal_unit_size : mvcc->nal_unit_size; codecid = GF_CODECID_AVC; PL_ID = 0x0F; gf_odf_avc_cfg_del(avcc); gf_odf_avc_cfg_del(svcc); } break; case GF_ISOM_SUBTYPE_HVC1: case GF_ISOM_SUBTYPE_HEV1: case GF_ISOM_SUBTYPE_HVC2: case GF_ISOM_SUBTYPE_HEV2: { GF_HEVCConfig *hevcc = gf_isom_hevc_config_get(file, TrackNum, 1); if (!hevcc) { *e = GF_NON_COMPLIANT_BITSTREAM; return NULL; } required_rate = 90000; /* "90 kHz clock rate MUST be used"*/ hintType = GF_RTP_PAYT_HEVC; streamType = GF_STREAM_VISUAL; avc_nalu_size = hevcc->nal_unit_size; codecid = GF_CODECID_HEVC; PL_ID = 0x0F; flags |= GP_RTP_PCK_USE_MULTI; gf_odf_hevc_cfg_del(hevcc); break; } break; case GF_ISOM_SUBTYPE_3GP_QCELP: required_rate = 8000; hintType = GF_RTP_PAYT_QCELP; streamType = GF_STREAM_AUDIO; codecid = GF_CODECID_QCELP; OfficialPayloadID = 12; nb_ch = 1; break; case GF_ISOM_SUBTYPE_3GP_EVRC: case GF_ISOM_SUBTYPE_3GP_SMV: required_rate = 8000; hintType = GF_RTP_PAYT_EVRC_SMV; streamType = GF_STREAM_AUDIO; codecid = (TrackMediaSubType==GF_ISOM_SUBTYPE_3GP_EVRC) ? GF_CODECID_EVRC : GF_CODECID_SMV; nb_ch = 1; break; case GF_ISOM_SUBTYPE_3GP_DIMS: #if GPAC_ENABLE_3GPP_DIMS_RTP hintType = GF_RTP_PAYT_3GPP_DIMS; streamType = GF_STREAM_SCENE; #else hintType = 0; GF_LOG(GF_LOG_ERROR, GF_LOG_RTP, ("[RTP Packetizer] 3GPP DIMS over RTP disabled in build\n", streamType)); #endif break; case GF_ISOM_SUBTYPE_AC3: hintType = GF_RTP_PAYT_AC3; streamType = GF_STREAM_AUDIO; gf_isom_get_audio_info(file, TrackNum, 1, NULL, &nb_ch, NULL); break; case GF_ISOM_SUBTYPE_MP3: { GF_ISOSample *samp = gf_isom_get_sample(file, TrackNum, 1, NULL); if (samp && (samp->dataLength>3)) { u32 hdr = GF_4CC((u32)samp->data[0], (u8)samp->data[1], (u8)samp->data[2], (u8)samp->data[3]); nb_ch = gf_mp3_num_channels(hdr); } else { u32 bps; gf_isom_get_audio_info(file, TrackNum, 1, &required_rate, &nb_ch, &bps); } hintType = GF_RTP_PAYT_MPEG12_AUDIO; /*use official RTP/AVP payload type*/ OfficialPayloadID = 14; required_rate = 90000; if (samp) gf_isom_sample_del(&samp); } break; default: /*ERROR*/ hintType = 0; break; } } /*not hintable*/ if (!hintType) return NULL; /*we only support self-contained files for hinting*/ gf_isom_get_data_reference(file, TrackNum, 1, &url, &urn); if (url || urn) return NULL; *e = GF_OUT_OF_MEM; GF_SAFEALLOC(tmp, GF_RTPHinter); if (!tmp) return NULL; /*override hinter type if requested and possible*/ if (has_mpeg4_mapping && (flags & GP_RTP_PCK_FORCE_MPEG4)) { hintType = GF_RTP_PAYT_MPEG4; avc_nalu_size = 0; } /*use static payload ID if enabled*/ else if (OfficialPayloadID && (flags & GP_RTP_PCK_USE_STATIC_ID) ) { PayloadID = OfficialPayloadID; } tmp->file = file; tmp->TrackNum = TrackNum; tmp->avc_nalu_size = avc_nalu_size; tmp->nb_chan = nb_ch; /*spatial scalability check*/ tmp->has_ctts = gf_isom_has_time_offset(file, TrackNum); /*get sample info*/ gf_media_get_sample_average_infos(file, TrackNum, 
&MinSize, &MaxSize, &avgTS, &maxDTSDelta, &const_dur, &bandwidth); /*systems carousel: we need at least IDX and RAP signaling*/ if (flags & GP_RTP_PCK_SYSTEMS_CAROUSEL) { flags |= GP_RTP_PCK_SIGNAL_RAP; } /*update flags in MultiSL*/ if (flags & GP_RTP_PCK_USE_MULTI) { if (MinSize != MaxSize) flags |= GP_RTP_PCK_SIGNAL_SIZE; if (!const_dur) flags |= GP_RTP_PCK_SIGNAL_TS; } if (tmp->has_ctts) flags |= GP_RTP_PCK_SIGNAL_TS; /*default SL for RTP */ InitSL_RTP(&my_sl); my_sl.timestampResolution = gf_isom_get_media_timescale(file, TrackNum); /*override clockrate if set*/ if (required_rate) { Double sc = required_rate; sc /= my_sl.timestampResolution; maxDTSDelta = (u32) (maxDTSDelta*sc); my_sl.timestampResolution = required_rate; } /*switch to RTP TS*/ max_ptime = (u32) (max_ptime * my_sl.timestampResolution / 1000); my_sl.AUSeqNumLength = gf_get_bit_size(gf_isom_get_sample_count(file, TrackNum)); if (my_sl.AUSeqNumLength>16) my_sl.AUSeqNumLength=16; my_sl.CUDuration = const_dur; if (gf_isom_has_sync_points(file, TrackNum)) { my_sl.useRandomAccessPointFlag = 1; } else { my_sl.useRandomAccessPointFlag = 0; my_sl.hasRandomAccessUnitsOnlyFlag = 1; } if (is_crypted) { Bool use_sel_enc; gf_isom_get_ismacryp_info(file, TrackNum, 1, NULL, NULL, NULL, NULL, NULL, &use_sel_enc, &IV_length, &KI_length); if (use_sel_enc) flags |= GP_RTP_PCK_SELECTIVE_ENCRYPTION; } // in case a different timescale was provided tmp->OrigTimeScale = gf_isom_get_media_timescale(file, TrackNum); tmp->rtp_p = gf_rtp_builder_new(hintType, &my_sl, flags, tmp, MP4T_OnNewPacket, MP4T_OnPacketDone, /*if copy, no data ref*/ copy_media ? NULL : MP4T_OnDataRef, MP4T_OnData); //init the builder gf_rtp_builder_init(tmp->rtp_p, PayloadID, Path_MTU, max_ptime, streamType, codecid, PL_ID, MinSize, MaxSize, avgTS, maxDTSDelta, IV_length, KI_length, mpeg4mode); /*ISMA compliance is a pain...*/ if (force_dts_delta) tmp->rtp_p->slMap.DTSDeltaLength = force_dts_delta; /* Hint Track Setup */ tmp->TrackID = gf_isom_get_track_id(file, TrackNum); tmp->HintID = tmp->TrackID + 65535; while (gf_isom_get_track_by_id(file, tmp->HintID)) tmp->HintID++; tmp->HintTrack = gf_isom_new_track(file, tmp->HintID, GF_ISOM_MEDIA_HINT, my_sl.timestampResolution); gf_isom_setup_hint_track(file, tmp->HintTrack, GF_ISOM_HINT_RTP); /*create a hint description*/ gf_isom_new_hint_description(file, tmp->HintTrack, -1, -1, 0, &descIndex); gf_isom_rtp_set_timescale(file, tmp->HintTrack, descIndex, my_sl.timestampResolution); if (hintType==GF_RTP_PAYT_MPEG4) { tmp->rtp_p->slMap.CodecID = codecid; /*set this SL for extraction.*/ gf_isom_set_extraction_slc(file, TrackNum, 1, &my_sl); } tmp->bandwidth = bandwidth; /*set interleaving*/ gf_isom_set_track_interleaving_group(file, TrackNum, InterleaveGroupID); if (!copy_media) { /*if we don't copy data set hint track and media track in the same group*/ gf_isom_set_track_interleaving_group(file, tmp->HintTrack, InterleaveGroupID); } else { gf_isom_set_track_interleaving_group(file, tmp->HintTrack, InterleaveGroupID + OFFSET_HINT_GROUP_ID); } /*use user-secified priority*/ InterleaveGroupPriority*=2; gf_isom_set_track_priority_in_group(file, TrackNum, InterleaveGroupPriority+1); gf_isom_set_track_priority_in_group(file, tmp->HintTrack, InterleaveGroupPriority); #if 0 #endif *e = GF_OK; return tmp; } GF_EXPORT GF_Err gf_hinter_track_force_no_offsets(GF_RTPHinter *tkHinter) { GF_Err e; if (!tkHinter) return GF_BAD_PARAM; e = gf_isom_rtp_set_time_offset(tkHinter->file, tkHinter->HintTrack, 1, 0); if (e) return e; return 
gf_isom_rtp_set_time_sequence_offset(tkHinter->file, tkHinter->HintTrack, 1, 0); } GF_EXPORT u32 gf_hinter_track_get_bandwidth(GF_RTPHinter *tkHinter) { return tkHinter->bandwidth; } GF_EXPORT u32 gf_hinter_track_get_flags(GF_RTPHinter *tkHinter) { return tkHinter->rtp_p->flags; } GF_EXPORT void gf_hinter_track_get_payload_name(GF_RTPHinter *tkHinter, char *payloadName) { char mediaName[30]; gf_rtp_builder_get_payload_name(tkHinter->rtp_p, payloadName, mediaName); } GF_EXPORT void gf_hinter_track_del(GF_RTPHinter *tkHinter) { if (!tkHinter) return; if (tkHinter->rtp_p) gf_rtp_builder_del(tkHinter->rtp_p); gf_free(tkHinter); } GF_EXPORT GF_Err gf_hinter_track_process(GF_RTPHinter *tkHint) { GF_Err e; u32 i, descIndex, duration; u64 ts; u8 PadBits; GF_Fraction ft; GF_ISOSample *samp; tkHint->HintSample = tkHint->RTPTime = 0; tkHint->TotalSample = gf_isom_get_sample_count(tkHint->file, tkHint->TrackNum); ft.num = tkHint->rtp_p->sl_config.timestampResolution; ft.den = tkHint->OrigTimeScale; e = GF_OK; for (i=0; i<tkHint->TotalSample; i++) { samp = gf_isom_get_sample(tkHint->file, tkHint->TrackNum, i+1, &descIndex); if (!samp) return gf_isom_last_error(tkHint->file); //setup SL tkHint->CurrentSample = i + 1; /*keep same AU indicator if sync shadow - TODO FIXME: this assumes shadows are placed interleaved with the track content which is the case for GPAC scene carousel generation, but may not always be true*/ if (samp->IsRAP==RAP_REDUNDANT) { tkHint->rtp_p->sl_header.AU_sequenceNumber -= 1; samp->IsRAP = RAP; } ts = ft.num * (samp->DTS+samp->CTS_Offset) / ft.den; tkHint->rtp_p->sl_header.compositionTimeStamp = ts; ts = ft.num * samp->DTS / ft.den; tkHint->rtp_p->sl_header.decodingTimeStamp = ts; tkHint->rtp_p->sl_header.randomAccessPointFlag = samp->IsRAP; tkHint->base_offset_in_sample = 0; /*crypted*/ if (tkHint->rtp_p->slMap.IV_length) { GF_ISMASample *s = gf_isom_get_ismacryp_sample(tkHint->file, tkHint->TrackNum, samp, descIndex); /*one byte take for selective_enc flag*/ if (s->flags & GF_ISOM_ISMA_USE_SEL_ENC) tkHint->base_offset_in_sample += 1; if (s->flags & GF_ISOM_ISMA_IS_ENCRYPTED) tkHint->base_offset_in_sample += s->IV_length + s->KI_length; gf_free(samp->data); samp->data = s->data; samp->dataLength = s->dataLength; gf_rtp_builder_set_cryp_info(tkHint->rtp_p, s->IV, (char*)s->key_indicator, (s->flags & GF_ISOM_ISMA_IS_ENCRYPTED) ? 
1 : 0); s->data = NULL; s->dataLength = 0; gf_isom_ismacryp_delete_sample(s); } if (tkHint->rtp_p->sl_config.usePaddingFlag) { gf_isom_get_sample_padding_bits(tkHint->file, tkHint->TrackNum, i+1, &PadBits); tkHint->rtp_p->sl_header.paddingBits = PadBits; } else { tkHint->rtp_p->sl_header.paddingBits = 0; } duration = gf_isom_get_sample_duration(tkHint->file, tkHint->TrackNum, i+1); // ts = (u32) (ft * (s64) (duration)); /*unpack nal units*/ if (tkHint->avc_nalu_size) { u32 v, size; u32 remain = samp->dataLength; char *ptr = samp->data; tkHint->rtp_p->sl_header.accessUnitStartFlag = 1; tkHint->rtp_p->sl_header.accessUnitEndFlag = 0; while (remain) { size = 0; v = tkHint->avc_nalu_size; if (v>remain) { GF_LOG(GF_LOG_ERROR, GF_LOG_RTP, ("[rtp hinter] Broken AVC nalu encapsulation: NALU size length is %d but only %d bytes left in sample %d\n", v, remain, tkHint->CurrentSample)); break; } while (v) { size |= (u8) *ptr; ptr++; remain--; v-=1; if (v) size<<=8; } tkHint->base_offset_in_sample = samp->dataLength-remain; if (remain < size) { GF_LOG(GF_LOG_ERROR, GF_LOG_RTP, ("[rtp hinter] Broken AVC nalu encapsulation: NALU size is %d but only %d bytes left in sample %d\n", size, remain, tkHint->CurrentSample)); break; } remain -= size; tkHint->rtp_p->sl_header.accessUnitEndFlag = remain ? 0 : 1; e = gf_rtp_builder_process(tkHint->rtp_p, ptr, size, (u8) !remain, samp->dataLength, duration, (u8) (descIndex + GF_RTP_TX3G_SIDX_OFFSET) ); ptr += size; tkHint->rtp_p->sl_header.accessUnitStartFlag = 0; } } else { e = gf_rtp_builder_process(tkHint->rtp_p, samp->data, samp->dataLength, 1, samp->dataLength, duration, (u8) (descIndex + GF_RTP_TX3G_SIDX_OFFSET) ); } tkHint->rtp_p->sl_header.packetSequenceNumber += 1; //signal some progress gf_set_progress("Hinting", tkHint->CurrentSample, tkHint->TotalSample); tkHint->rtp_p->sl_header.AU_sequenceNumber += 1; gf_isom_sample_del(&samp); if (e) return e; } //flush gf_rtp_builder_process(tkHint->rtp_p, NULL, 0, 1, 0, 0, 0); gf_isom_end_hint_sample(tkHint->file, tkHint->HintTrack, (u8) tkHint->SampleIsRAP); return GF_OK; } static u32 write_nalu_config_array(char *sdpLine, GF_List *nalus) { u32 i, count, b64s; char b64[200]; count = gf_list_count(nalus); for (i=0; i<count; i++) { GF_NALUFFParam *sl = (GF_NALUFFParam *)gf_list_get(nalus, i); b64s = gf_base64_encode(sl->data, sl->size, b64, 200); b64[b64s]=0; strcat(sdpLine, b64); if (i+1<count) strcat(sdpLine, ","); } return count; } static void write_avc_config(char *sdpLine, GF_AVCConfig *avcc, GF_AVCConfig *svcc) { u32 count = 0; if (avcc) count += gf_list_count(avcc->sequenceParameterSets) + gf_list_count(avcc->pictureParameterSets) + gf_list_count(avcc->sequenceParameterSetExtensions); if (svcc) count += gf_list_count(svcc->sequenceParameterSets) + gf_list_count(svcc->pictureParameterSets); if (!count) return; strcat(sdpLine, "; sprop-parameter-sets="); if (avcc) { count = write_nalu_config_array(sdpLine, avcc->sequenceParameterSets); if (count) strcat(sdpLine, ","); count = write_nalu_config_array(sdpLine, avcc->sequenceParameterSetExtensions); if (count) strcat(sdpLine, ","); count = write_nalu_config_array(sdpLine, avcc->pictureParameterSets); if (count) strcat(sdpLine, ","); } if (svcc) { count = write_nalu_config_array(sdpLine, svcc->sequenceParameterSets); if (count) strcat(sdpLine, ","); count = write_nalu_config_array(sdpLine, svcc->pictureParameterSets); if (count) strcat(sdpLine, ","); } count = (u32) strlen(sdpLine); if (sdpLine[count-1] == ',') sdpLine[count-1] = 0; } GF_EXPORT GF_Err 
gf_hinter_track_finalize(GF_RTPHinter *tkHint, Bool AddSystemInfo) { u32 Width, Height; GF_ESD *esd; char sdpLine[20000]; char mediaName[30], payloadName[30]; u32 mtype; Width = Height = 0; gf_isom_sdp_clean_track(tkHint->file, tkHint->TrackNum); mtype = gf_isom_get_media_type(tkHint->file, tkHint->TrackNum); if (gf_isom_is_video_handler_type(mtype)) gf_isom_get_visual_info(tkHint->file, tkHint->TrackNum, 1, &Width, &Height); gf_rtp_builder_get_payload_name(tkHint->rtp_p, payloadName, mediaName); /*TODO- extract out of rtp_p for future live tools*/ sprintf(sdpLine, "m=%s 0 RTP/%s %d", mediaName, tkHint->rtp_p->slMap.IV_length ? "SAVP" : "AVP", tkHint->rtp_p->PayloadType); gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); if (tkHint->bandwidth) { sprintf(sdpLine, "b=AS:%d", tkHint->bandwidth); gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); } if (tkHint->nb_chan) { sprintf(sdpLine, "a=rtpmap:%d %s/%d/%d", tkHint->rtp_p->PayloadType, payloadName, tkHint->rtp_p->sl_config.timestampResolution, tkHint->nb_chan); } else { sprintf(sdpLine, "a=rtpmap:%d %s/%d", tkHint->rtp_p->PayloadType, payloadName, tkHint->rtp_p->sl_config.timestampResolution); } gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); /*control for MPEG-4*/ if (AddSystemInfo) { sprintf(sdpLine, "a=mpeg4-esid:%d", gf_isom_get_track_id(tkHint->file, tkHint->TrackNum)); gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); } /*control for QTSS/DSS*/ sprintf(sdpLine, "a=control:trackID=%d", gf_isom_get_track_id(tkHint->file, tkHint->HintTrack)); gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); /*H263 extensions*/ if (tkHint->rtp_p->rtp_payt == GF_RTP_PAYT_H263) { sprintf(sdpLine, "a=cliprect:0,0,%d,%d", Height, Width); gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); } /*AMR*/ else if ((tkHint->rtp_p->rtp_payt == GF_RTP_PAYT_AMR) || (tkHint->rtp_p->rtp_payt == GF_RTP_PAYT_AMR_WB)) { sprintf(sdpLine, "a=fmtp:%d octet-align=1", tkHint->rtp_p->PayloadType); gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); } /*Text*/ else if (tkHint->rtp_p->rtp_payt == GF_RTP_PAYT_3GPP_TEXT) { u32 w, h, i, m_w, m_h; s32 tx, ty; s16 l; gf_isom_get_track_layout_info(tkHint->file, tkHint->TrackNum, &w, &h, &tx, &ty, &l); m_w = w; m_h = h; for (i=0; i<gf_isom_get_track_count(tkHint->file); i++) { switch (gf_isom_get_media_type(tkHint->file, i+1)) { case GF_ISOM_MEDIA_SCENE: case GF_ISOM_MEDIA_VISUAL: case GF_ISOM_MEDIA_AUXV: case GF_ISOM_MEDIA_PICT: gf_isom_get_track_layout_info(tkHint->file, i+1, &w, &h, &tx, &ty, &l); if (w>m_w) m_w = w; if (h>m_h) m_h = h; break; default: break; } } gf_media_format_ttxt_sdp(tkHint->rtp_p, payloadName, sdpLine, w, h, tx, ty, l, m_w, m_h, NULL); strcat(sdpLine, "; tx3g="); for (i=0; i<gf_isom_get_sample_description_count(tkHint->file, tkHint->TrackNum); i++) { u8 *tx3g; char buffer[2000]; u32 tx3g_len, len; gf_isom_text_get_encoded_tx3g(tkHint->file, tkHint->TrackNum, i+1, GF_RTP_TX3G_SIDX_OFFSET, &tx3g, &tx3g_len); len = gf_base64_encode(tx3g, tx3g_len, buffer, 2000); gf_free(tx3g); buffer[len] = 0; if (i) strcat(sdpLine, ", "); strcat(sdpLine, buffer); } gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); } /*EVRC/SMV in non header-free mode*/ else if ((tkHint->rtp_p->rtp_payt == GF_RTP_PAYT_EVRC_SMV) && (tkHint->rtp_p->auh_size>1)) { sprintf(sdpLine, "a=fmtp:%d maxptime=%d", tkHint->rtp_p->PayloadType, tkHint->rtp_p->auh_size*20); 
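/* note (added for clarity): EVRC and SMV frames are 20 ms each, so the maxptime value built above presumably advertises the duration of an RTP packet carrying up to auh_size frames */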
gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); } /*H264/AVC*/ else if ((tkHint->rtp_p->rtp_payt == GF_RTP_PAYT_H264_AVC) || (tkHint->rtp_p->rtp_payt == GF_RTP_PAYT_H264_SVC)) { GF_AVCConfig *avcc = gf_isom_avc_config_get(tkHint->file, tkHint->TrackNum, 1); GF_AVCConfig *svcc = gf_isom_svc_config_get(tkHint->file, tkHint->TrackNum, 1); /*TODO - check syntax for SVC (might be some extra signaling)*/ if (avcc) { sprintf(sdpLine, "a=fmtp:%d profile-level-id=%02X%02X%02X; packetization-mode=1", tkHint->rtp_p->PayloadType, avcc->AVCProfileIndication, avcc->profile_compatibility, avcc->AVCLevelIndication); } else { if (!svcc) return GF_ISOM_INVALID_FILE; sprintf(sdpLine, "a=fmtp:%d profile-level-id=%02X%02X%02X; packetization-mode=1", tkHint->rtp_p->PayloadType, svcc->AVCProfileIndication, svcc->profile_compatibility, svcc->AVCLevelIndication); } write_avc_config(sdpLine, avcc, svcc); gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); gf_odf_avc_cfg_del(avcc); gf_odf_avc_cfg_del(svcc); } /*MPEG-4 decoder config*/ else if (tkHint->rtp_p->rtp_payt==GF_RTP_PAYT_MPEG4) { esd = gf_isom_get_esd(tkHint->file, tkHint->TrackNum, 1); if (esd && esd->decoderConfig && esd->decoderConfig->decoderSpecificInfo && esd->decoderConfig->decoderSpecificInfo->data) { gf_rtp_builder_format_sdp(tkHint->rtp_p, payloadName, sdpLine, esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength); } else { gf_rtp_builder_format_sdp(tkHint->rtp_p, payloadName, sdpLine, NULL, 0); } if (esd) gf_odf_desc_del((GF_Descriptor *)esd); if (tkHint->rtp_p->slMap.IV_length) { const char *kms; gf_isom_get_ismacryp_info(tkHint->file, tkHint->TrackNum, 1, NULL, NULL, NULL, NULL, &kms, NULL, NULL, NULL); if (!strnicmp(kms, "(key)", 5) || !strnicmp(kms, "(ipmp)", 6) || !strnicmp(kms, "(uri)", 5)) { strcat(sdpLine, "; ISMACrypKey="); } else { strcat(sdpLine, "; ISMACrypKey=(uri)"); } strcat(sdpLine, kms); } gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); } /*MPEG-4 Audio LATM*/ else if (tkHint->rtp_p->rtp_payt==GF_RTP_PAYT_LATM) { GF_BitStream *bs; u8 *config_bytes; u32 config_size; /* form config string */ bs = gf_bs_new(NULL, 32, GF_BITSTREAM_WRITE); gf_bs_write_int(bs, 0, 1); /* AudioMuxVersion */ gf_bs_write_int(bs, 1, 1); /* all streams same time */ gf_bs_write_int(bs, 0, 6); /* numSubFrames */ gf_bs_write_int(bs, 0, 4); /* numPrograms */ gf_bs_write_int(bs, 0, 3); /* numLayer */ /* audio-specific config */ esd = gf_isom_get_esd(tkHint->file, tkHint->TrackNum, 1); if (esd && esd->decoderConfig && esd->decoderConfig->decoderSpecificInfo) { /*PacketVideo patch: don't signal SBR and PS stuff, not allowed in LATM with audioMuxVersion=0*/ gf_bs_write_data(bs, esd->decoderConfig->decoderSpecificInfo->data, MIN(esd->decoderConfig->decoderSpecificInfo->dataLength, 2) ); } if (esd) gf_odf_desc_del((GF_Descriptor *)esd); /* other data */ gf_bs_write_int(bs, 0, 3); /* frameLengthType */ gf_bs_write_int(bs, 0xff, 8); /* latmBufferFullness */ gf_bs_write_int(bs, 0, 1); /* otherDataPresent */ gf_bs_write_int(bs, 0, 1); /* crcCheckPresent */ gf_bs_get_content(bs, &config_bytes, &config_size); gf_bs_del(bs); gf_rtp_builder_format_sdp(tkHint->rtp_p, payloadName, sdpLine, config_bytes, config_size); gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); gf_free(config_bytes); } #if GPAC_ENABLE_3GPP_DIMS_RTP /*3GPP DIMS*/ else if (tkHint->rtp_p->rtp_payt==GF_RTP_PAYT_3GPP_DIMS) { GF_DIMSDescription dims; 
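/* note (added for clarity): the fmtp line assembled below mirrors the DIMS sample description fields: Version-profile, useFullRequestHost/pathComponents, stream-type, contains-redundant, text-encoding and content-coding */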
gf_isom_get_visual_info(tkHint->file, tkHint->TrackNum, 1, &Width, &Height); gf_isom_get_dims_description(tkHint->file, tkHint->TrackNum, 1, &dims); sprintf(sdpLine, "a=fmtp:%d Version-profile=%d", tkHint->rtp_p->PayloadType, dims.profile); if (! dims.fullRequestHost) { char fmt[200]; strcat(sdpLine, ";useFullRequestHost=0"); sprintf(fmt, ";pathComponents=%d", dims.pathComponents); strcat(sdpLine, fmt); } if (!dims.streamType) strcat(sdpLine, ";stream-type=secondary"); if (dims.containsRedundant == 1) strcat(sdpLine, ";contains-redundant=main"); else if (dims.containsRedundant == 2) strcat(sdpLine, ";contains-redundant=redundant"); if (dims.textEncoding && strlen(dims.textEncoding)) { strcat(sdpLine, ";text-encoding="); strcat(sdpLine, dims.textEncoding); } if (dims.contentEncoding && strlen(dims.contentEncoding)) { strcat(sdpLine, ";content-coding="); strcat(sdpLine, dims.contentEncoding); } if (dims.contentEncoding && dims.content_script_types && strlen(dims.content_script_types) ) { strcat(sdpLine, ";content-script-types="); strcat(sdpLine, dims.contentEncoding); } gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); } #endif /*extensions for some mobile phones*/ if (Width && Height) { sprintf(sdpLine, "a=framesize:%d %d-%d", tkHint->rtp_p->PayloadType, Width, Height); gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); } esd = gf_isom_get_esd(tkHint->file, tkHint->TrackNum, 1); if (esd && esd->decoderConfig && (esd->decoderConfig->rvc_config || esd->decoderConfig->predefined_rvc_config)) { if (esd->decoderConfig->predefined_rvc_config) { sprintf(sdpLine, "a=rvc-config-predef:%d", esd->decoderConfig->predefined_rvc_config); } else { /*temporary ...*/ if ((esd->decoderConfig->objectTypeIndication==GF_CODECID_AVC) || (esd->decoderConfig->objectTypeIndication==GF_CODECID_SVC)) { sprintf(sdpLine, "a=rvc-config:%s", "http://download.tsi.telecom-paristech.fr/gpac/RVC/rvc_config_avc.xml"); } else { sprintf(sdpLine, "a=rvc-config:%s", "http://download.tsi.telecom-paristech.fr/gpac/RVC/rvc_config_sp.xml"); } } gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); } if (esd) gf_odf_desc_del((GF_Descriptor *)esd); gf_isom_set_track_enabled(tkHint->file, tkHint->HintTrack, GF_TRUE); return GF_OK; } GF_EXPORT Bool gf_hinter_can_embbed_data(u8 *data, u32 data_size, u32 streamType) { char data64[5000]; u32 size64; size64 = gf_base64_encode(data, data_size, data64, 5000); if (!size64) return 0; switch (streamType) { case GF_STREAM_OD: size64 += (u32) strlen("data:application/mpeg4-od-au;base64,"); break; case GF_STREAM_SCENE: size64 += (u32) strlen("data:application/mpeg4-bifs-au;base64,"); break; default: /*NOT NORMATIVE*/ size64 += (u32) strlen("data:application/mpeg4-es-au;base64,"); break; } if (size64>=255) return 0; return 1; } GF_EXPORT GF_Err gf_hinter_finalize(GF_ISOFile *file, GF_SDP_IODProfile IOD_Profile, u32 bandwidth) { u32 i, sceneT, odT, descIndex, size, size64; GF_InitialObjectDescriptor *iod; GF_SLConfig slc; GF_ISOSample *samp; Bool remove_ocr; u8 *buffer; char buf64[5000], sdpLine[5100]; gf_isom_sdp_clean(file); if (bandwidth) { sprintf(buf64, "b=AS:%d", bandwidth); gf_isom_sdp_add_line(file, buf64); } //xtended attribute for copyright if (gf_sys_is_test_mode()) { sprintf(buf64, "a=x-copyright: %s", "MP4/3GP File hinted with GPAC - (c) Telecom ParisTech (http://gpac.io)"); } else { sprintf(buf64, "a=x-copyright: MP4/3GP File hinted with GPAC %s - %s", gf_gpac_version(), gf_gpac_copyright() ); } gf_isom_sdp_add_line(file, buf64); if 
(IOD_Profile == GF_SDP_IOD_NONE) return GF_OK; odT = sceneT = 0; for (i=0; i<gf_isom_get_track_count(file); i++) { if (!gf_isom_is_track_in_root_od(file, i+1)) continue; switch (gf_isom_get_media_type(file,i+1)) { case GF_ISOM_MEDIA_OD: odT = i+1; break; case GF_ISOM_MEDIA_SCENE: sceneT = i+1; break; } } remove_ocr = 0; if (IOD_Profile == GF_SDP_IOD_ISMA_STRICT) { IOD_Profile = GF_SDP_IOD_ISMA; remove_ocr = 1; } /*if we want ISMA like iods, we need at least BIFS */ if ( (IOD_Profile == GF_SDP_IOD_ISMA) && !sceneT ) return GF_BAD_PARAM; /*do NOT change PLs, we assume they are correct*/ iod = (GF_InitialObjectDescriptor *) gf_isom_get_root_od(file); if (!iod) return GF_NOT_SUPPORTED; /*rewrite an IOD with good SL config - embbed data if possible*/ if (IOD_Profile == GF_SDP_IOD_ISMA) { GF_ESD *esd; Bool is_ok = 1; while (gf_list_count(iod->ESDescriptors)) { esd = (GF_ESD*)gf_list_get(iod->ESDescriptors, 0); gf_odf_desc_del((GF_Descriptor *) esd); gf_list_rem(iod->ESDescriptors, 0); } /*get OD esd, and embbed stream data if possible*/ if (odT) { esd = gf_isom_get_esd(file, odT, 1); if (gf_isom_get_sample_count(file, odT)==1) { samp = gf_isom_get_sample(file, odT, 1, &descIndex); if (gf_hinter_can_embbed_data(samp->data, samp->dataLength, GF_STREAM_OD)) { InitSL_NULL(&slc); slc.predefined = 0; slc.hasRandomAccessUnitsOnlyFlag = 1; slc.timeScale = slc.timestampResolution = gf_isom_get_media_timescale(file, odT); slc.OCRResolution = 1000; slc.startCTS = samp->DTS+samp->CTS_Offset; slc.startDTS = samp->DTS; //set the SL for future extraction gf_isom_set_extraction_slc(file, odT, 1, &slc); size64 = gf_base64_encode(samp->data, samp->dataLength, buf64, 2000); buf64[size64] = 0; sprintf(sdpLine, "data:application/mpeg4-od-au;base64,%s", buf64); esd->decoderConfig->avgBitrate = 0; esd->decoderConfig->bufferSizeDB = samp->dataLength; esd->decoderConfig->maxBitrate = 0; size64 = (u32) strlen(sdpLine)+1; esd->URLString = (char*)gf_malloc(sizeof(char) * size64); strcpy(esd->URLString, sdpLine); } else { GF_LOG(GF_LOG_WARNING, GF_LOG_RTP, ("[rtp hinter] OD sample too large to be embedded in IOD - ISMA disabled\n")); is_ok = 0; } gf_isom_sample_del(&samp); } if (remove_ocr) esd->OCRESID = 0; else if (esd->OCRESID == esd->ESID) esd->OCRESID = 0; //OK, add this to our IOD gf_list_add(iod->ESDescriptors, esd); } esd = gf_isom_get_esd(file, sceneT, 1); if (gf_isom_get_sample_count(file, sceneT)==1) { samp = gf_isom_get_sample(file, sceneT, 1, &descIndex); if (gf_hinter_can_embbed_data(samp->data, samp->dataLength, GF_STREAM_SCENE)) { slc.timeScale = slc.timestampResolution = gf_isom_get_media_timescale(file, sceneT); slc.OCRResolution = 1000; slc.startCTS = samp->DTS+samp->CTS_Offset; slc.startDTS = samp->DTS; //set the SL for future extraction gf_isom_set_extraction_slc(file, sceneT, 1, &slc); //encode in Base64 the sample size64 = gf_base64_encode(samp->data, samp->dataLength, buf64, 2000); buf64[size64] = 0; sprintf(sdpLine, "data:application/mpeg4-bifs-au;base64,%s", buf64); esd->decoderConfig->avgBitrate = 0; esd->decoderConfig->bufferSizeDB = samp->dataLength; esd->decoderConfig->maxBitrate = 0; esd->URLString = (char*)gf_malloc(sizeof(char) * (strlen(sdpLine)+1)); strcpy(esd->URLString, sdpLine); } else { GF_LOG(GF_LOG_ERROR, GF_LOG_RTP, ("[rtp hinter] Scene description sample too large to be embedded in IOD - ISMA disabled\n")); is_ok = 0; } gf_isom_sample_del(&samp); } if (remove_ocr) esd->OCRESID = 0; else if (esd->OCRESID == esd->ESID) esd->OCRESID = 0; gf_list_add(iod->ESDescriptors, esd); if 
(is_ok) { u32 has_a, has_v, has_i_a, has_i_v; has_a = has_v = has_i_a = has_i_v = 0; for (i=0; i<gf_isom_get_track_count(file); i++) { esd = gf_isom_get_esd(file, i+1, 1); if (!esd) continue; if (esd->decoderConfig->streamType==GF_STREAM_VISUAL) { if (esd->decoderConfig->objectTypeIndication==GF_CODECID_MPEG4_PART2) has_i_v ++; else has_v++; } else if (esd->decoderConfig->streamType==GF_STREAM_AUDIO) { if (esd->decoderConfig->objectTypeIndication==GF_CODECID_AAC_MPEG4) has_i_a ++; else has_a++; } gf_odf_desc_del((GF_Descriptor *)esd); } /*only 1 MPEG-4 visual max and 1 MPEG-4 audio max for ISMA compliancy*/ if (!has_v && !has_a && (has_i_v<=1) && (has_i_a<=1)) { sprintf(sdpLine, "a=isma-compliance:1,1.0,1"); gf_isom_sdp_add_line(file, sdpLine); } } } //encode the IOD buffer = NULL; size = 0; gf_odf_desc_write((GF_Descriptor *) iod, &buffer, &size); gf_odf_desc_del((GF_Descriptor *)iod); //encode in Base64 the iod size64 = gf_base64_encode(buffer, size, buf64, 2000); buf64[size64] = 0; gf_free(buffer); sprintf(sdpLine, "a=mpeg4-iod:\"data:application/mpeg4-iod;base64,%s\"", buf64); gf_isom_sdp_add_line(file, sdpLine); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_HINTING*/ #endif /*GPAC_DISABLE_ISOM*/
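/*
 * Standalone sketch (not part of GPAC): illustrates how gf_hinter_track_process()
 * above walks a length-prefixed AVC/HEVC sample, reading a big-endian size field of
 * nalu_size_len bytes in front of every NAL unit before handing the payload to the
 * packetizer. The helper name walk_nalus and the nalu_cb callback type are
 * illustrative assumptions, not GPAC APIs. A zero-length NALU is skipped so the
 * loop always makes progress.
 */
typedef void (*nalu_cb)(const unsigned char *nalu, unsigned int size, void *udta);

static int walk_nalus(const unsigned char *data, unsigned int data_len,
                      unsigned int nalu_size_len, nalu_cb on_nalu, void *udta)
{
	unsigned int remain = data_len;
	const unsigned char *ptr = data;

	if (!nalu_size_len || (nalu_size_len > 4)) return -1;
	while (remain) {
		unsigned int v = nalu_size_len, size = 0;
		if (v > remain) return -1;            /* truncated size field */
		while (v--) {                         /* big-endian length prefix */
			size = (size << 8) | *ptr++;
			remain--;
		}
		if (size > remain) return -1;         /* NALU overruns the sample */
		if (!size) continue;                  /* empty NALU: nothing to emit */
		if (on_nalu) on_nalu(ptr, size, udta);
		ptr += size;
		remain -= size;
	}
	return 0;
}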
null
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2012 * All rights reserved * * This file is part of GPAC / Media Tools sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/internal/media_dev.h> #include <gpac/base_coding.h> #include <gpac/mpeg4_odf.h> #include <gpac/constants.h> #include <gpac/maths.h> #include <gpac/internal/ietf_dev.h> #ifndef GPAC_DISABLE_ISOM void gf_media_get_sample_average_infos(GF_ISOFile *file, u32 Track, u32 *avgSize, u32 *MaxSize, u32 *TimeDelta, u32 *maxCTSDelta, u32 *const_duration, u32 *bandwidth) { u32 i, count, ts_diff; u64 prevTS, tdelta; Double bw; GF_ISOSample *samp; *avgSize = *MaxSize = 0; *TimeDelta = 0; *maxCTSDelta = 0; bw = 0; prevTS = 0; tdelta = 0; count = gf_isom_get_sample_count(file, Track); if (!count) return; *const_duration = 0; for (i=0; i<count; i++) { samp = gf_isom_get_sample_info(file, Track, i+1, NULL, NULL); if (!samp) break; //get the size *avgSize += samp->dataLength; if (*MaxSize < samp->dataLength) *MaxSize = samp->dataLength; ts_diff = (u32) (samp->DTS+samp->CTS_Offset - prevTS); //get the time tdelta += ts_diff; if (i==1) { *const_duration = ts_diff; } else if ( (i<count-1) && (*const_duration != ts_diff) ) { *const_duration = 0; } prevTS = samp->DTS+samp->CTS_Offset; bw += 8*samp->dataLength; //get the CTS delta if ((samp->CTS_Offset>=0) && ((u32)samp->CTS_Offset > *maxCTSDelta)) *maxCTSDelta = samp->CTS_Offset; gf_isom_sample_del(&samp); } if (count>1) *TimeDelta = (u32) (tdelta/ (count-1) ); else *TimeDelta = (u32) tdelta; *avgSize /= count; bw *= gf_isom_get_media_timescale(file, Track); bw /= (s64) gf_isom_get_media_duration(file, Track); bw /= 1000; (*bandwidth) = (u32) (bw+0.5); //delta is NOT an average, we need to know exactly how many bits are //needed to encode CTS-DTS for ANY samples } #ifndef GPAC_DISABLE_ISOM_HINTING /*RTP track hinter*/ struct __tag_isom_hinter { GF_ISOFile *file; /*IDs are kept for mp4 hint sample building*/ u32 TrackNum, TrackID, HintTrack, HintID; /*current Hint sample and associated RTP time*/ u32 HintSample, RTPTime; /*track has composition time offset*/ Bool has_ctts; /*remember if first SL packet in RTP packet is RAP*/ u8 SampleIsRAP; u32 base_offset_in_sample; u32 OrigTimeScale; /*rtp builder*/ GP_RTPPacketizer *rtp_p; u32 bandwidth, nb_chan; /*NALU size for H264/AVC*/ u32 avc_nalu_size; /*stats*/ u32 TotalSample, CurrentSample; }; /* offset for group ID for hint tracks in SimpleAV mode when all media data is copied to the hint track (no use interleaving hint and original in this case) this offset is applied internally by the track hinter. 
Thus you shouldn't specify a GroupID >= OFFSET_HINT_GROUP_ID if you want the lib to perform efficient interleaving in any cases (referenced or copied media) */ #define OFFSET_HINT_GROUP_ID 0x8000 void InitSL_RTP(GF_SLConfig *slc) { memset(slc, 0, sizeof(GF_SLConfig)); slc->tag = GF_ODF_SLC_TAG; slc->useTimestampsFlag = 1; slc->timestampLength = 32; } void InitSL_NULL(GF_SLConfig *slc) { memset(slc, 0, sizeof(GF_SLConfig)); slc->tag = GF_ODF_SLC_TAG; slc->predefined = 0x01; } void MP4T_OnPacketDone(void *cbk, GF_RTPHeader *header) { u8 disposable; GF_RTPHinter *tkHint = (GF_RTPHinter *)cbk; if (!tkHint || !tkHint->HintSample) return; assert(header->TimeStamp == tkHint->RTPTime); disposable = 0; if (tkHint->avc_nalu_size) { disposable = tkHint->rtp_p->avc_non_idr ? 1 : 0; } /*for all other, assume that CTS=DTS means B-frame -> disposable*/ else if (tkHint->has_ctts && (tkHint->rtp_p->sl_header.compositionTimeStamp==tkHint->rtp_p->sl_header.decodingTimeStamp)) { disposable = 1; } gf_isom_rtp_packet_set_flags(tkHint->file, tkHint->HintTrack, 0, 0, header->Marker, disposable, 0); } void MP4T_OnDataRef(void *cbk, u32 payload_size, u32 offset_from_orig) { GF_RTPHinter *tkHint = (GF_RTPHinter *)cbk; if (!tkHint || !payload_size) return; /*add reference*/ gf_isom_hint_sample_data(tkHint->file, tkHint->HintTrack, tkHint->TrackID, tkHint->CurrentSample, (u16) payload_size, offset_from_orig + tkHint->base_offset_in_sample, NULL, 0); } void MP4T_OnData(void *cbk, u8 *data, u32 data_size, Bool is_header) { u8 at_begin; GF_RTPHinter *tkHint = (GF_RTPHinter *)cbk; if (!data_size) return; at_begin = is_header ? 1 : 0; if (data_size <= 14) { gf_isom_hint_direct_data(tkHint->file, tkHint->HintTrack, data, data_size, at_begin); } else { gf_isom_hint_sample_data(tkHint->file, tkHint->HintTrack, tkHint->HintID, 0, (u16) data_size, 0, data, at_begin); } } void MP4T_OnNewPacket(void *cbk, GF_RTPHeader *header) { s32 res; GF_RTPHinter *tkHint = (GF_RTPHinter *)cbk; if (!tkHint) return; res = (s32) (tkHint->rtp_p->sl_header.compositionTimeStamp - tkHint->rtp_p->sl_header.decodingTimeStamp); assert( !res || tkHint->has_ctts); /*do we need a new sample*/ if (!tkHint->HintSample || (tkHint->RTPTime != header->TimeStamp)) { /*close current sample*/ if (tkHint->HintSample) gf_isom_end_hint_sample(tkHint->file, tkHint->HintTrack, tkHint->SampleIsRAP); /*start new sample: We use DTS as the sampling instant (RTP TS) to make sure all packets are sent in order*/ gf_isom_begin_hint_sample(tkHint->file, tkHint->HintTrack, 1, header->TimeStamp-res); tkHint->HintSample ++; tkHint->RTPTime = header->TimeStamp; tkHint->SampleIsRAP = tkHint->rtp_p->sl_config.hasRandomAccessUnitsOnlyFlag ? 
1 : tkHint->rtp_p->sl_header.randomAccessPointFlag; } /*create an RTP Packet with the appropriated marker flag - note: the flags are temp ones, they are set when the full packet is signaled (to handle multi AUs per RTP)*/ gf_isom_rtp_packet_begin(tkHint->file, tkHint->HintTrack, 0, 0, 0, header->Marker, header->PayloadType, 0, 0, header->SequenceNumber); /*Add the delta TS to make sure RTP TS is indeed the CTS (sampling time)*/ if (res) gf_isom_rtp_packet_set_offset(tkHint->file, tkHint->HintTrack, res); } GF_EXPORT GF_RTPHinter *gf_hinter_track_new(GF_ISOFile *file, u32 TrackNum, u32 Path_MTU, u32 max_ptime, u32 default_rtp_rate, u32 flags, u8 PayloadID, Bool copy_media, u32 InterleaveGroupID, u8 InterleaveGroupPriority, GF_Err *e) { GF_SLConfig my_sl; u32 descIndex, MinSize, MaxSize, avgTS, streamType, codecid, const_dur, nb_ch, maxDTSDelta; u8 OfficialPayloadID; u32 TrackMediaSubType, TrackMediaType, hintType, nbEdts, required_rate, force_dts_delta, avc_nalu_size, PL_ID, bandwidth, IV_length, KI_length; const char *url, *urn; char *mpeg4mode; Bool is_crypted, has_mpeg4_mapping; GF_RTPHinter *tmp; GF_ESD *esd; *e = GF_BAD_PARAM; if (!file || !TrackNum || !gf_isom_get_track_id(file, TrackNum)) return NULL; if (!gf_isom_get_sample_count(file, TrackNum)) { *e = GF_OK; return NULL; } *e = GF_NOT_SUPPORTED; nbEdts = gf_isom_get_edits_count(file, TrackNum); if (nbEdts>1) { u64 et, sd, mt; GF_ISOEditType em; gf_isom_get_edit(file, TrackNum, 1, &et, &sd, &mt, &em); if ((nbEdts>2) || (em!=GF_ISOM_EDIT_EMPTY)) { GF_LOG(GF_LOG_ERROR, GF_LOG_RTP, ("[rtp hinter] Cannot hint track whith EditList\n")); return NULL; } } if (nbEdts) gf_isom_remove_edits(file, TrackNum); if (!gf_isom_is_track_enabled(file, TrackNum)) return NULL; /*by default NO PL signaled*/ PL_ID = 0; OfficialPayloadID = 0; force_dts_delta = 0; streamType = 0; mpeg4mode = NULL; required_rate = 0; is_crypted = 0; IV_length = KI_length = 0; codecid = 0; nb_ch = 0; avc_nalu_size = 0; has_mpeg4_mapping = 1; const_dur = 0; bandwidth=0; TrackMediaType = gf_isom_get_media_type(file, TrackNum); /*for max compatibility with QT*/ if (!default_rtp_rate) default_rtp_rate = 90000; /*timed-text is a bit special, we support multiple stream descriptions & co*/ if ( (TrackMediaType==GF_ISOM_MEDIA_TEXT) || (TrackMediaType==GF_ISOM_MEDIA_SUBT)) { hintType = GF_RTP_PAYT_3GPP_TEXT; codecid = GF_CODECID_TEXT_MPEG4; streamType = GF_STREAM_TEXT; /*fixme - this works cos there's only one PL for text in mpeg4 at the current time*/ PL_ID = 0x10; } else { if (gf_isom_get_sample_description_count(file, TrackNum) > 1) return NULL; TrackMediaSubType = gf_isom_get_media_subtype(file, TrackNum, 1); switch (TrackMediaSubType) { case GF_ISOM_SUBTYPE_MPEG4_CRYP: is_crypted = 1; case GF_ISOM_SUBTYPE_MPEG4: esd = gf_isom_get_esd(file, TrackNum, 1); hintType = GF_RTP_PAYT_MPEG4; if (esd && esd->decoderConfig) { streamType = esd->decoderConfig->streamType; codecid = esd->decoderConfig->objectTypeIndication; if (esd->URLString) hintType = 0; /*AAC*/ if ((streamType==GF_STREAM_AUDIO) && esd->decoderConfig->decoderSpecificInfo && esd->decoderConfig->decoderSpecificInfo->data /*(nb: we use mpeg4 for MPEG-2 AAC)*/ && ((codecid==GF_CODECID_AAC_MPEG4) || (codecid==GF_CODECID_AAC_MPEG2_MP) || (codecid==GF_CODECID_AAC_MPEG2_LCP) || (codecid==GF_CODECID_AAC_MPEG2_SSRP)) ) { u32 sample_rate; GF_M4ADecSpecInfo a_cfg; gf_m4a_get_config(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, &a_cfg); nb_ch = a_cfg.nb_chan; sample_rate = 
a_cfg.base_sr; PL_ID = a_cfg.audioPL; switch (a_cfg.base_object_type) { case GF_M4A_AAC_MAIN: case GF_M4A_AAC_LC: if (flags & GP_RTP_PCK_USE_LATM_AAC) { hintType = GF_RTP_PAYT_LATM; break; } case GF_M4A_AAC_SBR: case GF_M4A_AAC_PS: case GF_M4A_AAC_LTP: case GF_M4A_AAC_SCALABLE: case GF_M4A_ER_AAC_LC: case GF_M4A_ER_AAC_LTP: case GF_M4A_ER_AAC_SCALABLE: mpeg4mode = "AAC"; break; case GF_M4A_CELP: case GF_M4A_ER_CELP: mpeg4mode = "CELP"; break; } required_rate = sample_rate; } /*MPEG1/2 audio*/ else if ((streamType==GF_STREAM_AUDIO) && ((codecid==GF_CODECID_MPEG2_PART3) || (codecid==GF_CODECID_MPEG_AUDIO))) { GF_ISOSample *samp = NULL; if (!is_crypted) samp = gf_isom_get_sample(file, TrackNum, 1, NULL); if (samp && (samp->dataLength>3)) { u32 hdr = GF_4CC((u32)samp->data[0], (u8)samp->data[1], (u8)samp->data[2], (u8)samp->data[3]); nb_ch = gf_mp3_num_channels(hdr); hintType = GF_RTP_PAYT_MPEG12_AUDIO; /*use official RTP/AVP payload type*/ OfficialPayloadID = 14; required_rate = 90000; } /*encrypted MP3 must be sent through MPEG-4 generic to signal all ISMACryp stuff*/ else { u32 sample_rate; gf_isom_get_audio_info(file, TrackNum, 1, &sample_rate, &nb_ch, NULL); required_rate = sample_rate; } if (samp) gf_isom_sample_del(&samp); } /*QCELP audio*/ else if ((streamType==GF_STREAM_AUDIO) && (codecid==GF_CODECID_QCELP)) { hintType = GF_RTP_PAYT_QCELP; OfficialPayloadID = 12; required_rate = 8000; streamType = GF_STREAM_AUDIO; nb_ch = 1; } /*EVRC/SVM audio*/ else if ((streamType==GF_STREAM_AUDIO) && ((codecid==GF_CODECID_EVRC) || (codecid==GF_CODECID_SMV)) ) { hintType = GF_RTP_PAYT_EVRC_SMV; required_rate = 8000; streamType = GF_STREAM_AUDIO; nb_ch = 1; } /*visual streams*/ else if (streamType==GF_STREAM_VISUAL) { if ((codecid==GF_CODECID_MPEG4_PART2) && esd->decoderConfig->decoderSpecificInfo) { GF_M4VDecSpecInfo dsi; gf_m4v_get_config(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, &dsi); PL_ID = dsi.VideoPL; } /*MPEG1/2 video*/ if ( ((codecid>=GF_CODECID_MPEG2_SIMPLE) && (codecid<=GF_CODECID_MPEG2_422)) || (codecid==GF_CODECID_MPEG1)) { if (!is_crypted) { hintType = GF_RTP_PAYT_MPEG12_VIDEO; OfficialPayloadID = 32; } } /*for ISMA*/ if (is_crypted) { /*that's another pain with ISMACryp, even if no B-frames the DTS is signaled...*/ if (codecid==GF_CODECID_MPEG4_PART2) force_dts_delta = 22; else if ((codecid==GF_CODECID_AVC) || (codecid==GF_CODECID_SVC)) { flags &= ~GP_RTP_PCK_USE_MULTI; force_dts_delta = 22; } flags |= GP_RTP_PCK_SIGNAL_RAP | GP_RTP_PCK_SIGNAL_TS; } required_rate = default_rtp_rate; } /*systems streams*/ else if (gf_isom_has_sync_shadows(file, TrackNum) || gf_isom_has_sample_dependency(file, TrackNum)) { flags |= GP_RTP_PCK_SYSTEMS_CAROUSEL; } gf_odf_desc_del((GF_Descriptor*)esd); } break; case GF_ISOM_SUBTYPE_3GP_H263: hintType = GF_RTP_PAYT_H263; required_rate = 90000; streamType = GF_STREAM_VISUAL; OfficialPayloadID = 34; /*not 100% compliant (short header is missing) but should still work*/ codecid = GF_CODECID_MPEG4_PART2; PL_ID = 0x01; break; case GF_ISOM_SUBTYPE_3GP_AMR: required_rate = 8000; hintType = GF_RTP_PAYT_AMR; streamType = GF_STREAM_AUDIO; has_mpeg4_mapping = 0; nb_ch = 1; break; case GF_ISOM_SUBTYPE_3GP_AMR_WB: required_rate = 16000; hintType = GF_RTP_PAYT_AMR_WB; streamType = GF_STREAM_AUDIO; has_mpeg4_mapping = 0; nb_ch = 1; break; case GF_ISOM_SUBTYPE_AVC_H264: case GF_ISOM_SUBTYPE_AVC2_H264: case GF_ISOM_SUBTYPE_AVC3_H264: case GF_ISOM_SUBTYPE_AVC4_H264: case GF_ISOM_SUBTYPE_SVC_H264: case 
GF_ISOM_SUBTYPE_MVC_H264: { GF_AVCConfig *avcc = gf_isom_avc_config_get(file, TrackNum, 1); GF_AVCConfig *svcc = gf_isom_svc_config_get(file, TrackNum, 1); GF_AVCConfig *mvcc = gf_isom_mvc_config_get(file, TrackNum, 1); if (!avcc && !svcc && !mvcc) { *e = GF_NON_COMPLIANT_BITSTREAM; return NULL; } required_rate = 90000; /* "90 kHz clock rate MUST be used"*/ hintType = GF_RTP_PAYT_H264_AVC; if (TrackMediaSubType==GF_ISOM_SUBTYPE_SVC_H264) hintType = GF_RTP_PAYT_H264_SVC; else if (TrackMediaSubType==GF_ISOM_SUBTYPE_MVC_H264) hintType = GF_RTP_PAYT_H264_SVC; streamType = GF_STREAM_VISUAL; avc_nalu_size = avcc ? avcc->nal_unit_size : svcc ? svcc->nal_unit_size : mvcc->nal_unit_size; codecid = GF_CODECID_AVC; PL_ID = 0x0F; gf_odf_avc_cfg_del(avcc); gf_odf_avc_cfg_del(svcc); } break; case GF_ISOM_SUBTYPE_HVC1: case GF_ISOM_SUBTYPE_HEV1: case GF_ISOM_SUBTYPE_HVC2: case GF_ISOM_SUBTYPE_HEV2: { GF_HEVCConfig *hevcc = gf_isom_hevc_config_get(file, TrackNum, 1); if (!hevcc) { *e = GF_NON_COMPLIANT_BITSTREAM; return NULL; } required_rate = 90000; /* "90 kHz clock rate MUST be used"*/ hintType = GF_RTP_PAYT_HEVC; streamType = GF_STREAM_VISUAL; avc_nalu_size = hevcc->nal_unit_size; codecid = GF_CODECID_HEVC; PL_ID = 0x0F; flags |= GP_RTP_PCK_USE_MULTI; gf_odf_hevc_cfg_del(hevcc); break; } break; case GF_ISOM_SUBTYPE_3GP_QCELP: required_rate = 8000; hintType = GF_RTP_PAYT_QCELP; streamType = GF_STREAM_AUDIO; codecid = GF_CODECID_QCELP; OfficialPayloadID = 12; nb_ch = 1; break; case GF_ISOM_SUBTYPE_3GP_EVRC: case GF_ISOM_SUBTYPE_3GP_SMV: required_rate = 8000; hintType = GF_RTP_PAYT_EVRC_SMV; streamType = GF_STREAM_AUDIO; codecid = (TrackMediaSubType==GF_ISOM_SUBTYPE_3GP_EVRC) ? GF_CODECID_EVRC : GF_CODECID_SMV; nb_ch = 1; break; case GF_ISOM_SUBTYPE_3GP_DIMS: #if GPAC_ENABLE_3GPP_DIMS_RTP hintType = GF_RTP_PAYT_3GPP_DIMS; streamType = GF_STREAM_SCENE; #else hintType = 0; GF_LOG(GF_LOG_ERROR, GF_LOG_RTP, ("[RTP Packetizer] 3GPP DIMS over RTP disabled in build\n", streamType)); #endif break; case GF_ISOM_SUBTYPE_AC3: hintType = GF_RTP_PAYT_AC3; streamType = GF_STREAM_AUDIO; gf_isom_get_audio_info(file, TrackNum, 1, NULL, &nb_ch, NULL); break; case GF_ISOM_SUBTYPE_MP3: { GF_ISOSample *samp = gf_isom_get_sample(file, TrackNum, 1, NULL); if (samp && (samp->dataLength>3)) { u32 hdr = GF_4CC((u32)samp->data[0], (u8)samp->data[1], (u8)samp->data[2], (u8)samp->data[3]); nb_ch = gf_mp3_num_channels(hdr); } else { u32 bps; gf_isom_get_audio_info(file, TrackNum, 1, &required_rate, &nb_ch, &bps); } hintType = GF_RTP_PAYT_MPEG12_AUDIO; /*use official RTP/AVP payload type*/ OfficialPayloadID = 14; required_rate = 90000; if (samp) gf_isom_sample_del(&samp); } break; default: /*ERROR*/ hintType = 0; break; } } /*not hintable*/ if (!hintType) return NULL; /*we only support self-contained files for hinting*/ gf_isom_get_data_reference(file, TrackNum, 1, &url, &urn); if (url || urn) return NULL; *e = GF_OUT_OF_MEM; GF_SAFEALLOC(tmp, GF_RTPHinter); if (!tmp) return NULL; /*override hinter type if requested and possible*/ if (has_mpeg4_mapping && (flags & GP_RTP_PCK_FORCE_MPEG4)) { hintType = GF_RTP_PAYT_MPEG4; avc_nalu_size = 0; } /*use static payload ID if enabled*/ else if (OfficialPayloadID && (flags & GP_RTP_PCK_USE_STATIC_ID) ) { PayloadID = OfficialPayloadID; } tmp->file = file; tmp->TrackNum = TrackNum; tmp->avc_nalu_size = avc_nalu_size; tmp->nb_chan = nb_ch; /*spatial scalability check*/ tmp->has_ctts = gf_isom_has_time_offset(file, TrackNum); /*get sample info*/ gf_media_get_sample_average_infos(file, TrackNum, 
&MinSize, &MaxSize, &avgTS, &maxDTSDelta, &const_dur, &bandwidth); /*systems carousel: we need at least IDX and RAP signaling*/ if (flags & GP_RTP_PCK_SYSTEMS_CAROUSEL) { flags |= GP_RTP_PCK_SIGNAL_RAP; } /*update flags in MultiSL*/ if (flags & GP_RTP_PCK_USE_MULTI) { if (MinSize != MaxSize) flags |= GP_RTP_PCK_SIGNAL_SIZE; if (!const_dur) flags |= GP_RTP_PCK_SIGNAL_TS; } if (tmp->has_ctts) flags |= GP_RTP_PCK_SIGNAL_TS; /*default SL for RTP */ InitSL_RTP(&my_sl); my_sl.timestampResolution = gf_isom_get_media_timescale(file, TrackNum); /*override clockrate if set*/ if (required_rate) { Double sc = required_rate; sc /= my_sl.timestampResolution; maxDTSDelta = (u32) (maxDTSDelta*sc); my_sl.timestampResolution = required_rate; } /*switch to RTP TS*/ max_ptime = (u32) (max_ptime * my_sl.timestampResolution / 1000); my_sl.AUSeqNumLength = gf_get_bit_size(gf_isom_get_sample_count(file, TrackNum)); if (my_sl.AUSeqNumLength>16) my_sl.AUSeqNumLength=16; my_sl.CUDuration = const_dur; if (gf_isom_has_sync_points(file, TrackNum)) { my_sl.useRandomAccessPointFlag = 1; } else { my_sl.useRandomAccessPointFlag = 0; my_sl.hasRandomAccessUnitsOnlyFlag = 1; } if (is_crypted) { Bool use_sel_enc; gf_isom_get_ismacryp_info(file, TrackNum, 1, NULL, NULL, NULL, NULL, NULL, &use_sel_enc, &IV_length, &KI_length); if (use_sel_enc) flags |= GP_RTP_PCK_SELECTIVE_ENCRYPTION; } // in case a different timescale was provided tmp->OrigTimeScale = gf_isom_get_media_timescale(file, TrackNum); tmp->rtp_p = gf_rtp_builder_new(hintType, &my_sl, flags, tmp, MP4T_OnNewPacket, MP4T_OnPacketDone, /*if copy, no data ref*/ copy_media ? NULL : MP4T_OnDataRef, MP4T_OnData); //init the builder gf_rtp_builder_init(tmp->rtp_p, PayloadID, Path_MTU, max_ptime, streamType, codecid, PL_ID, MinSize, MaxSize, avgTS, maxDTSDelta, IV_length, KI_length, mpeg4mode); /*ISMA compliance is a pain...*/ if (force_dts_delta) tmp->rtp_p->slMap.DTSDeltaLength = force_dts_delta; /* Hint Track Setup */ tmp->TrackID = gf_isom_get_track_id(file, TrackNum); tmp->HintID = tmp->TrackID + 65535; while (gf_isom_get_track_by_id(file, tmp->HintID)) tmp->HintID++; tmp->HintTrack = gf_isom_new_track(file, tmp->HintID, GF_ISOM_MEDIA_HINT, my_sl.timestampResolution); gf_isom_setup_hint_track(file, tmp->HintTrack, GF_ISOM_HINT_RTP); /*create a hint description*/ gf_isom_new_hint_description(file, tmp->HintTrack, -1, -1, 0, &descIndex); gf_isom_rtp_set_timescale(file, tmp->HintTrack, descIndex, my_sl.timestampResolution); if (hintType==GF_RTP_PAYT_MPEG4) { tmp->rtp_p->slMap.CodecID = codecid; /*set this SL for extraction.*/ gf_isom_set_extraction_slc(file, TrackNum, 1, &my_sl); } tmp->bandwidth = bandwidth; /*set interleaving*/ gf_isom_set_track_interleaving_group(file, TrackNum, InterleaveGroupID); if (!copy_media) { /*if we don't copy data set hint track and media track in the same group*/ gf_isom_set_track_interleaving_group(file, tmp->HintTrack, InterleaveGroupID); } else { gf_isom_set_track_interleaving_group(file, tmp->HintTrack, InterleaveGroupID + OFFSET_HINT_GROUP_ID); } /*use user-secified priority*/ InterleaveGroupPriority*=2; gf_isom_set_track_priority_in_group(file, TrackNum, InterleaveGroupPriority+1); gf_isom_set_track_priority_in_group(file, tmp->HintTrack, InterleaveGroupPriority); #if 0 #endif *e = GF_OK; return tmp; } GF_EXPORT GF_Err gf_hinter_track_force_no_offsets(GF_RTPHinter *tkHinter) { GF_Err e; if (!tkHinter) return GF_BAD_PARAM; e = gf_isom_rtp_set_time_offset(tkHinter->file, tkHinter->HintTrack, 1, 0); if (e) return e; return 
gf_isom_rtp_set_time_sequence_offset(tkHinter->file, tkHinter->HintTrack, 1, 0); } GF_EXPORT u32 gf_hinter_track_get_bandwidth(GF_RTPHinter *tkHinter) { return tkHinter->bandwidth; } GF_EXPORT u32 gf_hinter_track_get_flags(GF_RTPHinter *tkHinter) { return tkHinter->rtp_p->flags; } GF_EXPORT void gf_hinter_track_get_payload_name(GF_RTPHinter *tkHinter, char *payloadName) { char mediaName[30]; gf_rtp_builder_get_payload_name(tkHinter->rtp_p, payloadName, mediaName); } GF_EXPORT void gf_hinter_track_del(GF_RTPHinter *tkHinter) { if (!tkHinter) return; if (tkHinter->rtp_p) gf_rtp_builder_del(tkHinter->rtp_p); gf_free(tkHinter); } GF_EXPORT GF_Err gf_hinter_track_process(GF_RTPHinter *tkHint) { GF_Err e; u32 i, descIndex, duration; u64 ts; u8 PadBits; GF_Fraction ft; GF_ISOSample *samp; tkHint->HintSample = tkHint->RTPTime = 0; tkHint->TotalSample = gf_isom_get_sample_count(tkHint->file, tkHint->TrackNum); ft.num = tkHint->rtp_p->sl_config.timestampResolution; ft.den = tkHint->OrigTimeScale; e = GF_OK; for (i=0; i<tkHint->TotalSample; i++) { samp = gf_isom_get_sample(tkHint->file, tkHint->TrackNum, i+1, &descIndex); if (!samp) return gf_isom_last_error(tkHint->file); //setup SL tkHint->CurrentSample = i + 1; /*keep same AU indicator if sync shadow - TODO FIXME: this assumes shadows are placed interleaved with the track content which is the case for GPAC scene carousel generation, but may not always be true*/ if (samp->IsRAP==RAP_REDUNDANT) { tkHint->rtp_p->sl_header.AU_sequenceNumber -= 1; samp->IsRAP = RAP; } ts = ft.num * (samp->DTS+samp->CTS_Offset) / ft.den; tkHint->rtp_p->sl_header.compositionTimeStamp = ts; ts = ft.num * samp->DTS / ft.den; tkHint->rtp_p->sl_header.decodingTimeStamp = ts; tkHint->rtp_p->sl_header.randomAccessPointFlag = samp->IsRAP; tkHint->base_offset_in_sample = 0; /*crypted*/ if (tkHint->rtp_p->slMap.IV_length) { GF_ISMASample *s = gf_isom_get_ismacryp_sample(tkHint->file, tkHint->TrackNum, samp, descIndex); /*one byte take for selective_enc flag*/ if (s->flags & GF_ISOM_ISMA_USE_SEL_ENC) tkHint->base_offset_in_sample += 1; if (s->flags & GF_ISOM_ISMA_IS_ENCRYPTED) tkHint->base_offset_in_sample += s->IV_length + s->KI_length; gf_free(samp->data); samp->data = s->data; samp->dataLength = s->dataLength; gf_rtp_builder_set_cryp_info(tkHint->rtp_p, s->IV, (char*)s->key_indicator, (s->flags & GF_ISOM_ISMA_IS_ENCRYPTED) ? 
1 : 0); s->data = NULL; s->dataLength = 0; gf_isom_ismacryp_delete_sample(s); } if (tkHint->rtp_p->sl_config.usePaddingFlag) { gf_isom_get_sample_padding_bits(tkHint->file, tkHint->TrackNum, i+1, &PadBits); tkHint->rtp_p->sl_header.paddingBits = PadBits; } else { tkHint->rtp_p->sl_header.paddingBits = 0; } duration = gf_isom_get_sample_duration(tkHint->file, tkHint->TrackNum, i+1); // ts = (u32) (ft * (s64) (duration)); /*unpack nal units*/ if (tkHint->avc_nalu_size) { u32 v, size; u32 remain = samp->dataLength; char *ptr = samp->data; tkHint->rtp_p->sl_header.accessUnitStartFlag = 1; tkHint->rtp_p->sl_header.accessUnitEndFlag = 0; while (remain) { size = 0; v = tkHint->avc_nalu_size; if (v>remain) { GF_LOG(GF_LOG_ERROR, GF_LOG_RTP, ("[rtp hinter] Broken AVC nalu encapsulation: NALU size length is %d but only %d bytes left in sample %d\n", v, remain, tkHint->CurrentSample)); break; } while (v) { size |= (u8) *ptr; ptr++; remain--; v-=1; if (v) size<<=8; } tkHint->base_offset_in_sample = samp->dataLength-remain; if (remain < size) { GF_LOG(GF_LOG_ERROR, GF_LOG_RTP, ("[rtp hinter] Broken AVC nalu encapsulation: NALU size is %d but only %d bytes left in sample %d\n", size, remain, tkHint->CurrentSample)); break; } remain -= size; tkHint->rtp_p->sl_header.accessUnitEndFlag = remain ? 0 : 1; if (!size) { GF_LOG(GF_LOG_WARNING, GF_LOG_RTP, ("[rtp hinter] Broken AVC nalu encapsulation: NALU size is 0, ignoring it\n", size)); } else { e = gf_rtp_builder_process(tkHint->rtp_p, ptr, size, (u8) !remain, samp->dataLength, duration, (u8) (descIndex + GF_RTP_TX3G_SIDX_OFFSET) ); ptr += size; } tkHint->rtp_p->sl_header.accessUnitStartFlag = 0; } } else { e = gf_rtp_builder_process(tkHint->rtp_p, samp->data, samp->dataLength, 1, samp->dataLength, duration, (u8) (descIndex + GF_RTP_TX3G_SIDX_OFFSET) ); } tkHint->rtp_p->sl_header.packetSequenceNumber += 1; //signal some progress gf_set_progress("Hinting", tkHint->CurrentSample, tkHint->TotalSample); tkHint->rtp_p->sl_header.AU_sequenceNumber += 1; gf_isom_sample_del(&samp); if (e) return e; } //flush gf_rtp_builder_process(tkHint->rtp_p, NULL, 0, 1, 0, 0, 0); gf_isom_end_hint_sample(tkHint->file, tkHint->HintTrack, (u8) tkHint->SampleIsRAP); return GF_OK; } static u32 write_nalu_config_array(char *sdpLine, GF_List *nalus) { u32 i, count, b64s; char b64[200]; count = gf_list_count(nalus); for (i=0; i<count; i++) { GF_NALUFFParam *sl = (GF_NALUFFParam *)gf_list_get(nalus, i); b64s = gf_base64_encode(sl->data, sl->size, b64, 200); b64[b64s]=0; strcat(sdpLine, b64); if (i+1<count) strcat(sdpLine, ","); } return count; } static void write_avc_config(char *sdpLine, GF_AVCConfig *avcc, GF_AVCConfig *svcc) { u32 count = 0; if (avcc) count += gf_list_count(avcc->sequenceParameterSets) + gf_list_count(avcc->pictureParameterSets) + gf_list_count(avcc->sequenceParameterSetExtensions); if (svcc) count += gf_list_count(svcc->sequenceParameterSets) + gf_list_count(svcc->pictureParameterSets); if (!count) return; strcat(sdpLine, "; sprop-parameter-sets="); if (avcc) { count = write_nalu_config_array(sdpLine, avcc->sequenceParameterSets); if (count) strcat(sdpLine, ","); count = write_nalu_config_array(sdpLine, avcc->sequenceParameterSetExtensions); if (count) strcat(sdpLine, ","); count = write_nalu_config_array(sdpLine, avcc->pictureParameterSets); if (count) strcat(sdpLine, ","); } if (svcc) { count = write_nalu_config_array(sdpLine, svcc->sequenceParameterSets); if (count) strcat(sdpLine, ","); count = write_nalu_config_array(sdpLine, svcc->pictureParameterSets); if 
(count) strcat(sdpLine, ","); } count = (u32) strlen(sdpLine); if (sdpLine[count-1] == ',') sdpLine[count-1] = 0; } GF_EXPORT GF_Err gf_hinter_track_finalize(GF_RTPHinter *tkHint, Bool AddSystemInfo) { u32 Width, Height; GF_ESD *esd; char sdpLine[20000]; char mediaName[30], payloadName[30]; u32 mtype; Width = Height = 0; gf_isom_sdp_clean_track(tkHint->file, tkHint->TrackNum); mtype = gf_isom_get_media_type(tkHint->file, tkHint->TrackNum); if (gf_isom_is_video_handler_type(mtype)) gf_isom_get_visual_info(tkHint->file, tkHint->TrackNum, 1, &Width, &Height); gf_rtp_builder_get_payload_name(tkHint->rtp_p, payloadName, mediaName); /*TODO- extract out of rtp_p for future live tools*/ sprintf(sdpLine, "m=%s 0 RTP/%s %d", mediaName, tkHint->rtp_p->slMap.IV_length ? "SAVP" : "AVP", tkHint->rtp_p->PayloadType); gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); if (tkHint->bandwidth) { sprintf(sdpLine, "b=AS:%d", tkHint->bandwidth); gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); } if (tkHint->nb_chan) { sprintf(sdpLine, "a=rtpmap:%d %s/%d/%d", tkHint->rtp_p->PayloadType, payloadName, tkHint->rtp_p->sl_config.timestampResolution, tkHint->nb_chan); } else { sprintf(sdpLine, "a=rtpmap:%d %s/%d", tkHint->rtp_p->PayloadType, payloadName, tkHint->rtp_p->sl_config.timestampResolution); } gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); /*control for MPEG-4*/ if (AddSystemInfo) { sprintf(sdpLine, "a=mpeg4-esid:%d", gf_isom_get_track_id(tkHint->file, tkHint->TrackNum)); gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); } /*control for QTSS/DSS*/ sprintf(sdpLine, "a=control:trackID=%d", gf_isom_get_track_id(tkHint->file, tkHint->HintTrack)); gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); /*H263 extensions*/ if (tkHint->rtp_p->rtp_payt == GF_RTP_PAYT_H263) { sprintf(sdpLine, "a=cliprect:0,0,%d,%d", Height, Width); gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); } /*AMR*/ else if ((tkHint->rtp_p->rtp_payt == GF_RTP_PAYT_AMR) || (tkHint->rtp_p->rtp_payt == GF_RTP_PAYT_AMR_WB)) { sprintf(sdpLine, "a=fmtp:%d octet-align=1", tkHint->rtp_p->PayloadType); gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); } /*Text*/ else if (tkHint->rtp_p->rtp_payt == GF_RTP_PAYT_3GPP_TEXT) { u32 w, h, i, m_w, m_h; s32 tx, ty; s16 l; gf_isom_get_track_layout_info(tkHint->file, tkHint->TrackNum, &w, &h, &tx, &ty, &l); m_w = w; m_h = h; for (i=0; i<gf_isom_get_track_count(tkHint->file); i++) { switch (gf_isom_get_media_type(tkHint->file, i+1)) { case GF_ISOM_MEDIA_SCENE: case GF_ISOM_MEDIA_VISUAL: case GF_ISOM_MEDIA_AUXV: case GF_ISOM_MEDIA_PICT: gf_isom_get_track_layout_info(tkHint->file, i+1, &w, &h, &tx, &ty, &l); if (w>m_w) m_w = w; if (h>m_h) m_h = h; break; default: break; } } gf_media_format_ttxt_sdp(tkHint->rtp_p, payloadName, sdpLine, w, h, tx, ty, l, m_w, m_h, NULL); strcat(sdpLine, "; tx3g="); for (i=0; i<gf_isom_get_sample_description_count(tkHint->file, tkHint->TrackNum); i++) { u8 *tx3g; char buffer[2000]; u32 tx3g_len, len; gf_isom_text_get_encoded_tx3g(tkHint->file, tkHint->TrackNum, i+1, GF_RTP_TX3G_SIDX_OFFSET, &tx3g, &tx3g_len); len = gf_base64_encode(tx3g, tx3g_len, buffer, 2000); gf_free(tx3g); buffer[len] = 0; if (i) strcat(sdpLine, ", "); strcat(sdpLine, buffer); } gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); } /*EVRC/SMV in non header-free mode*/ else if ((tkHint->rtp_p->rtp_payt == GF_RTP_PAYT_EVRC_SMV) && (tkHint->rtp_p->auh_size>1)) { 
sprintf(sdpLine, "a=fmtp:%d maxptime=%d", tkHint->rtp_p->PayloadType, tkHint->rtp_p->auh_size*20); gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); } /*H264/AVC*/ else if ((tkHint->rtp_p->rtp_payt == GF_RTP_PAYT_H264_AVC) || (tkHint->rtp_p->rtp_payt == GF_RTP_PAYT_H264_SVC)) { GF_AVCConfig *avcc = gf_isom_avc_config_get(tkHint->file, tkHint->TrackNum, 1); GF_AVCConfig *svcc = gf_isom_svc_config_get(tkHint->file, tkHint->TrackNum, 1); /*TODO - check syntax for SVC (might be some extra signaling)*/ if (avcc) { sprintf(sdpLine, "a=fmtp:%d profile-level-id=%02X%02X%02X; packetization-mode=1", tkHint->rtp_p->PayloadType, avcc->AVCProfileIndication, avcc->profile_compatibility, avcc->AVCLevelIndication); } else { if (!svcc) return GF_ISOM_INVALID_FILE; sprintf(sdpLine, "a=fmtp:%d profile-level-id=%02X%02X%02X; packetization-mode=1", tkHint->rtp_p->PayloadType, svcc->AVCProfileIndication, svcc->profile_compatibility, svcc->AVCLevelIndication); } write_avc_config(sdpLine, avcc, svcc); gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); gf_odf_avc_cfg_del(avcc); gf_odf_avc_cfg_del(svcc); } /*MPEG-4 decoder config*/ else if (tkHint->rtp_p->rtp_payt==GF_RTP_PAYT_MPEG4) { esd = gf_isom_get_esd(tkHint->file, tkHint->TrackNum, 1); if (esd && esd->decoderConfig && esd->decoderConfig->decoderSpecificInfo && esd->decoderConfig->decoderSpecificInfo->data) { gf_rtp_builder_format_sdp(tkHint->rtp_p, payloadName, sdpLine, esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength); } else { gf_rtp_builder_format_sdp(tkHint->rtp_p, payloadName, sdpLine, NULL, 0); } if (esd) gf_odf_desc_del((GF_Descriptor *)esd); if (tkHint->rtp_p->slMap.IV_length) { const char *kms; gf_isom_get_ismacryp_info(tkHint->file, tkHint->TrackNum, 1, NULL, NULL, NULL, NULL, &kms, NULL, NULL, NULL); if (!strnicmp(kms, "(key)", 5) || !strnicmp(kms, "(ipmp)", 6) || !strnicmp(kms, "(uri)", 5)) { strcat(sdpLine, "; ISMACrypKey="); } else { strcat(sdpLine, "; ISMACrypKey=(uri)"); } strcat(sdpLine, kms); } gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); } /*MPEG-4 Audio LATM*/ else if (tkHint->rtp_p->rtp_payt==GF_RTP_PAYT_LATM) { GF_BitStream *bs; u8 *config_bytes; u32 config_size; /* form config string */ bs = gf_bs_new(NULL, 32, GF_BITSTREAM_WRITE); gf_bs_write_int(bs, 0, 1); /* AudioMuxVersion */ gf_bs_write_int(bs, 1, 1); /* all streams same time */ gf_bs_write_int(bs, 0, 6); /* numSubFrames */ gf_bs_write_int(bs, 0, 4); /* numPrograms */ gf_bs_write_int(bs, 0, 3); /* numLayer */ /* audio-specific config */ esd = gf_isom_get_esd(tkHint->file, tkHint->TrackNum, 1); if (esd && esd->decoderConfig && esd->decoderConfig->decoderSpecificInfo) { /*PacketVideo patch: don't signal SBR and PS stuff, not allowed in LATM with audioMuxVersion=0*/ gf_bs_write_data(bs, esd->decoderConfig->decoderSpecificInfo->data, MIN(esd->decoderConfig->decoderSpecificInfo->dataLength, 2) ); } if (esd) gf_odf_desc_del((GF_Descriptor *)esd); /* other data */ gf_bs_write_int(bs, 0, 3); /* frameLengthType */ gf_bs_write_int(bs, 0xff, 8); /* latmBufferFullness */ gf_bs_write_int(bs, 0, 1); /* otherDataPresent */ gf_bs_write_int(bs, 0, 1); /* crcCheckPresent */ gf_bs_get_content(bs, &config_bytes, &config_size); gf_bs_del(bs); gf_rtp_builder_format_sdp(tkHint->rtp_p, payloadName, sdpLine, config_bytes, config_size); gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); gf_free(config_bytes); } #if GPAC_ENABLE_3GPP_DIMS_RTP /*3GPP DIMS*/ else if 
(tkHint->rtp_p->rtp_payt==GF_RTP_PAYT_3GPP_DIMS) { GF_DIMSDescription dims; gf_isom_get_visual_info(tkHint->file, tkHint->TrackNum, 1, &Width, &Height); gf_isom_get_dims_description(tkHint->file, tkHint->TrackNum, 1, &dims); sprintf(sdpLine, "a=fmtp:%d Version-profile=%d", tkHint->rtp_p->PayloadType, dims.profile); if (! dims.fullRequestHost) { char fmt[200]; strcat(sdpLine, ";useFullRequestHost=0"); sprintf(fmt, ";pathComponents=%d", dims.pathComponents); strcat(sdpLine, fmt); } if (!dims.streamType) strcat(sdpLine, ";stream-type=secondary"); if (dims.containsRedundant == 1) strcat(sdpLine, ";contains-redundant=main"); else if (dims.containsRedundant == 2) strcat(sdpLine, ";contains-redundant=redundant"); if (dims.textEncoding && strlen(dims.textEncoding)) { strcat(sdpLine, ";text-encoding="); strcat(sdpLine, dims.textEncoding); } if (dims.contentEncoding && strlen(dims.contentEncoding)) { strcat(sdpLine, ";content-coding="); strcat(sdpLine, dims.contentEncoding); } if (dims.contentEncoding && dims.content_script_types && strlen(dims.content_script_types) ) { strcat(sdpLine, ";content-script-types="); strcat(sdpLine, dims.contentEncoding); } gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); } #endif /*extensions for some mobile phones*/ if (Width && Height) { sprintf(sdpLine, "a=framesize:%d %d-%d", tkHint->rtp_p->PayloadType, Width, Height); gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); } esd = gf_isom_get_esd(tkHint->file, tkHint->TrackNum, 1); if (esd && esd->decoderConfig && (esd->decoderConfig->rvc_config || esd->decoderConfig->predefined_rvc_config)) { if (esd->decoderConfig->predefined_rvc_config) { sprintf(sdpLine, "a=rvc-config-predef:%d", esd->decoderConfig->predefined_rvc_config); } else { /*temporary ...*/ if ((esd->decoderConfig->objectTypeIndication==GF_CODECID_AVC) || (esd->decoderConfig->objectTypeIndication==GF_CODECID_SVC)) { sprintf(sdpLine, "a=rvc-config:%s", "http://download.tsi.telecom-paristech.fr/gpac/RVC/rvc_config_avc.xml"); } else { sprintf(sdpLine, "a=rvc-config:%s", "http://download.tsi.telecom-paristech.fr/gpac/RVC/rvc_config_sp.xml"); } } gf_isom_sdp_add_track_line(tkHint->file, tkHint->HintTrack, sdpLine); } if (esd) gf_odf_desc_del((GF_Descriptor *)esd); gf_isom_set_track_enabled(tkHint->file, tkHint->HintTrack, GF_TRUE); return GF_OK; } GF_EXPORT Bool gf_hinter_can_embbed_data(u8 *data, u32 data_size, u32 streamType) { char data64[5000]; u32 size64; size64 = gf_base64_encode(data, data_size, data64, 5000); if (!size64) return 0; switch (streamType) { case GF_STREAM_OD: size64 += (u32) strlen("data:application/mpeg4-od-au;base64,"); break; case GF_STREAM_SCENE: size64 += (u32) strlen("data:application/mpeg4-bifs-au;base64,"); break; default: /*NOT NORMATIVE*/ size64 += (u32) strlen("data:application/mpeg4-es-au;base64,"); break; } if (size64>=255) return 0; return 1; } GF_EXPORT GF_Err gf_hinter_finalize(GF_ISOFile *file, GF_SDP_IODProfile IOD_Profile, u32 bandwidth) { u32 i, sceneT, odT, descIndex, size, size64; GF_InitialObjectDescriptor *iod; GF_SLConfig slc; GF_ISOSample *samp; Bool remove_ocr; u8 *buffer; char buf64[5000], sdpLine[5100]; gf_isom_sdp_clean(file); if (bandwidth) { sprintf(buf64, "b=AS:%d", bandwidth); gf_isom_sdp_add_line(file, buf64); } //xtended attribute for copyright if (gf_sys_is_test_mode()) { sprintf(buf64, "a=x-copyright: %s", "MP4/3GP File hinted with GPAC - (c) Telecom ParisTech (http://gpac.io)"); } else { sprintf(buf64, "a=x-copyright: MP4/3GP File hinted with GPAC %s - %s", 
gf_gpac_version(), gf_gpac_copyright() ); } gf_isom_sdp_add_line(file, buf64); if (IOD_Profile == GF_SDP_IOD_NONE) return GF_OK; odT = sceneT = 0; for (i=0; i<gf_isom_get_track_count(file); i++) { if (!gf_isom_is_track_in_root_od(file, i+1)) continue; switch (gf_isom_get_media_type(file,i+1)) { case GF_ISOM_MEDIA_OD: odT = i+1; break; case GF_ISOM_MEDIA_SCENE: sceneT = i+1; break; } } remove_ocr = 0; if (IOD_Profile == GF_SDP_IOD_ISMA_STRICT) { IOD_Profile = GF_SDP_IOD_ISMA; remove_ocr = 1; } /*if we want ISMA like iods, we need at least BIFS */ if ( (IOD_Profile == GF_SDP_IOD_ISMA) && !sceneT ) return GF_BAD_PARAM; /*do NOT change PLs, we assume they are correct*/ iod = (GF_InitialObjectDescriptor *) gf_isom_get_root_od(file); if (!iod) return GF_NOT_SUPPORTED; /*rewrite an IOD with good SL config - embbed data if possible*/ if (IOD_Profile == GF_SDP_IOD_ISMA) { GF_ESD *esd; Bool is_ok = 1; while (gf_list_count(iod->ESDescriptors)) { esd = (GF_ESD*)gf_list_get(iod->ESDescriptors, 0); gf_odf_desc_del((GF_Descriptor *) esd); gf_list_rem(iod->ESDescriptors, 0); } /*get OD esd, and embbed stream data if possible*/ if (odT) { esd = gf_isom_get_esd(file, odT, 1); if (gf_isom_get_sample_count(file, odT)==1) { samp = gf_isom_get_sample(file, odT, 1, &descIndex); if (gf_hinter_can_embbed_data(samp->data, samp->dataLength, GF_STREAM_OD)) { InitSL_NULL(&slc); slc.predefined = 0; slc.hasRandomAccessUnitsOnlyFlag = 1; slc.timeScale = slc.timestampResolution = gf_isom_get_media_timescale(file, odT); slc.OCRResolution = 1000; slc.startCTS = samp->DTS+samp->CTS_Offset; slc.startDTS = samp->DTS; //set the SL for future extraction gf_isom_set_extraction_slc(file, odT, 1, &slc); size64 = gf_base64_encode(samp->data, samp->dataLength, buf64, 2000); buf64[size64] = 0; sprintf(sdpLine, "data:application/mpeg4-od-au;base64,%s", buf64); esd->decoderConfig->avgBitrate = 0; esd->decoderConfig->bufferSizeDB = samp->dataLength; esd->decoderConfig->maxBitrate = 0; size64 = (u32) strlen(sdpLine)+1; esd->URLString = (char*)gf_malloc(sizeof(char) * size64); strcpy(esd->URLString, sdpLine); } else { GF_LOG(GF_LOG_WARNING, GF_LOG_RTP, ("[rtp hinter] OD sample too large to be embedded in IOD - ISMA disabled\n")); is_ok = 0; } gf_isom_sample_del(&samp); } if (remove_ocr) esd->OCRESID = 0; else if (esd->OCRESID == esd->ESID) esd->OCRESID = 0; //OK, add this to our IOD gf_list_add(iod->ESDescriptors, esd); } esd = gf_isom_get_esd(file, sceneT, 1); if (gf_isom_get_sample_count(file, sceneT)==1) { samp = gf_isom_get_sample(file, sceneT, 1, &descIndex); if (gf_hinter_can_embbed_data(samp->data, samp->dataLength, GF_STREAM_SCENE)) { slc.timeScale = slc.timestampResolution = gf_isom_get_media_timescale(file, sceneT); slc.OCRResolution = 1000; slc.startCTS = samp->DTS+samp->CTS_Offset; slc.startDTS = samp->DTS; //set the SL for future extraction gf_isom_set_extraction_slc(file, sceneT, 1, &slc); //encode in Base64 the sample size64 = gf_base64_encode(samp->data, samp->dataLength, buf64, 2000); buf64[size64] = 0; sprintf(sdpLine, "data:application/mpeg4-bifs-au;base64,%s", buf64); esd->decoderConfig->avgBitrate = 0; esd->decoderConfig->bufferSizeDB = samp->dataLength; esd->decoderConfig->maxBitrate = 0; esd->URLString = (char*)gf_malloc(sizeof(char) * (strlen(sdpLine)+1)); strcpy(esd->URLString, sdpLine); } else { GF_LOG(GF_LOG_ERROR, GF_LOG_RTP, ("[rtp hinter] Scene description sample too large to be embedded in IOD - ISMA disabled\n")); is_ok = 0; } gf_isom_sample_del(&samp); } if (remove_ocr) esd->OCRESID = 0; else if 
(esd->OCRESID == esd->ESID) esd->OCRESID = 0; gf_list_add(iod->ESDescriptors, esd); if (is_ok) { u32 has_a, has_v, has_i_a, has_i_v; has_a = has_v = has_i_a = has_i_v = 0; for (i=0; i<gf_isom_get_track_count(file); i++) { esd = gf_isom_get_esd(file, i+1, 1); if (!esd) continue; if (esd->decoderConfig->streamType==GF_STREAM_VISUAL) { if (esd->decoderConfig->objectTypeIndication==GF_CODECID_MPEG4_PART2) has_i_v ++; else has_v++; } else if (esd->decoderConfig->streamType==GF_STREAM_AUDIO) { if (esd->decoderConfig->objectTypeIndication==GF_CODECID_AAC_MPEG4) has_i_a ++; else has_a++; } gf_odf_desc_del((GF_Descriptor *)esd); } /*only 1 MPEG-4 visual max and 1 MPEG-4 audio max for ISMA compliancy*/ if (!has_v && !has_a && (has_i_v<=1) && (has_i_a<=1)) { sprintf(sdpLine, "a=isma-compliance:1,1.0,1"); gf_isom_sdp_add_line(file, sdpLine); } } } //encode the IOD buffer = NULL; size = 0; gf_odf_desc_write((GF_Descriptor *) iod, &buffer, &size); gf_odf_desc_del((GF_Descriptor *)iod); //encode in Base64 the iod size64 = gf_base64_encode(buffer, size, buf64, 2000); buf64[size64] = 0; gf_free(buffer); sprintf(sdpLine, "a=mpeg4-iod:\"data:application/mpeg4-iod;base64,%s\"", buf64); gf_isom_sdp_add_line(file, sdpLine); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_HINTING*/ #endif /*GPAC_DISABLE_ISOM*/
null
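A minimal, self-contained sketch of the length-prefixed NALU walk that the hinter loop in the record above performs when `avc_nalu_size` is set. This is not GPAC code and none of the names below come from the library; it only illustrates the big-endian length accumulation and the two bounds checks (size field longer than the remaining bytes, declared NALU size longer than the remaining bytes) that the loop above reports as "Broken AVC nalu encapsulation".

/* Standalone illustration, assuming hypothetical names; not GPAC API. */
#include <stdio.h>

typedef unsigned char u8;
typedef unsigned int  u32;

/* Walk a buffer of NAL units, each preceded by a big-endian length field of
 * nalu_size_len bytes (1..4). Returns the number of non-empty NAL units, or
 * 0 if the encapsulation is broken (same failure conditions as above). */
static u32 iterate_nalus(const u8 *buf, u32 len, u32 nalu_size_len)
{
    const u8 *ptr = buf;
    u32 remain = len, count = 0;

    while (remain) {
        u32 v = nalu_size_len, size = 0;

        if (v > remain)             /* size field would overrun the sample */
            return 0;
        while (v--) {               /* big-endian accumulation of the length */
            size = (size << 8) | *ptr++;
            remain--;
        }
        if (size > remain)          /* declared NALU larger than what is left */
            return 0;
        ptr    += size;
        remain -= size;
        if (size)                   /* zero-length NALUs are skipped, as above */
            count++;
    }
    return count;
}

int main(void)
{
    /* two NAL units with 4-byte length prefixes: 3 bytes, then 2 bytes */
    const u8 sample[] = { 0,0,0,3, 0x65,0x88,0x84, 0,0,0,2, 0x41,0x9a };
    printf("%u NAL units\n", iterate_nalus(sample, sizeof(sample), 4));
    return 0;
}

Checking both conditions before advancing the pointer is what keeps the walk from reading or writing past the end of the sample buffer when the length prefix is corrupt or truncated.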
221
CWE-787
CVE-2020-36177
/* rsa.c * * Copyright (C) 2006-2020 wolfSSL Inc. * * This file is part of wolfSSL. * * wolfSSL is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * wolfSSL is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA */ /* DESCRIPTION This library provides the interface to the RSA. RSA keys can be used to encrypt, decrypt, sign and verify data. */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <wolfssl/wolfcrypt/settings.h> #include <wolfssl/wolfcrypt/error-crypt.h> #ifndef NO_RSA #if defined(HAVE_FIPS) && \ defined(HAVE_FIPS_VERSION) && (HAVE_FIPS_VERSION >= 2) /* set NO_WRAPPERS before headers, use direct internal f()s not wrappers */ #define FIPS_NO_WRAPPERS #ifdef USE_WINDOWS_API #pragma code_seg(".fipsA$e") #pragma const_seg(".fipsB$e") #endif #endif #include <wolfssl/wolfcrypt/rsa.h> #ifdef WOLFSSL_AFALG_XILINX_RSA #include <wolfssl/wolfcrypt/port/af_alg/wc_afalg.h> #endif #ifdef WOLFSSL_HAVE_SP_RSA #include <wolfssl/wolfcrypt/sp.h> #endif /* Possible RSA enable options: * NO_RSA: Overall control of RSA default: on (not defined) * WC_RSA_BLINDING: Uses Blinding w/ Private Ops default: off Note: slower by ~20% * WOLFSSL_KEY_GEN: Allows Private Key Generation default: off * RSA_LOW_MEM: NON CRT Private Operations, less memory default: off * WC_NO_RSA_OAEP: Disables RSA OAEP padding default: on (not defined) * WC_RSA_NONBLOCK: Enables support for RSA non-blocking default: off * WC_RSA_NONBLOCK_TIME:Enables support for time based blocking default: off * time calculation. */ /* RSA Key Size Configuration: * FP_MAX_BITS: With USE_FAST_MATH only default: 4096 If USE_FAST_MATH then use this to override default. Value is key size * 2. Example: RSA 3072 = 6144 */ /* If building for old FIPS. 
*/ #if defined(HAVE_FIPS) && \ (!defined(HAVE_FIPS_VERSION) || (HAVE_FIPS_VERSION < 2)) int wc_InitRsaKey(RsaKey* key, void* ptr) { if (key == NULL) { return BAD_FUNC_ARG; } return InitRsaKey_fips(key, ptr); } int wc_InitRsaKey_ex(RsaKey* key, void* ptr, int devId) { (void)devId; if (key == NULL) { return BAD_FUNC_ARG; } return InitRsaKey_fips(key, ptr); } int wc_FreeRsaKey(RsaKey* key) { return FreeRsaKey_fips(key); } #ifndef WOLFSSL_RSA_VERIFY_ONLY int wc_RsaPublicEncrypt(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key, WC_RNG* rng) { if (in == NULL || out == NULL || key == NULL || rng == NULL) { return BAD_FUNC_ARG; } return RsaPublicEncrypt_fips(in, inLen, out, outLen, key, rng); } #endif #ifndef WOLFSSL_RSA_PUBLIC_ONLY int wc_RsaPrivateDecryptInline(byte* in, word32 inLen, byte** out, RsaKey* key) { if (in == NULL || out == NULL || key == NULL) { return BAD_FUNC_ARG; } return RsaPrivateDecryptInline_fips(in, inLen, out, key); } int wc_RsaPrivateDecrypt(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key) { if (in == NULL || out == NULL || key == NULL) { return BAD_FUNC_ARG; } return RsaPrivateDecrypt_fips(in, inLen, out, outLen, key); } int wc_RsaSSL_Sign(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key, WC_RNG* rng) { if (in == NULL || out == NULL || key == NULL || inLen == 0) { return BAD_FUNC_ARG; } return RsaSSL_Sign_fips(in, inLen, out, outLen, key, rng); } #endif int wc_RsaSSL_VerifyInline(byte* in, word32 inLen, byte** out, RsaKey* key) { if (in == NULL || out == NULL || key == NULL) { return BAD_FUNC_ARG; } return RsaSSL_VerifyInline_fips(in, inLen, out, key); } int wc_RsaSSL_Verify(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key) { if (in == NULL || out == NULL || key == NULL || inLen == 0) { return BAD_FUNC_ARG; } return RsaSSL_Verify_fips(in, inLen, out, outLen, key); } int wc_RsaEncryptSize(RsaKey* key) { if (key == NULL) { return BAD_FUNC_ARG; } return RsaEncryptSize_fips(key); } #ifndef WOLFSSL_RSA_VERIFY_ONLY int wc_RsaFlattenPublicKey(RsaKey* key, byte* a, word32* aSz, byte* b, word32* bSz) { /* not specified as fips so not needing _fips */ return RsaFlattenPublicKey(key, a, aSz, b, bSz); } #endif #ifdef WOLFSSL_KEY_GEN int wc_MakeRsaKey(RsaKey* key, int size, long e, WC_RNG* rng) { return MakeRsaKey(key, size, e, rng); } #endif /* these are functions in asn and are routed to wolfssl/wolfcrypt/asn.c * wc_RsaPrivateKeyDecode * wc_RsaPublicKeyDecode */ #else /* else build without fips, or for new fips */ #include <wolfssl/wolfcrypt/random.h> #include <wolfssl/wolfcrypt/logging.h> #ifdef WOLF_CRYPTO_CB #include <wolfssl/wolfcrypt/cryptocb.h> #endif #ifdef NO_INLINE #include <wolfssl/wolfcrypt/misc.h> #else #define WOLFSSL_MISC_INCLUDED #include <wolfcrypt/src/misc.c> #endif enum { RSA_STATE_NONE = 0, RSA_STATE_ENCRYPT_PAD, RSA_STATE_ENCRYPT_EXPTMOD, RSA_STATE_ENCRYPT_RES, RSA_STATE_DECRYPT_EXPTMOD, RSA_STATE_DECRYPT_UNPAD, RSA_STATE_DECRYPT_RES, }; static void wc_RsaCleanup(RsaKey* key) { #ifndef WOLFSSL_RSA_VERIFY_INLINE if (key && key->data) { /* make sure any allocated memory is free'd */ if (key->dataIsAlloc) { #ifndef WOLFSSL_RSA_PUBLIC_ONLY if (key->type == RSA_PRIVATE_DECRYPT || key->type == RSA_PRIVATE_ENCRYPT) { ForceZero(key->data, key->dataLen); } #endif XFREE(key->data, key->heap, DYNAMIC_TYPE_WOLF_BIGINT); key->dataIsAlloc = 0; } key->data = NULL; key->dataLen = 0; } #else (void)key; #endif } int wc_InitRsaKey_ex(RsaKey* key, void* heap, int devId) { int ret = 0; if (key == NULL) { 
return BAD_FUNC_ARG; } XMEMSET(key, 0, sizeof(RsaKey)); key->type = RSA_TYPE_UNKNOWN; key->state = RSA_STATE_NONE; key->heap = heap; #ifndef WOLFSSL_RSA_VERIFY_INLINE key->dataIsAlloc = 0; key->data = NULL; #endif key->dataLen = 0; #ifdef WC_RSA_BLINDING key->rng = NULL; #endif #ifdef WOLF_CRYPTO_CB key->devId = devId; #else (void)devId; #endif #ifdef WOLFSSL_ASYNC_CRYPT #ifdef WOLFSSL_CERT_GEN XMEMSET(&key->certSignCtx, 0, sizeof(CertSignCtx)); #endif #ifdef WC_ASYNC_ENABLE_RSA /* handle as async */ ret = wolfAsync_DevCtxInit(&key->asyncDev, WOLFSSL_ASYNC_MARKER_RSA, key->heap, devId); if (ret != 0) return ret; #endif /* WC_ASYNC_ENABLE_RSA */ #endif /* WOLFSSL_ASYNC_CRYPT */ #ifndef WOLFSSL_RSA_PUBLIC_ONLY ret = mp_init_multi(&key->n, &key->e, NULL, NULL, NULL, NULL); if (ret != MP_OKAY) return ret; #if !defined(WOLFSSL_KEY_GEN) && !defined(OPENSSL_EXTRA) && defined(RSA_LOW_MEM) ret = mp_init_multi(&key->d, &key->p, &key->q, NULL, NULL, NULL); #else ret = mp_init_multi(&key->d, &key->p, &key->q, &key->dP, &key->dQ, &key->u); #endif if (ret != MP_OKAY) { mp_clear(&key->n); mp_clear(&key->e); return ret; } #else ret = mp_init(&key->n); if (ret != MP_OKAY) return ret; ret = mp_init(&key->e); if (ret != MP_OKAY) { mp_clear(&key->n); return ret; } #endif #ifdef WOLFSSL_XILINX_CRYPT key->pubExp = 0; key->mod = NULL; #endif #ifdef WOLFSSL_AFALG_XILINX_RSA key->alFd = WC_SOCK_NOTSET; key->rdFd = WC_SOCK_NOTSET; #endif return ret; } int wc_InitRsaKey(RsaKey* key, void* heap) { return wc_InitRsaKey_ex(key, heap, INVALID_DEVID); } #ifdef HAVE_PKCS11 int wc_InitRsaKey_Id(RsaKey* key, unsigned char* id, int len, void* heap, int devId) { int ret = 0; if (key == NULL) ret = BAD_FUNC_ARG; if (ret == 0 && (len < 0 || len > RSA_MAX_ID_LEN)) ret = BUFFER_E; if (ret == 0) ret = wc_InitRsaKey_ex(key, heap, devId); if (ret == 0 && id != NULL && len != 0) { XMEMCPY(key->id, id, len); key->idLen = len; } return ret; } #endif #ifdef WOLFSSL_XILINX_CRYPT #define MAX_E_SIZE 4 /* Used to setup hardware state * * key the RSA key to setup * * returns 0 on success */ int wc_InitRsaHw(RsaKey* key) { unsigned char* m; /* RSA modulous */ word32 e = 0; /* RSA public exponent */ int mSz; int eSz; if (key == NULL) { return BAD_FUNC_ARG; } mSz = mp_unsigned_bin_size(&(key->n)); m = (unsigned char*)XMALLOC(mSz, key->heap, DYNAMIC_TYPE_KEY); if (m == NULL) { return MEMORY_E; } if (mp_to_unsigned_bin(&(key->n), m) != MP_OKAY) { WOLFSSL_MSG("Unable to get RSA key modulus"); XFREE(m, key->heap, DYNAMIC_TYPE_KEY); return MP_READ_E; } eSz = mp_unsigned_bin_size(&(key->e)); if (eSz > MAX_E_SIZE) { WOLFSSL_MSG("Exponent of size 4 bytes expected"); XFREE(m, key->heap, DYNAMIC_TYPE_KEY); return BAD_FUNC_ARG; } if (mp_to_unsigned_bin(&(key->e), (byte*)&e + (MAX_E_SIZE - eSz)) != MP_OKAY) { XFREE(m, key->heap, DYNAMIC_TYPE_KEY); WOLFSSL_MSG("Unable to get RSA key exponent"); return MP_READ_E; } /* check for existing mod buffer to avoid memory leak */ if (key->mod != NULL) { XFREE(key->mod, key->heap, DYNAMIC_TYPE_KEY); } key->pubExp = e; key->mod = m; if (XSecure_RsaInitialize(&(key->xRsa), key->mod, NULL, (byte*)&(key->pubExp)) != XST_SUCCESS) { WOLFSSL_MSG("Unable to initialize RSA on hardware"); XFREE(m, key->heap, DYNAMIC_TYPE_KEY); return BAD_STATE_E; } #ifdef WOLFSSL_XILINX_PATCH /* currently a patch of xsecure_rsa.c for 2048 bit keys */ if (wc_RsaEncryptSize(key) == 256) { if (XSecure_RsaSetSize(&(key->xRsa), 2048) != XST_SUCCESS) { WOLFSSL_MSG("Unable to set RSA key size on hardware"); XFREE(m, key->heap, DYNAMIC_TYPE_KEY); 
return BAD_STATE_E; } } #endif return 0; } /* WOLFSSL_XILINX_CRYPT*/ #elif defined(WOLFSSL_CRYPTOCELL) int wc_InitRsaHw(RsaKey* key) { CRYSError_t ret = 0; byte e[3]; word32 eSz = sizeof(e); byte n[256]; word32 nSz = sizeof(n); byte d[256]; word32 dSz = sizeof(d); byte p[128]; word32 pSz = sizeof(p); byte q[128]; word32 qSz = sizeof(q); if (key == NULL) { return BAD_FUNC_ARG; } ret = wc_RsaExportKey(key, e, &eSz, n, &nSz, d, &dSz, p, &pSz, q, &qSz); if (ret != 0) return MP_READ_E; ret = CRYS_RSA_Build_PubKey(&key->ctx.pubKey, e, eSz, n, nSz); if (ret != SA_SILIB_RET_OK){ WOLFSSL_MSG("CRYS_RSA_Build_PubKey failed"); return ret; } ret = CRYS_RSA_Build_PrivKey(&key->ctx.privKey, d, dSz, e, eSz, n, nSz); if (ret != SA_SILIB_RET_OK){ WOLFSSL_MSG("CRYS_RSA_Build_PrivKey failed"); return ret; } key->type = RSA_PRIVATE; return 0; } static int cc310_RSA_GenerateKeyPair(RsaKey* key, int size, long e) { CRYSError_t ret = 0; CRYS_RSAKGData_t KeyGenData; CRYS_RSAKGFipsContext_t FipsCtx; byte ex[3]; uint16_t eSz = sizeof(ex); byte n[256]; uint16_t nSz = sizeof(n); ret = CRYS_RSA_KG_GenerateKeyPair(&wc_rndState, wc_rndGenVectFunc, (byte*)&e, 3*sizeof(uint8_t), size, &key->ctx.privKey, &key->ctx.pubKey, &KeyGenData, &FipsCtx); if (ret != SA_SILIB_RET_OK){ WOLFSSL_MSG("CRYS_RSA_KG_GenerateKeyPair failed"); return ret; } ret = CRYS_RSA_Get_PubKey(&key->ctx.pubKey, ex, &eSz, n, &nSz); if (ret != SA_SILIB_RET_OK){ WOLFSSL_MSG("CRYS_RSA_Get_PubKey failed"); return ret; } ret = wc_RsaPublicKeyDecodeRaw(n, nSz, ex, eSz, key); key->type = RSA_PRIVATE; return ret; } #endif /* WOLFSSL_CRYPTOCELL */ int wc_FreeRsaKey(RsaKey* key) { int ret = 0; if (key == NULL) { return BAD_FUNC_ARG; } wc_RsaCleanup(key); #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_RSA) wolfAsync_DevCtxFree(&key->asyncDev, WOLFSSL_ASYNC_MARKER_RSA); #endif #ifndef WOLFSSL_RSA_PUBLIC_ONLY if (key->type == RSA_PRIVATE) { #if defined(WOLFSSL_KEY_GEN) || defined(OPENSSL_EXTRA) || !defined(RSA_LOW_MEM) mp_forcezero(&key->u); mp_forcezero(&key->dQ); mp_forcezero(&key->dP); #endif mp_forcezero(&key->q); mp_forcezero(&key->p); mp_forcezero(&key->d); } /* private part */ #if defined(WOLFSSL_KEY_GEN) || defined(OPENSSL_EXTRA) || !defined(RSA_LOW_MEM) mp_clear(&key->u); mp_clear(&key->dQ); mp_clear(&key->dP); #endif mp_clear(&key->q); mp_clear(&key->p); mp_clear(&key->d); #endif /* WOLFSSL_RSA_PUBLIC_ONLY */ /* public part */ mp_clear(&key->e); mp_clear(&key->n); #ifdef WOLFSSL_XILINX_CRYPT XFREE(key->mod, key->heap, DYNAMIC_TYPE_KEY); key->mod = NULL; #endif #ifdef WOLFSSL_AFALG_XILINX_RSA /* make sure that sockets are closed on cleanup */ if (key->alFd > 0) { close(key->alFd); key->alFd = WC_SOCK_NOTSET; } if (key->rdFd > 0) { close(key->rdFd); key->rdFd = WC_SOCK_NOTSET; } #endif return ret; } #ifndef WOLFSSL_RSA_PUBLIC_ONLY #if defined(WOLFSSL_KEY_GEN) && !defined(WOLFSSL_NO_RSA_KEY_CHECK) /* Check the pair-wise consistency of the RSA key. * From NIST SP 800-56B, section 6.4.1.1. * Verify that k = (k^e)^d, for some k: 1 < k < n-1. 
*/ int wc_CheckRsaKey(RsaKey* key) { #if defined(WOLFSSL_CRYPTOCELL) return 0; #endif #ifdef WOLFSSL_SMALL_STACK mp_int *k = NULL, *tmp = NULL; #else mp_int k[1], tmp[1]; #endif int ret = 0; #ifdef WOLFSSL_SMALL_STACK k = (mp_int*)XMALLOC(sizeof(mp_int) * 2, NULL, DYNAMIC_TYPE_RSA); if (k == NULL) return MEMORY_E; tmp = k + 1; #endif if (mp_init_multi(k, tmp, NULL, NULL, NULL, NULL) != MP_OKAY) ret = MP_INIT_E; if (ret == 0) { if (key == NULL) ret = BAD_FUNC_ARG; } if (ret == 0) { if (mp_set_int(k, 0x2342) != MP_OKAY) ret = MP_READ_E; } #ifdef WOLFSSL_HAVE_SP_RSA if (ret == 0) { switch (mp_count_bits(&key->n)) { #ifndef WOLFSSL_SP_NO_2048 case 2048: ret = sp_ModExp_2048(k, &key->e, &key->n, tmp); if (ret != 0) ret = MP_EXPTMOD_E; if (ret == 0) { ret = sp_ModExp_2048(tmp, &key->d, &key->n, tmp); if (ret != 0) ret = MP_EXPTMOD_E; } break; #endif /* WOLFSSL_SP_NO_2048 */ #ifndef WOLFSSL_SP_NO_3072 case 3072: ret = sp_ModExp_3072(k, &key->e, &key->n, tmp); if (ret != 0) ret = MP_EXPTMOD_E; if (ret == 0) { ret = sp_ModExp_3072(tmp, &key->d, &key->n, tmp); if (ret != 0) ret = MP_EXPTMOD_E; } break; #endif /* WOLFSSL_SP_NO_3072 */ #ifdef WOLFSSL_SP_4096 case 4096: ret = sp_ModExp_4096(k, &key->e, &key->n, tmp); if (ret != 0) ret = MP_EXPTMOD_E; if (ret == 0) { ret = sp_ModExp_4096(tmp, &key->d, &key->n, tmp); if (ret != 0) ret = MP_EXPTMOD_E; } break; #endif /* WOLFSSL_SP_4096 */ default: /* If using only single prcsision math then issue key size error, otherwise fall-back to multi-precision math calculation */ #ifdef WOLFSSL_SP_MATH ret = WC_KEY_SIZE_E; #endif break; } } #endif /* WOLFSSL_HAVE_SP_RSA */ #ifndef WOLFSSL_SP_MATH if (ret == 0) { if (mp_exptmod(k, &key->e, &key->n, tmp) != MP_OKAY) ret = MP_EXPTMOD_E; } if (ret == 0) { if (mp_exptmod(tmp, &key->d, &key->n, tmp) != MP_OKAY) ret = MP_EXPTMOD_E; } #endif /* !WOLFSSL_SP_MATH */ if (ret == 0) { if (mp_cmp(k, tmp) != MP_EQ) ret = RSA_KEY_PAIR_E; } /* Check d is less than n. */ if (ret == 0 ) { if (mp_cmp(&key->d, &key->n) != MP_LT) { ret = MP_EXPTMOD_E; } } /* Check p*q = n. */ if (ret == 0 ) { if (mp_mul(&key->p, &key->q, tmp) != MP_OKAY) { ret = MP_EXPTMOD_E; } } if (ret == 0 ) { if (mp_cmp(&key->n, tmp) != MP_EQ) { ret = MP_EXPTMOD_E; } } /* Check dP, dQ and u if they exist */ if (ret == 0 && !mp_iszero(&key->dP)) { if (mp_sub_d(&key->p, 1, tmp) != MP_OKAY) { ret = MP_EXPTMOD_E; } /* Check dP <= p-1. */ if (ret == 0) { if (mp_cmp(&key->dP, tmp) != MP_LT) { ret = MP_EXPTMOD_E; } } /* Check e*dP mod p-1 = 1. (dP = 1/e mod p-1) */ if (ret == 0) { if (mp_mulmod(&key->dP, &key->e, tmp, tmp) != MP_OKAY) { ret = MP_EXPTMOD_E; } } if (ret == 0 ) { if (!mp_isone(tmp)) { ret = MP_EXPTMOD_E; } } if (ret == 0) { if (mp_sub_d(&key->q, 1, tmp) != MP_OKAY) { ret = MP_EXPTMOD_E; } } /* Check dQ <= q-1. */ if (ret == 0) { if (mp_cmp(&key->dQ, tmp) != MP_LT) { ret = MP_EXPTMOD_E; } } /* Check e*dP mod p-1 = 1. (dQ = 1/e mod q-1) */ if (ret == 0) { if (mp_mulmod(&key->dQ, &key->e, tmp, tmp) != MP_OKAY) { ret = MP_EXPTMOD_E; } } if (ret == 0 ) { if (!mp_isone(tmp)) { ret = MP_EXPTMOD_E; } } /* Check u <= p. */ if (ret == 0) { if (mp_cmp(&key->u, &key->p) != MP_LT) { ret = MP_EXPTMOD_E; } } /* Check u*q mod p = 1. 
(u = 1/q mod p) */ if (ret == 0) { if (mp_mulmod(&key->u, &key->q, &key->p, tmp) != MP_OKAY) { ret = MP_EXPTMOD_E; } } if (ret == 0 ) { if (!mp_isone(tmp)) { ret = MP_EXPTMOD_E; } } } mp_forcezero(tmp); mp_clear(tmp); mp_clear(k); #ifdef WOLFSSL_SMALL_STACK XFREE(k, NULL, DYNAMIC_TYPE_RSA); #endif return ret; } #endif /* WOLFSSL_KEY_GEN && !WOLFSSL_NO_RSA_KEY_CHECK */ #endif /* WOLFSSL_RSA_PUBLIC_ONLY */ #if !defined(WC_NO_RSA_OAEP) || defined(WC_RSA_PSS) /* Uses MGF1 standard as a mask generation function hType: hash type used seed: seed to use for generating mask seedSz: size of seed buffer out: mask output after generation outSz: size of output buffer */ #if !defined(NO_SHA) || !defined(NO_SHA256) || defined(WOLFSSL_SHA384) || defined(WOLFSSL_SHA512) static int RsaMGF1(enum wc_HashType hType, byte* seed, word32 seedSz, byte* out, word32 outSz, void* heap) { byte* tmp; /* needs to be large enough for seed size plus counter(4) */ byte tmpA[WC_MAX_DIGEST_SIZE + 4]; byte tmpF; /* 1 if dynamic memory needs freed */ word32 tmpSz; int hLen; int ret; word32 counter; word32 idx; hLen = wc_HashGetDigestSize(hType); counter = 0; idx = 0; (void)heap; /* check error return of wc_HashGetDigestSize */ if (hLen < 0) { return hLen; } /* if tmp is not large enough than use some dynamic memory */ if ((seedSz + 4) > sizeof(tmpA) || (word32)hLen > sizeof(tmpA)) { /* find largest amount of memory needed which will be the max of * hLen and (seedSz + 4) since tmp is used to store the hash digest */ tmpSz = ((seedSz + 4) > (word32)hLen)? seedSz + 4: (word32)hLen; tmp = (byte*)XMALLOC(tmpSz, heap, DYNAMIC_TYPE_RSA_BUFFER); if (tmp == NULL) { return MEMORY_E; } tmpF = 1; /* make sure to free memory when done */ } else { /* use array on the stack */ tmpSz = sizeof(tmpA); tmp = tmpA; tmpF = 0; /* no need to free memory at end */ } do { int i = 0; XMEMCPY(tmp, seed, seedSz); /* counter to byte array appended to tmp */ tmp[seedSz] = (byte)((counter >> 24) & 0xFF); tmp[seedSz + 1] = (byte)((counter >> 16) & 0xFF); tmp[seedSz + 2] = (byte)((counter >> 8) & 0xFF); tmp[seedSz + 3] = (byte)((counter) & 0xFF); /* hash and append to existing output */ if ((ret = wc_Hash(hType, tmp, (seedSz + 4), tmp, tmpSz)) != 0) { /* check for if dynamic memory was needed, then free */ if (tmpF) { XFREE(tmp, heap, DYNAMIC_TYPE_RSA_BUFFER); } return ret; } for (i = 0; i < hLen && idx < outSz; i++) { out[idx++] = tmp[i]; } counter++; } while (idx < outSz); /* check for if dynamic memory was needed, then free */ if (tmpF) { XFREE(tmp, heap, DYNAMIC_TYPE_RSA_BUFFER); } return 0; } #endif /* SHA2 Hashes */ /* helper function to direct which mask generation function is used switched on type input */ static int RsaMGF(int type, byte* seed, word32 seedSz, byte* out, word32 outSz, void* heap) { int ret; switch(type) { #ifndef NO_SHA case WC_MGF1SHA1: ret = RsaMGF1(WC_HASH_TYPE_SHA, seed, seedSz, out, outSz, heap); break; #endif #ifndef NO_SHA256 #ifdef WOLFSSL_SHA224 case WC_MGF1SHA224: ret = RsaMGF1(WC_HASH_TYPE_SHA224, seed, seedSz, out, outSz, heap); break; #endif case WC_MGF1SHA256: ret = RsaMGF1(WC_HASH_TYPE_SHA256, seed, seedSz, out, outSz, heap); break; #endif #ifdef WOLFSSL_SHA384 case WC_MGF1SHA384: ret = RsaMGF1(WC_HASH_TYPE_SHA384, seed, seedSz, out, outSz, heap); break; #endif #ifdef WOLFSSL_SHA512 case WC_MGF1SHA512: ret = RsaMGF1(WC_HASH_TYPE_SHA512, seed, seedSz, out, outSz, heap); break; #endif default: WOLFSSL_MSG("Unknown MGF type: check build options"); ret = BAD_FUNC_ARG; } /* in case of default avoid unused warning */ 
(void)seed; (void)seedSz; (void)out; (void)outSz; (void)heap; return ret; } #endif /* !WC_NO_RSA_OAEP || WC_RSA_PSS */ /* Padding */ #ifndef WOLFSSL_RSA_VERIFY_ONLY #ifndef WC_NO_RNG #ifndef WC_NO_RSA_OAEP static int RsaPad_OAEP(const byte* input, word32 inputLen, byte* pkcsBlock, word32 pkcsBlockLen, byte padValue, WC_RNG* rng, enum wc_HashType hType, int mgf, byte* optLabel, word32 labelLen, void* heap) { int ret; int hLen; int psLen; int i; word32 idx; byte* dbMask; #ifdef WOLFSSL_SMALL_STACK byte* lHash = NULL; byte* seed = NULL; #else /* must be large enough to contain largest hash */ byte lHash[WC_MAX_DIGEST_SIZE]; byte seed[ WC_MAX_DIGEST_SIZE]; #endif /* no label is allowed, but catch if no label provided and length > 0 */ if (optLabel == NULL && labelLen > 0) { return BUFFER_E; } /* limit of label is the same as limit of hash function which is massive */ hLen = wc_HashGetDigestSize(hType); if (hLen < 0) { return hLen; } #ifdef WOLFSSL_SMALL_STACK lHash = (byte*)XMALLOC(hLen, heap, DYNAMIC_TYPE_RSA_BUFFER); if (lHash == NULL) { return MEMORY_E; } seed = (byte*)XMALLOC(hLen, heap, DYNAMIC_TYPE_RSA_BUFFER); if (seed == NULL) { XFREE(lHash, heap, DYNAMIC_TYPE_RSA_BUFFER); return MEMORY_E; } #else /* hLen should never be larger than lHash since size is max digest size, but check before blindly calling wc_Hash */ if ((word32)hLen > sizeof(lHash)) { WOLFSSL_MSG("OAEP lHash to small for digest!!"); return MEMORY_E; } #endif if ((ret = wc_Hash(hType, optLabel, labelLen, lHash, hLen)) != 0) { WOLFSSL_MSG("OAEP hash type possibly not supported or lHash to small"); #ifdef WOLFSSL_SMALL_STACK XFREE(lHash, heap, DYNAMIC_TYPE_RSA_BUFFER); XFREE(seed, heap, DYNAMIC_TYPE_RSA_BUFFER); #endif return ret; } /* handles check of location for idx as well as psLen, cast to int to check for pkcsBlockLen(k) - 2 * hLen - 2 being negative This check is similar to decryption where k > 2 * hLen + 2 as msg size approaches 0. In decryption if k is less than or equal -- then there is no possible room for msg. 
k = RSA key size hLen = hash digest size -- will always be >= 0 at this point */ if ((word32)(2 * hLen + 2) > pkcsBlockLen) { WOLFSSL_MSG("OAEP pad error hash to big for RSA key size"); #ifdef WOLFSSL_SMALL_STACK XFREE(lHash, heap, DYNAMIC_TYPE_RSA_BUFFER); XFREE(seed, heap, DYNAMIC_TYPE_RSA_BUFFER); #endif return BAD_FUNC_ARG; } if (inputLen > (pkcsBlockLen - 2 * hLen - 2)) { WOLFSSL_MSG("OAEP pad error message too long"); #ifdef WOLFSSL_SMALL_STACK XFREE(lHash, heap, DYNAMIC_TYPE_RSA_BUFFER); XFREE(seed, heap, DYNAMIC_TYPE_RSA_BUFFER); #endif return BAD_FUNC_ARG; } /* concatenate lHash || PS || 0x01 || msg */ idx = pkcsBlockLen - 1 - inputLen; psLen = pkcsBlockLen - inputLen - 2 * hLen - 2; if (pkcsBlockLen < inputLen) { /*make sure not writing over end of buffer */ #ifdef WOLFSSL_SMALL_STACK XFREE(lHash, heap, DYNAMIC_TYPE_RSA_BUFFER); XFREE(seed, heap, DYNAMIC_TYPE_RSA_BUFFER); #endif return BUFFER_E; } XMEMCPY(pkcsBlock + (pkcsBlockLen - inputLen), input, inputLen); pkcsBlock[idx--] = 0x01; /* PS and M separator */ while (psLen > 0 && idx > 0) { pkcsBlock[idx--] = 0x00; psLen--; } idx = idx - hLen + 1; XMEMCPY(pkcsBlock + idx, lHash, hLen); /* generate random seed */ if ((ret = wc_RNG_GenerateBlock(rng, seed, hLen)) != 0) { #ifdef WOLFSSL_SMALL_STACK XFREE(lHash, heap, DYNAMIC_TYPE_RSA_BUFFER); XFREE(seed, heap, DYNAMIC_TYPE_RSA_BUFFER); #endif return ret; } /* create maskedDB from dbMask */ dbMask = (byte*)XMALLOC(pkcsBlockLen - hLen - 1, heap, DYNAMIC_TYPE_RSA); if (dbMask == NULL) { #ifdef WOLFSSL_SMALL_STACK XFREE(lHash, heap, DYNAMIC_TYPE_RSA_BUFFER); XFREE(seed, heap, DYNAMIC_TYPE_RSA_BUFFER); #endif return MEMORY_E; } XMEMSET(dbMask, 0, pkcsBlockLen - hLen - 1); /* help static analyzer */ ret = RsaMGF(mgf, seed, hLen, dbMask, pkcsBlockLen - hLen - 1, heap); if (ret != 0) { XFREE(dbMask, heap, DYNAMIC_TYPE_RSA); #ifdef WOLFSSL_SMALL_STACK XFREE(lHash, heap, DYNAMIC_TYPE_RSA_BUFFER); XFREE(seed, heap, DYNAMIC_TYPE_RSA_BUFFER); #endif return ret; } i = 0; idx = hLen + 1; while (idx < pkcsBlockLen && (word32)i < (pkcsBlockLen - hLen -1)) { pkcsBlock[idx] = dbMask[i++] ^ pkcsBlock[idx]; idx++; } XFREE(dbMask, heap, DYNAMIC_TYPE_RSA); /* create maskedSeed from seedMask */ idx = 0; pkcsBlock[idx++] = 0x00; /* create seedMask inline */ if ((ret = RsaMGF(mgf, pkcsBlock + hLen + 1, pkcsBlockLen - hLen - 1, pkcsBlock + 1, hLen, heap)) != 0) { #ifdef WOLFSSL_SMALL_STACK XFREE(lHash, heap, DYNAMIC_TYPE_RSA_BUFFER); XFREE(seed, heap, DYNAMIC_TYPE_RSA_BUFFER); #endif return ret; } /* xor created seedMask with seed to make maskedSeed */ i = 0; while (idx < (word32)(hLen + 1) && i < hLen) { pkcsBlock[idx] = pkcsBlock[idx] ^ seed[i++]; idx++; } #ifdef WOLFSSL_SMALL_STACK XFREE(lHash, heap, DYNAMIC_TYPE_RSA_BUFFER); XFREE(seed, heap, DYNAMIC_TYPE_RSA_BUFFER); #endif (void)padValue; return 0; } #endif /* !WC_NO_RSA_OAEP */ #ifdef WC_RSA_PSS /* 0x00 .. 0x00 0x01 | Salt | Gen Hash | 0xbc * XOR MGF over all bytes down to end of Salt * Gen Hash = HASH(8 * 0x00 | Message Hash | Salt) * * input Digest of the message. * inputLen Length of digest. * pkcsBlock Buffer to write to. * pkcsBlockLen Length of buffer to write to. * rng Random number generator (for salt). * htype Hash function to use. * mgf Mask generation function. * saltLen Length of salt to put in padding. * bits Length of key in bits. * heap Used for dynamic memory allocation. * returns 0 on success, PSS_SALTLEN_E when the salt length is invalid * and other negative values on error. 
*/ static int RsaPad_PSS(const byte* input, word32 inputLen, byte* pkcsBlock, word32 pkcsBlockLen, WC_RNG* rng, enum wc_HashType hType, int mgf, int saltLen, int bits, void* heap) { int ret = 0; int hLen, i, o, maskLen, hiBits; byte* m; byte* s; #if defined(WOLFSSL_PSS_LONG_SALT) || defined(WOLFSSL_PSS_SALT_LEN_DISCOVER) #if defined(WOLFSSL_NO_MALLOC) && !defined(WOLFSSL_STATIC_MEMORY) byte salt[RSA_MAX_SIZE/8 + RSA_PSS_PAD_SZ]; #else byte* salt = NULL; #endif #else byte salt[WC_MAX_DIGEST_SIZE]; #endif #if defined(WOLFSSL_PSS_LONG_SALT) || defined(WOLFSSL_PSS_SALT_LEN_DISCOVER) if (pkcsBlockLen > RSA_MAX_SIZE/8) { return MEMORY_E; } #endif hLen = wc_HashGetDigestSize(hType); if (hLen < 0) return hLen; if ((int)inputLen != hLen) { return BAD_FUNC_ARG; } hiBits = (bits - 1) & 0x7; if (hiBits == 0) { /* Per RFC8017, set the leftmost 8emLen - emBits bits of the leftmost octet in DB to zero. */ *(pkcsBlock++) = 0; pkcsBlockLen--; } if (saltLen == RSA_PSS_SALT_LEN_DEFAULT) { saltLen = hLen; #ifdef WOLFSSL_SHA512 /* See FIPS 186-4 section 5.5 item (e). */ if (bits == 1024 && hLen == WC_SHA512_DIGEST_SIZE) { saltLen = RSA_PSS_SALT_MAX_SZ; } #endif } #ifndef WOLFSSL_PSS_LONG_SALT else if (saltLen > hLen) { return PSS_SALTLEN_E; } #endif #ifndef WOLFSSL_PSS_SALT_LEN_DISCOVER else if (saltLen < RSA_PSS_SALT_LEN_DEFAULT) { return PSS_SALTLEN_E; } #else else if (saltLen == RSA_PSS_SALT_LEN_DISCOVER) { saltLen = (int)pkcsBlockLen - hLen - 2; if (saltLen < 0) { return PSS_SALTLEN_E; } } else if (saltLen < RSA_PSS_SALT_LEN_DISCOVER) { return PSS_SALTLEN_E; } #endif if ((int)pkcsBlockLen - hLen < saltLen + 2) { return PSS_SALTLEN_E; } maskLen = pkcsBlockLen - 1 - hLen; #if defined(WOLFSSL_PSS_LONG_SALT) || defined(WOLFSSL_PSS_SALT_LEN_DISCOVER) #if !defined(WOLFSSL_NO_MALLOC) || defined(WOLFSSL_STATIC_MEMORY) salt = (byte*)XMALLOC(RSA_PSS_PAD_SZ + inputLen + saltLen, heap, DYNAMIC_TYPE_RSA_BUFFER); if (salt == NULL) { return MEMORY_E; } #endif s = m = salt; XMEMSET(m, 0, RSA_PSS_PAD_SZ); m += RSA_PSS_PAD_SZ; XMEMCPY(m, input, inputLen); m += inputLen; o = (int)(m - s); if (saltLen > 0) { ret = wc_RNG_GenerateBlock(rng, m, saltLen); if (ret == 0) { m += saltLen; } } #else s = m = pkcsBlock; XMEMSET(m, 0, RSA_PSS_PAD_SZ); m += RSA_PSS_PAD_SZ; XMEMCPY(m, input, inputLen); m += inputLen; o = 0; if (saltLen > 0) { ret = wc_RNG_GenerateBlock(rng, salt, saltLen); if (ret == 0) { XMEMCPY(m, salt, saltLen); m += saltLen; } } #endif if (ret == 0) { /* Put Hash at end of pkcsBlock - 1 */ ret = wc_Hash(hType, s, (word32)(m - s), pkcsBlock + maskLen, hLen); } if (ret == 0) { /* Set the last eight bits or trailer field to the octet 0xbc */ pkcsBlock[pkcsBlockLen - 1] = RSA_PSS_PAD_TERM; ret = RsaMGF(mgf, pkcsBlock + maskLen, hLen, pkcsBlock, maskLen, heap); } if (ret == 0) { /* Clear the first high bit when "8emLen - emBits" is non-zero. 
where emBits = n modBits - 1 */ if (hiBits) pkcsBlock[0] &= (1 << hiBits) - 1; m = pkcsBlock + maskLen - saltLen - 1; *(m++) ^= 0x01; for (i = 0; i < saltLen; i++) { m[i] ^= salt[o + i]; } } #if defined(WOLFSSL_PSS_LONG_SALT) || defined(WOLFSSL_PSS_SALT_LEN_DISCOVER) #if !defined(WOLFSSL_NO_MALLOC) || defined(WOLFSSL_STATIC_MEMORY) if (salt != NULL) { XFREE(salt, heap, DYNAMIC_TYPE_RSA_BUFFER); } #endif #endif return ret; } #endif /* WC_RSA_PSS */ #endif /* !WC_NO_RNG */ static int RsaPad(const byte* input, word32 inputLen, byte* pkcsBlock, word32 pkcsBlockLen, byte padValue, WC_RNG* rng) { if (input == NULL || inputLen == 0 || pkcsBlock == NULL || pkcsBlockLen == 0) { return BAD_FUNC_ARG; } if (pkcsBlockLen - RSA_MIN_PAD_SZ < inputLen) { WOLFSSL_MSG("RsaPad error, invalid length"); return RSA_PAD_E; } pkcsBlock[0] = 0x0; /* set first byte to zero and advance */ pkcsBlock++; pkcsBlockLen--; pkcsBlock[0] = padValue; /* insert padValue */ if (padValue == RSA_BLOCK_TYPE_1) { /* pad with 0xff bytes */ XMEMSET(&pkcsBlock[1], 0xFF, pkcsBlockLen - inputLen - 2); } else { #if !defined(WOLFSSL_RSA_VERIFY_ONLY) && !defined(WC_NO_RNG) /* pad with non-zero random bytes */ word32 padLen, i; int ret; padLen = pkcsBlockLen - inputLen - 1; ret = wc_RNG_GenerateBlock(rng, &pkcsBlock[1], padLen); if (ret != 0) { return ret; } /* remove zeros */ for (i = 1; i < padLen; i++) { if (pkcsBlock[i] == 0) pkcsBlock[i] = 0x01; } #else (void)rng; return RSA_WRONG_TYPE_E; #endif } pkcsBlock[pkcsBlockLen-inputLen-1] = 0; /* separator */ XMEMCPY(pkcsBlock+pkcsBlockLen-inputLen, input, inputLen); return 0; } /* helper function to direct which padding is used */ int wc_RsaPad_ex(const byte* input, word32 inputLen, byte* pkcsBlock, word32 pkcsBlockLen, byte padValue, WC_RNG* rng, int padType, enum wc_HashType hType, int mgf, byte* optLabel, word32 labelLen, int saltLen, int bits, void* heap) { int ret; switch (padType) { case WC_RSA_PKCSV15_PAD: /*WOLFSSL_MSG("wolfSSL Using RSA PKCSV15 padding");*/ ret = RsaPad(input, inputLen, pkcsBlock, pkcsBlockLen, padValue, rng); break; #ifndef WC_NO_RNG #ifndef WC_NO_RSA_OAEP case WC_RSA_OAEP_PAD: WOLFSSL_MSG("wolfSSL Using RSA OAEP padding"); ret = RsaPad_OAEP(input, inputLen, pkcsBlock, pkcsBlockLen, padValue, rng, hType, mgf, optLabel, labelLen, heap); break; #endif #ifdef WC_RSA_PSS case WC_RSA_PSS_PAD: WOLFSSL_MSG("wolfSSL Using RSA PSS padding"); ret = RsaPad_PSS(input, inputLen, pkcsBlock, pkcsBlockLen, rng, hType, mgf, saltLen, bits, heap); break; #endif #endif /* !WC_NO_RNG */ #ifdef WC_RSA_NO_PADDING case WC_RSA_NO_PAD: WOLFSSL_MSG("wolfSSL Using NO padding"); /* In the case of no padding being used check that input is exactly * the RSA key length */ if (bits <= 0 || inputLen != ((word32)bits/WOLFSSL_BIT_SIZE)) { WOLFSSL_MSG("Bad input size"); ret = RSA_PAD_E; } else { XMEMCPY(pkcsBlock, input, inputLen); ret = 0; } break; #endif default: WOLFSSL_MSG("Unknown RSA Pad Type"); ret = RSA_PAD_E; } /* silence warning if not used with padding scheme */ (void)input; (void)inputLen; (void)pkcsBlock; (void)pkcsBlockLen; (void)padValue; (void)rng; (void)padType; (void)hType; (void)mgf; (void)optLabel; (void)labelLen; (void)saltLen; (void)bits; (void)heap; return ret; } #endif /* WOLFSSL_RSA_VERIFY_ONLY */ /* UnPadding */ #ifndef WC_NO_RSA_OAEP /* UnPad plaintext, set start to *output, return length of plaintext, * < 0 on error */ static int RsaUnPad_OAEP(byte *pkcsBlock, unsigned int pkcsBlockLen, byte **output, enum wc_HashType hType, int mgf, byte* optLabel, word32 labelLen, void* 
heap) { int hLen; int ret; byte h[WC_MAX_DIGEST_SIZE]; /* max digest size */ byte* tmp; word32 idx; /* no label is allowed, but catch if no label provided and length > 0 */ if (optLabel == NULL && labelLen > 0) { return BUFFER_E; } hLen = wc_HashGetDigestSize(hType); if ((hLen < 0) || (pkcsBlockLen < (2 * (word32)hLen + 2))) { return BAD_FUNC_ARG; } tmp = (byte*)XMALLOC(pkcsBlockLen, heap, DYNAMIC_TYPE_RSA_BUFFER); if (tmp == NULL) { return MEMORY_E; } XMEMSET(tmp, 0, pkcsBlockLen); /* find seedMask value */ if ((ret = RsaMGF(mgf, (byte*)(pkcsBlock + (hLen + 1)), pkcsBlockLen - hLen - 1, tmp, hLen, heap)) != 0) { XFREE(tmp, heap, DYNAMIC_TYPE_RSA_BUFFER); return ret; } /* xor seedMask value with maskedSeed to get seed value */ for (idx = 0; idx < (word32)hLen; idx++) { tmp[idx] = tmp[idx] ^ pkcsBlock[1 + idx]; } /* get dbMask value */ if ((ret = RsaMGF(mgf, tmp, hLen, tmp + hLen, pkcsBlockLen - hLen - 1, heap)) != 0) { XFREE(tmp, NULL, DYNAMIC_TYPE_RSA_BUFFER); return ret; } /* get DB value by doing maskedDB xor dbMask */ for (idx = 0; idx < (pkcsBlockLen - hLen - 1); idx++) { pkcsBlock[hLen + 1 + idx] = pkcsBlock[hLen + 1 + idx] ^ tmp[idx + hLen]; } /* done with use of tmp buffer */ XFREE(tmp, heap, DYNAMIC_TYPE_RSA_BUFFER); /* advance idx to index of PS and msg separator, account for PS size of 0*/ idx = hLen + 1 + hLen; while (idx < pkcsBlockLen && pkcsBlock[idx] == 0) {idx++;} /* create hash of label for comparison with hash sent */ if ((ret = wc_Hash(hType, optLabel, labelLen, h, hLen)) != 0) { return ret; } /* say no to chosen ciphertext attack. Comparison of lHash, Y, and separator value needs to all happen in constant time. Attackers should not be able to get error condition from the timing of these checks. */ ret = 0; ret |= ConstantCompare(pkcsBlock + hLen + 1, h, hLen); ret += pkcsBlock[idx++] ^ 0x01; /* separator value is 0x01 */ ret += pkcsBlock[0] ^ 0x00; /* Y, the first value, should be 0 */ /* Return 0 data length on error. */ idx = ctMaskSelInt(ctMaskEq(ret, 0), idx, pkcsBlockLen); /* adjust pointer to correct location in array and return size of M */ *output = (byte*)(pkcsBlock + idx); return pkcsBlockLen - idx; } #endif /* WC_NO_RSA_OAEP */ #ifdef WC_RSA_PSS /* 0x00 .. 0x00 0x01 | Salt | Gen Hash | 0xbc * MGF over all bytes down to end of Salt * * pkcsBlock Buffer holding decrypted data. * pkcsBlockLen Length of buffer. * htype Hash function to use. * mgf Mask generation function. * saltLen Length of salt to put in padding. * bits Length of key in bits. * heap Used for dynamic memory allocation. * returns the sum of salt length and SHA-256 digest size on success. * Otherwise, PSS_SALTLEN_E for an incorrect salt length, * WC_KEY_SIZE_E for an incorrect encoded message (EM) size and other negative values on error. 
*/ static int RsaUnPad_PSS(byte *pkcsBlock, unsigned int pkcsBlockLen, byte **output, enum wc_HashType hType, int mgf, int saltLen, int bits, void* heap) { int ret; byte* tmp; int hLen, i, maskLen; #ifdef WOLFSSL_SHA512 int orig_bits = bits; #endif #if defined(WOLFSSL_NO_MALLOC) && !defined(WOLFSSL_STATIC_MEMORY) byte tmp_buf[RSA_MAX_SIZE/8]; tmp = tmp_buf; if (pkcsBlockLen > RSA_MAX_SIZE/8) { return MEMORY_E; } #endif hLen = wc_HashGetDigestSize(hType); if (hLen < 0) return hLen; bits = (bits - 1) & 0x7; if ((pkcsBlock[0] & (0xff << bits)) != 0) { return BAD_PADDING_E; } if (bits == 0) { pkcsBlock++; pkcsBlockLen--; } maskLen = (int)pkcsBlockLen - 1 - hLen; if (maskLen < 0) { WOLFSSL_MSG("RsaUnPad_PSS: Hash too large"); return WC_KEY_SIZE_E; } if (saltLen == RSA_PSS_SALT_LEN_DEFAULT) { saltLen = hLen; #ifdef WOLFSSL_SHA512 /* See FIPS 186-4 section 5.5 item (e). */ if (orig_bits == 1024 && hLen == WC_SHA512_DIGEST_SIZE) saltLen = RSA_PSS_SALT_MAX_SZ; #endif } #ifndef WOLFSSL_PSS_LONG_SALT else if (saltLen > hLen) return PSS_SALTLEN_E; #endif #ifndef WOLFSSL_PSS_SALT_LEN_DISCOVER else if (saltLen < RSA_PSS_SALT_LEN_DEFAULT) return PSS_SALTLEN_E; if (maskLen < saltLen + 1) { return PSS_SALTLEN_E; } #else else if (saltLen < RSA_PSS_SALT_LEN_DISCOVER) return PSS_SALTLEN_E; if (saltLen != RSA_PSS_SALT_LEN_DISCOVER && maskLen < saltLen + 1) { return WC_KEY_SIZE_E; } #endif if (pkcsBlock[pkcsBlockLen - 1] != RSA_PSS_PAD_TERM) { WOLFSSL_MSG("RsaUnPad_PSS: Padding Term Error"); return BAD_PADDING_E; } #if !defined(WOLFSSL_NO_MALLOC) || defined(WOLFSSL_STATIC_MEMORY) tmp = (byte*)XMALLOC(maskLen, heap, DYNAMIC_TYPE_RSA_BUFFER); if (tmp == NULL) { return MEMORY_E; } #endif if ((ret = RsaMGF(mgf, pkcsBlock + maskLen, hLen, tmp, maskLen, heap)) != 0) { XFREE(tmp, heap, DYNAMIC_TYPE_RSA_BUFFER); return ret; } tmp[0] &= (1 << bits) - 1; pkcsBlock[0] &= (1 << bits) - 1; #ifdef WOLFSSL_PSS_SALT_LEN_DISCOVER if (saltLen == RSA_PSS_SALT_LEN_DISCOVER) { for (i = 0; i < maskLen - 1; i++) { if (tmp[i] != pkcsBlock[i]) { break; } } if (tmp[i] != (pkcsBlock[i] ^ 0x01)) { XFREE(tmp, heap, DYNAMIC_TYPE_RSA_BUFFER); WOLFSSL_MSG("RsaUnPad_PSS: Padding Error Match"); return PSS_SALTLEN_RECOVER_E; } saltLen = maskLen - (i + 1); } else #endif { for (i = 0; i < maskLen - 1 - saltLen; i++) { if (tmp[i] != pkcsBlock[i]) { XFREE(tmp, heap, DYNAMIC_TYPE_RSA_BUFFER); WOLFSSL_MSG("RsaUnPad_PSS: Padding Error Match"); return PSS_SALTLEN_E; } } if (tmp[i] != (pkcsBlock[i] ^ 0x01)) { XFREE(tmp, heap, DYNAMIC_TYPE_RSA_BUFFER); WOLFSSL_MSG("RsaUnPad_PSS: Padding Error End"); return PSS_SALTLEN_E; } } for (i++; i < maskLen; i++) pkcsBlock[i] ^= tmp[i]; #if !defined(WOLFSSL_NO_MALLOC) || defined(WOLFSSL_STATIC_MEMORY) XFREE(tmp, heap, DYNAMIC_TYPE_RSA_BUFFER); #endif *output = pkcsBlock + maskLen - saltLen; return saltLen + hLen; } #endif /* UnPad plaintext, set start to *output, return length of plaintext, * < 0 on error */ static int RsaUnPad(const byte *pkcsBlock, unsigned int pkcsBlockLen, byte **output, byte padValue) { int ret = BAD_FUNC_ARG; word16 i; #ifndef WOLFSSL_RSA_VERIFY_ONLY byte invalid = 0; #endif if (output == NULL || pkcsBlockLen < 2 || pkcsBlockLen > 0xFFFF) { return BAD_FUNC_ARG; } if (padValue == RSA_BLOCK_TYPE_1) { /* First byte must be 0x00 and Second byte, block type, 0x01 */ if (pkcsBlock[0] != 0 || pkcsBlock[1] != RSA_BLOCK_TYPE_1) { WOLFSSL_MSG("RsaUnPad error, invalid formatting"); return RSA_PAD_E; } /* check the padding until we find the separator */ for (i = 2; i < pkcsBlockLen && pkcsBlock[i++] == 
0xFF; ) { } /* Minimum of 11 bytes of pre-message data and must have separator. */ if (i < RSA_MIN_PAD_SZ || pkcsBlock[i-1] != 0) { WOLFSSL_MSG("RsaUnPad error, bad formatting"); return RSA_PAD_E; } *output = (byte *)(pkcsBlock + i); ret = pkcsBlockLen - i; } #ifndef WOLFSSL_RSA_VERIFY_ONLY else { word16 j; word16 pastSep = 0; /* Decrypted with private key - unpad must be constant time. */ for (i = 0, j = 2; j < pkcsBlockLen; j++) { /* Update i if not passed the separator and at separator. */ i |= (~pastSep) & ctMask16Eq(pkcsBlock[j], 0x00) & (j + 1); pastSep |= ctMask16Eq(pkcsBlock[j], 0x00); } /* Minimum of 11 bytes of pre-message data - including leading 0x00. */ invalid |= ctMaskLT(i, RSA_MIN_PAD_SZ); /* Must have seen separator. */ invalid |= ~pastSep; /* First byte must be 0x00. */ invalid |= ctMaskNotEq(pkcsBlock[0], 0x00); /* Check against expected block type: padValue */ invalid |= ctMaskNotEq(pkcsBlock[1], padValue); *output = (byte *)(pkcsBlock + i); ret = ((int)~invalid) & (pkcsBlockLen - i); } #endif return ret; } /* helper function to direct unpadding * * bits is the key modulus size in bits */ int wc_RsaUnPad_ex(byte* pkcsBlock, word32 pkcsBlockLen, byte** out, byte padValue, int padType, enum wc_HashType hType, int mgf, byte* optLabel, word32 labelLen, int saltLen, int bits, void* heap) { int ret; switch (padType) { case WC_RSA_PKCSV15_PAD: /*WOLFSSL_MSG("wolfSSL Using RSA PKCSV15 un-padding");*/ ret = RsaUnPad(pkcsBlock, pkcsBlockLen, out, padValue); break; #ifndef WC_NO_RSA_OAEP case WC_RSA_OAEP_PAD: WOLFSSL_MSG("wolfSSL Using RSA OAEP un-padding"); ret = RsaUnPad_OAEP((byte*)pkcsBlock, pkcsBlockLen, out, hType, mgf, optLabel, labelLen, heap); break; #endif #ifdef WC_RSA_PSS case WC_RSA_PSS_PAD: WOLFSSL_MSG("wolfSSL Using RSA PSS un-padding"); ret = RsaUnPad_PSS((byte*)pkcsBlock, pkcsBlockLen, out, hType, mgf, saltLen, bits, heap); break; #endif #ifdef WC_RSA_NO_PADDING case WC_RSA_NO_PAD: WOLFSSL_MSG("wolfSSL Using NO un-padding"); /* In the case of no padding being used check that input is exactly * the RSA key length */ if (bits <= 0 || pkcsBlockLen != ((word32)(bits+WOLFSSL_BIT_SIZE-1)/WOLFSSL_BIT_SIZE)) { WOLFSSL_MSG("Bad input size"); ret = RSA_PAD_E; } else { if (out != NULL) { *out = pkcsBlock; } ret = pkcsBlockLen; } break; #endif /* WC_RSA_NO_PADDING */ default: WOLFSSL_MSG("Unknown RSA UnPad Type"); ret = RSA_PAD_E; } /* silence warning if not used with padding scheme */ (void)hType; (void)mgf; (void)optLabel; (void)labelLen; (void)saltLen; (void)bits; (void)heap; return ret; } #ifdef WC_RSA_NONBLOCK static int wc_RsaFunctionNonBlock(const byte* in, word32 inLen, byte* out, word32* outLen, int type, RsaKey* key) { int ret = 0; word32 keyLen, len; if (key == NULL || key->nb == NULL) { return BAD_FUNC_ARG; } if (key->nb->exptmod.state == TFM_EXPTMOD_NB_INIT) { if (mp_init(&key->nb->tmp) != MP_OKAY) { ret = MP_INIT_E; } if (ret == 0) { if (mp_read_unsigned_bin(&key->nb->tmp, (byte*)in, inLen) != MP_OKAY) { ret = MP_READ_E; } } } if (ret == 0) { switch(type) { case RSA_PRIVATE_DECRYPT: case RSA_PRIVATE_ENCRYPT: ret = fp_exptmod_nb(&key->nb->exptmod, &key->nb->tmp, &key->d, &key->n, &key->nb->tmp); if (ret == FP_WOULDBLOCK) return ret; if (ret != MP_OKAY) ret = MP_EXPTMOD_E; break; case RSA_PUBLIC_ENCRYPT: case RSA_PUBLIC_DECRYPT: ret = fp_exptmod_nb(&key->nb->exptmod, &key->nb->tmp, &key->e, &key->n, &key->nb->tmp); if (ret == FP_WOULDBLOCK) return ret; if (ret != MP_OKAY) ret = MP_EXPTMOD_E; break; default: ret = RSA_WRONG_TYPE_E; break; } } if (ret == 0) { keyLen 
= wc_RsaEncryptSize(key); if (keyLen > *outLen) ret = RSA_BUFFER_E; } if (ret == 0) { len = mp_unsigned_bin_size(&key->nb->tmp); /* pad front w/ zeros to match key length */ while (len < keyLen) { *out++ = 0x00; len++; } *outLen = keyLen; /* convert */ if (mp_to_unsigned_bin(&key->nb->tmp, out) != MP_OKAY) { ret = MP_TO_E; } } mp_clear(&key->nb->tmp); return ret; } #endif /* WC_RSA_NONBLOCK */ #ifdef WOLFSSL_XILINX_CRYPT /* * Xilinx hardened crypto acceleration. * * Returns 0 on success and negative values on error. */ static int wc_RsaFunctionSync(const byte* in, word32 inLen, byte* out, word32* outLen, int type, RsaKey* key, WC_RNG* rng) { int ret = 0; word32 keyLen; (void)rng; keyLen = wc_RsaEncryptSize(key); if (keyLen > *outLen) { WOLFSSL_MSG("Output buffer is not big enough"); return BAD_FUNC_ARG; } if (inLen != keyLen) { WOLFSSL_MSG("Expected that inLen equals RSA key length"); return BAD_FUNC_ARG; } switch(type) { case RSA_PRIVATE_DECRYPT: case RSA_PRIVATE_ENCRYPT: #ifdef WOLFSSL_XILINX_CRYPTO_OLD /* Currently public exponent is loaded by default. * In SDK 2017.1 RSA exponent values are expected to be of 4 bytes * leading to private key operations with Xsecure_RsaDecrypt not being * supported */ ret = RSA_WRONG_TYPE_E; #else { byte *d; int dSz; XSecure_Rsa rsa; dSz = mp_unsigned_bin_size(&key->d); d = (byte*)XMALLOC(dSz, key->heap, DYNAMIC_TYPE_PRIVATE_KEY); if (d == NULL) { ret = MEMORY_E; } else { ret = mp_to_unsigned_bin(&key->d, d); XSecure_RsaInitialize(&rsa, key->mod, NULL, d); } if (ret == 0) { if (XSecure_RsaPrivateDecrypt(&rsa, (u8*)in, inLen, out) != XST_SUCCESS) { ret = BAD_STATE_E; } } if (d != NULL) { XFREE(d, key->heap, DYNAMIC_TYPE_PRIVATE_KEY); } } #endif break; case RSA_PUBLIC_ENCRYPT: case RSA_PUBLIC_DECRYPT: #ifdef WOLFSSL_XILINX_CRYPTO_OLD if (XSecure_RsaDecrypt(&(key->xRsa), in, out) != XST_SUCCESS) { ret = BAD_STATE_E; } #else /* starting at Xilinx release 2019 the function XSecure_RsaDecrypt was removed */ if (XSecure_RsaPublicEncrypt(&(key->xRsa), (u8*)in, inLen, out) != XST_SUCCESS) { WOLFSSL_MSG("Error happened when calling hardware RSA public operation"); ret = BAD_STATE_E; } #endif break; default: ret = RSA_WRONG_TYPE_E; } *outLen = keyLen; return ret; } #elif defined(WOLFSSL_AFALG_XILINX_RSA) #ifndef ERROR_OUT #define ERROR_OUT(x) ret = (x); goto done #endif static const char WC_TYPE_ASYMKEY[] = "skcipher"; static const char WC_NAME_RSA[] = "xilinx-zynqmp-rsa"; #ifndef MAX_XILINX_RSA_KEY /* max key size of 4096 bits / 512 bytes */ #define MAX_XILINX_RSA_KEY 512 #endif static const byte XILINX_RSA_FLAG[] = {0x1}; /* AF_ALG implementation of RSA */ static int wc_RsaFunctionSync(const byte* in, word32 inLen, byte* out, word32* outLen, int type, RsaKey* key, WC_RNG* rng) { struct msghdr msg; struct cmsghdr* cmsg; struct iovec iov; byte* keyBuf = NULL; word32 keyBufSz = 0; char cbuf[CMSG_SPACE(4) + CMSG_SPACE(sizeof(struct af_alg_iv) + 1)] = {0}; int ret = 0; int op = 0; /* decryption vs encryption flag */ word32 keyLen; /* input and output buffer need to be aligned */ ALIGN64 byte outBuf[MAX_XILINX_RSA_KEY]; ALIGN64 byte inBuf[MAX_XILINX_RSA_KEY]; XMEMSET(&msg, 0, sizeof(struct msghdr)); (void)rng; keyLen = wc_RsaEncryptSize(key); if (keyLen > *outLen) { ERROR_OUT(RSA_BUFFER_E); } if (keyLen > MAX_XILINX_RSA_KEY) { WOLFSSL_MSG("RSA key size larger than supported"); ERROR_OUT(BAD_FUNC_ARG); } if ((keyBuf = (byte*)XMALLOC(keyLen * 2, key->heap, DYNAMIC_TYPE_KEY)) == NULL) { ERROR_OUT(MEMORY_E); } if ((ret = mp_to_unsigned_bin(&(key->n), keyBuf)) != MP_OKAY) { 
ERROR_OUT(MP_TO_E); } switch(type) { case RSA_PRIVATE_DECRYPT: case RSA_PRIVATE_ENCRYPT: op = 1; /* set as decrypt */ { keyBufSz = mp_unsigned_bin_size(&(key->d)); if ((mp_to_unsigned_bin(&(key->d), keyBuf + keyLen)) != MP_OKAY) { ERROR_OUT(MP_TO_E); } } break; case RSA_PUBLIC_DECRYPT: case RSA_PUBLIC_ENCRYPT: { word32 exp = 0; word32 eSz = mp_unsigned_bin_size(&(key->e)); if ((mp_to_unsigned_bin(&(key->e), (byte*)&exp + (sizeof(word32) - eSz))) != MP_OKAY) { ERROR_OUT(MP_TO_E); } keyBufSz = sizeof(word32); XMEMCPY(keyBuf + keyLen, (byte*)&exp, keyBufSz); break; } default: ERROR_OUT(RSA_WRONG_TYPE_E); } keyBufSz += keyLen; /* add size of modulus */ /* check for existing sockets before creating new ones */ if (key->alFd > 0) { close(key->alFd); key->alFd = WC_SOCK_NOTSET; } if (key->rdFd > 0) { close(key->rdFd); key->rdFd = WC_SOCK_NOTSET; } /* create new sockets and set the key to use */ if ((key->alFd = wc_Afalg_Socket()) < 0) { WOLFSSL_MSG("Unable to create socket"); ERROR_OUT(key->alFd); } if ((key->rdFd = wc_Afalg_CreateRead(key->alFd, WC_TYPE_ASYMKEY, WC_NAME_RSA)) < 0) { WOLFSSL_MSG("Unable to bind and create read/send socket"); ERROR_OUT(key->rdFd); } if ((ret = setsockopt(key->alFd, SOL_ALG, ALG_SET_KEY, keyBuf, keyBufSz)) < 0) { WOLFSSL_MSG("Error setting RSA key"); ERROR_OUT(ret); } msg.msg_control = cbuf; msg.msg_controllen = sizeof(cbuf); cmsg = CMSG_FIRSTHDR(&msg); if ((ret = wc_Afalg_SetOp(cmsg, op)) < 0) { ERROR_OUT(ret); } /* set flag in IV spot, needed for Xilinx hardware acceleration use */ cmsg = CMSG_NXTHDR(&msg, cmsg); if ((ret = wc_Afalg_SetIv(cmsg, (byte*)XILINX_RSA_FLAG, sizeof(XILINX_RSA_FLAG))) != 0) { ERROR_OUT(ret); } /* compose and send msg */ XMEMCPY(inBuf, (byte*)in, inLen); /* for alignment */ iov.iov_base = inBuf; iov.iov_len = inLen; msg.msg_iov = &iov; msg.msg_iovlen = 1; if ((ret = sendmsg(key->rdFd, &msg, 0)) <= 0) { ERROR_OUT(WC_AFALG_SOCK_E); } if ((ret = read(key->rdFd, outBuf, inLen)) <= 0) { ERROR_OUT(WC_AFALG_SOCK_E); } XMEMCPY(out, outBuf, ret); *outLen = keyLen; done: /* clear key data and free buffer */ if (keyBuf != NULL) { ForceZero(keyBuf, keyBufSz); } XFREE(keyBuf, key->heap, DYNAMIC_TYPE_KEY); if (key->alFd > 0) { close(key->alFd); key->alFd = WC_SOCK_NOTSET; } if (key->rdFd > 0) { close(key->rdFd); key->rdFd = WC_SOCK_NOTSET; } return ret; } #else static int wc_RsaFunctionSync(const byte* in, word32 inLen, byte* out, word32* outLen, int type, RsaKey* key, WC_RNG* rng) { #ifndef WOLFSSL_SP_MATH #ifdef WOLFSSL_SMALL_STACK mp_int* tmp; #ifdef WC_RSA_BLINDING mp_int* rnd; mp_int* rndi; #endif #else mp_int tmp[1]; #ifdef WC_RSA_BLINDING mp_int rnd[1], rndi[1]; #endif #endif int ret = 0; word32 keyLen = 0; #endif #ifdef WOLFSSL_HAVE_SP_RSA #ifndef WOLFSSL_SP_NO_2048 if (mp_count_bits(&key->n) == 2048) { switch(type) { #ifndef WOLFSSL_RSA_PUBLIC_ONLY case RSA_PRIVATE_DECRYPT: case RSA_PRIVATE_ENCRYPT: #ifdef WC_RSA_BLINDING if (rng == NULL) return MISSING_RNG_E; #endif #ifndef RSA_LOW_MEM if ((mp_count_bits(&key->p) == 1024) && (mp_count_bits(&key->q) == 1024)) { return sp_RsaPrivate_2048(in, inLen, &key->d, &key->p, &key->q, &key->dP, &key->dQ, &key->u, &key->n, out, outLen); } break; #else return sp_RsaPrivate_2048(in, inLen, &key->d, NULL, NULL, NULL, NULL, NULL, &key->n, out, outLen); #endif #endif case RSA_PUBLIC_ENCRYPT: case RSA_PUBLIC_DECRYPT: return sp_RsaPublic_2048(in, inLen, &key->e, &key->n, out, outLen); } } #endif #ifndef WOLFSSL_SP_NO_3072 if (mp_count_bits(&key->n) == 3072) { switch(type) { #ifndef WOLFSSL_RSA_PUBLIC_ONLY case 
RSA_PRIVATE_DECRYPT: case RSA_PRIVATE_ENCRYPT: #ifdef WC_RSA_BLINDING if (rng == NULL) return MISSING_RNG_E; #endif #ifndef RSA_LOW_MEM if ((mp_count_bits(&key->p) == 1536) && (mp_count_bits(&key->q) == 1536)) { return sp_RsaPrivate_3072(in, inLen, &key->d, &key->p, &key->q, &key->dP, &key->dQ, &key->u, &key->n, out, outLen); } break; #else return sp_RsaPrivate_3072(in, inLen, &key->d, NULL, NULL, NULL, NULL, NULL, &key->n, out, outLen); #endif #endif case RSA_PUBLIC_ENCRYPT: case RSA_PUBLIC_DECRYPT: return sp_RsaPublic_3072(in, inLen, &key->e, &key->n, out, outLen); } } #endif #ifdef WOLFSSL_SP_4096 if (mp_count_bits(&key->n) == 4096) { switch(type) { #ifndef WOLFSSL_RSA_PUBLIC_ONLY case RSA_PRIVATE_DECRYPT: case RSA_PRIVATE_ENCRYPT: #ifdef WC_RSA_BLINDING if (rng == NULL) return MISSING_RNG_E; #endif #ifndef RSA_LOW_MEM if ((mp_count_bits(&key->p) == 2048) && (mp_count_bits(&key->q) == 2048)) { return sp_RsaPrivate_4096(in, inLen, &key->d, &key->p, &key->q, &key->dP, &key->dQ, &key->u, &key->n, out, outLen); } break; #else return sp_RsaPrivate_4096(in, inLen, &key->d, NULL, NULL, NULL, NULL, NULL, &key->n, out, outLen); #endif #endif case RSA_PUBLIC_ENCRYPT: case RSA_PUBLIC_DECRYPT: return sp_RsaPublic_4096(in, inLen, &key->e, &key->n, out, outLen); } } #endif #endif /* WOLFSSL_HAVE_SP_RSA */ #ifdef WOLFSSL_SP_MATH (void)rng; WOLFSSL_MSG("SP Key Size Error"); return WC_KEY_SIZE_E; #else (void)rng; #ifdef WOLFSSL_SMALL_STACK tmp = (mp_int*)XMALLOC(sizeof(mp_int), key->heap, DYNAMIC_TYPE_RSA); if (tmp == NULL) return MEMORY_E; #ifdef WC_RSA_BLINDING rnd = (mp_int*)XMALLOC(sizeof(mp_int) * 2, key->heap, DYNAMIC_TYPE_RSA); if (rnd == NULL) { XFREE(tmp, key->heap, DYNAMIC_TYPE_RSA); return MEMORY_E; } rndi = rnd + 1; #endif /* WC_RSA_BLINDING */ #endif /* WOLFSSL_SMALL_STACK */ if (mp_init(tmp) != MP_OKAY) ret = MP_INIT_E; #ifdef WC_RSA_BLINDING if (ret == 0) { if (type == RSA_PRIVATE_DECRYPT || type == RSA_PRIVATE_ENCRYPT) { if (mp_init_multi(rnd, rndi, NULL, NULL, NULL, NULL) != MP_OKAY) { mp_clear(tmp); ret = MP_INIT_E; } } } #endif #ifndef TEST_UNPAD_CONSTANT_TIME if (ret == 0 && mp_read_unsigned_bin(tmp, (byte*)in, inLen) != MP_OKAY) ret = MP_READ_E; if (ret == 0) { switch(type) { #ifndef WOLFSSL_RSA_PUBLIC_ONLY case RSA_PRIVATE_DECRYPT: case RSA_PRIVATE_ENCRYPT: { #if defined(WC_RSA_BLINDING) && !defined(WC_NO_RNG) /* blind */ ret = mp_rand(rnd, get_digit_count(&key->n), rng); /* rndi = 1/rnd mod n */ if (ret == 0 && mp_invmod(rnd, &key->n, rndi) != MP_OKAY) ret = MP_INVMOD_E; /* rnd = rnd^e */ if (ret == 0 && mp_exptmod(rnd, &key->e, &key->n, rnd) != MP_OKAY) ret = MP_EXPTMOD_E; /* tmp = tmp*rnd mod n */ if (ret == 0 && mp_mulmod(tmp, rnd, &key->n, tmp) != MP_OKAY) ret = MP_MULMOD_E; #endif /* WC_RSA_BLINDING && !WC_NO_RNG */ #ifdef RSA_LOW_MEM /* half as much memory but twice as slow */ if (ret == 0 && mp_exptmod(tmp, &key->d, &key->n, tmp) != MP_OKAY) ret = MP_EXPTMOD_E; #else if (ret == 0) { #ifdef WOLFSSL_SMALL_STACK mp_int* tmpa; mp_int* tmpb = NULL; #else mp_int tmpa[1], tmpb[1]; #endif int cleara = 0, clearb = 0; #ifdef WOLFSSL_SMALL_STACK tmpa = (mp_int*)XMALLOC(sizeof(mp_int) * 2, key->heap, DYNAMIC_TYPE_RSA); if (tmpa != NULL) tmpb = tmpa + 1; else ret = MEMORY_E; #endif if (ret == 0) { if (mp_init(tmpa) != MP_OKAY) ret = MP_INIT_E; else cleara = 1; } if (ret == 0) { if (mp_init(tmpb) != MP_OKAY) ret = MP_INIT_E; else clearb = 1; } /* tmpa = tmp^dP mod p */ if (ret == 0 && mp_exptmod(tmp, &key->dP, &key->p, tmpa) != MP_OKAY) ret = MP_EXPTMOD_E; /* tmpb = tmp^dQ mod q */ if 
(ret == 0 && mp_exptmod(tmp, &key->dQ, &key->q, tmpb) != MP_OKAY) ret = MP_EXPTMOD_E; /* tmp = (tmpa - tmpb) * qInv (mod p) */ if (ret == 0 && mp_sub(tmpa, tmpb, tmp) != MP_OKAY) ret = MP_SUB_E; if (ret == 0 && mp_mulmod(tmp, &key->u, &key->p, tmp) != MP_OKAY) ret = MP_MULMOD_E; /* tmp = tmpb + q * tmp */ if (ret == 0 && mp_mul(tmp, &key->q, tmp) != MP_OKAY) ret = MP_MUL_E; if (ret == 0 && mp_add(tmp, tmpb, tmp) != MP_OKAY) ret = MP_ADD_E; #ifdef WOLFSSL_SMALL_STACK if (tmpa != NULL) #endif { if (cleara) mp_clear(tmpa); if (clearb) mp_clear(tmpb); #ifdef WOLFSSL_SMALL_STACK XFREE(tmpa, key->heap, DYNAMIC_TYPE_RSA); #endif } } /* tmpa/b scope */ #endif /* RSA_LOW_MEM */ #ifdef WC_RSA_BLINDING /* unblind */ if (ret == 0 && mp_mulmod(tmp, rndi, &key->n, tmp) != MP_OKAY) ret = MP_MULMOD_E; #endif /* WC_RSA_BLINDING */ break; } #endif case RSA_PUBLIC_ENCRYPT: case RSA_PUBLIC_DECRYPT: if (mp_exptmod_nct(tmp, &key->e, &key->n, tmp) != MP_OKAY) ret = MP_EXPTMOD_E; break; default: ret = RSA_WRONG_TYPE_E; break; } } if (ret == 0) { keyLen = wc_RsaEncryptSize(key); if (keyLen > *outLen) ret = RSA_BUFFER_E; } #ifndef WOLFSSL_XILINX_CRYPT if (ret == 0) { *outLen = keyLen; if (mp_to_unsigned_bin_len(tmp, out, keyLen) != MP_OKAY) ret = MP_TO_E; } #endif #else (void)type; (void)key; (void)keyLen; XMEMCPY(out, in, inLen); *outLen = inLen; #endif mp_clear(tmp); #ifdef WOLFSSL_SMALL_STACK XFREE(tmp, key->heap, DYNAMIC_TYPE_RSA); #endif #ifdef WC_RSA_BLINDING if (type == RSA_PRIVATE_DECRYPT || type == RSA_PRIVATE_ENCRYPT) { mp_clear(rndi); mp_clear(rnd); } #ifdef WOLFSSL_SMALL_STACK XFREE(rnd, key->heap, DYNAMIC_TYPE_RSA); #endif #endif /* WC_RSA_BLINDING */ return ret; #endif /* WOLFSSL_SP_MATH */ } #endif #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_RSA) static int wc_RsaFunctionAsync(const byte* in, word32 inLen, byte* out, word32* outLen, int type, RsaKey* key, WC_RNG* rng) { int ret = 0; (void)rng; #ifdef WOLFSSL_ASYNC_CRYPT_TEST if (wc_AsyncTestInit(&key->asyncDev, ASYNC_TEST_RSA_FUNC)) { WC_ASYNC_TEST* testDev = &key->asyncDev.test; testDev->rsaFunc.in = in; testDev->rsaFunc.inSz = inLen; testDev->rsaFunc.out = out; testDev->rsaFunc.outSz = outLen; testDev->rsaFunc.type = type; testDev->rsaFunc.key = key; testDev->rsaFunc.rng = rng; return WC_PENDING_E; } #endif /* WOLFSSL_ASYNC_CRYPT_TEST */ switch(type) { #ifndef WOLFSSL_RSA_PUBLIC_ONLY case RSA_PRIVATE_DECRYPT: case RSA_PRIVATE_ENCRYPT: #ifdef HAVE_CAVIUM key->dataLen = key->n.raw.len; ret = NitroxRsaExptMod(in, inLen, key->d.raw.buf, key->d.raw.len, key->n.raw.buf, key->n.raw.len, out, outLen, key); #elif defined(HAVE_INTEL_QA) #ifdef RSA_LOW_MEM ret = IntelQaRsaPrivate(&key->asyncDev, in, inLen, &key->d.raw, &key->n.raw, out, outLen); #else ret = IntelQaRsaCrtPrivate(&key->asyncDev, in, inLen, &key->p.raw, &key->q.raw, &key->dP.raw, &key->dQ.raw, &key->u.raw, out, outLen); #endif #else /* WOLFSSL_ASYNC_CRYPT_TEST */ ret = wc_RsaFunctionSync(in, inLen, out, outLen, type, key, rng); #endif break; #endif case RSA_PUBLIC_ENCRYPT: case RSA_PUBLIC_DECRYPT: #ifdef HAVE_CAVIUM key->dataLen = key->n.raw.len; ret = NitroxRsaExptMod(in, inLen, key->e.raw.buf, key->e.raw.len, key->n.raw.buf, key->n.raw.len, out, outLen, key); #elif defined(HAVE_INTEL_QA) ret = IntelQaRsaPublic(&key->asyncDev, in, inLen, &key->e.raw, &key->n.raw, out, outLen); #else /* WOLFSSL_ASYNC_CRYPT_TEST */ ret = wc_RsaFunctionSync(in, inLen, out, outLen, type, key, rng); #endif break; default: ret = RSA_WRONG_TYPE_E; } return ret; } #endif /* WOLFSSL_ASYNC_CRYPT && 
WC_ASYNC_ENABLE_RSA */ #if defined(WC_RSA_DIRECT) || defined(WC_RSA_NO_PADDING) /* Function that does the RSA operation directly with no padding. * * in buffer to do operation on * inLen length of input buffer * out buffer to hold results * outSz gets set to size of result buffer. Should be passed in as length * of out buffer. If the pointer "out" is null then outSz gets set to * the expected buffer size needed and LENGTH_ONLY_E gets returned. * key RSA key to use for encrypt/decrypt * type if using private or public key {RSA_PUBLIC_ENCRYPT, * RSA_PUBLIC_DECRYPT, RSA_PRIVATE_ENCRYPT, RSA_PRIVATE_DECRYPT} * rng wolfSSL RNG to use if needed * * returns size of result on success */ int wc_RsaDirect(byte* in, word32 inLen, byte* out, word32* outSz, RsaKey* key, int type, WC_RNG* rng) { int ret; if (in == NULL || outSz == NULL || key == NULL) { return BAD_FUNC_ARG; } /* sanity check on type of RSA operation */ switch (type) { case RSA_PUBLIC_ENCRYPT: case RSA_PUBLIC_DECRYPT: case RSA_PRIVATE_ENCRYPT: case RSA_PRIVATE_DECRYPT: break; default: WOLFSSL_MSG("Bad RSA type"); return BAD_FUNC_ARG; } if ((ret = wc_RsaEncryptSize(key)) < 0) { return BAD_FUNC_ARG; } if (inLen != (word32)ret) { WOLFSSL_MSG("Bad input length. Should be RSA key size"); return BAD_FUNC_ARG; } if (out == NULL) { *outSz = inLen; return LENGTH_ONLY_E; } switch (key->state) { case RSA_STATE_NONE: case RSA_STATE_ENCRYPT_PAD: case RSA_STATE_ENCRYPT_EXPTMOD: case RSA_STATE_DECRYPT_EXPTMOD: case RSA_STATE_DECRYPT_UNPAD: key->state = (type == RSA_PRIVATE_ENCRYPT || type == RSA_PUBLIC_ENCRYPT) ? RSA_STATE_ENCRYPT_EXPTMOD: RSA_STATE_DECRYPT_EXPTMOD; key->dataLen = *outSz; ret = wc_RsaFunction(in, inLen, out, &key->dataLen, type, key, rng); if (ret >= 0 || ret == WC_PENDING_E) { key->state = (type == RSA_PRIVATE_ENCRYPT || type == RSA_PUBLIC_ENCRYPT) ? RSA_STATE_ENCRYPT_RES: RSA_STATE_DECRYPT_RES; } if (ret < 0) { break; } FALL_THROUGH; case RSA_STATE_ENCRYPT_RES: case RSA_STATE_DECRYPT_RES: ret = key->dataLen; break; default: ret = BAD_STATE_E; } /* if async pending then skip cleanup*/ if (ret == WC_PENDING_E #ifdef WC_RSA_NONBLOCK || ret == FP_WOULDBLOCK #endif ) { return ret; } key->state = RSA_STATE_NONE; wc_RsaCleanup(key); return ret; } #endif /* WC_RSA_DIRECT || WC_RSA_NO_PADDING */ #if defined(WOLFSSL_CRYPTOCELL) static int cc310_RsaPublicEncrypt(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key) { CRYSError_t ret = 0; CRYS_RSAPrimeData_t primeData; int modulusSize = wc_RsaEncryptSize(key); /* The out buffer must be at least modulus size bytes long. 
*/ if (outLen < modulusSize) return BAD_FUNC_ARG; ret = CRYS_RSA_PKCS1v15_Encrypt(&wc_rndState, wc_rndGenVectFunc, &key->ctx.pubKey, &primeData, (byte*)in, inLen, out); if (ret != SA_SILIB_RET_OK){ WOLFSSL_MSG("CRYS_RSA_PKCS1v15_Encrypt failed"); return -1; } return modulusSize; } static int cc310_RsaPublicDecrypt(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key) { CRYSError_t ret = 0; CRYS_RSAPrimeData_t primeData; uint16_t actualOutLen = outLen; ret = CRYS_RSA_PKCS1v15_Decrypt(&key->ctx.privKey, &primeData, (byte*)in, inLen, out, &actualOutLen); if (ret != SA_SILIB_RET_OK){ WOLFSSL_MSG("CRYS_RSA_PKCS1v15_Decrypt failed"); return -1; } return actualOutLen; } int cc310_RsaSSL_Sign(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key, CRYS_RSA_HASH_OpMode_t mode) { CRYSError_t ret = 0; uint16_t actualOutLen = outLen*sizeof(byte); CRYS_RSAPrivUserContext_t contextPrivate; ret = CRYS_RSA_PKCS1v15_Sign(&wc_rndState, wc_rndGenVectFunc, &contextPrivate, &key->ctx.privKey, mode, (byte*)in, inLen, out, &actualOutLen); if (ret != SA_SILIB_RET_OK){ WOLFSSL_MSG("CRYS_RSA_PKCS1v15_Sign failed"); return -1; } return actualOutLen; } int cc310_RsaSSL_Verify(const byte* in, word32 inLen, byte* sig, RsaKey* key, CRYS_RSA_HASH_OpMode_t mode) { CRYSError_t ret = 0; CRYS_RSAPubUserContext_t contextPub; /* verify the signature in the sig pointer */ ret = CRYS_RSA_PKCS1v15_Verify(&contextPub, &key->ctx.pubKey, mode, (byte*)in, inLen, sig); if (ret != SA_SILIB_RET_OK){ WOLFSSL_MSG("CRYS_RSA_PKCS1v15_Verify failed"); return -1; } return ret; } #endif /* WOLFSSL_CRYPTOCELL */ int wc_RsaFunction(const byte* in, word32 inLen, byte* out, word32* outLen, int type, RsaKey* key, WC_RNG* rng) { int ret = 0; if (key == NULL || in == NULL || inLen == 0 || out == NULL || outLen == NULL || *outLen == 0 || type == RSA_TYPE_UNKNOWN) { return BAD_FUNC_ARG; } #ifdef WOLF_CRYPTO_CB if (key->devId != INVALID_DEVID) { ret = wc_CryptoCb_Rsa(in, inLen, out, outLen, type, key, rng); if (ret != CRYPTOCB_UNAVAILABLE) return ret; /* fall-through when unavailable */ ret = 0; /* reset error code and try using software */ } #endif #ifndef TEST_UNPAD_CONSTANT_TIME #ifndef NO_RSA_BOUNDS_CHECK if (type == RSA_PRIVATE_DECRYPT && key->state == RSA_STATE_DECRYPT_EXPTMOD) { /* Check that 1 < in < n-1. (Requirement of 800-56B.) 
*/ #ifdef WOLFSSL_SMALL_STACK mp_int* c; #else mp_int c[1]; #endif #ifdef WOLFSSL_SMALL_STACK c = (mp_int*)XMALLOC(sizeof(mp_int), key->heap, DYNAMIC_TYPE_RSA); if (c == NULL) ret = MEMORY_E; #endif if (mp_init(c) != MP_OKAY) ret = MP_INIT_E; if (ret == 0) { if (mp_read_unsigned_bin(c, in, inLen) != 0) ret = MP_READ_E; } if (ret == 0) { /* check c > 1 */ if (mp_cmp_d(c, 1) != MP_GT) ret = RSA_OUT_OF_RANGE_E; } if (ret == 0) { /* add c+1 */ if (mp_add_d(c, 1, c) != MP_OKAY) ret = MP_ADD_E; } if (ret == 0) { /* check c+1 < n */ if (mp_cmp(c, &key->n) != MP_LT) ret = RSA_OUT_OF_RANGE_E; } mp_clear(c); #ifdef WOLFSSL_SMALL_STACK XFREE(c, key->heap, DYNAMIC_TYPE_RSA); #endif if (ret != 0) return ret; } #endif /* NO_RSA_BOUNDS_CHECK */ #endif #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_RSA) if (key->asyncDev.marker == WOLFSSL_ASYNC_MARKER_RSA && key->n.raw.len > 0) { ret = wc_RsaFunctionAsync(in, inLen, out, outLen, type, key, rng); } else #endif #ifdef WC_RSA_NONBLOCK if (key->nb) { ret = wc_RsaFunctionNonBlock(in, inLen, out, outLen, type, key); } else #endif { ret = wc_RsaFunctionSync(in, inLen, out, outLen, type, key, rng); } /* handle error */ if (ret < 0 && ret != WC_PENDING_E #ifdef WC_RSA_NONBLOCK && ret != FP_WOULDBLOCK #endif ) { if (ret == MP_EXPTMOD_E) { /* This can happen due to incorrectly set FP_MAX_BITS or missing XREALLOC */ WOLFSSL_MSG("RSA_FUNCTION MP_EXPTMOD_E: memory/config problem"); } key->state = RSA_STATE_NONE; wc_RsaCleanup(key); } return ret; } #ifndef WOLFSSL_RSA_VERIFY_ONLY /* Internal Wrappers */ /* Gives the option of choosing padding type in : input to be encrypted inLen: length of input buffer out: encrypted output outLen: length of encrypted output buffer key : wolfSSL initialized RSA key struct rng : wolfSSL initialized random number struct rsa_type : type of RSA: RSA_PUBLIC_ENCRYPT, RSA_PUBLIC_DECRYPT, RSA_PRIVATE_ENCRYPT or RSA_PRIVATE_DECRYPT pad_value: RSA_BLOCK_TYPE_1 or RSA_BLOCK_TYPE_2 pad_type : type of padding: WC_RSA_PKCSV15_PAD, WC_RSA_OAEP_PAD, WC_RSA_NO_PAD or WC_RSA_PSS_PAD hash : type of hash algorithm to use found in wolfssl/wolfcrypt/hash.h mgf : type of mask generation function to use label : optional label labelSz : size of optional label buffer saltLen : Length of salt used in PSS rng : random number generator */ static int RsaPublicEncryptEx(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key, int rsa_type, byte pad_value, int pad_type, enum wc_HashType hash, int mgf, byte* label, word32 labelSz, int saltLen, WC_RNG* rng) { int ret, sz; if (in == NULL || inLen == 0 || out == NULL || key == NULL) { return BAD_FUNC_ARG; } sz = wc_RsaEncryptSize(key); if (sz > (int)outLen) { return RSA_BUFFER_E; } if (sz < RSA_MIN_PAD_SZ) { return WC_KEY_SIZE_E; } if (inLen > (word32)(sz - RSA_MIN_PAD_SZ)) { #ifdef WC_RSA_NO_PADDING /* In the case that no padding is used the input length can and should * be the same size as the RSA key. 
*/ if (pad_type != WC_RSA_NO_PAD) #endif return RSA_BUFFER_E; } switch (key->state) { case RSA_STATE_NONE: case RSA_STATE_ENCRYPT_PAD: #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_RSA) && \ defined(HAVE_CAVIUM) if (key->asyncDev.marker == WOLFSSL_ASYNC_MARKER_RSA && pad_type != WC_RSA_PSS_PAD && key->n.raw.buf) { /* Async operations that include padding */ if (rsa_type == RSA_PUBLIC_ENCRYPT && pad_value == RSA_BLOCK_TYPE_2) { key->state = RSA_STATE_ENCRYPT_RES; key->dataLen = key->n.raw.len; return NitroxRsaPublicEncrypt(in, inLen, out, outLen, key); } else if (rsa_type == RSA_PRIVATE_ENCRYPT && pad_value == RSA_BLOCK_TYPE_1) { key->state = RSA_STATE_ENCRYPT_RES; key->dataLen = key->n.raw.len; return NitroxRsaSSL_Sign(in, inLen, out, outLen, key); } } #elif defined(WOLFSSL_CRYPTOCELL) if (rsa_type == RSA_PUBLIC_ENCRYPT && pad_value == RSA_BLOCK_TYPE_2) { return cc310_RsaPublicEncrypt(in, inLen, out, outLen, key); } else if (rsa_type == RSA_PRIVATE_ENCRYPT && pad_value == RSA_BLOCK_TYPE_1) { return cc310_RsaSSL_Sign(in, inLen, out, outLen, key, cc310_hashModeRSA(hash, 0)); } #endif /* WOLFSSL_CRYPTOCELL */ key->state = RSA_STATE_ENCRYPT_PAD; ret = wc_RsaPad_ex(in, inLen, out, sz, pad_value, rng, pad_type, hash, mgf, label, labelSz, saltLen, mp_count_bits(&key->n), key->heap); if (ret < 0) { break; } key->state = RSA_STATE_ENCRYPT_EXPTMOD; FALL_THROUGH; case RSA_STATE_ENCRYPT_EXPTMOD: key->dataLen = outLen; ret = wc_RsaFunction(out, sz, out, &key->dataLen, rsa_type, key, rng); if (ret >= 0 || ret == WC_PENDING_E) { key->state = RSA_STATE_ENCRYPT_RES; } if (ret < 0) { break; } FALL_THROUGH; case RSA_STATE_ENCRYPT_RES: ret = key->dataLen; break; default: ret = BAD_STATE_E; break; } /* if async pending then return and skip done cleanup below */ if (ret == WC_PENDING_E #ifdef WC_RSA_NONBLOCK || ret == FP_WOULDBLOCK #endif ) { return ret; } key->state = RSA_STATE_NONE; wc_RsaCleanup(key); return ret; } #endif /* Gives the option of choosing padding type in : input to be decrypted inLen: length of input buffer out: decrypted message outLen: length of decrypted message in bytes outPtr: optional inline output pointer (if provided doing inline) key : wolfSSL initialized RSA key struct rsa_type : type of RSA: RSA_PUBLIC_ENCRYPT, RSA_PUBLIC_DECRYPT, RSA_PRIVATE_ENCRYPT or RSA_PRIVATE_DECRYPT pad_value: RSA_BLOCK_TYPE_1 or RSA_BLOCK_TYPE_2 pad_type : type of padding: WC_RSA_PKCSV15_PAD, WC_RSA_OAEP_PAD, WC_RSA_NO_PAD, WC_RSA_PSS_PAD hash : type of hash algorithm to use found in wolfssl/wolfcrypt/hash.h mgf : type of mask generation function to use label : optional label labelSz : size of optional label buffer saltLen : Length of salt used in PSS rng : random number generator */ static int RsaPrivateDecryptEx(byte* in, word32 inLen, byte* out, word32 outLen, byte** outPtr, RsaKey* key, int rsa_type, byte pad_value, int pad_type, enum wc_HashType hash, int mgf, byte* label, word32 labelSz, int saltLen, WC_RNG* rng) { int ret = RSA_WRONG_TYPE_E; byte* pad = NULL; if (in == NULL || inLen == 0 || out == NULL || key == NULL) { return BAD_FUNC_ARG; } switch (key->state) { case RSA_STATE_NONE: key->dataLen = inLen; #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_RSA) && \ defined(HAVE_CAVIUM) /* Async operations that include padding */ if (key->asyncDev.marker == WOLFSSL_ASYNC_MARKER_RSA && pad_type != WC_RSA_PSS_PAD) { #ifndef WOLFSSL_RSA_PUBLIC_ONLY if (rsa_type == RSA_PRIVATE_DECRYPT && pad_value == RSA_BLOCK_TYPE_2) { key->state = RSA_STATE_DECRYPT_RES; key->data = NULL; return 
NitroxRsaPrivateDecrypt(in, inLen, out, &key->dataLen, key); #endif } else if (rsa_type == RSA_PUBLIC_DECRYPT && pad_value == RSA_BLOCK_TYPE_1) { key->state = RSA_STATE_DECRYPT_RES; key->data = NULL; return NitroxRsaSSL_Verify(in, inLen, out, &key->dataLen, key); } } #elif defined(WOLFSSL_CRYPTOCELL) if (rsa_type == RSA_PRIVATE_DECRYPT && pad_value == RSA_BLOCK_TYPE_2) { ret = cc310_RsaPublicDecrypt(in, inLen, out, outLen, key); if (outPtr != NULL) *outPtr = out; /* for inline */ return ret; } else if (rsa_type == RSA_PUBLIC_DECRYPT && pad_value == RSA_BLOCK_TYPE_1) { return cc310_RsaSSL_Verify(in, inLen, out, key, cc310_hashModeRSA(hash, 0)); } #endif /* WOLFSSL_CRYPTOCELL */ #if !defined(WOLFSSL_RSA_VERIFY_ONLY) && !defined(WOLFSSL_RSA_VERIFY_INLINE) /* verify the tmp ptr is NULL, otherwise indicates bad state */ if (key->data != NULL) { ret = BAD_STATE_E; break; } /* if not doing this inline then allocate a buffer for it */ if (outPtr == NULL) { key->data = (byte*)XMALLOC(inLen, key->heap, DYNAMIC_TYPE_WOLF_BIGINT); key->dataIsAlloc = 1; if (key->data == NULL) { ret = MEMORY_E; break; } XMEMCPY(key->data, in, inLen); } else { key->data = out; } #endif key->state = RSA_STATE_DECRYPT_EXPTMOD; FALL_THROUGH; case RSA_STATE_DECRYPT_EXPTMOD: #if !defined(WOLFSSL_RSA_VERIFY_ONLY) && !defined(WOLFSSL_RSA_VERIFY_INLINE) ret = wc_RsaFunction(key->data, inLen, key->data, &key->dataLen, rsa_type, key, rng); #else ret = wc_RsaFunction(in, inLen, out, &key->dataLen, rsa_type, key, rng); #endif if (ret >= 0 || ret == WC_PENDING_E) { key->state = RSA_STATE_DECRYPT_UNPAD; } if (ret < 0) { break; } FALL_THROUGH; case RSA_STATE_DECRYPT_UNPAD: #if !defined(WOLFSSL_RSA_VERIFY_ONLY) && !defined(WOLFSSL_RSA_VERIFY_INLINE) ret = wc_RsaUnPad_ex(key->data, key->dataLen, &pad, pad_value, pad_type, hash, mgf, label, labelSz, saltLen, mp_count_bits(&key->n), key->heap); #else ret = wc_RsaUnPad_ex(out, key->dataLen, &pad, pad_value, pad_type, hash, mgf, label, labelSz, saltLen, mp_count_bits(&key->n), key->heap); #endif if (rsa_type == RSA_PUBLIC_DECRYPT && ret > (int)outLen) ret = RSA_BUFFER_E; else if (ret >= 0 && pad != NULL) { #if !defined(WOLFSSL_RSA_VERIFY_ONLY) && !defined(WOLFSSL_RSA_VERIFY_INLINE) signed char c; #endif /* only copy output if not inline */ if (outPtr == NULL) { #if !defined(WOLFSSL_RSA_VERIFY_ONLY) && !defined(WOLFSSL_RSA_VERIFY_INLINE) if (rsa_type == RSA_PRIVATE_DECRYPT) { word32 i, j; int start = (int)((size_t)pad - (size_t)key->data); for (i = 0, j = 0; j < key->dataLen; j++) { out[i] = key->data[j]; c = ctMaskGTE(j, start); c &= ctMaskLT(i, outLen); /* 0 - no add, -1 add */ i += (word32)((byte)(-c)); } } else #endif { XMEMCPY(out, pad, ret); } } else *outPtr = pad; #if !defined(WOLFSSL_RSA_VERIFY_ONLY) ret = ctMaskSelInt(ctMaskLTE(ret, outLen), ret, RSA_BUFFER_E); ret = ctMaskSelInt(ctMaskNotEq(ret, 0), ret, RSA_BUFFER_E); #else if (outLen < (word32)ret) ret = RSA_BUFFER_E; #endif } key->state = RSA_STATE_DECRYPT_RES; FALL_THROUGH; case RSA_STATE_DECRYPT_RES: #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_RSA) && \ defined(HAVE_CAVIUM) if (key->asyncDev.marker == WOLFSSL_ASYNC_MARKER_RSA && pad_type != WC_RSA_PSS_PAD) { if (ret > 0) { /* convert result */ byte* dataLen = (byte*)&key->dataLen; ret = (dataLen[0] << 8) | (dataLen[1]); if (outPtr) *outPtr = in; } } #endif break; default: ret = BAD_STATE_E; break; } /* if async pending then return and skip done cleanup below */ if (ret == WC_PENDING_E #ifdef WC_RSA_NONBLOCK || ret == FP_WOULDBLOCK #endif ) { return ret; } 
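/* Operation finished (not an async WC_PENDING_E or non-blocking FP_WOULDBLOCK result):
 * reset the state machine and let wc_RsaCleanup() zero and free any temporary
 * key->data buffer before returning the result. */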
key->state = RSA_STATE_NONE; wc_RsaCleanup(key); return ret; } #ifndef WOLFSSL_RSA_VERIFY_ONLY /* Public RSA Functions */ int wc_RsaPublicEncrypt(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key, WC_RNG* rng) { return RsaPublicEncryptEx(in, inLen, out, outLen, key, RSA_PUBLIC_ENCRYPT, RSA_BLOCK_TYPE_2, WC_RSA_PKCSV15_PAD, WC_HASH_TYPE_NONE, WC_MGF1NONE, NULL, 0, 0, rng); } #if !defined(WC_NO_RSA_OAEP) || defined(WC_RSA_NO_PADDING) int wc_RsaPublicEncrypt_ex(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key, WC_RNG* rng, int type, enum wc_HashType hash, int mgf, byte* label, word32 labelSz) { return RsaPublicEncryptEx(in, inLen, out, outLen, key, RSA_PUBLIC_ENCRYPT, RSA_BLOCK_TYPE_2, type, hash, mgf, label, labelSz, 0, rng); } #endif /* WC_NO_RSA_OAEP */ #endif #ifndef WOLFSSL_RSA_PUBLIC_ONLY int wc_RsaPrivateDecryptInline(byte* in, word32 inLen, byte** out, RsaKey* key) { WC_RNG* rng; #ifdef WC_RSA_BLINDING rng = key->rng; #else rng = NULL; #endif return RsaPrivateDecryptEx(in, inLen, in, inLen, out, key, RSA_PRIVATE_DECRYPT, RSA_BLOCK_TYPE_2, WC_RSA_PKCSV15_PAD, WC_HASH_TYPE_NONE, WC_MGF1NONE, NULL, 0, 0, rng); } #ifndef WC_NO_RSA_OAEP int wc_RsaPrivateDecryptInline_ex(byte* in, word32 inLen, byte** out, RsaKey* key, int type, enum wc_HashType hash, int mgf, byte* label, word32 labelSz) { WC_RNG* rng; #ifdef WC_RSA_BLINDING rng = key->rng; #else rng = NULL; #endif return RsaPrivateDecryptEx(in, inLen, in, inLen, out, key, RSA_PRIVATE_DECRYPT, RSA_BLOCK_TYPE_2, type, hash, mgf, label, labelSz, 0, rng); } #endif /* WC_NO_RSA_OAEP */ int wc_RsaPrivateDecrypt(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key) { WC_RNG* rng; #ifdef WC_RSA_BLINDING rng = key->rng; #else rng = NULL; #endif return RsaPrivateDecryptEx((byte*)in, inLen, out, outLen, NULL, key, RSA_PRIVATE_DECRYPT, RSA_BLOCK_TYPE_2, WC_RSA_PKCSV15_PAD, WC_HASH_TYPE_NONE, WC_MGF1NONE, NULL, 0, 0, rng); } #if !defined(WC_NO_RSA_OAEP) || defined(WC_RSA_NO_PADDING) int wc_RsaPrivateDecrypt_ex(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key, int type, enum wc_HashType hash, int mgf, byte* label, word32 labelSz) { WC_RNG* rng; #ifdef WC_RSA_BLINDING rng = key->rng; #else rng = NULL; #endif return RsaPrivateDecryptEx((byte*)in, inLen, out, outLen, NULL, key, RSA_PRIVATE_DECRYPT, RSA_BLOCK_TYPE_2, type, hash, mgf, label, labelSz, 0, rng); } #endif /* WC_NO_RSA_OAEP || WC_RSA_NO_PADDING */ #endif /* WOLFSSL_RSA_PUBLIC_ONLY */ #if !defined(WOLFSSL_CRYPTOCELL) int wc_RsaSSL_VerifyInline(byte* in, word32 inLen, byte** out, RsaKey* key) { WC_RNG* rng; #ifdef WC_RSA_BLINDING rng = key->rng; #else rng = NULL; #endif return RsaPrivateDecryptEx(in, inLen, in, inLen, out, key, RSA_PUBLIC_DECRYPT, RSA_BLOCK_TYPE_1, WC_RSA_PKCSV15_PAD, WC_HASH_TYPE_NONE, WC_MGF1NONE, NULL, 0, 0, rng); } #endif #ifndef WOLFSSL_RSA_VERIFY_ONLY int wc_RsaSSL_Verify(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key) { return wc_RsaSSL_Verify_ex(in, inLen, out, outLen, key , WC_RSA_PKCSV15_PAD); } int wc_RsaSSL_Verify_ex(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key, int pad_type) { WC_RNG* rng; if (key == NULL) { return BAD_FUNC_ARG; } #ifdef WC_RSA_BLINDING rng = key->rng; #else rng = NULL; #endif return RsaPrivateDecryptEx((byte*)in, inLen, out, outLen, NULL, key, RSA_PUBLIC_DECRYPT, RSA_BLOCK_TYPE_1, pad_type, WC_HASH_TYPE_NONE, WC_MGF1NONE, NULL, 0, 0, rng); } #endif #ifdef WC_RSA_PSS /* Verify the message signed with RSA-PSS. 
* The input buffer is reused for the output buffer. * Salt length is equal to hash length. * * in Buffer holding encrypted data. * inLen Length of data in buffer. * out Pointer to address containing the PSS data. * hash Hash algorithm. * mgf Mask generation function. * key Public RSA key. * returns the length of the PSS data on success and negative indicates failure. */ int wc_RsaPSS_VerifyInline(byte* in, word32 inLen, byte** out, enum wc_HashType hash, int mgf, RsaKey* key) { #ifndef WOLFSSL_PSS_SALT_LEN_DISCOVER return wc_RsaPSS_VerifyInline_ex(in, inLen, out, hash, mgf, RSA_PSS_SALT_LEN_DEFAULT, key); #else return wc_RsaPSS_VerifyInline_ex(in, inLen, out, hash, mgf, RSA_PSS_SALT_LEN_DISCOVER, key); #endif } /* Verify the message signed with RSA-PSS. * The input buffer is reused for the output buffer. * * in Buffer holding encrypted data. * inLen Length of data in buffer. * out Pointer to address containing the PSS data. * hash Hash algorithm. * mgf Mask generation function. * key Public RSA key. * saltLen Length of salt used. RSA_PSS_SALT_LEN_DEFAULT (-1) indicates salt * length is the same as the hash length. RSA_PSS_SALT_LEN_DISCOVER * indicates salt length is determined from the data. * returns the length of the PSS data on success and negative indicates failure. */ int wc_RsaPSS_VerifyInline_ex(byte* in, word32 inLen, byte** out, enum wc_HashType hash, int mgf, int saltLen, RsaKey* key) { WC_RNG* rng; #ifdef WC_RSA_BLINDING rng = key->rng; #else rng = NULL; #endif return RsaPrivateDecryptEx(in, inLen, in, inLen, out, key, RSA_PUBLIC_DECRYPT, RSA_BLOCK_TYPE_1, WC_RSA_PSS_PAD, hash, mgf, NULL, 0, saltLen, rng); } /* Verify the message signed with RSA-PSS. * Salt length is equal to hash length. * * in Buffer holding encrypted data. * inLen Length of data in buffer. * out Pointer to address containing the PSS data. * hash Hash algorithm. * mgf Mask generation function. * key Public RSA key. * returns the length of the PSS data on success and negative indicates failure. */ int wc_RsaPSS_Verify(byte* in, word32 inLen, byte* out, word32 outLen, enum wc_HashType hash, int mgf, RsaKey* key) { #ifndef WOLFSSL_PSS_SALT_LEN_DISCOVER return wc_RsaPSS_Verify_ex(in, inLen, out, outLen, hash, mgf, RSA_PSS_SALT_LEN_DEFAULT, key); #else return wc_RsaPSS_Verify_ex(in, inLen, out, outLen, hash, mgf, RSA_PSS_SALT_LEN_DISCOVER, key); #endif } /* Verify the message signed with RSA-PSS. * * in Buffer holding encrypted data. * inLen Length of data in buffer. * out Pointer to address containing the PSS data. * hash Hash algorithm. * mgf Mask generation function. * key Public RSA key. * saltLen Length of salt used. RSA_PSS_SALT_LEN_DEFAULT (-1) indicates salt * length is the same as the hash length. RSA_PSS_SALT_LEN_DISCOVER * indicates salt length is determined from the data. * returns the length of the PSS data on success and negative indicates failure. */ int wc_RsaPSS_Verify_ex(byte* in, word32 inLen, byte* out, word32 outLen, enum wc_HashType hash, int mgf, int saltLen, RsaKey* key) { WC_RNG* rng; #ifdef WC_RSA_BLINDING rng = key->rng; #else rng = NULL; #endif return RsaPrivateDecryptEx(in, inLen, out, outLen, NULL, key, RSA_PUBLIC_DECRYPT, RSA_BLOCK_TYPE_1, WC_RSA_PSS_PAD, hash, mgf, NULL, 0, saltLen, rng); } /* Checks the PSS data to ensure that the signature matches. * Salt length is equal to hash length. * * in Hash of the data that is being verified. * inSz Length of hash. * sig Buffer holding PSS data. * sigSz Size of PSS data. * hashType Hash algorithm. 
* returns BAD_PADDING_E when the PSS data is invalid, BAD_FUNC_ARG when * NULL is passed in to in or sig or inSz is not the same as the hash * algorithm length and 0 on success. */ int wc_RsaPSS_CheckPadding(const byte* in, word32 inSz, byte* sig, word32 sigSz, enum wc_HashType hashType) { return wc_RsaPSS_CheckPadding_ex(in, inSz, sig, sigSz, hashType, inSz, 0); } /* Checks the PSS data to ensure that the signature matches. * * in Hash of the data that is being verified. * inSz Length of hash. * sig Buffer holding PSS data. * sigSz Size of PSS data. * hashType Hash algorithm. * saltLen Length of salt used. RSA_PSS_SALT_LEN_DEFAULT (-1) indicates salt * length is the same as the hash length. RSA_PSS_SALT_LEN_DISCOVER * indicates salt length is determined from the data. * returns BAD_PADDING_E when the PSS data is invalid, BAD_FUNC_ARG when * NULL is passed in to in or sig or inSz is not the same as the hash * algorithm length and 0 on success. */ int wc_RsaPSS_CheckPadding_ex(const byte* in, word32 inSz, byte* sig, word32 sigSz, enum wc_HashType hashType, int saltLen, int bits) { int ret = 0; #ifndef WOLFSSL_PSS_LONG_SALT byte sigCheck[WC_MAX_DIGEST_SIZE*2 + RSA_PSS_PAD_SZ]; #else byte *sigCheck = NULL; #endif (void)bits; if (in == NULL || sig == NULL || inSz != (word32)wc_HashGetDigestSize(hashType)) { ret = BAD_FUNC_ARG; } if (ret == 0) { if (saltLen == RSA_PSS_SALT_LEN_DEFAULT) { saltLen = inSz; #ifdef WOLFSSL_SHA512 /* See FIPS 186-4 section 5.5 item (e). */ if (bits == 1024 && inSz == WC_SHA512_DIGEST_SIZE) { saltLen = RSA_PSS_SALT_MAX_SZ; } #endif } #ifndef WOLFSSL_PSS_LONG_SALT else if ((word32)saltLen > inSz) { ret = PSS_SALTLEN_E; } #endif #ifndef WOLFSSL_PSS_SALT_LEN_DISCOVER else if (saltLen < RSA_PSS_SALT_LEN_DEFAULT) { ret = PSS_SALTLEN_E; } #else else if (saltLen == RSA_PSS_SALT_LEN_DISCOVER) { saltLen = sigSz - inSz; if (saltLen < 0) { ret = PSS_SALTLEN_E; } } else if (saltLen < RSA_PSS_SALT_LEN_DISCOVER) { ret = PSS_SALTLEN_E; } #endif } /* Sig = Salt | Exp Hash */ if (ret == 0) { if (sigSz != inSz + saltLen) { ret = PSS_SALTLEN_E; } } #ifdef WOLFSSL_PSS_LONG_SALT if (ret == 0) { sigCheck = (byte*)XMALLOC(RSA_PSS_PAD_SZ + inSz + saltLen, NULL, DYNAMIC_TYPE_RSA_BUFFER); if (sigCheck == NULL) { ret = MEMORY_E; } } #endif /* Exp Hash = HASH(8 * 0x00 | Message Hash | Salt) */ if (ret == 0) { XMEMSET(sigCheck, 0, RSA_PSS_PAD_SZ); XMEMCPY(sigCheck + RSA_PSS_PAD_SZ, in, inSz); XMEMCPY(sigCheck + RSA_PSS_PAD_SZ + inSz, sig, saltLen); ret = wc_Hash(hashType, sigCheck, RSA_PSS_PAD_SZ + inSz + saltLen, sigCheck, inSz); } if (ret == 0) { if (XMEMCMP(sigCheck, sig + saltLen, inSz) != 0) { WOLFSSL_MSG("RsaPSS_CheckPadding: Padding Error"); ret = BAD_PADDING_E; } } #ifdef WOLFSSL_PSS_LONG_SALT if (sigCheck != NULL) { XFREE(sigCheck, NULL, DYNAMIC_TYPE_RSA_BUFFER); } #endif return ret; } /* Verify the message signed with RSA-PSS. * The input buffer is reused for the output buffer. * Salt length is equal to hash length. * * in Buffer holding encrypted data. * inLen Length of data in buffer. * out Pointer to address containing the PSS data. * digest Hash of the data that is being verified. * digestLen Length of hash. * hash Hash algorithm. * mgf Mask generation function. * key Public RSA key. * returns the length of the PSS data on success and negative indicates failure. 
*/ int wc_RsaPSS_VerifyCheckInline(byte* in, word32 inLen, byte** out, const byte* digest, word32 digestLen, enum wc_HashType hash, int mgf, RsaKey* key) { int ret = 0, verify, saltLen, hLen, bits = 0; hLen = wc_HashGetDigestSize(hash); if (hLen < 0) return hLen; if ((word32)hLen != digestLen) return BAD_FUNC_ARG; saltLen = hLen; #ifdef WOLFSSL_SHA512 /* See FIPS 186-4 section 5.5 item (e). */ bits = mp_count_bits(&key->n); if (bits == 1024 && hLen == WC_SHA512_DIGEST_SIZE) saltLen = RSA_PSS_SALT_MAX_SZ; #endif verify = wc_RsaPSS_VerifyInline_ex(in, inLen, out, hash, mgf, saltLen, key); if (verify > 0) ret = wc_RsaPSS_CheckPadding_ex(digest, digestLen, *out, verify, hash, saltLen, bits); if (ret == 0) ret = verify; return ret; } /* Verify the message signed with RSA-PSS. * Salt length is equal to hash length. * * in Buffer holding encrypted data. * inLen Length of data in buffer. * out Pointer to address containing the PSS data. * outLen Length of the output. * digest Hash of the data that is being verified. * digestLen Length of hash. * hash Hash algorithm. * mgf Mask generation function. * key Public RSA key. * returns the length of the PSS data on success and negative indicates failure. */ int wc_RsaPSS_VerifyCheck(byte* in, word32 inLen, byte* out, word32 outLen, const byte* digest, word32 digestLen, enum wc_HashType hash, int mgf, RsaKey* key) { int ret = 0, verify, saltLen, hLen, bits = 0; hLen = wc_HashGetDigestSize(hash); if (hLen < 0) return hLen; if ((word32)hLen != digestLen) return BAD_FUNC_ARG; saltLen = hLen; #ifdef WOLFSSL_SHA512 /* See FIPS 186-4 section 5.5 item (e). */ bits = mp_count_bits(&key->n); if (bits == 1024 && hLen == WC_SHA512_DIGEST_SIZE) saltLen = RSA_PSS_SALT_MAX_SZ; #endif verify = wc_RsaPSS_Verify_ex(in, inLen, out, outLen, hash, mgf, saltLen, key); if (verify > 0) ret = wc_RsaPSS_CheckPadding_ex(digest, digestLen, out, verify, hash, saltLen, bits); if (ret == 0) ret = verify; return ret; } #endif #if !defined(WOLFSSL_RSA_PUBLIC_ONLY) && !defined(WOLFSSL_RSA_VERIFY_ONLY) int wc_RsaSSL_Sign(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key, WC_RNG* rng) { return RsaPublicEncryptEx(in, inLen, out, outLen, key, RSA_PRIVATE_ENCRYPT, RSA_BLOCK_TYPE_1, WC_RSA_PKCSV15_PAD, WC_HASH_TYPE_NONE, WC_MGF1NONE, NULL, 0, 0, rng); } #ifdef WC_RSA_PSS /* Sign the hash of a message using RSA-PSS. * Salt length is equal to hash length. * * in Buffer holding hash of message. * inLen Length of data in buffer (hash length). * out Buffer to write encrypted signature into. * outLen Size of buffer to write to. * hash Hash algorithm. * mgf Mask generation function. * key Public RSA key. * rng Random number generator. * returns the length of the encrypted signature on success, a negative value * indicates failure. */ int wc_RsaPSS_Sign(const byte* in, word32 inLen, byte* out, word32 outLen, enum wc_HashType hash, int mgf, RsaKey* key, WC_RNG* rng) { return wc_RsaPSS_Sign_ex(in, inLen, out, outLen, hash, mgf, RSA_PSS_SALT_LEN_DEFAULT, key, rng); } /* Sign the hash of a message using RSA-PSS. * * in Buffer holding hash of message. * inLen Length of data in buffer (hash length). * out Buffer to write encrypted signature into. * outLen Size of buffer to write to. * hash Hash algorithm. * mgf Mask generation function. * saltLen Length of salt used. RSA_PSS_SALT_LEN_DEFAULT (-1) indicates salt * length is the same as the hash length. RSA_PSS_SALT_LEN_DISCOVER * indicates salt length is determined from the data. * key Public RSA key. * rng Random number generator. 
* returns the length of the encrypted signature on success, a negative value * indicates failure. */ int wc_RsaPSS_Sign_ex(const byte* in, word32 inLen, byte* out, word32 outLen, enum wc_HashType hash, int mgf, int saltLen, RsaKey* key, WC_RNG* rng) { return RsaPublicEncryptEx(in, inLen, out, outLen, key, RSA_PRIVATE_ENCRYPT, RSA_BLOCK_TYPE_1, WC_RSA_PSS_PAD, hash, mgf, NULL, 0, saltLen, rng); } #endif #endif #if !defined(WOLFSSL_RSA_VERIFY_ONLY) || !defined(WOLFSSL_SP_MATH) || \ defined(WC_RSA_PSS) int wc_RsaEncryptSize(RsaKey* key) { int ret; if (key == NULL) { return BAD_FUNC_ARG; } ret = mp_unsigned_bin_size(&key->n); #ifdef WOLF_CRYPTO_CB if (ret == 0 && key->devId != INVALID_DEVID) { ret = 2048/8; /* hardware handles, use 2048-bit as default */ } #endif return ret; } #endif #ifndef WOLFSSL_RSA_VERIFY_ONLY /* flatten RsaKey structure into individual elements (e, n) */ int wc_RsaFlattenPublicKey(RsaKey* key, byte* e, word32* eSz, byte* n, word32* nSz) { int sz, ret; if (key == NULL || e == NULL || eSz == NULL || n == NULL || nSz == NULL) { return BAD_FUNC_ARG; } sz = mp_unsigned_bin_size(&key->e); if ((word32)sz > *eSz) return RSA_BUFFER_E; ret = mp_to_unsigned_bin(&key->e, e); if (ret != MP_OKAY) return ret; *eSz = (word32)sz; sz = wc_RsaEncryptSize(key); if ((word32)sz > *nSz) return RSA_BUFFER_E; ret = mp_to_unsigned_bin(&key->n, n); if (ret != MP_OKAY) return ret; *nSz = (word32)sz; return 0; } #endif #endif /* HAVE_FIPS */ #ifndef WOLFSSL_RSA_VERIFY_ONLY static int RsaGetValue(mp_int* in, byte* out, word32* outSz) { word32 sz; int ret = 0; /* Parameters ensured by calling function. */ sz = (word32)mp_unsigned_bin_size(in); if (sz > *outSz) ret = RSA_BUFFER_E; if (ret == 0) ret = mp_to_unsigned_bin(in, out); if (ret == MP_OKAY) *outSz = sz; return ret; } int wc_RsaExportKey(RsaKey* key, byte* e, word32* eSz, byte* n, word32* nSz, byte* d, word32* dSz, byte* p, word32* pSz, byte* q, word32* qSz) { int ret = BAD_FUNC_ARG; if (key && e && eSz && n && nSz && d && dSz && p && pSz && q && qSz) ret = 0; if (ret == 0) ret = RsaGetValue(&key->e, e, eSz); if (ret == 0) ret = RsaGetValue(&key->n, n, nSz); #ifndef WOLFSSL_RSA_PUBLIC_ONLY if (ret == 0) ret = RsaGetValue(&key->d, d, dSz); if (ret == 0) ret = RsaGetValue(&key->p, p, pSz); if (ret == 0) ret = RsaGetValue(&key->q, q, qSz); #else /* no private parts to key */ if (d == NULL || p == NULL || q == NULL || dSz == NULL || pSz == NULL || qSz == NULL) { ret = BAD_FUNC_ARG; } else { *dSz = 0; *pSz = 0; *qSz = 0; } #endif /* WOLFSSL_RSA_PUBLIC_ONLY */ return ret; } #endif #ifdef WOLFSSL_KEY_GEN /* Check that |p-q| > 2^((size/2)-100) */ static int wc_CompareDiffPQ(mp_int* p, mp_int* q, int size) { mp_int c, d; int ret; if (p == NULL || q == NULL) return BAD_FUNC_ARG; ret = mp_init_multi(&c, &d, NULL, NULL, NULL, NULL); /* c = 2^((size/2)-100) */ if (ret == 0) ret = mp_2expt(&c, (size/2)-100); /* d = |p-q| */ if (ret == 0) ret = mp_sub(p, q, &d); if (ret == 0) ret = mp_abs(&d, &d); /* compare */ if (ret == 0) ret = mp_cmp(&d, &c); if (ret == MP_GT) ret = MP_OKAY; mp_clear(&d); mp_clear(&c); return ret; } /* The lower_bound value is floor(2^(0.5) * 2^((nlen/2)-1)) where nlen is 4096. * This number was calculated using a small test tool written with a common * large number math library. Other values of nlen may be checked with a subset * of lower_bound. 
*/ static const byte lower_bound[] = { 0xB5, 0x04, 0xF3, 0x33, 0xF9, 0xDE, 0x64, 0x84, 0x59, 0x7D, 0x89, 0xB3, 0x75, 0x4A, 0xBE, 0x9F, 0x1D, 0x6F, 0x60, 0xBA, 0x89, 0x3B, 0xA8, 0x4C, 0xED, 0x17, 0xAC, 0x85, 0x83, 0x33, 0x99, 0x15, /* 512 */ 0x4A, 0xFC, 0x83, 0x04, 0x3A, 0xB8, 0xA2, 0xC3, 0xA8, 0xB1, 0xFE, 0x6F, 0xDC, 0x83, 0xDB, 0x39, 0x0F, 0x74, 0xA8, 0x5E, 0x43, 0x9C, 0x7B, 0x4A, 0x78, 0x04, 0x87, 0x36, 0x3D, 0xFA, 0x27, 0x68, /* 1024 */ 0xD2, 0x20, 0x2E, 0x87, 0x42, 0xAF, 0x1F, 0x4E, 0x53, 0x05, 0x9C, 0x60, 0x11, 0xBC, 0x33, 0x7B, 0xCA, 0xB1, 0xBC, 0x91, 0x16, 0x88, 0x45, 0x8A, 0x46, 0x0A, 0xBC, 0x72, 0x2F, 0x7C, 0x4E, 0x33, 0xC6, 0xD5, 0xA8, 0xA3, 0x8B, 0xB7, 0xE9, 0xDC, 0xCB, 0x2A, 0x63, 0x43, 0x31, 0xF3, 0xC8, 0x4D, 0xF5, 0x2F, 0x12, 0x0F, 0x83, 0x6E, 0x58, 0x2E, 0xEA, 0xA4, 0xA0, 0x89, 0x90, 0x40, 0xCA, 0x4A, /* 2048 */ 0x81, 0x39, 0x4A, 0xB6, 0xD8, 0xFD, 0x0E, 0xFD, 0xF4, 0xD3, 0xA0, 0x2C, 0xEB, 0xC9, 0x3E, 0x0C, 0x42, 0x64, 0xDA, 0xBC, 0xD5, 0x28, 0xB6, 0x51, 0xB8, 0xCF, 0x34, 0x1B, 0x6F, 0x82, 0x36, 0xC7, 0x01, 0x04, 0xDC, 0x01, 0xFE, 0x32, 0x35, 0x2F, 0x33, 0x2A, 0x5E, 0x9F, 0x7B, 0xDA, 0x1E, 0xBF, 0xF6, 0xA1, 0xBE, 0x3F, 0xCA, 0x22, 0x13, 0x07, 0xDE, 0xA0, 0x62, 0x41, 0xF7, 0xAA, 0x81, 0xC2, /* 3072 */ 0xC1, 0xFC, 0xBD, 0xDE, 0xA2, 0xF7, 0xDC, 0x33, 0x18, 0x83, 0x8A, 0x2E, 0xAF, 0xF5, 0xF3, 0xB2, 0xD2, 0x4F, 0x4A, 0x76, 0x3F, 0xAC, 0xB8, 0x82, 0xFD, 0xFE, 0x17, 0x0F, 0xD3, 0xB1, 0xF7, 0x80, 0xF9, 0xAC, 0xCE, 0x41, 0x79, 0x7F, 0x28, 0x05, 0xC2, 0x46, 0x78, 0x5E, 0x92, 0x95, 0x70, 0x23, 0x5F, 0xCF, 0x8F, 0x7B, 0xCA, 0x3E, 0xA3, 0x3B, 0x4D, 0x7C, 0x60, 0xA5, 0xE6, 0x33, 0xE3, 0xE1 /* 4096 */ }; /* returns 1 on key size ok and 0 if not ok */ static WC_INLINE int RsaSizeCheck(int size) { if (size < RSA_MIN_SIZE || size > RSA_MAX_SIZE) { return 0; } #ifdef HAVE_FIPS /* Key size requirements for CAVP */ switch (size) { case 1024: case 2048: case 3072: case 4096: return 1; } return 0; #else return 1; /* allow unusual key sizes in non FIPS mode */ #endif /* HAVE_FIPS */ } static int _CheckProbablePrime(mp_int* p, mp_int* q, mp_int* e, int nlen, int* isPrime, WC_RNG* rng) { int ret; mp_int tmp1, tmp2; mp_int* prime; if (p == NULL || e == NULL || isPrime == NULL) return BAD_FUNC_ARG; if (!RsaSizeCheck(nlen)) return BAD_FUNC_ARG; *isPrime = MP_NO; if (q != NULL) { /* 5.4 - check that |p-q| <= (2^(1/2))(2^((nlen/2)-1)) */ ret = wc_CompareDiffPQ(p, q, nlen); if (ret != MP_OKAY) goto notOkay; prime = q; } else prime = p; ret = mp_init_multi(&tmp1, &tmp2, NULL, NULL, NULL, NULL); if (ret != MP_OKAY) goto notOkay; /* 4.4,5.5 - Check that prime >= (2^(1/2))(2^((nlen/2)-1)) * This is a comparison against lowerBound */ ret = mp_read_unsigned_bin(&tmp1, lower_bound, nlen/16); if (ret != MP_OKAY) goto notOkay; ret = mp_cmp(prime, &tmp1); if (ret == MP_LT) goto exit; /* 4.5,5.6 - Check that GCD(p-1, e) == 1 */ ret = mp_sub_d(prime, 1, &tmp1); /* tmp1 = prime-1 */ if (ret != MP_OKAY) goto notOkay; ret = mp_gcd(&tmp1, e, &tmp2); /* tmp2 = gcd(prime-1, e) */ if (ret != MP_OKAY) goto notOkay; ret = mp_cmp_d(&tmp2, 1); if (ret != MP_EQ) goto exit; /* e divides p-1 */ /* 4.5.1,5.6.1 - Check primality of p with 8 rounds of M-R. * mp_prime_is_prime_ex() performs test divisions against the first 256 * prime numbers. After that it performs 8 rounds of M-R using random * bases between 2 and n-2. * mp_prime_is_prime() performs the same test divisions and then does * M-R with the first 8 primes. Both functions set isPrime as a * side-effect. 
*/ if (rng != NULL) ret = mp_prime_is_prime_ex(prime, 8, isPrime, rng); else ret = mp_prime_is_prime(prime, 8, isPrime); if (ret != MP_OKAY) goto notOkay; exit: ret = MP_OKAY; notOkay: mp_clear(&tmp1); mp_clear(&tmp2); return ret; } int wc_CheckProbablePrime_ex(const byte* pRaw, word32 pRawSz, const byte* qRaw, word32 qRawSz, const byte* eRaw, word32 eRawSz, int nlen, int* isPrime, WC_RNG* rng) { mp_int p, q, e; mp_int* Q = NULL; int ret; if (pRaw == NULL || pRawSz == 0 || eRaw == NULL || eRawSz == 0 || isPrime == NULL) { return BAD_FUNC_ARG; } if ((qRaw != NULL && qRawSz == 0) || (qRaw == NULL && qRawSz != 0)) return BAD_FUNC_ARG; ret = mp_init_multi(&p, &q, &e, NULL, NULL, NULL); if (ret == MP_OKAY) ret = mp_read_unsigned_bin(&p, pRaw, pRawSz); if (ret == MP_OKAY) { if (qRaw != NULL) { ret = mp_read_unsigned_bin(&q, qRaw, qRawSz); if (ret == MP_OKAY) Q = &q; } } if (ret == MP_OKAY) ret = mp_read_unsigned_bin(&e, eRaw, eRawSz); if (ret == MP_OKAY) ret = _CheckProbablePrime(&p, Q, &e, nlen, isPrime, rng); ret = (ret == MP_OKAY) ? 0 : PRIME_GEN_E; mp_clear(&p); mp_clear(&q); mp_clear(&e); return ret; } int wc_CheckProbablePrime(const byte* pRaw, word32 pRawSz, const byte* qRaw, word32 qRawSz, const byte* eRaw, word32 eRawSz, int nlen, int* isPrime) { return wc_CheckProbablePrime_ex(pRaw, pRawSz, qRaw, qRawSz, eRaw, eRawSz, nlen, isPrime, NULL); } #if !defined(HAVE_FIPS) || (defined(HAVE_FIPS) && \ defined(HAVE_FIPS_VERSION) && (HAVE_FIPS_VERSION >= 2)) /* Make an RSA key for size bits, with e specified, 65537 is a good e */ int wc_MakeRsaKey(RsaKey* key, int size, long e, WC_RNG* rng) { #ifndef WC_NO_RNG #ifdef WOLFSSL_SMALL_STACK mp_int *p = (mp_int *)XMALLOC(sizeof *p, key->heap, DYNAMIC_TYPE_RSA); mp_int *q = (mp_int *)XMALLOC(sizeof *q, key->heap, DYNAMIC_TYPE_RSA); mp_int *tmp1 = (mp_int *)XMALLOC(sizeof *tmp1, key->heap, DYNAMIC_TYPE_RSA); mp_int *tmp2 = (mp_int *)XMALLOC(sizeof *tmp2, key->heap, DYNAMIC_TYPE_RSA); mp_int *tmp3 = (mp_int *)XMALLOC(sizeof *tmp3, key->heap, DYNAMIC_TYPE_RSA); #else mp_int p_buf, *p = &p_buf; mp_int q_buf, *q = &q_buf; mp_int tmp1_buf, *tmp1 = &tmp1_buf; mp_int tmp2_buf, *tmp2 = &tmp2_buf; mp_int tmp3_buf, *tmp3 = &tmp3_buf; #endif int err, i, failCount, primeSz, isPrime = 0; byte* buf = NULL; #ifdef WOLFSSL_SMALL_STACK if ((p == NULL) || (q == NULL) || (tmp1 == NULL) || (tmp2 == NULL) || (tmp3 == NULL)) { err = MEMORY_E; goto out; } #endif if (key == NULL || rng == NULL) { err = BAD_FUNC_ARG; goto out; } if (!RsaSizeCheck(size)) { err = BAD_FUNC_ARG; goto out; } if (e < 3 || (e & 1) == 0) { err = BAD_FUNC_ARG; goto out; } #if defined(WOLFSSL_CRYPTOCELL) err = cc310_RSA_GenerateKeyPair(key, size, e); goto out; #endif /*WOLFSSL_CRYPTOCELL*/ #ifdef WOLF_CRYPTO_CB if (key->devId != INVALID_DEVID) { err = wc_CryptoCb_MakeRsaKey(key, size, e, rng); if (err != CRYPTOCB_UNAVAILABLE) goto out; /* fall-through when unavailable */ } #endif #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_RSA) && \ defined(WC_ASYNC_ENABLE_RSA_KEYGEN) if (key->asyncDev.marker == WOLFSSL_ASYNC_MARKER_RSA) { #ifdef HAVE_CAVIUM /* TODO: Not implemented */ #elif defined(HAVE_INTEL_QA) err = IntelQaRsaKeyGen(&key->asyncDev, key, size, e, rng); goto out; #else if (wc_AsyncTestInit(&key->asyncDev, ASYNC_TEST_RSA_MAKE)) { WC_ASYNC_TEST* testDev = &key->asyncDev.test; testDev->rsaMake.rng = rng; testDev->rsaMake.key = key; testDev->rsaMake.size = size; testDev->rsaMake.e = e; err = WC_PENDING_E; goto out; } #endif } #endif err = mp_init_multi(p, q, tmp1, tmp2, tmp3, NULL); if (err 
== MP_OKAY) err = mp_set_int(tmp3, e); /* The failCount value comes from NIST FIPS 186-4, section B.3.3, * process steps 4.7 and 5.8. */ failCount = 5 * (size / 2); primeSz = size / 16; /* size is the size of n in bits. primeSz is in bytes. */ /* allocate buffer to work with */ if (err == MP_OKAY) { buf = (byte*)XMALLOC(primeSz, key->heap, DYNAMIC_TYPE_RSA); if (buf == NULL) err = MEMORY_E; } /* make p */ if (err == MP_OKAY) { isPrime = 0; i = 0; do { #ifdef SHOW_GEN printf("."); fflush(stdout); #endif /* generate value */ err = wc_RNG_GenerateBlock(rng, buf, primeSz); if (err == 0) { /* prime lower bound has the MSB set, set it in candidate */ buf[0] |= 0x80; /* make candidate odd */ buf[primeSz-1] |= 0x01; /* load value */ err = mp_read_unsigned_bin(p, buf, primeSz); } if (err == MP_OKAY) err = _CheckProbablePrime(p, NULL, tmp3, size, &isPrime, rng); #ifdef HAVE_FIPS i++; #else /* Keep the old retry behavior in non-FIPS build. */ (void)i; #endif } while (err == MP_OKAY && !isPrime && i < failCount); } if (err == MP_OKAY && !isPrime) err = PRIME_GEN_E; /* make q */ if (err == MP_OKAY) { isPrime = 0; i = 0; do { #ifdef SHOW_GEN printf("."); fflush(stdout); #endif /* generate value */ err = wc_RNG_GenerateBlock(rng, buf, primeSz); if (err == 0) { /* prime lower bound has the MSB set, set it in candidate */ buf[0] |= 0x80; /* make candidate odd */ buf[primeSz-1] |= 0x01; /* load value */ err = mp_read_unsigned_bin(q, buf, primeSz); } if (err == MP_OKAY) err = _CheckProbablePrime(p, q, tmp3, size, &isPrime, rng); #ifdef HAVE_FIPS i++; #else /* Keep the old retry behavior in non-FIPS build. */ (void)i; #endif } while (err == MP_OKAY && !isPrime && i < failCount); } if (err == MP_OKAY && !isPrime) err = PRIME_GEN_E; if (buf) { ForceZero(buf, primeSz); XFREE(buf, key->heap, DYNAMIC_TYPE_RSA); } if (err == MP_OKAY && mp_cmp(p, q) < 0) { err = mp_copy(p, tmp1); if (err == MP_OKAY) err = mp_copy(q, p); if (err == MP_OKAY) mp_copy(tmp1, q); } /* Setup RsaKey buffers */ if (err == MP_OKAY) err = mp_init_multi(&key->n, &key->e, &key->d, &key->p, &key->q, NULL); if (err == MP_OKAY) err = mp_init_multi(&key->dP, &key->dQ, &key->u, NULL, NULL, NULL); /* Software Key Calculation */ if (err == MP_OKAY) /* tmp1 = p-1 */ err = mp_sub_d(p, 1, tmp1); if (err == MP_OKAY) /* tmp2 = q-1 */ err = mp_sub_d(q, 1, tmp2); #ifdef WC_RSA_BLINDING if (err == MP_OKAY) /* tmp3 = order of n */ err = mp_mul(tmp1, tmp2, tmp3); #else if (err == MP_OKAY) /* tmp3 = lcm(p-1, q-1), last loop */ err = mp_lcm(tmp1, tmp2, tmp3); #endif /* make key */ if (err == MP_OKAY) /* key->e = e */ err = mp_set_int(&key->e, (mp_digit)e); #ifdef WC_RSA_BLINDING /* Blind the inverse operation with a value that is invertable */ if (err == MP_OKAY) { do { err = mp_rand(&key->p, get_digit_count(tmp3), rng); if (err == MP_OKAY) err = mp_set_bit(&key->p, 0); if (err == MP_OKAY) err = mp_set_bit(&key->p, size - 1); if (err == MP_OKAY) err = mp_gcd(&key->p, tmp3, &key->q); } while ((err == MP_OKAY) && !mp_isone(&key->q)); } if (err == MP_OKAY) err = mp_mul_d(&key->p, (mp_digit)e, &key->e); #endif if (err == MP_OKAY) /* key->d = 1/e mod lcm(p-1, q-1) */ err = mp_invmod(&key->e, tmp3, &key->d); #ifdef WC_RSA_BLINDING /* Take off blinding from d and reset e */ if (err == MP_OKAY) err = mp_mulmod(&key->d, &key->p, tmp3, &key->d); if (err == MP_OKAY) err = mp_set_int(&key->e, (mp_digit)e); #endif if (err == MP_OKAY) /* key->n = pq */ err = mp_mul(p, q, &key->n); if (err == MP_OKAY) /* key->dP = d mod(p-1) */ err = mp_mod(&key->d, tmp1, &key->dP); if (err == 
MP_OKAY) /* key->dQ = d mod(q-1) */ err = mp_mod(&key->d, tmp2, &key->dQ); #ifdef WOLFSSL_MP_INVMOD_CONSTANT_TIME if (err == MP_OKAY) /* key->u = 1/q mod p */ err = mp_invmod(q, p, &key->u); #else if (err == MP_OKAY) err = mp_sub_d(p, 2, tmp3); if (err == MP_OKAY) /* key->u = 1/q mod p = q^p-2 mod p */ err = mp_exptmod(q, tmp3 , p, &key->u); #endif if (err == MP_OKAY) err = mp_copy(p, &key->p); if (err == MP_OKAY) err = mp_copy(q, &key->q); #ifdef HAVE_WOLF_BIGINT /* make sure raw unsigned bin version is available */ if (err == MP_OKAY) err = wc_mp_to_bigint(&key->n, &key->n.raw); if (err == MP_OKAY) err = wc_mp_to_bigint(&key->e, &key->e.raw); if (err == MP_OKAY) err = wc_mp_to_bigint(&key->d, &key->d.raw); if (err == MP_OKAY) err = wc_mp_to_bigint(&key->p, &key->p.raw); if (err == MP_OKAY) err = wc_mp_to_bigint(&key->q, &key->q.raw); if (err == MP_OKAY) err = wc_mp_to_bigint(&key->dP, &key->dP.raw); if (err == MP_OKAY) err = wc_mp_to_bigint(&key->dQ, &key->dQ.raw); if (err == MP_OKAY) err = wc_mp_to_bigint(&key->u, &key->u.raw); #endif if (err == MP_OKAY) key->type = RSA_PRIVATE; mp_clear(tmp1); mp_clear(tmp2); mp_clear(tmp3); mp_clear(p); mp_clear(q); #if defined(WOLFSSL_KEY_GEN) && !defined(WOLFSSL_NO_RSA_KEY_CHECK) /* Perform the pair-wise consistency test on the new key. */ if (err == 0) err = wc_CheckRsaKey(key); #endif if (err != 0) { wc_FreeRsaKey(key); goto out; } #if defined(WOLFSSL_XILINX_CRYPT) || defined(WOLFSSL_CRYPTOCELL) if (wc_InitRsaHw(key) != 0) { return BAD_STATE_E; } #endif err = 0; out: #ifdef WOLFSSL_SMALL_STACK if (p) XFREE(p, key->heap, DYNAMIC_TYPE_RSA); if (q) XFREE(q, key->heap, DYNAMIC_TYPE_RSA); if (tmp1) XFREE(tmp1, key->heap, DYNAMIC_TYPE_RSA); if (tmp2) XFREE(tmp2, key->heap, DYNAMIC_TYPE_RSA); if (tmp3) XFREE(tmp3, key->heap, DYNAMIC_TYPE_RSA); #endif return err; #else return NOT_COMPILED_IN; #endif } #endif /* !FIPS || FIPS_VER >= 2 */ #endif /* WOLFSSL_KEY_GEN */ #ifdef WC_RSA_BLINDING int wc_RsaSetRNG(RsaKey* key, WC_RNG* rng) { if (key == NULL) return BAD_FUNC_ARG; key->rng = rng; return 0; } #endif /* WC_RSA_BLINDING */ #ifdef WC_RSA_NONBLOCK int wc_RsaSetNonBlock(RsaKey* key, RsaNb* nb) { if (key == NULL) return BAD_FUNC_ARG; if (nb) { XMEMSET(nb, 0, sizeof(RsaNb)); } /* Allow nb == NULL to clear non-block mode */ key->nb = nb; return 0; } #ifdef WC_RSA_NONBLOCK_TIME int wc_RsaSetNonBlockTime(RsaKey* key, word32 maxBlockUs, word32 cpuMHz) { if (key == NULL || key->nb == NULL) { return BAD_FUNC_ARG; } /* calculate maximum number of instructions to block */ key->nb->exptmod.maxBlockInst = cpuMHz * maxBlockUs; return 0; } #endif /* WC_RSA_NONBLOCK_TIME */ #endif /* WC_RSA_NONBLOCK */ #endif /* NO_RSA */
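The private-key branch of wc_RsaFunctionSync above combines three ideas: blinding (multiply the input by r^e, later multiply the result by r^-1 mod n), a CRT split into two half-size exponentiations mod p and mod q, and Garner recombination through u = q^-1 mod p. The sketch below is an editor-added illustration, not wolfSSL code: it uses tiny hard-coded values (p = 11, q = 13, e = 7, d = 43) and plain uint64_t arithmetic in place of the mp_int calls, purely so the arithmetic can be checked by hand.

/*
 * Illustrative sketch only, mirroring the blind -> CRT -> recombine -> unblind
 * steps of wc_RsaFunctionSync with toy numbers instead of mp_int operations.
 */
#include <stdint.h>
#include <stdio.h>

/* square-and-multiply modular exponentiation for small operands */
static uint64_t modexp(uint64_t b, uint64_t e, uint64_t m)
{
    uint64_t r = 1;
    b %= m;
    while (e > 0) {
        if (e & 1)
            r = (r * b) % m;
        b = (b * b) % m;
        e >>= 1;
    }
    return r;
}

int main(void)
{
    /* toy key: p=11, q=13, n=143, e=7, d=43 (7*43 == 1 mod lcm(10,12)=60) */
    const uint64_t p = 11, q = 13, n = 143, e = 7, d = 43;
    const uint64_t dP = d % (p - 1);      /* 3 */
    const uint64_t dQ = d % (q - 1);      /* 7 */
    const uint64_t qInv = 6;              /* 13^-1 mod 11, since 13*6 == 78 == 1 mod 11 */
    const uint64_t msg = 9;
    const uint64_t c = modexp(msg, e, n); /* "ciphertext" = 48 */

    /* blind: c' = c * r^e mod n, as in the WC_RSA_BLINDING branch */
    const uint64_t r = 5, rInv = 86;      /* 5 * 86 == 430 == 1 mod 143 */
    uint64_t cb = (c * modexp(r, e, n)) % n;

    /* CRT: m1 = c'^dP mod p, m2 = c'^dQ mod q,
     * m'  = m2 + q * ((m1 - m2) * qInv mod p)   (Garner recombination) */
    uint64_t m1 = modexp(cb, dP, p);
    uint64_t m2 = modexp(cb, dQ, q);
    uint64_t h  = ((m1 + p - (m2 % p)) * qInv) % p;
    uint64_t mb = m2 + q * h;

    /* unblind: m = m' * r^-1 mod n */
    uint64_t m = (mb * rInv) % n;

    printf("recovered %llu, direct c^d mod n = %llu\n",
           (unsigned long long)m, (unsigned long long)modexp(c, d, n));
    return m == msg ? 0 : 1;
}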
null
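For the key-generation path, wc_MakeRsaKey above derives the private values from the freshly generated primes: d = e^-1 mod lcm(p-1, q-1) (the WC_RSA_BLINDING build uses (p-1)(q-1) instead), dP = d mod (p-1), dQ = d mod (q-1), and u = q^-1 mod p, after which wc_CheckRsaKey runs the pair-wise consistency test (k^e)^d == k. The stand-alone sketch below repeats those steps with toy primes; the helper names egcd, invmod, and modexp are illustrative only and are not wolfSSL APIs.

/*
 * Illustrative sketch only: the "Software Key Calculation" steps of
 * wc_MakeRsaKey with small hard-coded primes and plain integers.
 */
#include <stdint.h>
#include <stdio.h>

static int64_t egcd(int64_t a, int64_t b, int64_t *x, int64_t *y)
{
    if (b == 0) { *x = 1; *y = 0; return a; }
    int64_t x1, y1, g = egcd(b, a % b, &x1, &y1);
    *x = y1;
    *y = x1 - (a / b) * y1;
    return g;
}

static int64_t invmod(int64_t a, int64_t m)   /* a^-1 mod m, assumes gcd(a,m)=1 */
{
    int64_t x, y;
    (void)egcd(a % m, m, &x, &y);
    return ((x % m) + m) % m;
}

static int64_t modexp(int64_t b, int64_t e, int64_t m)
{
    int64_t r = 1;
    b %= m;
    while (e > 0) {
        if (e & 1) r = (r * b) % m;
        b = (b * b) % m;
        e >>= 1;
    }
    return r;
}

int main(void)
{
    const int64_t p = 11, q = 13, e = 7;          /* toy values, not a real key */
    int64_t x, y;
    int64_t n    = p * q;                         /* 143 */
    int64_t g    = egcd(p - 1, q - 1, &x, &y);
    int64_t lcm  = (p - 1) / g * (q - 1);         /* 60 */
    int64_t d    = invmod(e, lcm);                /* 43: key->d = 1/e mod lcm(p-1,q-1) */
    int64_t dP   = d % (p - 1), dQ = d % (q - 1); /* 3, 7: key->dP, key->dQ */
    int64_t u    = invmod(q, p);                  /* 6: key->u = 1/q mod p */
    int64_t k    = 2;                             /* 1 < k < n-1 */
    int64_t back = modexp(modexp(k, e, n), d, n); /* pair-wise consistency test */

    printf("n=%lld d=%lld dP=%lld dQ=%lld u=%lld  (k^e)^d=%lld\n",
           (long long)n, (long long)d, (long long)dP, (long long)dQ,
           (long long)u, (long long)back);
    return back == k ? 0 : 1;
}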
/* rsa.c * * Copyright (C) 2006-2020 wolfSSL Inc. * * This file is part of wolfSSL. * * wolfSSL is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * wolfSSL is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA */ /* DESCRIPTION This library provides the interface to the RSA. RSA keys can be used to encrypt, decrypt, sign and verify data. */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <wolfssl/wolfcrypt/settings.h> #include <wolfssl/wolfcrypt/error-crypt.h> #ifndef NO_RSA #if defined(HAVE_FIPS) && \ defined(HAVE_FIPS_VERSION) && (HAVE_FIPS_VERSION >= 2) /* set NO_WRAPPERS before headers, use direct internal f()s not wrappers */ #define FIPS_NO_WRAPPERS #ifdef USE_WINDOWS_API #pragma code_seg(".fipsA$e") #pragma const_seg(".fipsB$e") #endif #endif #include <wolfssl/wolfcrypt/rsa.h> #ifdef WOLFSSL_AFALG_XILINX_RSA #include <wolfssl/wolfcrypt/port/af_alg/wc_afalg.h> #endif #ifdef WOLFSSL_HAVE_SP_RSA #include <wolfssl/wolfcrypt/sp.h> #endif /* Possible RSA enable options: * NO_RSA: Overall control of RSA default: on (not defined) * WC_RSA_BLINDING: Uses Blinding w/ Private Ops default: off Note: slower by ~20% * WOLFSSL_KEY_GEN: Allows Private Key Generation default: off * RSA_LOW_MEM: NON CRT Private Operations, less memory default: off * WC_NO_RSA_OAEP: Disables RSA OAEP padding default: on (not defined) * WC_RSA_NONBLOCK: Enables support for RSA non-blocking default: off * WC_RSA_NONBLOCK_TIME:Enables support for time based blocking default: off * time calculation. */ /* RSA Key Size Configuration: * FP_MAX_BITS: With USE_FAST_MATH only default: 4096 If USE_FAST_MATH then use this to override default. Value is key size * 2. Example: RSA 3072 = 6144 */ /* If building for old FIPS. 
*/ #if defined(HAVE_FIPS) && \ (!defined(HAVE_FIPS_VERSION) || (HAVE_FIPS_VERSION < 2)) int wc_InitRsaKey(RsaKey* key, void* ptr) { if (key == NULL) { return BAD_FUNC_ARG; } return InitRsaKey_fips(key, ptr); } int wc_InitRsaKey_ex(RsaKey* key, void* ptr, int devId) { (void)devId; if (key == NULL) { return BAD_FUNC_ARG; } return InitRsaKey_fips(key, ptr); } int wc_FreeRsaKey(RsaKey* key) { return FreeRsaKey_fips(key); } #ifndef WOLFSSL_RSA_VERIFY_ONLY int wc_RsaPublicEncrypt(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key, WC_RNG* rng) { if (in == NULL || out == NULL || key == NULL || rng == NULL) { return BAD_FUNC_ARG; } return RsaPublicEncrypt_fips(in, inLen, out, outLen, key, rng); } #endif #ifndef WOLFSSL_RSA_PUBLIC_ONLY int wc_RsaPrivateDecryptInline(byte* in, word32 inLen, byte** out, RsaKey* key) { if (in == NULL || out == NULL || key == NULL) { return BAD_FUNC_ARG; } return RsaPrivateDecryptInline_fips(in, inLen, out, key); } int wc_RsaPrivateDecrypt(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key) { if (in == NULL || out == NULL || key == NULL) { return BAD_FUNC_ARG; } return RsaPrivateDecrypt_fips(in, inLen, out, outLen, key); } int wc_RsaSSL_Sign(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key, WC_RNG* rng) { if (in == NULL || out == NULL || key == NULL || inLen == 0) { return BAD_FUNC_ARG; } return RsaSSL_Sign_fips(in, inLen, out, outLen, key, rng); } #endif int wc_RsaSSL_VerifyInline(byte* in, word32 inLen, byte** out, RsaKey* key) { if (in == NULL || out == NULL || key == NULL) { return BAD_FUNC_ARG; } return RsaSSL_VerifyInline_fips(in, inLen, out, key); } int wc_RsaSSL_Verify(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key) { if (in == NULL || out == NULL || key == NULL || inLen == 0) { return BAD_FUNC_ARG; } return RsaSSL_Verify_fips(in, inLen, out, outLen, key); } int wc_RsaEncryptSize(RsaKey* key) { if (key == NULL) { return BAD_FUNC_ARG; } return RsaEncryptSize_fips(key); } #ifndef WOLFSSL_RSA_VERIFY_ONLY int wc_RsaFlattenPublicKey(RsaKey* key, byte* a, word32* aSz, byte* b, word32* bSz) { /* not specified as fips so not needing _fips */ return RsaFlattenPublicKey(key, a, aSz, b, bSz); } #endif #ifdef WOLFSSL_KEY_GEN int wc_MakeRsaKey(RsaKey* key, int size, long e, WC_RNG* rng) { return MakeRsaKey(key, size, e, rng); } #endif /* these are functions in asn and are routed to wolfssl/wolfcrypt/asn.c * wc_RsaPrivateKeyDecode * wc_RsaPublicKeyDecode */ #else /* else build without fips, or for new fips */ #include <wolfssl/wolfcrypt/random.h> #include <wolfssl/wolfcrypt/logging.h> #ifdef WOLF_CRYPTO_CB #include <wolfssl/wolfcrypt/cryptocb.h> #endif #ifdef NO_INLINE #include <wolfssl/wolfcrypt/misc.h> #else #define WOLFSSL_MISC_INCLUDED #include <wolfcrypt/src/misc.c> #endif enum { RSA_STATE_NONE = 0, RSA_STATE_ENCRYPT_PAD, RSA_STATE_ENCRYPT_EXPTMOD, RSA_STATE_ENCRYPT_RES, RSA_STATE_DECRYPT_EXPTMOD, RSA_STATE_DECRYPT_UNPAD, RSA_STATE_DECRYPT_RES, }; static void wc_RsaCleanup(RsaKey* key) { #ifndef WOLFSSL_RSA_VERIFY_INLINE if (key && key->data) { /* make sure any allocated memory is free'd */ if (key->dataIsAlloc) { #ifndef WOLFSSL_RSA_PUBLIC_ONLY if (key->type == RSA_PRIVATE_DECRYPT || key->type == RSA_PRIVATE_ENCRYPT) { ForceZero(key->data, key->dataLen); } #endif XFREE(key->data, key->heap, DYNAMIC_TYPE_WOLF_BIGINT); key->dataIsAlloc = 0; } key->data = NULL; key->dataLen = 0; } #else (void)key; #endif } int wc_InitRsaKey_ex(RsaKey* key, void* heap, int devId) { int ret = 0; if (key == NULL) { 
return BAD_FUNC_ARG; } XMEMSET(key, 0, sizeof(RsaKey)); key->type = RSA_TYPE_UNKNOWN; key->state = RSA_STATE_NONE; key->heap = heap; #ifndef WOLFSSL_RSA_VERIFY_INLINE key->dataIsAlloc = 0; key->data = NULL; #endif key->dataLen = 0; #ifdef WC_RSA_BLINDING key->rng = NULL; #endif #ifdef WOLF_CRYPTO_CB key->devId = devId; #else (void)devId; #endif #ifdef WOLFSSL_ASYNC_CRYPT #ifdef WOLFSSL_CERT_GEN XMEMSET(&key->certSignCtx, 0, sizeof(CertSignCtx)); #endif #ifdef WC_ASYNC_ENABLE_RSA /* handle as async */ ret = wolfAsync_DevCtxInit(&key->asyncDev, WOLFSSL_ASYNC_MARKER_RSA, key->heap, devId); if (ret != 0) return ret; #endif /* WC_ASYNC_ENABLE_RSA */ #endif /* WOLFSSL_ASYNC_CRYPT */ #ifndef WOLFSSL_RSA_PUBLIC_ONLY ret = mp_init_multi(&key->n, &key->e, NULL, NULL, NULL, NULL); if (ret != MP_OKAY) return ret; #if !defined(WOLFSSL_KEY_GEN) && !defined(OPENSSL_EXTRA) && defined(RSA_LOW_MEM) ret = mp_init_multi(&key->d, &key->p, &key->q, NULL, NULL, NULL); #else ret = mp_init_multi(&key->d, &key->p, &key->q, &key->dP, &key->dQ, &key->u); #endif if (ret != MP_OKAY) { mp_clear(&key->n); mp_clear(&key->e); return ret; } #else ret = mp_init(&key->n); if (ret != MP_OKAY) return ret; ret = mp_init(&key->e); if (ret != MP_OKAY) { mp_clear(&key->n); return ret; } #endif #ifdef WOLFSSL_XILINX_CRYPT key->pubExp = 0; key->mod = NULL; #endif #ifdef WOLFSSL_AFALG_XILINX_RSA key->alFd = WC_SOCK_NOTSET; key->rdFd = WC_SOCK_NOTSET; #endif return ret; } int wc_InitRsaKey(RsaKey* key, void* heap) { return wc_InitRsaKey_ex(key, heap, INVALID_DEVID); } #ifdef HAVE_PKCS11 int wc_InitRsaKey_Id(RsaKey* key, unsigned char* id, int len, void* heap, int devId) { int ret = 0; if (key == NULL) ret = BAD_FUNC_ARG; if (ret == 0 && (len < 0 || len > RSA_MAX_ID_LEN)) ret = BUFFER_E; if (ret == 0) ret = wc_InitRsaKey_ex(key, heap, devId); if (ret == 0 && id != NULL && len != 0) { XMEMCPY(key->id, id, len); key->idLen = len; } return ret; } #endif #ifdef WOLFSSL_XILINX_CRYPT #define MAX_E_SIZE 4 /* Used to setup hardware state * * key the RSA key to setup * * returns 0 on success */ int wc_InitRsaHw(RsaKey* key) { unsigned char* m; /* RSA modulous */ word32 e = 0; /* RSA public exponent */ int mSz; int eSz; if (key == NULL) { return BAD_FUNC_ARG; } mSz = mp_unsigned_bin_size(&(key->n)); m = (unsigned char*)XMALLOC(mSz, key->heap, DYNAMIC_TYPE_KEY); if (m == NULL) { return MEMORY_E; } if (mp_to_unsigned_bin(&(key->n), m) != MP_OKAY) { WOLFSSL_MSG("Unable to get RSA key modulus"); XFREE(m, key->heap, DYNAMIC_TYPE_KEY); return MP_READ_E; } eSz = mp_unsigned_bin_size(&(key->e)); if (eSz > MAX_E_SIZE) { WOLFSSL_MSG("Exponent of size 4 bytes expected"); XFREE(m, key->heap, DYNAMIC_TYPE_KEY); return BAD_FUNC_ARG; } if (mp_to_unsigned_bin(&(key->e), (byte*)&e + (MAX_E_SIZE - eSz)) != MP_OKAY) { XFREE(m, key->heap, DYNAMIC_TYPE_KEY); WOLFSSL_MSG("Unable to get RSA key exponent"); return MP_READ_E; } /* check for existing mod buffer to avoid memory leak */ if (key->mod != NULL) { XFREE(key->mod, key->heap, DYNAMIC_TYPE_KEY); } key->pubExp = e; key->mod = m; if (XSecure_RsaInitialize(&(key->xRsa), key->mod, NULL, (byte*)&(key->pubExp)) != XST_SUCCESS) { WOLFSSL_MSG("Unable to initialize RSA on hardware"); XFREE(m, key->heap, DYNAMIC_TYPE_KEY); return BAD_STATE_E; } #ifdef WOLFSSL_XILINX_PATCH /* currently a patch of xsecure_rsa.c for 2048 bit keys */ if (wc_RsaEncryptSize(key) == 256) { if (XSecure_RsaSetSize(&(key->xRsa), 2048) != XST_SUCCESS) { WOLFSSL_MSG("Unable to set RSA key size on hardware"); XFREE(m, key->heap, DYNAMIC_TYPE_KEY); 
return BAD_STATE_E; } } #endif return 0; } /* WOLFSSL_XILINX_CRYPT*/ #elif defined(WOLFSSL_CRYPTOCELL) int wc_InitRsaHw(RsaKey* key) { CRYSError_t ret = 0; byte e[3]; word32 eSz = sizeof(e); byte n[256]; word32 nSz = sizeof(n); byte d[256]; word32 dSz = sizeof(d); byte p[128]; word32 pSz = sizeof(p); byte q[128]; word32 qSz = sizeof(q); if (key == NULL) { return BAD_FUNC_ARG; } ret = wc_RsaExportKey(key, e, &eSz, n, &nSz, d, &dSz, p, &pSz, q, &qSz); if (ret != 0) return MP_READ_E; ret = CRYS_RSA_Build_PubKey(&key->ctx.pubKey, e, eSz, n, nSz); if (ret != SA_SILIB_RET_OK){ WOLFSSL_MSG("CRYS_RSA_Build_PubKey failed"); return ret; } ret = CRYS_RSA_Build_PrivKey(&key->ctx.privKey, d, dSz, e, eSz, n, nSz); if (ret != SA_SILIB_RET_OK){ WOLFSSL_MSG("CRYS_RSA_Build_PrivKey failed"); return ret; } key->type = RSA_PRIVATE; return 0; } static int cc310_RSA_GenerateKeyPair(RsaKey* key, int size, long e) { CRYSError_t ret = 0; CRYS_RSAKGData_t KeyGenData; CRYS_RSAKGFipsContext_t FipsCtx; byte ex[3]; uint16_t eSz = sizeof(ex); byte n[256]; uint16_t nSz = sizeof(n); ret = CRYS_RSA_KG_GenerateKeyPair(&wc_rndState, wc_rndGenVectFunc, (byte*)&e, 3*sizeof(uint8_t), size, &key->ctx.privKey, &key->ctx.pubKey, &KeyGenData, &FipsCtx); if (ret != SA_SILIB_RET_OK){ WOLFSSL_MSG("CRYS_RSA_KG_GenerateKeyPair failed"); return ret; } ret = CRYS_RSA_Get_PubKey(&key->ctx.pubKey, ex, &eSz, n, &nSz); if (ret != SA_SILIB_RET_OK){ WOLFSSL_MSG("CRYS_RSA_Get_PubKey failed"); return ret; } ret = wc_RsaPublicKeyDecodeRaw(n, nSz, ex, eSz, key); key->type = RSA_PRIVATE; return ret; } #endif /* WOLFSSL_CRYPTOCELL */ int wc_FreeRsaKey(RsaKey* key) { int ret = 0; if (key == NULL) { return BAD_FUNC_ARG; } wc_RsaCleanup(key); #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_RSA) wolfAsync_DevCtxFree(&key->asyncDev, WOLFSSL_ASYNC_MARKER_RSA); #endif #ifndef WOLFSSL_RSA_PUBLIC_ONLY if (key->type == RSA_PRIVATE) { #if defined(WOLFSSL_KEY_GEN) || defined(OPENSSL_EXTRA) || !defined(RSA_LOW_MEM) mp_forcezero(&key->u); mp_forcezero(&key->dQ); mp_forcezero(&key->dP); #endif mp_forcezero(&key->q); mp_forcezero(&key->p); mp_forcezero(&key->d); } /* private part */ #if defined(WOLFSSL_KEY_GEN) || defined(OPENSSL_EXTRA) || !defined(RSA_LOW_MEM) mp_clear(&key->u); mp_clear(&key->dQ); mp_clear(&key->dP); #endif mp_clear(&key->q); mp_clear(&key->p); mp_clear(&key->d); #endif /* WOLFSSL_RSA_PUBLIC_ONLY */ /* public part */ mp_clear(&key->e); mp_clear(&key->n); #ifdef WOLFSSL_XILINX_CRYPT XFREE(key->mod, key->heap, DYNAMIC_TYPE_KEY); key->mod = NULL; #endif #ifdef WOLFSSL_AFALG_XILINX_RSA /* make sure that sockets are closed on cleanup */ if (key->alFd > 0) { close(key->alFd); key->alFd = WC_SOCK_NOTSET; } if (key->rdFd > 0) { close(key->rdFd); key->rdFd = WC_SOCK_NOTSET; } #endif return ret; } #ifndef WOLFSSL_RSA_PUBLIC_ONLY #if defined(WOLFSSL_KEY_GEN) && !defined(WOLFSSL_NO_RSA_KEY_CHECK) /* Check the pair-wise consistency of the RSA key. * From NIST SP 800-56B, section 6.4.1.1. * Verify that k = (k^e)^d, for some k: 1 < k < n-1. 
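 *
 * Illustrative use (a sketch only; "key" and "ret" are assumed to be an
 * RsaKey holding a decoded private key and an int, respectively):
 *
 *     ret = wc_CheckRsaKey(&key);   // 0 on success
 *     if (ret != 0)
 *         return ret;               // e.g. RSA_KEY_PAIR_E or an MP_* error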
*/ int wc_CheckRsaKey(RsaKey* key) { #if defined(WOLFSSL_CRYPTOCELL) return 0; #endif #ifdef WOLFSSL_SMALL_STACK mp_int *k = NULL, *tmp = NULL; #else mp_int k[1], tmp[1]; #endif int ret = 0; #ifdef WOLFSSL_SMALL_STACK k = (mp_int*)XMALLOC(sizeof(mp_int) * 2, NULL, DYNAMIC_TYPE_RSA); if (k == NULL) return MEMORY_E; tmp = k + 1; #endif if (mp_init_multi(k, tmp, NULL, NULL, NULL, NULL) != MP_OKAY) ret = MP_INIT_E; if (ret == 0) { if (key == NULL) ret = BAD_FUNC_ARG; } if (ret == 0) { if (mp_set_int(k, 0x2342) != MP_OKAY) ret = MP_READ_E; } #ifdef WOLFSSL_HAVE_SP_RSA if (ret == 0) { switch (mp_count_bits(&key->n)) { #ifndef WOLFSSL_SP_NO_2048 case 2048: ret = sp_ModExp_2048(k, &key->e, &key->n, tmp); if (ret != 0) ret = MP_EXPTMOD_E; if (ret == 0) { ret = sp_ModExp_2048(tmp, &key->d, &key->n, tmp); if (ret != 0) ret = MP_EXPTMOD_E; } break; #endif /* WOLFSSL_SP_NO_2048 */ #ifndef WOLFSSL_SP_NO_3072 case 3072: ret = sp_ModExp_3072(k, &key->e, &key->n, tmp); if (ret != 0) ret = MP_EXPTMOD_E; if (ret == 0) { ret = sp_ModExp_3072(tmp, &key->d, &key->n, tmp); if (ret != 0) ret = MP_EXPTMOD_E; } break; #endif /* WOLFSSL_SP_NO_3072 */ #ifdef WOLFSSL_SP_4096 case 4096: ret = sp_ModExp_4096(k, &key->e, &key->n, tmp); if (ret != 0) ret = MP_EXPTMOD_E; if (ret == 0) { ret = sp_ModExp_4096(tmp, &key->d, &key->n, tmp); if (ret != 0) ret = MP_EXPTMOD_E; } break; #endif /* WOLFSSL_SP_4096 */ default: /* If using only single prcsision math then issue key size error, otherwise fall-back to multi-precision math calculation */ #ifdef WOLFSSL_SP_MATH ret = WC_KEY_SIZE_E; #endif break; } } #endif /* WOLFSSL_HAVE_SP_RSA */ #ifndef WOLFSSL_SP_MATH if (ret == 0) { if (mp_exptmod(k, &key->e, &key->n, tmp) != MP_OKAY) ret = MP_EXPTMOD_E; } if (ret == 0) { if (mp_exptmod(tmp, &key->d, &key->n, tmp) != MP_OKAY) ret = MP_EXPTMOD_E; } #endif /* !WOLFSSL_SP_MATH */ if (ret == 0) { if (mp_cmp(k, tmp) != MP_EQ) ret = RSA_KEY_PAIR_E; } /* Check d is less than n. */ if (ret == 0 ) { if (mp_cmp(&key->d, &key->n) != MP_LT) { ret = MP_EXPTMOD_E; } } /* Check p*q = n. */ if (ret == 0 ) { if (mp_mul(&key->p, &key->q, tmp) != MP_OKAY) { ret = MP_EXPTMOD_E; } } if (ret == 0 ) { if (mp_cmp(&key->n, tmp) != MP_EQ) { ret = MP_EXPTMOD_E; } } /* Check dP, dQ and u if they exist */ if (ret == 0 && !mp_iszero(&key->dP)) { if (mp_sub_d(&key->p, 1, tmp) != MP_OKAY) { ret = MP_EXPTMOD_E; } /* Check dP <= p-1. */ if (ret == 0) { if (mp_cmp(&key->dP, tmp) != MP_LT) { ret = MP_EXPTMOD_E; } } /* Check e*dP mod p-1 = 1. (dP = 1/e mod p-1) */ if (ret == 0) { if (mp_mulmod(&key->dP, &key->e, tmp, tmp) != MP_OKAY) { ret = MP_EXPTMOD_E; } } if (ret == 0 ) { if (!mp_isone(tmp)) { ret = MP_EXPTMOD_E; } } if (ret == 0) { if (mp_sub_d(&key->q, 1, tmp) != MP_OKAY) { ret = MP_EXPTMOD_E; } } /* Check dQ <= q-1. */ if (ret == 0) { if (mp_cmp(&key->dQ, tmp) != MP_LT) { ret = MP_EXPTMOD_E; } } /* Check e*dP mod p-1 = 1. (dQ = 1/e mod q-1) */ if (ret == 0) { if (mp_mulmod(&key->dQ, &key->e, tmp, tmp) != MP_OKAY) { ret = MP_EXPTMOD_E; } } if (ret == 0 ) { if (!mp_isone(tmp)) { ret = MP_EXPTMOD_E; } } /* Check u <= p. */ if (ret == 0) { if (mp_cmp(&key->u, &key->p) != MP_LT) { ret = MP_EXPTMOD_E; } } /* Check u*q mod p = 1. 
(u = 1/q mod p) */ if (ret == 0) { if (mp_mulmod(&key->u, &key->q, &key->p, tmp) != MP_OKAY) { ret = MP_EXPTMOD_E; } } if (ret == 0 ) { if (!mp_isone(tmp)) { ret = MP_EXPTMOD_E; } } } mp_forcezero(tmp); mp_clear(tmp); mp_clear(k); #ifdef WOLFSSL_SMALL_STACK XFREE(k, NULL, DYNAMIC_TYPE_RSA); #endif return ret; } #endif /* WOLFSSL_KEY_GEN && !WOLFSSL_NO_RSA_KEY_CHECK */ #endif /* WOLFSSL_RSA_PUBLIC_ONLY */ #if !defined(WC_NO_RSA_OAEP) || defined(WC_RSA_PSS) /* Uses MGF1 standard as a mask generation function hType: hash type used seed: seed to use for generating mask seedSz: size of seed buffer out: mask output after generation outSz: size of output buffer */ #if !defined(NO_SHA) || !defined(NO_SHA256) || defined(WOLFSSL_SHA384) || defined(WOLFSSL_SHA512) static int RsaMGF1(enum wc_HashType hType, byte* seed, word32 seedSz, byte* out, word32 outSz, void* heap) { byte* tmp; /* needs to be large enough for seed size plus counter(4) */ byte tmpA[WC_MAX_DIGEST_SIZE + 4]; byte tmpF; /* 1 if dynamic memory needs freed */ word32 tmpSz; int hLen; int ret; word32 counter; word32 idx; hLen = wc_HashGetDigestSize(hType); counter = 0; idx = 0; (void)heap; /* check error return of wc_HashGetDigestSize */ if (hLen < 0) { return hLen; } /* if tmp is not large enough than use some dynamic memory */ if ((seedSz + 4) > sizeof(tmpA) || (word32)hLen > sizeof(tmpA)) { /* find largest amount of memory needed which will be the max of * hLen and (seedSz + 4) since tmp is used to store the hash digest */ tmpSz = ((seedSz + 4) > (word32)hLen)? seedSz + 4: (word32)hLen; tmp = (byte*)XMALLOC(tmpSz, heap, DYNAMIC_TYPE_RSA_BUFFER); if (tmp == NULL) { return MEMORY_E; } tmpF = 1; /* make sure to free memory when done */ } else { /* use array on the stack */ tmpSz = sizeof(tmpA); tmp = tmpA; tmpF = 0; /* no need to free memory at end */ } do { int i = 0; XMEMCPY(tmp, seed, seedSz); /* counter to byte array appended to tmp */ tmp[seedSz] = (byte)((counter >> 24) & 0xFF); tmp[seedSz + 1] = (byte)((counter >> 16) & 0xFF); tmp[seedSz + 2] = (byte)((counter >> 8) & 0xFF); tmp[seedSz + 3] = (byte)((counter) & 0xFF); /* hash and append to existing output */ if ((ret = wc_Hash(hType, tmp, (seedSz + 4), tmp, tmpSz)) != 0) { /* check for if dynamic memory was needed, then free */ if (tmpF) { XFREE(tmp, heap, DYNAMIC_TYPE_RSA_BUFFER); } return ret; } for (i = 0; i < hLen && idx < outSz; i++) { out[idx++] = tmp[i]; } counter++; } while (idx < outSz); /* check for if dynamic memory was needed, then free */ if (tmpF) { XFREE(tmp, heap, DYNAMIC_TYPE_RSA_BUFFER); } return 0; } #endif /* SHA2 Hashes */ /* helper function to direct which mask generation function is used switched on type input */ static int RsaMGF(int type, byte* seed, word32 seedSz, byte* out, word32 outSz, void* heap) { int ret; switch(type) { #ifndef NO_SHA case WC_MGF1SHA1: ret = RsaMGF1(WC_HASH_TYPE_SHA, seed, seedSz, out, outSz, heap); break; #endif #ifndef NO_SHA256 #ifdef WOLFSSL_SHA224 case WC_MGF1SHA224: ret = RsaMGF1(WC_HASH_TYPE_SHA224, seed, seedSz, out, outSz, heap); break; #endif case WC_MGF1SHA256: ret = RsaMGF1(WC_HASH_TYPE_SHA256, seed, seedSz, out, outSz, heap); break; #endif #ifdef WOLFSSL_SHA384 case WC_MGF1SHA384: ret = RsaMGF1(WC_HASH_TYPE_SHA384, seed, seedSz, out, outSz, heap); break; #endif #ifdef WOLFSSL_SHA512 case WC_MGF1SHA512: ret = RsaMGF1(WC_HASH_TYPE_SHA512, seed, seedSz, out, outSz, heap); break; #endif default: WOLFSSL_MSG("Unknown MGF type: check build options"); ret = BAD_FUNC_ARG; } /* in case of default avoid unused warning */ 
(void)seed; (void)seedSz; (void)out; (void)outSz; (void)heap; return ret; } #endif /* !WC_NO_RSA_OAEP || WC_RSA_PSS */ /* Padding */ #ifndef WOLFSSL_RSA_VERIFY_ONLY #ifndef WC_NO_RNG #ifndef WC_NO_RSA_OAEP static int RsaPad_OAEP(const byte* input, word32 inputLen, byte* pkcsBlock, word32 pkcsBlockLen, byte padValue, WC_RNG* rng, enum wc_HashType hType, int mgf, byte* optLabel, word32 labelLen, void* heap) { int ret; int hLen; int psLen; int i; word32 idx; byte* dbMask; #ifdef WOLFSSL_SMALL_STACK byte* lHash = NULL; byte* seed = NULL; #else /* must be large enough to contain largest hash */ byte lHash[WC_MAX_DIGEST_SIZE]; byte seed[ WC_MAX_DIGEST_SIZE]; #endif /* no label is allowed, but catch if no label provided and length > 0 */ if (optLabel == NULL && labelLen > 0) { return BUFFER_E; } /* limit of label is the same as limit of hash function which is massive */ hLen = wc_HashGetDigestSize(hType); if (hLen < 0) { return hLen; } #ifdef WOLFSSL_SMALL_STACK lHash = (byte*)XMALLOC(hLen, heap, DYNAMIC_TYPE_RSA_BUFFER); if (lHash == NULL) { return MEMORY_E; } seed = (byte*)XMALLOC(hLen, heap, DYNAMIC_TYPE_RSA_BUFFER); if (seed == NULL) { XFREE(lHash, heap, DYNAMIC_TYPE_RSA_BUFFER); return MEMORY_E; } #else /* hLen should never be larger than lHash since size is max digest size, but check before blindly calling wc_Hash */ if ((word32)hLen > sizeof(lHash)) { WOLFSSL_MSG("OAEP lHash to small for digest!!"); return MEMORY_E; } #endif if ((ret = wc_Hash(hType, optLabel, labelLen, lHash, hLen)) != 0) { WOLFSSL_MSG("OAEP hash type possibly not supported or lHash to small"); #ifdef WOLFSSL_SMALL_STACK XFREE(lHash, heap, DYNAMIC_TYPE_RSA_BUFFER); XFREE(seed, heap, DYNAMIC_TYPE_RSA_BUFFER); #endif return ret; } /* handles check of location for idx as well as psLen, cast to int to check for pkcsBlockLen(k) - 2 * hLen - 2 being negative This check is similar to decryption where k > 2 * hLen + 2 as msg size approaches 0. In decryption if k is less than or equal -- then there is no possible room for msg. 
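       For illustration: with a 2048-bit key (k = 256 bytes) and SHA-256
       (hLen = 32) at most 256 - 2*32 - 2 = 190 bytes of message fit.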
k = RSA key size hLen = hash digest size -- will always be >= 0 at this point */ if ((word32)(2 * hLen + 2) > pkcsBlockLen) { WOLFSSL_MSG("OAEP pad error hash to big for RSA key size"); #ifdef WOLFSSL_SMALL_STACK XFREE(lHash, heap, DYNAMIC_TYPE_RSA_BUFFER); XFREE(seed, heap, DYNAMIC_TYPE_RSA_BUFFER); #endif return BAD_FUNC_ARG; } if (inputLen > (pkcsBlockLen - 2 * hLen - 2)) { WOLFSSL_MSG("OAEP pad error message too long"); #ifdef WOLFSSL_SMALL_STACK XFREE(lHash, heap, DYNAMIC_TYPE_RSA_BUFFER); XFREE(seed, heap, DYNAMIC_TYPE_RSA_BUFFER); #endif return BAD_FUNC_ARG; } /* concatenate lHash || PS || 0x01 || msg */ idx = pkcsBlockLen - 1 - inputLen; psLen = pkcsBlockLen - inputLen - 2 * hLen - 2; if (pkcsBlockLen < inputLen) { /*make sure not writing over end of buffer */ #ifdef WOLFSSL_SMALL_STACK XFREE(lHash, heap, DYNAMIC_TYPE_RSA_BUFFER); XFREE(seed, heap, DYNAMIC_TYPE_RSA_BUFFER); #endif return BUFFER_E; } XMEMCPY(pkcsBlock + (pkcsBlockLen - inputLen), input, inputLen); pkcsBlock[idx--] = 0x01; /* PS and M separator */ while (psLen > 0 && idx > 0) { pkcsBlock[idx--] = 0x00; psLen--; } idx = idx - hLen + 1; XMEMCPY(pkcsBlock + idx, lHash, hLen); /* generate random seed */ if ((ret = wc_RNG_GenerateBlock(rng, seed, hLen)) != 0) { #ifdef WOLFSSL_SMALL_STACK XFREE(lHash, heap, DYNAMIC_TYPE_RSA_BUFFER); XFREE(seed, heap, DYNAMIC_TYPE_RSA_BUFFER); #endif return ret; } /* create maskedDB from dbMask */ dbMask = (byte*)XMALLOC(pkcsBlockLen - hLen - 1, heap, DYNAMIC_TYPE_RSA); if (dbMask == NULL) { #ifdef WOLFSSL_SMALL_STACK XFREE(lHash, heap, DYNAMIC_TYPE_RSA_BUFFER); XFREE(seed, heap, DYNAMIC_TYPE_RSA_BUFFER); #endif return MEMORY_E; } XMEMSET(dbMask, 0, pkcsBlockLen - hLen - 1); /* help static analyzer */ ret = RsaMGF(mgf, seed, hLen, dbMask, pkcsBlockLen - hLen - 1, heap); if (ret != 0) { XFREE(dbMask, heap, DYNAMIC_TYPE_RSA); #ifdef WOLFSSL_SMALL_STACK XFREE(lHash, heap, DYNAMIC_TYPE_RSA_BUFFER); XFREE(seed, heap, DYNAMIC_TYPE_RSA_BUFFER); #endif return ret; } i = 0; idx = hLen + 1; while (idx < pkcsBlockLen && (word32)i < (pkcsBlockLen - hLen -1)) { pkcsBlock[idx] = dbMask[i++] ^ pkcsBlock[idx]; idx++; } XFREE(dbMask, heap, DYNAMIC_TYPE_RSA); /* create maskedSeed from seedMask */ idx = 0; pkcsBlock[idx++] = 0x00; /* create seedMask inline */ if ((ret = RsaMGF(mgf, pkcsBlock + hLen + 1, pkcsBlockLen - hLen - 1, pkcsBlock + 1, hLen, heap)) != 0) { #ifdef WOLFSSL_SMALL_STACK XFREE(lHash, heap, DYNAMIC_TYPE_RSA_BUFFER); XFREE(seed, heap, DYNAMIC_TYPE_RSA_BUFFER); #endif return ret; } /* xor created seedMask with seed to make maskedSeed */ i = 0; while (idx < (word32)(hLen + 1) && i < hLen) { pkcsBlock[idx] = pkcsBlock[idx] ^ seed[i++]; idx++; } #ifdef WOLFSSL_SMALL_STACK XFREE(lHash, heap, DYNAMIC_TYPE_RSA_BUFFER); XFREE(seed, heap, DYNAMIC_TYPE_RSA_BUFFER); #endif (void)padValue; return 0; } #endif /* !WC_NO_RSA_OAEP */ #ifdef WC_RSA_PSS /* 0x00 .. 0x00 0x01 | Salt | Gen Hash | 0xbc * XOR MGF over all bytes down to end of Salt * Gen Hash = HASH(8 * 0x00 | Message Hash | Salt) * * input Digest of the message. * inputLen Length of digest. * pkcsBlock Buffer to write to. * pkcsBlockLen Length of buffer to write to. * rng Random number generator (for salt). * htype Hash function to use. * mgf Mask generation function. * saltLen Length of salt to put in padding. * bits Length of key in bits. * heap Used for dynamic memory allocation. * returns 0 on success, PSS_SALTLEN_E when the salt length is invalid * and other negative values on error. 
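 *
 * For illustration, with a 2048-bit key (256 byte block), SHA-256
 * (hLen = 32) and the default salt length (saltLen = hLen = 32):
 *   maskLen = 256 - 1 - 32 = 223
 *   DB      = 190 zero bytes | 0x01 | 32 byte salt (223 bytes, masked by MGF)
 *   EM      = maskedDB | 32 byte hash | 0xbc trailer (256 bytes total)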
*/ static int RsaPad_PSS(const byte* input, word32 inputLen, byte* pkcsBlock, word32 pkcsBlockLen, WC_RNG* rng, enum wc_HashType hType, int mgf, int saltLen, int bits, void* heap) { int ret = 0; int hLen, i, o, maskLen, hiBits; byte* m; byte* s; #if defined(WOLFSSL_NO_MALLOC) && !defined(WOLFSSL_STATIC_MEMORY) byte msg[RSA_MAX_SIZE/8 + RSA_PSS_PAD_SZ]; #else byte* msg = NULL; #endif #if defined(WOLFSSL_PSS_LONG_SALT) || defined(WOLFSSL_PSS_SALT_LEN_DISCOVER) byte* salt; #else byte salt[WC_MAX_DIGEST_SIZE]; #endif #if defined(WOLFSSL_PSS_LONG_SALT) || defined(WOLFSSL_PSS_SALT_LEN_DISCOVER) if (pkcsBlockLen > RSA_MAX_SIZE/8) { return MEMORY_E; } #endif hLen = wc_HashGetDigestSize(hType); if (hLen < 0) return hLen; if ((int)inputLen != hLen) { return BAD_FUNC_ARG; } hiBits = (bits - 1) & 0x7; if (hiBits == 0) { /* Per RFC8017, set the leftmost 8emLen - emBits bits of the leftmost octet in DB to zero. */ *(pkcsBlock++) = 0; pkcsBlockLen--; } if (saltLen == RSA_PSS_SALT_LEN_DEFAULT) { saltLen = hLen; #ifdef WOLFSSL_SHA512 /* See FIPS 186-4 section 5.5 item (e). */ if (bits == 1024 && hLen == WC_SHA512_DIGEST_SIZE) { saltLen = RSA_PSS_SALT_MAX_SZ; } #endif } #ifndef WOLFSSL_PSS_LONG_SALT else if (saltLen > hLen) { return PSS_SALTLEN_E; } #endif #ifndef WOLFSSL_PSS_SALT_LEN_DISCOVER else if (saltLen < RSA_PSS_SALT_LEN_DEFAULT) { return PSS_SALTLEN_E; } #else else if (saltLen == RSA_PSS_SALT_LEN_DISCOVER) { saltLen = (int)pkcsBlockLen - hLen - 2; if (saltLen < 0) { return PSS_SALTLEN_E; } } else if (saltLen < RSA_PSS_SALT_LEN_DISCOVER) { return PSS_SALTLEN_E; } #endif if ((int)pkcsBlockLen - hLen < saltLen + 2) { return PSS_SALTLEN_E; } maskLen = pkcsBlockLen - 1 - hLen; #if defined(WOLFSSL_PSS_LONG_SALT) || defined(WOLFSSL_PSS_SALT_LEN_DISCOVER) #if !defined(WOLFSSL_NO_MALLOC) || defined(WOLFSSL_STATIC_MEMORY) msg = (byte*)XMALLOC(RSA_PSS_PAD_SZ + inputLen + saltLen, heap, DYNAMIC_TYPE_RSA_BUFFER); if (msg == NULL) { return MEMORY_E; } #endif salt = s = m = msg; XMEMSET(m, 0, RSA_PSS_PAD_SZ); m += RSA_PSS_PAD_SZ; XMEMCPY(m, input, inputLen); m += inputLen; o = (int)(m - s); if (saltLen > 0) { ret = wc_RNG_GenerateBlock(rng, m, saltLen); if (ret == 0) { m += saltLen; } } #else if (pkcsBlockLen < RSA_PSS_PAD_SZ + inputLen + saltLen) { #if !defined(WOLFSSL_NO_MALLOC) || defined(WOLFSSL_STATIC_MEMORY) msg = (byte*)XMALLOC(RSA_PSS_PAD_SZ + inputLen + saltLen, heap, DYNAMIC_TYPE_RSA_BUFFER); if (msg == NULL) { return MEMORY_E; } #endif m = msg; } else { m = pkcsBlock; } s = m; XMEMSET(m, 0, RSA_PSS_PAD_SZ); m += RSA_PSS_PAD_SZ; XMEMCPY(m, input, inputLen); m += inputLen; o = 0; if (saltLen > 0) { ret = wc_RNG_GenerateBlock(rng, salt, saltLen); if (ret == 0) { XMEMCPY(m, salt, saltLen); m += saltLen; } } #endif if (ret == 0) { /* Put Hash at end of pkcsBlock - 1 */ ret = wc_Hash(hType, s, (word32)(m - s), pkcsBlock + maskLen, hLen); } if (ret == 0) { /* Set the last eight bits or trailer field to the octet 0xbc */ pkcsBlock[pkcsBlockLen - 1] = RSA_PSS_PAD_TERM; ret = RsaMGF(mgf, pkcsBlock + maskLen, hLen, pkcsBlock, maskLen, heap); } if (ret == 0) { /* Clear the first high bit when "8emLen - emBits" is non-zero. 
where emBits = n modBits - 1 */ if (hiBits) pkcsBlock[0] &= (1 << hiBits) - 1; m = pkcsBlock + maskLen - saltLen - 1; *(m++) ^= 0x01; for (i = 0; i < saltLen; i++) { m[i] ^= salt[o + i]; } } #if !defined(WOLFSSL_NO_MALLOC) || defined(WOLFSSL_STATIC_MEMORY) if (msg != NULL) { XFREE(msg, heap, DYNAMIC_TYPE_RSA_BUFFER); } #endif return ret; } #endif /* WC_RSA_PSS */ #endif /* !WC_NO_RNG */ static int RsaPad(const byte* input, word32 inputLen, byte* pkcsBlock, word32 pkcsBlockLen, byte padValue, WC_RNG* rng) { if (input == NULL || inputLen == 0 || pkcsBlock == NULL || pkcsBlockLen == 0) { return BAD_FUNC_ARG; } if (pkcsBlockLen - RSA_MIN_PAD_SZ < inputLen) { WOLFSSL_MSG("RsaPad error, invalid length"); return RSA_PAD_E; } pkcsBlock[0] = 0x0; /* set first byte to zero and advance */ pkcsBlock++; pkcsBlockLen--; pkcsBlock[0] = padValue; /* insert padValue */ if (padValue == RSA_BLOCK_TYPE_1) { /* pad with 0xff bytes */ XMEMSET(&pkcsBlock[1], 0xFF, pkcsBlockLen - inputLen - 2); } else { #if !defined(WOLFSSL_RSA_VERIFY_ONLY) && !defined(WC_NO_RNG) /* pad with non-zero random bytes */ word32 padLen, i; int ret; padLen = pkcsBlockLen - inputLen - 1; ret = wc_RNG_GenerateBlock(rng, &pkcsBlock[1], padLen); if (ret != 0) { return ret; } /* remove zeros */ for (i = 1; i < padLen; i++) { if (pkcsBlock[i] == 0) pkcsBlock[i] = 0x01; } #else (void)rng; return RSA_WRONG_TYPE_E; #endif } pkcsBlock[pkcsBlockLen-inputLen-1] = 0; /* separator */ XMEMCPY(pkcsBlock+pkcsBlockLen-inputLen, input, inputLen); return 0; } /* helper function to direct which padding is used */ int wc_RsaPad_ex(const byte* input, word32 inputLen, byte* pkcsBlock, word32 pkcsBlockLen, byte padValue, WC_RNG* rng, int padType, enum wc_HashType hType, int mgf, byte* optLabel, word32 labelLen, int saltLen, int bits, void* heap) { int ret; switch (padType) { case WC_RSA_PKCSV15_PAD: /*WOLFSSL_MSG("wolfSSL Using RSA PKCSV15 padding");*/ ret = RsaPad(input, inputLen, pkcsBlock, pkcsBlockLen, padValue, rng); break; #ifndef WC_NO_RNG #ifndef WC_NO_RSA_OAEP case WC_RSA_OAEP_PAD: WOLFSSL_MSG("wolfSSL Using RSA OAEP padding"); ret = RsaPad_OAEP(input, inputLen, pkcsBlock, pkcsBlockLen, padValue, rng, hType, mgf, optLabel, labelLen, heap); break; #endif #ifdef WC_RSA_PSS case WC_RSA_PSS_PAD: WOLFSSL_MSG("wolfSSL Using RSA PSS padding"); ret = RsaPad_PSS(input, inputLen, pkcsBlock, pkcsBlockLen, rng, hType, mgf, saltLen, bits, heap); break; #endif #endif /* !WC_NO_RNG */ #ifdef WC_RSA_NO_PADDING case WC_RSA_NO_PAD: WOLFSSL_MSG("wolfSSL Using NO padding"); /* In the case of no padding being used check that input is exactly * the RSA key length */ if (bits <= 0 || inputLen != ((word32)bits/WOLFSSL_BIT_SIZE)) { WOLFSSL_MSG("Bad input size"); ret = RSA_PAD_E; } else { XMEMCPY(pkcsBlock, input, inputLen); ret = 0; } break; #endif default: WOLFSSL_MSG("Unknown RSA Pad Type"); ret = RSA_PAD_E; } /* silence warning if not used with padding scheme */ (void)input; (void)inputLen; (void)pkcsBlock; (void)pkcsBlockLen; (void)padValue; (void)rng; (void)padType; (void)hType; (void)mgf; (void)optLabel; (void)labelLen; (void)saltLen; (void)bits; (void)heap; return ret; } #endif /* WOLFSSL_RSA_VERIFY_ONLY */ /* UnPadding */ #ifndef WC_NO_RSA_OAEP /* UnPad plaintext, set start to *output, return length of plaintext, * < 0 on error */ static int RsaUnPad_OAEP(byte *pkcsBlock, unsigned int pkcsBlockLen, byte **output, enum wc_HashType hType, int mgf, byte* optLabel, word32 labelLen, void* heap) { int hLen; int ret; byte h[WC_MAX_DIGEST_SIZE]; /* max digest size */ byte* 
tmp; word32 idx; /* no label is allowed, but catch if no label provided and length > 0 */ if (optLabel == NULL && labelLen > 0) { return BUFFER_E; } hLen = wc_HashGetDigestSize(hType); if ((hLen < 0) || (pkcsBlockLen < (2 * (word32)hLen + 2))) { return BAD_FUNC_ARG; } tmp = (byte*)XMALLOC(pkcsBlockLen, heap, DYNAMIC_TYPE_RSA_BUFFER); if (tmp == NULL) { return MEMORY_E; } XMEMSET(tmp, 0, pkcsBlockLen); /* find seedMask value */ if ((ret = RsaMGF(mgf, (byte*)(pkcsBlock + (hLen + 1)), pkcsBlockLen - hLen - 1, tmp, hLen, heap)) != 0) { XFREE(tmp, heap, DYNAMIC_TYPE_RSA_BUFFER); return ret; } /* xor seedMask value with maskedSeed to get seed value */ for (idx = 0; idx < (word32)hLen; idx++) { tmp[idx] = tmp[idx] ^ pkcsBlock[1 + idx]; } /* get dbMask value */ if ((ret = RsaMGF(mgf, tmp, hLen, tmp + hLen, pkcsBlockLen - hLen - 1, heap)) != 0) { XFREE(tmp, NULL, DYNAMIC_TYPE_RSA_BUFFER); return ret; } /* get DB value by doing maskedDB xor dbMask */ for (idx = 0; idx < (pkcsBlockLen - hLen - 1); idx++) { pkcsBlock[hLen + 1 + idx] = pkcsBlock[hLen + 1 + idx] ^ tmp[idx + hLen]; } /* done with use of tmp buffer */ XFREE(tmp, heap, DYNAMIC_TYPE_RSA_BUFFER); /* advance idx to index of PS and msg separator, account for PS size of 0*/ idx = hLen + 1 + hLen; while (idx < pkcsBlockLen && pkcsBlock[idx] == 0) {idx++;} /* create hash of label for comparison with hash sent */ if ((ret = wc_Hash(hType, optLabel, labelLen, h, hLen)) != 0) { return ret; } /* say no to chosen ciphertext attack. Comparison of lHash, Y, and separator value needs to all happen in constant time. Attackers should not be able to get error condition from the timing of these checks. */ ret = 0; ret |= ConstantCompare(pkcsBlock + hLen + 1, h, hLen); ret += pkcsBlock[idx++] ^ 0x01; /* separator value is 0x01 */ ret += pkcsBlock[0] ^ 0x00; /* Y, the first value, should be 0 */ /* Return 0 data length on error. */ idx = ctMaskSelInt(ctMaskEq(ret, 0), idx, pkcsBlockLen); /* adjust pointer to correct location in array and return size of M */ *output = (byte*)(pkcsBlock + idx); return pkcsBlockLen - idx; } #endif /* WC_NO_RSA_OAEP */ #ifdef WC_RSA_PSS /* 0x00 .. 0x00 0x01 | Salt | Gen Hash | 0xbc * MGF over all bytes down to end of Salt * * pkcsBlock Buffer holding decrypted data. * pkcsBlockLen Length of buffer. * htype Hash function to use. * mgf Mask generation function. * saltLen Length of salt to put in padding. * bits Length of key in bits. * heap Used for dynamic memory allocation. * returns the sum of salt length and SHA-256 digest size on success. * Otherwise, PSS_SALTLEN_E for an incorrect salt length, * WC_KEY_SIZE_E for an incorrect encoded message (EM) size and other negative values on error. 
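 *
 * On success *output is left pointing at the recovered salt inside
 * pkcsBlock, and the digest length added to the salt length is that of
 * hType (not necessarily SHA-256).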
*/ static int RsaUnPad_PSS(byte *pkcsBlock, unsigned int pkcsBlockLen, byte **output, enum wc_HashType hType, int mgf, int saltLen, int bits, void* heap) { int ret; byte* tmp; int hLen, i, maskLen; #ifdef WOLFSSL_SHA512 int orig_bits = bits; #endif #if defined(WOLFSSL_NO_MALLOC) && !defined(WOLFSSL_STATIC_MEMORY) byte tmp_buf[RSA_MAX_SIZE/8]; tmp = tmp_buf; if (pkcsBlockLen > RSA_MAX_SIZE/8) { return MEMORY_E; } #endif hLen = wc_HashGetDigestSize(hType); if (hLen < 0) return hLen; bits = (bits - 1) & 0x7; if ((pkcsBlock[0] & (0xff << bits)) != 0) { return BAD_PADDING_E; } if (bits == 0) { pkcsBlock++; pkcsBlockLen--; } maskLen = (int)pkcsBlockLen - 1 - hLen; if (maskLen < 0) { WOLFSSL_MSG("RsaUnPad_PSS: Hash too large"); return WC_KEY_SIZE_E; } if (saltLen == RSA_PSS_SALT_LEN_DEFAULT) { saltLen = hLen; #ifdef WOLFSSL_SHA512 /* See FIPS 186-4 section 5.5 item (e). */ if (orig_bits == 1024 && hLen == WC_SHA512_DIGEST_SIZE) saltLen = RSA_PSS_SALT_MAX_SZ; #endif } #ifndef WOLFSSL_PSS_LONG_SALT else if (saltLen > hLen) return PSS_SALTLEN_E; #endif #ifndef WOLFSSL_PSS_SALT_LEN_DISCOVER else if (saltLen < RSA_PSS_SALT_LEN_DEFAULT) return PSS_SALTLEN_E; if (maskLen < saltLen + 1) { return PSS_SALTLEN_E; } #else else if (saltLen < RSA_PSS_SALT_LEN_DISCOVER) return PSS_SALTLEN_E; if (saltLen != RSA_PSS_SALT_LEN_DISCOVER && maskLen < saltLen + 1) { return WC_KEY_SIZE_E; } #endif if (pkcsBlock[pkcsBlockLen - 1] != RSA_PSS_PAD_TERM) { WOLFSSL_MSG("RsaUnPad_PSS: Padding Term Error"); return BAD_PADDING_E; } #if !defined(WOLFSSL_NO_MALLOC) || defined(WOLFSSL_STATIC_MEMORY) tmp = (byte*)XMALLOC(maskLen, heap, DYNAMIC_TYPE_RSA_BUFFER); if (tmp == NULL) { return MEMORY_E; } #endif if ((ret = RsaMGF(mgf, pkcsBlock + maskLen, hLen, tmp, maskLen, heap)) != 0) { XFREE(tmp, heap, DYNAMIC_TYPE_RSA_BUFFER); return ret; } tmp[0] &= (1 << bits) - 1; pkcsBlock[0] &= (1 << bits) - 1; #ifdef WOLFSSL_PSS_SALT_LEN_DISCOVER if (saltLen == RSA_PSS_SALT_LEN_DISCOVER) { for (i = 0; i < maskLen - 1; i++) { if (tmp[i] != pkcsBlock[i]) { break; } } if (tmp[i] != (pkcsBlock[i] ^ 0x01)) { XFREE(tmp, heap, DYNAMIC_TYPE_RSA_BUFFER); WOLFSSL_MSG("RsaUnPad_PSS: Padding Error Match"); return PSS_SALTLEN_RECOVER_E; } saltLen = maskLen - (i + 1); } else #endif { for (i = 0; i < maskLen - 1 - saltLen; i++) { if (tmp[i] != pkcsBlock[i]) { XFREE(tmp, heap, DYNAMIC_TYPE_RSA_BUFFER); WOLFSSL_MSG("RsaUnPad_PSS: Padding Error Match"); return PSS_SALTLEN_E; } } if (tmp[i] != (pkcsBlock[i] ^ 0x01)) { XFREE(tmp, heap, DYNAMIC_TYPE_RSA_BUFFER); WOLFSSL_MSG("RsaUnPad_PSS: Padding Error End"); return PSS_SALTLEN_E; } } for (i++; i < maskLen; i++) pkcsBlock[i] ^= tmp[i]; #if !defined(WOLFSSL_NO_MALLOC) || defined(WOLFSSL_STATIC_MEMORY) XFREE(tmp, heap, DYNAMIC_TYPE_RSA_BUFFER); #endif *output = pkcsBlock + maskLen - saltLen; return saltLen + hLen; } #endif /* UnPad plaintext, set start to *output, return length of plaintext, * < 0 on error */ static int RsaUnPad(const byte *pkcsBlock, unsigned int pkcsBlockLen, byte **output, byte padValue) { int ret = BAD_FUNC_ARG; word16 i; #ifndef WOLFSSL_RSA_VERIFY_ONLY byte invalid = 0; #endif if (output == NULL || pkcsBlockLen < 2 || pkcsBlockLen > 0xFFFF) { return BAD_FUNC_ARG; } if (padValue == RSA_BLOCK_TYPE_1) { /* First byte must be 0x00 and Second byte, block type, 0x01 */ if (pkcsBlock[0] != 0 || pkcsBlock[1] != RSA_BLOCK_TYPE_1) { WOLFSSL_MSG("RsaUnPad error, invalid formatting"); return RSA_PAD_E; } /* check the padding until we find the separator */ for (i = 2; i < pkcsBlockLen && pkcsBlock[i++] == 
0xFF; ) { } /* Minimum of 11 bytes of pre-message data and must have separator. */ if (i < RSA_MIN_PAD_SZ || pkcsBlock[i-1] != 0) { WOLFSSL_MSG("RsaUnPad error, bad formatting"); return RSA_PAD_E; } *output = (byte *)(pkcsBlock + i); ret = pkcsBlockLen - i; } #ifndef WOLFSSL_RSA_VERIFY_ONLY else { word16 j; word16 pastSep = 0; /* Decrypted with private key - unpad must be constant time. */ for (i = 0, j = 2; j < pkcsBlockLen; j++) { /* Update i if not passed the separator and at separator. */ i |= (~pastSep) & ctMask16Eq(pkcsBlock[j], 0x00) & (j + 1); pastSep |= ctMask16Eq(pkcsBlock[j], 0x00); } /* Minimum of 11 bytes of pre-message data - including leading 0x00. */ invalid |= ctMaskLT(i, RSA_MIN_PAD_SZ); /* Must have seen separator. */ invalid |= ~pastSep; /* First byte must be 0x00. */ invalid |= ctMaskNotEq(pkcsBlock[0], 0x00); /* Check against expected block type: padValue */ invalid |= ctMaskNotEq(pkcsBlock[1], padValue); *output = (byte *)(pkcsBlock + i); ret = ((int)~invalid) & (pkcsBlockLen - i); } #endif return ret; } /* helper function to direct unpadding * * bits is the key modulus size in bits */ int wc_RsaUnPad_ex(byte* pkcsBlock, word32 pkcsBlockLen, byte** out, byte padValue, int padType, enum wc_HashType hType, int mgf, byte* optLabel, word32 labelLen, int saltLen, int bits, void* heap) { int ret; switch (padType) { case WC_RSA_PKCSV15_PAD: /*WOLFSSL_MSG("wolfSSL Using RSA PKCSV15 un-padding");*/ ret = RsaUnPad(pkcsBlock, pkcsBlockLen, out, padValue); break; #ifndef WC_NO_RSA_OAEP case WC_RSA_OAEP_PAD: WOLFSSL_MSG("wolfSSL Using RSA OAEP un-padding"); ret = RsaUnPad_OAEP((byte*)pkcsBlock, pkcsBlockLen, out, hType, mgf, optLabel, labelLen, heap); break; #endif #ifdef WC_RSA_PSS case WC_RSA_PSS_PAD: WOLFSSL_MSG("wolfSSL Using RSA PSS un-padding"); ret = RsaUnPad_PSS((byte*)pkcsBlock, pkcsBlockLen, out, hType, mgf, saltLen, bits, heap); break; #endif #ifdef WC_RSA_NO_PADDING case WC_RSA_NO_PAD: WOLFSSL_MSG("wolfSSL Using NO un-padding"); /* In the case of no padding being used check that input is exactly * the RSA key length */ if (bits <= 0 || pkcsBlockLen != ((word32)(bits+WOLFSSL_BIT_SIZE-1)/WOLFSSL_BIT_SIZE)) { WOLFSSL_MSG("Bad input size"); ret = RSA_PAD_E; } else { if (out != NULL) { *out = pkcsBlock; } ret = pkcsBlockLen; } break; #endif /* WC_RSA_NO_PADDING */ default: WOLFSSL_MSG("Unknown RSA UnPad Type"); ret = RSA_PAD_E; } /* silence warning if not used with padding scheme */ (void)hType; (void)mgf; (void)optLabel; (void)labelLen; (void)saltLen; (void)bits; (void)heap; return ret; } #ifdef WC_RSA_NONBLOCK static int wc_RsaFunctionNonBlock(const byte* in, word32 inLen, byte* out, word32* outLen, int type, RsaKey* key) { int ret = 0; word32 keyLen, len; if (key == NULL || key->nb == NULL) { return BAD_FUNC_ARG; } if (key->nb->exptmod.state == TFM_EXPTMOD_NB_INIT) { if (mp_init(&key->nb->tmp) != MP_OKAY) { ret = MP_INIT_E; } if (ret == 0) { if (mp_read_unsigned_bin(&key->nb->tmp, (byte*)in, inLen) != MP_OKAY) { ret = MP_READ_E; } } } if (ret == 0) { switch(type) { case RSA_PRIVATE_DECRYPT: case RSA_PRIVATE_ENCRYPT: ret = fp_exptmod_nb(&key->nb->exptmod, &key->nb->tmp, &key->d, &key->n, &key->nb->tmp); if (ret == FP_WOULDBLOCK) return ret; if (ret != MP_OKAY) ret = MP_EXPTMOD_E; break; case RSA_PUBLIC_ENCRYPT: case RSA_PUBLIC_DECRYPT: ret = fp_exptmod_nb(&key->nb->exptmod, &key->nb->tmp, &key->e, &key->n, &key->nb->tmp); if (ret == FP_WOULDBLOCK) return ret; if (ret != MP_OKAY) ret = MP_EXPTMOD_E; break; default: ret = RSA_WRONG_TYPE_E; break; } } if (ret == 0) { keyLen 
= wc_RsaEncryptSize(key); if (keyLen > *outLen) ret = RSA_BUFFER_E; } if (ret == 0) { len = mp_unsigned_bin_size(&key->nb->tmp); /* pad front w/ zeros to match key length */ while (len < keyLen) { *out++ = 0x00; len++; } *outLen = keyLen; /* convert */ if (mp_to_unsigned_bin(&key->nb->tmp, out) != MP_OKAY) { ret = MP_TO_E; } } mp_clear(&key->nb->tmp); return ret; } #endif /* WC_RSA_NONBLOCK */ #ifdef WOLFSSL_XILINX_CRYPT /* * Xilinx hardened crypto acceleration. * * Returns 0 on success and negative values on error. */ static int wc_RsaFunctionSync(const byte* in, word32 inLen, byte* out, word32* outLen, int type, RsaKey* key, WC_RNG* rng) { int ret = 0; word32 keyLen; (void)rng; keyLen = wc_RsaEncryptSize(key); if (keyLen > *outLen) { WOLFSSL_MSG("Output buffer is not big enough"); return BAD_FUNC_ARG; } if (inLen != keyLen) { WOLFSSL_MSG("Expected that inLen equals RSA key length"); return BAD_FUNC_ARG; } switch(type) { case RSA_PRIVATE_DECRYPT: case RSA_PRIVATE_ENCRYPT: #ifdef WOLFSSL_XILINX_CRYPTO_OLD /* Currently public exponent is loaded by default. * In SDK 2017.1 RSA exponent values are expected to be of 4 bytes * leading to private key operations with Xsecure_RsaDecrypt not being * supported */ ret = RSA_WRONG_TYPE_E; #else { byte *d; int dSz; XSecure_Rsa rsa; dSz = mp_unsigned_bin_size(&key->d); d = (byte*)XMALLOC(dSz, key->heap, DYNAMIC_TYPE_PRIVATE_KEY); if (d == NULL) { ret = MEMORY_E; } else { ret = mp_to_unsigned_bin(&key->d, d); XSecure_RsaInitialize(&rsa, key->mod, NULL, d); } if (ret == 0) { if (XSecure_RsaPrivateDecrypt(&rsa, (u8*)in, inLen, out) != XST_SUCCESS) { ret = BAD_STATE_E; } } if (d != NULL) { XFREE(d, key->heap, DYNAMIC_TYPE_PRIVATE_KEY); } } #endif break; case RSA_PUBLIC_ENCRYPT: case RSA_PUBLIC_DECRYPT: #ifdef WOLFSSL_XILINX_CRYPTO_OLD if (XSecure_RsaDecrypt(&(key->xRsa), in, out) != XST_SUCCESS) { ret = BAD_STATE_E; } #else /* starting at Xilinx release 2019 the function XSecure_RsaDecrypt was removed */ if (XSecure_RsaPublicEncrypt(&(key->xRsa), (u8*)in, inLen, out) != XST_SUCCESS) { WOLFSSL_MSG("Error happened when calling hardware RSA public operation"); ret = BAD_STATE_E; } #endif break; default: ret = RSA_WRONG_TYPE_E; } *outLen = keyLen; return ret; } #elif defined(WOLFSSL_AFALG_XILINX_RSA) #ifndef ERROR_OUT #define ERROR_OUT(x) ret = (x); goto done #endif static const char WC_TYPE_ASYMKEY[] = "skcipher"; static const char WC_NAME_RSA[] = "xilinx-zynqmp-rsa"; #ifndef MAX_XILINX_RSA_KEY /* max key size of 4096 bits / 512 bytes */ #define MAX_XILINX_RSA_KEY 512 #endif static const byte XILINX_RSA_FLAG[] = {0x1}; /* AF_ALG implementation of RSA */ static int wc_RsaFunctionSync(const byte* in, word32 inLen, byte* out, word32* outLen, int type, RsaKey* key, WC_RNG* rng) { struct msghdr msg; struct cmsghdr* cmsg; struct iovec iov; byte* keyBuf = NULL; word32 keyBufSz = 0; char cbuf[CMSG_SPACE(4) + CMSG_SPACE(sizeof(struct af_alg_iv) + 1)] = {0}; int ret = 0; int op = 0; /* decryption vs encryption flag */ word32 keyLen; /* input and output buffer need to be aligned */ ALIGN64 byte outBuf[MAX_XILINX_RSA_KEY]; ALIGN64 byte inBuf[MAX_XILINX_RSA_KEY]; XMEMSET(&msg, 0, sizeof(struct msghdr)); (void)rng; keyLen = wc_RsaEncryptSize(key); if (keyLen > *outLen) { ERROR_OUT(RSA_BUFFER_E); } if (keyLen > MAX_XILINX_RSA_KEY) { WOLFSSL_MSG("RSA key size larger than supported"); ERROR_OUT(BAD_FUNC_ARG); } if ((keyBuf = (byte*)XMALLOC(keyLen * 2, key->heap, DYNAMIC_TYPE_KEY)) == NULL) { ERROR_OUT(MEMORY_E); } if ((ret = mp_to_unsigned_bin(&(key->n), keyBuf)) != MP_OKAY) { 
ERROR_OUT(MP_TO_E); } switch(type) { case RSA_PRIVATE_DECRYPT: case RSA_PRIVATE_ENCRYPT: op = 1; /* set as decrypt */ { keyBufSz = mp_unsigned_bin_size(&(key->d)); if ((mp_to_unsigned_bin(&(key->d), keyBuf + keyLen)) != MP_OKAY) { ERROR_OUT(MP_TO_E); } } break; case RSA_PUBLIC_DECRYPT: case RSA_PUBLIC_ENCRYPT: { word32 exp = 0; word32 eSz = mp_unsigned_bin_size(&(key->e)); if ((mp_to_unsigned_bin(&(key->e), (byte*)&exp + (sizeof(word32) - eSz))) != MP_OKAY) { ERROR_OUT(MP_TO_E); } keyBufSz = sizeof(word32); XMEMCPY(keyBuf + keyLen, (byte*)&exp, keyBufSz); break; } default: ERROR_OUT(RSA_WRONG_TYPE_E); } keyBufSz += keyLen; /* add size of modulus */ /* check for existing sockets before creating new ones */ if (key->alFd > 0) { close(key->alFd); key->alFd = WC_SOCK_NOTSET; } if (key->rdFd > 0) { close(key->rdFd); key->rdFd = WC_SOCK_NOTSET; } /* create new sockets and set the key to use */ if ((key->alFd = wc_Afalg_Socket()) < 0) { WOLFSSL_MSG("Unable to create socket"); ERROR_OUT(key->alFd); } if ((key->rdFd = wc_Afalg_CreateRead(key->alFd, WC_TYPE_ASYMKEY, WC_NAME_RSA)) < 0) { WOLFSSL_MSG("Unable to bind and create read/send socket"); ERROR_OUT(key->rdFd); } if ((ret = setsockopt(key->alFd, SOL_ALG, ALG_SET_KEY, keyBuf, keyBufSz)) < 0) { WOLFSSL_MSG("Error setting RSA key"); ERROR_OUT(ret); } msg.msg_control = cbuf; msg.msg_controllen = sizeof(cbuf); cmsg = CMSG_FIRSTHDR(&msg); if ((ret = wc_Afalg_SetOp(cmsg, op)) < 0) { ERROR_OUT(ret); } /* set flag in IV spot, needed for Xilinx hardware acceleration use */ cmsg = CMSG_NXTHDR(&msg, cmsg); if ((ret = wc_Afalg_SetIv(cmsg, (byte*)XILINX_RSA_FLAG, sizeof(XILINX_RSA_FLAG))) != 0) { ERROR_OUT(ret); } /* compose and send msg */ XMEMCPY(inBuf, (byte*)in, inLen); /* for alignment */ iov.iov_base = inBuf; iov.iov_len = inLen; msg.msg_iov = &iov; msg.msg_iovlen = 1; if ((ret = sendmsg(key->rdFd, &msg, 0)) <= 0) { ERROR_OUT(WC_AFALG_SOCK_E); } if ((ret = read(key->rdFd, outBuf, inLen)) <= 0) { ERROR_OUT(WC_AFALG_SOCK_E); } XMEMCPY(out, outBuf, ret); *outLen = keyLen; done: /* clear key data and free buffer */ if (keyBuf != NULL) { ForceZero(keyBuf, keyBufSz); } XFREE(keyBuf, key->heap, DYNAMIC_TYPE_KEY); if (key->alFd > 0) { close(key->alFd); key->alFd = WC_SOCK_NOTSET; } if (key->rdFd > 0) { close(key->rdFd); key->rdFd = WC_SOCK_NOTSET; } return ret; } #else static int wc_RsaFunctionSync(const byte* in, word32 inLen, byte* out, word32* outLen, int type, RsaKey* key, WC_RNG* rng) { #ifndef WOLFSSL_SP_MATH #ifdef WOLFSSL_SMALL_STACK mp_int* tmp; #ifdef WC_RSA_BLINDING mp_int* rnd; mp_int* rndi; #endif #else mp_int tmp[1]; #ifdef WC_RSA_BLINDING mp_int rnd[1], rndi[1]; #endif #endif int ret = 0; word32 keyLen = 0; #endif #ifdef WOLFSSL_HAVE_SP_RSA #ifndef WOLFSSL_SP_NO_2048 if (mp_count_bits(&key->n) == 2048) { switch(type) { #ifndef WOLFSSL_RSA_PUBLIC_ONLY case RSA_PRIVATE_DECRYPT: case RSA_PRIVATE_ENCRYPT: #ifdef WC_RSA_BLINDING if (rng == NULL) return MISSING_RNG_E; #endif #ifndef RSA_LOW_MEM if ((mp_count_bits(&key->p) == 1024) && (mp_count_bits(&key->q) == 1024)) { return sp_RsaPrivate_2048(in, inLen, &key->d, &key->p, &key->q, &key->dP, &key->dQ, &key->u, &key->n, out, outLen); } break; #else return sp_RsaPrivate_2048(in, inLen, &key->d, NULL, NULL, NULL, NULL, NULL, &key->n, out, outLen); #endif #endif case RSA_PUBLIC_ENCRYPT: case RSA_PUBLIC_DECRYPT: return sp_RsaPublic_2048(in, inLen, &key->e, &key->n, out, outLen); } } #endif #ifndef WOLFSSL_SP_NO_3072 if (mp_count_bits(&key->n) == 3072) { switch(type) { #ifndef WOLFSSL_RSA_PUBLIC_ONLY case 
RSA_PRIVATE_DECRYPT: case RSA_PRIVATE_ENCRYPT: #ifdef WC_RSA_BLINDING if (rng == NULL) return MISSING_RNG_E; #endif #ifndef RSA_LOW_MEM if ((mp_count_bits(&key->p) == 1536) && (mp_count_bits(&key->q) == 1536)) { return sp_RsaPrivate_3072(in, inLen, &key->d, &key->p, &key->q, &key->dP, &key->dQ, &key->u, &key->n, out, outLen); } break; #else return sp_RsaPrivate_3072(in, inLen, &key->d, NULL, NULL, NULL, NULL, NULL, &key->n, out, outLen); #endif #endif case RSA_PUBLIC_ENCRYPT: case RSA_PUBLIC_DECRYPT: return sp_RsaPublic_3072(in, inLen, &key->e, &key->n, out, outLen); } } #endif #ifdef WOLFSSL_SP_4096 if (mp_count_bits(&key->n) == 4096) { switch(type) { #ifndef WOLFSSL_RSA_PUBLIC_ONLY case RSA_PRIVATE_DECRYPT: case RSA_PRIVATE_ENCRYPT: #ifdef WC_RSA_BLINDING if (rng == NULL) return MISSING_RNG_E; #endif #ifndef RSA_LOW_MEM if ((mp_count_bits(&key->p) == 2048) && (mp_count_bits(&key->q) == 2048)) { return sp_RsaPrivate_4096(in, inLen, &key->d, &key->p, &key->q, &key->dP, &key->dQ, &key->u, &key->n, out, outLen); } break; #else return sp_RsaPrivate_4096(in, inLen, &key->d, NULL, NULL, NULL, NULL, NULL, &key->n, out, outLen); #endif #endif case RSA_PUBLIC_ENCRYPT: case RSA_PUBLIC_DECRYPT: return sp_RsaPublic_4096(in, inLen, &key->e, &key->n, out, outLen); } } #endif #endif /* WOLFSSL_HAVE_SP_RSA */ #ifdef WOLFSSL_SP_MATH (void)rng; WOLFSSL_MSG("SP Key Size Error"); return WC_KEY_SIZE_E; #else (void)rng; #ifdef WOLFSSL_SMALL_STACK tmp = (mp_int*)XMALLOC(sizeof(mp_int), key->heap, DYNAMIC_TYPE_RSA); if (tmp == NULL) return MEMORY_E; #ifdef WC_RSA_BLINDING rnd = (mp_int*)XMALLOC(sizeof(mp_int) * 2, key->heap, DYNAMIC_TYPE_RSA); if (rnd == NULL) { XFREE(tmp, key->heap, DYNAMIC_TYPE_RSA); return MEMORY_E; } rndi = rnd + 1; #endif /* WC_RSA_BLINDING */ #endif /* WOLFSSL_SMALL_STACK */ if (mp_init(tmp) != MP_OKAY) ret = MP_INIT_E; #ifdef WC_RSA_BLINDING if (ret == 0) { if (type == RSA_PRIVATE_DECRYPT || type == RSA_PRIVATE_ENCRYPT) { if (mp_init_multi(rnd, rndi, NULL, NULL, NULL, NULL) != MP_OKAY) { mp_clear(tmp); ret = MP_INIT_E; } } } #endif #ifndef TEST_UNPAD_CONSTANT_TIME if (ret == 0 && mp_read_unsigned_bin(tmp, (byte*)in, inLen) != MP_OKAY) ret = MP_READ_E; if (ret == 0) { switch(type) { #ifndef WOLFSSL_RSA_PUBLIC_ONLY case RSA_PRIVATE_DECRYPT: case RSA_PRIVATE_ENCRYPT: { #if defined(WC_RSA_BLINDING) && !defined(WC_NO_RNG) /* blind */ ret = mp_rand(rnd, get_digit_count(&key->n), rng); /* rndi = 1/rnd mod n */ if (ret == 0 && mp_invmod(rnd, &key->n, rndi) != MP_OKAY) ret = MP_INVMOD_E; /* rnd = rnd^e */ if (ret == 0 && mp_exptmod(rnd, &key->e, &key->n, rnd) != MP_OKAY) ret = MP_EXPTMOD_E; /* tmp = tmp*rnd mod n */ if (ret == 0 && mp_mulmod(tmp, rnd, &key->n, tmp) != MP_OKAY) ret = MP_MULMOD_E; #endif /* WC_RSA_BLINDING && !WC_NO_RNG */ #ifdef RSA_LOW_MEM /* half as much memory but twice as slow */ if (ret == 0 && mp_exptmod(tmp, &key->d, &key->n, tmp) != MP_OKAY) ret = MP_EXPTMOD_E; #else if (ret == 0) { #ifdef WOLFSSL_SMALL_STACK mp_int* tmpa; mp_int* tmpb = NULL; #else mp_int tmpa[1], tmpb[1]; #endif int cleara = 0, clearb = 0; #ifdef WOLFSSL_SMALL_STACK tmpa = (mp_int*)XMALLOC(sizeof(mp_int) * 2, key->heap, DYNAMIC_TYPE_RSA); if (tmpa != NULL) tmpb = tmpa + 1; else ret = MEMORY_E; #endif if (ret == 0) { if (mp_init(tmpa) != MP_OKAY) ret = MP_INIT_E; else cleara = 1; } if (ret == 0) { if (mp_init(tmpb) != MP_OKAY) ret = MP_INIT_E; else clearb = 1; } /* tmpa = tmp^dP mod p */ if (ret == 0 && mp_exptmod(tmp, &key->dP, &key->p, tmpa) != MP_OKAY) ret = MP_EXPTMOD_E; /* tmpb = tmp^dQ mod q */ if 
(ret == 0 && mp_exptmod(tmp, &key->dQ, &key->q, tmpb) != MP_OKAY) ret = MP_EXPTMOD_E; /* tmp = (tmpa - tmpb) * qInv (mod p) */ if (ret == 0 && mp_sub(tmpa, tmpb, tmp) != MP_OKAY) ret = MP_SUB_E; if (ret == 0 && mp_mulmod(tmp, &key->u, &key->p, tmp) != MP_OKAY) ret = MP_MULMOD_E; /* tmp = tmpb + q * tmp */ if (ret == 0 && mp_mul(tmp, &key->q, tmp) != MP_OKAY) ret = MP_MUL_E; if (ret == 0 && mp_add(tmp, tmpb, tmp) != MP_OKAY) ret = MP_ADD_E; #ifdef WOLFSSL_SMALL_STACK if (tmpa != NULL) #endif { if (cleara) mp_clear(tmpa); if (clearb) mp_clear(tmpb); #ifdef WOLFSSL_SMALL_STACK XFREE(tmpa, key->heap, DYNAMIC_TYPE_RSA); #endif } } /* tmpa/b scope */ #endif /* RSA_LOW_MEM */ #ifdef WC_RSA_BLINDING /* unblind */ if (ret == 0 && mp_mulmod(tmp, rndi, &key->n, tmp) != MP_OKAY) ret = MP_MULMOD_E; #endif /* WC_RSA_BLINDING */ break; } #endif case RSA_PUBLIC_ENCRYPT: case RSA_PUBLIC_DECRYPT: if (mp_exptmod_nct(tmp, &key->e, &key->n, tmp) != MP_OKAY) ret = MP_EXPTMOD_E; break; default: ret = RSA_WRONG_TYPE_E; break; } } if (ret == 0) { keyLen = wc_RsaEncryptSize(key); if (keyLen > *outLen) ret = RSA_BUFFER_E; } #ifndef WOLFSSL_XILINX_CRYPT if (ret == 0) { *outLen = keyLen; if (mp_to_unsigned_bin_len(tmp, out, keyLen) != MP_OKAY) ret = MP_TO_E; } #endif #else (void)type; (void)key; (void)keyLen; XMEMCPY(out, in, inLen); *outLen = inLen; #endif mp_clear(tmp); #ifdef WOLFSSL_SMALL_STACK XFREE(tmp, key->heap, DYNAMIC_TYPE_RSA); #endif #ifdef WC_RSA_BLINDING if (type == RSA_PRIVATE_DECRYPT || type == RSA_PRIVATE_ENCRYPT) { mp_clear(rndi); mp_clear(rnd); } #ifdef WOLFSSL_SMALL_STACK XFREE(rnd, key->heap, DYNAMIC_TYPE_RSA); #endif #endif /* WC_RSA_BLINDING */ return ret; #endif /* WOLFSSL_SP_MATH */ } #endif #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_RSA) static int wc_RsaFunctionAsync(const byte* in, word32 inLen, byte* out, word32* outLen, int type, RsaKey* key, WC_RNG* rng) { int ret = 0; (void)rng; #ifdef WOLFSSL_ASYNC_CRYPT_TEST if (wc_AsyncTestInit(&key->asyncDev, ASYNC_TEST_RSA_FUNC)) { WC_ASYNC_TEST* testDev = &key->asyncDev.test; testDev->rsaFunc.in = in; testDev->rsaFunc.inSz = inLen; testDev->rsaFunc.out = out; testDev->rsaFunc.outSz = outLen; testDev->rsaFunc.type = type; testDev->rsaFunc.key = key; testDev->rsaFunc.rng = rng; return WC_PENDING_E; } #endif /* WOLFSSL_ASYNC_CRYPT_TEST */ switch(type) { #ifndef WOLFSSL_RSA_PUBLIC_ONLY case RSA_PRIVATE_DECRYPT: case RSA_PRIVATE_ENCRYPT: #ifdef HAVE_CAVIUM key->dataLen = key->n.raw.len; ret = NitroxRsaExptMod(in, inLen, key->d.raw.buf, key->d.raw.len, key->n.raw.buf, key->n.raw.len, out, outLen, key); #elif defined(HAVE_INTEL_QA) #ifdef RSA_LOW_MEM ret = IntelQaRsaPrivate(&key->asyncDev, in, inLen, &key->d.raw, &key->n.raw, out, outLen); #else ret = IntelQaRsaCrtPrivate(&key->asyncDev, in, inLen, &key->p.raw, &key->q.raw, &key->dP.raw, &key->dQ.raw, &key->u.raw, out, outLen); #endif #else /* WOLFSSL_ASYNC_CRYPT_TEST */ ret = wc_RsaFunctionSync(in, inLen, out, outLen, type, key, rng); #endif break; #endif case RSA_PUBLIC_ENCRYPT: case RSA_PUBLIC_DECRYPT: #ifdef HAVE_CAVIUM key->dataLen = key->n.raw.len; ret = NitroxRsaExptMod(in, inLen, key->e.raw.buf, key->e.raw.len, key->n.raw.buf, key->n.raw.len, out, outLen, key); #elif defined(HAVE_INTEL_QA) ret = IntelQaRsaPublic(&key->asyncDev, in, inLen, &key->e.raw, &key->n.raw, out, outLen); #else /* WOLFSSL_ASYNC_CRYPT_TEST */ ret = wc_RsaFunctionSync(in, inLen, out, outLen, type, key, rng); #endif break; default: ret = RSA_WRONG_TYPE_E; } return ret; } #endif /* WOLFSSL_ASYNC_CRYPT && 
WC_ASYNC_ENABLE_RSA */ #if defined(WC_RSA_DIRECT) || defined(WC_RSA_NO_PADDING) /* Function that does the RSA operation directly with no padding. * * in buffer to do operation on * inLen length of input buffer * out buffer to hold results * outSz gets set to size of result buffer. Should be passed in as length * of out buffer. If the pointer "out" is null then outSz gets set to * the expected buffer size needed and LENGTH_ONLY_E gets returned. * key RSA key to use for encrypt/decrypt * type if using private or public key {RSA_PUBLIC_ENCRYPT, * RSA_PUBLIC_DECRYPT, RSA_PRIVATE_ENCRYPT, RSA_PRIVATE_DECRYPT} * rng wolfSSL RNG to use if needed * * returns size of result on success */ int wc_RsaDirect(byte* in, word32 inLen, byte* out, word32* outSz, RsaKey* key, int type, WC_RNG* rng) { int ret; if (in == NULL || outSz == NULL || key == NULL) { return BAD_FUNC_ARG; } /* sanity check on type of RSA operation */ switch (type) { case RSA_PUBLIC_ENCRYPT: case RSA_PUBLIC_DECRYPT: case RSA_PRIVATE_ENCRYPT: case RSA_PRIVATE_DECRYPT: break; default: WOLFSSL_MSG("Bad RSA type"); return BAD_FUNC_ARG; } if ((ret = wc_RsaEncryptSize(key)) < 0) { return BAD_FUNC_ARG; } if (inLen != (word32)ret) { WOLFSSL_MSG("Bad input length. Should be RSA key size"); return BAD_FUNC_ARG; } if (out == NULL) { *outSz = inLen; return LENGTH_ONLY_E; } switch (key->state) { case RSA_STATE_NONE: case RSA_STATE_ENCRYPT_PAD: case RSA_STATE_ENCRYPT_EXPTMOD: case RSA_STATE_DECRYPT_EXPTMOD: case RSA_STATE_DECRYPT_UNPAD: key->state = (type == RSA_PRIVATE_ENCRYPT || type == RSA_PUBLIC_ENCRYPT) ? RSA_STATE_ENCRYPT_EXPTMOD: RSA_STATE_DECRYPT_EXPTMOD; key->dataLen = *outSz; ret = wc_RsaFunction(in, inLen, out, &key->dataLen, type, key, rng); if (ret >= 0 || ret == WC_PENDING_E) { key->state = (type == RSA_PRIVATE_ENCRYPT || type == RSA_PUBLIC_ENCRYPT) ? RSA_STATE_ENCRYPT_RES: RSA_STATE_DECRYPT_RES; } if (ret < 0) { break; } FALL_THROUGH; case RSA_STATE_ENCRYPT_RES: case RSA_STATE_DECRYPT_RES: ret = key->dataLen; break; default: ret = BAD_STATE_E; } /* if async pending then skip cleanup*/ if (ret == WC_PENDING_E #ifdef WC_RSA_NONBLOCK || ret == FP_WOULDBLOCK #endif ) { return ret; } key->state = RSA_STATE_NONE; wc_RsaCleanup(key); return ret; } #endif /* WC_RSA_DIRECT || WC_RSA_NO_PADDING */ #if defined(WOLFSSL_CRYPTOCELL) static int cc310_RsaPublicEncrypt(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key) { CRYSError_t ret = 0; CRYS_RSAPrimeData_t primeData; int modulusSize = wc_RsaEncryptSize(key); /* The out buffer must be at least modulus size bytes long. 
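       For example 256 bytes for a 2048-bit key, as reported by
       wc_RsaEncryptSize().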
*/ if (outLen < modulusSize) return BAD_FUNC_ARG; ret = CRYS_RSA_PKCS1v15_Encrypt(&wc_rndState, wc_rndGenVectFunc, &key->ctx.pubKey, &primeData, (byte*)in, inLen, out); if (ret != SA_SILIB_RET_OK){ WOLFSSL_MSG("CRYS_RSA_PKCS1v15_Encrypt failed"); return -1; } return modulusSize; } static int cc310_RsaPublicDecrypt(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key) { CRYSError_t ret = 0; CRYS_RSAPrimeData_t primeData; uint16_t actualOutLen = outLen; ret = CRYS_RSA_PKCS1v15_Decrypt(&key->ctx.privKey, &primeData, (byte*)in, inLen, out, &actualOutLen); if (ret != SA_SILIB_RET_OK){ WOLFSSL_MSG("CRYS_RSA_PKCS1v15_Decrypt failed"); return -1; } return actualOutLen; } int cc310_RsaSSL_Sign(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key, CRYS_RSA_HASH_OpMode_t mode) { CRYSError_t ret = 0; uint16_t actualOutLen = outLen*sizeof(byte); CRYS_RSAPrivUserContext_t contextPrivate; ret = CRYS_RSA_PKCS1v15_Sign(&wc_rndState, wc_rndGenVectFunc, &contextPrivate, &key->ctx.privKey, mode, (byte*)in, inLen, out, &actualOutLen); if (ret != SA_SILIB_RET_OK){ WOLFSSL_MSG("CRYS_RSA_PKCS1v15_Sign failed"); return -1; } return actualOutLen; } int cc310_RsaSSL_Verify(const byte* in, word32 inLen, byte* sig, RsaKey* key, CRYS_RSA_HASH_OpMode_t mode) { CRYSError_t ret = 0; CRYS_RSAPubUserContext_t contextPub; /* verify the signature in the sig pointer */ ret = CRYS_RSA_PKCS1v15_Verify(&contextPub, &key->ctx.pubKey, mode, (byte*)in, inLen, sig); if (ret != SA_SILIB_RET_OK){ WOLFSSL_MSG("CRYS_RSA_PKCS1v15_Verify failed"); return -1; } return ret; } #endif /* WOLFSSL_CRYPTOCELL */ int wc_RsaFunction(const byte* in, word32 inLen, byte* out, word32* outLen, int type, RsaKey* key, WC_RNG* rng) { int ret = 0; if (key == NULL || in == NULL || inLen == 0 || out == NULL || outLen == NULL || *outLen == 0 || type == RSA_TYPE_UNKNOWN) { return BAD_FUNC_ARG; } #ifdef WOLF_CRYPTO_CB if (key->devId != INVALID_DEVID) { ret = wc_CryptoCb_Rsa(in, inLen, out, outLen, type, key, rng); if (ret != CRYPTOCB_UNAVAILABLE) return ret; /* fall-through when unavailable */ ret = 0; /* reset error code and try using software */ } #endif #ifndef TEST_UNPAD_CONSTANT_TIME #ifndef NO_RSA_BOUNDS_CHECK if (type == RSA_PRIVATE_DECRYPT && key->state == RSA_STATE_DECRYPT_EXPTMOD) { /* Check that 1 < in < n-1. (Requirement of 800-56B.) 
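       The code below reads the input into c, rejects c <= 1, then rejects
       c when c + 1 >= n, which is the same as requiring c < n - 1.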
*/ #ifdef WOLFSSL_SMALL_STACK mp_int* c; #else mp_int c[1]; #endif #ifdef WOLFSSL_SMALL_STACK c = (mp_int*)XMALLOC(sizeof(mp_int), key->heap, DYNAMIC_TYPE_RSA); if (c == NULL) ret = MEMORY_E; #endif if (mp_init(c) != MP_OKAY) ret = MP_INIT_E; if (ret == 0) { if (mp_read_unsigned_bin(c, in, inLen) != 0) ret = MP_READ_E; } if (ret == 0) { /* check c > 1 */ if (mp_cmp_d(c, 1) != MP_GT) ret = RSA_OUT_OF_RANGE_E; } if (ret == 0) { /* add c+1 */ if (mp_add_d(c, 1, c) != MP_OKAY) ret = MP_ADD_E; } if (ret == 0) { /* check c+1 < n */ if (mp_cmp(c, &key->n) != MP_LT) ret = RSA_OUT_OF_RANGE_E; } mp_clear(c); #ifdef WOLFSSL_SMALL_STACK XFREE(c, key->heap, DYNAMIC_TYPE_RSA); #endif if (ret != 0) return ret; } #endif /* NO_RSA_BOUNDS_CHECK */ #endif #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_RSA) if (key->asyncDev.marker == WOLFSSL_ASYNC_MARKER_RSA && key->n.raw.len > 0) { ret = wc_RsaFunctionAsync(in, inLen, out, outLen, type, key, rng); } else #endif #ifdef WC_RSA_NONBLOCK if (key->nb) { ret = wc_RsaFunctionNonBlock(in, inLen, out, outLen, type, key); } else #endif { ret = wc_RsaFunctionSync(in, inLen, out, outLen, type, key, rng); } /* handle error */ if (ret < 0 && ret != WC_PENDING_E #ifdef WC_RSA_NONBLOCK && ret != FP_WOULDBLOCK #endif ) { if (ret == MP_EXPTMOD_E) { /* This can happen due to incorrectly set FP_MAX_BITS or missing XREALLOC */ WOLFSSL_MSG("RSA_FUNCTION MP_EXPTMOD_E: memory/config problem"); } key->state = RSA_STATE_NONE; wc_RsaCleanup(key); } return ret; } #ifndef WOLFSSL_RSA_VERIFY_ONLY /* Internal Wrappers */ /* Gives the option of choosing padding type in : input to be encrypted inLen: length of input buffer out: encrypted output outLen: length of encrypted output buffer key : wolfSSL initialized RSA key struct rng : wolfSSL initialized random number struct rsa_type : type of RSA: RSA_PUBLIC_ENCRYPT, RSA_PUBLIC_DECRYPT, RSA_PRIVATE_ENCRYPT or RSA_PRIVATE_DECRYPT pad_value: RSA_BLOCK_TYPE_1 or RSA_BLOCK_TYPE_2 pad_type : type of padding: WC_RSA_PKCSV15_PAD, WC_RSA_OAEP_PAD, WC_RSA_NO_PAD or WC_RSA_PSS_PAD hash : type of hash algorithm to use found in wolfssl/wolfcrypt/hash.h mgf : type of mask generation function to use label : optional label labelSz : size of optional label buffer saltLen : Length of salt used in PSS rng : random number generator */ static int RsaPublicEncryptEx(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key, int rsa_type, byte pad_value, int pad_type, enum wc_HashType hash, int mgf, byte* label, word32 labelSz, int saltLen, WC_RNG* rng) { int ret, sz; if (in == NULL || inLen == 0 || out == NULL || key == NULL) { return BAD_FUNC_ARG; } sz = wc_RsaEncryptSize(key); if (sz > (int)outLen) { return RSA_BUFFER_E; } if (sz < RSA_MIN_PAD_SZ) { return WC_KEY_SIZE_E; } if (inLen > (word32)(sz - RSA_MIN_PAD_SZ)) { #ifdef WC_RSA_NO_PADDING /* In the case that no padding is used the input length can and should * be the same size as the RSA key. 
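     * (for a 2048-bit key that means exactly 256 bytes of input)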
*/ if (pad_type != WC_RSA_NO_PAD) #endif return RSA_BUFFER_E; } switch (key->state) { case RSA_STATE_NONE: case RSA_STATE_ENCRYPT_PAD: #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_RSA) && \ defined(HAVE_CAVIUM) if (key->asyncDev.marker == WOLFSSL_ASYNC_MARKER_RSA && pad_type != WC_RSA_PSS_PAD && key->n.raw.buf) { /* Async operations that include padding */ if (rsa_type == RSA_PUBLIC_ENCRYPT && pad_value == RSA_BLOCK_TYPE_2) { key->state = RSA_STATE_ENCRYPT_RES; key->dataLen = key->n.raw.len; return NitroxRsaPublicEncrypt(in, inLen, out, outLen, key); } else if (rsa_type == RSA_PRIVATE_ENCRYPT && pad_value == RSA_BLOCK_TYPE_1) { key->state = RSA_STATE_ENCRYPT_RES; key->dataLen = key->n.raw.len; return NitroxRsaSSL_Sign(in, inLen, out, outLen, key); } } #elif defined(WOLFSSL_CRYPTOCELL) if (rsa_type == RSA_PUBLIC_ENCRYPT && pad_value == RSA_BLOCK_TYPE_2) { return cc310_RsaPublicEncrypt(in, inLen, out, outLen, key); } else if (rsa_type == RSA_PRIVATE_ENCRYPT && pad_value == RSA_BLOCK_TYPE_1) { return cc310_RsaSSL_Sign(in, inLen, out, outLen, key, cc310_hashModeRSA(hash, 0)); } #endif /* WOLFSSL_CRYPTOCELL */ key->state = RSA_STATE_ENCRYPT_PAD; ret = wc_RsaPad_ex(in, inLen, out, sz, pad_value, rng, pad_type, hash, mgf, label, labelSz, saltLen, mp_count_bits(&key->n), key->heap); if (ret < 0) { break; } key->state = RSA_STATE_ENCRYPT_EXPTMOD; FALL_THROUGH; case RSA_STATE_ENCRYPT_EXPTMOD: key->dataLen = outLen; ret = wc_RsaFunction(out, sz, out, &key->dataLen, rsa_type, key, rng); if (ret >= 0 || ret == WC_PENDING_E) { key->state = RSA_STATE_ENCRYPT_RES; } if (ret < 0) { break; } FALL_THROUGH; case RSA_STATE_ENCRYPT_RES: ret = key->dataLen; break; default: ret = BAD_STATE_E; break; } /* if async pending then return and skip done cleanup below */ if (ret == WC_PENDING_E #ifdef WC_RSA_NONBLOCK || ret == FP_WOULDBLOCK #endif ) { return ret; } key->state = RSA_STATE_NONE; wc_RsaCleanup(key); return ret; } #endif /* Gives the option of choosing padding type in : input to be decrypted inLen: length of input buffer out: decrypted message outLen: length of decrypted message in bytes outPtr: optional inline output pointer (if provided doing inline) key : wolfSSL initialized RSA key struct rsa_type : type of RSA: RSA_PUBLIC_ENCRYPT, RSA_PUBLIC_DECRYPT, RSA_PRIVATE_ENCRYPT or RSA_PRIVATE_DECRYPT pad_value: RSA_BLOCK_TYPE_1 or RSA_BLOCK_TYPE_2 pad_type : type of padding: WC_RSA_PKCSV15_PAD, WC_RSA_OAEP_PAD, WC_RSA_NO_PAD, WC_RSA_PSS_PAD hash : type of hash algorithm to use found in wolfssl/wolfcrypt/hash.h mgf : type of mask generation function to use label : optional label labelSz : size of optional label buffer saltLen : Length of salt used in PSS rng : random number generator */ static int RsaPrivateDecryptEx(byte* in, word32 inLen, byte* out, word32 outLen, byte** outPtr, RsaKey* key, int rsa_type, byte pad_value, int pad_type, enum wc_HashType hash, int mgf, byte* label, word32 labelSz, int saltLen, WC_RNG* rng) { int ret = RSA_WRONG_TYPE_E; byte* pad = NULL; if (in == NULL || inLen == 0 || out == NULL || key == NULL) { return BAD_FUNC_ARG; } switch (key->state) { case RSA_STATE_NONE: key->dataLen = inLen; #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_RSA) && \ defined(HAVE_CAVIUM) /* Async operations that include padding */ if (key->asyncDev.marker == WOLFSSL_ASYNC_MARKER_RSA && pad_type != WC_RSA_PSS_PAD) { #ifndef WOLFSSL_RSA_PUBLIC_ONLY if (rsa_type == RSA_PRIVATE_DECRYPT && pad_value == RSA_BLOCK_TYPE_2) { key->state = RSA_STATE_DECRYPT_RES; key->data = NULL; return 
NitroxRsaPrivateDecrypt(in, inLen, out, &key->dataLen, key); #endif } else if (rsa_type == RSA_PUBLIC_DECRYPT && pad_value == RSA_BLOCK_TYPE_1) { key->state = RSA_STATE_DECRYPT_RES; key->data = NULL; return NitroxRsaSSL_Verify(in, inLen, out, &key->dataLen, key); } } #elif defined(WOLFSSL_CRYPTOCELL) if (rsa_type == RSA_PRIVATE_DECRYPT && pad_value == RSA_BLOCK_TYPE_2) { ret = cc310_RsaPublicDecrypt(in, inLen, out, outLen, key); if (outPtr != NULL) *outPtr = out; /* for inline */ return ret; } else if (rsa_type == RSA_PUBLIC_DECRYPT && pad_value == RSA_BLOCK_TYPE_1) { return cc310_RsaSSL_Verify(in, inLen, out, key, cc310_hashModeRSA(hash, 0)); } #endif /* WOLFSSL_CRYPTOCELL */ #if !defined(WOLFSSL_RSA_VERIFY_ONLY) && !defined(WOLFSSL_RSA_VERIFY_INLINE) /* verify the tmp ptr is NULL, otherwise indicates bad state */ if (key->data != NULL) { ret = BAD_STATE_E; break; } /* if not doing this inline then allocate a buffer for it */ if (outPtr == NULL) { key->data = (byte*)XMALLOC(inLen, key->heap, DYNAMIC_TYPE_WOLF_BIGINT); key->dataIsAlloc = 1; if (key->data == NULL) { ret = MEMORY_E; break; } XMEMCPY(key->data, in, inLen); } else { key->data = out; } #endif key->state = RSA_STATE_DECRYPT_EXPTMOD; FALL_THROUGH; case RSA_STATE_DECRYPT_EXPTMOD: #if !defined(WOLFSSL_RSA_VERIFY_ONLY) && !defined(WOLFSSL_RSA_VERIFY_INLINE) ret = wc_RsaFunction(key->data, inLen, key->data, &key->dataLen, rsa_type, key, rng); #else ret = wc_RsaFunction(in, inLen, out, &key->dataLen, rsa_type, key, rng); #endif if (ret >= 0 || ret == WC_PENDING_E) { key->state = RSA_STATE_DECRYPT_UNPAD; } if (ret < 0) { break; } FALL_THROUGH; case RSA_STATE_DECRYPT_UNPAD: #if !defined(WOLFSSL_RSA_VERIFY_ONLY) && !defined(WOLFSSL_RSA_VERIFY_INLINE) ret = wc_RsaUnPad_ex(key->data, key->dataLen, &pad, pad_value, pad_type, hash, mgf, label, labelSz, saltLen, mp_count_bits(&key->n), key->heap); #else ret = wc_RsaUnPad_ex(out, key->dataLen, &pad, pad_value, pad_type, hash, mgf, label, labelSz, saltLen, mp_count_bits(&key->n), key->heap); #endif if (rsa_type == RSA_PUBLIC_DECRYPT && ret > (int)outLen) ret = RSA_BUFFER_E; else if (ret >= 0 && pad != NULL) { #if !defined(WOLFSSL_RSA_VERIFY_ONLY) && !defined(WOLFSSL_RSA_VERIFY_INLINE) signed char c; #endif /* only copy output if not inline */ if (outPtr == NULL) { #if !defined(WOLFSSL_RSA_VERIFY_ONLY) && !defined(WOLFSSL_RSA_VERIFY_INLINE) if (rsa_type == RSA_PRIVATE_DECRYPT) { word32 i, j; int start = (int)((size_t)pad - (size_t)key->data); for (i = 0, j = 0; j < key->dataLen; j++) { out[i] = key->data[j]; c = ctMaskGTE(j, start); c &= ctMaskLT(i, outLen); /* 0 - no add, -1 add */ i += (word32)((byte)(-c)); } } else #endif { XMEMCPY(out, pad, ret); } } else *outPtr = pad; #if !defined(WOLFSSL_RSA_VERIFY_ONLY) ret = ctMaskSelInt(ctMaskLTE(ret, outLen), ret, RSA_BUFFER_E); ret = ctMaskSelInt(ctMaskNotEq(ret, 0), ret, RSA_BUFFER_E); #else if (outLen < (word32)ret) ret = RSA_BUFFER_E; #endif } key->state = RSA_STATE_DECRYPT_RES; FALL_THROUGH; case RSA_STATE_DECRYPT_RES: #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_RSA) && \ defined(HAVE_CAVIUM) if (key->asyncDev.marker == WOLFSSL_ASYNC_MARKER_RSA && pad_type != WC_RSA_PSS_PAD) { if (ret > 0) { /* convert result */ byte* dataLen = (byte*)&key->dataLen; ret = (dataLen[0] << 8) | (dataLen[1]); if (outPtr) *outPtr = in; } } #endif break; default: ret = BAD_STATE_E; break; } /* if async pending then return and skip done cleanup below */ if (ret == WC_PENDING_E #ifdef WC_RSA_NONBLOCK || ret == FP_WOULDBLOCK #endif ) { return ret; } 
key->state = RSA_STATE_NONE; wc_RsaCleanup(key); return ret; } #ifndef WOLFSSL_RSA_VERIFY_ONLY /* Public RSA Functions */ int wc_RsaPublicEncrypt(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key, WC_RNG* rng) { return RsaPublicEncryptEx(in, inLen, out, outLen, key, RSA_PUBLIC_ENCRYPT, RSA_BLOCK_TYPE_2, WC_RSA_PKCSV15_PAD, WC_HASH_TYPE_NONE, WC_MGF1NONE, NULL, 0, 0, rng); } #if !defined(WC_NO_RSA_OAEP) || defined(WC_RSA_NO_PADDING) int wc_RsaPublicEncrypt_ex(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key, WC_RNG* rng, int type, enum wc_HashType hash, int mgf, byte* label, word32 labelSz) { return RsaPublicEncryptEx(in, inLen, out, outLen, key, RSA_PUBLIC_ENCRYPT, RSA_BLOCK_TYPE_2, type, hash, mgf, label, labelSz, 0, rng); } #endif /* WC_NO_RSA_OAEP */ #endif #ifndef WOLFSSL_RSA_PUBLIC_ONLY int wc_RsaPrivateDecryptInline(byte* in, word32 inLen, byte** out, RsaKey* key) { WC_RNG* rng; #ifdef WC_RSA_BLINDING rng = key->rng; #else rng = NULL; #endif return RsaPrivateDecryptEx(in, inLen, in, inLen, out, key, RSA_PRIVATE_DECRYPT, RSA_BLOCK_TYPE_2, WC_RSA_PKCSV15_PAD, WC_HASH_TYPE_NONE, WC_MGF1NONE, NULL, 0, 0, rng); } #ifndef WC_NO_RSA_OAEP int wc_RsaPrivateDecryptInline_ex(byte* in, word32 inLen, byte** out, RsaKey* key, int type, enum wc_HashType hash, int mgf, byte* label, word32 labelSz) { WC_RNG* rng; #ifdef WC_RSA_BLINDING rng = key->rng; #else rng = NULL; #endif return RsaPrivateDecryptEx(in, inLen, in, inLen, out, key, RSA_PRIVATE_DECRYPT, RSA_BLOCK_TYPE_2, type, hash, mgf, label, labelSz, 0, rng); } #endif /* WC_NO_RSA_OAEP */ int wc_RsaPrivateDecrypt(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key) { WC_RNG* rng; #ifdef WC_RSA_BLINDING rng = key->rng; #else rng = NULL; #endif return RsaPrivateDecryptEx((byte*)in, inLen, out, outLen, NULL, key, RSA_PRIVATE_DECRYPT, RSA_BLOCK_TYPE_2, WC_RSA_PKCSV15_PAD, WC_HASH_TYPE_NONE, WC_MGF1NONE, NULL, 0, 0, rng); } #if !defined(WC_NO_RSA_OAEP) || defined(WC_RSA_NO_PADDING) int wc_RsaPrivateDecrypt_ex(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key, int type, enum wc_HashType hash, int mgf, byte* label, word32 labelSz) { WC_RNG* rng; #ifdef WC_RSA_BLINDING rng = key->rng; #else rng = NULL; #endif return RsaPrivateDecryptEx((byte*)in, inLen, out, outLen, NULL, key, RSA_PRIVATE_DECRYPT, RSA_BLOCK_TYPE_2, type, hash, mgf, label, labelSz, 0, rng); } #endif /* WC_NO_RSA_OAEP || WC_RSA_NO_PADDING */ #endif /* WOLFSSL_RSA_PUBLIC_ONLY */ #if !defined(WOLFSSL_CRYPTOCELL) int wc_RsaSSL_VerifyInline(byte* in, word32 inLen, byte** out, RsaKey* key) { WC_RNG* rng; #ifdef WC_RSA_BLINDING rng = key->rng; #else rng = NULL; #endif return RsaPrivateDecryptEx(in, inLen, in, inLen, out, key, RSA_PUBLIC_DECRYPT, RSA_BLOCK_TYPE_1, WC_RSA_PKCSV15_PAD, WC_HASH_TYPE_NONE, WC_MGF1NONE, NULL, 0, 0, rng); } #endif #ifndef WOLFSSL_RSA_VERIFY_ONLY int wc_RsaSSL_Verify(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key) { return wc_RsaSSL_Verify_ex(in, inLen, out, outLen, key , WC_RSA_PKCSV15_PAD); } int wc_RsaSSL_Verify_ex(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key, int pad_type) { WC_RNG* rng; if (key == NULL) { return BAD_FUNC_ARG; } #ifdef WC_RSA_BLINDING rng = key->rng; #else rng = NULL; #endif return RsaPrivateDecryptEx((byte*)in, inLen, out, outLen, NULL, key, RSA_PUBLIC_DECRYPT, RSA_BLOCK_TYPE_1, pad_type, WC_HASH_TYPE_NONE, WC_MGF1NONE, NULL, 0, 0, rng); } #endif #ifdef WC_RSA_PSS /* Verify the message signed with RSA-PSS. 
* The input buffer is reused for the output buffer. * Salt length is equal to hash length. * * in Buffer holding encrypted data. * inLen Length of data in buffer. * out Pointer to address containing the PSS data. * hash Hash algorithm. * mgf Mask generation function. * key Public RSA key. * returns the length of the PSS data on success and negative indicates failure. */ int wc_RsaPSS_VerifyInline(byte* in, word32 inLen, byte** out, enum wc_HashType hash, int mgf, RsaKey* key) { #ifndef WOLFSSL_PSS_SALT_LEN_DISCOVER return wc_RsaPSS_VerifyInline_ex(in, inLen, out, hash, mgf, RSA_PSS_SALT_LEN_DEFAULT, key); #else return wc_RsaPSS_VerifyInline_ex(in, inLen, out, hash, mgf, RSA_PSS_SALT_LEN_DISCOVER, key); #endif } /* Verify the message signed with RSA-PSS. * The input buffer is reused for the output buffer. * * in Buffer holding encrypted data. * inLen Length of data in buffer. * out Pointer to address containing the PSS data. * hash Hash algorithm. * mgf Mask generation function. * key Public RSA key. * saltLen Length of salt used. RSA_PSS_SALT_LEN_DEFAULT (-1) indicates salt * length is the same as the hash length. RSA_PSS_SALT_LEN_DISCOVER * indicates salt length is determined from the data. * returns the length of the PSS data on success and negative indicates failure. */ int wc_RsaPSS_VerifyInline_ex(byte* in, word32 inLen, byte** out, enum wc_HashType hash, int mgf, int saltLen, RsaKey* key) { WC_RNG* rng; #ifdef WC_RSA_BLINDING rng = key->rng; #else rng = NULL; #endif return RsaPrivateDecryptEx(in, inLen, in, inLen, out, key, RSA_PUBLIC_DECRYPT, RSA_BLOCK_TYPE_1, WC_RSA_PSS_PAD, hash, mgf, NULL, 0, saltLen, rng); } /* Verify the message signed with RSA-PSS. * Salt length is equal to hash length. * * in Buffer holding encrypted data. * inLen Length of data in buffer. * out Pointer to address containing the PSS data. * hash Hash algorithm. * mgf Mask generation function. * key Public RSA key. * returns the length of the PSS data on success and negative indicates failure. */ int wc_RsaPSS_Verify(byte* in, word32 inLen, byte* out, word32 outLen, enum wc_HashType hash, int mgf, RsaKey* key) { #ifndef WOLFSSL_PSS_SALT_LEN_DISCOVER return wc_RsaPSS_Verify_ex(in, inLen, out, outLen, hash, mgf, RSA_PSS_SALT_LEN_DEFAULT, key); #else return wc_RsaPSS_Verify_ex(in, inLen, out, outLen, hash, mgf, RSA_PSS_SALT_LEN_DISCOVER, key); #endif } /* Verify the message signed with RSA-PSS. * * in Buffer holding encrypted data. * inLen Length of data in buffer. * out Pointer to address containing the PSS data. * hash Hash algorithm. * mgf Mask generation function. * key Public RSA key. * saltLen Length of salt used. RSA_PSS_SALT_LEN_DEFAULT (-1) indicates salt * length is the same as the hash length. RSA_PSS_SALT_LEN_DISCOVER * indicates salt length is determined from the data. * returns the length of the PSS data on success and negative indicates failure. */ int wc_RsaPSS_Verify_ex(byte* in, word32 inLen, byte* out, word32 outLen, enum wc_HashType hash, int mgf, int saltLen, RsaKey* key) { WC_RNG* rng; #ifdef WC_RSA_BLINDING rng = key->rng; #else rng = NULL; #endif return RsaPrivateDecryptEx(in, inLen, out, outLen, NULL, key, RSA_PUBLIC_DECRYPT, RSA_BLOCK_TYPE_1, WC_RSA_PSS_PAD, hash, mgf, NULL, 0, saltLen, rng); } /* Checks the PSS data to ensure that the signature matches. * Salt length is equal to hash length. * * in Hash of the data that is being verified. * inSz Length of hash. * sig Buffer holding PSS data. * sigSz Size of PSS data. * hashType Hash algorithm. 
* returns BAD_PADDING_E when the PSS data is invalid, BAD_FUNC_ARG when * NULL is passed in to in or sig or inSz is not the same as the hash * algorithm length and 0 on success. */ int wc_RsaPSS_CheckPadding(const byte* in, word32 inSz, byte* sig, word32 sigSz, enum wc_HashType hashType) { return wc_RsaPSS_CheckPadding_ex(in, inSz, sig, sigSz, hashType, inSz, 0); } /* Checks the PSS data to ensure that the signature matches. * * in Hash of the data that is being verified. * inSz Length of hash. * sig Buffer holding PSS data. * sigSz Size of PSS data. * hashType Hash algorithm. * saltLen Length of salt used. RSA_PSS_SALT_LEN_DEFAULT (-1) indicates salt * length is the same as the hash length. RSA_PSS_SALT_LEN_DISCOVER * indicates salt length is determined from the data. * returns BAD_PADDING_E when the PSS data is invalid, BAD_FUNC_ARG when * NULL is passed in to in or sig or inSz is not the same as the hash * algorithm length and 0 on success. */ int wc_RsaPSS_CheckPadding_ex(const byte* in, word32 inSz, byte* sig, word32 sigSz, enum wc_HashType hashType, int saltLen, int bits) { int ret = 0; #ifndef WOLFSSL_PSS_LONG_SALT byte sigCheck[WC_MAX_DIGEST_SIZE*2 + RSA_PSS_PAD_SZ]; #else byte *sigCheck = NULL; #endif (void)bits; if (in == NULL || sig == NULL || inSz != (word32)wc_HashGetDigestSize(hashType)) { ret = BAD_FUNC_ARG; } if (ret == 0) { if (saltLen == RSA_PSS_SALT_LEN_DEFAULT) { saltLen = inSz; #ifdef WOLFSSL_SHA512 /* See FIPS 186-4 section 5.5 item (e). */ if (bits == 1024 && inSz == WC_SHA512_DIGEST_SIZE) { saltLen = RSA_PSS_SALT_MAX_SZ; } #endif } #ifndef WOLFSSL_PSS_LONG_SALT else if ((word32)saltLen > inSz) { ret = PSS_SALTLEN_E; } #endif #ifndef WOLFSSL_PSS_SALT_LEN_DISCOVER else if (saltLen < RSA_PSS_SALT_LEN_DEFAULT) { ret = PSS_SALTLEN_E; } #else else if (saltLen == RSA_PSS_SALT_LEN_DISCOVER) { saltLen = sigSz - inSz; if (saltLen < 0) { ret = PSS_SALTLEN_E; } } else if (saltLen < RSA_PSS_SALT_LEN_DISCOVER) { ret = PSS_SALTLEN_E; } #endif } /* Sig = Salt | Exp Hash */ if (ret == 0) { if (sigSz != inSz + saltLen) { ret = PSS_SALTLEN_E; } } #ifdef WOLFSSL_PSS_LONG_SALT if (ret == 0) { sigCheck = (byte*)XMALLOC(RSA_PSS_PAD_SZ + inSz + saltLen, NULL, DYNAMIC_TYPE_RSA_BUFFER); if (sigCheck == NULL) { ret = MEMORY_E; } } #endif /* Exp Hash = HASH(8 * 0x00 | Message Hash | Salt) */ if (ret == 0) { XMEMSET(sigCheck, 0, RSA_PSS_PAD_SZ); XMEMCPY(sigCheck + RSA_PSS_PAD_SZ, in, inSz); XMEMCPY(sigCheck + RSA_PSS_PAD_SZ + inSz, sig, saltLen); ret = wc_Hash(hashType, sigCheck, RSA_PSS_PAD_SZ + inSz + saltLen, sigCheck, inSz); } if (ret == 0) { if (XMEMCMP(sigCheck, sig + saltLen, inSz) != 0) { WOLFSSL_MSG("RsaPSS_CheckPadding: Padding Error"); ret = BAD_PADDING_E; } } #ifdef WOLFSSL_PSS_LONG_SALT if (sigCheck != NULL) { XFREE(sigCheck, NULL, DYNAMIC_TYPE_RSA_BUFFER); } #endif return ret; } /* Verify the message signed with RSA-PSS. * The input buffer is reused for the output buffer. * Salt length is equal to hash length. * * in Buffer holding encrypted data. * inLen Length of data in buffer. * out Pointer to address containing the PSS data. * digest Hash of the data that is being verified. * digestLen Length of hash. * hash Hash algorithm. * mgf Mask generation function. * key Public RSA key. * returns the length of the PSS data on success and negative indicates failure. 
*/ int wc_RsaPSS_VerifyCheckInline(byte* in, word32 inLen, byte** out, const byte* digest, word32 digestLen, enum wc_HashType hash, int mgf, RsaKey* key) { int ret = 0, verify, saltLen, hLen, bits = 0; hLen = wc_HashGetDigestSize(hash); if (hLen < 0) return hLen; if ((word32)hLen != digestLen) return BAD_FUNC_ARG; saltLen = hLen; #ifdef WOLFSSL_SHA512 /* See FIPS 186-4 section 5.5 item (e). */ bits = mp_count_bits(&key->n); if (bits == 1024 && hLen == WC_SHA512_DIGEST_SIZE) saltLen = RSA_PSS_SALT_MAX_SZ; #endif verify = wc_RsaPSS_VerifyInline_ex(in, inLen, out, hash, mgf, saltLen, key); if (verify > 0) ret = wc_RsaPSS_CheckPadding_ex(digest, digestLen, *out, verify, hash, saltLen, bits); if (ret == 0) ret = verify; return ret; } /* Verify the message signed with RSA-PSS. * Salt length is equal to hash length. * * in Buffer holding encrypted data. * inLen Length of data in buffer. * out Pointer to address containing the PSS data. * outLen Length of the output. * digest Hash of the data that is being verified. * digestLen Length of hash. * hash Hash algorithm. * mgf Mask generation function. * key Public RSA key. * returns the length of the PSS data on success and negative indicates failure. */ int wc_RsaPSS_VerifyCheck(byte* in, word32 inLen, byte* out, word32 outLen, const byte* digest, word32 digestLen, enum wc_HashType hash, int mgf, RsaKey* key) { int ret = 0, verify, saltLen, hLen, bits = 0; hLen = wc_HashGetDigestSize(hash); if (hLen < 0) return hLen; if ((word32)hLen != digestLen) return BAD_FUNC_ARG; saltLen = hLen; #ifdef WOLFSSL_SHA512 /* See FIPS 186-4 section 5.5 item (e). */ bits = mp_count_bits(&key->n); if (bits == 1024 && hLen == WC_SHA512_DIGEST_SIZE) saltLen = RSA_PSS_SALT_MAX_SZ; #endif verify = wc_RsaPSS_Verify_ex(in, inLen, out, outLen, hash, mgf, saltLen, key); if (verify > 0) ret = wc_RsaPSS_CheckPadding_ex(digest, digestLen, out, verify, hash, saltLen, bits); if (ret == 0) ret = verify; return ret; } #endif #if !defined(WOLFSSL_RSA_PUBLIC_ONLY) && !defined(WOLFSSL_RSA_VERIFY_ONLY) int wc_RsaSSL_Sign(const byte* in, word32 inLen, byte* out, word32 outLen, RsaKey* key, WC_RNG* rng) { return RsaPublicEncryptEx(in, inLen, out, outLen, key, RSA_PRIVATE_ENCRYPT, RSA_BLOCK_TYPE_1, WC_RSA_PKCSV15_PAD, WC_HASH_TYPE_NONE, WC_MGF1NONE, NULL, 0, 0, rng); } #ifdef WC_RSA_PSS /* Sign the hash of a message using RSA-PSS. * Salt length is equal to hash length. * * in Buffer holding hash of message. * inLen Length of data in buffer (hash length). * out Buffer to write encrypted signature into. * outLen Size of buffer to write to. * hash Hash algorithm. * mgf Mask generation function. * key Public RSA key. * rng Random number generator. * returns the length of the encrypted signature on success, a negative value * indicates failure. */ int wc_RsaPSS_Sign(const byte* in, word32 inLen, byte* out, word32 outLen, enum wc_HashType hash, int mgf, RsaKey* key, WC_RNG* rng) { return wc_RsaPSS_Sign_ex(in, inLen, out, outLen, hash, mgf, RSA_PSS_SALT_LEN_DEFAULT, key, rng); } /* Sign the hash of a message using RSA-PSS. * * in Buffer holding hash of message. * inLen Length of data in buffer (hash length). * out Buffer to write encrypted signature into. * outLen Size of buffer to write to. * hash Hash algorithm. * mgf Mask generation function. * saltLen Length of salt used. RSA_PSS_SALT_LEN_DEFAULT (-1) indicates salt * length is the same as the hash length. RSA_PSS_SALT_LEN_DISCOVER * indicates salt length is determined from the data. * key Public RSA key. * rng Random number generator. 
* returns the length of the encrypted signature on success, a negative value * indicates failure. */ int wc_RsaPSS_Sign_ex(const byte* in, word32 inLen, byte* out, word32 outLen, enum wc_HashType hash, int mgf, int saltLen, RsaKey* key, WC_RNG* rng) { return RsaPublicEncryptEx(in, inLen, out, outLen, key, RSA_PRIVATE_ENCRYPT, RSA_BLOCK_TYPE_1, WC_RSA_PSS_PAD, hash, mgf, NULL, 0, saltLen, rng); } #endif #endif #if !defined(WOLFSSL_RSA_VERIFY_ONLY) || !defined(WOLFSSL_SP_MATH) || \ defined(WC_RSA_PSS) int wc_RsaEncryptSize(RsaKey* key) { int ret; if (key == NULL) { return BAD_FUNC_ARG; } ret = mp_unsigned_bin_size(&key->n); #ifdef WOLF_CRYPTO_CB if (ret == 0 && key->devId != INVALID_DEVID) { ret = 2048/8; /* hardware handles, use 2048-bit as default */ } #endif return ret; } #endif #ifndef WOLFSSL_RSA_VERIFY_ONLY /* flatten RsaKey structure into individual elements (e, n) */ int wc_RsaFlattenPublicKey(RsaKey* key, byte* e, word32* eSz, byte* n, word32* nSz) { int sz, ret; if (key == NULL || e == NULL || eSz == NULL || n == NULL || nSz == NULL) { return BAD_FUNC_ARG; } sz = mp_unsigned_bin_size(&key->e); if ((word32)sz > *eSz) return RSA_BUFFER_E; ret = mp_to_unsigned_bin(&key->e, e); if (ret != MP_OKAY) return ret; *eSz = (word32)sz; sz = wc_RsaEncryptSize(key); if ((word32)sz > *nSz) return RSA_BUFFER_E; ret = mp_to_unsigned_bin(&key->n, n); if (ret != MP_OKAY) return ret; *nSz = (word32)sz; return 0; } #endif #endif /* HAVE_FIPS */ #ifndef WOLFSSL_RSA_VERIFY_ONLY static int RsaGetValue(mp_int* in, byte* out, word32* outSz) { word32 sz; int ret = 0; /* Parameters ensured by calling function. */ sz = (word32)mp_unsigned_bin_size(in); if (sz > *outSz) ret = RSA_BUFFER_E; if (ret == 0) ret = mp_to_unsigned_bin(in, out); if (ret == MP_OKAY) *outSz = sz; return ret; } int wc_RsaExportKey(RsaKey* key, byte* e, word32* eSz, byte* n, word32* nSz, byte* d, word32* dSz, byte* p, word32* pSz, byte* q, word32* qSz) { int ret = BAD_FUNC_ARG; if (key && e && eSz && n && nSz && d && dSz && p && pSz && q && qSz) ret = 0; if (ret == 0) ret = RsaGetValue(&key->e, e, eSz); if (ret == 0) ret = RsaGetValue(&key->n, n, nSz); #ifndef WOLFSSL_RSA_PUBLIC_ONLY if (ret == 0) ret = RsaGetValue(&key->d, d, dSz); if (ret == 0) ret = RsaGetValue(&key->p, p, pSz); if (ret == 0) ret = RsaGetValue(&key->q, q, qSz); #else /* no private parts to key */ if (d == NULL || p == NULL || q == NULL || dSz == NULL || pSz == NULL || qSz == NULL) { ret = BAD_FUNC_ARG; } else { *dSz = 0; *pSz = 0; *qSz = 0; } #endif /* WOLFSSL_RSA_PUBLIC_ONLY */ return ret; } #endif #ifdef WOLFSSL_KEY_GEN /* Check that |p-q| > 2^((size/2)-100) */ static int wc_CompareDiffPQ(mp_int* p, mp_int* q, int size) { mp_int c, d; int ret; if (p == NULL || q == NULL) return BAD_FUNC_ARG; ret = mp_init_multi(&c, &d, NULL, NULL, NULL, NULL); /* c = 2^((size/2)-100) */ if (ret == 0) ret = mp_2expt(&c, (size/2)-100); /* d = |p-q| */ if (ret == 0) ret = mp_sub(p, q, &d); if (ret == 0) ret = mp_abs(&d, &d); /* compare */ if (ret == 0) ret = mp_cmp(&d, &c); if (ret == MP_GT) ret = MP_OKAY; mp_clear(&d); mp_clear(&c); return ret; } /* The lower_bound value is floor(2^(0.5) * 2^((nlen/2)-1)) where nlen is 4096. * This number was calculated using a small test tool written with a common * large number math library. Other values of nlen may be checked with a subset * of lower_bound. 
*/ static const byte lower_bound[] = { 0xB5, 0x04, 0xF3, 0x33, 0xF9, 0xDE, 0x64, 0x84, 0x59, 0x7D, 0x89, 0xB3, 0x75, 0x4A, 0xBE, 0x9F, 0x1D, 0x6F, 0x60, 0xBA, 0x89, 0x3B, 0xA8, 0x4C, 0xED, 0x17, 0xAC, 0x85, 0x83, 0x33, 0x99, 0x15, /* 512 */ 0x4A, 0xFC, 0x83, 0x04, 0x3A, 0xB8, 0xA2, 0xC3, 0xA8, 0xB1, 0xFE, 0x6F, 0xDC, 0x83, 0xDB, 0x39, 0x0F, 0x74, 0xA8, 0x5E, 0x43, 0x9C, 0x7B, 0x4A, 0x78, 0x04, 0x87, 0x36, 0x3D, 0xFA, 0x27, 0x68, /* 1024 */ 0xD2, 0x20, 0x2E, 0x87, 0x42, 0xAF, 0x1F, 0x4E, 0x53, 0x05, 0x9C, 0x60, 0x11, 0xBC, 0x33, 0x7B, 0xCA, 0xB1, 0xBC, 0x91, 0x16, 0x88, 0x45, 0x8A, 0x46, 0x0A, 0xBC, 0x72, 0x2F, 0x7C, 0x4E, 0x33, 0xC6, 0xD5, 0xA8, 0xA3, 0x8B, 0xB7, 0xE9, 0xDC, 0xCB, 0x2A, 0x63, 0x43, 0x31, 0xF3, 0xC8, 0x4D, 0xF5, 0x2F, 0x12, 0x0F, 0x83, 0x6E, 0x58, 0x2E, 0xEA, 0xA4, 0xA0, 0x89, 0x90, 0x40, 0xCA, 0x4A, /* 2048 */ 0x81, 0x39, 0x4A, 0xB6, 0xD8, 0xFD, 0x0E, 0xFD, 0xF4, 0xD3, 0xA0, 0x2C, 0xEB, 0xC9, 0x3E, 0x0C, 0x42, 0x64, 0xDA, 0xBC, 0xD5, 0x28, 0xB6, 0x51, 0xB8, 0xCF, 0x34, 0x1B, 0x6F, 0x82, 0x36, 0xC7, 0x01, 0x04, 0xDC, 0x01, 0xFE, 0x32, 0x35, 0x2F, 0x33, 0x2A, 0x5E, 0x9F, 0x7B, 0xDA, 0x1E, 0xBF, 0xF6, 0xA1, 0xBE, 0x3F, 0xCA, 0x22, 0x13, 0x07, 0xDE, 0xA0, 0x62, 0x41, 0xF7, 0xAA, 0x81, 0xC2, /* 3072 */ 0xC1, 0xFC, 0xBD, 0xDE, 0xA2, 0xF7, 0xDC, 0x33, 0x18, 0x83, 0x8A, 0x2E, 0xAF, 0xF5, 0xF3, 0xB2, 0xD2, 0x4F, 0x4A, 0x76, 0x3F, 0xAC, 0xB8, 0x82, 0xFD, 0xFE, 0x17, 0x0F, 0xD3, 0xB1, 0xF7, 0x80, 0xF9, 0xAC, 0xCE, 0x41, 0x79, 0x7F, 0x28, 0x05, 0xC2, 0x46, 0x78, 0x5E, 0x92, 0x95, 0x70, 0x23, 0x5F, 0xCF, 0x8F, 0x7B, 0xCA, 0x3E, 0xA3, 0x3B, 0x4D, 0x7C, 0x60, 0xA5, 0xE6, 0x33, 0xE3, 0xE1 /* 4096 */ }; /* returns 1 on key size ok and 0 if not ok */ static WC_INLINE int RsaSizeCheck(int size) { if (size < RSA_MIN_SIZE || size > RSA_MAX_SIZE) { return 0; } #ifdef HAVE_FIPS /* Key size requirements for CAVP */ switch (size) { case 1024: case 2048: case 3072: case 4096: return 1; } return 0; #else return 1; /* allow unusual key sizes in non FIPS mode */ #endif /* HAVE_FIPS */ } static int _CheckProbablePrime(mp_int* p, mp_int* q, mp_int* e, int nlen, int* isPrime, WC_RNG* rng) { int ret; mp_int tmp1, tmp2; mp_int* prime; if (p == NULL || e == NULL || isPrime == NULL) return BAD_FUNC_ARG; if (!RsaSizeCheck(nlen)) return BAD_FUNC_ARG; *isPrime = MP_NO; if (q != NULL) { /* 5.4 - check that |p-q| <= (2^(1/2))(2^((nlen/2)-1)) */ ret = wc_CompareDiffPQ(p, q, nlen); if (ret != MP_OKAY) goto notOkay; prime = q; } else prime = p; ret = mp_init_multi(&tmp1, &tmp2, NULL, NULL, NULL, NULL); if (ret != MP_OKAY) goto notOkay; /* 4.4,5.5 - Check that prime >= (2^(1/2))(2^((nlen/2)-1)) * This is a comparison against lowerBound */ ret = mp_read_unsigned_bin(&tmp1, lower_bound, nlen/16); if (ret != MP_OKAY) goto notOkay; ret = mp_cmp(prime, &tmp1); if (ret == MP_LT) goto exit; /* 4.5,5.6 - Check that GCD(p-1, e) == 1 */ ret = mp_sub_d(prime, 1, &tmp1); /* tmp1 = prime-1 */ if (ret != MP_OKAY) goto notOkay; ret = mp_gcd(&tmp1, e, &tmp2); /* tmp2 = gcd(prime-1, e) */ if (ret != MP_OKAY) goto notOkay; ret = mp_cmp_d(&tmp2, 1); if (ret != MP_EQ) goto exit; /* e divides p-1 */ /* 4.5.1,5.6.1 - Check primality of p with 8 rounds of M-R. * mp_prime_is_prime_ex() performs test divisions against the first 256 * prime numbers. After that it performs 8 rounds of M-R using random * bases between 2 and n-2. * mp_prime_is_prime() performs the same test divisions and then does * M-R with the first 8 primes. Both functions set isPrime as a * side-effect. 
*/ if (rng != NULL) ret = mp_prime_is_prime_ex(prime, 8, isPrime, rng); else ret = mp_prime_is_prime(prime, 8, isPrime); if (ret != MP_OKAY) goto notOkay; exit: ret = MP_OKAY; notOkay: mp_clear(&tmp1); mp_clear(&tmp2); return ret; } int wc_CheckProbablePrime_ex(const byte* pRaw, word32 pRawSz, const byte* qRaw, word32 qRawSz, const byte* eRaw, word32 eRawSz, int nlen, int* isPrime, WC_RNG* rng) { mp_int p, q, e; mp_int* Q = NULL; int ret; if (pRaw == NULL || pRawSz == 0 || eRaw == NULL || eRawSz == 0 || isPrime == NULL) { return BAD_FUNC_ARG; } if ((qRaw != NULL && qRawSz == 0) || (qRaw == NULL && qRawSz != 0)) return BAD_FUNC_ARG; ret = mp_init_multi(&p, &q, &e, NULL, NULL, NULL); if (ret == MP_OKAY) ret = mp_read_unsigned_bin(&p, pRaw, pRawSz); if (ret == MP_OKAY) { if (qRaw != NULL) { ret = mp_read_unsigned_bin(&q, qRaw, qRawSz); if (ret == MP_OKAY) Q = &q; } } if (ret == MP_OKAY) ret = mp_read_unsigned_bin(&e, eRaw, eRawSz); if (ret == MP_OKAY) ret = _CheckProbablePrime(&p, Q, &e, nlen, isPrime, rng); ret = (ret == MP_OKAY) ? 0 : PRIME_GEN_E; mp_clear(&p); mp_clear(&q); mp_clear(&e); return ret; } int wc_CheckProbablePrime(const byte* pRaw, word32 pRawSz, const byte* qRaw, word32 qRawSz, const byte* eRaw, word32 eRawSz, int nlen, int* isPrime) { return wc_CheckProbablePrime_ex(pRaw, pRawSz, qRaw, qRawSz, eRaw, eRawSz, nlen, isPrime, NULL); } #if !defined(HAVE_FIPS) || (defined(HAVE_FIPS) && \ defined(HAVE_FIPS_VERSION) && (HAVE_FIPS_VERSION >= 2)) /* Make an RSA key for size bits, with e specified, 65537 is a good e */ int wc_MakeRsaKey(RsaKey* key, int size, long e, WC_RNG* rng) { #ifndef WC_NO_RNG #ifdef WOLFSSL_SMALL_STACK mp_int *p = (mp_int *)XMALLOC(sizeof *p, key->heap, DYNAMIC_TYPE_RSA); mp_int *q = (mp_int *)XMALLOC(sizeof *q, key->heap, DYNAMIC_TYPE_RSA); mp_int *tmp1 = (mp_int *)XMALLOC(sizeof *tmp1, key->heap, DYNAMIC_TYPE_RSA); mp_int *tmp2 = (mp_int *)XMALLOC(sizeof *tmp2, key->heap, DYNAMIC_TYPE_RSA); mp_int *tmp3 = (mp_int *)XMALLOC(sizeof *tmp3, key->heap, DYNAMIC_TYPE_RSA); #else mp_int p_buf, *p = &p_buf; mp_int q_buf, *q = &q_buf; mp_int tmp1_buf, *tmp1 = &tmp1_buf; mp_int tmp2_buf, *tmp2 = &tmp2_buf; mp_int tmp3_buf, *tmp3 = &tmp3_buf; #endif int err, i, failCount, primeSz, isPrime = 0; byte* buf = NULL; #ifdef WOLFSSL_SMALL_STACK if ((p == NULL) || (q == NULL) || (tmp1 == NULL) || (tmp2 == NULL) || (tmp3 == NULL)) { err = MEMORY_E; goto out; } #endif if (key == NULL || rng == NULL) { err = BAD_FUNC_ARG; goto out; } if (!RsaSizeCheck(size)) { err = BAD_FUNC_ARG; goto out; } if (e < 3 || (e & 1) == 0) { err = BAD_FUNC_ARG; goto out; } #if defined(WOLFSSL_CRYPTOCELL) err = cc310_RSA_GenerateKeyPair(key, size, e); goto out; #endif /*WOLFSSL_CRYPTOCELL*/ #ifdef WOLF_CRYPTO_CB if (key->devId != INVALID_DEVID) { err = wc_CryptoCb_MakeRsaKey(key, size, e, rng); if (err != CRYPTOCB_UNAVAILABLE) goto out; /* fall-through when unavailable */ } #endif #if defined(WOLFSSL_ASYNC_CRYPT) && defined(WC_ASYNC_ENABLE_RSA) && \ defined(WC_ASYNC_ENABLE_RSA_KEYGEN) if (key->asyncDev.marker == WOLFSSL_ASYNC_MARKER_RSA) { #ifdef HAVE_CAVIUM /* TODO: Not implemented */ #elif defined(HAVE_INTEL_QA) err = IntelQaRsaKeyGen(&key->asyncDev, key, size, e, rng); goto out; #else if (wc_AsyncTestInit(&key->asyncDev, ASYNC_TEST_RSA_MAKE)) { WC_ASYNC_TEST* testDev = &key->asyncDev.test; testDev->rsaMake.rng = rng; testDev->rsaMake.key = key; testDev->rsaMake.size = size; testDev->rsaMake.e = e; err = WC_PENDING_E; goto out; } #endif } #endif err = mp_init_multi(p, q, tmp1, tmp2, tmp3, NULL); if (err 
== MP_OKAY) err = mp_set_int(tmp3, e); /* The failCount value comes from NIST FIPS 186-4, section B.3.3, * process steps 4.7 and 5.8. */ failCount = 5 * (size / 2); primeSz = size / 16; /* size is the size of n in bits. primeSz is in bytes. */ /* allocate buffer to work with */ if (err == MP_OKAY) { buf = (byte*)XMALLOC(primeSz, key->heap, DYNAMIC_TYPE_RSA); if (buf == NULL) err = MEMORY_E; } /* make p */ if (err == MP_OKAY) { isPrime = 0; i = 0; do { #ifdef SHOW_GEN printf("."); fflush(stdout); #endif /* generate value */ err = wc_RNG_GenerateBlock(rng, buf, primeSz); if (err == 0) { /* prime lower bound has the MSB set, set it in candidate */ buf[0] |= 0x80; /* make candidate odd */ buf[primeSz-1] |= 0x01; /* load value */ err = mp_read_unsigned_bin(p, buf, primeSz); } if (err == MP_OKAY) err = _CheckProbablePrime(p, NULL, tmp3, size, &isPrime, rng); #ifdef HAVE_FIPS i++; #else /* Keep the old retry behavior in non-FIPS build. */ (void)i; #endif } while (err == MP_OKAY && !isPrime && i < failCount); } if (err == MP_OKAY && !isPrime) err = PRIME_GEN_E; /* make q */ if (err == MP_OKAY) { isPrime = 0; i = 0; do { #ifdef SHOW_GEN printf("."); fflush(stdout); #endif /* generate value */ err = wc_RNG_GenerateBlock(rng, buf, primeSz); if (err == 0) { /* prime lower bound has the MSB set, set it in candidate */ buf[0] |= 0x80; /* make candidate odd */ buf[primeSz-1] |= 0x01; /* load value */ err = mp_read_unsigned_bin(q, buf, primeSz); } if (err == MP_OKAY) err = _CheckProbablePrime(p, q, tmp3, size, &isPrime, rng); #ifdef HAVE_FIPS i++; #else /* Keep the old retry behavior in non-FIPS build. */ (void)i; #endif } while (err == MP_OKAY && !isPrime && i < failCount); } if (err == MP_OKAY && !isPrime) err = PRIME_GEN_E; if (buf) { ForceZero(buf, primeSz); XFREE(buf, key->heap, DYNAMIC_TYPE_RSA); } if (err == MP_OKAY && mp_cmp(p, q) < 0) { err = mp_copy(p, tmp1); if (err == MP_OKAY) err = mp_copy(q, p); if (err == MP_OKAY) mp_copy(tmp1, q); } /* Setup RsaKey buffers */ if (err == MP_OKAY) err = mp_init_multi(&key->n, &key->e, &key->d, &key->p, &key->q, NULL); if (err == MP_OKAY) err = mp_init_multi(&key->dP, &key->dQ, &key->u, NULL, NULL, NULL); /* Software Key Calculation */ if (err == MP_OKAY) /* tmp1 = p-1 */ err = mp_sub_d(p, 1, tmp1); if (err == MP_OKAY) /* tmp2 = q-1 */ err = mp_sub_d(q, 1, tmp2); #ifdef WC_RSA_BLINDING if (err == MP_OKAY) /* tmp3 = order of n */ err = mp_mul(tmp1, tmp2, tmp3); #else if (err == MP_OKAY) /* tmp3 = lcm(p-1, q-1), last loop */ err = mp_lcm(tmp1, tmp2, tmp3); #endif /* make key */ if (err == MP_OKAY) /* key->e = e */ err = mp_set_int(&key->e, (mp_digit)e); #ifdef WC_RSA_BLINDING /* Blind the inverse operation with a value that is invertable */ if (err == MP_OKAY) { do { err = mp_rand(&key->p, get_digit_count(tmp3), rng); if (err == MP_OKAY) err = mp_set_bit(&key->p, 0); if (err == MP_OKAY) err = mp_set_bit(&key->p, size - 1); if (err == MP_OKAY) err = mp_gcd(&key->p, tmp3, &key->q); } while ((err == MP_OKAY) && !mp_isone(&key->q)); } if (err == MP_OKAY) err = mp_mul_d(&key->p, (mp_digit)e, &key->e); #endif if (err == MP_OKAY) /* key->d = 1/e mod lcm(p-1, q-1) */ err = mp_invmod(&key->e, tmp3, &key->d); #ifdef WC_RSA_BLINDING /* Take off blinding from d and reset e */ if (err == MP_OKAY) err = mp_mulmod(&key->d, &key->p, tmp3, &key->d); if (err == MP_OKAY) err = mp_set_int(&key->e, (mp_digit)e); #endif if (err == MP_OKAY) /* key->n = pq */ err = mp_mul(p, q, &key->n); if (err == MP_OKAY) /* key->dP = d mod(p-1) */ err = mp_mod(&key->d, tmp1, &key->dP); if (err == 
MP_OKAY) /* key->dQ = d mod(q-1) */ err = mp_mod(&key->d, tmp2, &key->dQ); #ifdef WOLFSSL_MP_INVMOD_CONSTANT_TIME if (err == MP_OKAY) /* key->u = 1/q mod p */ err = mp_invmod(q, p, &key->u); #else if (err == MP_OKAY) err = mp_sub_d(p, 2, tmp3); if (err == MP_OKAY) /* key->u = 1/q mod p = q^p-2 mod p */ err = mp_exptmod(q, tmp3 , p, &key->u); #endif if (err == MP_OKAY) err = mp_copy(p, &key->p); if (err == MP_OKAY) err = mp_copy(q, &key->q); #ifdef HAVE_WOLF_BIGINT /* make sure raw unsigned bin version is available */ if (err == MP_OKAY) err = wc_mp_to_bigint(&key->n, &key->n.raw); if (err == MP_OKAY) err = wc_mp_to_bigint(&key->e, &key->e.raw); if (err == MP_OKAY) err = wc_mp_to_bigint(&key->d, &key->d.raw); if (err == MP_OKAY) err = wc_mp_to_bigint(&key->p, &key->p.raw); if (err == MP_OKAY) err = wc_mp_to_bigint(&key->q, &key->q.raw); if (err == MP_OKAY) err = wc_mp_to_bigint(&key->dP, &key->dP.raw); if (err == MP_OKAY) err = wc_mp_to_bigint(&key->dQ, &key->dQ.raw); if (err == MP_OKAY) err = wc_mp_to_bigint(&key->u, &key->u.raw); #endif if (err == MP_OKAY) key->type = RSA_PRIVATE; mp_clear(tmp1); mp_clear(tmp2); mp_clear(tmp3); mp_clear(p); mp_clear(q); #if defined(WOLFSSL_KEY_GEN) && !defined(WOLFSSL_NO_RSA_KEY_CHECK) /* Perform the pair-wise consistency test on the new key. */ if (err == 0) err = wc_CheckRsaKey(key); #endif if (err != 0) { wc_FreeRsaKey(key); goto out; } #if defined(WOLFSSL_XILINX_CRYPT) || defined(WOLFSSL_CRYPTOCELL) if (wc_InitRsaHw(key) != 0) { return BAD_STATE_E; } #endif err = 0; out: #ifdef WOLFSSL_SMALL_STACK if (p) XFREE(p, key->heap, DYNAMIC_TYPE_RSA); if (q) XFREE(q, key->heap, DYNAMIC_TYPE_RSA); if (tmp1) XFREE(tmp1, key->heap, DYNAMIC_TYPE_RSA); if (tmp2) XFREE(tmp2, key->heap, DYNAMIC_TYPE_RSA); if (tmp3) XFREE(tmp3, key->heap, DYNAMIC_TYPE_RSA); #endif return err; #else return NOT_COMPILED_IN; #endif } #endif /* !FIPS || FIPS_VER >= 2 */ #endif /* WOLFSSL_KEY_GEN */ #ifdef WC_RSA_BLINDING int wc_RsaSetRNG(RsaKey* key, WC_RNG* rng) { if (key == NULL) return BAD_FUNC_ARG; key->rng = rng; return 0; } #endif /* WC_RSA_BLINDING */ #ifdef WC_RSA_NONBLOCK int wc_RsaSetNonBlock(RsaKey* key, RsaNb* nb) { if (key == NULL) return BAD_FUNC_ARG; if (nb) { XMEMSET(nb, 0, sizeof(RsaNb)); } /* Allow nb == NULL to clear non-block mode */ key->nb = nb; return 0; } #ifdef WC_RSA_NONBLOCK_TIME int wc_RsaSetNonBlockTime(RsaKey* key, word32 maxBlockUs, word32 cpuMHz) { if (key == NULL || key->nb == NULL) { return BAD_FUNC_ARG; } /* calculate maximum number of instructions to block */ key->nb->exptmod.maxBlockInst = cpuMHz * maxBlockUs; return 0; } #endif /* WC_RSA_NONBLOCK_TIME */ #endif /* WC_RSA_NONBLOCK */ #endif /* NO_RSA */
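Since the wolfCrypt record above only shows library internals, here is a minimal, self-contained sketch of how the public API it defines is typically driven end to end: generate a key with wc_MakeRsaKey(), then do a PKCS#1 v1.5 sign/verify round trip with wc_RsaSSL_Sign()/wc_RsaSSL_Verify(). It is illustrative only and not part of the record: it assumes a build with WOLFSSL_KEY_GEN enabled (and WC_RSA_BLINDING for the wc_RsaSetRNG() call), the usual wolfSSL header layout, and trims error handling to the bare minimum; buffer sizes and the dummy digest are made up for the example.

/* Minimal usage sketch (not part of the record above): key generation plus a
 * PKCS#1 v1.5 sign/verify round trip using the wolfCrypt RSA API shown above.
 * Assumes WOLFSSL_KEY_GEN and no NO_RSA; sizes and data are illustrative. */
#include <wolfssl/options.h>
#include <wolfssl/wolfcrypt/rsa.h>
#include <wolfssl/wolfcrypt/random.h>
#include <string.h>

int rsa_sign_verify_sketch(void)
{
    RsaKey key;
    WC_RNG rng;
    byte   hash[32] = { 0x01 };   /* pretend message digest */
    byte   sig[256];              /* 2048-bit key -> 256-byte signature */
    byte   plain[256];
    int    sigSz, ret;

    if (wc_InitRng(&rng) != 0)
        return -1;
    if (wc_InitRsaKey(&key, NULL) != 0)
        return -1;

#ifdef WC_RSA_BLINDING
    wc_RsaSetRNG(&key, &rng);     /* blinding needs an RNG bound to the key */
#endif

    /* 2048-bit modulus, e = 65537 */
    ret = wc_MakeRsaKey(&key, 2048, 65537, &rng);
    if (ret == 0) {
        sigSz = wc_RsaSSL_Sign(hash, sizeof(hash), sig, sizeof(sig), &key, &rng);
        ret = sigSz;
    }
    if (ret > 0)
        ret = wc_RsaSSL_Verify(sig, (word32)sigSz, plain, sizeof(plain), &key);
    if (ret > 0 && memcmp(plain, hash, sizeof(hash)) != 0)
        ret = -1;                 /* recovered digest should match the input */

    wc_FreeRsaKey(&key);
    wc_FreeRng(&rng);
    return (ret > 0) ? 0 : -1;
}

Note that, as in the record's own implementation, wc_RsaSSL_Verify() returns the length of the recovered data on success, which is why the sketch compares the recovered bytes against the original digest rather than testing for zero.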
null
222
CWE-787
CVE-2020-36400
/* Copyright (c) 2007-2016 Contributors as noted in the AUTHORS file This file is part of libzmq, the ZeroMQ core engine in C++. libzmq is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License (LGPL) as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. As a special exception, the Contributors give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify this library, you must extend this exception to your version of the library. libzmq is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #ifndef __ZMQ_DECODER_ALLOCATORS_HPP_INCLUDED__ #define __ZMQ_DECODER_ALLOCATORS_HPP_INCLUDED__ #include <cstddef> #include <cstdlib> #include "atomic_counter.hpp" #include "msg.hpp" #include "err.hpp" namespace zmq { // Static buffer policy. class c_single_allocator { public: explicit c_single_allocator (std::size_t bufsize_) : _buf_size (bufsize_), _buf (static_cast<unsigned char *> (std::malloc (_buf_size))) { alloc_assert (_buf); } ~c_single_allocator () { std::free (_buf); } unsigned char *allocate () { return _buf; } void deallocate () {} std::size_t size () const { return _buf_size; } void resize (std::size_t new_size_) { _buf_size = new_size_; } private: std::size_t _buf_size; unsigned char *_buf; ZMQ_NON_COPYABLE_NOR_MOVABLE (c_single_allocator) }; // This allocator allocates a reference counted buffer which is used by v2_decoder_t // to use zero-copy msg::init_data to create messages with memory from this buffer as // data storage. // // The buffer is allocated with a reference count of 1 to make sure that is is alive while // decoding messages. Otherwise, it is possible that e.g. the first message increases the count // from zero to one, gets passed to the user application, processed in the user thread and deleted // which would then deallocate the buffer. The drawback is that the buffer may be allocated longer // than necessary because it is only deleted when allocate is called the next time. class shared_message_memory_allocator { public: explicit shared_message_memory_allocator (std::size_t bufsize_); // Create an allocator for a maximum number of messages shared_message_memory_allocator (std::size_t bufsize_, std::size_t max_messages_); ~shared_message_memory_allocator (); // Allocate a new buffer // // This releases the current buffer to be bound to the lifetime of the messages // created on this buffer. unsigned char *allocate (); // force deallocation of buffer. void deallocate (); // Give up ownership of the buffer. The buffer's lifetime is now coupled to // the messages constructed on top of it. unsigned char *release (); void inc_ref (); static void call_dec_ref (void *, void *hint_); std::size_t size () const; // Return pointer to the first message data byte. 
unsigned char *data (); // Return pointer to the first byte of the buffer. unsigned char *buffer () { return _buf; } void resize (std::size_t new_size_) { _buf_size = new_size_; } zmq::msg_t::content_t *provide_content () { return _msg_content; } void advance_content () { _msg_content++; } private: void clear (); unsigned char *_buf; std::size_t _buf_size; const std::size_t _max_size; zmq::msg_t::content_t *_msg_content; std::size_t _max_counters; }; } #endif
null
/* Copyright (c) 2007-2016 Contributors as noted in the AUTHORS file This file is part of libzmq, the ZeroMQ core engine in C++. libzmq is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License (LGPL) as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. As a special exception, the Contributors give you permission to link this library with independent modules to produce an executable, regardless of the license terms of these independent modules, and to copy and distribute the resulting executable under terms of your choice, provided that you also meet, for each linked independent module, the terms and conditions of the license of that module. An independent module is a module which is not derived from or based on this library. If you modify this library, you must extend this exception to your version of the library. libzmq is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #ifndef __ZMQ_DECODER_ALLOCATORS_HPP_INCLUDED__ #define __ZMQ_DECODER_ALLOCATORS_HPP_INCLUDED__ #include <cstddef> #include <cstdlib> #include "atomic_counter.hpp" #include "msg.hpp" #include "err.hpp" namespace zmq { // Static buffer policy. class c_single_allocator { public: explicit c_single_allocator (std::size_t bufsize_) : _buf_size (bufsize_), _buf (static_cast<unsigned char *> (std::malloc (_buf_size))) { alloc_assert (_buf); } ~c_single_allocator () { std::free (_buf); } unsigned char *allocate () { return _buf; } void deallocate () {} std::size_t size () const { return _buf_size; } // This buffer is fixed, size must not be changed void resize (std::size_t new_size_) { LIBZMQ_UNUSED (new_size_); } private: std::size_t _buf_size; unsigned char *_buf; ZMQ_NON_COPYABLE_NOR_MOVABLE (c_single_allocator) }; // This allocator allocates a reference counted buffer which is used by v2_decoder_t // to use zero-copy msg::init_data to create messages with memory from this buffer as // data storage. // // The buffer is allocated with a reference count of 1 to make sure that is is alive while // decoding messages. Otherwise, it is possible that e.g. the first message increases the count // from zero to one, gets passed to the user application, processed in the user thread and deleted // which would then deallocate the buffer. The drawback is that the buffer may be allocated longer // than necessary because it is only deleted when allocate is called the next time. class shared_message_memory_allocator { public: explicit shared_message_memory_allocator (std::size_t bufsize_); // Create an allocator for a maximum number of messages shared_message_memory_allocator (std::size_t bufsize_, std::size_t max_messages_); ~shared_message_memory_allocator (); // Allocate a new buffer // // This releases the current buffer to be bound to the lifetime of the messages // created on this buffer. unsigned char *allocate (); // force deallocation of buffer. void deallocate (); // Give up ownership of the buffer. The buffer's lifetime is now coupled to // the messages constructed on top of it. 
unsigned char *release (); void inc_ref (); static void call_dec_ref (void *, void *hint_); std::size_t size () const; // Return pointer to the first message data byte. unsigned char *data (); // Return pointer to the first byte of the buffer. unsigned char *buffer () { return _buf; } void resize (std::size_t new_size_) { _buf_size = new_size_; } zmq::msg_t::content_t *provide_content () { return _msg_content; } void advance_content () { _msg_content++; } private: void clear (); unsigned char *_buf; std::size_t _buf_size; const std::size_t _max_size; zmq::msg_t::content_t *_msg_content; std::size_t _max_counters; }; } #endif
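The two copies of decoder_allocators.hpp above differ only in c_single_allocator::resize(): the first lets resize() overwrite _buf_size even though the underlying malloc'd buffer never grows, while the second makes it a no-op with a comment that the fixed buffer must not change size. The standalone C sketch below (illustrative only, not libzmq code; every name in it is made up) shows why a size field that can outgrow the real allocation invites exactly the kind of out-of-bounds write this record is classified under.

/* Standalone illustration (not libzmq code): a fixed-capacity buffer whose
 * recorded size can be "resized" without reallocating, mirroring the first
 * header above. Any later write that trusts the recorded size past the real
 * capacity is an out-of-bounds write (CWE-787). */
#include <stdlib.h>
#include <string.h>

struct fixed_buf {
    unsigned char *buf;
    size_t         size;   /* what callers are told they may write */
    size_t         cap;    /* what was actually malloc'd */
};

static int fixed_buf_init(struct fixed_buf *b, size_t cap)
{
    b->buf = malloc(cap);
    b->size = b->cap = cap;
    return b->buf ? 0 : -1;
}

/* Unsafe variant: mirrors the first header's resize(), which only bumps the
 * bookkeeping value; the allocation itself stays the same size. */
static void resize_unsafe(struct fixed_buf *b, size_t new_size)
{
    b->size = new_size;               /* cap is unchanged! */
}

/* Safe variant: mirrors the fixed header, where a static buffer simply
 * cannot grow, so the request is ignored. */
static void resize_safe(struct fixed_buf *b, size_t new_size)
{
    (void)b;
    (void)new_size;                   /* fixed buffer: size must not change */
}

int demo(void)
{
    struct fixed_buf b;
    if (fixed_buf_init(&b, 64) != 0)
        return -1;

    resize_unsafe(&b, 4096);
    /* memset(b.buf, 0, b.size);  <-- would write 4096 bytes into a
     *                                64-byte allocation: heap overflow */

    resize_safe(&b, 4096);            /* size stays 64, no overflow */
    memset(b.buf, 0, b.cap);          /* bounded by the real capacity */

    free(b.buf);
    return 0;
}

The design choice in the fixed header follows directly: for a statically sized buffer the only safe resize is no resize, so the request is discarded and the real capacity remains the single source of truth for how much may be written.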
null
223
CWE-787
CVE-2020-36402
******************************** Solidity IR-based Codegen Changes ******************************** This section highlights the main differences between the old and the IR-based codegen, along with the reasoning behind the changes and how to update affected code. Semantic Only Changes ===================== This section lists the changes that are semantic-only, thus potentially hiding new and different behavior in existing code. * When storage structs are deleted, every storage slot that contains a member of the struct is set to zero entirely. Formally, padding space was left untouched. Consequently, if the padding space within a struct is used to store data (e.g. in the context of a contract upgrade), you have to be aware that ``delete`` will now also clear the added member (while it wouldn't have been cleared in the past). :: // SPDX-License-Identifier: GPL-3.0 pragma solidity >0.7.0; contract C { struct S { uint64 y; uint64 z; } S s; function f() public { // ... delete s; // s occupies only first 16 bytes of the 32 bytes slot // delete will write zero to the full slot } } We have the same behavior for implicit delete, for example when array of structs is shortened. * The order of contract initialization has changed in case of inheritance. The order used to be: - All state variables are zero-initialized at the beginning. - Evaluate base constructor arguments from most derived to most base contract. - Initialize all state variables in the whole inheritance hierarchy from most base to most derived. - Run the constructor, if present, for all contracts in the linearized hierarchy from most base to most derived. New order: - All state variables are zero-initialized at the beginning. - Evaluate base constructor arguments from most derived to most base contract. - For every contract in order from most base to most derived in the linearized hierarchy execute: 1. State variables are assigned value their initial values, if present at declaration. 2. Constructor, if present. This causes differences in some contracts, for example: :: // SPDX-License-Identifier: GPL-3.0 pragma solidity >0.7.0; contract A { uint x; constructor() { x = 42; } function f() public view returns(uint256) { return x; } } contract B is A { uint public y = f(); } Previously, ``y`` would be set to 0. This is due to the fact that we would first initialize state variables: First, ``x`` is set to 0, and when initializing ``y``, ``f()`` would return 0 causing ``y`` to be 0 as well. With the new rules, ``y`` will be set to 42. We first initialize ``x`` to 0, then call A's constructor which sets ``x`` to 42. Finally, when initializing ``y``, ``f()`` returns 42 causing ``y`` to be 42.
null
******************************** Solidity IR-based Codegen Changes ******************************** This section highlights the main differences between the old and the IR-based codegen, along with the reasoning behind the changes and how to update affected code. Semantic Only Changes ===================== This section lists the changes that are semantic-only, thus potentially hiding new and different behavior in existing code. * When storage structs are deleted, every storage slot that contains a member of the struct is set to zero entirely. Formally, padding space was left untouched. Consequently, if the padding space within a struct is used to store data (e.g. in the context of a contract upgrade), you have to be aware that ``delete`` will now also clear the added member (while it wouldn't have been cleared in the past). :: // SPDX-License-Identifier: GPL-3.0 pragma solidity >0.7.0; contract C { struct S { uint64 y; uint64 z; } S s; function f() public { // ... delete s; // s occupies only first 16 bytes of the 32 bytes slot // delete will write zero to the full slot } } We have the same behavior for implicit delete, for example when array of structs is shortened. * The order of contract initialization has changed in case of inheritance. The order used to be: - All state variables are zero-initialized at the beginning. - Evaluate base constructor arguments from most derived to most base contract. - Initialize all state variables in the whole inheritance hierarchy from most base to most derived. - Run the constructor, if present, for all contracts in the linearized hierarchy from most base to most derived. New order: - All state variables are zero-initialized at the beginning. - Evaluate base constructor arguments from most derived to most base contract. - For every contract in order from most base to most derived in the linearized hierarchy execute: 1. State variables are assigned their initial values, if present at declaration. 2. Constructor, if present. This causes differences in some contracts, for example: :: // SPDX-License-Identifier: GPL-3.0 pragma solidity >0.7.0; contract A { uint x; constructor() { x = 42; } function f() public view returns(uint256) { return x; } } contract B is A { uint public y = f(); } Previously, ``y`` would be set to 0. This is due to the fact that we would first initialize state variables: First, ``x`` is set to 0, and when initializing ``y``, ``f()`` would return 0 causing ``y`` to be 0 as well. With the new rules, ``y`` will be set to 42. We first initialize ``x`` to 0, then call A's constructor which sets ``x`` to 42. Finally, when initializing ``y``, ``f()`` returns 42 causing ``y`` to be 42.
null
224
CWE-787
CVE-2020-36403
/* vcf.c -- VCF/BCF API functions. Copyright (C) 2012, 2013 Broad Institute. Copyright (C) 2012-2020 Genome Research Ltd. Portions copyright (C) 2014 Intel Corporation. Author: Heng Li <lh3@sanger.ac.uk> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #define HTS_BUILDING_LIBRARY // Enables HTSLIB_EXPORT, see htslib/hts_defs.h #include <config.h> #include <stdio.h> #include <assert.h> #include <string.h> #include <strings.h> #include <stdlib.h> #include <limits.h> #include <stdint.h> #include <inttypes.h> #include <errno.h> #include "htslib/vcf.h" #include "htslib/bgzf.h" #include "htslib/tbx.h" #include "htslib/hfile.h" #include "hts_internal.h" #include "htslib/hts_endian.h" #include "htslib/khash_str2int.h" #include "htslib/kstring.h" #include "htslib/sam.h" #include "htslib/khash.h" KHASH_MAP_INIT_STR(vdict, bcf_idinfo_t) typedef khash_t(vdict) vdict_t; #include "htslib/kseq.h" HTSLIB_EXPORT uint32_t bcf_float_missing = 0x7F800001; HTSLIB_EXPORT uint32_t bcf_float_vector_end = 0x7F800002; HTSLIB_EXPORT uint8_t bcf_type_shift[] = { 0, 0, 1, 2, 3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; static bcf_idinfo_t bcf_idinfo_def = { .info = { 15, 15, 15 }, .hrec = { NULL, NULL, NULL}, .id = -1 }; /* Partial support for 64-bit POS and Number=1 INFO tags. 
Notes: - the support for 64-bit values is motivated by POS and INFO/END for large genomes - the use of 64-bit values does not conform to the specification - cannot output 64-bit BCF and if it does, it is not compatible with anything - experimental, use at your risk */ #ifdef VCF_ALLOW_INT64 #define BCF_MAX_BT_INT64 (0x7fffffffffffffff) /* INT64_MAX, for internal use only */ #define BCF_MIN_BT_INT64 -9223372036854775800LL /* INT64_MIN + 8, for internal use only */ #endif #define BCF_IS_64BIT (1<<30) static char *find_chrom_header_line(char *s) { char *nl; if (strncmp(s, "#CHROM\t", 7) == 0) return s; else if ((nl = strstr(s, "\n#CHROM\t")) != NULL) return nl+1; else return NULL; } /************************* *** VCF header parser *** *************************/ static int bcf_hdr_add_sample_len(bcf_hdr_t *h, const char *s, size_t len) { if ( !s ) return 0; if (len == 0) len = strlen(s); const char *ss = s; while ( *ss && isspace_c(*ss) && ss - s < len) ss++; if ( !*ss || ss - s == len) { hts_log_error("Empty sample name: trailing spaces/tabs in the header line?"); return -1; } vdict_t *d = (vdict_t*)h->dict[BCF_DT_SAMPLE]; int ret; char *sdup = malloc(len + 1); if (!sdup) return -1; memcpy(sdup, s, len); sdup[len] = 0; // Ensure space is available in h->samples size_t n = kh_size(d); char **new_samples = realloc(h->samples, sizeof(char*) * (n + 1)); if (!new_samples) { free(sdup); return -1; } h->samples = new_samples; int k = kh_put(vdict, d, sdup, &ret); if (ret < 0) { free(sdup); return -1; } if (ret) { // absent kh_val(d, k) = bcf_idinfo_def; kh_val(d, k).id = n; } else { hts_log_error("Duplicated sample name '%s'", s); free(sdup); return -1; } h->samples[n] = sdup; h->dirty = 1; return 0; } int bcf_hdr_add_sample(bcf_hdr_t *h, const char *s) { return bcf_hdr_add_sample_len(h, s, 0); } int HTS_RESULT_USED bcf_hdr_parse_sample_line(bcf_hdr_t *h, const char *str) { int ret = 0; int i = 0; const char *p, *q; // add samples for (p = q = str;; ++q) { if (*q > '\n') continue; if (++i > 9) { if ( bcf_hdr_add_sample_len(h, p, q - p) < 0 ) ret = -1; } if (*q == 0 || *q == '\n' || ret < 0) break; p = q + 1; } return ret; } int bcf_hdr_sync(bcf_hdr_t *h) { int i; for (i = 0; i < 3; i++) { vdict_t *d = (vdict_t*)h->dict[i]; khint_t k; if ( h->n[i] < kh_size(d) ) { bcf_idpair_t *new_idpair; // this should be true only for i=2, BCF_DT_SAMPLE new_idpair = (bcf_idpair_t*) realloc(h->id[i], kh_size(d)*sizeof(bcf_idpair_t)); if (!new_idpair) return -1; h->n[i] = kh_size(d); h->id[i] = new_idpair; } for (k=kh_begin(d); k<kh_end(d); k++) { if (!kh_exist(d,k)) continue; h->id[i][kh_val(d,k).id].key = kh_key(d,k); h->id[i][kh_val(d,k).id].val = &kh_val(d,k); } } h->dirty = 0; return 0; } void bcf_hrec_destroy(bcf_hrec_t *hrec) { if (!hrec) return; free(hrec->key); if ( hrec->value ) free(hrec->value); int i; for (i=0; i<hrec->nkeys; i++) { free(hrec->keys[i]); free(hrec->vals[i]); } free(hrec->keys); free(hrec->vals); free(hrec); } // Copies all fields except IDX. 
bcf_hrec_t *bcf_hrec_dup(bcf_hrec_t *hrec) { int save_errno; bcf_hrec_t *out = (bcf_hrec_t*) calloc(1,sizeof(bcf_hrec_t)); if (!out) return NULL; out->type = hrec->type; if ( hrec->key ) { out->key = strdup(hrec->key); if (!out->key) goto fail; } if ( hrec->value ) { out->value = strdup(hrec->value); if (!out->value) goto fail; } out->nkeys = hrec->nkeys; out->keys = (char**) malloc(sizeof(char*)*hrec->nkeys); if (!out->keys) goto fail; out->vals = (char**) malloc(sizeof(char*)*hrec->nkeys); if (!out->vals) goto fail; int i, j = 0; for (i=0; i<hrec->nkeys; i++) { if ( hrec->keys[i] && !strcmp("IDX",hrec->keys[i]) ) continue; if ( hrec->keys[i] ) { out->keys[j] = strdup(hrec->keys[i]); if (!out->keys[j]) goto fail; } if ( hrec->vals[i] ) { out->vals[j] = strdup(hrec->vals[i]); if (!out->vals[j]) goto fail; } j++; } if ( i!=j ) out->nkeys -= i-j; // IDX was omitted return out; fail: save_errno = errno; hts_log_error("%s", strerror(errno)); bcf_hrec_destroy(out); errno = save_errno; return NULL; } void bcf_hrec_debug(FILE *fp, bcf_hrec_t *hrec) { fprintf(fp, "key=[%s] value=[%s]", hrec->key, hrec->value?hrec->value:""); int i; for (i=0; i<hrec->nkeys; i++) fprintf(fp, "\t[%s]=[%s]", hrec->keys[i],hrec->vals[i]); fprintf(fp, "\n"); } void bcf_header_debug(bcf_hdr_t *hdr) { int i, j; for (i=0; i<hdr->nhrec; i++) { if ( !hdr->hrec[i]->value ) { fprintf(stderr, "##%s=<", hdr->hrec[i]->key); fprintf(stderr,"%s=%s", hdr->hrec[i]->keys[0], hdr->hrec[i]->vals[0]); for (j=1; j<hdr->hrec[i]->nkeys; j++) fprintf(stderr,",%s=%s", hdr->hrec[i]->keys[j], hdr->hrec[i]->vals[j]); fprintf(stderr,">\n"); } else fprintf(stderr,"##%s=%s\n", hdr->hrec[i]->key,hdr->hrec[i]->value); } } int bcf_hrec_add_key(bcf_hrec_t *hrec, const char *str, size_t len) { char **tmp; size_t n = hrec->nkeys + 1; assert(len > 0 && len < SIZE_MAX); tmp = realloc(hrec->keys, sizeof(char*)*n); if (!tmp) return -1; hrec->keys = tmp; tmp = realloc(hrec->vals, sizeof(char*)*n); if (!tmp) return -1; hrec->vals = tmp; hrec->keys[hrec->nkeys] = (char*) malloc((len+1)*sizeof(char)); if (!hrec->keys[hrec->nkeys]) return -1; memcpy(hrec->keys[hrec->nkeys],str,len); hrec->keys[hrec->nkeys][len] = 0; hrec->vals[hrec->nkeys] = NULL; hrec->nkeys = n; return 0; } int bcf_hrec_set_val(bcf_hrec_t *hrec, int i, const char *str, size_t len, int is_quoted) { if ( hrec->vals[i] ) { free(hrec->vals[i]); hrec->vals[i] = NULL; } if ( !str ) return 0; if ( is_quoted ) { if (len >= SIZE_MAX - 3) { errno = ENOMEM; return -1; } hrec->vals[i] = (char*) malloc((len+3)*sizeof(char)); if (!hrec->vals[i]) return -1; hrec->vals[i][0] = '"'; memcpy(&hrec->vals[i][1],str,len); hrec->vals[i][len+1] = '"'; hrec->vals[i][len+2] = 0; } else { if (len == SIZE_MAX) { errno = ENOMEM; return -1; } hrec->vals[i] = (char*) malloc((len+1)*sizeof(char)); if (!hrec->vals[i]) return -1; memcpy(hrec->vals[i],str,len); hrec->vals[i][len] = 0; } return 0; } int hrec_add_idx(bcf_hrec_t *hrec, int idx) { int n = hrec->nkeys + 1; char **tmp = (char**) realloc(hrec->keys, sizeof(char*)*n); if (!tmp) return -1; hrec->keys = tmp; tmp = (char**) realloc(hrec->vals, sizeof(char*)*n); if (!tmp) return -1; hrec->vals = tmp; hrec->keys[hrec->nkeys] = strdup("IDX"); if (!hrec->keys[hrec->nkeys]) return -1; kstring_t str = {0,0,0}; if (kputw(idx, &str) < 0) { free(hrec->keys[hrec->nkeys]); return -1; } hrec->vals[hrec->nkeys] = str.s; hrec->nkeys = n; return 0; } int bcf_hrec_find_key(bcf_hrec_t *hrec, const char *key) { int i; for (i=0; i<hrec->nkeys; i++) if ( !strcasecmp(key,hrec->keys[i]) ) 
return i; return -1; } static inline int is_escaped(const char *min, const char *str) { int n = 0; while ( --str>=min && *str=='\\' ) n++; return n%2; } bcf_hrec_t *bcf_hdr_parse_line(const bcf_hdr_t *h, const char *line, int *len) { const char *p = line; if (p[0] != '#' || p[1] != '#') { *len = 0; return NULL; } p += 2; const char *q = p; while ( *q && *q!='=' && *q != '\n' ) q++; ptrdiff_t n = q-p; if ( *q!='=' || !n ) { *len = q-line+1; return NULL; } // wrong format bcf_hrec_t *hrec = (bcf_hrec_t*) calloc(1,sizeof(bcf_hrec_t)); if (!hrec) return NULL; hrec->key = (char*) malloc(sizeof(char)*(n+1)); if (!hrec->key) goto fail; memcpy(hrec->key,p,n); hrec->key[n] = 0; p = ++q; if ( *p!='<' ) // generic field, e.g. ##samtoolsVersion=0.1.18-r579 { while ( *q && *q!='\n' ) q++; hrec->value = (char*) malloc((q-p+1)*sizeof(char)); if (!hrec->value) goto fail; memcpy(hrec->value, p, q-p); hrec->value[q-p] = 0; *len = q - line + (*q ? 1 : 0); // Skip \n but not \0 return hrec; } // structured line, e.g. // ##INFO=<ID=PV1,Number=1,Type=Float,Description="P-value for baseQ bias"> // ##PEDIGREE=<Name_0=G0-ID,Name_1=G1-ID,Name_3=GN-ID> int nopen = 1; while ( *q && *q!='\n' && nopen>0 ) { p = ++q; while ( *q && *q==' ' ) { p++; q++; } // ^[A-Za-z_][0-9A-Za-z_.]*$ if (p==q && *q && (isalpha_c(*q) || *q=='_')) { q++; while ( *q && (isalnum_c(*q) || *q=='_' || *q=='.') ) q++; } n = q-p; int m = 0; while ( *q && *q==' ' ) { q++; m++; } if ( *q!='=' || !n ) { // wrong format while ( *q && *q!='\n' ) q++; hts_log_error("Could not parse the header line: \"%.*s\"", (int) (q - line), line); *len = q - line + (*q ? 1 : 0); bcf_hrec_destroy(hrec); return NULL; } if (bcf_hrec_add_key(hrec, p, q-p-m) < 0) goto fail; p = ++q; while ( *q && *q==' ' ) { p++; q++; } int quoted = *p=='"' ? 1 : 0; if ( quoted ) p++, q++; while ( *q && *q != '\n' ) { if ( quoted ) { if ( *q=='"' && !is_escaped(p,q) ) break; } else { if ( *q=='<' ) nopen++; if ( *q=='>' ) nopen--; if ( !nopen ) break; if ( *q==',' && nopen==1 ) break; } q++; } const char *r = q; while ( r > p && r[-1] == ' ' ) r--; if (bcf_hrec_set_val(hrec, hrec->nkeys-1, p, r-p, quoted) < 0) goto fail; if ( quoted && *q=='"' ) q++; if ( *q=='>' ) { nopen--; q++; } } // Skip to end of line int nonspace = 0; p = q; while ( *q && *q!='\n' ) { nonspace |= !isspace_c(*q); q++; } if (nonspace) { hts_log_warning("Dropped trailing junk from header line '%.*s'", (int) (q - line), line); } *len = q - line + (*q ? 1 : 0); return hrec; fail: bcf_hrec_destroy(hrec); return NULL; } static int bcf_hdr_set_idx(bcf_hdr_t *hdr, int dict_type, const char *tag, bcf_idinfo_t *idinfo) { size_t new_n; // If available, preserve existing IDX if ( idinfo->id==-1 ) idinfo->id = hdr->n[dict_type]; else if ( idinfo->id < hdr->n[dict_type] && hdr->id[dict_type][idinfo->id].key ) { hts_log_error("Conflicting IDX=%d lines in the header dictionary, the new tag is %s", idinfo->id, tag); errno = EINVAL; return -1; } new_n = idinfo->id >= hdr->n[dict_type] ? idinfo->id+1 : hdr->n[dict_type]; if (hts_resize(bcf_idpair_t, new_n, &hdr->m[dict_type], &hdr->id[dict_type], HTS_RESIZE_CLEAR)) { return -1; } hdr->n[dict_type] = new_n; // NB: the next kh_put call can invalidate the idinfo pointer, therefore // we leave it unassigned here. It must be set explicitly in bcf_hdr_sync. 
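    /* Added note: after the hts_resize() above, hdr->id[dict_type] holds at
     * least idinfo->id+1 zero-initialised slots.  Only the .key is recorded
     * here; bcf_hdr_sync() later rebuilds every .val pointer from the
     * dictionary, which is why the assignment is deferred as noted above. */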
hdr->id[dict_type][idinfo->id].key = tag; return 0; } // returns: 1 when hdr needs to be synced, -1 on error, 0 otherwise static int bcf_hdr_register_hrec(bcf_hdr_t *hdr, bcf_hrec_t *hrec) { // contig int i, ret, replacing = 0; khint_t k; char *str = NULL; if ( !strcmp(hrec->key, "contig") ) { hts_pos_t len = 0; hrec->type = BCF_HL_CTG; // Get the contig ID ($str) and length ($j) i = bcf_hrec_find_key(hrec,"length"); if ( i<0 ) len = 0; else { char *end = hrec->vals[i]; len = strtoll(hrec->vals[i], &end, 10); if (end == hrec->vals[i] || len < 0) return 0; } i = bcf_hrec_find_key(hrec,"ID"); if ( i<0 ) return 0; str = strdup(hrec->vals[i]); if (!str) return -1; // Register in the dictionary vdict_t *d = (vdict_t*)hdr->dict[BCF_DT_CTG]; khint_t k = kh_get(vdict, d, str); if ( k != kh_end(d) ) { // already present free(str); str=NULL; if (kh_val(d, k).hrec[0] != NULL) // and not removed return 0; replacing = 1; } else { k = kh_put(vdict, d, str, &ret); if (ret < 0) { free(str); return -1; } } int idx = bcf_hrec_find_key(hrec,"IDX"); if ( idx!=-1 ) { char *tmp = hrec->vals[idx]; idx = strtol(hrec->vals[idx], &tmp, 10); if ( *tmp || idx < 0 || idx >= INT_MAX - 1) { if (!replacing) { kh_del(vdict, d, k); free(str); } hts_log_warning("Error parsing the IDX tag, skipping"); return 0; } } kh_val(d, k) = bcf_idinfo_def; kh_val(d, k).id = idx; kh_val(d, k).info[0] = len; kh_val(d, k).hrec[0] = hrec; if (bcf_hdr_set_idx(hdr, BCF_DT_CTG, kh_key(d,k), &kh_val(d,k)) < 0) { if (!replacing) { kh_del(vdict, d, k); free(str); } return -1; } if ( idx==-1 ) { if (hrec_add_idx(hrec, kh_val(d,k).id) < 0) { return -1; } } return 1; } if ( !strcmp(hrec->key, "INFO") ) hrec->type = BCF_HL_INFO; else if ( !strcmp(hrec->key, "FILTER") ) hrec->type = BCF_HL_FLT; else if ( !strcmp(hrec->key, "FORMAT") ) hrec->type = BCF_HL_FMT; else if ( hrec->nkeys>0 ) { hrec->type = BCF_HL_STR; return 1; } else return 0; // INFO/FILTER/FORMAT char *id = NULL; uint32_t type = UINT32_MAX, var = UINT32_MAX; int num = -1, idx = -1; for (i=0; i<hrec->nkeys; i++) { if ( !strcmp(hrec->keys[i], "ID") ) id = hrec->vals[i]; else if ( !strcmp(hrec->keys[i], "IDX") ) { char *tmp = hrec->vals[i]; idx = strtol(hrec->vals[i], &tmp, 10); if ( *tmp || idx < 0 || idx >= INT_MAX - 1) { hts_log_warning("Error parsing the IDX tag, skipping"); return 0; } } else if ( !strcmp(hrec->keys[i], "Type") ) { if ( !strcmp(hrec->vals[i], "Integer") ) type = BCF_HT_INT; else if ( !strcmp(hrec->vals[i], "Float") ) type = BCF_HT_REAL; else if ( !strcmp(hrec->vals[i], "String") ) type = BCF_HT_STR; else if ( !strcmp(hrec->vals[i], "Character") ) type = BCF_HT_STR; else if ( !strcmp(hrec->vals[i], "Flag") ) type = BCF_HT_FLAG; else { hts_log_warning("The type \"%s\" is not supported, assuming \"String\"", hrec->vals[i]); type = BCF_HT_STR; } } else if ( !strcmp(hrec->keys[i], "Number") ) { if ( !strcmp(hrec->vals[i],"A") ) var = BCF_VL_A; else if ( !strcmp(hrec->vals[i],"R") ) var = BCF_VL_R; else if ( !strcmp(hrec->vals[i],"G") ) var = BCF_VL_G; else if ( !strcmp(hrec->vals[i],".") ) var = BCF_VL_VAR; else { sscanf(hrec->vals[i],"%d",&num); var = BCF_VL_FIXED; } if (var != BCF_VL_FIXED) num = 0xfffff; } } if (hrec->type == BCF_HL_INFO || hrec->type == BCF_HL_FMT) { if (type == -1) { hts_log_warning("%s %s field has no Type defined. Assuming String", *hrec->key == 'I' ? "An" : "A", hrec->key); type = BCF_HT_STR; } if (var == -1) { hts_log_warning("%s %s field has no Number defined. Assuming '.'", *hrec->key == 'I' ? 
"An" : "A", hrec->key); var = BCF_VL_VAR; } } uint32_t info = ((((uint32_t)num) & 0xfffff)<<12 | (var & 0xf) << 8 | (type & 0xf) << 4 | (((uint32_t) hrec->type) & 0xf)); if ( !id ) return 0; str = strdup(id); if (!str) return -1; vdict_t *d = (vdict_t*)hdr->dict[BCF_DT_ID]; k = kh_get(vdict, d, str); if ( k != kh_end(d) ) { // already present free(str); if ( kh_val(d, k).hrec[info&0xf] ) return 0; kh_val(d, k).info[info&0xf] = info; kh_val(d, k).hrec[info&0xf] = hrec; if ( idx==-1 ) { if (hrec_add_idx(hrec, kh_val(d, k).id) < 0) { return -1; } } return 1; } k = kh_put(vdict, d, str, &ret); if (ret < 0) { free(str); return -1; } kh_val(d, k) = bcf_idinfo_def; kh_val(d, k).info[info&0xf] = info; kh_val(d, k).hrec[info&0xf] = hrec; kh_val(d, k).id = idx; if (bcf_hdr_set_idx(hdr, BCF_DT_ID, kh_key(d,k), &kh_val(d,k)) < 0) { kh_del(vdict, d, k); free(str); return -1; } if ( idx==-1 ) { if (hrec_add_idx(hrec, kh_val(d,k).id) < 0) { return -1; } } return 1; } int bcf_hdr_add_hrec(bcf_hdr_t *hdr, bcf_hrec_t *hrec) { int res; if ( !hrec ) return 0; hrec->type = BCF_HL_GEN; res = bcf_hdr_register_hrec(hdr,hrec); if (res < 0) return -1; if ( !res ) { // If one of the hashed field, then it is already present if ( hrec->type != BCF_HL_GEN ) { bcf_hrec_destroy(hrec); return 0; } // Is one of the generic fields and already present? int i; for (i=0; i<hdr->nhrec; i++) { if ( hdr->hrec[i]->type!=BCF_HL_GEN ) continue; if ( !strcmp(hdr->hrec[i]->key,hrec->key) && !strcmp(hrec->key,"fileformat") ) break; if ( !strcmp(hdr->hrec[i]->key,hrec->key) && !strcmp(hdr->hrec[i]->value,hrec->value) ) break; } if ( i<hdr->nhrec ) { bcf_hrec_destroy(hrec); return 0; } } // New record, needs to be added int n = hdr->nhrec + 1; bcf_hrec_t **new_hrec = realloc(hdr->hrec, n*sizeof(bcf_hrec_t*)); if (!new_hrec) return -1; hdr->hrec = new_hrec; hdr->hrec[hdr->nhrec] = hrec; hdr->dirty = 1; hdr->nhrec = n; return hrec->type==BCF_HL_GEN ? 0 : 1; } /* * Note that while querying of FLT,INFO,FMT,CTG lines is fast (the keys are hashed), * the STR,GEN lines are searched for linearly in a linked list of all header lines. * This may become a problem for VCFs with huge headers, we might need to build a * dictionary for these lines as well. */ bcf_hrec_t *bcf_hdr_get_hrec(const bcf_hdr_t *hdr, int type, const char *key, const char *value, const char *str_class) { int i; if ( type==BCF_HL_GEN ) { for (i=0; i<hdr->nhrec; i++) { if ( hdr->hrec[i]->type!=type ) continue; if ( strcmp(hdr->hrec[i]->key,key) ) continue; if ( !value || !strcmp(hdr->hrec[i]->value,value) ) return hdr->hrec[i]; } return NULL; } else if ( type==BCF_HL_STR ) { for (i=0; i<hdr->nhrec; i++) { if ( hdr->hrec[i]->type!=type ) continue; if ( strcmp(hdr->hrec[i]->key,str_class) ) continue; int j = bcf_hrec_find_key(hdr->hrec[i],key); if ( j>=0 && !strcmp(hdr->hrec[i]->vals[j],value) ) return hdr->hrec[i]; } return NULL; } vdict_t *d = type==BCF_HL_CTG ? 
(vdict_t*)hdr->dict[BCF_DT_CTG] : (vdict_t*)hdr->dict[BCF_DT_ID]; khint_t k = kh_get(vdict, d, value); if ( k == kh_end(d) ) return NULL; return kh_val(d, k).hrec[type==BCF_HL_CTG?0:type]; } void bcf_hdr_check_sanity(bcf_hdr_t *hdr) { static int PL_warned = 0, GL_warned = 0; if ( !PL_warned ) { int id = bcf_hdr_id2int(hdr, BCF_DT_ID, "PL"); if ( bcf_hdr_idinfo_exists(hdr,BCF_HL_FMT,id) && bcf_hdr_id2length(hdr,BCF_HL_FMT,id)!=BCF_VL_G ) { hts_log_warning("PL should be declared as Number=G"); PL_warned = 1; } } if ( !GL_warned ) { int id = bcf_hdr_id2int(hdr, BCF_DT_ID, "GL"); if ( bcf_hdr_idinfo_exists(hdr,BCF_HL_FMT,id) && bcf_hdr_id2length(hdr,BCF_HL_FMT,id)!=BCF_VL_G ) { hts_log_warning("GL should be declared as Number=G"); GL_warned = 1; } } } int bcf_hdr_parse(bcf_hdr_t *hdr, char *htxt) { int len, done = 0; char *p = htxt; // Check sanity: "fileformat" string must come as first bcf_hrec_t *hrec = bcf_hdr_parse_line(hdr,p,&len); if ( !hrec || !hrec->key || strcasecmp(hrec->key,"fileformat") ) hts_log_warning("The first line should be ##fileformat; is the VCF/BCF header broken?"); if (bcf_hdr_add_hrec(hdr, hrec) < 0) { bcf_hrec_destroy(hrec); return -1; } // The filter PASS must appear first in the dictionary hrec = bcf_hdr_parse_line(hdr,"##FILTER=<ID=PASS,Description=\"All filters passed\">",&len); if (bcf_hdr_add_hrec(hdr, hrec) < 0) { bcf_hrec_destroy(hrec); return -1; } // Parse the whole header do { while (NULL != (hrec = bcf_hdr_parse_line(hdr, p, &len))) { if (bcf_hdr_add_hrec(hdr, hrec) < 0) { bcf_hrec_destroy(hrec); return -1; } p += len; } // Next should be the sample line. If not, it was a malformed // header, in which case print a warning and skip (many VCF // operations do not really care about a few malformed lines). // In the future we may want to add a strict mode that errors in // this case. if ( strncmp("#CHROM\tPOS",p,10) != 0 ) { char *eol = strchr(p, '\n'); if (*p != '\0') { hts_log_warning("Could not parse header line: %.*s", eol ? (int)(eol - p) : INT_MAX, p); } if (eol) { p = eol + 1; // Try from the next line. } else { done = -1; // No more lines left, give up. } } else { done = 1; // Sample line found } } while (!done); if (done < 0) { // No sample line is fatal. hts_log_error("Could not parse the header, sample line not found"); return -1; } if (bcf_hdr_parse_sample_line(hdr,p) < 0) return -1; if (bcf_hdr_sync(hdr) < 0) return -1; bcf_hdr_check_sanity(hdr); return 0; } int bcf_hdr_append(bcf_hdr_t *hdr, const char *line) { int len; bcf_hrec_t *hrec = bcf_hdr_parse_line(hdr, (char*) line, &len); if ( !hrec ) return -1; if (bcf_hdr_add_hrec(hdr, hrec) < 0) return -1; return 0; } void bcf_hdr_remove(bcf_hdr_t *hdr, int type, const char *key) { int i = 0; bcf_hrec_t *hrec; if ( !key ) { while ( i<hdr->nhrec ) { if ( hdr->hrec[i]->type!=type ) { i++; continue; } hrec = hdr->hrec[i]; if ( type==BCF_HL_FLT || type==BCF_HL_INFO || type==BCF_HL_FMT || type== BCF_HL_CTG ) { int j = bcf_hrec_find_key(hdr->hrec[i], "ID"); if ( j>=0 ) { vdict_t *d = type==BCF_HL_CTG ? 
(vdict_t*)hdr->dict[BCF_DT_CTG] : (vdict_t*)hdr->dict[BCF_DT_ID]; khint_t k = kh_get(vdict, d, hdr->hrec[i]->vals[j]); kh_val(d, k).hrec[type==BCF_HL_CTG?0:type] = NULL; } } hdr->dirty = 1; hdr->nhrec--; if ( i < hdr->nhrec ) memmove(&hdr->hrec[i],&hdr->hrec[i+1],(hdr->nhrec-i)*sizeof(bcf_hrec_t*)); bcf_hrec_destroy(hrec); } return; } while (1) { if ( type==BCF_HL_FLT || type==BCF_HL_INFO || type==BCF_HL_FMT || type== BCF_HL_CTG ) { hrec = bcf_hdr_get_hrec(hdr, type, "ID", key, NULL); if ( !hrec ) return; for (i=0; i<hdr->nhrec; i++) if ( hdr->hrec[i]==hrec ) break; assert( i<hdr->nhrec ); vdict_t *d = type==BCF_HL_CTG ? (vdict_t*)hdr->dict[BCF_DT_CTG] : (vdict_t*)hdr->dict[BCF_DT_ID]; khint_t k = kh_get(vdict, d, key); kh_val(d, k).hrec[type==BCF_HL_CTG?0:type] = NULL; } else { for (i=0; i<hdr->nhrec; i++) { if ( hdr->hrec[i]->type!=type ) continue; if ( type==BCF_HL_GEN ) { if ( !strcmp(hdr->hrec[i]->key,key) ) break; } else { // not all structured lines have ID, we could be more sophisticated as in bcf_hdr_get_hrec() int j = bcf_hrec_find_key(hdr->hrec[i], "ID"); if ( j>=0 && !strcmp(hdr->hrec[i]->vals[j],key) ) break; } } if ( i==hdr->nhrec ) return; hrec = hdr->hrec[i]; } hdr->nhrec--; if ( i < hdr->nhrec ) memmove(&hdr->hrec[i],&hdr->hrec[i+1],(hdr->nhrec-i)*sizeof(bcf_hrec_t*)); bcf_hrec_destroy(hrec); hdr->dirty = 1; } } int bcf_hdr_printf(bcf_hdr_t *hdr, const char *fmt, ...) { char tmp[256], *line = tmp; va_list ap; va_start(ap, fmt); int n = vsnprintf(line, sizeof(tmp), fmt, ap); va_end(ap); if (n >= sizeof(tmp)) { n++; // For trailing NUL line = (char*)malloc(n); if (!line) return -1; va_start(ap, fmt); vsnprintf(line, n, fmt, ap); va_end(ap); } int ret = bcf_hdr_append(hdr, line); if (line != tmp) free(line); return ret; } /********************** *** BCF header I/O *** **********************/ const char *bcf_hdr_get_version(const bcf_hdr_t *hdr) { bcf_hrec_t *hrec = bcf_hdr_get_hrec(hdr, BCF_HL_GEN, "fileformat", NULL, NULL); if ( !hrec ) { hts_log_warning("No version string found, assuming VCFv4.2"); return "VCFv4.2"; } return hrec->value; } int bcf_hdr_set_version(bcf_hdr_t *hdr, const char *version) { bcf_hrec_t *hrec = bcf_hdr_get_hrec(hdr, BCF_HL_GEN, "fileformat", NULL, NULL); if ( !hrec ) { int len; kstring_t str = {0,0,0}; ksprintf(&str,"##fileformat=%s", version); hrec = bcf_hdr_parse_line(hdr, str.s, &len); free(str.s); } else { free(hrec->value); hrec->value = strdup(version); } hdr->dirty = 1; return 0; // FIXME: check for errs in this function (return < 0 if so) } bcf_hdr_t *bcf_hdr_init(const char *mode) { int i; bcf_hdr_t *h; h = (bcf_hdr_t*)calloc(1, sizeof(bcf_hdr_t)); if (!h) return NULL; for (i = 0; i < 3; ++i) if ((h->dict[i] = kh_init(vdict)) == NULL) goto fail; if ( strchr(mode,'w') ) { bcf_hdr_append(h, "##fileformat=VCFv4.2"); // The filter PASS must appear first in the dictionary bcf_hdr_append(h, "##FILTER=<ID=PASS,Description=\"All filters passed\">"); } return h; fail: for (i = 0; i < 3; ++i) kh_destroy(vdict, h->dict[i]); free(h); return NULL; } void bcf_hdr_destroy(bcf_hdr_t *h) { int i; khint_t k; if (!h) return; for (i = 0; i < 3; ++i) { vdict_t *d = (vdict_t*)h->dict[i]; if (d == 0) continue; for (k = kh_begin(d); k != kh_end(d); ++k) if (kh_exist(d, k)) free((char*)kh_key(d, k)); kh_destroy(vdict, d); free(h->id[i]); } for (i=0; i<h->nhrec; i++) bcf_hrec_destroy(h->hrec[i]); if (h->nhrec) free(h->hrec); if (h->samples) free(h->samples); free(h->keep_samples); free(h->transl[0]); free(h->transl[1]); free(h->mem.s); free(h); } bcf_hdr_t 
*bcf_hdr_read(htsFile *hfp) { if (hfp->format.format == vcf) return vcf_hdr_read(hfp); if (hfp->format.format != bcf) { hts_log_error("Input is not detected as bcf or vcf format"); return NULL; } assert(hfp->is_bgzf); BGZF *fp = hfp->fp.bgzf; uint8_t magic[5]; bcf_hdr_t *h; h = bcf_hdr_init("r"); if (!h) { hts_log_error("Failed to allocate bcf header"); return NULL; } if (bgzf_read(fp, magic, 5) != 5) { hts_log_error("Failed to read the header (reading BCF in text mode?)"); bcf_hdr_destroy(h); return NULL; } if (strncmp((char*)magic, "BCF\2\2", 5) != 0) { if (!strncmp((char*)magic, "BCF", 3)) hts_log_error("Invalid BCF2 magic string: only BCFv2.2 is supported"); else hts_log_error("Invalid BCF2 magic string"); bcf_hdr_destroy(h); return NULL; } uint8_t buf[4]; size_t hlen; char *htxt = NULL; if (bgzf_read(fp, buf, 4) != 4) goto fail; hlen = buf[0] | (buf[1] << 8) | (buf[2] << 16) | ((size_t) buf[3] << 24); if (hlen >= SIZE_MAX) { errno = ENOMEM; goto fail; } htxt = (char*)malloc(hlen + 1); if (!htxt) goto fail; if (bgzf_read(fp, htxt, hlen) != hlen) goto fail; htxt[hlen] = '\0'; // Ensure htxt is terminated if ( bcf_hdr_parse(h, htxt) < 0 ) goto fail; free(htxt); return h; fail: hts_log_error("Failed to read BCF header"); free(htxt); bcf_hdr_destroy(h); return NULL; } int bcf_hdr_write(htsFile *hfp, bcf_hdr_t *h) { if (!h) { errno = EINVAL; return -1; } if ( h->dirty ) { if (bcf_hdr_sync(h) < 0) return -1; } hfp->format.category = variant_data; if (hfp->format.format == vcf || hfp->format.format == text_format) { hfp->format.format = vcf; return vcf_hdr_write(hfp, h); } if (hfp->format.format == binary_format) hfp->format.format = bcf; kstring_t htxt = {0,0,0}; bcf_hdr_format(h, 1, &htxt); kputc('\0', &htxt); // include the \0 byte BGZF *fp = hfp->fp.bgzf; if ( bgzf_write(fp, "BCF\2\2", 5) !=5 ) return -1; uint8_t hlen[4]; u32_to_le(htxt.l, hlen); if ( bgzf_write(fp, hlen, 4) !=4 ) return -1; if ( bgzf_write(fp, htxt.s, htxt.l) != htxt.l ) return -1; free(htxt.s); return 0; } /******************** *** BCF site I/O *** ********************/ bcf1_t *bcf_init() { bcf1_t *v; v = (bcf1_t*)calloc(1, sizeof(bcf1_t)); return v; } void bcf_clear(bcf1_t *v) { int i; for (i=0; i<v->d.m_info; i++) { if ( v->d.info[i].vptr_free ) { free(v->d.info[i].vptr - v->d.info[i].vptr_off); v->d.info[i].vptr_free = 0; } } for (i=0; i<v->d.m_fmt; i++) { if ( v->d.fmt[i].p_free ) { free(v->d.fmt[i].p - v->d.fmt[i].p_off); v->d.fmt[i].p_free = 0; } } v->rid = v->pos = v->rlen = v->unpacked = 0; bcf_float_set_missing(v->qual); v->n_info = v->n_allele = v->n_fmt = v->n_sample = 0; v->shared.l = v->indiv.l = 0; v->d.var_type = -1; v->d.shared_dirty = 0; v->d.indiv_dirty = 0; v->d.n_flt = 0; v->errcode = 0; if (v->d.m_als) v->d.als[0] = 0; if (v->d.m_id) v->d.id[0] = 0; } void bcf_empty(bcf1_t *v) { bcf_clear1(v); free(v->d.id); free(v->d.als); free(v->d.allele); free(v->d.flt); free(v->d.info); free(v->d.fmt); if (v->d.var ) free(v->d.var); free(v->shared.s); free(v->indiv.s); memset(&v->d,0,sizeof(v->d)); memset(&v->shared,0,sizeof(v->shared)); memset(&v->indiv,0,sizeof(v->indiv)); } void bcf_destroy(bcf1_t *v) { if (!v) return; bcf_empty1(v); free(v); } static inline int bcf_read1_core(BGZF *fp, bcf1_t *v) { uint8_t x[32]; ssize_t ret; uint32_t shared_len, indiv_len; if ((ret = bgzf_read(fp, x, 32)) != 32) { if (ret == 0) return -1; return -2; } bcf_clear1(v); shared_len = le_to_u32(x); if (shared_len < 24) return -2; shared_len -= 24; // to exclude six 32-bit integers if (ks_resize(&v->shared, shared_len) != 0) 
return -2; indiv_len = le_to_u32(x + 4); if (ks_resize(&v->indiv, indiv_len) != 0) return -2; v->rid = le_to_i32(x + 8); v->pos = le_to_u32(x + 12); v->rlen = le_to_i32(x + 16); v->qual = le_to_float(x + 20); v->n_info = le_to_u16(x + 24); v->n_allele = le_to_u16(x + 26); v->n_sample = le_to_u32(x + 28) & 0xffffff; v->n_fmt = x[31]; v->shared.l = shared_len; v->indiv.l = indiv_len; // silent fix of broken BCFs produced by earlier versions of bcf_subset, prior to and including bd6ed8b4 if ( (!v->indiv.l || !v->n_sample) && v->n_fmt ) v->n_fmt = 0; if (bgzf_read(fp, v->shared.s, v->shared.l) != v->shared.l) return -2; if (bgzf_read(fp, v->indiv.s, v->indiv.l) != v->indiv.l) return -2; return 0; } #define bit_array_size(n) ((n)/8+1) #define bit_array_set(a,i) ((a)[(i)/8] |= 1 << ((i)%8)) #define bit_array_clear(a,i) ((a)[(i)/8] &= ~(1 << ((i)%8))) #define bit_array_test(a,i) ((a)[(i)/8] & (1 << ((i)%8))) static int bcf_dec_typed_int1_safe(uint8_t *p, uint8_t *end, uint8_t **q, int32_t *val) { uint32_t t; if (end - p < 2) return -1; t = *p++ & 0xf; /* Use if .. else if ... else instead of switch to force order. Assumption is that small integers are more frequent than big ones. */ if (t == BCF_BT_INT8) { *q = p + 1; *val = *(int8_t *) p; } else if (t == BCF_BT_INT16) { if (end - p < 2) return -1; *q = p + 2; *val = le_to_i16(p); } else if (t == BCF_BT_INT32) { if (end - p < 4) return -1; *q = p + 4; *val = le_to_i32(p); #ifdef VCF_ALLOW_INT64 } else if (t == BCF_BT_INT64) { // This case should never happen because there should be no 64-bit BCFs // at all, definitely not coming from htslib if (end - p < 8) return -1; *q = p + 8; *val = le_to_i64(p); #endif } else { return -1; } return 0; } static int bcf_dec_size_safe(uint8_t *p, uint8_t *end, uint8_t **q, int *num, int *type) { int r; if (p >= end) return -1; *type = *p & 0xf; if (*p>>4 != 15) { *q = p + 1; *num = *p >> 4; return 0; } r = bcf_dec_typed_int1_safe(p + 1, end, q, num); if (r) return r; return *num >= 0 ? 0 : -1; } static const char *get_type_name(int type) { const char *types[9] = { "null", "int (8-bit)", "int (16 bit)", "int (32 bit)", "unknown", "float", "unknown", "char", "unknown" }; int t = (type >= 0 && type < 8) ? type : 8; return types[t]; } static int bcf_record_check(const bcf_hdr_t *hdr, bcf1_t *rec) { uint8_t *ptr, *end; size_t bytes; uint32_t err = 0; int type = 0; int num = 0; int reflen = 0; uint32_t i, reports; const uint32_t is_integer = ((1 << BCF_BT_INT8) | (1 << BCF_BT_INT16) | #ifdef VCF_ALLOW_INT64 (1 << BCF_BT_INT64) | #endif (1 << BCF_BT_INT32)); const uint32_t is_valid_type = (is_integer | (1 << BCF_BT_NULL) | (1 << BCF_BT_FLOAT) | (1 << BCF_BT_CHAR)); int32_t max_id = hdr ? 
hdr->n[BCF_DT_ID] : 0; // Check for valid contig ID if (rec->rid < 0 || (hdr && (rec->rid >= hdr->n[BCF_DT_CTG] || hdr->id[BCF_DT_CTG][rec->rid].key == NULL))) { hts_log_warning("Bad BCF record at %"PRIhts_pos": Invalid %s id %d", rec->pos+1, "CONTIG", rec->rid); err |= BCF_ERR_CTG_INVALID; } // Check ID ptr = (uint8_t *) rec->shared.s; end = ptr + rec->shared.l; if (bcf_dec_size_safe(ptr, end, &ptr, &num, &type) != 0) goto bad_shared; if (type != BCF_BT_CHAR) { hts_log_warning("Bad BCF record at %s:%"PRIhts_pos": Invalid %s type %d (%s)", bcf_seqname_safe(hdr,rec), rec->pos+1, "ID", type, get_type_name(type)); err |= BCF_ERR_TAG_INVALID; } bytes = (size_t) num << bcf_type_shift[type]; if (end - ptr < bytes) goto bad_shared; ptr += bytes; // Check REF and ALT reports = 0; for (i = 0; i < rec->n_allele; i++) { if (bcf_dec_size_safe(ptr, end, &ptr, &num, &type) != 0) goto bad_shared; if (type != BCF_BT_CHAR) { if (!reports++ || hts_verbose >= HTS_LOG_DEBUG) hts_log_warning("Bad BCF record at %s:%"PRIhts_pos": Invalid %s type %d (%s)", bcf_seqname_safe(hdr,rec), rec->pos+1, "REF/ALT", type, get_type_name(type)); err |= BCF_ERR_CHAR; } if (i == 0) reflen = num; bytes = (size_t) num << bcf_type_shift[type]; if (end - ptr < bytes) goto bad_shared; ptr += bytes; } // Check FILTER reports = 0; if (bcf_dec_size_safe(ptr, end, &ptr, &num, &type) != 0) goto bad_shared; if (num > 0) { bytes = (size_t) num << bcf_type_shift[type]; if (((1 << type) & is_integer) == 0) { hts_log_warning("Bad BCF record at %s:%"PRIhts_pos": Invalid %s type %d (%s)", bcf_seqname_safe(hdr,rec), rec->pos+1, "FILTER", type, get_type_name(type)); err |= BCF_ERR_TAG_INVALID; if (end - ptr < bytes) goto bad_shared; ptr += bytes; } else { if (end - ptr < bytes) goto bad_shared; for (i = 0; i < num; i++) { int32_t key = bcf_dec_int1(ptr, type, &ptr); if (key < 0 || (hdr && (key >= max_id || hdr->id[BCF_DT_ID][key].key == NULL))) { if (!reports++ || hts_verbose >= HTS_LOG_DEBUG) hts_log_warning("Bad BCF record at %s:%"PRIhts_pos": Invalid %s id %d", bcf_seqname_safe(hdr,rec), rec->pos+1, "FILTER", key); err |= BCF_ERR_TAG_UNDEF; } } } } // Check INFO reports = 0; for (i = 0; i < rec->n_info; i++) { int32_t key = -1; if (bcf_dec_typed_int1_safe(ptr, end, &ptr, &key) != 0) goto bad_shared; if (key < 0 || (hdr && (key >= max_id || hdr->id[BCF_DT_ID][key].key == NULL))) { if (!reports++ || hts_verbose >= HTS_LOG_DEBUG) hts_log_warning("Bad BCF record at %s:%"PRIhts_pos": Invalid %s id %d", bcf_seqname_safe(hdr,rec), rec->pos+1, "INFO", key); err |= BCF_ERR_TAG_UNDEF; } if (bcf_dec_size_safe(ptr, end, &ptr, &num, &type) != 0) goto bad_shared; if (((1 << type) & is_valid_type) == 0) { if (!reports++ || hts_verbose >= HTS_LOG_DEBUG) hts_log_warning("Bad BCF record at %s:%"PRIhts_pos": Invalid %s type %d (%s)", bcf_seqname_safe(hdr,rec), rec->pos+1, "INFO", type, get_type_name(type)); err |= BCF_ERR_TAG_INVALID; } bytes = (size_t) num << bcf_type_shift[type]; if (end - ptr < bytes) goto bad_shared; ptr += bytes; } // Check FORMAT and individual information ptr = (uint8_t *) rec->indiv.s; end = ptr + rec->indiv.l; reports = 0; for (i = 0; i < rec->n_fmt; i++) { int32_t key = -1; if (bcf_dec_typed_int1_safe(ptr, end, &ptr, &key) != 0) goto bad_indiv; if (key < 0 || (hdr && (key >= max_id || hdr->id[BCF_DT_ID][key].key == NULL))) { if (!reports++ || hts_verbose >= HTS_LOG_DEBUG) hts_log_warning("Bad BCF record at %s:%"PRIhts_pos": Invalid %s id %d", bcf_seqname_safe(hdr,rec), rec->pos+1, "FORMAT", key); err |= BCF_ERR_TAG_UNDEF; } if 
(bcf_dec_size_safe(ptr, end, &ptr, &num, &type) != 0) goto bad_indiv; if (((1 << type) & is_valid_type) == 0) { if (!reports++ || hts_verbose >= HTS_LOG_DEBUG) hts_log_warning("Bad BCF record at %s:%"PRIhts_pos": Invalid %s type %d (%s)", bcf_seqname_safe(hdr,rec), rec->pos+1, "FORMAT", type, get_type_name(type)); err |= BCF_ERR_TAG_INVALID; } bytes = ((size_t) num << bcf_type_shift[type]) * rec->n_sample; if (end - ptr < bytes) goto bad_indiv; ptr += bytes; } if (!err && rec->rlen < 0) { // Treat bad rlen as a warning instead of an error, and try to // fix up by using the length of the stored REF allele. static int warned = 0; if (!warned) { hts_log_warning("BCF record at %s:%"PRIhts_pos" has invalid RLEN (%"PRIhts_pos"). " "Only one invalid RLEN will be reported.", bcf_seqname_safe(hdr,rec), rec->pos+1, rec->rlen); warned = 1; } rec->rlen = reflen >= 0 ? reflen : 0; } rec->errcode |= err; return err ? -2 : 0; // Return -2 so bcf_read() reports an error bad_shared: hts_log_error("Bad BCF record at %s:%"PRIhts_pos" - shared section malformed or too short", bcf_seqname_safe(hdr,rec), rec->pos+1); return -2; bad_indiv: hts_log_error("Bad BCF record at %s:%"PRIhts_pos" - individuals section malformed or too short", bcf_seqname_safe(hdr,rec), rec->pos+1); return -2; } static inline uint8_t *bcf_unpack_fmt_core1(uint8_t *ptr, int n_sample, bcf_fmt_t *fmt); int bcf_subset_format(const bcf_hdr_t *hdr, bcf1_t *rec) { if ( !hdr->keep_samples ) return 0; if ( !bcf_hdr_nsamples(hdr) ) { rec->indiv.l = rec->n_sample = 0; return 0; } int i, j; uint8_t *ptr = (uint8_t*)rec->indiv.s, *dst = NULL, *src; bcf_dec_t *dec = &rec->d; hts_expand(bcf_fmt_t, rec->n_fmt, dec->m_fmt, dec->fmt); for (i=0; i<dec->m_fmt; ++i) dec->fmt[i].p_free = 0; for (i=0; i<rec->n_fmt; i++) { ptr = bcf_unpack_fmt_core1(ptr, rec->n_sample, &dec->fmt[i]); src = dec->fmt[i].p - dec->fmt[i].size; if ( dst ) { memmove(dec->fmt[i-1].p + dec->fmt[i-1].p_len, dec->fmt[i].p - dec->fmt[i].p_off, dec->fmt[i].p_off); dec->fmt[i].p = dec->fmt[i-1].p + dec->fmt[i-1].p_len + dec->fmt[i].p_off; } dst = dec->fmt[i].p; for (j=0; j<hdr->nsamples_ori; j++) { src += dec->fmt[i].size; if ( !bit_array_test(hdr->keep_samples,j) ) continue; memmove(dst, src, dec->fmt[i].size); dst += dec->fmt[i].size; } rec->indiv.l -= dec->fmt[i].p_len - (dst - dec->fmt[i].p); dec->fmt[i].p_len = dst - dec->fmt[i].p; } rec->unpacked |= BCF_UN_FMT; rec->n_sample = bcf_hdr_nsamples(hdr); return 0; } int bcf_read(htsFile *fp, const bcf_hdr_t *h, bcf1_t *v) { if (fp->format.format == vcf) return vcf_read(fp,h,v); int ret = bcf_read1_core(fp->fp.bgzf, v); if (ret == 0) ret = bcf_record_check(h, v); if ( ret!=0 || !h->keep_samples ) return ret; return bcf_subset_format(h,v); } int bcf_readrec(BGZF *fp, void *null, void *vv, int *tid, hts_pos_t *beg, hts_pos_t *end) { bcf1_t *v = (bcf1_t *) vv; int ret = bcf_read1_core(fp, v); if (ret == 0) ret = bcf_record_check(NULL, v); if (ret >= 0) *tid = v->rid, *beg = v->pos, *end = v->pos + v->rlen; return ret; } static inline int bcf1_sync_id(bcf1_t *line, kstring_t *str) { // single typed string if ( line->d.id && strcmp(line->d.id, ".") ) { return bcf_enc_vchar(str, strlen(line->d.id), line->d.id); } else { return bcf_enc_size(str, 0, BCF_BT_CHAR); } } static inline int bcf1_sync_alleles(bcf1_t *line, kstring_t *str) { // list of typed strings int i; for (i=0; i<line->n_allele; i++) { if (bcf_enc_vchar(str, strlen(line->d.allele[i]), line->d.allele[i]) < 0) return -1; } if ( !line->rlen && line->n_allele ) line->rlen = 
strlen(line->d.allele[0]); return 0; } static inline int bcf1_sync_filter(bcf1_t *line, kstring_t *str) { // typed vector of integers if ( line->d.n_flt ) { return bcf_enc_vint(str, line->d.n_flt, line->d.flt, -1); } else { return bcf_enc_vint(str, 0, 0, -1); } } static inline int bcf1_sync_info(bcf1_t *line, kstring_t *str) { // pairs of typed vectors int i, irm = -1, e = 0; for (i=0; i<line->n_info; i++) { bcf_info_t *info = &line->d.info[i]; if ( !info->vptr ) { // marked for removal if ( irm < 0 ) irm = i; continue; } e |= kputsn_(info->vptr - info->vptr_off, info->vptr_len + info->vptr_off, str) < 0; if ( irm >=0 ) { bcf_info_t tmp = line->d.info[irm]; line->d.info[irm] = line->d.info[i]; line->d.info[i] = tmp; while ( irm<=i && line->d.info[irm].vptr ) irm++; } } if ( irm>=0 ) line->n_info = irm; return e == 0 ? 0 : -1; } static int bcf1_sync(bcf1_t *line) { char *shared_ori = line->shared.s; size_t prev_len; kstring_t tmp = {0,0,0}; if ( !line->shared.l ) { // New line created via API, BCF data blocks do not exist. Get it ready for BCF output tmp = line->shared; bcf1_sync_id(line, &tmp); line->unpack_size[0] = tmp.l; prev_len = tmp.l; bcf1_sync_alleles(line, &tmp); line->unpack_size[1] = tmp.l - prev_len; prev_len = tmp.l; bcf1_sync_filter(line, &tmp); line->unpack_size[2] = tmp.l - prev_len; bcf1_sync_info(line, &tmp); line->shared = tmp; } else if ( line->d.shared_dirty ) { // The line was edited, update the BCF data block. if ( !(line->unpacked & BCF_UN_STR) ) bcf_unpack(line,BCF_UN_STR); // ptr_ori points to the original unchanged BCF data. uint8_t *ptr_ori = (uint8_t *) line->shared.s; // ID: single typed string if ( line->d.shared_dirty & BCF1_DIRTY_ID ) bcf1_sync_id(line, &tmp); else kputsn_(ptr_ori, line->unpack_size[0], &tmp); ptr_ori += line->unpack_size[0]; line->unpack_size[0] = tmp.l; prev_len = tmp.l; // REF+ALT: list of typed strings if ( line->d.shared_dirty & BCF1_DIRTY_ALS ) bcf1_sync_alleles(line, &tmp); else { kputsn_(ptr_ori, line->unpack_size[1], &tmp); if ( !line->rlen && line->n_allele ) line->rlen = strlen(line->d.allele[0]); } ptr_ori += line->unpack_size[1]; line->unpack_size[1] = tmp.l - prev_len; prev_len = tmp.l; if ( line->unpacked & BCF_UN_FLT ) { // FILTER: typed vector of integers if ( line->d.shared_dirty & BCF1_DIRTY_FLT ) bcf1_sync_filter(line, &tmp); else if ( line->d.n_flt ) kputsn_(ptr_ori, line->unpack_size[2], &tmp); else bcf_enc_vint(&tmp, 0, 0, -1); ptr_ori += line->unpack_size[2]; line->unpack_size[2] = tmp.l - prev_len; if ( line->unpacked & BCF_UN_INFO ) { // INFO: pairs of typed vectors if ( line->d.shared_dirty & BCF1_DIRTY_INF ) { bcf1_sync_info(line, &tmp); ptr_ori = (uint8_t*)line->shared.s + line->shared.l; } } } int size = line->shared.l - (size_t)ptr_ori + (size_t)line->shared.s; if ( size ) kputsn_(ptr_ori, size, &tmp); free(line->shared.s); line->shared = tmp; } if ( line->shared.s != shared_ori && line->unpacked & BCF_UN_INFO ) { // Reallocated line->shared.s block invalidated line->d.info[].vptr pointers size_t off_new = line->unpack_size[0] + line->unpack_size[1] + line->unpack_size[2]; int i; for (i=0; i<line->n_info; i++) { uint8_t *vptr_free = line->d.info[i].vptr_free ? 
line->d.info[i].vptr - line->d.info[i].vptr_off : NULL; line->d.info[i].vptr = (uint8_t*) line->shared.s + off_new + line->d.info[i].vptr_off; off_new += line->d.info[i].vptr_len + line->d.info[i].vptr_off; if ( vptr_free ) { free(vptr_free); line->d.info[i].vptr_free = 0; } } } if ( line->n_sample && line->n_fmt && (!line->indiv.l || line->d.indiv_dirty) ) { // The genotype fields changed or are not present tmp.l = tmp.m = 0; tmp.s = NULL; int i, irm = -1; for (i=0; i<line->n_fmt; i++) { bcf_fmt_t *fmt = &line->d.fmt[i]; if ( !fmt->p ) { // marked for removal if ( irm < 0 ) irm = i; continue; } kputsn_(fmt->p - fmt->p_off, fmt->p_len + fmt->p_off, &tmp); if ( irm >=0 ) { bcf_fmt_t tfmt = line->d.fmt[irm]; line->d.fmt[irm] = line->d.fmt[i]; line->d.fmt[i] = tfmt; while ( irm<=i && line->d.fmt[irm].p ) irm++; } } if ( irm>=0 ) line->n_fmt = irm; free(line->indiv.s); line->indiv = tmp; // Reallocated line->indiv.s block invalidated line->d.fmt[].p pointers size_t off_new = 0; for (i=0; i<line->n_fmt; i++) { uint8_t *p_free = line->d.fmt[i].p_free ? line->d.fmt[i].p - line->d.fmt[i].p_off : NULL; line->d.fmt[i].p = (uint8_t*) line->indiv.s + off_new + line->d.fmt[i].p_off; off_new += line->d.fmt[i].p_len + line->d.fmt[i].p_off; if ( p_free ) { free(p_free); line->d.fmt[i].p_free = 0; } } } if ( !line->n_sample ) line->n_fmt = 0; line->d.shared_dirty = line->d.indiv_dirty = 0; return 0; } bcf1_t *bcf_copy(bcf1_t *dst, bcf1_t *src) { bcf1_sync(src); bcf_clear(dst); dst->rid = src->rid; dst->pos = src->pos; dst->rlen = src->rlen; dst->qual = src->qual; dst->n_info = src->n_info; dst->n_allele = src->n_allele; dst->n_fmt = src->n_fmt; dst->n_sample = src->n_sample; if ( dst->shared.m < src->shared.l ) { dst->shared.s = (char*) realloc(dst->shared.s, src->shared.l); dst->shared.m = src->shared.l; } dst->shared.l = src->shared.l; memcpy(dst->shared.s,src->shared.s,dst->shared.l); if ( dst->indiv.m < src->indiv.l ) { dst->indiv.s = (char*) realloc(dst->indiv.s, src->indiv.l); dst->indiv.m = src->indiv.l; } dst->indiv.l = src->indiv.l; memcpy(dst->indiv.s,src->indiv.s,dst->indiv.l); return dst; } bcf1_t *bcf_dup(bcf1_t *src) { bcf1_t *out = bcf_init1(); return bcf_copy(out, src); } int bcf_write(htsFile *hfp, bcf_hdr_t *h, bcf1_t *v) { if ( h->dirty ) { if (bcf_hdr_sync(h) < 0) return -1; } if ( bcf_hdr_nsamples(h)!=v->n_sample ) { hts_log_error("Broken VCF record, the number of columns at %s:%"PRIhts_pos" does not match the number of samples (%d vs %d)", bcf_seqname_safe(h,v), v->pos+1, v->n_sample, bcf_hdr_nsamples(h)); return -1; } if ( hfp->format.format == vcf || hfp->format.format == text_format ) return vcf_write(hfp,h,v); if ( v->errcode ) { // vcf_parse1() encountered a new contig or tag, undeclared in the // header. At this point, the header must have been printed, // proceeding would lead to a broken BCF file. Errors must be checked // and cleared by the caller before we can proceed. hts_log_error("Unchecked error (%d) at %s:%"PRIhts_pos, v->errcode, bcf_seqname_safe(h,v), v->pos+1); return -1; } bcf1_sync(v); // check if the BCF record was modified if ( v->unpacked & BCF_IS_64BIT ) { hts_log_error("Data at %s:%"PRIhts_pos" contains 64-bit values not representable in BCF. 
Please use VCF instead", bcf_seqname_safe(h,v), v->pos+1); return -1; } BGZF *fp = hfp->fp.bgzf; uint8_t x[32]; u32_to_le(v->shared.l + 24, x); // to include six 32-bit integers u32_to_le(v->indiv.l, x + 4); i32_to_le(v->rid, x + 8); u32_to_le(v->pos, x + 12); u32_to_le(v->rlen, x + 16); float_to_le(v->qual, x + 20); u16_to_le(v->n_info, x + 24); u16_to_le(v->n_allele, x + 26); u32_to_le((uint32_t)v->n_fmt<<24 | (v->n_sample & 0xffffff), x + 28); if ( bgzf_write(fp, x, 32) != 32 ) return -1; if ( bgzf_write(fp, v->shared.s, v->shared.l) != v->shared.l ) return -1; if ( bgzf_write(fp, v->indiv.s, v->indiv.l) != v->indiv.l ) return -1; if (hfp->idx) { if (hts_idx_push(hfp->idx, v->rid, v->pos, v->pos + v->rlen, bgzf_tell(fp), 1) < 0) return -1; } return 0; } /********************** *** VCF header I/O *** **********************/ static int add_missing_contig_hrec(bcf_hdr_t *h, const char *name) { bcf_hrec_t *hrec = calloc(1, sizeof(bcf_hrec_t)); int save_errno; if (!hrec) goto fail; hrec->key = strdup("contig"); if (!hrec->key) goto fail; if (bcf_hrec_add_key(hrec, "ID", strlen("ID")) < 0) goto fail; if (bcf_hrec_set_val(hrec, hrec->nkeys-1, name, strlen(name), 0) < 0) goto fail; if (bcf_hdr_add_hrec(h, hrec) < 0) goto fail; return 0; fail: save_errno = errno; hts_log_error("%s", strerror(errno)); if (hrec) bcf_hrec_destroy(hrec); errno = save_errno; return -1; } bcf_hdr_t *vcf_hdr_read(htsFile *fp) { kstring_t txt, *s = &fp->line; int ret; bcf_hdr_t *h; tbx_t *idx = NULL; const char **names = NULL; h = bcf_hdr_init("r"); if (!h) { hts_log_error("Failed to allocate bcf header"); return NULL; } txt.l = txt.m = 0; txt.s = 0; while ((ret = hts_getline(fp, KS_SEP_LINE, s)) >= 0) { int e = 0; if (s->l == 0) continue; if (s->s[0] != '#') { hts_log_error("No sample line"); goto error; } if (s->s[1] != '#' && fp->fn_aux) { // insert contigs here kstring_t tmp = { 0, 0, NULL }; hFILE *f = hopen(fp->fn_aux, "r"); if (f == NULL) { hts_log_error("Couldn't open \"%s\"", fp->fn_aux); goto error; } while (tmp.l = 0, kgetline(&tmp, (kgets_func *) hgets, f) >= 0) { char *tab = strchr(tmp.s, '\t'); if (tab == NULL) continue; e |= (kputs("##contig=<ID=", &txt) < 0); e |= (kputsn(tmp.s, tab - tmp.s, &txt) < 0); e |= (kputs(",length=", &txt) < 0); e |= (kputl(atol(tab), &txt) < 0); e |= (kputsn(">\n", 2, &txt) < 0); } free(tmp.s); if (hclose(f) != 0) { hts_log_error("Error on closing %s", fp->fn_aux); goto error; } if (e) goto error; } if (kputsn(s->s, s->l, &txt) < 0) goto error; if (kputc('\n', &txt) < 0) goto error; if (s->s[1] != '#') break; } if ( ret < -1 ) goto error; if ( !txt.s ) { hts_log_error("Could not read the header"); goto error; } if ( bcf_hdr_parse(h, txt.s) < 0 ) goto error; // check tabix index, are all contigs listed in the header? 
add the missing ones idx = tbx_index_load3(fp->fn, NULL, HTS_IDX_SAVE_REMOTE|HTS_IDX_SILENT_FAIL); if ( idx ) { int i, n, need_sync = 0; names = tbx_seqnames(idx, &n); if (!names) goto error; for (i=0; i<n; i++) { bcf_hrec_t *hrec = bcf_hdr_get_hrec(h, BCF_HL_CTG, "ID", (char*) names[i], NULL); if ( hrec ) continue; if (add_missing_contig_hrec(h, names[i]) < 0) goto error; need_sync = 1; } if ( need_sync ) { if (bcf_hdr_sync(h) < 0) goto error; } free(names); tbx_destroy(idx); } free(txt.s); return h; error: if (idx) tbx_destroy(idx); free(names); free(txt.s); if (h) bcf_hdr_destroy(h); return NULL; } int bcf_hdr_set(bcf_hdr_t *hdr, const char *fname) { int i = 0, n = 0, save_errno; char **lines = hts_readlines(fname, &n); if ( !lines ) return 1; for (i=0; i<n-1; i++) { int k; bcf_hrec_t *hrec = bcf_hdr_parse_line(hdr,lines[i],&k); if (!hrec) goto fail; if (bcf_hdr_add_hrec(hdr, hrec) < 0) { bcf_hrec_destroy(hrec); goto fail; } free(lines[i]); lines[i] = NULL; } if (bcf_hdr_parse_sample_line(hdr, lines[n-1]) < 0) goto fail; if (bcf_hdr_sync(hdr) < 0) goto fail; free(lines[n-1]); free(lines); return 0; fail: save_errno = errno; for (; i < n; i++) free(lines[i]); free(lines); errno = save_errno; return 1; } static int _bcf_hrec_format(const bcf_hrec_t *hrec, int is_bcf, kstring_t *str) { uint32_t e = 0; if ( !hrec->value ) { int j, nout = 0; e |= ksprintf(str, "##%s=<", hrec->key) < 0; for (j=0; j<hrec->nkeys; j++) { // do not output IDX if output is VCF if ( !is_bcf && !strcmp("IDX",hrec->keys[j]) ) continue; if ( nout ) e |= kputc(',',str) < 0; e |= ksprintf(str,"%s=%s", hrec->keys[j], hrec->vals[j]) < 0; nout++; } e |= ksprintf(str,">\n") < 0; } else e |= ksprintf(str,"##%s=%s\n", hrec->key,hrec->value) < 0; return e == 0 ? 0 : -1; } int bcf_hrec_format(const bcf_hrec_t *hrec, kstring_t *str) { return _bcf_hrec_format(hrec,0,str); } int bcf_hdr_format(const bcf_hdr_t *hdr, int is_bcf, kstring_t *str) { int i; for (i=0; i<hdr->nhrec; i++) _bcf_hrec_format(hdr->hrec[i], is_bcf, str); ksprintf(str, "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO"); if ( bcf_hdr_nsamples(hdr) ) { ksprintf(str, "\tFORMAT"); for (i=0; i<bcf_hdr_nsamples(hdr); i++) ksprintf(str, "\t%s", hdr->samples[i]); } ksprintf(str, "\n"); return 0; } char *bcf_hdr_fmt_text(const bcf_hdr_t *hdr, int is_bcf, int *len) { kstring_t txt = {0,0,0}; bcf_hdr_format(hdr, is_bcf, &txt); if ( len ) *len = txt.l; return txt.s; } const char **bcf_hdr_seqnames(const bcf_hdr_t *h, int *n) { vdict_t *d = (vdict_t*)h->dict[BCF_DT_CTG]; int tid, m = kh_size(d); const char **names = (const char**) calloc(m,sizeof(const char*)); khint_t k; for (k=kh_begin(d); k<kh_end(d); k++) { if ( !kh_exist(d,k) ) continue; tid = kh_val(d,k).id; assert( tid<m ); names[tid] = kh_key(d,k); } // sanity check: there should be no gaps for (tid=0; tid<m; tid++) assert(names[tid]); *n = m; return names; } int vcf_hdr_write(htsFile *fp, const bcf_hdr_t *h) { kstring_t htxt = {0,0,0}; bcf_hdr_format(h, 0, &htxt); while (htxt.l && htxt.s[htxt.l-1] == '\0') --htxt.l; // kill trailing zeros int ret; if ( fp->format.compression!=no_compression ) ret = bgzf_write(fp->fp.bgzf, htxt.s, htxt.l); else ret = hwrite(fp->fp.hfile, htxt.s, htxt.l); free(htxt.s); return ret<0 ? 
-1 : 0; } /*********************** *** Typed value I/O *** ***********************/ int bcf_enc_vint(kstring_t *s, int n, int32_t *a, int wsize) { int32_t max = INT32_MIN, min = INT32_MAX; int i; if (n <= 0) bcf_enc_size(s, 0, BCF_BT_NULL); else if (n == 1) bcf_enc_int1(s, a[0]); else { if (wsize <= 0) wsize = n; for (i = 0; i < n; ++i) { if (a[i] == bcf_int32_missing || a[i] == bcf_int32_vector_end ) continue; if (max < a[i]) max = a[i]; if (min > a[i]) min = a[i]; } if (max <= BCF_MAX_BT_INT8 && min >= BCF_MIN_BT_INT8) { bcf_enc_size(s, wsize, BCF_BT_INT8); for (i = 0; i < n; ++i) if ( a[i]==bcf_int32_vector_end ) kputc(bcf_int8_vector_end, s); else if ( a[i]==bcf_int32_missing ) kputc(bcf_int8_missing, s); else kputc(a[i], s); } else if (max <= BCF_MAX_BT_INT16 && min >= BCF_MIN_BT_INT16) { uint8_t *p; bcf_enc_size(s, wsize, BCF_BT_INT16); ks_resize(s, s->l + n * sizeof(int16_t)); p = (uint8_t *) s->s + s->l; for (i = 0; i < n; ++i) { int16_t x; if ( a[i]==bcf_int32_vector_end ) x = bcf_int16_vector_end; else if ( a[i]==bcf_int32_missing ) x = bcf_int16_missing; else x = a[i]; i16_to_le(x, p); p += sizeof(int16_t); } s->l += n * sizeof(int16_t); } else { uint8_t *p; bcf_enc_size(s, wsize, BCF_BT_INT32); ks_resize(s, s->l + n * sizeof(int32_t)); p = (uint8_t *) s->s + s->l; for (i = 0; i < n; ++i) { i32_to_le(a[i], p); p += sizeof(int32_t); } s->l += n * sizeof(int32_t); } } return 0; // FIXME: check for errs in this function } #ifdef VCF_ALLOW_INT64 static int bcf_enc_long1(kstring_t *s, int64_t x) { uint32_t e = 0; if (x <= BCF_MAX_BT_INT32 && x >= BCF_MIN_BT_INT32) return bcf_enc_int1(s, x); if (x == bcf_int64_vector_end) { e |= bcf_enc_size(s, 1, BCF_BT_INT8); e |= kputc(bcf_int8_vector_end, s) < 0; } else if (x == bcf_int64_missing) { e |= bcf_enc_size(s, 1, BCF_BT_INT8); e |= kputc(bcf_int8_missing, s) < 0; } else { e |= bcf_enc_size(s, 1, BCF_BT_INT64); e |= ks_expand(s, 8); if (e == 0) { u64_to_le(x, (uint8_t *) s->s + s->l); s->l += 8; } } return e == 0 ? 0 : -1; } #endif static inline int serialize_float_array(kstring_t *s, size_t n, const float *a) { uint8_t *p; size_t i; size_t bytes = n * sizeof(float); if (bytes / sizeof(float) != n) return -1; if (ks_resize(s, s->l + bytes) < 0) return -1; p = (uint8_t *) s->s + s->l; for (i = 0; i < n; i++) { float_to_le(a[i], p); p += sizeof(float); } s->l += bytes; return 0; } int bcf_enc_vfloat(kstring_t *s, int n, float *a) { assert(n >= 0); bcf_enc_size(s, n, BCF_BT_FLOAT); serialize_float_array(s, n, a); return 0; // FIXME: check for errs in this function } int bcf_enc_vchar(kstring_t *s, int l, const char *a) { bcf_enc_size(s, l, BCF_BT_CHAR); kputsn(a, l, s); return 0; // FIXME: check for errs in this function } int bcf_fmt_array(kstring_t *s, int n, int type, void *data) { int j = 0; uint32_t e = 0; if (n == 0) { return kputc('.', s) >= 0 ? 
0 : -1; } if (type == BCF_BT_CHAR) { char *p = (char*)data; for (j = 0; j < n && *p; ++j, ++p) { if ( *p==bcf_str_missing ) e |= kputc('.', s) < 0; else e |= kputc(*p, s) < 0; } } else { #define BRANCH(type_t, convert, is_missing, is_vector_end, kprint) { \ uint8_t *p = (uint8_t *) data; \ for (j=0; j<n; j++, p += sizeof(type_t)) \ { \ type_t v = convert(p); \ if ( is_vector_end ) break; \ if ( j ) kputc(',', s); \ if ( is_missing ) kputc('.', s); \ else e |= kprint < 0; \ } \ } switch (type) { case BCF_BT_INT8: BRANCH(int8_t, le_to_i8, v==bcf_int8_missing, v==bcf_int8_vector_end, kputw(v, s)); break; case BCF_BT_INT16: BRANCH(int16_t, le_to_i16, v==bcf_int16_missing, v==bcf_int16_vector_end, kputw(v, s)); break; case BCF_BT_INT32: BRANCH(int32_t, le_to_i32, v==bcf_int32_missing, v==bcf_int32_vector_end, kputw(v, s)); break; case BCF_BT_FLOAT: BRANCH(uint32_t, le_to_u32, v==bcf_float_missing, v==bcf_float_vector_end, kputd(le_to_float(p), s)); break; default: hts_log_error("Unexpected type %d", type); exit(1); break; } #undef BRANCH } return e == 0 ? 0 : -1; } uint8_t *bcf_fmt_sized_array(kstring_t *s, uint8_t *ptr) { int x, type; x = bcf_dec_size(ptr, &ptr, &type); bcf_fmt_array(s, x, type, ptr); return ptr + (x << bcf_type_shift[type]); } /******************** *** VCF site I/O *** ********************/ typedef struct { int key, max_m, size, offset; uint32_t is_gt:1, max_g:31; uint32_t max_l; uint32_t y; uint8_t *buf; } fmt_aux_t; static inline int align_mem(kstring_t *s) { int e = 0; if (s->l&7) { uint64_t zero = 0; e = kputsn((char*)&zero, 8 - (s->l&7), s) < 0; } return e == 0 ? 0 : -1; } // p,q is the start and the end of the FORMAT field #define MAX_N_FMT 255 /* Limited by size of bcf1_t n_fmt field */ static int vcf_parse_format(kstring_t *s, const bcf_hdr_t *h, bcf1_t *v, char *p, char *q) { if ( !bcf_hdr_nsamples(h) ) return 0; static int extreme_val_warned = 0; char *r, *t; int j, l, m, g, overflow = 0; khint_t k; ks_tokaux_t aux1; vdict_t *d = (vdict_t*)h->dict[BCF_DT_ID]; kstring_t *mem = (kstring_t*)&h->mem; fmt_aux_t fmt[MAX_N_FMT]; mem->l = 0; char *end = s->s + s->l; if ( q>=end ) { hts_log_error("FORMAT column with no sample columns starting at %s:%"PRIhts_pos"", bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_NCOLS; return -1; } v->n_fmt = 0; if ( p[0]=='.' && p[1]==0 ) // FORMAT field is empty "." { v->n_sample = bcf_hdr_nsamples(h); return 0; } // get format information from the dictionary for (j = 0, t = kstrtok(p, ":", &aux1); t; t = kstrtok(0, 0, &aux1), ++j) { if (j >= MAX_N_FMT) { v->errcode |= BCF_ERR_LIMITS; hts_log_error("FORMAT column at %s:%"PRIhts_pos" lists more identifiers than htslib can handle", bcf_seqname_safe(h,v), v->pos+1); return -1; } *(char*)aux1.p = 0; k = kh_get(vdict, d, t); if (k == kh_end(d) || kh_val(d, k).info[BCF_HL_FMT] == 15) { if ( t[0]=='.' && t[1]==0 ) { hts_log_error("Invalid FORMAT tag name '.' at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_TAG_INVALID; return -1; } hts_log_warning("FORMAT '%s' at %s:%"PRIhts_pos" is not defined in the header, assuming Type=String", t, bcf_seqname_safe(h,v), v->pos+1); kstring_t tmp = {0,0,0}; int l; ksprintf(&tmp, "##FORMAT=<ID=%s,Number=1,Type=String,Description=\"Dummy\">", t); bcf_hrec_t *hrec = bcf_hdr_parse_line(h,tmp.s,&l); free(tmp.s); int res = hrec ? 
bcf_hdr_add_hrec((bcf_hdr_t*)h, hrec) : -1; if (res < 0) bcf_hrec_destroy(hrec); if (res > 0) res = bcf_hdr_sync((bcf_hdr_t*)h); k = kh_get(vdict, d, t); v->errcode = BCF_ERR_TAG_UNDEF; if (res || k == kh_end(d)) { hts_log_error("Could not add dummy header for FORMAT '%s' at %s:%"PRIhts_pos, t, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_TAG_INVALID; return -1; } } fmt[j].max_l = fmt[j].max_m = fmt[j].max_g = 0; fmt[j].key = kh_val(d, k).id; fmt[j].is_gt = !strcmp(t, "GT"); fmt[j].y = h->id[0][fmt[j].key].val->info[BCF_HL_FMT]; v->n_fmt++; } // compute max int n_sample_ori = -1; r = q + 1; // r: position in the format string l = 0, m = g = 1, v->n_sample = 0; // m: max vector size, l: max field len, g: max number of alleles while ( r<end ) { // can we skip some samples? if ( h->keep_samples ) { n_sample_ori++; if ( !bit_array_test(h->keep_samples,n_sample_ori) ) { while ( *r!='\t' && r<end ) r++; if ( *r=='\t' ) { *r = 0; r++; } continue; } } // collect fmt stats: max vector size, length, number of alleles j = 0; // j-th format field fmt_aux_t *f = fmt; for (;;) { switch (*r) { case ',': m++; break; case '|': case '/': if (f->is_gt) g++; break; case '\t': *r = 0; // fall through case '\0': case ':': if (f->max_m < m) f->max_m = m; if (f->max_l < l) f->max_l = l; if (f->is_gt && f->max_g < g) f->max_g = g; l = 0, m = g = 1; if ( *r==':' ) { j++; f++; if ( j>=v->n_fmt ) { hts_log_error("Incorrect number of FORMAT fields at %s:%"PRIhts_pos"", h->id[BCF_DT_CTG][v->rid].key, v->pos+1); v->errcode |= BCF_ERR_NCOLS; return -1; } } else goto end_for; break; } if ( r>=end ) break; r++; l++; } end_for: v->n_sample++; if ( v->n_sample == bcf_hdr_nsamples(h) ) break; r++; } // allocate memory for arrays for (j = 0; j < v->n_fmt; ++j) { fmt_aux_t *f = &fmt[j]; if ( !f->max_m ) f->max_m = 1; // omitted trailing format field if ((f->y>>4&0xf) == BCF_HT_STR) { f->size = f->is_gt? f->max_g << 2 : f->max_l; } else if ((f->y>>4&0xf) == BCF_HT_REAL || (f->y>>4&0xf) == BCF_HT_INT) { f->size = f->max_m << 2; } else { hts_log_error("The format type %d at %s:%"PRIhts_pos" is currently not supported", f->y>>4&0xf, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_TAG_INVALID; return -1; } if (align_mem(mem) < 0) { hts_log_error("Memory allocation failure at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_LIMITS; return -1; } f->offset = mem->l; // Limit the total memory to ~2Gb per VCF row. This should mean // malformed VCF data is less likely to take excessive memory and/or // time. if (v->n_sample * (uint64_t)f->size > INT_MAX) { hts_log_error("Excessive memory required by FORMAT fields at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_LIMITS; return -1; } if (ks_resize(mem, mem->l + v->n_sample * (size_t)f->size) < 0) { hts_log_error("Memory allocation failure at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_LIMITS; return -1; } mem->l += v->n_sample * f->size; } for (j = 0; j < v->n_fmt; ++j) fmt[j].buf = (uint8_t*)mem->s + fmt[j].offset; // fill the sample fields; at beginning of the loop, t points to the first char of a format n_sample_ori = -1; t = q + 1; m = 0; // m: sample id while ( t<end ) { // can we skip some samples? 
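        /* Added note: h->keep_samples (set by the sample-subsetting API,
         * e.g. bcf_hdr_set_samples()) is a bit array indexed by the original
         * sample order; bit_array_test() decides whether this column is
         * parsed or the parser just scans ahead to the next tab. */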
if ( h->keep_samples ) { n_sample_ori++; if ( !bit_array_test(h->keep_samples,n_sample_ori) ) { while ( *t && t<end ) t++; t++; continue; } } if ( m == bcf_hdr_nsamples(h) ) break; j = 0; // j-th format field, m-th sample while ( t < end ) { fmt_aux_t *z = &fmt[j++]; if (!z->buf) { hts_log_error("Memory allocation failure for FORMAT field type %d at %s:%"PRIhts_pos, z->y>>4&0xf, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_LIMITS; return -1; } if ((z->y>>4&0xf) == BCF_HT_STR) { if (z->is_gt) { // genotypes int32_t is_phased = 0; uint32_t *x = (uint32_t*)(z->buf + z->size * (size_t)m); uint32_t unreadable = 0; uint32_t max = 0; overflow = 0; for (l = 0;; ++t) { if (*t == '.') { ++t, x[l++] = is_phased; } else { char *tt = t; uint32_t val = hts_str2uint(t, &t, sizeof(val) * CHAR_MAX - 2, &overflow); unreadable |= tt == t; if (max < val) max = val; x[l++] = (val + 1) << 1 | is_phased; } is_phased = (*t == '|'); if (*t != '|' && *t != '/') break; } // Possibly check max against v->n_allele instead? if (overflow || max > (INT32_MAX >> 1) - 1) { hts_log_error("Couldn't read GT data: value too large at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); return -1; } if (unreadable) { hts_log_error("Couldn't read GT data: value not a number or '.' at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); return -1; } if ( !l ) x[l++] = 0; // An empty field, insert missing value for (; l < z->size>>2; ++l) x[l] = bcf_int32_vector_end; } else { char *x = (char*)z->buf + z->size * (size_t)m; for (r = t, l = 0; *t != ':' && *t; ++t) x[l++] = *t; for (; l < z->size; ++l) x[l] = 0; } } else if ((z->y>>4&0xf) == BCF_HT_INT) { int32_t *x = (int32_t*)(z->buf + z->size * (size_t)m); for (l = 0;; ++t) { if (*t == '.') { x[l++] = bcf_int32_missing, ++t; // ++t to skip "." } else { overflow = 0; char *te; long int tmp_val = hts_str2int(t, &te, sizeof(tmp_val)*CHAR_BIT, &overflow); if ( te==t || overflow || tmp_val<BCF_MIN_BT_INT32 || tmp_val>BCF_MAX_BT_INT32 ) { if ( !extreme_val_warned ) { hts_log_warning("Extreme FORMAT/%s value encountered and set to missing at %s:%"PRIhts_pos, h->id[BCF_DT_ID][fmt[j-1].key].key, bcf_seqname_safe(h,v), v->pos+1); extreme_val_warned = 1; } tmp_val = bcf_int32_missing; } x[l++] = tmp_val; t = te; } if (*t != ',') break; } if ( !l ) x[l++] = bcf_int32_missing; for (; l < z->size>>2; ++l) x[l] = bcf_int32_vector_end; } else if ((z->y>>4&0xf) == BCF_HT_REAL) { float *x = (float*)(z->buf + z->size * (size_t)m); for (l = 0;; ++t) { if (*t == '.' && !isdigit_c(t[1])) { bcf_float_set_missing(x[l++]), ++t; // ++t to skip "." 
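                        /* Added note: only a bare "." reaches this branch; a
                         * value such as ".5" fails the !isdigit_c(t[1]) test
                         * above and is parsed by hts_str2dbl() in the else
                         * branch below. */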
} else { overflow = 0; char *te; float tmp_val = hts_str2dbl(t, &te, &overflow); if ( (te==t || overflow) && !extreme_val_warned ) { hts_log_warning("Extreme FORMAT/%s value encountered at %s:%"PRIhts_pos, h->id[BCF_DT_ID][fmt[j-1].key].key, bcf_seqname(h,v), v->pos+1); extreme_val_warned = 1; } x[l++] = tmp_val; t = te; } if (*t != ',') break; } if ( !l ) bcf_float_set_missing(x[l++]); // An empty field, insert missing value for (; l < z->size>>2; ++l) bcf_float_set_vector_end(x[l]); } else { hts_log_error("Unknown FORMAT field type %d at %s:%"PRIhts_pos, z->y>>4&0xf, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_TAG_INVALID; return -1; } if (*t == '\0') { break; } else if (*t == ':') { t++; } else { char buffer[8]; hts_log_error("Invalid character %s in '%s' FORMAT field at %s:%"PRIhts_pos"", hts_strprint(buffer, sizeof buffer, '\'', t, 1), h->id[BCF_DT_ID][z->key].key, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_CHAR; return -1; } } for (; j < v->n_fmt; ++j) { // fill end-of-vector values fmt_aux_t *z = &fmt[j]; if ((z->y>>4&0xf) == BCF_HT_STR) { if (z->is_gt) { int32_t *x = (int32_t*)(z->buf + z->size * (size_t)m); if (z->size) x[0] = bcf_int32_missing; for (l = 1; l < z->size>>2; ++l) x[l] = bcf_int32_vector_end; } else { char *x = (char*)z->buf + z->size * (size_t)m; if ( z->size ) x[0] = '.'; for (l = 1; l < z->size; ++l) x[l] = 0; } } else if ((z->y>>4&0xf) == BCF_HT_INT) { int32_t *x = (int32_t*)(z->buf + z->size * (size_t)m); x[0] = bcf_int32_missing; for (l = 1; l < z->size>>2; ++l) x[l] = bcf_int32_vector_end; } else if ((z->y>>4&0xf) == BCF_HT_REAL) { float *x = (float*)(z->buf + z->size * (size_t)m); bcf_float_set_missing(x[0]); for (l = 1; l < z->size>>2; ++l) bcf_float_set_vector_end(x[l]); } } m++; t++; } // write individual genotype information kstring_t *str = &v->indiv; int i; if (v->n_sample > 0) { for (i = 0; i < v->n_fmt; ++i) { fmt_aux_t *z = &fmt[i]; bcf_enc_int1(str, z->key); if ((z->y>>4&0xf) == BCF_HT_STR && !z->is_gt) { bcf_enc_size(str, z->size, BCF_BT_CHAR); kputsn((char*)z->buf, z->size * (size_t)v->n_sample, str); } else if ((z->y>>4&0xf) == BCF_HT_INT || z->is_gt) { bcf_enc_vint(str, (z->size>>2) * v->n_sample, (int32_t*)z->buf, z->size>>2); } else { bcf_enc_size(str, z->size>>2, BCF_BT_FLOAT); if (serialize_float_array(str, (z->size>>2) * (size_t)v->n_sample, (float *) z->buf) != 0) { v->errcode |= BCF_ERR_LIMITS; hts_log_error("Out of memory at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); return -1; } } } } if ( v->n_sample!=bcf_hdr_nsamples(h) ) { hts_log_error("Number of columns at %s:%"PRIhts_pos" does not match the number of samples (%d vs %d)", bcf_seqname_safe(h,v), v->pos+1, v->n_sample, bcf_hdr_nsamples(h)); v->errcode |= BCF_ERR_NCOLS; return -1; } if ( v->indiv.l > 0xffffffff ) { hts_log_error("The FORMAT at %s:%"PRIhts_pos" is too long", bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_LIMITS; // Error recovery: return -1 if this is a critical error or 0 if we want to ignore the FORMAT and proceed v->n_fmt = 0; return -1; } return 0; } static khint_t fix_chromosome(const bcf_hdr_t *h, vdict_t *d, const char *p) { // Simple error recovery for chromosomes not defined in the header. It will not help when VCF header has // been already printed, but will enable tools like vcfcheck to proceed. kstring_t tmp = {0,0,0}; khint_t k; int l; ksprintf(&tmp, "##contig=<ID=%s>", p); bcf_hrec_t *hrec = bcf_hdr_parse_line(h,tmp.s,&l); free(tmp.s); int res = hrec ? 
bcf_hdr_add_hrec((bcf_hdr_t*)h, hrec) : -1; if (res < 0) bcf_hrec_destroy(hrec); if (res > 0) res = bcf_hdr_sync((bcf_hdr_t*)h); k = kh_get(vdict, d, p); return k; } static int vcf_parse_filter(kstring_t *str, const bcf_hdr_t *h, bcf1_t *v, char *p, char *q) { int i, n_flt = 1, max_n_flt = 0; char *r, *t; int32_t *a_flt = NULL; ks_tokaux_t aux1; khint_t k; vdict_t *d = (vdict_t*)h->dict[BCF_DT_ID]; // count the number of filters if (*(q-1) == ';') *(q-1) = 0; for (r = p; *r; ++r) if (*r == ';') ++n_flt; if (n_flt > max_n_flt) { a_flt = malloc(n_flt * sizeof(*a_flt)); if (!a_flt) { hts_log_error("Could not allocate memory at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_LIMITS; // No appropriate code? return -1; } max_n_flt = n_flt; } // add filters for (t = kstrtok(p, ";", &aux1), i = 0; t; t = kstrtok(0, 0, &aux1)) { *(char*)aux1.p = 0; k = kh_get(vdict, d, t); if (k == kh_end(d)) { // Simple error recovery for FILTERs not defined in the header. It will not help when VCF header has // been already printed, but will enable tools like vcfcheck to proceed. hts_log_warning("FILTER '%s' is not defined in the header", t); kstring_t tmp = {0,0,0}; int l; ksprintf(&tmp, "##FILTER=<ID=%s,Description=\"Dummy\">", t); bcf_hrec_t *hrec = bcf_hdr_parse_line(h,tmp.s,&l); free(tmp.s); int res = hrec ? bcf_hdr_add_hrec((bcf_hdr_t*)h, hrec) : -1; if (res < 0) bcf_hrec_destroy(hrec); if (res > 0) res = bcf_hdr_sync((bcf_hdr_t*)h); k = kh_get(vdict, d, t); v->errcode |= BCF_ERR_TAG_UNDEF; if (res || k == kh_end(d)) { hts_log_error("Could not add dummy header for FILTER '%s' at %s:%"PRIhts_pos, t, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_TAG_INVALID; free(a_flt); return -1; } } a_flt[i++] = kh_val(d, k).id; } bcf_enc_vint(str, n_flt, a_flt, -1); free(a_flt); return 0; } static int vcf_parse_info(kstring_t *str, const bcf_hdr_t *h, bcf1_t *v, char *p, char *q) { static int extreme_int_warned = 0, negative_rlen_warned = 0; int max_n_val = 0, overflow = 0; char *r, *key; khint_t k; vdict_t *d = (vdict_t*)h->dict[BCF_DT_ID]; int32_t *a_val = NULL; v->n_info = 0; if (*(q-1) == ';') *(q-1) = 0; for (r = key = p;; ++r) { int c; char *val, *end; if (*r != ';' && *r != '=' && *r != 0) continue; if (v->n_info == UINT16_MAX) { hts_log_error("Too many INFO entries at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_LIMITS; return -1; } val = end = 0; c = *r; *r = 0; if (c == '=') { val = r + 1; for (end = val; *end != ';' && *end != 0; ++end); c = *end; *end = 0; } else end = r; if ( !*key ) { if (c==0) break; r = end; key = r + 1; continue; } // faulty VCF, ";;" in the INFO k = kh_get(vdict, d, key); if (k == kh_end(d) || kh_val(d, k).info[BCF_HL_INFO] == 15) { hts_log_warning("INFO '%s' is not defined in the header, assuming Type=String", key); kstring_t tmp = {0,0,0}; int l; ksprintf(&tmp, "##INFO=<ID=%s,Number=1,Type=String,Description=\"Dummy\">", key); bcf_hrec_t *hrec = bcf_hdr_parse_line(h,tmp.s,&l); free(tmp.s); int res = hrec ? 
bcf_hdr_add_hrec((bcf_hdr_t*)h, hrec) : -1; if (res < 0) bcf_hrec_destroy(hrec); if (res > 0) res = bcf_hdr_sync((bcf_hdr_t*)h); k = kh_get(vdict, d, key); v->errcode = BCF_ERR_TAG_UNDEF; if (res || k == kh_end(d)) { hts_log_error("Could not add dummy header for INFO '%s' at %s:%"PRIhts_pos, key, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_TAG_INVALID; return -1; } } uint32_t y = kh_val(d, k).info[BCF_HL_INFO]; ++v->n_info; bcf_enc_int1(str, kh_val(d, k).id); if (val == 0) { bcf_enc_size(str, 0, BCF_BT_NULL); } else if ((y>>4&0xf) == BCF_HT_FLAG || (y>>4&0xf) == BCF_HT_STR) { // if Flag has a value, treat it as a string bcf_enc_vchar(str, end - val, val); } else { // int/float value/array int i, n_val; char *t, *te; for (t = val, n_val = 1; *t; ++t) // count the number of values if (*t == ',') ++n_val; // Check both int and float size in one step for simplicity if (n_val > max_n_val) { int32_t *a_tmp = (int32_t *)realloc(a_val, n_val * sizeof(*a_val)); if (!a_tmp) { hts_log_error("Could not allocate memory at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_LIMITS; // No appropriate code? return -1; } a_val = a_tmp; max_n_val = n_val; } if ((y>>4&0xf) == BCF_HT_INT) { i = 0, t = val; int64_t val1; int is_int64 = 0; #ifdef VCF_ALLOW_INT64 if ( n_val==1 ) { overflow = 0; long long int tmp_val = hts_str2int(val, &te, sizeof(tmp_val)*CHAR_BIT, &overflow); if ( te==val ) tmp_val = bcf_int32_missing; else if ( overflow || tmp_val<BCF_MIN_BT_INT64 || tmp_val>BCF_MAX_BT_INT64 ) { if ( !extreme_int_warned ) { hts_log_warning("Extreme INFO/%s value encountered and set to missing at %s:%"PRIhts_pos,key,bcf_seqname_safe(h,v), v->pos+1); extreme_int_warned = 1; } tmp_val = bcf_int32_missing; } else is_int64 = 1; val1 = tmp_val; t = te; i = 1; // this is just to avoid adding another nested block... 
} #endif for (; i < n_val; ++i, ++t) { overflow = 0; long int tmp_val = hts_str2int(t, &te, sizeof(tmp_val)*CHAR_BIT, &overflow); if ( te==t ) tmp_val = bcf_int32_missing; else if ( overflow || tmp_val<BCF_MIN_BT_INT32 || tmp_val>BCF_MAX_BT_INT32 ) { if ( !extreme_int_warned ) { hts_log_warning("Extreme INFO/%s value encountered and set to missing at %s:%"PRIhts_pos,key,bcf_seqname_safe(h,v), v->pos+1); extreme_int_warned = 1; } tmp_val = bcf_int32_missing; } a_val[i] = tmp_val; for (t = te; *t && *t != ','; t++); } if (n_val == 1) { #ifdef VCF_ALLOW_INT64 if ( is_int64 ) { v->unpacked |= BCF_IS_64BIT; bcf_enc_long1(str, val1); } else bcf_enc_int1(str, (int32_t)val1); #else val1 = a_val[0]; bcf_enc_int1(str, (int32_t)val1); #endif } else { bcf_enc_vint(str, n_val, a_val, -1); } if (n_val==1 && (val1!=bcf_int32_missing || is_int64) && strcmp(key, "END") == 0) { if ( val1 <= v->pos ) { if ( !negative_rlen_warned ) { hts_log_warning("INFO/END=%"PRIhts_pos" is smaller than POS at %s:%"PRIhts_pos,val1,bcf_seqname_safe(h,v),v->pos+1); negative_rlen_warned = 1; } } else v->rlen = val1 - v->pos; } } else if ((y>>4&0xf) == BCF_HT_REAL) { float *val_f = (float *)a_val; for (i = 0, t = val; i < n_val; ++i, ++t) { overflow = 0; val_f[i] = hts_str2dbl(t, &te, &overflow); if ( te==t || overflow ) // conversion failed bcf_float_set_missing(val_f[i]); for (t = te; *t && *t != ','; t++); } bcf_enc_vfloat(str, n_val, val_f); } } if (c == 0) break; r = end; key = r + 1; } free(a_val); return 0; } int vcf_parse(kstring_t *s, const bcf_hdr_t *h, bcf1_t *v) { int i = 0, ret = -2, overflow = 0; char *p, *q, *r, *t; kstring_t *str; khint_t k; ks_tokaux_t aux; if (!s || !h || !v || !(s->s)) return ret; // Assumed in lots of places, but we may as well spot this early assert(sizeof(float) == sizeof(int32_t)); bcf_clear1(v); str = &v->shared; memset(&aux, 0, sizeof(ks_tokaux_t)); for (p = kstrtok(s->s, "\t", &aux), i = 0; p; p = kstrtok(0, 0, &aux), ++i) { q = (char*)aux.p; *q = 0; if (i == 0) { // CHROM vdict_t *d = (vdict_t*)h->dict[BCF_DT_CTG]; k = kh_get(vdict, d, p); if (k == kh_end(d)) { hts_log_warning("Contig '%s' is not defined in the header. 
(Quick workaround: index the file with tabix.)", p); v->errcode = BCF_ERR_CTG_UNDEF; if ((k = fix_chromosome(h, d, p)) == kh_end(d)) { hts_log_error("Could not add dummy header for contig '%s'", p); v->errcode |= BCF_ERR_CTG_INVALID; goto err; } } v->rid = kh_val(d, k).id; } else if (i == 1) { // POS overflow = 0; v->pos = hts_str2uint(p, &p, 63, &overflow); if (overflow) { hts_log_error("Position value '%s' is too large", p); goto err; } else { v->pos -= 1; } if (v->pos >= INT32_MAX) v->unpacked |= BCF_IS_64BIT; } else if (i == 2) { // ID if (strcmp(p, ".")) bcf_enc_vchar(str, q - p, p); else bcf_enc_size(str, 0, BCF_BT_CHAR); } else if (i == 3) { // REF bcf_enc_vchar(str, q - p, p); v->n_allele = 1, v->rlen = q - p; } else if (i == 4) { // ALT if (strcmp(p, ".")) { for (r = t = p;; ++r) { if (*r == ',' || *r == 0) { if (v->n_allele == UINT16_MAX) { hts_log_error("Too many ALT alleles at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_LIMITS; goto err; } bcf_enc_vchar(str, r - t, t); t = r + 1; ++v->n_allele; } if (r == q) break; } } } else if (i == 5) { // QUAL if (strcmp(p, ".")) v->qual = atof(p); else bcf_float_set_missing(v->qual); if ( v->max_unpack && !(v->max_unpack>>1) ) goto end; // BCF_UN_STR } else if (i == 6) { // FILTER if (strcmp(p, ".")) { if (vcf_parse_filter(str, h, v, p, q)) goto err; } else bcf_enc_vint(str, 0, 0, -1); if ( v->max_unpack && !(v->max_unpack>>2) ) goto end; // BCF_UN_FLT } else if (i == 7) { // INFO if (strcmp(p, ".")) { if (vcf_parse_info(str, h, v, p, q)) goto err; } if ( v->max_unpack && !(v->max_unpack>>3) ) goto end; } else if (i == 8) {// FORMAT return vcf_parse_format(s, h, v, p, q) == 0 ? 0 : -2; } } end: ret = 0; err: return ret; } int vcf_open_mode(char *mode, const char *fn, const char *format) { if (format == NULL) { // Try to pick a format based on the filename extension char extension[HTS_MAX_EXT_LEN]; if (find_file_extension(fn, extension) < 0) return -1; return vcf_open_mode(mode, fn, extension); } else if (strcasecmp(format, "bcf") == 0) strcpy(mode, "b"); else if (strcasecmp(format, "vcf") == 0) strcpy(mode, ""); else if (strcasecmp(format, "vcf.gz") == 0 || strcasecmp(format, "vcf.bgz") == 0) strcpy(mode, "z"); else return -1; return 0; } int vcf_read(htsFile *fp, const bcf_hdr_t *h, bcf1_t *v) { int ret; ret = hts_getline(fp, KS_SEP_LINE, &fp->line); if (ret < 0) return ret; return vcf_parse1(&fp->line, h, v); } static inline uint8_t *bcf_unpack_fmt_core1(uint8_t *ptr, int n_sample, bcf_fmt_t *fmt) { uint8_t *ptr_start = ptr; fmt->id = bcf_dec_typed_int1(ptr, &ptr); fmt->n = bcf_dec_size(ptr, &ptr, &fmt->type); fmt->size = fmt->n << bcf_type_shift[fmt->type]; fmt->p = ptr; fmt->p_off = ptr - ptr_start; fmt->p_free = 0; ptr += n_sample * fmt->size; fmt->p_len = ptr - fmt->p; return ptr; } static inline uint8_t *bcf_unpack_info_core1(uint8_t *ptr, bcf_info_t *info) { uint8_t *ptr_start = ptr; info->key = bcf_dec_typed_int1(ptr, &ptr); info->len = bcf_dec_size(ptr, &ptr, &info->type); info->vptr = ptr; info->vptr_off = ptr - ptr_start; info->vptr_free = 0; info->v1.i = 0; if (info->len == 1) { if (info->type == BCF_BT_INT8 || info->type == BCF_BT_CHAR) info->v1.i = *(int8_t*)ptr; else if (info->type == BCF_BT_INT32) info->v1.i = le_to_i32(ptr); else if (info->type == BCF_BT_FLOAT) info->v1.f = le_to_float(ptr); else if (info->type == BCF_BT_INT16) info->v1.i = le_to_i16(ptr); else if (info->type == BCF_BT_INT64) info->v1.i = le_to_i64(ptr); } ptr += info->len << bcf_type_shift[info->type]; info->vptr_len = ptr - 
info->vptr; return ptr; } int bcf_unpack(bcf1_t *b, int which) { if ( !b->shared.l ) return 0; // Building a new BCF record from scratch uint8_t *ptr = (uint8_t*)b->shared.s, *ptr_ori; int i; bcf_dec_t *d = &b->d; if (which & BCF_UN_FLT) which |= BCF_UN_STR; if (which & BCF_UN_INFO) which |= BCF_UN_SHR; if ((which&BCF_UN_STR) && !(b->unpacked&BCF_UN_STR)) { kstring_t tmp; // ID tmp.l = 0; tmp.s = d->id; tmp.m = d->m_id; ptr_ori = ptr; ptr = bcf_fmt_sized_array(&tmp, ptr); b->unpack_size[0] = ptr - ptr_ori; kputc('\0', &tmp); d->id = tmp.s; d->m_id = tmp.m; // REF and ALT are in a single block (d->als) and d->alleles are pointers into this block hts_expand(char*, b->n_allele, d->m_allele, d->allele); // NM: hts_expand() is a macro tmp.l = 0; tmp.s = d->als; tmp.m = d->m_als; ptr_ori = ptr; char *o = ""; for (i = 0; i < b->n_allele; ++i) { d->allele[i] = o + tmp.l; ptr = bcf_fmt_sized_array(&tmp, ptr); kputc('\0', &tmp); } b->unpack_size[1] = ptr - ptr_ori; d->als = tmp.s; d->m_als = tmp.m; for (i = 0; i < b->n_allele; ++i) d->allele[i] = d->als + (d->allele[i]-o); b->unpacked |= BCF_UN_STR; } if ((which&BCF_UN_FLT) && !(b->unpacked&BCF_UN_FLT)) { // FILTER ptr = (uint8_t*)b->shared.s + b->unpack_size[0] + b->unpack_size[1]; ptr_ori = ptr; if (*ptr>>4) { int type; d->n_flt = bcf_dec_size(ptr, &ptr, &type); hts_expand(int, d->n_flt, d->m_flt, d->flt); for (i = 0; i < d->n_flt; ++i) d->flt[i] = bcf_dec_int1(ptr, type, &ptr); } else ++ptr, d->n_flt = 0; b->unpack_size[2] = ptr - ptr_ori; b->unpacked |= BCF_UN_FLT; } if ((which&BCF_UN_INFO) && !(b->unpacked&BCF_UN_INFO)) { // INFO ptr = (uint8_t*)b->shared.s + b->unpack_size[0] + b->unpack_size[1] + b->unpack_size[2]; hts_expand(bcf_info_t, b->n_info, d->m_info, d->info); for (i = 0; i < d->m_info; ++i) d->info[i].vptr_free = 0; for (i = 0; i < b->n_info; ++i) ptr = bcf_unpack_info_core1(ptr, &d->info[i]); b->unpacked |= BCF_UN_INFO; } if ((which&BCF_UN_FMT) && b->n_sample && !(b->unpacked&BCF_UN_FMT)) { // FORMAT ptr = (uint8_t*)b->indiv.s; hts_expand(bcf_fmt_t, b->n_fmt, d->m_fmt, d->fmt); for (i = 0; i < d->m_fmt; ++i) d->fmt[i].p_free = 0; for (i = 0; i < b->n_fmt; ++i) ptr = bcf_unpack_fmt_core1(ptr, b->n_sample, &d->fmt[i]); b->unpacked |= BCF_UN_FMT; } return 0; } int vcf_format(const bcf_hdr_t *h, const bcf1_t *v, kstring_t *s) { int i; int32_t max_dt_id = h->n[BCF_DT_ID]; const char *chrom = bcf_seqname(h, v); if (!chrom) { hts_log_error("Invalid BCF, CONTIG id=%d not present in the header", v->rid); errno = EINVAL; return -1; } bcf_unpack((bcf1_t*)v, BCF_UN_ALL); kputs(chrom, s); // CHROM kputc('\t', s); kputll(v->pos + 1, s); // POS kputc('\t', s); kputs(v->d.id ? 
v->d.id : ".", s); // ID kputc('\t', s); // REF if (v->n_allele > 0) kputs(v->d.allele[0], s); else kputc('.', s); kputc('\t', s); // ALT if (v->n_allele > 1) { for (i = 1; i < v->n_allele; ++i) { if (i > 1) kputc(',', s); kputs(v->d.allele[i], s); } } else kputc('.', s); kputc('\t', s); // QUAL if ( bcf_float_is_missing(v->qual) ) kputc('.', s); // QUAL else kputd(v->qual, s); kputc('\t', s); // FILTER if (v->d.n_flt) { for (i = 0; i < v->d.n_flt; ++i) { int32_t idx = v->d.flt[i]; if (idx < 0 || idx >= max_dt_id || h->id[BCF_DT_ID][idx].key == NULL) { hts_log_error("Invalid BCF, the FILTER tag id=%d at %s:%"PRIhts_pos" not present in the header", idx, bcf_seqname_safe(h, v), v->pos + 1); errno = EINVAL; return -1; } if (i) kputc(';', s); kputs(h->id[BCF_DT_ID][idx].key, s); } } else kputc('.', s); kputc('\t', s); // INFO if (v->n_info) { int first = 1; for (i = 0; i < v->n_info; ++i) { bcf_info_t *z = &v->d.info[i]; if ( !z->vptr ) continue; if ( !first ) kputc(';', s); first = 0; if (z->key < 0 || z->key >= max_dt_id || h->id[BCF_DT_ID][z->key].key == NULL) { hts_log_error("Invalid BCF, the INFO tag id=%d is %s at %s:%"PRIhts_pos, z->key, z->key < 0 ? "negative" : (z->key >= max_dt_id ? "too large" : "not present in the header"), bcf_seqname_safe(h, v), v->pos+1); errno = EINVAL; return -1; } kputs(h->id[BCF_DT_ID][z->key].key, s); if (z->len <= 0) continue; kputc('=', s); if (z->len == 1) { switch (z->type) { case BCF_BT_INT8: if ( z->v1.i==bcf_int8_missing ) kputc('.', s); else kputw(z->v1.i, s); break; case BCF_BT_INT16: if ( z->v1.i==bcf_int16_missing ) kputc('.', s); else kputw(z->v1.i, s); break; case BCF_BT_INT32: if ( z->v1.i==bcf_int32_missing ) kputc('.', s); else kputw(z->v1.i, s); break; case BCF_BT_INT64: if ( z->v1.i==bcf_int64_missing ) kputc('.', s); else kputll(z->v1.i, s); break; case BCF_BT_FLOAT: if ( bcf_float_is_missing(z->v1.f) ) kputc('.', s); else kputd(z->v1.f, s); break; case BCF_BT_CHAR: kputc(z->v1.i, s); break; default: hts_log_error("Unexpected type %d at %s:%"PRIhts_pos, z->type, bcf_seqname_safe(h, v), v->pos+1); errno = EINVAL; return -1; } } else bcf_fmt_array(s, z->len, z->type, z->vptr); } if ( first ) kputc('.', s); } else kputc('.', s); // FORMAT and individual information if (v->n_sample) { int i,j; if ( v->n_fmt) { int gt_i = -1; bcf_fmt_t *fmt = v->d.fmt; int first = 1; for (i = 0; i < (int)v->n_fmt; ++i) { if ( !fmt[i].p ) continue; kputc(!first ? 
':' : '\t', s); first = 0; if (fmt[i].id < 0 || fmt[i].id >= max_dt_id || h->id[BCF_DT_ID][fmt[i].id].key == NULL) //!bcf_hdr_idinfo_exists(h,BCF_HL_FMT,fmt[i].id) ) { hts_log_error("Invalid BCF, the FORMAT tag id=%d at %s:%"PRIhts_pos" not present in the header", fmt[i].id, bcf_seqname_safe(h, v), v->pos+1); errno = EINVAL; return -1; } kputs(h->id[BCF_DT_ID][fmt[i].id].key, s); if (strcmp(h->id[BCF_DT_ID][fmt[i].id].key, "GT") == 0) gt_i = i; } if ( first ) kputs("\t.", s); for (j = 0; j < v->n_sample; ++j) { kputc('\t', s); first = 1; for (i = 0; i < (int)v->n_fmt; ++i) { bcf_fmt_t *f = &fmt[i]; if ( !f->p ) continue; if (!first) kputc(':', s); first = 0; if (gt_i == i) bcf_format_gt(f,j,s); else bcf_fmt_array(s, f->n, f->type, f->p + j * (size_t)f->size); } if ( first ) kputc('.', s); } } else for (j=0; j<=v->n_sample; j++) kputs("\t.", s); } kputc('\n', s); return 0; } int vcf_write_line(htsFile *fp, kstring_t *line) { int ret; if ( line->s[line->l-1]!='\n' ) kputc('\n',line); if ( fp->format.compression!=no_compression ) ret = bgzf_write(fp->fp.bgzf, line->s, line->l); else ret = hwrite(fp->fp.hfile, line->s, line->l); return ret==line->l ? 0 : -1; } int vcf_write(htsFile *fp, const bcf_hdr_t *h, bcf1_t *v) { int ret; fp->line.l = 0; if (vcf_format1(h, v, &fp->line) != 0) return -1; if ( fp->format.compression!=no_compression ) ret = bgzf_write(fp->fp.bgzf, fp->line.s, fp->line.l); else ret = hwrite(fp->fp.hfile, fp->line.s, fp->line.l); if (fp->idx) { int tid; if ((tid = hts_idx_tbi_name(fp->idx, v->rid, bcf_seqname_safe(h, v))) < 0) return -1; if (hts_idx_push(fp->idx, tid, v->pos, v->pos + v->rlen, bgzf_tell(fp->fp.bgzf), 1) < 0) return -1; } return ret==fp->line.l ? 0 : -1; } /************************ * Data access routines * ************************/ int bcf_hdr_id2int(const bcf_hdr_t *h, int which, const char *id) { khint_t k; vdict_t *d = (vdict_t*)h->dict[which]; k = kh_get(vdict, d, id); return k == kh_end(d)? -1 : kh_val(d, k).id; } /******************** *** BCF indexing *** ********************/ // Calculate number of index levels given min_shift and the header contig // list. Also returns number of contigs in *nids_out. static int idx_calc_n_lvls_ids(const bcf_hdr_t *h, int min_shift, int starting_n_lvls, int *nids_out) { int n_lvls, i, nids = 0; int64_t max_len = 0, s; for (i = 0; i < h->n[BCF_DT_CTG]; ++i) { if ( !h->id[BCF_DT_CTG][i].val ) continue; if ( max_len < h->id[BCF_DT_CTG][i].val->info[0] ) max_len = h->id[BCF_DT_CTG][i].val->info[0]; nids++; } if ( !max_len ) max_len = (1LL<<31) - 1; // In case contig line is broken. 
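/* Each extra index level covers 8x more positions (3 more bits), so levels are
 * added until 1<<(min_shift + 3*n_lvls) spans the longest contig seen in the
 * header (plus the small slack added just below). */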
max_len += 256; s = 1LL << (min_shift + starting_n_lvls * 3); for (n_lvls = starting_n_lvls; max_len > s; ++n_lvls, s <<= 3); if (nids_out) *nids_out = nids; return n_lvls; } hts_idx_t *bcf_index(htsFile *fp, int min_shift) { int n_lvls; bcf1_t *b = NULL; hts_idx_t *idx = NULL; bcf_hdr_t *h; int r; h = bcf_hdr_read(fp); if ( !h ) return NULL; int nids = 0; n_lvls = idx_calc_n_lvls_ids(h, min_shift, 0, &nids); idx = hts_idx_init(nids, HTS_FMT_CSI, bgzf_tell(fp->fp.bgzf), min_shift, n_lvls); if (!idx) goto fail; b = bcf_init1(); if (!b) goto fail; while ((r = bcf_read1(fp,h, b)) >= 0) { int ret; ret = hts_idx_push(idx, b->rid, b->pos, b->pos + b->rlen, bgzf_tell(fp->fp.bgzf), 1); if (ret < 0) goto fail; } if (r < -1) goto fail; hts_idx_finish(idx, bgzf_tell(fp->fp.bgzf)); bcf_destroy1(b); bcf_hdr_destroy(h); return idx; fail: hts_idx_destroy(idx); bcf_destroy1(b); bcf_hdr_destroy(h); return NULL; } hts_idx_t *bcf_index_load2(const char *fn, const char *fnidx) { return fnidx? hts_idx_load2(fn, fnidx) : bcf_index_load(fn); } hts_idx_t *bcf_index_load3(const char *fn, const char *fnidx, int flags) { return hts_idx_load3(fn, fnidx, HTS_FMT_CSI, flags); } int bcf_index_build3(const char *fn, const char *fnidx, int min_shift, int n_threads) { htsFile *fp; hts_idx_t *idx; tbx_t *tbx; int ret; if ((fp = hts_open(fn, "rb")) == 0) return -2; if (n_threads) hts_set_threads(fp, n_threads); if ( fp->format.compression!=bgzf ) { hts_close(fp); return -3; } switch (fp->format.format) { case bcf: if (!min_shift) { hts_log_error("TBI indices for BCF files are not supported"); ret = -1; } else { idx = bcf_index(fp, min_shift); if (idx) { ret = hts_idx_save_as(idx, fn, fnidx, HTS_FMT_CSI); if (ret < 0) ret = -4; hts_idx_destroy(idx); } else ret = -1; } break; case vcf: tbx = tbx_index(hts_get_bgzfp(fp), min_shift, &tbx_conf_vcf); if (tbx) { ret = hts_idx_save_as(tbx->idx, fn, fnidx, min_shift > 0 ? HTS_FMT_CSI : HTS_FMT_TBI); if (ret < 0) ret = -4; tbx_destroy(tbx); } else ret = -1; break; default: ret = -3; break; } hts_close(fp); return ret; } int bcf_index_build2(const char *fn, const char *fnidx, int min_shift) { return bcf_index_build3(fn, fnidx, min_shift, 0); } int bcf_index_build(const char *fn, int min_shift) { return bcf_index_build3(fn, NULL, min_shift, 0); } // Initialise fp->idx for the current format type. // This must be called after the header has been written but no other data. static int vcf_idx_init(htsFile *fp, bcf_hdr_t *h, int min_shift, const char *fnidx) { int n_lvls, fmt; if (min_shift == 0) { min_shift = 14; n_lvls = 5; fmt = HTS_FMT_TBI; } else { // Set initial n_lvls to match tbx_index() int starting_n_lvls = (TBX_MAX_SHIFT - min_shift + 2) / 3; // Increase if necessary n_lvls = idx_calc_n_lvls_ids(h, min_shift, starting_n_lvls, NULL); fmt = HTS_FMT_CSI; } fp->idx = hts_idx_init(0, fmt, bgzf_tell(fp->fp.bgzf), min_shift, n_lvls); if (!fp->idx) return -1; // Tabix meta data, added even in CSI for VCF uint8_t conf[4*7]; u32_to_le(TBX_VCF, conf+0); // fmt u32_to_le(1, conf+4); // name col u32_to_le(2, conf+8); // beg col u32_to_le(0, conf+12); // end col u32_to_le('#', conf+16); // comment u32_to_le(0, conf+20); // n.skip u32_to_le(0, conf+24); // ref name len if (hts_idx_set_meta(fp->idx, sizeof(conf)*sizeof(*conf), (uint8_t *)conf, 1) < 0) { hts_idx_destroy(fp->idx); fp->idx = NULL; return -1; } fp->fnidx = fnidx; return 0; } // Initialise fp->idx for the current format type. // This must be called after the header has been written but no other data. 
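/* Hedged usage sketch, not part of the original source: for on-the-fly indexing,
 * write the header first, then
 *     bcf_idx_init(fp, hdr, 14, "out.bcf.csi");   // min_shift 0 selects TBI for VCF output
 *     ... bcf_write(fp, hdr, rec) for each record ...
 *     bcf_idx_save(fp);                           // finalise after the last record
 * min_shift of 14 is the usual CSI choice; the index file name above is only illustrative. */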
int bcf_idx_init(htsFile *fp, bcf_hdr_t *h, int min_shift, const char *fnidx) { int n_lvls, nids = 0; if (fp->format.format == vcf) return vcf_idx_init(fp, h, min_shift, fnidx); if (!min_shift) min_shift = 14; n_lvls = idx_calc_n_lvls_ids(h, min_shift, 0, &nids); fp->idx = hts_idx_init(nids, HTS_FMT_CSI, bgzf_tell(fp->fp.bgzf), min_shift, n_lvls); if (!fp->idx) return -1; fp->fnidx = fnidx; return 0; } // Finishes an index. Call afer the last record has been written. // Returns 0 on success, <0 on failure. // // NB: same format as SAM/BAM as it uses bgzf. int bcf_idx_save(htsFile *fp) { return sam_idx_save(fp); } /***************** *** Utilities *** *****************/ int bcf_hdr_combine(bcf_hdr_t *dst, const bcf_hdr_t *src) { int i, ndst_ori = dst->nhrec, need_sync = 0, ret = 0, res; for (i=0; i<src->nhrec; i++) { if ( src->hrec[i]->type==BCF_HL_GEN && src->hrec[i]->value ) { int j; for (j=0; j<ndst_ori; j++) { if ( dst->hrec[j]->type!=BCF_HL_GEN ) continue; // Checking only the key part of generic lines, otherwise // the VCFs are too verbose. Should we perhaps add a flag // to bcf_hdr_combine() and make this optional? if ( !strcmp(src->hrec[i]->key,dst->hrec[j]->key) ) break; } if ( j>=ndst_ori ) { res = bcf_hdr_add_hrec(dst, bcf_hrec_dup(src->hrec[i])); if (res < 0) return -1; need_sync += res; } } else if ( src->hrec[i]->type==BCF_HL_STR ) { // NB: we are ignoring fields without ID int j = bcf_hrec_find_key(src->hrec[i],"ID"); if ( j>=0 ) { bcf_hrec_t *rec = bcf_hdr_get_hrec(dst, src->hrec[i]->type, "ID", src->hrec[i]->vals[j], src->hrec[i]->key); if ( !rec ) { res = bcf_hdr_add_hrec(dst, bcf_hrec_dup(src->hrec[i])); if (res < 0) return -1; need_sync += res; } } } else { int j = bcf_hrec_find_key(src->hrec[i],"ID"); assert( j>=0 ); // this should always be true for valid VCFs bcf_hrec_t *rec = bcf_hdr_get_hrec(dst, src->hrec[i]->type, "ID", src->hrec[i]->vals[j], NULL); if ( !rec ) { res = bcf_hdr_add_hrec(dst, bcf_hrec_dup(src->hrec[i])); if (res < 0) return -1; need_sync += res; } else if ( src->hrec[i]->type==BCF_HL_INFO || src->hrec[i]->type==BCF_HL_FMT ) { // Check that both records are of the same type. The bcf_hdr_id2length // macro cannot be used here because dst header is not synced yet. 
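/* kh_val(d,k).info[] packs the VCF Number (length) class in bits 8-11 and the
 * value type in bits 4-7 for each header-line class, so the raw dictionary
 * words of the two headers are compared directly here. */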
vdict_t *d_src = (vdict_t*)src->dict[BCF_DT_ID]; vdict_t *d_dst = (vdict_t*)dst->dict[BCF_DT_ID]; khint_t k_src = kh_get(vdict, d_src, src->hrec[i]->vals[0]); khint_t k_dst = kh_get(vdict, d_dst, src->hrec[i]->vals[0]); if ( (kh_val(d_src,k_src).info[rec->type]>>8 & 0xf) != (kh_val(d_dst,k_dst).info[rec->type]>>8 & 0xf) ) { hts_log_warning("Trying to combine \"%s\" tag definitions of different lengths", src->hrec[i]->vals[0]); ret |= 1; } if ( (kh_val(d_src,k_src).info[rec->type]>>4 & 0xf) != (kh_val(d_dst,k_dst).info[rec->type]>>4 & 0xf) ) { hts_log_warning("Trying to combine \"%s\" tag definitions of different types", src->hrec[i]->vals[0]); ret |= 1; } } } } if ( need_sync ) { if (bcf_hdr_sync(dst) < 0) return -1; } return ret; } bcf_hdr_t *bcf_hdr_merge(bcf_hdr_t *dst, const bcf_hdr_t *src) { if ( !dst ) { // this will effectively strip existing IDX attributes from src to become dst dst = bcf_hdr_init("r"); kstring_t htxt = {0,0,0}; bcf_hdr_format(src, 0, &htxt); if ( bcf_hdr_parse(dst, htxt.s) < 0 ) { bcf_hdr_destroy(dst); dst = NULL; } free(htxt.s); return dst; } int i, ndst_ori = dst->nhrec, need_sync = 0, ret = 0, res; for (i=0; i<src->nhrec; i++) { if ( src->hrec[i]->type==BCF_HL_GEN && src->hrec[i]->value ) { int j; for (j=0; j<ndst_ori; j++) { if ( dst->hrec[j]->type!=BCF_HL_GEN ) continue; // Checking only the key part of generic lines, otherwise // the VCFs are too verbose. Should we perhaps add a flag // to bcf_hdr_combine() and make this optional? if ( !strcmp(src->hrec[i]->key,dst->hrec[j]->key) ) break; } if ( j>=ndst_ori ) { res = bcf_hdr_add_hrec(dst, bcf_hrec_dup(src->hrec[i])); if (res < 0) return NULL; need_sync += res; } } else if ( src->hrec[i]->type==BCF_HL_STR ) { // NB: we are ignoring fields without ID int j = bcf_hrec_find_key(src->hrec[i],"ID"); if ( j>=0 ) { bcf_hrec_t *rec = bcf_hdr_get_hrec(dst, src->hrec[i]->type, "ID", src->hrec[i]->vals[j], src->hrec[i]->key); if ( !rec ) { res = bcf_hdr_add_hrec(dst, bcf_hrec_dup(src->hrec[i])); if (res < 0) return NULL; need_sync += res; } } } else { int j = bcf_hrec_find_key(src->hrec[i],"ID"); assert( j>=0 ); // this should always be true for valid VCFs bcf_hrec_t *rec = bcf_hdr_get_hrec(dst, src->hrec[i]->type, "ID", src->hrec[i]->vals[j], NULL); if ( !rec ) { res = bcf_hdr_add_hrec(dst, bcf_hrec_dup(src->hrec[i])); if (res < 0) return NULL; need_sync += res; } else if ( src->hrec[i]->type==BCF_HL_INFO || src->hrec[i]->type==BCF_HL_FMT ) { // Check that both records are of the same type. The bcf_hdr_id2length // macro cannot be used here because dst header is not synced yet. 
vdict_t *d_src = (vdict_t*)src->dict[BCF_DT_ID]; vdict_t *d_dst = (vdict_t*)dst->dict[BCF_DT_ID]; khint_t k_src = kh_get(vdict, d_src, src->hrec[i]->vals[0]); khint_t k_dst = kh_get(vdict, d_dst, src->hrec[i]->vals[0]); if ( (kh_val(d_src,k_src).info[rec->type]>>8 & 0xf) != (kh_val(d_dst,k_dst).info[rec->type]>>8 & 0xf) ) { hts_log_warning("Trying to combine \"%s\" tag definitions of different lengths", src->hrec[i]->vals[0]); ret |= 1; } if ( (kh_val(d_src,k_src).info[rec->type]>>4 & 0xf) != (kh_val(d_dst,k_dst).info[rec->type]>>4 & 0xf) ) { hts_log_warning("Trying to combine \"%s\" tag definitions of different types", src->hrec[i]->vals[0]); ret |= 1; } } } } if ( need_sync ) { if (bcf_hdr_sync(dst) < 0) return NULL; } return dst; } int bcf_translate(const bcf_hdr_t *dst_hdr, bcf_hdr_t *src_hdr, bcf1_t *line) { int i; if ( line->errcode ) { hts_log_error("Unchecked error (%d) at %s:%"PRIhts_pos", exiting", line->errcode, bcf_seqname_safe(src_hdr,line), line->pos+1); exit(1); } if ( src_hdr->ntransl==-1 ) return 0; // no need to translate, all tags have the same id if ( !src_hdr->ntransl ) // called for the first time, see what needs translating { int dict; for (dict=0; dict<2; dict++) // BCF_DT_ID and BCF_DT_CTG { src_hdr->transl[dict] = (int*) malloc(src_hdr->n[dict]*sizeof(int)); for (i=0; i<src_hdr->n[dict]; i++) { if ( !src_hdr->id[dict][i].key ) // gap left after removed BCF header lines { src_hdr->transl[dict][i] = -1; continue; } src_hdr->transl[dict][i] = bcf_hdr_id2int(dst_hdr,dict,src_hdr->id[dict][i].key); if ( src_hdr->transl[dict][i]!=-1 && i!=src_hdr->transl[dict][i] ) src_hdr->ntransl++; } } if ( !src_hdr->ntransl ) { free(src_hdr->transl[0]); src_hdr->transl[0] = NULL; free(src_hdr->transl[1]); src_hdr->transl[1] = NULL; src_hdr->ntransl = -1; } if ( src_hdr->ntransl==-1 ) return 0; } bcf_unpack(line,BCF_UN_ALL); // CHROM if ( src_hdr->transl[BCF_DT_CTG][line->rid] >=0 ) line->rid = src_hdr->transl[BCF_DT_CTG][line->rid]; // FILTER for (i=0; i<line->d.n_flt; i++) { int src_id = line->d.flt[i]; if ( src_hdr->transl[BCF_DT_ID][src_id] >=0 ) line->d.flt[i] = src_hdr->transl[BCF_DT_ID][src_id]; line->d.shared_dirty |= BCF1_DIRTY_FLT; } // INFO for (i=0; i<line->n_info; i++) { int src_id = line->d.info[i].key; int dst_id = src_hdr->transl[BCF_DT_ID][src_id]; if ( dst_id<0 ) continue; line->d.info[i].key = dst_id; if ( !line->d.info[i].vptr ) continue; // skip deleted int src_size = src_id>>7 ? ( src_id>>15 ? BCF_BT_INT32 : BCF_BT_INT16) : BCF_BT_INT8; int dst_size = dst_id>>7 ? ( dst_id>>15 ? 
BCF_BT_INT32 : BCF_BT_INT16) : BCF_BT_INT8; if ( src_size==dst_size ) // can overwrite { uint8_t *vptr = line->d.info[i].vptr - line->d.info[i].vptr_off; if ( dst_size==BCF_BT_INT8 ) { vptr[1] = (uint8_t)dst_id; } else if ( dst_size==BCF_BT_INT16 ) { *(uint16_t*)vptr = (uint16_t)dst_id; } else { *(uint32_t*)vptr = (uint32_t)dst_id; } } else // must realloc { bcf_info_t *info = &line->d.info[i]; kstring_t str = {0,0,0}; bcf_enc_int1(&str, dst_id); bcf_enc_size(&str, info->len,info->type); uint32_t vptr_off = str.l; kputsn((char*)info->vptr, info->vptr_len, &str); if( info->vptr_free ) free(info->vptr - info->vptr_off); info->vptr_off = vptr_off; info->vptr = (uint8_t*)str.s + info->vptr_off; info->vptr_free = 1; line->d.shared_dirty |= BCF1_DIRTY_INF; } } // FORMAT for (i=0; i<line->n_fmt; i++) { int src_id = line->d.fmt[i].id; int dst_id = src_hdr->transl[BCF_DT_ID][src_id]; if ( dst_id<0 ) continue; line->d.fmt[i].id = dst_id; if( !line->d.fmt[i].p ) continue; // skip deleted int src_size = src_id>>7 ? ( src_id>>15 ? BCF_BT_INT32 : BCF_BT_INT16) : BCF_BT_INT8; int dst_size = dst_id>>7 ? ( dst_id>>15 ? BCF_BT_INT32 : BCF_BT_INT16) : BCF_BT_INT8; if ( src_size==dst_size ) // can overwrite { uint8_t *p = line->d.fmt[i].p - line->d.fmt[i].p_off; // pointer to the vector size (4bits) and BT type (4bits) if ( dst_size==BCF_BT_INT8 ) { p[1] = dst_id; } else if ( dst_size==BCF_BT_INT16 ) { i16_to_le(dst_id, p + 1); } else { i32_to_le(dst_id, p + 1); } } else // must realloc { bcf_fmt_t *fmt = &line->d.fmt[i]; kstring_t str = {0,0,0}; bcf_enc_int1(&str, dst_id); bcf_enc_size(&str, fmt->n, fmt->type); uint32_t p_off = str.l; kputsn((char*)fmt->p, fmt->p_len, &str); if( fmt->p_free ) free(fmt->p - fmt->p_off); fmt->p_off = p_off; fmt->p = (uint8_t*)str.s + fmt->p_off; fmt->p_free = 1; line->d.indiv_dirty = 1; } } return 0; } bcf_hdr_t *bcf_hdr_dup(const bcf_hdr_t *hdr) { bcf_hdr_t *hout = bcf_hdr_init("r"); if (!hout) { hts_log_error("Failed to allocate bcf header"); return NULL; } kstring_t htxt = {0,0,0}; bcf_hdr_format(hdr, 1, &htxt); if ( bcf_hdr_parse(hout, htxt.s) < 0 ) { bcf_hdr_destroy(hout); hout = NULL; } free(htxt.s); return hout; } bcf_hdr_t *bcf_hdr_subset(const bcf_hdr_t *h0, int n, char *const* samples, int *imap) { void *names_hash = khash_str2int_init(); kstring_t htxt = {0,0,0}; kstring_t str = {0,0,0}; bcf_hdr_t *h = bcf_hdr_init("w"); if (!h) { hts_log_error("Failed to allocate bcf header"); khash_str2int_destroy(names_hash); return NULL; } bcf_hdr_format(h0, 1, &htxt); bcf_hdr_set_version(h,bcf_hdr_get_version(h0)); int j; for (j=0; j<n; j++) imap[j] = -1; if ( bcf_hdr_nsamples(h0) > 0) { char *p = find_chrom_header_line(htxt.s); int i = 0, end = n? 
8 : 7; while ((p = strchr(p, '\t')) != 0 && i < end) ++i, ++p; if (i != end) { free(h); free(str.s); return 0; // malformated header } kputsn(htxt.s, p - htxt.s, &str); for (i = 0; i < n; ++i) { if ( khash_str2int_has_key(names_hash,samples[i]) ) { hts_log_error("Duplicate sample name \"%s\"", samples[i]); free(str.s); free(htxt.s); khash_str2int_destroy(names_hash); bcf_hdr_destroy(h); return NULL; } imap[i] = bcf_hdr_id2int(h0, BCF_DT_SAMPLE, samples[i]); if (imap[i] < 0) continue; kputc('\t', &str); kputs(samples[i], &str); khash_str2int_inc(names_hash,samples[i]); } } else kputsn(htxt.s, htxt.l, &str); while (str.l && (!str.s[str.l-1] || str.s[str.l-1]=='\n') ) str.l--; // kill trailing zeros and newlines kputc('\n',&str); if ( bcf_hdr_parse(h, str.s) < 0 ) { bcf_hdr_destroy(h); h = NULL; } free(str.s); free(htxt.s); khash_str2int_destroy(names_hash); return h; } int bcf_hdr_set_samples(bcf_hdr_t *hdr, const char *samples, int is_file) { if ( samples && !strcmp("-",samples) ) return 0; // keep all samples int i, narr = bit_array_size(bcf_hdr_nsamples(hdr)); hdr->keep_samples = (uint8_t*) calloc(narr,1); if (!hdr->keep_samples) return -1; hdr->nsamples_ori = bcf_hdr_nsamples(hdr); if ( !samples ) { // exclude all samples khint_t k; vdict_t *d = (vdict_t*)hdr->dict[BCF_DT_SAMPLE], *new_dict; new_dict = kh_init(vdict); if (!new_dict) return -1; bcf_hdr_nsamples(hdr) = 0; for (k = kh_begin(d); k != kh_end(d); ++k) if (kh_exist(d, k)) free((char*)kh_key(d, k)); kh_destroy(vdict, d); hdr->dict[BCF_DT_SAMPLE] = new_dict; if (bcf_hdr_sync(hdr) < 0) return -1; return 0; } if ( samples[0]=='^' ) for (i=0; i<bcf_hdr_nsamples(hdr); i++) bit_array_set(hdr->keep_samples,i); int idx, n, ret = 0; char **smpls = hts_readlist(samples[0]=='^'?samples+1:samples, is_file, &n); if ( !smpls ) return -1; for (i=0; i<n; i++) { idx = bcf_hdr_id2int(hdr,BCF_DT_SAMPLE,smpls[i]); if ( idx<0 ) { if ( !ret ) ret = i+1; continue; } assert( idx<bcf_hdr_nsamples(hdr) ); if ( samples[0]=='^' ) bit_array_clear(hdr->keep_samples, idx); else bit_array_set(hdr->keep_samples, idx); } for (i=0; i<n; i++) free(smpls[i]); free(smpls); bcf_hdr_nsamples(hdr) = 0; for (i=0; i<hdr->nsamples_ori; i++) if ( bit_array_test(hdr->keep_samples,i) ) bcf_hdr_nsamples(hdr)++; if ( !bcf_hdr_nsamples(hdr) ) { free(hdr->keep_samples); hdr->keep_samples=NULL; } else { // Make new list and dictionary with desired samples char **samples = (char**) malloc(sizeof(char*)*bcf_hdr_nsamples(hdr)); vdict_t *new_dict, *d; int k, res; if (!samples) return -1; new_dict = kh_init(vdict); if (!new_dict) { free(samples); return -1; } idx = 0; for (i=0; i<hdr->nsamples_ori; i++) { if ( bit_array_test(hdr->keep_samples,i) ) { samples[idx] = hdr->samples[i]; k = kh_put(vdict, new_dict, hdr->samples[i], &res); if (res < 0) { free(samples); kh_destroy(vdict, new_dict); return -1; } kh_val(new_dict, k) = bcf_idinfo_def; kh_val(new_dict, k).id = idx; idx++; } } // Delete desired samples from old dictionary, so we don't free them d = (vdict_t*)hdr->dict[BCF_DT_SAMPLE]; for (i=0; i < idx; i++) { int k = kh_get(vdict, d, samples[i]); if (k < kh_end(d)) kh_del(vdict, d, k); } // Free everything else for (k = kh_begin(d); k != kh_end(d); ++k) if (kh_exist(d, k)) free((char*)kh_key(d, k)); kh_destroy(vdict, d); hdr->dict[BCF_DT_SAMPLE] = new_dict; free(hdr->samples); hdr->samples = samples; if (bcf_hdr_sync(hdr) < 0) return -1; } return ret; } int bcf_subset(const bcf_hdr_t *h, bcf1_t *v, int n, int *imap) { kstring_t ind; ind.s = 0; ind.l = ind.m = 0; if (n) { bcf_fmt_t 
fmt[MAX_N_FMT]; int i, j; uint8_t *ptr = (uint8_t*)v->indiv.s; for (i = 0; i < v->n_fmt; ++i) ptr = bcf_unpack_fmt_core1(ptr, v->n_sample, &fmt[i]); for (i = 0; i < (int)v->n_fmt; ++i) { bcf_fmt_t *f = &fmt[i]; bcf_enc_int1(&ind, f->id); bcf_enc_size(&ind, f->n, f->type); for (j = 0; j < n; ++j) if (imap[j] >= 0) kputsn((char*)(f->p + imap[j] * f->size), f->size, &ind); } for (i = j = 0; j < n; ++j) if (imap[j] >= 0) ++i; v->n_sample = i; } else v->n_sample = 0; if ( !v->n_sample ) v->n_fmt = 0; free(v->indiv.s); v->indiv = ind; v->unpacked &= ~BCF_UN_FMT; // only BCF is ready for output, VCF will need to unpack again return 0; } int bcf_is_snp(bcf1_t *v) { int i; bcf_unpack(v, BCF_UN_STR); for (i = 0; i < v->n_allele; ++i) { if ( v->d.allele[i][1]==0 && v->d.allele[i][0]!='*' ) continue; // mpileup's <X> allele, see also below. This is not completely satisfactory, // a general library is here narrowly tailored to fit samtools. if ( v->d.allele[i][0]=='<' && v->d.allele[i][1]=='X' && v->d.allele[i][2]=='>' ) continue; if ( v->d.allele[i][0]=='<' && v->d.allele[i][1]=='*' && v->d.allele[i][2]=='>' ) continue; break; } return i == v->n_allele; } static void bcf_set_variant_type(const char *ref, const char *alt, variant_t *var) { if ( *alt == '*' && !alt[1] ) { var->n = 0; var->type = VCF_OVERLAP; return; } // overlapping variant // The most frequent case if ( !ref[1] && !alt[1] ) { if ( *alt == '.' || *ref==*alt ) { var->n = 0; var->type = VCF_REF; return; } if ( *alt == 'X' ) { var->n = 0; var->type = VCF_REF; return; } // mpileup's X allele shouldn't be treated as variant var->n = 1; var->type = VCF_SNP; return; } if ( alt[0]=='<' ) { if ( alt[1]=='X' && alt[2]=='>' ) { var->n = 0; var->type = VCF_REF; return; } // mpileup's X allele shouldn't be treated as variant if ( alt[1]=='*' && alt[2]=='>' ) { var->n = 0; var->type = VCF_REF; return; } if ( !strcmp("NON_REF>",alt+1) ) { var->n = 0; var->type = VCF_REF; return; } var->type = VCF_OTHER; return; } const char *r = ref, *a = alt; while (*r && *a && toupper_c(*r)==toupper_c(*a) ) { r++; a++; } // unfortunately, matching REF,ALT case is not guaranteed if ( *a && !*r ) { if ( *a==']' || *a=='[' ) { var->type = VCF_BND; return; } while ( *a ) a++; var->n = (a-alt)-(r-ref); var->type = VCF_INDEL; return; } else if ( *r && !*a ) { while ( *r ) r++; var->n = (a-alt)-(r-ref); var->type = VCF_INDEL; return; } else if ( !*r && !*a ) { var->n = 0; var->type = VCF_REF; return; } const char *re = r, *ae = a; while ( re[1] ) re++; while ( ae[1] ) ae++; while ( re>r && ae>a && toupper_c(*re)==toupper_c(*ae) ) { re--; ae--; } if ( ae==a ) { if ( re==r ) { var->n = 1; var->type = VCF_SNP; return; } var->n = -(re-r); if ( toupper_c(*re)==toupper_c(*ae) ) { var->type = VCF_INDEL; return; } var->type = VCF_OTHER; return; } else if ( re==r ) { var->n = ae-a; if ( toupper_c(*re)==toupper_c(*ae) ) { var->type = VCF_INDEL; return; } var->type = VCF_OTHER; return; } var->type = ( re-r == ae-a ) ? VCF_MNP : VCF_OTHER; var->n = ( re-r > ae-a ) ? -(re-r+1) : ae-a+1; // should do also complex events, SVs, etc... 
} static int bcf_set_variant_types(bcf1_t *b) { if ( !(b->unpacked & BCF_UN_STR) ) bcf_unpack(b, BCF_UN_STR); bcf_dec_t *d = &b->d; if ( d->n_var < b->n_allele ) { d->var = (variant_t *) realloc(d->var, sizeof(variant_t)*b->n_allele); d->n_var = b->n_allele; } int i; b->d.var_type = 0; d->var[0].type = VCF_REF; d->var[0].n = 0; for (i=1; i<b->n_allele; i++) { bcf_set_variant_type(d->allele[0],d->allele[i], &d->var[i]); b->d.var_type |= d->var[i].type; //fprintf(stderr,"[set_variant_type] %d %s %s -> %d %d .. %d\n", b->pos+1,d->allele[0],d->allele[i],d->var[i].type,d->var[i].n, b->d.var_type); } return 0; } int bcf_get_variant_types(bcf1_t *rec) { if ( rec->d.var_type==-1 ) bcf_set_variant_types(rec); return rec->d.var_type; } int bcf_get_variant_type(bcf1_t *rec, int ith_allele) { if ( rec->d.var_type==-1 ) bcf_set_variant_types(rec); return rec->d.var[ith_allele].type; } int bcf_update_info(const bcf_hdr_t *hdr, bcf1_t *line, const char *key, const void *values, int n, int type) { static int negative_rlen_warned = 0; int is_end_tag; // Is the field already present? int i, inf_id = bcf_hdr_id2int(hdr,BCF_DT_ID,key); if ( !bcf_hdr_idinfo_exists(hdr,BCF_HL_INFO,inf_id) ) return -1; // No such INFO field in the header if ( !(line->unpacked & BCF_UN_INFO) ) bcf_unpack(line, BCF_UN_INFO); is_end_tag = strcmp(key, "END") == 0; for (i=0; i<line->n_info; i++) if ( inf_id==line->d.info[i].key ) break; bcf_info_t *inf = i==line->n_info ? NULL : &line->d.info[i]; if ( !n || (type==BCF_HT_STR && !values) ) { if ( n==0 && is_end_tag ) line->rlen = line->n_allele ? strlen(line->d.allele[0]) : 0; if ( inf ) { // Mark the tag for removal, free existing memory if necessary if ( inf->vptr_free ) { free(inf->vptr - inf->vptr_off); inf->vptr_free = 0; } line->d.shared_dirty |= BCF1_DIRTY_INF; inf->vptr = NULL; inf->vptr_off = inf->vptr_len = 0; } return 0; } if (is_end_tag) { if (n != 1) { hts_log_error("END info tag should only have one value at %s:%"PRIhts_pos, bcf_seqname_safe(hdr,line), line->pos+1); line->errcode |= BCF_ERR_TAG_INVALID; return -1; } if (type != BCF_HT_INT && type != BCF_HT_LONG) { hts_log_error("Wrong type (%d) for END info tag at %s:%"PRIhts_pos, type, bcf_seqname_safe(hdr,line), line->pos+1); line->errcode |= BCF_ERR_TAG_INVALID; return -1; } } // Encode the values and determine the size required to accommodate the values kstring_t str = {0,0,0}; bcf_enc_int1(&str, inf_id); if ( type==BCF_HT_INT ) bcf_enc_vint(&str, n, (int32_t*)values, -1); else if ( type==BCF_HT_REAL ) bcf_enc_vfloat(&str, n, (float*)values); else if ( type==BCF_HT_FLAG || type==BCF_HT_STR ) { if ( values==NULL ) bcf_enc_size(&str, 0, BCF_BT_NULL); else bcf_enc_vchar(&str, strlen((char*)values), (char*)values); } #ifdef VCF_ALLOW_INT64 else if ( type==BCF_HT_LONG ) { if (n != 1) { hts_log_error("Only storing a single BCF_HT_LONG value is supported at %s:%"PRIhts_pos, bcf_seqname_safe(hdr,line), line->pos+1); abort(); } bcf_enc_long1(&str, *(int64_t *) values); } #endif else { hts_log_error("The type %d not implemented yet at %s:%"PRIhts_pos, type, bcf_seqname_safe(hdr,line), line->pos+1); abort(); } // Is the INFO tag already present if ( inf ) { // Is it big enough to accommodate new block? 
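/* In-place update strategy: if the freshly encoded block fits in the existing
 * slot it is copied over it, otherwise the old block is freed (when owned by
 * the record) and replaced by the new allocation; the record is flagged
 * BCF1_DIRTY_INF whenever the stored size changes. */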
if ( str.l <= inf->vptr_len + inf->vptr_off ) { if ( str.l != inf->vptr_len + inf->vptr_off ) line->d.shared_dirty |= BCF1_DIRTY_INF; uint8_t *ptr = inf->vptr - inf->vptr_off; memcpy(ptr, str.s, str.l); free(str.s); int vptr_free = inf->vptr_free; bcf_unpack_info_core1(ptr, inf); inf->vptr_free = vptr_free; } else { if ( inf->vptr_free ) free(inf->vptr - inf->vptr_off); bcf_unpack_info_core1((uint8_t*)str.s, inf); inf->vptr_free = 1; line->d.shared_dirty |= BCF1_DIRTY_INF; } } else { // The tag is not present, create new one line->n_info++; hts_expand0(bcf_info_t, line->n_info, line->d.m_info , line->d.info); inf = &line->d.info[line->n_info-1]; bcf_unpack_info_core1((uint8_t*)str.s, inf); inf->vptr_free = 1; line->d.shared_dirty |= BCF1_DIRTY_INF; } line->unpacked |= BCF_UN_INFO; if ( n==1 && is_end_tag) { hts_pos_t end = type == BCF_HT_INT ? *(int32_t *) values : *(int64_t *) values; if ( (type == BCF_HT_INT && end!=bcf_int32_missing) || (type == BCF_HT_LONG && end!=bcf_int64_missing) ) { if ( end <= line->pos ) { if ( !negative_rlen_warned ) { hts_log_warning("INFO/END=%"PRIhts_pos" is smaller than POS at %s:%"PRIhts_pos,end,bcf_seqname_safe(hdr,line),line->pos+1); negative_rlen_warned = 1; } line->rlen = line->n_allele ? strlen(line->d.allele[0]) : 0; } else line->rlen = end - line->pos; } } return 0; } int bcf_update_format_string(const bcf_hdr_t *hdr, bcf1_t *line, const char *key, const char **values, int n) { if ( !n ) return bcf_update_format(hdr,line,key,NULL,0,BCF_HT_STR); int i, max_len = 0; for (i=0; i<n; i++) { int len = strlen(values[i]); if ( len > max_len ) max_len = len; } char *out = (char*) malloc(max_len*n); if ( !out ) return -2; for (i=0; i<n; i++) { char *dst = out+i*max_len; const char *src = values[i]; int j = 0; while ( src[j] ) { dst[j] = src[j]; j++; } for (; j<max_len; j++) dst[j] = 0; } int ret = bcf_update_format(hdr,line,key,out,max_len*n,BCF_HT_STR); free(out); return ret; } int bcf_update_format(const bcf_hdr_t *hdr, bcf1_t *line, const char *key, const void *values, int n, int type) { // Is the field already present? int i, fmt_id = bcf_hdr_id2int(hdr,BCF_DT_ID,key); if ( !bcf_hdr_idinfo_exists(hdr,BCF_HL_FMT,fmt_id) ) { if ( !n ) return 0; return -1; // the key not present in the header } if ( !(line->unpacked & BCF_UN_FMT) ) bcf_unpack(line, BCF_UN_FMT); for (i=0; i<line->n_fmt; i++) if ( line->d.fmt[i].id==fmt_id ) break; bcf_fmt_t *fmt = i==line->n_fmt ? 
NULL : &line->d.fmt[i]; if ( !n ) { if ( fmt ) { // Mark the tag for removal, free existing memory if necessary if ( fmt->p_free ) { free(fmt->p - fmt->p_off); fmt->p_free = 0; } line->d.indiv_dirty = 1; fmt->p = NULL; } return 0; } line->n_sample = bcf_hdr_nsamples(hdr); int nps = n / line->n_sample; // number of values per sample assert( nps && nps*line->n_sample==n ); // must be divisible by n_sample // Encode the values and determine the size required to accommodate the values kstring_t str = {0,0,0}; bcf_enc_int1(&str, fmt_id); assert(values != NULL); if ( type==BCF_HT_INT ) bcf_enc_vint(&str, n, (int32_t*)values, nps); else if ( type==BCF_HT_REAL ) { bcf_enc_size(&str, nps, BCF_BT_FLOAT); serialize_float_array(&str, nps*line->n_sample, (float *) values); } else if ( type==BCF_HT_STR ) { bcf_enc_size(&str, nps, BCF_BT_CHAR); kputsn((char*)values, nps*line->n_sample, &str); } else { hts_log_error("The type %d not implemented yet at %s:%"PRIhts_pos, type, bcf_seqname_safe(hdr,line), line->pos+1); abort(); } if ( !fmt ) { // Not present, new format field line->n_fmt++; hts_expand0(bcf_fmt_t, line->n_fmt, line->d.m_fmt, line->d.fmt); // Special case: VCF specification requires that GT is always first if ( line->n_fmt > 1 && key[0]=='G' && key[1]=='T' && !key[2] ) { for (i=line->n_fmt-1; i>0; i--) line->d.fmt[i] = line->d.fmt[i-1]; fmt = &line->d.fmt[0]; } else fmt = &line->d.fmt[line->n_fmt-1]; bcf_unpack_fmt_core1((uint8_t*)str.s, line->n_sample, fmt); line->d.indiv_dirty = 1; fmt->p_free = 1; } else { // The tag is already present, check if it is big enough to accomodate the new block if ( str.l <= fmt->p_len + fmt->p_off ) { // good, the block is big enough if ( str.l != fmt->p_len + fmt->p_off ) line->d.indiv_dirty = 1; uint8_t *ptr = fmt->p - fmt->p_off; memcpy(ptr, str.s, str.l); free(str.s); int p_free = fmt->p_free; bcf_unpack_fmt_core1(ptr, line->n_sample, fmt); fmt->p_free = p_free; } else { if ( fmt->p_free ) free(fmt->p - fmt->p_off); bcf_unpack_fmt_core1((uint8_t*)str.s, line->n_sample, fmt); fmt->p_free = 1; line->d.indiv_dirty = 1; } } line->unpacked |= BCF_UN_FMT; return 0; } int bcf_update_filter(const bcf_hdr_t *hdr, bcf1_t *line, int *flt_ids, int n) { if ( !(line->unpacked & BCF_UN_FLT) ) bcf_unpack(line, BCF_UN_FLT); line->d.shared_dirty |= BCF1_DIRTY_FLT; line->d.n_flt = n; if ( !n ) return 0; hts_expand(int, line->d.n_flt, line->d.m_flt, line->d.flt); int i; for (i=0; i<n; i++) line->d.flt[i] = flt_ids[i]; return 0; } int bcf_add_filter(const bcf_hdr_t *hdr, bcf1_t *line, int flt_id) { if ( !(line->unpacked & BCF_UN_FLT) ) bcf_unpack(line, BCF_UN_FLT); int i; for (i=0; i<line->d.n_flt; i++) if ( flt_id==line->d.flt[i] ) break; if ( i<line->d.n_flt ) return 0; // this filter is already set line->d.shared_dirty |= BCF1_DIRTY_FLT; if ( flt_id==0 ) // set to PASS line->d.n_flt = 1; else if ( line->d.n_flt==1 && line->d.flt[0]==0 ) line->d.n_flt = 1; else line->d.n_flt++; hts_expand(int, line->d.n_flt, line->d.m_flt, line->d.flt); line->d.flt[line->d.n_flt-1] = flt_id; return 1; } int bcf_remove_filter(const bcf_hdr_t *hdr, bcf1_t *line, int flt_id, int pass) { if ( !(line->unpacked & BCF_UN_FLT) ) bcf_unpack(line, BCF_UN_FLT); int i; for (i=0; i<line->d.n_flt; i++) if ( flt_id==line->d.flt[i] ) break; if ( i==line->d.n_flt ) return 0; // the filter is not present line->d.shared_dirty |= BCF1_DIRTY_FLT; if ( i!=line->d.n_flt-1 ) memmove(line->d.flt+i,line->d.flt+i+1,(line->d.n_flt-i-1)*sizeof(*line->d.flt)); line->d.n_flt--; if ( !line->d.n_flt && pass ) 
bcf_add_filter(hdr,line,0); return 0; } int bcf_has_filter(const bcf_hdr_t *hdr, bcf1_t *line, char *filter) { if ( filter[0]=='.' && !filter[1] ) filter = "PASS"; int id = bcf_hdr_id2int(hdr, BCF_DT_ID, filter); if ( !bcf_hdr_idinfo_exists(hdr,BCF_HL_FLT,id) ) return -1; // not defined in the header if ( !(line->unpacked & BCF_UN_FLT) ) bcf_unpack(line, BCF_UN_FLT); if ( id==0 && !line->d.n_flt) return 1; // PASS int i; for (i=0; i<line->d.n_flt; i++) if ( line->d.flt[i]==id ) return 1; return 0; } static inline int _bcf1_sync_alleles(const bcf_hdr_t *hdr, bcf1_t *line, int nals) { line->d.shared_dirty |= BCF1_DIRTY_ALS; line->n_allele = nals; hts_expand(char*, line->n_allele, line->d.m_allele, line->d.allele); char *als = line->d.als; int n = 0; while (n<nals) { line->d.allele[n] = als; while ( *als ) als++; als++; n++; } // Update REF length. Note that END is 1-based while line->pos 0-based bcf_info_t *end_info = bcf_get_info(hdr,line,"END"); if ( end_info ) { if ( end_info->type==BCF_HT_INT && end_info->v1.i==bcf_int32_missing ) end_info = NULL; else if ( end_info->type==BCF_HT_LONG && end_info->v1.i==bcf_int64_missing ) end_info = NULL; } if ( end_info && end_info->v1.i > line->pos ) line->rlen = end_info->v1.i - line->pos; else if ( nals > 0 ) line->rlen = strlen(line->d.allele[0]); else line->rlen = 0; return 0; } int bcf_update_alleles(const bcf_hdr_t *hdr, bcf1_t *line, const char **alleles, int nals) { if ( !(line->unpacked & BCF_UN_STR) ) bcf_unpack(line, BCF_UN_STR); kstring_t tmp = {0,0,0}; char *free_old = NULL; // If the supplied alleles are not pointers to line->d.als, the existing block can be reused. int i; for (i=0; i<nals; i++) if ( alleles[i]>=line->d.als && alleles[i]<line->d.als+line->d.m_als ) break; if ( i==nals ) { // all alleles point elsewhere, reuse the existing block tmp.l = 0; tmp.s = line->d.als; tmp.m = line->d.m_als; } else free_old = line->d.als; for (i=0; i<nals; i++) { kputs(alleles[i], &tmp); kputc(0, &tmp); } line->d.als = tmp.s; line->d.m_als = tmp.m; free(free_old); return _bcf1_sync_alleles(hdr,line,nals); } int bcf_update_alleles_str(const bcf_hdr_t *hdr, bcf1_t *line, const char *alleles_string) { if ( !(line->unpacked & BCF_UN_STR) ) bcf_unpack(line, BCF_UN_STR); kstring_t tmp; tmp.l = 0; tmp.s = line->d.als; tmp.m = line->d.m_als; kputs(alleles_string, &tmp); line->d.als = tmp.s; line->d.m_als = tmp.m; int nals = 1; char *t = line->d.als; while (*t) { if ( *t==',' ) { *t = 0; nals++; } t++; } return _bcf1_sync_alleles(hdr, line, nals); } int bcf_update_id(const bcf_hdr_t *hdr, bcf1_t *line, const char *id) { if ( !(line->unpacked & BCF_UN_STR) ) bcf_unpack(line, BCF_UN_STR); kstring_t tmp; tmp.l = 0; tmp.s = line->d.id; tmp.m = line->d.m_id; if ( id ) kputs(id, &tmp); else kputs(".", &tmp); line->d.id = tmp.s; line->d.m_id = tmp.m; line->d.shared_dirty |= BCF1_DIRTY_ID; return 0; } int bcf_add_id(const bcf_hdr_t *hdr, bcf1_t *line, const char *id) { if ( !id ) return 0; if ( !(line->unpacked & BCF_UN_STR) ) bcf_unpack(line, BCF_UN_STR); kstring_t tmp; tmp.l = 0; tmp.s = line->d.id; tmp.m = line->d.m_id; int len = strlen(id); char *dst = line->d.id; while ( *dst && (dst=strstr(dst,id)) ) { if ( dst[len]!=0 && dst[len]!=';' ) dst++; // a prefix, not a match else if ( dst==line->d.id || dst[-1]==';' ) return 0; // already present dst++; // a suffix, not a match } if ( line->d.id && (line->d.id[0]!='.' 
|| line->d.id[1]) ) { tmp.l = strlen(line->d.id); kputc(';',&tmp); } kputs(id,&tmp); line->d.id = tmp.s; line->d.m_id = tmp.m; line->d.shared_dirty |= BCF1_DIRTY_ID; return 0; } bcf_fmt_t *bcf_get_fmt(const bcf_hdr_t *hdr, bcf1_t *line, const char *key) { int id = bcf_hdr_id2int(hdr, BCF_DT_ID, key); if ( !bcf_hdr_idinfo_exists(hdr,BCF_HL_FMT,id) ) return NULL; // no such FMT field in the header return bcf_get_fmt_id(line, id); } bcf_info_t *bcf_get_info(const bcf_hdr_t *hdr, bcf1_t *line, const char *key) { int id = bcf_hdr_id2int(hdr, BCF_DT_ID, key); if ( !bcf_hdr_idinfo_exists(hdr,BCF_HL_INFO,id) ) return NULL; // no such INFO field in the header return bcf_get_info_id(line, id); } bcf_fmt_t *bcf_get_fmt_id(bcf1_t *line, const int id) { int i; if ( !(line->unpacked & BCF_UN_FMT) ) bcf_unpack(line, BCF_UN_FMT); for (i=0; i<line->n_fmt; i++) { if ( line->d.fmt[i].id==id ) return &line->d.fmt[i]; } return NULL; } bcf_info_t *bcf_get_info_id(bcf1_t *line, const int id) { int i; if ( !(line->unpacked & BCF_UN_INFO) ) bcf_unpack(line, BCF_UN_INFO); for (i=0; i<line->n_info; i++) { if ( line->d.info[i].key==id ) return &line->d.info[i]; } return NULL; } int bcf_get_info_values(const bcf_hdr_t *hdr, bcf1_t *line, const char *tag, void **dst, int *ndst, int type) { int i, ret = -4, tag_id = bcf_hdr_id2int(hdr, BCF_DT_ID, tag); if ( !bcf_hdr_idinfo_exists(hdr,BCF_HL_INFO,tag_id) ) return -1; // no such INFO field in the header if ( bcf_hdr_id2type(hdr,BCF_HL_INFO,tag_id)!=(type & 0xff) ) return -2; // expected different type if ( !(line->unpacked & BCF_UN_INFO) ) bcf_unpack(line, BCF_UN_INFO); for (i=0; i<line->n_info; i++) if ( line->d.info[i].key==tag_id ) break; if ( i==line->n_info ) return ( type==BCF_HT_FLAG ) ? 0 : -3; // the tag is not present in this record if ( type==BCF_HT_FLAG ) return 1; bcf_info_t *info = &line->d.info[i]; if ( !info->vptr ) return -3; // the tag was marked for removal if ( type==BCF_HT_STR ) { if ( *ndst < info->len+1 ) { *ndst = info->len + 1; *dst = realloc(*dst, *ndst); } memcpy(*dst,info->vptr,info->len); ((uint8_t*)*dst)[info->len] = 0; return info->len; } // Make sure the buffer is big enough int size1; switch (type) { case BCF_HT_INT: size1 = sizeof(int32_t); break; case BCF_HT_LONG: size1 = sizeof(int64_t); break; case BCF_HT_REAL: size1 = sizeof(float); break; default: hts_log_error("Unexpected output type %d at %s:%"PRIhts_pos, type, bcf_seqname_safe(hdr,line), line->pos+1); return -2; } if ( *ndst < info->len ) { *ndst = info->len; *dst = realloc(*dst, *ndst * size1); } #define BRANCH(type_t, convert, is_missing, is_vector_end, set_missing, set_regular, out_type_t) do { \ out_type_t *tmp = (out_type_t *) *dst; \ int j; \ for (j=0; j<info->len; j++) \ { \ type_t p = convert(info->vptr + j * sizeof(type_t)); \ if ( is_vector_end ) break; \ if ( is_missing ) set_missing; \ else set_regular; \ tmp++; \ } \ ret = j; \ } while (0) switch (info->type) { case BCF_BT_INT8: if (type == BCF_HT_LONG) { BRANCH(int8_t, le_to_i8, p==bcf_int8_missing, p==bcf_int8_vector_end, *tmp=bcf_int64_missing, *tmp=p, int64_t); } else { BRANCH(int8_t, le_to_i8, p==bcf_int8_missing, p==bcf_int8_vector_end, *tmp=bcf_int32_missing, *tmp=p, int32_t); } break; case BCF_BT_INT16: if (type == BCF_HT_LONG) { BRANCH(int16_t, le_to_i16, p==bcf_int16_missing, p==bcf_int16_vector_end, *tmp=bcf_int64_missing, *tmp=p, int64_t); } else { BRANCH(int16_t, le_to_i16, p==bcf_int16_missing, p==bcf_int16_vector_end, *tmp=bcf_int32_missing, *tmp=p, int32_t); } break; case BCF_BT_INT32: if (type == 
BCF_HT_LONG) { BRANCH(int32_t, le_to_i32, p==bcf_int32_missing, p==bcf_int32_vector_end, *tmp=bcf_int64_missing, *tmp=p, int64_t); break; } else { BRANCH(int32_t, le_to_i32, p==bcf_int32_missing, p==bcf_int32_vector_end, *tmp=bcf_int32_missing, *tmp=p, int32_t); break; } case BCF_BT_FLOAT: BRANCH(uint32_t, le_to_u32, p==bcf_float_missing, p==bcf_float_vector_end, bcf_float_set_missing(*tmp), bcf_float_set(tmp, p), float); break; default: hts_log_error("Unexpected type %d at %s:%"PRIhts_pos, info->type, bcf_seqname_safe(hdr,line), line->pos+1); return -2; } #undef BRANCH return ret; // set by BRANCH } int bcf_get_format_string(const bcf_hdr_t *hdr, bcf1_t *line, const char *tag, char ***dst, int *ndst) { int i,tag_id = bcf_hdr_id2int(hdr, BCF_DT_ID, tag); if ( !bcf_hdr_idinfo_exists(hdr,BCF_HL_FMT,tag_id) ) return -1; // no such FORMAT field in the header if ( bcf_hdr_id2type(hdr,BCF_HL_FMT,tag_id)!=BCF_HT_STR ) return -2; // expected different type if ( !(line->unpacked & BCF_UN_FMT) ) bcf_unpack(line, BCF_UN_FMT); for (i=0; i<line->n_fmt; i++) if ( line->d.fmt[i].id==tag_id ) break; if ( i==line->n_fmt ) return -3; // the tag is not present in this record bcf_fmt_t *fmt = &line->d.fmt[i]; if ( !fmt->p ) return -3; // the tag was marked for removal int nsmpl = bcf_hdr_nsamples(hdr); if ( !*dst ) { *dst = (char**) malloc(sizeof(char*)*nsmpl); if ( !*dst ) return -4; // could not alloc (*dst)[0] = NULL; } int n = (fmt->n+1)*nsmpl; if ( *ndst < n ) { (*dst)[0] = realloc((*dst)[0], n); if ( !(*dst)[0] ) return -4; // could not alloc *ndst = n; } for (i=0; i<nsmpl; i++) { uint8_t *src = fmt->p + i*fmt->n; uint8_t *tmp = (uint8_t*)(*dst)[0] + i*(fmt->n+1); memcpy(tmp,src,fmt->n); tmp[fmt->n] = 0; (*dst)[i] = (char*) tmp; } return n; } int bcf_get_format_values(const bcf_hdr_t *hdr, bcf1_t *line, const char *tag, void **dst, int *ndst, int type) { int i,j, tag_id = bcf_hdr_id2int(hdr, BCF_DT_ID, tag); if ( !bcf_hdr_idinfo_exists(hdr,BCF_HL_FMT,tag_id) ) return -1; // no such FORMAT field in the header if ( tag[0]=='G' && tag[1]=='T' && tag[2]==0 ) { // Ugly: GT field is considered to be a string by the VCF header but BCF represents it as INT. if ( bcf_hdr_id2type(hdr,BCF_HL_FMT,tag_id)!=BCF_HT_STR ) return -2; } else if ( bcf_hdr_id2type(hdr,BCF_HL_FMT,tag_id)!=type ) return -2; // expected different type if ( !(line->unpacked & BCF_UN_FMT) ) bcf_unpack(line, BCF_UN_FMT); for (i=0; i<line->n_fmt; i++) if ( line->d.fmt[i].id==tag_id ) break; if ( i==line->n_fmt ) return -3; // the tag is not present in this record bcf_fmt_t *fmt = &line->d.fmt[i]; if ( !fmt->p ) return -3; // the tag was marked for removal if ( type==BCF_HT_STR ) { int n = fmt->n*bcf_hdr_nsamples(hdr); if ( *ndst < n ) { *dst = realloc(*dst, n); if ( !*dst ) return -4; // could not alloc *ndst = n; } memcpy(*dst,fmt->p,n); return n; } // Make sure the buffer is big enough int nsmpl = bcf_hdr_nsamples(hdr); int size1 = type==BCF_HT_INT ? 
sizeof(int32_t) : sizeof(float); if ( *ndst < fmt->n*nsmpl ) { *ndst = fmt->n*nsmpl; *dst = realloc(*dst, *ndst*size1); if ( !*dst ) return -4; // could not alloc } #define BRANCH(type_t, convert, is_missing, is_vector_end, set_missing, set_vector_end, set_regular, out_type_t) { \ out_type_t *tmp = (out_type_t *) *dst; \ uint8_t *fmt_p = fmt->p; \ for (i=0; i<nsmpl; i++) \ { \ for (j=0; j<fmt->n; j++) \ { \ type_t p = convert(fmt_p + j * sizeof(type_t)); \ if ( is_missing ) set_missing; \ else if ( is_vector_end ) { set_vector_end; break; } \ else set_regular; \ tmp++; \ } \ for (; j<fmt->n; j++) { set_vector_end; tmp++; } \ fmt_p += fmt->size; \ } \ } switch (fmt->type) { case BCF_BT_INT8: BRANCH(int8_t, le_to_i8, p==bcf_int8_missing, p==bcf_int8_vector_end, *tmp=bcf_int32_missing, *tmp=bcf_int32_vector_end, *tmp=p, int32_t); break; case BCF_BT_INT16: BRANCH(int16_t, le_to_i16, p==bcf_int16_missing, p==bcf_int16_vector_end, *tmp=bcf_int32_missing, *tmp=bcf_int32_vector_end, *tmp=p, int32_t); break; case BCF_BT_INT32: BRANCH(int32_t, le_to_i32, p==bcf_int32_missing, p==bcf_int32_vector_end, *tmp=bcf_int32_missing, *tmp=bcf_int32_vector_end, *tmp=p, int32_t); break; case BCF_BT_FLOAT: BRANCH(uint32_t, le_to_u32, p==bcf_float_missing, p==bcf_float_vector_end, bcf_float_set_missing(*tmp), bcf_float_set_vector_end(*tmp), bcf_float_set(tmp, p), float); break; default: hts_log_error("Unexpected type %d at %s:%"PRIhts_pos, fmt->type, bcf_seqname_safe(hdr,line), line->pos+1); exit(1); } #undef BRANCH return nsmpl*fmt->n; }
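A minimal usage sketch for the accessor functions defined above (bcf_get_info_values() via its bcf_get_info_int32() wrapper), not part of the original sources: the input file name "example.vcf.gz" and the INFO/DP tag are illustrative assumptions, and error handling is abbreviated.

#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include "htslib/vcf.h"

int main(void)
{
    /* Hypothetical input; any VCF/BCF carrying an INFO/DP tag would do. */
    htsFile *fp = hts_open("example.vcf.gz", "r");
    if (!fp) return 1;
    bcf_hdr_t *hdr = bcf_hdr_read(fp);
    bcf1_t *rec = bcf_init();
    int32_t *dp = NULL;
    int ndp = 0;

    while (bcf_read(fp, hdr, rec) == 0) {
        /* bcf_get_info_int32() is the BCF_HT_INT wrapper around
         * bcf_get_info_values(); it grows *dp as needed and returns the
         * number of values written, or a negative code on failure. */
        int n = bcf_get_info_int32(hdr, rec, "DP", &dp, &ndp);
        if (n > 0)
            printf("%s:%"PRIhts_pos"\tDP=%d\n",
                   bcf_seqname(hdr, rec), rec->pos + 1, dp[0]);
    }

    free(dp);
    bcf_destroy(rec);
    bcf_hdr_destroy(hdr);
    hts_close(fp);
    return 0;
}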
null
/* vcf.c -- VCF/BCF API functions. Copyright (C) 2012, 2013 Broad Institute. Copyright (C) 2012-2020 Genome Research Ltd. Portions copyright (C) 2014 Intel Corporation. Author: Heng Li <lh3@sanger.ac.uk> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #define HTS_BUILDING_LIBRARY // Enables HTSLIB_EXPORT, see htslib/hts_defs.h #include <config.h> #include <stdio.h> #include <assert.h> #include <string.h> #include <strings.h> #include <stdlib.h> #include <limits.h> #include <stdint.h> #include <inttypes.h> #include <errno.h> #include "htslib/vcf.h" #include "htslib/bgzf.h" #include "htslib/tbx.h" #include "htslib/hfile.h" #include "hts_internal.h" #include "htslib/hts_endian.h" #include "htslib/khash_str2int.h" #include "htslib/kstring.h" #include "htslib/sam.h" #include "htslib/khash.h" KHASH_MAP_INIT_STR(vdict, bcf_idinfo_t) typedef khash_t(vdict) vdict_t; #include "htslib/kseq.h" HTSLIB_EXPORT uint32_t bcf_float_missing = 0x7F800001; HTSLIB_EXPORT uint32_t bcf_float_vector_end = 0x7F800002; HTSLIB_EXPORT uint8_t bcf_type_shift[] = { 0, 0, 1, 2, 3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; static bcf_idinfo_t bcf_idinfo_def = { .info = { 15, 15, 15 }, .hrec = { NULL, NULL, NULL}, .id = -1 }; /* Partial support for 64-bit POS and Number=1 INFO tags. 
Notes: - the support for 64-bit values is motivated by POS and INFO/END for large genomes - the use of 64-bit values does not conform to the specification - cannot output 64-bit BCF and if it does, it is not compatible with anything - experimental, use at your risk */ #ifdef VCF_ALLOW_INT64 #define BCF_MAX_BT_INT64 (0x7fffffffffffffff) /* INT64_MAX, for internal use only */ #define BCF_MIN_BT_INT64 -9223372036854775800LL /* INT64_MIN + 8, for internal use only */ #endif #define BCF_IS_64BIT (1<<30) static char *find_chrom_header_line(char *s) { char *nl; if (strncmp(s, "#CHROM\t", 7) == 0) return s; else if ((nl = strstr(s, "\n#CHROM\t")) != NULL) return nl+1; else return NULL; } /************************* *** VCF header parser *** *************************/ static int bcf_hdr_add_sample_len(bcf_hdr_t *h, const char *s, size_t len) { if ( !s ) return 0; if (len == 0) len = strlen(s); const char *ss = s; while ( *ss && isspace_c(*ss) && ss - s < len) ss++; if ( !*ss || ss - s == len) { hts_log_error("Empty sample name: trailing spaces/tabs in the header line?"); return -1; } vdict_t *d = (vdict_t*)h->dict[BCF_DT_SAMPLE]; int ret; char *sdup = malloc(len + 1); if (!sdup) return -1; memcpy(sdup, s, len); sdup[len] = 0; // Ensure space is available in h->samples size_t n = kh_size(d); char **new_samples = realloc(h->samples, sizeof(char*) * (n + 1)); if (!new_samples) { free(sdup); return -1; } h->samples = new_samples; int k = kh_put(vdict, d, sdup, &ret); if (ret < 0) { free(sdup); return -1; } if (ret) { // absent kh_val(d, k) = bcf_idinfo_def; kh_val(d, k).id = n; } else { hts_log_error("Duplicated sample name '%s'", s); free(sdup); return -1; } h->samples[n] = sdup; h->dirty = 1; return 0; } int bcf_hdr_add_sample(bcf_hdr_t *h, const char *s) { return bcf_hdr_add_sample_len(h, s, 0); } int HTS_RESULT_USED bcf_hdr_parse_sample_line(bcf_hdr_t *h, const char *str) { int ret = 0; int i = 0; const char *p, *q; // add samples for (p = q = str;; ++q) { if (*q > '\n') continue; if (++i > 9) { if ( bcf_hdr_add_sample_len(h, p, q - p) < 0 ) ret = -1; } if (*q == 0 || *q == '\n' || ret < 0) break; p = q + 1; } return ret; } int bcf_hdr_sync(bcf_hdr_t *h) { int i; for (i = 0; i < 3; i++) { vdict_t *d = (vdict_t*)h->dict[i]; khint_t k; if ( h->n[i] < kh_size(d) ) { bcf_idpair_t *new_idpair; // this should be true only for i=2, BCF_DT_SAMPLE new_idpair = (bcf_idpair_t*) realloc(h->id[i], kh_size(d)*sizeof(bcf_idpair_t)); if (!new_idpair) return -1; h->n[i] = kh_size(d); h->id[i] = new_idpair; } for (k=kh_begin(d); k<kh_end(d); k++) { if (!kh_exist(d,k)) continue; h->id[i][kh_val(d,k).id].key = kh_key(d,k); h->id[i][kh_val(d,k).id].val = &kh_val(d,k); } } h->dirty = 0; return 0; } void bcf_hrec_destroy(bcf_hrec_t *hrec) { if (!hrec) return; free(hrec->key); if ( hrec->value ) free(hrec->value); int i; for (i=0; i<hrec->nkeys; i++) { free(hrec->keys[i]); free(hrec->vals[i]); } free(hrec->keys); free(hrec->vals); free(hrec); } // Copies all fields except IDX. 
bcf_hrec_t *bcf_hrec_dup(bcf_hrec_t *hrec) { int save_errno; bcf_hrec_t *out = (bcf_hrec_t*) calloc(1,sizeof(bcf_hrec_t)); if (!out) return NULL; out->type = hrec->type; if ( hrec->key ) { out->key = strdup(hrec->key); if (!out->key) goto fail; } if ( hrec->value ) { out->value = strdup(hrec->value); if (!out->value) goto fail; } out->nkeys = hrec->nkeys; out->keys = (char**) malloc(sizeof(char*)*hrec->nkeys); if (!out->keys) goto fail; out->vals = (char**) malloc(sizeof(char*)*hrec->nkeys); if (!out->vals) goto fail; int i, j = 0; for (i=0; i<hrec->nkeys; i++) { if ( hrec->keys[i] && !strcmp("IDX",hrec->keys[i]) ) continue; if ( hrec->keys[i] ) { out->keys[j] = strdup(hrec->keys[i]); if (!out->keys[j]) goto fail; } if ( hrec->vals[i] ) { out->vals[j] = strdup(hrec->vals[i]); if (!out->vals[j]) goto fail; } j++; } if ( i!=j ) out->nkeys -= i-j; // IDX was omitted return out; fail: save_errno = errno; hts_log_error("%s", strerror(errno)); bcf_hrec_destroy(out); errno = save_errno; return NULL; } void bcf_hrec_debug(FILE *fp, bcf_hrec_t *hrec) { fprintf(fp, "key=[%s] value=[%s]", hrec->key, hrec->value?hrec->value:""); int i; for (i=0; i<hrec->nkeys; i++) fprintf(fp, "\t[%s]=[%s]", hrec->keys[i],hrec->vals[i]); fprintf(fp, "\n"); } void bcf_header_debug(bcf_hdr_t *hdr) { int i, j; for (i=0; i<hdr->nhrec; i++) { if ( !hdr->hrec[i]->value ) { fprintf(stderr, "##%s=<", hdr->hrec[i]->key); fprintf(stderr,"%s=%s", hdr->hrec[i]->keys[0], hdr->hrec[i]->vals[0]); for (j=1; j<hdr->hrec[i]->nkeys; j++) fprintf(stderr,",%s=%s", hdr->hrec[i]->keys[j], hdr->hrec[i]->vals[j]); fprintf(stderr,">\n"); } else fprintf(stderr,"##%s=%s\n", hdr->hrec[i]->key,hdr->hrec[i]->value); } } int bcf_hrec_add_key(bcf_hrec_t *hrec, const char *str, size_t len) { char **tmp; size_t n = hrec->nkeys + 1; assert(len > 0 && len < SIZE_MAX); tmp = realloc(hrec->keys, sizeof(char*)*n); if (!tmp) return -1; hrec->keys = tmp; tmp = realloc(hrec->vals, sizeof(char*)*n); if (!tmp) return -1; hrec->vals = tmp; hrec->keys[hrec->nkeys] = (char*) malloc((len+1)*sizeof(char)); if (!hrec->keys[hrec->nkeys]) return -1; memcpy(hrec->keys[hrec->nkeys],str,len); hrec->keys[hrec->nkeys][len] = 0; hrec->vals[hrec->nkeys] = NULL; hrec->nkeys = n; return 0; } int bcf_hrec_set_val(bcf_hrec_t *hrec, int i, const char *str, size_t len, int is_quoted) { if ( hrec->vals[i] ) { free(hrec->vals[i]); hrec->vals[i] = NULL; } if ( !str ) return 0; if ( is_quoted ) { if (len >= SIZE_MAX - 3) { errno = ENOMEM; return -1; } hrec->vals[i] = (char*) malloc((len+3)*sizeof(char)); if (!hrec->vals[i]) return -1; hrec->vals[i][0] = '"'; memcpy(&hrec->vals[i][1],str,len); hrec->vals[i][len+1] = '"'; hrec->vals[i][len+2] = 0; } else { if (len == SIZE_MAX) { errno = ENOMEM; return -1; } hrec->vals[i] = (char*) malloc((len+1)*sizeof(char)); if (!hrec->vals[i]) return -1; memcpy(hrec->vals[i],str,len); hrec->vals[i][len] = 0; } return 0; } int hrec_add_idx(bcf_hrec_t *hrec, int idx) { int n = hrec->nkeys + 1; char **tmp = (char**) realloc(hrec->keys, sizeof(char*)*n); if (!tmp) return -1; hrec->keys = tmp; tmp = (char**) realloc(hrec->vals, sizeof(char*)*n); if (!tmp) return -1; hrec->vals = tmp; hrec->keys[hrec->nkeys] = strdup("IDX"); if (!hrec->keys[hrec->nkeys]) return -1; kstring_t str = {0,0,0}; if (kputw(idx, &str) < 0) { free(hrec->keys[hrec->nkeys]); return -1; } hrec->vals[hrec->nkeys] = str.s; hrec->nkeys = n; return 0; } int bcf_hrec_find_key(bcf_hrec_t *hrec, const char *key) { int i; for (i=0; i<hrec->nkeys; i++) if ( !strcasecmp(key,hrec->keys[i]) ) 
return i; return -1; } static inline int is_escaped(const char *min, const char *str) { int n = 0; while ( --str>=min && *str=='\\' ) n++; return n%2; } bcf_hrec_t *bcf_hdr_parse_line(const bcf_hdr_t *h, const char *line, int *len) { const char *p = line; if (p[0] != '#' || p[1] != '#') { *len = 0; return NULL; } p += 2; const char *q = p; while ( *q && *q!='=' && *q != '\n' ) q++; ptrdiff_t n = q-p; if ( *q!='=' || !n ) { *len = q-line+1; return NULL; } // wrong format bcf_hrec_t *hrec = (bcf_hrec_t*) calloc(1,sizeof(bcf_hrec_t)); if (!hrec) return NULL; hrec->key = (char*) malloc(sizeof(char)*(n+1)); if (!hrec->key) goto fail; memcpy(hrec->key,p,n); hrec->key[n] = 0; p = ++q; if ( *p!='<' ) // generic field, e.g. ##samtoolsVersion=0.1.18-r579 { while ( *q && *q!='\n' ) q++; hrec->value = (char*) malloc((q-p+1)*sizeof(char)); if (!hrec->value) goto fail; memcpy(hrec->value, p, q-p); hrec->value[q-p] = 0; *len = q - line + (*q ? 1 : 0); // Skip \n but not \0 return hrec; } // structured line, e.g. // ##INFO=<ID=PV1,Number=1,Type=Float,Description="P-value for baseQ bias"> // ##PEDIGREE=<Name_0=G0-ID,Name_1=G1-ID,Name_3=GN-ID> int nopen = 1; while ( *q && *q!='\n' && nopen>0 ) { p = ++q; while ( *q && *q==' ' ) { p++; q++; } // ^[A-Za-z_][0-9A-Za-z_.]*$ if (p==q && *q && (isalpha_c(*q) || *q=='_')) { q++; while ( *q && (isalnum_c(*q) || *q=='_' || *q=='.') ) q++; } n = q-p; int m = 0; while ( *q && *q==' ' ) { q++; m++; } if ( *q!='=' || !n ) { // wrong format while ( *q && *q!='\n' ) q++; hts_log_error("Could not parse the header line: \"%.*s\"", (int) (q - line), line); *len = q - line + (*q ? 1 : 0); bcf_hrec_destroy(hrec); return NULL; } if (bcf_hrec_add_key(hrec, p, q-p-m) < 0) goto fail; p = ++q; while ( *q && *q==' ' ) { p++; q++; } int quoted = *p=='"' ? 1 : 0; if ( quoted ) p++, q++; while ( *q && *q != '\n' ) { if ( quoted ) { if ( *q=='"' && !is_escaped(p,q) ) break; } else { if ( *q=='<' ) nopen++; if ( *q=='>' ) nopen--; if ( !nopen ) break; if ( *q==',' && nopen==1 ) break; } q++; } const char *r = q; while ( r > p && r[-1] == ' ' ) r--; if (bcf_hrec_set_val(hrec, hrec->nkeys-1, p, r-p, quoted) < 0) goto fail; if ( quoted && *q=='"' ) q++; if ( *q=='>' ) { nopen--; q++; } } // Skip to end of line int nonspace = 0; p = q; while ( *q && *q!='\n' ) { nonspace |= !isspace_c(*q); q++; } if (nonspace) { hts_log_warning("Dropped trailing junk from header line '%.*s'", (int) (q - line), line); } *len = q - line + (*q ? 1 : 0); return hrec; fail: bcf_hrec_destroy(hrec); return NULL; } static int bcf_hdr_set_idx(bcf_hdr_t *hdr, int dict_type, const char *tag, bcf_idinfo_t *idinfo) { size_t new_n; // If available, preserve existing IDX if ( idinfo->id==-1 ) idinfo->id = hdr->n[dict_type]; else if ( idinfo->id < hdr->n[dict_type] && hdr->id[dict_type][idinfo->id].key ) { hts_log_error("Conflicting IDX=%d lines in the header dictionary, the new tag is %s", idinfo->id, tag); errno = EINVAL; return -1; } new_n = idinfo->id >= hdr->n[dict_type] ? idinfo->id+1 : hdr->n[dict_type]; if (hts_resize(bcf_idpair_t, new_n, &hdr->m[dict_type], &hdr->id[dict_type], HTS_RESIZE_CLEAR)) { return -1; } hdr->n[dict_type] = new_n; // NB: the next kh_put call can invalidate the idinfo pointer, therefore // we leave it unassigned here. It must be set explicitly in bcf_hdr_sync. 
hdr->id[dict_type][idinfo->id].key = tag; return 0; } // returns: 1 when hdr needs to be synced, -1 on error, 0 otherwise static int bcf_hdr_register_hrec(bcf_hdr_t *hdr, bcf_hrec_t *hrec) { // contig int i, ret, replacing = 0; khint_t k; char *str = NULL; if ( !strcmp(hrec->key, "contig") ) { hts_pos_t len = 0; hrec->type = BCF_HL_CTG; // Get the contig ID ($str) and length ($j) i = bcf_hrec_find_key(hrec,"length"); if ( i<0 ) len = 0; else { char *end = hrec->vals[i]; len = strtoll(hrec->vals[i], &end, 10); if (end == hrec->vals[i] || len < 0) return 0; } i = bcf_hrec_find_key(hrec,"ID"); if ( i<0 ) return 0; str = strdup(hrec->vals[i]); if (!str) return -1; // Register in the dictionary vdict_t *d = (vdict_t*)hdr->dict[BCF_DT_CTG]; khint_t k = kh_get(vdict, d, str); if ( k != kh_end(d) ) { // already present free(str); str=NULL; if (kh_val(d, k).hrec[0] != NULL) // and not removed return 0; replacing = 1; } else { k = kh_put(vdict, d, str, &ret); if (ret < 0) { free(str); return -1; } } int idx = bcf_hrec_find_key(hrec,"IDX"); if ( idx!=-1 ) { char *tmp = hrec->vals[idx]; idx = strtol(hrec->vals[idx], &tmp, 10); if ( *tmp || idx < 0 || idx >= INT_MAX - 1) { if (!replacing) { kh_del(vdict, d, k); free(str); } hts_log_warning("Error parsing the IDX tag, skipping"); return 0; } } kh_val(d, k) = bcf_idinfo_def; kh_val(d, k).id = idx; kh_val(d, k).info[0] = len; kh_val(d, k).hrec[0] = hrec; if (bcf_hdr_set_idx(hdr, BCF_DT_CTG, kh_key(d,k), &kh_val(d,k)) < 0) { if (!replacing) { kh_del(vdict, d, k); free(str); } return -1; } if ( idx==-1 ) { if (hrec_add_idx(hrec, kh_val(d,k).id) < 0) { return -1; } } return 1; } if ( !strcmp(hrec->key, "INFO") ) hrec->type = BCF_HL_INFO; else if ( !strcmp(hrec->key, "FILTER") ) hrec->type = BCF_HL_FLT; else if ( !strcmp(hrec->key, "FORMAT") ) hrec->type = BCF_HL_FMT; else if ( hrec->nkeys>0 ) { hrec->type = BCF_HL_STR; return 1; } else return 0; // INFO/FILTER/FORMAT char *id = NULL; uint32_t type = UINT32_MAX, var = UINT32_MAX; int num = -1, idx = -1; for (i=0; i<hrec->nkeys; i++) { if ( !strcmp(hrec->keys[i], "ID") ) id = hrec->vals[i]; else if ( !strcmp(hrec->keys[i], "IDX") ) { char *tmp = hrec->vals[i]; idx = strtol(hrec->vals[i], &tmp, 10); if ( *tmp || idx < 0 || idx >= INT_MAX - 1) { hts_log_warning("Error parsing the IDX tag, skipping"); return 0; } } else if ( !strcmp(hrec->keys[i], "Type") ) { if ( !strcmp(hrec->vals[i], "Integer") ) type = BCF_HT_INT; else if ( !strcmp(hrec->vals[i], "Float") ) type = BCF_HT_REAL; else if ( !strcmp(hrec->vals[i], "String") ) type = BCF_HT_STR; else if ( !strcmp(hrec->vals[i], "Character") ) type = BCF_HT_STR; else if ( !strcmp(hrec->vals[i], "Flag") ) type = BCF_HT_FLAG; else { hts_log_warning("The type \"%s\" is not supported, assuming \"String\"", hrec->vals[i]); type = BCF_HT_STR; } } else if ( !strcmp(hrec->keys[i], "Number") ) { if ( !strcmp(hrec->vals[i],"A") ) var = BCF_VL_A; else if ( !strcmp(hrec->vals[i],"R") ) var = BCF_VL_R; else if ( !strcmp(hrec->vals[i],"G") ) var = BCF_VL_G; else if ( !strcmp(hrec->vals[i],".") ) var = BCF_VL_VAR; else { sscanf(hrec->vals[i],"%d",&num); var = BCF_VL_FIXED; } if (var != BCF_VL_FIXED) num = 0xfffff; } } if (hrec->type == BCF_HL_INFO || hrec->type == BCF_HL_FMT) { if (type == -1) { hts_log_warning("%s %s field has no Type defined. Assuming String", *hrec->key == 'I' ? "An" : "A", hrec->key); type = BCF_HT_STR; } if (var == -1) { hts_log_warning("%s %s field has no Number defined. Assuming '.'", *hrec->key == 'I' ? 
"An" : "A", hrec->key); var = BCF_VL_VAR; } } uint32_t info = ((((uint32_t)num) & 0xfffff)<<12 | (var & 0xf) << 8 | (type & 0xf) << 4 | (((uint32_t) hrec->type) & 0xf)); if ( !id ) return 0; str = strdup(id); if (!str) return -1; vdict_t *d = (vdict_t*)hdr->dict[BCF_DT_ID]; k = kh_get(vdict, d, str); if ( k != kh_end(d) ) { // already present free(str); if ( kh_val(d, k).hrec[info&0xf] ) return 0; kh_val(d, k).info[info&0xf] = info; kh_val(d, k).hrec[info&0xf] = hrec; if ( idx==-1 ) { if (hrec_add_idx(hrec, kh_val(d, k).id) < 0) { return -1; } } return 1; } k = kh_put(vdict, d, str, &ret); if (ret < 0) { free(str); return -1; } kh_val(d, k) = bcf_idinfo_def; kh_val(d, k).info[info&0xf] = info; kh_val(d, k).hrec[info&0xf] = hrec; kh_val(d, k).id = idx; if (bcf_hdr_set_idx(hdr, BCF_DT_ID, kh_key(d,k), &kh_val(d,k)) < 0) { kh_del(vdict, d, k); free(str); return -1; } if ( idx==-1 ) { if (hrec_add_idx(hrec, kh_val(d,k).id) < 0) { return -1; } } return 1; } int bcf_hdr_add_hrec(bcf_hdr_t *hdr, bcf_hrec_t *hrec) { int res; if ( !hrec ) return 0; hrec->type = BCF_HL_GEN; res = bcf_hdr_register_hrec(hdr,hrec); if (res < 0) return -1; if ( !res ) { // If one of the hashed field, then it is already present if ( hrec->type != BCF_HL_GEN ) { bcf_hrec_destroy(hrec); return 0; } // Is one of the generic fields and already present? int i; for (i=0; i<hdr->nhrec; i++) { if ( hdr->hrec[i]->type!=BCF_HL_GEN ) continue; if ( !strcmp(hdr->hrec[i]->key,hrec->key) && !strcmp(hrec->key,"fileformat") ) break; if ( !strcmp(hdr->hrec[i]->key,hrec->key) && !strcmp(hdr->hrec[i]->value,hrec->value) ) break; } if ( i<hdr->nhrec ) { bcf_hrec_destroy(hrec); return 0; } } // New record, needs to be added int n = hdr->nhrec + 1; bcf_hrec_t **new_hrec = realloc(hdr->hrec, n*sizeof(bcf_hrec_t*)); if (!new_hrec) return -1; hdr->hrec = new_hrec; hdr->hrec[hdr->nhrec] = hrec; hdr->dirty = 1; hdr->nhrec = n; return hrec->type==BCF_HL_GEN ? 0 : 1; } /* * Note that while querying of FLT,INFO,FMT,CTG lines is fast (the keys are hashed), * the STR,GEN lines are searched for linearly in a linked list of all header lines. * This may become a problem for VCFs with huge headers, we might need to build a * dictionary for these lines as well. */ bcf_hrec_t *bcf_hdr_get_hrec(const bcf_hdr_t *hdr, int type, const char *key, const char *value, const char *str_class) { int i; if ( type==BCF_HL_GEN ) { for (i=0; i<hdr->nhrec; i++) { if ( hdr->hrec[i]->type!=type ) continue; if ( strcmp(hdr->hrec[i]->key,key) ) continue; if ( !value || !strcmp(hdr->hrec[i]->value,value) ) return hdr->hrec[i]; } return NULL; } else if ( type==BCF_HL_STR ) { for (i=0; i<hdr->nhrec; i++) { if ( hdr->hrec[i]->type!=type ) continue; if ( strcmp(hdr->hrec[i]->key,str_class) ) continue; int j = bcf_hrec_find_key(hdr->hrec[i],key); if ( j>=0 && !strcmp(hdr->hrec[i]->vals[j],value) ) return hdr->hrec[i]; } return NULL; } vdict_t *d = type==BCF_HL_CTG ? 
(vdict_t*)hdr->dict[BCF_DT_CTG] : (vdict_t*)hdr->dict[BCF_DT_ID]; khint_t k = kh_get(vdict, d, value); if ( k == kh_end(d) ) return NULL; return kh_val(d, k).hrec[type==BCF_HL_CTG?0:type]; } void bcf_hdr_check_sanity(bcf_hdr_t *hdr) { static int PL_warned = 0, GL_warned = 0; if ( !PL_warned ) { int id = bcf_hdr_id2int(hdr, BCF_DT_ID, "PL"); if ( bcf_hdr_idinfo_exists(hdr,BCF_HL_FMT,id) && bcf_hdr_id2length(hdr,BCF_HL_FMT,id)!=BCF_VL_G ) { hts_log_warning("PL should be declared as Number=G"); PL_warned = 1; } } if ( !GL_warned ) { int id = bcf_hdr_id2int(hdr, BCF_DT_ID, "GL"); if ( bcf_hdr_idinfo_exists(hdr,BCF_HL_FMT,id) && bcf_hdr_id2length(hdr,BCF_HL_FMT,id)!=BCF_VL_G ) { hts_log_warning("GL should be declared as Number=G"); GL_warned = 1; } } } int bcf_hdr_parse(bcf_hdr_t *hdr, char *htxt) { int len, done = 0; char *p = htxt; // Check sanity: "fileformat" string must come as first bcf_hrec_t *hrec = bcf_hdr_parse_line(hdr,p,&len); if ( !hrec || !hrec->key || strcasecmp(hrec->key,"fileformat") ) hts_log_warning("The first line should be ##fileformat; is the VCF/BCF header broken?"); if (bcf_hdr_add_hrec(hdr, hrec) < 0) { bcf_hrec_destroy(hrec); return -1; } // The filter PASS must appear first in the dictionary hrec = bcf_hdr_parse_line(hdr,"##FILTER=<ID=PASS,Description=\"All filters passed\">",&len); if (bcf_hdr_add_hrec(hdr, hrec) < 0) { bcf_hrec_destroy(hrec); return -1; } // Parse the whole header do { while (NULL != (hrec = bcf_hdr_parse_line(hdr, p, &len))) { if (bcf_hdr_add_hrec(hdr, hrec) < 0) { bcf_hrec_destroy(hrec); return -1; } p += len; } // Next should be the sample line. If not, it was a malformed // header, in which case print a warning and skip (many VCF // operations do not really care about a few malformed lines). // In the future we may want to add a strict mode that errors in // this case. if ( strncmp("#CHROM\tPOS",p,10) != 0 ) { char *eol = strchr(p, '\n'); if (*p != '\0') { hts_log_warning("Could not parse header line: %.*s", eol ? (int)(eol - p) : INT_MAX, p); } if (eol) { p = eol + 1; // Try from the next line. } else { done = -1; // No more lines left, give up. } } else { done = 1; // Sample line found } } while (!done); if (done < 0) { // No sample line is fatal. hts_log_error("Could not parse the header, sample line not found"); return -1; } if (bcf_hdr_parse_sample_line(hdr,p) < 0) return -1; if (bcf_hdr_sync(hdr) < 0) return -1; bcf_hdr_check_sanity(hdr); return 0; } int bcf_hdr_append(bcf_hdr_t *hdr, const char *line) { int len; bcf_hrec_t *hrec = bcf_hdr_parse_line(hdr, (char*) line, &len); if ( !hrec ) return -1; if (bcf_hdr_add_hrec(hdr, hrec) < 0) return -1; return 0; } void bcf_hdr_remove(bcf_hdr_t *hdr, int type, const char *key) { int i = 0; bcf_hrec_t *hrec; if ( !key ) { while ( i<hdr->nhrec ) { if ( hdr->hrec[i]->type!=type ) { i++; continue; } hrec = hdr->hrec[i]; if ( type==BCF_HL_FLT || type==BCF_HL_INFO || type==BCF_HL_FMT || type== BCF_HL_CTG ) { int j = bcf_hrec_find_key(hdr->hrec[i], "ID"); if ( j>=0 ) { vdict_t *d = type==BCF_HL_CTG ? 
(vdict_t*)hdr->dict[BCF_DT_CTG] : (vdict_t*)hdr->dict[BCF_DT_ID]; khint_t k = kh_get(vdict, d, hdr->hrec[i]->vals[j]); kh_val(d, k).hrec[type==BCF_HL_CTG?0:type] = NULL; } } hdr->dirty = 1; hdr->nhrec--; if ( i < hdr->nhrec ) memmove(&hdr->hrec[i],&hdr->hrec[i+1],(hdr->nhrec-i)*sizeof(bcf_hrec_t*)); bcf_hrec_destroy(hrec); } return; } while (1) { if ( type==BCF_HL_FLT || type==BCF_HL_INFO || type==BCF_HL_FMT || type== BCF_HL_CTG ) { hrec = bcf_hdr_get_hrec(hdr, type, "ID", key, NULL); if ( !hrec ) return; for (i=0; i<hdr->nhrec; i++) if ( hdr->hrec[i]==hrec ) break; assert( i<hdr->nhrec ); vdict_t *d = type==BCF_HL_CTG ? (vdict_t*)hdr->dict[BCF_DT_CTG] : (vdict_t*)hdr->dict[BCF_DT_ID]; khint_t k = kh_get(vdict, d, key); kh_val(d, k).hrec[type==BCF_HL_CTG?0:type] = NULL; } else { for (i=0; i<hdr->nhrec; i++) { if ( hdr->hrec[i]->type!=type ) continue; if ( type==BCF_HL_GEN ) { if ( !strcmp(hdr->hrec[i]->key,key) ) break; } else { // not all structured lines have ID, we could be more sophisticated as in bcf_hdr_get_hrec() int j = bcf_hrec_find_key(hdr->hrec[i], "ID"); if ( j>=0 && !strcmp(hdr->hrec[i]->vals[j],key) ) break; } } if ( i==hdr->nhrec ) return; hrec = hdr->hrec[i]; } hdr->nhrec--; if ( i < hdr->nhrec ) memmove(&hdr->hrec[i],&hdr->hrec[i+1],(hdr->nhrec-i)*sizeof(bcf_hrec_t*)); bcf_hrec_destroy(hrec); hdr->dirty = 1; } } int bcf_hdr_printf(bcf_hdr_t *hdr, const char *fmt, ...) { char tmp[256], *line = tmp; va_list ap; va_start(ap, fmt); int n = vsnprintf(line, sizeof(tmp), fmt, ap); va_end(ap); if (n >= sizeof(tmp)) { n++; // For trailing NUL line = (char*)malloc(n); if (!line) return -1; va_start(ap, fmt); vsnprintf(line, n, fmt, ap); va_end(ap); } int ret = bcf_hdr_append(hdr, line); if (line != tmp) free(line); return ret; } /********************** *** BCF header I/O *** **********************/ const char *bcf_hdr_get_version(const bcf_hdr_t *hdr) { bcf_hrec_t *hrec = bcf_hdr_get_hrec(hdr, BCF_HL_GEN, "fileformat", NULL, NULL); if ( !hrec ) { hts_log_warning("No version string found, assuming VCFv4.2"); return "VCFv4.2"; } return hrec->value; } int bcf_hdr_set_version(bcf_hdr_t *hdr, const char *version) { bcf_hrec_t *hrec = bcf_hdr_get_hrec(hdr, BCF_HL_GEN, "fileformat", NULL, NULL); if ( !hrec ) { int len; kstring_t str = {0,0,0}; ksprintf(&str,"##fileformat=%s", version); hrec = bcf_hdr_parse_line(hdr, str.s, &len); free(str.s); } else { free(hrec->value); hrec->value = strdup(version); } hdr->dirty = 1; return 0; // FIXME: check for errs in this function (return < 0 if so) } bcf_hdr_t *bcf_hdr_init(const char *mode) { int i; bcf_hdr_t *h; h = (bcf_hdr_t*)calloc(1, sizeof(bcf_hdr_t)); if (!h) return NULL; for (i = 0; i < 3; ++i) if ((h->dict[i] = kh_init(vdict)) == NULL) goto fail; if ( strchr(mode,'w') ) { bcf_hdr_append(h, "##fileformat=VCFv4.2"); // The filter PASS must appear first in the dictionary bcf_hdr_append(h, "##FILTER=<ID=PASS,Description=\"All filters passed\">"); } return h; fail: for (i = 0; i < 3; ++i) kh_destroy(vdict, h->dict[i]); free(h); return NULL; } void bcf_hdr_destroy(bcf_hdr_t *h) { int i; khint_t k; if (!h) return; for (i = 0; i < 3; ++i) { vdict_t *d = (vdict_t*)h->dict[i]; if (d == 0) continue; for (k = kh_begin(d); k != kh_end(d); ++k) if (kh_exist(d, k)) free((char*)kh_key(d, k)); kh_destroy(vdict, d); free(h->id[i]); } for (i=0; i<h->nhrec; i++) bcf_hrec_destroy(h->hrec[i]); if (h->nhrec) free(h->hrec); if (h->samples) free(h->samples); free(h->keep_samples); free(h->transl[0]); free(h->transl[1]); free(h->mem.s); free(h); } bcf_hdr_t 
*bcf_hdr_read(htsFile *hfp) { if (hfp->format.format == vcf) return vcf_hdr_read(hfp); if (hfp->format.format != bcf) { hts_log_error("Input is not detected as bcf or vcf format"); return NULL; } assert(hfp->is_bgzf); BGZF *fp = hfp->fp.bgzf; uint8_t magic[5]; bcf_hdr_t *h; h = bcf_hdr_init("r"); if (!h) { hts_log_error("Failed to allocate bcf header"); return NULL; } if (bgzf_read(fp, magic, 5) != 5) { hts_log_error("Failed to read the header (reading BCF in text mode?)"); bcf_hdr_destroy(h); return NULL; } if (strncmp((char*)magic, "BCF\2\2", 5) != 0) { if (!strncmp((char*)magic, "BCF", 3)) hts_log_error("Invalid BCF2 magic string: only BCFv2.2 is supported"); else hts_log_error("Invalid BCF2 magic string"); bcf_hdr_destroy(h); return NULL; } uint8_t buf[4]; size_t hlen; char *htxt = NULL; if (bgzf_read(fp, buf, 4) != 4) goto fail; hlen = buf[0] | (buf[1] << 8) | (buf[2] << 16) | ((size_t) buf[3] << 24); if (hlen >= SIZE_MAX) { errno = ENOMEM; goto fail; } htxt = (char*)malloc(hlen + 1); if (!htxt) goto fail; if (bgzf_read(fp, htxt, hlen) != hlen) goto fail; htxt[hlen] = '\0'; // Ensure htxt is terminated if ( bcf_hdr_parse(h, htxt) < 0 ) goto fail; free(htxt); return h; fail: hts_log_error("Failed to read BCF header"); free(htxt); bcf_hdr_destroy(h); return NULL; } int bcf_hdr_write(htsFile *hfp, bcf_hdr_t *h) { if (!h) { errno = EINVAL; return -1; } if ( h->dirty ) { if (bcf_hdr_sync(h) < 0) return -1; } hfp->format.category = variant_data; if (hfp->format.format == vcf || hfp->format.format == text_format) { hfp->format.format = vcf; return vcf_hdr_write(hfp, h); } if (hfp->format.format == binary_format) hfp->format.format = bcf; kstring_t htxt = {0,0,0}; bcf_hdr_format(h, 1, &htxt); kputc('\0', &htxt); // include the \0 byte BGZF *fp = hfp->fp.bgzf; if ( bgzf_write(fp, "BCF\2\2", 5) !=5 ) return -1; uint8_t hlen[4]; u32_to_le(htxt.l, hlen); if ( bgzf_write(fp, hlen, 4) !=4 ) return -1; if ( bgzf_write(fp, htxt.s, htxt.l) != htxt.l ) return -1; free(htxt.s); return 0; } /******************** *** BCF site I/O *** ********************/ bcf1_t *bcf_init() { bcf1_t *v; v = (bcf1_t*)calloc(1, sizeof(bcf1_t)); return v; } void bcf_clear(bcf1_t *v) { int i; for (i=0; i<v->d.m_info; i++) { if ( v->d.info[i].vptr_free ) { free(v->d.info[i].vptr - v->d.info[i].vptr_off); v->d.info[i].vptr_free = 0; } } for (i=0; i<v->d.m_fmt; i++) { if ( v->d.fmt[i].p_free ) { free(v->d.fmt[i].p - v->d.fmt[i].p_off); v->d.fmt[i].p_free = 0; } } v->rid = v->pos = v->rlen = v->unpacked = 0; bcf_float_set_missing(v->qual); v->n_info = v->n_allele = v->n_fmt = v->n_sample = 0; v->shared.l = v->indiv.l = 0; v->d.var_type = -1; v->d.shared_dirty = 0; v->d.indiv_dirty = 0; v->d.n_flt = 0; v->errcode = 0; if (v->d.m_als) v->d.als[0] = 0; if (v->d.m_id) v->d.id[0] = 0; } void bcf_empty(bcf1_t *v) { bcf_clear1(v); free(v->d.id); free(v->d.als); free(v->d.allele); free(v->d.flt); free(v->d.info); free(v->d.fmt); if (v->d.var ) free(v->d.var); free(v->shared.s); free(v->indiv.s); memset(&v->d,0,sizeof(v->d)); memset(&v->shared,0,sizeof(v->shared)); memset(&v->indiv,0,sizeof(v->indiv)); } void bcf_destroy(bcf1_t *v) { if (!v) return; bcf_empty1(v); free(v); } static inline int bcf_read1_core(BGZF *fp, bcf1_t *v) { uint8_t x[32]; ssize_t ret; uint32_t shared_len, indiv_len; if ((ret = bgzf_read(fp, x, 32)) != 32) { if (ret == 0) return -1; return -2; } bcf_clear1(v); shared_len = le_to_u32(x); if (shared_len < 24) return -2; shared_len -= 24; // to exclude six 32-bit integers if (ks_resize(&v->shared, shared_len) != 0) 
return -2; indiv_len = le_to_u32(x + 4); if (ks_resize(&v->indiv, indiv_len) != 0) return -2; v->rid = le_to_i32(x + 8); v->pos = le_to_u32(x + 12); v->rlen = le_to_i32(x + 16); v->qual = le_to_float(x + 20); v->n_info = le_to_u16(x + 24); v->n_allele = le_to_u16(x + 26); v->n_sample = le_to_u32(x + 28) & 0xffffff; v->n_fmt = x[31]; v->shared.l = shared_len; v->indiv.l = indiv_len; // silent fix of broken BCFs produced by earlier versions of bcf_subset, prior to and including bd6ed8b4 if ( (!v->indiv.l || !v->n_sample) && v->n_fmt ) v->n_fmt = 0; if (bgzf_read(fp, v->shared.s, v->shared.l) != v->shared.l) return -2; if (bgzf_read(fp, v->indiv.s, v->indiv.l) != v->indiv.l) return -2; return 0; } #define bit_array_size(n) ((n)/8+1) #define bit_array_set(a,i) ((a)[(i)/8] |= 1 << ((i)%8)) #define bit_array_clear(a,i) ((a)[(i)/8] &= ~(1 << ((i)%8))) #define bit_array_test(a,i) ((a)[(i)/8] & (1 << ((i)%8))) static int bcf_dec_typed_int1_safe(uint8_t *p, uint8_t *end, uint8_t **q, int32_t *val) { uint32_t t; if (end - p < 2) return -1; t = *p++ & 0xf; /* Use if .. else if ... else instead of switch to force order. Assumption is that small integers are more frequent than big ones. */ if (t == BCF_BT_INT8) { *q = p + 1; *val = *(int8_t *) p; } else if (t == BCF_BT_INT16) { if (end - p < 2) return -1; *q = p + 2; *val = le_to_i16(p); } else if (t == BCF_BT_INT32) { if (end - p < 4) return -1; *q = p + 4; *val = le_to_i32(p); #ifdef VCF_ALLOW_INT64 } else if (t == BCF_BT_INT64) { // This case should never happen because there should be no 64-bit BCFs // at all, definitely not coming from htslib if (end - p < 8) return -1; *q = p + 8; *val = le_to_i64(p); #endif } else { return -1; } return 0; } static int bcf_dec_size_safe(uint8_t *p, uint8_t *end, uint8_t **q, int *num, int *type) { int r; if (p >= end) return -1; *type = *p & 0xf; if (*p>>4 != 15) { *q = p + 1; *num = *p >> 4; return 0; } r = bcf_dec_typed_int1_safe(p + 1, end, q, num); if (r) return r; return *num >= 0 ? 0 : -1; } static const char *get_type_name(int type) { const char *types[9] = { "null", "int (8-bit)", "int (16 bit)", "int (32 bit)", "unknown", "float", "unknown", "char", "unknown" }; int t = (type >= 0 && type < 8) ? type : 8; return types[t]; } static int bcf_record_check(const bcf_hdr_t *hdr, bcf1_t *rec) { uint8_t *ptr, *end; size_t bytes; uint32_t err = 0; int type = 0; int num = 0; int reflen = 0; uint32_t i, reports; const uint32_t is_integer = ((1 << BCF_BT_INT8) | (1 << BCF_BT_INT16) | #ifdef VCF_ALLOW_INT64 (1 << BCF_BT_INT64) | #endif (1 << BCF_BT_INT32)); const uint32_t is_valid_type = (is_integer | (1 << BCF_BT_NULL) | (1 << BCF_BT_FLOAT) | (1 << BCF_BT_CHAR)); int32_t max_id = hdr ? 
hdr->n[BCF_DT_ID] : 0; // Check for valid contig ID if (rec->rid < 0 || (hdr && (rec->rid >= hdr->n[BCF_DT_CTG] || hdr->id[BCF_DT_CTG][rec->rid].key == NULL))) { hts_log_warning("Bad BCF record at %"PRIhts_pos": Invalid %s id %d", rec->pos+1, "CONTIG", rec->rid); err |= BCF_ERR_CTG_INVALID; } // Check ID ptr = (uint8_t *) rec->shared.s; end = ptr + rec->shared.l; if (bcf_dec_size_safe(ptr, end, &ptr, &num, &type) != 0) goto bad_shared; if (type != BCF_BT_CHAR) { hts_log_warning("Bad BCF record at %s:%"PRIhts_pos": Invalid %s type %d (%s)", bcf_seqname_safe(hdr,rec), rec->pos+1, "ID", type, get_type_name(type)); err |= BCF_ERR_TAG_INVALID; } bytes = (size_t) num << bcf_type_shift[type]; if (end - ptr < bytes) goto bad_shared; ptr += bytes; // Check REF and ALT reports = 0; for (i = 0; i < rec->n_allele; i++) { if (bcf_dec_size_safe(ptr, end, &ptr, &num, &type) != 0) goto bad_shared; if (type != BCF_BT_CHAR) { if (!reports++ || hts_verbose >= HTS_LOG_DEBUG) hts_log_warning("Bad BCF record at %s:%"PRIhts_pos": Invalid %s type %d (%s)", bcf_seqname_safe(hdr,rec), rec->pos+1, "REF/ALT", type, get_type_name(type)); err |= BCF_ERR_CHAR; } if (i == 0) reflen = num; bytes = (size_t) num << bcf_type_shift[type]; if (end - ptr < bytes) goto bad_shared; ptr += bytes; } // Check FILTER reports = 0; if (bcf_dec_size_safe(ptr, end, &ptr, &num, &type) != 0) goto bad_shared; if (num > 0) { bytes = (size_t) num << bcf_type_shift[type]; if (((1 << type) & is_integer) == 0) { hts_log_warning("Bad BCF record at %s:%"PRIhts_pos": Invalid %s type %d (%s)", bcf_seqname_safe(hdr,rec), rec->pos+1, "FILTER", type, get_type_name(type)); err |= BCF_ERR_TAG_INVALID; if (end - ptr < bytes) goto bad_shared; ptr += bytes; } else { if (end - ptr < bytes) goto bad_shared; for (i = 0; i < num; i++) { int32_t key = bcf_dec_int1(ptr, type, &ptr); if (key < 0 || (hdr && (key >= max_id || hdr->id[BCF_DT_ID][key].key == NULL))) { if (!reports++ || hts_verbose >= HTS_LOG_DEBUG) hts_log_warning("Bad BCF record at %s:%"PRIhts_pos": Invalid %s id %d", bcf_seqname_safe(hdr,rec), rec->pos+1, "FILTER", key); err |= BCF_ERR_TAG_UNDEF; } } } } // Check INFO reports = 0; for (i = 0; i < rec->n_info; i++) { int32_t key = -1; if (bcf_dec_typed_int1_safe(ptr, end, &ptr, &key) != 0) goto bad_shared; if (key < 0 || (hdr && (key >= max_id || hdr->id[BCF_DT_ID][key].key == NULL))) { if (!reports++ || hts_verbose >= HTS_LOG_DEBUG) hts_log_warning("Bad BCF record at %s:%"PRIhts_pos": Invalid %s id %d", bcf_seqname_safe(hdr,rec), rec->pos+1, "INFO", key); err |= BCF_ERR_TAG_UNDEF; } if (bcf_dec_size_safe(ptr, end, &ptr, &num, &type) != 0) goto bad_shared; if (((1 << type) & is_valid_type) == 0) { if (!reports++ || hts_verbose >= HTS_LOG_DEBUG) hts_log_warning("Bad BCF record at %s:%"PRIhts_pos": Invalid %s type %d (%s)", bcf_seqname_safe(hdr,rec), rec->pos+1, "INFO", type, get_type_name(type)); err |= BCF_ERR_TAG_INVALID; } bytes = (size_t) num << bcf_type_shift[type]; if (end - ptr < bytes) goto bad_shared; ptr += bytes; } // Check FORMAT and individual information ptr = (uint8_t *) rec->indiv.s; end = ptr + rec->indiv.l; reports = 0; for (i = 0; i < rec->n_fmt; i++) { int32_t key = -1; if (bcf_dec_typed_int1_safe(ptr, end, &ptr, &key) != 0) goto bad_indiv; if (key < 0 || (hdr && (key >= max_id || hdr->id[BCF_DT_ID][key].key == NULL))) { if (!reports++ || hts_verbose >= HTS_LOG_DEBUG) hts_log_warning("Bad BCF record at %s:%"PRIhts_pos": Invalid %s id %d", bcf_seqname_safe(hdr,rec), rec->pos+1, "FORMAT", key); err |= BCF_ERR_TAG_UNDEF; } if 
(bcf_dec_size_safe(ptr, end, &ptr, &num, &type) != 0) goto bad_indiv; if (((1 << type) & is_valid_type) == 0) { if (!reports++ || hts_verbose >= HTS_LOG_DEBUG) hts_log_warning("Bad BCF record at %s:%"PRIhts_pos": Invalid %s type %d (%s)", bcf_seqname_safe(hdr,rec), rec->pos+1, "FORMAT", type, get_type_name(type)); err |= BCF_ERR_TAG_INVALID; } bytes = ((size_t) num << bcf_type_shift[type]) * rec->n_sample; if (end - ptr < bytes) goto bad_indiv; ptr += bytes; } if (!err && rec->rlen < 0) { // Treat bad rlen as a warning instead of an error, and try to // fix up by using the length of the stored REF allele. static int warned = 0; if (!warned) { hts_log_warning("BCF record at %s:%"PRIhts_pos" has invalid RLEN (%"PRIhts_pos"). " "Only one invalid RLEN will be reported.", bcf_seqname_safe(hdr,rec), rec->pos+1, rec->rlen); warned = 1; } rec->rlen = reflen >= 0 ? reflen : 0; } rec->errcode |= err; return err ? -2 : 0; // Return -2 so bcf_read() reports an error bad_shared: hts_log_error("Bad BCF record at %s:%"PRIhts_pos" - shared section malformed or too short", bcf_seqname_safe(hdr,rec), rec->pos+1); return -2; bad_indiv: hts_log_error("Bad BCF record at %s:%"PRIhts_pos" - individuals section malformed or too short", bcf_seqname_safe(hdr,rec), rec->pos+1); return -2; } static inline uint8_t *bcf_unpack_fmt_core1(uint8_t *ptr, int n_sample, bcf_fmt_t *fmt); int bcf_subset_format(const bcf_hdr_t *hdr, bcf1_t *rec) { if ( !hdr->keep_samples ) return 0; if ( !bcf_hdr_nsamples(hdr) ) { rec->indiv.l = rec->n_sample = 0; return 0; } int i, j; uint8_t *ptr = (uint8_t*)rec->indiv.s, *dst = NULL, *src; bcf_dec_t *dec = &rec->d; hts_expand(bcf_fmt_t, rec->n_fmt, dec->m_fmt, dec->fmt); for (i=0; i<dec->m_fmt; ++i) dec->fmt[i].p_free = 0; for (i=0; i<rec->n_fmt; i++) { ptr = bcf_unpack_fmt_core1(ptr, rec->n_sample, &dec->fmt[i]); src = dec->fmt[i].p - dec->fmt[i].size; if ( dst ) { memmove(dec->fmt[i-1].p + dec->fmt[i-1].p_len, dec->fmt[i].p - dec->fmt[i].p_off, dec->fmt[i].p_off); dec->fmt[i].p = dec->fmt[i-1].p + dec->fmt[i-1].p_len + dec->fmt[i].p_off; } dst = dec->fmt[i].p; for (j=0; j<hdr->nsamples_ori; j++) { src += dec->fmt[i].size; if ( !bit_array_test(hdr->keep_samples,j) ) continue; memmove(dst, src, dec->fmt[i].size); dst += dec->fmt[i].size; } rec->indiv.l -= dec->fmt[i].p_len - (dst - dec->fmt[i].p); dec->fmt[i].p_len = dst - dec->fmt[i].p; } rec->unpacked |= BCF_UN_FMT; rec->n_sample = bcf_hdr_nsamples(hdr); return 0; } int bcf_read(htsFile *fp, const bcf_hdr_t *h, bcf1_t *v) { if (fp->format.format == vcf) return vcf_read(fp,h,v); int ret = bcf_read1_core(fp->fp.bgzf, v); if (ret == 0) ret = bcf_record_check(h, v); if ( ret!=0 || !h->keep_samples ) return ret; return bcf_subset_format(h,v); } int bcf_readrec(BGZF *fp, void *null, void *vv, int *tid, hts_pos_t *beg, hts_pos_t *end) { bcf1_t *v = (bcf1_t *) vv; int ret = bcf_read1_core(fp, v); if (ret == 0) ret = bcf_record_check(NULL, v); if (ret >= 0) *tid = v->rid, *beg = v->pos, *end = v->pos + v->rlen; return ret; } static inline int bcf1_sync_id(bcf1_t *line, kstring_t *str) { // single typed string if ( line->d.id && strcmp(line->d.id, ".") ) { return bcf_enc_vchar(str, strlen(line->d.id), line->d.id); } else { return bcf_enc_size(str, 0, BCF_BT_CHAR); } } static inline int bcf1_sync_alleles(bcf1_t *line, kstring_t *str) { // list of typed strings int i; for (i=0; i<line->n_allele; i++) { if (bcf_enc_vchar(str, strlen(line->d.allele[i]), line->d.allele[i]) < 0) return -1; } if ( !line->rlen && line->n_allele ) line->rlen = 
strlen(line->d.allele[0]); return 0; } static inline int bcf1_sync_filter(bcf1_t *line, kstring_t *str) { // typed vector of integers if ( line->d.n_flt ) { return bcf_enc_vint(str, line->d.n_flt, line->d.flt, -1); } else { return bcf_enc_vint(str, 0, 0, -1); } } static inline int bcf1_sync_info(bcf1_t *line, kstring_t *str) { // pairs of typed vectors int i, irm = -1, e = 0; for (i=0; i<line->n_info; i++) { bcf_info_t *info = &line->d.info[i]; if ( !info->vptr ) { // marked for removal if ( irm < 0 ) irm = i; continue; } e |= kputsn_(info->vptr - info->vptr_off, info->vptr_len + info->vptr_off, str) < 0; if ( irm >=0 ) { bcf_info_t tmp = line->d.info[irm]; line->d.info[irm] = line->d.info[i]; line->d.info[i] = tmp; while ( irm<=i && line->d.info[irm].vptr ) irm++; } } if ( irm>=0 ) line->n_info = irm; return e == 0 ? 0 : -1; } static int bcf1_sync(bcf1_t *line) { char *shared_ori = line->shared.s; size_t prev_len; kstring_t tmp = {0,0,0}; if ( !line->shared.l ) { // New line created via API, BCF data blocks do not exist. Get it ready for BCF output tmp = line->shared; bcf1_sync_id(line, &tmp); line->unpack_size[0] = tmp.l; prev_len = tmp.l; bcf1_sync_alleles(line, &tmp); line->unpack_size[1] = tmp.l - prev_len; prev_len = tmp.l; bcf1_sync_filter(line, &tmp); line->unpack_size[2] = tmp.l - prev_len; bcf1_sync_info(line, &tmp); line->shared = tmp; } else if ( line->d.shared_dirty ) { // The line was edited, update the BCF data block. if ( !(line->unpacked & BCF_UN_STR) ) bcf_unpack(line,BCF_UN_STR); // ptr_ori points to the original unchanged BCF data. uint8_t *ptr_ori = (uint8_t *) line->shared.s; // ID: single typed string if ( line->d.shared_dirty & BCF1_DIRTY_ID ) bcf1_sync_id(line, &tmp); else kputsn_(ptr_ori, line->unpack_size[0], &tmp); ptr_ori += line->unpack_size[0]; line->unpack_size[0] = tmp.l; prev_len = tmp.l; // REF+ALT: list of typed strings if ( line->d.shared_dirty & BCF1_DIRTY_ALS ) bcf1_sync_alleles(line, &tmp); else { kputsn_(ptr_ori, line->unpack_size[1], &tmp); if ( !line->rlen && line->n_allele ) line->rlen = strlen(line->d.allele[0]); } ptr_ori += line->unpack_size[1]; line->unpack_size[1] = tmp.l - prev_len; prev_len = tmp.l; if ( line->unpacked & BCF_UN_FLT ) { // FILTER: typed vector of integers if ( line->d.shared_dirty & BCF1_DIRTY_FLT ) bcf1_sync_filter(line, &tmp); else if ( line->d.n_flt ) kputsn_(ptr_ori, line->unpack_size[2], &tmp); else bcf_enc_vint(&tmp, 0, 0, -1); ptr_ori += line->unpack_size[2]; line->unpack_size[2] = tmp.l - prev_len; if ( line->unpacked & BCF_UN_INFO ) { // INFO: pairs of typed vectors if ( line->d.shared_dirty & BCF1_DIRTY_INF ) { bcf1_sync_info(line, &tmp); ptr_ori = (uint8_t*)line->shared.s + line->shared.l; } } } int size = line->shared.l - (size_t)ptr_ori + (size_t)line->shared.s; if ( size ) kputsn_(ptr_ori, size, &tmp); free(line->shared.s); line->shared = tmp; } if ( line->shared.s != shared_ori && line->unpacked & BCF_UN_INFO ) { // Reallocated line->shared.s block invalidated line->d.info[].vptr pointers size_t off_new = line->unpack_size[0] + line->unpack_size[1] + line->unpack_size[2]; int i; for (i=0; i<line->n_info; i++) { uint8_t *vptr_free = line->d.info[i].vptr_free ? 
line->d.info[i].vptr - line->d.info[i].vptr_off : NULL; line->d.info[i].vptr = (uint8_t*) line->shared.s + off_new + line->d.info[i].vptr_off; off_new += line->d.info[i].vptr_len + line->d.info[i].vptr_off; if ( vptr_free ) { free(vptr_free); line->d.info[i].vptr_free = 0; } } } if ( line->n_sample && line->n_fmt && (!line->indiv.l || line->d.indiv_dirty) ) { // The genotype fields changed or are not present tmp.l = tmp.m = 0; tmp.s = NULL; int i, irm = -1; for (i=0; i<line->n_fmt; i++) { bcf_fmt_t *fmt = &line->d.fmt[i]; if ( !fmt->p ) { // marked for removal if ( irm < 0 ) irm = i; continue; } kputsn_(fmt->p - fmt->p_off, fmt->p_len + fmt->p_off, &tmp); if ( irm >=0 ) { bcf_fmt_t tfmt = line->d.fmt[irm]; line->d.fmt[irm] = line->d.fmt[i]; line->d.fmt[i] = tfmt; while ( irm<=i && line->d.fmt[irm].p ) irm++; } } if ( irm>=0 ) line->n_fmt = irm; free(line->indiv.s); line->indiv = tmp; // Reallocated line->indiv.s block invalidated line->d.fmt[].p pointers size_t off_new = 0; for (i=0; i<line->n_fmt; i++) { uint8_t *p_free = line->d.fmt[i].p_free ? line->d.fmt[i].p - line->d.fmt[i].p_off : NULL; line->d.fmt[i].p = (uint8_t*) line->indiv.s + off_new + line->d.fmt[i].p_off; off_new += line->d.fmt[i].p_len + line->d.fmt[i].p_off; if ( p_free ) { free(p_free); line->d.fmt[i].p_free = 0; } } } if ( !line->n_sample ) line->n_fmt = 0; line->d.shared_dirty = line->d.indiv_dirty = 0; return 0; } bcf1_t *bcf_copy(bcf1_t *dst, bcf1_t *src) { bcf1_sync(src); bcf_clear(dst); dst->rid = src->rid; dst->pos = src->pos; dst->rlen = src->rlen; dst->qual = src->qual; dst->n_info = src->n_info; dst->n_allele = src->n_allele; dst->n_fmt = src->n_fmt; dst->n_sample = src->n_sample; if ( dst->shared.m < src->shared.l ) { dst->shared.s = (char*) realloc(dst->shared.s, src->shared.l); dst->shared.m = src->shared.l; } dst->shared.l = src->shared.l; memcpy(dst->shared.s,src->shared.s,dst->shared.l); if ( dst->indiv.m < src->indiv.l ) { dst->indiv.s = (char*) realloc(dst->indiv.s, src->indiv.l); dst->indiv.m = src->indiv.l; } dst->indiv.l = src->indiv.l; memcpy(dst->indiv.s,src->indiv.s,dst->indiv.l); return dst; } bcf1_t *bcf_dup(bcf1_t *src) { bcf1_t *out = bcf_init1(); return bcf_copy(out, src); } int bcf_write(htsFile *hfp, bcf_hdr_t *h, bcf1_t *v) { if ( h->dirty ) { if (bcf_hdr_sync(h) < 0) return -1; } if ( bcf_hdr_nsamples(h)!=v->n_sample ) { hts_log_error("Broken VCF record, the number of columns at %s:%"PRIhts_pos" does not match the number of samples (%d vs %d)", bcf_seqname_safe(h,v), v->pos+1, v->n_sample, bcf_hdr_nsamples(h)); return -1; } if ( hfp->format.format == vcf || hfp->format.format == text_format ) return vcf_write(hfp,h,v); if ( v->errcode ) { // vcf_parse1() encountered a new contig or tag, undeclared in the // header. At this point, the header must have been printed, // proceeding would lead to a broken BCF file. Errors must be checked // and cleared by the caller before we can proceed. hts_log_error("Unchecked error (%d) at %s:%"PRIhts_pos, v->errcode, bcf_seqname_safe(h,v), v->pos+1); return -1; } bcf1_sync(v); // check if the BCF record was modified if ( v->unpacked & BCF_IS_64BIT ) { hts_log_error("Data at %s:%"PRIhts_pos" contains 64-bit values not representable in BCF. 
Please use VCF instead", bcf_seqname_safe(h,v), v->pos+1); return -1; } BGZF *fp = hfp->fp.bgzf; uint8_t x[32]; u32_to_le(v->shared.l + 24, x); // to include six 32-bit integers u32_to_le(v->indiv.l, x + 4); i32_to_le(v->rid, x + 8); u32_to_le(v->pos, x + 12); u32_to_le(v->rlen, x + 16); float_to_le(v->qual, x + 20); u16_to_le(v->n_info, x + 24); u16_to_le(v->n_allele, x + 26); u32_to_le((uint32_t)v->n_fmt<<24 | (v->n_sample & 0xffffff), x + 28); if ( bgzf_write(fp, x, 32) != 32 ) return -1; if ( bgzf_write(fp, v->shared.s, v->shared.l) != v->shared.l ) return -1; if ( bgzf_write(fp, v->indiv.s, v->indiv.l) != v->indiv.l ) return -1; if (hfp->idx) { if (hts_idx_push(hfp->idx, v->rid, v->pos, v->pos + v->rlen, bgzf_tell(fp), 1) < 0) return -1; } return 0; } /********************** *** VCF header I/O *** **********************/ static int add_missing_contig_hrec(bcf_hdr_t *h, const char *name) { bcf_hrec_t *hrec = calloc(1, sizeof(bcf_hrec_t)); int save_errno; if (!hrec) goto fail; hrec->key = strdup("contig"); if (!hrec->key) goto fail; if (bcf_hrec_add_key(hrec, "ID", strlen("ID")) < 0) goto fail; if (bcf_hrec_set_val(hrec, hrec->nkeys-1, name, strlen(name), 0) < 0) goto fail; if (bcf_hdr_add_hrec(h, hrec) < 0) goto fail; return 0; fail: save_errno = errno; hts_log_error("%s", strerror(errno)); if (hrec) bcf_hrec_destroy(hrec); errno = save_errno; return -1; } bcf_hdr_t *vcf_hdr_read(htsFile *fp) { kstring_t txt, *s = &fp->line; int ret; bcf_hdr_t *h; tbx_t *idx = NULL; const char **names = NULL; h = bcf_hdr_init("r"); if (!h) { hts_log_error("Failed to allocate bcf header"); return NULL; } txt.l = txt.m = 0; txt.s = 0; while ((ret = hts_getline(fp, KS_SEP_LINE, s)) >= 0) { int e = 0; if (s->l == 0) continue; if (s->s[0] != '#') { hts_log_error("No sample line"); goto error; } if (s->s[1] != '#' && fp->fn_aux) { // insert contigs here kstring_t tmp = { 0, 0, NULL }; hFILE *f = hopen(fp->fn_aux, "r"); if (f == NULL) { hts_log_error("Couldn't open \"%s\"", fp->fn_aux); goto error; } while (tmp.l = 0, kgetline(&tmp, (kgets_func *) hgets, f) >= 0) { char *tab = strchr(tmp.s, '\t'); if (tab == NULL) continue; e |= (kputs("##contig=<ID=", &txt) < 0); e |= (kputsn(tmp.s, tab - tmp.s, &txt) < 0); e |= (kputs(",length=", &txt) < 0); e |= (kputl(atol(tab), &txt) < 0); e |= (kputsn(">\n", 2, &txt) < 0); } free(tmp.s); if (hclose(f) != 0) { hts_log_error("Error on closing %s", fp->fn_aux); goto error; } if (e) goto error; } if (kputsn(s->s, s->l, &txt) < 0) goto error; if (kputc('\n', &txt) < 0) goto error; if (s->s[1] != '#') break; } if ( ret < -1 ) goto error; if ( !txt.s ) { hts_log_error("Could not read the header"); goto error; } if ( bcf_hdr_parse(h, txt.s) < 0 ) goto error; // check tabix index, are all contigs listed in the header? 
add the missing ones idx = tbx_index_load3(fp->fn, NULL, HTS_IDX_SAVE_REMOTE|HTS_IDX_SILENT_FAIL); if ( idx ) { int i, n, need_sync = 0; names = tbx_seqnames(idx, &n); if (!names) goto error; for (i=0; i<n; i++) { bcf_hrec_t *hrec = bcf_hdr_get_hrec(h, BCF_HL_CTG, "ID", (char*) names[i], NULL); if ( hrec ) continue; if (add_missing_contig_hrec(h, names[i]) < 0) goto error; need_sync = 1; } if ( need_sync ) { if (bcf_hdr_sync(h) < 0) goto error; } free(names); tbx_destroy(idx); } free(txt.s); return h; error: if (idx) tbx_destroy(idx); free(names); free(txt.s); if (h) bcf_hdr_destroy(h); return NULL; } int bcf_hdr_set(bcf_hdr_t *hdr, const char *fname) { int i = 0, n = 0, save_errno; char **lines = hts_readlines(fname, &n); if ( !lines ) return 1; for (i=0; i<n-1; i++) { int k; bcf_hrec_t *hrec = bcf_hdr_parse_line(hdr,lines[i],&k); if (!hrec) goto fail; if (bcf_hdr_add_hrec(hdr, hrec) < 0) { bcf_hrec_destroy(hrec); goto fail; } free(lines[i]); lines[i] = NULL; } if (bcf_hdr_parse_sample_line(hdr, lines[n-1]) < 0) goto fail; if (bcf_hdr_sync(hdr) < 0) goto fail; free(lines[n-1]); free(lines); return 0; fail: save_errno = errno; for (; i < n; i++) free(lines[i]); free(lines); errno = save_errno; return 1; } static int _bcf_hrec_format(const bcf_hrec_t *hrec, int is_bcf, kstring_t *str) { uint32_t e = 0; if ( !hrec->value ) { int j, nout = 0; e |= ksprintf(str, "##%s=<", hrec->key) < 0; for (j=0; j<hrec->nkeys; j++) { // do not output IDX if output is VCF if ( !is_bcf && !strcmp("IDX",hrec->keys[j]) ) continue; if ( nout ) e |= kputc(',',str) < 0; e |= ksprintf(str,"%s=%s", hrec->keys[j], hrec->vals[j]) < 0; nout++; } e |= ksprintf(str,">\n") < 0; } else e |= ksprintf(str,"##%s=%s\n", hrec->key,hrec->value) < 0; return e == 0 ? 0 : -1; } int bcf_hrec_format(const bcf_hrec_t *hrec, kstring_t *str) { return _bcf_hrec_format(hrec,0,str); } int bcf_hdr_format(const bcf_hdr_t *hdr, int is_bcf, kstring_t *str) { int i; for (i=0; i<hdr->nhrec; i++) _bcf_hrec_format(hdr->hrec[i], is_bcf, str); ksprintf(str, "#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO"); if ( bcf_hdr_nsamples(hdr) ) { ksprintf(str, "\tFORMAT"); for (i=0; i<bcf_hdr_nsamples(hdr); i++) ksprintf(str, "\t%s", hdr->samples[i]); } ksprintf(str, "\n"); return 0; } char *bcf_hdr_fmt_text(const bcf_hdr_t *hdr, int is_bcf, int *len) { kstring_t txt = {0,0,0}; bcf_hdr_format(hdr, is_bcf, &txt); if ( len ) *len = txt.l; return txt.s; } const char **bcf_hdr_seqnames(const bcf_hdr_t *h, int *n) { vdict_t *d = (vdict_t*)h->dict[BCF_DT_CTG]; int tid, m = kh_size(d); const char **names = (const char**) calloc(m,sizeof(const char*)); khint_t k; for (k=kh_begin(d); k<kh_end(d); k++) { if ( !kh_exist(d,k) ) continue; tid = kh_val(d,k).id; assert( tid<m ); names[tid] = kh_key(d,k); } // sanity check: there should be no gaps for (tid=0; tid<m; tid++) assert(names[tid]); *n = m; return names; } int vcf_hdr_write(htsFile *fp, const bcf_hdr_t *h) { kstring_t htxt = {0,0,0}; bcf_hdr_format(h, 0, &htxt); while (htxt.l && htxt.s[htxt.l-1] == '\0') --htxt.l; // kill trailing zeros int ret; if ( fp->format.compression!=no_compression ) ret = bgzf_write(fp->fp.bgzf, htxt.s, htxt.l); else ret = hwrite(fp->fp.hfile, htxt.s, htxt.l); free(htxt.s); return ret<0 ? 
-1 : 0; } /*********************** *** Typed value I/O *** ***********************/ int bcf_enc_vint(kstring_t *s, int n, int32_t *a, int wsize) { int32_t max = INT32_MIN, min = INT32_MAX; int i; if (n <= 0) bcf_enc_size(s, 0, BCF_BT_NULL); else if (n == 1) bcf_enc_int1(s, a[0]); else { if (wsize <= 0) wsize = n; for (i = 0; i < n; ++i) { if (a[i] == bcf_int32_missing || a[i] == bcf_int32_vector_end ) continue; if (max < a[i]) max = a[i]; if (min > a[i]) min = a[i]; } if (max <= BCF_MAX_BT_INT8 && min >= BCF_MIN_BT_INT8) { bcf_enc_size(s, wsize, BCF_BT_INT8); for (i = 0; i < n; ++i) if ( a[i]==bcf_int32_vector_end ) kputc(bcf_int8_vector_end, s); else if ( a[i]==bcf_int32_missing ) kputc(bcf_int8_missing, s); else kputc(a[i], s); } else if (max <= BCF_MAX_BT_INT16 && min >= BCF_MIN_BT_INT16) { uint8_t *p; bcf_enc_size(s, wsize, BCF_BT_INT16); ks_resize(s, s->l + n * sizeof(int16_t)); p = (uint8_t *) s->s + s->l; for (i = 0; i < n; ++i) { int16_t x; if ( a[i]==bcf_int32_vector_end ) x = bcf_int16_vector_end; else if ( a[i]==bcf_int32_missing ) x = bcf_int16_missing; else x = a[i]; i16_to_le(x, p); p += sizeof(int16_t); } s->l += n * sizeof(int16_t); } else { uint8_t *p; bcf_enc_size(s, wsize, BCF_BT_INT32); ks_resize(s, s->l + n * sizeof(int32_t)); p = (uint8_t *) s->s + s->l; for (i = 0; i < n; ++i) { i32_to_le(a[i], p); p += sizeof(int32_t); } s->l += n * sizeof(int32_t); } } return 0; // FIXME: check for errs in this function } #ifdef VCF_ALLOW_INT64 static int bcf_enc_long1(kstring_t *s, int64_t x) { uint32_t e = 0; if (x <= BCF_MAX_BT_INT32 && x >= BCF_MIN_BT_INT32) return bcf_enc_int1(s, x); if (x == bcf_int64_vector_end) { e |= bcf_enc_size(s, 1, BCF_BT_INT8); e |= kputc(bcf_int8_vector_end, s) < 0; } else if (x == bcf_int64_missing) { e |= bcf_enc_size(s, 1, BCF_BT_INT8); e |= kputc(bcf_int8_missing, s) < 0; } else { e |= bcf_enc_size(s, 1, BCF_BT_INT64); e |= ks_expand(s, 8); if (e == 0) { u64_to_le(x, (uint8_t *) s->s + s->l); s->l += 8; } } return e == 0 ? 0 : -1; } #endif static inline int serialize_float_array(kstring_t *s, size_t n, const float *a) { uint8_t *p; size_t i; size_t bytes = n * sizeof(float); if (bytes / sizeof(float) != n) return -1; if (ks_resize(s, s->l + bytes) < 0) return -1; p = (uint8_t *) s->s + s->l; for (i = 0; i < n; i++) { float_to_le(a[i], p); p += sizeof(float); } s->l += bytes; return 0; } int bcf_enc_vfloat(kstring_t *s, int n, float *a) { assert(n >= 0); bcf_enc_size(s, n, BCF_BT_FLOAT); serialize_float_array(s, n, a); return 0; // FIXME: check for errs in this function } int bcf_enc_vchar(kstring_t *s, int l, const char *a) { bcf_enc_size(s, l, BCF_BT_CHAR); kputsn(a, l, s); return 0; // FIXME: check for errs in this function } int bcf_fmt_array(kstring_t *s, int n, int type, void *data) { int j = 0; uint32_t e = 0; if (n == 0) { return kputc('.', s) >= 0 ? 
0 : -1; } if (type == BCF_BT_CHAR) { char *p = (char*)data; for (j = 0; j < n && *p; ++j, ++p) { if ( *p==bcf_str_missing ) e |= kputc('.', s) < 0; else e |= kputc(*p, s) < 0; } } else { #define BRANCH(type_t, convert, is_missing, is_vector_end, kprint) { \ uint8_t *p = (uint8_t *) data; \ for (j=0; j<n; j++, p += sizeof(type_t)) \ { \ type_t v = convert(p); \ if ( is_vector_end ) break; \ if ( j ) kputc(',', s); \ if ( is_missing ) kputc('.', s); \ else e |= kprint < 0; \ } \ } switch (type) { case BCF_BT_INT8: BRANCH(int8_t, le_to_i8, v==bcf_int8_missing, v==bcf_int8_vector_end, kputw(v, s)); break; case BCF_BT_INT16: BRANCH(int16_t, le_to_i16, v==bcf_int16_missing, v==bcf_int16_vector_end, kputw(v, s)); break; case BCF_BT_INT32: BRANCH(int32_t, le_to_i32, v==bcf_int32_missing, v==bcf_int32_vector_end, kputw(v, s)); break; case BCF_BT_FLOAT: BRANCH(uint32_t, le_to_u32, v==bcf_float_missing, v==bcf_float_vector_end, kputd(le_to_float(p), s)); break; default: hts_log_error("Unexpected type %d", type); exit(1); break; } #undef BRANCH } return e == 0 ? 0 : -1; } uint8_t *bcf_fmt_sized_array(kstring_t *s, uint8_t *ptr) { int x, type; x = bcf_dec_size(ptr, &ptr, &type); bcf_fmt_array(s, x, type, ptr); return ptr + (x << bcf_type_shift[type]); } /******************** *** VCF site I/O *** ********************/ typedef struct { int key, max_m, size, offset; uint32_t is_gt:1, max_g:31; uint32_t max_l; uint32_t y; uint8_t *buf; } fmt_aux_t; static inline int align_mem(kstring_t *s) { int e = 0; if (s->l&7) { uint64_t zero = 0; e = kputsn((char*)&zero, 8 - (s->l&7), s) < 0; } return e == 0 ? 0 : -1; } // p,q is the start and the end of the FORMAT field #define MAX_N_FMT 255 /* Limited by size of bcf1_t n_fmt field */ static int vcf_parse_format(kstring_t *s, const bcf_hdr_t *h, bcf1_t *v, char *p, char *q) { if ( !bcf_hdr_nsamples(h) ) return 0; static int extreme_val_warned = 0; char *r, *t; int j, l, m, g, overflow = 0; khint_t k; ks_tokaux_t aux1; vdict_t *d = (vdict_t*)h->dict[BCF_DT_ID]; kstring_t *mem = (kstring_t*)&h->mem; fmt_aux_t fmt[MAX_N_FMT]; mem->l = 0; char *end = s->s + s->l; if ( q>=end ) { hts_log_error("FORMAT column with no sample columns starting at %s:%"PRIhts_pos"", bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_NCOLS; return -1; } v->n_fmt = 0; if ( p[0]=='.' && p[1]==0 ) // FORMAT field is empty "." { v->n_sample = bcf_hdr_nsamples(h); return 0; } // get format information from the dictionary for (j = 0, t = kstrtok(p, ":", &aux1); t; t = kstrtok(0, 0, &aux1), ++j) { if (j >= MAX_N_FMT) { v->errcode |= BCF_ERR_LIMITS; hts_log_error("FORMAT column at %s:%"PRIhts_pos" lists more identifiers than htslib can handle", bcf_seqname_safe(h,v), v->pos+1); return -1; } *(char*)aux1.p = 0; k = kh_get(vdict, d, t); if (k == kh_end(d) || kh_val(d, k).info[BCF_HL_FMT] == 15) { if ( t[0]=='.' && t[1]==0 ) { hts_log_error("Invalid FORMAT tag name '.' at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_TAG_INVALID; return -1; } hts_log_warning("FORMAT '%s' at %s:%"PRIhts_pos" is not defined in the header, assuming Type=String", t, bcf_seqname_safe(h,v), v->pos+1); kstring_t tmp = {0,0,0}; int l; ksprintf(&tmp, "##FORMAT=<ID=%s,Number=1,Type=String,Description=\"Dummy\">", t); bcf_hrec_t *hrec = bcf_hdr_parse_line(h,tmp.s,&l); free(tmp.s); int res = hrec ? 
bcf_hdr_add_hrec((bcf_hdr_t*)h, hrec) : -1; if (res < 0) bcf_hrec_destroy(hrec); if (res > 0) res = bcf_hdr_sync((bcf_hdr_t*)h); k = kh_get(vdict, d, t); v->errcode = BCF_ERR_TAG_UNDEF; if (res || k == kh_end(d)) { hts_log_error("Could not add dummy header for FORMAT '%s' at %s:%"PRIhts_pos, t, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_TAG_INVALID; return -1; } } fmt[j].max_l = fmt[j].max_m = fmt[j].max_g = 0; fmt[j].key = kh_val(d, k).id; fmt[j].is_gt = !strcmp(t, "GT"); fmt[j].y = h->id[0][fmt[j].key].val->info[BCF_HL_FMT]; v->n_fmt++; } // compute max int n_sample_ori = -1; r = q + 1; // r: position in the format string l = 0, m = g = 1, v->n_sample = 0; // m: max vector size, l: max field len, g: max number of alleles while ( r<end ) { // can we skip some samples? if ( h->keep_samples ) { n_sample_ori++; if ( !bit_array_test(h->keep_samples,n_sample_ori) ) { while ( *r!='\t' && r<end ) r++; if ( *r=='\t' ) { *r = 0; r++; } continue; } } // collect fmt stats: max vector size, length, number of alleles j = 0; // j-th format field fmt_aux_t *f = fmt; for (;;) { switch (*r) { case ',': m++; break; case '|': case '/': if (f->is_gt) g++; break; case '\t': *r = 0; // fall through case '\0': case ':': if (f->max_m < m) f->max_m = m; if (f->max_l < l) f->max_l = l; if (f->is_gt && f->max_g < g) f->max_g = g; l = 0, m = g = 1; if ( *r==':' ) { j++; f++; if ( j>=v->n_fmt ) { hts_log_error("Incorrect number of FORMAT fields at %s:%"PRIhts_pos"", h->id[BCF_DT_CTG][v->rid].key, v->pos+1); v->errcode |= BCF_ERR_NCOLS; return -1; } } else goto end_for; break; } if ( r>=end ) break; r++; l++; } end_for: v->n_sample++; if ( v->n_sample == bcf_hdr_nsamples(h) ) break; r++; } // allocate memory for arrays for (j = 0; j < v->n_fmt; ++j) { fmt_aux_t *f = &fmt[j]; if ( !f->max_m ) f->max_m = 1; // omitted trailing format field if ((f->y>>4&0xf) == BCF_HT_STR) { f->size = f->is_gt? f->max_g << 2 : f->max_l; } else if ((f->y>>4&0xf) == BCF_HT_REAL || (f->y>>4&0xf) == BCF_HT_INT) { f->size = f->max_m << 2; } else { hts_log_error("The format type %d at %s:%"PRIhts_pos" is currently not supported", f->y>>4&0xf, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_TAG_INVALID; return -1; } if (align_mem(mem) < 0) { hts_log_error("Memory allocation failure at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_LIMITS; return -1; } // Limit the total memory to ~2Gb per VCF row. This should mean // malformed VCF data is less likely to take excessive memory and/or // time. if ((uint64_t) mem->l + v->n_sample * (uint64_t)f->size > INT_MAX) { hts_log_error("Excessive memory required by FORMAT fields at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_LIMITS; return -1; } f->offset = mem->l; if (ks_resize(mem, mem->l + v->n_sample * (size_t)f->size) < 0) { hts_log_error("Memory allocation failure at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_LIMITS; return -1; } mem->l += v->n_sample * f->size; } for (j = 0; j < v->n_fmt; ++j) fmt[j].buf = (uint8_t*)mem->s + fmt[j].offset; // fill the sample fields; at beginning of the loop, t points to the first char of a format n_sample_ori = -1; t = q + 1; m = 0; // m: sample id while ( t<end ) { // can we skip some samples? 
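/*
 * h->keep_samples is a bit array over the original sample columns; it is
 * set up by bcf_hdr_set_samples() further down in this file.  Columns whose
 * bit is clear are skipped by scanning to the next NUL -- the first pass
 * above already replaced the column-separating tabs with NULs, so *t == 0
 * marks the end of the current sample's data.
 */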
if ( h->keep_samples ) { n_sample_ori++; if ( !bit_array_test(h->keep_samples,n_sample_ori) ) { while ( *t && t<end ) t++; t++; continue; } } if ( m == bcf_hdr_nsamples(h) ) break; j = 0; // j-th format field, m-th sample while ( t < end ) { fmt_aux_t *z = &fmt[j++]; if (!z->buf) { hts_log_error("Memory allocation failure for FORMAT field type %d at %s:%"PRIhts_pos, z->y>>4&0xf, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_LIMITS; return -1; } if ((z->y>>4&0xf) == BCF_HT_STR) { if (z->is_gt) { // genotypes int32_t is_phased = 0; uint32_t *x = (uint32_t*)(z->buf + z->size * (size_t)m); uint32_t unreadable = 0; uint32_t max = 0; overflow = 0; for (l = 0;; ++t) { if (*t == '.') { ++t, x[l++] = is_phased; } else { char *tt = t; uint32_t val = hts_str2uint(t, &t, sizeof(val) * CHAR_MAX - 2, &overflow); unreadable |= tt == t; if (max < val) max = val; x[l++] = (val + 1) << 1 | is_phased; } is_phased = (*t == '|'); if (*t != '|' && *t != '/') break; } // Possibly check max against v->n_allele instead? if (overflow || max > (INT32_MAX >> 1) - 1) { hts_log_error("Couldn't read GT data: value too large at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); return -1; } if (unreadable) { hts_log_error("Couldn't read GT data: value not a number or '.' at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); return -1; } if ( !l ) x[l++] = 0; // An empty field, insert missing value for (; l < z->size>>2; ++l) x[l] = bcf_int32_vector_end; } else { char *x = (char*)z->buf + z->size * (size_t)m; for (r = t, l = 0; *t != ':' && *t; ++t) x[l++] = *t; for (; l < z->size; ++l) x[l] = 0; } } else if ((z->y>>4&0xf) == BCF_HT_INT) { int32_t *x = (int32_t*)(z->buf + z->size * (size_t)m); for (l = 0;; ++t) { if (*t == '.') { x[l++] = bcf_int32_missing, ++t; // ++t to skip "." } else { overflow = 0; char *te; long int tmp_val = hts_str2int(t, &te, sizeof(tmp_val)*CHAR_BIT, &overflow); if ( te==t || overflow || tmp_val<BCF_MIN_BT_INT32 || tmp_val>BCF_MAX_BT_INT32 ) { if ( !extreme_val_warned ) { hts_log_warning("Extreme FORMAT/%s value encountered and set to missing at %s:%"PRIhts_pos, h->id[BCF_DT_ID][fmt[j-1].key].key, bcf_seqname_safe(h,v), v->pos+1); extreme_val_warned = 1; } tmp_val = bcf_int32_missing; } x[l++] = tmp_val; t = te; } if (*t != ',') break; } if ( !l ) x[l++] = bcf_int32_missing; for (; l < z->size>>2; ++l) x[l] = bcf_int32_vector_end; } else if ((z->y>>4&0xf) == BCF_HT_REAL) { float *x = (float*)(z->buf + z->size * (size_t)m); for (l = 0;; ++t) { if (*t == '.' && !isdigit_c(t[1])) { bcf_float_set_missing(x[l++]), ++t; // ++t to skip "." 
} else { overflow = 0; char *te; float tmp_val = hts_str2dbl(t, &te, &overflow); if ( (te==t || overflow) && !extreme_val_warned ) { hts_log_warning("Extreme FORMAT/%s value encountered at %s:%"PRIhts_pos, h->id[BCF_DT_ID][fmt[j-1].key].key, bcf_seqname(h,v), v->pos+1); extreme_val_warned = 1; } x[l++] = tmp_val; t = te; } if (*t != ',') break; } if ( !l ) bcf_float_set_missing(x[l++]); // An empty field, insert missing value for (; l < z->size>>2; ++l) bcf_float_set_vector_end(x[l]); } else { hts_log_error("Unknown FORMAT field type %d at %s:%"PRIhts_pos, z->y>>4&0xf, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_TAG_INVALID; return -1; } if (*t == '\0') { break; } else if (*t == ':') { t++; } else { char buffer[8]; hts_log_error("Invalid character %s in '%s' FORMAT field at %s:%"PRIhts_pos"", hts_strprint(buffer, sizeof buffer, '\'', t, 1), h->id[BCF_DT_ID][z->key].key, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_CHAR; return -1; } } for (; j < v->n_fmt; ++j) { // fill end-of-vector values fmt_aux_t *z = &fmt[j]; if ((z->y>>4&0xf) == BCF_HT_STR) { if (z->is_gt) { int32_t *x = (int32_t*)(z->buf + z->size * (size_t)m); if (z->size) x[0] = bcf_int32_missing; for (l = 1; l < z->size>>2; ++l) x[l] = bcf_int32_vector_end; } else { char *x = (char*)z->buf + z->size * (size_t)m; if ( z->size ) x[0] = '.'; for (l = 1; l < z->size; ++l) x[l] = 0; } } else if ((z->y>>4&0xf) == BCF_HT_INT) { int32_t *x = (int32_t*)(z->buf + z->size * (size_t)m); x[0] = bcf_int32_missing; for (l = 1; l < z->size>>2; ++l) x[l] = bcf_int32_vector_end; } else if ((z->y>>4&0xf) == BCF_HT_REAL) { float *x = (float*)(z->buf + z->size * (size_t)m); bcf_float_set_missing(x[0]); for (l = 1; l < z->size>>2; ++l) bcf_float_set_vector_end(x[l]); } } m++; t++; } // write individual genotype information kstring_t *str = &v->indiv; int i; if (v->n_sample > 0) { for (i = 0; i < v->n_fmt; ++i) { fmt_aux_t *z = &fmt[i]; bcf_enc_int1(str, z->key); if ((z->y>>4&0xf) == BCF_HT_STR && !z->is_gt) { bcf_enc_size(str, z->size, BCF_BT_CHAR); kputsn((char*)z->buf, z->size * (size_t)v->n_sample, str); } else if ((z->y>>4&0xf) == BCF_HT_INT || z->is_gt) { bcf_enc_vint(str, (z->size>>2) * v->n_sample, (int32_t*)z->buf, z->size>>2); } else { bcf_enc_size(str, z->size>>2, BCF_BT_FLOAT); if (serialize_float_array(str, (z->size>>2) * (size_t)v->n_sample, (float *) z->buf) != 0) { v->errcode |= BCF_ERR_LIMITS; hts_log_error("Out of memory at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); return -1; } } } } if ( v->n_sample!=bcf_hdr_nsamples(h) ) { hts_log_error("Number of columns at %s:%"PRIhts_pos" does not match the number of samples (%d vs %d)", bcf_seqname_safe(h,v), v->pos+1, v->n_sample, bcf_hdr_nsamples(h)); v->errcode |= BCF_ERR_NCOLS; return -1; } if ( v->indiv.l > 0xffffffff ) { hts_log_error("The FORMAT at %s:%"PRIhts_pos" is too long", bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_LIMITS; // Error recovery: return -1 if this is a critical error or 0 if we want to ignore the FORMAT and proceed v->n_fmt = 0; return -1; } return 0; } static khint_t fix_chromosome(const bcf_hdr_t *h, vdict_t *d, const char *p) { // Simple error recovery for chromosomes not defined in the header. It will not help when VCF header has // been already printed, but will enable tools like vcfcheck to proceed. kstring_t tmp = {0,0,0}; khint_t k; int l; ksprintf(&tmp, "##contig=<ID=%s>", p); bcf_hrec_t *hrec = bcf_hdr_parse_line(h,tmp.s,&l); free(tmp.s); int res = hrec ? 
bcf_hdr_add_hrec((bcf_hdr_t*)h, hrec) : -1; if (res < 0) bcf_hrec_destroy(hrec); if (res > 0) res = bcf_hdr_sync((bcf_hdr_t*)h); k = kh_get(vdict, d, p); return k; } static int vcf_parse_filter(kstring_t *str, const bcf_hdr_t *h, bcf1_t *v, char *p, char *q) { int i, n_flt = 1, max_n_flt = 0; char *r, *t; int32_t *a_flt = NULL; ks_tokaux_t aux1; khint_t k; vdict_t *d = (vdict_t*)h->dict[BCF_DT_ID]; // count the number of filters if (*(q-1) == ';') *(q-1) = 0; for (r = p; *r; ++r) if (*r == ';') ++n_flt; if (n_flt > max_n_flt) { a_flt = malloc(n_flt * sizeof(*a_flt)); if (!a_flt) { hts_log_error("Could not allocate memory at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_LIMITS; // No appropriate code? return -1; } max_n_flt = n_flt; } // add filters for (t = kstrtok(p, ";", &aux1), i = 0; t; t = kstrtok(0, 0, &aux1)) { *(char*)aux1.p = 0; k = kh_get(vdict, d, t); if (k == kh_end(d)) { // Simple error recovery for FILTERs not defined in the header. It will not help when VCF header has // been already printed, but will enable tools like vcfcheck to proceed. hts_log_warning("FILTER '%s' is not defined in the header", t); kstring_t tmp = {0,0,0}; int l; ksprintf(&tmp, "##FILTER=<ID=%s,Description=\"Dummy\">", t); bcf_hrec_t *hrec = bcf_hdr_parse_line(h,tmp.s,&l); free(tmp.s); int res = hrec ? bcf_hdr_add_hrec((bcf_hdr_t*)h, hrec) : -1; if (res < 0) bcf_hrec_destroy(hrec); if (res > 0) res = bcf_hdr_sync((bcf_hdr_t*)h); k = kh_get(vdict, d, t); v->errcode |= BCF_ERR_TAG_UNDEF; if (res || k == kh_end(d)) { hts_log_error("Could not add dummy header for FILTER '%s' at %s:%"PRIhts_pos, t, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_TAG_INVALID; free(a_flt); return -1; } } a_flt[i++] = kh_val(d, k).id; } bcf_enc_vint(str, n_flt, a_flt, -1); free(a_flt); return 0; } static int vcf_parse_info(kstring_t *str, const bcf_hdr_t *h, bcf1_t *v, char *p, char *q) { static int extreme_int_warned = 0, negative_rlen_warned = 0; int max_n_val = 0, overflow = 0; char *r, *key; khint_t k; vdict_t *d = (vdict_t*)h->dict[BCF_DT_ID]; int32_t *a_val = NULL; v->n_info = 0; if (*(q-1) == ';') *(q-1) = 0; for (r = key = p;; ++r) { int c; char *val, *end; if (*r != ';' && *r != '=' && *r != 0) continue; if (v->n_info == UINT16_MAX) { hts_log_error("Too many INFO entries at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_LIMITS; return -1; } val = end = 0; c = *r; *r = 0; if (c == '=') { val = r + 1; for (end = val; *end != ';' && *end != 0; ++end); c = *end; *end = 0; } else end = r; if ( !*key ) { if (c==0) break; r = end; key = r + 1; continue; } // faulty VCF, ";;" in the INFO k = kh_get(vdict, d, key); if (k == kh_end(d) || kh_val(d, k).info[BCF_HL_INFO] == 15) { hts_log_warning("INFO '%s' is not defined in the header, assuming Type=String", key); kstring_t tmp = {0,0,0}; int l; ksprintf(&tmp, "##INFO=<ID=%s,Number=1,Type=String,Description=\"Dummy\">", key); bcf_hrec_t *hrec = bcf_hdr_parse_line(h,tmp.s,&l); free(tmp.s); int res = hrec ? 
bcf_hdr_add_hrec((bcf_hdr_t*)h, hrec) : -1; if (res < 0) bcf_hrec_destroy(hrec); if (res > 0) res = bcf_hdr_sync((bcf_hdr_t*)h); k = kh_get(vdict, d, key); v->errcode = BCF_ERR_TAG_UNDEF; if (res || k == kh_end(d)) { hts_log_error("Could not add dummy header for INFO '%s' at %s:%"PRIhts_pos, key, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_TAG_INVALID; return -1; } } uint32_t y = kh_val(d, k).info[BCF_HL_INFO]; ++v->n_info; bcf_enc_int1(str, kh_val(d, k).id); if (val == 0) { bcf_enc_size(str, 0, BCF_BT_NULL); } else if ((y>>4&0xf) == BCF_HT_FLAG || (y>>4&0xf) == BCF_HT_STR) { // if Flag has a value, treat it as a string bcf_enc_vchar(str, end - val, val); } else { // int/float value/array int i, n_val; char *t, *te; for (t = val, n_val = 1; *t; ++t) // count the number of values if (*t == ',') ++n_val; // Check both int and float size in one step for simplicity if (n_val > max_n_val) { int32_t *a_tmp = (int32_t *)realloc(a_val, n_val * sizeof(*a_val)); if (!a_tmp) { hts_log_error("Could not allocate memory at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_LIMITS; // No appropriate code? return -1; } a_val = a_tmp; max_n_val = n_val; } if ((y>>4&0xf) == BCF_HT_INT) { i = 0, t = val; int64_t val1; int is_int64 = 0; #ifdef VCF_ALLOW_INT64 if ( n_val==1 ) { overflow = 0; long long int tmp_val = hts_str2int(val, &te, sizeof(tmp_val)*CHAR_BIT, &overflow); if ( te==val ) tmp_val = bcf_int32_missing; else if ( overflow || tmp_val<BCF_MIN_BT_INT64 || tmp_val>BCF_MAX_BT_INT64 ) { if ( !extreme_int_warned ) { hts_log_warning("Extreme INFO/%s value encountered and set to missing at %s:%"PRIhts_pos,key,bcf_seqname_safe(h,v), v->pos+1); extreme_int_warned = 1; } tmp_val = bcf_int32_missing; } else is_int64 = 1; val1 = tmp_val; t = te; i = 1; // this is just to avoid adding another nested block... 
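/*
 * With n_val==1 the value has already been parsed above (possibly as a
 * 64-bit integer), so i==1 makes the generic per-value loop below a no-op;
 * val1/is_int64 are then encoded directly in the n_val==1 branch further on.
 */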
} #endif for (; i < n_val; ++i, ++t) { overflow = 0; long int tmp_val = hts_str2int(t, &te, sizeof(tmp_val)*CHAR_BIT, &overflow); if ( te==t ) tmp_val = bcf_int32_missing; else if ( overflow || tmp_val<BCF_MIN_BT_INT32 || tmp_val>BCF_MAX_BT_INT32 ) { if ( !extreme_int_warned ) { hts_log_warning("Extreme INFO/%s value encountered and set to missing at %s:%"PRIhts_pos,key,bcf_seqname_safe(h,v), v->pos+1); extreme_int_warned = 1; } tmp_val = bcf_int32_missing; } a_val[i] = tmp_val; for (t = te; *t && *t != ','; t++); } if (n_val == 1) { #ifdef VCF_ALLOW_INT64 if ( is_int64 ) { v->unpacked |= BCF_IS_64BIT; bcf_enc_long1(str, val1); } else bcf_enc_int1(str, (int32_t)val1); #else val1 = a_val[0]; bcf_enc_int1(str, (int32_t)val1); #endif } else { bcf_enc_vint(str, n_val, a_val, -1); } if (n_val==1 && (val1!=bcf_int32_missing || is_int64) && strcmp(key, "END") == 0) { if ( val1 <= v->pos ) { if ( !negative_rlen_warned ) { hts_log_warning("INFO/END=%"PRIhts_pos" is smaller than POS at %s:%"PRIhts_pos,val1,bcf_seqname_safe(h,v),v->pos+1); negative_rlen_warned = 1; } } else v->rlen = val1 - v->pos; } } else if ((y>>4&0xf) == BCF_HT_REAL) { float *val_f = (float *)a_val; for (i = 0, t = val; i < n_val; ++i, ++t) { overflow = 0; val_f[i] = hts_str2dbl(t, &te, &overflow); if ( te==t || overflow ) // conversion failed bcf_float_set_missing(val_f[i]); for (t = te; *t && *t != ','; t++); } bcf_enc_vfloat(str, n_val, val_f); } } if (c == 0) break; r = end; key = r + 1; } free(a_val); return 0; } int vcf_parse(kstring_t *s, const bcf_hdr_t *h, bcf1_t *v) { int i = 0, ret = -2, overflow = 0; char *p, *q, *r, *t; kstring_t *str; khint_t k; ks_tokaux_t aux; if (!s || !h || !v || !(s->s)) return ret; // Assumed in lots of places, but we may as well spot this early assert(sizeof(float) == sizeof(int32_t)); bcf_clear1(v); str = &v->shared; memset(&aux, 0, sizeof(ks_tokaux_t)); for (p = kstrtok(s->s, "\t", &aux), i = 0; p; p = kstrtok(0, 0, &aux), ++i) { q = (char*)aux.p; *q = 0; if (i == 0) { // CHROM vdict_t *d = (vdict_t*)h->dict[BCF_DT_CTG]; k = kh_get(vdict, d, p); if (k == kh_end(d)) { hts_log_warning("Contig '%s' is not defined in the header. 
(Quick workaround: index the file with tabix.)", p); v->errcode = BCF_ERR_CTG_UNDEF; if ((k = fix_chromosome(h, d, p)) == kh_end(d)) { hts_log_error("Could not add dummy header for contig '%s'", p); v->errcode |= BCF_ERR_CTG_INVALID; goto err; } } v->rid = kh_val(d, k).id; } else if (i == 1) { // POS overflow = 0; v->pos = hts_str2uint(p, &p, 63, &overflow); if (overflow) { hts_log_error("Position value '%s' is too large", p); goto err; } else { v->pos -= 1; } if (v->pos >= INT32_MAX) v->unpacked |= BCF_IS_64BIT; } else if (i == 2) { // ID if (strcmp(p, ".")) bcf_enc_vchar(str, q - p, p); else bcf_enc_size(str, 0, BCF_BT_CHAR); } else if (i == 3) { // REF bcf_enc_vchar(str, q - p, p); v->n_allele = 1, v->rlen = q - p; } else if (i == 4) { // ALT if (strcmp(p, ".")) { for (r = t = p;; ++r) { if (*r == ',' || *r == 0) { if (v->n_allele == UINT16_MAX) { hts_log_error("Too many ALT alleles at %s:%"PRIhts_pos, bcf_seqname_safe(h,v), v->pos+1); v->errcode |= BCF_ERR_LIMITS; goto err; } bcf_enc_vchar(str, r - t, t); t = r + 1; ++v->n_allele; } if (r == q) break; } } } else if (i == 5) { // QUAL if (strcmp(p, ".")) v->qual = atof(p); else bcf_float_set_missing(v->qual); if ( v->max_unpack && !(v->max_unpack>>1) ) goto end; // BCF_UN_STR } else if (i == 6) { // FILTER if (strcmp(p, ".")) { if (vcf_parse_filter(str, h, v, p, q)) goto err; } else bcf_enc_vint(str, 0, 0, -1); if ( v->max_unpack && !(v->max_unpack>>2) ) goto end; // BCF_UN_FLT } else if (i == 7) { // INFO if (strcmp(p, ".")) { if (vcf_parse_info(str, h, v, p, q)) goto err; } if ( v->max_unpack && !(v->max_unpack>>3) ) goto end; } else if (i == 8) {// FORMAT return vcf_parse_format(s, h, v, p, q) == 0 ? 0 : -2; } } end: ret = 0; err: return ret; } int vcf_open_mode(char *mode, const char *fn, const char *format) { if (format == NULL) { // Try to pick a format based on the filename extension char extension[HTS_MAX_EXT_LEN]; if (find_file_extension(fn, extension) < 0) return -1; return vcf_open_mode(mode, fn, extension); } else if (strcasecmp(format, "bcf") == 0) strcpy(mode, "b"); else if (strcasecmp(format, "vcf") == 0) strcpy(mode, ""); else if (strcasecmp(format, "vcf.gz") == 0 || strcasecmp(format, "vcf.bgz") == 0) strcpy(mode, "z"); else return -1; return 0; } int vcf_read(htsFile *fp, const bcf_hdr_t *h, bcf1_t *v) { int ret; ret = hts_getline(fp, KS_SEP_LINE, &fp->line); if (ret < 0) return ret; return vcf_parse1(&fp->line, h, v); } static inline uint8_t *bcf_unpack_fmt_core1(uint8_t *ptr, int n_sample, bcf_fmt_t *fmt) { uint8_t *ptr_start = ptr; fmt->id = bcf_dec_typed_int1(ptr, &ptr); fmt->n = bcf_dec_size(ptr, &ptr, &fmt->type); fmt->size = fmt->n << bcf_type_shift[fmt->type]; fmt->p = ptr; fmt->p_off = ptr - ptr_start; fmt->p_free = 0; ptr += n_sample * fmt->size; fmt->p_len = ptr - fmt->p; return ptr; } static inline uint8_t *bcf_unpack_info_core1(uint8_t *ptr, bcf_info_t *info) { uint8_t *ptr_start = ptr; info->key = bcf_dec_typed_int1(ptr, &ptr); info->len = bcf_dec_size(ptr, &ptr, &info->type); info->vptr = ptr; info->vptr_off = ptr - ptr_start; info->vptr_free = 0; info->v1.i = 0; if (info->len == 1) { if (info->type == BCF_BT_INT8 || info->type == BCF_BT_CHAR) info->v1.i = *(int8_t*)ptr; else if (info->type == BCF_BT_INT32) info->v1.i = le_to_i32(ptr); else if (info->type == BCF_BT_FLOAT) info->v1.f = le_to_float(ptr); else if (info->type == BCF_BT_INT16) info->v1.i = le_to_i16(ptr); else if (info->type == BCF_BT_INT64) info->v1.i = le_to_i64(ptr); } ptr += info->len << bcf_type_shift[info->type]; info->vptr_len = ptr - 
info->vptr; return ptr; } int bcf_unpack(bcf1_t *b, int which) { if ( !b->shared.l ) return 0; // Building a new BCF record from scratch uint8_t *ptr = (uint8_t*)b->shared.s, *ptr_ori; int i; bcf_dec_t *d = &b->d; if (which & BCF_UN_FLT) which |= BCF_UN_STR; if (which & BCF_UN_INFO) which |= BCF_UN_SHR; if ((which&BCF_UN_STR) && !(b->unpacked&BCF_UN_STR)) { kstring_t tmp; // ID tmp.l = 0; tmp.s = d->id; tmp.m = d->m_id; ptr_ori = ptr; ptr = bcf_fmt_sized_array(&tmp, ptr); b->unpack_size[0] = ptr - ptr_ori; kputc('\0', &tmp); d->id = tmp.s; d->m_id = tmp.m; // REF and ALT are in a single block (d->als) and d->alleles are pointers into this block hts_expand(char*, b->n_allele, d->m_allele, d->allele); // NM: hts_expand() is a macro tmp.l = 0; tmp.s = d->als; tmp.m = d->m_als; ptr_ori = ptr; char *o = ""; for (i = 0; i < b->n_allele; ++i) { d->allele[i] = o + tmp.l; ptr = bcf_fmt_sized_array(&tmp, ptr); kputc('\0', &tmp); } b->unpack_size[1] = ptr - ptr_ori; d->als = tmp.s; d->m_als = tmp.m; for (i = 0; i < b->n_allele; ++i) d->allele[i] = d->als + (d->allele[i]-o); b->unpacked |= BCF_UN_STR; } if ((which&BCF_UN_FLT) && !(b->unpacked&BCF_UN_FLT)) { // FILTER ptr = (uint8_t*)b->shared.s + b->unpack_size[0] + b->unpack_size[1]; ptr_ori = ptr; if (*ptr>>4) { int type; d->n_flt = bcf_dec_size(ptr, &ptr, &type); hts_expand(int, d->n_flt, d->m_flt, d->flt); for (i = 0; i < d->n_flt; ++i) d->flt[i] = bcf_dec_int1(ptr, type, &ptr); } else ++ptr, d->n_flt = 0; b->unpack_size[2] = ptr - ptr_ori; b->unpacked |= BCF_UN_FLT; } if ((which&BCF_UN_INFO) && !(b->unpacked&BCF_UN_INFO)) { // INFO ptr = (uint8_t*)b->shared.s + b->unpack_size[0] + b->unpack_size[1] + b->unpack_size[2]; hts_expand(bcf_info_t, b->n_info, d->m_info, d->info); for (i = 0; i < d->m_info; ++i) d->info[i].vptr_free = 0; for (i = 0; i < b->n_info; ++i) ptr = bcf_unpack_info_core1(ptr, &d->info[i]); b->unpacked |= BCF_UN_INFO; } if ((which&BCF_UN_FMT) && b->n_sample && !(b->unpacked&BCF_UN_FMT)) { // FORMAT ptr = (uint8_t*)b->indiv.s; hts_expand(bcf_fmt_t, b->n_fmt, d->m_fmt, d->fmt); for (i = 0; i < d->m_fmt; ++i) d->fmt[i].p_free = 0; for (i = 0; i < b->n_fmt; ++i) ptr = bcf_unpack_fmt_core1(ptr, b->n_sample, &d->fmt[i]); b->unpacked |= BCF_UN_FMT; } return 0; } int vcf_format(const bcf_hdr_t *h, const bcf1_t *v, kstring_t *s) { int i; int32_t max_dt_id = h->n[BCF_DT_ID]; const char *chrom = bcf_seqname(h, v); if (!chrom) { hts_log_error("Invalid BCF, CONTIG id=%d not present in the header", v->rid); errno = EINVAL; return -1; } bcf_unpack((bcf1_t*)v, BCF_UN_ALL); kputs(chrom, s); // CHROM kputc('\t', s); kputll(v->pos + 1, s); // POS kputc('\t', s); kputs(v->d.id ? 
v->d.id : ".", s); // ID kputc('\t', s); // REF if (v->n_allele > 0) kputs(v->d.allele[0], s); else kputc('.', s); kputc('\t', s); // ALT if (v->n_allele > 1) { for (i = 1; i < v->n_allele; ++i) { if (i > 1) kputc(',', s); kputs(v->d.allele[i], s); } } else kputc('.', s); kputc('\t', s); // QUAL if ( bcf_float_is_missing(v->qual) ) kputc('.', s); // QUAL else kputd(v->qual, s); kputc('\t', s); // FILTER if (v->d.n_flt) { for (i = 0; i < v->d.n_flt; ++i) { int32_t idx = v->d.flt[i]; if (idx < 0 || idx >= max_dt_id || h->id[BCF_DT_ID][idx].key == NULL) { hts_log_error("Invalid BCF, the FILTER tag id=%d at %s:%"PRIhts_pos" not present in the header", idx, bcf_seqname_safe(h, v), v->pos + 1); errno = EINVAL; return -1; } if (i) kputc(';', s); kputs(h->id[BCF_DT_ID][idx].key, s); } } else kputc('.', s); kputc('\t', s); // INFO if (v->n_info) { int first = 1; for (i = 0; i < v->n_info; ++i) { bcf_info_t *z = &v->d.info[i]; if ( !z->vptr ) continue; if ( !first ) kputc(';', s); first = 0; if (z->key < 0 || z->key >= max_dt_id || h->id[BCF_DT_ID][z->key].key == NULL) { hts_log_error("Invalid BCF, the INFO tag id=%d is %s at %s:%"PRIhts_pos, z->key, z->key < 0 ? "negative" : (z->key >= max_dt_id ? "too large" : "not present in the header"), bcf_seqname_safe(h, v), v->pos+1); errno = EINVAL; return -1; } kputs(h->id[BCF_DT_ID][z->key].key, s); if (z->len <= 0) continue; kputc('=', s); if (z->len == 1) { switch (z->type) { case BCF_BT_INT8: if ( z->v1.i==bcf_int8_missing ) kputc('.', s); else kputw(z->v1.i, s); break; case BCF_BT_INT16: if ( z->v1.i==bcf_int16_missing ) kputc('.', s); else kputw(z->v1.i, s); break; case BCF_BT_INT32: if ( z->v1.i==bcf_int32_missing ) kputc('.', s); else kputw(z->v1.i, s); break; case BCF_BT_INT64: if ( z->v1.i==bcf_int64_missing ) kputc('.', s); else kputll(z->v1.i, s); break; case BCF_BT_FLOAT: if ( bcf_float_is_missing(z->v1.f) ) kputc('.', s); else kputd(z->v1.f, s); break; case BCF_BT_CHAR: kputc(z->v1.i, s); break; default: hts_log_error("Unexpected type %d at %s:%"PRIhts_pos, z->type, bcf_seqname_safe(h, v), v->pos+1); errno = EINVAL; return -1; } } else bcf_fmt_array(s, z->len, z->type, z->vptr); } if ( first ) kputc('.', s); } else kputc('.', s); // FORMAT and individual information if (v->n_sample) { int i,j; if ( v->n_fmt) { int gt_i = -1; bcf_fmt_t *fmt = v->d.fmt; int first = 1; for (i = 0; i < (int)v->n_fmt; ++i) { if ( !fmt[i].p ) continue; kputc(!first ? 
':' : '\t', s); first = 0; if (fmt[i].id < 0 || fmt[i].id >= max_dt_id || h->id[BCF_DT_ID][fmt[i].id].key == NULL) //!bcf_hdr_idinfo_exists(h,BCF_HL_FMT,fmt[i].id) ) { hts_log_error("Invalid BCF, the FORMAT tag id=%d at %s:%"PRIhts_pos" not present in the header", fmt[i].id, bcf_seqname_safe(h, v), v->pos+1); errno = EINVAL; return -1; } kputs(h->id[BCF_DT_ID][fmt[i].id].key, s); if (strcmp(h->id[BCF_DT_ID][fmt[i].id].key, "GT") == 0) gt_i = i; } if ( first ) kputs("\t.", s); for (j = 0; j < v->n_sample; ++j) { kputc('\t', s); first = 1; for (i = 0; i < (int)v->n_fmt; ++i) { bcf_fmt_t *f = &fmt[i]; if ( !f->p ) continue; if (!first) kputc(':', s); first = 0; if (gt_i == i) bcf_format_gt(f,j,s); else bcf_fmt_array(s, f->n, f->type, f->p + j * (size_t)f->size); } if ( first ) kputc('.', s); } } else for (j=0; j<=v->n_sample; j++) kputs("\t.", s); } kputc('\n', s); return 0; } int vcf_write_line(htsFile *fp, kstring_t *line) { int ret; if ( line->s[line->l-1]!='\n' ) kputc('\n',line); if ( fp->format.compression!=no_compression ) ret = bgzf_write(fp->fp.bgzf, line->s, line->l); else ret = hwrite(fp->fp.hfile, line->s, line->l); return ret==line->l ? 0 : -1; } int vcf_write(htsFile *fp, const bcf_hdr_t *h, bcf1_t *v) { int ret; fp->line.l = 0; if (vcf_format1(h, v, &fp->line) != 0) return -1; if ( fp->format.compression!=no_compression ) ret = bgzf_write(fp->fp.bgzf, fp->line.s, fp->line.l); else ret = hwrite(fp->fp.hfile, fp->line.s, fp->line.l); if (fp->idx) { int tid; if ((tid = hts_idx_tbi_name(fp->idx, v->rid, bcf_seqname_safe(h, v))) < 0) return -1; if (hts_idx_push(fp->idx, tid, v->pos, v->pos + v->rlen, bgzf_tell(fp->fp.bgzf), 1) < 0) return -1; } return ret==fp->line.l ? 0 : -1; } /************************ * Data access routines * ************************/ int bcf_hdr_id2int(const bcf_hdr_t *h, int which, const char *id) { khint_t k; vdict_t *d = (vdict_t*)h->dict[which]; k = kh_get(vdict, d, id); return k == kh_end(d)? -1 : kh_val(d, k).id; } /******************** *** BCF indexing *** ********************/ // Calculate number of index levels given min_shift and the header contig // list. Also returns number of contigs in *nids_out. static int idx_calc_n_lvls_ids(const bcf_hdr_t *h, int min_shift, int starting_n_lvls, int *nids_out) { int n_lvls, i, nids = 0; int64_t max_len = 0, s; for (i = 0; i < h->n[BCF_DT_CTG]; ++i) { if ( !h->id[BCF_DT_CTG][i].val ) continue; if ( max_len < h->id[BCF_DT_CTG][i].val->info[0] ) max_len = h->id[BCF_DT_CTG][i].val->info[0]; nids++; } if ( !max_len ) max_len = (1LL<<31) - 1; // In case contig line is broken. 
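/*
 * The loop below widens the span covered by a single top-level bin by a
 * factor of 8 per extra index level until it covers the longest contig
 * (plus the small safety margin added just below).  For example, with
 * min_shift=14 and starting_n_lvls=0, a max_len of roughly 2^31 resolves
 * to n_lvls = 6.
 */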
max_len += 256; s = 1LL << (min_shift + starting_n_lvls * 3); for (n_lvls = starting_n_lvls; max_len > s; ++n_lvls, s <<= 3); if (nids_out) *nids_out = nids; return n_lvls; } hts_idx_t *bcf_index(htsFile *fp, int min_shift) { int n_lvls; bcf1_t *b = NULL; hts_idx_t *idx = NULL; bcf_hdr_t *h; int r; h = bcf_hdr_read(fp); if ( !h ) return NULL; int nids = 0; n_lvls = idx_calc_n_lvls_ids(h, min_shift, 0, &nids); idx = hts_idx_init(nids, HTS_FMT_CSI, bgzf_tell(fp->fp.bgzf), min_shift, n_lvls); if (!idx) goto fail; b = bcf_init1(); if (!b) goto fail; while ((r = bcf_read1(fp,h, b)) >= 0) { int ret; ret = hts_idx_push(idx, b->rid, b->pos, b->pos + b->rlen, bgzf_tell(fp->fp.bgzf), 1); if (ret < 0) goto fail; } if (r < -1) goto fail; hts_idx_finish(idx, bgzf_tell(fp->fp.bgzf)); bcf_destroy1(b); bcf_hdr_destroy(h); return idx; fail: hts_idx_destroy(idx); bcf_destroy1(b); bcf_hdr_destroy(h); return NULL; } hts_idx_t *bcf_index_load2(const char *fn, const char *fnidx) { return fnidx? hts_idx_load2(fn, fnidx) : bcf_index_load(fn); } hts_idx_t *bcf_index_load3(const char *fn, const char *fnidx, int flags) { return hts_idx_load3(fn, fnidx, HTS_FMT_CSI, flags); } int bcf_index_build3(const char *fn, const char *fnidx, int min_shift, int n_threads) { htsFile *fp; hts_idx_t *idx; tbx_t *tbx; int ret; if ((fp = hts_open(fn, "rb")) == 0) return -2; if (n_threads) hts_set_threads(fp, n_threads); if ( fp->format.compression!=bgzf ) { hts_close(fp); return -3; } switch (fp->format.format) { case bcf: if (!min_shift) { hts_log_error("TBI indices for BCF files are not supported"); ret = -1; } else { idx = bcf_index(fp, min_shift); if (idx) { ret = hts_idx_save_as(idx, fn, fnidx, HTS_FMT_CSI); if (ret < 0) ret = -4; hts_idx_destroy(idx); } else ret = -1; } break; case vcf: tbx = tbx_index(hts_get_bgzfp(fp), min_shift, &tbx_conf_vcf); if (tbx) { ret = hts_idx_save_as(tbx->idx, fn, fnidx, min_shift > 0 ? HTS_FMT_CSI : HTS_FMT_TBI); if (ret < 0) ret = -4; tbx_destroy(tbx); } else ret = -1; break; default: ret = -3; break; } hts_close(fp); return ret; } int bcf_index_build2(const char *fn, const char *fnidx, int min_shift) { return bcf_index_build3(fn, fnidx, min_shift, 0); } int bcf_index_build(const char *fn, int min_shift) { return bcf_index_build3(fn, NULL, min_shift, 0); } // Initialise fp->idx for the current format type. // This must be called after the header has been written but no other data. static int vcf_idx_init(htsFile *fp, bcf_hdr_t *h, int min_shift, const char *fnidx) { int n_lvls, fmt; if (min_shift == 0) { min_shift = 14; n_lvls = 5; fmt = HTS_FMT_TBI; } else { // Set initial n_lvls to match tbx_index() int starting_n_lvls = (TBX_MAX_SHIFT - min_shift + 2) / 3; // Increase if necessary n_lvls = idx_calc_n_lvls_ids(h, min_shift, starting_n_lvls, NULL); fmt = HTS_FMT_CSI; } fp->idx = hts_idx_init(0, fmt, bgzf_tell(fp->fp.bgzf), min_shift, n_lvls); if (!fp->idx) return -1; // Tabix meta data, added even in CSI for VCF uint8_t conf[4*7]; u32_to_le(TBX_VCF, conf+0); // fmt u32_to_le(1, conf+4); // name col u32_to_le(2, conf+8); // beg col u32_to_le(0, conf+12); // end col u32_to_le('#', conf+16); // comment u32_to_le(0, conf+20); // n.skip u32_to_le(0, conf+24); // ref name len if (hts_idx_set_meta(fp->idx, sizeof(conf)*sizeof(*conf), (uint8_t *)conf, 1) < 0) { hts_idx_destroy(fp->idx); fp->idx = NULL; return -1; } fp->fnidx = fnidx; return 0; } // Initialise fp->idx for the current format type. // This must be called after the header has been written but no other data. 
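/*
 * A minimal usage sketch for on-the-fly indexing while writing, following
 * the call order described in the comments here and on bcf_idx_save()
 * below.  The file names, the min_shift value and the omitted error
 * handling are illustrative only:
 *
 *     htsFile *out = hts_open("out.bcf", "wb");            // or "wz" for bgzf VCF
 *     if (bcf_hdr_write(out, hdr) < 0) { ... }
 *     if (bcf_idx_init(out, hdr, 14, "out.bcf.csi") < 0) { ... }  // right after the header
 *     while (have_more_records)                            // caller's own loop
 *         if (bcf_write(out, hdr, rec) < 0) { ... }
 *     if (bcf_idx_save(out) < 0) { ... }                   // after the last record
 *     hts_close(out);
 */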
int bcf_idx_init(htsFile *fp, bcf_hdr_t *h, int min_shift, const char *fnidx) { int n_lvls, nids = 0; if (fp->format.format == vcf) return vcf_idx_init(fp, h, min_shift, fnidx); if (!min_shift) min_shift = 14; n_lvls = idx_calc_n_lvls_ids(h, min_shift, 0, &nids); fp->idx = hts_idx_init(nids, HTS_FMT_CSI, bgzf_tell(fp->fp.bgzf), min_shift, n_lvls); if (!fp->idx) return -1; fp->fnidx = fnidx; return 0; } // Finishes an index. Call afer the last record has been written. // Returns 0 on success, <0 on failure. // // NB: same format as SAM/BAM as it uses bgzf. int bcf_idx_save(htsFile *fp) { return sam_idx_save(fp); } /***************** *** Utilities *** *****************/ int bcf_hdr_combine(bcf_hdr_t *dst, const bcf_hdr_t *src) { int i, ndst_ori = dst->nhrec, need_sync = 0, ret = 0, res; for (i=0; i<src->nhrec; i++) { if ( src->hrec[i]->type==BCF_HL_GEN && src->hrec[i]->value ) { int j; for (j=0; j<ndst_ori; j++) { if ( dst->hrec[j]->type!=BCF_HL_GEN ) continue; // Checking only the key part of generic lines, otherwise // the VCFs are too verbose. Should we perhaps add a flag // to bcf_hdr_combine() and make this optional? if ( !strcmp(src->hrec[i]->key,dst->hrec[j]->key) ) break; } if ( j>=ndst_ori ) { res = bcf_hdr_add_hrec(dst, bcf_hrec_dup(src->hrec[i])); if (res < 0) return -1; need_sync += res; } } else if ( src->hrec[i]->type==BCF_HL_STR ) { // NB: we are ignoring fields without ID int j = bcf_hrec_find_key(src->hrec[i],"ID"); if ( j>=0 ) { bcf_hrec_t *rec = bcf_hdr_get_hrec(dst, src->hrec[i]->type, "ID", src->hrec[i]->vals[j], src->hrec[i]->key); if ( !rec ) { res = bcf_hdr_add_hrec(dst, bcf_hrec_dup(src->hrec[i])); if (res < 0) return -1; need_sync += res; } } } else { int j = bcf_hrec_find_key(src->hrec[i],"ID"); assert( j>=0 ); // this should always be true for valid VCFs bcf_hrec_t *rec = bcf_hdr_get_hrec(dst, src->hrec[i]->type, "ID", src->hrec[i]->vals[j], NULL); if ( !rec ) { res = bcf_hdr_add_hrec(dst, bcf_hrec_dup(src->hrec[i])); if (res < 0) return -1; need_sync += res; } else if ( src->hrec[i]->type==BCF_HL_INFO || src->hrec[i]->type==BCF_HL_FMT ) { // Check that both records are of the same type. The bcf_hdr_id2length // macro cannot be used here because dst header is not synced yet. 
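/*
 * The packed info word is compared nibble-wise below: bits 8-11 hold the
 * Number= (length) code and bits 4-7 the Type= code of the tag definition,
 * mirroring the bcf_hdr_id2length()/bcf_hdr_id2type() macros.  A mismatch
 * between src and dst only emits a warning and sets the low bit of the
 * return value; the headers are still combined.
 */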
vdict_t *d_src = (vdict_t*)src->dict[BCF_DT_ID]; vdict_t *d_dst = (vdict_t*)dst->dict[BCF_DT_ID]; khint_t k_src = kh_get(vdict, d_src, src->hrec[i]->vals[0]); khint_t k_dst = kh_get(vdict, d_dst, src->hrec[i]->vals[0]); if ( (kh_val(d_src,k_src).info[rec->type]>>8 & 0xf) != (kh_val(d_dst,k_dst).info[rec->type]>>8 & 0xf) ) { hts_log_warning("Trying to combine \"%s\" tag definitions of different lengths", src->hrec[i]->vals[0]); ret |= 1; } if ( (kh_val(d_src,k_src).info[rec->type]>>4 & 0xf) != (kh_val(d_dst,k_dst).info[rec->type]>>4 & 0xf) ) { hts_log_warning("Trying to combine \"%s\" tag definitions of different types", src->hrec[i]->vals[0]); ret |= 1; } } } } if ( need_sync ) { if (bcf_hdr_sync(dst) < 0) return -1; } return ret; } bcf_hdr_t *bcf_hdr_merge(bcf_hdr_t *dst, const bcf_hdr_t *src) { if ( !dst ) { // this will effectively strip existing IDX attributes from src to become dst dst = bcf_hdr_init("r"); kstring_t htxt = {0,0,0}; bcf_hdr_format(src, 0, &htxt); if ( bcf_hdr_parse(dst, htxt.s) < 0 ) { bcf_hdr_destroy(dst); dst = NULL; } free(htxt.s); return dst; } int i, ndst_ori = dst->nhrec, need_sync = 0, ret = 0, res; for (i=0; i<src->nhrec; i++) { if ( src->hrec[i]->type==BCF_HL_GEN && src->hrec[i]->value ) { int j; for (j=0; j<ndst_ori; j++) { if ( dst->hrec[j]->type!=BCF_HL_GEN ) continue; // Checking only the key part of generic lines, otherwise // the VCFs are too verbose. Should we perhaps add a flag // to bcf_hdr_combine() and make this optional? if ( !strcmp(src->hrec[i]->key,dst->hrec[j]->key) ) break; } if ( j>=ndst_ori ) { res = bcf_hdr_add_hrec(dst, bcf_hrec_dup(src->hrec[i])); if (res < 0) return NULL; need_sync += res; } } else if ( src->hrec[i]->type==BCF_HL_STR ) { // NB: we are ignoring fields without ID int j = bcf_hrec_find_key(src->hrec[i],"ID"); if ( j>=0 ) { bcf_hrec_t *rec = bcf_hdr_get_hrec(dst, src->hrec[i]->type, "ID", src->hrec[i]->vals[j], src->hrec[i]->key); if ( !rec ) { res = bcf_hdr_add_hrec(dst, bcf_hrec_dup(src->hrec[i])); if (res < 0) return NULL; need_sync += res; } } } else { int j = bcf_hrec_find_key(src->hrec[i],"ID"); assert( j>=0 ); // this should always be true for valid VCFs bcf_hrec_t *rec = bcf_hdr_get_hrec(dst, src->hrec[i]->type, "ID", src->hrec[i]->vals[j], NULL); if ( !rec ) { res = bcf_hdr_add_hrec(dst, bcf_hrec_dup(src->hrec[i])); if (res < 0) return NULL; need_sync += res; } else if ( src->hrec[i]->type==BCF_HL_INFO || src->hrec[i]->type==BCF_HL_FMT ) { // Check that both records are of the same type. The bcf_hdr_id2length // macro cannot be used here because dst header is not synced yet. 
vdict_t *d_src = (vdict_t*)src->dict[BCF_DT_ID]; vdict_t *d_dst = (vdict_t*)dst->dict[BCF_DT_ID]; khint_t k_src = kh_get(vdict, d_src, src->hrec[i]->vals[0]); khint_t k_dst = kh_get(vdict, d_dst, src->hrec[i]->vals[0]); if ( (kh_val(d_src,k_src).info[rec->type]>>8 & 0xf) != (kh_val(d_dst,k_dst).info[rec->type]>>8 & 0xf) ) { hts_log_warning("Trying to combine \"%s\" tag definitions of different lengths", src->hrec[i]->vals[0]); ret |= 1; } if ( (kh_val(d_src,k_src).info[rec->type]>>4 & 0xf) != (kh_val(d_dst,k_dst).info[rec->type]>>4 & 0xf) ) { hts_log_warning("Trying to combine \"%s\" tag definitions of different types", src->hrec[i]->vals[0]); ret |= 1; } } } } if ( need_sync ) { if (bcf_hdr_sync(dst) < 0) return NULL; } return dst; } int bcf_translate(const bcf_hdr_t *dst_hdr, bcf_hdr_t *src_hdr, bcf1_t *line) { int i; if ( line->errcode ) { hts_log_error("Unchecked error (%d) at %s:%"PRIhts_pos", exiting", line->errcode, bcf_seqname_safe(src_hdr,line), line->pos+1); exit(1); } if ( src_hdr->ntransl==-1 ) return 0; // no need to translate, all tags have the same id if ( !src_hdr->ntransl ) // called for the first time, see what needs translating { int dict; for (dict=0; dict<2; dict++) // BCF_DT_ID and BCF_DT_CTG { src_hdr->transl[dict] = (int*) malloc(src_hdr->n[dict]*sizeof(int)); for (i=0; i<src_hdr->n[dict]; i++) { if ( !src_hdr->id[dict][i].key ) // gap left after removed BCF header lines { src_hdr->transl[dict][i] = -1; continue; } src_hdr->transl[dict][i] = bcf_hdr_id2int(dst_hdr,dict,src_hdr->id[dict][i].key); if ( src_hdr->transl[dict][i]!=-1 && i!=src_hdr->transl[dict][i] ) src_hdr->ntransl++; } } if ( !src_hdr->ntransl ) { free(src_hdr->transl[0]); src_hdr->transl[0] = NULL; free(src_hdr->transl[1]); src_hdr->transl[1] = NULL; src_hdr->ntransl = -1; } if ( src_hdr->ntransl==-1 ) return 0; } bcf_unpack(line,BCF_UN_ALL); // CHROM if ( src_hdr->transl[BCF_DT_CTG][line->rid] >=0 ) line->rid = src_hdr->transl[BCF_DT_CTG][line->rid]; // FILTER for (i=0; i<line->d.n_flt; i++) { int src_id = line->d.flt[i]; if ( src_hdr->transl[BCF_DT_ID][src_id] >=0 ) line->d.flt[i] = src_hdr->transl[BCF_DT_ID][src_id]; line->d.shared_dirty |= BCF1_DIRTY_FLT; } // INFO for (i=0; i<line->n_info; i++) { int src_id = line->d.info[i].key; int dst_id = src_hdr->transl[BCF_DT_ID][src_id]; if ( dst_id<0 ) continue; line->d.info[i].key = dst_id; if ( !line->d.info[i].vptr ) continue; // skip deleted int src_size = src_id>>7 ? ( src_id>>15 ? BCF_BT_INT32 : BCF_BT_INT16) : BCF_BT_INT8; int dst_size = dst_id>>7 ? ( dst_id>>15 ? 
BCF_BT_INT32 : BCF_BT_INT16) : BCF_BT_INT8; if ( src_size==dst_size ) // can overwrite { uint8_t *vptr = line->d.info[i].vptr - line->d.info[i].vptr_off; if ( dst_size==BCF_BT_INT8 ) { vptr[1] = (uint8_t)dst_id; } else if ( dst_size==BCF_BT_INT16 ) { *(uint16_t*)vptr = (uint16_t)dst_id; } else { *(uint32_t*)vptr = (uint32_t)dst_id; } } else // must realloc { bcf_info_t *info = &line->d.info[i]; kstring_t str = {0,0,0}; bcf_enc_int1(&str, dst_id); bcf_enc_size(&str, info->len,info->type); uint32_t vptr_off = str.l; kputsn((char*)info->vptr, info->vptr_len, &str); if( info->vptr_free ) free(info->vptr - info->vptr_off); info->vptr_off = vptr_off; info->vptr = (uint8_t*)str.s + info->vptr_off; info->vptr_free = 1; line->d.shared_dirty |= BCF1_DIRTY_INF; } } // FORMAT for (i=0; i<line->n_fmt; i++) { int src_id = line->d.fmt[i].id; int dst_id = src_hdr->transl[BCF_DT_ID][src_id]; if ( dst_id<0 ) continue; line->d.fmt[i].id = dst_id; if( !line->d.fmt[i].p ) continue; // skip deleted int src_size = src_id>>7 ? ( src_id>>15 ? BCF_BT_INT32 : BCF_BT_INT16) : BCF_BT_INT8; int dst_size = dst_id>>7 ? ( dst_id>>15 ? BCF_BT_INT32 : BCF_BT_INT16) : BCF_BT_INT8; if ( src_size==dst_size ) // can overwrite { uint8_t *p = line->d.fmt[i].p - line->d.fmt[i].p_off; // pointer to the vector size (4bits) and BT type (4bits) if ( dst_size==BCF_BT_INT8 ) { p[1] = dst_id; } else if ( dst_size==BCF_BT_INT16 ) { i16_to_le(dst_id, p + 1); } else { i32_to_le(dst_id, p + 1); } } else // must realloc { bcf_fmt_t *fmt = &line->d.fmt[i]; kstring_t str = {0,0,0}; bcf_enc_int1(&str, dst_id); bcf_enc_size(&str, fmt->n, fmt->type); uint32_t p_off = str.l; kputsn((char*)fmt->p, fmt->p_len, &str); if( fmt->p_free ) free(fmt->p - fmt->p_off); fmt->p_off = p_off; fmt->p = (uint8_t*)str.s + fmt->p_off; fmt->p_free = 1; line->d.indiv_dirty = 1; } } return 0; } bcf_hdr_t *bcf_hdr_dup(const bcf_hdr_t *hdr) { bcf_hdr_t *hout = bcf_hdr_init("r"); if (!hout) { hts_log_error("Failed to allocate bcf header"); return NULL; } kstring_t htxt = {0,0,0}; bcf_hdr_format(hdr, 1, &htxt); if ( bcf_hdr_parse(hout, htxt.s) < 0 ) { bcf_hdr_destroy(hout); hout = NULL; } free(htxt.s); return hout; } bcf_hdr_t *bcf_hdr_subset(const bcf_hdr_t *h0, int n, char *const* samples, int *imap) { void *names_hash = khash_str2int_init(); kstring_t htxt = {0,0,0}; kstring_t str = {0,0,0}; bcf_hdr_t *h = bcf_hdr_init("w"); if (!h) { hts_log_error("Failed to allocate bcf header"); khash_str2int_destroy(names_hash); return NULL; } bcf_hdr_format(h0, 1, &htxt); bcf_hdr_set_version(h,bcf_hdr_get_version(h0)); int j; for (j=0; j<n; j++) imap[j] = -1; if ( bcf_hdr_nsamples(h0) > 0) { char *p = find_chrom_header_line(htxt.s); int i = 0, end = n? 
8 : 7; while ((p = strchr(p, '\t')) != 0 && i < end) ++i, ++p; if (i != end) { free(h); free(str.s); return 0; // malformated header } kputsn(htxt.s, p - htxt.s, &str); for (i = 0; i < n; ++i) { if ( khash_str2int_has_key(names_hash,samples[i]) ) { hts_log_error("Duplicate sample name \"%s\"", samples[i]); free(str.s); free(htxt.s); khash_str2int_destroy(names_hash); bcf_hdr_destroy(h); return NULL; } imap[i] = bcf_hdr_id2int(h0, BCF_DT_SAMPLE, samples[i]); if (imap[i] < 0) continue; kputc('\t', &str); kputs(samples[i], &str); khash_str2int_inc(names_hash,samples[i]); } } else kputsn(htxt.s, htxt.l, &str); while (str.l && (!str.s[str.l-1] || str.s[str.l-1]=='\n') ) str.l--; // kill trailing zeros and newlines kputc('\n',&str); if ( bcf_hdr_parse(h, str.s) < 0 ) { bcf_hdr_destroy(h); h = NULL; } free(str.s); free(htxt.s); khash_str2int_destroy(names_hash); return h; } int bcf_hdr_set_samples(bcf_hdr_t *hdr, const char *samples, int is_file) { if ( samples && !strcmp("-",samples) ) return 0; // keep all samples int i, narr = bit_array_size(bcf_hdr_nsamples(hdr)); hdr->keep_samples = (uint8_t*) calloc(narr,1); if (!hdr->keep_samples) return -1; hdr->nsamples_ori = bcf_hdr_nsamples(hdr); if ( !samples ) { // exclude all samples khint_t k; vdict_t *d = (vdict_t*)hdr->dict[BCF_DT_SAMPLE], *new_dict; new_dict = kh_init(vdict); if (!new_dict) return -1; bcf_hdr_nsamples(hdr) = 0; for (k = kh_begin(d); k != kh_end(d); ++k) if (kh_exist(d, k)) free((char*)kh_key(d, k)); kh_destroy(vdict, d); hdr->dict[BCF_DT_SAMPLE] = new_dict; if (bcf_hdr_sync(hdr) < 0) return -1; return 0; } if ( samples[0]=='^' ) for (i=0; i<bcf_hdr_nsamples(hdr); i++) bit_array_set(hdr->keep_samples,i); int idx, n, ret = 0; char **smpls = hts_readlist(samples[0]=='^'?samples+1:samples, is_file, &n); if ( !smpls ) return -1; for (i=0; i<n; i++) { idx = bcf_hdr_id2int(hdr,BCF_DT_SAMPLE,smpls[i]); if ( idx<0 ) { if ( !ret ) ret = i+1; continue; } assert( idx<bcf_hdr_nsamples(hdr) ); if ( samples[0]=='^' ) bit_array_clear(hdr->keep_samples, idx); else bit_array_set(hdr->keep_samples, idx); } for (i=0; i<n; i++) free(smpls[i]); free(smpls); bcf_hdr_nsamples(hdr) = 0; for (i=0; i<hdr->nsamples_ori; i++) if ( bit_array_test(hdr->keep_samples,i) ) bcf_hdr_nsamples(hdr)++; if ( !bcf_hdr_nsamples(hdr) ) { free(hdr->keep_samples); hdr->keep_samples=NULL; } else { // Make new list and dictionary with desired samples char **samples = (char**) malloc(sizeof(char*)*bcf_hdr_nsamples(hdr)); vdict_t *new_dict, *d; int k, res; if (!samples) return -1; new_dict = kh_init(vdict); if (!new_dict) { free(samples); return -1; } idx = 0; for (i=0; i<hdr->nsamples_ori; i++) { if ( bit_array_test(hdr->keep_samples,i) ) { samples[idx] = hdr->samples[i]; k = kh_put(vdict, new_dict, hdr->samples[i], &res); if (res < 0) { free(samples); kh_destroy(vdict, new_dict); return -1; } kh_val(new_dict, k) = bcf_idinfo_def; kh_val(new_dict, k).id = idx; idx++; } } // Delete desired samples from old dictionary, so we don't free them d = (vdict_t*)hdr->dict[BCF_DT_SAMPLE]; for (i=0; i < idx; i++) { int k = kh_get(vdict, d, samples[i]); if (k < kh_end(d)) kh_del(vdict, d, k); } // Free everything else for (k = kh_begin(d); k != kh_end(d); ++k) if (kh_exist(d, k)) free((char*)kh_key(d, k)); kh_destroy(vdict, d); hdr->dict[BCF_DT_SAMPLE] = new_dict; free(hdr->samples); hdr->samples = samples; if (bcf_hdr_sync(hdr) < 0) return -1; } return ret; } int bcf_subset(const bcf_hdr_t *h, bcf1_t *v, int n, int *imap) { kstring_t ind; ind.s = 0; ind.l = ind.m = 0; if (n) { bcf_fmt_t 
fmt[MAX_N_FMT]; int i, j; uint8_t *ptr = (uint8_t*)v->indiv.s; for (i = 0; i < v->n_fmt; ++i) ptr = bcf_unpack_fmt_core1(ptr, v->n_sample, &fmt[i]); for (i = 0; i < (int)v->n_fmt; ++i) { bcf_fmt_t *f = &fmt[i]; bcf_enc_int1(&ind, f->id); bcf_enc_size(&ind, f->n, f->type); for (j = 0; j < n; ++j) if (imap[j] >= 0) kputsn((char*)(f->p + imap[j] * f->size), f->size, &ind); } for (i = j = 0; j < n; ++j) if (imap[j] >= 0) ++i; v->n_sample = i; } else v->n_sample = 0; if ( !v->n_sample ) v->n_fmt = 0; free(v->indiv.s); v->indiv = ind; v->unpacked &= ~BCF_UN_FMT; // only BCF is ready for output, VCF will need to unpack again return 0; } int bcf_is_snp(bcf1_t *v) { int i; bcf_unpack(v, BCF_UN_STR); for (i = 0; i < v->n_allele; ++i) { if ( v->d.allele[i][1]==0 && v->d.allele[i][0]!='*' ) continue; // mpileup's <X> allele, see also below. This is not completely satisfactory, // a general library is here narrowly tailored to fit samtools. if ( v->d.allele[i][0]=='<' && v->d.allele[i][1]=='X' && v->d.allele[i][2]=='>' ) continue; if ( v->d.allele[i][0]=='<' && v->d.allele[i][1]=='*' && v->d.allele[i][2]=='>' ) continue; break; } return i == v->n_allele; } static void bcf_set_variant_type(const char *ref, const char *alt, variant_t *var) { if ( *alt == '*' && !alt[1] ) { var->n = 0; var->type = VCF_OVERLAP; return; } // overlapping variant // The most frequent case if ( !ref[1] && !alt[1] ) { if ( *alt == '.' || *ref==*alt ) { var->n = 0; var->type = VCF_REF; return; } if ( *alt == 'X' ) { var->n = 0; var->type = VCF_REF; return; } // mpileup's X allele shouldn't be treated as variant var->n = 1; var->type = VCF_SNP; return; } if ( alt[0]=='<' ) { if ( alt[1]=='X' && alt[2]=='>' ) { var->n = 0; var->type = VCF_REF; return; } // mpileup's X allele shouldn't be treated as variant if ( alt[1]=='*' && alt[2]=='>' ) { var->n = 0; var->type = VCF_REF; return; } if ( !strcmp("NON_REF>",alt+1) ) { var->n = 0; var->type = VCF_REF; return; } var->type = VCF_OTHER; return; } const char *r = ref, *a = alt; while (*r && *a && toupper_c(*r)==toupper_c(*a) ) { r++; a++; } // unfortunately, matching REF,ALT case is not guaranteed if ( *a && !*r ) { if ( *a==']' || *a=='[' ) { var->type = VCF_BND; return; } while ( *a ) a++; var->n = (a-alt)-(r-ref); var->type = VCF_INDEL; return; } else if ( *r && !*a ) { while ( *r ) r++; var->n = (a-alt)-(r-ref); var->type = VCF_INDEL; return; } else if ( !*r && !*a ) { var->n = 0; var->type = VCF_REF; return; } const char *re = r, *ae = a; while ( re[1] ) re++; while ( ae[1] ) ae++; while ( re>r && ae>a && toupper_c(*re)==toupper_c(*ae) ) { re--; ae--; } if ( ae==a ) { if ( re==r ) { var->n = 1; var->type = VCF_SNP; return; } var->n = -(re-r); if ( toupper_c(*re)==toupper_c(*ae) ) { var->type = VCF_INDEL; return; } var->type = VCF_OTHER; return; } else if ( re==r ) { var->n = ae-a; if ( toupper_c(*re)==toupper_c(*ae) ) { var->type = VCF_INDEL; return; } var->type = VCF_OTHER; return; } var->type = ( re-r == ae-a ) ? VCF_MNP : VCF_OTHER; var->n = ( re-r > ae-a ) ? -(re-r+1) : ae-a+1; // should do also complex events, SVs, etc... 
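/*
 * A few cases traced through the classification above, for reference:
 *   REF=A  ALT=G   ->  VCF_SNP,   n = 1
 *   REF=AC ALT=A   ->  VCF_INDEL, n = -1   (one base deleted)
 *   REF=A  ALT=AT  ->  VCF_INDEL, n = 1    (one base inserted)
 *   REF=AT ALT=GC  ->  VCF_MNP,   n = 2
 *   ALT="*"        ->  VCF_OVERLAP
 *   symbolic ALTs other than <X>, <*> and <NON_REF> -> VCF_OTHER
 */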
} static int bcf_set_variant_types(bcf1_t *b) { if ( !(b->unpacked & BCF_UN_STR) ) bcf_unpack(b, BCF_UN_STR); bcf_dec_t *d = &b->d; if ( d->n_var < b->n_allele ) { d->var = (variant_t *) realloc(d->var, sizeof(variant_t)*b->n_allele); d->n_var = b->n_allele; } int i; b->d.var_type = 0; d->var[0].type = VCF_REF; d->var[0].n = 0; for (i=1; i<b->n_allele; i++) { bcf_set_variant_type(d->allele[0],d->allele[i], &d->var[i]); b->d.var_type |= d->var[i].type; //fprintf(stderr,"[set_variant_type] %d %s %s -> %d %d .. %d\n", b->pos+1,d->allele[0],d->allele[i],d->var[i].type,d->var[i].n, b->d.var_type); } return 0; } int bcf_get_variant_types(bcf1_t *rec) { if ( rec->d.var_type==-1 ) bcf_set_variant_types(rec); return rec->d.var_type; } int bcf_get_variant_type(bcf1_t *rec, int ith_allele) { if ( rec->d.var_type==-1 ) bcf_set_variant_types(rec); return rec->d.var[ith_allele].type; } int bcf_update_info(const bcf_hdr_t *hdr, bcf1_t *line, const char *key, const void *values, int n, int type) { static int negative_rlen_warned = 0; int is_end_tag; // Is the field already present? int i, inf_id = bcf_hdr_id2int(hdr,BCF_DT_ID,key); if ( !bcf_hdr_idinfo_exists(hdr,BCF_HL_INFO,inf_id) ) return -1; // No such INFO field in the header if ( !(line->unpacked & BCF_UN_INFO) ) bcf_unpack(line, BCF_UN_INFO); is_end_tag = strcmp(key, "END") == 0; for (i=0; i<line->n_info; i++) if ( inf_id==line->d.info[i].key ) break; bcf_info_t *inf = i==line->n_info ? NULL : &line->d.info[i]; if ( !n || (type==BCF_HT_STR && !values) ) { if ( n==0 && is_end_tag ) line->rlen = line->n_allele ? strlen(line->d.allele[0]) : 0; if ( inf ) { // Mark the tag for removal, free existing memory if necessary if ( inf->vptr_free ) { free(inf->vptr - inf->vptr_off); inf->vptr_free = 0; } line->d.shared_dirty |= BCF1_DIRTY_INF; inf->vptr = NULL; inf->vptr_off = inf->vptr_len = 0; } return 0; } if (is_end_tag) { if (n != 1) { hts_log_error("END info tag should only have one value at %s:%"PRIhts_pos, bcf_seqname_safe(hdr,line), line->pos+1); line->errcode |= BCF_ERR_TAG_INVALID; return -1; } if (type != BCF_HT_INT && type != BCF_HT_LONG) { hts_log_error("Wrong type (%d) for END info tag at %s:%"PRIhts_pos, type, bcf_seqname_safe(hdr,line), line->pos+1); line->errcode |= BCF_ERR_TAG_INVALID; return -1; } } // Encode the values and determine the size required to accommodate the values kstring_t str = {0,0,0}; bcf_enc_int1(&str, inf_id); if ( type==BCF_HT_INT ) bcf_enc_vint(&str, n, (int32_t*)values, -1); else if ( type==BCF_HT_REAL ) bcf_enc_vfloat(&str, n, (float*)values); else if ( type==BCF_HT_FLAG || type==BCF_HT_STR ) { if ( values==NULL ) bcf_enc_size(&str, 0, BCF_BT_NULL); else bcf_enc_vchar(&str, strlen((char*)values), (char*)values); } #ifdef VCF_ALLOW_INT64 else if ( type==BCF_HT_LONG ) { if (n != 1) { hts_log_error("Only storing a single BCF_HT_LONG value is supported at %s:%"PRIhts_pos, bcf_seqname_safe(hdr,line), line->pos+1); abort(); } bcf_enc_long1(&str, *(int64_t *) values); } #endif else { hts_log_error("The type %d not implemented yet at %s:%"PRIhts_pos, type, bcf_seqname_safe(hdr,line), line->pos+1); abort(); } // Is the INFO tag already present if ( inf ) { // Is it big enough to accommodate new block? 
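/*
 * Two cases below: if the freshly encoded block (typed key + value, str.l
 * bytes) fits inside the existing one (vptr_off header bytes plus vptr_len
 * payload), it is copied over it in place and the original vptr_free
 * ownership flag is kept; otherwise the entry is re-pointed at the newly
 * allocated kstring buffer and vptr_free is set so the record knows to
 * free that block later.
 */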
if ( str.l <= inf->vptr_len + inf->vptr_off ) { if ( str.l != inf->vptr_len + inf->vptr_off ) line->d.shared_dirty |= BCF1_DIRTY_INF; uint8_t *ptr = inf->vptr - inf->vptr_off; memcpy(ptr, str.s, str.l); free(str.s); int vptr_free = inf->vptr_free; bcf_unpack_info_core1(ptr, inf); inf->vptr_free = vptr_free; } else { if ( inf->vptr_free ) free(inf->vptr - inf->vptr_off); bcf_unpack_info_core1((uint8_t*)str.s, inf); inf->vptr_free = 1; line->d.shared_dirty |= BCF1_DIRTY_INF; } } else { // The tag is not present, create new one line->n_info++; hts_expand0(bcf_info_t, line->n_info, line->d.m_info , line->d.info); inf = &line->d.info[line->n_info-1]; bcf_unpack_info_core1((uint8_t*)str.s, inf); inf->vptr_free = 1; line->d.shared_dirty |= BCF1_DIRTY_INF; } line->unpacked |= BCF_UN_INFO; if ( n==1 && is_end_tag) { hts_pos_t end = type == BCF_HT_INT ? *(int32_t *) values : *(int64_t *) values; if ( (type == BCF_HT_INT && end!=bcf_int32_missing) || (type == BCF_HT_LONG && end!=bcf_int64_missing) ) { if ( end <= line->pos ) { if ( !negative_rlen_warned ) { hts_log_warning("INFO/END=%"PRIhts_pos" is smaller than POS at %s:%"PRIhts_pos,end,bcf_seqname_safe(hdr,line),line->pos+1); negative_rlen_warned = 1; } line->rlen = line->n_allele ? strlen(line->d.allele[0]) : 0; } else line->rlen = end - line->pos; } } return 0; } int bcf_update_format_string(const bcf_hdr_t *hdr, bcf1_t *line, const char *key, const char **values, int n) { if ( !n ) return bcf_update_format(hdr,line,key,NULL,0,BCF_HT_STR); int i, max_len = 0; for (i=0; i<n; i++) { int len = strlen(values[i]); if ( len > max_len ) max_len = len; } char *out = (char*) malloc(max_len*n); if ( !out ) return -2; for (i=0; i<n; i++) { char *dst = out+i*max_len; const char *src = values[i]; int j = 0; while ( src[j] ) { dst[j] = src[j]; j++; } for (; j<max_len; j++) dst[j] = 0; } int ret = bcf_update_format(hdr,line,key,out,max_len*n,BCF_HT_STR); free(out); return ret; } int bcf_update_format(const bcf_hdr_t *hdr, bcf1_t *line, const char *key, const void *values, int n, int type) { // Is the field already present? int i, fmt_id = bcf_hdr_id2int(hdr,BCF_DT_ID,key); if ( !bcf_hdr_idinfo_exists(hdr,BCF_HL_FMT,fmt_id) ) { if ( !n ) return 0; return -1; // the key not present in the header } if ( !(line->unpacked & BCF_UN_FMT) ) bcf_unpack(line, BCF_UN_FMT); for (i=0; i<line->n_fmt; i++) if ( line->d.fmt[i].id==fmt_id ) break; bcf_fmt_t *fmt = i==line->n_fmt ? 
NULL : &line->d.fmt[i]; if ( !n ) { if ( fmt ) { // Mark the tag for removal, free existing memory if necessary if ( fmt->p_free ) { free(fmt->p - fmt->p_off); fmt->p_free = 0; } line->d.indiv_dirty = 1; fmt->p = NULL; } return 0; } line->n_sample = bcf_hdr_nsamples(hdr); int nps = n / line->n_sample; // number of values per sample assert( nps && nps*line->n_sample==n ); // must be divisible by n_sample // Encode the values and determine the size required to accommodate the values kstring_t str = {0,0,0}; bcf_enc_int1(&str, fmt_id); assert(values != NULL); if ( type==BCF_HT_INT ) bcf_enc_vint(&str, n, (int32_t*)values, nps); else if ( type==BCF_HT_REAL ) { bcf_enc_size(&str, nps, BCF_BT_FLOAT); serialize_float_array(&str, nps*line->n_sample, (float *) values); } else if ( type==BCF_HT_STR ) { bcf_enc_size(&str, nps, BCF_BT_CHAR); kputsn((char*)values, nps*line->n_sample, &str); } else { hts_log_error("The type %d not implemented yet at %s:%"PRIhts_pos, type, bcf_seqname_safe(hdr,line), line->pos+1); abort(); } if ( !fmt ) { // Not present, new format field line->n_fmt++; hts_expand0(bcf_fmt_t, line->n_fmt, line->d.m_fmt, line->d.fmt); // Special case: VCF specification requires that GT is always first if ( line->n_fmt > 1 && key[0]=='G' && key[1]=='T' && !key[2] ) { for (i=line->n_fmt-1; i>0; i--) line->d.fmt[i] = line->d.fmt[i-1]; fmt = &line->d.fmt[0]; } else fmt = &line->d.fmt[line->n_fmt-1]; bcf_unpack_fmt_core1((uint8_t*)str.s, line->n_sample, fmt); line->d.indiv_dirty = 1; fmt->p_free = 1; } else { // The tag is already present, check if it is big enough to accomodate the new block if ( str.l <= fmt->p_len + fmt->p_off ) { // good, the block is big enough if ( str.l != fmt->p_len + fmt->p_off ) line->d.indiv_dirty = 1; uint8_t *ptr = fmt->p - fmt->p_off; memcpy(ptr, str.s, str.l); free(str.s); int p_free = fmt->p_free; bcf_unpack_fmt_core1(ptr, line->n_sample, fmt); fmt->p_free = p_free; } else { if ( fmt->p_free ) free(fmt->p - fmt->p_off); bcf_unpack_fmt_core1((uint8_t*)str.s, line->n_sample, fmt); fmt->p_free = 1; line->d.indiv_dirty = 1; } } line->unpacked |= BCF_UN_FMT; return 0; } int bcf_update_filter(const bcf_hdr_t *hdr, bcf1_t *line, int *flt_ids, int n) { if ( !(line->unpacked & BCF_UN_FLT) ) bcf_unpack(line, BCF_UN_FLT); line->d.shared_dirty |= BCF1_DIRTY_FLT; line->d.n_flt = n; if ( !n ) return 0; hts_expand(int, line->d.n_flt, line->d.m_flt, line->d.flt); int i; for (i=0; i<n; i++) line->d.flt[i] = flt_ids[i]; return 0; } int bcf_add_filter(const bcf_hdr_t *hdr, bcf1_t *line, int flt_id) { if ( !(line->unpacked & BCF_UN_FLT) ) bcf_unpack(line, BCF_UN_FLT); int i; for (i=0; i<line->d.n_flt; i++) if ( flt_id==line->d.flt[i] ) break; if ( i<line->d.n_flt ) return 0; // this filter is already set line->d.shared_dirty |= BCF1_DIRTY_FLT; if ( flt_id==0 ) // set to PASS line->d.n_flt = 1; else if ( line->d.n_flt==1 && line->d.flt[0]==0 ) line->d.n_flt = 1; else line->d.n_flt++; hts_expand(int, line->d.n_flt, line->d.m_flt, line->d.flt); line->d.flt[line->d.n_flt-1] = flt_id; return 1; } int bcf_remove_filter(const bcf_hdr_t *hdr, bcf1_t *line, int flt_id, int pass) { if ( !(line->unpacked & BCF_UN_FLT) ) bcf_unpack(line, BCF_UN_FLT); int i; for (i=0; i<line->d.n_flt; i++) if ( flt_id==line->d.flt[i] ) break; if ( i==line->d.n_flt ) return 0; // the filter is not present line->d.shared_dirty |= BCF1_DIRTY_FLT; if ( i!=line->d.n_flt-1 ) memmove(line->d.flt+i,line->d.flt+i+1,(line->d.n_flt-i-1)*sizeof(*line->d.flt)); line->d.n_flt--; if ( !line->d.n_flt && pass ) 
bcf_add_filter(hdr,line,0); return 0; } int bcf_has_filter(const bcf_hdr_t *hdr, bcf1_t *line, char *filter) { if ( filter[0]=='.' && !filter[1] ) filter = "PASS"; int id = bcf_hdr_id2int(hdr, BCF_DT_ID, filter); if ( !bcf_hdr_idinfo_exists(hdr,BCF_HL_FLT,id) ) return -1; // not defined in the header if ( !(line->unpacked & BCF_UN_FLT) ) bcf_unpack(line, BCF_UN_FLT); if ( id==0 && !line->d.n_flt) return 1; // PASS int i; for (i=0; i<line->d.n_flt; i++) if ( line->d.flt[i]==id ) return 1; return 0; } static inline int _bcf1_sync_alleles(const bcf_hdr_t *hdr, bcf1_t *line, int nals) { line->d.shared_dirty |= BCF1_DIRTY_ALS; line->n_allele = nals; hts_expand(char*, line->n_allele, line->d.m_allele, line->d.allele); char *als = line->d.als; int n = 0; while (n<nals) { line->d.allele[n] = als; while ( *als ) als++; als++; n++; } // Update REF length. Note that END is 1-based while line->pos 0-based bcf_info_t *end_info = bcf_get_info(hdr,line,"END"); if ( end_info ) { if ( end_info->type==BCF_HT_INT && end_info->v1.i==bcf_int32_missing ) end_info = NULL; else if ( end_info->type==BCF_HT_LONG && end_info->v1.i==bcf_int64_missing ) end_info = NULL; } if ( end_info && end_info->v1.i > line->pos ) line->rlen = end_info->v1.i - line->pos; else if ( nals > 0 ) line->rlen = strlen(line->d.allele[0]); else line->rlen = 0; return 0; } int bcf_update_alleles(const bcf_hdr_t *hdr, bcf1_t *line, const char **alleles, int nals) { if ( !(line->unpacked & BCF_UN_STR) ) bcf_unpack(line, BCF_UN_STR); kstring_t tmp = {0,0,0}; char *free_old = NULL; // If the supplied alleles are not pointers to line->d.als, the existing block can be reused. int i; for (i=0; i<nals; i++) if ( alleles[i]>=line->d.als && alleles[i]<line->d.als+line->d.m_als ) break; if ( i==nals ) { // all alleles point elsewhere, reuse the existing block tmp.l = 0; tmp.s = line->d.als; tmp.m = line->d.m_als; } else free_old = line->d.als; for (i=0; i<nals; i++) { kputs(alleles[i], &tmp); kputc(0, &tmp); } line->d.als = tmp.s; line->d.m_als = tmp.m; free(free_old); return _bcf1_sync_alleles(hdr,line,nals); } int bcf_update_alleles_str(const bcf_hdr_t *hdr, bcf1_t *line, const char *alleles_string) { if ( !(line->unpacked & BCF_UN_STR) ) bcf_unpack(line, BCF_UN_STR); kstring_t tmp; tmp.l = 0; tmp.s = line->d.als; tmp.m = line->d.m_als; kputs(alleles_string, &tmp); line->d.als = tmp.s; line->d.m_als = tmp.m; int nals = 1; char *t = line->d.als; while (*t) { if ( *t==',' ) { *t = 0; nals++; } t++; } return _bcf1_sync_alleles(hdr, line, nals); } int bcf_update_id(const bcf_hdr_t *hdr, bcf1_t *line, const char *id) { if ( !(line->unpacked & BCF_UN_STR) ) bcf_unpack(line, BCF_UN_STR); kstring_t tmp; tmp.l = 0; tmp.s = line->d.id; tmp.m = line->d.m_id; if ( id ) kputs(id, &tmp); else kputs(".", &tmp); line->d.id = tmp.s; line->d.m_id = tmp.m; line->d.shared_dirty |= BCF1_DIRTY_ID; return 0; } int bcf_add_id(const bcf_hdr_t *hdr, bcf1_t *line, const char *id) { if ( !id ) return 0; if ( !(line->unpacked & BCF_UN_STR) ) bcf_unpack(line, BCF_UN_STR); kstring_t tmp; tmp.l = 0; tmp.s = line->d.id; tmp.m = line->d.m_id; int len = strlen(id); char *dst = line->d.id; while ( *dst && (dst=strstr(dst,id)) ) { if ( dst[len]!=0 && dst[len]!=';' ) dst++; // a prefix, not a match else if ( dst==line->d.id || dst[-1]==';' ) return 0; // already present dst++; // a suffix, not a match } if ( line->d.id && (line->d.id[0]!='.' 
|| line->d.id[1]) ) { tmp.l = strlen(line->d.id); kputc(';',&tmp); } kputs(id,&tmp); line->d.id = tmp.s; line->d.m_id = tmp.m; line->d.shared_dirty |= BCF1_DIRTY_ID; return 0; } bcf_fmt_t *bcf_get_fmt(const bcf_hdr_t *hdr, bcf1_t *line, const char *key) { int id = bcf_hdr_id2int(hdr, BCF_DT_ID, key); if ( !bcf_hdr_idinfo_exists(hdr,BCF_HL_FMT,id) ) return NULL; // no such FMT field in the header return bcf_get_fmt_id(line, id); } bcf_info_t *bcf_get_info(const bcf_hdr_t *hdr, bcf1_t *line, const char *key) { int id = bcf_hdr_id2int(hdr, BCF_DT_ID, key); if ( !bcf_hdr_idinfo_exists(hdr,BCF_HL_INFO,id) ) return NULL; // no such INFO field in the header return bcf_get_info_id(line, id); } bcf_fmt_t *bcf_get_fmt_id(bcf1_t *line, const int id) { int i; if ( !(line->unpacked & BCF_UN_FMT) ) bcf_unpack(line, BCF_UN_FMT); for (i=0; i<line->n_fmt; i++) { if ( line->d.fmt[i].id==id ) return &line->d.fmt[i]; } return NULL; } bcf_info_t *bcf_get_info_id(bcf1_t *line, const int id) { int i; if ( !(line->unpacked & BCF_UN_INFO) ) bcf_unpack(line, BCF_UN_INFO); for (i=0; i<line->n_info; i++) { if ( line->d.info[i].key==id ) return &line->d.info[i]; } return NULL; } int bcf_get_info_values(const bcf_hdr_t *hdr, bcf1_t *line, const char *tag, void **dst, int *ndst, int type) { int i, ret = -4, tag_id = bcf_hdr_id2int(hdr, BCF_DT_ID, tag); if ( !bcf_hdr_idinfo_exists(hdr,BCF_HL_INFO,tag_id) ) return -1; // no such INFO field in the header if ( bcf_hdr_id2type(hdr,BCF_HL_INFO,tag_id)!=(type & 0xff) ) return -2; // expected different type if ( !(line->unpacked & BCF_UN_INFO) ) bcf_unpack(line, BCF_UN_INFO); for (i=0; i<line->n_info; i++) if ( line->d.info[i].key==tag_id ) break; if ( i==line->n_info ) return ( type==BCF_HT_FLAG ) ? 0 : -3; // the tag is not present in this record if ( type==BCF_HT_FLAG ) return 1; bcf_info_t *info = &line->d.info[i]; if ( !info->vptr ) return -3; // the tag was marked for removal if ( type==BCF_HT_STR ) { if ( *ndst < info->len+1 ) { *ndst = info->len + 1; *dst = realloc(*dst, *ndst); } memcpy(*dst,info->vptr,info->len); ((uint8_t*)*dst)[info->len] = 0; return info->len; } // Make sure the buffer is big enough int size1; switch (type) { case BCF_HT_INT: size1 = sizeof(int32_t); break; case BCF_HT_LONG: size1 = sizeof(int64_t); break; case BCF_HT_REAL: size1 = sizeof(float); break; default: hts_log_error("Unexpected output type %d at %s:%"PRIhts_pos, type, bcf_seqname_safe(hdr,line), line->pos+1); return -2; } if ( *ndst < info->len ) { *ndst = info->len; *dst = realloc(*dst, *ndst * size1); } #define BRANCH(type_t, convert, is_missing, is_vector_end, set_missing, set_regular, out_type_t) do { \ out_type_t *tmp = (out_type_t *) *dst; \ int j; \ for (j=0; j<info->len; j++) \ { \ type_t p = convert(info->vptr + j * sizeof(type_t)); \ if ( is_vector_end ) break; \ if ( is_missing ) set_missing; \ else set_regular; \ tmp++; \ } \ ret = j; \ } while (0) switch (info->type) { case BCF_BT_INT8: if (type == BCF_HT_LONG) { BRANCH(int8_t, le_to_i8, p==bcf_int8_missing, p==bcf_int8_vector_end, *tmp=bcf_int64_missing, *tmp=p, int64_t); } else { BRANCH(int8_t, le_to_i8, p==bcf_int8_missing, p==bcf_int8_vector_end, *tmp=bcf_int32_missing, *tmp=p, int32_t); } break; case BCF_BT_INT16: if (type == BCF_HT_LONG) { BRANCH(int16_t, le_to_i16, p==bcf_int16_missing, p==bcf_int16_vector_end, *tmp=bcf_int64_missing, *tmp=p, int64_t); } else { BRANCH(int16_t, le_to_i16, p==bcf_int16_missing, p==bcf_int16_vector_end, *tmp=bcf_int32_missing, *tmp=p, int32_t); } break; case BCF_BT_INT32: if (type == 
BCF_HT_LONG) { BRANCH(int32_t, le_to_i32, p==bcf_int32_missing, p==bcf_int32_vector_end, *tmp=bcf_int64_missing, *tmp=p, int64_t); break; } else { BRANCH(int32_t, le_to_i32, p==bcf_int32_missing, p==bcf_int32_vector_end, *tmp=bcf_int32_missing, *tmp=p, int32_t); break; } case BCF_BT_FLOAT: BRANCH(uint32_t, le_to_u32, p==bcf_float_missing, p==bcf_float_vector_end, bcf_float_set_missing(*tmp), bcf_float_set(tmp, p), float); break; default: hts_log_error("Unexpected type %d at %s:%"PRIhts_pos, info->type, bcf_seqname_safe(hdr,line), line->pos+1); return -2; } #undef BRANCH return ret; // set by BRANCH } int bcf_get_format_string(const bcf_hdr_t *hdr, bcf1_t *line, const char *tag, char ***dst, int *ndst) { int i,tag_id = bcf_hdr_id2int(hdr, BCF_DT_ID, tag); if ( !bcf_hdr_idinfo_exists(hdr,BCF_HL_FMT,tag_id) ) return -1; // no such FORMAT field in the header if ( bcf_hdr_id2type(hdr,BCF_HL_FMT,tag_id)!=BCF_HT_STR ) return -2; // expected different type if ( !(line->unpacked & BCF_UN_FMT) ) bcf_unpack(line, BCF_UN_FMT); for (i=0; i<line->n_fmt; i++) if ( line->d.fmt[i].id==tag_id ) break; if ( i==line->n_fmt ) return -3; // the tag is not present in this record bcf_fmt_t *fmt = &line->d.fmt[i]; if ( !fmt->p ) return -3; // the tag was marked for removal int nsmpl = bcf_hdr_nsamples(hdr); if ( !*dst ) { *dst = (char**) malloc(sizeof(char*)*nsmpl); if ( !*dst ) return -4; // could not alloc (*dst)[0] = NULL; } int n = (fmt->n+1)*nsmpl; if ( *ndst < n ) { (*dst)[0] = realloc((*dst)[0], n); if ( !(*dst)[0] ) return -4; // could not alloc *ndst = n; } for (i=0; i<nsmpl; i++) { uint8_t *src = fmt->p + i*fmt->n; uint8_t *tmp = (uint8_t*)(*dst)[0] + i*(fmt->n+1); memcpy(tmp,src,fmt->n); tmp[fmt->n] = 0; (*dst)[i] = (char*) tmp; } return n; } int bcf_get_format_values(const bcf_hdr_t *hdr, bcf1_t *line, const char *tag, void **dst, int *ndst, int type) { int i,j, tag_id = bcf_hdr_id2int(hdr, BCF_DT_ID, tag); if ( !bcf_hdr_idinfo_exists(hdr,BCF_HL_FMT,tag_id) ) return -1; // no such FORMAT field in the header if ( tag[0]=='G' && tag[1]=='T' && tag[2]==0 ) { // Ugly: GT field is considered to be a string by the VCF header but BCF represents it as INT. if ( bcf_hdr_id2type(hdr,BCF_HL_FMT,tag_id)!=BCF_HT_STR ) return -2; } else if ( bcf_hdr_id2type(hdr,BCF_HL_FMT,tag_id)!=type ) return -2; // expected different type if ( !(line->unpacked & BCF_UN_FMT) ) bcf_unpack(line, BCF_UN_FMT); for (i=0; i<line->n_fmt; i++) if ( line->d.fmt[i].id==tag_id ) break; if ( i==line->n_fmt ) return -3; // the tag is not present in this record bcf_fmt_t *fmt = &line->d.fmt[i]; if ( !fmt->p ) return -3; // the tag was marked for removal if ( type==BCF_HT_STR ) { int n = fmt->n*bcf_hdr_nsamples(hdr); if ( *ndst < n ) { *dst = realloc(*dst, n); if ( !*dst ) return -4; // could not alloc *ndst = n; } memcpy(*dst,fmt->p,n); return n; } // Make sure the buffer is big enough int nsmpl = bcf_hdr_nsamples(hdr); int size1 = type==BCF_HT_INT ? 
sizeof(int32_t) : sizeof(float); if ( *ndst < fmt->n*nsmpl ) { *ndst = fmt->n*nsmpl; *dst = realloc(*dst, *ndst*size1); if ( !*dst ) return -4; // could not alloc } #define BRANCH(type_t, convert, is_missing, is_vector_end, set_missing, set_vector_end, set_regular, out_type_t) { \ out_type_t *tmp = (out_type_t *) *dst; \ uint8_t *fmt_p = fmt->p; \ for (i=0; i<nsmpl; i++) \ { \ for (j=0; j<fmt->n; j++) \ { \ type_t p = convert(fmt_p + j * sizeof(type_t)); \ if ( is_missing ) set_missing; \ else if ( is_vector_end ) { set_vector_end; break; } \ else set_regular; \ tmp++; \ } \ for (; j<fmt->n; j++) { set_vector_end; tmp++; } \ fmt_p += fmt->size; \ } \ } switch (fmt->type) { case BCF_BT_INT8: BRANCH(int8_t, le_to_i8, p==bcf_int8_missing, p==bcf_int8_vector_end, *tmp=bcf_int32_missing, *tmp=bcf_int32_vector_end, *tmp=p, int32_t); break; case BCF_BT_INT16: BRANCH(int16_t, le_to_i16, p==bcf_int16_missing, p==bcf_int16_vector_end, *tmp=bcf_int32_missing, *tmp=bcf_int32_vector_end, *tmp=p, int32_t); break; case BCF_BT_INT32: BRANCH(int32_t, le_to_i32, p==bcf_int32_missing, p==bcf_int32_vector_end, *tmp=bcf_int32_missing, *tmp=bcf_int32_vector_end, *tmp=p, int32_t); break; case BCF_BT_FLOAT: BRANCH(uint32_t, le_to_u32, p==bcf_float_missing, p==bcf_float_vector_end, bcf_float_set_missing(*tmp), bcf_float_set_vector_end(*tmp), bcf_float_set(tmp, p), float); break; default: hts_log_error("Unexpected type %d at %s:%"PRIhts_pos, fmt->type, bcf_seqname_safe(hdr,line), line->pos+1); exit(1); } #undef BRANCH return nsmpl*fmt->n; }
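A minimal usage sketch for the FORMAT helpers defined in the htslib excerpt above, i.e. bcf_update_format() with BCF_HT_INT and bcf_get_format_values(). The surrounding setup is assumed rather than shown: `hdr` and `line` would come from an already-open VCF/BCF reader, and the header is assumed to declare FORMAT/DP as an Integer field; the tag name "DP" is only an illustration.

#include <stdio.h>
#include <stdlib.h>
#include <htslib/vcf.h>

/* Write one integer FORMAT/DP value per sample, then read the block back.
 * Assumption: hdr/line come from bcf_hdr_read()/bcf_read() elsewhere and the
 * header already contains a ##FORMAT=<ID=DP,Number=1,Type=Integer,...> line. */
static int set_and_check_dp(const bcf_hdr_t *hdr, bcf1_t *line)
{
    int i, nsmpl = bcf_hdr_nsamples(hdr);
    int32_t *dp = (int32_t *) malloc(nsmpl * sizeof(*dp));
    if (!dp) return -1;
    for (i = 0; i < nsmpl; i++) dp[i] = 30 + i;     /* one value per sample */

    /* n must be a multiple of the sample count; here exactly nsmpl values */
    if (bcf_update_format(hdr, line, "DP", dp, nsmpl, BCF_HT_INT) < 0) {
        free(dp);
        return -1;
    }

    /* bcf_get_format_values() grows *dst with realloc() as needed and returns
     * the number of values copied, or a negative error code */
    int32_t *buf = NULL;
    int nbuf = 0;
    int nvals = bcf_get_format_values(hdr, line, "DP", (void **)&buf, &nbuf, BCF_HT_INT);
    for (i = 0; i < nvals; i++) printf("sample %d: DP=%d\n", i, buf[i]);

    free(buf);
    free(dp);
    return nvals < 0 ? -1 : 0;
}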
null
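Along the same lines, a short sketch for the FILTER helpers defined in the same excerpt (bcf_hdr_id2int(), bcf_add_filter(), bcf_has_filter()). The "q10" filter name and its presence in the header are assumptions made for illustration.

#include <htslib/vcf.h>

/* Mark a record as failing an assumed "q10" filter, then query it.
 * Assumption: the header carries a ##FILTER=<ID=q10,...> line. */
static int flag_q10(const bcf_hdr_t *hdr, bcf1_t *line)
{
    char q10[] = "q10";
    int flt_id = bcf_hdr_id2int(hdr, BCF_DT_ID, q10);
    if (flt_id < 0) return -1;               /* filter not declared in the header */
    bcf_add_filter(hdr, line, flt_id);       /* appends, or replaces a lone PASS */
    return bcf_has_filter(hdr, line, q10);   /* 1 if present, 0 if absent, -1 if undeclared */
}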
225
CWE-787
CVE-2020-36406
/* * Authored by Alex Hultman, 2018-2020. * Intellectual property of third-party. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef UWS_TOPICTREE_H #define UWS_TOPICTREE_H #include <iostream> #include <vector> #include <map> #include <string_view> #include <functional> #include <set> #include <chrono> #include <list> #include <cstring> namespace uWS { /* A Subscriber is an extension of a socket */ struct Subscriber { std::list<struct Topic *> subscriptions; void *user; Subscriber(void *user) : user(user) {} }; struct Topic { /* Memory for our name */ char *name; size_t length; /* Our parent or nullptr */ Topic *parent = nullptr; /* Next triggered Topic */ bool triggered = false; /* Exact string matches */ std::map<std::string_view, Topic *> children; /* Wildcard child */ Topic *wildcardChild = nullptr; /* Terminating wildcard child */ Topic *terminatingWildcardChild = nullptr; /* What we published, {inflated, deflated} */ std::map<unsigned int, std::pair<std::string, std::string>> messages; std::set<Subscriber *> subs; }; struct TopicTree { private: std::function<int(Subscriber *, std::pair<std::string_view, std::string_view>)> cb; Topic *root = new Topic; /* Global messageId for deduplication of overlapping topics and ordering between topics */ unsigned int messageId = 0; /* The triggered topics */ Topic *triggeredTopics[64]; int numTriggeredTopics = 0; Subscriber *min = (Subscriber *) UINTPTR_MAX; /* Cull or trim unused Topic nodes from leaf to root */ void trimTree(Topic *topic) { repeat: if (!topic->subs.size() && !topic->children.size() && !topic->terminatingWildcardChild && !topic->wildcardChild) { Topic *parent = topic->parent; if (topic->length == 1) { if (topic->name[0] == '#') { parent->terminatingWildcardChild = nullptr; } else if (topic->name[0] == '+') { parent->wildcardChild = nullptr; } } /* Erase us from our parents set (wildcards also live here) */ parent->children.erase(std::string_view(topic->name, topic->length)); /* If this node is triggered, make sure to remove it from the triggered list */ if (topic->triggered) { Topic *tmp[64]; int length = 0; for (int i = 0; i < numTriggeredTopics; i++) { if (triggeredTopics[i] != topic) { tmp[length++] = triggeredTopics[i]; } } for (int i = 0; i < length; i++) { triggeredTopics[i] = tmp[i]; } numTriggeredTopics = length; } /* Free various memory for the node */ delete [] topic->name; delete topic; if (parent != root) { topic = parent; goto repeat; //trimTree(parent); } } } /* Should be getData and commit? */ void publish(Topic *iterator, size_t start, size_t stop, std::string_view topic, std::pair<std::string_view, std::string_view> message) { /* If we already have 64 triggered topics make sure to drain it here */ if (numTriggeredTopics == 64) { drain(); } /* Iterate over all segments in given topic */ for (; stop != std::string::npos; start = stop + 1) { stop = topic.find('/', start); std::string_view segment = topic.substr(start, stop - start); /* It is very important to disallow wildcards when publishing. 
* We will not catch EVERY misuse this lazy way, but enough to hinder * explosive recursion. * Terminating wildcards MAY still get triggered along the way, if for * instace the error is found late while iterating the topic segments. */ if (segment.length() == 1) { if (segment[0] == '+' || segment[0] == '#') { return; } } /* Do we have a terminating wildcard child? */ if (iterator->terminatingWildcardChild) { iterator->terminatingWildcardChild->messages[messageId] = message; /* Add this topic to triggered */ if (!iterator->terminatingWildcardChild->triggered) { triggeredTopics[numTriggeredTopics++] = iterator->terminatingWildcardChild; iterator->terminatingWildcardChild->triggered = true; } } /* Do we have a wildcard child? */ if (iterator->wildcardChild) { publish(iterator->wildcardChild, stop + 1, stop, topic, message); } std::map<std::string_view, Topic *>::iterator it = iterator->children.find(segment); if (it == iterator->children.end()) { /* Stop trying to match by exact string */ return; } iterator = it->second; } /* If we went all the way we matched exactly */ iterator->messages[messageId] = message; /* Add this topic to triggered */ if (!iterator->triggered) { triggeredTopics[numTriggeredTopics++] = iterator; iterator->triggered = true; } } public: TopicTree(std::function<int(Subscriber *, std::pair<std::string_view, std::string_view>)> cb) { this->cb = cb; } ~TopicTree() { delete root; } void subscribe(std::string_view topic, Subscriber *subscriber) { /* Start iterating from the root */ Topic *iterator = root; /* Traverse the topic, inserting a node for every new segment separated by / */ for (size_t start = 0, stop = 0; stop != std::string::npos; start = stop + 1) { stop = topic.find('/', start); std::string_view segment = topic.substr(start, stop - start); auto lb = iterator->children.lower_bound(segment); if (lb != iterator->children.end() && !(iterator->children.key_comp()(segment, lb->first))) { iterator = lb->second; } else { /* Allocate and insert new node */ Topic *newTopic = new Topic; newTopic->parent = iterator; newTopic->name = new char[segment.length()]; newTopic->length = segment.length(); newTopic->terminatingWildcardChild = nullptr; newTopic->wildcardChild = nullptr; memcpy(newTopic->name, segment.data(), segment.length()); /* For simplicity we do insert wildcards with text */ iterator->children.insert(lb, {std::string_view(newTopic->name, segment.length()), newTopic}); /* Store fast lookup to wildcards */ if (segment.length() == 1) { /* If this segment is '+' it is a wildcard */ if (segment[0] == '+') { iterator->wildcardChild = newTopic; } /* If this segment is '#' it is a terminating wildcard */ if (segment[0] == '#') { iterator->terminatingWildcardChild = newTopic; } } iterator = newTopic; } } /* If this topic is triggered, drain the tree before we join */ if (iterator->triggered) { drain(); } /* Add socket to Topic's Set */ auto [it, inserted] = iterator->subs.insert(subscriber); /* Add Topic to list of subscriptions only if we weren't already subscribed */ if (inserted) { subscriber->subscriptions.push_back(iterator); } } void publish(std::string_view topic, std::pair<std::string_view, std::string_view> message) { publish(root, 0, 0, topic, message); messageId++; } /* Returns whether we were subscribed prior */ bool unsubscribe(std::string_view topic, Subscriber *subscriber) { /* Subscribers are likely to have very few subscriptions (20 or fewer) */ if (subscriber) { /* Lookup exact Topic ptr from string */ Topic *iterator = root; for (size_t start = 0, stop = 
0; stop != std::string::npos; start = stop + 1) { stop = topic.find('/', start); std::string_view segment = topic.substr(start, stop - start); std::map<std::string_view, Topic *>::iterator it = iterator->children.find(segment); if (it == iterator->children.end()) { /* This topic does not even exist */ return false; } iterator = it->second; } /* Try and remove this topic from our list */ for (auto it = subscriber->subscriptions.begin(); it != subscriber->subscriptions.end(); it++) { if (*it == iterator) { /* If this topic is triggered, drain the tree before we leave */ if (iterator->triggered) { drain(); } /* Remove topic ptr from our list */ subscriber->subscriptions.erase(it); /* Remove us from Topic's subs */ iterator->subs.erase(subscriber); trimTree(iterator); return true; } } } return false; } /* Can be called with nullptr, ignore it then */ void unsubscribeAll(Subscriber *subscriber, bool mayFlush = true) { if (subscriber) { for (Topic *topic : subscriber->subscriptions) { /* We do not want to flush when closing a socket, it makes no sense to do so */ /* If this topic is triggered, drain the tree before we leave */ if (mayFlush && topic->triggered) { drain(); } /* Remove us from the topic's set */ topic->subs.erase(subscriber); trimTree(topic); } subscriber->subscriptions.clear(); } } /* Drain the tree by emitting what to send with every Subscriber */ /* Better name would be commit() and making it public so that one can commit and shutdown, etc */ void drain() { /* Do nothing if nothing to send */ if (!numTriggeredTopics) { return; } /* bug fix: Filter triggered topics without subscribers */ int numFilteredTriggeredTopics = 0; for (int i = 0; i < numTriggeredTopics; i++) { if (triggeredTopics[i]->subs.size()) { triggeredTopics[numFilteredTriggeredTopics++] = triggeredTopics[i]; } else { /* If we no longer have any subscribers, yet still keep this Topic alive (parent), * make sure to clear its potential messages. 
*/ triggeredTopics[i]->messages.clear(); triggeredTopics[i]->triggered = false; } } numTriggeredTopics = numFilteredTriggeredTopics; if (!numTriggeredTopics) { return; } /* bug fix: update min, as the one tracked via subscribe gets invalid as you unsubscribe */ min = (Subscriber *)UINTPTR_MAX; for (int i = 0; i < numTriggeredTopics; i++) { if ((triggeredTopics[i]->subs.size()) && (min > *triggeredTopics[i]->subs.begin())) { min = *triggeredTopics[i]->subs.begin(); } } /* Check if we really have any sockets still */ if (min != (Subscriber *)UINTPTR_MAX) { /* Up to 64 triggered Topics per batch */ std::map<uint64_t, std::pair<std::string, std::string>> intersectionCache; /* Loop over these here */ std::set<Subscriber *>::iterator it[64]; std::set<Subscriber *>::iterator end[64]; for (int i = 0; i < numTriggeredTopics; i++) { it[i] = triggeredTopics[i]->subs.begin(); end[i] = triggeredTopics[i]->subs.end(); } /* Empty all sets from unique subscribers */ for (int nonEmpty = numTriggeredTopics; nonEmpty; ) { Subscriber *nextMin = (Subscriber *)UINTPTR_MAX; /* The message sets relevant for this intersection */ std::map<unsigned int, std::pair<std::string, std::string>> *perSubscriberIntersectingTopicMessages[64]; int numPerSubscriberIntersectingTopicMessages = 0; uint64_t intersection = 0; for (int i = 0; i < numTriggeredTopics; i++) { if ((it[i] != end[i]) && (*it[i] == min)) { /* Mark this intersection */ intersection |= ((uint64_t)1 << i); perSubscriberIntersectingTopicMessages[numPerSubscriberIntersectingTopicMessages++] = &triggeredTopics[i]->messages; it[i]++; if (it[i] == end[i]) { nonEmpty--; } else { if (nextMin > *it[i]) { nextMin = *it[i]; } } } else { /* We need to lower nextMin to us, in the case of min being the last in a set */ if ((it[i] != end[i]) && (nextMin > *it[i])) { nextMin = *it[i]; } } } /* Generate cache for intersection */ if (intersectionCache[intersection].first.length() == 0) { /* Build the union in order without duplicates */ std::map<unsigned int, std::pair<std::string, std::string>> complete; for (int i = 0; i < numPerSubscriberIntersectingTopicMessages; i++) { complete.insert(perSubscriberIntersectingTopicMessages[i]->begin(), perSubscriberIntersectingTopicMessages[i]->end()); } /* Create the linear cache, {inflated, deflated} */ std::pair<std::string, std::string> res; for (auto &p : complete) { res.first.append(p.second.first); res.second.append(p.second.second); } cb(min, intersectionCache[intersection] = std::move(res)); } else { cb(min, intersectionCache[intersection]); } min = nextMin; } } /* Clear messages of triggered Topics */ for (int i = 0; i < numTriggeredTopics; i++) { triggeredTopics[i]->messages.clear(); triggeredTopics[i]->triggered = false; } numTriggeredTopics = 0; } }; } #endif
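A small driver sketch for the public TopicTree interface shown in this record; both the version above and the patched version that follows expose the same API (construct with a delivery callback, subscribe(), publish(), drain()). The include path and the example topic strings are assumptions, and uWS::TopicTree is normally driven internally by uWebSockets rather than used directly like this.

#include "TopicTree.h"   /* assumed location of the header reproduced above */
#include <iostream>

int main() {
    /* The callback receives one subscriber and the batched {inflated, deflated}
     * payload collected for it since the last drain(). */
    uWS::TopicTree tree([](uWS::Subscriber *s, std::pair<std::string_view, std::string_view> msg) {
        std::cout << "deliver " << msg.first.size() << " bytes to subscriber " << s->user << "\n";
        return 0;
    });

    uWS::Subscriber sub((void *) 1);              /* user pointer is opaque to the tree */
    tree.subscribe("sensors/+/temp", &sub);       /* '+' matches exactly one segment */

    tree.publish("sensors/kitchen/temp", {"21.5", "21.5"});
    tree.drain();                                 /* flush triggered topics via the callback */

    tree.unsubscribeAll(&sub, false);             /* detach before `sub` goes out of scope */
    return 0;
}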
null
/* * Authored by Alex Hultman, 2018-2020. * Intellectual property of third-party. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef UWS_TOPICTREE_H #define UWS_TOPICTREE_H #include <iostream> #include <vector> #include <map> #include <string_view> #include <functional> #include <set> #include <chrono> #include <list> #include <cstring> namespace uWS { /* A Subscriber is an extension of a socket */ struct Subscriber { std::list<struct Topic *> subscriptions; void *user; Subscriber(void *user) : user(user) {} }; struct Topic { /* Memory for our name */ char *name; size_t length; /* Our parent or nullptr */ Topic *parent = nullptr; /* Next triggered Topic */ bool triggered = false; /* Exact string matches */ std::map<std::string_view, Topic *> children; /* Wildcard child */ Topic *wildcardChild = nullptr; /* Terminating wildcard child */ Topic *terminatingWildcardChild = nullptr; /* What we published, {inflated, deflated} */ std::map<unsigned int, std::pair<std::string, std::string>> messages; std::set<Subscriber *> subs; }; struct TopicTree { private: std::function<int(Subscriber *, std::pair<std::string_view, std::string_view>)> cb; Topic *root = new Topic; /* Global messageId for deduplication of overlapping topics and ordering between topics */ unsigned int messageId = 0; /* The triggered topics */ Topic *triggeredTopics[64]; int numTriggeredTopics = 0; Subscriber *min = (Subscriber *) UINTPTR_MAX; /* Cull or trim unused Topic nodes from leaf to root */ void trimTree(Topic *topic) { repeat: if (!topic->subs.size() && !topic->children.size() && !topic->terminatingWildcardChild && !topic->wildcardChild) { Topic *parent = topic->parent; if (topic->length == 1) { if (topic->name[0] == '#') { parent->terminatingWildcardChild = nullptr; } else if (topic->name[0] == '+') { parent->wildcardChild = nullptr; } } /* Erase us from our parents set (wildcards also live here) */ parent->children.erase(std::string_view(topic->name, topic->length)); /* If this node is triggered, make sure to remove it from the triggered list */ if (topic->triggered) { Topic *tmp[64]; int length = 0; for (int i = 0; i < numTriggeredTopics; i++) { if (triggeredTopics[i] != topic) { tmp[length++] = triggeredTopics[i]; } } for (int i = 0; i < length; i++) { triggeredTopics[i] = tmp[i]; } numTriggeredTopics = length; } /* Free various memory for the node */ delete [] topic->name; delete topic; if (parent != root) { topic = parent; goto repeat; //trimTree(parent); } } } /* Should be getData and commit? */ void publish(Topic *iterator, size_t start, size_t stop, std::string_view topic, std::pair<std::string_view, std::string_view> message) { /* Iterate over all segments in given topic */ for (; stop != std::string::npos; start = stop + 1) { stop = topic.find('/', start); std::string_view segment = topic.substr(start, stop - start); /* It is very important to disallow wildcards when publishing. * We will not catch EVERY misuse this lazy way, but enough to hinder * explosive recursion. 
* Terminating wildcards MAY still get triggered along the way, if for * instace the error is found late while iterating the topic segments. */ if (segment.length() == 1) { if (segment[0] == '+' || segment[0] == '#') { return; } } /* Do we have a terminating wildcard child? */ if (iterator->terminatingWildcardChild) { iterator->terminatingWildcardChild->messages[messageId] = message; /* Add this topic to triggered */ if (!iterator->terminatingWildcardChild->triggered) { /* If we already have 64 triggered topics make sure to drain it here */ if (numTriggeredTopics == 64) { drain(); } triggeredTopics[numTriggeredTopics++] = iterator->terminatingWildcardChild; iterator->terminatingWildcardChild->triggered = true; } } /* Do we have a wildcard child? */ if (iterator->wildcardChild) { publish(iterator->wildcardChild, stop + 1, stop, topic, message); } std::map<std::string_view, Topic *>::iterator it = iterator->children.find(segment); if (it == iterator->children.end()) { /* Stop trying to match by exact string */ return; } iterator = it->second; } /* If we went all the way we matched exactly */ iterator->messages[messageId] = message; /* Add this topic to triggered */ if (!iterator->triggered) { /* If we already have 64 triggered topics make sure to drain it here */ if (numTriggeredTopics == 64) { drain(); } triggeredTopics[numTriggeredTopics++] = iterator; iterator->triggered = true; } } public: TopicTree(std::function<int(Subscriber *, std::pair<std::string_view, std::string_view>)> cb) { this->cb = cb; } ~TopicTree() { delete root; } void subscribe(std::string_view topic, Subscriber *subscriber) { /* Start iterating from the root */ Topic *iterator = root; /* Traverse the topic, inserting a node for every new segment separated by / */ for (size_t start = 0, stop = 0; stop != std::string::npos; start = stop + 1) { stop = topic.find('/', start); std::string_view segment = topic.substr(start, stop - start); auto lb = iterator->children.lower_bound(segment); if (lb != iterator->children.end() && !(iterator->children.key_comp()(segment, lb->first))) { iterator = lb->second; } else { /* Allocate and insert new node */ Topic *newTopic = new Topic; newTopic->parent = iterator; newTopic->name = new char[segment.length()]; newTopic->length = segment.length(); newTopic->terminatingWildcardChild = nullptr; newTopic->wildcardChild = nullptr; memcpy(newTopic->name, segment.data(), segment.length()); /* For simplicity we do insert wildcards with text */ iterator->children.insert(lb, {std::string_view(newTopic->name, segment.length()), newTopic}); /* Store fast lookup to wildcards */ if (segment.length() == 1) { /* If this segment is '+' it is a wildcard */ if (segment[0] == '+') { iterator->wildcardChild = newTopic; } /* If this segment is '#' it is a terminating wildcard */ if (segment[0] == '#') { iterator->terminatingWildcardChild = newTopic; } } iterator = newTopic; } } /* If this topic is triggered, drain the tree before we join */ if (iterator->triggered) { drain(); } /* Add socket to Topic's Set */ auto [it, inserted] = iterator->subs.insert(subscriber); /* Add Topic to list of subscriptions only if we weren't already subscribed */ if (inserted) { subscriber->subscriptions.push_back(iterator); } } void publish(std::string_view topic, std::pair<std::string_view, std::string_view> message) { publish(root, 0, 0, topic, message); messageId++; } /* Returns whether we were subscribed prior */ bool unsubscribe(std::string_view topic, Subscriber *subscriber) { /* Subscribers are likely to have very few 
subscriptions (20 or fewer) */ if (subscriber) { /* Lookup exact Topic ptr from string */ Topic *iterator = root; for (size_t start = 0, stop = 0; stop != std::string::npos; start = stop + 1) { stop = topic.find('/', start); std::string_view segment = topic.substr(start, stop - start); std::map<std::string_view, Topic *>::iterator it = iterator->children.find(segment); if (it == iterator->children.end()) { /* This topic does not even exist */ return false; } iterator = it->second; } /* Try and remove this topic from our list */ for (auto it = subscriber->subscriptions.begin(); it != subscriber->subscriptions.end(); it++) { if (*it == iterator) { /* If this topic is triggered, drain the tree before we leave */ if (iterator->triggered) { drain(); } /* Remove topic ptr from our list */ subscriber->subscriptions.erase(it); /* Remove us from Topic's subs */ iterator->subs.erase(subscriber); trimTree(iterator); return true; } } } return false; } /* Can be called with nullptr, ignore it then */ void unsubscribeAll(Subscriber *subscriber, bool mayFlush = true) { if (subscriber) { for (Topic *topic : subscriber->subscriptions) { /* We do not want to flush when closing a socket, it makes no sense to do so */ /* If this topic is triggered, drain the tree before we leave */ if (mayFlush && topic->triggered) { drain(); } /* Remove us from the topic's set */ topic->subs.erase(subscriber); trimTree(topic); } subscriber->subscriptions.clear(); } } /* Drain the tree by emitting what to send with every Subscriber */ /* Better name would be commit() and making it public so that one can commit and shutdown, etc */ void drain() { /* Do nothing if nothing to send */ if (!numTriggeredTopics) { return; } /* bug fix: Filter triggered topics without subscribers */ int numFilteredTriggeredTopics = 0; for (int i = 0; i < numTriggeredTopics; i++) { if (triggeredTopics[i]->subs.size()) { triggeredTopics[numFilteredTriggeredTopics++] = triggeredTopics[i]; } else { /* If we no longer have any subscribers, yet still keep this Topic alive (parent), * make sure to clear its potential messages. 
*/ triggeredTopics[i]->messages.clear(); triggeredTopics[i]->triggered = false; } } numTriggeredTopics = numFilteredTriggeredTopics; if (!numTriggeredTopics) { return; } /* bug fix: update min, as the one tracked via subscribe gets invalid as you unsubscribe */ min = (Subscriber *)UINTPTR_MAX; for (int i = 0; i < numTriggeredTopics; i++) { if ((triggeredTopics[i]->subs.size()) && (min > *triggeredTopics[i]->subs.begin())) { min = *triggeredTopics[i]->subs.begin(); } } /* Check if we really have any sockets still */ if (min != (Subscriber *)UINTPTR_MAX) { /* Up to 64 triggered Topics per batch */ std::map<uint64_t, std::pair<std::string, std::string>> intersectionCache; /* Loop over these here */ std::set<Subscriber *>::iterator it[64]; std::set<Subscriber *>::iterator end[64]; for (int i = 0; i < numTriggeredTopics; i++) { it[i] = triggeredTopics[i]->subs.begin(); end[i] = triggeredTopics[i]->subs.end(); } /* Empty all sets from unique subscribers */ for (int nonEmpty = numTriggeredTopics; nonEmpty; ) { Subscriber *nextMin = (Subscriber *)UINTPTR_MAX; /* The message sets relevant for this intersection */ std::map<unsigned int, std::pair<std::string, std::string>> *perSubscriberIntersectingTopicMessages[64]; int numPerSubscriberIntersectingTopicMessages = 0; uint64_t intersection = 0; for (int i = 0; i < numTriggeredTopics; i++) { if ((it[i] != end[i]) && (*it[i] == min)) { /* Mark this intersection */ intersection |= ((uint64_t)1 << i); perSubscriberIntersectingTopicMessages[numPerSubscriberIntersectingTopicMessages++] = &triggeredTopics[i]->messages; it[i]++; if (it[i] == end[i]) { nonEmpty--; } else { if (nextMin > *it[i]) { nextMin = *it[i]; } } } else { /* We need to lower nextMin to us, in the case of min being the last in a set */ if ((it[i] != end[i]) && (nextMin > *it[i])) { nextMin = *it[i]; } } } /* Generate cache for intersection */ if (intersectionCache[intersection].first.length() == 0) { /* Build the union in order without duplicates */ std::map<unsigned int, std::pair<std::string, std::string>> complete; for (int i = 0; i < numPerSubscriberIntersectingTopicMessages; i++) { complete.insert(perSubscriberIntersectingTopicMessages[i]->begin(), perSubscriberIntersectingTopicMessages[i]->end()); } /* Create the linear cache, {inflated, deflated} */ std::pair<std::string, std::string> res; for (auto &p : complete) { res.first.append(p.second.first); res.second.append(p.second.second); } cb(min, intersectionCache[intersection] = std::move(res)); } else { cb(min, intersectionCache[intersection]); } min = nextMin; } } /* Clear messages of triggered Topics */ for (int i = 0; i < numTriggeredTopics; i++) { triggeredTopics[i]->messages.clear(); triggeredTopics[i]->triggered = false; } numTriggeredTopics = 0; } }; } #endif
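The only functional difference between the two versions of publish() in this record is where the capacity check on the 64-entry triggeredTopics array sits. In the version labelled as the source, `if (numTriggeredTopics == 64) { drain(); }` runs once at the top of the private publish(); the later `triggeredTopics[numTriggeredTopics++] = ...` writes, including those reached through the recursive wildcardChild calls, are unguarded, so a single publish that triggers more than 64 topics writes past the end of the fixed array (consistent with the record's CWE-787 / CVE-2020-36406 labels). The ground_truth above performs the check immediately before every push. Below is a minimal, self-contained C++ sketch of that flush-before-write pattern, with illustrative names (BoundedTriggerList, kCap) that are not part of uWebSockets.

#include <cstddef>
#include <iostream>

struct BoundedTriggerList {
    static constexpr std::size_t kCap = 64;   /* mirrors Topic *triggeredTopics[64] */
    int slots[kCap];
    std::size_t count = 0;

    void drain() { count = 0; }               /* stand-in for TopicTree::drain() */

    void push(int topicId) {
        if (count == kCap) drain();           /* flush right before the write... */
        slots[count++] = topicId;             /* ...so this index never reaches kCap */
    }
};

int main() {
    BoundedTriggerList list;
    for (int i = 0; i < 200; i++) list.push(i);   /* in bounds no matter how many pushes */
    std::cout << "entries pending after 200 pushes: " << list.count << "\n";
    return 0;
}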
null
226
CWE-787
CVE-2020-36407
// Copyright 2019 Joe Drago. All rights reserved. // SPDX-License-Identifier: BSD-2-Clause #include "avif/internal.h" #include <string.h> #define AUXTYPE_SIZE 64 #define CONTENTTYPE_SIZE 64 // class VisualSampleEntry(codingname) extends SampleEntry(codingname) { // unsigned int(16) pre_defined = 0; // const unsigned int(16) reserved = 0; // unsigned int(32)[3] pre_defined = 0; // unsigned int(16) width; // unsigned int(16) height; // template unsigned int(32) horizresolution = 0x00480000; // 72 dpi // template unsigned int(32) vertresolution = 0x00480000; // 72 dpi // const unsigned int(32) reserved = 0; // template unsigned int(16) frame_count = 1; // string[32] compressorname; // template unsigned int(16) depth = 0x0018; // int(16) pre_defined = -1; // // other boxes from derived specifications // CleanApertureBox clap; // optional // PixelAspectRatioBox pasp; // optional // } static const size_t VISUALSAMPLEENTRY_SIZE = 78; static const char xmpContentType[] = CONTENT_TYPE_XMP; static const size_t xmpContentTypeSize = sizeof(xmpContentType); // --------------------------------------------------------------------------- // Box data structures // ftyp typedef struct avifFileType { uint8_t majorBrand[4]; uint32_t minorVersion; // If not null, points to a memory block of 4 * compatibleBrandsCount bytes. const uint8_t * compatibleBrands; int compatibleBrandsCount; } avifFileType; // ispe typedef struct avifImageSpatialExtents { uint32_t width; uint32_t height; } avifImageSpatialExtents; // auxC typedef struct avifAuxiliaryType { char auxType[AUXTYPE_SIZE]; } avifAuxiliaryType; // infe mime content_type typedef struct avifContentType { char contentType[CONTENTTYPE_SIZE]; } avifContentType; // colr typedef struct avifColourInformationBox { avifBool hasICC; const uint8_t * icc; size_t iccSize; avifBool hasNCLX; avifColorPrimaries colorPrimaries; avifTransferCharacteristics transferCharacteristics; avifMatrixCoefficients matrixCoefficients; avifRange range; } avifColourInformationBox; #define MAX_PIXI_PLANE_DEPTHS 4 typedef struct avifPixelInformationProperty { uint8_t planeDepths[MAX_PIXI_PLANE_DEPTHS]; uint8_t planeCount; } avifPixelInformationProperty; // --------------------------------------------------------------------------- // Top-level structures struct avifMeta; // Temporary storage for ipco/stsd contents until they can be associated and memcpy'd to an avifDecoderItem typedef struct avifProperty { uint8_t type[4]; union { avifImageSpatialExtents ispe; avifAuxiliaryType auxC; avifColourInformationBox colr; avifCodecConfigurationBox av1C; avifPixelAspectRatioBox pasp; avifCleanApertureBox clap; avifImageRotation irot; avifImageMirror imir; avifPixelInformationProperty pixi; } u; } avifProperty; AVIF_ARRAY_DECLARE(avifPropertyArray, avifProperty, prop); static const avifProperty * avifPropertyArrayFind(const avifPropertyArray * properties, const char * type) { for (uint32_t propertyIndex = 0; propertyIndex < properties->count; ++propertyIndex) { avifProperty * prop = &properties->prop[propertyIndex]; if (!memcmp(prop->type, type, 4)) { return prop; } } return NULL; } // one "item" worth for decoding (all iref, iloc, iprp, etc refer to one of these) typedef struct avifDecoderItem { uint32_t id; struct avifMeta * meta; // Unowned; A back-pointer for convenience uint8_t type[4]; uint32_t offset; uint32_t size; uint32_t idatID; // If non-zero, offset is relative to this idat box (iloc construction_method==1) avifContentType contentType; avifPropertyArray properties; uint32_t thumbnailForID; 
// if non-zero, this item is a thumbnail for Item #{thumbnailForID} uint32_t auxForID; // if non-zero, this item is an auxC plane for Item #{auxForID} uint32_t descForID; // if non-zero, this item is a content description for Item #{descForID} uint32_t dimgForID; // if non-zero, this item is a derived image for Item #{dimgForID} avifBool hasUnsupportedEssentialProperty; // If true, this item cites a property flagged as 'essential' that libavif doesn't support (yet). Ignore the item, if so. } avifDecoderItem; AVIF_ARRAY_DECLARE(avifDecoderItemArray, avifDecoderItem, item); // idat storage typedef struct avifDecoderItemData { uint32_t id; avifROData data; } avifDecoderItemData; AVIF_ARRAY_DECLARE(avifDecoderItemDataArray, avifDecoderItemData, idat); // grid storage typedef struct avifImageGrid { uint8_t rows; uint8_t columns; uint32_t outputWidth; uint32_t outputHeight; } avifImageGrid; // --------------------------------------------------------------------------- // avifTrack typedef struct avifSampleTableChunk { uint64_t offset; } avifSampleTableChunk; AVIF_ARRAY_DECLARE(avifSampleTableChunkArray, avifSampleTableChunk, chunk); typedef struct avifSampleTableSampleToChunk { uint32_t firstChunk; uint32_t samplesPerChunk; uint32_t sampleDescriptionIndex; } avifSampleTableSampleToChunk; AVIF_ARRAY_DECLARE(avifSampleTableSampleToChunkArray, avifSampleTableSampleToChunk, sampleToChunk); typedef struct avifSampleTableSampleSize { uint32_t size; } avifSampleTableSampleSize; AVIF_ARRAY_DECLARE(avifSampleTableSampleSizeArray, avifSampleTableSampleSize, sampleSize); typedef struct avifSampleTableTimeToSample { uint32_t sampleCount; uint32_t sampleDelta; } avifSampleTableTimeToSample; AVIF_ARRAY_DECLARE(avifSampleTableTimeToSampleArray, avifSampleTableTimeToSample, timeToSample); typedef struct avifSyncSample { uint32_t sampleNumber; } avifSyncSample; AVIF_ARRAY_DECLARE(avifSyncSampleArray, avifSyncSample, syncSample); typedef struct avifSampleDescription { uint8_t format[4]; avifPropertyArray properties; } avifSampleDescription; AVIF_ARRAY_DECLARE(avifSampleDescriptionArray, avifSampleDescription, description); typedef struct avifSampleTable { avifSampleTableChunkArray chunks; avifSampleDescriptionArray sampleDescriptions; avifSampleTableSampleToChunkArray sampleToChunks; avifSampleTableSampleSizeArray sampleSizes; avifSampleTableTimeToSampleArray timeToSamples; avifSyncSampleArray syncSamples; uint32_t allSamplesSize; // If this is non-zero, sampleSizes will be empty and all samples will be this size } avifSampleTable; static avifSampleTable * avifSampleTableCreate() { avifSampleTable * sampleTable = (avifSampleTable *)avifAlloc(sizeof(avifSampleTable)); memset(sampleTable, 0, sizeof(avifSampleTable)); avifArrayCreate(&sampleTable->chunks, sizeof(avifSampleTableChunk), 16); avifArrayCreate(&sampleTable->sampleDescriptions, sizeof(avifSampleDescription), 2); avifArrayCreate(&sampleTable->sampleToChunks, sizeof(avifSampleTableSampleToChunk), 16); avifArrayCreate(&sampleTable->sampleSizes, sizeof(avifSampleTableSampleSize), 16); avifArrayCreate(&sampleTable->timeToSamples, sizeof(avifSampleTableTimeToSample), 16); avifArrayCreate(&sampleTable->syncSamples, sizeof(avifSyncSample), 16); return sampleTable; } static void avifSampleTableDestroy(avifSampleTable * sampleTable) { avifArrayDestroy(&sampleTable->chunks); for (uint32_t i = 0; i < sampleTable->sampleDescriptions.count; ++i) { avifSampleDescription * description = &sampleTable->sampleDescriptions.description[i]; 
avifArrayDestroy(&description->properties); } avifArrayDestroy(&sampleTable->sampleDescriptions); avifArrayDestroy(&sampleTable->sampleToChunks); avifArrayDestroy(&sampleTable->sampleSizes); avifArrayDestroy(&sampleTable->timeToSamples); avifArrayDestroy(&sampleTable->syncSamples); avifFree(sampleTable); } static uint32_t avifSampleTableGetImageDelta(const avifSampleTable * sampleTable, int imageIndex) { int maxSampleIndex = 0; for (uint32_t i = 0; i < sampleTable->timeToSamples.count; ++i) { const avifSampleTableTimeToSample * timeToSample = &sampleTable->timeToSamples.timeToSample[i]; maxSampleIndex += timeToSample->sampleCount; if ((imageIndex < maxSampleIndex) || (i == (sampleTable->timeToSamples.count - 1))) { return timeToSample->sampleDelta; } } // TODO: fail here? return 1; } static avifBool avifSampleTableHasFormat(const avifSampleTable * sampleTable, const char * format) { for (uint32_t i = 0; i < sampleTable->sampleDescriptions.count; ++i) { if (!memcmp(sampleTable->sampleDescriptions.description[i].format, format, 4)) { return AVIF_TRUE; } } return AVIF_FALSE; } static uint32_t avifCodecConfigurationBoxGetDepth(const avifCodecConfigurationBox * av1C) { if (av1C->twelveBit) { return 12; } else if (av1C->highBitdepth) { return 10; } return 8; } static const avifPropertyArray * avifSampleTableGetProperties(const avifSampleTable * sampleTable) { for (uint32_t i = 0; i < sampleTable->sampleDescriptions.count; ++i) { const avifSampleDescription * description = &sampleTable->sampleDescriptions.description[i]; if (!memcmp(description->format, "av01", 4)) { return &description->properties; } } return NULL; } // one video track ("trak" contents) typedef struct avifTrack { uint32_t id; uint32_t auxForID; // if non-zero, this item is an auxC plane for Track #{auxForID} uint32_t mediaTimescale; uint64_t mediaDuration; uint32_t width; uint32_t height; avifSampleTable * sampleTable; struct avifMeta * meta; } avifTrack; AVIF_ARRAY_DECLARE(avifTrackArray, avifTrack, track); // --------------------------------------------------------------------------- // avifCodecDecodeInput avifCodecDecodeInput * avifCodecDecodeInputCreate(void) { avifCodecDecodeInput * decodeInput = (avifCodecDecodeInput *)avifAlloc(sizeof(avifCodecDecodeInput)); memset(decodeInput, 0, sizeof(avifCodecDecodeInput)); avifArrayCreate(&decodeInput->samples, sizeof(avifDecodeSample), 1); return decodeInput; } void avifCodecDecodeInputDestroy(avifCodecDecodeInput * decodeInput) { avifArrayDestroy(&decodeInput->samples); avifFree(decodeInput); } static avifBool avifCodecDecodeInputGetSamples(avifCodecDecodeInput * decodeInput, avifSampleTable * sampleTable, avifROData * rawInput) { uint32_t sampleSizeIndex = 0; for (uint32_t chunkIndex = 0; chunkIndex < sampleTable->chunks.count; ++chunkIndex) { avifSampleTableChunk * chunk = &sampleTable->chunks.chunk[chunkIndex]; // First, figure out how many samples are in this chunk uint32_t sampleCount = 0; for (int sampleToChunkIndex = sampleTable->sampleToChunks.count - 1; sampleToChunkIndex >= 0; --sampleToChunkIndex) { avifSampleTableSampleToChunk * sampleToChunk = &sampleTable->sampleToChunks.sampleToChunk[sampleToChunkIndex]; if (sampleToChunk->firstChunk <= (chunkIndex + 1)) { sampleCount = sampleToChunk->samplesPerChunk; break; } } if (sampleCount == 0) { // chunks with 0 samples are invalid return AVIF_FALSE; } uint64_t sampleOffset = chunk->offset; for (uint32_t sampleIndex = 0; sampleIndex < sampleCount; ++sampleIndex) { uint32_t sampleSize = sampleTable->allSamplesSize; if 
(sampleSize == 0) { if (sampleSizeIndex >= sampleTable->sampleSizes.count) { // We've run out of samples to sum return AVIF_FALSE; } avifSampleTableSampleSize * sampleSizePtr = &sampleTable->sampleSizes.sampleSize[sampleSizeIndex]; sampleSize = sampleSizePtr->size; } avifDecodeSample * sample = (avifDecodeSample *)avifArrayPushPtr(&decodeInput->samples); sample->data.data = rawInput->data + sampleOffset; sample->data.size = sampleSize; sample->sync = AVIF_FALSE; // to potentially be set to true following the outer loop if ((sampleOffset + sampleSize) > (uint64_t)rawInput->size) { return AVIF_FALSE; } sampleOffset += sampleSize; ++sampleSizeIndex; } } // Mark appropriate samples as sync for (uint32_t syncSampleIndex = 0; syncSampleIndex < sampleTable->syncSamples.count; ++syncSampleIndex) { uint32_t frameIndex = sampleTable->syncSamples.syncSample[syncSampleIndex].sampleNumber - 1; // sampleNumber is 1-based if (frameIndex < decodeInput->samples.count) { decodeInput->samples.sample[frameIndex].sync = AVIF_TRUE; } } // Assume frame 0 is sync, just in case the stss box is absent in the BMFF. (Unnecessary?) if (decodeInput->samples.count > 0) { decodeInput->samples.sample[0].sync = AVIF_TRUE; } return AVIF_TRUE; } // --------------------------------------------------------------------------- // Helper macros #define BEGIN_STREAM(VARNAME, PTR, SIZE) \ avifROStream VARNAME; \ avifROData VARNAME##_roData; \ VARNAME##_roData.data = PTR; \ VARNAME##_roData.size = SIZE; \ avifROStreamStart(&VARNAME, &VARNAME##_roData) // --------------------------------------------------------------------------- // avifDecoderData typedef struct avifTile { avifCodecDecodeInput * input; struct avifCodec * codec; avifImage * image; } avifTile; AVIF_ARRAY_DECLARE(avifTileArray, avifTile, tile); // This holds one "meta" box (from the BMFF and HEIF standards) worth of relevant-to-AVIF information. // * If a meta box is parsed from the root level of the BMFF, it can contain the information about // "items" which might be color planes, alpha planes, or EXIF or XMP metadata. // * If a meta box is parsed from inside of a track ("trak") box, any metadata (EXIF/XMP) items inside // of that box are implicitly associated with that track. typedef struct avifMeta { // Items (from HEIF) are the generic storage for any data that does not require timed processing // (single image color planes, alpha planes, EXIF, XMP, etc). Each item has a unique integer ID >1, // and is defined by a series of child boxes in a meta box: // * iloc - location: byte offset to item data, item size in bytes // * iinf - information: type of item (color planes, alpha plane, EXIF, XMP) // * ipco - properties: dimensions, aspect ratio, image transformations, references to other items // * ipma - associations: Attaches an item in the properties list to a given item // // Items are lazily created in this array when any of the above boxes refer to one by a new (unseen) ID, // and are then further modified/updated as new information for an item's ID is parsed. avifDecoderItemArray items; // Any ipco boxes explained above are populated into this array as a staging area, which are // then duplicated into the appropriate items upon encountering an item property association // (ipma) box. avifPropertyArray properties; // Filled with the contents of "idat" boxes, which are raw data that an item can directly refer to in its // item location box (iloc) instead of just giving an offset into the overall file. 
If all items' iloc boxes // simply point at an offset/length in the file itself, this array will likely be empty. avifDecoderItemDataArray idats; // Ever-incrementing ID for uniquely identifying which 'meta' box contains an idat (when // multiple meta boxes exist as BMFF siblings). Each time avifParseMetaBox() is called on an // avifMeta struct, this value is incremented. Any time an additional meta box is detected at // the same "level" (root level, trak level, etc), this ID helps distinguish which meta box's // "idat" is which, as items implicitly reference idat boxes that exist in the same meta // box. uint32_t idatID; // Contents of a pitm box, which signal which of the items in this file is the main image. For // AVIF, this should point at an av01 type item containing color planes, and all other items // are ignored unless they refer to this item in some way (alpha plane, EXIF/XMP metadata). uint32_t primaryItemID; } avifMeta; static avifMeta * avifMetaCreate() { avifMeta * meta = (avifMeta *)avifAlloc(sizeof(avifMeta)); memset(meta, 0, sizeof(avifMeta)); avifArrayCreate(&meta->items, sizeof(avifDecoderItem), 8); avifArrayCreate(&meta->properties, sizeof(avifProperty), 16); avifArrayCreate(&meta->idats, sizeof(avifDecoderItemData), 1); return meta; } static void avifMetaDestroy(avifMeta * meta) { for (uint32_t i = 0; i < meta->items.count; ++i) { avifDecoderItem * item = &meta->items.item[i]; avifArrayDestroy(&item->properties); } avifArrayDestroy(&meta->items); avifArrayDestroy(&meta->properties); avifArrayDestroy(&meta->idats); avifFree(meta); } static avifDecoderItem * avifMetaFindItem(avifMeta * meta, uint32_t itemID) { if (itemID == 0) { return NULL; } for (uint32_t i = 0; i < meta->items.count; ++i) { if (meta->items.item[i].id == itemID) { return &meta->items.item[i]; } } avifDecoderItem * item = (avifDecoderItem *)avifArrayPushPtr(&meta->items); avifArrayCreate(&item->properties, sizeof(avifProperty), 16); item->id = itemID; item->meta = meta; return item; } typedef struct avifDecoderData { avifFileType ftyp; avifMeta * meta; // The root-level meta box avifTrackArray tracks; avifROData rawInput; avifTileArray tiles; unsigned int colorTileCount; unsigned int alphaTileCount; avifImageGrid colorGrid; avifImageGrid alphaGrid; avifDecoderSource source; const avifSampleTable * sourceSampleTable; // NULL unless (source == AVIF_DECODER_SOURCE_TRACKS), owned by an avifTrack avifBool cicpSet; // True if avifDecoder's image has had its CICP set correctly yet. // This allows nclx colr boxes to override AV1 CICP, as specified in the MIAF // standard (ISO/IEC 23000-22:2019), section 7.3.6.4: // // "The colour information property takes precedence over any colour information in the image // bitstream, i.e. if the property is present, colour information in the bitstream shall be ignored." 
} avifDecoderData; static avifDecoderData * avifDecoderDataCreate() { avifDecoderData * data = (avifDecoderData *)avifAlloc(sizeof(avifDecoderData)); memset(data, 0, sizeof(avifDecoderData)); data->meta = avifMetaCreate(); avifArrayCreate(&data->tracks, sizeof(avifTrack), 2); avifArrayCreate(&data->tiles, sizeof(avifTile), 8); return data; } static void avifDecoderDataResetCodec(avifDecoderData * data) { for (unsigned int i = 0; i < data->tiles.count; ++i) { avifTile * tile = &data->tiles.tile[i]; if (tile->image) { avifImageFreePlanes(tile->image, AVIF_PLANES_ALL); // forget any pointers into codec image buffers } if (tile->codec) { avifCodecDestroy(tile->codec); tile->codec = NULL; } } } static avifTile * avifDecoderDataCreateTile(avifDecoderData * data) { avifTile * tile = (avifTile *)avifArrayPushPtr(&data->tiles); tile->image = avifImageCreateEmpty(); tile->input = avifCodecDecodeInputCreate(); return tile; } static avifTrack * avifDecoderDataCreateTrack(avifDecoderData * data) { avifTrack * track = (avifTrack *)avifArrayPushPtr(&data->tracks); track->meta = avifMetaCreate(); return track; } static void avifDecoderDataClearTiles(avifDecoderData * data) { for (unsigned int i = 0; i < data->tiles.count; ++i) { avifTile * tile = &data->tiles.tile[i]; if (tile->input) { avifCodecDecodeInputDestroy(tile->input); tile->input = NULL; } if (tile->codec) { avifCodecDestroy(tile->codec); tile->codec = NULL; } if (tile->image) { avifImageDestroy(tile->image); tile->image = NULL; } } data->tiles.count = 0; data->colorTileCount = 0; data->alphaTileCount = 0; } static void avifDecoderDataDestroy(avifDecoderData * data) { avifMetaDestroy(data->meta); for (uint32_t i = 0; i < data->tracks.count; ++i) { avifTrack * track = &data->tracks.track[i]; if (track->sampleTable) { avifSampleTableDestroy(track->sampleTable); } if (track->meta) { avifMetaDestroy(track->meta); } } avifArrayDestroy(&data->tracks); avifDecoderDataClearTiles(data); avifArrayDestroy(&data->tiles); avifFree(data); } static const uint8_t * avifDecoderDataCalcItemPtr(avifDecoderData * data, avifDecoderItem * item) { avifROData * offsetBuffer = NULL; if (item->idatID == 0) { // construction_method: file(0) offsetBuffer = &data->rawInput; } else { // construction_method: idat(1) // Find associated idat block for (uint32_t i = 0; i < item->meta->idats.count; ++i) { if (item->meta->idats.idat[i].id == item->idatID) { offsetBuffer = &item->meta->idats.idat[i].data; break; } } if (offsetBuffer == NULL) { // no idat box was found in this meta box, bail out return NULL; } } if (item->offset > offsetBuffer->size) { return NULL; } uint64_t offsetSize = (uint64_t)item->offset + (uint64_t)item->size; if (offsetSize > (uint64_t)offsetBuffer->size) { return NULL; } return offsetBuffer->data + item->offset; } static avifBool avifDecoderDataGenerateImageGridTiles(avifDecoderData * data, avifImageGrid * grid, avifDecoderItem * gridItem, avifBool alpha) { unsigned int tilesRequested = (unsigned int)grid->rows * (unsigned int)grid->columns; // Count number of dimg for this item, bail out if it doesn't match perfectly unsigned int tilesAvailable = 0; for (uint32_t i = 0; i < gridItem->meta->items.count; ++i) { avifDecoderItem * item = &gridItem->meta->items.item[i]; if (item->dimgForID == gridItem->id) { if (memcmp(item->type, "av01", 4)) { continue; } if (item->hasUnsupportedEssentialProperty) { // An essential property isn't supported by libavif; ignore the item. 
continue; } ++tilesAvailable; } } if (tilesRequested != tilesAvailable) { return AVIF_FALSE; } avifBool firstTile = AVIF_TRUE; for (uint32_t i = 0; i < gridItem->meta->items.count; ++i) { avifDecoderItem * item = &gridItem->meta->items.item[i]; if (item->dimgForID == gridItem->id) { if (memcmp(item->type, "av01", 4)) { continue; } if (item->hasUnsupportedEssentialProperty) { // An essential property isn't supported by libavif; ignore the item. continue; } avifTile * tile = avifDecoderDataCreateTile(data); avifDecodeSample * sample = (avifDecodeSample *)avifArrayPushPtr(&tile->input->samples); sample->data.data = avifDecoderDataCalcItemPtr(data, item); sample->data.size = item->size; sample->sync = AVIF_TRUE; tile->input->alpha = alpha; if (firstTile) { firstTile = AVIF_FALSE; // Adopt the av1C property of the first av01 tile, so that it can be queried from // the top-level color/alpha item during avifDecoderReset(). const avifProperty * srcProp = avifPropertyArrayFind(&item->properties, "av1C"); if (!srcProp) { return AVIF_FALSE; } avifProperty * dstProp = (avifProperty *)avifArrayPushPtr(&gridItem->properties); memcpy(dstProp, srcProp, sizeof(avifProperty)); } } } return AVIF_TRUE; } static avifBool avifDecoderDataFillImageGrid(avifDecoderData * data, avifImageGrid * grid, avifImage * dstImage, unsigned int firstTileIndex, unsigned int tileCount, avifBool alpha) { if (tileCount == 0) { return AVIF_FALSE; } avifTile * firstTile = &data->tiles.tile[firstTileIndex]; avifBool firstTileUVPresent = (firstTile->image->yuvPlanes[AVIF_CHAN_U] && firstTile->image->yuvPlanes[AVIF_CHAN_V]); // Check for tile consistency: All tiles in a grid image should match in the properties checked below. for (unsigned int i = 1; i < tileCount; ++i) { avifTile * tile = &data->tiles.tile[firstTileIndex + i]; avifBool uvPresent = (tile->image->yuvPlanes[AVIF_CHAN_U] && tile->image->yuvPlanes[AVIF_CHAN_V]); if ((tile->image->width != firstTile->image->width) || (tile->image->height != firstTile->image->height) || (tile->image->depth != firstTile->image->depth) || (tile->image->yuvFormat != firstTile->image->yuvFormat) || (tile->image->yuvRange != firstTile->image->yuvRange) || (uvPresent != firstTileUVPresent) || ((tile->image->colorPrimaries != firstTile->image->colorPrimaries) || (tile->image->transferCharacteristics != firstTile->image->transferCharacteristics) || (tile->image->matrixCoefficients != firstTile->image->matrixCoefficients))) { return AVIF_FALSE; } } // Lazily populate dstImage with the new frame's properties. If we're decoding alpha, // these values must already match. if ((dstImage->width != grid->outputWidth) || (dstImage->height != grid->outputHeight) || (dstImage->depth != firstTile->image->depth) || (dstImage->yuvFormat != firstTile->image->yuvFormat)) { if (alpha) { // Alpha doesn't match size, just bail out return AVIF_FALSE; } avifImageFreePlanes(dstImage, AVIF_PLANES_ALL); dstImage->width = grid->outputWidth; dstImage->height = grid->outputHeight; dstImage->depth = firstTile->image->depth; dstImage->yuvFormat = firstTile->image->yuvFormat; dstImage->yuvRange = firstTile->image->yuvRange; if (!data->cicpSet) { data->cicpSet = AVIF_TRUE; dstImage->colorPrimaries = firstTile->image->colorPrimaries; dstImage->transferCharacteristics = firstTile->image->transferCharacteristics; dstImage->matrixCoefficients = firstTile->image->matrixCoefficients; } } if (alpha) { dstImage->alphaRange = firstTile->image->alphaRange; } avifImageAllocatePlanes(dstImage, alpha ? 
AVIF_PLANES_A : AVIF_PLANES_YUV); avifPixelFormatInfo formatInfo; avifGetPixelFormatInfo(firstTile->image->yuvFormat, &formatInfo); unsigned int tileIndex = firstTileIndex; size_t pixelBytes = avifImageUsesU16(dstImage) ? 2 : 1; for (unsigned int rowIndex = 0; rowIndex < grid->rows; ++rowIndex) { for (unsigned int colIndex = 0; colIndex < grid->columns; ++colIndex, ++tileIndex) { avifTile * tile = &data->tiles.tile[tileIndex]; unsigned int widthToCopy = firstTile->image->width; unsigned int maxX = firstTile->image->width * (colIndex + 1); if (maxX > grid->outputWidth) { widthToCopy -= maxX - grid->outputWidth; } unsigned int heightToCopy = firstTile->image->height; unsigned int maxY = firstTile->image->height * (rowIndex + 1); if (maxY > grid->outputHeight) { heightToCopy -= maxY - grid->outputHeight; } // Y and A channels size_t yaColOffset = colIndex * firstTile->image->width; size_t yaRowOffset = rowIndex * firstTile->image->height; size_t yaRowBytes = widthToCopy * pixelBytes; if (alpha) { // A for (unsigned int j = 0; j < heightToCopy; ++j) { uint8_t * src = &tile->image->alphaPlane[j * tile->image->alphaRowBytes]; uint8_t * dst = &dstImage->alphaPlane[(yaColOffset * pixelBytes) + ((yaRowOffset + j) * dstImage->alphaRowBytes)]; memcpy(dst, src, yaRowBytes); } } else { // Y for (unsigned int j = 0; j < heightToCopy; ++j) { uint8_t * src = &tile->image->yuvPlanes[AVIF_CHAN_Y][j * tile->image->yuvRowBytes[AVIF_CHAN_Y]]; uint8_t * dst = &dstImage->yuvPlanes[AVIF_CHAN_Y][(yaColOffset * pixelBytes) + ((yaRowOffset + j) * dstImage->yuvRowBytes[AVIF_CHAN_Y])]; memcpy(dst, src, yaRowBytes); } if (!firstTileUVPresent) { continue; } // UV heightToCopy >>= formatInfo.chromaShiftY; size_t uvColOffset = yaColOffset >> formatInfo.chromaShiftX; size_t uvRowOffset = yaRowOffset >> formatInfo.chromaShiftY; size_t uvRowBytes = yaRowBytes >> formatInfo.chromaShiftX; for (unsigned int j = 0; j < heightToCopy; ++j) { uint8_t * srcU = &tile->image->yuvPlanes[AVIF_CHAN_U][j * tile->image->yuvRowBytes[AVIF_CHAN_U]]; uint8_t * dstU = &dstImage->yuvPlanes[AVIF_CHAN_U][(uvColOffset * pixelBytes) + ((uvRowOffset + j) * dstImage->yuvRowBytes[AVIF_CHAN_U])]; memcpy(dstU, srcU, uvRowBytes); uint8_t * srcV = &tile->image->yuvPlanes[AVIF_CHAN_V][j * tile->image->yuvRowBytes[AVIF_CHAN_V]]; uint8_t * dstV = &dstImage->yuvPlanes[AVIF_CHAN_V][(uvColOffset * pixelBytes) + ((uvRowOffset + j) * dstImage->yuvRowBytes[AVIF_CHAN_V])]; memcpy(dstV, srcV, uvRowBytes); } } } } return AVIF_TRUE; } // If colorId == 0 (a sentinel value as item IDs must be nonzero), accept any found EXIF/XMP metadata. Passing in 0 // is used when finding metadata in a meta box embedded in a trak box, as any items inside of a meta box that is // inside of a trak box are implicitly associated to the track. static avifBool avifDecoderDataFindMetadata(avifDecoderData * data, avifMeta * meta, avifImage * image, uint32_t colorId) { avifROData exifData = AVIF_DATA_EMPTY; avifROData xmpData = AVIF_DATA_EMPTY; for (uint32_t itemIndex = 0; itemIndex < meta->items.count; ++itemIndex) { avifDecoderItem * item = &meta->items.item[itemIndex]; if (!item->size) { continue; } if (item->hasUnsupportedEssentialProperty) { // An essential property isn't supported by libavif; ignore the item. 
continue; } if ((colorId > 0) && (item->descForID != colorId)) { // Not a content description (metadata) for the colorOBU, skip it continue; } if (!memcmp(item->type, "Exif", 4)) { // Advance past Annex A.2.1's header const uint8_t * boxPtr = avifDecoderDataCalcItemPtr(data, item); BEGIN_STREAM(exifBoxStream, boxPtr, item->size); uint32_t exifTiffHeaderOffset; CHECK(avifROStreamReadU32(&exifBoxStream, &exifTiffHeaderOffset)); // unsigned int(32) exif_tiff_header_offset; exifData.data = avifROStreamCurrent(&exifBoxStream); exifData.size = avifROStreamRemainingBytes(&exifBoxStream); } else if (!memcmp(item->type, "mime", 4) && !memcmp(item->contentType.contentType, xmpContentType, xmpContentTypeSize)) { xmpData.data = avifDecoderDataCalcItemPtr(data, item); xmpData.size = item->size; } } if (exifData.data && exifData.size) { avifImageSetMetadataExif(image, exifData.data, exifData.size); } if (xmpData.data && xmpData.size) { avifImageSetMetadataXMP(image, xmpData.data, xmpData.size); } return AVIF_TRUE; } // --------------------------------------------------------------------------- // URN static avifBool isAlphaURN(const char * urn) { return !strcmp(urn, URN_ALPHA0) || !strcmp(urn, URN_ALPHA1); } // --------------------------------------------------------------------------- // BMFF Parsing static avifBool avifParseItemLocationBox(avifMeta * meta, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); uint8_t version; CHECK(avifROStreamReadVersionAndFlags(&s, &version, NULL)); if (version > 2) { return AVIF_FALSE; } uint8_t offsetSizeAndLengthSize; CHECK(avifROStreamRead(&s, &offsetSizeAndLengthSize, 1)); uint8_t offsetSize = (offsetSizeAndLengthSize >> 4) & 0xf; // unsigned int(4) offset_size; uint8_t lengthSize = (offsetSizeAndLengthSize >> 0) & 0xf; // unsigned int(4) length_size; uint8_t baseOffsetSizeAndIndexSize; CHECK(avifROStreamRead(&s, &baseOffsetSizeAndIndexSize, 1)); uint8_t baseOffsetSize = (baseOffsetSizeAndIndexSize >> 4) & 0xf; // unsigned int(4) base_offset_size; uint8_t indexSize = 0; if ((version == 1) || (version == 2)) { indexSize = baseOffsetSizeAndIndexSize & 0xf; // unsigned int(4) index_size; if (indexSize != 0) { // extent_index unsupported return AVIF_FALSE; } } uint16_t tmp16; uint32_t itemCount; if (version < 2) { CHECK(avifROStreamReadU16(&s, &tmp16)); // unsigned int(16) item_count; itemCount = tmp16; } else { CHECK(avifROStreamReadU32(&s, &itemCount)); // unsigned int(32) item_count; } for (uint32_t i = 0; i < itemCount; ++i) { uint32_t itemID; uint32_t idatID = 0; if (version < 2) { CHECK(avifROStreamReadU16(&s, &tmp16)); // unsigned int(16) item_ID; itemID = tmp16; } else { CHECK(avifROStreamReadU32(&s, &itemID)); // unsigned int(32) item_ID; } if ((version == 1) || (version == 2)) { uint8_t ignored; uint8_t constructionMethod; CHECK(avifROStreamRead(&s, &ignored, 1)); // unsigned int(12) reserved = 0; CHECK(avifROStreamRead(&s, &constructionMethod, 1)); // unsigned int(4) construction_method; constructionMethod = constructionMethod & 0xf; if ((constructionMethod != 0 /* file */) && (constructionMethod != 1 /* idat */)) { // construction method item(2) unsupported return AVIF_FALSE; } if (constructionMethod == 1) { idatID = meta->idatID; } } uint16_t dataReferenceIndex; // unsigned int(16) data_ref rence_index; CHECK(avifROStreamReadU16(&s, &dataReferenceIndex)); // uint64_t baseOffset; // unsigned int(base_offset_size*8) base_offset; CHECK(avifROStreamReadUX8(&s, &baseOffset, baseOffsetSize)); // uint16_t extentCount; // unsigned int(16) 
extent_count; CHECK(avifROStreamReadU16(&s, &extentCount)); // if (extentCount == 1) { // If extent_index is ever supported, this spec must be implemented here: // :: if (((version == 1) || (version == 2)) && (index_size > 0)) { // :: unsigned int(index_size*8) extent_index; // :: } uint64_t extentOffset; // unsigned int(offset_size*8) extent_offset; CHECK(avifROStreamReadUX8(&s, &extentOffset, offsetSize)); uint64_t extentLength; // unsigned int(offset_size*8) extent_length; CHECK(avifROStreamReadUX8(&s, &extentLength, lengthSize)); avifDecoderItem * item = avifMetaFindItem(meta, itemID); if (!item) { return AVIF_FALSE; } item->id = itemID; item->offset = (uint32_t)(baseOffset + extentOffset); item->size = (uint32_t)extentLength; item->idatID = idatID; } else { // TODO: support more than one extent return AVIF_FALSE; } } return AVIF_TRUE; } static avifBool avifParseImageGridBox(avifImageGrid * grid, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); uint8_t version, flags; CHECK(avifROStreamRead(&s, &version, 1)); // unsigned int(8) version = 0; if (version != 0) { return AVIF_FALSE; } CHECK(avifROStreamRead(&s, &flags, 1)); // unsigned int(8) flags; CHECK(avifROStreamRead(&s, &grid->rows, 1)); // unsigned int(8) rows_minus_one; CHECK(avifROStreamRead(&s, &grid->columns, 1)); // unsigned int(8) columns_minus_one; ++grid->rows; ++grid->columns; uint32_t fieldLength = ((flags & 1) + 1) * 16; if (fieldLength == 16) { uint16_t outputWidth16, outputHeight16; CHECK(avifROStreamReadU16(&s, &outputWidth16)); // unsigned int(FieldLength) output_width; CHECK(avifROStreamReadU16(&s, &outputHeight16)); // unsigned int(FieldLength) output_height; grid->outputWidth = outputWidth16; grid->outputHeight = outputHeight16; } else { if (fieldLength != 32) { // This should be impossible return AVIF_FALSE; } CHECK(avifROStreamReadU32(&s, &grid->outputWidth)); // unsigned int(FieldLength) output_width; CHECK(avifROStreamReadU32(&s, &grid->outputHeight)); // unsigned int(FieldLength) output_height; } return AVIF_TRUE; } static avifBool avifParseImageSpatialExtentsProperty(avifProperty * prop, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 0)); avifImageSpatialExtents * ispe = &prop->u.ispe; CHECK(avifROStreamReadU32(&s, &ispe->width)); CHECK(avifROStreamReadU32(&s, &ispe->height)); return AVIF_TRUE; } static avifBool avifParseAuxiliaryTypeProperty(avifProperty * prop, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 0)); CHECK(avifROStreamReadString(&s, prop->u.auxC.auxType, AUXTYPE_SIZE)); return AVIF_TRUE; } static avifBool avifParseColourInformationBox(avifProperty * prop, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); avifColourInformationBox * colr = &prop->u.colr; colr->hasICC = AVIF_FALSE; colr->hasNCLX = AVIF_FALSE; uint8_t colorType[4]; // unsigned int(32) colour_type; CHECK(avifROStreamRead(&s, colorType, 4)); if (!memcmp(colorType, "rICC", 4) || !memcmp(colorType, "prof", 4)) { colr->hasICC = AVIF_TRUE; colr->icc = avifROStreamCurrent(&s); colr->iccSize = avifROStreamRemainingBytes(&s); } else if (!memcmp(colorType, "nclx", 4)) { uint16_t tmp16; // unsigned int(16) colour_primaries; CHECK(avifROStreamReadU16(&s, &tmp16)); colr->colorPrimaries = (avifColorPrimaries)tmp16; // unsigned int(16) transfer_characteristics; CHECK(avifROStreamReadU16(&s, &tmp16)); colr->transferCharacteristics = (avifTransferCharacteristics)tmp16; // unsigned 
int(16) matrix_coefficients; CHECK(avifROStreamReadU16(&s, &tmp16)); colr->matrixCoefficients = (avifMatrixCoefficients)tmp16; // unsigned int(1) full_range_flag; // unsigned int(7) reserved = 0; uint8_t tmp8; CHECK(avifROStreamRead(&s, &tmp8, 1)); colr->range = (tmp8 & 0x80) ? AVIF_RANGE_FULL : AVIF_RANGE_LIMITED; colr->hasNCLX = AVIF_TRUE; } return AVIF_TRUE; } static avifBool avifParseAV1CodecConfigurationBox(const uint8_t * raw, size_t rawLen, avifCodecConfigurationBox * av1C) { BEGIN_STREAM(s, raw, rawLen); uint8_t markerAndVersion = 0; CHECK(avifROStreamRead(&s, &markerAndVersion, 1)); uint8_t seqProfileAndIndex = 0; CHECK(avifROStreamRead(&s, &seqProfileAndIndex, 1)); uint8_t rawFlags = 0; CHECK(avifROStreamRead(&s, &rawFlags, 1)); if (markerAndVersion != 0x81) { // Marker and version must both == 1 return AVIF_FALSE; } av1C->seqProfile = (seqProfileAndIndex >> 5) & 0x7; // unsigned int (3) seq_profile; av1C->seqLevelIdx0 = (seqProfileAndIndex >> 0) & 0x1f; // unsigned int (5) seq_level_idx_0; av1C->seqTier0 = (rawFlags >> 7) & 0x1; // unsigned int (1) seq_tier_0; av1C->highBitdepth = (rawFlags >> 6) & 0x1; // unsigned int (1) high_bitdepth; av1C->twelveBit = (rawFlags >> 5) & 0x1; // unsigned int (1) twelve_bit; av1C->monochrome = (rawFlags >> 4) & 0x1; // unsigned int (1) monochrome; av1C->chromaSubsamplingX = (rawFlags >> 3) & 0x1; // unsigned int (1) chroma_subsampling_x; av1C->chromaSubsamplingY = (rawFlags >> 2) & 0x1; // unsigned int (1) chroma_subsampling_y; av1C->chromaSamplePosition = (rawFlags >> 0) & 0x3; // unsigned int (2) chroma_sample_position; return AVIF_TRUE; } static avifBool avifParseAV1CodecConfigurationBoxProperty(avifProperty * prop, const uint8_t * raw, size_t rawLen) { return avifParseAV1CodecConfigurationBox(raw, rawLen, &prop->u.av1C); } static avifBool avifParsePixelAspectRatioBoxProperty(avifProperty * prop, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); avifPixelAspectRatioBox * pasp = &prop->u.pasp; CHECK(avifROStreamReadU32(&s, &pasp->hSpacing)); // unsigned int(32) hSpacing; CHECK(avifROStreamReadU32(&s, &pasp->vSpacing)); // unsigned int(32) vSpacing; return AVIF_TRUE; } static avifBool avifParseCleanApertureBoxProperty(avifProperty * prop, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); avifCleanApertureBox * clap = &prop->u.clap; CHECK(avifROStreamReadU32(&s, &clap->widthN)); // unsigned int(32) cleanApertureWidthN; CHECK(avifROStreamReadU32(&s, &clap->widthD)); // unsigned int(32) cleanApertureWidthD; CHECK(avifROStreamReadU32(&s, &clap->heightN)); // unsigned int(32) cleanApertureHeightN; CHECK(avifROStreamReadU32(&s, &clap->heightD)); // unsigned int(32) cleanApertureHeightD; CHECK(avifROStreamReadU32(&s, &clap->horizOffN)); // unsigned int(32) horizOffN; CHECK(avifROStreamReadU32(&s, &clap->horizOffD)); // unsigned int(32) horizOffD; CHECK(avifROStreamReadU32(&s, &clap->vertOffN)); // unsigned int(32) vertOffN; CHECK(avifROStreamReadU32(&s, &clap->vertOffD)); // unsigned int(32) vertOffD; return AVIF_TRUE; } static avifBool avifParseImageRotationProperty(avifProperty * prop, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); avifImageRotation * irot = &prop->u.irot; CHECK(avifROStreamRead(&s, &irot->angle, 1)); // unsigned int (6) reserved = 0; unsigned int (2) angle; if ((irot->angle & 0xfc) != 0) { // reserved bits must be 0 return AVIF_FALSE; } return AVIF_TRUE; } static avifBool avifParseImageMirrorProperty(avifProperty * prop, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, 
raw, rawLen); avifImageMirror * imir = &prop->u.imir; CHECK(avifROStreamRead(&s, &imir->axis, 1)); // unsigned int (7) reserved = 0; unsigned int (1) axis; if ((imir->axis & 0xfe) != 0) { // reserved bits must be 0 return AVIF_FALSE; } return AVIF_TRUE; } static avifBool avifParsePixelInformationProperty(avifProperty * prop, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 0)); avifPixelInformationProperty * pixi = &prop->u.pixi; CHECK(avifROStreamRead(&s, &pixi->planeCount, 1)); // unsigned int (8) num_channels; if (pixi->planeCount > MAX_PIXI_PLANE_DEPTHS) { return AVIF_FALSE; } for (uint8_t i = 0; i < pixi->planeCount; ++i) { CHECK(avifROStreamRead(&s, &pixi->planeDepths[i], 1)); // unsigned int (8) bits_per_channel; } return AVIF_TRUE; } static avifBool avifParseItemPropertyContainerBox(avifPropertyArray * properties, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader header; CHECK(avifROStreamReadBoxHeader(&s, &header)); int propertyIndex = avifArrayPushIndex(properties); avifProperty * prop = &properties->prop[propertyIndex]; memcpy(prop->type, header.type, 4); if (!memcmp(header.type, "ispe", 4)) { CHECK(avifParseImageSpatialExtentsProperty(prop, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "auxC", 4)) { CHECK(avifParseAuxiliaryTypeProperty(prop, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "colr", 4)) { CHECK(avifParseColourInformationBox(prop, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "av1C", 4)) { CHECK(avifParseAV1CodecConfigurationBoxProperty(prop, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "pasp", 4)) { CHECK(avifParsePixelAspectRatioBoxProperty(prop, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "clap", 4)) { CHECK(avifParseCleanApertureBoxProperty(prop, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "irot", 4)) { CHECK(avifParseImageRotationProperty(prop, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "imir", 4)) { CHECK(avifParseImageMirrorProperty(prop, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "pixi", 4)) { CHECK(avifParsePixelInformationProperty(prop, avifROStreamCurrent(&s), header.size)); } CHECK(avifROStreamSkip(&s, header.size)); } return AVIF_TRUE; } static avifBool avifParseItemPropertyAssociation(avifMeta * meta, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); uint8_t version; uint32_t flags; CHECK(avifROStreamReadVersionAndFlags(&s, &version, &flags)); avifBool propertyIndexIsU16 = ((flags & 0x1) != 0); uint32_t entryCount; CHECK(avifROStreamReadU32(&s, &entryCount)); for (uint32_t entryIndex = 0; entryIndex < entryCount; ++entryIndex) { unsigned int itemID; if (version < 1) { uint16_t tmp; CHECK(avifROStreamReadU16(&s, &tmp)); itemID = tmp; } else { CHECK(avifROStreamReadU32(&s, &itemID)); } uint8_t associationCount; CHECK(avifROStreamRead(&s, &associationCount, 1)); for (uint8_t associationIndex = 0; associationIndex < associationCount; ++associationIndex) { avifBool essential = AVIF_FALSE; uint16_t propertyIndex = 0; if (propertyIndexIsU16) { CHECK(avifROStreamReadU16(&s, &propertyIndex)); essential = ((propertyIndex & 0x8000) != 0); propertyIndex &= 0x7fff; } else { uint8_t tmp; CHECK(avifROStreamRead(&s, &tmp, 1)); essential = ((tmp & 0x80) != 0); propertyIndex = tmp & 0x7f; } if 
(propertyIndex == 0) { // Not associated with any item continue; } --propertyIndex; // 1-indexed if (propertyIndex >= meta->properties.count) { return AVIF_FALSE; } avifDecoderItem * item = avifMetaFindItem(meta, itemID); if (!item) { return AVIF_FALSE; } // Copy property to item avifProperty * srcProp = &meta->properties.prop[propertyIndex]; static const char * supportedTypes[] = { "ispe", "auxC", "colr", "av1C", "pasp", "clap", "irot", "imir", "pixi" }; size_t supportedTypesCount = sizeof(supportedTypes) / sizeof(supportedTypes[0]); avifBool supportedType = AVIF_FALSE; for (size_t i = 0; i < supportedTypesCount; ++i) { if (!memcmp(srcProp->type, supportedTypes[i], 4)) { supportedType = AVIF_TRUE; break; } } if (supportedType) { avifProperty * dstProp = (avifProperty *)avifArrayPushPtr(&item->properties); memcpy(dstProp, srcProp, sizeof(avifProperty)); } else { if (essential) { // Discovered an essential item property that libavif doesn't support! // Make a note to ignore this item later. item->hasUnsupportedEssentialProperty = AVIF_TRUE; } } } } return AVIF_TRUE; } static avifBool avifParsePrimaryItemBox(avifMeta * meta, const uint8_t * raw, size_t rawLen) { if (meta->primaryItemID > 0) { // Illegal to have multiple pitm boxes, bail out return AVIF_FALSE; } BEGIN_STREAM(s, raw, rawLen); uint8_t version; CHECK(avifROStreamReadVersionAndFlags(&s, &version, NULL)); if (version == 0) { uint16_t tmp16; CHECK(avifROStreamReadU16(&s, &tmp16)); // unsigned int(16) item_ID; meta->primaryItemID = tmp16; } else { CHECK(avifROStreamReadU32(&s, &meta->primaryItemID)); // unsigned int(32) item_ID; } return AVIF_TRUE; } static avifBool avifParseItemDataBox(avifMeta * meta, const uint8_t * raw, size_t rawLen) { // Check to see if we've already seen an idat box for this meta box. If so, bail out for (uint32_t i = 0; i < meta->idats.count; ++i) { if (meta->idats.idat[i].id == meta->idatID) { return AVIF_FALSE; } } int index = avifArrayPushIndex(&meta->idats); avifDecoderItemData * idat = &meta->idats.idat[index]; idat->id = meta->idatID; idat->data.data = raw; idat->data.size = rawLen; return AVIF_TRUE; } static avifBool avifParseItemPropertiesBox(avifMeta * meta, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); avifBoxHeader ipcoHeader; CHECK(avifROStreamReadBoxHeader(&s, &ipcoHeader)); if (memcmp(ipcoHeader.type, "ipco", 4) != 0) { return AVIF_FALSE; } // Read all item properties inside of ItemPropertyContainerBox CHECK(avifParseItemPropertyContainerBox(&meta->properties, avifROStreamCurrent(&s), ipcoHeader.size)); CHECK(avifROStreamSkip(&s, ipcoHeader.size)); // Now read all ItemPropertyAssociation until the end of the box, and make associations while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader ipmaHeader; CHECK(avifROStreamReadBoxHeader(&s, &ipmaHeader)); if (!memcmp(ipmaHeader.type, "ipma", 4)) { CHECK(avifParseItemPropertyAssociation(meta, avifROStreamCurrent(&s), ipmaHeader.size)); } else { // These must all be type ipma return AVIF_FALSE; } CHECK(avifROStreamSkip(&s, ipmaHeader.size)); } return AVIF_TRUE; } static avifBool avifParseItemInfoEntry(avifMeta * meta, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 2)); // TODO: support version > 2? 
2+ is required for item_type uint16_t itemID; // unsigned int(16) item_ID; CHECK(avifROStreamReadU16(&s, &itemID)); // uint16_t itemProtectionIndex; // unsigned int(16) item_protection_index; CHECK(avifROStreamReadU16(&s, &itemProtectionIndex)); // uint8_t itemType[4]; // unsigned int(32) item_type; CHECK(avifROStreamRead(&s, itemType, 4)); // avifContentType contentType; if (!memcmp(itemType, "mime", 4)) { CHECK(avifROStreamReadString(&s, NULL, 0)); // string item_name; (skipped) CHECK(avifROStreamReadString(&s, contentType.contentType, CONTENTTYPE_SIZE)); // string content_type; } else { memset(&contentType, 0, sizeof(contentType)); } avifDecoderItem * item = avifMetaFindItem(meta, itemID); if (!item) { return AVIF_FALSE; } memcpy(item->type, itemType, sizeof(itemType)); memcpy(&item->contentType, &contentType, sizeof(contentType)); return AVIF_TRUE; } static avifBool avifParseItemInfoBox(avifMeta * meta, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); uint8_t version; CHECK(avifROStreamReadVersionAndFlags(&s, &version, NULL)); uint32_t entryCount; if (version == 0) { uint16_t tmp; CHECK(avifROStreamReadU16(&s, &tmp)); // unsigned int(16) entry_count; entryCount = tmp; } else if (version == 1) { CHECK(avifROStreamReadU32(&s, &entryCount)); // unsigned int(32) entry_count; } else { return AVIF_FALSE; } for (uint32_t entryIndex = 0; entryIndex < entryCount; ++entryIndex) { avifBoxHeader infeHeader; CHECK(avifROStreamReadBoxHeader(&s, &infeHeader)); if (!memcmp(infeHeader.type, "infe", 4)) { CHECK(avifParseItemInfoEntry(meta, avifROStreamCurrent(&s), infeHeader.size)); } else { // These must all be type ipma return AVIF_FALSE; } CHECK(avifROStreamSkip(&s, infeHeader.size)); } return AVIF_TRUE; } static avifBool avifParseItemReferenceBox(avifMeta * meta, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); uint8_t version; CHECK(avifROStreamReadVersionAndFlags(&s, &version, NULL)); while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader irefHeader; CHECK(avifROStreamReadBoxHeader(&s, &irefHeader)); uint32_t fromID = 0; if (version == 0) { uint16_t tmp; CHECK(avifROStreamReadU16(&s, &tmp)); // unsigned int(16) from_item_ID; fromID = tmp; } else if (version == 1) { CHECK(avifROStreamReadU32(&s, &fromID)); // unsigned int(32) from_item_ID; } else { // unsupported iref version, skip it break; } uint16_t referenceCount = 0; CHECK(avifROStreamReadU16(&s, &referenceCount)); // unsigned int(16) reference_count; for (uint16_t refIndex = 0; refIndex < referenceCount; ++refIndex) { uint32_t toID = 0; if (version == 0) { uint16_t tmp; CHECK(avifROStreamReadU16(&s, &tmp)); // unsigned int(16) to_item_ID; toID = tmp; } else if (version == 1) { CHECK(avifROStreamReadU32(&s, &toID)); // unsigned int(32) to_item_ID; } else { // unsupported iref version, skip it break; } // Read this reference as "{fromID} is a {irefType} for {toID}" if (fromID && toID) { avifDecoderItem * item = avifMetaFindItem(meta, fromID); if (!item) { return AVIF_FALSE; } if (!memcmp(irefHeader.type, "thmb", 4)) { item->thumbnailForID = toID; } if (!memcmp(irefHeader.type, "auxl", 4)) { item->auxForID = toID; } if (!memcmp(irefHeader.type, "cdsc", 4)) { item->descForID = toID; } if (!memcmp(irefHeader.type, "dimg", 4)) { // derived images refer in the opposite direction avifDecoderItem * dimg = avifMetaFindItem(meta, toID); if (!dimg) { return AVIF_FALSE; } dimg->dimgForID = fromID; } } } } return AVIF_TRUE; } static avifBool avifParseMetaBox(avifMeta * meta, const uint8_t * raw, size_t rawLen) { 
BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 0)); ++meta->idatID; // for tracking idat while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader header; CHECK(avifROStreamReadBoxHeader(&s, &header)); if (!memcmp(header.type, "iloc", 4)) { CHECK(avifParseItemLocationBox(meta, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "pitm", 4)) { CHECK(avifParsePrimaryItemBox(meta, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "idat", 4)) { CHECK(avifParseItemDataBox(meta, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "iprp", 4)) { CHECK(avifParseItemPropertiesBox(meta, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "iinf", 4)) { CHECK(avifParseItemInfoBox(meta, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "iref", 4)) { CHECK(avifParseItemReferenceBox(meta, avifROStreamCurrent(&s), header.size)); } CHECK(avifROStreamSkip(&s, header.size)); } return AVIF_TRUE; } static avifBool avifParseTrackHeaderBox(avifTrack * track, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); uint8_t version; CHECK(avifROStreamReadVersionAndFlags(&s, &version, NULL)); uint32_t ignored32, trackID; uint64_t ignored64; if (version == 1) { CHECK(avifROStreamReadU64(&s, &ignored64)); // unsigned int(64) creation_time; CHECK(avifROStreamReadU64(&s, &ignored64)); // unsigned int(64) modification_time; CHECK(avifROStreamReadU32(&s, &trackID)); // unsigned int(32) track_ID; CHECK(avifROStreamReadU32(&s, &ignored32)); // const unsigned int(32) reserved = 0; CHECK(avifROStreamReadU64(&s, &ignored64)); // unsigned int(64) duration; } else if (version == 0) { CHECK(avifROStreamReadU32(&s, &ignored32)); // unsigned int(32) creation_time; CHECK(avifROStreamReadU32(&s, &ignored32)); // unsigned int(32) modification_time; CHECK(avifROStreamReadU32(&s, &trackID)); // unsigned int(32) track_ID; CHECK(avifROStreamReadU32(&s, &ignored32)); // const unsigned int(32) reserved = 0; CHECK(avifROStreamReadU32(&s, &ignored32)); // unsigned int(32) duration; } else { // Unsupported version return AVIF_FALSE; } // Skipping the following 52 bytes here: // ------------------------------------ // const unsigned int(32)[2] reserved = 0; // template int(16) layer = 0; // template int(16) alternate_group = 0; // template int(16) volume = {if track_is_audio 0x0100 else 0}; // const unsigned int(16) reserved = 0; // template int(32)[9] matrix= { 0x00010000,0,0,0,0x00010000,0,0,0,0x40000000 }; // unity matrix CHECK(avifROStreamSkip(&s, 52)); uint32_t width, height; CHECK(avifROStreamReadU32(&s, &width)); // unsigned int(32) width; CHECK(avifROStreamReadU32(&s, &height)); // unsigned int(32) height; track->width = width >> 16; track->height = height >> 16; // TODO: support scaling based on width/height track header info? 
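// (Editor's note, for clarity: tkhd stores width/height as 16.16 fixed point, so the shifts above keep only the integer part of each dimension.)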
track->id = trackID; return AVIF_TRUE; } static avifBool avifParseMediaHeaderBox(avifTrack * track, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); uint8_t version; CHECK(avifROStreamReadVersionAndFlags(&s, &version, NULL)); uint32_t ignored32, mediaTimescale, mediaDuration32; uint64_t ignored64, mediaDuration64; if (version == 1) { CHECK(avifROStreamReadU64(&s, &ignored64)); // unsigned int(64) creation_time; CHECK(avifROStreamReadU64(&s, &ignored64)); // unsigned int(64) modification_time; CHECK(avifROStreamReadU32(&s, &mediaTimescale)); // unsigned int(32) timescale; CHECK(avifROStreamReadU64(&s, &mediaDuration64)); // unsigned int(64) duration; track->mediaDuration = mediaDuration64; } else if (version == 0) { CHECK(avifROStreamReadU32(&s, &ignored32)); // unsigned int(32) creation_time; CHECK(avifROStreamReadU32(&s, &ignored32)); // unsigned int(32) modification_time; CHECK(avifROStreamReadU32(&s, &mediaTimescale)); // unsigned int(32) timescale; CHECK(avifROStreamReadU32(&s, &mediaDuration32)); // unsigned int(32) duration; track->mediaDuration = (uint64_t)mediaDuration32; } else { // Unsupported version return AVIF_FALSE; } track->mediaTimescale = mediaTimescale; return AVIF_TRUE; } static avifBool avifParseChunkOffsetBox(avifSampleTable * sampleTable, avifBool largeOffsets, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 0)); uint32_t entryCount; CHECK(avifROStreamReadU32(&s, &entryCount)); // unsigned int(32) entry_count; for (uint32_t i = 0; i < entryCount; ++i) { uint64_t offset; if (largeOffsets) { CHECK(avifROStreamReadU64(&s, &offset)); // unsigned int(32) chunk_offset; } else { uint32_t offset32; CHECK(avifROStreamReadU32(&s, &offset32)); // unsigned int(32) chunk_offset; offset = (uint64_t)offset32; } avifSampleTableChunk * chunk = (avifSampleTableChunk *)avifArrayPushPtr(&sampleTable->chunks); chunk->offset = offset; } return AVIF_TRUE; } static avifBool avifParseSampleToChunkBox(avifSampleTable * sampleTable, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 0)); uint32_t entryCount; CHECK(avifROStreamReadU32(&s, &entryCount)); // unsigned int(32) entry_count; for (uint32_t i = 0; i < entryCount; ++i) { avifSampleTableSampleToChunk * sampleToChunk = (avifSampleTableSampleToChunk *)avifArrayPushPtr(&sampleTable->sampleToChunks); CHECK(avifROStreamReadU32(&s, &sampleToChunk->firstChunk)); // unsigned int(32) first_chunk; CHECK(avifROStreamReadU32(&s, &sampleToChunk->samplesPerChunk)); // unsigned int(32) samples_per_chunk; CHECK(avifROStreamReadU32(&s, &sampleToChunk->sampleDescriptionIndex)); // unsigned int(32) sample_description_index; } return AVIF_TRUE; } static avifBool avifParseSampleSizeBox(avifSampleTable * sampleTable, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 0)); uint32_t allSamplesSize, sampleCount; CHECK(avifROStreamReadU32(&s, &allSamplesSize)); // unsigned int(32) sample_size; CHECK(avifROStreamReadU32(&s, &sampleCount)); // unsigned int(32) sample_count; if (allSamplesSize > 0) { sampleTable->allSamplesSize = allSamplesSize; } else { for (uint32_t i = 0; i < sampleCount; ++i) { avifSampleTableSampleSize * sampleSize = (avifSampleTableSampleSize *)avifArrayPushPtr(&sampleTable->sampleSizes); CHECK(avifROStreamReadU32(&s, &sampleSize->size)); // unsigned int(32) entry_size; } } return AVIF_TRUE; } static avifBool 
avifParseSyncSampleBox(avifSampleTable * sampleTable, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 0)); uint32_t entryCount; CHECK(avifROStreamReadU32(&s, &entryCount)); // unsigned int(32) entry_count; for (uint32_t i = 0; i < entryCount; ++i) { uint32_t sampleNumber = 0; CHECK(avifROStreamReadU32(&s, &sampleNumber)); // unsigned int(32) sample_number; avifSyncSample * syncSample = (avifSyncSample *)avifArrayPushPtr(&sampleTable->syncSamples); syncSample->sampleNumber = sampleNumber; } return AVIF_TRUE; } static avifBool avifParseTimeToSampleBox(avifSampleTable * sampleTable, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 0)); uint32_t entryCount; CHECK(avifROStreamReadU32(&s, &entryCount)); // unsigned int(32) entry_count; for (uint32_t i = 0; i < entryCount; ++i) { avifSampleTableTimeToSample * timeToSample = (avifSampleTableTimeToSample *)avifArrayPushPtr(&sampleTable->timeToSamples); CHECK(avifROStreamReadU32(&s, &timeToSample->sampleCount)); // unsigned int(32) sample_count; CHECK(avifROStreamReadU32(&s, &timeToSample->sampleDelta)); // unsigned int(32) sample_delta; } return AVIF_TRUE; } static avifBool avifParseSampleDescriptionBox(avifSampleTable * sampleTable, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 0)); uint32_t entryCount; CHECK(avifROStreamReadU32(&s, &entryCount)); // unsigned int(32) entry_count; for (uint32_t i = 0; i < entryCount; ++i) { avifBoxHeader sampleEntryHeader; CHECK(avifROStreamReadBoxHeader(&s, &sampleEntryHeader)); avifSampleDescription * description = (avifSampleDescription *)avifArrayPushPtr(&sampleTable->sampleDescriptions); avifArrayCreate(&description->properties, sizeof(avifProperty), 16); memcpy(description->format, sampleEntryHeader.type, sizeof(description->format)); size_t remainingBytes = avifROStreamRemainingBytes(&s); if (!memcmp(description->format, "av01", 4) && (remainingBytes > VISUALSAMPLEENTRY_SIZE)) { CHECK(avifParseItemPropertyContainerBox( &description->properties, avifROStreamCurrent(&s) + VISUALSAMPLEENTRY_SIZE, remainingBytes - VISUALSAMPLEENTRY_SIZE)); } CHECK(avifROStreamSkip(&s, sampleEntryHeader.size)); } return AVIF_TRUE; } static avifBool avifParseSampleTableBox(avifTrack * track, const uint8_t * raw, size_t rawLen) { if (track->sampleTable) { // A TrackBox may only have one SampleTable return AVIF_FALSE; } track->sampleTable = avifSampleTableCreate(); BEGIN_STREAM(s, raw, rawLen); while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader header; CHECK(avifROStreamReadBoxHeader(&s, &header)); if (!memcmp(header.type, "stco", 4)) { CHECK(avifParseChunkOffsetBox(track->sampleTable, AVIF_FALSE, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "co64", 4)) { CHECK(avifParseChunkOffsetBox(track->sampleTable, AVIF_TRUE, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "stsc", 4)) { CHECK(avifParseSampleToChunkBox(track->sampleTable, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "stsz", 4)) { CHECK(avifParseSampleSizeBox(track->sampleTable, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "stss", 4)) { CHECK(avifParseSyncSampleBox(track->sampleTable, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "stts", 4)) { CHECK(avifParseTimeToSampleBox(track->sampleTable, avifROStreamCurrent(&s), header.size)); } else if 
(!memcmp(header.type, "stsd", 4)) { CHECK(avifParseSampleDescriptionBox(track->sampleTable, avifROStreamCurrent(&s), header.size)); } CHECK(avifROStreamSkip(&s, header.size)); } return AVIF_TRUE; } static avifBool avifParseMediaInformationBox(avifTrack * track, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader header; CHECK(avifROStreamReadBoxHeader(&s, &header)); if (!memcmp(header.type, "stbl", 4)) { CHECK(avifParseSampleTableBox(track, avifROStreamCurrent(&s), header.size)); } CHECK(avifROStreamSkip(&s, header.size)); } return AVIF_TRUE; } static avifBool avifParseMediaBox(avifTrack * track, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader header; CHECK(avifROStreamReadBoxHeader(&s, &header)); if (!memcmp(header.type, "mdhd", 4)) { CHECK(avifParseMediaHeaderBox(track, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "minf", 4)) { CHECK(avifParseMediaInformationBox(track, avifROStreamCurrent(&s), header.size)); } CHECK(avifROStreamSkip(&s, header.size)); } return AVIF_TRUE; } static avifBool avifTrackReferenceBox(avifTrack * track, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader header; CHECK(avifROStreamReadBoxHeader(&s, &header)); if (!memcmp(header.type, "auxl", 4)) { uint32_t toID; CHECK(avifROStreamReadU32(&s, &toID)); // unsigned int(32) track_IDs[] CHECK(avifROStreamSkip(&s, header.size - sizeof(uint32_t))); // just take the first one track->auxForID = toID; } else { CHECK(avifROStreamSkip(&s, header.size)); } } return AVIF_TRUE; } static avifBool avifParseTrackBox(avifDecoderData * data, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); avifTrack * track = avifDecoderDataCreateTrack(data); while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader header; CHECK(avifROStreamReadBoxHeader(&s, &header)); if (!memcmp(header.type, "tkhd", 4)) { CHECK(avifParseTrackHeaderBox(track, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "meta", 4)) { CHECK(avifParseMetaBox(track->meta, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "mdia", 4)) { CHECK(avifParseMediaBox(track, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "tref", 4)) { CHECK(avifTrackReferenceBox(track, avifROStreamCurrent(&s), header.size)); } CHECK(avifROStreamSkip(&s, header.size)); } return AVIF_TRUE; } static avifBool avifParseMoovBox(avifDecoderData * data, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader header; CHECK(avifROStreamReadBoxHeader(&s, &header)); if (!memcmp(header.type, "trak", 4)) { CHECK(avifParseTrackBox(data, avifROStreamCurrent(&s), header.size)); } CHECK(avifROStreamSkip(&s, header.size)); } return AVIF_TRUE; } static avifBool avifParseFileTypeBox(avifFileType * ftyp, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamRead(&s, ftyp->majorBrand, 4)); CHECK(avifROStreamReadU32(&s, &ftyp->minorVersion)); size_t compatibleBrandsBytes = avifROStreamRemainingBytes(&s); if ((compatibleBrandsBytes % 4) != 0) { return AVIF_FALSE; } ftyp->compatibleBrands = avifROStreamCurrent(&s); CHECK(avifROStreamSkip(&s, compatibleBrandsBytes)); ftyp->compatibleBrandsCount = (int)compatibleBrandsBytes / 4; return AVIF_TRUE; } static avifBool avifParse(avifDecoderData * data, const 
uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader header; CHECK(avifROStreamReadBoxHeader(&s, &header)); if (!memcmp(header.type, "ftyp", 4)) { CHECK(avifParseFileTypeBox(&data->ftyp, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "meta", 4)) { CHECK(avifParseMetaBox(data->meta, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "moov", 4)) { CHECK(avifParseMoovBox(data, avifROStreamCurrent(&s), header.size)); } CHECK(avifROStreamSkip(&s, header.size)); } return AVIF_TRUE; } // --------------------------------------------------------------------------- static avifBool avifFileTypeIsCompatible(avifFileType * ftyp) { avifBool avifCompatible = (memcmp(ftyp->majorBrand, "avif", 4) == 0 || memcmp(ftyp->majorBrand, "avis", 4) == 0); if (!avifCompatible) { for (int compatibleBrandIndex = 0; compatibleBrandIndex < ftyp->compatibleBrandsCount; ++compatibleBrandIndex) { const uint8_t * compatibleBrand = &ftyp->compatibleBrands[4 * compatibleBrandIndex]; if (!memcmp(compatibleBrand, "avif", 4) || !memcmp(compatibleBrand, "avis", 4)) { avifCompatible = AVIF_TRUE; break; } } } return avifCompatible; } avifBool avifPeekCompatibleFileType(const avifROData * input) { BEGIN_STREAM(s, input->data, input->size); avifBoxHeader header; CHECK(avifROStreamReadBoxHeader(&s, &header)); if (memcmp(header.type, "ftyp", 4) != 0) { return AVIF_FALSE; } avifFileType ftyp; memset(&ftyp, 0, sizeof(avifFileType)); avifBool parsed = avifParseFileTypeBox(&ftyp, avifROStreamCurrent(&s), header.size); if (!parsed) { return AVIF_FALSE; } return avifFileTypeIsCompatible(&ftyp); } // --------------------------------------------------------------------------- avifDecoder * avifDecoderCreate(void) { avifDecoder * decoder = (avifDecoder *)avifAlloc(sizeof(avifDecoder)); memset(decoder, 0, sizeof(avifDecoder)); return decoder; } static void avifDecoderCleanup(avifDecoder * decoder) { if (decoder->data) { avifDecoderDataDestroy(decoder->data); decoder->data = NULL; } if (decoder->image) { avifImageDestroy(decoder->image); decoder->image = NULL; } } void avifDecoderDestroy(avifDecoder * decoder) { avifDecoderCleanup(decoder); avifFree(decoder); } avifResult avifDecoderSetSource(avifDecoder * decoder, avifDecoderSource source) { decoder->requestedSource = source; return avifDecoderReset(decoder); } avifResult avifDecoderParse(avifDecoder * decoder, const avifROData * rawInput) { // Cleanup anything lingering in the decoder avifDecoderCleanup(decoder); // ----------------------------------------------------------------------- // Parse BMFF boxes decoder->data = avifDecoderDataCreate(); // Shallow copy, on purpose memcpy(&decoder->data->rawInput, rawInput, sizeof(avifROData)); if (!avifParse(decoder->data, decoder->data->rawInput.data, decoder->data->rawInput.size)) { return AVIF_RESULT_BMFF_PARSE_FAILED; } avifBool avifCompatible = avifFileTypeIsCompatible(&decoder->data->ftyp); if (!avifCompatible) { return AVIF_RESULT_INVALID_FTYP; } // Sanity check items for (uint32_t itemIndex = 0; itemIndex < decoder->data->meta->items.count; ++itemIndex) { avifDecoderItem * item = &decoder->data->meta->items.item[itemIndex]; if (item->hasUnsupportedEssentialProperty) { // An essential property isn't supported by libavif; ignore the item. 
continue; } const uint8_t * p = avifDecoderDataCalcItemPtr(decoder->data, item); if (p == NULL) { return AVIF_RESULT_BMFF_PARSE_FAILED; } } // Sanity check tracks for (uint32_t trackIndex = 0; trackIndex < decoder->data->tracks.count; ++trackIndex) { avifTrack * track = &decoder->data->tracks.track[trackIndex]; if (!track->sampleTable) { continue; } for (uint32_t chunkIndex = 0; chunkIndex < track->sampleTable->chunks.count; ++chunkIndex) { avifSampleTableChunk * chunk = &track->sampleTable->chunks.chunk[chunkIndex]; if (chunk->offset > decoder->data->rawInput.size) { return AVIF_RESULT_BMFF_PARSE_FAILED; } } } return avifDecoderReset(decoder); } static avifCodec * avifCodecCreateInternal(avifCodecChoice choice, avifCodecDecodeInput * decodeInput) { avifCodec * codec = avifCodecCreate(choice, AVIF_CODEC_FLAG_CAN_DECODE); if (codec) { codec->decodeInput = decodeInput; } return codec; } static avifResult avifDecoderFlush(avifDecoder * decoder) { avifDecoderDataResetCodec(decoder->data); for (unsigned int i = 0; i < decoder->data->tiles.count; ++i) { avifTile * tile = &decoder->data->tiles.tile[i]; tile->codec = avifCodecCreateInternal(decoder->codecChoice, tile->input); if (!tile->codec) { return AVIF_RESULT_NO_CODEC_AVAILABLE; } if (!tile->codec->open(tile->codec, decoder->imageIndex + 1)) { return AVIF_RESULT_DECODE_COLOR_FAILED; } } return AVIF_RESULT_OK; } avifResult avifDecoderReset(avifDecoder * decoder) { avifDecoderData * data = decoder->data; if (!data) { // Nothing to reset. return AVIF_RESULT_OK; } memset(&data->colorGrid, 0, sizeof(data->colorGrid)); memset(&data->alphaGrid, 0, sizeof(data->alphaGrid)); avifDecoderDataClearTiles(data); // Prepare / cleanup decoded image state if (decoder->image) { avifImageDestroy(decoder->image); } decoder->image = avifImageCreateEmpty(); data->cicpSet = AVIF_FALSE; memset(&decoder->ioStats, 0, sizeof(decoder->ioStats)); // ----------------------------------------------------------------------- // Build decode input data->sourceSampleTable = NULL; // Reset if (decoder->requestedSource == AVIF_DECODER_SOURCE_AUTO) { if (data->tracks.count > 0) { data->source = AVIF_DECODER_SOURCE_TRACKS; } else { data->source = AVIF_DECODER_SOURCE_PRIMARY_ITEM; } } else { data->source = decoder->requestedSource; } const avifPropertyArray * colorProperties = NULL; if (data->source == AVIF_DECODER_SOURCE_TRACKS) { avifTrack * colorTrack = NULL; avifTrack * alphaTrack = NULL; // Find primary track - this probably needs some better detection uint32_t colorTrackIndex = 0; for (; colorTrackIndex < decoder->data->tracks.count; ++colorTrackIndex) { avifTrack * track = &decoder->data->tracks.track[colorTrackIndex]; if (!track->sampleTable) { continue; } if (!track->id) { // trak box might be missing a tkhd box inside, skip it continue; } if (!track->sampleTable->chunks.count) { continue; } if (!avifSampleTableHasFormat(track->sampleTable, "av01")) { continue; } if (track->auxForID != 0) { continue; } // Found one! 
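// (This trak becomes colorTrack below; an auxiliary alpha track, if any, is then located by matching its auxForID against this track's id.)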
break; } if (colorTrackIndex == decoder->data->tracks.count) { return AVIF_RESULT_NO_CONTENT; } colorTrack = &decoder->data->tracks.track[colorTrackIndex]; colorProperties = avifSampleTableGetProperties(colorTrack->sampleTable); if (!colorProperties) { return AVIF_RESULT_BMFF_PARSE_FAILED; } // Find Exif and/or XMP metadata, if any if (colorTrack->meta) { // See the comment above avifDecoderDataFindMetadata() for the explanation of using 0 here if (!avifDecoderDataFindMetadata(data, colorTrack->meta, decoder->image, 0)) { return AVIF_RESULT_BMFF_PARSE_FAILED; } } uint32_t alphaTrackIndex = 0; for (; alphaTrackIndex < decoder->data->tracks.count; ++alphaTrackIndex) { avifTrack * track = &decoder->data->tracks.track[alphaTrackIndex]; if (!track->sampleTable) { continue; } if (!track->id) { continue; } if (!track->sampleTable->chunks.count) { continue; } if (!avifSampleTableHasFormat(track->sampleTable, "av01")) { continue; } if (track->auxForID == colorTrack->id) { // Found it! break; } } if (alphaTrackIndex != decoder->data->tracks.count) { alphaTrack = &decoder->data->tracks.track[alphaTrackIndex]; } avifTile * colorTile = avifDecoderDataCreateTile(decoder->data); if (!avifCodecDecodeInputGetSamples(colorTile->input, colorTrack->sampleTable, &decoder->data->rawInput)) { return AVIF_RESULT_BMFF_PARSE_FAILED; } decoder->data->colorTileCount = 1; avifTile * alphaTile = NULL; if (alphaTrack) { alphaTile = avifDecoderDataCreateTile(decoder->data); if (!avifCodecDecodeInputGetSamples(alphaTile->input, alphaTrack->sampleTable, &decoder->data->rawInput)) { return AVIF_RESULT_BMFF_PARSE_FAILED; } alphaTile->input->alpha = AVIF_TRUE; decoder->data->alphaTileCount = 1; } // Stash off sample table for future timing information data->sourceSampleTable = colorTrack->sampleTable; // Image sequence timing decoder->imageIndex = -1; decoder->imageCount = colorTile->input->samples.count; decoder->timescale = colorTrack->mediaTimescale; decoder->durationInTimescales = colorTrack->mediaDuration; if (colorTrack->mediaTimescale) { decoder->duration = (double)decoder->durationInTimescales / (double)colorTrack->mediaTimescale; } else { decoder->duration = 0; } memset(&decoder->imageTiming, 0, sizeof(decoder->imageTiming)); // to be set in avifDecoderNextImage() decoder->image->width = colorTrack->width; decoder->image->height = colorTrack->height; decoder->alphaPresent = (alphaTrack != NULL); } else { // Create from items avifROData colorOBU = AVIF_DATA_EMPTY; avifROData alphaOBU = AVIF_DATA_EMPTY; avifDecoderItem * colorOBUItem = NULL; avifDecoderItem * alphaOBUItem = NULL; // Find the colorOBU (primary) item for (uint32_t itemIndex = 0; itemIndex < data->meta->items.count; ++itemIndex) { avifDecoderItem * item = &data->meta->items.item[itemIndex]; if (!item->size) { continue; } if (item->hasUnsupportedEssentialProperty) { // An essential property isn't supported by libavif; ignore the item. 
continue; } avifBool isGrid = (memcmp(item->type, "grid", 4) == 0); if (memcmp(item->type, "av01", 4) && !isGrid) { // probably exif or some other data continue; } if (item->thumbnailForID != 0) { // It's a thumbnail, skip it continue; } if ((data->meta->primaryItemID > 0) && (item->id != data->meta->primaryItemID)) { // a primary item ID was specified, require it continue; } if (isGrid) { const uint8_t * itemPtr = avifDecoderDataCalcItemPtr(data, item); if (itemPtr == NULL) { return AVIF_RESULT_BMFF_PARSE_FAILED; } if (!avifParseImageGridBox(&data->colorGrid, itemPtr, item->size)) { return AVIF_RESULT_INVALID_IMAGE_GRID; } } else { colorOBU.data = avifDecoderDataCalcItemPtr(data, item); colorOBU.size = item->size; } colorOBUItem = item; break; } if (!colorOBUItem) { return AVIF_RESULT_NO_AV1_ITEMS_FOUND; } colorProperties = &colorOBUItem->properties; // Find the alphaOBU item, if any for (uint32_t itemIndex = 0; itemIndex < data->meta->items.count; ++itemIndex) { avifDecoderItem * item = &data->meta->items.item[itemIndex]; if (!item->size) { continue; } if (item->hasUnsupportedEssentialProperty) { // An essential property isn't supported by libavif; ignore the item. continue; } avifBool isGrid = (memcmp(item->type, "grid", 4) == 0); if (memcmp(item->type, "av01", 4) && !isGrid) { // probably exif or some other data continue; } if (item->thumbnailForID != 0) { // It's a thumbnail, skip it continue; } const avifProperty * auxCProp = avifPropertyArrayFind(&item->properties, "auxC"); if (auxCProp && isAlphaURN(auxCProp->u.auxC.auxType) && (item->auxForID == colorOBUItem->id)) { if (isGrid) { const uint8_t * itemPtr = avifDecoderDataCalcItemPtr(data, item); if (itemPtr == NULL) { return AVIF_RESULT_BMFF_PARSE_FAILED; } if (!avifParseImageGridBox(&data->alphaGrid, itemPtr, item->size)) { return AVIF_RESULT_INVALID_IMAGE_GRID; } } else { alphaOBU.data = avifDecoderDataCalcItemPtr(data, item); alphaOBU.size = item->size; } alphaOBUItem = item; break; } } // Find Exif and/or XMP metadata, if any if (!avifDecoderDataFindMetadata(data, data->meta, decoder->image, colorOBUItem->id)) { return AVIF_RESULT_BMFF_PARSE_FAILED; } if ((data->colorGrid.rows > 0) && (data->colorGrid.columns > 0)) { if (!avifDecoderDataGenerateImageGridTiles(data, &data->colorGrid, colorOBUItem, AVIF_FALSE)) { return AVIF_RESULT_INVALID_IMAGE_GRID; } data->colorTileCount = data->tiles.count; } else { if (colorOBU.size == 0) { return AVIF_RESULT_NO_AV1_ITEMS_FOUND; } avifTile * colorTile = avifDecoderDataCreateTile(decoder->data); avifDecodeSample * colorSample = (avifDecodeSample *)avifArrayPushPtr(&colorTile->input->samples); memcpy(&colorSample->data, &colorOBU, sizeof(avifROData)); colorSample->sync = AVIF_TRUE; decoder->data->colorTileCount = 1; } if ((data->alphaGrid.rows > 0) && (data->alphaGrid.columns > 0) && alphaOBUItem) { if (!avifDecoderDataGenerateImageGridTiles(data, &data->alphaGrid, alphaOBUItem, AVIF_FALSE)) { return AVIF_RESULT_INVALID_IMAGE_GRID; } data->alphaTileCount = data->tiles.count - data->colorTileCount; } else { avifTile * alphaTile = NULL; if (alphaOBU.size > 0) { alphaTile = avifDecoderDataCreateTile(decoder->data); avifDecodeSample * alphaSample = (avifDecodeSample *)avifArrayPushPtr(&alphaTile->input->samples); memcpy(&alphaSample->data, &alphaOBU, sizeof(avifROData)); alphaSample->sync = AVIF_TRUE; alphaTile->input->alpha = AVIF_TRUE; decoder->data->alphaTileCount = 1; } } // Set all counts and timing to safe-but-uninteresting values decoder->imageIndex = -1; decoder->imageCount = 1; 
decoder->imageTiming.timescale = 1; decoder->imageTiming.pts = 0; decoder->imageTiming.ptsInTimescales = 0; decoder->imageTiming.duration = 1; decoder->imageTiming.durationInTimescales = 1; decoder->timescale = 1; decoder->duration = 1; decoder->durationInTimescales = 1; decoder->ioStats.colorOBUSize = colorOBU.size; decoder->ioStats.alphaOBUSize = alphaOBU.size; const avifProperty * ispeProp = avifPropertyArrayFind(colorProperties, "ispe"); if (ispeProp) { decoder->image->width = ispeProp->u.ispe.width; decoder->image->height = ispeProp->u.ispe.height; } else { decoder->image->width = 0; decoder->image->height = 0; } decoder->alphaPresent = (alphaOBUItem != NULL); } // Sanity check tiles for (uint32_t tileIndex = 0; tileIndex < data->tiles.count; ++tileIndex) { avifTile * tile = &data->tiles.tile[tileIndex]; for (uint32_t sampleIndex = 0; sampleIndex < tile->input->samples.count; ++sampleIndex) { avifDecodeSample * sample = &tile->input->samples.sample[sampleIndex]; if (!sample->data.data || !sample->data.size) { // Every sample must have some data return AVIF_RESULT_BMFF_PARSE_FAILED; } } } const avifProperty * colrProp = avifPropertyArrayFind(colorProperties, "colr"); if (colrProp) { if (colrProp->u.colr.hasICC) { avifImageSetProfileICC(decoder->image, colrProp->u.colr.icc, colrProp->u.colr.iccSize); } else if (colrProp->u.colr.hasNCLX) { data->cicpSet = AVIF_TRUE; decoder->image->colorPrimaries = colrProp->u.colr.colorPrimaries; decoder->image->transferCharacteristics = colrProp->u.colr.transferCharacteristics; decoder->image->matrixCoefficients = colrProp->u.colr.matrixCoefficients; decoder->image->yuvRange = colrProp->u.colr.range; } } // Transformations const avifProperty * paspProp = avifPropertyArrayFind(colorProperties, "pasp"); if (paspProp) { decoder->image->transformFlags |= AVIF_TRANSFORM_PASP; memcpy(&decoder->image->pasp, &paspProp->u.pasp, sizeof(avifPixelAspectRatioBox)); } const avifProperty * clapProp = avifPropertyArrayFind(colorProperties, "clap"); if (clapProp) { decoder->image->transformFlags |= AVIF_TRANSFORM_CLAP; memcpy(&decoder->image->clap, &clapProp->u.clap, sizeof(avifCleanApertureBox)); } const avifProperty * irotProp = avifPropertyArrayFind(colorProperties, "irot"); if (irotProp) { decoder->image->transformFlags |= AVIF_TRANSFORM_IROT; memcpy(&decoder->image->irot, &irotProp->u.irot, sizeof(avifImageRotation)); } const avifProperty * imirProp = avifPropertyArrayFind(colorProperties, "imir"); if (imirProp) { decoder->image->transformFlags |= AVIF_TRANSFORM_IMIR; memcpy(&decoder->image->imir, &imirProp->u.imir, sizeof(avifImageMirror)); } if (!decoder->data->cicpSet && (data->tiles.count > 0)) { avifTile * firstTile = &data->tiles.tile[0]; if (firstTile->input->samples.count > 0) { avifDecodeSample * sample = &firstTile->input->samples.sample[0]; avifSequenceHeader sequenceHeader; if (avifSequenceHeaderParse(&sequenceHeader, &sample->data)) { decoder->data->cicpSet = AVIF_TRUE; decoder->image->colorPrimaries = sequenceHeader.colorPrimaries; decoder->image->transferCharacteristics = sequenceHeader.transferCharacteristics; decoder->image->matrixCoefficients = sequenceHeader.matrixCoefficients; decoder->image->yuvRange = sequenceHeader.range; } } } const avifProperty * av1CProp = avifPropertyArrayFind(colorProperties, "av1C"); if (av1CProp) { decoder->image->depth = avifCodecConfigurationBoxGetDepth(&av1CProp->u.av1C); if (av1CProp->u.av1C.monochrome) { decoder->image->yuvFormat = AVIF_PIXEL_FORMAT_YUV400; } else { if (av1CProp->u.av1C.chromaSubsamplingX && 
av1CProp->u.av1C.chromaSubsamplingY) { decoder->image->yuvFormat = AVIF_PIXEL_FORMAT_YUV420; } else if (av1CProp->u.av1C.chromaSubsamplingX) { decoder->image->yuvFormat = AVIF_PIXEL_FORMAT_YUV422; } else { decoder->image->yuvFormat = AVIF_PIXEL_FORMAT_YUV444; } } decoder->image->yuvChromaSamplePosition = (avifChromaSamplePosition)av1CProp->u.av1C.chromaSamplePosition; } else { // An av1C box is mandatory in all valid AVIF configurations. Bail out. return AVIF_RESULT_BMFF_PARSE_FAILED; } return avifDecoderFlush(decoder); } avifResult avifDecoderNextImage(avifDecoder * decoder) { for (unsigned int tileIndex = 0; tileIndex < decoder->data->tiles.count; ++tileIndex) { avifTile * tile = &decoder->data->tiles.tile[tileIndex]; if (!tile->codec->getNextImage(tile->codec, tile->image)) { if (tile->input->alpha) { return AVIF_RESULT_DECODE_ALPHA_FAILED; } else { if (tile->image->width) { // We've sent at least one image, but we've run out now. return AVIF_RESULT_NO_IMAGES_REMAINING; } return AVIF_RESULT_DECODE_COLOR_FAILED; } } } if (decoder->data->tiles.count != (decoder->data->colorTileCount + decoder->data->alphaTileCount)) { // TODO: assert here? This should be impossible. return AVIF_RESULT_UNKNOWN_ERROR; } if ((decoder->data->colorGrid.rows > 0) || (decoder->data->colorGrid.columns > 0)) { if (!avifDecoderDataFillImageGrid( decoder->data, &decoder->data->colorGrid, decoder->image, 0, decoder->data->colorTileCount, AVIF_FALSE)) { return AVIF_RESULT_INVALID_IMAGE_GRID; } } else { // Normal (most common) non-grid path. Just steal the planes from the only "tile". if (decoder->data->colorTileCount != 1) { return AVIF_RESULT_DECODE_COLOR_FAILED; } avifImage * srcColor = decoder->data->tiles.tile[0].image; if ((decoder->image->width != srcColor->width) || (decoder->image->height != srcColor->height) || (decoder->image->depth != srcColor->depth)) { avifImageFreePlanes(decoder->image, AVIF_PLANES_ALL); decoder->image->width = srcColor->width; decoder->image->height = srcColor->height; decoder->image->depth = srcColor->depth; } #if 0 // This code is currently unnecessary as the CICP is always set by the end of avifDecoderParse(). if (!decoder->data->cicpSet) { decoder->data->cicpSet = AVIF_TRUE; decoder->image->colorPrimaries = srcColor->colorPrimaries; decoder->image->transferCharacteristics = srcColor->transferCharacteristics; decoder->image->matrixCoefficients = srcColor->matrixCoefficients; } #endif avifImageStealPlanes(decoder->image, srcColor, AVIF_PLANES_YUV); } if ((decoder->data->alphaGrid.rows > 0) || (decoder->data->alphaGrid.columns > 0)) { if (!avifDecoderDataFillImageGrid( decoder->data, &decoder->data->alphaGrid, decoder->image, decoder->data->colorTileCount, decoder->data->alphaTileCount, AVIF_TRUE)) { return AVIF_RESULT_INVALID_IMAGE_GRID; } } else { // Normal (most common) non-grid path. Just steal the planes from the only "tile". if (decoder->data->alphaTileCount == 0) { avifImageFreePlanes(decoder->image, AVIF_PLANES_A); // no alpha } else { if (decoder->data->alphaTileCount != 1) { return AVIF_RESULT_DECODE_ALPHA_FAILED; } avifImage * srcAlpha = decoder->data->tiles.tile[decoder->data->colorTileCount].image; if ((decoder->image->width != srcAlpha->width) || (decoder->image->height != srcAlpha->height) || (decoder->image->depth != srcAlpha->depth)) { return AVIF_RESULT_DECODE_ALPHA_FAILED; } avifImageStealPlanes(decoder->image, srcAlpha, AVIF_PLANES_A); } } ++decoder->imageIndex; if (decoder->data->sourceSampleTable) { // Decoding from a track! Provide timing information. 
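// (sourceSampleTable was stashed in avifDecoderReset(); avifDecoderNthImageTiming() below sums its sample deltas to derive pts and duration for this frame.)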
avifResult timingResult = avifDecoderNthImageTiming(decoder, decoder->imageIndex, &decoder->imageTiming); if (timingResult != AVIF_RESULT_OK) { return timingResult; } } return AVIF_RESULT_OK; } avifResult avifDecoderNthImageTiming(const avifDecoder * decoder, uint32_t frameIndex, avifImageTiming * outTiming) { if (!decoder->data) { // Nothing has been parsed yet return AVIF_RESULT_NO_CONTENT; } if ((int)frameIndex >= decoder->imageCount) { // Impossible index return AVIF_RESULT_NO_IMAGES_REMAINING; } if (!decoder->data->sourceSampleTable) { // There isn't any real timing associated with this decode, so // just hand back the defaults chosen in avifDecoderReset(). memcpy(outTiming, &decoder->imageTiming, sizeof(avifImageTiming)); return AVIF_RESULT_OK; } outTiming->timescale = decoder->timescale; outTiming->ptsInTimescales = 0; for (int imageIndex = 0; imageIndex < (int)frameIndex; ++imageIndex) { outTiming->ptsInTimescales += avifSampleTableGetImageDelta(decoder->data->sourceSampleTable, imageIndex); } outTiming->durationInTimescales = avifSampleTableGetImageDelta(decoder->data->sourceSampleTable, frameIndex); if (outTiming->timescale > 0) { outTiming->pts = (double)outTiming->ptsInTimescales / (double)outTiming->timescale; outTiming->duration = (double)outTiming->durationInTimescales / (double)outTiming->timescale; } else { outTiming->pts = 0.0; outTiming->duration = 0.0; } return AVIF_RESULT_OK; } avifResult avifDecoderNthImage(avifDecoder * decoder, uint32_t frameIndex) { int requestedIndex = (int)frameIndex; if (requestedIndex == decoder->imageIndex) { // We're here already, nothing to do return AVIF_RESULT_OK; } if (requestedIndex == (decoder->imageIndex + 1)) { // it's just the next image, nothing special here return avifDecoderNextImage(decoder); } if (requestedIndex >= decoder->imageCount) { // Impossible index return AVIF_RESULT_NO_IMAGES_REMAINING; } // If we get here, a decoder flush is necessary decoder->imageIndex = ((int)avifDecoderNearestKeyframe(decoder, frameIndex)) - 1; // prepare to read nearest keyframe avifDecoderFlush(decoder); for (;;) { avifResult result = avifDecoderNextImage(decoder); if (result != AVIF_RESULT_OK) { return result; } if (requestedIndex == decoder->imageIndex) { break; } } return AVIF_RESULT_OK; } avifBool avifDecoderIsKeyframe(const avifDecoder * decoder, uint32_t frameIndex) { if ((decoder->data->tiles.count > 0) && decoder->data->tiles.tile[0].input) { if (frameIndex < decoder->data->tiles.tile[0].input->samples.count) { return decoder->data->tiles.tile[0].input->samples.sample[frameIndex].sync; } } return AVIF_FALSE; } uint32_t avifDecoderNearestKeyframe(const avifDecoder * decoder, uint32_t frameIndex) { for (; frameIndex != 0; --frameIndex) { if (avifDecoderIsKeyframe(decoder, frameIndex)) { break; } } return frameIndex; } avifResult avifDecoderRead(avifDecoder * decoder, avifImage * image, const avifROData * input) { avifResult result = avifDecoderParse(decoder, input); if (result != AVIF_RESULT_OK) { return result; } result = avifDecoderNextImage(decoder); if (result != AVIF_RESULT_OK) { return result; } avifImageCopy(image, decoder->image, AVIF_PLANES_ALL); return AVIF_RESULT_OK; }
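// ---------------------------------------------------------------------------
// Minimal usage sketch (illustrative only, not part of the library): how a
// caller is expected to drive the single-image entry point avifDecoderRead()
// defined above. avifDecoderCreate()/avifDecoderDestroy(),
// avifImageCreateEmpty()/avifImageDestroy() and avifResultToString() are
// assumed here to be the public constructors/helpers declared in avif.h;
// fileBytes/fileSize stand in for a caller-owned buffer holding the AVIF payload.
//
//     avifROData raw = { fileBytes, fileSize };
//     avifDecoder * decoder = avifDecoderCreate();
//     avifImage * image = avifImageCreateEmpty();
//     avifResult result = avifDecoderRead(decoder, image, &raw);
//     if (result == AVIF_RESULT_OK) {
//         // image now owns copies of the YUV (and optional alpha) planes;
//         // inspect image->width, image->height, image->depth, image->yuvFormat.
//     } else {
//         fprintf(stderr, "decode failed: %s\n", avifResultToString(result));
//     }
//     avifImageDestroy(image);
//     avifDecoderDestroy(decoder);
//
// For image sequences, the same pattern splits into avifDecoderParse() followed by
// repeated avifDecoderNextImage()/avifDecoderNthImage() calls, as implemented above.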
// Copyright 2019 Joe Drago. All rights reserved. // SPDX-License-Identifier: BSD-2-Clause #include "avif/internal.h" #include <string.h> #define AUXTYPE_SIZE 64 #define CONTENTTYPE_SIZE 64 // class VisualSampleEntry(codingname) extends SampleEntry(codingname) { // unsigned int(16) pre_defined = 0; // const unsigned int(16) reserved = 0; // unsigned int(32)[3] pre_defined = 0; // unsigned int(16) width; // unsigned int(16) height; // template unsigned int(32) horizresolution = 0x00480000; // 72 dpi // template unsigned int(32) vertresolution = 0x00480000; // 72 dpi // const unsigned int(32) reserved = 0; // template unsigned int(16) frame_count = 1; // string[32] compressorname; // template unsigned int(16) depth = 0x0018; // int(16) pre_defined = -1; // // other boxes from derived specifications // CleanApertureBox clap; // optional // PixelAspectRatioBox pasp; // optional // } static const size_t VISUALSAMPLEENTRY_SIZE = 78; static const char xmpContentType[] = CONTENT_TYPE_XMP; static const size_t xmpContentTypeSize = sizeof(xmpContentType); // --------------------------------------------------------------------------- // Box data structures // ftyp typedef struct avifFileType { uint8_t majorBrand[4]; uint32_t minorVersion; // If not null, points to a memory block of 4 * compatibleBrandsCount bytes. const uint8_t * compatibleBrands; int compatibleBrandsCount; } avifFileType; // ispe typedef struct avifImageSpatialExtents { uint32_t width; uint32_t height; } avifImageSpatialExtents; // auxC typedef struct avifAuxiliaryType { char auxType[AUXTYPE_SIZE]; } avifAuxiliaryType; // infe mime content_type typedef struct avifContentType { char contentType[CONTENTTYPE_SIZE]; } avifContentType; // colr typedef struct avifColourInformationBox { avifBool hasICC; const uint8_t * icc; size_t iccSize; avifBool hasNCLX; avifColorPrimaries colorPrimaries; avifTransferCharacteristics transferCharacteristics; avifMatrixCoefficients matrixCoefficients; avifRange range; } avifColourInformationBox; #define MAX_PIXI_PLANE_DEPTHS 4 typedef struct avifPixelInformationProperty { uint8_t planeDepths[MAX_PIXI_PLANE_DEPTHS]; uint8_t planeCount; } avifPixelInformationProperty; // --------------------------------------------------------------------------- // Top-level structures struct avifMeta; // Temporary storage for ipco/stsd contents until they can be associated and memcpy'd to an avifDecoderItem typedef struct avifProperty { uint8_t type[4]; union { avifImageSpatialExtents ispe; avifAuxiliaryType auxC; avifColourInformationBox colr; avifCodecConfigurationBox av1C; avifPixelAspectRatioBox pasp; avifCleanApertureBox clap; avifImageRotation irot; avifImageMirror imir; avifPixelInformationProperty pixi; } u; } avifProperty; AVIF_ARRAY_DECLARE(avifPropertyArray, avifProperty, prop); static const avifProperty * avifPropertyArrayFind(const avifPropertyArray * properties, const char * type) { for (uint32_t propertyIndex = 0; propertyIndex < properties->count; ++propertyIndex) { avifProperty * prop = &properties->prop[propertyIndex]; if (!memcmp(prop->type, type, 4)) { return prop; } } return NULL; } // one "item" worth for decoding (all iref, iloc, iprp, etc refer to one of these) typedef struct avifDecoderItem { uint32_t id; struct avifMeta * meta; // Unowned; A back-pointer for convenience uint8_t type[4]; uint32_t offset; uint32_t size; uint32_t idatID; // If non-zero, offset is relative to this idat box (iloc construction_method==1) avifContentType contentType; avifPropertyArray properties; uint32_t thumbnailForID; 
// if non-zero, this item is a thumbnail for Item #{thumbnailForID} uint32_t auxForID; // if non-zero, this item is an auxC plane for Item #{auxForID} uint32_t descForID; // if non-zero, this item is a content description for Item #{descForID} uint32_t dimgForID; // if non-zero, this item is a derived image for Item #{dimgForID} avifBool hasUnsupportedEssentialProperty; // If true, this item cites a property flagged as 'essential' that libavif doesn't support (yet). Ignore the item, if so. } avifDecoderItem; AVIF_ARRAY_DECLARE(avifDecoderItemArray, avifDecoderItem, item); // idat storage typedef struct avifDecoderItemData { uint32_t id; avifROData data; } avifDecoderItemData; AVIF_ARRAY_DECLARE(avifDecoderItemDataArray, avifDecoderItemData, idat); // grid storage typedef struct avifImageGrid { uint8_t rows; uint8_t columns; uint32_t outputWidth; uint32_t outputHeight; } avifImageGrid; // --------------------------------------------------------------------------- // avifTrack typedef struct avifSampleTableChunk { uint64_t offset; } avifSampleTableChunk; AVIF_ARRAY_DECLARE(avifSampleTableChunkArray, avifSampleTableChunk, chunk); typedef struct avifSampleTableSampleToChunk { uint32_t firstChunk; uint32_t samplesPerChunk; uint32_t sampleDescriptionIndex; } avifSampleTableSampleToChunk; AVIF_ARRAY_DECLARE(avifSampleTableSampleToChunkArray, avifSampleTableSampleToChunk, sampleToChunk); typedef struct avifSampleTableSampleSize { uint32_t size; } avifSampleTableSampleSize; AVIF_ARRAY_DECLARE(avifSampleTableSampleSizeArray, avifSampleTableSampleSize, sampleSize); typedef struct avifSampleTableTimeToSample { uint32_t sampleCount; uint32_t sampleDelta; } avifSampleTableTimeToSample; AVIF_ARRAY_DECLARE(avifSampleTableTimeToSampleArray, avifSampleTableTimeToSample, timeToSample); typedef struct avifSyncSample { uint32_t sampleNumber; } avifSyncSample; AVIF_ARRAY_DECLARE(avifSyncSampleArray, avifSyncSample, syncSample); typedef struct avifSampleDescription { uint8_t format[4]; avifPropertyArray properties; } avifSampleDescription; AVIF_ARRAY_DECLARE(avifSampleDescriptionArray, avifSampleDescription, description); typedef struct avifSampleTable { avifSampleTableChunkArray chunks; avifSampleDescriptionArray sampleDescriptions; avifSampleTableSampleToChunkArray sampleToChunks; avifSampleTableSampleSizeArray sampleSizes; avifSampleTableTimeToSampleArray timeToSamples; avifSyncSampleArray syncSamples; uint32_t allSamplesSize; // If this is non-zero, sampleSizes will be empty and all samples will be this size } avifSampleTable; static avifSampleTable * avifSampleTableCreate() { avifSampleTable * sampleTable = (avifSampleTable *)avifAlloc(sizeof(avifSampleTable)); memset(sampleTable, 0, sizeof(avifSampleTable)); avifArrayCreate(&sampleTable->chunks, sizeof(avifSampleTableChunk), 16); avifArrayCreate(&sampleTable->sampleDescriptions, sizeof(avifSampleDescription), 2); avifArrayCreate(&sampleTable->sampleToChunks, sizeof(avifSampleTableSampleToChunk), 16); avifArrayCreate(&sampleTable->sampleSizes, sizeof(avifSampleTableSampleSize), 16); avifArrayCreate(&sampleTable->timeToSamples, sizeof(avifSampleTableTimeToSample), 16); avifArrayCreate(&sampleTable->syncSamples, sizeof(avifSyncSample), 16); return sampleTable; } static void avifSampleTableDestroy(avifSampleTable * sampleTable) { avifArrayDestroy(&sampleTable->chunks); for (uint32_t i = 0; i < sampleTable->sampleDescriptions.count; ++i) { avifSampleDescription * description = &sampleTable->sampleDescriptions.description[i]; 
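// Each sample description owns the property array parsed from the boxes trailing its
// VisualSampleEntry in stsd; free it before destroying the parent array.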
avifArrayDestroy(&description->properties); } avifArrayDestroy(&sampleTable->sampleDescriptions); avifArrayDestroy(&sampleTable->sampleToChunks); avifArrayDestroy(&sampleTable->sampleSizes); avifArrayDestroy(&sampleTable->timeToSamples); avifArrayDestroy(&sampleTable->syncSamples); avifFree(sampleTable); } static uint32_t avifSampleTableGetImageDelta(const avifSampleTable * sampleTable, int imageIndex) { int maxSampleIndex = 0; for (uint32_t i = 0; i < sampleTable->timeToSamples.count; ++i) { const avifSampleTableTimeToSample * timeToSample = &sampleTable->timeToSamples.timeToSample[i]; maxSampleIndex += timeToSample->sampleCount; if ((imageIndex < maxSampleIndex) || (i == (sampleTable->timeToSamples.count - 1))) { return timeToSample->sampleDelta; } } // TODO: fail here? return 1; } static avifBool avifSampleTableHasFormat(const avifSampleTable * sampleTable, const char * format) { for (uint32_t i = 0; i < sampleTable->sampleDescriptions.count; ++i) { if (!memcmp(sampleTable->sampleDescriptions.description[i].format, format, 4)) { return AVIF_TRUE; } } return AVIF_FALSE; } static uint32_t avifCodecConfigurationBoxGetDepth(const avifCodecConfigurationBox * av1C) { if (av1C->twelveBit) { return 12; } else if (av1C->highBitdepth) { return 10; } return 8; } static const avifPropertyArray * avifSampleTableGetProperties(const avifSampleTable * sampleTable) { for (uint32_t i = 0; i < sampleTable->sampleDescriptions.count; ++i) { const avifSampleDescription * description = &sampleTable->sampleDescriptions.description[i]; if (!memcmp(description->format, "av01", 4)) { return &description->properties; } } return NULL; } // one video track ("trak" contents) typedef struct avifTrack { uint32_t id; uint32_t auxForID; // if non-zero, this item is an auxC plane for Track #{auxForID} uint32_t mediaTimescale; uint64_t mediaDuration; uint32_t width; uint32_t height; avifSampleTable * sampleTable; struct avifMeta * meta; } avifTrack; AVIF_ARRAY_DECLARE(avifTrackArray, avifTrack, track); // --------------------------------------------------------------------------- // avifCodecDecodeInput avifCodecDecodeInput * avifCodecDecodeInputCreate(void) { avifCodecDecodeInput * decodeInput = (avifCodecDecodeInput *)avifAlloc(sizeof(avifCodecDecodeInput)); memset(decodeInput, 0, sizeof(avifCodecDecodeInput)); avifArrayCreate(&decodeInput->samples, sizeof(avifDecodeSample), 1); return decodeInput; } void avifCodecDecodeInputDestroy(avifCodecDecodeInput * decodeInput) { avifArrayDestroy(&decodeInput->samples); avifFree(decodeInput); } static avifBool avifCodecDecodeInputGetSamples(avifCodecDecodeInput * decodeInput, avifSampleTable * sampleTable, avifROData * rawInput) { uint32_t sampleSizeIndex = 0; for (uint32_t chunkIndex = 0; chunkIndex < sampleTable->chunks.count; ++chunkIndex) { avifSampleTableChunk * chunk = &sampleTable->chunks.chunk[chunkIndex]; // First, figure out how many samples are in this chunk uint32_t sampleCount = 0; for (int sampleToChunkIndex = sampleTable->sampleToChunks.count - 1; sampleToChunkIndex >= 0; --sampleToChunkIndex) { avifSampleTableSampleToChunk * sampleToChunk = &sampleTable->sampleToChunks.sampleToChunk[sampleToChunkIndex]; if (sampleToChunk->firstChunk <= (chunkIndex + 1)) { sampleCount = sampleToChunk->samplesPerChunk; break; } } if (sampleCount == 0) { // chunks with 0 samples are invalid return AVIF_FALSE; } uint64_t sampleOffset = chunk->offset; for (uint32_t sampleIndex = 0; sampleIndex < sampleCount; ++sampleIndex) { uint32_t sampleSize = sampleTable->allSamplesSize; if 
(sampleSize == 0) { if (sampleSizeIndex >= sampleTable->sampleSizes.count) { // We've run out of samples to sum return AVIF_FALSE; } avifSampleTableSampleSize * sampleSizePtr = &sampleTable->sampleSizes.sampleSize[sampleSizeIndex]; sampleSize = sampleSizePtr->size; } avifDecodeSample * sample = (avifDecodeSample *)avifArrayPushPtr(&decodeInput->samples); sample->data.data = rawInput->data + sampleOffset; sample->data.size = sampleSize; sample->sync = AVIF_FALSE; // to potentially be set to true following the outer loop if ((sampleOffset + sampleSize) > (uint64_t)rawInput->size) { return AVIF_FALSE; } sampleOffset += sampleSize; ++sampleSizeIndex; } } // Mark appropriate samples as sync for (uint32_t syncSampleIndex = 0; syncSampleIndex < sampleTable->syncSamples.count; ++syncSampleIndex) { uint32_t frameIndex = sampleTable->syncSamples.syncSample[syncSampleIndex].sampleNumber - 1; // sampleNumber is 1-based if (frameIndex < decodeInput->samples.count) { decodeInput->samples.sample[frameIndex].sync = AVIF_TRUE; } } // Assume frame 0 is sync, just in case the stss box is absent in the BMFF. (Unnecessary?) if (decodeInput->samples.count > 0) { decodeInput->samples.sample[0].sync = AVIF_TRUE; } return AVIF_TRUE; } // --------------------------------------------------------------------------- // Helper macros #define BEGIN_STREAM(VARNAME, PTR, SIZE) \ avifROStream VARNAME; \ avifROData VARNAME##_roData; \ VARNAME##_roData.data = PTR; \ VARNAME##_roData.size = SIZE; \ avifROStreamStart(&VARNAME, &VARNAME##_roData) // --------------------------------------------------------------------------- // avifDecoderData typedef struct avifTile { avifCodecDecodeInput * input; struct avifCodec * codec; avifImage * image; } avifTile; AVIF_ARRAY_DECLARE(avifTileArray, avifTile, tile); // This holds one "meta" box (from the BMFF and HEIF standards) worth of relevant-to-AVIF information. // * If a meta box is parsed from the root level of the BMFF, it can contain the information about // "items" which might be color planes, alpha planes, or EXIF or XMP metadata. // * If a meta box is parsed from inside of a track ("trak") box, any metadata (EXIF/XMP) items inside // of that box are implicitly associated with that track. typedef struct avifMeta { // Items (from HEIF) are the generic storage for any data that does not require timed processing // (single image color planes, alpha planes, EXIF, XMP, etc). Each item has a unique integer ID >1, // and is defined by a series of child boxes in a meta box: // * iloc - location: byte offset to item data, item size in bytes // * iinf - information: type of item (color planes, alpha plane, EXIF, XMP) // * ipco - properties: dimensions, aspect ratio, image transformations, references to other items // * ipma - associations: Attaches an item in the properties list to a given item // // Items are lazily created in this array when any of the above boxes refer to one by a new (unseen) ID, // and are then further modified/updated as new information for an item's ID is parsed. avifDecoderItemArray items; // Any ipco boxes explained above are populated into this array as a staging area, which are // then duplicated into the appropriate items upon encountering an item property association // (ipma) box. avifPropertyArray properties; // Filled with the contents of "idat" boxes, which are raw data that an item can directly refer to in its // item location box (iloc) instead of just giving an offset into the overall file. 
If all items' iloc boxes // simply point at an offset/length in the file itself, this array will likely be empty. avifDecoderItemDataArray idats; // Ever-incrementing ID for uniquely identifying which 'meta' box contains an idat (when // multiple meta boxes exist as BMFF siblings). Each time avifParseMetaBox() is called on an // avifMeta struct, this value is incremented. Any time an additional meta box is detected at // the same "level" (root level, trak level, etc), this ID helps distinguish which meta box's // "idat" is which, as items implicitly reference idat boxes that exist in the same meta // box. uint32_t idatID; // Contents of a pitm box, which signal which of the items in this file is the main image. For // AVIF, this should point at an av01 type item containing color planes, and all other items // are ignored unless they refer to this item in some way (alpha plane, EXIF/XMP metadata). uint32_t primaryItemID; } avifMeta; static avifMeta * avifMetaCreate() { avifMeta * meta = (avifMeta *)avifAlloc(sizeof(avifMeta)); memset(meta, 0, sizeof(avifMeta)); avifArrayCreate(&meta->items, sizeof(avifDecoderItem), 8); avifArrayCreate(&meta->properties, sizeof(avifProperty), 16); avifArrayCreate(&meta->idats, sizeof(avifDecoderItemData), 1); return meta; } static void avifMetaDestroy(avifMeta * meta) { for (uint32_t i = 0; i < meta->items.count; ++i) { avifDecoderItem * item = &meta->items.item[i]; avifArrayDestroy(&item->properties); } avifArrayDestroy(&meta->items); avifArrayDestroy(&meta->properties); avifArrayDestroy(&meta->idats); avifFree(meta); } static avifDecoderItem * avifMetaFindItem(avifMeta * meta, uint32_t itemID) { if (itemID == 0) { return NULL; } for (uint32_t i = 0; i < meta->items.count; ++i) { if (meta->items.item[i].id == itemID) { return &meta->items.item[i]; } } avifDecoderItem * item = (avifDecoderItem *)avifArrayPushPtr(&meta->items); avifArrayCreate(&item->properties, sizeof(avifProperty), 16); item->id = itemID; item->meta = meta; return item; } typedef struct avifDecoderData { avifFileType ftyp; avifMeta * meta; // The root-level meta box avifTrackArray tracks; avifROData rawInput; avifTileArray tiles; unsigned int colorTileCount; unsigned int alphaTileCount; avifImageGrid colorGrid; avifImageGrid alphaGrid; avifDecoderSource source; const avifSampleTable * sourceSampleTable; // NULL unless (source == AVIF_DECODER_SOURCE_TRACKS), owned by an avifTrack avifBool cicpSet; // True if avifDecoder's image has had its CICP set correctly yet. // This allows nclx colr boxes to override AV1 CICP, as specified in the MIAF // standard (ISO/IEC 23000-22:2019), section 7.3.6.4: // // "The colour information property takes precedence over any colour information in the image // bitstream, i.e. if the property is present, colour information in the bitstream shall be ignored." 
} avifDecoderData; static avifDecoderData * avifDecoderDataCreate() { avifDecoderData * data = (avifDecoderData *)avifAlloc(sizeof(avifDecoderData)); memset(data, 0, sizeof(avifDecoderData)); data->meta = avifMetaCreate(); avifArrayCreate(&data->tracks, sizeof(avifTrack), 2); avifArrayCreate(&data->tiles, sizeof(avifTile), 8); return data; } static void avifDecoderDataResetCodec(avifDecoderData * data) { for (unsigned int i = 0; i < data->tiles.count; ++i) { avifTile * tile = &data->tiles.tile[i]; if (tile->image) { avifImageFreePlanes(tile->image, AVIF_PLANES_ALL); // forget any pointers into codec image buffers } if (tile->codec) { avifCodecDestroy(tile->codec); tile->codec = NULL; } } } static avifTile * avifDecoderDataCreateTile(avifDecoderData * data) { avifTile * tile = (avifTile *)avifArrayPushPtr(&data->tiles); tile->image = avifImageCreateEmpty(); tile->input = avifCodecDecodeInputCreate(); return tile; } static avifTrack * avifDecoderDataCreateTrack(avifDecoderData * data) { avifTrack * track = (avifTrack *)avifArrayPushPtr(&data->tracks); track->meta = avifMetaCreate(); return track; } static void avifDecoderDataClearTiles(avifDecoderData * data) { for (unsigned int i = 0; i < data->tiles.count; ++i) { avifTile * tile = &data->tiles.tile[i]; if (tile->input) { avifCodecDecodeInputDestroy(tile->input); tile->input = NULL; } if (tile->codec) { avifCodecDestroy(tile->codec); tile->codec = NULL; } if (tile->image) { avifImageDestroy(tile->image); tile->image = NULL; } } data->tiles.count = 0; data->colorTileCount = 0; data->alphaTileCount = 0; } static void avifDecoderDataDestroy(avifDecoderData * data) { avifMetaDestroy(data->meta); for (uint32_t i = 0; i < data->tracks.count; ++i) { avifTrack * track = &data->tracks.track[i]; if (track->sampleTable) { avifSampleTableDestroy(track->sampleTable); } if (track->meta) { avifMetaDestroy(track->meta); } } avifArrayDestroy(&data->tracks); avifDecoderDataClearTiles(data); avifArrayDestroy(&data->tiles); avifFree(data); } static const uint8_t * avifDecoderDataCalcItemPtr(avifDecoderData * data, avifDecoderItem * item) { avifROData * offsetBuffer = NULL; if (item->idatID == 0) { // construction_method: file(0) offsetBuffer = &data->rawInput; } else { // construction_method: idat(1) // Find associated idat block for (uint32_t i = 0; i < item->meta->idats.count; ++i) { if (item->meta->idats.idat[i].id == item->idatID) { offsetBuffer = &item->meta->idats.idat[i].data; break; } } if (offsetBuffer == NULL) { // no idat box was found in this meta box, bail out return NULL; } } if (item->offset > offsetBuffer->size) { return NULL; } uint64_t offsetSize = (uint64_t)item->offset + (uint64_t)item->size; if (offsetSize > (uint64_t)offsetBuffer->size) { return NULL; } return offsetBuffer->data + item->offset; } static avifBool avifDecoderDataGenerateImageGridTiles(avifDecoderData * data, avifImageGrid * grid, avifDecoderItem * gridItem, avifBool alpha) { unsigned int tilesRequested = (unsigned int)grid->rows * (unsigned int)grid->columns; // Count number of dimg for this item, bail out if it doesn't match perfectly unsigned int tilesAvailable = 0; for (uint32_t i = 0; i < gridItem->meta->items.count; ++i) { avifDecoderItem * item = &gridItem->meta->items.item[i]; if (item->dimgForID == gridItem->id) { if (memcmp(item->type, "av01", 4)) { continue; } if (item->hasUnsupportedEssentialProperty) { // An essential property isn't supported by libavif; ignore the item. 
continue; } ++tilesAvailable; } } if (tilesRequested != tilesAvailable) { return AVIF_FALSE; } avifBool firstTile = AVIF_TRUE; for (uint32_t i = 0; i < gridItem->meta->items.count; ++i) { avifDecoderItem * item = &gridItem->meta->items.item[i]; if (item->dimgForID == gridItem->id) { if (memcmp(item->type, "av01", 4)) { continue; } if (item->hasUnsupportedEssentialProperty) { // An essential property isn't supported by libavif; ignore the item. continue; } avifTile * tile = avifDecoderDataCreateTile(data); avifDecodeSample * sample = (avifDecodeSample *)avifArrayPushPtr(&tile->input->samples); sample->data.data = avifDecoderDataCalcItemPtr(data, item); sample->data.size = item->size; sample->sync = AVIF_TRUE; tile->input->alpha = alpha; if (firstTile) { firstTile = AVIF_FALSE; // Adopt the av1C property of the first av01 tile, so that it can be queried from // the top-level color/alpha item during avifDecoderReset(). const avifProperty * srcProp = avifPropertyArrayFind(&item->properties, "av1C"); if (!srcProp) { return AVIF_FALSE; } avifProperty * dstProp = (avifProperty *)avifArrayPushPtr(&gridItem->properties); memcpy(dstProp, srcProp, sizeof(avifProperty)); } } } return AVIF_TRUE; } static avifBool avifDecoderDataFillImageGrid(avifDecoderData * data, avifImageGrid * grid, avifImage * dstImage, unsigned int firstTileIndex, unsigned int tileCount, avifBool alpha) { if (tileCount == 0) { return AVIF_FALSE; } avifTile * firstTile = &data->tiles.tile[firstTileIndex]; avifBool firstTileUVPresent = (firstTile->image->yuvPlanes[AVIF_CHAN_U] && firstTile->image->yuvPlanes[AVIF_CHAN_V]); // Check for tile consistency: All tiles in a grid image should match in the properties checked below. for (unsigned int i = 1; i < tileCount; ++i) { avifTile * tile = &data->tiles.tile[firstTileIndex + i]; avifBool uvPresent = (tile->image->yuvPlanes[AVIF_CHAN_U] && tile->image->yuvPlanes[AVIF_CHAN_V]); if ((tile->image->width != firstTile->image->width) || (tile->image->height != firstTile->image->height) || (tile->image->depth != firstTile->image->depth) || (tile->image->yuvFormat != firstTile->image->yuvFormat) || (tile->image->yuvRange != firstTile->image->yuvRange) || (uvPresent != firstTileUVPresent) || ((tile->image->colorPrimaries != firstTile->image->colorPrimaries) || (tile->image->transferCharacteristics != firstTile->image->transferCharacteristics) || (tile->image->matrixCoefficients != firstTile->image->matrixCoefficients))) { return AVIF_FALSE; } } // Lazily populate dstImage with the new frame's properties. If we're decoding alpha, // these values must already match. if ((dstImage->width != grid->outputWidth) || (dstImage->height != grid->outputHeight) || (dstImage->depth != firstTile->image->depth) || (dstImage->yuvFormat != firstTile->image->yuvFormat)) { if (alpha) { // Alpha doesn't match size, just bail out return AVIF_FALSE; } avifImageFreePlanes(dstImage, AVIF_PLANES_ALL); dstImage->width = grid->outputWidth; dstImage->height = grid->outputHeight; dstImage->depth = firstTile->image->depth; dstImage->yuvFormat = firstTile->image->yuvFormat; dstImage->yuvRange = firstTile->image->yuvRange; if (!data->cicpSet) { data->cicpSet = AVIF_TRUE; dstImage->colorPrimaries = firstTile->image->colorPrimaries; dstImage->transferCharacteristics = firstTile->image->transferCharacteristics; dstImage->matrixCoefficients = firstTile->image->matrixCoefficients; } } if (alpha) { dstImage->alphaRange = firstTile->image->alphaRange; } avifImageAllocatePlanes(dstImage, alpha ? 
AVIF_PLANES_A : AVIF_PLANES_YUV); avifPixelFormatInfo formatInfo; avifGetPixelFormatInfo(firstTile->image->yuvFormat, &formatInfo); unsigned int tileIndex = firstTileIndex; size_t pixelBytes = avifImageUsesU16(dstImage) ? 2 : 1; for (unsigned int rowIndex = 0; rowIndex < grid->rows; ++rowIndex) { for (unsigned int colIndex = 0; colIndex < grid->columns; ++colIndex, ++tileIndex) { avifTile * tile = &data->tiles.tile[tileIndex]; unsigned int widthToCopy = firstTile->image->width; unsigned int maxX = firstTile->image->width * (colIndex + 1); if (maxX > grid->outputWidth) { widthToCopy -= maxX - grid->outputWidth; } unsigned int heightToCopy = firstTile->image->height; unsigned int maxY = firstTile->image->height * (rowIndex + 1); if (maxY > grid->outputHeight) { heightToCopy -= maxY - grid->outputHeight; } // Y and A channels size_t yaColOffset = colIndex * firstTile->image->width; size_t yaRowOffset = rowIndex * firstTile->image->height; size_t yaRowBytes = widthToCopy * pixelBytes; if (alpha) { // A for (unsigned int j = 0; j < heightToCopy; ++j) { uint8_t * src = &tile->image->alphaPlane[j * tile->image->alphaRowBytes]; uint8_t * dst = &dstImage->alphaPlane[(yaColOffset * pixelBytes) + ((yaRowOffset + j) * dstImage->alphaRowBytes)]; memcpy(dst, src, yaRowBytes); } } else { // Y for (unsigned int j = 0; j < heightToCopy; ++j) { uint8_t * src = &tile->image->yuvPlanes[AVIF_CHAN_Y][j * tile->image->yuvRowBytes[AVIF_CHAN_Y]]; uint8_t * dst = &dstImage->yuvPlanes[AVIF_CHAN_Y][(yaColOffset * pixelBytes) + ((yaRowOffset + j) * dstImage->yuvRowBytes[AVIF_CHAN_Y])]; memcpy(dst, src, yaRowBytes); } if (!firstTileUVPresent) { continue; } // UV heightToCopy >>= formatInfo.chromaShiftY; size_t uvColOffset = yaColOffset >> formatInfo.chromaShiftX; size_t uvRowOffset = yaRowOffset >> formatInfo.chromaShiftY; size_t uvRowBytes = yaRowBytes >> formatInfo.chromaShiftX; for (unsigned int j = 0; j < heightToCopy; ++j) { uint8_t * srcU = &tile->image->yuvPlanes[AVIF_CHAN_U][j * tile->image->yuvRowBytes[AVIF_CHAN_U]]; uint8_t * dstU = &dstImage->yuvPlanes[AVIF_CHAN_U][(uvColOffset * pixelBytes) + ((uvRowOffset + j) * dstImage->yuvRowBytes[AVIF_CHAN_U])]; memcpy(dstU, srcU, uvRowBytes); uint8_t * srcV = &tile->image->yuvPlanes[AVIF_CHAN_V][j * tile->image->yuvRowBytes[AVIF_CHAN_V]]; uint8_t * dstV = &dstImage->yuvPlanes[AVIF_CHAN_V][(uvColOffset * pixelBytes) + ((uvRowOffset + j) * dstImage->yuvRowBytes[AVIF_CHAN_V])]; memcpy(dstV, srcV, uvRowBytes); } } } } return AVIF_TRUE; } // If colorId == 0 (a sentinel value as item IDs must be nonzero), accept any found EXIF/XMP metadata. Passing in 0 // is used when finding metadata in a meta box embedded in a trak box, as any items inside of a meta box that is // inside of a trak box are implicitly associated to the track. static avifBool avifDecoderDataFindMetadata(avifDecoderData * data, avifMeta * meta, avifImage * image, uint32_t colorId) { avifROData exifData = AVIF_DATA_EMPTY; avifROData xmpData = AVIF_DATA_EMPTY; for (uint32_t itemIndex = 0; itemIndex < meta->items.count; ++itemIndex) { avifDecoderItem * item = &meta->items.item[itemIndex]; if (!item->size) { continue; } if (item->hasUnsupportedEssentialProperty) { // An essential property isn't supported by libavif; ignore the item. 
continue; } if ((colorId > 0) && (item->descForID != colorId)) { // Not a content description (metadata) for the colorOBU, skip it continue; } if (!memcmp(item->type, "Exif", 4)) { // Advance past Annex A.2.1's header const uint8_t * boxPtr = avifDecoderDataCalcItemPtr(data, item); BEGIN_STREAM(exifBoxStream, boxPtr, item->size); uint32_t exifTiffHeaderOffset; CHECK(avifROStreamReadU32(&exifBoxStream, &exifTiffHeaderOffset)); // unsigned int(32) exif_tiff_header_offset; exifData.data = avifROStreamCurrent(&exifBoxStream); exifData.size = avifROStreamRemainingBytes(&exifBoxStream); } else if (!memcmp(item->type, "mime", 4) && !memcmp(item->contentType.contentType, xmpContentType, xmpContentTypeSize)) { xmpData.data = avifDecoderDataCalcItemPtr(data, item); xmpData.size = item->size; } } if (exifData.data && exifData.size) { avifImageSetMetadataExif(image, exifData.data, exifData.size); } if (xmpData.data && xmpData.size) { avifImageSetMetadataXMP(image, xmpData.data, xmpData.size); } return AVIF_TRUE; } // --------------------------------------------------------------------------- // URN static avifBool isAlphaURN(const char * urn) { return !strcmp(urn, URN_ALPHA0) || !strcmp(urn, URN_ALPHA1); } // --------------------------------------------------------------------------- // BMFF Parsing static avifBool avifParseItemLocationBox(avifMeta * meta, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); uint8_t version; CHECK(avifROStreamReadVersionAndFlags(&s, &version, NULL)); if (version > 2) { return AVIF_FALSE; } uint8_t offsetSizeAndLengthSize; CHECK(avifROStreamRead(&s, &offsetSizeAndLengthSize, 1)); uint8_t offsetSize = (offsetSizeAndLengthSize >> 4) & 0xf; // unsigned int(4) offset_size; uint8_t lengthSize = (offsetSizeAndLengthSize >> 0) & 0xf; // unsigned int(4) length_size; uint8_t baseOffsetSizeAndIndexSize; CHECK(avifROStreamRead(&s, &baseOffsetSizeAndIndexSize, 1)); uint8_t baseOffsetSize = (baseOffsetSizeAndIndexSize >> 4) & 0xf; // unsigned int(4) base_offset_size; uint8_t indexSize = 0; if ((version == 1) || (version == 2)) { indexSize = baseOffsetSizeAndIndexSize & 0xf; // unsigned int(4) index_size; if (indexSize != 0) { // extent_index unsupported return AVIF_FALSE; } } uint16_t tmp16; uint32_t itemCount; if (version < 2) { CHECK(avifROStreamReadU16(&s, &tmp16)); // unsigned int(16) item_count; itemCount = tmp16; } else { CHECK(avifROStreamReadU32(&s, &itemCount)); // unsigned int(32) item_count; } for (uint32_t i = 0; i < itemCount; ++i) { uint32_t itemID; uint32_t idatID = 0; if (version < 2) { CHECK(avifROStreamReadU16(&s, &tmp16)); // unsigned int(16) item_ID; itemID = tmp16; } else { CHECK(avifROStreamReadU32(&s, &itemID)); // unsigned int(32) item_ID; } if ((version == 1) || (version == 2)) { uint8_t ignored; uint8_t constructionMethod; CHECK(avifROStreamRead(&s, &ignored, 1)); // unsigned int(12) reserved = 0; CHECK(avifROStreamRead(&s, &constructionMethod, 1)); // unsigned int(4) construction_method; constructionMethod = constructionMethod & 0xf; if ((constructionMethod != 0 /* file */) && (constructionMethod != 1 /* idat */)) { // construction method item(2) unsupported return AVIF_FALSE; } if (constructionMethod == 1) { idatID = meta->idatID; } } uint16_t dataReferenceIndex; // unsigned int(16) data_ref rence_index; CHECK(avifROStreamReadU16(&s, &dataReferenceIndex)); // uint64_t baseOffset; // unsigned int(base_offset_size*8) base_offset; CHECK(avifROStreamReadUX8(&s, &baseOffset, baseOffsetSize)); // uint16_t extentCount; // unsigned int(16) 
extent_count; CHECK(avifROStreamReadU16(&s, &extentCount)); // if (extentCount == 1) { // If extent_index is ever supported, this spec must be implemented here: // :: if (((version == 1) || (version == 2)) && (index_size > 0)) { // :: unsigned int(index_size*8) extent_index; // :: } uint64_t extentOffset; // unsigned int(offset_size*8) extent_offset; CHECK(avifROStreamReadUX8(&s, &extentOffset, offsetSize)); uint64_t extentLength; // unsigned int(offset_size*8) extent_length; CHECK(avifROStreamReadUX8(&s, &extentLength, lengthSize)); avifDecoderItem * item = avifMetaFindItem(meta, itemID); if (!item) { return AVIF_FALSE; } item->id = itemID; item->offset = (uint32_t)(baseOffset + extentOffset); item->size = (uint32_t)extentLength; item->idatID = idatID; } else { // TODO: support more than one extent return AVIF_FALSE; } } return AVIF_TRUE; } static avifBool avifParseImageGridBox(avifImageGrid * grid, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); uint8_t version, flags; CHECK(avifROStreamRead(&s, &version, 1)); // unsigned int(8) version = 0; if (version != 0) { return AVIF_FALSE; } CHECK(avifROStreamRead(&s, &flags, 1)); // unsigned int(8) flags; CHECK(avifROStreamRead(&s, &grid->rows, 1)); // unsigned int(8) rows_minus_one; CHECK(avifROStreamRead(&s, &grid->columns, 1)); // unsigned int(8) columns_minus_one; ++grid->rows; ++grid->columns; uint32_t fieldLength = ((flags & 1) + 1) * 16; if (fieldLength == 16) { uint16_t outputWidth16, outputHeight16; CHECK(avifROStreamReadU16(&s, &outputWidth16)); // unsigned int(FieldLength) output_width; CHECK(avifROStreamReadU16(&s, &outputHeight16)); // unsigned int(FieldLength) output_height; grid->outputWidth = outputWidth16; grid->outputHeight = outputHeight16; } else { if (fieldLength != 32) { // This should be impossible return AVIF_FALSE; } CHECK(avifROStreamReadU32(&s, &grid->outputWidth)); // unsigned int(FieldLength) output_width; CHECK(avifROStreamReadU32(&s, &grid->outputHeight)); // unsigned int(FieldLength) output_height; } if (grid->outputWidth > AVIF_MAX_IMAGE_SIZE / grid->outputHeight) { return AVIF_FALSE; } return AVIF_TRUE; } static avifBool avifParseImageSpatialExtentsProperty(avifProperty * prop, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 0)); avifImageSpatialExtents * ispe = &prop->u.ispe; CHECK(avifROStreamReadU32(&s, &ispe->width)); CHECK(avifROStreamReadU32(&s, &ispe->height)); return AVIF_TRUE; } static avifBool avifParseAuxiliaryTypeProperty(avifProperty * prop, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 0)); CHECK(avifROStreamReadString(&s, prop->u.auxC.auxType, AUXTYPE_SIZE)); return AVIF_TRUE; } static avifBool avifParseColourInformationBox(avifProperty * prop, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); avifColourInformationBox * colr = &prop->u.colr; colr->hasICC = AVIF_FALSE; colr->hasNCLX = AVIF_FALSE; uint8_t colorType[4]; // unsigned int(32) colour_type; CHECK(avifROStreamRead(&s, colorType, 4)); if (!memcmp(colorType, "rICC", 4) || !memcmp(colorType, "prof", 4)) { colr->hasICC = AVIF_TRUE; colr->icc = avifROStreamCurrent(&s); colr->iccSize = avifROStreamRemainingBytes(&s); } else if (!memcmp(colorType, "nclx", 4)) { uint16_t tmp16; // unsigned int(16) colour_primaries; CHECK(avifROStreamReadU16(&s, &tmp16)); colr->colorPrimaries = (avifColorPrimaries)tmp16; // unsigned int(16) transfer_characteristics; CHECK(avifROStreamReadU16(&s, 
&tmp16)); colr->transferCharacteristics = (avifTransferCharacteristics)tmp16; // unsigned int(16) matrix_coefficients; CHECK(avifROStreamReadU16(&s, &tmp16)); colr->matrixCoefficients = (avifMatrixCoefficients)tmp16; // unsigned int(1) full_range_flag; // unsigned int(7) reserved = 0; uint8_t tmp8; CHECK(avifROStreamRead(&s, &tmp8, 1)); colr->range = (tmp8 & 0x80) ? AVIF_RANGE_FULL : AVIF_RANGE_LIMITED; colr->hasNCLX = AVIF_TRUE; } return AVIF_TRUE; } static avifBool avifParseAV1CodecConfigurationBox(const uint8_t * raw, size_t rawLen, avifCodecConfigurationBox * av1C) { BEGIN_STREAM(s, raw, rawLen); uint8_t markerAndVersion = 0; CHECK(avifROStreamRead(&s, &markerAndVersion, 1)); uint8_t seqProfileAndIndex = 0; CHECK(avifROStreamRead(&s, &seqProfileAndIndex, 1)); uint8_t rawFlags = 0; CHECK(avifROStreamRead(&s, &rawFlags, 1)); if (markerAndVersion != 0x81) { // Marker and version must both == 1 return AVIF_FALSE; } av1C->seqProfile = (seqProfileAndIndex >> 5) & 0x7; // unsigned int (3) seq_profile; av1C->seqLevelIdx0 = (seqProfileAndIndex >> 0) & 0x1f; // unsigned int (5) seq_level_idx_0; av1C->seqTier0 = (rawFlags >> 7) & 0x1; // unsigned int (1) seq_tier_0; av1C->highBitdepth = (rawFlags >> 6) & 0x1; // unsigned int (1) high_bitdepth; av1C->twelveBit = (rawFlags >> 5) & 0x1; // unsigned int (1) twelve_bit; av1C->monochrome = (rawFlags >> 4) & 0x1; // unsigned int (1) monochrome; av1C->chromaSubsamplingX = (rawFlags >> 3) & 0x1; // unsigned int (1) chroma_subsampling_x; av1C->chromaSubsamplingY = (rawFlags >> 2) & 0x1; // unsigned int (1) chroma_subsampling_y; av1C->chromaSamplePosition = (rawFlags >> 0) & 0x3; // unsigned int (2) chroma_sample_position; return AVIF_TRUE; } static avifBool avifParseAV1CodecConfigurationBoxProperty(avifProperty * prop, const uint8_t * raw, size_t rawLen) { return avifParseAV1CodecConfigurationBox(raw, rawLen, &prop->u.av1C); } static avifBool avifParsePixelAspectRatioBoxProperty(avifProperty * prop, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); avifPixelAspectRatioBox * pasp = &prop->u.pasp; CHECK(avifROStreamReadU32(&s, &pasp->hSpacing)); // unsigned int(32) hSpacing; CHECK(avifROStreamReadU32(&s, &pasp->vSpacing)); // unsigned int(32) vSpacing; return AVIF_TRUE; } static avifBool avifParseCleanApertureBoxProperty(avifProperty * prop, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); avifCleanApertureBox * clap = &prop->u.clap; CHECK(avifROStreamReadU32(&s, &clap->widthN)); // unsigned int(32) cleanApertureWidthN; CHECK(avifROStreamReadU32(&s, &clap->widthD)); // unsigned int(32) cleanApertureWidthD; CHECK(avifROStreamReadU32(&s, &clap->heightN)); // unsigned int(32) cleanApertureHeightN; CHECK(avifROStreamReadU32(&s, &clap->heightD)); // unsigned int(32) cleanApertureHeightD; CHECK(avifROStreamReadU32(&s, &clap->horizOffN)); // unsigned int(32) horizOffN; CHECK(avifROStreamReadU32(&s, &clap->horizOffD)); // unsigned int(32) horizOffD; CHECK(avifROStreamReadU32(&s, &clap->vertOffN)); // unsigned int(32) vertOffN; CHECK(avifROStreamReadU32(&s, &clap->vertOffD)); // unsigned int(32) vertOffD; return AVIF_TRUE; } static avifBool avifParseImageRotationProperty(avifProperty * prop, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); avifImageRotation * irot = &prop->u.irot; CHECK(avifROStreamRead(&s, &irot->angle, 1)); // unsigned int (6) reserved = 0; unsigned int (2) angle; if ((irot->angle & 0xfc) != 0) { // reserved bits must be 0 return AVIF_FALSE; } return AVIF_TRUE; } static avifBool 
avifParseImageMirrorProperty(avifProperty * prop, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); avifImageMirror * imir = &prop->u.imir; CHECK(avifROStreamRead(&s, &imir->axis, 1)); // unsigned int (7) reserved = 0; unsigned int (1) axis; if ((imir->axis & 0xfe) != 0) { // reserved bits must be 0 return AVIF_FALSE; } return AVIF_TRUE; } static avifBool avifParsePixelInformationProperty(avifProperty * prop, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 0)); avifPixelInformationProperty * pixi = &prop->u.pixi; CHECK(avifROStreamRead(&s, &pixi->planeCount, 1)); // unsigned int (8) num_channels; if (pixi->planeCount > MAX_PIXI_PLANE_DEPTHS) { return AVIF_FALSE; } for (uint8_t i = 0; i < pixi->planeCount; ++i) { CHECK(avifROStreamRead(&s, &pixi->planeDepths[i], 1)); // unsigned int (8) bits_per_channel; } return AVIF_TRUE; } static avifBool avifParseItemPropertyContainerBox(avifPropertyArray * properties, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader header; CHECK(avifROStreamReadBoxHeader(&s, &header)); int propertyIndex = avifArrayPushIndex(properties); avifProperty * prop = &properties->prop[propertyIndex]; memcpy(prop->type, header.type, 4); if (!memcmp(header.type, "ispe", 4)) { CHECK(avifParseImageSpatialExtentsProperty(prop, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "auxC", 4)) { CHECK(avifParseAuxiliaryTypeProperty(prop, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "colr", 4)) { CHECK(avifParseColourInformationBox(prop, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "av1C", 4)) { CHECK(avifParseAV1CodecConfigurationBoxProperty(prop, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "pasp", 4)) { CHECK(avifParsePixelAspectRatioBoxProperty(prop, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "clap", 4)) { CHECK(avifParseCleanApertureBoxProperty(prop, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "irot", 4)) { CHECK(avifParseImageRotationProperty(prop, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "imir", 4)) { CHECK(avifParseImageMirrorProperty(prop, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "pixi", 4)) { CHECK(avifParsePixelInformationProperty(prop, avifROStreamCurrent(&s), header.size)); } CHECK(avifROStreamSkip(&s, header.size)); } return AVIF_TRUE; } static avifBool avifParseItemPropertyAssociation(avifMeta * meta, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); uint8_t version; uint32_t flags; CHECK(avifROStreamReadVersionAndFlags(&s, &version, &flags)); avifBool propertyIndexIsU16 = ((flags & 0x1) != 0); uint32_t entryCount; CHECK(avifROStreamReadU32(&s, &entryCount)); for (uint32_t entryIndex = 0; entryIndex < entryCount; ++entryIndex) { unsigned int itemID; if (version < 1) { uint16_t tmp; CHECK(avifROStreamReadU16(&s, &tmp)); itemID = tmp; } else { CHECK(avifROStreamReadU32(&s, &itemID)); } uint8_t associationCount; CHECK(avifROStreamRead(&s, &associationCount, 1)); for (uint8_t associationIndex = 0; associationIndex < associationCount; ++associationIndex) { avifBool essential = AVIF_FALSE; uint16_t propertyIndex = 0; if (propertyIndexIsU16) { CHECK(avifROStreamReadU16(&s, &propertyIndex)); essential = ((propertyIndex & 0x8000) != 0); propertyIndex &= 0x7fff; } else { uint8_t tmp; 
CHECK(avifROStreamRead(&s, &tmp, 1)); essential = ((tmp & 0x80) != 0); propertyIndex = tmp & 0x7f; } if (propertyIndex == 0) { // Not associated with any item continue; } --propertyIndex; // 1-indexed if (propertyIndex >= meta->properties.count) { return AVIF_FALSE; } avifDecoderItem * item = avifMetaFindItem(meta, itemID); if (!item) { return AVIF_FALSE; } // Copy property to item avifProperty * srcProp = &meta->properties.prop[propertyIndex]; static const char * supportedTypes[] = { "ispe", "auxC", "colr", "av1C", "pasp", "clap", "irot", "imir", "pixi" }; size_t supportedTypesCount = sizeof(supportedTypes) / sizeof(supportedTypes[0]); avifBool supportedType = AVIF_FALSE; for (size_t i = 0; i < supportedTypesCount; ++i) { if (!memcmp(srcProp->type, supportedTypes[i], 4)) { supportedType = AVIF_TRUE; break; } } if (supportedType) { avifProperty * dstProp = (avifProperty *)avifArrayPushPtr(&item->properties); memcpy(dstProp, srcProp, sizeof(avifProperty)); } else { if (essential) { // Discovered an essential item property that libavif doesn't support! // Make a note to ignore this item later. item->hasUnsupportedEssentialProperty = AVIF_TRUE; } } } } return AVIF_TRUE; } static avifBool avifParsePrimaryItemBox(avifMeta * meta, const uint8_t * raw, size_t rawLen) { if (meta->primaryItemID > 0) { // Illegal to have multiple pitm boxes, bail out return AVIF_FALSE; } BEGIN_STREAM(s, raw, rawLen); uint8_t version; CHECK(avifROStreamReadVersionAndFlags(&s, &version, NULL)); if (version == 0) { uint16_t tmp16; CHECK(avifROStreamReadU16(&s, &tmp16)); // unsigned int(16) item_ID; meta->primaryItemID = tmp16; } else { CHECK(avifROStreamReadU32(&s, &meta->primaryItemID)); // unsigned int(32) item_ID; } return AVIF_TRUE; } static avifBool avifParseItemDataBox(avifMeta * meta, const uint8_t * raw, size_t rawLen) { // Check to see if we've already seen an idat box for this meta box. If so, bail out for (uint32_t i = 0; i < meta->idats.count; ++i) { if (meta->idats.idat[i].id == meta->idatID) { return AVIF_FALSE; } } int index = avifArrayPushIndex(&meta->idats); avifDecoderItemData * idat = &meta->idats.idat[index]; idat->id = meta->idatID; idat->data.data = raw; idat->data.size = rawLen; return AVIF_TRUE; } static avifBool avifParseItemPropertiesBox(avifMeta * meta, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); avifBoxHeader ipcoHeader; CHECK(avifROStreamReadBoxHeader(&s, &ipcoHeader)); if (memcmp(ipcoHeader.type, "ipco", 4) != 0) { return AVIF_FALSE; } // Read all item properties inside of ItemPropertyContainerBox CHECK(avifParseItemPropertyContainerBox(&meta->properties, avifROStreamCurrent(&s), ipcoHeader.size)); CHECK(avifROStreamSkip(&s, ipcoHeader.size)); // Now read all ItemPropertyAssociation until the end of the box, and make associations while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader ipmaHeader; CHECK(avifROStreamReadBoxHeader(&s, &ipmaHeader)); if (!memcmp(ipmaHeader.type, "ipma", 4)) { CHECK(avifParseItemPropertyAssociation(meta, avifROStreamCurrent(&s), ipmaHeader.size)); } else { // These must all be type ipma return AVIF_FALSE; } CHECK(avifROStreamSkip(&s, ipmaHeader.size)); } return AVIF_TRUE; } static avifBool avifParseItemInfoEntry(avifMeta * meta, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 2)); // TODO: support version > 2? 
2+ is required for item_type uint16_t itemID; // unsigned int(16) item_ID; CHECK(avifROStreamReadU16(&s, &itemID)); // uint16_t itemProtectionIndex; // unsigned int(16) item_protection_index; CHECK(avifROStreamReadU16(&s, &itemProtectionIndex)); // uint8_t itemType[4]; // unsigned int(32) item_type; CHECK(avifROStreamRead(&s, itemType, 4)); // avifContentType contentType; if (!memcmp(itemType, "mime", 4)) { CHECK(avifROStreamReadString(&s, NULL, 0)); // string item_name; (skipped) CHECK(avifROStreamReadString(&s, contentType.contentType, CONTENTTYPE_SIZE)); // string content_type; } else { memset(&contentType, 0, sizeof(contentType)); } avifDecoderItem * item = avifMetaFindItem(meta, itemID); if (!item) { return AVIF_FALSE; } memcpy(item->type, itemType, sizeof(itemType)); memcpy(&item->contentType, &contentType, sizeof(contentType)); return AVIF_TRUE; } static avifBool avifParseItemInfoBox(avifMeta * meta, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); uint8_t version; CHECK(avifROStreamReadVersionAndFlags(&s, &version, NULL)); uint32_t entryCount; if (version == 0) { uint16_t tmp; CHECK(avifROStreamReadU16(&s, &tmp)); // unsigned int(16) entry_count; entryCount = tmp; } else if (version == 1) { CHECK(avifROStreamReadU32(&s, &entryCount)); // unsigned int(32) entry_count; } else { return AVIF_FALSE; } for (uint32_t entryIndex = 0; entryIndex < entryCount; ++entryIndex) { avifBoxHeader infeHeader; CHECK(avifROStreamReadBoxHeader(&s, &infeHeader)); if (!memcmp(infeHeader.type, "infe", 4)) { CHECK(avifParseItemInfoEntry(meta, avifROStreamCurrent(&s), infeHeader.size)); } else { // These must all be type ipma return AVIF_FALSE; } CHECK(avifROStreamSkip(&s, infeHeader.size)); } return AVIF_TRUE; } static avifBool avifParseItemReferenceBox(avifMeta * meta, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); uint8_t version; CHECK(avifROStreamReadVersionAndFlags(&s, &version, NULL)); while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader irefHeader; CHECK(avifROStreamReadBoxHeader(&s, &irefHeader)); uint32_t fromID = 0; if (version == 0) { uint16_t tmp; CHECK(avifROStreamReadU16(&s, &tmp)); // unsigned int(16) from_item_ID; fromID = tmp; } else if (version == 1) { CHECK(avifROStreamReadU32(&s, &fromID)); // unsigned int(32) from_item_ID; } else { // unsupported iref version, skip it break; } uint16_t referenceCount = 0; CHECK(avifROStreamReadU16(&s, &referenceCount)); // unsigned int(16) reference_count; for (uint16_t refIndex = 0; refIndex < referenceCount; ++refIndex) { uint32_t toID = 0; if (version == 0) { uint16_t tmp; CHECK(avifROStreamReadU16(&s, &tmp)); // unsigned int(16) to_item_ID; toID = tmp; } else if (version == 1) { CHECK(avifROStreamReadU32(&s, &toID)); // unsigned int(32) to_item_ID; } else { // unsupported iref version, skip it break; } // Read this reference as "{fromID} is a {irefType} for {toID}" if (fromID && toID) { avifDecoderItem * item = avifMetaFindItem(meta, fromID); if (!item) { return AVIF_FALSE; } if (!memcmp(irefHeader.type, "thmb", 4)) { item->thumbnailForID = toID; } if (!memcmp(irefHeader.type, "auxl", 4)) { item->auxForID = toID; } if (!memcmp(irefHeader.type, "cdsc", 4)) { item->descForID = toID; } if (!memcmp(irefHeader.type, "dimg", 4)) { // derived images refer in the opposite direction avifDecoderItem * dimg = avifMetaFindItem(meta, toID); if (!dimg) { return AVIF_FALSE; } dimg->dimgForID = fromID; } } } } return AVIF_TRUE; } static avifBool avifParseMetaBox(avifMeta * meta, const uint8_t * raw, size_t rawLen) { 
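// 'meta' is a FullBox: require version 0, then walk its child boxes
// (iloc, pitm, idat, iprp, iinf, iref handled below).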
BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 0)); ++meta->idatID; // for tracking idat while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader header; CHECK(avifROStreamReadBoxHeader(&s, &header)); if (!memcmp(header.type, "iloc", 4)) { CHECK(avifParseItemLocationBox(meta, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "pitm", 4)) { CHECK(avifParsePrimaryItemBox(meta, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "idat", 4)) { CHECK(avifParseItemDataBox(meta, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "iprp", 4)) { CHECK(avifParseItemPropertiesBox(meta, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "iinf", 4)) { CHECK(avifParseItemInfoBox(meta, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "iref", 4)) { CHECK(avifParseItemReferenceBox(meta, avifROStreamCurrent(&s), header.size)); } CHECK(avifROStreamSkip(&s, header.size)); } return AVIF_TRUE; } static avifBool avifParseTrackHeaderBox(avifTrack * track, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); uint8_t version; CHECK(avifROStreamReadVersionAndFlags(&s, &version, NULL)); uint32_t ignored32, trackID; uint64_t ignored64; if (version == 1) { CHECK(avifROStreamReadU64(&s, &ignored64)); // unsigned int(64) creation_time; CHECK(avifROStreamReadU64(&s, &ignored64)); // unsigned int(64) modification_time; CHECK(avifROStreamReadU32(&s, &trackID)); // unsigned int(32) track_ID; CHECK(avifROStreamReadU32(&s, &ignored32)); // const unsigned int(32) reserved = 0; CHECK(avifROStreamReadU64(&s, &ignored64)); // unsigned int(64) duration; } else if (version == 0) { CHECK(avifROStreamReadU32(&s, &ignored32)); // unsigned int(32) creation_time; CHECK(avifROStreamReadU32(&s, &ignored32)); // unsigned int(32) modification_time; CHECK(avifROStreamReadU32(&s, &trackID)); // unsigned int(32) track_ID; CHECK(avifROStreamReadU32(&s, &ignored32)); // const unsigned int(32) reserved = 0; CHECK(avifROStreamReadU32(&s, &ignored32)); // unsigned int(32) duration; } else { // Unsupported version return AVIF_FALSE; } // Skipping the following 52 bytes here: // ------------------------------------ // const unsigned int(32)[2] reserved = 0; // template int(16) layer = 0; // template int(16) alternate_group = 0; // template int(16) volume = {if track_is_audio 0x0100 else 0}; // const unsigned int(16) reserved = 0; // template int(32)[9] matrix= { 0x00010000,0,0,0,0x00010000,0,0,0,0x40000000 }; // unity matrix CHECK(avifROStreamSkip(&s, 52)); uint32_t width, height; CHECK(avifROStreamReadU32(&s, &width)); // unsigned int(32) width; CHECK(avifROStreamReadU32(&s, &height)); // unsigned int(32) height; track->width = width >> 16; track->height = height >> 16; // TODO: support scaling based on width/height track header info? 
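// Note: tkhd stores width/height as 16.16 fixed-point values; the >> 16 above keeps only the integer part.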
track->id = trackID; return AVIF_TRUE; } static avifBool avifParseMediaHeaderBox(avifTrack * track, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); uint8_t version; CHECK(avifROStreamReadVersionAndFlags(&s, &version, NULL)); uint32_t ignored32, mediaTimescale, mediaDuration32; uint64_t ignored64, mediaDuration64; if (version == 1) { CHECK(avifROStreamReadU64(&s, &ignored64)); // unsigned int(64) creation_time; CHECK(avifROStreamReadU64(&s, &ignored64)); // unsigned int(64) modification_time; CHECK(avifROStreamReadU32(&s, &mediaTimescale)); // unsigned int(32) timescale; CHECK(avifROStreamReadU64(&s, &mediaDuration64)); // unsigned int(64) duration; track->mediaDuration = mediaDuration64; } else if (version == 0) { CHECK(avifROStreamReadU32(&s, &ignored32)); // unsigned int(32) creation_time; CHECK(avifROStreamReadU32(&s, &ignored32)); // unsigned int(32) modification_time; CHECK(avifROStreamReadU32(&s, &mediaTimescale)); // unsigned int(32) timescale; CHECK(avifROStreamReadU32(&s, &mediaDuration32)); // unsigned int(32) duration; track->mediaDuration = (uint64_t)mediaDuration32; } else { // Unsupported version return AVIF_FALSE; } track->mediaTimescale = mediaTimescale; return AVIF_TRUE; } static avifBool avifParseChunkOffsetBox(avifSampleTable * sampleTable, avifBool largeOffsets, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 0)); uint32_t entryCount; CHECK(avifROStreamReadU32(&s, &entryCount)); // unsigned int(32) entry_count; for (uint32_t i = 0; i < entryCount; ++i) { uint64_t offset; if (largeOffsets) { CHECK(avifROStreamReadU64(&s, &offset)); // unsigned int(32) chunk_offset; } else { uint32_t offset32; CHECK(avifROStreamReadU32(&s, &offset32)); // unsigned int(32) chunk_offset; offset = (uint64_t)offset32; } avifSampleTableChunk * chunk = (avifSampleTableChunk *)avifArrayPushPtr(&sampleTable->chunks); chunk->offset = offset; } return AVIF_TRUE; } static avifBool avifParseSampleToChunkBox(avifSampleTable * sampleTable, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 0)); uint32_t entryCount; CHECK(avifROStreamReadU32(&s, &entryCount)); // unsigned int(32) entry_count; for (uint32_t i = 0; i < entryCount; ++i) { avifSampleTableSampleToChunk * sampleToChunk = (avifSampleTableSampleToChunk *)avifArrayPushPtr(&sampleTable->sampleToChunks); CHECK(avifROStreamReadU32(&s, &sampleToChunk->firstChunk)); // unsigned int(32) first_chunk; CHECK(avifROStreamReadU32(&s, &sampleToChunk->samplesPerChunk)); // unsigned int(32) samples_per_chunk; CHECK(avifROStreamReadU32(&s, &sampleToChunk->sampleDescriptionIndex)); // unsigned int(32) sample_description_index; } return AVIF_TRUE; } static avifBool avifParseSampleSizeBox(avifSampleTable * sampleTable, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 0)); uint32_t allSamplesSize, sampleCount; CHECK(avifROStreamReadU32(&s, &allSamplesSize)); // unsigned int(32) sample_size; CHECK(avifROStreamReadU32(&s, &sampleCount)); // unsigned int(32) sample_count; if (allSamplesSize > 0) { sampleTable->allSamplesSize = allSamplesSize; } else { for (uint32_t i = 0; i < sampleCount; ++i) { avifSampleTableSampleSize * sampleSize = (avifSampleTableSampleSize *)avifArrayPushPtr(&sampleTable->sampleSizes); CHECK(avifROStreamReadU32(&s, &sampleSize->size)); // unsigned int(32) entry_size; } } return AVIF_TRUE; } static avifBool 
avifParseSyncSampleBox(avifSampleTable * sampleTable, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 0)); uint32_t entryCount; CHECK(avifROStreamReadU32(&s, &entryCount)); // unsigned int(32) entry_count; for (uint32_t i = 0; i < entryCount; ++i) { uint32_t sampleNumber = 0; CHECK(avifROStreamReadU32(&s, &sampleNumber)); // unsigned int(32) sample_number; avifSyncSample * syncSample = (avifSyncSample *)avifArrayPushPtr(&sampleTable->syncSamples); syncSample->sampleNumber = sampleNumber; } return AVIF_TRUE; } static avifBool avifParseTimeToSampleBox(avifSampleTable * sampleTable, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 0)); uint32_t entryCount; CHECK(avifROStreamReadU32(&s, &entryCount)); // unsigned int(32) entry_count; for (uint32_t i = 0; i < entryCount; ++i) { avifSampleTableTimeToSample * timeToSample = (avifSampleTableTimeToSample *)avifArrayPushPtr(&sampleTable->timeToSamples); CHECK(avifROStreamReadU32(&s, &timeToSample->sampleCount)); // unsigned int(32) sample_count; CHECK(avifROStreamReadU32(&s, &timeToSample->sampleDelta)); // unsigned int(32) sample_delta; } return AVIF_TRUE; } static avifBool avifParseSampleDescriptionBox(avifSampleTable * sampleTable, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamReadAndEnforceVersion(&s, 0)); uint32_t entryCount; CHECK(avifROStreamReadU32(&s, &entryCount)); // unsigned int(32) entry_count; for (uint32_t i = 0; i < entryCount; ++i) { avifBoxHeader sampleEntryHeader; CHECK(avifROStreamReadBoxHeader(&s, &sampleEntryHeader)); avifSampleDescription * description = (avifSampleDescription *)avifArrayPushPtr(&sampleTable->sampleDescriptions); avifArrayCreate(&description->properties, sizeof(avifProperty), 16); memcpy(description->format, sampleEntryHeader.type, sizeof(description->format)); size_t remainingBytes = avifROStreamRemainingBytes(&s); if (!memcmp(description->format, "av01", 4) && (remainingBytes > VISUALSAMPLEENTRY_SIZE)) { CHECK(avifParseItemPropertyContainerBox( &description->properties, avifROStreamCurrent(&s) + VISUALSAMPLEENTRY_SIZE, remainingBytes - VISUALSAMPLEENTRY_SIZE)); } CHECK(avifROStreamSkip(&s, sampleEntryHeader.size)); } return AVIF_TRUE; } static avifBool avifParseSampleTableBox(avifTrack * track, const uint8_t * raw, size_t rawLen) { if (track->sampleTable) { // A TrackBox may only have one SampleTable return AVIF_FALSE; } track->sampleTable = avifSampleTableCreate(); BEGIN_STREAM(s, raw, rawLen); while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader header; CHECK(avifROStreamReadBoxHeader(&s, &header)); if (!memcmp(header.type, "stco", 4)) { CHECK(avifParseChunkOffsetBox(track->sampleTable, AVIF_FALSE, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "co64", 4)) { CHECK(avifParseChunkOffsetBox(track->sampleTable, AVIF_TRUE, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "stsc", 4)) { CHECK(avifParseSampleToChunkBox(track->sampleTable, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "stsz", 4)) { CHECK(avifParseSampleSizeBox(track->sampleTable, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "stss", 4)) { CHECK(avifParseSyncSampleBox(track->sampleTable, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "stts", 4)) { CHECK(avifParseTimeToSampleBox(track->sampleTable, avifROStreamCurrent(&s), header.size)); } else if 
(!memcmp(header.type, "stsd", 4)) { CHECK(avifParseSampleDescriptionBox(track->sampleTable, avifROStreamCurrent(&s), header.size)); } CHECK(avifROStreamSkip(&s, header.size)); } return AVIF_TRUE; } static avifBool avifParseMediaInformationBox(avifTrack * track, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader header; CHECK(avifROStreamReadBoxHeader(&s, &header)); if (!memcmp(header.type, "stbl", 4)) { CHECK(avifParseSampleTableBox(track, avifROStreamCurrent(&s), header.size)); } CHECK(avifROStreamSkip(&s, header.size)); } return AVIF_TRUE; } static avifBool avifParseMediaBox(avifTrack * track, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader header; CHECK(avifROStreamReadBoxHeader(&s, &header)); if (!memcmp(header.type, "mdhd", 4)) { CHECK(avifParseMediaHeaderBox(track, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "minf", 4)) { CHECK(avifParseMediaInformationBox(track, avifROStreamCurrent(&s), header.size)); } CHECK(avifROStreamSkip(&s, header.size)); } return AVIF_TRUE; } static avifBool avifTrackReferenceBox(avifTrack * track, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader header; CHECK(avifROStreamReadBoxHeader(&s, &header)); if (!memcmp(header.type, "auxl", 4)) { uint32_t toID; CHECK(avifROStreamReadU32(&s, &toID)); // unsigned int(32) track_IDs[] CHECK(avifROStreamSkip(&s, header.size - sizeof(uint32_t))); // just take the first one track->auxForID = toID; } else { CHECK(avifROStreamSkip(&s, header.size)); } } return AVIF_TRUE; } static avifBool avifParseTrackBox(avifDecoderData * data, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); avifTrack * track = avifDecoderDataCreateTrack(data); while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader header; CHECK(avifROStreamReadBoxHeader(&s, &header)); if (!memcmp(header.type, "tkhd", 4)) { CHECK(avifParseTrackHeaderBox(track, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "meta", 4)) { CHECK(avifParseMetaBox(track->meta, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "mdia", 4)) { CHECK(avifParseMediaBox(track, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "tref", 4)) { CHECK(avifTrackReferenceBox(track, avifROStreamCurrent(&s), header.size)); } CHECK(avifROStreamSkip(&s, header.size)); } return AVIF_TRUE; } static avifBool avifParseMoovBox(avifDecoderData * data, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader header; CHECK(avifROStreamReadBoxHeader(&s, &header)); if (!memcmp(header.type, "trak", 4)) { CHECK(avifParseTrackBox(data, avifROStreamCurrent(&s), header.size)); } CHECK(avifROStreamSkip(&s, header.size)); } return AVIF_TRUE; } static avifBool avifParseFileTypeBox(avifFileType * ftyp, const uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); CHECK(avifROStreamRead(&s, ftyp->majorBrand, 4)); CHECK(avifROStreamReadU32(&s, &ftyp->minorVersion)); size_t compatibleBrandsBytes = avifROStreamRemainingBytes(&s); if ((compatibleBrandsBytes % 4) != 0) { return AVIF_FALSE; } ftyp->compatibleBrands = avifROStreamCurrent(&s); CHECK(avifROStreamSkip(&s, compatibleBrandsBytes)); ftyp->compatibleBrandsCount = (int)compatibleBrandsBytes / 4; return AVIF_TRUE; } static avifBool avifParse(avifDecoderData * data, const 
uint8_t * raw, size_t rawLen) { BEGIN_STREAM(s, raw, rawLen); while (avifROStreamHasBytesLeft(&s, 1)) { avifBoxHeader header; CHECK(avifROStreamReadBoxHeader(&s, &header)); if (!memcmp(header.type, "ftyp", 4)) { CHECK(avifParseFileTypeBox(&data->ftyp, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "meta", 4)) { CHECK(avifParseMetaBox(data->meta, avifROStreamCurrent(&s), header.size)); } else if (!memcmp(header.type, "moov", 4)) { CHECK(avifParseMoovBox(data, avifROStreamCurrent(&s), header.size)); } CHECK(avifROStreamSkip(&s, header.size)); } return AVIF_TRUE; } // --------------------------------------------------------------------------- static avifBool avifFileTypeIsCompatible(avifFileType * ftyp) { avifBool avifCompatible = (memcmp(ftyp->majorBrand, "avif", 4) == 0 || memcmp(ftyp->majorBrand, "avis", 4) == 0); if (!avifCompatible) { for (int compatibleBrandIndex = 0; compatibleBrandIndex < ftyp->compatibleBrandsCount; ++compatibleBrandIndex) { const uint8_t * compatibleBrand = &ftyp->compatibleBrands[4 * compatibleBrandIndex]; if (!memcmp(compatibleBrand, "avif", 4) || !memcmp(compatibleBrand, "avis", 4)) { avifCompatible = AVIF_TRUE; break; } } } return avifCompatible; } avifBool avifPeekCompatibleFileType(const avifROData * input) { BEGIN_STREAM(s, input->data, input->size); avifBoxHeader header; CHECK(avifROStreamReadBoxHeader(&s, &header)); if (memcmp(header.type, "ftyp", 4) != 0) { return AVIF_FALSE; } avifFileType ftyp; memset(&ftyp, 0, sizeof(avifFileType)); avifBool parsed = avifParseFileTypeBox(&ftyp, avifROStreamCurrent(&s), header.size); if (!parsed) { return AVIF_FALSE; } return avifFileTypeIsCompatible(&ftyp); } // --------------------------------------------------------------------------- avifDecoder * avifDecoderCreate(void) { avifDecoder * decoder = (avifDecoder *)avifAlloc(sizeof(avifDecoder)); memset(decoder, 0, sizeof(avifDecoder)); return decoder; } static void avifDecoderCleanup(avifDecoder * decoder) { if (decoder->data) { avifDecoderDataDestroy(decoder->data); decoder->data = NULL; } if (decoder->image) { avifImageDestroy(decoder->image); decoder->image = NULL; } } void avifDecoderDestroy(avifDecoder * decoder) { avifDecoderCleanup(decoder); avifFree(decoder); } avifResult avifDecoderSetSource(avifDecoder * decoder, avifDecoderSource source) { decoder->requestedSource = source; return avifDecoderReset(decoder); } avifResult avifDecoderParse(avifDecoder * decoder, const avifROData * rawInput) { // Cleanup anything lingering in the decoder avifDecoderCleanup(decoder); // ----------------------------------------------------------------------- // Parse BMFF boxes decoder->data = avifDecoderDataCreate(); // Shallow copy, on purpose memcpy(&decoder->data->rawInput, rawInput, sizeof(avifROData)); if (!avifParse(decoder->data, decoder->data->rawInput.data, decoder->data->rawInput.size)) { return AVIF_RESULT_BMFF_PARSE_FAILED; } avifBool avifCompatible = avifFileTypeIsCompatible(&decoder->data->ftyp); if (!avifCompatible) { return AVIF_RESULT_INVALID_FTYP; } // Sanity check items for (uint32_t itemIndex = 0; itemIndex < decoder->data->meta->items.count; ++itemIndex) { avifDecoderItem * item = &decoder->data->meta->items.item[itemIndex]; if (item->hasUnsupportedEssentialProperty) { // An essential property isn't supported by libavif; ignore the item. 
continue; } const uint8_t * p = avifDecoderDataCalcItemPtr(decoder->data, item); if (p == NULL) { return AVIF_RESULT_BMFF_PARSE_FAILED; } } // Sanity check tracks for (uint32_t trackIndex = 0; trackIndex < decoder->data->tracks.count; ++trackIndex) { avifTrack * track = &decoder->data->tracks.track[trackIndex]; if (!track->sampleTable) { continue; } for (uint32_t chunkIndex = 0; chunkIndex < track->sampleTable->chunks.count; ++chunkIndex) { avifSampleTableChunk * chunk = &track->sampleTable->chunks.chunk[chunkIndex]; if (chunk->offset > decoder->data->rawInput.size) { return AVIF_RESULT_BMFF_PARSE_FAILED; } } } return avifDecoderReset(decoder); } static avifCodec * avifCodecCreateInternal(avifCodecChoice choice, avifCodecDecodeInput * decodeInput) { avifCodec * codec = avifCodecCreate(choice, AVIF_CODEC_FLAG_CAN_DECODE); if (codec) { codec->decodeInput = decodeInput; } return codec; } static avifResult avifDecoderFlush(avifDecoder * decoder) { avifDecoderDataResetCodec(decoder->data); for (unsigned int i = 0; i < decoder->data->tiles.count; ++i) { avifTile * tile = &decoder->data->tiles.tile[i]; tile->codec = avifCodecCreateInternal(decoder->codecChoice, tile->input); if (!tile->codec) { return AVIF_RESULT_NO_CODEC_AVAILABLE; } if (!tile->codec->open(tile->codec, decoder->imageIndex + 1)) { return AVIF_RESULT_DECODE_COLOR_FAILED; } } return AVIF_RESULT_OK; } avifResult avifDecoderReset(avifDecoder * decoder) { avifDecoderData * data = decoder->data; if (!data) { // Nothing to reset. return AVIF_RESULT_OK; } memset(&data->colorGrid, 0, sizeof(data->colorGrid)); memset(&data->alphaGrid, 0, sizeof(data->alphaGrid)); avifDecoderDataClearTiles(data); // Prepare / cleanup decoded image state if (decoder->image) { avifImageDestroy(decoder->image); } decoder->image = avifImageCreateEmpty(); data->cicpSet = AVIF_FALSE; memset(&decoder->ioStats, 0, sizeof(decoder->ioStats)); // ----------------------------------------------------------------------- // Build decode input data->sourceSampleTable = NULL; // Reset if (decoder->requestedSource == AVIF_DECODER_SOURCE_AUTO) { if (data->tracks.count > 0) { data->source = AVIF_DECODER_SOURCE_TRACKS; } else { data->source = AVIF_DECODER_SOURCE_PRIMARY_ITEM; } } else { data->source = decoder->requestedSource; } const avifPropertyArray * colorProperties = NULL; if (data->source == AVIF_DECODER_SOURCE_TRACKS) { avifTrack * colorTrack = NULL; avifTrack * alphaTrack = NULL; // Find primary track - this probably needs some better detection uint32_t colorTrackIndex = 0; for (; colorTrackIndex < decoder->data->tracks.count; ++colorTrackIndex) { avifTrack * track = &decoder->data->tracks.track[colorTrackIndex]; if (!track->sampleTable) { continue; } if (!track->id) { // trak box might be missing a tkhd box inside, skip it continue; } if (!track->sampleTable->chunks.count) { continue; } if (!avifSampleTableHasFormat(track->sampleTable, "av01")) { continue; } if (track->auxForID != 0) { continue; } // Found one! 
break; } if (colorTrackIndex == decoder->data->tracks.count) { return AVIF_RESULT_NO_CONTENT; } colorTrack = &decoder->data->tracks.track[colorTrackIndex]; colorProperties = avifSampleTableGetProperties(colorTrack->sampleTable); if (!colorProperties) { return AVIF_RESULT_BMFF_PARSE_FAILED; } // Find Exif and/or XMP metadata, if any if (colorTrack->meta) { // See the comment above avifDecoderDataFindMetadata() for the explanation of using 0 here if (!avifDecoderDataFindMetadata(data, colorTrack->meta, decoder->image, 0)) { return AVIF_RESULT_BMFF_PARSE_FAILED; } } uint32_t alphaTrackIndex = 0; for (; alphaTrackIndex < decoder->data->tracks.count; ++alphaTrackIndex) { avifTrack * track = &decoder->data->tracks.track[alphaTrackIndex]; if (!track->sampleTable) { continue; } if (!track->id) { continue; } if (!track->sampleTable->chunks.count) { continue; } if (!avifSampleTableHasFormat(track->sampleTable, "av01")) { continue; } if (track->auxForID == colorTrack->id) { // Found it! break; } } if (alphaTrackIndex != decoder->data->tracks.count) { alphaTrack = &decoder->data->tracks.track[alphaTrackIndex]; } avifTile * colorTile = avifDecoderDataCreateTile(decoder->data); if (!avifCodecDecodeInputGetSamples(colorTile->input, colorTrack->sampleTable, &decoder->data->rawInput)) { return AVIF_RESULT_BMFF_PARSE_FAILED; } decoder->data->colorTileCount = 1; avifTile * alphaTile = NULL; if (alphaTrack) { alphaTile = avifDecoderDataCreateTile(decoder->data); if (!avifCodecDecodeInputGetSamples(alphaTile->input, alphaTrack->sampleTable, &decoder->data->rawInput)) { return AVIF_RESULT_BMFF_PARSE_FAILED; } alphaTile->input->alpha = AVIF_TRUE; decoder->data->alphaTileCount = 1; } // Stash off sample table for future timing information data->sourceSampleTable = colorTrack->sampleTable; // Image sequence timing decoder->imageIndex = -1; decoder->imageCount = colorTile->input->samples.count; decoder->timescale = colorTrack->mediaTimescale; decoder->durationInTimescales = colorTrack->mediaDuration; if (colorTrack->mediaTimescale) { decoder->duration = (double)decoder->durationInTimescales / (double)colorTrack->mediaTimescale; } else { decoder->duration = 0; } memset(&decoder->imageTiming, 0, sizeof(decoder->imageTiming)); // to be set in avifDecoderNextImage() decoder->image->width = colorTrack->width; decoder->image->height = colorTrack->height; decoder->alphaPresent = (alphaTrack != NULL); } else { // Create from items avifROData colorOBU = AVIF_DATA_EMPTY; avifROData alphaOBU = AVIF_DATA_EMPTY; avifDecoderItem * colorOBUItem = NULL; avifDecoderItem * alphaOBUItem = NULL; // Find the colorOBU (primary) item for (uint32_t itemIndex = 0; itemIndex < data->meta->items.count; ++itemIndex) { avifDecoderItem * item = &data->meta->items.item[itemIndex]; if (!item->size) { continue; } if (item->hasUnsupportedEssentialProperty) { // An essential property isn't supported by libavif; ignore the item. 
continue; } avifBool isGrid = (memcmp(item->type, "grid", 4) == 0); if (memcmp(item->type, "av01", 4) && !isGrid) { // probably exif or some other data continue; } if (item->thumbnailForID != 0) { // It's a thumbnail, skip it continue; } if ((data->meta->primaryItemID > 0) && (item->id != data->meta->primaryItemID)) { // a primary item ID was specified, require it continue; } if (isGrid) { const uint8_t * itemPtr = avifDecoderDataCalcItemPtr(data, item); if (itemPtr == NULL) { return AVIF_RESULT_BMFF_PARSE_FAILED; } if (!avifParseImageGridBox(&data->colorGrid, itemPtr, item->size)) { return AVIF_RESULT_INVALID_IMAGE_GRID; } } else { colorOBU.data = avifDecoderDataCalcItemPtr(data, item); colorOBU.size = item->size; } colorOBUItem = item; break; } if (!colorOBUItem) { return AVIF_RESULT_NO_AV1_ITEMS_FOUND; } colorProperties = &colorOBUItem->properties; // Find the alphaOBU item, if any for (uint32_t itemIndex = 0; itemIndex < data->meta->items.count; ++itemIndex) { avifDecoderItem * item = &data->meta->items.item[itemIndex]; if (!item->size) { continue; } if (item->hasUnsupportedEssentialProperty) { // An essential property isn't supported by libavif; ignore the item. continue; } avifBool isGrid = (memcmp(item->type, "grid", 4) == 0); if (memcmp(item->type, "av01", 4) && !isGrid) { // probably exif or some other data continue; } if (item->thumbnailForID != 0) { // It's a thumbnail, skip it continue; } const avifProperty * auxCProp = avifPropertyArrayFind(&item->properties, "auxC"); if (auxCProp && isAlphaURN(auxCProp->u.auxC.auxType) && (item->auxForID == colorOBUItem->id)) { if (isGrid) { const uint8_t * itemPtr = avifDecoderDataCalcItemPtr(data, item); if (itemPtr == NULL) { return AVIF_RESULT_BMFF_PARSE_FAILED; } if (!avifParseImageGridBox(&data->alphaGrid, itemPtr, item->size)) { return AVIF_RESULT_INVALID_IMAGE_GRID; } } else { alphaOBU.data = avifDecoderDataCalcItemPtr(data, item); alphaOBU.size = item->size; } alphaOBUItem = item; break; } } // Find Exif and/or XMP metadata, if any if (!avifDecoderDataFindMetadata(data, data->meta, decoder->image, colorOBUItem->id)) { return AVIF_RESULT_BMFF_PARSE_FAILED; } if ((data->colorGrid.rows > 0) && (data->colorGrid.columns > 0)) { if (!avifDecoderDataGenerateImageGridTiles(data, &data->colorGrid, colorOBUItem, AVIF_FALSE)) { return AVIF_RESULT_INVALID_IMAGE_GRID; } data->colorTileCount = data->tiles.count; } else { if (colorOBU.size == 0) { return AVIF_RESULT_NO_AV1_ITEMS_FOUND; } avifTile * colorTile = avifDecoderDataCreateTile(decoder->data); avifDecodeSample * colorSample = (avifDecodeSample *)avifArrayPushPtr(&colorTile->input->samples); memcpy(&colorSample->data, &colorOBU, sizeof(avifROData)); colorSample->sync = AVIF_TRUE; decoder->data->colorTileCount = 1; } if ((data->alphaGrid.rows > 0) && (data->alphaGrid.columns > 0) && alphaOBUItem) { if (!avifDecoderDataGenerateImageGridTiles(data, &data->alphaGrid, alphaOBUItem, AVIF_FALSE)) { return AVIF_RESULT_INVALID_IMAGE_GRID; } data->alphaTileCount = data->tiles.count - data->colorTileCount; } else { avifTile * alphaTile = NULL; if (alphaOBU.size > 0) { alphaTile = avifDecoderDataCreateTile(decoder->data); avifDecodeSample * alphaSample = (avifDecodeSample *)avifArrayPushPtr(&alphaTile->input->samples); memcpy(&alphaSample->data, &alphaOBU, sizeof(avifROData)); alphaSample->sync = AVIF_TRUE; alphaTile->input->alpha = AVIF_TRUE; decoder->data->alphaTileCount = 1; } } // Set all counts and timing to safe-but-uninteresting values decoder->imageIndex = -1; decoder->imageCount = 1; 
decoder->imageTiming.timescale = 1; decoder->imageTiming.pts = 0; decoder->imageTiming.ptsInTimescales = 0; decoder->imageTiming.duration = 1; decoder->imageTiming.durationInTimescales = 1; decoder->timescale = 1; decoder->duration = 1; decoder->durationInTimescales = 1; decoder->ioStats.colorOBUSize = colorOBU.size; decoder->ioStats.alphaOBUSize = alphaOBU.size; const avifProperty * ispeProp = avifPropertyArrayFind(colorProperties, "ispe"); if (ispeProp) { decoder->image->width = ispeProp->u.ispe.width; decoder->image->height = ispeProp->u.ispe.height; } else { decoder->image->width = 0; decoder->image->height = 0; } decoder->alphaPresent = (alphaOBUItem != NULL); } // Sanity check tiles for (uint32_t tileIndex = 0; tileIndex < data->tiles.count; ++tileIndex) { avifTile * tile = &data->tiles.tile[tileIndex]; for (uint32_t sampleIndex = 0; sampleIndex < tile->input->samples.count; ++sampleIndex) { avifDecodeSample * sample = &tile->input->samples.sample[sampleIndex]; if (!sample->data.data || !sample->data.size) { // Every sample must have some data return AVIF_RESULT_BMFF_PARSE_FAILED; } } } const avifProperty * colrProp = avifPropertyArrayFind(colorProperties, "colr"); if (colrProp) { if (colrProp->u.colr.hasICC) { avifImageSetProfileICC(decoder->image, colrProp->u.colr.icc, colrProp->u.colr.iccSize); } else if (colrProp->u.colr.hasNCLX) { data->cicpSet = AVIF_TRUE; decoder->image->colorPrimaries = colrProp->u.colr.colorPrimaries; decoder->image->transferCharacteristics = colrProp->u.colr.transferCharacteristics; decoder->image->matrixCoefficients = colrProp->u.colr.matrixCoefficients; decoder->image->yuvRange = colrProp->u.colr.range; } } // Transformations const avifProperty * paspProp = avifPropertyArrayFind(colorProperties, "pasp"); if (paspProp) { decoder->image->transformFlags |= AVIF_TRANSFORM_PASP; memcpy(&decoder->image->pasp, &paspProp->u.pasp, sizeof(avifPixelAspectRatioBox)); } const avifProperty * clapProp = avifPropertyArrayFind(colorProperties, "clap"); if (clapProp) { decoder->image->transformFlags |= AVIF_TRANSFORM_CLAP; memcpy(&decoder->image->clap, &clapProp->u.clap, sizeof(avifCleanApertureBox)); } const avifProperty * irotProp = avifPropertyArrayFind(colorProperties, "irot"); if (irotProp) { decoder->image->transformFlags |= AVIF_TRANSFORM_IROT; memcpy(&decoder->image->irot, &irotProp->u.irot, sizeof(avifImageRotation)); } const avifProperty * imirProp = avifPropertyArrayFind(colorProperties, "imir"); if (imirProp) { decoder->image->transformFlags |= AVIF_TRANSFORM_IMIR; memcpy(&decoder->image->imir, &imirProp->u.imir, sizeof(avifImageMirror)); } if (!decoder->data->cicpSet && (data->tiles.count > 0)) { avifTile * firstTile = &data->tiles.tile[0]; if (firstTile->input->samples.count > 0) { avifDecodeSample * sample = &firstTile->input->samples.sample[0]; avifSequenceHeader sequenceHeader; if (avifSequenceHeaderParse(&sequenceHeader, &sample->data)) { decoder->data->cicpSet = AVIF_TRUE; decoder->image->colorPrimaries = sequenceHeader.colorPrimaries; decoder->image->transferCharacteristics = sequenceHeader.transferCharacteristics; decoder->image->matrixCoefficients = sequenceHeader.matrixCoefficients; decoder->image->yuvRange = sequenceHeader.range; } } } const avifProperty * av1CProp = avifPropertyArrayFind(colorProperties, "av1C"); if (av1CProp) { decoder->image->depth = avifCodecConfigurationBoxGetDepth(&av1CProp->u.av1C); if (av1CProp->u.av1C.monochrome) { decoder->image->yuvFormat = AVIF_PIXEL_FORMAT_YUV400; } else { if (av1CProp->u.av1C.chromaSubsamplingX && 
av1CProp->u.av1C.chromaSubsamplingY) { decoder->image->yuvFormat = AVIF_PIXEL_FORMAT_YUV420; } else if (av1CProp->u.av1C.chromaSubsamplingX) { decoder->image->yuvFormat = AVIF_PIXEL_FORMAT_YUV422; } else { decoder->image->yuvFormat = AVIF_PIXEL_FORMAT_YUV444; } } decoder->image->yuvChromaSamplePosition = (avifChromaSamplePosition)av1CProp->u.av1C.chromaSamplePosition; } else { // An av1C box is mandatory in all valid AVIF configurations. Bail out. return AVIF_RESULT_BMFF_PARSE_FAILED; } return avifDecoderFlush(decoder); } avifResult avifDecoderNextImage(avifDecoder * decoder) { for (unsigned int tileIndex = 0; tileIndex < decoder->data->tiles.count; ++tileIndex) { avifTile * tile = &decoder->data->tiles.tile[tileIndex]; if (!tile->codec->getNextImage(tile->codec, tile->image)) { if (tile->input->alpha) { return AVIF_RESULT_DECODE_ALPHA_FAILED; } else { if (tile->image->width) { // We've sent at least one image, but we've run out now. return AVIF_RESULT_NO_IMAGES_REMAINING; } return AVIF_RESULT_DECODE_COLOR_FAILED; } } } if (decoder->data->tiles.count != (decoder->data->colorTileCount + decoder->data->alphaTileCount)) { // TODO: assert here? This should be impossible. return AVIF_RESULT_UNKNOWN_ERROR; } if ((decoder->data->colorGrid.rows > 0) || (decoder->data->colorGrid.columns > 0)) { if (!avifDecoderDataFillImageGrid( decoder->data, &decoder->data->colorGrid, decoder->image, 0, decoder->data->colorTileCount, AVIF_FALSE)) { return AVIF_RESULT_INVALID_IMAGE_GRID; } } else { // Normal (most common) non-grid path. Just steal the planes from the only "tile". if (decoder->data->colorTileCount != 1) { return AVIF_RESULT_DECODE_COLOR_FAILED; } avifImage * srcColor = decoder->data->tiles.tile[0].image; if ((decoder->image->width != srcColor->width) || (decoder->image->height != srcColor->height) || (decoder->image->depth != srcColor->depth)) { avifImageFreePlanes(decoder->image, AVIF_PLANES_ALL); decoder->image->width = srcColor->width; decoder->image->height = srcColor->height; decoder->image->depth = srcColor->depth; } #if 0 // This code is currently unnecessary as the CICP is always set by the end of avifDecoderParse(). if (!decoder->data->cicpSet) { decoder->data->cicpSet = AVIF_TRUE; decoder->image->colorPrimaries = srcColor->colorPrimaries; decoder->image->transferCharacteristics = srcColor->transferCharacteristics; decoder->image->matrixCoefficients = srcColor->matrixCoefficients; } #endif avifImageStealPlanes(decoder->image, srcColor, AVIF_PLANES_YUV); } if ((decoder->data->alphaGrid.rows > 0) || (decoder->data->alphaGrid.columns > 0)) { if (!avifDecoderDataFillImageGrid( decoder->data, &decoder->data->alphaGrid, decoder->image, decoder->data->colorTileCount, decoder->data->alphaTileCount, AVIF_TRUE)) { return AVIF_RESULT_INVALID_IMAGE_GRID; } } else { // Normal (most common) non-grid path. Just steal the planes from the only "tile". if (decoder->data->alphaTileCount == 0) { avifImageFreePlanes(decoder->image, AVIF_PLANES_A); // no alpha } else { if (decoder->data->alphaTileCount != 1) { return AVIF_RESULT_DECODE_ALPHA_FAILED; } avifImage * srcAlpha = decoder->data->tiles.tile[decoder->data->colorTileCount].image; if ((decoder->image->width != srcAlpha->width) || (decoder->image->height != srcAlpha->height) || (decoder->image->depth != srcAlpha->depth)) { return AVIF_RESULT_DECODE_ALPHA_FAILED; } avifImageStealPlanes(decoder->image, srcAlpha, AVIF_PLANES_A); } } ++decoder->imageIndex; if (decoder->data->sourceSampleTable) { // Decoding from a track! Provide timing information. 
avifResult timingResult = avifDecoderNthImageTiming(decoder, decoder->imageIndex, &decoder->imageTiming); if (timingResult != AVIF_RESULT_OK) { return timingResult; } } return AVIF_RESULT_OK; } avifResult avifDecoderNthImageTiming(const avifDecoder * decoder, uint32_t frameIndex, avifImageTiming * outTiming) { if (!decoder->data) { // Nothing has been parsed yet return AVIF_RESULT_NO_CONTENT; } if ((int)frameIndex >= decoder->imageCount) { // Impossible index return AVIF_RESULT_NO_IMAGES_REMAINING; } if (!decoder->data->sourceSampleTable) { // There isn't any real timing associated with this decode, so // just hand back the defaults chosen in avifDecoderReset(). memcpy(outTiming, &decoder->imageTiming, sizeof(avifImageTiming)); return AVIF_RESULT_OK; } outTiming->timescale = decoder->timescale; outTiming->ptsInTimescales = 0; for (int imageIndex = 0; imageIndex < (int)frameIndex; ++imageIndex) { outTiming->ptsInTimescales += avifSampleTableGetImageDelta(decoder->data->sourceSampleTable, imageIndex); } outTiming->durationInTimescales = avifSampleTableGetImageDelta(decoder->data->sourceSampleTable, frameIndex); if (outTiming->timescale > 0) { outTiming->pts = (double)outTiming->ptsInTimescales / (double)outTiming->timescale; outTiming->duration = (double)outTiming->durationInTimescales / (double)outTiming->timescale; } else { outTiming->pts = 0.0; outTiming->duration = 0.0; } return AVIF_RESULT_OK; } avifResult avifDecoderNthImage(avifDecoder * decoder, uint32_t frameIndex) { int requestedIndex = (int)frameIndex; if (requestedIndex == decoder->imageIndex) { // We're here already, nothing to do return AVIF_RESULT_OK; } if (requestedIndex == (decoder->imageIndex + 1)) { // it's just the next image, nothing special here return avifDecoderNextImage(decoder); } if (requestedIndex >= decoder->imageCount) { // Impossible index return AVIF_RESULT_NO_IMAGES_REMAINING; } // If we get here, a decoder flush is necessary decoder->imageIndex = ((int)avifDecoderNearestKeyframe(decoder, frameIndex)) - 1; // prepare to read nearest keyframe avifDecoderFlush(decoder); for (;;) { avifResult result = avifDecoderNextImage(decoder); if (result != AVIF_RESULT_OK) { return result; } if (requestedIndex == decoder->imageIndex) { break; } } return AVIF_RESULT_OK; } avifBool avifDecoderIsKeyframe(const avifDecoder * decoder, uint32_t frameIndex) { if ((decoder->data->tiles.count > 0) && decoder->data->tiles.tile[0].input) { if (frameIndex < decoder->data->tiles.tile[0].input->samples.count) { return decoder->data->tiles.tile[0].input->samples.sample[frameIndex].sync; } } return AVIF_FALSE; } uint32_t avifDecoderNearestKeyframe(const avifDecoder * decoder, uint32_t frameIndex) { for (; frameIndex != 0; --frameIndex) { if (avifDecoderIsKeyframe(decoder, frameIndex)) { break; } } return frameIndex; } avifResult avifDecoderRead(avifDecoder * decoder, avifImage * image, const avifROData * input) { avifResult result = avifDecoderParse(decoder, input); if (result != AVIF_RESULT_OK) { return result; } result = avifDecoderNextImage(decoder); if (result != AVIF_RESULT_OK) { return result; } avifImageCopy(image, decoder->image, AVIF_PLANES_ALL); return AVIF_RESULT_OK; }
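The decoder entry points above compose into a short call sequence: create a decoder, hand it the raw .avif buffer, pull the first image, destroy the decoder. Below is a minimal caller sketch against the (older, single-buffer) API shown above; the header path is an assumption and error handling is reduced to the essentials.

#include "avif/avif.h" /* header name assumed; any header declaring the API above works */

/* Illustrative usage of avifDecoderCreate/avifDecoderRead/avifDecoderDestroy
 * as defined above: decode the first frame of an in-memory .avif buffer. */
static avifResult decodeFirstFrame(const uint8_t * buf, size_t bufSize, avifImage * out)
{
    avifROData raw;
    raw.data = buf;
    raw.size = bufSize;

    avifDecoder * decoder = avifDecoderCreate();

    /* avifDecoderRead() runs avifDecoderParse() followed by
     * avifDecoderNextImage() and copies the planes into 'out'. */
    avifResult result = avifDecoderRead(decoder, out, &raw);

    avifDecoderDestroy(decoder);
    return result;
}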
null
227
CWE-787
CVE-2020-36429
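The sample that follows is open62541's OPC UA JSON encoder. It serializes into a caller-supplied buffer tracked by a write cursor (ctx->pos) and an end pointer (ctx->end); every emit helper is expected to check the remaining space before advancing the cursor, since a missed or under-sized check is exactly the out-of-bounds write class (CWE-787) this row is tagged with. A minimal sketch of that bounds-checked append pattern, using hypothetical names (OutBuf, appendBytes) rather than the library's own types:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Illustrative only -- not open62541 API. Append 'len' bytes to a bounded
 * output buffer, refusing to write past 'end'. */
typedef struct {
    uint8_t *pos;        /* next byte to write */
    const uint8_t *end;  /* one past the last writable byte */
} OutBuf;

static int appendBytes(OutBuf *out, const void *src, size_t len) {
    if ((size_t)(out->end - out->pos) < len)
        return -1;       /* not enough room: fail instead of overflowing */
    memcpy(out->pos, src, len);
    out->pos += len;
    return 0;
}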
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Copyright 2014-2018 (c) Fraunhofer IOSB (Author: Julius Pfrommer) * Copyright 2018 (c) Fraunhofer IOSB (Author: Lukas Meling) */ #include "ua_types_encoding_json.h" #include <open62541/types_generated.h> #include <open62541/types_generated_handling.h> #include "ua_types_encoding_binary.h" #include <float.h> #include <math.h> #ifdef UA_ENABLE_CUSTOM_LIBC #include "../deps/musl/floatscan.h" #include "../deps/musl/vfprintf.h" #endif #include "../deps/itoa.h" #include "../deps/atoi.h" #include "../deps/string_escape.h" #include "../deps/base64.h" #include "../deps/libc_time.h" #if defined(_MSC_VER) # define strtoll _strtoi64 # define strtoull _strtoui64 #endif /* vs2008 does not have INFINITY and NAN defined */ #ifndef INFINITY # define INFINITY ((UA_Double)(DBL_MAX+DBL_MAX)) #endif #ifndef NAN # define NAN ((UA_Double)(INFINITY-INFINITY)) #endif #if defined(_MSC_VER) # pragma warning(disable: 4756) # pragma warning(disable: 4056) #endif #define UA_NODEIDTYPE_NUMERIC_TWOBYTE 0 #define UA_NODEIDTYPE_NUMERIC_FOURBYTE 1 #define UA_NODEIDTYPE_NUMERIC_COMPLETE 2 #define UA_EXPANDEDNODEID_SERVERINDEX_FLAG 0x40 #define UA_EXPANDEDNODEID_NAMESPACEURI_FLAG 0x80 #define UA_JSON_DATETIME_LENGTH 30 /* Max length of numbers for the allocation of temp buffers. Don't forget that * printf adds an additional \0 at the end! * * Sources: * https://www.exploringbinary.com/maximum-number-of-decimal-digits-in-binary-floating-point-numbers/ * * UInt16: 3 + 1 * SByte: 3 + 1 * UInt32: * Int32: * UInt64: * Int64: * Float: 149 + 1 * Double: 767 + 1 */ /************/ /* Encoding */ /************/ #define ENCODE_JSON(TYPE) static status \ TYPE##_encodeJson(const UA_##TYPE *src, const UA_DataType *type, CtxJson *ctx) #define ENCODE_DIRECT_JSON(SRC, TYPE) \ TYPE##_encodeJson((const UA_##TYPE*)SRC, NULL, ctx) extern const encodeJsonSignature encodeJsonJumpTable[UA_DATATYPEKINDS]; extern const decodeJsonSignature decodeJsonJumpTable[UA_DATATYPEKINDS]; /* Forward declarations */ UA_String UA_DateTime_toJSON(UA_DateTime t); ENCODE_JSON(ByteString); static status UA_FUNC_ATTR_WARN_UNUSED_RESULT writeChar(CtxJson *ctx, char c) { if(ctx->pos >= ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) *ctx->pos = (UA_Byte)c; ctx->pos++; return UA_STATUSCODE_GOOD; } #define WRITE_JSON_ELEMENT(ELEM) \ UA_FUNC_ATTR_WARN_UNUSED_RESULT status \ writeJson##ELEM(CtxJson *ctx) static WRITE_JSON_ELEMENT(Quote) { return writeChar(ctx, '\"'); } WRITE_JSON_ELEMENT(ObjStart) { /* increase depth, save: before first key-value no comma needed. */ ctx->depth++; ctx->commaNeeded[ctx->depth] = false; return writeChar(ctx, '{'); } WRITE_JSON_ELEMENT(ObjEnd) { ctx->depth--; //decrease depth ctx->commaNeeded[ctx->depth] = true; return writeChar(ctx, '}'); } WRITE_JSON_ELEMENT(ArrStart) { /* increase depth, save: before first array entry no comma needed. 
*/ ctx->commaNeeded[++ctx->depth] = false; return writeChar(ctx, '['); } WRITE_JSON_ELEMENT(ArrEnd) { ctx->depth--; //decrease depth ctx->commaNeeded[ctx->depth] = true; return writeChar(ctx, ']'); } WRITE_JSON_ELEMENT(CommaIfNeeded) { if(ctx->commaNeeded[ctx->depth]) return writeChar(ctx, ','); return UA_STATUSCODE_GOOD; } status writeJsonArrElm(CtxJson *ctx, const void *value, const UA_DataType *type) { status ret = writeJsonCommaIfNeeded(ctx); ctx->commaNeeded[ctx->depth] = true; ret |= encodeJsonInternal(value, type, ctx); return ret; } status writeJsonObjElm(CtxJson *ctx, const char *key, const void *value, const UA_DataType *type){ return writeJsonKey(ctx, key) | encodeJsonInternal(value, type, ctx); } status writeJsonNull(CtxJson *ctx) { if(ctx->pos + 4 > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(ctx->calcOnly) { ctx->pos += 4; } else { *(ctx->pos++) = 'n'; *(ctx->pos++) = 'u'; *(ctx->pos++) = 'l'; *(ctx->pos++) = 'l'; } return UA_STATUSCODE_GOOD; } /* Keys for JSON */ /* LocalizedText */ static const char* UA_JSONKEY_LOCALE = "Locale"; static const char* UA_JSONKEY_TEXT = "Text"; /* QualifiedName */ static const char* UA_JSONKEY_NAME = "Name"; static const char* UA_JSONKEY_URI = "Uri"; /* NodeId */ static const char* UA_JSONKEY_ID = "Id"; static const char* UA_JSONKEY_IDTYPE = "IdType"; static const char* UA_JSONKEY_NAMESPACE = "Namespace"; /* ExpandedNodeId */ static const char* UA_JSONKEY_SERVERURI = "ServerUri"; /* Variant */ static const char* UA_JSONKEY_TYPE = "Type"; static const char* UA_JSONKEY_BODY = "Body"; static const char* UA_JSONKEY_DIMENSION = "Dimension"; /* DataValue */ static const char* UA_JSONKEY_VALUE = "Value"; static const char* UA_JSONKEY_STATUS = "Status"; static const char* UA_JSONKEY_SOURCETIMESTAMP = "SourceTimestamp"; static const char* UA_JSONKEY_SOURCEPICOSECONDS = "SourcePicoseconds"; static const char* UA_JSONKEY_SERVERTIMESTAMP = "ServerTimestamp"; static const char* UA_JSONKEY_SERVERPICOSECONDS = "ServerPicoseconds"; /* ExtensionObject */ static const char* UA_JSONKEY_ENCODING = "Encoding"; static const char* UA_JSONKEY_TYPEID = "TypeId"; /* StatusCode */ static const char* UA_JSONKEY_CODE = "Code"; static const char* UA_JSONKEY_SYMBOL = "Symbol"; /* DiagnosticInfo */ static const char* UA_JSONKEY_SYMBOLICID = "SymbolicId"; static const char* UA_JSONKEY_NAMESPACEURI = "NamespaceUri"; static const char* UA_JSONKEY_LOCALIZEDTEXT = "LocalizedText"; static const char* UA_JSONKEY_ADDITIONALINFO = "AdditionalInfo"; static const char* UA_JSONKEY_INNERSTATUSCODE = "InnerStatusCode"; static const char* UA_JSONKEY_INNERDIAGNOSTICINFO = "InnerDiagnosticInfo"; /* Writes null terminated string to output buffer (current ctx->pos). Writes * comma in front of key if needed. Encapsulates key in quotes. 
*/ status UA_FUNC_ATTR_WARN_UNUSED_RESULT writeJsonKey(CtxJson *ctx, const char* key) { size_t size = strlen(key); if(ctx->pos + size + 4 > ctx->end) /* +4 because of " " : and , */ return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; status ret = writeJsonCommaIfNeeded(ctx); ctx->commaNeeded[ctx->depth] = true; if(ctx->calcOnly) { ctx->commaNeeded[ctx->depth] = true; ctx->pos += 3; ctx->pos += size; return ret; } ret |= writeChar(ctx, '\"'); for(size_t i = 0; i < size; i++) { *(ctx->pos++) = (u8)key[i]; } ret |= writeChar(ctx, '\"'); ret |= writeChar(ctx, ':'); return ret; } /* Boolean */ ENCODE_JSON(Boolean) { size_t sizeOfJSONBool; if(*src == true) { sizeOfJSONBool = 4; /*"true"*/ } else { sizeOfJSONBool = 5; /*"false"*/ } if(ctx->calcOnly) { ctx->pos += sizeOfJSONBool; return UA_STATUSCODE_GOOD; } if(ctx->pos + sizeOfJSONBool > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(*src) { *(ctx->pos++) = 't'; *(ctx->pos++) = 'r'; *(ctx->pos++) = 'u'; *(ctx->pos++) = 'e'; } else { *(ctx->pos++) = 'f'; *(ctx->pos++) = 'a'; *(ctx->pos++) = 'l'; *(ctx->pos++) = 's'; *(ctx->pos++) = 'e'; } return UA_STATUSCODE_GOOD; } /*****************/ /* Integer Types */ /*****************/ /* Byte */ ENCODE_JSON(Byte) { char buf[4]; UA_UInt16 digits = itoaUnsigned(*src, buf, 10); /* Ensure destination can hold the data- */ if(ctx->pos + digits > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; /* Copy digits to the output string/buffer. */ if(!ctx->calcOnly) memcpy(ctx->pos, buf, digits); ctx->pos += digits; return UA_STATUSCODE_GOOD; } /* signed Byte */ ENCODE_JSON(SByte) { char buf[5]; UA_UInt16 digits = itoaSigned(*src, buf); if(ctx->pos + digits > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) memcpy(ctx->pos, buf, digits); ctx->pos += digits; return UA_STATUSCODE_GOOD; } /* UInt16 */ ENCODE_JSON(UInt16) { char buf[6]; UA_UInt16 digits = itoaUnsigned(*src, buf, 10); if(ctx->pos + digits > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) memcpy(ctx->pos, buf, digits); ctx->pos += digits; return UA_STATUSCODE_GOOD; } /* Int16 */ ENCODE_JSON(Int16) { char buf[7]; UA_UInt16 digits = itoaSigned(*src, buf); if(ctx->pos + digits > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) memcpy(ctx->pos, buf, digits); ctx->pos += digits; return UA_STATUSCODE_GOOD; } /* UInt32 */ ENCODE_JSON(UInt32) { char buf[11]; UA_UInt16 digits = itoaUnsigned(*src, buf, 10); if(ctx->pos + digits > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) memcpy(ctx->pos, buf, digits); ctx->pos += digits; return UA_STATUSCODE_GOOD; } /* Int32 */ ENCODE_JSON(Int32) { char buf[12]; UA_UInt16 digits = itoaSigned(*src, buf); if(ctx->pos + digits > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) memcpy(ctx->pos, buf, digits); ctx->pos += digits; return UA_STATUSCODE_GOOD; } /* UInt64 */ ENCODE_JSON(UInt64) { char buf[23]; buf[0] = '\"'; UA_UInt16 digits = itoaUnsigned(*src, buf + 1, 10); buf[digits + 1] = '\"'; UA_UInt16 length = (UA_UInt16)(digits + 2); if(ctx->pos + length > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) memcpy(ctx->pos, buf, length); ctx->pos += length; return UA_STATUSCODE_GOOD; } /* Int64 */ ENCODE_JSON(Int64) { char buf[23]; buf[0] = '\"'; UA_UInt16 digits = itoaSigned(*src, buf + 1); buf[digits + 1] = '\"'; UA_UInt16 length = (UA_UInt16)(digits + 2); if(ctx->pos + length > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; 
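/* The Int64/UInt64 encoders wrap the digits in quotes because the OPC UA JSON
 * encoding carries 64-bit integers as JSON strings; values beyond 2^53 are not
 * reliably round-tripped through a plain JSON number. */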
if(!ctx->calcOnly) memcpy(ctx->pos, buf, length); ctx->pos += length; return UA_STATUSCODE_GOOD; } /************************/ /* Floating Point Types */ /************************/ /* Convert special numbers to string * - fmt_fp gives NAN, nan,-NAN, -nan, inf, INF, -inf, -INF * - Special floating-point numbers such as positive infinity (INF), negative * infinity (-INF) and not-a-number (NaN) shall be represented by the values * “Infinity”, “-Infinity” and “NaN” encoded as a JSON string. */ static status checkAndEncodeSpecialFloatingPoint(char *buffer, size_t *len) { /*nan and NaN*/ if(*len == 3 && (buffer[0] == 'n' || buffer[0] == 'N') && (buffer[1] == 'a' || buffer[1] == 'A') && (buffer[2] == 'n' || buffer[2] == 'N')) { *len = 5; memcpy(buffer, "\"NaN\"", *len); return UA_STATUSCODE_GOOD; } /*-nan and -NaN*/ if(*len == 4 && buffer[0] == '-' && (buffer[1] == 'n' || buffer[1] == 'N') && (buffer[2] == 'a' || buffer[2] == 'A') && (buffer[3] == 'n' || buffer[3] == 'N')) { *len = 6; memcpy(buffer, "\"-NaN\"", *len); return UA_STATUSCODE_GOOD; } /*inf*/ if(*len == 3 && (buffer[0] == 'i' || buffer[0] == 'I') && (buffer[1] == 'n' || buffer[1] == 'N') && (buffer[2] == 'f' || buffer[2] == 'F')) { *len = 10; memcpy(buffer, "\"Infinity\"", *len); return UA_STATUSCODE_GOOD; } /*-inf*/ if(*len == 4 && buffer[0] == '-' && (buffer[1] == 'i' || buffer[1] == 'I') && (buffer[2] == 'n' || buffer[2] == 'N') && (buffer[3] == 'f' || buffer[3] == 'F')) { *len = 11; memcpy(buffer, "\"-Infinity\"", *len); return UA_STATUSCODE_GOOD; } return UA_STATUSCODE_GOOD; } ENCODE_JSON(Float) { char buffer[200]; if(*src == *src) { #ifdef UA_ENABLE_CUSTOM_LIBC fmt_fp(buffer, *src, 0, -1, 0, 'g'); #else UA_snprintf(buffer, 200, "%.149g", (UA_Double)*src); #endif } else { strcpy(buffer, "NaN"); } size_t len = strlen(buffer); if(len == 0) return UA_STATUSCODE_BADENCODINGERROR; checkAndEncodeSpecialFloatingPoint(buffer, &len); if(ctx->pos + len > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) memcpy(ctx->pos, buffer, len); ctx->pos += len; return UA_STATUSCODE_GOOD; } ENCODE_JSON(Double) { char buffer[2000]; if(*src == *src) { #ifdef UA_ENABLE_CUSTOM_LIBC fmt_fp(buffer, *src, 0, 17, 0, 'g'); #else UA_snprintf(buffer, 2000, "%.1074g", *src); #endif } else { strcpy(buffer, "NaN"); } size_t len = strlen(buffer); checkAndEncodeSpecialFloatingPoint(buffer, &len); if(ctx->pos + len > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) memcpy(ctx->pos, buffer, len); ctx->pos += len; return UA_STATUSCODE_GOOD; } static status encodeJsonArray(CtxJson *ctx, const void *ptr, size_t length, const UA_DataType *type) { encodeJsonSignature encodeType = encodeJsonJumpTable[type->typeKind]; status ret = writeJsonArrStart(ctx); uintptr_t uptr = (uintptr_t)ptr; for(size_t i = 0; i < length && ret == UA_STATUSCODE_GOOD; ++i) { ret |= writeJsonCommaIfNeeded(ctx); ret |= encodeType((const void*)uptr, type, ctx); ctx->commaNeeded[ctx->depth] = true; uptr += type->memSize; } ret |= writeJsonArrEnd(ctx); return ret; } /*****************/ /* Builtin Types */ /*****************/ static const u8 hexmapLower[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; static const u8 hexmapUpper[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'}; ENCODE_JSON(String) { if(!src->data) return writeJsonNull(ctx); if(src->length == 0) { status retval = writeJsonQuote(ctx); retval |= writeJsonQuote(ctx); return retval; } UA_StatusCode ret = 
writeJsonQuote(ctx); /* Escaping adapted from https://github.com/akheron/jansson dump.c */ const char *str = (char*)src->data; const char *pos = str; const char *end = str; const char *lim = str + src->length; UA_UInt32 codepoint = 0; while(1) { const char *text; u8 seq[13]; size_t length; while(end < lim) { end = utf8_iterate(pos, (size_t)(lim - pos), (int32_t *)&codepoint); if(!end) return UA_STATUSCODE_BADENCODINGERROR; /* mandatory escape or control char */ if(codepoint == '\\' || codepoint == '"' || codepoint < 0x20) break; /* TODO: Why is this commented? */ /* slash if((flags & JSON_ESCAPE_SLASH) && codepoint == '/') break;*/ /* non-ASCII if((flags & JSON_ENSURE_ASCII) && codepoint > 0x7F) break;*/ pos = end; } if(pos != str) { if(ctx->pos + (pos - str) > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) memcpy(ctx->pos, str, (size_t)(pos - str)); ctx->pos += pos - str; } if(end == pos) break; /* handle \, /, ", and control codes */ length = 2; switch(codepoint) { case '\\': text = "\\\\"; break; case '\"': text = "\\\""; break; case '\b': text = "\\b"; break; case '\f': text = "\\f"; break; case '\n': text = "\\n"; break; case '\r': text = "\\r"; break; case '\t': text = "\\t"; break; case '/': text = "\\/"; break; default: if(codepoint < 0x10000) { /* codepoint is in BMP */ seq[0] = '\\'; seq[1] = 'u'; UA_Byte b1 = (UA_Byte)(codepoint >> 8u); UA_Byte b2 = (UA_Byte)(codepoint >> 0u); seq[2] = hexmapLower[(b1 & 0xF0u) >> 4u]; seq[3] = hexmapLower[b1 & 0x0Fu]; seq[4] = hexmapLower[(b2 & 0xF0u) >> 4u]; seq[5] = hexmapLower[b2 & 0x0Fu]; length = 6; } else { /* not in BMP -> construct a UTF-16 surrogate pair */ codepoint -= 0x10000; UA_UInt32 first = 0xD800u | ((codepoint & 0xffc00u) >> 10u); UA_UInt32 last = 0xDC00u | (codepoint & 0x003ffu); UA_Byte fb1 = (UA_Byte)(first >> 8u); UA_Byte fb2 = (UA_Byte)(first >> 0u); UA_Byte lb1 = (UA_Byte)(last >> 8u); UA_Byte lb2 = (UA_Byte)(last >> 0u); seq[0] = '\\'; seq[1] = 'u'; seq[2] = hexmapLower[(fb1 & 0xF0u) >> 4u]; seq[3] = hexmapLower[fb1 & 0x0Fu]; seq[4] = hexmapLower[(fb2 & 0xF0u) >> 4u]; seq[5] = hexmapLower[fb2 & 0x0Fu]; seq[6] = '\\'; seq[7] = 'u'; seq[8] = hexmapLower[(lb1 & 0xF0u) >> 4u]; seq[9] = hexmapLower[lb1 & 0x0Fu]; seq[10] = hexmapLower[(lb2 & 0xF0u) >> 4u]; seq[11] = hexmapLower[lb2 & 0x0Fu]; length = 12; } text = (char*)seq; break; } if(ctx->pos + length > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) memcpy(ctx->pos, text, length); ctx->pos += length; str = pos = end; } ret |= writeJsonQuote(ctx); return ret; } ENCODE_JSON(ByteString) { if(!src->data) return writeJsonNull(ctx); if(src->length == 0) { status retval = writeJsonQuote(ctx); retval |= writeJsonQuote(ctx); return retval; } status ret = writeJsonQuote(ctx); size_t flen = 0; unsigned char *ba64 = UA_base64(src->data, src->length, &flen); /* Not converted, no mem */ if(!ba64) return UA_STATUSCODE_BADENCODINGERROR; if(ctx->pos + flen > ctx->end) { UA_free(ba64); return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; } /* Copy flen bytes to output stream. 
*/ if(!ctx->calcOnly) memcpy(ctx->pos, ba64, flen); ctx->pos += flen; /* Base64 result no longer needed */ UA_free(ba64); ret |= writeJsonQuote(ctx); return ret; } /* Converts Guid to a hexadecimal represenation */ static void UA_Guid_to_hex(const UA_Guid *guid, u8* out) { /* 16 byte +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ | data1 |data2|data3| data4 | +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ |aa aa aa aa-bb bb-cc cc-dd dd-ee ee ee ee ee ee| +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ 36 character */ #ifdef hexCharlowerCase const u8 *hexmap = hexmapLower; #else const u8 *hexmap = hexmapUpper; #endif size_t i = 0, j = 28; for(; i<8;i++,j-=4) /* pos 0-7, 4byte, (a) */ out[i] = hexmap[(guid->data1 >> j) & 0x0Fu]; out[i++] = '-'; /* pos 8 */ for(j=12; i<13;i++,j-=4) /* pos 9-12, 2byte, (b) */ out[i] = hexmap[(uint16_t)(guid->data2 >> j) & 0x0Fu]; out[i++] = '-'; /* pos 13 */ for(j=12; i<18;i++,j-=4) /* pos 14-17, 2byte (c) */ out[i] = hexmap[(uint16_t)(guid->data3 >> j) & 0x0Fu]; out[i++] = '-'; /* pos 18 */ for(j=0;i<23;i+=2,j++) { /* pos 19-22, 2byte (d) */ out[i] = hexmap[(guid->data4[j] & 0xF0u) >> 4u]; out[i+1] = hexmap[guid->data4[j] & 0x0Fu]; } out[i++] = '-'; /* pos 23 */ for(j=2; i<36;i+=2,j++) { /* pos 24-35, 6byte (e) */ out[i] = hexmap[(guid->data4[j] & 0xF0u) >> 4u]; out[i+1] = hexmap[guid->data4[j] & 0x0Fu]; } } /* Guid */ ENCODE_JSON(Guid) { if(ctx->pos + 38 > ctx->end) /* 36 + 2 (") */ return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; status ret = writeJsonQuote(ctx); u8 *buf = ctx->pos; if(!ctx->calcOnly) UA_Guid_to_hex(src, buf); ctx->pos += 36; ret |= writeJsonQuote(ctx); return ret; } static void printNumber(u16 n, u8 *pos, size_t digits) { for(size_t i = digits; i > 0; --i) { pos[i - 1] = (u8) ((n % 10) + '0'); n = n / 10; } } ENCODE_JSON(DateTime) { UA_DateTimeStruct tSt = UA_DateTime_toStruct(*src); /* Format: yyyy-MM-dd'T'HH:mm:ss.SSSSSSSSS'Z' is used. 
max 30 bytes.*/ UA_Byte buffer[UA_JSON_DATETIME_LENGTH]; printNumber(tSt.year, &buffer[0], 4); buffer[4] = '-'; printNumber(tSt.month, &buffer[5], 2); buffer[7] = '-'; printNumber(tSt.day, &buffer[8], 2); buffer[10] = 'T'; printNumber(tSt.hour, &buffer[11], 2); buffer[13] = ':'; printNumber(tSt.min, &buffer[14], 2); buffer[16] = ':'; printNumber(tSt.sec, &buffer[17], 2); buffer[19] = '.'; printNumber(tSt.milliSec, &buffer[20], 3); printNumber(tSt.microSec, &buffer[23], 3); printNumber(tSt.nanoSec, &buffer[26], 3); size_t length = 28; while (buffer[length] == '0') length--; if (length != 19) length++; buffer[length] = 'Z'; UA_String str = {length + 1, buffer}; return ENCODE_DIRECT_JSON(&str, String); } /* NodeId */ static status NodeId_encodeJsonInternal(UA_NodeId const *src, CtxJson *ctx) { status ret = UA_STATUSCODE_GOOD; switch (src->identifierType) { case UA_NODEIDTYPE_NUMERIC: ret |= writeJsonKey(ctx, UA_JSONKEY_ID); ret |= ENCODE_DIRECT_JSON(&src->identifier.numeric, UInt32); break; case UA_NODEIDTYPE_STRING: ret |= writeJsonKey(ctx, UA_JSONKEY_IDTYPE); ret |= writeChar(ctx, '1'); ret |= writeJsonKey(ctx, UA_JSONKEY_ID); ret |= ENCODE_DIRECT_JSON(&src->identifier.string, String); break; case UA_NODEIDTYPE_GUID: ret |= writeJsonKey(ctx, UA_JSONKEY_IDTYPE); ret |= writeChar(ctx, '2'); ret |= writeJsonKey(ctx, UA_JSONKEY_ID); /* Id */ ret |= ENCODE_DIRECT_JSON(&src->identifier.guid, Guid); break; case UA_NODEIDTYPE_BYTESTRING: ret |= writeJsonKey(ctx, UA_JSONKEY_IDTYPE); ret |= writeChar(ctx, '3'); ret |= writeJsonKey(ctx, UA_JSONKEY_ID); /* Id */ ret |= ENCODE_DIRECT_JSON(&src->identifier.byteString, ByteString); break; default: return UA_STATUSCODE_BADINTERNALERROR; } return ret; } ENCODE_JSON(NodeId) { UA_StatusCode ret = writeJsonObjStart(ctx); ret |= NodeId_encodeJsonInternal(src, ctx); if(ctx->useReversible) { if(src->namespaceIndex > 0) { ret |= writeJsonKey(ctx, UA_JSONKEY_NAMESPACE); ret |= ENCODE_DIRECT_JSON(&src->namespaceIndex, UInt16); } } else { /* For the non-reversible encoding, the field is the NamespaceUri * associated with the NamespaceIndex, encoded as a JSON string. * A NamespaceIndex of 1 is always encoded as a JSON number. */ if(src->namespaceIndex == 1) { ret |= writeJsonKey(ctx, UA_JSONKEY_NAMESPACE); ret |= ENCODE_DIRECT_JSON(&src->namespaceIndex, UInt16); } else { ret |= writeJsonKey(ctx, UA_JSONKEY_NAMESPACE); /* Check if Namespace given and in range */ if(src->namespaceIndex < ctx->namespacesSize && ctx->namespaces != NULL) { UA_String namespaceEntry = ctx->namespaces[src->namespaceIndex]; ret |= ENCODE_DIRECT_JSON(&namespaceEntry, String); } else { return UA_STATUSCODE_BADNOTFOUND; } } } ret |= writeJsonObjEnd(ctx); return ret; } /* ExpandedNodeId */ ENCODE_JSON(ExpandedNodeId) { status ret = writeJsonObjStart(ctx); /* Encode the NodeId */ ret |= NodeId_encodeJsonInternal(&src->nodeId, ctx); if(ctx->useReversible) { if(src->namespaceUri.data != NULL && src->namespaceUri.length != 0 && (void*) src->namespaceUri.data > UA_EMPTY_ARRAY_SENTINEL) { /* If the NamespaceUri is specified it is encoded as a JSON string in this field. */ ret |= writeJsonKey(ctx, UA_JSONKEY_NAMESPACE); ret |= ENCODE_DIRECT_JSON(&src->namespaceUri, String); } else { /* If the NamespaceUri is not specified, the NamespaceIndex is encoded with these rules: * The field is encoded as a JSON number for the reversible encoding. * The field is omitted if the NamespaceIndex equals 0. 
*/ if(src->nodeId.namespaceIndex > 0) { ret |= writeJsonKey(ctx, UA_JSONKEY_NAMESPACE); ret |= ENCODE_DIRECT_JSON(&src->nodeId.namespaceIndex, UInt16); } } /* Encode the serverIndex/Url * This field is encoded as a JSON number for the reversible encoding. * This field is omitted if the ServerIndex equals 0. */ if(src->serverIndex > 0) { ret |= writeJsonKey(ctx, UA_JSONKEY_SERVERURI); ret |= ENCODE_DIRECT_JSON(&src->serverIndex, UInt32); } ret |= writeJsonObjEnd(ctx); return ret; } /* NON-Reversible Case */ /* If the NamespaceUri is not specified, the NamespaceIndex is encoded with these rules: * For the non-reversible encoding the field is the NamespaceUri associated with the * NamespaceIndex encoded as a JSON string. * A NamespaceIndex of 1 is always encoded as a JSON number. */ if(src->namespaceUri.data != NULL && src->namespaceUri.length != 0) { ret |= writeJsonKey(ctx, UA_JSONKEY_NAMESPACE); ret |= ENCODE_DIRECT_JSON(&src->namespaceUri, String); if(ret != UA_STATUSCODE_GOOD) return ret; } else { if(src->nodeId.namespaceIndex == 1) { ret |= writeJsonKey(ctx, UA_JSONKEY_NAMESPACE); ret |= ENCODE_DIRECT_JSON(&src->nodeId.namespaceIndex, UInt16); if(ret != UA_STATUSCODE_GOOD) return ret; } else { ret |= writeJsonKey(ctx, UA_JSONKEY_NAMESPACE); /* Check if Namespace given and in range */ if(src->nodeId.namespaceIndex < ctx->namespacesSize && ctx->namespaces != NULL) { UA_String namespaceEntry = ctx->namespaces[src->nodeId.namespaceIndex]; ret |= ENCODE_DIRECT_JSON(&namespaceEntry, String); if(ret != UA_STATUSCODE_GOOD) return ret; } else { return UA_STATUSCODE_BADNOTFOUND; } } } /* For the non-reversible encoding, this field is the ServerUri associated * with the ServerIndex portion of the ExpandedNodeId, encoded as a JSON * string. */ /* Check if Namespace given and in range */ if(src->serverIndex < ctx->serverUrisSize && ctx->serverUris != NULL) { UA_String serverUriEntry = ctx->serverUris[src->serverIndex]; ret |= writeJsonKey(ctx, UA_JSONKEY_SERVERURI); ret |= ENCODE_DIRECT_JSON(&serverUriEntry, String); } else { return UA_STATUSCODE_BADNOTFOUND; } ret |= writeJsonObjEnd(ctx); return ret; } /* LocalizedText */ ENCODE_JSON(LocalizedText) { if(ctx->useReversible) { status ret = writeJsonObjStart(ctx); ret |= writeJsonKey(ctx, UA_JSONKEY_LOCALE); ret |= ENCODE_DIRECT_JSON(&src->locale, String); ret |= writeJsonKey(ctx, UA_JSONKEY_TEXT); ret |= ENCODE_DIRECT_JSON(&src->text, String); ret |= writeJsonObjEnd(ctx); return ret; } /* For the non-reversible form, LocalizedText value shall be encoded as a * JSON string containing the Text component.*/ return ENCODE_DIRECT_JSON(&src->text, String); } ENCODE_JSON(QualifiedName) { status ret = writeJsonObjStart(ctx); ret |= writeJsonKey(ctx, UA_JSONKEY_NAME); ret |= ENCODE_DIRECT_JSON(&src->name, String); if(ctx->useReversible) { if(src->namespaceIndex != 0) { ret |= writeJsonKey(ctx, UA_JSONKEY_URI); ret |= ENCODE_DIRECT_JSON(&src->namespaceIndex, UInt16); } } else { /* For the non-reversible form, the NamespaceUri associated with the * NamespaceIndex portion of the QualifiedName is encoded as JSON string * unless the NamespaceIndex is 1 or if NamespaceUri is unknown. In * these cases, the NamespaceIndex is encoded as a JSON number. 
*/ if(src->namespaceIndex == 1) { ret |= writeJsonKey(ctx, UA_JSONKEY_URI); ret |= ENCODE_DIRECT_JSON(&src->namespaceIndex, UInt16); } else { ret |= writeJsonKey(ctx, UA_JSONKEY_URI); /* Check if Namespace given and in range */ if(src->namespaceIndex < ctx->namespacesSize && ctx->namespaces != NULL) { UA_String namespaceEntry = ctx->namespaces[src->namespaceIndex]; ret |= ENCODE_DIRECT_JSON(&namespaceEntry, String); } else { /* If not encode as number */ ret |= ENCODE_DIRECT_JSON(&src->namespaceIndex, UInt16); } } } return ret | writeJsonObjEnd(ctx); } ENCODE_JSON(StatusCode) { if(!src) return writeJsonNull(ctx); if(ctx->useReversible) return ENCODE_DIRECT_JSON(src, UInt32); if(*src == UA_STATUSCODE_GOOD) return writeJsonNull(ctx); status ret = UA_STATUSCODE_GOOD; ret |= writeJsonObjStart(ctx); ret |= writeJsonKey(ctx, UA_JSONKEY_CODE); ret |= ENCODE_DIRECT_JSON(src, UInt32); ret |= writeJsonKey(ctx, UA_JSONKEY_SYMBOL); const char *codename = UA_StatusCode_name(*src); UA_String statusDescription = UA_STRING((char*)(uintptr_t)codename); ret |= ENCODE_DIRECT_JSON(&statusDescription, String); ret |= writeJsonObjEnd(ctx); return ret; } /* ExtensionObject */ ENCODE_JSON(ExtensionObject) { u8 encoding = (u8) src->encoding; if(encoding == UA_EXTENSIONOBJECT_ENCODED_NOBODY) return writeJsonNull(ctx); status ret = UA_STATUSCODE_GOOD; /* already encoded content.*/ if(encoding <= UA_EXTENSIONOBJECT_ENCODED_XML) { ret |= writeJsonObjStart(ctx); if(ctx->useReversible) { ret |= writeJsonKey(ctx, UA_JSONKEY_TYPEID); ret |= ENCODE_DIRECT_JSON(&src->content.encoded.typeId, NodeId); if(ret != UA_STATUSCODE_GOOD) return ret; } switch (src->encoding) { case UA_EXTENSIONOBJECT_ENCODED_BYTESTRING: { if(ctx->useReversible) { ret |= writeJsonKey(ctx, UA_JSONKEY_ENCODING); ret |= writeChar(ctx, '1'); } ret |= writeJsonKey(ctx, UA_JSONKEY_BODY); ret |= ENCODE_DIRECT_JSON(&src->content.encoded.body, String); break; } case UA_EXTENSIONOBJECT_ENCODED_XML: { if(ctx->useReversible) { ret |= writeJsonKey(ctx, UA_JSONKEY_ENCODING); ret |= writeChar(ctx, '2'); } ret |= writeJsonKey(ctx, UA_JSONKEY_BODY); ret |= ENCODE_DIRECT_JSON(&src->content.encoded.body, String); break; } default: ret = UA_STATUSCODE_BADINTERNALERROR; } ret |= writeJsonObjEnd(ctx); return ret; } /* encoding <= UA_EXTENSIONOBJECT_ENCODED_XML */ /* Cannot encode with no type description */ if(!src->content.decoded.type) return UA_STATUSCODE_BADENCODINGERROR; if(!src->content.decoded.data) return writeJsonNull(ctx); UA_NodeId typeId = src->content.decoded.type->typeId; if(typeId.identifierType != UA_NODEIDTYPE_NUMERIC) return UA_STATUSCODE_BADENCODINGERROR; ret |= writeJsonObjStart(ctx); const UA_DataType *contentType = src->content.decoded.type; if(ctx->useReversible) { /* REVERSIBLE */ ret |= writeJsonKey(ctx, UA_JSONKEY_TYPEID); ret |= ENCODE_DIRECT_JSON(&typeId, NodeId); /* Encode the content */ ret |= writeJsonKey(ctx, UA_JSONKEY_BODY); ret |= encodeJsonInternal(src->content.decoded.data, contentType, ctx); } else { /* NON-REVERSIBLE * For the non-reversible form, ExtensionObject values * shall be encoded as a JSON object containing only the * value of the Body field. The TypeId and Encoding fields are dropped. * * TODO: UA_JSONKEY_BODY key in the ExtensionObject? 
*/ ret |= writeJsonKey(ctx, UA_JSONKEY_BODY); ret |= encodeJsonInternal(src->content.decoded.data, contentType, ctx); } ret |= writeJsonObjEnd(ctx); return ret; } static status Variant_encodeJsonWrapExtensionObject(const UA_Variant *src, const bool isArray, CtxJson *ctx) { size_t length = 1; status ret = UA_STATUSCODE_GOOD; if(isArray) { if(src->arrayLength > UA_INT32_MAX) return UA_STATUSCODE_BADENCODINGERROR; length = src->arrayLength; } /* Set up the ExtensionObject */ UA_ExtensionObject eo; UA_ExtensionObject_init(&eo); eo.encoding = UA_EXTENSIONOBJECT_DECODED; eo.content.decoded.type = src->type; const u16 memSize = src->type->memSize; uintptr_t ptr = (uintptr_t) src->data; if(isArray) { ret |= writeJsonArrStart(ctx); ctx->commaNeeded[ctx->depth] = false; /* Iterate over the array */ for(size_t i = 0; i < length && ret == UA_STATUSCODE_GOOD; ++i) { eo.content.decoded.data = (void*) ptr; ret |= writeJsonArrElm(ctx, &eo, &UA_TYPES[UA_TYPES_EXTENSIONOBJECT]); ptr += memSize; } ret |= writeJsonArrEnd(ctx); return ret; } eo.content.decoded.data = (void*) ptr; return encodeJsonInternal(&eo, &UA_TYPES[UA_TYPES_EXTENSIONOBJECT], ctx); } static status addMultiArrayContentJSON(CtxJson *ctx, void* array, const UA_DataType *type, size_t *index, UA_UInt32 *arrayDimensions, size_t dimensionIndex, size_t dimensionSize) { /* Check the recursion limit */ if(ctx->depth > UA_JSON_ENCODING_MAX_RECURSION) return UA_STATUSCODE_BADENCODINGERROR; /* Stop recursion: The inner Arrays are written */ status ret; if(dimensionIndex == (dimensionSize - 1)) { ret = encodeJsonArray(ctx, ((u8*)array) + (type->memSize * *index), arrayDimensions[dimensionIndex], type); (*index) += arrayDimensions[dimensionIndex]; return ret; } /* Recurse to the next dimension */ ret = writeJsonArrStart(ctx); for(size_t i = 0; i < arrayDimensions[dimensionIndex]; i++) { ret |= writeJsonCommaIfNeeded(ctx); ret |= addMultiArrayContentJSON(ctx, array, type, index, arrayDimensions, dimensionIndex + 1, dimensionSize); ctx->commaNeeded[ctx->depth] = true; if(ret != UA_STATUSCODE_GOOD) return ret; } ret |= writeJsonArrEnd(ctx); return ret; } ENCODE_JSON(Variant) { /* If type is 0 (NULL) the Variant contains a NULL value and the containing * JSON object shall be omitted or replaced by the JSON literal ‘null’ (when * an element of a JSON array). */ if(!src->type) { return writeJsonNull(ctx); } /* Set the content type in the encoding mask */ const UA_Boolean isBuiltin = (src->type->typeKind <= UA_DATATYPEKIND_DIAGNOSTICINFO); const UA_Boolean isEnum = (src->type->typeKind == UA_DATATYPEKIND_ENUM); /* Set the array type in the encoding mask */ const bool isArray = src->arrayLength > 0 || src->data <= UA_EMPTY_ARRAY_SENTINEL; const bool hasDimensions = isArray && src->arrayDimensionsSize > 0; status ret = UA_STATUSCODE_GOOD; if(ctx->useReversible) { ret |= writeJsonObjStart(ctx); if(ret != UA_STATUSCODE_GOOD) return ret; /* Encode the content */ if(!isBuiltin && !isEnum) { /* REVERSIBLE: NOT BUILTIN, can it be encoded? 
Wrap in extension object.*/ ret |= writeJsonKey(ctx, UA_JSONKEY_TYPE); ret |= ENCODE_DIRECT_JSON(&UA_TYPES[UA_TYPES_EXTENSIONOBJECT].typeId.identifier.numeric, UInt32); ret |= writeJsonKey(ctx, UA_JSONKEY_BODY); ret |= Variant_encodeJsonWrapExtensionObject(src, isArray, ctx); } else if(!isArray) { /*REVERSIBLE: BUILTIN, single value.*/ ret |= writeJsonKey(ctx, UA_JSONKEY_TYPE); ret |= ENCODE_DIRECT_JSON(&src->type->typeId.identifier.numeric, UInt32); ret |= writeJsonKey(ctx, UA_JSONKEY_BODY); ret |= encodeJsonInternal(src->data, src->type, ctx); } else { /*REVERSIBLE: BUILTIN, array.*/ ret |= writeJsonKey(ctx, UA_JSONKEY_TYPE); ret |= ENCODE_DIRECT_JSON(&src->type->typeId.identifier.numeric, UInt32); ret |= writeJsonKey(ctx, UA_JSONKEY_BODY); ret |= encodeJsonArray(ctx, src->data, src->arrayLength, src->type); } if(ret != UA_STATUSCODE_GOOD) return ret; /* REVERSIBLE: Encode the array dimensions */ if(hasDimensions && ret == UA_STATUSCODE_GOOD) { ret |= writeJsonKey(ctx, UA_JSONKEY_DIMENSION); ret |= encodeJsonArray(ctx, src->arrayDimensions, src->arrayDimensionsSize, &UA_TYPES[UA_TYPES_INT32]); if(ret != UA_STATUSCODE_GOOD) return ret; } ret |= writeJsonObjEnd(ctx); return ret; } /* reversible */ /* NON-REVERSIBLE * For the non-reversible form, Variant values shall be encoded as a JSON object containing only * the value of the Body field. The Type and Dimensions fields are dropped. Multi-dimensional * arrays are encoded as a multi dimensional JSON array as described in 5.4.5. */ ret |= writeJsonObjStart(ctx); if(!isBuiltin && !isEnum) { /*NON REVERSIBLE: NOT BUILTIN, can it be encoded? Wrap in extension object.*/ if(src->arrayDimensionsSize > 1) { return UA_STATUSCODE_BADNOTIMPLEMENTED; } ret |= writeJsonKey(ctx, UA_JSONKEY_BODY); ret |= Variant_encodeJsonWrapExtensionObject(src, isArray, ctx); } else if(!isArray) { /*NON REVERSIBLE: BUILTIN, single value.*/ ret |= writeJsonKey(ctx, UA_JSONKEY_BODY); ret |= encodeJsonInternal(src->data, src->type, ctx); } else { /*NON REVERSIBLE: BUILTIN, array.*/ ret |= writeJsonKey(ctx, UA_JSONKEY_BODY); size_t dimensionSize = src->arrayDimensionsSize; if(dimensionSize > 1) { /*nonreversible multidimensional array*/ size_t index = 0; size_t dimensionIndex = 0; void *ptr = src->data; const UA_DataType *arraytype = src->type; ret |= addMultiArrayContentJSON(ctx, ptr, arraytype, &index, src->arrayDimensions, dimensionIndex, dimensionSize); } else { /*nonreversible simple array*/ ret |= encodeJsonArray(ctx, src->data, src->arrayLength, src->type); } } ret |= writeJsonObjEnd(ctx); return ret; } /* DataValue */ ENCODE_JSON(DataValue) { UA_Boolean hasValue = src->hasValue && src->value.type != NULL; UA_Boolean hasStatus = src->hasStatus && src->status; UA_Boolean hasSourceTimestamp = src->hasSourceTimestamp && src->sourceTimestamp; UA_Boolean hasSourcePicoseconds = src->hasSourcePicoseconds && src->sourcePicoseconds; UA_Boolean hasServerTimestamp = src->hasServerTimestamp && src->serverTimestamp; UA_Boolean hasServerPicoseconds = src->hasServerPicoseconds && src->serverPicoseconds; if(!hasValue && !hasStatus && !hasSourceTimestamp && !hasSourcePicoseconds && !hasServerTimestamp && !hasServerPicoseconds) { return writeJsonNull(ctx); /*no element, encode as null*/ } status ret = UA_STATUSCODE_GOOD; ret |= writeJsonObjStart(ctx); if(hasValue) { ret |= writeJsonKey(ctx, UA_JSONKEY_VALUE); ret |= ENCODE_DIRECT_JSON(&src->value, Variant); if(ret != UA_STATUSCODE_GOOD) return ret; } if(hasStatus) { ret |= writeJsonKey(ctx, UA_JSONKEY_STATUS); ret |= 
ENCODE_DIRECT_JSON(&src->status, StatusCode); if(ret != UA_STATUSCODE_GOOD) return ret; } if(hasSourceTimestamp) { ret |= writeJsonKey(ctx, UA_JSONKEY_SOURCETIMESTAMP); ret |= ENCODE_DIRECT_JSON(&src->sourceTimestamp, DateTime); if(ret != UA_STATUSCODE_GOOD) return ret; } if(hasSourcePicoseconds) { ret |= writeJsonKey(ctx, UA_JSONKEY_SOURCEPICOSECONDS); ret |= ENCODE_DIRECT_JSON(&src->sourcePicoseconds, UInt16); if(ret != UA_STATUSCODE_GOOD) return ret; } if(hasServerTimestamp) { ret |= writeJsonKey(ctx, UA_JSONKEY_SERVERTIMESTAMP); ret |= ENCODE_DIRECT_JSON(&src->serverTimestamp, DateTime); if(ret != UA_STATUSCODE_GOOD) return ret; } if(hasServerPicoseconds) { ret |= writeJsonKey(ctx, UA_JSONKEY_SERVERPICOSECONDS); ret |= ENCODE_DIRECT_JSON(&src->serverPicoseconds, UInt16); if(ret != UA_STATUSCODE_GOOD) return ret; } ret |= writeJsonObjEnd(ctx); return ret; } /* DiagnosticInfo */ ENCODE_JSON(DiagnosticInfo) { status ret = UA_STATUSCODE_GOOD; if(!src->hasSymbolicId && !src->hasNamespaceUri && !src->hasLocalizedText && !src->hasLocale && !src->hasAdditionalInfo && !src->hasInnerDiagnosticInfo && !src->hasInnerStatusCode) { return writeJsonNull(ctx); /*no element present, encode as null.*/ } ret |= writeJsonObjStart(ctx); if(src->hasSymbolicId) { ret |= writeJsonKey(ctx, UA_JSONKEY_SYMBOLICID); ret |= ENCODE_DIRECT_JSON(&src->symbolicId, UInt32); if(ret != UA_STATUSCODE_GOOD) return ret; } if(src->hasNamespaceUri) { ret |= writeJsonKey(ctx, UA_JSONKEY_NAMESPACEURI); ret |= ENCODE_DIRECT_JSON(&src->namespaceUri, UInt32); if(ret != UA_STATUSCODE_GOOD) return ret; } if(src->hasLocalizedText) { ret |= writeJsonKey(ctx, UA_JSONKEY_LOCALIZEDTEXT); ret |= ENCODE_DIRECT_JSON(&src->localizedText, UInt32); if(ret != UA_STATUSCODE_GOOD) return ret; } if(src->hasLocale) { ret |= writeJsonKey(ctx, UA_JSONKEY_LOCALE); ret |= ENCODE_DIRECT_JSON(&src->locale, UInt32); if(ret != UA_STATUSCODE_GOOD) return ret; } if(src->hasAdditionalInfo) { ret |= writeJsonKey(ctx, UA_JSONKEY_ADDITIONALINFO); ret |= ENCODE_DIRECT_JSON(&src->additionalInfo, String); if(ret != UA_STATUSCODE_GOOD) return ret; } if(src->hasInnerStatusCode) { ret |= writeJsonKey(ctx, UA_JSONKEY_INNERSTATUSCODE); ret |= ENCODE_DIRECT_JSON(&src->innerStatusCode, StatusCode); if(ret != UA_STATUSCODE_GOOD) return ret; } if(src->hasInnerDiagnosticInfo && src->innerDiagnosticInfo) { ret |= writeJsonKey(ctx, UA_JSONKEY_INNERDIAGNOSTICINFO); /* Check recursion depth in encodeJsonInternal */ ret |= encodeJsonInternal(src->innerDiagnosticInfo, &UA_TYPES[UA_TYPES_DIAGNOSTICINFO], ctx); if(ret != UA_STATUSCODE_GOOD) return ret; } ret |= writeJsonObjEnd(ctx); return ret; } static status encodeJsonStructure(const void *src, const UA_DataType *type, CtxJson *ctx) { /* Check the recursion limit */ if(ctx->depth > UA_JSON_ENCODING_MAX_RECURSION) return UA_STATUSCODE_BADENCODINGERROR; ctx->depth++; status ret = writeJsonObjStart(ctx); uintptr_t ptr = (uintptr_t) src; u8 membersSize = type->membersSize; const UA_DataType * typelists[2] = {UA_TYPES, &type[-type->typeIndex]}; for(size_t i = 0; i < membersSize && ret == UA_STATUSCODE_GOOD; ++i) { const UA_DataTypeMember *m = &type->members[i]; const UA_DataType *mt = &typelists[!m->namespaceZero][m->memberTypeIndex]; if(m->memberName != NULL && *m->memberName != 0) ret |= writeJsonKey(ctx, m->memberName); if(!m->isArray) { ptr += m->padding; size_t memSize = mt->memSize; ret |= encodeJsonJumpTable[mt->typeKind]((const void*) ptr, mt, ctx); ptr += memSize; } else { ptr += m->padding; const size_t length = *((const 
size_t*) ptr); ptr += sizeof (size_t); ret |= encodeJsonArray(ctx, *(void * const *)ptr, length, mt); ptr += sizeof (void*); } } ret |= writeJsonObjEnd(ctx); ctx->depth--; return ret; } static status encodeJsonNotImplemented(const void *src, const UA_DataType *type, CtxJson *ctx) { (void) src, (void) type, (void)ctx; return UA_STATUSCODE_BADNOTIMPLEMENTED; } const encodeJsonSignature encodeJsonJumpTable[UA_DATATYPEKINDS] = { (encodeJsonSignature)Boolean_encodeJson, (encodeJsonSignature)SByte_encodeJson, /* SByte */ (encodeJsonSignature)Byte_encodeJson, (encodeJsonSignature)Int16_encodeJson, /* Int16 */ (encodeJsonSignature)UInt16_encodeJson, (encodeJsonSignature)Int32_encodeJson, /* Int32 */ (encodeJsonSignature)UInt32_encodeJson, (encodeJsonSignature)Int64_encodeJson, /* Int64 */ (encodeJsonSignature)UInt64_encodeJson, (encodeJsonSignature)Float_encodeJson, (encodeJsonSignature)Double_encodeJson, (encodeJsonSignature)String_encodeJson, (encodeJsonSignature)DateTime_encodeJson, /* DateTime */ (encodeJsonSignature)Guid_encodeJson, (encodeJsonSignature)ByteString_encodeJson, /* ByteString */ (encodeJsonSignature)String_encodeJson, /* XmlElement */ (encodeJsonSignature)NodeId_encodeJson, (encodeJsonSignature)ExpandedNodeId_encodeJson, (encodeJsonSignature)StatusCode_encodeJson, /* StatusCode */ (encodeJsonSignature)QualifiedName_encodeJson, /* QualifiedName */ (encodeJsonSignature)LocalizedText_encodeJson, (encodeJsonSignature)ExtensionObject_encodeJson, (encodeJsonSignature)DataValue_encodeJson, (encodeJsonSignature)Variant_encodeJson, (encodeJsonSignature)DiagnosticInfo_encodeJson, (encodeJsonSignature)encodeJsonNotImplemented, /* Decimal */ (encodeJsonSignature)Int32_encodeJson, /* Enum */ (encodeJsonSignature)encodeJsonStructure, (encodeJsonSignature)encodeJsonNotImplemented, /* Structure with optional fields */ (encodeJsonSignature)encodeJsonNotImplemented, /* Union */ (encodeJsonSignature)encodeJsonNotImplemented /* BitfieldCluster */ }; status encodeJsonInternal(const void *src, const UA_DataType *type, CtxJson *ctx) { return encodeJsonJumpTable[type->typeKind](src, type, ctx); } status UA_FUNC_ATTR_WARN_UNUSED_RESULT UA_encodeJson(const void *src, const UA_DataType *type, u8 **bufPos, const u8 **bufEnd, UA_String *namespaces, size_t namespaceSize, UA_String *serverUris, size_t serverUriSize, UA_Boolean useReversible) { if(!src || !type) return UA_STATUSCODE_BADINTERNALERROR; /* Set up the context */ CtxJson ctx; memset(&ctx, 0, sizeof(ctx)); ctx.pos = *bufPos; ctx.end = *bufEnd; ctx.depth = 0; ctx.namespaces = namespaces; ctx.namespacesSize = namespaceSize; ctx.serverUris = serverUris; ctx.serverUrisSize = serverUriSize; ctx.useReversible = useReversible; ctx.calcOnly = false; /* Encode */ status ret = encodeJsonJumpTable[type->typeKind](src, type, &ctx); *bufPos = ctx.pos; *bufEnd = ctx.end; return ret; } /************/ /* CalcSize */ /************/ size_t UA_calcSizeJson(const void *src, const UA_DataType *type, UA_String *namespaces, size_t namespaceSize, UA_String *serverUris, size_t serverUriSize, UA_Boolean useReversible) { if(!src || !type) return UA_STATUSCODE_BADINTERNALERROR; /* Set up the context */ CtxJson ctx; memset(&ctx, 0, sizeof(ctx)); ctx.pos = 0; ctx.end = (const UA_Byte*)(uintptr_t)SIZE_MAX; ctx.depth = 0; ctx.namespaces = namespaces; ctx.namespacesSize = namespaceSize; ctx.serverUris = serverUris; ctx.serverUrisSize = serverUriSize; ctx.useReversible = useReversible; ctx.calcOnly = true; /* Encode */ status ret = encodeJsonJumpTable[type->typeKind](src, type, 
&ctx); if(ret != UA_STATUSCODE_GOOD) return 0; return (size_t)ctx.pos; } /**********/ /* Decode */ /**********/ /* Macro which gets current size and char pointer of current Token. Needs * ParseCtx (parseCtx) and CtxJson (ctx). Does NOT increment index of Token. */ #define GET_TOKEN(data, size) do { \ (size) = (size_t)(parseCtx->tokenArray[parseCtx->index].end - parseCtx->tokenArray[parseCtx->index].start); \ (data) = (char*)(ctx->pos + parseCtx->tokenArray[parseCtx->index].start); } while(0) #define ALLOW_NULL do { \ if(isJsonNull(ctx, parseCtx)) { \ parseCtx->index++; \ return UA_STATUSCODE_GOOD; \ }} while(0) #define CHECK_TOKEN_BOUNDS do { \ if(parseCtx->index >= parseCtx->tokenCount) \ return UA_STATUSCODE_BADDECODINGERROR; \ } while(0) #define CHECK_PRIMITIVE do { \ if(getJsmnType(parseCtx) != JSMN_PRIMITIVE) { \ return UA_STATUSCODE_BADDECODINGERROR; \ }} while(0) #define CHECK_STRING do { \ if(getJsmnType(parseCtx) != JSMN_STRING) { \ return UA_STATUSCODE_BADDECODINGERROR; \ }} while(0) #define CHECK_OBJECT do { \ if(getJsmnType(parseCtx) != JSMN_OBJECT) { \ return UA_STATUSCODE_BADDECODINGERROR; \ }} while(0) /* Forward declarations*/ #define DECODE_JSON(TYPE) static status \ TYPE##_decodeJson(UA_##TYPE *dst, const UA_DataType *type, \ CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken) /* decode without moving the token index */ #define DECODE_DIRECT_JSON(DST, TYPE) TYPE##_decodeJson((UA_##TYPE*)DST, NULL, ctx, parseCtx, false) /* If parseCtx->index points to the beginning of an object, move the index to * the next token after this object. Attention! The index can be moved after the * last parsed token. So the array length has to be checked afterwards. */ static void skipObject(ParseCtx *parseCtx) { int end = parseCtx->tokenArray[parseCtx->index].end; do { parseCtx->index++; } while(parseCtx->index < parseCtx->tokenCount && parseCtx->tokenArray[parseCtx->index].start < end); } static status Array_decodeJson(void *dst, const UA_DataType *type, CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken); static status Array_decodeJson_internal(void **dst, const UA_DataType *type, CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken); static status Variant_decodeJsonUnwrapExtensionObject(UA_Variant *dst, const UA_DataType *type, CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken); /* Json decode Helper */ jsmntype_t getJsmnType(const ParseCtx *parseCtx) { if(parseCtx->index >= parseCtx->tokenCount) return JSMN_UNDEFINED; return parseCtx->tokenArray[parseCtx->index].type; } UA_Boolean isJsonNull(const CtxJson *ctx, const ParseCtx *parseCtx) { if(parseCtx->index >= parseCtx->tokenCount) return false; if(parseCtx->tokenArray[parseCtx->index].type != JSMN_PRIMITIVE) { return false; } char* elem = (char*)(ctx->pos + parseCtx->tokenArray[parseCtx->index].start); return (elem[0] == 'n' && elem[1] == 'u' && elem[2] == 'l' && elem[3] == 'l'); } static UA_SByte jsoneq(const char *json, jsmntok_t *tok, const char *searchKey) { /* TODO: necessary? 
if(json == NULL || tok == NULL || searchKey == NULL) { return -1; } */ if(tok->type == JSMN_STRING) { if(strlen(searchKey) == (size_t)(tok->end - tok->start) ) { if(strncmp(json + tok->start, (const char*)searchKey, (size_t)(tok->end - tok->start)) == 0) { return 0; } } } return -1; } DECODE_JSON(Boolean) { CHECK_PRIMITIVE; CHECK_TOKEN_BOUNDS; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); if(tokenSize == 4 && tokenData[0] == 't' && tokenData[1] == 'r' && tokenData[2] == 'u' && tokenData[3] == 'e') { *dst = true; } else if(tokenSize == 5 && tokenData[0] == 'f' && tokenData[1] == 'a' && tokenData[2] == 'l' && tokenData[3] == 's' && tokenData[4] == 'e') { *dst = false; } else { return UA_STATUSCODE_BADDECODINGERROR; } if(moveToken) parseCtx->index++; return UA_STATUSCODE_GOOD; } #ifdef UA_ENABLE_CUSTOM_LIBC static UA_StatusCode parseUnsignedInteger(char* inputBuffer, size_t sizeOfBuffer, UA_UInt64 *destinationOfNumber) { UA_UInt64 d = 0; atoiUnsigned(inputBuffer, sizeOfBuffer, &d); if(!destinationOfNumber) return UA_STATUSCODE_BADDECODINGERROR; *destinationOfNumber = d; return UA_STATUSCODE_GOOD; } static UA_StatusCode parseSignedInteger(char* inputBuffer, size_t sizeOfBuffer, UA_Int64 *destinationOfNumber) { UA_Int64 d = 0; atoiSigned(inputBuffer, sizeOfBuffer, &d); if(!destinationOfNumber) return UA_STATUSCODE_BADDECODINGERROR; *destinationOfNumber = d; return UA_STATUSCODE_GOOD; } #else /* Safe strtol variant of unsigned string conversion. * Returns UA_STATUSCODE_BADDECODINGERROR in case of overflows. * Buffer limit is 20 digits. */ static UA_StatusCode parseUnsignedInteger(char* inputBuffer, size_t sizeOfBuffer, UA_UInt64 *destinationOfNumber) { /* Check size to avoid huge malicious stack allocation. * No UInt64 can have more digits than 20. */ if(sizeOfBuffer > 20) { return UA_STATUSCODE_BADDECODINGERROR; } /* convert to null terminated string */ UA_STACKARRAY(char, string, sizeOfBuffer+1); memcpy(string, inputBuffer, sizeOfBuffer); string[sizeOfBuffer] = 0; /* Conversion */ char *endptr, *str; str = string; errno = 0; /* To distinguish success/failure after call */ UA_UInt64 val = strtoull(str, &endptr, 10); /* Check for various possible errors */ if((errno == ERANGE && (val == LLONG_MAX || val == 0)) || (errno != 0 )) { return UA_STATUSCODE_BADDECODINGERROR; } /* Check if no digits were found */ if(endptr == str) return UA_STATUSCODE_BADDECODINGERROR; /* copy to destination */ *destinationOfNumber = val; return UA_STATUSCODE_GOOD; } /* Safe strtol variant of unsigned string conversion. * Returns UA_STATUSCODE_BADDECODINGERROR in case of overflows. * Buffer limit is 20 digits. */ static UA_StatusCode parseSignedInteger(char* inputBuffer, size_t sizeOfBuffer, UA_Int64 *destinationOfNumber) { /* Check size to avoid huge malicious stack allocation. * No UInt64 can have more digits than 20. 
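     * The same cap works for the signed case: INT64_MIN ("-9223372036854775808")
     * is exactly 20 characters including the sign.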
*/ if(sizeOfBuffer > 20) return UA_STATUSCODE_BADDECODINGERROR; /* convert to null terminated string */ UA_STACKARRAY(char, string, sizeOfBuffer + 1); memcpy(string, inputBuffer, sizeOfBuffer); string[sizeOfBuffer] = 0; /* Conversion */ char *endptr, *str; str = string; errno = 0; /* To distinguish success/failure after call */ UA_Int64 val = strtoll(str, &endptr, 10); /* Check for various possible errors */ if((errno == ERANGE && (val == LONG_MAX || val == LONG_MIN)) || (errno != 0 )) { return UA_STATUSCODE_BADDECODINGERROR; } /* Check if no digits were found */ if(endptr == str) return UA_STATUSCODE_BADDECODINGERROR; /* copy to destination */ *destinationOfNumber = val; return UA_STATUSCODE_GOOD; } #endif DECODE_JSON(Byte) { CHECK_TOKEN_BOUNDS; CHECK_PRIMITIVE; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); UA_UInt64 out = 0; UA_StatusCode s = parseUnsignedInteger(tokenData, tokenSize, &out); *dst = (UA_Byte)out; if(moveToken) parseCtx->index++; return s; } DECODE_JSON(UInt16) { CHECK_TOKEN_BOUNDS; CHECK_PRIMITIVE; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); UA_UInt64 out = 0; UA_StatusCode s = parseUnsignedInteger(tokenData, tokenSize, &out); *dst = (UA_UInt16)out; if(moveToken) parseCtx->index++; return s; } DECODE_JSON(UInt32) { CHECK_TOKEN_BOUNDS; CHECK_PRIMITIVE; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); UA_UInt64 out = 0; UA_StatusCode s = parseUnsignedInteger(tokenData, tokenSize, &out); *dst = (UA_UInt32)out; if(moveToken) parseCtx->index++; return s; } DECODE_JSON(UInt64) { CHECK_TOKEN_BOUNDS; CHECK_STRING; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); UA_UInt64 out = 0; UA_StatusCode s = parseUnsignedInteger(tokenData, tokenSize, &out); *dst = (UA_UInt64)out; if(moveToken) parseCtx->index++; return s; } DECODE_JSON(SByte) { CHECK_TOKEN_BOUNDS; CHECK_PRIMITIVE; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); UA_Int64 out = 0; UA_StatusCode s = parseSignedInteger(tokenData, tokenSize, &out); *dst = (UA_SByte)out; if(moveToken) parseCtx->index++; return s; } DECODE_JSON(Int16) { CHECK_TOKEN_BOUNDS; CHECK_PRIMITIVE; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); UA_Int64 out = 0; UA_StatusCode s = parseSignedInteger(tokenData, tokenSize, &out); *dst = (UA_Int16)out; if(moveToken) parseCtx->index++; return s; } DECODE_JSON(Int32) { CHECK_TOKEN_BOUNDS; CHECK_PRIMITIVE; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); UA_Int64 out = 0; UA_StatusCode s = parseSignedInteger(tokenData, tokenSize, &out); *dst = (UA_Int32)out; if(moveToken) parseCtx->index++; return s; } DECODE_JSON(Int64) { CHECK_TOKEN_BOUNDS; CHECK_STRING; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); UA_Int64 out = 0; UA_StatusCode s = parseSignedInteger(tokenData, tokenSize, &out); *dst = (UA_Int64)out; if(moveToken) parseCtx->index++; return s; } static UA_UInt32 hex2int(char ch) { if(ch >= '0' && ch <= '9') return (UA_UInt32)(ch - '0'); if(ch >= 'A' && ch <= 'F') return (UA_UInt32)(ch - 'A' + 10); if(ch >= 'a' && ch <= 'f') return (UA_UInt32)(ch - 'a' + 10); return 0; } /* Float * Either a JSMN_STRING or JSMN_PRIMITIVE */ DECODE_JSON(Float) { CHECK_TOKEN_BOUNDS; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); /* https://www.exploringbinary.com/maximum-number-of-decimal-digits-in-binary-floating-point-numbers/ * Maximum digit counts for select IEEE floating-point formats: 149 * Sanity check. 
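     * (The limit of 150 below leaves one character of slack over that worst case.)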
*/ if(tokenSize > 150) return UA_STATUSCODE_BADDECODINGERROR; jsmntype_t tokenType = getJsmnType(parseCtx); if(tokenType == JSMN_STRING) { /*It could be a String with Nan, Infinity*/ if(tokenSize == 8 && memcmp(tokenData, "Infinity", 8) == 0) { *dst = (UA_Float)INFINITY; return UA_STATUSCODE_GOOD; } if(tokenSize == 9 && memcmp(tokenData, "-Infinity", 9) == 0) { /* workaround an MSVC 2013 issue */ *dst = (UA_Float)-INFINITY; return UA_STATUSCODE_GOOD; } if(tokenSize == 3 && memcmp(tokenData, "NaN", 3) == 0) { *dst = (UA_Float)NAN; return UA_STATUSCODE_GOOD; } if(tokenSize == 4 && memcmp(tokenData, "-NaN", 4) == 0) { *dst = (UA_Float)NAN; return UA_STATUSCODE_GOOD; } return UA_STATUSCODE_BADDECODINGERROR; } if(tokenType != JSMN_PRIMITIVE) return UA_STATUSCODE_BADDECODINGERROR; /* Null-Terminate for sscanf. */ UA_STACKARRAY(char, string, tokenSize+1); memcpy(string, tokenData, tokenSize); string[tokenSize] = 0; UA_Float d = 0; #ifdef UA_ENABLE_CUSTOM_LIBC d = (UA_Float)__floatscan(string, 1, 0); #else char c = 0; /* On success, the function returns the number of variables filled. * In the case of an input failure before any data could be successfully read, EOF is returned. */ int ret = sscanf(string, "%f%c", &d, &c); /* Exactly one var must be filled. %c acts as a guard for wrong input which is accepted by sscanf. E.g. 1.23.45 is not accepted. */ if(ret == EOF || (ret != 1)) return UA_STATUSCODE_BADDECODINGERROR; #endif *dst = d; parseCtx->index++; return UA_STATUSCODE_GOOD; } /* Either a JSMN_STRING or JSMN_PRIMITIVE */ DECODE_JSON(Double) { CHECK_TOKEN_BOUNDS; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); /* https://www.exploringbinary.com/maximum-number-of-decimal-digits-in-binary-floating-point-numbers/ * Maximum digit counts for select IEEE floating-point formats: 1074 * Sanity check. */ if(tokenSize > 1075) return UA_STATUSCODE_BADDECODINGERROR; jsmntype_t tokenType = getJsmnType(parseCtx); if(tokenType == JSMN_STRING) { /*It could be a String with Nan, Infinity*/ if(tokenSize == 8 && memcmp(tokenData, "Infinity", 8) == 0) { *dst = INFINITY; return UA_STATUSCODE_GOOD; } if(tokenSize == 9 && memcmp(tokenData, "-Infinity", 9) == 0) { /* workaround an MSVC 2013 issue */ *dst = -INFINITY; return UA_STATUSCODE_GOOD; } if(tokenSize == 3 && memcmp(tokenData, "NaN", 3) == 0) { *dst = NAN; return UA_STATUSCODE_GOOD; } if(tokenSize == 4 && memcmp(tokenData, "-NaN", 4) == 0) { *dst = NAN; return UA_STATUSCODE_GOOD; } return UA_STATUSCODE_BADDECODINGERROR; } if(tokenType != JSMN_PRIMITIVE) return UA_STATUSCODE_BADDECODINGERROR; /* Null-Terminate for sscanf. Should this better be handled on heap? Max * 1075 input chars allowed. Not using heap. */ UA_STACKARRAY(char, string, tokenSize+1); memcpy(string, tokenData, tokenSize); string[tokenSize] = 0; UA_Double d = 0; #ifdef UA_ENABLE_CUSTOM_LIBC d = (UA_Double)__floatscan(string, 2, 0); #else char c = 0; /* On success, the function returns the number of variables filled. * In the case of an input failure before any data could be successfully read, EOF is returned. */ int ret = sscanf(string, "%lf%c", &d, &c); /* Exactly one var must be filled. %c acts as a guard for wrong input which is accepted by sscanf. E.g. 1.23.45 is not accepted. 
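       For example, sscanf("1.23", "%lf%c", &d, &c) returns 1 and is accepted,
       while sscanf("1.23.45", "%lf%c", &d, &c) also consumes the second '.' into c
       and returns 2, which the check below rejects.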
*/ if(ret == EOF || (ret != 1)) return UA_STATUSCODE_BADDECODINGERROR; #endif *dst = d; parseCtx->index++; return UA_STATUSCODE_GOOD; } /* Expects 36 chars in format 00000003-0009-000A-0807-060504030201 | data1| |d2| |d3| |d4| | data4 | */ static UA_Guid UA_Guid_fromChars(const char* chars) { UA_Guid dst; UA_Guid_init(&dst); for(size_t i = 0; i < 8; i++) dst.data1 |= (UA_UInt32)(hex2int(chars[i]) << (28 - (i*4))); for(size_t i = 0; i < 4; i++) { dst.data2 |= (UA_UInt16)(hex2int(chars[9+i]) << (12 - (i*4))); dst.data3 |= (UA_UInt16)(hex2int(chars[14+i]) << (12 - (i*4))); } dst.data4[0] |= (UA_Byte)(hex2int(chars[19]) << 4u); dst.data4[0] |= (UA_Byte)(hex2int(chars[20]) << 0u); dst.data4[1] |= (UA_Byte)(hex2int(chars[21]) << 4u); dst.data4[1] |= (UA_Byte)(hex2int(chars[22]) << 0u); for(size_t i = 0; i < 6; i++) { dst.data4[2+i] |= (UA_Byte)(hex2int(chars[24 + i*2]) << 4u); dst.data4[2+i] |= (UA_Byte)(hex2int(chars[25 + i*2]) << 0u); } return dst; } DECODE_JSON(Guid) { CHECK_STRING; CHECK_TOKEN_BOUNDS; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); if(tokenSize != 36) return UA_STATUSCODE_BADDECODINGERROR; /* check if incorrect chars are present */ for(size_t i = 0; i < tokenSize; i++) { if(!(tokenData[i] == '-' || (tokenData[i] >= '0' && tokenData[i] <= '9') || (tokenData[i] >= 'A' && tokenData[i] <= 'F') || (tokenData[i] >= 'a' && tokenData[i] <= 'f'))) { return UA_STATUSCODE_BADDECODINGERROR; } } *dst = UA_Guid_fromChars(tokenData); if(moveToken) parseCtx->index++; return UA_STATUSCODE_GOOD; } DECODE_JSON(String) { ALLOW_NULL; CHECK_STRING; CHECK_TOKEN_BOUNDS; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); /* Empty string? */ if(tokenSize == 0) { dst->data = (UA_Byte*)UA_EMPTY_ARRAY_SENTINEL; dst->length = 0; if(moveToken) parseCtx->index++; return UA_STATUSCODE_GOOD; } /* The actual value is at most of the same length as the source string: * - Shortcut escapes (e.g. 
"\t") (length 2) are converted to 1 byte * - A single \uXXXX escape (length 6) is converted to at most 3 bytes * - Two \uXXXX escapes (length 12) forming an UTF-16 surrogate pair are * converted to 4 bytes */ char *outputBuffer = (char*)UA_malloc(tokenSize); if(!outputBuffer) return UA_STATUSCODE_BADOUTOFMEMORY; const char *p = (char*)tokenData; const char *end = (char*)&tokenData[tokenSize]; char *pos = outputBuffer; while(p < end) { /* No escaping */ if(*p != '\\') { *(pos++) = *(p++); continue; } /* Escape character */ p++; if(p == end) goto cleanup; if(*p != 'u') { switch(*p) { case '"': case '\\': case '/': *pos = *p; break; case 'b': *pos = '\b'; break; case 'f': *pos = '\f'; break; case 'n': *pos = '\n'; break; case 'r': *pos = '\r'; break; case 't': *pos = '\t'; break; default: goto cleanup; } pos++; p++; continue; } /* Unicode */ if(p + 4 >= end) goto cleanup; int32_t value_signed = decode_unicode_escape(p); if(value_signed < 0) goto cleanup; uint32_t value = (uint32_t)value_signed; p += 5; if(0xD800 <= value && value <= 0xDBFF) { /* Surrogate pair */ if(p + 5 >= end) goto cleanup; if(*p != '\\' || *(p + 1) != 'u') goto cleanup; int32_t value2 = decode_unicode_escape(p + 1); if(value2 < 0xDC00 || value2 > 0xDFFF) goto cleanup; value = ((value - 0xD800u) << 10u) + (uint32_t)((value2 - 0xDC00) + 0x10000); p += 6; } else if(0xDC00 <= value && value <= 0xDFFF) { /* Invalid Unicode '\\u%04X' */ goto cleanup; } size_t length; if(utf8_encode((int32_t)value, pos, &length)) goto cleanup; pos += length; } dst->length = (size_t)(pos - outputBuffer); if(dst->length > 0) { dst->data = (UA_Byte*)outputBuffer; } else { dst->data = (UA_Byte*)UA_EMPTY_ARRAY_SENTINEL; UA_free(outputBuffer); } if(moveToken) parseCtx->index++; return UA_STATUSCODE_GOOD; cleanup: UA_free(outputBuffer); return UA_STATUSCODE_BADDECODINGERROR; } DECODE_JSON(ByteString) { ALLOW_NULL; CHECK_STRING; CHECK_TOKEN_BOUNDS; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); /* Empty bytestring? */ if(tokenSize == 0) { dst->data = (UA_Byte*)UA_EMPTY_ARRAY_SENTINEL; dst->length = 0; return UA_STATUSCODE_GOOD; } size_t flen = 0; unsigned char* unB64 = UA_unbase64((unsigned char*)tokenData, tokenSize, &flen); if(unB64 == 0) return UA_STATUSCODE_BADDECODINGERROR; dst->data = (u8*)unB64; dst->length = flen; if(moveToken) parseCtx->index++; return UA_STATUSCODE_GOOD; } DECODE_JSON(LocalizedText) { ALLOW_NULL; CHECK_OBJECT; DecodeEntry entries[2] = { {UA_JSONKEY_LOCALE, &dst->locale, (decodeJsonSignature) String_decodeJson, false, NULL}, {UA_JSONKEY_TEXT, &dst->text, (decodeJsonSignature) String_decodeJson, false, NULL} }; return decodeFields(ctx, parseCtx, entries, 2, type); } DECODE_JSON(QualifiedName) { ALLOW_NULL; CHECK_OBJECT; DecodeEntry entries[2] = { {UA_JSONKEY_NAME, &dst->name, (decodeJsonSignature) String_decodeJson, false, NULL}, {UA_JSONKEY_URI, &dst->namespaceIndex, (decodeJsonSignature) UInt16_decodeJson, false, NULL} }; return decodeFields(ctx, parseCtx, entries, 2, type); } /* Function for searching ahead of the current token. 
Used for retrieving the * OPC UA type of a token */ static status searchObjectForKeyRec(const char *searchKey, CtxJson *ctx, ParseCtx *parseCtx, size_t *resultIndex, UA_UInt16 depth) { UA_StatusCode ret = UA_STATUSCODE_BADNOTFOUND; CHECK_TOKEN_BOUNDS; if(parseCtx->tokenArray[parseCtx->index].type == JSMN_OBJECT) { size_t objectCount = (size_t)parseCtx->tokenArray[parseCtx->index].size; parseCtx->index++; /*Object to first Key*/ for(size_t i = 0; i < objectCount; i++) { CHECK_TOKEN_BOUNDS; if(depth == 0) { /* we search only on first layer */ if(jsoneq((char*)ctx->pos, &parseCtx->tokenArray[parseCtx->index], searchKey) == 0) { /*found*/ parseCtx->index++; /*We give back a pointer to the value of the searched key!*/ if (parseCtx->index >= parseCtx->tokenCount) /* We got invalid json. See https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=14620 */ return UA_STATUSCODE_BADOUTOFRANGE; *resultIndex = parseCtx->index; return UA_STATUSCODE_GOOD; } } parseCtx->index++; /* value */ CHECK_TOKEN_BOUNDS; if(parseCtx->tokenArray[parseCtx->index].type == JSMN_OBJECT) { ret = searchObjectForKeyRec(searchKey, ctx, parseCtx, resultIndex, (UA_UInt16)(depth + 1)); } else if(parseCtx->tokenArray[parseCtx->index].type == JSMN_ARRAY) { ret = searchObjectForKeyRec(searchKey, ctx, parseCtx, resultIndex, (UA_UInt16)(depth + 1)); } else { /* Only Primitive or string */ parseCtx->index++; } } } else if(parseCtx->tokenArray[parseCtx->index].type == JSMN_ARRAY) { size_t arraySize = (size_t)parseCtx->tokenArray[parseCtx->index].size; parseCtx->index++; /*Object to first element*/ for(size_t i = 0; i < arraySize; i++) { CHECK_TOKEN_BOUNDS; if(parseCtx->tokenArray[parseCtx->index].type == JSMN_OBJECT) { ret = searchObjectForKeyRec(searchKey, ctx, parseCtx, resultIndex, (UA_UInt16)(depth + 1)); } else if(parseCtx->tokenArray[parseCtx->index].type == JSMN_ARRAY) { ret = searchObjectForKeyRec(searchKey, ctx, parseCtx, resultIndex, (UA_UInt16)(depth + 1)); } else { /* Only Primitive or string */ parseCtx->index++; } } } return ret; } UA_FUNC_ATTR_WARN_UNUSED_RESULT status lookAheadForKey(const char* search, CtxJson *ctx, ParseCtx *parseCtx, size_t *resultIndex) { UA_UInt16 oldIndex = parseCtx->index; /* Save index for later restore */ UA_UInt16 depth = 0; UA_StatusCode ret = searchObjectForKeyRec(search, ctx, parseCtx, resultIndex, depth); parseCtx->index = oldIndex; /* Restore index */ return ret; } /* Function used to jump over an object which cannot be parsed */ static status jumpOverRec(CtxJson *ctx, ParseCtx *parseCtx, size_t *resultIndex, UA_UInt16 depth) { UA_StatusCode ret = UA_STATUSCODE_BADDECODINGERROR; CHECK_TOKEN_BOUNDS; if(parseCtx->tokenArray[parseCtx->index].type == JSMN_OBJECT) { size_t objectCount = (size_t)(parseCtx->tokenArray[parseCtx->index].size); parseCtx->index++; /*Object to first Key*/ CHECK_TOKEN_BOUNDS; size_t i; for(i = 0; i < objectCount; i++) { CHECK_TOKEN_BOUNDS; parseCtx->index++; /*value*/ CHECK_TOKEN_BOUNDS; if(parseCtx->tokenArray[parseCtx->index].type == JSMN_OBJECT) { jumpOverRec(ctx, parseCtx, resultIndex, (UA_UInt16)(depth + 1)); } else if(parseCtx->tokenArray[parseCtx->index].type == JSMN_ARRAY) { jumpOverRec(ctx, parseCtx, resultIndex, (UA_UInt16)(depth + 1)); } else { /*Only Primitive or string*/ parseCtx->index++; } } } else if(parseCtx->tokenArray[parseCtx->index].type == JSMN_ARRAY) { size_t arraySize = (size_t)(parseCtx->tokenArray[parseCtx->index].size); parseCtx->index++; /*Object to first element*/ CHECK_TOKEN_BOUNDS; size_t i; for(i = 0; i < arraySize; i++) { 
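            /* Recurse into nested objects and arrays; primitive and string
             * elements are skipped by simply advancing the token index. */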
if(parseCtx->tokenArray[parseCtx->index].type == JSMN_OBJECT) { jumpOverRec(ctx, parseCtx, resultIndex, (UA_UInt16)(depth + 1)); } else if(parseCtx->tokenArray[parseCtx->index].type == JSMN_ARRAY) { jumpOverRec(ctx, parseCtx, resultIndex, (UA_UInt16)(depth + 1)); } else { /*Only Primitive or string*/ parseCtx->index++; } } } return ret; } static status jumpOverObject(CtxJson *ctx, ParseCtx *parseCtx, size_t *resultIndex) { UA_UInt16 oldIndex = parseCtx->index; /* Save index for later restore */ UA_UInt16 depth = 0; jumpOverRec(ctx, parseCtx, resultIndex, depth); *resultIndex = parseCtx->index; parseCtx->index = oldIndex; /* Restore index */ return UA_STATUSCODE_GOOD; } static status prepareDecodeNodeIdJson(UA_NodeId *dst, CtxJson *ctx, ParseCtx *parseCtx, u8 *fieldCount, DecodeEntry *entries) { /* possible keys: Id, IdType*/ /* Id must always be present */ entries[*fieldCount].fieldName = UA_JSONKEY_ID; entries[*fieldCount].found = false; entries[*fieldCount].type = NULL; /* IdType */ UA_Boolean hasIdType = false; size_t searchResult = 0; status ret = lookAheadForKey(UA_JSONKEY_IDTYPE, ctx, parseCtx, &searchResult); if(ret == UA_STATUSCODE_GOOD) { /*found*/ hasIdType = true; } if(hasIdType) { size_t size = (size_t)(parseCtx->tokenArray[searchResult].end - parseCtx->tokenArray[searchResult].start); if(size < 1) { return UA_STATUSCODE_BADDECODINGERROR; } char *idType = (char*)(ctx->pos + parseCtx->tokenArray[searchResult].start); if(idType[0] == '2') { dst->identifierType = UA_NODEIDTYPE_GUID; entries[*fieldCount].fieldPointer = &dst->identifier.guid; entries[*fieldCount].function = (decodeJsonSignature) Guid_decodeJson; } else if(idType[0] == '1') { dst->identifierType = UA_NODEIDTYPE_STRING; entries[*fieldCount].fieldPointer = &dst->identifier.string; entries[*fieldCount].function = (decodeJsonSignature) String_decodeJson; } else if(idType[0] == '3') { dst->identifierType = UA_NODEIDTYPE_BYTESTRING; entries[*fieldCount].fieldPointer = &dst->identifier.byteString; entries[*fieldCount].function = (decodeJsonSignature) ByteString_decodeJson; } else { return UA_STATUSCODE_BADDECODINGERROR; } /* Id always present */ (*fieldCount)++; entries[*fieldCount].fieldName = UA_JSONKEY_IDTYPE; entries[*fieldCount].fieldPointer = NULL; entries[*fieldCount].function = NULL; entries[*fieldCount].found = false; entries[*fieldCount].type = NULL; /* IdType */ (*fieldCount)++; } else { dst->identifierType = UA_NODEIDTYPE_NUMERIC; entries[*fieldCount].fieldPointer = &dst->identifier.numeric; entries[*fieldCount].function = (decodeJsonSignature) UInt32_decodeJson; entries[*fieldCount].type = NULL; (*fieldCount)++; } return UA_STATUSCODE_GOOD; } DECODE_JSON(NodeId) { ALLOW_NULL; CHECK_OBJECT; /* NameSpace */ UA_Boolean hasNamespace = false; size_t searchResultNamespace = 0; status ret = lookAheadForKey(UA_JSONKEY_NAMESPACE, ctx, parseCtx, &searchResultNamespace); if(ret != UA_STATUSCODE_GOOD) { dst->namespaceIndex = 0; } else { hasNamespace = true; } /* Keep track over number of keys present, incremented if key found */ u8 fieldCount = 0; DecodeEntry entries[3]; ret = prepareDecodeNodeIdJson(dst, ctx, parseCtx, &fieldCount, entries); if(ret != UA_STATUSCODE_GOOD) return ret; if(hasNamespace) { entries[fieldCount].fieldName = UA_JSONKEY_NAMESPACE; entries[fieldCount].fieldPointer = &dst->namespaceIndex; entries[fieldCount].function = (decodeJsonSignature) UInt16_decodeJson; entries[fieldCount].found = false; entries[fieldCount].type = NULL; fieldCount++; } else { dst->namespaceIndex = 0; } ret = decodeFields(ctx, 
parseCtx, entries, fieldCount, type); return ret; } DECODE_JSON(ExpandedNodeId) { ALLOW_NULL; CHECK_OBJECT; /* Keep track over number of keys present, incremented if key found */ u8 fieldCount = 0; /* ServerUri */ UA_Boolean hasServerUri = false; size_t searchResultServerUri = 0; status ret = lookAheadForKey(UA_JSONKEY_SERVERURI, ctx, parseCtx, &searchResultServerUri); if(ret != UA_STATUSCODE_GOOD) { dst->serverIndex = 0; } else { hasServerUri = true; } /* NameSpace */ UA_Boolean hasNamespace = false; UA_Boolean isNamespaceString = false; size_t searchResultNamespace = 0; ret = lookAheadForKey(UA_JSONKEY_NAMESPACE, ctx, parseCtx, &searchResultNamespace); if(ret != UA_STATUSCODE_GOOD) { dst->namespaceUri = UA_STRING_NULL; } else { hasNamespace = true; jsmntok_t nsToken = parseCtx->tokenArray[searchResultNamespace]; if(nsToken.type == JSMN_STRING) isNamespaceString = true; } DecodeEntry entries[4]; ret = prepareDecodeNodeIdJson(&dst->nodeId, ctx, parseCtx, &fieldCount, entries); if(ret != UA_STATUSCODE_GOOD) return ret; if(hasNamespace) { entries[fieldCount].fieldName = UA_JSONKEY_NAMESPACE; if(isNamespaceString) { entries[fieldCount].fieldPointer = &dst->namespaceUri; entries[fieldCount].function = (decodeJsonSignature) String_decodeJson; } else { entries[fieldCount].fieldPointer = &dst->nodeId.namespaceIndex; entries[fieldCount].function = (decodeJsonSignature) UInt16_decodeJson; } entries[fieldCount].found = false; entries[fieldCount].type = NULL; fieldCount++; } if(hasServerUri) { entries[fieldCount].fieldName = UA_JSONKEY_SERVERURI; entries[fieldCount].fieldPointer = &dst->serverIndex; entries[fieldCount].function = (decodeJsonSignature) UInt32_decodeJson; entries[fieldCount].found = false; entries[fieldCount].type = NULL; fieldCount++; } else { dst->serverIndex = 0; } return decodeFields(ctx, parseCtx, entries, fieldCount, type); } DECODE_JSON(DateTime) { CHECK_STRING; CHECK_TOKEN_BOUNDS; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); /* TODO: proper ISO 8601:2004 parsing, musl strptime!*/ /* DateTime ISO 8601:2004 without milli is 20 Characters, with millis 24 */ if(tokenSize != 20 && tokenSize != 24) { return UA_STATUSCODE_BADDECODINGERROR; } /* sanity check */ if(tokenData[4] != '-' || tokenData[7] != '-' || tokenData[10] != 'T' || tokenData[13] != ':' || tokenData[16] != ':' || !(tokenData[19] == 'Z' || tokenData[19] == '.')) { return UA_STATUSCODE_BADDECODINGERROR; } struct mytm dts; memset(&dts, 0, sizeof(dts)); UA_UInt64 year = 0; atoiUnsigned(&tokenData[0], 4, &year); dts.tm_year = (UA_UInt16)year - 1900; UA_UInt64 month = 0; atoiUnsigned(&tokenData[5], 2, &month); dts.tm_mon = (UA_UInt16)month - 1; UA_UInt64 day = 0; atoiUnsigned(&tokenData[8], 2, &day); dts.tm_mday = (UA_UInt16)day; UA_UInt64 hour = 0; atoiUnsigned(&tokenData[11], 2, &hour); dts.tm_hour = (UA_UInt16)hour; UA_UInt64 min = 0; atoiUnsigned(&tokenData[14], 2, &min); dts.tm_min = (UA_UInt16)min; UA_UInt64 sec = 0; atoiUnsigned(&tokenData[17], 2, &sec); dts.tm_sec = (UA_UInt16)sec; UA_UInt64 msec = 0; if(tokenSize == 24) { atoiUnsigned(&tokenData[20], 3, &msec); } long long sinceunix = __tm_to_secs(&dts); UA_DateTime dt = (UA_DateTime)((UA_UInt64)(sinceunix*UA_DATETIME_SEC + UA_DATETIME_UNIX_EPOCH) + (UA_UInt64)(UA_DATETIME_MSEC * msec)); *dst = dt; if(moveToken) parseCtx->index++; return UA_STATUSCODE_GOOD; } DECODE_JSON(StatusCode) { status ret = DECODE_DIRECT_JSON(dst, UInt32); if(ret != UA_STATUSCODE_GOOD) return ret; if(moveToken) parseCtx->index++; return UA_STATUSCODE_GOOD; } static 
status VariantDimension_decodeJson(void * dst, const UA_DataType *type, CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken) { (void) type; const UA_DataType *dimType = &UA_TYPES[UA_TYPES_UINT32]; return Array_decodeJson_internal((void**)dst, dimType, ctx, parseCtx, moveToken); } DECODE_JSON(Variant) { ALLOW_NULL; CHECK_OBJECT; /* First search for the variant type in the json object. */ size_t searchResultType = 0; status ret = lookAheadForKey(UA_JSONKEY_TYPE, ctx, parseCtx, &searchResultType); if(ret != UA_STATUSCODE_GOOD) { skipObject(parseCtx); return UA_STATUSCODE_GOOD; } size_t size = ((size_t)parseCtx->tokenArray[searchResultType].end - (size_t)parseCtx->tokenArray[searchResultType].start); /* check if size is zero or the type is not a number */ if(size < 1 || parseCtx->tokenArray[searchResultType].type != JSMN_PRIMITIVE) return UA_STATUSCODE_BADDECODINGERROR; /* Parse the type */ UA_UInt64 idTypeDecoded = 0; char *idTypeEncoded = (char*)(ctx->pos + parseCtx->tokenArray[searchResultType].start); status typeDecodeStatus = atoiUnsigned(idTypeEncoded, size, &idTypeDecoded); if(typeDecodeStatus != UA_STATUSCODE_GOOD) return typeDecodeStatus; /* A NULL Variant */ if(idTypeDecoded == 0) { skipObject(parseCtx); return UA_STATUSCODE_GOOD; } /* Set the type */ UA_NodeId typeNodeId = UA_NODEID_NUMERIC(0, (UA_UInt32)idTypeDecoded); dst->type = UA_findDataType(&typeNodeId); if(!dst->type) return UA_STATUSCODE_BADDECODINGERROR; /* Search for body */ size_t searchResultBody = 0; ret = lookAheadForKey(UA_JSONKEY_BODY, ctx, parseCtx, &searchResultBody); if(ret != UA_STATUSCODE_GOOD) { /*TODO: no body? set value NULL?*/ return UA_STATUSCODE_BADDECODINGERROR; } /* value is an array? */ UA_Boolean isArray = false; if(parseCtx->tokenArray[searchResultBody].type == JSMN_ARRAY) { isArray = true; dst->arrayLength = (size_t)parseCtx->tokenArray[searchResultBody].size; } /* Has the variant dimension? */ UA_Boolean hasDimension = false; size_t searchResultDim = 0; ret = lookAheadForKey(UA_JSONKEY_DIMENSION, ctx, parseCtx, &searchResultDim); if(ret == UA_STATUSCODE_GOOD) { hasDimension = true; dst->arrayDimensionsSize = (size_t)parseCtx->tokenArray[searchResultDim].size; } /* no array but has dimension. error? */ if(!isArray && hasDimension) return UA_STATUSCODE_BADDECODINGERROR; /* Get the datatype of the content. The type must be a builtin data type. * All not-builtin types are wrapped in an ExtensionObject. */ if(dst->type->typeKind > UA_TYPES_DIAGNOSTICINFO) return UA_STATUSCODE_BADDECODINGERROR; /* A variant cannot contain a variant. 
But it can contain an array of * variants */ if(dst->type->typeKind == UA_DATATYPEKIND_VARIANT && !isArray) return UA_STATUSCODE_BADDECODINGERROR; /* Decode an array */ if(isArray) { DecodeEntry entries[3] = { {UA_JSONKEY_TYPE, NULL, NULL, false, NULL}, {UA_JSONKEY_BODY, &dst->data, (decodeJsonSignature) Array_decodeJson, false, NULL}, {UA_JSONKEY_DIMENSION, &dst->arrayDimensions, (decodeJsonSignature) VariantDimension_decodeJson, false, NULL}}; if(!hasDimension) { ret = decodeFields(ctx, parseCtx, entries, 2, dst->type); /*use first 2 fields*/ } else { ret = decodeFields(ctx, parseCtx, entries, 3, dst->type); /*use all fields*/ } return ret; } /* Decode a value wrapped in an ExtensionObject */ if(dst->type->typeKind == UA_DATATYPEKIND_EXTENSIONOBJECT) { DecodeEntry entries[2] = {{UA_JSONKEY_TYPE, NULL, NULL, false, NULL}, {UA_JSONKEY_BODY, dst, (decodeJsonSignature)Variant_decodeJsonUnwrapExtensionObject, false, NULL}}; return decodeFields(ctx, parseCtx, entries, 2, dst->type); } /* Allocate Memory for Body */ dst->data = UA_new(dst->type); if(!dst->data) return UA_STATUSCODE_BADOUTOFMEMORY; DecodeEntry entries[2] = {{UA_JSONKEY_TYPE, NULL, NULL, false, NULL}, {UA_JSONKEY_BODY, dst->data, (decodeJsonSignature) decodeJsonInternal, false, NULL}}; return decodeFields(ctx, parseCtx, entries, 2, dst->type); } DECODE_JSON(DataValue) { ALLOW_NULL; CHECK_OBJECT; DecodeEntry entries[6] = { {UA_JSONKEY_VALUE, &dst->value, (decodeJsonSignature) Variant_decodeJson, false, NULL}, {UA_JSONKEY_STATUS, &dst->status, (decodeJsonSignature) StatusCode_decodeJson, false, NULL}, {UA_JSONKEY_SOURCETIMESTAMP, &dst->sourceTimestamp, (decodeJsonSignature) DateTime_decodeJson, false, NULL}, {UA_JSONKEY_SOURCEPICOSECONDS, &dst->sourcePicoseconds, (decodeJsonSignature) UInt16_decodeJson, false, NULL}, {UA_JSONKEY_SERVERTIMESTAMP, &dst->serverTimestamp, (decodeJsonSignature) DateTime_decodeJson, false, NULL}, {UA_JSONKEY_SERVERPICOSECONDS, &dst->serverPicoseconds, (decodeJsonSignature) UInt16_decodeJson, false, NULL}}; status ret = decodeFields(ctx, parseCtx, entries, 6, type); dst->hasValue = entries[0].found; dst->hasStatus = entries[1].found; dst->hasSourceTimestamp = entries[2].found; dst->hasSourcePicoseconds = entries[3].found; dst->hasServerTimestamp = entries[4].found; dst->hasServerPicoseconds = entries[5].found; return ret; } DECODE_JSON(ExtensionObject) { ALLOW_NULL; CHECK_OBJECT; /* Search for Encoding */ size_t searchEncodingResult = 0; status ret = lookAheadForKey(UA_JSONKEY_ENCODING, ctx, parseCtx, &searchEncodingResult); /* If no encoding found it is structure encoding */ if(ret != UA_STATUSCODE_GOOD) { UA_NodeId typeId; UA_NodeId_init(&typeId); size_t searchTypeIdResult = 0; ret = lookAheadForKey(UA_JSONKEY_TYPEID, ctx, parseCtx, &searchTypeIdResult); if(ret != UA_STATUSCODE_GOOD) { /* TYPEID not found, abort */ return UA_STATUSCODE_BADENCODINGERROR; } /* parse the nodeid */ /*for restore*/ UA_UInt16 index = parseCtx->index; parseCtx->index = (UA_UInt16)searchTypeIdResult; ret = NodeId_decodeJson(&typeId, &UA_TYPES[UA_TYPES_NODEID], ctx, parseCtx, true); if(ret != UA_STATUSCODE_GOOD) return ret; /*restore*/ parseCtx->index = index; const UA_DataType *typeOfBody = UA_findDataType(&typeId); if(!typeOfBody) { /*dont decode body: 1. save as bytestring, 2. 
jump over*/ dst->encoding = UA_EXTENSIONOBJECT_ENCODED_BYTESTRING; UA_NodeId_copy(&typeId, &dst->content.encoded.typeId); /*Check if Object in Extentionobject*/ if(getJsmnType(parseCtx) != JSMN_OBJECT) { UA_NodeId_deleteMembers(&typeId); return UA_STATUSCODE_BADDECODINGERROR; } /*Search for Body to save*/ size_t searchBodyResult = 0; ret = lookAheadForKey(UA_JSONKEY_BODY, ctx, parseCtx, &searchBodyResult); if(ret != UA_STATUSCODE_GOOD) { /*No Body*/ UA_NodeId_deleteMembers(&typeId); return UA_STATUSCODE_BADDECODINGERROR; } if(searchBodyResult >= (size_t)parseCtx->tokenCount) { /*index not in Tokenarray*/ UA_NodeId_deleteMembers(&typeId); return UA_STATUSCODE_BADDECODINGERROR; } /* Get the size of the Object as a string, not the Object key count! */ UA_Int64 sizeOfJsonString =(parseCtx->tokenArray[searchBodyResult].end - parseCtx->tokenArray[searchBodyResult].start); char* bodyJsonString = (char*)(ctx->pos + parseCtx->tokenArray[searchBodyResult].start); if(sizeOfJsonString <= 0) { UA_NodeId_deleteMembers(&typeId); return UA_STATUSCODE_BADDECODINGERROR; } /* Save encoded as bytestring. */ ret = UA_ByteString_allocBuffer(&dst->content.encoded.body, (size_t)sizeOfJsonString); if(ret != UA_STATUSCODE_GOOD) { UA_NodeId_deleteMembers(&typeId); return ret; } memcpy(dst->content.encoded.body.data, bodyJsonString, (size_t)sizeOfJsonString); size_t tokenAfteExtensionObject = 0; jumpOverObject(ctx, parseCtx, &tokenAfteExtensionObject); if(tokenAfteExtensionObject == 0) { /*next object token not found*/ UA_NodeId_deleteMembers(&typeId); UA_ByteString_deleteMembers(&dst->content.encoded.body); return UA_STATUSCODE_BADDECODINGERROR; } parseCtx->index = (UA_UInt16)tokenAfteExtensionObject; return UA_STATUSCODE_GOOD; } /*Type id not used anymore, typeOfBody has type*/ UA_NodeId_deleteMembers(&typeId); /*Set Found Type*/ dst->content.decoded.type = typeOfBody; dst->encoding = UA_EXTENSIONOBJECT_DECODED; if(searchTypeIdResult != 0) { dst->content.decoded.data = UA_new(typeOfBody); if(!dst->content.decoded.data) return UA_STATUSCODE_BADOUTOFMEMORY; UA_NodeId typeId_dummy; DecodeEntry entries[2] = { {UA_JSONKEY_TYPEID, &typeId_dummy, (decodeJsonSignature) NodeId_decodeJson, false, NULL}, {UA_JSONKEY_BODY, dst->content.decoded.data, (decodeJsonSignature) decodeJsonJumpTable[typeOfBody->typeKind], false, NULL} }; return decodeFields(ctx, parseCtx, entries, 2, typeOfBody); } else { return UA_STATUSCODE_BADDECODINGERROR; } } else { /* UA_JSONKEY_ENCODING found */ /*Parse the encoding*/ UA_UInt64 encoding = 0; char *extObjEncoding = (char*)(ctx->pos + parseCtx->tokenArray[searchEncodingResult].start); size_t size = (size_t)(parseCtx->tokenArray[searchEncodingResult].end - parseCtx->tokenArray[searchEncodingResult].start); atoiUnsigned(extObjEncoding, size, &encoding); if(encoding == 1) { /* BYTESTRING in Json Body */ dst->encoding = UA_EXTENSIONOBJECT_ENCODED_BYTESTRING; UA_UInt16 encodingTypeJson; DecodeEntry entries[3] = { {UA_JSONKEY_ENCODING, &encodingTypeJson, (decodeJsonSignature) UInt16_decodeJson, false, NULL}, {UA_JSONKEY_BODY, &dst->content.encoded.body, (decodeJsonSignature) String_decodeJson, false, NULL}, {UA_JSONKEY_TYPEID, &dst->content.encoded.typeId, (decodeJsonSignature) NodeId_decodeJson, false, NULL} }; return decodeFields(ctx, parseCtx, entries, 3, type); } else if(encoding == 2) { /* XmlElement in Json Body */ dst->encoding = UA_EXTENSIONOBJECT_ENCODED_XML; UA_UInt16 encodingTypeJson; DecodeEntry entries[3] = { {UA_JSONKEY_ENCODING, &encodingTypeJson, (decodeJsonSignature) UInt16_decodeJson, 
false, NULL}, {UA_JSONKEY_BODY, &dst->content.encoded.body, (decodeJsonSignature) String_decodeJson, false, NULL}, {UA_JSONKEY_TYPEID, &dst->content.encoded.typeId, (decodeJsonSignature) NodeId_decodeJson, false, NULL} }; return decodeFields(ctx, parseCtx, entries, 3, type); } else { return UA_STATUSCODE_BADDECODINGERROR; } } return UA_STATUSCODE_BADNOTIMPLEMENTED; } static status Variant_decodeJsonUnwrapExtensionObject(UA_Variant *dst, const UA_DataType *type, CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken) { (void) type, (void) moveToken; /*EXTENSIONOBJECT POSITION!*/ UA_UInt16 old_index = parseCtx->index; UA_Boolean typeIdFound; /* Decode the DataType */ UA_NodeId typeId; UA_NodeId_init(&typeId); size_t searchTypeIdResult = 0; status ret = lookAheadForKey(UA_JSONKEY_TYPEID, ctx, parseCtx, &searchTypeIdResult); if(ret != UA_STATUSCODE_GOOD) { /*No Typeid found*/ typeIdFound = false; /*return UA_STATUSCODE_BADDECODINGERROR;*/ } else { typeIdFound = true; /* parse the nodeid */ parseCtx->index = (UA_UInt16)searchTypeIdResult; ret = NodeId_decodeJson(&typeId, &UA_TYPES[UA_TYPES_NODEID], ctx, parseCtx, true); if(ret != UA_STATUSCODE_GOOD) { UA_NodeId_deleteMembers(&typeId); return ret; } /*restore index, ExtensionObject position*/ parseCtx->index = old_index; } /* ---Decode the EncodingByte--- */ if(!typeIdFound) return UA_STATUSCODE_BADDECODINGERROR; UA_Boolean encodingFound = false; /*Search for Encoding*/ size_t searchEncodingResult = 0; ret = lookAheadForKey(UA_JSONKEY_ENCODING, ctx, parseCtx, &searchEncodingResult); UA_UInt64 encoding = 0; /*If no encoding found it is Structure encoding*/ if(ret == UA_STATUSCODE_GOOD) { /*FOUND*/ encodingFound = true; char *extObjEncoding = (char*)(ctx->pos + parseCtx->tokenArray[searchEncodingResult].start); size_t size = (size_t)(parseCtx->tokenArray[searchEncodingResult].end - parseCtx->tokenArray[searchEncodingResult].start); atoiUnsigned(extObjEncoding, size, &encoding); } const UA_DataType *typeOfBody = UA_findDataType(&typeId); if(encoding == 0 || typeOfBody != NULL) { /*This value is 0 if the body is Structure encoded as a JSON object (see 5.4.6).*/ /* Found a valid type and it is structure encoded so it can be unwrapped */ if (typeOfBody == NULL) return UA_STATUSCODE_BADDECODINGERROR; dst->type = typeOfBody; /* Allocate memory for type*/ dst->data = UA_new(dst->type); if(!dst->data) { UA_NodeId_deleteMembers(&typeId); return UA_STATUSCODE_BADOUTOFMEMORY; } /* Decode the content */ UA_NodeId nodeIddummy; DecodeEntry entries[3] = { {UA_JSONKEY_TYPEID, &nodeIddummy, (decodeJsonSignature) NodeId_decodeJson, false, NULL}, {UA_JSONKEY_BODY, dst->data, (decodeJsonSignature) decodeJsonJumpTable[dst->type->typeKind], false, NULL}, {UA_JSONKEY_ENCODING, NULL, NULL, false, NULL}}; ret = decodeFields(ctx, parseCtx, entries, encodingFound ? 3:2, typeOfBody); if(ret != UA_STATUSCODE_GOOD) { UA_free(dst->data); dst->data = NULL; } } else if(encoding == 1 || encoding == 2 || typeOfBody == NULL) { UA_NodeId_deleteMembers(&typeId); /* decode as ExtensionObject */ dst->type = &UA_TYPES[UA_TYPES_EXTENSIONOBJECT]; /* Allocate memory for extensionobject*/ dst->data = UA_new(dst->type); if(!dst->data) return UA_STATUSCODE_BADOUTOFMEMORY; /* decode: Does not move tokenindex. 
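       DECODE_DIRECT_JSON expands to ExtensionObject_decodeJson(..., moveToken=false),
       so parseCtx->index is left where the caller expects it.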
*/ ret = DECODE_DIRECT_JSON(dst->data, ExtensionObject); if(ret != UA_STATUSCODE_GOOD) { UA_free(dst->data); dst->data = NULL; } } else { /*no recognized encoding type*/ return UA_STATUSCODE_BADDECODINGERROR; } return ret; } status DiagnosticInfoInner_decodeJson(void* dst, const UA_DataType* type, CtxJson* ctx, ParseCtx* parseCtx, UA_Boolean moveToken); DECODE_JSON(DiagnosticInfo) { ALLOW_NULL; CHECK_OBJECT; DecodeEntry entries[7] = { {UA_JSONKEY_SYMBOLICID, &dst->symbolicId, (decodeJsonSignature) Int32_decodeJson, false, NULL}, {UA_JSONKEY_NAMESPACEURI, &dst->namespaceUri, (decodeJsonSignature) Int32_decodeJson, false, NULL}, {UA_JSONKEY_LOCALIZEDTEXT, &dst->localizedText, (decodeJsonSignature) Int32_decodeJson, false, NULL}, {UA_JSONKEY_LOCALE, &dst->locale, (decodeJsonSignature) Int32_decodeJson, false, NULL}, {UA_JSONKEY_ADDITIONALINFO, &dst->additionalInfo, (decodeJsonSignature) String_decodeJson, false, NULL}, {UA_JSONKEY_INNERSTATUSCODE, &dst->innerStatusCode, (decodeJsonSignature) StatusCode_decodeJson, false, NULL}, {UA_JSONKEY_INNERDIAGNOSTICINFO, &dst->innerDiagnosticInfo, (decodeJsonSignature) DiagnosticInfoInner_decodeJson, false, NULL}}; status ret = decodeFields(ctx, parseCtx, entries, 7, type); dst->hasSymbolicId = entries[0].found; dst->hasNamespaceUri = entries[1].found; dst->hasLocalizedText = entries[2].found; dst->hasLocale = entries[3].found; dst->hasAdditionalInfo = entries[4].found; dst->hasInnerStatusCode = entries[5].found; dst->hasInnerDiagnosticInfo = entries[6].found; return ret; } status DiagnosticInfoInner_decodeJson(void* dst, const UA_DataType* type, CtxJson* ctx, ParseCtx* parseCtx, UA_Boolean moveToken) { UA_DiagnosticInfo *inner = (UA_DiagnosticInfo*)UA_calloc(1, sizeof(UA_DiagnosticInfo)); if(inner == NULL) { return UA_STATUSCODE_BADOUTOFMEMORY; } memcpy(dst, &inner, sizeof(UA_DiagnosticInfo*)); /* Copy new Pointer do dest */ return DiagnosticInfo_decodeJson(inner, type, ctx, parseCtx, moveToken); } status decodeFields(CtxJson *ctx, ParseCtx *parseCtx, DecodeEntry *entries, size_t entryCount, const UA_DataType *type) { CHECK_TOKEN_BOUNDS; size_t objectCount = (size_t)(parseCtx->tokenArray[parseCtx->index].size); status ret = UA_STATUSCODE_GOOD; if(entryCount == 1) { if(*(entries[0].fieldName) == 0) { /*No MemberName*/ return entries[0].function(entries[0].fieldPointer, type, ctx, parseCtx, true); /*ENCODE DIRECT*/ } } else if(entryCount == 0) { return UA_STATUSCODE_BADDECODINGERROR; } parseCtx->index++; /*go to first key*/ CHECK_TOKEN_BOUNDS; for (size_t currentObjectCount = 0; currentObjectCount < objectCount && parseCtx->index < parseCtx->tokenCount; currentObjectCount++) { /* start searching at the index of currentObjectCount */ for (size_t i = currentObjectCount; i < entryCount + currentObjectCount; i++) { /* Search for KEY, if found outer loop will be one less. Best case * is objectCount if in order! */ size_t index = i % entryCount; CHECK_TOKEN_BOUNDS; if(jsoneq((char*) ctx->pos, &parseCtx->tokenArray[parseCtx->index], entries[index].fieldName) != 0) continue; if(entries[index].found) { /*Duplicate Key found, abort.*/ return UA_STATUSCODE_BADDECODINGERROR; } entries[index].found = true; parseCtx->index++; /*goto value*/ CHECK_TOKEN_BOUNDS; /* Find the data type. * TODO: get rid of parameter type. Only forward via DecodeEntry. 
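             * Until then, an explicit DecodeEntry.type takes precedence over the
             * type argument passed in by the caller.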
*/ const UA_DataType *membertype = type; if(entries[index].type) membertype = entries[index].type; if(entries[index].function != NULL) { ret = entries[index].function(entries[index].fieldPointer, membertype, ctx, parseCtx, true); /*Move Token True*/ if(ret != UA_STATUSCODE_GOOD) return ret; } else { /*overstep single value, this will not work if object or array Only used not to double parse pre looked up type, but it has to be overstepped*/ parseCtx->index++; } break; } } return ret; } static status Array_decodeJson_internal(void **dst, const UA_DataType *type, CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken) { (void) moveToken; status ret; if(parseCtx->tokenArray[parseCtx->index].type != JSMN_ARRAY) return UA_STATUSCODE_BADDECODINGERROR; size_t length = (size_t)parseCtx->tokenArray[parseCtx->index].size; /* Save the length of the array */ size_t *p = (size_t*) dst - 1; *p = length; /* Return early for empty arrays */ if(length == 0) { *dst = UA_EMPTY_ARRAY_SENTINEL; return UA_STATUSCODE_GOOD; } /* Allocate memory */ *dst = UA_calloc(length, type->memSize); if(*dst == NULL) return UA_STATUSCODE_BADOUTOFMEMORY; parseCtx->index++; /* We go to first Array member!*/ /* Decode array members */ uintptr_t ptr = (uintptr_t)*dst; for(size_t i = 0; i < length; ++i) { ret = decodeJsonJumpTable[type->typeKind]((void*)ptr, type, ctx, parseCtx, true); if(ret != UA_STATUSCODE_GOOD) { UA_Array_delete(*dst, i+1, type); *dst = NULL; return ret; } ptr += type->memSize; } return UA_STATUSCODE_GOOD; } /*Wrapper for array with valid decodingStructure.*/ static status Array_decodeJson(void * dst, const UA_DataType *type, CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken) { return Array_decodeJson_internal((void **)dst, type, ctx, parseCtx, moveToken); } static status decodeJsonStructure(void *dst, const UA_DataType *type, CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken) { (void) moveToken; /* Check the recursion limit */ if(ctx->depth > UA_JSON_ENCODING_MAX_RECURSION) return UA_STATUSCODE_BADENCODINGERROR; ctx->depth++; uintptr_t ptr = (uintptr_t)dst; status ret = UA_STATUSCODE_GOOD; u8 membersSize = type->membersSize; const UA_DataType *typelists[2] = { UA_TYPES, &type[-type->typeIndex] }; UA_STACKARRAY(DecodeEntry, entries, membersSize); for(size_t i = 0; i < membersSize && ret == UA_STATUSCODE_GOOD; ++i) { const UA_DataTypeMember *m = &type->members[i]; const UA_DataType *mt = &typelists[!m->namespaceZero][m->memberTypeIndex]; entries[i].type = mt; if(!m->isArray) { ptr += m->padding; entries[i].fieldName = m->memberName; entries[i].fieldPointer = (void*)ptr; entries[i].function = decodeJsonJumpTable[mt->typeKind]; entries[i].found = false; ptr += mt->memSize; } else { ptr += m->padding; ptr += sizeof(size_t); entries[i].fieldName = m->memberName; entries[i].fieldPointer = (void*)ptr; entries[i].function = (decodeJsonSignature)Array_decodeJson; entries[i].found = false; ptr += sizeof(void*); } } ret = decodeFields(ctx, parseCtx, entries, membersSize, type); ctx->depth--; return ret; } static status decodeJsonNotImplemented(void *dst, const UA_DataType *type, CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken) { (void)dst, (void)type, (void)ctx, (void)parseCtx, (void)moveToken; return UA_STATUSCODE_BADNOTIMPLEMENTED; } const decodeJsonSignature decodeJsonJumpTable[UA_DATATYPEKINDS] = { (decodeJsonSignature)Boolean_decodeJson, (decodeJsonSignature)SByte_decodeJson, /* SByte */ (decodeJsonSignature)Byte_decodeJson, (decodeJsonSignature)Int16_decodeJson, /* Int16 */ 
(decodeJsonSignature)UInt16_decodeJson, (decodeJsonSignature)Int32_decodeJson, /* Int32 */ (decodeJsonSignature)UInt32_decodeJson, (decodeJsonSignature)Int64_decodeJson, /* Int64 */ (decodeJsonSignature)UInt64_decodeJson, (decodeJsonSignature)Float_decodeJson, (decodeJsonSignature)Double_decodeJson, (decodeJsonSignature)String_decodeJson, (decodeJsonSignature)DateTime_decodeJson, /* DateTime */ (decodeJsonSignature)Guid_decodeJson, (decodeJsonSignature)ByteString_decodeJson, /* ByteString */ (decodeJsonSignature)String_decodeJson, /* XmlElement */ (decodeJsonSignature)NodeId_decodeJson, (decodeJsonSignature)ExpandedNodeId_decodeJson, (decodeJsonSignature)StatusCode_decodeJson, /* StatusCode */ (decodeJsonSignature)QualifiedName_decodeJson, /* QualifiedName */ (decodeJsonSignature)LocalizedText_decodeJson, (decodeJsonSignature)ExtensionObject_decodeJson, (decodeJsonSignature)DataValue_decodeJson, (decodeJsonSignature)Variant_decodeJson, (decodeJsonSignature)DiagnosticInfo_decodeJson, (decodeJsonSignature)decodeJsonNotImplemented, /* Decimal */ (decodeJsonSignature)Int32_decodeJson, /* Enum */ (decodeJsonSignature)decodeJsonStructure, (decodeJsonSignature)decodeJsonNotImplemented, /* Structure with optional fields */ (decodeJsonSignature)decodeJsonNotImplemented, /* Union */ (decodeJsonSignature)decodeJsonNotImplemented /* BitfieldCluster */ }; decodeJsonSignature getDecodeSignature(u8 index) { return decodeJsonJumpTable[index]; } status tokenize(ParseCtx *parseCtx, CtxJson *ctx, const UA_ByteString *src) { /* Set up the context */ ctx->pos = &src->data[0]; ctx->end = &src->data[src->length]; ctx->depth = 0; parseCtx->tokenCount = 0; parseCtx->index = 0; /*Set up tokenizer jsmn*/ jsmn_parser p; jsmn_init(&p); parseCtx->tokenCount = (UA_Int32) jsmn_parse(&p, (char*)src->data, src->length, parseCtx->tokenArray, UA_JSON_MAXTOKENCOUNT); if(parseCtx->tokenCount < 0) { if(parseCtx->tokenCount == JSMN_ERROR_NOMEM) return UA_STATUSCODE_BADOUTOFMEMORY; return UA_STATUSCODE_BADDECODINGERROR; } return UA_STATUSCODE_GOOD; } UA_StatusCode decodeJsonInternal(void *dst, const UA_DataType *type, CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken) { return decodeJsonJumpTable[type->typeKind](dst, type, ctx, parseCtx, moveToken); } status UA_FUNC_ATTR_WARN_UNUSED_RESULT UA_decodeJson(const UA_ByteString *src, void *dst, const UA_DataType *type) { #ifndef UA_ENABLE_TYPEDESCRIPTION return UA_STATUSCODE_BADNOTSUPPORTED; #endif if(dst == NULL || src == NULL || type == NULL) { return UA_STATUSCODE_BADARGUMENTSMISSING; } /* Set up the context */ CtxJson ctx; ParseCtx parseCtx; parseCtx.tokenArray = (jsmntok_t*)UA_malloc(sizeof(jsmntok_t) * UA_JSON_MAXTOKENCOUNT); if(!parseCtx.tokenArray) return UA_STATUSCODE_BADOUTOFMEMORY; status ret = tokenize(&parseCtx, &ctx, src); if(ret != UA_STATUSCODE_GOOD) goto cleanup; /* Assume the top-level element is an object */ if(parseCtx.tokenCount < 1 || parseCtx.tokenArray[0].type != JSMN_OBJECT) { if(parseCtx.tokenCount == 1) { if(parseCtx.tokenArray[0].type == JSMN_PRIMITIVE || parseCtx.tokenArray[0].type == JSMN_STRING) { /* Only a primitive to parse. Do it directly. 
*/ memset(dst, 0, type->memSize); /* Initialize the value */ ret = decodeJsonJumpTable[type->typeKind](dst, type, &ctx, &parseCtx, true); goto cleanup; } } ret = UA_STATUSCODE_BADDECODINGERROR; goto cleanup; } /* Decode */ memset(dst, 0, type->memSize); /* Initialize the value */ ret = decodeJsonJumpTable[type->typeKind](dst, type, &ctx, &parseCtx, true); cleanup: UA_free(parseCtx.tokenArray); /* sanity check if all Tokens were processed */ if(!(parseCtx.index == parseCtx.tokenCount || parseCtx.index == parseCtx.tokenCount-1)) { ret = UA_STATUSCODE_BADDECODINGERROR; } if(ret != UA_STATUSCODE_GOOD) UA_deleteMembers(dst, type); /* Clean up */ return ret; }
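/* --- Usage sketch (not part of the original sources) ---------------------
 * A minimal, illustrative example of calling the UA_decodeJson API defined
 * above: parse a reversible-form Variant (Int32 with value 42) from a JSON
 * string and release the decoded members again. The function name, variable
 * names and the JSON literal are assumptions for illustration only; the block
 * is fenced with #if 0 so it does not affect compilation. */
#if 0
static UA_StatusCode
exampleDecodeVariant(void) {
    UA_ByteString json = UA_BYTESTRING("{\"Type\":6,\"Body\":42}");
    UA_Variant value; /* initialized inside UA_decodeJson */
    UA_StatusCode res = UA_decodeJson(&json, &value, &UA_TYPES[UA_TYPES_VARIANT]);
    if(res == UA_STATUSCODE_GOOD)
        UA_deleteMembers(&value, &UA_TYPES[UA_TYPES_VARIANT]);
    return res;
}
#endif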
null
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * Copyright 2014-2018 (c) Fraunhofer IOSB (Author: Julius Pfrommer) * Copyright 2018 (c) Fraunhofer IOSB (Author: Lukas Meling) */ #include "ua_types_encoding_json.h" #include <open62541/types_generated.h> #include <open62541/types_generated_handling.h> #include "ua_types_encoding_binary.h" #include <float.h> #include <math.h> #ifdef UA_ENABLE_CUSTOM_LIBC #include "../deps/musl/floatscan.h" #include "../deps/musl/vfprintf.h" #endif #include "../deps/itoa.h" #include "../deps/atoi.h" #include "../deps/string_escape.h" #include "../deps/base64.h" #include "../deps/libc_time.h" #if defined(_MSC_VER) # define strtoll _strtoi64 # define strtoull _strtoui64 #endif /* vs2008 does not have INFINITY and NAN defined */ #ifndef INFINITY # define INFINITY ((UA_Double)(DBL_MAX+DBL_MAX)) #endif #ifndef NAN # define NAN ((UA_Double)(INFINITY-INFINITY)) #endif #if defined(_MSC_VER) # pragma warning(disable: 4756) # pragma warning(disable: 4056) #endif #define UA_NODEIDTYPE_NUMERIC_TWOBYTE 0 #define UA_NODEIDTYPE_NUMERIC_FOURBYTE 1 #define UA_NODEIDTYPE_NUMERIC_COMPLETE 2 #define UA_EXPANDEDNODEID_SERVERINDEX_FLAG 0x40 #define UA_EXPANDEDNODEID_NAMESPACEURI_FLAG 0x80 #define UA_JSON_DATETIME_LENGTH 30 /* Max length of numbers for the allocation of temp buffers. Don't forget that * printf adds an additional \0 at the end! * * Sources: * https://www.exploringbinary.com/maximum-number-of-decimal-digits-in-binary-floating-point-numbers/ * * UInt16: 3 + 1 * SByte: 3 + 1 * UInt32: * Int32: * UInt64: * Int64: * Float: 149 + 1 * Double: 767 + 1 */ /************/ /* Encoding */ /************/ #define ENCODE_JSON(TYPE) static status \ TYPE##_encodeJson(const UA_##TYPE *src, const UA_DataType *type, CtxJson *ctx) #define ENCODE_DIRECT_JSON(SRC, TYPE) \ TYPE##_encodeJson((const UA_##TYPE*)SRC, NULL, ctx) extern const encodeJsonSignature encodeJsonJumpTable[UA_DATATYPEKINDS]; extern const decodeJsonSignature decodeJsonJumpTable[UA_DATATYPEKINDS]; /* Forward declarations */ UA_String UA_DateTime_toJSON(UA_DateTime t); ENCODE_JSON(ByteString); static status UA_FUNC_ATTR_WARN_UNUSED_RESULT writeChar(CtxJson *ctx, char c) { if(ctx->pos >= ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) *ctx->pos = (UA_Byte)c; ctx->pos++; return UA_STATUSCODE_GOOD; } #define WRITE_JSON_ELEMENT(ELEM) \ UA_FUNC_ATTR_WARN_UNUSED_RESULT status \ writeJson##ELEM(CtxJson *ctx) static WRITE_JSON_ELEMENT(Quote) { return writeChar(ctx, '\"'); } WRITE_JSON_ELEMENT(ObjStart) { /* increase depth, save: before first key-value no comma needed. */ if(ctx->depth >= UA_JSON_ENCODING_MAX_RECURSION) return UA_STATUSCODE_BADENCODINGERROR; ctx->depth++; ctx->commaNeeded[ctx->depth] = false; return writeChar(ctx, '{'); } WRITE_JSON_ELEMENT(ObjEnd) { ctx->depth--; //decrease depth ctx->commaNeeded[ctx->depth] = true; return writeChar(ctx, '}'); } WRITE_JSON_ELEMENT(ArrStart) { /* increase depth, save: before first array entry no comma needed. 
*/ if(ctx->depth >= UA_JSON_ENCODING_MAX_RECURSION) return UA_STATUSCODE_BADENCODINGERROR; ctx->depth++; ctx->commaNeeded[ctx->depth] = false; return writeChar(ctx, '['); } WRITE_JSON_ELEMENT(ArrEnd) { ctx->depth--; //decrease depth ctx->commaNeeded[ctx->depth] = true; return writeChar(ctx, ']'); } WRITE_JSON_ELEMENT(CommaIfNeeded) { if(ctx->commaNeeded[ctx->depth]) return writeChar(ctx, ','); return UA_STATUSCODE_GOOD; } status writeJsonArrElm(CtxJson *ctx, const void *value, const UA_DataType *type) { status ret = writeJsonCommaIfNeeded(ctx); ctx->commaNeeded[ctx->depth] = true; ret |= encodeJsonInternal(value, type, ctx); return ret; } status writeJsonObjElm(CtxJson *ctx, const char *key, const void *value, const UA_DataType *type){ return writeJsonKey(ctx, key) | encodeJsonInternal(value, type, ctx); } status writeJsonNull(CtxJson *ctx) { if(ctx->pos + 4 > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(ctx->calcOnly) { ctx->pos += 4; } else { *(ctx->pos++) = 'n'; *(ctx->pos++) = 'u'; *(ctx->pos++) = 'l'; *(ctx->pos++) = 'l'; } return UA_STATUSCODE_GOOD; } /* Keys for JSON */ /* LocalizedText */ static const char* UA_JSONKEY_LOCALE = "Locale"; static const char* UA_JSONKEY_TEXT = "Text"; /* QualifiedName */ static const char* UA_JSONKEY_NAME = "Name"; static const char* UA_JSONKEY_URI = "Uri"; /* NodeId */ static const char* UA_JSONKEY_ID = "Id"; static const char* UA_JSONKEY_IDTYPE = "IdType"; static const char* UA_JSONKEY_NAMESPACE = "Namespace"; /* ExpandedNodeId */ static const char* UA_JSONKEY_SERVERURI = "ServerUri"; /* Variant */ static const char* UA_JSONKEY_TYPE = "Type"; static const char* UA_JSONKEY_BODY = "Body"; static const char* UA_JSONKEY_DIMENSION = "Dimension"; /* DataValue */ static const char* UA_JSONKEY_VALUE = "Value"; static const char* UA_JSONKEY_STATUS = "Status"; static const char* UA_JSONKEY_SOURCETIMESTAMP = "SourceTimestamp"; static const char* UA_JSONKEY_SOURCEPICOSECONDS = "SourcePicoseconds"; static const char* UA_JSONKEY_SERVERTIMESTAMP = "ServerTimestamp"; static const char* UA_JSONKEY_SERVERPICOSECONDS = "ServerPicoseconds"; /* ExtensionObject */ static const char* UA_JSONKEY_ENCODING = "Encoding"; static const char* UA_JSONKEY_TYPEID = "TypeId"; /* StatusCode */ static const char* UA_JSONKEY_CODE = "Code"; static const char* UA_JSONKEY_SYMBOL = "Symbol"; /* DiagnosticInfo */ static const char* UA_JSONKEY_SYMBOLICID = "SymbolicId"; static const char* UA_JSONKEY_NAMESPACEURI = "NamespaceUri"; static const char* UA_JSONKEY_LOCALIZEDTEXT = "LocalizedText"; static const char* UA_JSONKEY_ADDITIONALINFO = "AdditionalInfo"; static const char* UA_JSONKEY_INNERSTATUSCODE = "InnerStatusCode"; static const char* UA_JSONKEY_INNERDIAGNOSTICINFO = "InnerDiagnosticInfo"; /* Writes null terminated string to output buffer (current ctx->pos). Writes * comma in front of key if needed. Encapsulates key in quotes. 
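 * Example (illustrative): for a key Text that follows a previous element,
 * writeJsonKey emits ,"Text": into the buffer; before the first key of an
 * object no leading comma is written.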
*/ status UA_FUNC_ATTR_WARN_UNUSED_RESULT writeJsonKey(CtxJson *ctx, const char* key) { size_t size = strlen(key); if(ctx->pos + size + 4 > ctx->end) /* +4 because of " " : and , */ return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; status ret = writeJsonCommaIfNeeded(ctx); ctx->commaNeeded[ctx->depth] = true; if(ctx->calcOnly) { ctx->commaNeeded[ctx->depth] = true; ctx->pos += 3; ctx->pos += size; return ret; } ret |= writeChar(ctx, '\"'); for(size_t i = 0; i < size; i++) { *(ctx->pos++) = (u8)key[i]; } ret |= writeChar(ctx, '\"'); ret |= writeChar(ctx, ':'); return ret; } /* Boolean */ ENCODE_JSON(Boolean) { size_t sizeOfJSONBool; if(*src == true) { sizeOfJSONBool = 4; /*"true"*/ } else { sizeOfJSONBool = 5; /*"false"*/ } if(ctx->calcOnly) { ctx->pos += sizeOfJSONBool; return UA_STATUSCODE_GOOD; } if(ctx->pos + sizeOfJSONBool > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(*src) { *(ctx->pos++) = 't'; *(ctx->pos++) = 'r'; *(ctx->pos++) = 'u'; *(ctx->pos++) = 'e'; } else { *(ctx->pos++) = 'f'; *(ctx->pos++) = 'a'; *(ctx->pos++) = 'l'; *(ctx->pos++) = 's'; *(ctx->pos++) = 'e'; } return UA_STATUSCODE_GOOD; } /*****************/ /* Integer Types */ /*****************/ /* Byte */ ENCODE_JSON(Byte) { char buf[4]; UA_UInt16 digits = itoaUnsigned(*src, buf, 10); /* Ensure destination can hold the data- */ if(ctx->pos + digits > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; /* Copy digits to the output string/buffer. */ if(!ctx->calcOnly) memcpy(ctx->pos, buf, digits); ctx->pos += digits; return UA_STATUSCODE_GOOD; } /* signed Byte */ ENCODE_JSON(SByte) { char buf[5]; UA_UInt16 digits = itoaSigned(*src, buf); if(ctx->pos + digits > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) memcpy(ctx->pos, buf, digits); ctx->pos += digits; return UA_STATUSCODE_GOOD; } /* UInt16 */ ENCODE_JSON(UInt16) { char buf[6]; UA_UInt16 digits = itoaUnsigned(*src, buf, 10); if(ctx->pos + digits > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) memcpy(ctx->pos, buf, digits); ctx->pos += digits; return UA_STATUSCODE_GOOD; } /* Int16 */ ENCODE_JSON(Int16) { char buf[7]; UA_UInt16 digits = itoaSigned(*src, buf); if(ctx->pos + digits > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) memcpy(ctx->pos, buf, digits); ctx->pos += digits; return UA_STATUSCODE_GOOD; } /* UInt32 */ ENCODE_JSON(UInt32) { char buf[11]; UA_UInt16 digits = itoaUnsigned(*src, buf, 10); if(ctx->pos + digits > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) memcpy(ctx->pos, buf, digits); ctx->pos += digits; return UA_STATUSCODE_GOOD; } /* Int32 */ ENCODE_JSON(Int32) { char buf[12]; UA_UInt16 digits = itoaSigned(*src, buf); if(ctx->pos + digits > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) memcpy(ctx->pos, buf, digits); ctx->pos += digits; return UA_STATUSCODE_GOOD; } /* UInt64 */ ENCODE_JSON(UInt64) { char buf[23]; buf[0] = '\"'; UA_UInt16 digits = itoaUnsigned(*src, buf + 1, 10); buf[digits + 1] = '\"'; UA_UInt16 length = (UA_UInt16)(digits + 2); if(ctx->pos + length > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) memcpy(ctx->pos, buf, length); ctx->pos += length; return UA_STATUSCODE_GOOD; } /* Int64 */ ENCODE_JSON(Int64) { char buf[23]; buf[0] = '\"'; UA_UInt16 digits = itoaSigned(*src, buf + 1); buf[digits + 1] = '\"'; UA_UInt16 length = (UA_UInt16)(digits + 2); if(ctx->pos + length > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; 
if(!ctx->calcOnly) memcpy(ctx->pos, buf, length); ctx->pos += length; return UA_STATUSCODE_GOOD; } /************************/ /* Floating Point Types */ /************************/ /* Convert special numbers to string * - fmt_fp gives NAN, nan,-NAN, -nan, inf, INF, -inf, -INF * - Special floating-point numbers such as positive infinity (INF), negative * infinity (-INF) and not-a-number (NaN) shall be represented by the values * “Infinity”, “-Infinity” and “NaN” encoded as a JSON string. */ static status checkAndEncodeSpecialFloatingPoint(char *buffer, size_t *len) { /*nan and NaN*/ if(*len == 3 && (buffer[0] == 'n' || buffer[0] == 'N') && (buffer[1] == 'a' || buffer[1] == 'A') && (buffer[2] == 'n' || buffer[2] == 'N')) { *len = 5; memcpy(buffer, "\"NaN\"", *len); return UA_STATUSCODE_GOOD; } /*-nan and -NaN*/ if(*len == 4 && buffer[0] == '-' && (buffer[1] == 'n' || buffer[1] == 'N') && (buffer[2] == 'a' || buffer[2] == 'A') && (buffer[3] == 'n' || buffer[3] == 'N')) { *len = 6; memcpy(buffer, "\"-NaN\"", *len); return UA_STATUSCODE_GOOD; } /*inf*/ if(*len == 3 && (buffer[0] == 'i' || buffer[0] == 'I') && (buffer[1] == 'n' || buffer[1] == 'N') && (buffer[2] == 'f' || buffer[2] == 'F')) { *len = 10; memcpy(buffer, "\"Infinity\"", *len); return UA_STATUSCODE_GOOD; } /*-inf*/ if(*len == 4 && buffer[0] == '-' && (buffer[1] == 'i' || buffer[1] == 'I') && (buffer[2] == 'n' || buffer[2] == 'N') && (buffer[3] == 'f' || buffer[3] == 'F')) { *len = 11; memcpy(buffer, "\"-Infinity\"", *len); return UA_STATUSCODE_GOOD; } return UA_STATUSCODE_GOOD; } ENCODE_JSON(Float) { char buffer[200]; if(*src == *src) { #ifdef UA_ENABLE_CUSTOM_LIBC fmt_fp(buffer, *src, 0, -1, 0, 'g'); #else UA_snprintf(buffer, 200, "%.149g", (UA_Double)*src); #endif } else { strcpy(buffer, "NaN"); } size_t len = strlen(buffer); if(len == 0) return UA_STATUSCODE_BADENCODINGERROR; checkAndEncodeSpecialFloatingPoint(buffer, &len); if(ctx->pos + len > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) memcpy(ctx->pos, buffer, len); ctx->pos += len; return UA_STATUSCODE_GOOD; } ENCODE_JSON(Double) { char buffer[2000]; if(*src == *src) { #ifdef UA_ENABLE_CUSTOM_LIBC fmt_fp(buffer, *src, 0, 17, 0, 'g'); #else UA_snprintf(buffer, 2000, "%.1074g", *src); #endif } else { strcpy(buffer, "NaN"); } size_t len = strlen(buffer); checkAndEncodeSpecialFloatingPoint(buffer, &len); if(ctx->pos + len > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) memcpy(ctx->pos, buffer, len); ctx->pos += len; return UA_STATUSCODE_GOOD; } static status encodeJsonArray(CtxJson *ctx, const void *ptr, size_t length, const UA_DataType *type) { encodeJsonSignature encodeType = encodeJsonJumpTable[type->typeKind]; status ret = writeJsonArrStart(ctx); uintptr_t uptr = (uintptr_t)ptr; for(size_t i = 0; i < length && ret == UA_STATUSCODE_GOOD; ++i) { ret |= writeJsonCommaIfNeeded(ctx); ret |= encodeType((const void*)uptr, type, ctx); ctx->commaNeeded[ctx->depth] = true; uptr += type->memSize; } ret |= writeJsonArrEnd(ctx); return ret; } /*****************/ /* Builtin Types */ /*****************/ static const u8 hexmapLower[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; static const u8 hexmapUpper[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'}; ENCODE_JSON(String) { if(!src->data) return writeJsonNull(ctx); if(src->length == 0) { status retval = writeJsonQuote(ctx); retval |= writeJsonQuote(ctx); return retval; } UA_StatusCode ret = 
writeJsonQuote(ctx); /* Escaping adapted from https://github.com/akheron/jansson dump.c */ const char *str = (char*)src->data; const char *pos = str; const char *end = str; const char *lim = str + src->length; UA_UInt32 codepoint = 0; while(1) { const char *text; u8 seq[13]; size_t length; while(end < lim) { end = utf8_iterate(pos, (size_t)(lim - pos), (int32_t *)&codepoint); if(!end) return UA_STATUSCODE_BADENCODINGERROR; /* mandatory escape or control char */ if(codepoint == '\\' || codepoint == '"' || codepoint < 0x20) break; /* TODO: Why is this commented? */ /* slash if((flags & JSON_ESCAPE_SLASH) && codepoint == '/') break;*/ /* non-ASCII if((flags & JSON_ENSURE_ASCII) && codepoint > 0x7F) break;*/ pos = end; } if(pos != str) { if(ctx->pos + (pos - str) > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) memcpy(ctx->pos, str, (size_t)(pos - str)); ctx->pos += pos - str; } if(end == pos) break; /* handle \, /, ", and control codes */ length = 2; switch(codepoint) { case '\\': text = "\\\\"; break; case '\"': text = "\\\""; break; case '\b': text = "\\b"; break; case '\f': text = "\\f"; break; case '\n': text = "\\n"; break; case '\r': text = "\\r"; break; case '\t': text = "\\t"; break; case '/': text = "\\/"; break; default: if(codepoint < 0x10000) { /* codepoint is in BMP */ seq[0] = '\\'; seq[1] = 'u'; UA_Byte b1 = (UA_Byte)(codepoint >> 8u); UA_Byte b2 = (UA_Byte)(codepoint >> 0u); seq[2] = hexmapLower[(b1 & 0xF0u) >> 4u]; seq[3] = hexmapLower[b1 & 0x0Fu]; seq[4] = hexmapLower[(b2 & 0xF0u) >> 4u]; seq[5] = hexmapLower[b2 & 0x0Fu]; length = 6; } else { /* not in BMP -> construct a UTF-16 surrogate pair */ codepoint -= 0x10000; UA_UInt32 first = 0xD800u | ((codepoint & 0xffc00u) >> 10u); UA_UInt32 last = 0xDC00u | (codepoint & 0x003ffu); UA_Byte fb1 = (UA_Byte)(first >> 8u); UA_Byte fb2 = (UA_Byte)(first >> 0u); UA_Byte lb1 = (UA_Byte)(last >> 8u); UA_Byte lb2 = (UA_Byte)(last >> 0u); seq[0] = '\\'; seq[1] = 'u'; seq[2] = hexmapLower[(fb1 & 0xF0u) >> 4u]; seq[3] = hexmapLower[fb1 & 0x0Fu]; seq[4] = hexmapLower[(fb2 & 0xF0u) >> 4u]; seq[5] = hexmapLower[fb2 & 0x0Fu]; seq[6] = '\\'; seq[7] = 'u'; seq[8] = hexmapLower[(lb1 & 0xF0u) >> 4u]; seq[9] = hexmapLower[lb1 & 0x0Fu]; seq[10] = hexmapLower[(lb2 & 0xF0u) >> 4u]; seq[11] = hexmapLower[lb2 & 0x0Fu]; length = 12; } text = (char*)seq; break; } if(ctx->pos + length > ctx->end) return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; if(!ctx->calcOnly) memcpy(ctx->pos, text, length); ctx->pos += length; str = pos = end; } ret |= writeJsonQuote(ctx); return ret; } ENCODE_JSON(ByteString) { if(!src->data) return writeJsonNull(ctx); if(src->length == 0) { status retval = writeJsonQuote(ctx); retval |= writeJsonQuote(ctx); return retval; } status ret = writeJsonQuote(ctx); size_t flen = 0; unsigned char *ba64 = UA_base64(src->data, src->length, &flen); /* Not converted, no mem */ if(!ba64) return UA_STATUSCODE_BADENCODINGERROR; if(ctx->pos + flen > ctx->end) { UA_free(ba64); return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; } /* Copy flen bytes to output stream. 
*/ if(!ctx->calcOnly) memcpy(ctx->pos, ba64, flen); ctx->pos += flen; /* Base64 result no longer needed */ UA_free(ba64); ret |= writeJsonQuote(ctx); return ret; } /* Converts Guid to a hexadecimal represenation */ static void UA_Guid_to_hex(const UA_Guid *guid, u8* out) { /* 16 byte +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ | data1 |data2|data3| data4 | +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ |aa aa aa aa-bb bb-cc cc-dd dd-ee ee ee ee ee ee| +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ 36 character */ #ifdef hexCharlowerCase const u8 *hexmap = hexmapLower; #else const u8 *hexmap = hexmapUpper; #endif size_t i = 0, j = 28; for(; i<8;i++,j-=4) /* pos 0-7, 4byte, (a) */ out[i] = hexmap[(guid->data1 >> j) & 0x0Fu]; out[i++] = '-'; /* pos 8 */ for(j=12; i<13;i++,j-=4) /* pos 9-12, 2byte, (b) */ out[i] = hexmap[(uint16_t)(guid->data2 >> j) & 0x0Fu]; out[i++] = '-'; /* pos 13 */ for(j=12; i<18;i++,j-=4) /* pos 14-17, 2byte (c) */ out[i] = hexmap[(uint16_t)(guid->data3 >> j) & 0x0Fu]; out[i++] = '-'; /* pos 18 */ for(j=0;i<23;i+=2,j++) { /* pos 19-22, 2byte (d) */ out[i] = hexmap[(guid->data4[j] & 0xF0u) >> 4u]; out[i+1] = hexmap[guid->data4[j] & 0x0Fu]; } out[i++] = '-'; /* pos 23 */ for(j=2; i<36;i+=2,j++) { /* pos 24-35, 6byte (e) */ out[i] = hexmap[(guid->data4[j] & 0xF0u) >> 4u]; out[i+1] = hexmap[guid->data4[j] & 0x0Fu]; } } /* Guid */ ENCODE_JSON(Guid) { if(ctx->pos + 38 > ctx->end) /* 36 + 2 (") */ return UA_STATUSCODE_BADENCODINGLIMITSEXCEEDED; status ret = writeJsonQuote(ctx); u8 *buf = ctx->pos; if(!ctx->calcOnly) UA_Guid_to_hex(src, buf); ctx->pos += 36; ret |= writeJsonQuote(ctx); return ret; } static void printNumber(u16 n, u8 *pos, size_t digits) { for(size_t i = digits; i > 0; --i) { pos[i - 1] = (u8) ((n % 10) + '0'); n = n / 10; } } ENCODE_JSON(DateTime) { UA_DateTimeStruct tSt = UA_DateTime_toStruct(*src); /* Format: yyyy-MM-dd'T'HH:mm:ss.SSSSSSSSS'Z' is used. 
max 30 bytes.*/ UA_Byte buffer[UA_JSON_DATETIME_LENGTH]; printNumber(tSt.year, &buffer[0], 4); buffer[4] = '-'; printNumber(tSt.month, &buffer[5], 2); buffer[7] = '-'; printNumber(tSt.day, &buffer[8], 2); buffer[10] = 'T'; printNumber(tSt.hour, &buffer[11], 2); buffer[13] = ':'; printNumber(tSt.min, &buffer[14], 2); buffer[16] = ':'; printNumber(tSt.sec, &buffer[17], 2); buffer[19] = '.'; printNumber(tSt.milliSec, &buffer[20], 3); printNumber(tSt.microSec, &buffer[23], 3); printNumber(tSt.nanoSec, &buffer[26], 3); size_t length = 28; while (buffer[length] == '0') length--; if (length != 19) length++; buffer[length] = 'Z'; UA_String str = {length + 1, buffer}; return ENCODE_DIRECT_JSON(&str, String); } /* NodeId */ static status NodeId_encodeJsonInternal(UA_NodeId const *src, CtxJson *ctx) { status ret = UA_STATUSCODE_GOOD; switch (src->identifierType) { case UA_NODEIDTYPE_NUMERIC: ret |= writeJsonKey(ctx, UA_JSONKEY_ID); ret |= ENCODE_DIRECT_JSON(&src->identifier.numeric, UInt32); break; case UA_NODEIDTYPE_STRING: ret |= writeJsonKey(ctx, UA_JSONKEY_IDTYPE); ret |= writeChar(ctx, '1'); ret |= writeJsonKey(ctx, UA_JSONKEY_ID); ret |= ENCODE_DIRECT_JSON(&src->identifier.string, String); break; case UA_NODEIDTYPE_GUID: ret |= writeJsonKey(ctx, UA_JSONKEY_IDTYPE); ret |= writeChar(ctx, '2'); ret |= writeJsonKey(ctx, UA_JSONKEY_ID); /* Id */ ret |= ENCODE_DIRECT_JSON(&src->identifier.guid, Guid); break; case UA_NODEIDTYPE_BYTESTRING: ret |= writeJsonKey(ctx, UA_JSONKEY_IDTYPE); ret |= writeChar(ctx, '3'); ret |= writeJsonKey(ctx, UA_JSONKEY_ID); /* Id */ ret |= ENCODE_DIRECT_JSON(&src->identifier.byteString, ByteString); break; default: return UA_STATUSCODE_BADINTERNALERROR; } return ret; } ENCODE_JSON(NodeId) { UA_StatusCode ret = writeJsonObjStart(ctx); ret |= NodeId_encodeJsonInternal(src, ctx); if(ctx->useReversible) { if(src->namespaceIndex > 0) { ret |= writeJsonKey(ctx, UA_JSONKEY_NAMESPACE); ret |= ENCODE_DIRECT_JSON(&src->namespaceIndex, UInt16); } } else { /* For the non-reversible encoding, the field is the NamespaceUri * associated with the NamespaceIndex, encoded as a JSON string. * A NamespaceIndex of 1 is always encoded as a JSON number. */ if(src->namespaceIndex == 1) { ret |= writeJsonKey(ctx, UA_JSONKEY_NAMESPACE); ret |= ENCODE_DIRECT_JSON(&src->namespaceIndex, UInt16); } else { ret |= writeJsonKey(ctx, UA_JSONKEY_NAMESPACE); /* Check if Namespace given and in range */ if(src->namespaceIndex < ctx->namespacesSize && ctx->namespaces != NULL) { UA_String namespaceEntry = ctx->namespaces[src->namespaceIndex]; ret |= ENCODE_DIRECT_JSON(&namespaceEntry, String); } else { return UA_STATUSCODE_BADNOTFOUND; } } } ret |= writeJsonObjEnd(ctx); return ret; } /* ExpandedNodeId */ ENCODE_JSON(ExpandedNodeId) { status ret = writeJsonObjStart(ctx); /* Encode the NodeId */ ret |= NodeId_encodeJsonInternal(&src->nodeId, ctx); if(ctx->useReversible) { if(src->namespaceUri.data != NULL && src->namespaceUri.length != 0 && (void*) src->namespaceUri.data > UA_EMPTY_ARRAY_SENTINEL) { /* If the NamespaceUri is specified it is encoded as a JSON string in this field. */ ret |= writeJsonKey(ctx, UA_JSONKEY_NAMESPACE); ret |= ENCODE_DIRECT_JSON(&src->namespaceUri, String); } else { /* If the NamespaceUri is not specified, the NamespaceIndex is encoded with these rules: * The field is encoded as a JSON number for the reversible encoding. * The field is omitted if the NamespaceIndex equals 0. 
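             * Example (illustrative, reversible form): a numeric NodeId 5 in namespace 2
             * with ServerIndex 1 is encoded as {"Id":5,"Namespace":2,"ServerUri":1}.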
*/ if(src->nodeId.namespaceIndex > 0) { ret |= writeJsonKey(ctx, UA_JSONKEY_NAMESPACE); ret |= ENCODE_DIRECT_JSON(&src->nodeId.namespaceIndex, UInt16); } } /* Encode the serverIndex/Url * This field is encoded as a JSON number for the reversible encoding. * This field is omitted if the ServerIndex equals 0. */ if(src->serverIndex > 0) { ret |= writeJsonKey(ctx, UA_JSONKEY_SERVERURI); ret |= ENCODE_DIRECT_JSON(&src->serverIndex, UInt32); } ret |= writeJsonObjEnd(ctx); return ret; } /* NON-Reversible Case */ /* If the NamespaceUri is not specified, the NamespaceIndex is encoded with these rules: * For the non-reversible encoding the field is the NamespaceUri associated with the * NamespaceIndex encoded as a JSON string. * A NamespaceIndex of 1 is always encoded as a JSON number. */ if(src->namespaceUri.data != NULL && src->namespaceUri.length != 0) { ret |= writeJsonKey(ctx, UA_JSONKEY_NAMESPACE); ret |= ENCODE_DIRECT_JSON(&src->namespaceUri, String); if(ret != UA_STATUSCODE_GOOD) return ret; } else { if(src->nodeId.namespaceIndex == 1) { ret |= writeJsonKey(ctx, UA_JSONKEY_NAMESPACE); ret |= ENCODE_DIRECT_JSON(&src->nodeId.namespaceIndex, UInt16); if(ret != UA_STATUSCODE_GOOD) return ret; } else { ret |= writeJsonKey(ctx, UA_JSONKEY_NAMESPACE); /* Check if Namespace given and in range */ if(src->nodeId.namespaceIndex < ctx->namespacesSize && ctx->namespaces != NULL) { UA_String namespaceEntry = ctx->namespaces[src->nodeId.namespaceIndex]; ret |= ENCODE_DIRECT_JSON(&namespaceEntry, String); if(ret != UA_STATUSCODE_GOOD) return ret; } else { return UA_STATUSCODE_BADNOTFOUND; } } } /* For the non-reversible encoding, this field is the ServerUri associated * with the ServerIndex portion of the ExpandedNodeId, encoded as a JSON * string. */ /* Check if Namespace given and in range */ if(src->serverIndex < ctx->serverUrisSize && ctx->serverUris != NULL) { UA_String serverUriEntry = ctx->serverUris[src->serverIndex]; ret |= writeJsonKey(ctx, UA_JSONKEY_SERVERURI); ret |= ENCODE_DIRECT_JSON(&serverUriEntry, String); } else { return UA_STATUSCODE_BADNOTFOUND; } ret |= writeJsonObjEnd(ctx); return ret; } /* LocalizedText */ ENCODE_JSON(LocalizedText) { if(ctx->useReversible) { status ret = writeJsonObjStart(ctx); ret |= writeJsonKey(ctx, UA_JSONKEY_LOCALE); ret |= ENCODE_DIRECT_JSON(&src->locale, String); ret |= writeJsonKey(ctx, UA_JSONKEY_TEXT); ret |= ENCODE_DIRECT_JSON(&src->text, String); ret |= writeJsonObjEnd(ctx); return ret; } /* For the non-reversible form, LocalizedText value shall be encoded as a * JSON string containing the Text component.*/ return ENCODE_DIRECT_JSON(&src->text, String); } ENCODE_JSON(QualifiedName) { status ret = writeJsonObjStart(ctx); ret |= writeJsonKey(ctx, UA_JSONKEY_NAME); ret |= ENCODE_DIRECT_JSON(&src->name, String); if(ctx->useReversible) { if(src->namespaceIndex != 0) { ret |= writeJsonKey(ctx, UA_JSONKEY_URI); ret |= ENCODE_DIRECT_JSON(&src->namespaceIndex, UInt16); } } else { /* For the non-reversible form, the NamespaceUri associated with the * NamespaceIndex portion of the QualifiedName is encoded as JSON string * unless the NamespaceIndex is 1 or if NamespaceUri is unknown. In * these cases, the NamespaceIndex is encoded as a JSON number. 
*/ if(src->namespaceIndex == 1) { ret |= writeJsonKey(ctx, UA_JSONKEY_URI); ret |= ENCODE_DIRECT_JSON(&src->namespaceIndex, UInt16); } else { ret |= writeJsonKey(ctx, UA_JSONKEY_URI); /* Check if Namespace given and in range */ if(src->namespaceIndex < ctx->namespacesSize && ctx->namespaces != NULL) { UA_String namespaceEntry = ctx->namespaces[src->namespaceIndex]; ret |= ENCODE_DIRECT_JSON(&namespaceEntry, String); } else { /* If not encode as number */ ret |= ENCODE_DIRECT_JSON(&src->namespaceIndex, UInt16); } } } return ret | writeJsonObjEnd(ctx); } ENCODE_JSON(StatusCode) { if(!src) return writeJsonNull(ctx); if(ctx->useReversible) return ENCODE_DIRECT_JSON(src, UInt32); if(*src == UA_STATUSCODE_GOOD) return writeJsonNull(ctx); status ret = UA_STATUSCODE_GOOD; ret |= writeJsonObjStart(ctx); ret |= writeJsonKey(ctx, UA_JSONKEY_CODE); ret |= ENCODE_DIRECT_JSON(src, UInt32); ret |= writeJsonKey(ctx, UA_JSONKEY_SYMBOL); const char *codename = UA_StatusCode_name(*src); UA_String statusDescription = UA_STRING((char*)(uintptr_t)codename); ret |= ENCODE_DIRECT_JSON(&statusDescription, String); ret |= writeJsonObjEnd(ctx); return ret; } /* ExtensionObject */ ENCODE_JSON(ExtensionObject) { u8 encoding = (u8) src->encoding; if(encoding == UA_EXTENSIONOBJECT_ENCODED_NOBODY) return writeJsonNull(ctx); status ret = UA_STATUSCODE_GOOD; /* already encoded content.*/ if(encoding <= UA_EXTENSIONOBJECT_ENCODED_XML) { ret |= writeJsonObjStart(ctx); if(ctx->useReversible) { ret |= writeJsonKey(ctx, UA_JSONKEY_TYPEID); ret |= ENCODE_DIRECT_JSON(&src->content.encoded.typeId, NodeId); if(ret != UA_STATUSCODE_GOOD) return ret; } switch (src->encoding) { case UA_EXTENSIONOBJECT_ENCODED_BYTESTRING: { if(ctx->useReversible) { ret |= writeJsonKey(ctx, UA_JSONKEY_ENCODING); ret |= writeChar(ctx, '1'); } ret |= writeJsonKey(ctx, UA_JSONKEY_BODY); ret |= ENCODE_DIRECT_JSON(&src->content.encoded.body, String); break; } case UA_EXTENSIONOBJECT_ENCODED_XML: { if(ctx->useReversible) { ret |= writeJsonKey(ctx, UA_JSONKEY_ENCODING); ret |= writeChar(ctx, '2'); } ret |= writeJsonKey(ctx, UA_JSONKEY_BODY); ret |= ENCODE_DIRECT_JSON(&src->content.encoded.body, String); break; } default: ret = UA_STATUSCODE_BADINTERNALERROR; } ret |= writeJsonObjEnd(ctx); return ret; } /* encoding <= UA_EXTENSIONOBJECT_ENCODED_XML */ /* Cannot encode with no type description */ if(!src->content.decoded.type) return UA_STATUSCODE_BADENCODINGERROR; if(!src->content.decoded.data) return writeJsonNull(ctx); UA_NodeId typeId = src->content.decoded.type->typeId; if(typeId.identifierType != UA_NODEIDTYPE_NUMERIC) return UA_STATUSCODE_BADENCODINGERROR; ret |= writeJsonObjStart(ctx); const UA_DataType *contentType = src->content.decoded.type; if(ctx->useReversible) { /* REVERSIBLE */ ret |= writeJsonKey(ctx, UA_JSONKEY_TYPEID); ret |= ENCODE_DIRECT_JSON(&typeId, NodeId); /* Encode the content */ ret |= writeJsonKey(ctx, UA_JSONKEY_BODY); ret |= encodeJsonInternal(src->content.decoded.data, contentType, ctx); } else { /* NON-REVERSIBLE * For the non-reversible form, ExtensionObject values * shall be encoded as a JSON object containing only the * value of the Body field. The TypeId and Encoding fields are dropped. * * TODO: UA_JSONKEY_BODY key in the ExtensionObject? 
*/ ret |= writeJsonKey(ctx, UA_JSONKEY_BODY); ret |= encodeJsonInternal(src->content.decoded.data, contentType, ctx); } ret |= writeJsonObjEnd(ctx); return ret; } static status Variant_encodeJsonWrapExtensionObject(const UA_Variant *src, const bool isArray, CtxJson *ctx) { size_t length = 1; status ret = UA_STATUSCODE_GOOD; if(isArray) { if(src->arrayLength > UA_INT32_MAX) return UA_STATUSCODE_BADENCODINGERROR; length = src->arrayLength; } /* Set up the ExtensionObject */ UA_ExtensionObject eo; UA_ExtensionObject_init(&eo); eo.encoding = UA_EXTENSIONOBJECT_DECODED; eo.content.decoded.type = src->type; const u16 memSize = src->type->memSize; uintptr_t ptr = (uintptr_t) src->data; if(isArray) { ret |= writeJsonArrStart(ctx); ctx->commaNeeded[ctx->depth] = false; /* Iterate over the array */ for(size_t i = 0; i < length && ret == UA_STATUSCODE_GOOD; ++i) { eo.content.decoded.data = (void*) ptr; ret |= writeJsonArrElm(ctx, &eo, &UA_TYPES[UA_TYPES_EXTENSIONOBJECT]); ptr += memSize; } ret |= writeJsonArrEnd(ctx); return ret; } eo.content.decoded.data = (void*) ptr; return encodeJsonInternal(&eo, &UA_TYPES[UA_TYPES_EXTENSIONOBJECT], ctx); } static status addMultiArrayContentJSON(CtxJson *ctx, void* array, const UA_DataType *type, size_t *index, UA_UInt32 *arrayDimensions, size_t dimensionIndex, size_t dimensionSize) { /* Check the recursion limit */ if(ctx->depth >= UA_JSON_ENCODING_MAX_RECURSION) return UA_STATUSCODE_BADENCODINGERROR; /* Stop recursion: The inner Arrays are written */ status ret; if(dimensionIndex == (dimensionSize - 1)) { ret = encodeJsonArray(ctx, ((u8*)array) + (type->memSize * *index), arrayDimensions[dimensionIndex], type); (*index) += arrayDimensions[dimensionIndex]; return ret; } /* Recurse to the next dimension */ ret = writeJsonArrStart(ctx); for(size_t i = 0; i < arrayDimensions[dimensionIndex]; i++) { ret |= writeJsonCommaIfNeeded(ctx); ret |= addMultiArrayContentJSON(ctx, array, type, index, arrayDimensions, dimensionIndex + 1, dimensionSize); ctx->commaNeeded[ctx->depth] = true; if(ret != UA_STATUSCODE_GOOD) return ret; } ret |= writeJsonArrEnd(ctx); return ret; } ENCODE_JSON(Variant) { /* If type is 0 (NULL) the Variant contains a NULL value and the containing * JSON object shall be omitted or replaced by the JSON literal ‘null’ (when * an element of a JSON array). */ if(!src->type) { return writeJsonNull(ctx); } /* Set the content type in the encoding mask */ const UA_Boolean isBuiltin = (src->type->typeKind <= UA_DATATYPEKIND_DIAGNOSTICINFO); const UA_Boolean isEnum = (src->type->typeKind == UA_DATATYPEKIND_ENUM); /* Set the array type in the encoding mask */ const bool isArray = src->arrayLength > 0 || src->data <= UA_EMPTY_ARRAY_SENTINEL; const bool hasDimensions = isArray && src->arrayDimensionsSize > 0; status ret = UA_STATUSCODE_GOOD; if(ctx->useReversible) { ret |= writeJsonObjStart(ctx); if(ret != UA_STATUSCODE_GOOD) return ret; /* Encode the content */ if(!isBuiltin && !isEnum) { /* REVERSIBLE: NOT BUILTIN, can it be encoded? 
Wrap in extension object.*/ ret |= writeJsonKey(ctx, UA_JSONKEY_TYPE); ret |= ENCODE_DIRECT_JSON(&UA_TYPES[UA_TYPES_EXTENSIONOBJECT].typeId.identifier.numeric, UInt32); ret |= writeJsonKey(ctx, UA_JSONKEY_BODY); ret |= Variant_encodeJsonWrapExtensionObject(src, isArray, ctx); } else if(!isArray) { /*REVERSIBLE: BUILTIN, single value.*/ ret |= writeJsonKey(ctx, UA_JSONKEY_TYPE); ret |= ENCODE_DIRECT_JSON(&src->type->typeId.identifier.numeric, UInt32); ret |= writeJsonKey(ctx, UA_JSONKEY_BODY); ret |= encodeJsonInternal(src->data, src->type, ctx); } else { /*REVERSIBLE: BUILTIN, array.*/ ret |= writeJsonKey(ctx, UA_JSONKEY_TYPE); ret |= ENCODE_DIRECT_JSON(&src->type->typeId.identifier.numeric, UInt32); ret |= writeJsonKey(ctx, UA_JSONKEY_BODY); ret |= encodeJsonArray(ctx, src->data, src->arrayLength, src->type); } if(ret != UA_STATUSCODE_GOOD) return ret; /* REVERSIBLE: Encode the array dimensions */ if(hasDimensions && ret == UA_STATUSCODE_GOOD) { ret |= writeJsonKey(ctx, UA_JSONKEY_DIMENSION); ret |= encodeJsonArray(ctx, src->arrayDimensions, src->arrayDimensionsSize, &UA_TYPES[UA_TYPES_INT32]); if(ret != UA_STATUSCODE_GOOD) return ret; } ret |= writeJsonObjEnd(ctx); return ret; } /* reversible */ /* NON-REVERSIBLE * For the non-reversible form, Variant values shall be encoded as a JSON object containing only * the value of the Body field. The Type and Dimensions fields are dropped. Multi-dimensional * arrays are encoded as a multi dimensional JSON array as described in 5.4.5. */ ret |= writeJsonObjStart(ctx); if(!isBuiltin && !isEnum) { /*NON REVERSIBLE: NOT BUILTIN, can it be encoded? Wrap in extension object.*/ if(src->arrayDimensionsSize > 1) { return UA_STATUSCODE_BADNOTIMPLEMENTED; } ret |= writeJsonKey(ctx, UA_JSONKEY_BODY); ret |= Variant_encodeJsonWrapExtensionObject(src, isArray, ctx); } else if(!isArray) { /*NON REVERSIBLE: BUILTIN, single value.*/ ret |= writeJsonKey(ctx, UA_JSONKEY_BODY); ret |= encodeJsonInternal(src->data, src->type, ctx); } else { /*NON REVERSIBLE: BUILTIN, array.*/ ret |= writeJsonKey(ctx, UA_JSONKEY_BODY); size_t dimensionSize = src->arrayDimensionsSize; if(dimensionSize > 1) { /*nonreversible multidimensional array*/ size_t index = 0; size_t dimensionIndex = 0; void *ptr = src->data; const UA_DataType *arraytype = src->type; ret |= addMultiArrayContentJSON(ctx, ptr, arraytype, &index, src->arrayDimensions, dimensionIndex, dimensionSize); } else { /*nonreversible simple array*/ ret |= encodeJsonArray(ctx, src->data, src->arrayLength, src->type); } } ret |= writeJsonObjEnd(ctx); return ret; } /* DataValue */ ENCODE_JSON(DataValue) { UA_Boolean hasValue = src->hasValue && src->value.type != NULL; UA_Boolean hasStatus = src->hasStatus && src->status; UA_Boolean hasSourceTimestamp = src->hasSourceTimestamp && src->sourceTimestamp; UA_Boolean hasSourcePicoseconds = src->hasSourcePicoseconds && src->sourcePicoseconds; UA_Boolean hasServerTimestamp = src->hasServerTimestamp && src->serverTimestamp; UA_Boolean hasServerPicoseconds = src->hasServerPicoseconds && src->serverPicoseconds; if(!hasValue && !hasStatus && !hasSourceTimestamp && !hasSourcePicoseconds && !hasServerTimestamp && !hasServerPicoseconds) { return writeJsonNull(ctx); /*no element, encode as null*/ } status ret = UA_STATUSCODE_GOOD; ret |= writeJsonObjStart(ctx); if(hasValue) { ret |= writeJsonKey(ctx, UA_JSONKEY_VALUE); ret |= ENCODE_DIRECT_JSON(&src->value, Variant); if(ret != UA_STATUSCODE_GOOD) return ret; } if(hasStatus) { ret |= writeJsonKey(ctx, UA_JSONKEY_STATUS); ret |= 
ENCODE_DIRECT_JSON(&src->status, StatusCode); if(ret != UA_STATUSCODE_GOOD) return ret; } if(hasSourceTimestamp) { ret |= writeJsonKey(ctx, UA_JSONKEY_SOURCETIMESTAMP); ret |= ENCODE_DIRECT_JSON(&src->sourceTimestamp, DateTime); if(ret != UA_STATUSCODE_GOOD) return ret; } if(hasSourcePicoseconds) { ret |= writeJsonKey(ctx, UA_JSONKEY_SOURCEPICOSECONDS); ret |= ENCODE_DIRECT_JSON(&src->sourcePicoseconds, UInt16); if(ret != UA_STATUSCODE_GOOD) return ret; } if(hasServerTimestamp) { ret |= writeJsonKey(ctx, UA_JSONKEY_SERVERTIMESTAMP); ret |= ENCODE_DIRECT_JSON(&src->serverTimestamp, DateTime); if(ret != UA_STATUSCODE_GOOD) return ret; } if(hasServerPicoseconds) { ret |= writeJsonKey(ctx, UA_JSONKEY_SERVERPICOSECONDS); ret |= ENCODE_DIRECT_JSON(&src->serverPicoseconds, UInt16); if(ret != UA_STATUSCODE_GOOD) return ret; } ret |= writeJsonObjEnd(ctx); return ret; } /* DiagnosticInfo */ ENCODE_JSON(DiagnosticInfo) { status ret = UA_STATUSCODE_GOOD; if(!src->hasSymbolicId && !src->hasNamespaceUri && !src->hasLocalizedText && !src->hasLocale && !src->hasAdditionalInfo && !src->hasInnerDiagnosticInfo && !src->hasInnerStatusCode) { return writeJsonNull(ctx); /*no element present, encode as null.*/ } ret |= writeJsonObjStart(ctx); if(src->hasSymbolicId) { ret |= writeJsonKey(ctx, UA_JSONKEY_SYMBOLICID); ret |= ENCODE_DIRECT_JSON(&src->symbolicId, UInt32); if(ret != UA_STATUSCODE_GOOD) return ret; } if(src->hasNamespaceUri) { ret |= writeJsonKey(ctx, UA_JSONKEY_NAMESPACEURI); ret |= ENCODE_DIRECT_JSON(&src->namespaceUri, UInt32); if(ret != UA_STATUSCODE_GOOD) return ret; } if(src->hasLocalizedText) { ret |= writeJsonKey(ctx, UA_JSONKEY_LOCALIZEDTEXT); ret |= ENCODE_DIRECT_JSON(&src->localizedText, UInt32); if(ret != UA_STATUSCODE_GOOD) return ret; } if(src->hasLocale) { ret |= writeJsonKey(ctx, UA_JSONKEY_LOCALE); ret |= ENCODE_DIRECT_JSON(&src->locale, UInt32); if(ret != UA_STATUSCODE_GOOD) return ret; } if(src->hasAdditionalInfo) { ret |= writeJsonKey(ctx, UA_JSONKEY_ADDITIONALINFO); ret |= ENCODE_DIRECT_JSON(&src->additionalInfo, String); if(ret != UA_STATUSCODE_GOOD) return ret; } if(src->hasInnerStatusCode) { ret |= writeJsonKey(ctx, UA_JSONKEY_INNERSTATUSCODE); ret |= ENCODE_DIRECT_JSON(&src->innerStatusCode, StatusCode); if(ret != UA_STATUSCODE_GOOD) return ret; } if(src->hasInnerDiagnosticInfo && src->innerDiagnosticInfo) { ret |= writeJsonKey(ctx, UA_JSONKEY_INNERDIAGNOSTICINFO); /* Check recursion depth in encodeJsonInternal */ ret |= encodeJsonInternal(src->innerDiagnosticInfo, &UA_TYPES[UA_TYPES_DIAGNOSTICINFO], ctx); if(ret != UA_STATUSCODE_GOOD) return ret; } ret |= writeJsonObjEnd(ctx); return ret; } static status encodeJsonStructure(const void *src, const UA_DataType *type, CtxJson *ctx) { /* Check the recursion limit */ if(ctx->depth >= UA_JSON_ENCODING_MAX_RECURSION) return UA_STATUSCODE_BADENCODINGERROR; ctx->depth++; status ret = writeJsonObjStart(ctx); uintptr_t ptr = (uintptr_t) src; u8 membersSize = type->membersSize; const UA_DataType * typelists[2] = {UA_TYPES, &type[-type->typeIndex]}; for(size_t i = 0; i < membersSize && ret == UA_STATUSCODE_GOOD; ++i) { const UA_DataTypeMember *m = &type->members[i]; const UA_DataType *mt = &typelists[!m->namespaceZero][m->memberTypeIndex]; if(m->memberName != NULL && *m->memberName != 0) ret |= writeJsonKey(ctx, m->memberName); if(!m->isArray) { ptr += m->padding; size_t memSize = mt->memSize; ret |= encodeJsonJumpTable[mt->typeKind]((const void*) ptr, mt, ctx); ptr += memSize; } else { ptr += m->padding; const size_t length = *((const 
size_t*) ptr); ptr += sizeof (size_t); ret |= encodeJsonArray(ctx, *(void * const *)ptr, length, mt); ptr += sizeof (void*); } } ret |= writeJsonObjEnd(ctx); ctx->depth--; return ret; } static status encodeJsonNotImplemented(const void *src, const UA_DataType *type, CtxJson *ctx) { (void) src, (void) type, (void)ctx; return UA_STATUSCODE_BADNOTIMPLEMENTED; } const encodeJsonSignature encodeJsonJumpTable[UA_DATATYPEKINDS] = { (encodeJsonSignature)Boolean_encodeJson, (encodeJsonSignature)SByte_encodeJson, /* SByte */ (encodeJsonSignature)Byte_encodeJson, (encodeJsonSignature)Int16_encodeJson, /* Int16 */ (encodeJsonSignature)UInt16_encodeJson, (encodeJsonSignature)Int32_encodeJson, /* Int32 */ (encodeJsonSignature)UInt32_encodeJson, (encodeJsonSignature)Int64_encodeJson, /* Int64 */ (encodeJsonSignature)UInt64_encodeJson, (encodeJsonSignature)Float_encodeJson, (encodeJsonSignature)Double_encodeJson, (encodeJsonSignature)String_encodeJson, (encodeJsonSignature)DateTime_encodeJson, /* DateTime */ (encodeJsonSignature)Guid_encodeJson, (encodeJsonSignature)ByteString_encodeJson, /* ByteString */ (encodeJsonSignature)String_encodeJson, /* XmlElement */ (encodeJsonSignature)NodeId_encodeJson, (encodeJsonSignature)ExpandedNodeId_encodeJson, (encodeJsonSignature)StatusCode_encodeJson, /* StatusCode */ (encodeJsonSignature)QualifiedName_encodeJson, /* QualifiedName */ (encodeJsonSignature)LocalizedText_encodeJson, (encodeJsonSignature)ExtensionObject_encodeJson, (encodeJsonSignature)DataValue_encodeJson, (encodeJsonSignature)Variant_encodeJson, (encodeJsonSignature)DiagnosticInfo_encodeJson, (encodeJsonSignature)encodeJsonNotImplemented, /* Decimal */ (encodeJsonSignature)Int32_encodeJson, /* Enum */ (encodeJsonSignature)encodeJsonStructure, (encodeJsonSignature)encodeJsonNotImplemented, /* Structure with optional fields */ (encodeJsonSignature)encodeJsonNotImplemented, /* Union */ (encodeJsonSignature)encodeJsonNotImplemented /* BitfieldCluster */ }; status encodeJsonInternal(const void *src, const UA_DataType *type, CtxJson *ctx) { return encodeJsonJumpTable[type->typeKind](src, type, ctx); } status UA_FUNC_ATTR_WARN_UNUSED_RESULT UA_encodeJson(const void *src, const UA_DataType *type, u8 **bufPos, const u8 **bufEnd, UA_String *namespaces, size_t namespaceSize, UA_String *serverUris, size_t serverUriSize, UA_Boolean useReversible) { if(!src || !type) return UA_STATUSCODE_BADINTERNALERROR; /* Set up the context */ CtxJson ctx; memset(&ctx, 0, sizeof(ctx)); ctx.pos = *bufPos; ctx.end = *bufEnd; ctx.depth = 0; ctx.namespaces = namespaces; ctx.namespacesSize = namespaceSize; ctx.serverUris = serverUris; ctx.serverUrisSize = serverUriSize; ctx.useReversible = useReversible; ctx.calcOnly = false; /* Encode */ status ret = encodeJsonJumpTable[type->typeKind](src, type, &ctx); *bufPos = ctx.pos; *bufEnd = ctx.end; return ret; } /************/ /* CalcSize */ /************/ size_t UA_calcSizeJson(const void *src, const UA_DataType *type, UA_String *namespaces, size_t namespaceSize, UA_String *serverUris, size_t serverUriSize, UA_Boolean useReversible) { if(!src || !type) return UA_STATUSCODE_BADINTERNALERROR; /* Set up the context */ CtxJson ctx; memset(&ctx, 0, sizeof(ctx)); ctx.pos = 0; ctx.end = (const UA_Byte*)(uintptr_t)SIZE_MAX; ctx.depth = 0; ctx.namespaces = namespaces; ctx.namespacesSize = namespaceSize; ctx.serverUris = serverUris; ctx.serverUrisSize = serverUriSize; ctx.useReversible = useReversible; ctx.calcOnly = true; /* Encode */ status ret = encodeJsonJumpTable[type->typeKind](src, type, 
&ctx); if(ret != UA_STATUSCODE_GOOD) return 0; return (size_t)ctx.pos; } /**********/ /* Decode */ /**********/ /* Macro which gets current size and char pointer of current Token. Needs * ParseCtx (parseCtx) and CtxJson (ctx). Does NOT increment index of Token. */ #define GET_TOKEN(data, size) do { \ (size) = (size_t)(parseCtx->tokenArray[parseCtx->index].end - parseCtx->tokenArray[parseCtx->index].start); \ (data) = (char*)(ctx->pos + parseCtx->tokenArray[parseCtx->index].start); } while(0) #define ALLOW_NULL do { \ if(isJsonNull(ctx, parseCtx)) { \ parseCtx->index++; \ return UA_STATUSCODE_GOOD; \ }} while(0) #define CHECK_TOKEN_BOUNDS do { \ if(parseCtx->index >= parseCtx->tokenCount) \ return UA_STATUSCODE_BADDECODINGERROR; \ } while(0) #define CHECK_PRIMITIVE do { \ if(getJsmnType(parseCtx) != JSMN_PRIMITIVE) { \ return UA_STATUSCODE_BADDECODINGERROR; \ }} while(0) #define CHECK_STRING do { \ if(getJsmnType(parseCtx) != JSMN_STRING) { \ return UA_STATUSCODE_BADDECODINGERROR; \ }} while(0) #define CHECK_OBJECT do { \ if(getJsmnType(parseCtx) != JSMN_OBJECT) { \ return UA_STATUSCODE_BADDECODINGERROR; \ }} while(0) /* Forward declarations*/ #define DECODE_JSON(TYPE) static status \ TYPE##_decodeJson(UA_##TYPE *dst, const UA_DataType *type, \ CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken) /* decode without moving the token index */ #define DECODE_DIRECT_JSON(DST, TYPE) TYPE##_decodeJson((UA_##TYPE*)DST, NULL, ctx, parseCtx, false) /* If parseCtx->index points to the beginning of an object, move the index to * the next token after this object. Attention! The index can be moved after the * last parsed token. So the array length has to be checked afterwards. */ static void skipObject(ParseCtx *parseCtx) { int end = parseCtx->tokenArray[parseCtx->index].end; do { parseCtx->index++; } while(parseCtx->index < parseCtx->tokenCount && parseCtx->tokenArray[parseCtx->index].start < end); } static status Array_decodeJson(void *dst, const UA_DataType *type, CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken); static status Array_decodeJson_internal(void **dst, const UA_DataType *type, CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken); static status Variant_decodeJsonUnwrapExtensionObject(UA_Variant *dst, const UA_DataType *type, CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken); /* Json decode Helper */ jsmntype_t getJsmnType(const ParseCtx *parseCtx) { if(parseCtx->index >= parseCtx->tokenCount) return JSMN_UNDEFINED; return parseCtx->tokenArray[parseCtx->index].type; } UA_Boolean isJsonNull(const CtxJson *ctx, const ParseCtx *parseCtx) { if(parseCtx->index >= parseCtx->tokenCount) return false; if(parseCtx->tokenArray[parseCtx->index].type != JSMN_PRIMITIVE) { return false; } char* elem = (char*)(ctx->pos + parseCtx->tokenArray[parseCtx->index].start); return (elem[0] == 'n' && elem[1] == 'u' && elem[2] == 'l' && elem[3] == 'l'); } static UA_SByte jsoneq(const char *json, jsmntok_t *tok, const char *searchKey) { /* TODO: necessary? 
if(json == NULL || tok == NULL || searchKey == NULL) { return -1; } */ if(tok->type == JSMN_STRING) { if(strlen(searchKey) == (size_t)(tok->end - tok->start) ) { if(strncmp(json + tok->start, (const char*)searchKey, (size_t)(tok->end - tok->start)) == 0) { return 0; } } } return -1; } DECODE_JSON(Boolean) { CHECK_PRIMITIVE; CHECK_TOKEN_BOUNDS; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); if(tokenSize == 4 && tokenData[0] == 't' && tokenData[1] == 'r' && tokenData[2] == 'u' && tokenData[3] == 'e') { *dst = true; } else if(tokenSize == 5 && tokenData[0] == 'f' && tokenData[1] == 'a' && tokenData[2] == 'l' && tokenData[3] == 's' && tokenData[4] == 'e') { *dst = false; } else { return UA_STATUSCODE_BADDECODINGERROR; } if(moveToken) parseCtx->index++; return UA_STATUSCODE_GOOD; } #ifdef UA_ENABLE_CUSTOM_LIBC static UA_StatusCode parseUnsignedInteger(char* inputBuffer, size_t sizeOfBuffer, UA_UInt64 *destinationOfNumber) { UA_UInt64 d = 0; atoiUnsigned(inputBuffer, sizeOfBuffer, &d); if(!destinationOfNumber) return UA_STATUSCODE_BADDECODINGERROR; *destinationOfNumber = d; return UA_STATUSCODE_GOOD; } static UA_StatusCode parseSignedInteger(char* inputBuffer, size_t sizeOfBuffer, UA_Int64 *destinationOfNumber) { UA_Int64 d = 0; atoiSigned(inputBuffer, sizeOfBuffer, &d); if(!destinationOfNumber) return UA_STATUSCODE_BADDECODINGERROR; *destinationOfNumber = d; return UA_STATUSCODE_GOOD; } #else /* Safe strtol variant of unsigned string conversion. * Returns UA_STATUSCODE_BADDECODINGERROR in case of overflows. * Buffer limit is 20 digits. */ static UA_StatusCode parseUnsignedInteger(char* inputBuffer, size_t sizeOfBuffer, UA_UInt64 *destinationOfNumber) { /* Check size to avoid huge malicious stack allocation. * No UInt64 can have more digits than 20. */ if(sizeOfBuffer > 20) { return UA_STATUSCODE_BADDECODINGERROR; } /* convert to null terminated string */ UA_STACKARRAY(char, string, sizeOfBuffer+1); memcpy(string, inputBuffer, sizeOfBuffer); string[sizeOfBuffer] = 0; /* Conversion */ char *endptr, *str; str = string; errno = 0; /* To distinguish success/failure after call */ UA_UInt64 val = strtoull(str, &endptr, 10); /* Check for various possible errors */ if((errno == ERANGE && (val == LLONG_MAX || val == 0)) || (errno != 0 )) { return UA_STATUSCODE_BADDECODINGERROR; } /* Check if no digits were found */ if(endptr == str) return UA_STATUSCODE_BADDECODINGERROR; /* copy to destination */ *destinationOfNumber = val; return UA_STATUSCODE_GOOD; } /* Safe strtol variant of unsigned string conversion. * Returns UA_STATUSCODE_BADDECODINGERROR in case of overflows. * Buffer limit is 20 digits. */ static UA_StatusCode parseSignedInteger(char* inputBuffer, size_t sizeOfBuffer, UA_Int64 *destinationOfNumber) { /* Check size to avoid huge malicious stack allocation. * No UInt64 can have more digits than 20. 
*/ if(sizeOfBuffer > 20) return UA_STATUSCODE_BADDECODINGERROR; /* convert to null terminated string */ UA_STACKARRAY(char, string, sizeOfBuffer + 1); memcpy(string, inputBuffer, sizeOfBuffer); string[sizeOfBuffer] = 0; /* Conversion */ char *endptr, *str; str = string; errno = 0; /* To distinguish success/failure after call */ UA_Int64 val = strtoll(str, &endptr, 10); /* Check for various possible errors */ if((errno == ERANGE && (val == LONG_MAX || val == LONG_MIN)) || (errno != 0 )) { return UA_STATUSCODE_BADDECODINGERROR; } /* Check if no digits were found */ if(endptr == str) return UA_STATUSCODE_BADDECODINGERROR; /* copy to destination */ *destinationOfNumber = val; return UA_STATUSCODE_GOOD; } #endif DECODE_JSON(Byte) { CHECK_TOKEN_BOUNDS; CHECK_PRIMITIVE; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); UA_UInt64 out = 0; UA_StatusCode s = parseUnsignedInteger(tokenData, tokenSize, &out); *dst = (UA_Byte)out; if(moveToken) parseCtx->index++; return s; } DECODE_JSON(UInt16) { CHECK_TOKEN_BOUNDS; CHECK_PRIMITIVE; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); UA_UInt64 out = 0; UA_StatusCode s = parseUnsignedInteger(tokenData, tokenSize, &out); *dst = (UA_UInt16)out; if(moveToken) parseCtx->index++; return s; } DECODE_JSON(UInt32) { CHECK_TOKEN_BOUNDS; CHECK_PRIMITIVE; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); UA_UInt64 out = 0; UA_StatusCode s = parseUnsignedInteger(tokenData, tokenSize, &out); *dst = (UA_UInt32)out; if(moveToken) parseCtx->index++; return s; } DECODE_JSON(UInt64) { CHECK_TOKEN_BOUNDS; CHECK_STRING; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); UA_UInt64 out = 0; UA_StatusCode s = parseUnsignedInteger(tokenData, tokenSize, &out); *dst = (UA_UInt64)out; if(moveToken) parseCtx->index++; return s; } DECODE_JSON(SByte) { CHECK_TOKEN_BOUNDS; CHECK_PRIMITIVE; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); UA_Int64 out = 0; UA_StatusCode s = parseSignedInteger(tokenData, tokenSize, &out); *dst = (UA_SByte)out; if(moveToken) parseCtx->index++; return s; } DECODE_JSON(Int16) { CHECK_TOKEN_BOUNDS; CHECK_PRIMITIVE; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); UA_Int64 out = 0; UA_StatusCode s = parseSignedInteger(tokenData, tokenSize, &out); *dst = (UA_Int16)out; if(moveToken) parseCtx->index++; return s; } DECODE_JSON(Int32) { CHECK_TOKEN_BOUNDS; CHECK_PRIMITIVE; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); UA_Int64 out = 0; UA_StatusCode s = parseSignedInteger(tokenData, tokenSize, &out); *dst = (UA_Int32)out; if(moveToken) parseCtx->index++; return s; } DECODE_JSON(Int64) { CHECK_TOKEN_BOUNDS; CHECK_STRING; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); UA_Int64 out = 0; UA_StatusCode s = parseSignedInteger(tokenData, tokenSize, &out); *dst = (UA_Int64)out; if(moveToken) parseCtx->index++; return s; } static UA_UInt32 hex2int(char ch) { if(ch >= '0' && ch <= '9') return (UA_UInt32)(ch - '0'); if(ch >= 'A' && ch <= 'F') return (UA_UInt32)(ch - 'A' + 10); if(ch >= 'a' && ch <= 'f') return (UA_UInt32)(ch - 'a' + 10); return 0; } /* Float * Either a JSMN_STRING or JSMN_PRIMITIVE */ DECODE_JSON(Float) { CHECK_TOKEN_BOUNDS; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); /* https://www.exploringbinary.com/maximum-number-of-decimal-digits-in-binary-floating-point-numbers/ * Maximum digit counts for select IEEE floating-point formats: 149 * Sanity check. 
*/ if(tokenSize > 150) return UA_STATUSCODE_BADDECODINGERROR; jsmntype_t tokenType = getJsmnType(parseCtx); if(tokenType == JSMN_STRING) { /*It could be a String with Nan, Infinity*/ if(tokenSize == 8 && memcmp(tokenData, "Infinity", 8) == 0) { *dst = (UA_Float)INFINITY; return UA_STATUSCODE_GOOD; } if(tokenSize == 9 && memcmp(tokenData, "-Infinity", 9) == 0) { /* workaround an MSVC 2013 issue */ *dst = (UA_Float)-INFINITY; return UA_STATUSCODE_GOOD; } if(tokenSize == 3 && memcmp(tokenData, "NaN", 3) == 0) { *dst = (UA_Float)NAN; return UA_STATUSCODE_GOOD; } if(tokenSize == 4 && memcmp(tokenData, "-NaN", 4) == 0) { *dst = (UA_Float)NAN; return UA_STATUSCODE_GOOD; } return UA_STATUSCODE_BADDECODINGERROR; } if(tokenType != JSMN_PRIMITIVE) return UA_STATUSCODE_BADDECODINGERROR; /* Null-Terminate for sscanf. */ UA_STACKARRAY(char, string, tokenSize+1); memcpy(string, tokenData, tokenSize); string[tokenSize] = 0; UA_Float d = 0; #ifdef UA_ENABLE_CUSTOM_LIBC d = (UA_Float)__floatscan(string, 1, 0); #else char c = 0; /* On success, the function returns the number of variables filled. * In the case of an input failure before any data could be successfully read, EOF is returned. */ int ret = sscanf(string, "%f%c", &d, &c); /* Exactly one var must be filled. %c acts as a guard for wrong input which is accepted by sscanf. E.g. 1.23.45 is not accepted. */ if(ret == EOF || (ret != 1)) return UA_STATUSCODE_BADDECODINGERROR; #endif *dst = d; parseCtx->index++; return UA_STATUSCODE_GOOD; } /* Either a JSMN_STRING or JSMN_PRIMITIVE */ DECODE_JSON(Double) { CHECK_TOKEN_BOUNDS; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); /* https://www.exploringbinary.com/maximum-number-of-decimal-digits-in-binary-floating-point-numbers/ * Maximum digit counts for select IEEE floating-point formats: 1074 * Sanity check. */ if(tokenSize > 1075) return UA_STATUSCODE_BADDECODINGERROR; jsmntype_t tokenType = getJsmnType(parseCtx); if(tokenType == JSMN_STRING) { /*It could be a String with Nan, Infinity*/ if(tokenSize == 8 && memcmp(tokenData, "Infinity", 8) == 0) { *dst = INFINITY; return UA_STATUSCODE_GOOD; } if(tokenSize == 9 && memcmp(tokenData, "-Infinity", 9) == 0) { /* workaround an MSVC 2013 issue */ *dst = -INFINITY; return UA_STATUSCODE_GOOD; } if(tokenSize == 3 && memcmp(tokenData, "NaN", 3) == 0) { *dst = NAN; return UA_STATUSCODE_GOOD; } if(tokenSize == 4 && memcmp(tokenData, "-NaN", 4) == 0) { *dst = NAN; return UA_STATUSCODE_GOOD; } return UA_STATUSCODE_BADDECODINGERROR; } if(tokenType != JSMN_PRIMITIVE) return UA_STATUSCODE_BADDECODINGERROR; /* Null-Terminate for sscanf. Should this better be handled on heap? Max * 1075 input chars allowed. Not using heap. */ UA_STACKARRAY(char, string, tokenSize+1); memcpy(string, tokenData, tokenSize); string[tokenSize] = 0; UA_Double d = 0; #ifdef UA_ENABLE_CUSTOM_LIBC d = (UA_Double)__floatscan(string, 2, 0); #else char c = 0; /* On success, the function returns the number of variables filled. * In the case of an input failure before any data could be successfully read, EOF is returned. */ int ret = sscanf(string, "%lf%c", &d, &c); /* Exactly one var must be filled. %c acts as a guard for wrong input which is accepted by sscanf. E.g. 1.23.45 is not accepted. 
*/ if(ret == EOF || (ret != 1)) return UA_STATUSCODE_BADDECODINGERROR; #endif *dst = d; parseCtx->index++; return UA_STATUSCODE_GOOD; } /* Expects 36 chars in format 00000003-0009-000A-0807-060504030201 | data1| |d2| |d3| |d4| | data4 | */ static UA_Guid UA_Guid_fromChars(const char* chars) { UA_Guid dst; UA_Guid_init(&dst); for(size_t i = 0; i < 8; i++) dst.data1 |= (UA_UInt32)(hex2int(chars[i]) << (28 - (i*4))); for(size_t i = 0; i < 4; i++) { dst.data2 |= (UA_UInt16)(hex2int(chars[9+i]) << (12 - (i*4))); dst.data3 |= (UA_UInt16)(hex2int(chars[14+i]) << (12 - (i*4))); } dst.data4[0] |= (UA_Byte)(hex2int(chars[19]) << 4u); dst.data4[0] |= (UA_Byte)(hex2int(chars[20]) << 0u); dst.data4[1] |= (UA_Byte)(hex2int(chars[21]) << 4u); dst.data4[1] |= (UA_Byte)(hex2int(chars[22]) << 0u); for(size_t i = 0; i < 6; i++) { dst.data4[2+i] |= (UA_Byte)(hex2int(chars[24 + i*2]) << 4u); dst.data4[2+i] |= (UA_Byte)(hex2int(chars[25 + i*2]) << 0u); } return dst; } DECODE_JSON(Guid) { CHECK_STRING; CHECK_TOKEN_BOUNDS; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); if(tokenSize != 36) return UA_STATUSCODE_BADDECODINGERROR; /* check if incorrect chars are present */ for(size_t i = 0; i < tokenSize; i++) { if(!(tokenData[i] == '-' || (tokenData[i] >= '0' && tokenData[i] <= '9') || (tokenData[i] >= 'A' && tokenData[i] <= 'F') || (tokenData[i] >= 'a' && tokenData[i] <= 'f'))) { return UA_STATUSCODE_BADDECODINGERROR; } } *dst = UA_Guid_fromChars(tokenData); if(moveToken) parseCtx->index++; return UA_STATUSCODE_GOOD; } DECODE_JSON(String) { ALLOW_NULL; CHECK_STRING; CHECK_TOKEN_BOUNDS; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); /* Empty string? */ if(tokenSize == 0) { dst->data = (UA_Byte*)UA_EMPTY_ARRAY_SENTINEL; dst->length = 0; if(moveToken) parseCtx->index++; return UA_STATUSCODE_GOOD; } /* The actual value is at most of the same length as the source string: * - Shortcut escapes (e.g. 
"\t") (length 2) are converted to 1 byte * - A single \uXXXX escape (length 6) is converted to at most 3 bytes * - Two \uXXXX escapes (length 12) forming an UTF-16 surrogate pair are * converted to 4 bytes */ char *outputBuffer = (char*)UA_malloc(tokenSize); if(!outputBuffer) return UA_STATUSCODE_BADOUTOFMEMORY; const char *p = (char*)tokenData; const char *end = (char*)&tokenData[tokenSize]; char *pos = outputBuffer; while(p < end) { /* No escaping */ if(*p != '\\') { *(pos++) = *(p++); continue; } /* Escape character */ p++; if(p == end) goto cleanup; if(*p != 'u') { switch(*p) { case '"': case '\\': case '/': *pos = *p; break; case 'b': *pos = '\b'; break; case 'f': *pos = '\f'; break; case 'n': *pos = '\n'; break; case 'r': *pos = '\r'; break; case 't': *pos = '\t'; break; default: goto cleanup; } pos++; p++; continue; } /* Unicode */ if(p + 4 >= end) goto cleanup; int32_t value_signed = decode_unicode_escape(p); if(value_signed < 0) goto cleanup; uint32_t value = (uint32_t)value_signed; p += 5; if(0xD800 <= value && value <= 0xDBFF) { /* Surrogate pair */ if(p + 5 >= end) goto cleanup; if(*p != '\\' || *(p + 1) != 'u') goto cleanup; int32_t value2 = decode_unicode_escape(p + 1); if(value2 < 0xDC00 || value2 > 0xDFFF) goto cleanup; value = ((value - 0xD800u) << 10u) + (uint32_t)((value2 - 0xDC00) + 0x10000); p += 6; } else if(0xDC00 <= value && value <= 0xDFFF) { /* Invalid Unicode '\\u%04X' */ goto cleanup; } size_t length; if(utf8_encode((int32_t)value, pos, &length)) goto cleanup; pos += length; } dst->length = (size_t)(pos - outputBuffer); if(dst->length > 0) { dst->data = (UA_Byte*)outputBuffer; } else { dst->data = (UA_Byte*)UA_EMPTY_ARRAY_SENTINEL; UA_free(outputBuffer); } if(moveToken) parseCtx->index++; return UA_STATUSCODE_GOOD; cleanup: UA_free(outputBuffer); return UA_STATUSCODE_BADDECODINGERROR; } DECODE_JSON(ByteString) { ALLOW_NULL; CHECK_STRING; CHECK_TOKEN_BOUNDS; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); /* Empty bytestring? */ if(tokenSize == 0) { dst->data = (UA_Byte*)UA_EMPTY_ARRAY_SENTINEL; dst->length = 0; return UA_STATUSCODE_GOOD; } size_t flen = 0; unsigned char* unB64 = UA_unbase64((unsigned char*)tokenData, tokenSize, &flen); if(unB64 == 0) return UA_STATUSCODE_BADDECODINGERROR; dst->data = (u8*)unB64; dst->length = flen; if(moveToken) parseCtx->index++; return UA_STATUSCODE_GOOD; } DECODE_JSON(LocalizedText) { ALLOW_NULL; CHECK_OBJECT; DecodeEntry entries[2] = { {UA_JSONKEY_LOCALE, &dst->locale, (decodeJsonSignature) String_decodeJson, false, NULL}, {UA_JSONKEY_TEXT, &dst->text, (decodeJsonSignature) String_decodeJson, false, NULL} }; return decodeFields(ctx, parseCtx, entries, 2, type); } DECODE_JSON(QualifiedName) { ALLOW_NULL; CHECK_OBJECT; DecodeEntry entries[2] = { {UA_JSONKEY_NAME, &dst->name, (decodeJsonSignature) String_decodeJson, false, NULL}, {UA_JSONKEY_URI, &dst->namespaceIndex, (decodeJsonSignature) UInt16_decodeJson, false, NULL} }; return decodeFields(ctx, parseCtx, entries, 2, type); } /* Function for searching ahead of the current token. 
Used for retrieving the * OPC UA type of a token */ static status searchObjectForKeyRec(const char *searchKey, CtxJson *ctx, ParseCtx *parseCtx, size_t *resultIndex, UA_UInt16 depth) { UA_StatusCode ret = UA_STATUSCODE_BADNOTFOUND; CHECK_TOKEN_BOUNDS; if(parseCtx->tokenArray[parseCtx->index].type == JSMN_OBJECT) { size_t objectCount = (size_t)parseCtx->tokenArray[parseCtx->index].size; parseCtx->index++; /*Object to first Key*/ for(size_t i = 0; i < objectCount; i++) { CHECK_TOKEN_BOUNDS; if(depth == 0) { /* we search only on first layer */ if(jsoneq((char*)ctx->pos, &parseCtx->tokenArray[parseCtx->index], searchKey) == 0) { /*found*/ parseCtx->index++; /*We give back a pointer to the value of the searched key!*/ if (parseCtx->index >= parseCtx->tokenCount) /* We got invalid json. See https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=14620 */ return UA_STATUSCODE_BADOUTOFRANGE; *resultIndex = parseCtx->index; return UA_STATUSCODE_GOOD; } } parseCtx->index++; /* value */ CHECK_TOKEN_BOUNDS; if(parseCtx->tokenArray[parseCtx->index].type == JSMN_OBJECT) { ret = searchObjectForKeyRec(searchKey, ctx, parseCtx, resultIndex, (UA_UInt16)(depth + 1)); } else if(parseCtx->tokenArray[parseCtx->index].type == JSMN_ARRAY) { ret = searchObjectForKeyRec(searchKey, ctx, parseCtx, resultIndex, (UA_UInt16)(depth + 1)); } else { /* Only Primitive or string */ parseCtx->index++; } } } else if(parseCtx->tokenArray[parseCtx->index].type == JSMN_ARRAY) { size_t arraySize = (size_t)parseCtx->tokenArray[parseCtx->index].size; parseCtx->index++; /*Object to first element*/ for(size_t i = 0; i < arraySize; i++) { CHECK_TOKEN_BOUNDS; if(parseCtx->tokenArray[parseCtx->index].type == JSMN_OBJECT) { ret = searchObjectForKeyRec(searchKey, ctx, parseCtx, resultIndex, (UA_UInt16)(depth + 1)); } else if(parseCtx->tokenArray[parseCtx->index].type == JSMN_ARRAY) { ret = searchObjectForKeyRec(searchKey, ctx, parseCtx, resultIndex, (UA_UInt16)(depth + 1)); } else { /* Only Primitive or string */ parseCtx->index++; } } } return ret; } UA_FUNC_ATTR_WARN_UNUSED_RESULT status lookAheadForKey(const char* search, CtxJson *ctx, ParseCtx *parseCtx, size_t *resultIndex) { UA_UInt16 oldIndex = parseCtx->index; /* Save index for later restore */ UA_UInt16 depth = 0; UA_StatusCode ret = searchObjectForKeyRec(search, ctx, parseCtx, resultIndex, depth); parseCtx->index = oldIndex; /* Restore index */ return ret; } /* Function used to jump over an object which cannot be parsed */ static status jumpOverRec(CtxJson *ctx, ParseCtx *parseCtx, size_t *resultIndex, UA_UInt16 depth) { UA_StatusCode ret = UA_STATUSCODE_BADDECODINGERROR; CHECK_TOKEN_BOUNDS; if(parseCtx->tokenArray[parseCtx->index].type == JSMN_OBJECT) { size_t objectCount = (size_t)(parseCtx->tokenArray[parseCtx->index].size); parseCtx->index++; /*Object to first Key*/ CHECK_TOKEN_BOUNDS; size_t i; for(i = 0; i < objectCount; i++) { CHECK_TOKEN_BOUNDS; parseCtx->index++; /*value*/ CHECK_TOKEN_BOUNDS; if(parseCtx->tokenArray[parseCtx->index].type == JSMN_OBJECT) { jumpOverRec(ctx, parseCtx, resultIndex, (UA_UInt16)(depth + 1)); } else if(parseCtx->tokenArray[parseCtx->index].type == JSMN_ARRAY) { jumpOverRec(ctx, parseCtx, resultIndex, (UA_UInt16)(depth + 1)); } else { /*Only Primitive or string*/ parseCtx->index++; } } } else if(parseCtx->tokenArray[parseCtx->index].type == JSMN_ARRAY) { size_t arraySize = (size_t)(parseCtx->tokenArray[parseCtx->index].size); parseCtx->index++; /*Object to first element*/ CHECK_TOKEN_BOUNDS; size_t i; for(i = 0; i < arraySize; i++) { 
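/* For each array element: recurse into nested objects/arrays, otherwise step over the primitive/string token */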
if(parseCtx->tokenArray[parseCtx->index].type == JSMN_OBJECT) { jumpOverRec(ctx, parseCtx, resultIndex, (UA_UInt16)(depth + 1)); } else if(parseCtx->tokenArray[parseCtx->index].type == JSMN_ARRAY) { jumpOverRec(ctx, parseCtx, resultIndex, (UA_UInt16)(depth + 1)); } else { /*Only Primitive or string*/ parseCtx->index++; } } } return ret; } static status jumpOverObject(CtxJson *ctx, ParseCtx *parseCtx, size_t *resultIndex) { UA_UInt16 oldIndex = parseCtx->index; /* Save index for later restore */ UA_UInt16 depth = 0; jumpOverRec(ctx, parseCtx, resultIndex, depth); *resultIndex = parseCtx->index; parseCtx->index = oldIndex; /* Restore index */ return UA_STATUSCODE_GOOD; } static status prepareDecodeNodeIdJson(UA_NodeId *dst, CtxJson *ctx, ParseCtx *parseCtx, u8 *fieldCount, DecodeEntry *entries) { /* possible keys: Id, IdType*/ /* Id must always be present */ entries[*fieldCount].fieldName = UA_JSONKEY_ID; entries[*fieldCount].found = false; entries[*fieldCount].type = NULL; /* IdType */ UA_Boolean hasIdType = false; size_t searchResult = 0; status ret = lookAheadForKey(UA_JSONKEY_IDTYPE, ctx, parseCtx, &searchResult); if(ret == UA_STATUSCODE_GOOD) { /*found*/ hasIdType = true; } if(hasIdType) { size_t size = (size_t)(parseCtx->tokenArray[searchResult].end - parseCtx->tokenArray[searchResult].start); if(size < 1) { return UA_STATUSCODE_BADDECODINGERROR; } char *idType = (char*)(ctx->pos + parseCtx->tokenArray[searchResult].start); if(idType[0] == '2') { dst->identifierType = UA_NODEIDTYPE_GUID; entries[*fieldCount].fieldPointer = &dst->identifier.guid; entries[*fieldCount].function = (decodeJsonSignature) Guid_decodeJson; } else if(idType[0] == '1') { dst->identifierType = UA_NODEIDTYPE_STRING; entries[*fieldCount].fieldPointer = &dst->identifier.string; entries[*fieldCount].function = (decodeJsonSignature) String_decodeJson; } else if(idType[0] == '3') { dst->identifierType = UA_NODEIDTYPE_BYTESTRING; entries[*fieldCount].fieldPointer = &dst->identifier.byteString; entries[*fieldCount].function = (decodeJsonSignature) ByteString_decodeJson; } else { return UA_STATUSCODE_BADDECODINGERROR; } /* Id always present */ (*fieldCount)++; entries[*fieldCount].fieldName = UA_JSONKEY_IDTYPE; entries[*fieldCount].fieldPointer = NULL; entries[*fieldCount].function = NULL; entries[*fieldCount].found = false; entries[*fieldCount].type = NULL; /* IdType */ (*fieldCount)++; } else { dst->identifierType = UA_NODEIDTYPE_NUMERIC; entries[*fieldCount].fieldPointer = &dst->identifier.numeric; entries[*fieldCount].function = (decodeJsonSignature) UInt32_decodeJson; entries[*fieldCount].type = NULL; (*fieldCount)++; } return UA_STATUSCODE_GOOD; } DECODE_JSON(NodeId) { ALLOW_NULL; CHECK_OBJECT; /* NameSpace */ UA_Boolean hasNamespace = false; size_t searchResultNamespace = 0; status ret = lookAheadForKey(UA_JSONKEY_NAMESPACE, ctx, parseCtx, &searchResultNamespace); if(ret != UA_STATUSCODE_GOOD) { dst->namespaceIndex = 0; } else { hasNamespace = true; } /* Keep track over number of keys present, incremented if key found */ u8 fieldCount = 0; DecodeEntry entries[3]; ret = prepareDecodeNodeIdJson(dst, ctx, parseCtx, &fieldCount, entries); if(ret != UA_STATUSCODE_GOOD) return ret; if(hasNamespace) { entries[fieldCount].fieldName = UA_JSONKEY_NAMESPACE; entries[fieldCount].fieldPointer = &dst->namespaceIndex; entries[fieldCount].function = (decodeJsonSignature) UInt16_decodeJson; entries[fieldCount].found = false; entries[fieldCount].type = NULL; fieldCount++; } else { dst->namespaceIndex = 0; } ret = decodeFields(ctx, 
parseCtx, entries, fieldCount, type); return ret; } DECODE_JSON(ExpandedNodeId) { ALLOW_NULL; CHECK_OBJECT; /* Keep track over number of keys present, incremented if key found */ u8 fieldCount = 0; /* ServerUri */ UA_Boolean hasServerUri = false; size_t searchResultServerUri = 0; status ret = lookAheadForKey(UA_JSONKEY_SERVERURI, ctx, parseCtx, &searchResultServerUri); if(ret != UA_STATUSCODE_GOOD) { dst->serverIndex = 0; } else { hasServerUri = true; } /* NameSpace */ UA_Boolean hasNamespace = false; UA_Boolean isNamespaceString = false; size_t searchResultNamespace = 0; ret = lookAheadForKey(UA_JSONKEY_NAMESPACE, ctx, parseCtx, &searchResultNamespace); if(ret != UA_STATUSCODE_GOOD) { dst->namespaceUri = UA_STRING_NULL; } else { hasNamespace = true; jsmntok_t nsToken = parseCtx->tokenArray[searchResultNamespace]; if(nsToken.type == JSMN_STRING) isNamespaceString = true; } DecodeEntry entries[4]; ret = prepareDecodeNodeIdJson(&dst->nodeId, ctx, parseCtx, &fieldCount, entries); if(ret != UA_STATUSCODE_GOOD) return ret; if(hasNamespace) { entries[fieldCount].fieldName = UA_JSONKEY_NAMESPACE; if(isNamespaceString) { entries[fieldCount].fieldPointer = &dst->namespaceUri; entries[fieldCount].function = (decodeJsonSignature) String_decodeJson; } else { entries[fieldCount].fieldPointer = &dst->nodeId.namespaceIndex; entries[fieldCount].function = (decodeJsonSignature) UInt16_decodeJson; } entries[fieldCount].found = false; entries[fieldCount].type = NULL; fieldCount++; } if(hasServerUri) { entries[fieldCount].fieldName = UA_JSONKEY_SERVERURI; entries[fieldCount].fieldPointer = &dst->serverIndex; entries[fieldCount].function = (decodeJsonSignature) UInt32_decodeJson; entries[fieldCount].found = false; entries[fieldCount].type = NULL; fieldCount++; } else { dst->serverIndex = 0; } return decodeFields(ctx, parseCtx, entries, fieldCount, type); } DECODE_JSON(DateTime) { CHECK_STRING; CHECK_TOKEN_BOUNDS; size_t tokenSize; char* tokenData; GET_TOKEN(tokenData, tokenSize); /* TODO: proper ISO 8601:2004 parsing, musl strptime!*/ /* DateTime ISO 8601:2004 without milli is 20 Characters, with millis 24 */ if(tokenSize != 20 && tokenSize != 24) { return UA_STATUSCODE_BADDECODINGERROR; } /* sanity check */ if(tokenData[4] != '-' || tokenData[7] != '-' || tokenData[10] != 'T' || tokenData[13] != ':' || tokenData[16] != ':' || !(tokenData[19] == 'Z' || tokenData[19] == '.')) { return UA_STATUSCODE_BADDECODINGERROR; } struct mytm dts; memset(&dts, 0, sizeof(dts)); UA_UInt64 year = 0; atoiUnsigned(&tokenData[0], 4, &year); dts.tm_year = (UA_UInt16)year - 1900; UA_UInt64 month = 0; atoiUnsigned(&tokenData[5], 2, &month); dts.tm_mon = (UA_UInt16)month - 1; UA_UInt64 day = 0; atoiUnsigned(&tokenData[8], 2, &day); dts.tm_mday = (UA_UInt16)day; UA_UInt64 hour = 0; atoiUnsigned(&tokenData[11], 2, &hour); dts.tm_hour = (UA_UInt16)hour; UA_UInt64 min = 0; atoiUnsigned(&tokenData[14], 2, &min); dts.tm_min = (UA_UInt16)min; UA_UInt64 sec = 0; atoiUnsigned(&tokenData[17], 2, &sec); dts.tm_sec = (UA_UInt16)sec; UA_UInt64 msec = 0; if(tokenSize == 24) { atoiUnsigned(&tokenData[20], 3, &msec); } long long sinceunix = __tm_to_secs(&dts); UA_DateTime dt = (UA_DateTime)((UA_UInt64)(sinceunix*UA_DATETIME_SEC + UA_DATETIME_UNIX_EPOCH) + (UA_UInt64)(UA_DATETIME_MSEC * msec)); *dst = dt; if(moveToken) parseCtx->index++; return UA_STATUSCODE_GOOD; } DECODE_JSON(StatusCode) { status ret = DECODE_DIRECT_JSON(dst, UInt32); if(ret != UA_STATUSCODE_GOOD) return ret; if(moveToken) parseCtx->index++; return UA_STATUSCODE_GOOD; } static 
status VariantDimension_decodeJson(void * dst, const UA_DataType *type, CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken) { (void) type; const UA_DataType *dimType = &UA_TYPES[UA_TYPES_UINT32]; return Array_decodeJson_internal((void**)dst, dimType, ctx, parseCtx, moveToken); } DECODE_JSON(Variant) { ALLOW_NULL; CHECK_OBJECT; /* First search for the variant type in the json object. */ size_t searchResultType = 0; status ret = lookAheadForKey(UA_JSONKEY_TYPE, ctx, parseCtx, &searchResultType); if(ret != UA_STATUSCODE_GOOD) { skipObject(parseCtx); return UA_STATUSCODE_GOOD; } size_t size = ((size_t)parseCtx->tokenArray[searchResultType].end - (size_t)parseCtx->tokenArray[searchResultType].start); /* check if size is zero or the type is not a number */ if(size < 1 || parseCtx->tokenArray[searchResultType].type != JSMN_PRIMITIVE) return UA_STATUSCODE_BADDECODINGERROR; /* Parse the type */ UA_UInt64 idTypeDecoded = 0; char *idTypeEncoded = (char*)(ctx->pos + parseCtx->tokenArray[searchResultType].start); status typeDecodeStatus = atoiUnsigned(idTypeEncoded, size, &idTypeDecoded); if(typeDecodeStatus != UA_STATUSCODE_GOOD) return typeDecodeStatus; /* A NULL Variant */ if(idTypeDecoded == 0) { skipObject(parseCtx); return UA_STATUSCODE_GOOD; } /* Set the type */ UA_NodeId typeNodeId = UA_NODEID_NUMERIC(0, (UA_UInt32)idTypeDecoded); dst->type = UA_findDataType(&typeNodeId); if(!dst->type) return UA_STATUSCODE_BADDECODINGERROR; /* Search for body */ size_t searchResultBody = 0; ret = lookAheadForKey(UA_JSONKEY_BODY, ctx, parseCtx, &searchResultBody); if(ret != UA_STATUSCODE_GOOD) { /*TODO: no body? set value NULL?*/ return UA_STATUSCODE_BADDECODINGERROR; } /* value is an array? */ UA_Boolean isArray = false; if(parseCtx->tokenArray[searchResultBody].type == JSMN_ARRAY) { isArray = true; dst->arrayLength = (size_t)parseCtx->tokenArray[searchResultBody].size; } /* Has the variant dimension? */ UA_Boolean hasDimension = false; size_t searchResultDim = 0; ret = lookAheadForKey(UA_JSONKEY_DIMENSION, ctx, parseCtx, &searchResultDim); if(ret == UA_STATUSCODE_GOOD) { hasDimension = true; dst->arrayDimensionsSize = (size_t)parseCtx->tokenArray[searchResultDim].size; } /* no array but has dimension. error? */ if(!isArray && hasDimension) return UA_STATUSCODE_BADDECODINGERROR; /* Get the datatype of the content. The type must be a builtin data type. * All not-builtin types are wrapped in an ExtensionObject. */ if(dst->type->typeKind > UA_TYPES_DIAGNOSTICINFO) return UA_STATUSCODE_BADDECODINGERROR; /* A variant cannot contain a variant. 
But it can contain an array of * variants */ if(dst->type->typeKind == UA_DATATYPEKIND_VARIANT && !isArray) return UA_STATUSCODE_BADDECODINGERROR; /* Decode an array */ if(isArray) { DecodeEntry entries[3] = { {UA_JSONKEY_TYPE, NULL, NULL, false, NULL}, {UA_JSONKEY_BODY, &dst->data, (decodeJsonSignature) Array_decodeJson, false, NULL}, {UA_JSONKEY_DIMENSION, &dst->arrayDimensions, (decodeJsonSignature) VariantDimension_decodeJson, false, NULL}}; if(!hasDimension) { ret = decodeFields(ctx, parseCtx, entries, 2, dst->type); /*use first 2 fields*/ } else { ret = decodeFields(ctx, parseCtx, entries, 3, dst->type); /*use all fields*/ } return ret; } /* Decode a value wrapped in an ExtensionObject */ if(dst->type->typeKind == UA_DATATYPEKIND_EXTENSIONOBJECT) { DecodeEntry entries[2] = {{UA_JSONKEY_TYPE, NULL, NULL, false, NULL}, {UA_JSONKEY_BODY, dst, (decodeJsonSignature)Variant_decodeJsonUnwrapExtensionObject, false, NULL}}; return decodeFields(ctx, parseCtx, entries, 2, dst->type); } /* Allocate Memory for Body */ dst->data = UA_new(dst->type); if(!dst->data) return UA_STATUSCODE_BADOUTOFMEMORY; DecodeEntry entries[2] = {{UA_JSONKEY_TYPE, NULL, NULL, false, NULL}, {UA_JSONKEY_BODY, dst->data, (decodeJsonSignature) decodeJsonInternal, false, NULL}}; return decodeFields(ctx, parseCtx, entries, 2, dst->type); } DECODE_JSON(DataValue) { ALLOW_NULL; CHECK_OBJECT; DecodeEntry entries[6] = { {UA_JSONKEY_VALUE, &dst->value, (decodeJsonSignature) Variant_decodeJson, false, NULL}, {UA_JSONKEY_STATUS, &dst->status, (decodeJsonSignature) StatusCode_decodeJson, false, NULL}, {UA_JSONKEY_SOURCETIMESTAMP, &dst->sourceTimestamp, (decodeJsonSignature) DateTime_decodeJson, false, NULL}, {UA_JSONKEY_SOURCEPICOSECONDS, &dst->sourcePicoseconds, (decodeJsonSignature) UInt16_decodeJson, false, NULL}, {UA_JSONKEY_SERVERTIMESTAMP, &dst->serverTimestamp, (decodeJsonSignature) DateTime_decodeJson, false, NULL}, {UA_JSONKEY_SERVERPICOSECONDS, &dst->serverPicoseconds, (decodeJsonSignature) UInt16_decodeJson, false, NULL}}; status ret = decodeFields(ctx, parseCtx, entries, 6, type); dst->hasValue = entries[0].found; dst->hasStatus = entries[1].found; dst->hasSourceTimestamp = entries[2].found; dst->hasSourcePicoseconds = entries[3].found; dst->hasServerTimestamp = entries[4].found; dst->hasServerPicoseconds = entries[5].found; return ret; } DECODE_JSON(ExtensionObject) { ALLOW_NULL; CHECK_OBJECT; /* Search for Encoding */ size_t searchEncodingResult = 0; status ret = lookAheadForKey(UA_JSONKEY_ENCODING, ctx, parseCtx, &searchEncodingResult); /* If no encoding found it is structure encoding */ if(ret != UA_STATUSCODE_GOOD) { UA_NodeId typeId; UA_NodeId_init(&typeId); size_t searchTypeIdResult = 0; ret = lookAheadForKey(UA_JSONKEY_TYPEID, ctx, parseCtx, &searchTypeIdResult); if(ret != UA_STATUSCODE_GOOD) { /* TYPEID not found, abort */ return UA_STATUSCODE_BADENCODINGERROR; } /* parse the nodeid */ /*for restore*/ UA_UInt16 index = parseCtx->index; parseCtx->index = (UA_UInt16)searchTypeIdResult; ret = NodeId_decodeJson(&typeId, &UA_TYPES[UA_TYPES_NODEID], ctx, parseCtx, true); if(ret != UA_STATUSCODE_GOOD) return ret; /*restore*/ parseCtx->index = index; const UA_DataType *typeOfBody = UA_findDataType(&typeId); if(!typeOfBody) { /*dont decode body: 1. save as bytestring, 2. 
jump over*/ dst->encoding = UA_EXTENSIONOBJECT_ENCODED_BYTESTRING; UA_NodeId_copy(&typeId, &dst->content.encoded.typeId); /*Check if Object in Extentionobject*/ if(getJsmnType(parseCtx) != JSMN_OBJECT) { UA_NodeId_deleteMembers(&typeId); return UA_STATUSCODE_BADDECODINGERROR; } /*Search for Body to save*/ size_t searchBodyResult = 0; ret = lookAheadForKey(UA_JSONKEY_BODY, ctx, parseCtx, &searchBodyResult); if(ret != UA_STATUSCODE_GOOD) { /*No Body*/ UA_NodeId_deleteMembers(&typeId); return UA_STATUSCODE_BADDECODINGERROR; } if(searchBodyResult >= (size_t)parseCtx->tokenCount) { /*index not in Tokenarray*/ UA_NodeId_deleteMembers(&typeId); return UA_STATUSCODE_BADDECODINGERROR; } /* Get the size of the Object as a string, not the Object key count! */ UA_Int64 sizeOfJsonString =(parseCtx->tokenArray[searchBodyResult].end - parseCtx->tokenArray[searchBodyResult].start); char* bodyJsonString = (char*)(ctx->pos + parseCtx->tokenArray[searchBodyResult].start); if(sizeOfJsonString <= 0) { UA_NodeId_deleteMembers(&typeId); return UA_STATUSCODE_BADDECODINGERROR; } /* Save encoded as bytestring. */ ret = UA_ByteString_allocBuffer(&dst->content.encoded.body, (size_t)sizeOfJsonString); if(ret != UA_STATUSCODE_GOOD) { UA_NodeId_deleteMembers(&typeId); return ret; } memcpy(dst->content.encoded.body.data, bodyJsonString, (size_t)sizeOfJsonString); size_t tokenAfteExtensionObject = 0; jumpOverObject(ctx, parseCtx, &tokenAfteExtensionObject); if(tokenAfteExtensionObject == 0) { /*next object token not found*/ UA_NodeId_deleteMembers(&typeId); UA_ByteString_deleteMembers(&dst->content.encoded.body); return UA_STATUSCODE_BADDECODINGERROR; } parseCtx->index = (UA_UInt16)tokenAfteExtensionObject; return UA_STATUSCODE_GOOD; } /*Type id not used anymore, typeOfBody has type*/ UA_NodeId_deleteMembers(&typeId); /*Set Found Type*/ dst->content.decoded.type = typeOfBody; dst->encoding = UA_EXTENSIONOBJECT_DECODED; if(searchTypeIdResult != 0) { dst->content.decoded.data = UA_new(typeOfBody); if(!dst->content.decoded.data) return UA_STATUSCODE_BADOUTOFMEMORY; UA_NodeId typeId_dummy; DecodeEntry entries[2] = { {UA_JSONKEY_TYPEID, &typeId_dummy, (decodeJsonSignature) NodeId_decodeJson, false, NULL}, {UA_JSONKEY_BODY, dst->content.decoded.data, (decodeJsonSignature) decodeJsonJumpTable[typeOfBody->typeKind], false, NULL} }; return decodeFields(ctx, parseCtx, entries, 2, typeOfBody); } else { return UA_STATUSCODE_BADDECODINGERROR; } } else { /* UA_JSONKEY_ENCODING found */ /*Parse the encoding*/ UA_UInt64 encoding = 0; char *extObjEncoding = (char*)(ctx->pos + parseCtx->tokenArray[searchEncodingResult].start); size_t size = (size_t)(parseCtx->tokenArray[searchEncodingResult].end - parseCtx->tokenArray[searchEncodingResult].start); atoiUnsigned(extObjEncoding, size, &encoding); if(encoding == 1) { /* BYTESTRING in Json Body */ dst->encoding = UA_EXTENSIONOBJECT_ENCODED_BYTESTRING; UA_UInt16 encodingTypeJson; DecodeEntry entries[3] = { {UA_JSONKEY_ENCODING, &encodingTypeJson, (decodeJsonSignature) UInt16_decodeJson, false, NULL}, {UA_JSONKEY_BODY, &dst->content.encoded.body, (decodeJsonSignature) String_decodeJson, false, NULL}, {UA_JSONKEY_TYPEID, &dst->content.encoded.typeId, (decodeJsonSignature) NodeId_decodeJson, false, NULL} }; return decodeFields(ctx, parseCtx, entries, 3, type); } else if(encoding == 2) { /* XmlElement in Json Body */ dst->encoding = UA_EXTENSIONOBJECT_ENCODED_XML; UA_UInt16 encodingTypeJson; DecodeEntry entries[3] = { {UA_JSONKEY_ENCODING, &encodingTypeJson, (decodeJsonSignature) UInt16_decodeJson, 
false, NULL}, {UA_JSONKEY_BODY, &dst->content.encoded.body, (decodeJsonSignature) String_decodeJson, false, NULL}, {UA_JSONKEY_TYPEID, &dst->content.encoded.typeId, (decodeJsonSignature) NodeId_decodeJson, false, NULL} }; return decodeFields(ctx, parseCtx, entries, 3, type); } else { return UA_STATUSCODE_BADDECODINGERROR; } } return UA_STATUSCODE_BADNOTIMPLEMENTED; } static status Variant_decodeJsonUnwrapExtensionObject(UA_Variant *dst, const UA_DataType *type, CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken) { (void) type, (void) moveToken; /*EXTENSIONOBJECT POSITION!*/ UA_UInt16 old_index = parseCtx->index; UA_Boolean typeIdFound; /* Decode the DataType */ UA_NodeId typeId; UA_NodeId_init(&typeId); size_t searchTypeIdResult = 0; status ret = lookAheadForKey(UA_JSONKEY_TYPEID, ctx, parseCtx, &searchTypeIdResult); if(ret != UA_STATUSCODE_GOOD) { /*No Typeid found*/ typeIdFound = false; /*return UA_STATUSCODE_BADDECODINGERROR;*/ } else { typeIdFound = true; /* parse the nodeid */ parseCtx->index = (UA_UInt16)searchTypeIdResult; ret = NodeId_decodeJson(&typeId, &UA_TYPES[UA_TYPES_NODEID], ctx, parseCtx, true); if(ret != UA_STATUSCODE_GOOD) { UA_NodeId_deleteMembers(&typeId); return ret; } /*restore index, ExtensionObject position*/ parseCtx->index = old_index; } /* ---Decode the EncodingByte--- */ if(!typeIdFound) return UA_STATUSCODE_BADDECODINGERROR; UA_Boolean encodingFound = false; /*Search for Encoding*/ size_t searchEncodingResult = 0; ret = lookAheadForKey(UA_JSONKEY_ENCODING, ctx, parseCtx, &searchEncodingResult); UA_UInt64 encoding = 0; /*If no encoding found it is Structure encoding*/ if(ret == UA_STATUSCODE_GOOD) { /*FOUND*/ encodingFound = true; char *extObjEncoding = (char*)(ctx->pos + parseCtx->tokenArray[searchEncodingResult].start); size_t size = (size_t)(parseCtx->tokenArray[searchEncodingResult].end - parseCtx->tokenArray[searchEncodingResult].start); atoiUnsigned(extObjEncoding, size, &encoding); } const UA_DataType *typeOfBody = UA_findDataType(&typeId); if(encoding == 0 || typeOfBody != NULL) { /*This value is 0 if the body is Structure encoded as a JSON object (see 5.4.6).*/ /* Found a valid type and it is structure encoded so it can be unwrapped */ if (typeOfBody == NULL) return UA_STATUSCODE_BADDECODINGERROR; dst->type = typeOfBody; /* Allocate memory for type*/ dst->data = UA_new(dst->type); if(!dst->data) { UA_NodeId_deleteMembers(&typeId); return UA_STATUSCODE_BADOUTOFMEMORY; } /* Decode the content */ UA_NodeId nodeIddummy; DecodeEntry entries[3] = { {UA_JSONKEY_TYPEID, &nodeIddummy, (decodeJsonSignature) NodeId_decodeJson, false, NULL}, {UA_JSONKEY_BODY, dst->data, (decodeJsonSignature) decodeJsonJumpTable[dst->type->typeKind], false, NULL}, {UA_JSONKEY_ENCODING, NULL, NULL, false, NULL}}; ret = decodeFields(ctx, parseCtx, entries, encodingFound ? 3:2, typeOfBody); if(ret != UA_STATUSCODE_GOOD) { UA_free(dst->data); dst->data = NULL; } } else if(encoding == 1 || encoding == 2 || typeOfBody == NULL) { UA_NodeId_deleteMembers(&typeId); /* decode as ExtensionObject */ dst->type = &UA_TYPES[UA_TYPES_EXTENSIONOBJECT]; /* Allocate memory for extensionobject*/ dst->data = UA_new(dst->type); if(!dst->data) return UA_STATUSCODE_BADOUTOFMEMORY; /* decode: Does not move tokenindex. 
*/ ret = DECODE_DIRECT_JSON(dst->data, ExtensionObject); if(ret != UA_STATUSCODE_GOOD) { UA_free(dst->data); dst->data = NULL; } } else { /*no recognized encoding type*/ return UA_STATUSCODE_BADDECODINGERROR; } return ret; } status DiagnosticInfoInner_decodeJson(void* dst, const UA_DataType* type, CtxJson* ctx, ParseCtx* parseCtx, UA_Boolean moveToken); DECODE_JSON(DiagnosticInfo) { ALLOW_NULL; CHECK_OBJECT; DecodeEntry entries[7] = { {UA_JSONKEY_SYMBOLICID, &dst->symbolicId, (decodeJsonSignature) Int32_decodeJson, false, NULL}, {UA_JSONKEY_NAMESPACEURI, &dst->namespaceUri, (decodeJsonSignature) Int32_decodeJson, false, NULL}, {UA_JSONKEY_LOCALIZEDTEXT, &dst->localizedText, (decodeJsonSignature) Int32_decodeJson, false, NULL}, {UA_JSONKEY_LOCALE, &dst->locale, (decodeJsonSignature) Int32_decodeJson, false, NULL}, {UA_JSONKEY_ADDITIONALINFO, &dst->additionalInfo, (decodeJsonSignature) String_decodeJson, false, NULL}, {UA_JSONKEY_INNERSTATUSCODE, &dst->innerStatusCode, (decodeJsonSignature) StatusCode_decodeJson, false, NULL}, {UA_JSONKEY_INNERDIAGNOSTICINFO, &dst->innerDiagnosticInfo, (decodeJsonSignature) DiagnosticInfoInner_decodeJson, false, NULL}}; status ret = decodeFields(ctx, parseCtx, entries, 7, type); dst->hasSymbolicId = entries[0].found; dst->hasNamespaceUri = entries[1].found; dst->hasLocalizedText = entries[2].found; dst->hasLocale = entries[3].found; dst->hasAdditionalInfo = entries[4].found; dst->hasInnerStatusCode = entries[5].found; dst->hasInnerDiagnosticInfo = entries[6].found; return ret; } status DiagnosticInfoInner_decodeJson(void* dst, const UA_DataType* type, CtxJson* ctx, ParseCtx* parseCtx, UA_Boolean moveToken) { UA_DiagnosticInfo *inner = (UA_DiagnosticInfo*)UA_calloc(1, sizeof(UA_DiagnosticInfo)); if(inner == NULL) { return UA_STATUSCODE_BADOUTOFMEMORY; } memcpy(dst, &inner, sizeof(UA_DiagnosticInfo*)); /* Copy new Pointer do dest */ return DiagnosticInfo_decodeJson(inner, type, ctx, parseCtx, moveToken); } status decodeFields(CtxJson *ctx, ParseCtx *parseCtx, DecodeEntry *entries, size_t entryCount, const UA_DataType *type) { CHECK_TOKEN_BOUNDS; size_t objectCount = (size_t)(parseCtx->tokenArray[parseCtx->index].size); status ret = UA_STATUSCODE_GOOD; if(entryCount == 1) { if(*(entries[0].fieldName) == 0) { /*No MemberName*/ return entries[0].function(entries[0].fieldPointer, type, ctx, parseCtx, true); /*ENCODE DIRECT*/ } } else if(entryCount == 0) { return UA_STATUSCODE_BADDECODINGERROR; } parseCtx->index++; /*go to first key*/ CHECK_TOKEN_BOUNDS; for (size_t currentObjectCount = 0; currentObjectCount < objectCount && parseCtx->index < parseCtx->tokenCount; currentObjectCount++) { /* start searching at the index of currentObjectCount */ for (size_t i = currentObjectCount; i < entryCount + currentObjectCount; i++) { /* Search for KEY, if found outer loop will be one less. Best case * is objectCount if in order! */ size_t index = i % entryCount; CHECK_TOKEN_BOUNDS; if(jsoneq((char*) ctx->pos, &parseCtx->tokenArray[parseCtx->index], entries[index].fieldName) != 0) continue; if(entries[index].found) { /*Duplicate Key found, abort.*/ return UA_STATUSCODE_BADDECODINGERROR; } entries[index].found = true; parseCtx->index++; /*goto value*/ CHECK_TOKEN_BOUNDS; /* Find the data type. * TODO: get rid of parameter type. Only forward via DecodeEntry. 
*/ const UA_DataType *membertype = type; if(entries[index].type) membertype = entries[index].type; if(entries[index].function != NULL) { ret = entries[index].function(entries[index].fieldPointer, membertype, ctx, parseCtx, true); /*Move Token True*/ if(ret != UA_STATUSCODE_GOOD) return ret; } else { /*overstep single value, this will not work if object or array Only used not to double parse pre looked up type, but it has to be overstepped*/ parseCtx->index++; } break; } } return ret; } static status Array_decodeJson_internal(void **dst, const UA_DataType *type, CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken) { (void) moveToken; status ret; if(parseCtx->tokenArray[parseCtx->index].type != JSMN_ARRAY) return UA_STATUSCODE_BADDECODINGERROR; size_t length = (size_t)parseCtx->tokenArray[parseCtx->index].size; /* Save the length of the array */ size_t *p = (size_t*) dst - 1; *p = length; /* Return early for empty arrays */ if(length == 0) { *dst = UA_EMPTY_ARRAY_SENTINEL; return UA_STATUSCODE_GOOD; } /* Allocate memory */ *dst = UA_calloc(length, type->memSize); if(*dst == NULL) return UA_STATUSCODE_BADOUTOFMEMORY; parseCtx->index++; /* We go to first Array member!*/ /* Decode array members */ uintptr_t ptr = (uintptr_t)*dst; for(size_t i = 0; i < length; ++i) { ret = decodeJsonJumpTable[type->typeKind]((void*)ptr, type, ctx, parseCtx, true); if(ret != UA_STATUSCODE_GOOD) { UA_Array_delete(*dst, i+1, type); *dst = NULL; return ret; } ptr += type->memSize; } return UA_STATUSCODE_GOOD; } /*Wrapper for array with valid decodingStructure.*/ static status Array_decodeJson(void * dst, const UA_DataType *type, CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken) { return Array_decodeJson_internal((void **)dst, type, ctx, parseCtx, moveToken); } static status decodeJsonStructure(void *dst, const UA_DataType *type, CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken) { (void) moveToken; /* Check the recursion limit */ if(ctx->depth >= UA_JSON_ENCODING_MAX_RECURSION) return UA_STATUSCODE_BADENCODINGERROR; ctx->depth++; uintptr_t ptr = (uintptr_t)dst; status ret = UA_STATUSCODE_GOOD; u8 membersSize = type->membersSize; const UA_DataType *typelists[2] = { UA_TYPES, &type[-type->typeIndex] }; UA_STACKARRAY(DecodeEntry, entries, membersSize); for(size_t i = 0; i < membersSize && ret == UA_STATUSCODE_GOOD; ++i) { const UA_DataTypeMember *m = &type->members[i]; const UA_DataType *mt = &typelists[!m->namespaceZero][m->memberTypeIndex]; entries[i].type = mt; if(!m->isArray) { ptr += m->padding; entries[i].fieldName = m->memberName; entries[i].fieldPointer = (void*)ptr; entries[i].function = decodeJsonJumpTable[mt->typeKind]; entries[i].found = false; ptr += mt->memSize; } else { ptr += m->padding; ptr += sizeof(size_t); entries[i].fieldName = m->memberName; entries[i].fieldPointer = (void*)ptr; entries[i].function = (decodeJsonSignature)Array_decodeJson; entries[i].found = false; ptr += sizeof(void*); } } ret = decodeFields(ctx, parseCtx, entries, membersSize, type); ctx->depth--; return ret; } static status decodeJsonNotImplemented(void *dst, const UA_DataType *type, CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken) { (void)dst, (void)type, (void)ctx, (void)parseCtx, (void)moveToken; return UA_STATUSCODE_BADNOTIMPLEMENTED; } const decodeJsonSignature decodeJsonJumpTable[UA_DATATYPEKINDS] = { (decodeJsonSignature)Boolean_decodeJson, (decodeJsonSignature)SByte_decodeJson, /* SByte */ (decodeJsonSignature)Byte_decodeJson, (decodeJsonSignature)Int16_decodeJson, /* Int16 */ 
(decodeJsonSignature)UInt16_decodeJson, (decodeJsonSignature)Int32_decodeJson, /* Int32 */ (decodeJsonSignature)UInt32_decodeJson, (decodeJsonSignature)Int64_decodeJson, /* Int64 */ (decodeJsonSignature)UInt64_decodeJson, (decodeJsonSignature)Float_decodeJson, (decodeJsonSignature)Double_decodeJson, (decodeJsonSignature)String_decodeJson, (decodeJsonSignature)DateTime_decodeJson, /* DateTime */ (decodeJsonSignature)Guid_decodeJson, (decodeJsonSignature)ByteString_decodeJson, /* ByteString */ (decodeJsonSignature)String_decodeJson, /* XmlElement */ (decodeJsonSignature)NodeId_decodeJson, (decodeJsonSignature)ExpandedNodeId_decodeJson, (decodeJsonSignature)StatusCode_decodeJson, /* StatusCode */ (decodeJsonSignature)QualifiedName_decodeJson, /* QualifiedName */ (decodeJsonSignature)LocalizedText_decodeJson, (decodeJsonSignature)ExtensionObject_decodeJson, (decodeJsonSignature)DataValue_decodeJson, (decodeJsonSignature)Variant_decodeJson, (decodeJsonSignature)DiagnosticInfo_decodeJson, (decodeJsonSignature)decodeJsonNotImplemented, /* Decimal */ (decodeJsonSignature)Int32_decodeJson, /* Enum */ (decodeJsonSignature)decodeJsonStructure, (decodeJsonSignature)decodeJsonNotImplemented, /* Structure with optional fields */ (decodeJsonSignature)decodeJsonNotImplemented, /* Union */ (decodeJsonSignature)decodeJsonNotImplemented /* BitfieldCluster */ }; decodeJsonSignature getDecodeSignature(u8 index) { return decodeJsonJumpTable[index]; } status tokenize(ParseCtx *parseCtx, CtxJson *ctx, const UA_ByteString *src) { /* Set up the context */ ctx->pos = &src->data[0]; ctx->end = &src->data[src->length]; ctx->depth = 0; parseCtx->tokenCount = 0; parseCtx->index = 0; /*Set up tokenizer jsmn*/ jsmn_parser p; jsmn_init(&p); parseCtx->tokenCount = (UA_Int32) jsmn_parse(&p, (char*)src->data, src->length, parseCtx->tokenArray, UA_JSON_MAXTOKENCOUNT); if(parseCtx->tokenCount < 0) { if(parseCtx->tokenCount == JSMN_ERROR_NOMEM) return UA_STATUSCODE_BADOUTOFMEMORY; return UA_STATUSCODE_BADDECODINGERROR; } return UA_STATUSCODE_GOOD; } UA_StatusCode decodeJsonInternal(void *dst, const UA_DataType *type, CtxJson *ctx, ParseCtx *parseCtx, UA_Boolean moveToken) { return decodeJsonJumpTable[type->typeKind](dst, type, ctx, parseCtx, moveToken); } status UA_FUNC_ATTR_WARN_UNUSED_RESULT UA_decodeJson(const UA_ByteString *src, void *dst, const UA_DataType *type) { #ifndef UA_ENABLE_TYPEDESCRIPTION return UA_STATUSCODE_BADNOTSUPPORTED; #endif if(dst == NULL || src == NULL || type == NULL) { return UA_STATUSCODE_BADARGUMENTSMISSING; } /* Set up the context */ CtxJson ctx; ParseCtx parseCtx; parseCtx.tokenArray = (jsmntok_t*)UA_malloc(sizeof(jsmntok_t) * UA_JSON_MAXTOKENCOUNT); if(!parseCtx.tokenArray) return UA_STATUSCODE_BADOUTOFMEMORY; status ret = tokenize(&parseCtx, &ctx, src); if(ret != UA_STATUSCODE_GOOD) goto cleanup; /* Assume the top-level element is an object */ if(parseCtx.tokenCount < 1 || parseCtx.tokenArray[0].type != JSMN_OBJECT) { if(parseCtx.tokenCount == 1) { if(parseCtx.tokenArray[0].type == JSMN_PRIMITIVE || parseCtx.tokenArray[0].type == JSMN_STRING) { /* Only a primitive to parse. Do it directly. 
*/ memset(dst, 0, type->memSize); /* Initialize the value */ ret = decodeJsonJumpTable[type->typeKind](dst, type, &ctx, &parseCtx, true); goto cleanup; } } ret = UA_STATUSCODE_BADDECODINGERROR; goto cleanup; } /* Decode */ memset(dst, 0, type->memSize); /* Initialize the value */ ret = decodeJsonJumpTable[type->typeKind](dst, type, &ctx, &parseCtx, true); cleanup: UA_free(parseCtx.tokenArray); /* sanity check if all Tokens were processed */ if(!(parseCtx.index == parseCtx.tokenCount || parseCtx.index == parseCtx.tokenCount-1)) { ret = UA_STATUSCODE_BADDECODINGERROR; } if(ret != UA_STATUSCODE_GOOD) UA_deleteMembers(dst, type); /* Clean up */ return ret; }
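/*
 * Illustrative usage sketch (not part of the decoder source above): calling the
 * UA_decodeJson() entry point defined in this file to parse a single JSON
 * primitive into a UA_UInt32. The input string and the helper name are
 * hypothetical; only the UA_decodeJson() signature and the UA_TYPES table
 * referenced above are assumed.
 */
static UA_StatusCode
example_decode_uint32(UA_UInt32 *out) {
    char json[] = "42";                 /* hypothetical JSON input */
    UA_ByteString src;
    src.data = (UA_Byte*)json;
    src.length = sizeof(json) - 1;      /* exclude the trailing NUL */

    /* On failure UA_decodeJson already cleans up the partially decoded value */
    return UA_decodeJson(&src, out, &UA_TYPES[UA_TYPES_UINT32]);
}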
null
228
CWE-787
CVE-2020-36430
/* * Copyright (C) 2006 Evgeniy Stepanov <eugeni.stepanov@gmail.com> * * This file is part of libass. * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "config.h" #include "ass_compat.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <errno.h> #include <sys/types.h> #include <sys/stat.h> #include <inttypes.h> #ifdef CONFIG_ICONV #include <iconv.h> #endif #include "ass.h" #include "ass_utils.h" #include "ass_library.h" #include "ass_priv.h" #include "ass_shaper.h" #include "ass_string.h" #define ass_atof(STR) (ass_strtod((STR),NULL)) static const char *const ass_style_format = "Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, " "OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, " "ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, " "Alignment, MarginL, MarginR, MarginV, Encoding"; static const char *const ass_event_format = "Layer, Start, End, Style, Name, " "MarginL, MarginR, MarginV, Effect, Text"; static const char *const ssa_style_format = "Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, " "TertiaryColour, BackColour, Bold, Italic, BorderStyle, Outline, " "Shadow, Alignment, MarginL, MarginR, MarginV, AlphaLevel, Encoding"; static const char *const ssa_event_format = "Marked, Start, End, Style, Name, " "MarginL, MarginR, MarginV, Effect, Text"; #define ASS_STYLES_ALLOC 20 int ass_library_version(void) { return LIBASS_VERSION; } void ass_free_track(ASS_Track *track) { int i; if (!track) return; if (track->parser_priv) { free(track->parser_priv->read_order_bitmap); free(track->parser_priv->fontname); free(track->parser_priv->fontdata); free(track->parser_priv); } free(track->style_format); free(track->event_format); free(track->Language); if (track->styles) { for (i = 0; i < track->n_styles; ++i) ass_free_style(track, i); } free(track->styles); if (track->events) { for (i = 0; i < track->n_events; ++i) ass_free_event(track, i); } free(track->events); free(track->name); free(track); } /// \brief Allocate a new style struct /// \param track track /// \return style id or negative value on failure int ass_alloc_style(ASS_Track *track) { int sid; assert(track->n_styles <= track->max_styles); if (track->n_styles == track->max_styles) { if (track->max_styles >= FFMIN(SIZE_MAX, INT_MAX) - ASS_STYLES_ALLOC) return -1; int new_max = track->max_styles + ASS_STYLES_ALLOC; if (!ASS_REALLOC_ARRAY(track->styles, new_max)) return -1; track->max_styles = new_max; } sid = track->n_styles++; memset(track->styles + sid, 0, sizeof(ASS_Style)); return sid; } /// \brief Allocate a new event struct /// \param track track /// \return event id or negative value on failure int ass_alloc_event(ASS_Track *track) { int eid; assert(track->n_events <= track->max_events); if (track->n_events == track->max_events) { if (track->max_events >= FFMIN(SIZE_MAX, INT_MAX) / 2) 
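/* refuse to grow: doubling max_events below would overflow the capacity/allocation-size math */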
return -1; int new_max = track->max_events * 2 + 1; if (!ASS_REALLOC_ARRAY(track->events, new_max)) return -1; track->max_events = new_max; } eid = track->n_events++; memset(track->events + eid, 0, sizeof(ASS_Event)); return eid; } void ass_free_event(ASS_Track *track, int eid) { ASS_Event *event = track->events + eid; free(event->Name); free(event->Effect); free(event->Text); free(event->render_priv); } void ass_free_style(ASS_Track *track, int sid) { ASS_Style *style = track->styles + sid; free(style->Name); free(style->FontName); } static int resize_read_order_bitmap(ASS_Track *track, int max_id) { // Don't allow malicious files to OOM us easily. Also avoids int overflows. if (max_id < 0 || max_id >= 10 * 1024 * 1024 * 8) goto fail; assert(track->parser_priv->read_order_bitmap || !track->parser_priv->read_order_elems); if (max_id >= track->parser_priv->read_order_elems * 32) { int oldelems = track->parser_priv->read_order_elems; int elems = ((max_id + 31) / 32 + 1) * 2; assert(elems >= oldelems); track->parser_priv->read_order_elems = elems; void *new_bitmap = realloc(track->parser_priv->read_order_bitmap, elems * 4); if (!new_bitmap) goto fail; track->parser_priv->read_order_bitmap = new_bitmap; memset(track->parser_priv->read_order_bitmap + oldelems, 0, (elems - oldelems) * 4); } return 0; fail: free(track->parser_priv->read_order_bitmap); track->parser_priv->read_order_bitmap = NULL; track->parser_priv->read_order_elems = 0; return -1; } static int test_and_set_read_order_bit(ASS_Track *track, int id) { if (resize_read_order_bitmap(track, id) < 0) return -1; int index = id / 32; uint32_t bit = 1u << (id % 32); if (track->parser_priv->read_order_bitmap[index] & bit) return 1; track->parser_priv->read_order_bitmap[index] |= bit; return 0; } // ============================================================================================== /** * \brief Set up default style * \param style style to edit to defaults * The parameters are mostly taken directly from VSFilter source for * best compatibility. */ static void set_default_style(ASS_Style *style) { style->Name = strdup("Default"); style->FontName = strdup("Arial"); style->FontSize = 18; style->PrimaryColour = 0xffffff00; style->SecondaryColour = 0x00ffff00; style->OutlineColour = 0x00000000; style->BackColour = 0x00000080; style->Bold = 200; style->ScaleX = 1.0; style->ScaleY = 1.0; style->Spacing = 0; style->BorderStyle = 1; style->Outline = 2; style->Shadow = 3; style->Alignment = 2; style->MarginL = style->MarginR = style->MarginV = 20; } static long long string2timecode(ASS_Library *library, char *p) { int h, m, s, ms; long long tm; int res = sscanf(p, "%d:%d:%d.%d", &h, &m, &s, &ms); if (res < 4) { ass_msg(library, MSGL_WARN, "Bad timestamp"); return 0; } tm = ((h * 60LL + m) * 60 + s) * 1000 + ms * 10LL; return tm; } #define NEXT(str,token) \ token = next_token(&str); \ if (!token) break; #define ALIAS(alias,name) \ if (ass_strcasecmp(tname, #alias) == 0) {tname = #name;} /* One section started with PARSE_START and PARSE_END parses a single token * (contained in the variable named token) for the header indicated by the * variable tname. It does so by chaining a number of else-if statements, each * of which checks if the tname variable indicates that this header should be * parsed. The first parameter of the macro gives the name of the header. * * The string that is passed is in str. str is advanced to the next token if * a header could be parsed. 
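 * For example, the chain "PARSE_START INTVAL(MarginL) PARSE_END" expands to
 *   if (0) { } else if (ass_strcasecmp(tname, "MarginL") == 0) { target->MarginL = atoi(token); }
 * so at most one branch runs for the current tname/token pair.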
The parsed results are stored in the variable * target, which has the type ASS_Style* or ASS_Event*. */ #define PARSE_START if (0) { #define PARSE_END } #define ANYVAL(name,func) \ } else if (ass_strcasecmp(tname, #name) == 0) { \ target->name = func(token); #define STRVAL(name) \ } else if (ass_strcasecmp(tname, #name) == 0) { \ char *new_str = strdup(token); \ if (new_str) { \ free(target->name); \ target->name = new_str; \ } #define STARREDSTRVAL(name) \ } else if (ass_strcasecmp(tname, #name) == 0) { \ while (*token == '*') ++token; \ char *new_str = strdup(token); \ if (new_str) { \ free(target->name); \ target->name = new_str; \ } #define COLORVAL(name) ANYVAL(name,parse_color_header) #define INTVAL(name) ANYVAL(name,atoi) #define FPVAL(name) ANYVAL(name,ass_atof) #define TIMEVAL(name) \ } else if (ass_strcasecmp(tname, #name) == 0) { \ target->name = string2timecode(track->library, token); #define STYLEVAL(name) \ } else if (ass_strcasecmp(tname, #name) == 0) { \ target->name = lookup_style(track, token); // skip spaces in str beforehand, or trim leading spaces afterwards static inline void advance_token_pos(const char **const str, const char **const start, const char **const end) { *start = *str; *end = *start; while (**end != '\0' && **end != ',') ++*end; *str = *end + (**end == ','); rskip_spaces((char**)end, (char*)*start); } static char *next_token(char **str) { char *p; char *start; skip_spaces(str); if (**str == '\0') { return 0; } advance_token_pos((const char**)str, (const char**)&start, (const char**)&p); *p = '\0'; return start; } /** * \brief Parse the tail of Dialogue line * \param track track * \param event parsed data goes here * \param str string to parse, zero-terminated * \param n_ignored number of format options to skip at the beginning */ static int process_event_tail(ASS_Track *track, ASS_Event *event, char *str, int n_ignored) { char *token; char *tname; char *p = str; int i; ASS_Event *target = event; char *format = strdup(track->event_format); if (!format) return -1; char *q = format; // format scanning pointer for (i = 0; i < n_ignored; ++i) { NEXT(q, tname); } while (1) { NEXT(q, tname); if (ass_strcasecmp(tname, "Text") == 0) { char *last; event->Text = strdup(p); if (event->Text && *event->Text != 0) { last = event->Text + strlen(event->Text) - 1; if (last >= event->Text && *last == '\r') *last = 0; } event->Duration -= event->Start; free(format); return event->Text ? 
0 : -1; // "Text" is always the last } NEXT(p, token); ALIAS(End, Duration) // temporarily store end timecode in event->Duration PARSE_START INTVAL(Layer) STYLEVAL(Style) STRVAL(Name) STRVAL(Effect) INTVAL(MarginL) INTVAL(MarginR) INTVAL(MarginV) TIMEVAL(Start) TIMEVAL(Duration) PARSE_END } free(format); return 1; } /** * \brief Parse command line style overrides (--ass-force-style option) * \param track track to apply overrides to * The format for overrides is [StyleName.]Field=Value */ void ass_process_force_style(ASS_Track *track) { char **fs, *eq, *dt, *style, *tname, *token; ASS_Style *target; int sid; char **list = track->library->style_overrides; if (!list) return; for (fs = list; *fs; ++fs) { eq = strrchr(*fs, '='); if (!eq) continue; *eq = '\0'; token = eq + 1; if (!ass_strcasecmp(*fs, "PlayResX")) track->PlayResX = atoi(token); else if (!ass_strcasecmp(*fs, "PlayResY")) track->PlayResY = atoi(token); else if (!ass_strcasecmp(*fs, "Timer")) track->Timer = ass_atof(token); else if (!ass_strcasecmp(*fs, "WrapStyle")) track->WrapStyle = atoi(token); else if (!ass_strcasecmp(*fs, "ScaledBorderAndShadow")) track->ScaledBorderAndShadow = parse_bool(token); else if (!ass_strcasecmp(*fs, "Kerning")) track->Kerning = parse_bool(token); else if (!ass_strcasecmp(*fs, "YCbCr Matrix")) track->YCbCrMatrix = parse_ycbcr_matrix(token); dt = strrchr(*fs, '.'); if (dt) { *dt = '\0'; style = *fs; tname = dt + 1; } else { style = NULL; tname = *fs; } for (sid = 0; sid < track->n_styles; ++sid) { if (style == NULL || ass_strcasecmp(track->styles[sid].Name, style) == 0) { target = track->styles + sid; PARSE_START STRVAL(FontName) COLORVAL(PrimaryColour) COLORVAL(SecondaryColour) COLORVAL(OutlineColour) COLORVAL(BackColour) FPVAL(FontSize) INTVAL(Bold) INTVAL(Italic) INTVAL(Underline) INTVAL(StrikeOut) FPVAL(Spacing) FPVAL(Angle) INTVAL(BorderStyle) INTVAL(Alignment) INTVAL(Justify) INTVAL(MarginL) INTVAL(MarginR) INTVAL(MarginV) INTVAL(Encoding) FPVAL(ScaleX) FPVAL(ScaleY) FPVAL(Outline) FPVAL(Shadow) FPVAL(Blur) PARSE_END } } *eq = '='; if (dt) *dt = '.'; } } /** * \brief Parse the Style line * \param track track * \param str string to parse, zero-terminated * Allocates a new style struct. 
*/ static int process_style(ASS_Track *track, char *str) { char *token; char *tname; char *p = str; char *format; char *q; // format scanning pointer int sid; ASS_Style *style; ASS_Style *target; if (!track->style_format) { // no style format header // probably an ancient script version if (track->track_type == TRACK_TYPE_SSA) track->style_format = strdup(ssa_style_format); else track->style_format = strdup(ass_style_format); if (!track->style_format) return -1; } q = format = strdup(track->style_format); if (!q) return -1; ass_msg(track->library, MSGL_V, "[%p] Style: %s", track, str); sid = ass_alloc_style(track); if (sid < 0) { free(format); return -1; } style = track->styles + sid; target = style; // fill style with some default values style->ScaleX = 100.; style->ScaleY = 100.; while (1) { NEXT(q, tname); NEXT(p, token); PARSE_START STARREDSTRVAL(Name) STRVAL(FontName) COLORVAL(PrimaryColour) COLORVAL(SecondaryColour) COLORVAL(OutlineColour) // TertiaryColor COLORVAL(BackColour) // SSA uses BackColour for both outline and shadow // this will destroy SSA's TertiaryColour, but i'm not going to use it anyway if (track->track_type == TRACK_TYPE_SSA) target->OutlineColour = target->BackColour; FPVAL(FontSize) INTVAL(Bold) INTVAL(Italic) INTVAL(Underline) INTVAL(StrikeOut) FPVAL(Spacing) FPVAL(Angle) INTVAL(BorderStyle) INTVAL(Alignment) if (track->track_type == TRACK_TYPE_ASS) target->Alignment = numpad2align(target->Alignment); // VSFilter compatibility else if (target->Alignment == 8) target->Alignment = 3; else if (target->Alignment == 4) target->Alignment = 11; INTVAL(MarginL) INTVAL(MarginR) INTVAL(MarginV) INTVAL(Encoding) FPVAL(ScaleX) FPVAL(ScaleY) FPVAL(Outline) FPVAL(Shadow) PARSE_END } free(format); style->ScaleX = FFMAX(style->ScaleX, 0.) / 100.; style->ScaleY = FFMAX(style->ScaleY, 0.) / 100.; style->Spacing = FFMAX(style->Spacing, 0.); style->Outline = FFMAX(style->Outline, 0.); style->Shadow = FFMAX(style->Shadow, 0.); style->Bold = !!style->Bold; style->Italic = !!style->Italic; style->Underline = !!style->Underline; style->StrikeOut = !!style->StrikeOut; if (!style->Name) style->Name = strdup("Default"); if (!style->FontName) style->FontName = strdup("Arial"); if (!style->Name || !style->FontName) { ass_free_style(track, sid); track->n_styles--; return -1; } if (strcmp(target->Name, "Default") == 0) track->default_style = sid; return 0; } static bool format_line_compare(const char *fmt1, const char *fmt2) { while (true) { const char *tk1_start, *tk2_start; const char *tk1_end, *tk2_end; skip_spaces((char**)&fmt1); skip_spaces((char**)&fmt2); if (!*fmt1 || !*fmt2) break; advance_token_pos(&fmt1, &tk1_start, &tk1_end); advance_token_pos(&fmt2, &tk2_start, &tk2_end); if ((tk1_end-tk1_start) != (tk2_end-tk2_start)) return false; if (ass_strncasecmp(tk1_start, tk2_start, tk1_end-tk1_start)) return false; } return *fmt1 == *fmt2; } /** * \brief Set SBAS=1 if not set explicitly in case of custom format line * \param track track * \param fmt format line of file * \param std standard format line * * As of writing libass is the only renderer accepting custom format lines. * For years libass defaultet SBAS to yes instead of no. * To avoid breaking released scripts with custom format lines, * keep SBAS=1 default for custom format files. 
*/ static void custom_format_line_compatibility(ASS_Track *const track, const char *const fmt, const char *const std) { if (!(track->parser_priv->header_flags & SINFO_SCALEDBORDER) && !format_line_compare(fmt, std)) { ass_msg(track->library, MSGL_INFO, "Track has custom format line(s). " "'ScaledBorderAndShadow' will default to 'yes'."); track->ScaledBorderAndShadow = 1; } } static int process_styles_line(ASS_Track *track, char *str) { int ret = 0; if (!strncmp(str, "Format:", 7)) { char *p = str + 7; skip_spaces(&p); free(track->style_format); track->style_format = strdup(p); if (!track->style_format) return -1; ass_msg(track->library, MSGL_DBG2, "Style format: %s", track->style_format); if (track->track_type == TRACK_TYPE_ASS) custom_format_line_compatibility(track, p, ass_style_format); else custom_format_line_compatibility(track, p, ssa_style_format); } else if (!strncmp(str, "Style:", 6)) { char *p = str + 6; skip_spaces(&p); ret = process_style(track, p); } return ret; } static inline void check_duplicate_info_line(const ASS_Track *const track, const ScriptInfo si, const char *const name) { if (track->parser_priv->header_flags & si) ass_msg(track->library, MSGL_WARN, "Duplicate Script Info Header '%s'. Previous value overwritten!", name); else track->parser_priv->header_flags |= si; } static int process_info_line(ASS_Track *track, char *str) { if (!strncmp(str, "PlayResX:", 9)) { check_duplicate_info_line(track, SINFO_PLAYRESX, "PlayResX"); track->PlayResX = atoi(str + 9); } else if (!strncmp(str, "PlayResY:", 9)) { check_duplicate_info_line(track, SINFO_PLAYRESY, "PlayResY"); track->PlayResY = atoi(str + 9); } else if (!strncmp(str, "Timer:", 6)) { check_duplicate_info_line(track, SINFO_TIMER, "Timer"); track->Timer = ass_atof(str + 6); } else if (!strncmp(str, "WrapStyle:", 10)) { check_duplicate_info_line(track, SINFO_WRAPSTYLE, "WrapStyle"); track->WrapStyle = atoi(str + 10); } else if (!strncmp(str, "ScaledBorderAndShadow:", 22)) { check_duplicate_info_line(track, SINFO_SCALEDBORDER, "ScaledBorderAndShadow"); track->ScaledBorderAndShadow = parse_bool(str + 22); } else if (!strncmp(str, "Kerning:", 8)) { check_duplicate_info_line(track, SINFO_KERNING, "Kerning"); track->Kerning = parse_bool(str + 8); } else if (!strncmp(str, "YCbCr Matrix:", 13)) { check_duplicate_info_line(track, SINFO_COLOURMATRIX, "YCbCr Matrix"); track->YCbCrMatrix = parse_ycbcr_matrix(str + 13); } else if (!strncmp(str, "Language:", 9)) { check_duplicate_info_line(track, SINFO_LANGUAGE, "Language"); char *p = str + 9; while (*p && ass_isspace(*p)) p++; free(track->Language); track->Language = strndup(p, 2); } else if (!strncmp(str, "; Script generated by ", 22)) { if (!strncmp(str + 22,"FFmpeg/Lavc", 11)) track->parser_priv->header_flags |= GENBY_FFMPEG; } return 0; } static void event_format_fallback(ASS_Track *track) { track->parser_priv->state = PST_EVENTS; if (track->track_type == TRACK_TYPE_SSA) track->event_format = strdup(ssa_event_format); else track->event_format = strdup(ass_event_format); ass_msg(track->library, MSGL_V, "No event format found, using fallback"); } /** * \brief Return if track is post-signature and pre-SBAS ffmpeg track * \param track track */ static bool detect_legacy_conv_subs(ASS_Track *track) { /* * FFmpeg and libav convert srt subtitles to ass. * In legacy versions, they did not set the 'ScaledBorderAndShadow' header, * but expected it to default to yes (which libass did). 
* To avoid breaking them, we try to detect these * converted subs by common properties of ffmpeg/libav's converted subs. * Since files with custom format lines (-2014.10.11) default to SBAS=1 * regardless of being ffmpeg generated or not, we are only concerned with * post-signature and pre-SBAS ffmpeg-files (2014.10.11-2020.04.17). * We want to avoid matching modified ffmpeg files though. * * Relevant ffmpeg commits are: * 2c77c90684e24ef16f7e7c4462e011434cee6a98 2010.12.29 * Initial conversion format. * Style "Format:" line is mix of SSA and ASS * Event "Format:" line * "Format: Layer, Start, End, Text\r\n" * Only Header in ScriptInfo is "ScriptType: v4.00+" * 0e7782c08ec77739edb0b98ba5d896b45e98235f 2012.06.15 * Adds 'Style' to Event "Format:" line * 5039aadf68deb9ad6dd0737ea11259fe53d3727b 2014.06.18 * Adds PlayerRes(X|Y) (384x288) * (moved below ScriptType: a few minutes later) * 40b9f28641b696c6bb73ce49dc97c2ce2700cbdb 2014.10.11 14:31:23 +0200 * Regular full ASS Event and Style "Format:" lines * 52b0a0ecaa02e17f7e01bead8c3f215f1cfd48dc 2014.10.11 18:37:43 +0200 <== * Signature comment * 56bc0a6736cdc7edab837ff8f304661fd16de0e4 2015.02.08 * Allow custom PlayRes(X|Y) * a8ba2a2c1294a330a0e79ae7f0d3a203a7599166 2020.04.17 * Set 'ScaledBorderAndShadow: yes' * * libav outputs initial ffmpeg format. (no longer maintained) */ // GENBY_FFMPEG and exact ffmpeg headers required // Note: If there's SINFO_SCRIPTTYPE in the future this needs to be updated if (track->parser_priv->header_flags ^ (SINFO_PLAYRESX | SINFO_PLAYRESY | GENBY_FFMPEG)) return false; // Legacy ffmpeg only ever has one style // Check 2 not 1 because libass also adds a def style if (track->n_styles != 2 || strncmp(track->styles[1].Name, "Default", 7)) return false; return true; } static int process_events_line(ASS_Track *track, char *str) { if (!strncmp(str, "Format:", 7)) { char *p = str + 7; skip_spaces(&p); free(track->event_format); track->event_format = strdup(p); if (!track->event_format) return -1; ass_msg(track->library, MSGL_DBG2, "Event format: %s", track->event_format); if (track->track_type == TRACK_TYPE_ASS) custom_format_line_compatibility(track, p, ass_event_format); else custom_format_line_compatibility(track, p, ssa_event_format); // Guess if we are dealing with legacy ffmpeg subs and change accordingly // If file has no event format it was probably not created by ffmpeg/libav if (detect_legacy_conv_subs(track)) { track->ScaledBorderAndShadow = 1; ass_msg(track->library, MSGL_INFO, "Track treated as legacy ffmpeg sub."); } } else if (!strncmp(str, "Dialogue:", 9)) { // This should never be reached for embedded subtitles. 
// They have slightly different format and are parsed in ass_process_chunk, // called directly from demuxer int eid; ASS_Event *event; // We can't parse events without event_format if (!track->event_format) { event_format_fallback(track); if (!track->event_format) return -1; } str += 9; skip_spaces(&str); eid = ass_alloc_event(track); if (eid < 0) return -1; event = track->events + eid; return process_event_tail(track, event, str, 0); } else { ass_msg(track->library, MSGL_V, "Not understood: '%.30s'", str); } return 0; } static unsigned char *decode_chars(const unsigned char *src, unsigned char *dst, size_t cnt_in) { uint32_t value = 0; for (int i = 0; i < cnt_in; i++) value |= (uint32_t) ((src[i] - 33u) & 63) << 6 * (3 - i); *dst++ = value >> 16; if (cnt_in >= 3) *dst++ = value >> 8 & 0xff; if (cnt_in >= 4) *dst++ = value & 0xff; return dst; } static void reset_embedded_font_parsing(ASS_ParserPriv *parser_priv) { free(parser_priv->fontname); free(parser_priv->fontdata); parser_priv->fontname = NULL; parser_priv->fontdata = NULL; parser_priv->fontdata_size = 0; parser_priv->fontdata_used = 0; } static int decode_font(ASS_Track *track) { unsigned char *p; unsigned char *q; size_t i; size_t size; // original size size_t dsize; // decoded size unsigned char *buf = 0; ass_msg(track->library, MSGL_V, "Font: %d bytes encoded data", track->parser_priv->fontdata_used); size = track->parser_priv->fontdata_used; if (size % 4 == 1) { ass_msg(track->library, MSGL_ERR, "Bad encoded data size"); goto error_decode_font; } buf = malloc(size / 4 * 3 + FFMAX(size % 4 - 1, 0)); if (!buf) goto error_decode_font; q = buf; for (i = 0, p = (unsigned char *) track->parser_priv->fontdata; i < size / 4; i++, p += 4) { q = decode_chars(p, q, 4); } if (size % 4 == 2) { q = decode_chars(p, q, 2); } else if (size % 4 == 3) { q = decode_chars(p, q, 3); } dsize = q - buf; assert(dsize == size / 4 * 3 + FFMAX(size % 4 - 1, 0)); if (track->library->extract_fonts) { ass_add_font(track->library, track->parser_priv->fontname, (char *) buf, dsize); } error_decode_font: free(buf); reset_embedded_font_parsing(track->parser_priv); return 0; } static int process_fonts_line(ASS_Track *track, char *str) { size_t len; if (!strncmp(str, "fontname:", 9)) { char *p = str + 9; skip_spaces(&p); if (track->parser_priv->fontname) { decode_font(track); } track->parser_priv->fontname = strdup(p); if (!track->parser_priv->fontname) return -1; ass_msg(track->library, MSGL_V, "Fontname: %s", track->parser_priv->fontname); return 0; } if (!track->parser_priv->fontname) { ass_msg(track->library, MSGL_V, "Not understood: '%s'", str); return 1; } len = strlen(str); if (track->parser_priv->fontdata_used >= SIZE_MAX - FFMAX(len, 100 * 1024)) { goto mem_fail; } else if (track->parser_priv->fontdata_used + len > track->parser_priv->fontdata_size) { size_t new_size = track->parser_priv->fontdata_size + FFMAX(len, 100 * 1024); if (!ASS_REALLOC_ARRAY(track->parser_priv->fontdata, new_size)) goto mem_fail; track->parser_priv->fontdata_size = new_size; } memcpy(track->parser_priv->fontdata + track->parser_priv->fontdata_used, str, len); track->parser_priv->fontdata_used += len; return 0; mem_fail: reset_embedded_font_parsing(track->parser_priv); return -1; } /** * \brief Parse a header line * \param track track * \param str string to parse, zero-terminated */ static int process_line(ASS_Track *track, char *str) { skip_spaces(&str); if (!ass_strncasecmp(str, "[Script Info]", 13)) { track->parser_priv->state = PST_INFO; } else if (!ass_strncasecmp(str, "[V4 
Styles]", 11)) { track->parser_priv->state = PST_STYLES; track->track_type = TRACK_TYPE_SSA; } else if (!ass_strncasecmp(str, "[V4+ Styles]", 12)) { track->parser_priv->state = PST_STYLES; track->track_type = TRACK_TYPE_ASS; } else if (!ass_strncasecmp(str, "[Events]", 8)) { track->parser_priv->state = PST_EVENTS; } else if (!ass_strncasecmp(str, "[Fonts]", 7)) { track->parser_priv->state = PST_FONTS; } else { switch (track->parser_priv->state) { case PST_INFO: process_info_line(track, str); break; case PST_STYLES: process_styles_line(track, str); break; case PST_EVENTS: process_events_line(track, str); break; case PST_FONTS: process_fonts_line(track, str); break; default: break; } } return 0; } static int process_text(ASS_Track *track, char *str) { char *p = str; while (1) { char *q; while (1) { if ((*p == '\r') || (*p == '\n')) ++p; else if (p[0] == '\xef' && p[1] == '\xbb' && p[2] == '\xbf') p += 3; // U+FFFE (BOM) else break; } for (q = p; ((*q != '\0') && (*q != '\r') && (*q != '\n')); ++q) { }; if (q == p) break; if (*q != '\0') *(q++) = '\0'; process_line(track, p); if (*q == '\0') break; p = q; } // there is no explicit end-of-font marker in ssa/ass if (track->parser_priv->fontname) decode_font(track); return 0; } /** * \brief Process a chunk of subtitle stream data. * \param track track * \param data string to parse * \param size length of data */ void ass_process_data(ASS_Track *track, char *data, int size) { char *str = malloc(size + 1); if (!str) return; memcpy(str, data, size); str[size] = '\0'; ass_msg(track->library, MSGL_V, "Event: %s", str); process_text(track, str); free(str); } /** * \brief Process CodecPrivate section of subtitle stream * \param track track * \param data string to parse * \param size length of data CodecPrivate section contains [Stream Info] and [V4+ Styles] ([V4 Styles] for SSA) sections */ void ass_process_codec_private(ASS_Track *track, char *data, int size) { ass_process_data(track, data, size); // probably an mkv produced by ancient mkvtoolnix // such files don't have [Events] and Format: headers if (!track->event_format) event_format_fallback(track); ass_process_force_style(track); } static int check_duplicate_event(ASS_Track *track, int ReadOrder) { if (track->parser_priv->read_order_bitmap) return test_and_set_read_order_bit(track, ReadOrder) > 0; // ignoring last event, it is the one we are comparing with for (int i = 0; i < track->n_events - 1; i++) if (track->events[i].ReadOrder == ReadOrder) return 1; return 0; } void ass_set_check_readorder(ASS_Track *track, int check_readorder) { track->parser_priv->check_readorder = check_readorder == 1; } /** * \brief Process a chunk of subtitle stream data. In Matroska, this contains exactly 1 event (or a commentary). 
* \param track track * \param data string to parse * \param size length of data * \param timecode starting time of the event (milliseconds) * \param duration duration of the event (milliseconds) */ void ass_process_chunk(ASS_Track *track, char *data, int size, long long timecode, long long duration) { char *str = NULL; int eid; char *p; char *token; ASS_Event *event; int check_readorder = track->parser_priv->check_readorder; if (check_readorder && !track->parser_priv->read_order_bitmap) { for (int i = 0; i < track->n_events; i++) { if (test_and_set_read_order_bit(track, track->events[i].ReadOrder) < 0) break; } } if (!track->event_format) { ass_msg(track->library, MSGL_WARN, "Event format header missing"); goto cleanup; } str = malloc(size + 1); if (!str) goto cleanup; memcpy(str, data, size); str[size] = '\0'; ass_msg(track->library, MSGL_V, "Event at %" PRId64 ", +%" PRId64 ": %s", (int64_t) timecode, (int64_t) duration, str); eid = ass_alloc_event(track); if (eid < 0) goto cleanup; event = track->events + eid; p = str; do { NEXT(p, token); event->ReadOrder = atoi(token); if (check_readorder && check_duplicate_event(track, event->ReadOrder)) break; NEXT(p, token); event->Layer = atoi(token); process_event_tail(track, event, p, 3); event->Start = timecode; event->Duration = duration; goto cleanup; // dump_events(tid); } while (0); // some error ass_free_event(track, eid); track->n_events--; cleanup: free(str); } /** * \brief Flush buffered events. * \param track track */ void ass_flush_events(ASS_Track *track) { if (track->events) { int eid; for (eid = 0; eid < track->n_events; eid++) ass_free_event(track, eid); track->n_events = 0; } free(track->parser_priv->read_order_bitmap); track->parser_priv->read_order_bitmap = NULL; track->parser_priv->read_order_elems = 0; } #ifdef CONFIG_ICONV /** \brief recode buffer to utf-8 * constraint: codepage != 0 * \param data pointer to text buffer * \param size buffer size * \return a pointer to recoded buffer, caller is responsible for freeing it **/ static char *sub_recode(ASS_Library *library, char *data, size_t size, char *codepage) { iconv_t icdsc; char *tocp = "UTF-8"; char *outbuf; assert(codepage); if ((icdsc = iconv_open(tocp, codepage)) != (iconv_t) (-1)) { ass_msg(library, MSGL_V, "Opened iconv descriptor"); } else { ass_msg(library, MSGL_ERR, "Error opening iconv descriptor"); return NULL; } { size_t osize = size; size_t ileft = size; size_t oleft = size - 1; char *ip; char *op; size_t rc; int clear = 0; outbuf = malloc(osize); if (!outbuf) goto out; ip = data; op = outbuf; while (1) { if (ileft) rc = iconv(icdsc, &ip, &ileft, &op, &oleft); else { // clear the conversion state and leave clear = 1; rc = iconv(icdsc, NULL, NULL, &op, &oleft); } if (rc == (size_t) (-1)) { if (errno == E2BIG) { size_t offset = op - outbuf; char *nbuf = realloc(outbuf, osize + size); if (!nbuf) { free(outbuf); outbuf = 0; goto out; } outbuf = nbuf; op = outbuf + offset; osize += size; oleft += size; } else { ass_msg(library, MSGL_WARN, "Error recoding file"); free(outbuf); outbuf = NULL; goto out; } } else if (clear) break; } outbuf[osize - oleft - 1] = 0; } out: if (icdsc != (iconv_t) (-1)) { (void) iconv_close(icdsc); ass_msg(library, MSGL_V, "Closed iconv descriptor"); } return outbuf; } #endif // ICONV /** * \brief read file contents into newly allocated buffer * \param fname file name * \param bufsize out: file size * \return pointer to file contents. Caller is responsible for its deallocation. 
*/ char *read_file(ASS_Library *library, char *fname, size_t *bufsize) { int res; long sz; long bytes_read; char *buf; FILE *fp = fopen(fname, "rb"); if (!fp) { ass_msg(library, MSGL_WARN, "ass_read_file(%s): fopen failed", fname); return 0; } res = fseek(fp, 0, SEEK_END); if (res == -1) { ass_msg(library, MSGL_WARN, "ass_read_file(%s): fseek failed", fname); fclose(fp); return 0; } sz = ftell(fp); rewind(fp); ass_msg(library, MSGL_V, "File size: %ld", sz); buf = sz < SIZE_MAX ? malloc(sz + 1) : NULL; if (!buf) { fclose(fp); return NULL; } assert(buf); bytes_read = 0; do { res = fread(buf + bytes_read, 1, sz - bytes_read, fp); if (res <= 0) { ass_msg(library, MSGL_INFO, "Read failed, %d: %s", errno, strerror(errno)); fclose(fp); free(buf); return 0; } bytes_read += res; } while (sz - bytes_read > 0); buf[sz] = '\0'; fclose(fp); if (bufsize) *bufsize = sz; return buf; } /* * \param buf pointer to subtitle text in utf-8 */ static ASS_Track *parse_memory(ASS_Library *library, char *buf) { ASS_Track *track; int i; track = ass_new_track(library); if (!track) return NULL; // process header process_text(track, buf); // external SSA/ASS subs does not have ReadOrder field for (i = 0; i < track->n_events; ++i) track->events[i].ReadOrder = i; if (track->track_type == TRACK_TYPE_UNKNOWN) { ass_free_track(track); return 0; } ass_process_force_style(track); return track; } /** * \brief Read subtitles from memory. * \param library libass library object * \param buf pointer to subtitles text * \param bufsize size of buffer * \param codepage recode buffer contents from given codepage * \return newly allocated track */ ASS_Track *ass_read_memory(ASS_Library *library, char *buf, size_t bufsize, char *codepage) { ASS_Track *track; int copied = 0; if (!buf) return 0; #ifdef CONFIG_ICONV if (codepage) { buf = sub_recode(library, buf, bufsize, codepage); if (!buf) return 0; else copied = 1; } #endif if (!copied) { char *newbuf = malloc(bufsize + 1); if (!newbuf) return 0; memcpy(newbuf, buf, bufsize); newbuf[bufsize] = '\0'; buf = newbuf; } track = parse_memory(library, buf); free(buf); if (!track) return 0; ass_msg(library, MSGL_INFO, "Added subtitle file: " "<memory> (%d styles, %d events)", track->n_styles, track->n_events); return track; } static char *read_file_recode(ASS_Library *library, char *fname, char *codepage, size_t *size) { char *buf; size_t bufsize; buf = read_file(library, fname, &bufsize); if (!buf) return 0; #ifdef CONFIG_ICONV if (codepage) { char *tmpbuf = sub_recode(library, buf, bufsize, codepage); free(buf); buf = tmpbuf; } if (!buf) return 0; #endif *size = bufsize; return buf; } /** * \brief Read subtitles from file. 
* \param library libass library object * \param fname file name * \param codepage recode buffer contents from given codepage * \return newly allocated track */ ASS_Track *ass_read_file(ASS_Library *library, char *fname, char *codepage) { char *buf; ASS_Track *track; size_t bufsize; buf = read_file_recode(library, fname, codepage, &bufsize); if (!buf) return 0; track = parse_memory(library, buf); free(buf); if (!track) return 0; track->name = strdup(fname); ass_msg(library, MSGL_INFO, "Added subtitle file: '%s' (%d styles, %d events)", fname, track->n_styles, track->n_events); return track; } /** * \brief read styles from file into already initialized track */ int ass_read_styles(ASS_Track *track, char *fname, char *codepage) { char *buf; ParserState old_state; size_t sz; buf = read_file(track->library, fname, &sz); if (!buf) return 1; #ifdef CONFIG_ICONV if (codepage) { char *tmpbuf; tmpbuf = sub_recode(track->library, buf, sz, codepage); free(buf); buf = tmpbuf; } if (!buf) return 1; #endif old_state = track->parser_priv->state; track->parser_priv->state = PST_STYLES; process_text(track, buf); free(buf); track->parser_priv->state = old_state; return 0; } long long ass_step_sub(ASS_Track *track, long long now, int movement) { int i; ASS_Event *best = NULL; long long target = now; int direction = (movement > 0 ? 1 : -1) * !!movement; if (track->n_events == 0) return 0; do { ASS_Event *closest = NULL; long long closest_time = now; for (i = 0; i < track->n_events; i++) { if (direction < 0) { long long end = track->events[i].Start + track->events[i].Duration; if (end < target) { if (!closest || end > closest_time) { closest = &track->events[i]; closest_time = end; } } } else if (direction > 0) { long long start = track->events[i].Start; if (start > target) { if (!closest || start < closest_time) { closest = &track->events[i]; closest_time = start; } } } else { long long start = track->events[i].Start; if (start < target) { if (!closest || start >= closest_time) { closest = &track->events[i]; closest_time = start; } } } } target = closest_time + direction; movement -= direction; if (closest) best = closest; } while (movement); return best ? best->Start - now : 0; } ASS_Track *ass_new_track(ASS_Library *library) { int def_sid = -1; ASS_Track *track = calloc(1, sizeof(ASS_Track)); if (!track) goto fail; track->library = library; track->ScaledBorderAndShadow = 0; track->parser_priv = calloc(1, sizeof(ASS_ParserPriv)); if (!track->parser_priv) goto fail; def_sid = ass_alloc_style(track); if (def_sid < 0) goto fail; set_default_style(track->styles + def_sid); track->default_style = def_sid; if (!track->styles[def_sid].Name || !track->styles[def_sid].FontName) goto fail; track->parser_priv->check_readorder = 1; return track; fail: if (track) { if (def_sid >= 0) ass_free_style(track, def_sid); free(track->parser_priv); free(track); } return NULL; } int ass_track_set_feature(ASS_Track *track, ASS_Feature feature, int enable) { switch (feature) { case ASS_FEATURE_INCOMPATIBLE_EXTENSIONS: //-fallthrough #ifdef USE_FRIBIDI_EX_API case ASS_FEATURE_BIDI_BRACKETS: track->parser_priv->bidi_brackets = !!enable; #endif return 0; default: return -1; } } /** * \brief Prepare track for rendering */ void ass_lazy_track_init(ASS_Library *lib, ASS_Track *track) { if (track->PlayResX > 0 && track->PlayResY > 0) return; if (track->PlayResX <= 0 && track->PlayResY <= 0) { ass_msg(lib, MSGL_WARN, "Neither PlayResX nor PlayResY defined. 
Assuming 384x288"); track->PlayResX = 384; track->PlayResY = 288; } else { if (track->PlayResY <= 0 && track->PlayResX == 1280) { track->PlayResY = 1024; ass_msg(lib, MSGL_WARN, "PlayResY undefined, setting to %d", track->PlayResY); } else if (track->PlayResY <= 0) { track->PlayResY = FFMAX(1, track->PlayResX * 3LL / 4); ass_msg(lib, MSGL_WARN, "PlayResY undefined, setting to %d", track->PlayResY); } else if (track->PlayResX <= 0 && track->PlayResY == 1024) { track->PlayResX = 1280; ass_msg(lib, MSGL_WARN, "PlayResX undefined, setting to %d", track->PlayResX); } else if (track->PlayResX <= 0) { track->PlayResX = FFMAX(1, track->PlayResY * 4LL / 3); ass_msg(lib, MSGL_WARN, "PlayResX undefined, setting to %d", track->PlayResX); } } }
null
/* * Copyright (C) 2006 Evgeniy Stepanov <eugeni.stepanov@gmail.com> * * This file is part of libass. * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "config.h" #include "ass_compat.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <errno.h> #include <sys/types.h> #include <sys/stat.h> #include <inttypes.h> #ifdef CONFIG_ICONV #include <iconv.h> #endif #include "ass.h" #include "ass_utils.h" #include "ass_library.h" #include "ass_priv.h" #include "ass_shaper.h" #include "ass_string.h" #define ass_atof(STR) (ass_strtod((STR),NULL)) static const char *const ass_style_format = "Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, " "OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, " "ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, " "Alignment, MarginL, MarginR, MarginV, Encoding"; static const char *const ass_event_format = "Layer, Start, End, Style, Name, " "MarginL, MarginR, MarginV, Effect, Text"; static const char *const ssa_style_format = "Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, " "TertiaryColour, BackColour, Bold, Italic, BorderStyle, Outline, " "Shadow, Alignment, MarginL, MarginR, MarginV, AlphaLevel, Encoding"; static const char *const ssa_event_format = "Marked, Start, End, Style, Name, " "MarginL, MarginR, MarginV, Effect, Text"; #define ASS_STYLES_ALLOC 20 int ass_library_version(void) { return LIBASS_VERSION; } void ass_free_track(ASS_Track *track) { int i; if (!track) return; if (track->parser_priv) { free(track->parser_priv->read_order_bitmap); free(track->parser_priv->fontname); free(track->parser_priv->fontdata); free(track->parser_priv); } free(track->style_format); free(track->event_format); free(track->Language); if (track->styles) { for (i = 0; i < track->n_styles; ++i) ass_free_style(track, i); } free(track->styles); if (track->events) { for (i = 0; i < track->n_events; ++i) ass_free_event(track, i); } free(track->events); free(track->name); free(track); } /// \brief Allocate a new style struct /// \param track track /// \return style id or negative value on failure int ass_alloc_style(ASS_Track *track) { int sid; assert(track->n_styles <= track->max_styles); if (track->n_styles == track->max_styles) { if (track->max_styles >= FFMIN(SIZE_MAX, INT_MAX) - ASS_STYLES_ALLOC) return -1; int new_max = track->max_styles + ASS_STYLES_ALLOC; if (!ASS_REALLOC_ARRAY(track->styles, new_max)) return -1; track->max_styles = new_max; } sid = track->n_styles++; memset(track->styles + sid, 0, sizeof(ASS_Style)); return sid; } /// \brief Allocate a new event struct /// \param track track /// \return event id or negative value on failure int ass_alloc_event(ASS_Track *track) { int eid; assert(track->n_events <= track->max_events); if (track->n_events == track->max_events) { if (track->max_events >= FFMIN(SIZE_MAX, INT_MAX) / 2) 
return -1; int new_max = track->max_events * 2 + 1; if (!ASS_REALLOC_ARRAY(track->events, new_max)) return -1; track->max_events = new_max; } eid = track->n_events++; memset(track->events + eid, 0, sizeof(ASS_Event)); return eid; } void ass_free_event(ASS_Track *track, int eid) { ASS_Event *event = track->events + eid; free(event->Name); free(event->Effect); free(event->Text); free(event->render_priv); } void ass_free_style(ASS_Track *track, int sid) { ASS_Style *style = track->styles + sid; free(style->Name); free(style->FontName); } static int resize_read_order_bitmap(ASS_Track *track, int max_id) { // Don't allow malicious files to OOM us easily. Also avoids int overflows. if (max_id < 0 || max_id >= 10 * 1024 * 1024 * 8) goto fail; assert(track->parser_priv->read_order_bitmap || !track->parser_priv->read_order_elems); if (max_id >= track->parser_priv->read_order_elems * 32) { int oldelems = track->parser_priv->read_order_elems; int elems = ((max_id + 31) / 32 + 1) * 2; assert(elems >= oldelems); track->parser_priv->read_order_elems = elems; void *new_bitmap = realloc(track->parser_priv->read_order_bitmap, elems * 4); if (!new_bitmap) goto fail; track->parser_priv->read_order_bitmap = new_bitmap; memset(track->parser_priv->read_order_bitmap + oldelems, 0, (elems - oldelems) * 4); } return 0; fail: free(track->parser_priv->read_order_bitmap); track->parser_priv->read_order_bitmap = NULL; track->parser_priv->read_order_elems = 0; return -1; } static int test_and_set_read_order_bit(ASS_Track *track, int id) { if (resize_read_order_bitmap(track, id) < 0) return -1; int index = id / 32; uint32_t bit = 1u << (id % 32); if (track->parser_priv->read_order_bitmap[index] & bit) return 1; track->parser_priv->read_order_bitmap[index] |= bit; return 0; } // ============================================================================================== /** * \brief Set up default style * \param style style to edit to defaults * The parameters are mostly taken directly from VSFilter source for * best compatibility. */ static void set_default_style(ASS_Style *style) { style->Name = strdup("Default"); style->FontName = strdup("Arial"); style->FontSize = 18; style->PrimaryColour = 0xffffff00; style->SecondaryColour = 0x00ffff00; style->OutlineColour = 0x00000000; style->BackColour = 0x00000080; style->Bold = 200; style->ScaleX = 1.0; style->ScaleY = 1.0; style->Spacing = 0; style->BorderStyle = 1; style->Outline = 2; style->Shadow = 3; style->Alignment = 2; style->MarginL = style->MarginR = style->MarginV = 20; } static long long string2timecode(ASS_Library *library, char *p) { int h, m, s, ms; long long tm; int res = sscanf(p, "%d:%d:%d.%d", &h, &m, &s, &ms); if (res < 4) { ass_msg(library, MSGL_WARN, "Bad timestamp"); return 0; } tm = ((h * 60LL + m) * 60 + s) * 1000 + ms * 10LL; return tm; } #define NEXT(str,token) \ token = next_token(&str); \ if (!token) break; #define ALIAS(alias,name) \ if (ass_strcasecmp(tname, #alias) == 0) {tname = #name;} /* One section started with PARSE_START and PARSE_END parses a single token * (contained in the variable named token) for the header indicated by the * variable tname. It does so by chaining a number of else-if statements, each * of which checks if the tname variable indicates that this header should be * parsed. The first parameter of the macro gives the name of the header. * * The string that is passed is in str. str is advanced to the next token if * a header could be parsed. 
The parsed results are stored in the variable * target, which has the type ASS_Style* or ASS_Event*. */ #define PARSE_START if (0) { #define PARSE_END } #define ANYVAL(name,func) \ } else if (ass_strcasecmp(tname, #name) == 0) { \ target->name = func(token); #define STRVAL(name) \ } else if (ass_strcasecmp(tname, #name) == 0) { \ char *new_str = strdup(token); \ if (new_str) { \ free(target->name); \ target->name = new_str; \ } #define STARREDSTRVAL(name) \ } else if (ass_strcasecmp(tname, #name) == 0) { \ while (*token == '*') ++token; \ char *new_str = strdup(token); \ if (new_str) { \ free(target->name); \ target->name = new_str; \ } #define COLORVAL(name) ANYVAL(name,parse_color_header) #define INTVAL(name) ANYVAL(name,atoi) #define FPVAL(name) ANYVAL(name,ass_atof) #define TIMEVAL(name) \ } else if (ass_strcasecmp(tname, #name) == 0) { \ target->name = string2timecode(track->library, token); #define STYLEVAL(name) \ } else if (ass_strcasecmp(tname, #name) == 0) { \ target->name = lookup_style(track, token); // skip spaces in str beforehand, or trim leading spaces afterwards static inline void advance_token_pos(const char **const str, const char **const start, const char **const end) { *start = *str; *end = *start; while (**end != '\0' && **end != ',') ++*end; *str = *end + (**end == ','); rskip_spaces((char**)end, (char*)*start); } static char *next_token(char **str) { char *p; char *start; skip_spaces(str); if (**str == '\0') { return 0; } advance_token_pos((const char**)str, (const char**)&start, (const char**)&p); *p = '\0'; return start; } /** * \brief Parse the tail of Dialogue line * \param track track * \param event parsed data goes here * \param str string to parse, zero-terminated * \param n_ignored number of format options to skip at the beginning */ static int process_event_tail(ASS_Track *track, ASS_Event *event, char *str, int n_ignored) { char *token; char *tname; char *p = str; int i; ASS_Event *target = event; char *format = strdup(track->event_format); if (!format) return -1; char *q = format; // format scanning pointer for (i = 0; i < n_ignored; ++i) { NEXT(q, tname); } while (1) { NEXT(q, tname); if (ass_strcasecmp(tname, "Text") == 0) { char *last; event->Text = strdup(p); if (event->Text && *event->Text != 0) { last = event->Text + strlen(event->Text) - 1; if (last >= event->Text && *last == '\r') *last = 0; } event->Duration -= event->Start; free(format); return event->Text ? 
0 : -1; // "Text" is always the last } NEXT(p, token); ALIAS(End, Duration) // temporarily store end timecode in event->Duration PARSE_START INTVAL(Layer) STYLEVAL(Style) STRVAL(Name) STRVAL(Effect) INTVAL(MarginL) INTVAL(MarginR) INTVAL(MarginV) TIMEVAL(Start) TIMEVAL(Duration) PARSE_END } free(format); return 1; } /** * \brief Parse command line style overrides (--ass-force-style option) * \param track track to apply overrides to * The format for overrides is [StyleName.]Field=Value */ void ass_process_force_style(ASS_Track *track) { char **fs, *eq, *dt, *style, *tname, *token; ASS_Style *target; int sid; char **list = track->library->style_overrides; if (!list) return; for (fs = list; *fs; ++fs) { eq = strrchr(*fs, '='); if (!eq) continue; *eq = '\0'; token = eq + 1; if (!ass_strcasecmp(*fs, "PlayResX")) track->PlayResX = atoi(token); else if (!ass_strcasecmp(*fs, "PlayResY")) track->PlayResY = atoi(token); else if (!ass_strcasecmp(*fs, "Timer")) track->Timer = ass_atof(token); else if (!ass_strcasecmp(*fs, "WrapStyle")) track->WrapStyle = atoi(token); else if (!ass_strcasecmp(*fs, "ScaledBorderAndShadow")) track->ScaledBorderAndShadow = parse_bool(token); else if (!ass_strcasecmp(*fs, "Kerning")) track->Kerning = parse_bool(token); else if (!ass_strcasecmp(*fs, "YCbCr Matrix")) track->YCbCrMatrix = parse_ycbcr_matrix(token); dt = strrchr(*fs, '.'); if (dt) { *dt = '\0'; style = *fs; tname = dt + 1; } else { style = NULL; tname = *fs; } for (sid = 0; sid < track->n_styles; ++sid) { if (style == NULL || ass_strcasecmp(track->styles[sid].Name, style) == 0) { target = track->styles + sid; PARSE_START STRVAL(FontName) COLORVAL(PrimaryColour) COLORVAL(SecondaryColour) COLORVAL(OutlineColour) COLORVAL(BackColour) FPVAL(FontSize) INTVAL(Bold) INTVAL(Italic) INTVAL(Underline) INTVAL(StrikeOut) FPVAL(Spacing) FPVAL(Angle) INTVAL(BorderStyle) INTVAL(Alignment) INTVAL(Justify) INTVAL(MarginL) INTVAL(MarginR) INTVAL(MarginV) INTVAL(Encoding) FPVAL(ScaleX) FPVAL(ScaleY) FPVAL(Outline) FPVAL(Shadow) FPVAL(Blur) PARSE_END } } *eq = '='; if (dt) *dt = '.'; } } /** * \brief Parse the Style line * \param track track * \param str string to parse, zero-terminated * Allocates a new style struct. 
*/ static int process_style(ASS_Track *track, char *str) { char *token; char *tname; char *p = str; char *format; char *q; // format scanning pointer int sid; ASS_Style *style; ASS_Style *target; if (!track->style_format) { // no style format header // probably an ancient script version if (track->track_type == TRACK_TYPE_SSA) track->style_format = strdup(ssa_style_format); else track->style_format = strdup(ass_style_format); if (!track->style_format) return -1; } q = format = strdup(track->style_format); if (!q) return -1; ass_msg(track->library, MSGL_V, "[%p] Style: %s", track, str); sid = ass_alloc_style(track); if (sid < 0) { free(format); return -1; } style = track->styles + sid; target = style; // fill style with some default values style->ScaleX = 100.; style->ScaleY = 100.; while (1) { NEXT(q, tname); NEXT(p, token); PARSE_START STARREDSTRVAL(Name) STRVAL(FontName) COLORVAL(PrimaryColour) COLORVAL(SecondaryColour) COLORVAL(OutlineColour) // TertiaryColor COLORVAL(BackColour) // SSA uses BackColour for both outline and shadow // this will destroy SSA's TertiaryColour, but i'm not going to use it anyway if (track->track_type == TRACK_TYPE_SSA) target->OutlineColour = target->BackColour; FPVAL(FontSize) INTVAL(Bold) INTVAL(Italic) INTVAL(Underline) INTVAL(StrikeOut) FPVAL(Spacing) FPVAL(Angle) INTVAL(BorderStyle) INTVAL(Alignment) if (track->track_type == TRACK_TYPE_ASS) target->Alignment = numpad2align(target->Alignment); // VSFilter compatibility else if (target->Alignment == 8) target->Alignment = 3; else if (target->Alignment == 4) target->Alignment = 11; INTVAL(MarginL) INTVAL(MarginR) INTVAL(MarginV) INTVAL(Encoding) FPVAL(ScaleX) FPVAL(ScaleY) FPVAL(Outline) FPVAL(Shadow) PARSE_END } free(format); style->ScaleX = FFMAX(style->ScaleX, 0.) / 100.; style->ScaleY = FFMAX(style->ScaleY, 0.) / 100.; style->Spacing = FFMAX(style->Spacing, 0.); style->Outline = FFMAX(style->Outline, 0.); style->Shadow = FFMAX(style->Shadow, 0.); style->Bold = !!style->Bold; style->Italic = !!style->Italic; style->Underline = !!style->Underline; style->StrikeOut = !!style->StrikeOut; if (!style->Name) style->Name = strdup("Default"); if (!style->FontName) style->FontName = strdup("Arial"); if (!style->Name || !style->FontName) { ass_free_style(track, sid); track->n_styles--; return -1; } if (strcmp(target->Name, "Default") == 0) track->default_style = sid; return 0; } static bool format_line_compare(const char *fmt1, const char *fmt2) { while (true) { const char *tk1_start, *tk2_start; const char *tk1_end, *tk2_end; skip_spaces((char**)&fmt1); skip_spaces((char**)&fmt2); if (!*fmt1 || !*fmt2) break; advance_token_pos(&fmt1, &tk1_start, &tk1_end); advance_token_pos(&fmt2, &tk2_start, &tk2_end); if ((tk1_end-tk1_start) != (tk2_end-tk2_start)) return false; if (ass_strncasecmp(tk1_start, tk2_start, tk1_end-tk1_start)) return false; } return *fmt1 == *fmt2; } /** * \brief Set SBAS=1 if not set explicitly in case of custom format line * \param track track * \param fmt format line of file * \param std standard format line * * As of writing libass is the only renderer accepting custom format lines. * For years libass defaultet SBAS to yes instead of no. * To avoid breaking released scripts with custom format lines, * keep SBAS=1 default for custom format files. 
*/ static void custom_format_line_compatibility(ASS_Track *const track, const char *const fmt, const char *const std) { if (!(track->parser_priv->header_flags & SINFO_SCALEDBORDER) && !format_line_compare(fmt, std)) { ass_msg(track->library, MSGL_INFO, "Track has custom format line(s). " "'ScaledBorderAndShadow' will default to 'yes'."); track->ScaledBorderAndShadow = 1; } } static int process_styles_line(ASS_Track *track, char *str) { int ret = 0; if (!strncmp(str, "Format:", 7)) { char *p = str + 7; skip_spaces(&p); free(track->style_format); track->style_format = strdup(p); if (!track->style_format) return -1; ass_msg(track->library, MSGL_DBG2, "Style format: %s", track->style_format); if (track->track_type == TRACK_TYPE_ASS) custom_format_line_compatibility(track, p, ass_style_format); else custom_format_line_compatibility(track, p, ssa_style_format); } else if (!strncmp(str, "Style:", 6)) { char *p = str + 6; skip_spaces(&p); ret = process_style(track, p); } return ret; } static inline void check_duplicate_info_line(const ASS_Track *const track, const ScriptInfo si, const char *const name) { if (track->parser_priv->header_flags & si) ass_msg(track->library, MSGL_WARN, "Duplicate Script Info Header '%s'. Previous value overwritten!", name); else track->parser_priv->header_flags |= si; } static int process_info_line(ASS_Track *track, char *str) { if (!strncmp(str, "PlayResX:", 9)) { check_duplicate_info_line(track, SINFO_PLAYRESX, "PlayResX"); track->PlayResX = atoi(str + 9); } else if (!strncmp(str, "PlayResY:", 9)) { check_duplicate_info_line(track, SINFO_PLAYRESY, "PlayResY"); track->PlayResY = atoi(str + 9); } else if (!strncmp(str, "Timer:", 6)) { check_duplicate_info_line(track, SINFO_TIMER, "Timer"); track->Timer = ass_atof(str + 6); } else if (!strncmp(str, "WrapStyle:", 10)) { check_duplicate_info_line(track, SINFO_WRAPSTYLE, "WrapStyle"); track->WrapStyle = atoi(str + 10); } else if (!strncmp(str, "ScaledBorderAndShadow:", 22)) { check_duplicate_info_line(track, SINFO_SCALEDBORDER, "ScaledBorderAndShadow"); track->ScaledBorderAndShadow = parse_bool(str + 22); } else if (!strncmp(str, "Kerning:", 8)) { check_duplicate_info_line(track, SINFO_KERNING, "Kerning"); track->Kerning = parse_bool(str + 8); } else if (!strncmp(str, "YCbCr Matrix:", 13)) { check_duplicate_info_line(track, SINFO_COLOURMATRIX, "YCbCr Matrix"); track->YCbCrMatrix = parse_ycbcr_matrix(str + 13); } else if (!strncmp(str, "Language:", 9)) { check_duplicate_info_line(track, SINFO_LANGUAGE, "Language"); char *p = str + 9; while (*p && ass_isspace(*p)) p++; free(track->Language); track->Language = strndup(p, 2); } else if (!strncmp(str, "; Script generated by ", 22)) { if (!strncmp(str + 22,"FFmpeg/Lavc", 11)) track->parser_priv->header_flags |= GENBY_FFMPEG; } return 0; } static void event_format_fallback(ASS_Track *track) { track->parser_priv->state = PST_EVENTS; if (track->track_type == TRACK_TYPE_SSA) track->event_format = strdup(ssa_event_format); else track->event_format = strdup(ass_event_format); ass_msg(track->library, MSGL_V, "No event format found, using fallback"); } /** * \brief Return if track is post-signature and pre-SBAS ffmpeg track * \param track track */ static bool detect_legacy_conv_subs(ASS_Track *track) { /* * FFmpeg and libav convert srt subtitles to ass. * In legacy versions, they did not set the 'ScaledBorderAndShadow' header, * but expected it to default to yes (which libass did). 
* To avoid breaking them, we try to detect these * converted subs by common properties of ffmpeg/libav's converted subs. * Since files with custom format lines (-2014.10.11) default to SBAS=1 * regardless of being ffmpeg generated or not, we are only concerned with * post-signature and pre-SBAS ffmpeg-files (2014.10.11-2020.04.17). * We want to avoid matching modified ffmpeg files though. * * Relevant ffmpeg commits are: * 2c77c90684e24ef16f7e7c4462e011434cee6a98 2010.12.29 * Initial conversion format. * Style "Format:" line is mix of SSA and ASS * Event "Format:" line * "Format: Layer, Start, End, Text\r\n" * Only Header in ScriptInfo is "ScriptType: v4.00+" * 0e7782c08ec77739edb0b98ba5d896b45e98235f 2012.06.15 * Adds 'Style' to Event "Format:" line * 5039aadf68deb9ad6dd0737ea11259fe53d3727b 2014.06.18 * Adds PlayerRes(X|Y) (384x288) * (moved below ScriptType: a few minutes later) * 40b9f28641b696c6bb73ce49dc97c2ce2700cbdb 2014.10.11 14:31:23 +0200 * Regular full ASS Event and Style "Format:" lines * 52b0a0ecaa02e17f7e01bead8c3f215f1cfd48dc 2014.10.11 18:37:43 +0200 <== * Signature comment * 56bc0a6736cdc7edab837ff8f304661fd16de0e4 2015.02.08 * Allow custom PlayRes(X|Y) * a8ba2a2c1294a330a0e79ae7f0d3a203a7599166 2020.04.17 * Set 'ScaledBorderAndShadow: yes' * * libav outputs initial ffmpeg format. (no longer maintained) */ // GENBY_FFMPEG and exact ffmpeg headers required // Note: If there's SINFO_SCRIPTTYPE in the future this needs to be updated if (track->parser_priv->header_flags ^ (SINFO_PLAYRESX | SINFO_PLAYRESY | GENBY_FFMPEG)) return false; // Legacy ffmpeg only ever has one style // Check 2 not 1 because libass also adds a def style if (track->n_styles != 2 || strncmp(track->styles[1].Name, "Default", 7)) return false; return true; } static int process_events_line(ASS_Track *track, char *str) { if (!strncmp(str, "Format:", 7)) { char *p = str + 7; skip_spaces(&p); free(track->event_format); track->event_format = strdup(p); if (!track->event_format) return -1; ass_msg(track->library, MSGL_DBG2, "Event format: %s", track->event_format); if (track->track_type == TRACK_TYPE_ASS) custom_format_line_compatibility(track, p, ass_event_format); else custom_format_line_compatibility(track, p, ssa_event_format); // Guess if we are dealing with legacy ffmpeg subs and change accordingly // If file has no event format it was probably not created by ffmpeg/libav if (detect_legacy_conv_subs(track)) { track->ScaledBorderAndShadow = 1; ass_msg(track->library, MSGL_INFO, "Track treated as legacy ffmpeg sub."); } } else if (!strncmp(str, "Dialogue:", 9)) { // This should never be reached for embedded subtitles. 
// They have slightly different format and are parsed in ass_process_chunk, // called directly from demuxer int eid; ASS_Event *event; // We can't parse events without event_format if (!track->event_format) { event_format_fallback(track); if (!track->event_format) return -1; } str += 9; skip_spaces(&str); eid = ass_alloc_event(track); if (eid < 0) return -1; event = track->events + eid; return process_event_tail(track, event, str, 0); } else { ass_msg(track->library, MSGL_V, "Not understood: '%.30s'", str); } return 0; } static unsigned char *decode_chars(const unsigned char *src, unsigned char *dst, size_t cnt_in) { uint32_t value = 0; for (int i = 0; i < cnt_in; i++) value |= (uint32_t) ((src[i] - 33u) & 63) << 6 * (3 - i); *dst++ = value >> 16; if (cnt_in >= 3) *dst++ = value >> 8 & 0xff; if (cnt_in >= 4) *dst++ = value & 0xff; return dst; } static void reset_embedded_font_parsing(ASS_ParserPriv *parser_priv) { free(parser_priv->fontname); free(parser_priv->fontdata); parser_priv->fontname = NULL; parser_priv->fontdata = NULL; parser_priv->fontdata_size = 0; parser_priv->fontdata_used = 0; } static int decode_font(ASS_Track *track) { unsigned char *p; unsigned char *q; size_t i; size_t size; // original size size_t dsize; // decoded size unsigned char *buf = 0; ass_msg(track->library, MSGL_V, "Font: %d bytes encoded data", track->parser_priv->fontdata_used); size = track->parser_priv->fontdata_used; if (size % 4 == 1) { ass_msg(track->library, MSGL_ERR, "Bad encoded data size"); goto error_decode_font; } buf = malloc(size / 4 * 3 + FFMAX(size % 4, 1) - 1); if (!buf) goto error_decode_font; q = buf; for (i = 0, p = (unsigned char *) track->parser_priv->fontdata; i < size / 4; i++, p += 4) { q = decode_chars(p, q, 4); } if (size % 4 == 2) { q = decode_chars(p, q, 2); } else if (size % 4 == 3) { q = decode_chars(p, q, 3); } dsize = q - buf; assert(dsize == size / 4 * 3 + FFMAX(size % 4, 1) - 1); if (track->library->extract_fonts) { ass_add_font(track->library, track->parser_priv->fontname, (char *) buf, dsize); } error_decode_font: free(buf); reset_embedded_font_parsing(track->parser_priv); return 0; } static int process_fonts_line(ASS_Track *track, char *str) { size_t len; if (!strncmp(str, "fontname:", 9)) { char *p = str + 9; skip_spaces(&p); if (track->parser_priv->fontname) { decode_font(track); } track->parser_priv->fontname = strdup(p); if (!track->parser_priv->fontname) return -1; ass_msg(track->library, MSGL_V, "Fontname: %s", track->parser_priv->fontname); return 0; } if (!track->parser_priv->fontname) { ass_msg(track->library, MSGL_V, "Not understood: '%s'", str); return 1; } len = strlen(str); if (track->parser_priv->fontdata_used >= SIZE_MAX - FFMAX(len, 100 * 1024)) { goto mem_fail; } else if (track->parser_priv->fontdata_used + len > track->parser_priv->fontdata_size) { size_t new_size = track->parser_priv->fontdata_size + FFMAX(len, 100 * 1024); if (!ASS_REALLOC_ARRAY(track->parser_priv->fontdata, new_size)) goto mem_fail; track->parser_priv->fontdata_size = new_size; } memcpy(track->parser_priv->fontdata + track->parser_priv->fontdata_used, str, len); track->parser_priv->fontdata_used += len; return 0; mem_fail: reset_embedded_font_parsing(track->parser_priv); return -1; } /** * \brief Parse a header line * \param track track * \param str string to parse, zero-terminated */ static int process_line(ASS_Track *track, char *str) { skip_spaces(&str); if (!ass_strncasecmp(str, "[Script Info]", 13)) { track->parser_priv->state = PST_INFO; } else if (!ass_strncasecmp(str, "[V4 
Styles]", 11)) { track->parser_priv->state = PST_STYLES; track->track_type = TRACK_TYPE_SSA; } else if (!ass_strncasecmp(str, "[V4+ Styles]", 12)) { track->parser_priv->state = PST_STYLES; track->track_type = TRACK_TYPE_ASS; } else if (!ass_strncasecmp(str, "[Events]", 8)) { track->parser_priv->state = PST_EVENTS; } else if (!ass_strncasecmp(str, "[Fonts]", 7)) { track->parser_priv->state = PST_FONTS; } else { switch (track->parser_priv->state) { case PST_INFO: process_info_line(track, str); break; case PST_STYLES: process_styles_line(track, str); break; case PST_EVENTS: process_events_line(track, str); break; case PST_FONTS: process_fonts_line(track, str); break; default: break; } } return 0; } static int process_text(ASS_Track *track, char *str) { char *p = str; while (1) { char *q; while (1) { if ((*p == '\r') || (*p == '\n')) ++p; else if (p[0] == '\xef' && p[1] == '\xbb' && p[2] == '\xbf') p += 3; // U+FFFE (BOM) else break; } for (q = p; ((*q != '\0') && (*q != '\r') && (*q != '\n')); ++q) { }; if (q == p) break; if (*q != '\0') *(q++) = '\0'; process_line(track, p); if (*q == '\0') break; p = q; } // there is no explicit end-of-font marker in ssa/ass if (track->parser_priv->fontname) decode_font(track); return 0; } /** * \brief Process a chunk of subtitle stream data. * \param track track * \param data string to parse * \param size length of data */ void ass_process_data(ASS_Track *track, char *data, int size) { char *str = malloc(size + 1); if (!str) return; memcpy(str, data, size); str[size] = '\0'; ass_msg(track->library, MSGL_V, "Event: %s", str); process_text(track, str); free(str); } /** * \brief Process CodecPrivate section of subtitle stream * \param track track * \param data string to parse * \param size length of data CodecPrivate section contains [Stream Info] and [V4+ Styles] ([V4 Styles] for SSA) sections */ void ass_process_codec_private(ASS_Track *track, char *data, int size) { ass_process_data(track, data, size); // probably an mkv produced by ancient mkvtoolnix // such files don't have [Events] and Format: headers if (!track->event_format) event_format_fallback(track); ass_process_force_style(track); } static int check_duplicate_event(ASS_Track *track, int ReadOrder) { if (track->parser_priv->read_order_bitmap) return test_and_set_read_order_bit(track, ReadOrder) > 0; // ignoring last event, it is the one we are comparing with for (int i = 0; i < track->n_events - 1; i++) if (track->events[i].ReadOrder == ReadOrder) return 1; return 0; } void ass_set_check_readorder(ASS_Track *track, int check_readorder) { track->parser_priv->check_readorder = check_readorder == 1; } /** * \brief Process a chunk of subtitle stream data. In Matroska, this contains exactly 1 event (or a commentary). 
* \param track track * \param data string to parse * \param size length of data * \param timecode starting time of the event (milliseconds) * \param duration duration of the event (milliseconds) */ void ass_process_chunk(ASS_Track *track, char *data, int size, long long timecode, long long duration) { char *str = NULL; int eid; char *p; char *token; ASS_Event *event; int check_readorder = track->parser_priv->check_readorder; if (check_readorder && !track->parser_priv->read_order_bitmap) { for (int i = 0; i < track->n_events; i++) { if (test_and_set_read_order_bit(track, track->events[i].ReadOrder) < 0) break; } } if (!track->event_format) { ass_msg(track->library, MSGL_WARN, "Event format header missing"); goto cleanup; } str = malloc(size + 1); if (!str) goto cleanup; memcpy(str, data, size); str[size] = '\0'; ass_msg(track->library, MSGL_V, "Event at %" PRId64 ", +%" PRId64 ": %s", (int64_t) timecode, (int64_t) duration, str); eid = ass_alloc_event(track); if (eid < 0) goto cleanup; event = track->events + eid; p = str; do { NEXT(p, token); event->ReadOrder = atoi(token); if (check_readorder && check_duplicate_event(track, event->ReadOrder)) break; NEXT(p, token); event->Layer = atoi(token); process_event_tail(track, event, p, 3); event->Start = timecode; event->Duration = duration; goto cleanup; // dump_events(tid); } while (0); // some error ass_free_event(track, eid); track->n_events--; cleanup: free(str); } /** * \brief Flush buffered events. * \param track track */ void ass_flush_events(ASS_Track *track) { if (track->events) { int eid; for (eid = 0; eid < track->n_events; eid++) ass_free_event(track, eid); track->n_events = 0; } free(track->parser_priv->read_order_bitmap); track->parser_priv->read_order_bitmap = NULL; track->parser_priv->read_order_elems = 0; } #ifdef CONFIG_ICONV /** \brief recode buffer to utf-8 * constraint: codepage != 0 * \param data pointer to text buffer * \param size buffer size * \return a pointer to recoded buffer, caller is responsible for freeing it **/ static char *sub_recode(ASS_Library *library, char *data, size_t size, char *codepage) { iconv_t icdsc; char *tocp = "UTF-8"; char *outbuf; assert(codepage); if ((icdsc = iconv_open(tocp, codepage)) != (iconv_t) (-1)) { ass_msg(library, MSGL_V, "Opened iconv descriptor"); } else { ass_msg(library, MSGL_ERR, "Error opening iconv descriptor"); return NULL; } { size_t osize = size; size_t ileft = size; size_t oleft = size - 1; char *ip; char *op; size_t rc; int clear = 0; outbuf = malloc(osize); if (!outbuf) goto out; ip = data; op = outbuf; while (1) { if (ileft) rc = iconv(icdsc, &ip, &ileft, &op, &oleft); else { // clear the conversion state and leave clear = 1; rc = iconv(icdsc, NULL, NULL, &op, &oleft); } if (rc == (size_t) (-1)) { if (errno == E2BIG) { size_t offset = op - outbuf; char *nbuf = realloc(outbuf, osize + size); if (!nbuf) { free(outbuf); outbuf = 0; goto out; } outbuf = nbuf; op = outbuf + offset; osize += size; oleft += size; } else { ass_msg(library, MSGL_WARN, "Error recoding file"); free(outbuf); outbuf = NULL; goto out; } } else if (clear) break; } outbuf[osize - oleft - 1] = 0; } out: if (icdsc != (iconv_t) (-1)) { (void) iconv_close(icdsc); ass_msg(library, MSGL_V, "Closed iconv descriptor"); } return outbuf; } #endif // ICONV /** * \brief read file contents into newly allocated buffer * \param fname file name * \param bufsize out: file size * \return pointer to file contents. Caller is responsible for its deallocation. 
*/ char *read_file(ASS_Library *library, char *fname, size_t *bufsize) { int res; long sz; long bytes_read; char *buf; FILE *fp = fopen(fname, "rb"); if (!fp) { ass_msg(library, MSGL_WARN, "ass_read_file(%s): fopen failed", fname); return 0; } res = fseek(fp, 0, SEEK_END); if (res == -1) { ass_msg(library, MSGL_WARN, "ass_read_file(%s): fseek failed", fname); fclose(fp); return 0; } sz = ftell(fp); rewind(fp); ass_msg(library, MSGL_V, "File size: %ld", sz); buf = sz < SIZE_MAX ? malloc(sz + 1) : NULL; if (!buf) { fclose(fp); return NULL; } assert(buf); bytes_read = 0; do { res = fread(buf + bytes_read, 1, sz - bytes_read, fp); if (res <= 0) { ass_msg(library, MSGL_INFO, "Read failed, %d: %s", errno, strerror(errno)); fclose(fp); free(buf); return 0; } bytes_read += res; } while (sz - bytes_read > 0); buf[sz] = '\0'; fclose(fp); if (bufsize) *bufsize = sz; return buf; } /* * \param buf pointer to subtitle text in utf-8 */ static ASS_Track *parse_memory(ASS_Library *library, char *buf) { ASS_Track *track; int i; track = ass_new_track(library); if (!track) return NULL; // process header process_text(track, buf); // external SSA/ASS subs does not have ReadOrder field for (i = 0; i < track->n_events; ++i) track->events[i].ReadOrder = i; if (track->track_type == TRACK_TYPE_UNKNOWN) { ass_free_track(track); return 0; } ass_process_force_style(track); return track; } /** * \brief Read subtitles from memory. * \param library libass library object * \param buf pointer to subtitles text * \param bufsize size of buffer * \param codepage recode buffer contents from given codepage * \return newly allocated track */ ASS_Track *ass_read_memory(ASS_Library *library, char *buf, size_t bufsize, char *codepage) { ASS_Track *track; int copied = 0; if (!buf) return 0; #ifdef CONFIG_ICONV if (codepage) { buf = sub_recode(library, buf, bufsize, codepage); if (!buf) return 0; else copied = 1; } #endif if (!copied) { char *newbuf = malloc(bufsize + 1); if (!newbuf) return 0; memcpy(newbuf, buf, bufsize); newbuf[bufsize] = '\0'; buf = newbuf; } track = parse_memory(library, buf); free(buf); if (!track) return 0; ass_msg(library, MSGL_INFO, "Added subtitle file: " "<memory> (%d styles, %d events)", track->n_styles, track->n_events); return track; } static char *read_file_recode(ASS_Library *library, char *fname, char *codepage, size_t *size) { char *buf; size_t bufsize; buf = read_file(library, fname, &bufsize); if (!buf) return 0; #ifdef CONFIG_ICONV if (codepage) { char *tmpbuf = sub_recode(library, buf, bufsize, codepage); free(buf); buf = tmpbuf; } if (!buf) return 0; #endif *size = bufsize; return buf; } /** * \brief Read subtitles from file. 
* \param library libass library object * \param fname file name * \param codepage recode buffer contents from given codepage * \return newly allocated track */ ASS_Track *ass_read_file(ASS_Library *library, char *fname, char *codepage) { char *buf; ASS_Track *track; size_t bufsize; buf = read_file_recode(library, fname, codepage, &bufsize); if (!buf) return 0; track = parse_memory(library, buf); free(buf); if (!track) return 0; track->name = strdup(fname); ass_msg(library, MSGL_INFO, "Added subtitle file: '%s' (%d styles, %d events)", fname, track->n_styles, track->n_events); return track; } /** * \brief read styles from file into already initialized track */ int ass_read_styles(ASS_Track *track, char *fname, char *codepage) { char *buf; ParserState old_state; size_t sz; buf = read_file(track->library, fname, &sz); if (!buf) return 1; #ifdef CONFIG_ICONV if (codepage) { char *tmpbuf; tmpbuf = sub_recode(track->library, buf, sz, codepage); free(buf); buf = tmpbuf; } if (!buf) return 1; #endif old_state = track->parser_priv->state; track->parser_priv->state = PST_STYLES; process_text(track, buf); free(buf); track->parser_priv->state = old_state; return 0; } long long ass_step_sub(ASS_Track *track, long long now, int movement) { int i; ASS_Event *best = NULL; long long target = now; int direction = (movement > 0 ? 1 : -1) * !!movement; if (track->n_events == 0) return 0; do { ASS_Event *closest = NULL; long long closest_time = now; for (i = 0; i < track->n_events; i++) { if (direction < 0) { long long end = track->events[i].Start + track->events[i].Duration; if (end < target) { if (!closest || end > closest_time) { closest = &track->events[i]; closest_time = end; } } } else if (direction > 0) { long long start = track->events[i].Start; if (start > target) { if (!closest || start < closest_time) { closest = &track->events[i]; closest_time = start; } } } else { long long start = track->events[i].Start; if (start < target) { if (!closest || start >= closest_time) { closest = &track->events[i]; closest_time = start; } } } } target = closest_time + direction; movement -= direction; if (closest) best = closest; } while (movement); return best ? best->Start - now : 0; } ASS_Track *ass_new_track(ASS_Library *library) { int def_sid = -1; ASS_Track *track = calloc(1, sizeof(ASS_Track)); if (!track) goto fail; track->library = library; track->ScaledBorderAndShadow = 0; track->parser_priv = calloc(1, sizeof(ASS_ParserPriv)); if (!track->parser_priv) goto fail; def_sid = ass_alloc_style(track); if (def_sid < 0) goto fail; set_default_style(track->styles + def_sid); track->default_style = def_sid; if (!track->styles[def_sid].Name || !track->styles[def_sid].FontName) goto fail; track->parser_priv->check_readorder = 1; return track; fail: if (track) { if (def_sid >= 0) ass_free_style(track, def_sid); free(track->parser_priv); free(track); } return NULL; } int ass_track_set_feature(ASS_Track *track, ASS_Feature feature, int enable) { switch (feature) { case ASS_FEATURE_INCOMPATIBLE_EXTENSIONS: //-fallthrough #ifdef USE_FRIBIDI_EX_API case ASS_FEATURE_BIDI_BRACKETS: track->parser_priv->bidi_brackets = !!enable; #endif return 0; default: return -1; } } /** * \brief Prepare track for rendering */ void ass_lazy_track_init(ASS_Library *lib, ASS_Track *track) { if (track->PlayResX > 0 && track->PlayResY > 0) return; if (track->PlayResX <= 0 && track->PlayResY <= 0) { ass_msg(lib, MSGL_WARN, "Neither PlayResX nor PlayResY defined. 
Assuming 384x288"); track->PlayResX = 384; track->PlayResY = 288; } else { if (track->PlayResY <= 0 && track->PlayResX == 1280) { track->PlayResY = 1024; ass_msg(lib, MSGL_WARN, "PlayResY undefined, setting to %d", track->PlayResY); } else if (track->PlayResY <= 0) { track->PlayResY = FFMAX(1, track->PlayResX * 3LL / 4); ass_msg(lib, MSGL_WARN, "PlayResY undefined, setting to %d", track->PlayResY); } else if (track->PlayResX <= 0 && track->PlayResY == 1024) { track->PlayResX = 1280; ass_msg(lib, MSGL_WARN, "PlayResX undefined, setting to %d", track->PlayResX); } else if (track->PlayResX <= 0) { track->PlayResX = FFMAX(1, track->PlayResY * 4LL / 3); ass_msg(lib, MSGL_WARN, "PlayResX undefined, setting to %d", track->PlayResX); } } }
null
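Note on the record that follows (index 229, CWE-787): its "source" and "ground_truth" columns both carry libass's ass.c, and the visible difference between the two cells is the buffer-size expression in decode_font(). Below is a minimal standalone sketch of that difference; it is not part of the dataset. The FFMAX macro, the main() harness, and the sample sizes are assumptions added for illustration, while the two size expressions are copied from the record's columns. Because size is a size_t, "size % 4 - 1" wraps to SIZE_MAX whenever size is a multiple of 4, so the sum comes out one byte smaller than the size / 4 * 3 bytes that decode_chars() writes, which is the out-of-bounds write the patched expression avoids by subtracting only after FFMAX.

#include <stdio.h>
#include <stddef.h>

/* Assumed definition; mirrors the usual FFMAX macro. */
#define FFMAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    /* Hypothetical fontdata_used values covering size % 4 == 0, 2, 3. */
    size_t samples[] = { 4, 6, 7, 8 };

    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        size_t size = samples[i];

        /* Bytes decode_chars() actually emits: 3 per full 4-char group,
         * plus (n - 1) for a trailing group of n = 2 or 3 chars. */
        size_t needed = size / 4 * 3 + (size % 4 ? size % 4 - 1 : 0);

        /* Expression from the "source" column: size % 4 - 1 underflows
         * when size % 4 == 0, so the sum wraps one byte short. */
        size_t vulnerable = size / 4 * 3 + FFMAX(size % 4 - 1, 0);

        /* Expression from the "ground_truth" column: the subtraction
         * happens after FFMAX, so it never underflows. */
        size_t fixed = size / 4 * 3 + FFMAX(size % 4, 1) - 1;

        printf("size=%zu needed=%zu vulnerable=%zu fixed=%zu\n",
               size, needed, vulnerable, fixed);
    }
    return 0;
}

For size 4 or 8 the "vulnerable" value prints as one less than "needed", matching the undersized malloc in the source column; for the other sizes the two expressions agree.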
229
CWE-787
CVE-2020-5234
/* * Copyright (C) 2006 Evgeniy Stepanov <eugeni.stepanov@gmail.com> * * This file is part of libass. * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "config.h" #include "ass_compat.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <errno.h> #include <sys/types.h> #include <sys/stat.h> #include <inttypes.h> #ifdef CONFIG_ICONV #include <iconv.h> #endif #include "ass.h" #include "ass_utils.h" #include "ass_library.h" #include "ass_priv.h" #include "ass_shaper.h" #include "ass_string.h" #define ass_atof(STR) (ass_strtod((STR),NULL)) static const char *const ass_style_format = "Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, " "OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, " "ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, " "Alignment, MarginL, MarginR, MarginV, Encoding"; static const char *const ass_event_format = "Layer, Start, End, Style, Name, " "MarginL, MarginR, MarginV, Effect, Text"; static const char *const ssa_style_format = "Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, " "TertiaryColour, BackColour, Bold, Italic, BorderStyle, Outline, " "Shadow, Alignment, MarginL, MarginR, MarginV, AlphaLevel, Encoding"; static const char *const ssa_event_format = "Marked, Start, End, Style, Name, " "MarginL, MarginR, MarginV, Effect, Text"; #define ASS_STYLES_ALLOC 20 int ass_library_version(void) { return LIBASS_VERSION; } void ass_free_track(ASS_Track *track) { int i; if (!track) return; if (track->parser_priv) { free(track->parser_priv->read_order_bitmap); free(track->parser_priv->fontname); free(track->parser_priv->fontdata); free(track->parser_priv); } free(track->style_format); free(track->event_format); free(track->Language); if (track->styles) { for (i = 0; i < track->n_styles; ++i) ass_free_style(track, i); } free(track->styles); if (track->events) { for (i = 0; i < track->n_events; ++i) ass_free_event(track, i); } free(track->events); free(track->name); free(track); } /// \brief Allocate a new style struct /// \param track track /// \return style id or negative value on failure int ass_alloc_style(ASS_Track *track) { int sid; assert(track->n_styles <= track->max_styles); if (track->n_styles == track->max_styles) { if (track->max_styles >= FFMIN(SIZE_MAX, INT_MAX) - ASS_STYLES_ALLOC) return -1; int new_max = track->max_styles + ASS_STYLES_ALLOC; if (!ASS_REALLOC_ARRAY(track->styles, new_max)) return -1; track->max_styles = new_max; } sid = track->n_styles++; memset(track->styles + sid, 0, sizeof(ASS_Style)); return sid; } /// \brief Allocate a new event struct /// \param track track /// \return event id or negative value on failure int ass_alloc_event(ASS_Track *track) { int eid; assert(track->n_events <= track->max_events); if (track->n_events == track->max_events) { if (track->max_events >= FFMIN(SIZE_MAX, INT_MAX) / 2) 
return -1; int new_max = track->max_events * 2 + 1; if (!ASS_REALLOC_ARRAY(track->events, new_max)) return -1; track->max_events = new_max; } eid = track->n_events++; memset(track->events + eid, 0, sizeof(ASS_Event)); return eid; } void ass_free_event(ASS_Track *track, int eid) { ASS_Event *event = track->events + eid; free(event->Name); free(event->Effect); free(event->Text); free(event->render_priv); } void ass_free_style(ASS_Track *track, int sid) { ASS_Style *style = track->styles + sid; free(style->Name); free(style->FontName); } static int resize_read_order_bitmap(ASS_Track *track, int max_id) { // Don't allow malicious files to OOM us easily. Also avoids int overflows. if (max_id < 0 || max_id >= 10 * 1024 * 1024 * 8) goto fail; assert(track->parser_priv->read_order_bitmap || !track->parser_priv->read_order_elems); if (max_id >= track->parser_priv->read_order_elems * 32) { int oldelems = track->parser_priv->read_order_elems; int elems = ((max_id + 31) / 32 + 1) * 2; assert(elems >= oldelems); track->parser_priv->read_order_elems = elems; void *new_bitmap = realloc(track->parser_priv->read_order_bitmap, elems * 4); if (!new_bitmap) goto fail; track->parser_priv->read_order_bitmap = new_bitmap; memset(track->parser_priv->read_order_bitmap + oldelems, 0, (elems - oldelems) * 4); } return 0; fail: free(track->parser_priv->read_order_bitmap); track->parser_priv->read_order_bitmap = NULL; track->parser_priv->read_order_elems = 0; return -1; } static int test_and_set_read_order_bit(ASS_Track *track, int id) { if (resize_read_order_bitmap(track, id) < 0) return -1; int index = id / 32; uint32_t bit = 1u << (id % 32); if (track->parser_priv->read_order_bitmap[index] & bit) return 1; track->parser_priv->read_order_bitmap[index] |= bit; return 0; } // ============================================================================================== /** * \brief Set up default style * \param style style to edit to defaults * The parameters are mostly taken directly from VSFilter source for * best compatibility. */ static void set_default_style(ASS_Style *style) { style->Name = strdup("Default"); style->FontName = strdup("Arial"); style->FontSize = 18; style->PrimaryColour = 0xffffff00; style->SecondaryColour = 0x00ffff00; style->OutlineColour = 0x00000000; style->BackColour = 0x00000080; style->Bold = 200; style->ScaleX = 1.0; style->ScaleY = 1.0; style->Spacing = 0; style->BorderStyle = 1; style->Outline = 2; style->Shadow = 3; style->Alignment = 2; style->MarginL = style->MarginR = style->MarginV = 20; } static long long string2timecode(ASS_Library *library, char *p) { int h, m, s, ms; long long tm; int res = sscanf(p, "%d:%d:%d.%d", &h, &m, &s, &ms); if (res < 4) { ass_msg(library, MSGL_WARN, "Bad timestamp"); return 0; } tm = ((h * 60LL + m) * 60 + s) * 1000 + ms * 10LL; return tm; } #define NEXT(str,token) \ token = next_token(&str); \ if (!token) break; #define ALIAS(alias,name) \ if (ass_strcasecmp(tname, #alias) == 0) {tname = #name;} /* One section started with PARSE_START and PARSE_END parses a single token * (contained in the variable named token) for the header indicated by the * variable tname. It does so by chaining a number of else-if statements, each * of which checks if the tname variable indicates that this header should be * parsed. The first parameter of the macro gives the name of the header. * * The string that is passed is in str. str is advanced to the next token if * a header could be parsed. 
The parsed results are stored in the variable * target, which has the type ASS_Style* or ASS_Event*. */ #define PARSE_START if (0) { #define PARSE_END } #define ANYVAL(name,func) \ } else if (ass_strcasecmp(tname, #name) == 0) { \ target->name = func(token); #define STRVAL(name) \ } else if (ass_strcasecmp(tname, #name) == 0) { \ char *new_str = strdup(token); \ if (new_str) { \ free(target->name); \ target->name = new_str; \ } #define STARREDSTRVAL(name) \ } else if (ass_strcasecmp(tname, #name) == 0) { \ while (*token == '*') ++token; \ char *new_str = strdup(token); \ if (new_str) { \ free(target->name); \ target->name = new_str; \ } #define COLORVAL(name) ANYVAL(name,parse_color_header) #define INTVAL(name) ANYVAL(name,atoi) #define FPVAL(name) ANYVAL(name,ass_atof) #define TIMEVAL(name) \ } else if (ass_strcasecmp(tname, #name) == 0) { \ target->name = string2timecode(track->library, token); #define STYLEVAL(name) \ } else if (ass_strcasecmp(tname, #name) == 0) { \ target->name = lookup_style(track, token); // skip spaces in str beforehand, or trim leading spaces afterwards static inline void advance_token_pos(const char **const str, const char **const start, const char **const end) { *start = *str; *end = *start; while (**end != '\0' && **end != ',') ++*end; *str = *end + (**end == ','); rskip_spaces((char**)end, (char*)*start); } static char *next_token(char **str) { char *p; char *start; skip_spaces(str); if (**str == '\0') { return 0; } advance_token_pos((const char**)str, (const char**)&start, (const char**)&p); *p = '\0'; return start; } /** * \brief Parse the tail of Dialogue line * \param track track * \param event parsed data goes here * \param str string to parse, zero-terminated * \param n_ignored number of format options to skip at the beginning */ static int process_event_tail(ASS_Track *track, ASS_Event *event, char *str, int n_ignored) { char *token; char *tname; char *p = str; int i; ASS_Event *target = event; char *format = strdup(track->event_format); if (!format) return -1; char *q = format; // format scanning pointer for (i = 0; i < n_ignored; ++i) { NEXT(q, tname); } while (1) { NEXT(q, tname); if (ass_strcasecmp(tname, "Text") == 0) { char *last; event->Text = strdup(p); if (event->Text && *event->Text != 0) { last = event->Text + strlen(event->Text) - 1; if (last >= event->Text && *last == '\r') *last = 0; } event->Duration -= event->Start; free(format); return event->Text ? 
0 : -1; // "Text" is always the last } NEXT(p, token); ALIAS(End, Duration) // temporarily store end timecode in event->Duration PARSE_START INTVAL(Layer) STYLEVAL(Style) STRVAL(Name) STRVAL(Effect) INTVAL(MarginL) INTVAL(MarginR) INTVAL(MarginV) TIMEVAL(Start) TIMEVAL(Duration) PARSE_END } free(format); return 1; } /** * \brief Parse command line style overrides (--ass-force-style option) * \param track track to apply overrides to * The format for overrides is [StyleName.]Field=Value */ void ass_process_force_style(ASS_Track *track) { char **fs, *eq, *dt, *style, *tname, *token; ASS_Style *target; int sid; char **list = track->library->style_overrides; if (!list) return; for (fs = list; *fs; ++fs) { eq = strrchr(*fs, '='); if (!eq) continue; *eq = '\0'; token = eq + 1; if (!ass_strcasecmp(*fs, "PlayResX")) track->PlayResX = atoi(token); else if (!ass_strcasecmp(*fs, "PlayResY")) track->PlayResY = atoi(token); else if (!ass_strcasecmp(*fs, "Timer")) track->Timer = ass_atof(token); else if (!ass_strcasecmp(*fs, "WrapStyle")) track->WrapStyle = atoi(token); else if (!ass_strcasecmp(*fs, "ScaledBorderAndShadow")) track->ScaledBorderAndShadow = parse_bool(token); else if (!ass_strcasecmp(*fs, "Kerning")) track->Kerning = parse_bool(token); else if (!ass_strcasecmp(*fs, "YCbCr Matrix")) track->YCbCrMatrix = parse_ycbcr_matrix(token); dt = strrchr(*fs, '.'); if (dt) { *dt = '\0'; style = *fs; tname = dt + 1; } else { style = NULL; tname = *fs; } for (sid = 0; sid < track->n_styles; ++sid) { if (style == NULL || ass_strcasecmp(track->styles[sid].Name, style) == 0) { target = track->styles + sid; PARSE_START STRVAL(FontName) COLORVAL(PrimaryColour) COLORVAL(SecondaryColour) COLORVAL(OutlineColour) COLORVAL(BackColour) FPVAL(FontSize) INTVAL(Bold) INTVAL(Italic) INTVAL(Underline) INTVAL(StrikeOut) FPVAL(Spacing) FPVAL(Angle) INTVAL(BorderStyle) INTVAL(Alignment) INTVAL(Justify) INTVAL(MarginL) INTVAL(MarginR) INTVAL(MarginV) INTVAL(Encoding) FPVAL(ScaleX) FPVAL(ScaleY) FPVAL(Outline) FPVAL(Shadow) FPVAL(Blur) PARSE_END } } *eq = '='; if (dt) *dt = '.'; } } /** * \brief Parse the Style line * \param track track * \param str string to parse, zero-terminated * Allocates a new style struct. 
*/ static int process_style(ASS_Track *track, char *str) { char *token; char *tname; char *p = str; char *format; char *q; // format scanning pointer int sid; ASS_Style *style; ASS_Style *target; if (!track->style_format) { // no style format header // probably an ancient script version if (track->track_type == TRACK_TYPE_SSA) track->style_format = strdup(ssa_style_format); else track->style_format = strdup(ass_style_format); if (!track->style_format) return -1; } q = format = strdup(track->style_format); if (!q) return -1; ass_msg(track->library, MSGL_V, "[%p] Style: %s", track, str); sid = ass_alloc_style(track); if (sid < 0) { free(format); return -1; } style = track->styles + sid; target = style; // fill style with some default values style->ScaleX = 100.; style->ScaleY = 100.; while (1) { NEXT(q, tname); NEXT(p, token); PARSE_START STARREDSTRVAL(Name) STRVAL(FontName) COLORVAL(PrimaryColour) COLORVAL(SecondaryColour) COLORVAL(OutlineColour) // TertiaryColor COLORVAL(BackColour) // SSA uses BackColour for both outline and shadow // this will destroy SSA's TertiaryColour, but i'm not going to use it anyway if (track->track_type == TRACK_TYPE_SSA) target->OutlineColour = target->BackColour; FPVAL(FontSize) INTVAL(Bold) INTVAL(Italic) INTVAL(Underline) INTVAL(StrikeOut) FPVAL(Spacing) FPVAL(Angle) INTVAL(BorderStyle) INTVAL(Alignment) if (track->track_type == TRACK_TYPE_ASS) target->Alignment = numpad2align(target->Alignment); // VSFilter compatibility else if (target->Alignment == 8) target->Alignment = 3; else if (target->Alignment == 4) target->Alignment = 11; INTVAL(MarginL) INTVAL(MarginR) INTVAL(MarginV) INTVAL(Encoding) FPVAL(ScaleX) FPVAL(ScaleY) FPVAL(Outline) FPVAL(Shadow) PARSE_END } free(format); style->ScaleX = FFMAX(style->ScaleX, 0.) / 100.; style->ScaleY = FFMAX(style->ScaleY, 0.) / 100.; style->Spacing = FFMAX(style->Spacing, 0.); style->Outline = FFMAX(style->Outline, 0.); style->Shadow = FFMAX(style->Shadow, 0.); style->Bold = !!style->Bold; style->Italic = !!style->Italic; style->Underline = !!style->Underline; style->StrikeOut = !!style->StrikeOut; if (!style->Name) style->Name = strdup("Default"); if (!style->FontName) style->FontName = strdup("Arial"); if (!style->Name || !style->FontName) { ass_free_style(track, sid); track->n_styles--; return -1; } if (strcmp(target->Name, "Default") == 0) track->default_style = sid; return 0; } static bool format_line_compare(const char *fmt1, const char *fmt2) { while (true) { const char *tk1_start, *tk2_start; const char *tk1_end, *tk2_end; skip_spaces((char**)&fmt1); skip_spaces((char**)&fmt2); if (!*fmt1 || !*fmt2) break; advance_token_pos(&fmt1, &tk1_start, &tk1_end); advance_token_pos(&fmt2, &tk2_start, &tk2_end); if ((tk1_end-tk1_start) != (tk2_end-tk2_start)) return false; if (ass_strncasecmp(tk1_start, tk2_start, tk1_end-tk1_start)) return false; } return *fmt1 == *fmt2; } /** * \brief Set SBAS=1 if not set explicitly in case of custom format line * \param track track * \param fmt format line of file * \param std standard format line * * As of writing libass is the only renderer accepting custom format lines. * For years libass defaultet SBAS to yes instead of no. * To avoid breaking released scripts with custom format lines, * keep SBAS=1 default for custom format files. 
*/ static void custom_format_line_compatibility(ASS_Track *const track, const char *const fmt, const char *const std) { if (!(track->parser_priv->header_flags & SINFO_SCALEDBORDER) && !format_line_compare(fmt, std)) { ass_msg(track->library, MSGL_INFO, "Track has custom format line(s). " "'ScaledBorderAndShadow' will default to 'yes'."); track->ScaledBorderAndShadow = 1; } } static int process_styles_line(ASS_Track *track, char *str) { int ret = 0; if (!strncmp(str, "Format:", 7)) { char *p = str + 7; skip_spaces(&p); free(track->style_format); track->style_format = strdup(p); if (!track->style_format) return -1; ass_msg(track->library, MSGL_DBG2, "Style format: %s", track->style_format); if (track->track_type == TRACK_TYPE_ASS) custom_format_line_compatibility(track, p, ass_style_format); else custom_format_line_compatibility(track, p, ssa_style_format); } else if (!strncmp(str, "Style:", 6)) { char *p = str + 6; skip_spaces(&p); ret = process_style(track, p); } return ret; } static inline void check_duplicate_info_line(const ASS_Track *const track, const ScriptInfo si, const char *const name) { if (track->parser_priv->header_flags & si) ass_msg(track->library, MSGL_WARN, "Duplicate Script Info Header '%s'. Previous value overwritten!", name); else track->parser_priv->header_flags |= si; } static int process_info_line(ASS_Track *track, char *str) { if (!strncmp(str, "PlayResX:", 9)) { check_duplicate_info_line(track, SINFO_PLAYRESX, "PlayResX"); track->PlayResX = atoi(str + 9); } else if (!strncmp(str, "PlayResY:", 9)) { check_duplicate_info_line(track, SINFO_PLAYRESY, "PlayResY"); track->PlayResY = atoi(str + 9); } else if (!strncmp(str, "Timer:", 6)) { check_duplicate_info_line(track, SINFO_TIMER, "Timer"); track->Timer = ass_atof(str + 6); } else if (!strncmp(str, "WrapStyle:", 10)) { check_duplicate_info_line(track, SINFO_WRAPSTYLE, "WrapStyle"); track->WrapStyle = atoi(str + 10); } else if (!strncmp(str, "ScaledBorderAndShadow:", 22)) { check_duplicate_info_line(track, SINFO_SCALEDBORDER, "ScaledBorderAndShadow"); track->ScaledBorderAndShadow = parse_bool(str + 22); } else if (!strncmp(str, "Kerning:", 8)) { check_duplicate_info_line(track, SINFO_KERNING, "Kerning"); track->Kerning = parse_bool(str + 8); } else if (!strncmp(str, "YCbCr Matrix:", 13)) { check_duplicate_info_line(track, SINFO_COLOURMATRIX, "YCbCr Matrix"); track->YCbCrMatrix = parse_ycbcr_matrix(str + 13); } else if (!strncmp(str, "Language:", 9)) { check_duplicate_info_line(track, SINFO_LANGUAGE, "Language"); char *p = str + 9; while (*p && ass_isspace(*p)) p++; free(track->Language); track->Language = strndup(p, 2); } else if (!strncmp(str, "; Script generated by ", 22)) { if (!strncmp(str + 22,"FFmpeg/Lavc", 11)) track->parser_priv->header_flags |= GENBY_FFMPEG; } return 0; } static void event_format_fallback(ASS_Track *track) { track->parser_priv->state = PST_EVENTS; if (track->track_type == TRACK_TYPE_SSA) track->event_format = strdup(ssa_event_format); else track->event_format = strdup(ass_event_format); ass_msg(track->library, MSGL_V, "No event format found, using fallback"); } /** * \brief Return if track is post-signature and pre-SBAS ffmpeg track * \param track track */ static bool detect_legacy_conv_subs(ASS_Track *track) { /* * FFmpeg and libav convert srt subtitles to ass. * In legacy versions, they did not set the 'ScaledBorderAndShadow' header, * but expected it to default to yes (which libass did). 
* To avoid breaking them, we try to detect these * converted subs by common properties of ffmpeg/libav's converted subs. * Since files with custom format lines (-2014.10.11) default to SBAS=1 * regardless of being ffmpeg generated or not, we are only concerned with * post-signature and pre-SBAS ffmpeg-files (2014.10.11-2020.04.17). * We want to avoid matching modified ffmpeg files though. * * Relevant ffmpeg commits are: * 2c77c90684e24ef16f7e7c4462e011434cee6a98 2010.12.29 * Initial conversion format. * Style "Format:" line is mix of SSA and ASS * Event "Format:" line * "Format: Layer, Start, End, Text\r\n" * Only Header in ScriptInfo is "ScriptType: v4.00+" * 0e7782c08ec77739edb0b98ba5d896b45e98235f 2012.06.15 * Adds 'Style' to Event "Format:" line * 5039aadf68deb9ad6dd0737ea11259fe53d3727b 2014.06.18 * Adds PlayerRes(X|Y) (384x288) * (moved below ScriptType: a few minutes later) * 40b9f28641b696c6bb73ce49dc97c2ce2700cbdb 2014.10.11 14:31:23 +0200 * Regular full ASS Event and Style "Format:" lines * 52b0a0ecaa02e17f7e01bead8c3f215f1cfd48dc 2014.10.11 18:37:43 +0200 <== * Signature comment * 56bc0a6736cdc7edab837ff8f304661fd16de0e4 2015.02.08 * Allow custom PlayRes(X|Y) * a8ba2a2c1294a330a0e79ae7f0d3a203a7599166 2020.04.17 * Set 'ScaledBorderAndShadow: yes' * * libav outputs initial ffmpeg format. (no longer maintained) */ // GENBY_FFMPEG and exact ffmpeg headers required // Note: If there's SINFO_SCRIPTTYPE in the future this needs to be updated if (track->parser_priv->header_flags ^ (SINFO_PLAYRESX | SINFO_PLAYRESY | GENBY_FFMPEG)) return false; // Legacy ffmpeg only ever has one style // Check 2 not 1 because libass also adds a def style if (track->n_styles != 2 || strncmp(track->styles[1].Name, "Default", 7)) return false; return true; } static int process_events_line(ASS_Track *track, char *str) { if (!strncmp(str, "Format:", 7)) { char *p = str + 7; skip_spaces(&p); free(track->event_format); track->event_format = strdup(p); if (!track->event_format) return -1; ass_msg(track->library, MSGL_DBG2, "Event format: %s", track->event_format); if (track->track_type == TRACK_TYPE_ASS) custom_format_line_compatibility(track, p, ass_event_format); else custom_format_line_compatibility(track, p, ssa_event_format); // Guess if we are dealing with legacy ffmpeg subs and change accordingly // If file has no event format it was probably not created by ffmpeg/libav if (detect_legacy_conv_subs(track)) { track->ScaledBorderAndShadow = 1; ass_msg(track->library, MSGL_INFO, "Track treated as legacy ffmpeg sub."); } } else if (!strncmp(str, "Dialogue:", 9)) { // This should never be reached for embedded subtitles. 
// They have slightly different format and are parsed in ass_process_chunk, // called directly from demuxer int eid; ASS_Event *event; // We can't parse events without event_format if (!track->event_format) { event_format_fallback(track); if (!track->event_format) return -1; } str += 9; skip_spaces(&str); eid = ass_alloc_event(track); if (eid < 0) return -1; event = track->events + eid; return process_event_tail(track, event, str, 0); } else { ass_msg(track->library, MSGL_V, "Not understood: '%.30s'", str); } return 0; } static unsigned char *decode_chars(const unsigned char *src, unsigned char *dst, size_t cnt_in) { uint32_t value = 0; for (int i = 0; i < cnt_in; i++) value |= (uint32_t) ((src[i] - 33u) & 63) << 6 * (3 - i); *dst++ = value >> 16; if (cnt_in >= 3) *dst++ = value >> 8 & 0xff; if (cnt_in >= 4) *dst++ = value & 0xff; return dst; } static void reset_embedded_font_parsing(ASS_ParserPriv *parser_priv) { free(parser_priv->fontname); free(parser_priv->fontdata); parser_priv->fontname = NULL; parser_priv->fontdata = NULL; parser_priv->fontdata_size = 0; parser_priv->fontdata_used = 0; } static int decode_font(ASS_Track *track) { unsigned char *p; unsigned char *q; size_t i; size_t size; // original size size_t dsize; // decoded size unsigned char *buf = 0; ass_msg(track->library, MSGL_V, "Font: %d bytes encoded data", track->parser_priv->fontdata_used); size = track->parser_priv->fontdata_used; if (size % 4 == 1) { ass_msg(track->library, MSGL_ERR, "Bad encoded data size"); goto error_decode_font; } buf = malloc(size / 4 * 3 + FFMAX(size % 4 - 1, 0)); if (!buf) goto error_decode_font; q = buf; for (i = 0, p = (unsigned char *) track->parser_priv->fontdata; i < size / 4; i++, p += 4) { q = decode_chars(p, q, 4); } if (size % 4 == 2) { q = decode_chars(p, q, 2); } else if (size % 4 == 3) { q = decode_chars(p, q, 3); } dsize = q - buf; assert(dsize == size / 4 * 3 + FFMAX(size % 4 - 1, 0)); if (track->library->extract_fonts) { ass_add_font(track->library, track->parser_priv->fontname, (char *) buf, dsize); } error_decode_font: free(buf); reset_embedded_font_parsing(track->parser_priv); return 0; } static int process_fonts_line(ASS_Track *track, char *str) { size_t len; if (!strncmp(str, "fontname:", 9)) { char *p = str + 9; skip_spaces(&p); if (track->parser_priv->fontname) { decode_font(track); } track->parser_priv->fontname = strdup(p); if (!track->parser_priv->fontname) return -1; ass_msg(track->library, MSGL_V, "Fontname: %s", track->parser_priv->fontname); return 0; } if (!track->parser_priv->fontname) { ass_msg(track->library, MSGL_V, "Not understood: '%s'", str); return 1; } len = strlen(str); if (track->parser_priv->fontdata_used >= SIZE_MAX - FFMAX(len, 100 * 1024)) { goto mem_fail; } else if (track->parser_priv->fontdata_used + len > track->parser_priv->fontdata_size) { size_t new_size = track->parser_priv->fontdata_size + FFMAX(len, 100 * 1024); if (!ASS_REALLOC_ARRAY(track->parser_priv->fontdata, new_size)) goto mem_fail; track->parser_priv->fontdata_size = new_size; } memcpy(track->parser_priv->fontdata + track->parser_priv->fontdata_used, str, len); track->parser_priv->fontdata_used += len; return 0; mem_fail: reset_embedded_font_parsing(track->parser_priv); return -1; } /** * \brief Parse a header line * \param track track * \param str string to parse, zero-terminated */ static int process_line(ASS_Track *track, char *str) { skip_spaces(&str); if (!ass_strncasecmp(str, "[Script Info]", 13)) { track->parser_priv->state = PST_INFO; } else if (!ass_strncasecmp(str, "[V4 
Styles]", 11)) { track->parser_priv->state = PST_STYLES; track->track_type = TRACK_TYPE_SSA; } else if (!ass_strncasecmp(str, "[V4+ Styles]", 12)) { track->parser_priv->state = PST_STYLES; track->track_type = TRACK_TYPE_ASS; } else if (!ass_strncasecmp(str, "[Events]", 8)) { track->parser_priv->state = PST_EVENTS; } else if (!ass_strncasecmp(str, "[Fonts]", 7)) { track->parser_priv->state = PST_FONTS; } else { switch (track->parser_priv->state) { case PST_INFO: process_info_line(track, str); break; case PST_STYLES: process_styles_line(track, str); break; case PST_EVENTS: process_events_line(track, str); break; case PST_FONTS: process_fonts_line(track, str); break; default: break; } } return 0; } static int process_text(ASS_Track *track, char *str) { char *p = str; while (1) { char *q; while (1) { if ((*p == '\r') || (*p == '\n')) ++p; else if (p[0] == '\xef' && p[1] == '\xbb' && p[2] == '\xbf') p += 3; // U+FFFE (BOM) else break; } for (q = p; ((*q != '\0') && (*q != '\r') && (*q != '\n')); ++q) { }; if (q == p) break; if (*q != '\0') *(q++) = '\0'; process_line(track, p); if (*q == '\0') break; p = q; } // there is no explicit end-of-font marker in ssa/ass if (track->parser_priv->fontname) decode_font(track); return 0; } /** * \brief Process a chunk of subtitle stream data. * \param track track * \param data string to parse * \param size length of data */ void ass_process_data(ASS_Track *track, char *data, int size) { char *str = malloc(size + 1); if (!str) return; memcpy(str, data, size); str[size] = '\0'; ass_msg(track->library, MSGL_V, "Event: %s", str); process_text(track, str); free(str); } /** * \brief Process CodecPrivate section of subtitle stream * \param track track * \param data string to parse * \param size length of data CodecPrivate section contains [Stream Info] and [V4+ Styles] ([V4 Styles] for SSA) sections */ void ass_process_codec_private(ASS_Track *track, char *data, int size) { ass_process_data(track, data, size); // probably an mkv produced by ancient mkvtoolnix // such files don't have [Events] and Format: headers if (!track->event_format) event_format_fallback(track); ass_process_force_style(track); } static int check_duplicate_event(ASS_Track *track, int ReadOrder) { if (track->parser_priv->read_order_bitmap) return test_and_set_read_order_bit(track, ReadOrder) > 0; // ignoring last event, it is the one we are comparing with for (int i = 0; i < track->n_events - 1; i++) if (track->events[i].ReadOrder == ReadOrder) return 1; return 0; } void ass_set_check_readorder(ASS_Track *track, int check_readorder) { track->parser_priv->check_readorder = check_readorder == 1; } /** * \brief Process a chunk of subtitle stream data. In Matroska, this contains exactly 1 event (or a commentary). 
* \param track track * \param data string to parse * \param size length of data * \param timecode starting time of the event (milliseconds) * \param duration duration of the event (milliseconds) */ void ass_process_chunk(ASS_Track *track, char *data, int size, long long timecode, long long duration) { char *str = NULL; int eid; char *p; char *token; ASS_Event *event; int check_readorder = track->parser_priv->check_readorder; if (check_readorder && !track->parser_priv->read_order_bitmap) { for (int i = 0; i < track->n_events; i++) { if (test_and_set_read_order_bit(track, track->events[i].ReadOrder) < 0) break; } } if (!track->event_format) { ass_msg(track->library, MSGL_WARN, "Event format header missing"); goto cleanup; } str = malloc(size + 1); if (!str) goto cleanup; memcpy(str, data, size); str[size] = '\0'; ass_msg(track->library, MSGL_V, "Event at %" PRId64 ", +%" PRId64 ": %s", (int64_t) timecode, (int64_t) duration, str); eid = ass_alloc_event(track); if (eid < 0) goto cleanup; event = track->events + eid; p = str; do { NEXT(p, token); event->ReadOrder = atoi(token); if (check_readorder && check_duplicate_event(track, event->ReadOrder)) break; NEXT(p, token); event->Layer = atoi(token); process_event_tail(track, event, p, 3); event->Start = timecode; event->Duration = duration; goto cleanup; // dump_events(tid); } while (0); // some error ass_free_event(track, eid); track->n_events--; cleanup: free(str); } /** * \brief Flush buffered events. * \param track track */ void ass_flush_events(ASS_Track *track) { if (track->events) { int eid; for (eid = 0; eid < track->n_events; eid++) ass_free_event(track, eid); track->n_events = 0; } free(track->parser_priv->read_order_bitmap); track->parser_priv->read_order_bitmap = NULL; track->parser_priv->read_order_elems = 0; } #ifdef CONFIG_ICONV /** \brief recode buffer to utf-8 * constraint: codepage != 0 * \param data pointer to text buffer * \param size buffer size * \return a pointer to recoded buffer, caller is responsible for freeing it **/ static char *sub_recode(ASS_Library *library, char *data, size_t size, char *codepage) { iconv_t icdsc; char *tocp = "UTF-8"; char *outbuf; assert(codepage); if ((icdsc = iconv_open(tocp, codepage)) != (iconv_t) (-1)) { ass_msg(library, MSGL_V, "Opened iconv descriptor"); } else { ass_msg(library, MSGL_ERR, "Error opening iconv descriptor"); return NULL; } { size_t osize = size; size_t ileft = size; size_t oleft = size - 1; char *ip; char *op; size_t rc; int clear = 0; outbuf = malloc(osize); if (!outbuf) goto out; ip = data; op = outbuf; while (1) { if (ileft) rc = iconv(icdsc, &ip, &ileft, &op, &oleft); else { // clear the conversion state and leave clear = 1; rc = iconv(icdsc, NULL, NULL, &op, &oleft); } if (rc == (size_t) (-1)) { if (errno == E2BIG) { size_t offset = op - outbuf; char *nbuf = realloc(outbuf, osize + size); if (!nbuf) { free(outbuf); outbuf = 0; goto out; } outbuf = nbuf; op = outbuf + offset; osize += size; oleft += size; } else { ass_msg(library, MSGL_WARN, "Error recoding file"); free(outbuf); outbuf = NULL; goto out; } } else if (clear) break; } outbuf[osize - oleft - 1] = 0; } out: if (icdsc != (iconv_t) (-1)) { (void) iconv_close(icdsc); ass_msg(library, MSGL_V, "Closed iconv descriptor"); } return outbuf; } #endif // ICONV /** * \brief read file contents into newly allocated buffer * \param fname file name * \param bufsize out: file size * \return pointer to file contents. Caller is responsible for its deallocation. 
*/ char *read_file(ASS_Library *library, char *fname, size_t *bufsize) { int res; long sz; long bytes_read; char *buf; FILE *fp = fopen(fname, "rb"); if (!fp) { ass_msg(library, MSGL_WARN, "ass_read_file(%s): fopen failed", fname); return 0; } res = fseek(fp, 0, SEEK_END); if (res == -1) { ass_msg(library, MSGL_WARN, "ass_read_file(%s): fseek failed", fname); fclose(fp); return 0; } sz = ftell(fp); rewind(fp); ass_msg(library, MSGL_V, "File size: %ld", sz); buf = sz < SIZE_MAX ? malloc(sz + 1) : NULL; if (!buf) { fclose(fp); return NULL; } assert(buf); bytes_read = 0; do { res = fread(buf + bytes_read, 1, sz - bytes_read, fp); if (res <= 0) { ass_msg(library, MSGL_INFO, "Read failed, %d: %s", errno, strerror(errno)); fclose(fp); free(buf); return 0; } bytes_read += res; } while (sz - bytes_read > 0); buf[sz] = '\0'; fclose(fp); if (bufsize) *bufsize = sz; return buf; } /* * \param buf pointer to subtitle text in utf-8 */ static ASS_Track *parse_memory(ASS_Library *library, char *buf) { ASS_Track *track; int i; track = ass_new_track(library); if (!track) return NULL; // process header process_text(track, buf); // external SSA/ASS subs does not have ReadOrder field for (i = 0; i < track->n_events; ++i) track->events[i].ReadOrder = i; if (track->track_type == TRACK_TYPE_UNKNOWN) { ass_free_track(track); return 0; } ass_process_force_style(track); return track; } /** * \brief Read subtitles from memory. * \param library libass library object * \param buf pointer to subtitles text * \param bufsize size of buffer * \param codepage recode buffer contents from given codepage * \return newly allocated track */ ASS_Track *ass_read_memory(ASS_Library *library, char *buf, size_t bufsize, char *codepage) { ASS_Track *track; int copied = 0; if (!buf) return 0; #ifdef CONFIG_ICONV if (codepage) { buf = sub_recode(library, buf, bufsize, codepage); if (!buf) return 0; else copied = 1; } #endif if (!copied) { char *newbuf = malloc(bufsize + 1); if (!newbuf) return 0; memcpy(newbuf, buf, bufsize); newbuf[bufsize] = '\0'; buf = newbuf; } track = parse_memory(library, buf); free(buf); if (!track) return 0; ass_msg(library, MSGL_INFO, "Added subtitle file: " "<memory> (%d styles, %d events)", track->n_styles, track->n_events); return track; } static char *read_file_recode(ASS_Library *library, char *fname, char *codepage, size_t *size) { char *buf; size_t bufsize; buf = read_file(library, fname, &bufsize); if (!buf) return 0; #ifdef CONFIG_ICONV if (codepage) { char *tmpbuf = sub_recode(library, buf, bufsize, codepage); free(buf); buf = tmpbuf; } if (!buf) return 0; #endif *size = bufsize; return buf; } /** * \brief Read subtitles from file. 
* \param library libass library object * \param fname file name * \param codepage recode buffer contents from given codepage * \return newly allocated track */ ASS_Track *ass_read_file(ASS_Library *library, char *fname, char *codepage) { char *buf; ASS_Track *track; size_t bufsize; buf = read_file_recode(library, fname, codepage, &bufsize); if (!buf) return 0; track = parse_memory(library, buf); free(buf); if (!track) return 0; track->name = strdup(fname); ass_msg(library, MSGL_INFO, "Added subtitle file: '%s' (%d styles, %d events)", fname, track->n_styles, track->n_events); return track; } /** * \brief read styles from file into already initialized track */ int ass_read_styles(ASS_Track *track, char *fname, char *codepage) { char *buf; ParserState old_state; size_t sz; buf = read_file(track->library, fname, &sz); if (!buf) return 1; #ifdef CONFIG_ICONV if (codepage) { char *tmpbuf; tmpbuf = sub_recode(track->library, buf, sz, codepage); free(buf); buf = tmpbuf; } if (!buf) return 1; #endif old_state = track->parser_priv->state; track->parser_priv->state = PST_STYLES; process_text(track, buf); free(buf); track->parser_priv->state = old_state; return 0; } long long ass_step_sub(ASS_Track *track, long long now, int movement) { int i; ASS_Event *best = NULL; long long target = now; int direction = (movement > 0 ? 1 : -1) * !!movement; if (track->n_events == 0) return 0; do { ASS_Event *closest = NULL; long long closest_time = now; for (i = 0; i < track->n_events; i++) { if (direction < 0) { long long end = track->events[i].Start + track->events[i].Duration; if (end < target) { if (!closest || end > closest_time) { closest = &track->events[i]; closest_time = end; } } } else if (direction > 0) { long long start = track->events[i].Start; if (start > target) { if (!closest || start < closest_time) { closest = &track->events[i]; closest_time = start; } } } else { long long start = track->events[i].Start; if (start < target) { if (!closest || start >= closest_time) { closest = &track->events[i]; closest_time = start; } } } } target = closest_time + direction; movement -= direction; if (closest) best = closest; } while (movement); return best ? best->Start - now : 0; } ASS_Track *ass_new_track(ASS_Library *library) { int def_sid = -1; ASS_Track *track = calloc(1, sizeof(ASS_Track)); if (!track) goto fail; track->library = library; track->ScaledBorderAndShadow = 0; track->parser_priv = calloc(1, sizeof(ASS_ParserPriv)); if (!track->parser_priv) goto fail; def_sid = ass_alloc_style(track); if (def_sid < 0) goto fail; set_default_style(track->styles + def_sid); track->default_style = def_sid; if (!track->styles[def_sid].Name || !track->styles[def_sid].FontName) goto fail; track->parser_priv->check_readorder = 1; return track; fail: if (track) { if (def_sid >= 0) ass_free_style(track, def_sid); free(track->parser_priv); free(track); } return NULL; } int ass_track_set_feature(ASS_Track *track, ASS_Feature feature, int enable) { switch (feature) { case ASS_FEATURE_INCOMPATIBLE_EXTENSIONS: //-fallthrough #ifdef USE_FRIBIDI_EX_API case ASS_FEATURE_BIDI_BRACKETS: track->parser_priv->bidi_brackets = !!enable; #endif return 0; default: return -1; } } /** * \brief Prepare track for rendering */ void ass_lazy_track_init(ASS_Library *lib, ASS_Track *track) { if (track->PlayResX > 0 && track->PlayResY > 0) return; if (track->PlayResX <= 0 && track->PlayResY <= 0) { ass_msg(lib, MSGL_WARN, "Neither PlayResX nor PlayResY defined. 
Assuming 384x288"); track->PlayResX = 384; track->PlayResY = 288; } else { if (track->PlayResY <= 0 && track->PlayResX == 1280) { track->PlayResY = 1024; ass_msg(lib, MSGL_WARN, "PlayResY undefined, setting to %d", track->PlayResY); } else if (track->PlayResY <= 0) { track->PlayResY = FFMAX(1, track->PlayResX * 3LL / 4); ass_msg(lib, MSGL_WARN, "PlayResY undefined, setting to %d", track->PlayResY); } else if (track->PlayResX <= 0 && track->PlayResY == 1024) { track->PlayResX = 1280; ass_msg(lib, MSGL_WARN, "PlayResX undefined, setting to %d", track->PlayResX); } else if (track->PlayResX <= 0) { track->PlayResX = FFMAX(1, track->PlayResY * 4LL / 3); ass_msg(lib, MSGL_WARN, "PlayResX undefined, setting to %d", track->PlayResX); } } }
null
/* * Copyright (C) 2006 Evgeniy Stepanov <eugeni.stepanov@gmail.com> * * This file is part of libass. * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "config.h" #include "ass_compat.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <errno.h> #include <sys/types.h> #include <sys/stat.h> #include <inttypes.h> #ifdef CONFIG_ICONV #include <iconv.h> #endif #include "ass.h" #include "ass_utils.h" #include "ass_library.h" #include "ass_priv.h" #include "ass_shaper.h" #include "ass_string.h" #define ass_atof(STR) (ass_strtod((STR),NULL)) static const char *const ass_style_format = "Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, " "OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, " "ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, " "Alignment, MarginL, MarginR, MarginV, Encoding"; static const char *const ass_event_format = "Layer, Start, End, Style, Name, " "MarginL, MarginR, MarginV, Effect, Text"; static const char *const ssa_style_format = "Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, " "TertiaryColour, BackColour, Bold, Italic, BorderStyle, Outline, " "Shadow, Alignment, MarginL, MarginR, MarginV, AlphaLevel, Encoding"; static const char *const ssa_event_format = "Marked, Start, End, Style, Name, " "MarginL, MarginR, MarginV, Effect, Text"; #define ASS_STYLES_ALLOC 20 int ass_library_version(void) { return LIBASS_VERSION; } void ass_free_track(ASS_Track *track) { int i; if (!track) return; if (track->parser_priv) { free(track->parser_priv->read_order_bitmap); free(track->parser_priv->fontname); free(track->parser_priv->fontdata); free(track->parser_priv); } free(track->style_format); free(track->event_format); free(track->Language); if (track->styles) { for (i = 0; i < track->n_styles; ++i) ass_free_style(track, i); } free(track->styles); if (track->events) { for (i = 0; i < track->n_events; ++i) ass_free_event(track, i); } free(track->events); free(track->name); free(track); } /// \brief Allocate a new style struct /// \param track track /// \return style id or negative value on failure int ass_alloc_style(ASS_Track *track) { int sid; assert(track->n_styles <= track->max_styles); if (track->n_styles == track->max_styles) { if (track->max_styles >= FFMIN(SIZE_MAX, INT_MAX) - ASS_STYLES_ALLOC) return -1; int new_max = track->max_styles + ASS_STYLES_ALLOC; if (!ASS_REALLOC_ARRAY(track->styles, new_max)) return -1; track->max_styles = new_max; } sid = track->n_styles++; memset(track->styles + sid, 0, sizeof(ASS_Style)); return sid; } /// \brief Allocate a new event struct /// \param track track /// \return event id or negative value on failure int ass_alloc_event(ASS_Track *track) { int eid; assert(track->n_events <= track->max_events); if (track->n_events == track->max_events) { if (track->max_events >= FFMIN(SIZE_MAX, INT_MAX) / 2) 
return -1; int new_max = track->max_events * 2 + 1; if (!ASS_REALLOC_ARRAY(track->events, new_max)) return -1; track->max_events = new_max; } eid = track->n_events++; memset(track->events + eid, 0, sizeof(ASS_Event)); return eid; } void ass_free_event(ASS_Track *track, int eid) { ASS_Event *event = track->events + eid; free(event->Name); free(event->Effect); free(event->Text); free(event->render_priv); } void ass_free_style(ASS_Track *track, int sid) { ASS_Style *style = track->styles + sid; free(style->Name); free(style->FontName); } static int resize_read_order_bitmap(ASS_Track *track, int max_id) { // Don't allow malicious files to OOM us easily. Also avoids int overflows. if (max_id < 0 || max_id >= 10 * 1024 * 1024 * 8) goto fail; assert(track->parser_priv->read_order_bitmap || !track->parser_priv->read_order_elems); if (max_id >= track->parser_priv->read_order_elems * 32) { int oldelems = track->parser_priv->read_order_elems; int elems = ((max_id + 31) / 32 + 1) * 2; assert(elems >= oldelems); track->parser_priv->read_order_elems = elems; void *new_bitmap = realloc(track->parser_priv->read_order_bitmap, elems * 4); if (!new_bitmap) goto fail; track->parser_priv->read_order_bitmap = new_bitmap; memset(track->parser_priv->read_order_bitmap + oldelems, 0, (elems - oldelems) * 4); } return 0; fail: free(track->parser_priv->read_order_bitmap); track->parser_priv->read_order_bitmap = NULL; track->parser_priv->read_order_elems = 0; return -1; } static int test_and_set_read_order_bit(ASS_Track *track, int id) { if (resize_read_order_bitmap(track, id) < 0) return -1; int index = id / 32; uint32_t bit = 1u << (id % 32); if (track->parser_priv->read_order_bitmap[index] & bit) return 1; track->parser_priv->read_order_bitmap[index] |= bit; return 0; } // ============================================================================================== /** * \brief Set up default style * \param style style to edit to defaults * The parameters are mostly taken directly from VSFilter source for * best compatibility. */ static void set_default_style(ASS_Style *style) { style->Name = strdup("Default"); style->FontName = strdup("Arial"); style->FontSize = 18; style->PrimaryColour = 0xffffff00; style->SecondaryColour = 0x00ffff00; style->OutlineColour = 0x00000000; style->BackColour = 0x00000080; style->Bold = 200; style->ScaleX = 1.0; style->ScaleY = 1.0; style->Spacing = 0; style->BorderStyle = 1; style->Outline = 2; style->Shadow = 3; style->Alignment = 2; style->MarginL = style->MarginR = style->MarginV = 20; } static long long string2timecode(ASS_Library *library, char *p) { int h, m, s, ms; long long tm; int res = sscanf(p, "%d:%d:%d.%d", &h, &m, &s, &ms); if (res < 4) { ass_msg(library, MSGL_WARN, "Bad timestamp"); return 0; } tm = ((h * 60LL + m) * 60 + s) * 1000 + ms * 10LL; return tm; } #define NEXT(str,token) \ token = next_token(&str); \ if (!token) break; #define ALIAS(alias,name) \ if (ass_strcasecmp(tname, #alias) == 0) {tname = #name;} /* One section started with PARSE_START and PARSE_END parses a single token * (contained in the variable named token) for the header indicated by the * variable tname. It does so by chaining a number of else-if statements, each * of which checks if the tname variable indicates that this header should be * parsed. The first parameter of the macro gives the name of the header. * * The string that is passed is in str. str is advanced to the next token if * a header could be parsed. 
The parsed results are stored in the variable * target, which has the type ASS_Style* or ASS_Event*. */ #define PARSE_START if (0) { #define PARSE_END } #define ANYVAL(name,func) \ } else if (ass_strcasecmp(tname, #name) == 0) { \ target->name = func(token); #define STRVAL(name) \ } else if (ass_strcasecmp(tname, #name) == 0) { \ char *new_str = strdup(token); \ if (new_str) { \ free(target->name); \ target->name = new_str; \ } #define STARREDSTRVAL(name) \ } else if (ass_strcasecmp(tname, #name) == 0) { \ while (*token == '*') ++token; \ char *new_str = strdup(token); \ if (new_str) { \ free(target->name); \ target->name = new_str; \ } #define COLORVAL(name) ANYVAL(name,parse_color_header) #define INTVAL(name) ANYVAL(name,atoi) #define FPVAL(name) ANYVAL(name,ass_atof) #define TIMEVAL(name) \ } else if (ass_strcasecmp(tname, #name) == 0) { \ target->name = string2timecode(track->library, token); #define STYLEVAL(name) \ } else if (ass_strcasecmp(tname, #name) == 0) { \ target->name = lookup_style(track, token); // skip spaces in str beforehand, or trim leading spaces afterwards static inline void advance_token_pos(const char **const str, const char **const start, const char **const end) { *start = *str; *end = *start; while (**end != '\0' && **end != ',') ++*end; *str = *end + (**end == ','); rskip_spaces((char**)end, (char*)*start); } static char *next_token(char **str) { char *p; char *start; skip_spaces(str); if (**str == '\0') { return 0; } advance_token_pos((const char**)str, (const char**)&start, (const char**)&p); *p = '\0'; return start; } /** * \brief Parse the tail of Dialogue line * \param track track * \param event parsed data goes here * \param str string to parse, zero-terminated * \param n_ignored number of format options to skip at the beginning */ static int process_event_tail(ASS_Track *track, ASS_Event *event, char *str, int n_ignored) { char *token; char *tname; char *p = str; int i; ASS_Event *target = event; char *format = strdup(track->event_format); if (!format) return -1; char *q = format; // format scanning pointer for (i = 0; i < n_ignored; ++i) { NEXT(q, tname); } while (1) { NEXT(q, tname); if (ass_strcasecmp(tname, "Text") == 0) { char *last; event->Text = strdup(p); if (event->Text && *event->Text != 0) { last = event->Text + strlen(event->Text) - 1; if (last >= event->Text && *last == '\r') *last = 0; } event->Duration -= event->Start; free(format); return event->Text ? 
0 : -1; // "Text" is always the last } NEXT(p, token); ALIAS(End, Duration) // temporarily store end timecode in event->Duration PARSE_START INTVAL(Layer) STYLEVAL(Style) STRVAL(Name) STRVAL(Effect) INTVAL(MarginL) INTVAL(MarginR) INTVAL(MarginV) TIMEVAL(Start) TIMEVAL(Duration) PARSE_END } free(format); return 1; } /** * \brief Parse command line style overrides (--ass-force-style option) * \param track track to apply overrides to * The format for overrides is [StyleName.]Field=Value */ void ass_process_force_style(ASS_Track *track) { char **fs, *eq, *dt, *style, *tname, *token; ASS_Style *target; int sid; char **list = track->library->style_overrides; if (!list) return; for (fs = list; *fs; ++fs) { eq = strrchr(*fs, '='); if (!eq) continue; *eq = '\0'; token = eq + 1; if (!ass_strcasecmp(*fs, "PlayResX")) track->PlayResX = atoi(token); else if (!ass_strcasecmp(*fs, "PlayResY")) track->PlayResY = atoi(token); else if (!ass_strcasecmp(*fs, "Timer")) track->Timer = ass_atof(token); else if (!ass_strcasecmp(*fs, "WrapStyle")) track->WrapStyle = atoi(token); else if (!ass_strcasecmp(*fs, "ScaledBorderAndShadow")) track->ScaledBorderAndShadow = parse_bool(token); else if (!ass_strcasecmp(*fs, "Kerning")) track->Kerning = parse_bool(token); else if (!ass_strcasecmp(*fs, "YCbCr Matrix")) track->YCbCrMatrix = parse_ycbcr_matrix(token); dt = strrchr(*fs, '.'); if (dt) { *dt = '\0'; style = *fs; tname = dt + 1; } else { style = NULL; tname = *fs; } for (sid = 0; sid < track->n_styles; ++sid) { if (style == NULL || ass_strcasecmp(track->styles[sid].Name, style) == 0) { target = track->styles + sid; PARSE_START STRVAL(FontName) COLORVAL(PrimaryColour) COLORVAL(SecondaryColour) COLORVAL(OutlineColour) COLORVAL(BackColour) FPVAL(FontSize) INTVAL(Bold) INTVAL(Italic) INTVAL(Underline) INTVAL(StrikeOut) FPVAL(Spacing) FPVAL(Angle) INTVAL(BorderStyle) INTVAL(Alignment) INTVAL(Justify) INTVAL(MarginL) INTVAL(MarginR) INTVAL(MarginV) INTVAL(Encoding) FPVAL(ScaleX) FPVAL(ScaleY) FPVAL(Outline) FPVAL(Shadow) FPVAL(Blur) PARSE_END } } *eq = '='; if (dt) *dt = '.'; } } /** * \brief Parse the Style line * \param track track * \param str string to parse, zero-terminated * Allocates a new style struct. 
*/ static int process_style(ASS_Track *track, char *str) { char *token; char *tname; char *p = str; char *format; char *q; // format scanning pointer int sid; ASS_Style *style; ASS_Style *target; if (!track->style_format) { // no style format header // probably an ancient script version if (track->track_type == TRACK_TYPE_SSA) track->style_format = strdup(ssa_style_format); else track->style_format = strdup(ass_style_format); if (!track->style_format) return -1; } q = format = strdup(track->style_format); if (!q) return -1; ass_msg(track->library, MSGL_V, "[%p] Style: %s", track, str); sid = ass_alloc_style(track); if (sid < 0) { free(format); return -1; } style = track->styles + sid; target = style; // fill style with some default values style->ScaleX = 100.; style->ScaleY = 100.; while (1) { NEXT(q, tname); NEXT(p, token); PARSE_START STARREDSTRVAL(Name) STRVAL(FontName) COLORVAL(PrimaryColour) COLORVAL(SecondaryColour) COLORVAL(OutlineColour) // TertiaryColor COLORVAL(BackColour) // SSA uses BackColour for both outline and shadow // this will destroy SSA's TertiaryColour, but i'm not going to use it anyway if (track->track_type == TRACK_TYPE_SSA) target->OutlineColour = target->BackColour; FPVAL(FontSize) INTVAL(Bold) INTVAL(Italic) INTVAL(Underline) INTVAL(StrikeOut) FPVAL(Spacing) FPVAL(Angle) INTVAL(BorderStyle) INTVAL(Alignment) if (track->track_type == TRACK_TYPE_ASS) target->Alignment = numpad2align(target->Alignment); // VSFilter compatibility else if (target->Alignment == 8) target->Alignment = 3; else if (target->Alignment == 4) target->Alignment = 11; INTVAL(MarginL) INTVAL(MarginR) INTVAL(MarginV) INTVAL(Encoding) FPVAL(ScaleX) FPVAL(ScaleY) FPVAL(Outline) FPVAL(Shadow) PARSE_END } free(format); style->ScaleX = FFMAX(style->ScaleX, 0.) / 100.; style->ScaleY = FFMAX(style->ScaleY, 0.) / 100.; style->Spacing = FFMAX(style->Spacing, 0.); style->Outline = FFMAX(style->Outline, 0.); style->Shadow = FFMAX(style->Shadow, 0.); style->Bold = !!style->Bold; style->Italic = !!style->Italic; style->Underline = !!style->Underline; style->StrikeOut = !!style->StrikeOut; if (!style->Name) style->Name = strdup("Default"); if (!style->FontName) style->FontName = strdup("Arial"); if (!style->Name || !style->FontName) { ass_free_style(track, sid); track->n_styles--; return -1; } if (strcmp(target->Name, "Default") == 0) track->default_style = sid; return 0; } static bool format_line_compare(const char *fmt1, const char *fmt2) { while (true) { const char *tk1_start, *tk2_start; const char *tk1_end, *tk2_end; skip_spaces((char**)&fmt1); skip_spaces((char**)&fmt2); if (!*fmt1 || !*fmt2) break; advance_token_pos(&fmt1, &tk1_start, &tk1_end); advance_token_pos(&fmt2, &tk2_start, &tk2_end); if ((tk1_end-tk1_start) != (tk2_end-tk2_start)) return false; if (ass_strncasecmp(tk1_start, tk2_start, tk1_end-tk1_start)) return false; } return *fmt1 == *fmt2; } /** * \brief Set SBAS=1 if not set explicitly in case of custom format line * \param track track * \param fmt format line of file * \param std standard format line * * As of writing libass is the only renderer accepting custom format lines. * For years libass defaultet SBAS to yes instead of no. * To avoid breaking released scripts with custom format lines, * keep SBAS=1 default for custom format files. 
*/ static void custom_format_line_compatibility(ASS_Track *const track, const char *const fmt, const char *const std) { if (!(track->parser_priv->header_flags & SINFO_SCALEDBORDER) && !format_line_compare(fmt, std)) { ass_msg(track->library, MSGL_INFO, "Track has custom format line(s). " "'ScaledBorderAndShadow' will default to 'yes'."); track->ScaledBorderAndShadow = 1; } } static int process_styles_line(ASS_Track *track, char *str) { int ret = 0; if (!strncmp(str, "Format:", 7)) { char *p = str + 7; skip_spaces(&p); free(track->style_format); track->style_format = strdup(p); if (!track->style_format) return -1; ass_msg(track->library, MSGL_DBG2, "Style format: %s", track->style_format); if (track->track_type == TRACK_TYPE_ASS) custom_format_line_compatibility(track, p, ass_style_format); else custom_format_line_compatibility(track, p, ssa_style_format); } else if (!strncmp(str, "Style:", 6)) { char *p = str + 6; skip_spaces(&p); ret = process_style(track, p); } return ret; } static inline void check_duplicate_info_line(const ASS_Track *const track, const ScriptInfo si, const char *const name) { if (track->parser_priv->header_flags & si) ass_msg(track->library, MSGL_WARN, "Duplicate Script Info Header '%s'. Previous value overwritten!", name); else track->parser_priv->header_flags |= si; } static int process_info_line(ASS_Track *track, char *str) { if (!strncmp(str, "PlayResX:", 9)) { check_duplicate_info_line(track, SINFO_PLAYRESX, "PlayResX"); track->PlayResX = atoi(str + 9); } else if (!strncmp(str, "PlayResY:", 9)) { check_duplicate_info_line(track, SINFO_PLAYRESY, "PlayResY"); track->PlayResY = atoi(str + 9); } else if (!strncmp(str, "Timer:", 6)) { check_duplicate_info_line(track, SINFO_TIMER, "Timer"); track->Timer = ass_atof(str + 6); } else if (!strncmp(str, "WrapStyle:", 10)) { check_duplicate_info_line(track, SINFO_WRAPSTYLE, "WrapStyle"); track->WrapStyle = atoi(str + 10); } else if (!strncmp(str, "ScaledBorderAndShadow:", 22)) { check_duplicate_info_line(track, SINFO_SCALEDBORDER, "ScaledBorderAndShadow"); track->ScaledBorderAndShadow = parse_bool(str + 22); } else if (!strncmp(str, "Kerning:", 8)) { check_duplicate_info_line(track, SINFO_KERNING, "Kerning"); track->Kerning = parse_bool(str + 8); } else if (!strncmp(str, "YCbCr Matrix:", 13)) { check_duplicate_info_line(track, SINFO_COLOURMATRIX, "YCbCr Matrix"); track->YCbCrMatrix = parse_ycbcr_matrix(str + 13); } else if (!strncmp(str, "Language:", 9)) { check_duplicate_info_line(track, SINFO_LANGUAGE, "Language"); char *p = str + 9; while (*p && ass_isspace(*p)) p++; free(track->Language); track->Language = strndup(p, 2); } else if (!strncmp(str, "; Script generated by ", 22)) { if (!strncmp(str + 22,"FFmpeg/Lavc", 11)) track->parser_priv->header_flags |= GENBY_FFMPEG; } return 0; } static void event_format_fallback(ASS_Track *track) { track->parser_priv->state = PST_EVENTS; if (track->track_type == TRACK_TYPE_SSA) track->event_format = strdup(ssa_event_format); else track->event_format = strdup(ass_event_format); ass_msg(track->library, MSGL_V, "No event format found, using fallback"); } /** * \brief Return if track is post-signature and pre-SBAS ffmpeg track * \param track track */ static bool detect_legacy_conv_subs(ASS_Track *track) { /* * FFmpeg and libav convert srt subtitles to ass. * In legacy versions, they did not set the 'ScaledBorderAndShadow' header, * but expected it to default to yes (which libass did). 
* To avoid breaking them, we try to detect these * converted subs by common properties of ffmpeg/libav's converted subs. * Since files with custom format lines (-2014.10.11) default to SBAS=1 * regardless of being ffmpeg generated or not, we are only concerned with * post-signature and pre-SBAS ffmpeg-files (2014.10.11-2020.04.17). * We want to avoid matching modified ffmpeg files though. * * Relevant ffmpeg commits are: * 2c77c90684e24ef16f7e7c4462e011434cee6a98 2010.12.29 * Initial conversion format. * Style "Format:" line is mix of SSA and ASS * Event "Format:" line * "Format: Layer, Start, End, Text\r\n" * Only Header in ScriptInfo is "ScriptType: v4.00+" * 0e7782c08ec77739edb0b98ba5d896b45e98235f 2012.06.15 * Adds 'Style' to Event "Format:" line * 5039aadf68deb9ad6dd0737ea11259fe53d3727b 2014.06.18 * Adds PlayerRes(X|Y) (384x288) * (moved below ScriptType: a few minutes later) * 40b9f28641b696c6bb73ce49dc97c2ce2700cbdb 2014.10.11 14:31:23 +0200 * Regular full ASS Event and Style "Format:" lines * 52b0a0ecaa02e17f7e01bead8c3f215f1cfd48dc 2014.10.11 18:37:43 +0200 <== * Signature comment * 56bc0a6736cdc7edab837ff8f304661fd16de0e4 2015.02.08 * Allow custom PlayRes(X|Y) * a8ba2a2c1294a330a0e79ae7f0d3a203a7599166 2020.04.17 * Set 'ScaledBorderAndShadow: yes' * * libav outputs initial ffmpeg format. (no longer maintained) */ // GENBY_FFMPEG and exact ffmpeg headers required // Note: If there's SINFO_SCRIPTTYPE in the future this needs to be updated if (track->parser_priv->header_flags ^ (SINFO_PLAYRESX | SINFO_PLAYRESY | GENBY_FFMPEG)) return false; // Legacy ffmpeg only ever has one style // Check 2 not 1 because libass also adds a def style if (track->n_styles != 2 || strncmp(track->styles[1].Name, "Default", 7)) return false; return true; } static int process_events_line(ASS_Track *track, char *str) { if (!strncmp(str, "Format:", 7)) { char *p = str + 7; skip_spaces(&p); free(track->event_format); track->event_format = strdup(p); if (!track->event_format) return -1; ass_msg(track->library, MSGL_DBG2, "Event format: %s", track->event_format); if (track->track_type == TRACK_TYPE_ASS) custom_format_line_compatibility(track, p, ass_event_format); else custom_format_line_compatibility(track, p, ssa_event_format); // Guess if we are dealing with legacy ffmpeg subs and change accordingly // If file has no event format it was probably not created by ffmpeg/libav if (detect_legacy_conv_subs(track)) { track->ScaledBorderAndShadow = 1; ass_msg(track->library, MSGL_INFO, "Track treated as legacy ffmpeg sub."); } } else if (!strncmp(str, "Dialogue:", 9)) { // This should never be reached for embedded subtitles. 
// They have slightly different format and are parsed in ass_process_chunk, // called directly from demuxer int eid; ASS_Event *event; // We can't parse events without event_format if (!track->event_format) { event_format_fallback(track); if (!track->event_format) return -1; } str += 9; skip_spaces(&str); eid = ass_alloc_event(track); if (eid < 0) return -1; event = track->events + eid; return process_event_tail(track, event, str, 0); } else { ass_msg(track->library, MSGL_V, "Not understood: '%.30s'", str); } return 0; } static unsigned char *decode_chars(const unsigned char *src, unsigned char *dst, size_t cnt_in) { uint32_t value = 0; for (int i = 0; i < cnt_in; i++) value |= (uint32_t) ((src[i] - 33u) & 63) << 6 * (3 - i); *dst++ = value >> 16; if (cnt_in >= 3) *dst++ = value >> 8 & 0xff; if (cnt_in >= 4) *dst++ = value & 0xff; return dst; } static void reset_embedded_font_parsing(ASS_ParserPriv *parser_priv) { free(parser_priv->fontname); free(parser_priv->fontdata); parser_priv->fontname = NULL; parser_priv->fontdata = NULL; parser_priv->fontdata_size = 0; parser_priv->fontdata_used = 0; } static int decode_font(ASS_Track *track) { unsigned char *p; unsigned char *q; size_t i; size_t size; // original size size_t dsize; // decoded size unsigned char *buf = 0; ass_msg(track->library, MSGL_V, "Font: %d bytes encoded data", track->parser_priv->fontdata_used); size = track->parser_priv->fontdata_used; if (size % 4 == 1) { ass_msg(track->library, MSGL_ERR, "Bad encoded data size"); goto error_decode_font; } buf = malloc(size / 4 * 3 + FFMAX(size % 4, 1) - 1); if (!buf) goto error_decode_font; q = buf; for (i = 0, p = (unsigned char *) track->parser_priv->fontdata; i < size / 4; i++, p += 4) { q = decode_chars(p, q, 4); } if (size % 4 == 2) { q = decode_chars(p, q, 2); } else if (size % 4 == 3) { q = decode_chars(p, q, 3); } dsize = q - buf; assert(dsize == size / 4 * 3 + FFMAX(size % 4, 1) - 1); if (track->library->extract_fonts) { ass_add_font(track->library, track->parser_priv->fontname, (char *) buf, dsize); } error_decode_font: free(buf); reset_embedded_font_parsing(track->parser_priv); return 0; } static int process_fonts_line(ASS_Track *track, char *str) { size_t len; if (!strncmp(str, "fontname:", 9)) { char *p = str + 9; skip_spaces(&p); if (track->parser_priv->fontname) { decode_font(track); } track->parser_priv->fontname = strdup(p); if (!track->parser_priv->fontname) return -1; ass_msg(track->library, MSGL_V, "Fontname: %s", track->parser_priv->fontname); return 0; } if (!track->parser_priv->fontname) { ass_msg(track->library, MSGL_V, "Not understood: '%s'", str); return 1; } len = strlen(str); if (track->parser_priv->fontdata_used >= SIZE_MAX - FFMAX(len, 100 * 1024)) { goto mem_fail; } else if (track->parser_priv->fontdata_used + len > track->parser_priv->fontdata_size) { size_t new_size = track->parser_priv->fontdata_size + FFMAX(len, 100 * 1024); if (!ASS_REALLOC_ARRAY(track->parser_priv->fontdata, new_size)) goto mem_fail; track->parser_priv->fontdata_size = new_size; } memcpy(track->parser_priv->fontdata + track->parser_priv->fontdata_used, str, len); track->parser_priv->fontdata_used += len; return 0; mem_fail: reset_embedded_font_parsing(track->parser_priv); return -1; } /** * \brief Parse a header line * \param track track * \param str string to parse, zero-terminated */ static int process_line(ASS_Track *track, char *str) { skip_spaces(&str); if (!ass_strncasecmp(str, "[Script Info]", 13)) { track->parser_priv->state = PST_INFO; } else if (!ass_strncasecmp(str, "[V4 
Styles]", 11)) { track->parser_priv->state = PST_STYLES; track->track_type = TRACK_TYPE_SSA; } else if (!ass_strncasecmp(str, "[V4+ Styles]", 12)) { track->parser_priv->state = PST_STYLES; track->track_type = TRACK_TYPE_ASS; } else if (!ass_strncasecmp(str, "[Events]", 8)) { track->parser_priv->state = PST_EVENTS; } else if (!ass_strncasecmp(str, "[Fonts]", 7)) { track->parser_priv->state = PST_FONTS; } else { switch (track->parser_priv->state) { case PST_INFO: process_info_line(track, str); break; case PST_STYLES: process_styles_line(track, str); break; case PST_EVENTS: process_events_line(track, str); break; case PST_FONTS: process_fonts_line(track, str); break; default: break; } } return 0; } static int process_text(ASS_Track *track, char *str) { char *p = str; while (1) { char *q; while (1) { if ((*p == '\r') || (*p == '\n')) ++p; else if (p[0] == '\xef' && p[1] == '\xbb' && p[2] == '\xbf') p += 3; // U+FFFE (BOM) else break; } for (q = p; ((*q != '\0') && (*q != '\r') && (*q != '\n')); ++q) { }; if (q == p) break; if (*q != '\0') *(q++) = '\0'; process_line(track, p); if (*q == '\0') break; p = q; } // there is no explicit end-of-font marker in ssa/ass if (track->parser_priv->fontname) decode_font(track); return 0; } /** * \brief Process a chunk of subtitle stream data. * \param track track * \param data string to parse * \param size length of data */ void ass_process_data(ASS_Track *track, char *data, int size) { char *str = malloc(size + 1); if (!str) return; memcpy(str, data, size); str[size] = '\0'; ass_msg(track->library, MSGL_V, "Event: %s", str); process_text(track, str); free(str); } /** * \brief Process CodecPrivate section of subtitle stream * \param track track * \param data string to parse * \param size length of data CodecPrivate section contains [Stream Info] and [V4+ Styles] ([V4 Styles] for SSA) sections */ void ass_process_codec_private(ASS_Track *track, char *data, int size) { ass_process_data(track, data, size); // probably an mkv produced by ancient mkvtoolnix // such files don't have [Events] and Format: headers if (!track->event_format) event_format_fallback(track); ass_process_force_style(track); } static int check_duplicate_event(ASS_Track *track, int ReadOrder) { if (track->parser_priv->read_order_bitmap) return test_and_set_read_order_bit(track, ReadOrder) > 0; // ignoring last event, it is the one we are comparing with for (int i = 0; i < track->n_events - 1; i++) if (track->events[i].ReadOrder == ReadOrder) return 1; return 0; } void ass_set_check_readorder(ASS_Track *track, int check_readorder) { track->parser_priv->check_readorder = check_readorder == 1; } /** * \brief Process a chunk of subtitle stream data. In Matroska, this contains exactly 1 event (or a commentary). 
* \param track track * \param data string to parse * \param size length of data * \param timecode starting time of the event (milliseconds) * \param duration duration of the event (milliseconds) */ void ass_process_chunk(ASS_Track *track, char *data, int size, long long timecode, long long duration) { char *str = NULL; int eid; char *p; char *token; ASS_Event *event; int check_readorder = track->parser_priv->check_readorder; if (check_readorder && !track->parser_priv->read_order_bitmap) { for (int i = 0; i < track->n_events; i++) { if (test_and_set_read_order_bit(track, track->events[i].ReadOrder) < 0) break; } } if (!track->event_format) { ass_msg(track->library, MSGL_WARN, "Event format header missing"); goto cleanup; } str = malloc(size + 1); if (!str) goto cleanup; memcpy(str, data, size); str[size] = '\0'; ass_msg(track->library, MSGL_V, "Event at %" PRId64 ", +%" PRId64 ": %s", (int64_t) timecode, (int64_t) duration, str); eid = ass_alloc_event(track); if (eid < 0) goto cleanup; event = track->events + eid; p = str; do { NEXT(p, token); event->ReadOrder = atoi(token); if (check_readorder && check_duplicate_event(track, event->ReadOrder)) break; NEXT(p, token); event->Layer = atoi(token); process_event_tail(track, event, p, 3); event->Start = timecode; event->Duration = duration; goto cleanup; // dump_events(tid); } while (0); // some error ass_free_event(track, eid); track->n_events--; cleanup: free(str); } /** * \brief Flush buffered events. * \param track track */ void ass_flush_events(ASS_Track *track) { if (track->events) { int eid; for (eid = 0; eid < track->n_events; eid++) ass_free_event(track, eid); track->n_events = 0; } free(track->parser_priv->read_order_bitmap); track->parser_priv->read_order_bitmap = NULL; track->parser_priv->read_order_elems = 0; } #ifdef CONFIG_ICONV /** \brief recode buffer to utf-8 * constraint: codepage != 0 * \param data pointer to text buffer * \param size buffer size * \return a pointer to recoded buffer, caller is responsible for freeing it **/ static char *sub_recode(ASS_Library *library, char *data, size_t size, char *codepage) { iconv_t icdsc; char *tocp = "UTF-8"; char *outbuf; assert(codepage); if ((icdsc = iconv_open(tocp, codepage)) != (iconv_t) (-1)) { ass_msg(library, MSGL_V, "Opened iconv descriptor"); } else { ass_msg(library, MSGL_ERR, "Error opening iconv descriptor"); return NULL; } { size_t osize = size; size_t ileft = size; size_t oleft = size - 1; char *ip; char *op; size_t rc; int clear = 0; outbuf = malloc(osize); if (!outbuf) goto out; ip = data; op = outbuf; while (1) { if (ileft) rc = iconv(icdsc, &ip, &ileft, &op, &oleft); else { // clear the conversion state and leave clear = 1; rc = iconv(icdsc, NULL, NULL, &op, &oleft); } if (rc == (size_t) (-1)) { if (errno == E2BIG) { size_t offset = op - outbuf; char *nbuf = realloc(outbuf, osize + size); if (!nbuf) { free(outbuf); outbuf = 0; goto out; } outbuf = nbuf; op = outbuf + offset; osize += size; oleft += size; } else { ass_msg(library, MSGL_WARN, "Error recoding file"); free(outbuf); outbuf = NULL; goto out; } } else if (clear) break; } outbuf[osize - oleft - 1] = 0; } out: if (icdsc != (iconv_t) (-1)) { (void) iconv_close(icdsc); ass_msg(library, MSGL_V, "Closed iconv descriptor"); } return outbuf; } #endif // ICONV /** * \brief read file contents into newly allocated buffer * \param fname file name * \param bufsize out: file size * \return pointer to file contents. Caller is responsible for its deallocation. 
*/ char *read_file(ASS_Library *library, char *fname, size_t *bufsize) { int res; long sz; long bytes_read; char *buf; FILE *fp = fopen(fname, "rb"); if (!fp) { ass_msg(library, MSGL_WARN, "ass_read_file(%s): fopen failed", fname); return 0; } res = fseek(fp, 0, SEEK_END); if (res == -1) { ass_msg(library, MSGL_WARN, "ass_read_file(%s): fseek failed", fname); fclose(fp); return 0; } sz = ftell(fp); rewind(fp); ass_msg(library, MSGL_V, "File size: %ld", sz); buf = sz < SIZE_MAX ? malloc(sz + 1) : NULL; if (!buf) { fclose(fp); return NULL; } assert(buf); bytes_read = 0; do { res = fread(buf + bytes_read, 1, sz - bytes_read, fp); if (res <= 0) { ass_msg(library, MSGL_INFO, "Read failed, %d: %s", errno, strerror(errno)); fclose(fp); free(buf); return 0; } bytes_read += res; } while (sz - bytes_read > 0); buf[sz] = '\0'; fclose(fp); if (bufsize) *bufsize = sz; return buf; } /* * \param buf pointer to subtitle text in utf-8 */ static ASS_Track *parse_memory(ASS_Library *library, char *buf) { ASS_Track *track; int i; track = ass_new_track(library); if (!track) return NULL; // process header process_text(track, buf); // external SSA/ASS subs does not have ReadOrder field for (i = 0; i < track->n_events; ++i) track->events[i].ReadOrder = i; if (track->track_type == TRACK_TYPE_UNKNOWN) { ass_free_track(track); return 0; } ass_process_force_style(track); return track; } /** * \brief Read subtitles from memory. * \param library libass library object * \param buf pointer to subtitles text * \param bufsize size of buffer * \param codepage recode buffer contents from given codepage * \return newly allocated track */ ASS_Track *ass_read_memory(ASS_Library *library, char *buf, size_t bufsize, char *codepage) { ASS_Track *track; int copied = 0; if (!buf) return 0; #ifdef CONFIG_ICONV if (codepage) { buf = sub_recode(library, buf, bufsize, codepage); if (!buf) return 0; else copied = 1; } #endif if (!copied) { char *newbuf = malloc(bufsize + 1); if (!newbuf) return 0; memcpy(newbuf, buf, bufsize); newbuf[bufsize] = '\0'; buf = newbuf; } track = parse_memory(library, buf); free(buf); if (!track) return 0; ass_msg(library, MSGL_INFO, "Added subtitle file: " "<memory> (%d styles, %d events)", track->n_styles, track->n_events); return track; } static char *read_file_recode(ASS_Library *library, char *fname, char *codepage, size_t *size) { char *buf; size_t bufsize; buf = read_file(library, fname, &bufsize); if (!buf) return 0; #ifdef CONFIG_ICONV if (codepage) { char *tmpbuf = sub_recode(library, buf, bufsize, codepage); free(buf); buf = tmpbuf; } if (!buf) return 0; #endif *size = bufsize; return buf; } /** * \brief Read subtitles from file. 
* \param library libass library object * \param fname file name * \param codepage recode buffer contents from given codepage * \return newly allocated track */ ASS_Track *ass_read_file(ASS_Library *library, char *fname, char *codepage) { char *buf; ASS_Track *track; size_t bufsize; buf = read_file_recode(library, fname, codepage, &bufsize); if (!buf) return 0; track = parse_memory(library, buf); free(buf); if (!track) return 0; track->name = strdup(fname); ass_msg(library, MSGL_INFO, "Added subtitle file: '%s' (%d styles, %d events)", fname, track->n_styles, track->n_events); return track; } /** * \brief read styles from file into already initialized track */ int ass_read_styles(ASS_Track *track, char *fname, char *codepage) { char *buf; ParserState old_state; size_t sz; buf = read_file(track->library, fname, &sz); if (!buf) return 1; #ifdef CONFIG_ICONV if (codepage) { char *tmpbuf; tmpbuf = sub_recode(track->library, buf, sz, codepage); free(buf); buf = tmpbuf; } if (!buf) return 1; #endif old_state = track->parser_priv->state; track->parser_priv->state = PST_STYLES; process_text(track, buf); free(buf); track->parser_priv->state = old_state; return 0; } long long ass_step_sub(ASS_Track *track, long long now, int movement) { int i; ASS_Event *best = NULL; long long target = now; int direction = (movement > 0 ? 1 : -1) * !!movement; if (track->n_events == 0) return 0; do { ASS_Event *closest = NULL; long long closest_time = now; for (i = 0; i < track->n_events; i++) { if (direction < 0) { long long end = track->events[i].Start + track->events[i].Duration; if (end < target) { if (!closest || end > closest_time) { closest = &track->events[i]; closest_time = end; } } } else if (direction > 0) { long long start = track->events[i].Start; if (start > target) { if (!closest || start < closest_time) { closest = &track->events[i]; closest_time = start; } } } else { long long start = track->events[i].Start; if (start < target) { if (!closest || start >= closest_time) { closest = &track->events[i]; closest_time = start; } } } } target = closest_time + direction; movement -= direction; if (closest) best = closest; } while (movement); return best ? best->Start - now : 0; } ASS_Track *ass_new_track(ASS_Library *library) { int def_sid = -1; ASS_Track *track = calloc(1, sizeof(ASS_Track)); if (!track) goto fail; track->library = library; track->ScaledBorderAndShadow = 0; track->parser_priv = calloc(1, sizeof(ASS_ParserPriv)); if (!track->parser_priv) goto fail; def_sid = ass_alloc_style(track); if (def_sid < 0) goto fail; set_default_style(track->styles + def_sid); track->default_style = def_sid; if (!track->styles[def_sid].Name || !track->styles[def_sid].FontName) goto fail; track->parser_priv->check_readorder = 1; return track; fail: if (track) { if (def_sid >= 0) ass_free_style(track, def_sid); free(track->parser_priv); free(track); } return NULL; } int ass_track_set_feature(ASS_Track *track, ASS_Feature feature, int enable) { switch (feature) { case ASS_FEATURE_INCOMPATIBLE_EXTENSIONS: //-fallthrough #ifdef USE_FRIBIDI_EX_API case ASS_FEATURE_BIDI_BRACKETS: track->parser_priv->bidi_brackets = !!enable; #endif return 0; default: return -1; } } /** * \brief Prepare track for rendering */ void ass_lazy_track_init(ASS_Library *lib, ASS_Track *track) { if (track->PlayResX > 0 && track->PlayResY > 0) return; if (track->PlayResX <= 0 && track->PlayResY <= 0) { ass_msg(lib, MSGL_WARN, "Neither PlayResX nor PlayResY defined. 
Assuming 384x288"); track->PlayResX = 384; track->PlayResY = 288; } else { if (track->PlayResY <= 0 && track->PlayResX == 1280) { track->PlayResY = 1024; ass_msg(lib, MSGL_WARN, "PlayResY undefined, setting to %d", track->PlayResY); } else if (track->PlayResY <= 0) { track->PlayResY = FFMAX(1, track->PlayResX * 3LL / 4); ass_msg(lib, MSGL_WARN, "PlayResY undefined, setting to %d", track->PlayResY); } else if (track->PlayResX <= 0 && track->PlayResY == 1024) { track->PlayResX = 1280; ass_msg(lib, MSGL_WARN, "PlayResX undefined, setting to %d", track->PlayResX); } else if (track->PlayResX <= 0) { track->PlayResX = FFMAX(1, track->PlayResY * 4LL / 3); ass_msg(lib, MSGL_WARN, "PlayResX undefined, setting to %d", track->PlayResX); } } }
null
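The [Fonts] handling in the libass cell above accumulates the encoded attachment text in parser_priv->fontdata and decodes it in decode_font: each input character carries six bits (its character code minus 33), a full group of four characters yields three raw bytes, and a trailing group of two or three characters yields one or two bytes, which is why the output buffer is sized as size / 4 * 3 + FFMAX(size % 4, 1) - 1. The standalone Go sketch below only mirrors that arithmetic for illustration; the function name and the sample input are mine and do not come from the dataset or from libass.

package main

import "fmt"

// decodeChars mirrors the arithmetic of decode_chars in the cell above.
// Each character contributes 6 bits ((c - 33) & 63); a group of 4 characters
// yields 3 output bytes, a trailing group of 2 or 3 yields 1 or 2 bytes.
// Callers pass at most 4 characters per group, as decode_font does.
func decodeChars(src []byte) []byte {
	var value uint32
	for i, c := range src {
		value |= uint32((c-33)&63) << (6 * (3 - i))
	}
	out := []byte{byte(value >> 16)}
	if len(src) >= 3 {
		out = append(out, byte(value>>8))
	}
	if len(src) >= 4 {
		out = append(out, byte(value))
	}
	return out
}

func main() {
	// Four characters of [Fonts] payload decode to three raw bytes.
	fmt.Printf("% x\n", decodeChars([]byte("!#%'")))
}

Feeding it the four-character group "!#%'" produces the bytes 00 21 06, the same values decode_chars would emit for that input.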
230
CWE-787
CVE-2020-5303
package p2p import ( "context" "fmt" "net" "time" "github.com/pkg/errors" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/p2p/conn" ) const ( defaultDialTimeout = time.Second defaultFilterTimeout = 5 * time.Second defaultHandshakeTimeout = 3 * time.Second ) // IPResolver is a behaviour subset of net.Resolver. type IPResolver interface { LookupIPAddr(context.Context, string) ([]net.IPAddr, error) } // accept is the container to carry the upgraded connection and NodeInfo from an // asynchronously running routine to the Accept method. type accept struct { netAddr *NetAddress conn net.Conn nodeInfo NodeInfo err error } // peerConfig is used to bundle data we need to fully setup a Peer with an // MConn, provided by the caller of Accept and Dial (currently the Switch). This // a temporary measure until reactor setup is less dynamic and we introduce the // concept of PeerBehaviour to communicate about significant Peer lifecycle // events. // TODO(xla): Refactor out with more static Reactor setup and PeerBehaviour. type peerConfig struct { chDescs []*conn.ChannelDescriptor onPeerError func(Peer, interface{}) outbound bool // isPersistent allows you to set a function, which, given socket address // (for outbound peers) OR self-reported address (for inbound peers), tells // if the peer is persistent or not. isPersistent func(*NetAddress) bool reactorsByCh map[byte]Reactor metrics *Metrics } // Transport emits and connects to Peers. The implementation of Peer is left to // the transport. Each transport is also responsible to filter establishing // peers specific to its domain. type Transport interface { // Listening address. NetAddress() NetAddress // Accept returns a newly connected Peer. Accept(peerConfig) (Peer, error) // Dial connects to the Peer for the address. Dial(NetAddress, peerConfig) (Peer, error) // Cleanup any resources associated with Peer. Cleanup(Peer) } // transportLifecycle bundles the methods for callers to control start and stop // behaviour. type transportLifecycle interface { Close() error Listen(NetAddress) error } // ConnFilterFunc to be implemented by filter hooks after a new connection has // been established. The set of exisiting connections is passed along together // with all resolved IPs for the new connection. type ConnFilterFunc func(ConnSet, net.Conn, []net.IP) error // ConnDuplicateIPFilter resolves and keeps all ips for an incoming connection // and refuses new ones if they come from a known ip. func ConnDuplicateIPFilter() ConnFilterFunc { return func(cs ConnSet, c net.Conn, ips []net.IP) error { for _, ip := range ips { if cs.HasIP(ip) { return ErrRejected{ conn: c, err: fmt.Errorf("ip<%v> already connected", ip), isDuplicate: true, } } } return nil } } // MultiplexTransportOption sets an optional parameter on the // MultiplexTransport. type MultiplexTransportOption func(*MultiplexTransport) // MultiplexTransportConnFilters sets the filters for rejection new connections. func MultiplexTransportConnFilters( filters ...ConnFilterFunc, ) MultiplexTransportOption { return func(mt *MultiplexTransport) { mt.connFilters = filters } } // MultiplexTransportFilterTimeout sets the timeout waited for filter calls to // return. func MultiplexTransportFilterTimeout( timeout time.Duration, ) MultiplexTransportOption { return func(mt *MultiplexTransport) { mt.filterTimeout = timeout } } // MultiplexTransportResolver sets the Resolver used for ip lokkups, defaults to // net.DefaultResolver. 
func MultiplexTransportResolver(resolver IPResolver) MultiplexTransportOption { return func(mt *MultiplexTransport) { mt.resolver = resolver } } // MultiplexTransport accepts and dials tcp connections and upgrades them to // multiplexed peers. type MultiplexTransport struct { netAddr NetAddress listener net.Listener acceptc chan accept closec chan struct{} // Lookup table for duplicate ip and id checks. conns ConnSet connFilters []ConnFilterFunc dialTimeout time.Duration filterTimeout time.Duration handshakeTimeout time.Duration nodeInfo NodeInfo nodeKey NodeKey resolver IPResolver // TODO(xla): This config is still needed as we parameterise peerConn and // peer currently. All relevant configuration should be refactored into options // with sane defaults. mConfig conn.MConnConfig } // Test multiplexTransport for interface completeness. var _ Transport = (*MultiplexTransport)(nil) var _ transportLifecycle = (*MultiplexTransport)(nil) // NewMultiplexTransport returns a tcp connected multiplexed peer. func NewMultiplexTransport( nodeInfo NodeInfo, nodeKey NodeKey, mConfig conn.MConnConfig, ) *MultiplexTransport { return &MultiplexTransport{ acceptc: make(chan accept), closec: make(chan struct{}), dialTimeout: defaultDialTimeout, filterTimeout: defaultFilterTimeout, handshakeTimeout: defaultHandshakeTimeout, mConfig: mConfig, nodeInfo: nodeInfo, nodeKey: nodeKey, conns: NewConnSet(), resolver: net.DefaultResolver, } } // NetAddress implements Transport. func (mt *MultiplexTransport) NetAddress() NetAddress { return mt.netAddr } // Accept implements Transport. func (mt *MultiplexTransport) Accept(cfg peerConfig) (Peer, error) { select { // This case should never have any side-effectful/blocking operations to // ensure that quality peers are ready to be used. case a := <-mt.acceptc: if a.err != nil { return nil, a.err } cfg.outbound = false return mt.wrapPeer(a.conn, a.nodeInfo, cfg, a.netAddr), nil case <-mt.closec: return nil, ErrTransportClosed{} } } // Dial implements Transport. func (mt *MultiplexTransport) Dial( addr NetAddress, cfg peerConfig, ) (Peer, error) { c, err := addr.DialTimeout(mt.dialTimeout) if err != nil { return nil, err } // TODO(xla): Evaluate if we should apply filters if we explicitly dial. if err := mt.filterConn(c); err != nil { return nil, err } secretConn, nodeInfo, err := mt.upgrade(c, &addr) if err != nil { return nil, err } cfg.outbound = true p := mt.wrapPeer(secretConn, nodeInfo, cfg, &addr) return p, nil } // Close implements transportLifecycle. func (mt *MultiplexTransport) Close() error { close(mt.closec) if mt.listener != nil { return mt.listener.Close() } return nil } // Listen implements transportLifecycle. func (mt *MultiplexTransport) Listen(addr NetAddress) error { ln, err := net.Listen("tcp", addr.DialString()) if err != nil { return err } mt.netAddr = addr mt.listener = ln go mt.acceptPeers() return nil } func (mt *MultiplexTransport) acceptPeers() { for { c, err := mt.listener.Accept() if err != nil { // If Close() has been called, silently exit. select { case _, ok := <-mt.closec: if !ok { return } default: // Transport is not closed } mt.acceptc <- accept{err: err} return } // Connection upgrade and filtering should be asynchronous to avoid // Head-of-line blocking[0]. 
// Reference: https://github.com/tendermint/tendermint/issues/2047 // // [0] https://en.wikipedia.org/wiki/Head-of-line_blocking go func(c net.Conn) { defer func() { if r := recover(); r != nil { err := ErrRejected{ conn: c, err: errors.Errorf("recovered from panic: %v", r), isAuthFailure: true, } select { case mt.acceptc <- accept{err: err}: case <-mt.closec: // Give up if the transport was closed. _ = c.Close() return } } }() var ( nodeInfo NodeInfo secretConn *conn.SecretConnection netAddr *NetAddress ) err := mt.filterConn(c) if err == nil { secretConn, nodeInfo, err = mt.upgrade(c, nil) if err == nil { addr := c.RemoteAddr() id := PubKeyToID(secretConn.RemotePubKey()) netAddr = NewNetAddress(id, addr) } } select { case mt.acceptc <- accept{netAddr, secretConn, nodeInfo, err}: // Make the upgraded peer available. case <-mt.closec: // Give up if the transport was closed. _ = c.Close() return } }(c) } } // Cleanup removes the given address from the connections set and // closes the connection. func (mt *MultiplexTransport) Cleanup(p Peer) { mt.conns.RemoveAddr(p.RemoteAddr()) _ = p.CloseConn() } func (mt *MultiplexTransport) cleanup(c net.Conn) error { mt.conns.Remove(c) return c.Close() } func (mt *MultiplexTransport) filterConn(c net.Conn) (err error) { defer func() { if err != nil { _ = c.Close() } }() // Reject if connection is already present. if mt.conns.Has(c) { return ErrRejected{conn: c, isDuplicate: true} } // Resolve ips for incoming conn. ips, err := resolveIPs(mt.resolver, c) if err != nil { return err } errc := make(chan error, len(mt.connFilters)) for _, f := range mt.connFilters { go func(f ConnFilterFunc, c net.Conn, ips []net.IP, errc chan<- error) { errc <- f(mt.conns, c, ips) }(f, c, ips, errc) } for i := 0; i < cap(errc); i++ { select { case err := <-errc: if err != nil { return ErrRejected{conn: c, err: err, isFiltered: true} } case <-time.After(mt.filterTimeout): return ErrFilterTimeout{} } } mt.conns.Set(c, ips) return nil } func (mt *MultiplexTransport) upgrade( c net.Conn, dialedAddr *NetAddress, ) (secretConn *conn.SecretConnection, nodeInfo NodeInfo, err error) { defer func() { if err != nil { _ = mt.cleanup(c) } }() secretConn, err = upgradeSecretConn(c, mt.handshakeTimeout, mt.nodeKey.PrivKey) if err != nil { return nil, nil, ErrRejected{ conn: c, err: fmt.Errorf("secret conn failed: %v", err), isAuthFailure: true, } } // For outgoing conns, ensure connection key matches dialed key. connID := PubKeyToID(secretConn.RemotePubKey()) if dialedAddr != nil { if dialedID := dialedAddr.ID; connID != dialedID { return nil, nil, ErrRejected{ conn: c, id: connID, err: fmt.Errorf( "conn.ID (%v) dialed ID (%v) mismatch", connID, dialedID, ), isAuthFailure: true, } } } nodeInfo, err = handshake(secretConn, mt.handshakeTimeout, mt.nodeInfo) if err != nil { return nil, nil, ErrRejected{ conn: c, err: fmt.Errorf("handshake failed: %v", err), isAuthFailure: true, } } if err := nodeInfo.Validate(); err != nil { return nil, nil, ErrRejected{ conn: c, err: err, isNodeInfoInvalid: true, } } // Ensure connection key matches self reported key. if connID != nodeInfo.ID() { return nil, nil, ErrRejected{ conn: c, id: connID, err: fmt.Errorf( "conn.ID (%v) NodeInfo.ID (%v) mismatch", connID, nodeInfo.ID(), ), isAuthFailure: true, } } // Reject self. 
if mt.nodeInfo.ID() == nodeInfo.ID() { return nil, nil, ErrRejected{ addr: *NewNetAddress(nodeInfo.ID(), c.RemoteAddr()), conn: c, id: nodeInfo.ID(), isSelf: true, } } if err := mt.nodeInfo.CompatibleWith(nodeInfo); err != nil { return nil, nil, ErrRejected{ conn: c, err: err, id: nodeInfo.ID(), isIncompatible: true, } } return secretConn, nodeInfo, nil } func (mt *MultiplexTransport) wrapPeer( c net.Conn, ni NodeInfo, cfg peerConfig, socketAddr *NetAddress, ) Peer { persistent := false if cfg.isPersistent != nil { if cfg.outbound { persistent = cfg.isPersistent(socketAddr) } else { selfReportedAddr, err := ni.NetAddress() if err == nil { persistent = cfg.isPersistent(selfReportedAddr) } } } peerConn := newPeerConn( cfg.outbound, persistent, c, socketAddr, ) p := newPeer( peerConn, mt.mConfig, ni, cfg.reactorsByCh, cfg.chDescs, cfg.onPeerError, PeerMetrics(cfg.metrics), ) return p } func handshake( c net.Conn, timeout time.Duration, nodeInfo NodeInfo, ) (NodeInfo, error) { if err := c.SetDeadline(time.Now().Add(timeout)); err != nil { return nil, err } var ( errc = make(chan error, 2) peerNodeInfo DefaultNodeInfo ourNodeInfo = nodeInfo.(DefaultNodeInfo) ) go func(errc chan<- error, c net.Conn) { _, err := cdc.MarshalBinaryLengthPrefixedWriter(c, ourNodeInfo) errc <- err }(errc, c) go func(errc chan<- error, c net.Conn) { _, err := cdc.UnmarshalBinaryLengthPrefixedReader( c, &peerNodeInfo, int64(MaxNodeInfoSize()), ) errc <- err }(errc, c) for i := 0; i < cap(errc); i++ { err := <-errc if err != nil { return nil, err } } return peerNodeInfo, c.SetDeadline(time.Time{}) } func upgradeSecretConn( c net.Conn, timeout time.Duration, privKey crypto.PrivKey, ) (*conn.SecretConnection, error) { if err := c.SetDeadline(time.Now().Add(timeout)); err != nil { return nil, err } sc, err := conn.MakeSecretConnection(c, privKey) if err != nil { return nil, err } return sc, sc.SetDeadline(time.Time{}) } func resolveIPs(resolver IPResolver, c net.Conn) ([]net.IP, error) { host, _, err := net.SplitHostPort(c.RemoteAddr().String()) if err != nil { return nil, err } addrs, err := resolver.LookupIPAddr(context.Background(), host) if err != nil { return nil, err } ips := []net.IP{} for _, addr := range addrs { ips = append(ips, addr.IP) } return ips, nil }
null
package p2p import ( "context" "fmt" "net" "time" "github.com/pkg/errors" "golang.org/x/net/netutil" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/p2p/conn" ) const ( defaultDialTimeout = time.Second defaultFilterTimeout = 5 * time.Second defaultHandshakeTimeout = 3 * time.Second ) // IPResolver is a behaviour subset of net.Resolver. type IPResolver interface { LookupIPAddr(context.Context, string) ([]net.IPAddr, error) } // accept is the container to carry the upgraded connection and NodeInfo from an // asynchronously running routine to the Accept method. type accept struct { netAddr *NetAddress conn net.Conn nodeInfo NodeInfo err error } // peerConfig is used to bundle data we need to fully setup a Peer with an // MConn, provided by the caller of Accept and Dial (currently the Switch). This // a temporary measure until reactor setup is less dynamic and we introduce the // concept of PeerBehaviour to communicate about significant Peer lifecycle // events. // TODO(xla): Refactor out with more static Reactor setup and PeerBehaviour. type peerConfig struct { chDescs []*conn.ChannelDescriptor onPeerError func(Peer, interface{}) outbound bool // isPersistent allows you to set a function, which, given socket address // (for outbound peers) OR self-reported address (for inbound peers), tells // if the peer is persistent or not. isPersistent func(*NetAddress) bool reactorsByCh map[byte]Reactor metrics *Metrics } // Transport emits and connects to Peers. The implementation of Peer is left to // the transport. Each transport is also responsible to filter establishing // peers specific to its domain. type Transport interface { // Listening address. NetAddress() NetAddress // Accept returns a newly connected Peer. Accept(peerConfig) (Peer, error) // Dial connects to the Peer for the address. Dial(NetAddress, peerConfig) (Peer, error) // Cleanup any resources associated with Peer. Cleanup(Peer) } // transportLifecycle bundles the methods for callers to control start and stop // behaviour. type transportLifecycle interface { Close() error Listen(NetAddress) error } // ConnFilterFunc to be implemented by filter hooks after a new connection has // been established. The set of exisiting connections is passed along together // with all resolved IPs for the new connection. type ConnFilterFunc func(ConnSet, net.Conn, []net.IP) error // ConnDuplicateIPFilter resolves and keeps all ips for an incoming connection // and refuses new ones if they come from a known ip. func ConnDuplicateIPFilter() ConnFilterFunc { return func(cs ConnSet, c net.Conn, ips []net.IP) error { for _, ip := range ips { if cs.HasIP(ip) { return ErrRejected{ conn: c, err: fmt.Errorf("ip<%v> already connected", ip), isDuplicate: true, } } } return nil } } // MultiplexTransportOption sets an optional parameter on the // MultiplexTransport. type MultiplexTransportOption func(*MultiplexTransport) // MultiplexTransportConnFilters sets the filters for rejection new connections. func MultiplexTransportConnFilters( filters ...ConnFilterFunc, ) MultiplexTransportOption { return func(mt *MultiplexTransport) { mt.connFilters = filters } } // MultiplexTransportFilterTimeout sets the timeout waited for filter calls to // return. func MultiplexTransportFilterTimeout( timeout time.Duration, ) MultiplexTransportOption { return func(mt *MultiplexTransport) { mt.filterTimeout = timeout } } // MultiplexTransportResolver sets the Resolver used for ip lokkups, defaults to // net.DefaultResolver. 
func MultiplexTransportResolver(resolver IPResolver) MultiplexTransportOption { return func(mt *MultiplexTransport) { mt.resolver = resolver } } // MultiplexTransportMaxIncomingConnections sets the maximum number of // simultaneous connections (incoming). Default: 0 (unlimited) func MultiplexTransportMaxIncomingConnections(n int) MultiplexTransportOption { return func(mt *MultiplexTransport) { mt.maxIncomingConnections = n } } // MultiplexTransport accepts and dials tcp connections and upgrades them to // multiplexed peers. type MultiplexTransport struct { netAddr NetAddress listener net.Listener maxIncomingConnections int // see MaxIncomingConnections acceptc chan accept closec chan struct{} // Lookup table for duplicate ip and id checks. conns ConnSet connFilters []ConnFilterFunc dialTimeout time.Duration filterTimeout time.Duration handshakeTimeout time.Duration nodeInfo NodeInfo nodeKey NodeKey resolver IPResolver // TODO(xla): This config is still needed as we parameterise peerConn and // peer currently. All relevant configuration should be refactored into options // with sane defaults. mConfig conn.MConnConfig } // Test multiplexTransport for interface completeness. var _ Transport = (*MultiplexTransport)(nil) var _ transportLifecycle = (*MultiplexTransport)(nil) // NewMultiplexTransport returns a tcp connected multiplexed peer. func NewMultiplexTransport( nodeInfo NodeInfo, nodeKey NodeKey, mConfig conn.MConnConfig, ) *MultiplexTransport { return &MultiplexTransport{ acceptc: make(chan accept), closec: make(chan struct{}), dialTimeout: defaultDialTimeout, filterTimeout: defaultFilterTimeout, handshakeTimeout: defaultHandshakeTimeout, mConfig: mConfig, nodeInfo: nodeInfo, nodeKey: nodeKey, conns: NewConnSet(), resolver: net.DefaultResolver, } } // NetAddress implements Transport. func (mt *MultiplexTransport) NetAddress() NetAddress { return mt.netAddr } // Accept implements Transport. func (mt *MultiplexTransport) Accept(cfg peerConfig) (Peer, error) { select { // This case should never have any side-effectful/blocking operations to // ensure that quality peers are ready to be used. case a := <-mt.acceptc: if a.err != nil { return nil, a.err } cfg.outbound = false return mt.wrapPeer(a.conn, a.nodeInfo, cfg, a.netAddr), nil case <-mt.closec: return nil, ErrTransportClosed{} } } // Dial implements Transport. func (mt *MultiplexTransport) Dial( addr NetAddress, cfg peerConfig, ) (Peer, error) { c, err := addr.DialTimeout(mt.dialTimeout) if err != nil { return nil, err } // TODO(xla): Evaluate if we should apply filters if we explicitly dial. if err := mt.filterConn(c); err != nil { return nil, err } secretConn, nodeInfo, err := mt.upgrade(c, &addr) if err != nil { return nil, err } cfg.outbound = true p := mt.wrapPeer(secretConn, nodeInfo, cfg, &addr) return p, nil } // Close implements transportLifecycle. func (mt *MultiplexTransport) Close() error { close(mt.closec) if mt.listener != nil { return mt.listener.Close() } return nil } // Listen implements transportLifecycle. func (mt *MultiplexTransport) Listen(addr NetAddress) error { ln, err := net.Listen("tcp", addr.DialString()) if err != nil { return err } if mt.maxIncomingConnections > 0 { ln = netutil.LimitListener(ln, mt.maxIncomingConnections) } mt.netAddr = addr mt.listener = ln go mt.acceptPeers() return nil } func (mt *MultiplexTransport) acceptPeers() { for { c, err := mt.listener.Accept() if err != nil { // If Close() has been called, silently exit. 
select { case _, ok := <-mt.closec: if !ok { return } default: // Transport is not closed } mt.acceptc <- accept{err: err} return } // Connection upgrade and filtering should be asynchronous to avoid // Head-of-line blocking[0]. // Reference: https://github.com/tendermint/tendermint/issues/2047 // // [0] https://en.wikipedia.org/wiki/Head-of-line_blocking go func(c net.Conn) { defer func() { if r := recover(); r != nil { err := ErrRejected{ conn: c, err: errors.Errorf("recovered from panic: %v", r), isAuthFailure: true, } select { case mt.acceptc <- accept{err: err}: case <-mt.closec: // Give up if the transport was closed. _ = c.Close() return } } }() var ( nodeInfo NodeInfo secretConn *conn.SecretConnection netAddr *NetAddress ) err := mt.filterConn(c) if err == nil { secretConn, nodeInfo, err = mt.upgrade(c, nil) if err == nil { addr := c.RemoteAddr() id := PubKeyToID(secretConn.RemotePubKey()) netAddr = NewNetAddress(id, addr) } } select { case mt.acceptc <- accept{netAddr, secretConn, nodeInfo, err}: // Make the upgraded peer available. case <-mt.closec: // Give up if the transport was closed. _ = c.Close() return } }(c) } } // Cleanup removes the given address from the connections set and // closes the connection. func (mt *MultiplexTransport) Cleanup(p Peer) { mt.conns.RemoveAddr(p.RemoteAddr()) _ = p.CloseConn() } func (mt *MultiplexTransport) cleanup(c net.Conn) error { mt.conns.Remove(c) return c.Close() } func (mt *MultiplexTransport) filterConn(c net.Conn) (err error) { defer func() { if err != nil { _ = c.Close() } }() // Reject if connection is already present. if mt.conns.Has(c) { return ErrRejected{conn: c, isDuplicate: true} } // Resolve ips for incoming conn. ips, err := resolveIPs(mt.resolver, c) if err != nil { return err } errc := make(chan error, len(mt.connFilters)) for _, f := range mt.connFilters { go func(f ConnFilterFunc, c net.Conn, ips []net.IP, errc chan<- error) { errc <- f(mt.conns, c, ips) }(f, c, ips, errc) } for i := 0; i < cap(errc); i++ { select { case err := <-errc: if err != nil { return ErrRejected{conn: c, err: err, isFiltered: true} } case <-time.After(mt.filterTimeout): return ErrFilterTimeout{} } } mt.conns.Set(c, ips) return nil } func (mt *MultiplexTransport) upgrade( c net.Conn, dialedAddr *NetAddress, ) (secretConn *conn.SecretConnection, nodeInfo NodeInfo, err error) { defer func() { if err != nil { _ = mt.cleanup(c) } }() secretConn, err = upgradeSecretConn(c, mt.handshakeTimeout, mt.nodeKey.PrivKey) if err != nil { return nil, nil, ErrRejected{ conn: c, err: fmt.Errorf("secret conn failed: %v", err), isAuthFailure: true, } } // For outgoing conns, ensure connection key matches dialed key. connID := PubKeyToID(secretConn.RemotePubKey()) if dialedAddr != nil { if dialedID := dialedAddr.ID; connID != dialedID { return nil, nil, ErrRejected{ conn: c, id: connID, err: fmt.Errorf( "conn.ID (%v) dialed ID (%v) mismatch", connID, dialedID, ), isAuthFailure: true, } } } nodeInfo, err = handshake(secretConn, mt.handshakeTimeout, mt.nodeInfo) if err != nil { return nil, nil, ErrRejected{ conn: c, err: fmt.Errorf("handshake failed: %v", err), isAuthFailure: true, } } if err := nodeInfo.Validate(); err != nil { return nil, nil, ErrRejected{ conn: c, err: err, isNodeInfoInvalid: true, } } // Ensure connection key matches self reported key. if connID != nodeInfo.ID() { return nil, nil, ErrRejected{ conn: c, id: connID, err: fmt.Errorf( "conn.ID (%v) NodeInfo.ID (%v) mismatch", connID, nodeInfo.ID(), ), isAuthFailure: true, } } // Reject self. 
if mt.nodeInfo.ID() == nodeInfo.ID() { return nil, nil, ErrRejected{ addr: *NewNetAddress(nodeInfo.ID(), c.RemoteAddr()), conn: c, id: nodeInfo.ID(), isSelf: true, } } if err := mt.nodeInfo.CompatibleWith(nodeInfo); err != nil { return nil, nil, ErrRejected{ conn: c, err: err, id: nodeInfo.ID(), isIncompatible: true, } } return secretConn, nodeInfo, nil } func (mt *MultiplexTransport) wrapPeer( c net.Conn, ni NodeInfo, cfg peerConfig, socketAddr *NetAddress, ) Peer { persistent := false if cfg.isPersistent != nil { if cfg.outbound { persistent = cfg.isPersistent(socketAddr) } else { selfReportedAddr, err := ni.NetAddress() if err == nil { persistent = cfg.isPersistent(selfReportedAddr) } } } peerConn := newPeerConn( cfg.outbound, persistent, c, socketAddr, ) p := newPeer( peerConn, mt.mConfig, ni, cfg.reactorsByCh, cfg.chDescs, cfg.onPeerError, PeerMetrics(cfg.metrics), ) return p } func handshake( c net.Conn, timeout time.Duration, nodeInfo NodeInfo, ) (NodeInfo, error) { if err := c.SetDeadline(time.Now().Add(timeout)); err != nil { return nil, err } var ( errc = make(chan error, 2) peerNodeInfo DefaultNodeInfo ourNodeInfo = nodeInfo.(DefaultNodeInfo) ) go func(errc chan<- error, c net.Conn) { _, err := cdc.MarshalBinaryLengthPrefixedWriter(c, ourNodeInfo) errc <- err }(errc, c) go func(errc chan<- error, c net.Conn) { _, err := cdc.UnmarshalBinaryLengthPrefixedReader( c, &peerNodeInfo, int64(MaxNodeInfoSize()), ) errc <- err }(errc, c) for i := 0; i < cap(errc); i++ { err := <-errc if err != nil { return nil, err } } return peerNodeInfo, c.SetDeadline(time.Time{}) } func upgradeSecretConn( c net.Conn, timeout time.Duration, privKey crypto.PrivKey, ) (*conn.SecretConnection, error) { if err := c.SetDeadline(time.Now().Add(timeout)); err != nil { return nil, err } sc, err := conn.MakeSecretConnection(c, privKey) if err != nil { return nil, err } return sc, sc.SetDeadline(time.Time{}) } func resolveIPs(resolver IPResolver, c net.Conn) ([]net.IP, error) { host, _, err := net.SplitHostPort(c.RemoteAddr().String()) if err != nil { return nil, err } addrs, err := resolver.LookupIPAddr(context.Background(), host) if err != nil { return nil, err } ips := []net.IP{} for _, addr := range addrs { ips = append(ips, addr.IP) } return ips, nil }
null
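The functional difference between the source and ground_truth cells of row 230 (CVE-2020-5303) is an inbound-connection cap on MultiplexTransport: the patched version imports golang.org/x/net/netutil, adds a maxIncomingConnections field together with the MultiplexTransportMaxIncomingConnections option, and has Listen wrap the TCP listener with netutil.LimitListener whenever the limit is positive. The Go sketch below is a minimal usage illustration built only from the option signatures visible above; the helper name and the cap of 100 are assumptions of mine, and the real wiring of options into a node is outside this excerpt.

package p2p

import "github.com/tendermint/tendermint/p2p/conn"

// newLimitedTransport (illustrative only) constructs a transport and applies
// the options shown in the patched cell above. Each MultiplexTransportOption
// is just a func(*MultiplexTransport), so it can be applied directly before
// Listen is called.
func newLimitedTransport(nodeInfo NodeInfo, nodeKey NodeKey, mConfig conn.MConnConfig) *MultiplexTransport {
	mt := NewMultiplexTransport(nodeInfo, nodeKey, mConfig)
	for _, opt := range []MultiplexTransportOption{
		MultiplexTransportMaxIncomingConnections(100), // example cap, not a recommended value
		MultiplexTransportConnFilters(ConnDuplicateIPFilter()),
	} {
		opt(mt)
	}
	return mt
}

netutil.LimitListener admits at most n simultaneous connections and frees a slot when a connection closes, so the cap bounds concurrent inbound upgrades rather than the total number of connections over time.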
231
CWE-787
CVE-2020-6016
//====== Copyright Valve Corporation, All rights reserved. ==================== #pragma once #include "../steamnetworkingsockets_internal.h" #include <vector> #include <map> #include <set> // Set paranoia level, if not already set: // 0 = disabled // 1 = sometimes // 2 = max #ifndef STEAMNETWORKINGSOCKETS_SNP_PARANOIA #ifdef _DEBUG #define STEAMNETWORKINGSOCKETS_SNP_PARANOIA 2 #else #define STEAMNETWORKINGSOCKETS_SNP_PARANOIA 0 #endif #endif #if STEAMNETWORKINGSOCKETS_SNP_PARANOIA > 0 #if defined(__GNUC__ ) && defined( __linux__ ) && !defined( __ANDROID__ ) #include <debug/map> // FIXME use debug versions template< typename K, typename V, typename L = std::less<K> > using std_map = __gnu_debug::map<K,V,L>; template <typename K, typename V, typename L> inline int len( const std_map<K,V,L> &map ) { return (int)map.size(); } #else template< typename K, typename V, typename L = std::less<K> > using std_map = std::map<K,V,L>; #endif #else template< typename K, typename V, typename L = std::less<K> > using std_map = std::map<K,V,L>; #endif struct P2PSessionState_t; namespace SteamNetworkingSocketsLib { // Acks may be delayed. This controls the precision used on the wire to encode the delay time. constexpr int k_nAckDelayPrecisionShift = 5; constexpr SteamNetworkingMicroseconds k_usecAckDelayPrecision = (1 << k_nAckDelayPrecisionShift ); // When a receiver detects a dropped packet, wait a bit before NACKing it, to give it time // to arrive out of order. This is really important for many different types of connections // that send on different channels, e.g. DSL, Wifi. // Here we really could be smarter, by tracking how often dropped // packets really do arrive out of order. If the rate is low, then it's // probably best to go ahead and send a NACK now, rather than waiting. // But if dropped packets do often arrive out of order, then waiting // to NACK will probably save some retransmits. In fact, instead // of learning the rate, we should probably try to learn the delay. // E.g. a probability distribution P(t), which describes the odds // that a dropped packet will have arrived at time t. Then you // adjust the NACK delay such that P(nack_delay) gives the best // balance between false positive and false negative rates. constexpr SteamNetworkingMicroseconds k_usecNackFlush = 3*1000; // Max size of a message that we are wiling to *receive*. constexpr int k_cbMaxMessageSizeRecv = k_cbMaxSteamNetworkingSocketsMessageSizeSend*2; // The max we will look ahead and allocate data, ahead of the reliable // messages we have been able to decode. We limit this to make sure that // a malicious sender cannot exploit us. constexpr int k_cbMaxBufferedReceiveReliableData = k_cbMaxMessageSizeRecv + 64*1024; constexpr int k_nMaxReliableStreamGaps_Extend = 30; // Discard reliable data past the end of the stream, if it would cause us to get too many gaps constexpr int k_nMaxReliableStreamGaps_Fragment = 20; // Discard reliable data that is filling in the middle of a hole, if it would cause the number of gaps to exceed this number constexpr int k_nMaxPacketGaps = 62; // Don't bother tracking more than N gaps. Instead, we will end up NACKing some packets that we actually did receive. This should not break the protocol, but it protects us from malicious sender // Hang on to at most N unreliable segments. 
When packets are dropping // and unreliable messages being fragmented, we will accumulate old pieces // of unreliable messages that we retain in hopes that we will get the // missing piece and reassemble the whole message. At a certain point we // must give up and discard them. We use a simple strategy of just limiting // the max total number. In reality large unreliable messages are just a very bad // idea, since the odds of the message dropping increase exponentially with the // number of packets. With 20 packets, even 1% packet loss becomes ~80% message // loss. (Assuming naive fragmentation and reassembly and no forward // error correction.) constexpr int k_nMaxBufferedUnreliableSegments = 20; // If app tries to send a message larger than N bytes unreliably, // complain about it, and automatically convert to reliable. // About 15 segments. constexpr int k_cbMaxUnreliableMsgSize = 15*1100; class CSteamNetworkConnectionBase; class CConnectionTransport; struct SteamNetworkingMessageQueue; /// Actual implementation of SteamNetworkingMessage_t, which is the API /// visible type. Has extra fields needed to put the message into intrusive /// linked lists. class CSteamNetworkingMessage : public SteamNetworkingMessage_t { public: static CSteamNetworkingMessage *New( CSteamNetworkConnectionBase *pParent, uint32 cbSize, int64 nMsgNum, int nFlags, SteamNetworkingMicroseconds usecNow ); static CSteamNetworkingMessage *New( uint32 cbSize ); static void DefaultFreeData( SteamNetworkingMessage_t *pMsg ); /// OK to delay sending this message until this time. Set to zero to explicitly force /// Nagle timer to expire and send now (but this should behave the same as if the /// timer < usecNow). If the timer is cleared, then all messages with lower message numbers /// are also cleared. inline SteamNetworkingMicroseconds SNPSend_UsecNagle() const { return m_usecTimeReceived; } inline void SNPSend_SetUsecNagle( SteamNetworkingMicroseconds x ) { m_usecTimeReceived = x; } /// Offset in reliable stream of the header byte. 0 if we're not reliable. inline int64 SNPSend_ReliableStreamPos() const { return m_nConnUserData; } inline void SNPSend_SetReliableStreamPos( int64 x ) { m_nConnUserData = x; } inline int SNPSend_ReliableStreamSize() const { DbgAssert( m_nFlags & k_nSteamNetworkingSend_Reliable && m_nConnUserData > 0 && m_cbSNPSendReliableHeader > 0 && m_cbSize >= m_cbSNPSendReliableHeader ); return m_cbSize; } inline bool SNPSend_IsReliable() const { if ( m_nFlags & k_nSteamNetworkingSend_Reliable ) { DbgAssert( m_nConnUserData > 0 && m_cbSNPSendReliableHeader > 0 && m_cbSize >= m_cbSNPSendReliableHeader ); return true; } DbgAssert( m_nConnUserData == 0 && m_cbSNPSendReliableHeader == 0 ); return false; } // Reliable stream header int m_cbSNPSendReliableHeader; byte *SNPSend_ReliableHeader() { // !KLUDGE! Reuse the peer identity to hold the reliable header return (byte*)&m_identityPeer; } /// Remove it from queues void Unlink(); struct Links { SteamNetworkingMessageQueue *m_pQueue; CSteamNetworkingMessage *m_pPrev; CSteamNetworkingMessage *m_pNext; inline void Clear() { m_pQueue = nullptr; m_pPrev = nullptr; m_pNext = nullptr; } }; /// Intrusive links for the "primary" list we are in Links m_links; /// Intrusive links for any secondary list we may be in. 
(Same listen socket or /// P2P channel, depending on message type) Links m_linksSecondaryQueue; void LinkBefore( CSteamNetworkingMessage *pSuccessor, Links CSteamNetworkingMessage::*pMbrLinks, SteamNetworkingMessageQueue *pQueue ); void LinkToQueueTail( Links CSteamNetworkingMessage::*pMbrLinks, SteamNetworkingMessageQueue *pQueue ); void UnlinkFromQueue( Links CSteamNetworkingMessage::*pMbrLinks ); private: // Use New and Release()!! inline CSteamNetworkingMessage() {} inline ~CSteamNetworkingMessage() {} static void ReleaseFunc( SteamNetworkingMessage_t *pIMsg ); }; /// A doubly-linked list of CSteamNetworkingMessage struct SteamNetworkingMessageQueue { CSteamNetworkingMessage *m_pFirst = nullptr; CSteamNetworkingMessage *m_pLast = nullptr; inline bool empty() const { if ( m_pFirst ) { Assert( m_pLast ); return false; } Assert( !m_pLast ); return true; } /// Remove the first messages out of the queue (up to nMaxMessages). Returns the number returned int RemoveMessages( SteamNetworkingMessage_t **ppOutMessages, int nMaxMessages ); /// Delete all queued messages void PurgeMessages(); }; /// Maximum number of packets we will send in one Think() call. const int k_nMaxPacketsPerThink = 16; /// Max number of tokens we are allowed to store up in reserve, for a burst. const float k_flSendRateBurstOverageAllowance = k_cbSteamNetworkingSocketsMaxEncryptedPayloadSend; struct SNPRange_t { /// Byte or sequence number range int64 m_nBegin; int64 m_nEnd; // STL-style. It's one past the end inline int64 length() const { // In general, allow zero-length ranges, but not negative ones Assert( m_nEnd >= m_nBegin ); return m_nEnd - m_nBegin; } /// Strict comparison function. This is used in situations where /// ranges must not overlap, AND we also never search for /// a range that might overlap. struct NonOverlappingLess { inline bool operator ()(const SNPRange_t &l, const SNPRange_t &r ) const { if ( l.m_nBegin < r.m_nBegin ) return true; AssertMsg( l.m_nBegin > r.m_nBegin || l.m_nEnd == r.m_nEnd, "Ranges should not overlap in this map!" ); return false; } }; }; /// A packet that has been sent but we don't yet know if was received /// or dropped. These are kept in an ordered map keyed by packet number. /// (Hence the packet number not being a member) When we receive an ACK, /// we remove packets from this list. struct SNPInFlightPacket_t { // // FIXME - Could definitely pack this structure better. And maybe // worth it to optimize cache // /// Local timestamp when we sent it SteamNetworkingMicroseconds m_usecWhenSent; /// Did we get an ack block from peer that explicitly marked this /// packet as being skipped? Note that we might subsequently get an /// an ack for this same packet, that's OK! bool m_bNack; /// Transport used to send CConnectionTransport *m_pTransport; /// List of reliable segments. Ignoring retransmission, /// there really is no reason why we we would need to have /// more than 1 in a packet, even if there are multiple /// reliable messages. If we need to retry, we might /// be fragmented. But usually it will only be a few. vstd::small_vector<SNPRange_t,1> m_vecReliableSegments; }; struct SSNPSendMessageList : public SteamNetworkingMessageQueue { /// Unlink the message at the head, if any and return it. 
/// Unlike STL pop_front, this will return nullptr if the /// list is empty CSteamNetworkingMessage *pop_front() { CSteamNetworkingMessage *pResult = m_pFirst; if ( pResult ) { Assert( m_pLast ); Assert( pResult->m_links.m_pQueue == this ); Assert( pResult->m_links.m_pPrev == nullptr ); m_pFirst = pResult->m_links.m_pNext; if ( m_pFirst ) { Assert( m_pFirst->m_links.m_pPrev == pResult ); Assert( m_pFirst->m_nMessageNumber > pResult->m_nMessageNumber ); m_pFirst->m_links.m_pPrev = nullptr; } else { Assert( m_pLast == pResult ); m_pLast = nullptr; } pResult->m_links.m_pQueue = nullptr; pResult->m_links.m_pNext = nullptr; } return pResult; } /// Optimized insertion when we know it goes at the end void push_back( CSteamNetworkingMessage *pMsg ) { if ( m_pFirst == nullptr ) { Assert( m_pLast == nullptr ); m_pFirst = pMsg; } else { // Messages are always kept in message number order Assert( pMsg->m_nMessageNumber > m_pLast->m_nMessageNumber ); Assert( m_pLast->m_links.m_pNext == nullptr ); m_pLast->m_links.m_pNext = pMsg; } pMsg->m_links.m_pQueue = this; pMsg->m_links.m_pNext = nullptr; pMsg->m_links.m_pPrev = m_pLast; m_pLast = pMsg; } }; struct SSNPSenderState { SSNPSenderState(); ~SSNPSenderState() { Shutdown(); } void Shutdown(); /// Current sending rate in bytes per second, RFC 3448 4.2 states default /// is one packet per second, but that is insane and we're not doing that. /// In most cases we will set a default based on initial ping, so this is /// only rarely used. int m_n_x = 32*1024; /// If >=0, then we can send a full packet right now. We allow ourselves to "store up" /// about 1 packet worth of "reserve". In other words, if we have not sent any packets /// for a while, basically we allow ourselves to send two packets in rapid succession, /// thus "bursting" over the limit by 1 packet. That long term rate will be clamped by /// the send rate. /// /// If <0, then we are currently "over" our rate limit and need to wait before we can /// send a packet. /// /// Provision for accumulating "credits" and burst allowance, to account for lossy /// kernel scheduler, etc is mentioned in RFC 5348, section 4.6. float m_flTokenBucket = 0; /// Last time that we added tokens to m_flTokenBucket SteamNetworkingMicroseconds m_usecTokenBucketTime = 0; void TokenBucket_Init( SteamNetworkingMicroseconds usecNow ) { m_usecTokenBucketTime = usecNow; m_flTokenBucket = k_flSendRateBurstOverageAllowance; } /// Limit our token bucket to the max reserve amount void TokenBucket_Limit() { if ( m_flTokenBucket > k_flSendRateBurstOverageAllowance ) m_flTokenBucket = k_flSendRateBurstOverageAllowance; } /// Calculate time until we could send our next packet, checking our token /// bucket and the current send rate SteamNetworkingMicroseconds CalcTimeUntilNextSend() const { // Do we have tokens to burn right now? 
if ( m_flTokenBucket >= 0.0f ) return 0; return SteamNetworkingMicroseconds( m_flTokenBucket * -1e6f / (float)m_n_x ) + 1; // +1 to make sure that if we don't have any tokens, we never return 0, since zero means "ready right now" } /// Nagle timer on all pending messages void ClearNagleTimers() { CSteamNetworkingMessage *pMsg = m_messagesQueued.m_pLast; while ( pMsg && pMsg->SNPSend_UsecNagle() ) { pMsg->SNPSend_SetUsecNagle( 0 ); pMsg = pMsg->m_links.m_pPrev; } } // Current message number, we ++ when adding a message int64 m_nReliableStreamPos = 1; int64 m_nLastSentMsgNum = 0; // Will increment to 1 with first message int64 m_nLastSendMsgNumReliable = 0; /// List of messages that we have not yet finished putting on the wire the first time. /// The Nagle timer may be active on one or more, but if so, it is only on messages /// at the END of the list. The first message may be partially sent. SSNPSendMessageList m_messagesQueued; /// How many bytes into the first message in the queue have we put on the wire? int m_cbCurrentSendMessageSent = 0; /// List of reliable messages that have been fully placed on the wire at least once, /// but we're hanging onto because of the potential need to retry. (Note that if we get /// packet loss, it's possible that we hang onto a message even after it's been fully /// acked, because a prior message is still needed. We always operate on this list /// like a queue, rather than seeking into the middle of the list and removing messages /// as soon as they are no longer needed.) SSNPSendMessageList m_unackedReliableMessages; // Buffered data counters. See SteamNetworkingQuickConnectionStatus for more info int m_cbPendingUnreliable = 0; int m_cbPendingReliable = 0; int m_cbSentUnackedReliable = 0; inline int PendingBytesTotal() const { return m_cbPendingUnreliable + m_cbPendingReliable; } // Stats. FIXME - move to LinkStatsEndToEnd and track rate counters int64 m_nMessagesSentReliable = 0; int64 m_nMessagesSentUnreliable = 0; /// List of packets that we have sent but don't know whether they were received or not. /// We keep a dummy sentinel at the head of the list, with a negative packet number. /// This vastly simplifies the processing. std_map<int64,SNPInFlightPacket_t> m_mapInFlightPacketsByPktNum; /// The next unacked packet that should be timed out and implicitly NACKed, /// if we don't receive an ACK in time. Will be m_mapInFlightPacketsByPktNum.end() /// if we don't have any in flight packets that we are waiting on. std_map<int64,SNPInFlightPacket_t>::iterator m_itNextInFlightPacketToTimeout; /// Ordered list of reliable ranges that we have recently sent /// in a packet. These should be non-overlapping, and furthermore /// should not overlap with with any range in m_listReadyReliableRange /// /// The "value" portion of the map is the message that has the first bit of /// reliable data we need for this message std_map<SNPRange_t,CSteamNetworkingMessage*,SNPRange_t::NonOverlappingLess> m_listInFlightReliableRange; /// Ordered list of ranges that have been put on the wire, /// but have been detected as dropped, and now need to be retried. std_map<SNPRange_t,CSteamNetworkingMessage*,SNPRange_t::NonOverlappingLess> m_listReadyRetryReliableRange; /// Oldest packet sequence number that we are still asking peer /// to send acks for. int64 m_nMinPktWaitingOnAck = 0; // Remove messages from m_unackedReliableMessages that have been fully acked. void RemoveAckedReliableMessageFromUnackedList(); /// Check invariants in debug. 
#if STEAMNETWORKINGSOCKETS_SNP_PARANOIA == 0 inline void DebugCheckInFlightPacketMap() const {} #else void DebugCheckInFlightPacketMap() const; #endif #if STEAMNETWORKINGSOCKETS_SNP_PARANOIA > 1 inline void MaybeCheckInFlightPacketMap() const { DebugCheckInFlightPacketMap(); } #else inline void MaybeCheckInFlightPacketMap() const {} #endif }; struct SSNPRecvUnreliableSegmentKey { int64 m_nMsgNum; int m_nOffset; inline bool operator<(const SSNPRecvUnreliableSegmentKey &x) const { if ( m_nMsgNum < x.m_nMsgNum ) return true; if ( m_nMsgNum > x.m_nMsgNum ) return false; return m_nOffset < x.m_nOffset; } }; struct SSNPRecvUnreliableSegmentData { int m_cbSegSize = -1; bool m_bLast = false; char m_buf[ k_cbSteamNetworkingSocketsMaxPlaintextPayloadRecv ]; }; struct SSNPPacketGap { int64 m_nEnd; // just after the last packet received SteamNetworkingMicroseconds m_usecWhenReceivedPktBefore; // So we can send RTT data in our acks SteamNetworkingMicroseconds m_usecWhenAckPrior; // We need to send an ack for everything with lower packet numbers than this gap by this time. (Earlier is OK.) SteamNetworkingMicroseconds m_usecWhenOKToNack; // Don't give up on the gap being filed before this time }; struct SSNPReceiverState { SSNPReceiverState(); ~SSNPReceiverState() { Shutdown(); } void Shutdown(); /// Unreliable message segments that we have received. When an unreliable message /// needs to be fragmented, we store the pieces here. NOTE: it might be more efficient /// to use a simpler container, with worse O(), since this should ordinarily be /// a pretty small list. std_map<SSNPRecvUnreliableSegmentKey,SSNPRecvUnreliableSegmentData> m_mapUnreliableSegments; /// Stream position of the first byte in m_bufReliableData. Remember that the first byte /// in the reliable stream is actually at position 1, not 0 int64 m_nReliableStreamPos = 1; /// The highest message number we have seen so far. int64 m_nHighestSeenMsgNum = 0; /// The message number of the most recently received reliable message int64 m_nLastRecvReliableMsgNum = 0; /// Reliable data stream that we have received. This might have gaps in it! std::vector<byte> m_bufReliableStream; /// Gaps in the reliable data. These are created when we receive reliable data that /// is beyond what we expect next. Since these must never overlap, we store them /// using begin as the key and end as the value. /// /// !SPEED! We should probably use a small fixed-sized, sorted vector here, /// since in most cases the list will be small, and the cost of dynamic memory /// allocation will be way worse than O(n) insertion/removal. std_map<int64,int64> m_mapReliableStreamGaps; /// List of gaps in the packet sequence numbers we have received. /// Since these must never overlap, we store them using begin as the /// key and the end in the value. /// /// The last item in the list is a sentinel with /// begin and end set to INT64_MAX, and m_usecWhenAckPrior is /// the time when we need to flush acks/backs for all packets, /// including those received after the last gap (if any -- /// INT64_MAX means nothing scheduled). Remember, our wire /// protocol cannot report on packet N without also reporting /// on all packets numbered < N. /// /// !SPEED! We should probably use a small fixed-sized, sorted vector here, /// since in most cases the list will be small, and the cost of dynamic memory /// allocation will be way worse than O(n) insertion/removal. 
std_map<int64,SSNPPacketGap> m_mapPacketGaps; /// Oldest packet sequence number we need to ack to our peer int64 m_nMinPktNumToSendAcks = 0; /// Packet number when we received the value of m_nMinPktNumToSendAcks int64 m_nPktNumUpdatedMinPktNumToSendAcks = 0; /// The next ack that needs to be sent. The invariant /// for the times are: /// /// * Blocks with lower packet numbers: m_usecWhenAckPrior = INT64_MAX /// * This block: m_usecWhenAckPrior < INT64_MAX, or we are the sentinel /// * Blocks with higher packet numbers (if we are not the sentinel): m_usecWhenAckPrior >= previous m_usecWhenAckPrior /// /// We might send acks before they are due, rather than /// waiting until the last moment! If we are going to /// send a packet at all, we usually try to send at least /// a few acks, and if there is room in the packet, as /// many as will fit. The one exception is that if /// sending an ack would imply a NACK that we don't want to /// send yet. (Remember the restrictions on what we are able /// to communicate due to the tight RLE encoding of the wire /// format.) These delays are usually very short lived, and /// only happen when there is packet loss, so they don't delay /// acks very much. The whole purpose of this rather involved /// bookkeeping is to figure out which acks we *need* to send, /// and which acks we cannot send yet, so we can make optimal /// decisions. std_map<int64,SSNPPacketGap>::iterator m_itPendingAck; /// Iterator into m_mapPacketGaps. If != the sentinel, /// we will avoid reporting on the dropped packets in this /// gap (and all higher numbered packets), because we are /// waiting in the hopes that they will arrive out of order. std_map<int64,SSNPPacketGap>::iterator m_itPendingNack; /// Queue a flush of ALL acks (and NACKs!) by the given time. /// If anything is scheduled to happen earlier, that schedule /// will still be honered. We will ack up to that packet number, /// and then we we may report higher numbered blocks, or we may /// stop and wait to report more acks until later. void QueueFlushAllAcks( SteamNetworkingMicroseconds usecWhen ); /// Return the time when we need to flush out acks, or INT64_MAX /// if we don't have any acks pending right now. inline SteamNetworkingMicroseconds TimeWhenFlushAcks() const { // Paranoia if ( m_mapPacketGaps.empty() ) { AssertMsg( false, "TimeWhenFlushAcks - we're shut down!" ); return INT64_MAX; } return m_itPendingAck->second.m_usecWhenAckPrior; } /// Check invariants in debug. #if STEAMNETWORKINGSOCKETS_SNP_PARANOIA > 1 void DebugCheckPackGapMap() const; #else inline void DebugCheckPackGapMap() const {} #endif // Stats. FIXME - move to LinkStatsEndToEnd and track rate counters int64 m_nMessagesRecvReliable = 0; int64 m_nMessagesRecvUnreliable = 0; }; } // SteamNetworkingSocketsLib
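A minimal editorial sketch of the token-bucket send pacing described by SSNPSenderState above (m_n_x, m_flTokenBucket, m_usecTokenBucketTime, CalcTimeUntilNextSend). The SimpleTokenBucket type, the burst-allowance value, and the field names below are illustrative assumptions, not part of the Valve header.

#include <cstdint>
#include <algorithm>

typedef int64_t Microseconds;

// Illustrative stand-in for k_flSendRateBurstOverageAllowance (value is an assumption).
static const float k_flBurstAllowanceBytes = 1200.0f;   // roughly one packet of reserve

struct SimpleTokenBucket
{
    int   m_nSendRateBytesPerSec = 32 * 1024; // corresponds to m_n_x
    float m_flTokens = 0.0f;                  // corresponds to m_flTokenBucket
    Microseconds m_usecLastUpdate = 0;        // corresponds to m_usecTokenBucketTime

    // Accrue tokens for elapsed time, clamped to the burst allowance
    // (the role TokenBucket_Limit() plays in the header above).
    void Update( Microseconds usecNow )
    {
        float flElapsedSec = ( usecNow - m_usecLastUpdate ) * 1e-6f;
        m_usecLastUpdate = usecNow;
        m_flTokens = std::min( m_flTokens + flElapsedSec * (float)m_nSendRateBytesPerSec,
                               k_flBurstAllowanceBytes );
    }

    // Spend tokens when a packet goes out; the balance may go negative,
    // which means we are over the rate limit and must wait.
    void OnPacketSent( int cbPacket ) { m_flTokens -= (float)cbPacket; }

    // Mirror of CalcTimeUntilNextSend(): zero if we have tokens, otherwise the
    // time needed to earn the deficit back at the current rate (+1 so a depleted
    // bucket never reports "ready right now").
    Microseconds TimeUntilNextSend() const
    {
        if ( m_flTokens >= 0.0f )
            return 0;
        return (Microseconds)( -m_flTokens * 1e6f / (float)m_nSendRateBytesPerSec ) + 1;
    }
};

Calling Update() and then checking TimeUntilNextSend() before each send approximates, under these simplifying assumptions, the per-Think() pattern the sender state describes.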
null
//====== Copyright Valve Corporation, All rights reserved. ==================== #pragma once #include "../steamnetworkingsockets_internal.h" #include <vector> #include <map> #include <set> // Set paranoia level, if not already set: // 0 = disabled // 1 = sometimes // 2 = max #ifndef STEAMNETWORKINGSOCKETS_SNP_PARANOIA #ifdef _DEBUG #define STEAMNETWORKINGSOCKETS_SNP_PARANOIA 2 #else #define STEAMNETWORKINGSOCKETS_SNP_PARANOIA 0 #endif #endif #if STEAMNETWORKINGSOCKETS_SNP_PARANOIA > 0 #if defined(__GNUC__ ) && defined( __linux__ ) && !defined( __ANDROID__ ) #include <debug/map> // FIXME use debug versions template< typename K, typename V, typename L = std::less<K> > using std_map = __gnu_debug::map<K,V,L>; template <typename K, typename V, typename L> inline int len( const std_map<K,V,L> &map ) { return (int)map.size(); } #else template< typename K, typename V, typename L = std::less<K> > using std_map = std::map<K,V,L>; #endif #else template< typename K, typename V, typename L = std::less<K> > using std_map = std::map<K,V,L>; #endif struct P2PSessionState_t; namespace SteamNetworkingSocketsLib { // Acks may be delayed. This controls the precision used on the wire to encode the delay time. constexpr int k_nAckDelayPrecisionShift = 5; constexpr SteamNetworkingMicroseconds k_usecAckDelayPrecision = (1 << k_nAckDelayPrecisionShift ); // When a receiver detects a dropped packet, wait a bit before NACKing it, to give it time // to arrive out of order. This is really important for many different types of connections // that send on different channels, e.g. DSL, Wifi. // Here we really could be smarter, by tracking how often dropped // packets really do arrive out of order. If the rate is low, then it's // probably best to go ahead and send a NACK now, rather than waiting. // But if dropped packets do often arrive out of order, then waiting // to NACK will probably save some retransmits. In fact, instead // of learning the rate, we should probably try to learn the delay. // E.g. a probability distribution P(t), which describes the odds // that a dropped packet will have arrived at time t. Then you // adjust the NACK delay such that P(nack_delay) gives the best // balance between false positive and false negative rates. constexpr SteamNetworkingMicroseconds k_usecNackFlush = 3*1000; // Max size of a message that we are wiling to *receive*. constexpr int k_cbMaxMessageSizeRecv = k_cbMaxSteamNetworkingSocketsMessageSizeSend*2; // The max we will look ahead and allocate data, ahead of the reliable // messages we have been able to decode. We limit this to make sure that // a malicious sender cannot exploit us. constexpr int k_cbMaxBufferedReceiveReliableData = k_cbMaxMessageSizeRecv + 64*1024; constexpr int k_nMaxReliableStreamGaps_Extend = 30; // Discard reliable data past the end of the stream, if it would cause us to get too many gaps constexpr int k_nMaxReliableStreamGaps_Fragment = 20; // Discard reliable data that is filling in the middle of a hole, if it would cause the number of gaps to exceed this number constexpr int k_nMaxPacketGaps = 62; // Don't bother tracking more than N gaps. Instead, we will end up NACKing some packets that we actually did receive. This should not break the protocol, but it protects us from malicious sender // Hang on to at most N unreliable segments. 
When packets are dropping // and unreliable messages being fragmented, we will accumulate old pieces // of unreliable messages that we retain in hopes that we will get the // missing piece and reassemble the whole message. At a certain point we // must give up and discard them. We use a simple strategy of just limiting // the max total number. In reality large unreliable messages are just a very bad // idea, since the odds of the message dropping increase exponentially with the // number of packets. With 20 packets, even 1% packet loss becomes ~80% message // loss. (Assuming naive fragmentation and reassembly and no forward // error correction.) constexpr int k_nMaxBufferedUnreliableSegments = 20; // If app tries to send a message larger than N bytes unreliably, // complain about it, and automatically convert to reliable. // About 15 segments. constexpr int k_cbMaxUnreliableMsgSizeSend = 15*1100; // Max possible size of an unreliable segment we could receive. constexpr int k_cbMaxUnreliableSegmentSizeRecv = k_cbSteamNetworkingSocketsMaxPlaintextPayloadRecv; // Largest possible total unreliable message we can receive, based on the constraints above constexpr int k_cbMaxUnreliableMsgSizeRecv = k_nMaxBufferedUnreliableSegments*k_cbMaxUnreliableSegmentSizeRecv; COMPILE_TIME_ASSERT( k_cbMaxUnreliableMsgSizeRecv > k_cbMaxUnreliableMsgSizeSend + 4096 ); // Postel's law; confirm how much slack we have here class CSteamNetworkConnectionBase; class CConnectionTransport; struct SteamNetworkingMessageQueue; /// Actual implementation of SteamNetworkingMessage_t, which is the API /// visible type. Has extra fields needed to put the message into intrusive /// linked lists. class CSteamNetworkingMessage : public SteamNetworkingMessage_t { public: static CSteamNetworkingMessage *New( CSteamNetworkConnectionBase *pParent, uint32 cbSize, int64 nMsgNum, int nFlags, SteamNetworkingMicroseconds usecNow ); static CSteamNetworkingMessage *New( uint32 cbSize ); static void DefaultFreeData( SteamNetworkingMessage_t *pMsg ); /// OK to delay sending this message until this time. Set to zero to explicitly force /// Nagle timer to expire and send now (but this should behave the same as if the /// timer < usecNow). If the timer is cleared, then all messages with lower message numbers /// are also cleared. inline SteamNetworkingMicroseconds SNPSend_UsecNagle() const { return m_usecTimeReceived; } inline void SNPSend_SetUsecNagle( SteamNetworkingMicroseconds x ) { m_usecTimeReceived = x; } /// Offset in reliable stream of the header byte. 0 if we're not reliable. inline int64 SNPSend_ReliableStreamPos() const { return m_nConnUserData; } inline void SNPSend_SetReliableStreamPos( int64 x ) { m_nConnUserData = x; } inline int SNPSend_ReliableStreamSize() const { DbgAssert( m_nFlags & k_nSteamNetworkingSend_Reliable && m_nConnUserData > 0 && m_cbSNPSendReliableHeader > 0 && m_cbSize >= m_cbSNPSendReliableHeader ); return m_cbSize; } inline bool SNPSend_IsReliable() const { if ( m_nFlags & k_nSteamNetworkingSend_Reliable ) { DbgAssert( m_nConnUserData > 0 && m_cbSNPSendReliableHeader > 0 && m_cbSize >= m_cbSNPSendReliableHeader ); return true; } DbgAssert( m_nConnUserData == 0 && m_cbSNPSendReliableHeader == 0 ); return false; } // Reliable stream header int m_cbSNPSendReliableHeader; byte *SNPSend_ReliableHeader() { // !KLUDGE! 
Reuse the peer identity to hold the reliable header return (byte*)&m_identityPeer; } /// Remove it from queues void Unlink(); struct Links { SteamNetworkingMessageQueue *m_pQueue; CSteamNetworkingMessage *m_pPrev; CSteamNetworkingMessage *m_pNext; inline void Clear() { m_pQueue = nullptr; m_pPrev = nullptr; m_pNext = nullptr; } }; /// Intrusive links for the "primary" list we are in Links m_links; /// Intrusive links for any secondary list we may be in. (Same listen socket or /// P2P channel, depending on message type) Links m_linksSecondaryQueue; void LinkBefore( CSteamNetworkingMessage *pSuccessor, Links CSteamNetworkingMessage::*pMbrLinks, SteamNetworkingMessageQueue *pQueue ); void LinkToQueueTail( Links CSteamNetworkingMessage::*pMbrLinks, SteamNetworkingMessageQueue *pQueue ); void UnlinkFromQueue( Links CSteamNetworkingMessage::*pMbrLinks ); private: // Use New and Release()!! inline CSteamNetworkingMessage() {} inline ~CSteamNetworkingMessage() {} static void ReleaseFunc( SteamNetworkingMessage_t *pIMsg ); }; /// A doubly-linked list of CSteamNetworkingMessage struct SteamNetworkingMessageQueue { CSteamNetworkingMessage *m_pFirst = nullptr; CSteamNetworkingMessage *m_pLast = nullptr; inline bool empty() const { if ( m_pFirst ) { Assert( m_pLast ); return false; } Assert( !m_pLast ); return true; } /// Remove the first messages out of the queue (up to nMaxMessages). Returns the number returned int RemoveMessages( SteamNetworkingMessage_t **ppOutMessages, int nMaxMessages ); /// Delete all queued messages void PurgeMessages(); }; /// Maximum number of packets we will send in one Think() call. const int k_nMaxPacketsPerThink = 16; /// Max number of tokens we are allowed to store up in reserve, for a burst. const float k_flSendRateBurstOverageAllowance = k_cbSteamNetworkingSocketsMaxEncryptedPayloadSend; struct SNPRange_t { /// Byte or sequence number range int64 m_nBegin; int64 m_nEnd; // STL-style. It's one past the end inline int64 length() const { // In general, allow zero-length ranges, but not negative ones Assert( m_nEnd >= m_nBegin ); return m_nEnd - m_nBegin; } /// Strict comparison function. This is used in situations where /// ranges must not overlap, AND we also never search for /// a range that might overlap. struct NonOverlappingLess { inline bool operator ()(const SNPRange_t &l, const SNPRange_t &r ) const { if ( l.m_nBegin < r.m_nBegin ) return true; AssertMsg( l.m_nBegin > r.m_nBegin || l.m_nEnd == r.m_nEnd, "Ranges should not overlap in this map!" ); return false; } }; }; /// A packet that has been sent but we don't yet know if was received /// or dropped. These are kept in an ordered map keyed by packet number. /// (Hence the packet number not being a member) When we receive an ACK, /// we remove packets from this list. struct SNPInFlightPacket_t { // // FIXME - Could definitely pack this structure better. And maybe // worth it to optimize cache // /// Local timestamp when we sent it SteamNetworkingMicroseconds m_usecWhenSent; /// Did we get an ack block from peer that explicitly marked this /// packet as being skipped? Note that we might subsequently get an /// an ack for this same packet, that's OK! bool m_bNack; /// Transport used to send CConnectionTransport *m_pTransport; /// List of reliable segments. Ignoring retransmission, /// there really is no reason why we we would need to have /// more than 1 in a packet, even if there are multiple /// reliable messages. If we need to retry, we might /// be fragmented. But usually it will only be a few. 
vstd::small_vector<SNPRange_t,1> m_vecReliableSegments; }; struct SSNPSendMessageList : public SteamNetworkingMessageQueue { /// Unlink the message at the head, if any and return it. /// Unlike STL pop_front, this will return nullptr if the /// list is empty CSteamNetworkingMessage *pop_front() { CSteamNetworkingMessage *pResult = m_pFirst; if ( pResult ) { Assert( m_pLast ); Assert( pResult->m_links.m_pQueue == this ); Assert( pResult->m_links.m_pPrev == nullptr ); m_pFirst = pResult->m_links.m_pNext; if ( m_pFirst ) { Assert( m_pFirst->m_links.m_pPrev == pResult ); Assert( m_pFirst->m_nMessageNumber > pResult->m_nMessageNumber ); m_pFirst->m_links.m_pPrev = nullptr; } else { Assert( m_pLast == pResult ); m_pLast = nullptr; } pResult->m_links.m_pQueue = nullptr; pResult->m_links.m_pNext = nullptr; } return pResult; } /// Optimized insertion when we know it goes at the end void push_back( CSteamNetworkingMessage *pMsg ) { if ( m_pFirst == nullptr ) { Assert( m_pLast == nullptr ); m_pFirst = pMsg; } else { // Messages are always kept in message number order Assert( pMsg->m_nMessageNumber > m_pLast->m_nMessageNumber ); Assert( m_pLast->m_links.m_pNext == nullptr ); m_pLast->m_links.m_pNext = pMsg; } pMsg->m_links.m_pQueue = this; pMsg->m_links.m_pNext = nullptr; pMsg->m_links.m_pPrev = m_pLast; m_pLast = pMsg; } }; struct SSNPSenderState { SSNPSenderState(); ~SSNPSenderState() { Shutdown(); } void Shutdown(); /// Current sending rate in bytes per second, RFC 3448 4.2 states default /// is one packet per second, but that is insane and we're not doing that. /// In most cases we will set a default based on initial ping, so this is /// only rarely used. int m_n_x = 32*1024; /// If >=0, then we can send a full packet right now. We allow ourselves to "store up" /// about 1 packet worth of "reserve". In other words, if we have not sent any packets /// for a while, basically we allow ourselves to send two packets in rapid succession, /// thus "bursting" over the limit by 1 packet. That long term rate will be clamped by /// the send rate. /// /// If <0, then we are currently "over" our rate limit and need to wait before we can /// send a packet. /// /// Provision for accumulating "credits" and burst allowance, to account for lossy /// kernel scheduler, etc is mentioned in RFC 5348, section 4.6. float m_flTokenBucket = 0; /// Last time that we added tokens to m_flTokenBucket SteamNetworkingMicroseconds m_usecTokenBucketTime = 0; void TokenBucket_Init( SteamNetworkingMicroseconds usecNow ) { m_usecTokenBucketTime = usecNow; m_flTokenBucket = k_flSendRateBurstOverageAllowance; } /// Limit our token bucket to the max reserve amount void TokenBucket_Limit() { if ( m_flTokenBucket > k_flSendRateBurstOverageAllowance ) m_flTokenBucket = k_flSendRateBurstOverageAllowance; } /// Calculate time until we could send our next packet, checking our token /// bucket and the current send rate SteamNetworkingMicroseconds CalcTimeUntilNextSend() const { // Do we have tokens to burn right now? 
if ( m_flTokenBucket >= 0.0f ) return 0; return SteamNetworkingMicroseconds( m_flTokenBucket * -1e6f / (float)m_n_x ) + 1; // +1 to make sure that if we don't have any tokens, we never return 0, since zero means "ready right now" } /// Nagle timer on all pending messages void ClearNagleTimers() { CSteamNetworkingMessage *pMsg = m_messagesQueued.m_pLast; while ( pMsg && pMsg->SNPSend_UsecNagle() ) { pMsg->SNPSend_SetUsecNagle( 0 ); pMsg = pMsg->m_links.m_pPrev; } } // Current message number, we ++ when adding a message int64 m_nReliableStreamPos = 1; int64 m_nLastSentMsgNum = 0; // Will increment to 1 with first message int64 m_nLastSendMsgNumReliable = 0; /// List of messages that we have not yet finished putting on the wire the first time. /// The Nagle timer may be active on one or more, but if so, it is only on messages /// at the END of the list. The first message may be partially sent. SSNPSendMessageList m_messagesQueued; /// How many bytes into the first message in the queue have we put on the wire? int m_cbCurrentSendMessageSent = 0; /// List of reliable messages that have been fully placed on the wire at least once, /// but we're hanging onto because of the potential need to retry. (Note that if we get /// packet loss, it's possible that we hang onto a message even after it's been fully /// acked, because a prior message is still needed. We always operate on this list /// like a queue, rather than seeking into the middle of the list and removing messages /// as soon as they are no longer needed.) SSNPSendMessageList m_unackedReliableMessages; // Buffered data counters. See SteamNetworkingQuickConnectionStatus for more info int m_cbPendingUnreliable = 0; int m_cbPendingReliable = 0; int m_cbSentUnackedReliable = 0; inline int PendingBytesTotal() const { return m_cbPendingUnreliable + m_cbPendingReliable; } // Stats. FIXME - move to LinkStatsEndToEnd and track rate counters int64 m_nMessagesSentReliable = 0; int64 m_nMessagesSentUnreliable = 0; /// List of packets that we have sent but don't know whether they were received or not. /// We keep a dummy sentinel at the head of the list, with a negative packet number. /// This vastly simplifies the processing. std_map<int64,SNPInFlightPacket_t> m_mapInFlightPacketsByPktNum; /// The next unacked packet that should be timed out and implicitly NACKed, /// if we don't receive an ACK in time. Will be m_mapInFlightPacketsByPktNum.end() /// if we don't have any in flight packets that we are waiting on. std_map<int64,SNPInFlightPacket_t>::iterator m_itNextInFlightPacketToTimeout; /// Ordered list of reliable ranges that we have recently sent /// in a packet. These should be non-overlapping, and furthermore /// should not overlap with with any range in m_listReadyReliableRange /// /// The "value" portion of the map is the message that has the first bit of /// reliable data we need for this message std_map<SNPRange_t,CSteamNetworkingMessage*,SNPRange_t::NonOverlappingLess> m_listInFlightReliableRange; /// Ordered list of ranges that have been put on the wire, /// but have been detected as dropped, and now need to be retried. std_map<SNPRange_t,CSteamNetworkingMessage*,SNPRange_t::NonOverlappingLess> m_listReadyRetryReliableRange; /// Oldest packet sequence number that we are still asking peer /// to send acks for. int64 m_nMinPktWaitingOnAck = 0; // Remove messages from m_unackedReliableMessages that have been fully acked. void RemoveAckedReliableMessageFromUnackedList(); /// Check invariants in debug. 
#if STEAMNETWORKINGSOCKETS_SNP_PARANOIA == 0 inline void DebugCheckInFlightPacketMap() const {} #else void DebugCheckInFlightPacketMap() const; #endif #if STEAMNETWORKINGSOCKETS_SNP_PARANOIA > 1 inline void MaybeCheckInFlightPacketMap() const { DebugCheckInFlightPacketMap(); } #else inline void MaybeCheckInFlightPacketMap() const {} #endif }; struct SSNPRecvUnreliableSegmentKey { int64 m_nMsgNum; int m_nOffset; inline bool operator<(const SSNPRecvUnreliableSegmentKey &x) const { if ( m_nMsgNum < x.m_nMsgNum ) return true; if ( m_nMsgNum > x.m_nMsgNum ) return false; return m_nOffset < x.m_nOffset; } }; struct SSNPRecvUnreliableSegmentData { int m_cbSegSize = -1; bool m_bLast = false; char m_buf[ k_cbMaxUnreliableSegmentSizeRecv ]; }; struct SSNPPacketGap { int64 m_nEnd; // just after the last packet received SteamNetworkingMicroseconds m_usecWhenReceivedPktBefore; // So we can send RTT data in our acks SteamNetworkingMicroseconds m_usecWhenAckPrior; // We need to send an ack for everything with lower packet numbers than this gap by this time. (Earlier is OK.) SteamNetworkingMicroseconds m_usecWhenOKToNack; // Don't give up on the gap being filed before this time }; struct SSNPReceiverState { SSNPReceiverState(); ~SSNPReceiverState() { Shutdown(); } void Shutdown(); /// Unreliable message segments that we have received. When an unreliable message /// needs to be fragmented, we store the pieces here. NOTE: it might be more efficient /// to use a simpler container, with worse O(), since this should ordinarily be /// a pretty small list. std_map<SSNPRecvUnreliableSegmentKey,SSNPRecvUnreliableSegmentData> m_mapUnreliableSegments; /// Stream position of the first byte in m_bufReliableData. Remember that the first byte /// in the reliable stream is actually at position 1, not 0 int64 m_nReliableStreamPos = 1; /// The highest message number we have seen so far. int64 m_nHighestSeenMsgNum = 0; /// The message number of the most recently received reliable message int64 m_nLastRecvReliableMsgNum = 0; /// Reliable data stream that we have received. This might have gaps in it! std::vector<byte> m_bufReliableStream; /// Gaps in the reliable data. These are created when we receive reliable data that /// is beyond what we expect next. Since these must never overlap, we store them /// using begin as the key and end as the value. /// /// !SPEED! We should probably use a small fixed-sized, sorted vector here, /// since in most cases the list will be small, and the cost of dynamic memory /// allocation will be way worse than O(n) insertion/removal. std_map<int64,int64> m_mapReliableStreamGaps; /// List of gaps in the packet sequence numbers we have received. /// Since these must never overlap, we store them using begin as the /// key and the end in the value. /// /// The last item in the list is a sentinel with /// begin and end set to INT64_MAX, and m_usecWhenAckPrior is /// the time when we need to flush acks/backs for all packets, /// including those received after the last gap (if any -- /// INT64_MAX means nothing scheduled). Remember, our wire /// protocol cannot report on packet N without also reporting /// on all packets numbered < N. /// /// !SPEED! We should probably use a small fixed-sized, sorted vector here, /// since in most cases the list will be small, and the cost of dynamic memory /// allocation will be way worse than O(n) insertion/removal. 
std_map<int64,SSNPPacketGap> m_mapPacketGaps; /// Oldest packet sequence number we need to ack to our peer int64 m_nMinPktNumToSendAcks = 0; /// Packet number when we received the value of m_nMinPktNumToSendAcks int64 m_nPktNumUpdatedMinPktNumToSendAcks = 0; /// The next ack that needs to be sent. The invariant /// for the times are: /// /// * Blocks with lower packet numbers: m_usecWhenAckPrior = INT64_MAX /// * This block: m_usecWhenAckPrior < INT64_MAX, or we are the sentinel /// * Blocks with higher packet numbers (if we are not the sentinel): m_usecWhenAckPrior >= previous m_usecWhenAckPrior /// /// We might send acks before they are due, rather than /// waiting until the last moment! If we are going to /// send a packet at all, we usually try to send at least /// a few acks, and if there is room in the packet, as /// many as will fit. The one exception is that if /// sending an ack would imply a NACK that we don't want to /// send yet. (Remember the restrictions on what we are able /// to communicate due to the tight RLE encoding of the wire /// format.) These delays are usually very short lived, and /// only happen when there is packet loss, so they don't delay /// acks very much. The whole purpose of this rather involved /// bookkeeping is to figure out which acks we *need* to send, /// and which acks we cannot send yet, so we can make optimal /// decisions. std_map<int64,SSNPPacketGap>::iterator m_itPendingAck; /// Iterator into m_mapPacketGaps. If != the sentinel, /// we will avoid reporting on the dropped packets in this /// gap (and all higher numbered packets), because we are /// waiting in the hopes that they will arrive out of order. std_map<int64,SSNPPacketGap>::iterator m_itPendingNack; /// Queue a flush of ALL acks (and NACKs!) by the given time. /// If anything is scheduled to happen earlier, that schedule /// will still be honered. We will ack up to that packet number, /// and then we we may report higher numbered blocks, or we may /// stop and wait to report more acks until later. void QueueFlushAllAcks( SteamNetworkingMicroseconds usecWhen ); /// Return the time when we need to flush out acks, or INT64_MAX /// if we don't have any acks pending right now. inline SteamNetworkingMicroseconds TimeWhenFlushAcks() const { // Paranoia if ( m_mapPacketGaps.empty() ) { AssertMsg( false, "TimeWhenFlushAcks - we're shut down!" ); return INT64_MAX; } return m_itPendingAck->second.m_usecWhenAckPrior; } /// Check invariants in debug. #if STEAMNETWORKINGSOCKETS_SNP_PARANOIA > 1 void DebugCheckPackGapMap() const; #else inline void DebugCheckPackGapMap() const {} #endif // Stats. FIXME - move to LinkStatsEndToEnd and track rate counters int64 m_nMessagesRecvReliable = 0; int64 m_nMessagesRecvUnreliable = 0; }; } // SteamNetworkingSocketsLib
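A hedged arithmetic check of the unreliable-message size constants defined in the header above: with k_nMaxBufferedUnreliableSegments segments of at most one plaintext payload each, the receivable total must exceed the 15*1100-byte send limit plus 4096 bytes of slack, which is what the COMPILE_TIME_ASSERT encodes. The payload value below is an assumption chosen only so the sketch compiles standalone; the real k_cbSteamNetworkingSocketsMaxPlaintextPayloadRecv is defined elsewhere in the library and may differ.

#include <cstdio>

// Assumed value for illustration only.
constexpr int k_cbAssumedMaxPlaintextPayloadRecv = 4096;

constexpr int k_nMaxBufferedUnreliableSegments_sketch = 20;
constexpr int k_cbMaxUnreliableMsgSizeSend_sketch     = 15 * 1100;   // 16,500 bytes
constexpr int k_cbMaxUnreliableSegmentSizeRecv_sketch = k_cbAssumedMaxPlaintextPayloadRecv;
constexpr int k_cbMaxUnreliableMsgSizeRecv_sketch =
    k_nMaxBufferedUnreliableSegments_sketch * k_cbMaxUnreliableSegmentSizeRecv_sketch;

// Same slack check as the header's COMPILE_TIME_ASSERT (Postel's law:
// be prepared to receive more than we are willing to send).
static_assert( k_cbMaxUnreliableMsgSizeRecv_sketch >
               k_cbMaxUnreliableMsgSizeSend_sketch + 4096,
               "receive-side limit should exceed send-side limit with slack" );

int main()
{
    std::printf( "recv limit %d vs send limit %d (+4096 slack)\n",
                 k_cbMaxUnreliableMsgSizeRecv_sketch,
                 k_cbMaxUnreliableMsgSizeSend_sketch + 4096 );
}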
null
232
CWE-787
CVE-2020-6017
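A small hedged sketch of the strict-ordering trick used by SNPRange_t::NonOverlappingLess in the header code above and below: because the ranges stored in such a map never overlap, comparing begin values alone is a valid strict weak ordering, and a lookup with an identical begin finds the stored range. The Range struct and sample values here are illustrative stand-ins, not library code.

#include <cstdint>
#include <map>
#include <cassert>

struct Range { int64_t begin; int64_t end; }; // end is one past the last element

// Valid only when stored ranges never overlap; mirrors the idea behind
// SNPRange_t::NonOverlappingLess (the real comparator also asserts non-overlap).
struct NonOverlappingLess
{
    bool operator()( const Range &l, const Range &r ) const { return l.begin < r.begin; }
};

int main()
{
    std::map<Range, const char*, NonOverlappingLess> inflight;
    inflight[ Range{ 1, 100 } ]   = "segment A";
    inflight[ Range{ 100, 250 } ] = "segment B"; // adjacent, not overlapping

    // Looking up by an identical begin finds the stored, non-overlapping range.
    auto it = inflight.find( Range{ 100, 250 } );
    assert( it != inflight.end() && it->first.end == 250 );
    return 0;
}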
//====== Copyright Valve Corporation, All rights reserved. ==================== #pragma once #include "../steamnetworkingsockets_internal.h" #include <vector> #include <map> #include <set> // Set paranoia level, if not already set: // 0 = disabled // 1 = sometimes // 2 = max #ifndef STEAMNETWORKINGSOCKETS_SNP_PARANOIA #ifdef _DEBUG #define STEAMNETWORKINGSOCKETS_SNP_PARANOIA 2 #else #define STEAMNETWORKINGSOCKETS_SNP_PARANOIA 0 #endif #endif #if STEAMNETWORKINGSOCKETS_SNP_PARANOIA > 0 #if defined(__GNUC__ ) && defined( __linux__ ) && !defined( __ANDROID__ ) #include <debug/map> // FIXME use debug versions template< typename K, typename V, typename L = std::less<K> > using std_map = __gnu_debug::map<K,V,L>; template <typename K, typename V, typename L> inline int len( const std_map<K,V,L> &map ) { return (int)map.size(); } #else template< typename K, typename V, typename L = std::less<K> > using std_map = std::map<K,V,L>; #endif #else template< typename K, typename V, typename L = std::less<K> > using std_map = std::map<K,V,L>; #endif struct P2PSessionState_t; namespace SteamNetworkingSocketsLib { // Acks may be delayed. This controls the precision used on the wire to encode the delay time. constexpr int k_nAckDelayPrecisionShift = 5; constexpr SteamNetworkingMicroseconds k_usecAckDelayPrecision = (1 << k_nAckDelayPrecisionShift ); // When a receiver detects a dropped packet, wait a bit before NACKing it, to give it time // to arrive out of order. This is really important for many different types of connections // that send on different channels, e.g. DSL, Wifi. // Here we really could be smarter, by tracking how often dropped // packets really do arrive out of order. If the rate is low, then it's // probably best to go ahead and send a NACK now, rather than waiting. // But if dropped packets do often arrive out of order, then waiting // to NACK will probably save some retransmits. In fact, instead // of learning the rate, we should probably try to learn the delay. // E.g. a probability distribution P(t), which describes the odds // that a dropped packet will have arrived at time t. Then you // adjust the NACK delay such that P(nack_delay) gives the best // balance between false positive and false negative rates. constexpr SteamNetworkingMicroseconds k_usecNackFlush = 3*1000; // Max size of a message that we are wiling to *receive*. constexpr int k_cbMaxMessageSizeRecv = k_cbMaxSteamNetworkingSocketsMessageSizeSend*2; // The max we will look ahead and allocate data, ahead of the reliable // messages we have been able to decode. We limit this to make sure that // a malicious sender cannot exploit us. constexpr int k_cbMaxBufferedReceiveReliableData = k_cbMaxMessageSizeRecv + 64*1024; constexpr int k_nMaxReliableStreamGaps_Extend = 30; // Discard reliable data past the end of the stream, if it would cause us to get too many gaps constexpr int k_nMaxReliableStreamGaps_Fragment = 20; // Discard reliable data that is filling in the middle of a hole, if it would cause the number of gaps to exceed this number constexpr int k_nMaxPacketGaps = 62; // Don't bother tracking more than N gaps. Instead, we will end up NACKing some packets that we actually did receive. This should not break the protocol, but it protects us from malicious sender // Hang on to at most N unreliable segments. 
When packets are dropping // and unreliable messages being fragmented, we will accumulate old pieces // of unreliable messages that we retain in hopes that we will get the // missing piece and reassemble the whole message. At a certain point we // must give up and discard them. We use a simple strategy of just limiting // the max total number. In reality large unreliable messages are just a very bad // idea, since the odds of the message dropping increase exponentially with the // number of packets. With 20 packets, even 1% packet loss becomes ~80% message // loss. (Assuming naive fragmentation and reassembly and no forward // error correction.) constexpr int k_nMaxBufferedUnreliableSegments = 20; // If app tries to send a message larger than N bytes unreliably, // complain about it, and automatically convert to reliable. // About 15 segments. constexpr int k_cbMaxUnreliableMsgSize = 15*1100; class CSteamNetworkConnectionBase; class CConnectionTransport; struct SteamNetworkingMessageQueue; /// Actual implementation of SteamNetworkingMessage_t, which is the API /// visible type. Has extra fields needed to put the message into intrusive /// linked lists. class CSteamNetworkingMessage : public SteamNetworkingMessage_t { public: static CSteamNetworkingMessage *New( CSteamNetworkConnectionBase *pParent, uint32 cbSize, int64 nMsgNum, int nFlags, SteamNetworkingMicroseconds usecNow ); static CSteamNetworkingMessage *New( uint32 cbSize ); static void DefaultFreeData( SteamNetworkingMessage_t *pMsg ); /// OK to delay sending this message until this time. Set to zero to explicitly force /// Nagle timer to expire and send now (but this should behave the same as if the /// timer < usecNow). If the timer is cleared, then all messages with lower message numbers /// are also cleared. inline SteamNetworkingMicroseconds SNPSend_UsecNagle() const { return m_usecTimeReceived; } inline void SNPSend_SetUsecNagle( SteamNetworkingMicroseconds x ) { m_usecTimeReceived = x; } /// Offset in reliable stream of the header byte. 0 if we're not reliable. inline int64 SNPSend_ReliableStreamPos() const { return m_nConnUserData; } inline void SNPSend_SetReliableStreamPos( int64 x ) { m_nConnUserData = x; } inline int SNPSend_ReliableStreamSize() const { DbgAssert( m_nFlags & k_nSteamNetworkingSend_Reliable && m_nConnUserData > 0 && m_cbSNPSendReliableHeader > 0 && m_cbSize >= m_cbSNPSendReliableHeader ); return m_cbSize; } inline bool SNPSend_IsReliable() const { if ( m_nFlags & k_nSteamNetworkingSend_Reliable ) { DbgAssert( m_nConnUserData > 0 && m_cbSNPSendReliableHeader > 0 && m_cbSize >= m_cbSNPSendReliableHeader ); return true; } DbgAssert( m_nConnUserData == 0 && m_cbSNPSendReliableHeader == 0 ); return false; } // Reliable stream header int m_cbSNPSendReliableHeader; byte *SNPSend_ReliableHeader() { // !KLUDGE! Reuse the peer identity to hold the reliable header return (byte*)&m_identityPeer; } /// Remove it from queues void Unlink(); struct Links { SteamNetworkingMessageQueue *m_pQueue; CSteamNetworkingMessage *m_pPrev; CSteamNetworkingMessage *m_pNext; inline void Clear() { m_pQueue = nullptr; m_pPrev = nullptr; m_pNext = nullptr; } }; /// Intrusive links for the "primary" list we are in Links m_links; /// Intrusive links for any secondary list we may be in. 
(Same listen socket or /// P2P channel, depending on message type) Links m_linksSecondaryQueue; void LinkBefore( CSteamNetworkingMessage *pSuccessor, Links CSteamNetworkingMessage::*pMbrLinks, SteamNetworkingMessageQueue *pQueue ); void LinkToQueueTail( Links CSteamNetworkingMessage::*pMbrLinks, SteamNetworkingMessageQueue *pQueue ); void UnlinkFromQueue( Links CSteamNetworkingMessage::*pMbrLinks ); private: // Use New and Release()!! inline CSteamNetworkingMessage() {} inline ~CSteamNetworkingMessage() {} static void ReleaseFunc( SteamNetworkingMessage_t *pIMsg ); }; /// A doubly-linked list of CSteamNetworkingMessage struct SteamNetworkingMessageQueue { CSteamNetworkingMessage *m_pFirst = nullptr; CSteamNetworkingMessage *m_pLast = nullptr; inline bool empty() const { if ( m_pFirst ) { Assert( m_pLast ); return false; } Assert( !m_pLast ); return true; } /// Remove the first messages out of the queue (up to nMaxMessages). Returns the number returned int RemoveMessages( SteamNetworkingMessage_t **ppOutMessages, int nMaxMessages ); /// Delete all queued messages void PurgeMessages(); }; /// Maximum number of packets we will send in one Think() call. const int k_nMaxPacketsPerThink = 16; /// Max number of tokens we are allowed to store up in reserve, for a burst. const float k_flSendRateBurstOverageAllowance = k_cbSteamNetworkingSocketsMaxEncryptedPayloadSend; struct SNPRange_t { /// Byte or sequence number range int64 m_nBegin; int64 m_nEnd; // STL-style. It's one past the end inline int64 length() const { // In general, allow zero-length ranges, but not negative ones Assert( m_nEnd >= m_nBegin ); return m_nEnd - m_nBegin; } /// Strict comparison function. This is used in situations where /// ranges must not overlap, AND we also never search for /// a range that might overlap. struct NonOverlappingLess { inline bool operator ()(const SNPRange_t &l, const SNPRange_t &r ) const { if ( l.m_nBegin < r.m_nBegin ) return true; AssertMsg( l.m_nBegin > r.m_nBegin || l.m_nEnd == r.m_nEnd, "Ranges should not overlap in this map!" ); return false; } }; }; /// A packet that has been sent but we don't yet know if was received /// or dropped. These are kept in an ordered map keyed by packet number. /// (Hence the packet number not being a member) When we receive an ACK, /// we remove packets from this list. struct SNPInFlightPacket_t { // // FIXME - Could definitely pack this structure better. And maybe // worth it to optimize cache // /// Local timestamp when we sent it SteamNetworkingMicroseconds m_usecWhenSent; /// Did we get an ack block from peer that explicitly marked this /// packet as being skipped? Note that we might subsequently get an /// an ack for this same packet, that's OK! bool m_bNack; /// Transport used to send CConnectionTransport *m_pTransport; /// List of reliable segments. Ignoring retransmission, /// there really is no reason why we we would need to have /// more than 1 in a packet, even if there are multiple /// reliable messages. If we need to retry, we might /// be fragmented. But usually it will only be a few. vstd::small_vector<SNPRange_t,1> m_vecReliableSegments; }; struct SSNPSendMessageList : public SteamNetworkingMessageQueue { /// Unlink the message at the head, if any and return it. 
/// Unlike STL pop_front, this will return nullptr if the /// list is empty CSteamNetworkingMessage *pop_front() { CSteamNetworkingMessage *pResult = m_pFirst; if ( pResult ) { Assert( m_pLast ); Assert( pResult->m_links.m_pQueue == this ); Assert( pResult->m_links.m_pPrev == nullptr ); m_pFirst = pResult->m_links.m_pNext; if ( m_pFirst ) { Assert( m_pFirst->m_links.m_pPrev == pResult ); Assert( m_pFirst->m_nMessageNumber > pResult->m_nMessageNumber ); m_pFirst->m_links.m_pPrev = nullptr; } else { Assert( m_pLast == pResult ); m_pLast = nullptr; } pResult->m_links.m_pQueue = nullptr; pResult->m_links.m_pNext = nullptr; } return pResult; } /// Optimized insertion when we know it goes at the end void push_back( CSteamNetworkingMessage *pMsg ) { if ( m_pFirst == nullptr ) { Assert( m_pLast == nullptr ); m_pFirst = pMsg; } else { // Messages are always kept in message number order Assert( pMsg->m_nMessageNumber > m_pLast->m_nMessageNumber ); Assert( m_pLast->m_links.m_pNext == nullptr ); m_pLast->m_links.m_pNext = pMsg; } pMsg->m_links.m_pQueue = this; pMsg->m_links.m_pNext = nullptr; pMsg->m_links.m_pPrev = m_pLast; m_pLast = pMsg; } }; struct SSNPSenderState { SSNPSenderState(); ~SSNPSenderState() { Shutdown(); } void Shutdown(); /// Current sending rate in bytes per second, RFC 3448 4.2 states default /// is one packet per second, but that is insane and we're not doing that. /// In most cases we will set a default based on initial ping, so this is /// only rarely used. int m_n_x = 32*1024; /// If >=0, then we can send a full packet right now. We allow ourselves to "store up" /// about 1 packet worth of "reserve". In other words, if we have not sent any packets /// for a while, basically we allow ourselves to send two packets in rapid succession, /// thus "bursting" over the limit by 1 packet. That long term rate will be clamped by /// the send rate. /// /// If <0, then we are currently "over" our rate limit and need to wait before we can /// send a packet. /// /// Provision for accumulating "credits" and burst allowance, to account for lossy /// kernel scheduler, etc is mentioned in RFC 5348, section 4.6. float m_flTokenBucket = 0; /// Last time that we added tokens to m_flTokenBucket SteamNetworkingMicroseconds m_usecTokenBucketTime = 0; void TokenBucket_Init( SteamNetworkingMicroseconds usecNow ) { m_usecTokenBucketTime = usecNow; m_flTokenBucket = k_flSendRateBurstOverageAllowance; } /// Limit our token bucket to the max reserve amount void TokenBucket_Limit() { if ( m_flTokenBucket > k_flSendRateBurstOverageAllowance ) m_flTokenBucket = k_flSendRateBurstOverageAllowance; } /// Calculate time until we could send our next packet, checking our token /// bucket and the current send rate SteamNetworkingMicroseconds CalcTimeUntilNextSend() const { // Do we have tokens to burn right now? 
if ( m_flTokenBucket >= 0.0f ) return 0; return SteamNetworkingMicroseconds( m_flTokenBucket * -1e6f / (float)m_n_x ) + 1; // +1 to make sure that if we don't have any tokens, we never return 0, since zero means "ready right now" } /// Nagle timer on all pending messages void ClearNagleTimers() { CSteamNetworkingMessage *pMsg = m_messagesQueued.m_pLast; while ( pMsg && pMsg->SNPSend_UsecNagle() ) { pMsg->SNPSend_SetUsecNagle( 0 ); pMsg = pMsg->m_links.m_pPrev; } } // Current message number, we ++ when adding a message int64 m_nReliableStreamPos = 1; int64 m_nLastSentMsgNum = 0; // Will increment to 1 with first message int64 m_nLastSendMsgNumReliable = 0; /// List of messages that we have not yet finished putting on the wire the first time. /// The Nagle timer may be active on one or more, but if so, it is only on messages /// at the END of the list. The first message may be partially sent. SSNPSendMessageList m_messagesQueued; /// How many bytes into the first message in the queue have we put on the wire? int m_cbCurrentSendMessageSent = 0; /// List of reliable messages that have been fully placed on the wire at least once, /// but we're hanging onto because of the potential need to retry. (Note that if we get /// packet loss, it's possible that we hang onto a message even after it's been fully /// acked, because a prior message is still needed. We always operate on this list /// like a queue, rather than seeking into the middle of the list and removing messages /// as soon as they are no longer needed.) SSNPSendMessageList m_unackedReliableMessages; // Buffered data counters. See SteamNetworkingQuickConnectionStatus for more info int m_cbPendingUnreliable = 0; int m_cbPendingReliable = 0; int m_cbSentUnackedReliable = 0; inline int PendingBytesTotal() const { return m_cbPendingUnreliable + m_cbPendingReliable; } // Stats. FIXME - move to LinkStatsEndToEnd and track rate counters int64 m_nMessagesSentReliable = 0; int64 m_nMessagesSentUnreliable = 0; /// List of packets that we have sent but don't know whether they were received or not. /// We keep a dummy sentinel at the head of the list, with a negative packet number. /// This vastly simplifies the processing. std_map<int64,SNPInFlightPacket_t> m_mapInFlightPacketsByPktNum; /// The next unacked packet that should be timed out and implicitly NACKed, /// if we don't receive an ACK in time. Will be m_mapInFlightPacketsByPktNum.end() /// if we don't have any in flight packets that we are waiting on. std_map<int64,SNPInFlightPacket_t>::iterator m_itNextInFlightPacketToTimeout; /// Ordered list of reliable ranges that we have recently sent /// in a packet. These should be non-overlapping, and furthermore /// should not overlap with with any range in m_listReadyReliableRange /// /// The "value" portion of the map is the message that has the first bit of /// reliable data we need for this message std_map<SNPRange_t,CSteamNetworkingMessage*,SNPRange_t::NonOverlappingLess> m_listInFlightReliableRange; /// Ordered list of ranges that have been put on the wire, /// but have been detected as dropped, and now need to be retried. std_map<SNPRange_t,CSteamNetworkingMessage*,SNPRange_t::NonOverlappingLess> m_listReadyRetryReliableRange; /// Oldest packet sequence number that we are still asking peer /// to send acks for. int64 m_nMinPktWaitingOnAck = 0; // Remove messages from m_unackedReliableMessages that have been fully acked. void RemoveAckedReliableMessageFromUnackedList(); /// Check invariants in debug. 
#if STEAMNETWORKINGSOCKETS_SNP_PARANOIA == 0 inline void DebugCheckInFlightPacketMap() const {} #else void DebugCheckInFlightPacketMap() const; #endif #if STEAMNETWORKINGSOCKETS_SNP_PARANOIA > 1 inline void MaybeCheckInFlightPacketMap() const { DebugCheckInFlightPacketMap(); } #else inline void MaybeCheckInFlightPacketMap() const {} #endif }; struct SSNPRecvUnreliableSegmentKey { int64 m_nMsgNum; int m_nOffset; inline bool operator<(const SSNPRecvUnreliableSegmentKey &x) const { if ( m_nMsgNum < x.m_nMsgNum ) return true; if ( m_nMsgNum > x.m_nMsgNum ) return false; return m_nOffset < x.m_nOffset; } }; struct SSNPRecvUnreliableSegmentData { int m_cbSegSize = -1; bool m_bLast = false; char m_buf[ k_cbSteamNetworkingSocketsMaxPlaintextPayloadRecv ]; }; struct SSNPPacketGap { int64 m_nEnd; // just after the last packet received SteamNetworkingMicroseconds m_usecWhenReceivedPktBefore; // So we can send RTT data in our acks SteamNetworkingMicroseconds m_usecWhenAckPrior; // We need to send an ack for everything with lower packet numbers than this gap by this time. (Earlier is OK.) SteamNetworkingMicroseconds m_usecWhenOKToNack; // Don't give up on the gap being filed before this time }; struct SSNPReceiverState { SSNPReceiverState(); ~SSNPReceiverState() { Shutdown(); } void Shutdown(); /// Unreliable message segments that we have received. When an unreliable message /// needs to be fragmented, we store the pieces here. NOTE: it might be more efficient /// to use a simpler container, with worse O(), since this should ordinarily be /// a pretty small list. std_map<SSNPRecvUnreliableSegmentKey,SSNPRecvUnreliableSegmentData> m_mapUnreliableSegments; /// Stream position of the first byte in m_bufReliableData. Remember that the first byte /// in the reliable stream is actually at position 1, not 0 int64 m_nReliableStreamPos = 1; /// The highest message number we have seen so far. int64 m_nHighestSeenMsgNum = 0; /// The message number of the most recently received reliable message int64 m_nLastRecvReliableMsgNum = 0; /// Reliable data stream that we have received. This might have gaps in it! std::vector<byte> m_bufReliableStream; /// Gaps in the reliable data. These are created when we receive reliable data that /// is beyond what we expect next. Since these must never overlap, we store them /// using begin as the key and end as the value. /// /// !SPEED! We should probably use a small fixed-sized, sorted vector here, /// since in most cases the list will be small, and the cost of dynamic memory /// allocation will be way worse than O(n) insertion/removal. std_map<int64,int64> m_mapReliableStreamGaps; /// List of gaps in the packet sequence numbers we have received. /// Since these must never overlap, we store them using begin as the /// key and the end in the value. /// /// The last item in the list is a sentinel with /// begin and end set to INT64_MAX, and m_usecWhenAckPrior is /// the time when we need to flush acks/backs for all packets, /// including those received after the last gap (if any -- /// INT64_MAX means nothing scheduled). Remember, our wire /// protocol cannot report on packet N without also reporting /// on all packets numbered < N. /// /// !SPEED! We should probably use a small fixed-sized, sorted vector here, /// since in most cases the list will be small, and the cost of dynamic memory /// allocation will be way worse than O(n) insertion/removal. 
std_map<int64,SSNPPacketGap> m_mapPacketGaps; /// Oldest packet sequence number we need to ack to our peer int64 m_nMinPktNumToSendAcks = 0; /// Packet number when we received the value of m_nMinPktNumToSendAcks int64 m_nPktNumUpdatedMinPktNumToSendAcks = 0; /// The next ack that needs to be sent. The invariant /// for the times are: /// /// * Blocks with lower packet numbers: m_usecWhenAckPrior = INT64_MAX /// * This block: m_usecWhenAckPrior < INT64_MAX, or we are the sentinel /// * Blocks with higher packet numbers (if we are not the sentinel): m_usecWhenAckPrior >= previous m_usecWhenAckPrior /// /// We might send acks before they are due, rather than /// waiting until the last moment! If we are going to /// send a packet at all, we usually try to send at least /// a few acks, and if there is room in the packet, as /// many as will fit. The one exception is that if /// sending an ack would imply a NACK that we don't want to /// send yet. (Remember the restrictions on what we are able /// to communicate due to the tight RLE encoding of the wire /// format.) These delays are usually very short lived, and /// only happen when there is packet loss, so they don't delay /// acks very much. The whole purpose of this rather involved /// bookkeeping is to figure out which acks we *need* to send, /// and which acks we cannot send yet, so we can make optimal /// decisions. std_map<int64,SSNPPacketGap>::iterator m_itPendingAck; /// Iterator into m_mapPacketGaps. If != the sentinel, /// we will avoid reporting on the dropped packets in this /// gap (and all higher numbered packets), because we are /// waiting in the hopes that they will arrive out of order. std_map<int64,SSNPPacketGap>::iterator m_itPendingNack; /// Queue a flush of ALL acks (and NACKs!) by the given time. /// If anything is scheduled to happen earlier, that schedule /// will still be honered. We will ack up to that packet number, /// and then we we may report higher numbered blocks, or we may /// stop and wait to report more acks until later. void QueueFlushAllAcks( SteamNetworkingMicroseconds usecWhen ); /// Return the time when we need to flush out acks, or INT64_MAX /// if we don't have any acks pending right now. inline SteamNetworkingMicroseconds TimeWhenFlushAcks() const { // Paranoia if ( m_mapPacketGaps.empty() ) { AssertMsg( false, "TimeWhenFlushAcks - we're shut down!" ); return INT64_MAX; } return m_itPendingAck->second.m_usecWhenAckPrior; } /// Check invariants in debug. #if STEAMNETWORKINGSOCKETS_SNP_PARANOIA > 1 void DebugCheckPackGapMap() const; #else inline void DebugCheckPackGapMap() const {} #endif // Stats. FIXME - move to LinkStatsEndToEnd and track rate counters int64 m_nMessagesRecvReliable = 0; int64 m_nMessagesRecvUnreliable = 0; }; } // SteamNetworkingSocketsLib
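A minimal sketch of how fragments of one unreliable message could be reassembled in order using a key shaped like SSNPRecvUnreliableSegmentKey (message number, then byte offset) from the header above. The SegKey/SegData names, the 1200-byte per-segment cap, and the reassembly policy here are simplified assumptions, not the library's actual receive path, which additionally enforces total-size limits and discards stale fragments.

#include <cstdint>
#include <cstring>
#include <map>
#include <string>

struct SegKey
{
    int64_t msgNum;
    int     offset;
    bool operator<( const SegKey &x ) const
    {
        if ( msgNum != x.msgNum ) return msgNum < x.msgNum;
        return offset < x.offset;
    }
};

struct SegData
{
    int  cbSize = -1;
    bool bLast = false;
    char buf[ 1200 ]; // assumed per-segment cap, for illustration only
};

// Attempt to reassemble message msgNum; succeeds only when every piece is
// present, contiguous, and the final piece (bLast) has arrived.
static bool TryReassemble( const std::map<SegKey, SegData> &segs, int64_t msgNum, std::string &out )
{
    out.clear();
    int expectedOffset = 0;
    bool sawLast = false;
    for ( auto it = segs.lower_bound( SegKey{ msgNum, 0 } );
          it != segs.end() && it->first.msgNum == msgNum; ++it )
    {
        if ( it->first.offset != expectedOffset )
            return false;                       // gap: still waiting on a piece
        out.append( it->second.buf, it->second.cbSize );
        expectedOffset += it->second.cbSize;
        sawLast = it->second.bLast;
    }
    return sawLast;
}

int main()
{
    std::map<SegKey, SegData> segs;
    SegData a; std::memcpy( a.buf, "hel", 3 ); a.cbSize = 3; a.bLast = false;
    SegData b; std::memcpy( b.buf, "lo",  2 ); b.cbSize = 2; b.bLast = true;
    segs[ SegKey{ 7, 0 } ] = a;
    segs[ SegKey{ 7, 3 } ] = b;

    std::string msg;
    return ( TryReassemble( segs, 7, msg ) && msg == "hello" ) ? 0 : 1;
}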
null
//====== Copyright Valve Corporation, All rights reserved. ==================== #pragma once #include "../steamnetworkingsockets_internal.h" #include <vector> #include <map> #include <set> // Set paranoia level, if not already set: // 0 = disabled // 1 = sometimes // 2 = max #ifndef STEAMNETWORKINGSOCKETS_SNP_PARANOIA #ifdef _DEBUG #define STEAMNETWORKINGSOCKETS_SNP_PARANOIA 2 #else #define STEAMNETWORKINGSOCKETS_SNP_PARANOIA 0 #endif #endif #if STEAMNETWORKINGSOCKETS_SNP_PARANOIA > 0 #if defined(__GNUC__ ) && defined( __linux__ ) && !defined( __ANDROID__ ) #include <debug/map> // FIXME use debug versions template< typename K, typename V, typename L = std::less<K> > using std_map = __gnu_debug::map<K,V,L>; template <typename K, typename V, typename L> inline int len( const std_map<K,V,L> &map ) { return (int)map.size(); } #else template< typename K, typename V, typename L = std::less<K> > using std_map = std::map<K,V,L>; #endif #else template< typename K, typename V, typename L = std::less<K> > using std_map = std::map<K,V,L>; #endif struct P2PSessionState_t; namespace SteamNetworkingSocketsLib { // Acks may be delayed. This controls the precision used on the wire to encode the delay time. constexpr int k_nAckDelayPrecisionShift = 5; constexpr SteamNetworkingMicroseconds k_usecAckDelayPrecision = (1 << k_nAckDelayPrecisionShift ); // When a receiver detects a dropped packet, wait a bit before NACKing it, to give it time // to arrive out of order. This is really important for many different types of connections // that send on different channels, e.g. DSL, Wifi. // Here we really could be smarter, by tracking how often dropped // packets really do arrive out of order. If the rate is low, then it's // probably best to go ahead and send a NACK now, rather than waiting. // But if dropped packets do often arrive out of order, then waiting // to NACK will probably save some retransmits. In fact, instead // of learning the rate, we should probably try to learn the delay. // E.g. a probability distribution P(t), which describes the odds // that a dropped packet will have arrived at time t. Then you // adjust the NACK delay such that P(nack_delay) gives the best // balance between false positive and false negative rates. constexpr SteamNetworkingMicroseconds k_usecNackFlush = 3*1000; // Max size of a message that we are wiling to *receive*. constexpr int k_cbMaxMessageSizeRecv = k_cbMaxSteamNetworkingSocketsMessageSizeSend*2; // The max we will look ahead and allocate data, ahead of the reliable // messages we have been able to decode. We limit this to make sure that // a malicious sender cannot exploit us. constexpr int k_cbMaxBufferedReceiveReliableData = k_cbMaxMessageSizeRecv + 64*1024; constexpr int k_nMaxReliableStreamGaps_Extend = 30; // Discard reliable data past the end of the stream, if it would cause us to get too many gaps constexpr int k_nMaxReliableStreamGaps_Fragment = 20; // Discard reliable data that is filling in the middle of a hole, if it would cause the number of gaps to exceed this number constexpr int k_nMaxPacketGaps = 62; // Don't bother tracking more than N gaps. Instead, we will end up NACKing some packets that we actually did receive. This should not break the protocol, but it protects us from malicious sender // Hang on to at most N unreliable segments. 
When packets are dropping // and unreliable messages being fragmented, we will accumulate old pieces // of unreliable messages that we retain in hopes that we will get the // missing piece and reassemble the whole message. At a certain point we // must give up and discard them. We use a simple strategy of just limiting // the max total number. In reality large unreliable messages are just a very bad // idea, since the odds of the message dropping increase exponentially with the // number of packets. With 20 packets, even 1% packet loss becomes ~80% message // loss. (Assuming naive fragmentation and reassembly and no forward // error correction.) constexpr int k_nMaxBufferedUnreliableSegments = 20; // If app tries to send a message larger than N bytes unreliably, // complain about it, and automatically convert to reliable. // About 15 segments. constexpr int k_cbMaxUnreliableMsgSizeSend = 15*1100; // Max possible size of an unreliable segment we could receive. constexpr int k_cbMaxUnreliableSegmentSizeRecv = k_cbSteamNetworkingSocketsMaxPlaintextPayloadRecv; // Largest possible total unreliable message we can receive, based on the constraints above constexpr int k_cbMaxUnreliableMsgSizeRecv = k_nMaxBufferedUnreliableSegments*k_cbMaxUnreliableSegmentSizeRecv; COMPILE_TIME_ASSERT( k_cbMaxUnreliableMsgSizeRecv > k_cbMaxUnreliableMsgSizeSend + 4096 ); // Postel's law; confirm how much slack we have here class CSteamNetworkConnectionBase; class CConnectionTransport; struct SteamNetworkingMessageQueue; /// Actual implementation of SteamNetworkingMessage_t, which is the API /// visible type. Has extra fields needed to put the message into intrusive /// linked lists. class CSteamNetworkingMessage : public SteamNetworkingMessage_t { public: static CSteamNetworkingMessage *New( CSteamNetworkConnectionBase *pParent, uint32 cbSize, int64 nMsgNum, int nFlags, SteamNetworkingMicroseconds usecNow ); static CSteamNetworkingMessage *New( uint32 cbSize ); static void DefaultFreeData( SteamNetworkingMessage_t *pMsg ); /// OK to delay sending this message until this time. Set to zero to explicitly force /// Nagle timer to expire and send now (but this should behave the same as if the /// timer < usecNow). If the timer is cleared, then all messages with lower message numbers /// are also cleared. inline SteamNetworkingMicroseconds SNPSend_UsecNagle() const { return m_usecTimeReceived; } inline void SNPSend_SetUsecNagle( SteamNetworkingMicroseconds x ) { m_usecTimeReceived = x; } /// Offset in reliable stream of the header byte. 0 if we're not reliable. inline int64 SNPSend_ReliableStreamPos() const { return m_nConnUserData; } inline void SNPSend_SetReliableStreamPos( int64 x ) { m_nConnUserData = x; } inline int SNPSend_ReliableStreamSize() const { DbgAssert( m_nFlags & k_nSteamNetworkingSend_Reliable && m_nConnUserData > 0 && m_cbSNPSendReliableHeader > 0 && m_cbSize >= m_cbSNPSendReliableHeader ); return m_cbSize; } inline bool SNPSend_IsReliable() const { if ( m_nFlags & k_nSteamNetworkingSend_Reliable ) { DbgAssert( m_nConnUserData > 0 && m_cbSNPSendReliableHeader > 0 && m_cbSize >= m_cbSNPSendReliableHeader ); return true; } DbgAssert( m_nConnUserData == 0 && m_cbSNPSendReliableHeader == 0 ); return false; } // Reliable stream header int m_cbSNPSendReliableHeader; byte *SNPSend_ReliableHeader() { // !KLUDGE! 
Reuse the peer identity to hold the reliable header return (byte*)&m_identityPeer; } /// Remove it from queues void Unlink(); struct Links { SteamNetworkingMessageQueue *m_pQueue; CSteamNetworkingMessage *m_pPrev; CSteamNetworkingMessage *m_pNext; inline void Clear() { m_pQueue = nullptr; m_pPrev = nullptr; m_pNext = nullptr; } }; /// Intrusive links for the "primary" list we are in Links m_links; /// Intrusive links for any secondary list we may be in. (Same listen socket or /// P2P channel, depending on message type) Links m_linksSecondaryQueue; void LinkBefore( CSteamNetworkingMessage *pSuccessor, Links CSteamNetworkingMessage::*pMbrLinks, SteamNetworkingMessageQueue *pQueue ); void LinkToQueueTail( Links CSteamNetworkingMessage::*pMbrLinks, SteamNetworkingMessageQueue *pQueue ); void UnlinkFromQueue( Links CSteamNetworkingMessage::*pMbrLinks ); private: // Use New and Release()!! inline CSteamNetworkingMessage() {} inline ~CSteamNetworkingMessage() {} static void ReleaseFunc( SteamNetworkingMessage_t *pIMsg ); }; /// A doubly-linked list of CSteamNetworkingMessage struct SteamNetworkingMessageQueue { CSteamNetworkingMessage *m_pFirst = nullptr; CSteamNetworkingMessage *m_pLast = nullptr; inline bool empty() const { if ( m_pFirst ) { Assert( m_pLast ); return false; } Assert( !m_pLast ); return true; } /// Remove the first messages out of the queue (up to nMaxMessages). Returns the number returned int RemoveMessages( SteamNetworkingMessage_t **ppOutMessages, int nMaxMessages ); /// Delete all queued messages void PurgeMessages(); }; /// Maximum number of packets we will send in one Think() call. const int k_nMaxPacketsPerThink = 16; /// Max number of tokens we are allowed to store up in reserve, for a burst. const float k_flSendRateBurstOverageAllowance = k_cbSteamNetworkingSocketsMaxEncryptedPayloadSend; struct SNPRange_t { /// Byte or sequence number range int64 m_nBegin; int64 m_nEnd; // STL-style. It's one past the end inline int64 length() const { // In general, allow zero-length ranges, but not negative ones Assert( m_nEnd >= m_nBegin ); return m_nEnd - m_nBegin; } /// Strict comparison function. This is used in situations where /// ranges must not overlap, AND we also never search for /// a range that might overlap. struct NonOverlappingLess { inline bool operator ()(const SNPRange_t &l, const SNPRange_t &r ) const { if ( l.m_nBegin < r.m_nBegin ) return true; AssertMsg( l.m_nBegin > r.m_nBegin || l.m_nEnd == r.m_nEnd, "Ranges should not overlap in this map!" ); return false; } }; }; /// A packet that has been sent but we don't yet know if was received /// or dropped. These are kept in an ordered map keyed by packet number. /// (Hence the packet number not being a member) When we receive an ACK, /// we remove packets from this list. struct SNPInFlightPacket_t { // // FIXME - Could definitely pack this structure better. And maybe // worth it to optimize cache // /// Local timestamp when we sent it SteamNetworkingMicroseconds m_usecWhenSent; /// Did we get an ack block from peer that explicitly marked this /// packet as being skipped? Note that we might subsequently get an /// an ack for this same packet, that's OK! bool m_bNack; /// Transport used to send CConnectionTransport *m_pTransport; /// List of reliable segments. Ignoring retransmission, /// there really is no reason why we we would need to have /// more than 1 in a packet, even if there are multiple /// reliable messages. If we need to retry, we might /// be fragmented. But usually it will only be a few. 
vstd::small_vector<SNPRange_t,1> m_vecReliableSegments; }; struct SSNPSendMessageList : public SteamNetworkingMessageQueue { /// Unlink the message at the head, if any and return it. /// Unlike STL pop_front, this will return nullptr if the /// list is empty CSteamNetworkingMessage *pop_front() { CSteamNetworkingMessage *pResult = m_pFirst; if ( pResult ) { Assert( m_pLast ); Assert( pResult->m_links.m_pQueue == this ); Assert( pResult->m_links.m_pPrev == nullptr ); m_pFirst = pResult->m_links.m_pNext; if ( m_pFirst ) { Assert( m_pFirst->m_links.m_pPrev == pResult ); Assert( m_pFirst->m_nMessageNumber > pResult->m_nMessageNumber ); m_pFirst->m_links.m_pPrev = nullptr; } else { Assert( m_pLast == pResult ); m_pLast = nullptr; } pResult->m_links.m_pQueue = nullptr; pResult->m_links.m_pNext = nullptr; } return pResult; } /// Optimized insertion when we know it goes at the end void push_back( CSteamNetworkingMessage *pMsg ) { if ( m_pFirst == nullptr ) { Assert( m_pLast == nullptr ); m_pFirst = pMsg; } else { // Messages are always kept in message number order Assert( pMsg->m_nMessageNumber > m_pLast->m_nMessageNumber ); Assert( m_pLast->m_links.m_pNext == nullptr ); m_pLast->m_links.m_pNext = pMsg; } pMsg->m_links.m_pQueue = this; pMsg->m_links.m_pNext = nullptr; pMsg->m_links.m_pPrev = m_pLast; m_pLast = pMsg; } }; struct SSNPSenderState { SSNPSenderState(); ~SSNPSenderState() { Shutdown(); } void Shutdown(); /// Current sending rate in bytes per second, RFC 3448 4.2 states default /// is one packet per second, but that is insane and we're not doing that. /// In most cases we will set a default based on initial ping, so this is /// only rarely used. int m_n_x = 32*1024; /// If >=0, then we can send a full packet right now. We allow ourselves to "store up" /// about 1 packet worth of "reserve". In other words, if we have not sent any packets /// for a while, basically we allow ourselves to send two packets in rapid succession, /// thus "bursting" over the limit by 1 packet. That long term rate will be clamped by /// the send rate. /// /// If <0, then we are currently "over" our rate limit and need to wait before we can /// send a packet. /// /// Provision for accumulating "credits" and burst allowance, to account for lossy /// kernel scheduler, etc is mentioned in RFC 5348, section 4.6. float m_flTokenBucket = 0; /// Last time that we added tokens to m_flTokenBucket SteamNetworkingMicroseconds m_usecTokenBucketTime = 0; void TokenBucket_Init( SteamNetworkingMicroseconds usecNow ) { m_usecTokenBucketTime = usecNow; m_flTokenBucket = k_flSendRateBurstOverageAllowance; } /// Limit our token bucket to the max reserve amount void TokenBucket_Limit() { if ( m_flTokenBucket > k_flSendRateBurstOverageAllowance ) m_flTokenBucket = k_flSendRateBurstOverageAllowance; } /// Calculate time until we could send our next packet, checking our token /// bucket and the current send rate SteamNetworkingMicroseconds CalcTimeUntilNextSend() const { // Do we have tokens to burn right now? 
if ( m_flTokenBucket >= 0.0f ) return 0; return SteamNetworkingMicroseconds( m_flTokenBucket * -1e6f / (float)m_n_x ) + 1; // +1 to make sure that if we don't have any tokens, we never return 0, since zero means "ready right now" } /// Nagle timer on all pending messages void ClearNagleTimers() { CSteamNetworkingMessage *pMsg = m_messagesQueued.m_pLast; while ( pMsg && pMsg->SNPSend_UsecNagle() ) { pMsg->SNPSend_SetUsecNagle( 0 ); pMsg = pMsg->m_links.m_pPrev; } } // Current message number, we ++ when adding a message int64 m_nReliableStreamPos = 1; int64 m_nLastSentMsgNum = 0; // Will increment to 1 with first message int64 m_nLastSendMsgNumReliable = 0; /// List of messages that we have not yet finished putting on the wire the first time. /// The Nagle timer may be active on one or more, but if so, it is only on messages /// at the END of the list. The first message may be partially sent. SSNPSendMessageList m_messagesQueued; /// How many bytes into the first message in the queue have we put on the wire? int m_cbCurrentSendMessageSent = 0; /// List of reliable messages that have been fully placed on the wire at least once, /// but we're hanging onto because of the potential need to retry. (Note that if we get /// packet loss, it's possible that we hang onto a message even after it's been fully /// acked, because a prior message is still needed. We always operate on this list /// like a queue, rather than seeking into the middle of the list and removing messages /// as soon as they are no longer needed.) SSNPSendMessageList m_unackedReliableMessages; // Buffered data counters. See SteamNetworkingQuickConnectionStatus for more info int m_cbPendingUnreliable = 0; int m_cbPendingReliable = 0; int m_cbSentUnackedReliable = 0; inline int PendingBytesTotal() const { return m_cbPendingUnreliable + m_cbPendingReliable; } // Stats. FIXME - move to LinkStatsEndToEnd and track rate counters int64 m_nMessagesSentReliable = 0; int64 m_nMessagesSentUnreliable = 0; /// List of packets that we have sent but don't know whether they were received or not. /// We keep a dummy sentinel at the head of the list, with a negative packet number. /// This vastly simplifies the processing. std_map<int64,SNPInFlightPacket_t> m_mapInFlightPacketsByPktNum; /// The next unacked packet that should be timed out and implicitly NACKed, /// if we don't receive an ACK in time. Will be m_mapInFlightPacketsByPktNum.end() /// if we don't have any in flight packets that we are waiting on. std_map<int64,SNPInFlightPacket_t>::iterator m_itNextInFlightPacketToTimeout; /// Ordered list of reliable ranges that we have recently sent /// in a packet. These should be non-overlapping, and furthermore /// should not overlap with with any range in m_listReadyReliableRange /// /// The "value" portion of the map is the message that has the first bit of /// reliable data we need for this message std_map<SNPRange_t,CSteamNetworkingMessage*,SNPRange_t::NonOverlappingLess> m_listInFlightReliableRange; /// Ordered list of ranges that have been put on the wire, /// but have been detected as dropped, and now need to be retried. std_map<SNPRange_t,CSteamNetworkingMessage*,SNPRange_t::NonOverlappingLess> m_listReadyRetryReliableRange; /// Oldest packet sequence number that we are still asking peer /// to send acks for. int64 m_nMinPktWaitingOnAck = 0; // Remove messages from m_unackedReliableMessages that have been fully acked. void RemoveAckedReliableMessageFromUnackedList(); /// Check invariants in debug. 
#if STEAMNETWORKINGSOCKETS_SNP_PARANOIA == 0 inline void DebugCheckInFlightPacketMap() const {} #else void DebugCheckInFlightPacketMap() const; #endif #if STEAMNETWORKINGSOCKETS_SNP_PARANOIA > 1 inline void MaybeCheckInFlightPacketMap() const { DebugCheckInFlightPacketMap(); } #else inline void MaybeCheckInFlightPacketMap() const {} #endif }; struct SSNPRecvUnreliableSegmentKey { int64 m_nMsgNum; int m_nOffset; inline bool operator<(const SSNPRecvUnreliableSegmentKey &x) const { if ( m_nMsgNum < x.m_nMsgNum ) return true; if ( m_nMsgNum > x.m_nMsgNum ) return false; return m_nOffset < x.m_nOffset; } }; struct SSNPRecvUnreliableSegmentData { int m_cbSegSize = -1; bool m_bLast = false; char m_buf[ k_cbMaxUnreliableSegmentSizeRecv ]; }; struct SSNPPacketGap { int64 m_nEnd; // just after the last packet received SteamNetworkingMicroseconds m_usecWhenReceivedPktBefore; // So we can send RTT data in our acks SteamNetworkingMicroseconds m_usecWhenAckPrior; // We need to send an ack for everything with lower packet numbers than this gap by this time. (Earlier is OK.) SteamNetworkingMicroseconds m_usecWhenOKToNack; // Don't give up on the gap being filed before this time }; struct SSNPReceiverState { SSNPReceiverState(); ~SSNPReceiverState() { Shutdown(); } void Shutdown(); /// Unreliable message segments that we have received. When an unreliable message /// needs to be fragmented, we store the pieces here. NOTE: it might be more efficient /// to use a simpler container, with worse O(), since this should ordinarily be /// a pretty small list. std_map<SSNPRecvUnreliableSegmentKey,SSNPRecvUnreliableSegmentData> m_mapUnreliableSegments; /// Stream position of the first byte in m_bufReliableData. Remember that the first byte /// in the reliable stream is actually at position 1, not 0 int64 m_nReliableStreamPos = 1; /// The highest message number we have seen so far. int64 m_nHighestSeenMsgNum = 0; /// The message number of the most recently received reliable message int64 m_nLastRecvReliableMsgNum = 0; /// Reliable data stream that we have received. This might have gaps in it! std::vector<byte> m_bufReliableStream; /// Gaps in the reliable data. These are created when we receive reliable data that /// is beyond what we expect next. Since these must never overlap, we store them /// using begin as the key and end as the value. /// /// !SPEED! We should probably use a small fixed-sized, sorted vector here, /// since in most cases the list will be small, and the cost of dynamic memory /// allocation will be way worse than O(n) insertion/removal. std_map<int64,int64> m_mapReliableStreamGaps; /// List of gaps in the packet sequence numbers we have received. /// Since these must never overlap, we store them using begin as the /// key and the end in the value. /// /// The last item in the list is a sentinel with /// begin and end set to INT64_MAX, and m_usecWhenAckPrior is /// the time when we need to flush acks/backs for all packets, /// including those received after the last gap (if any -- /// INT64_MAX means nothing scheduled). Remember, our wire /// protocol cannot report on packet N without also reporting /// on all packets numbered < N. /// /// !SPEED! We should probably use a small fixed-sized, sorted vector here, /// since in most cases the list will be small, and the cost of dynamic memory /// allocation will be way worse than O(n) insertion/removal. 
std_map<int64,SSNPPacketGap> m_mapPacketGaps; /// Oldest packet sequence number we need to ack to our peer int64 m_nMinPktNumToSendAcks = 0; /// Packet number when we received the value of m_nMinPktNumToSendAcks int64 m_nPktNumUpdatedMinPktNumToSendAcks = 0; /// The next ack that needs to be sent. The invariant /// for the times are: /// /// * Blocks with lower packet numbers: m_usecWhenAckPrior = INT64_MAX /// * This block: m_usecWhenAckPrior < INT64_MAX, or we are the sentinel /// * Blocks with higher packet numbers (if we are not the sentinel): m_usecWhenAckPrior >= previous m_usecWhenAckPrior /// /// We might send acks before they are due, rather than /// waiting until the last moment! If we are going to /// send a packet at all, we usually try to send at least /// a few acks, and if there is room in the packet, as /// many as will fit. The one exception is that if /// sending an ack would imply a NACK that we don't want to /// send yet. (Remember the restrictions on what we are able /// to communicate due to the tight RLE encoding of the wire /// format.) These delays are usually very short lived, and /// only happen when there is packet loss, so they don't delay /// acks very much. The whole purpose of this rather involved /// bookkeeping is to figure out which acks we *need* to send, /// and which acks we cannot send yet, so we can make optimal /// decisions. std_map<int64,SSNPPacketGap>::iterator m_itPendingAck; /// Iterator into m_mapPacketGaps. If != the sentinel, /// we will avoid reporting on the dropped packets in this /// gap (and all higher numbered packets), because we are /// waiting in the hopes that they will arrive out of order. std_map<int64,SSNPPacketGap>::iterator m_itPendingNack; /// Queue a flush of ALL acks (and NACKs!) by the given time. /// If anything is scheduled to happen earlier, that schedule /// will still be honered. We will ack up to that packet number, /// and then we we may report higher numbered blocks, or we may /// stop and wait to report more acks until later. void QueueFlushAllAcks( SteamNetworkingMicroseconds usecWhen ); /// Return the time when we need to flush out acks, or INT64_MAX /// if we don't have any acks pending right now. inline SteamNetworkingMicroseconds TimeWhenFlushAcks() const { // Paranoia if ( m_mapPacketGaps.empty() ) { AssertMsg( false, "TimeWhenFlushAcks - we're shut down!" ); return INT64_MAX; } return m_itPendingAck->second.m_usecWhenAckPrior; } /// Check invariants in debug. #if STEAMNETWORKINGSOCKETS_SNP_PARANOIA > 1 void DebugCheckPackGapMap() const; #else inline void DebugCheckPackGapMap() const {} #endif // Stats. FIXME - move to LinkStatsEndToEnd and track rate counters int64 m_nMessagesRecvReliable = 0; int64 m_nMessagesRecvUnreliable = 0; }; } // SteamNetworkingSocketsLib
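SSNPSenderState in the header above describes its send-rate limiter as a token bucket (in the spirit of RFC 3448/5348): tokens accrue at the configured send rate, the reserve is capped at roughly one packet's worth of burst allowance, and CalcTimeUntilNextSend() converts a negative balance into a wait time. The following is a small self-contained sketch of that scheme; the struct, field names, and the 1200-byte burst constant are illustrative placeholders, not the library's values.

// Sketch of the send-rate token bucket described in SSNPSenderState above.
#include <algorithm>
#include <cstdint>

struct TokenBucket {
    float   tokens        = 0.0f;     // bytes we may send right now (may go negative)
    float   maxBurstBytes = 1200.0f;  // reserve cap, roughly one packet (illustrative)
    int     bytesPerSec   = 32 * 1024;
    int64_t lastUsec      = 0;

    // Accrue tokens for the elapsed time, clamped to the burst allowance.
    void Update(int64_t usecNow) {
        tokens += (float)(usecNow - lastUsec) * (float)bytesPerSec * 1e-6f;
        tokens  = std::min(tokens, maxBurstBytes);
        lastUsec = usecNow;
    }

    // Spend tokens when a packet is sent; a negative balance delays the next send.
    void Spend(int cbPacket) { tokens -= (float)cbPacket; }

    // Microseconds until we may send again: 0 if tokens remain, otherwise the time
    // needed to climb back to zero at the send rate (+1 so "not yet" never reads as 0).
    int64_t UsecUntilNextSend() const {
        if (tokens >= 0.0f)
            return 0;
        return (int64_t)(tokens * -1e6f / (float)bytesPerSec) + 1;
    }
};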
null
233
CWE-787
CVE-2020-6018
#include "crypto.h" #include <tier0/vprof.h> #include <tier0/dbg.h> #include "tier0/memdbgoff.h" #include <sodium.h> #include "tier0/memdbgon.h" #ifdef STEAMNETWORKINGSOCKETS_CRYPTO_LIBSODIUM SymmetricCryptContextBase::SymmetricCryptContextBase() : m_ctx(nullptr), m_cbIV(0), m_cbTag(0) { } void SymmetricCryptContextBase::Wipe() { sodium_free(m_ctx); m_ctx = nullptr; m_cbIV = 0; m_cbTag = 0; } bool AES_GCM_CipherContext::InitCipher( const void *pKey, size_t cbKey, size_t cbIV, size_t cbTag, bool bEncrypt ) { // Libsodium requires AES and CLMUL instructions for AES-GCM, available in // Intel "Westmere" and up. 90.41% of Steam users have this as of the // November 2019 survey. // Libsodium recommends ChaCha20-Poly1305 in software if you've not got AES support // in hardware. AssertMsg( crypto_aead_aes256gcm_is_available() == 1, "No hardware AES support on this CPU." ); AssertMsg( cbKey == crypto_aead_aes256gcm_KEYBYTES, "AES key sizes other than 256 are unsupported." ); AssertMsg( cbIV == crypto_aead_aes256gcm_NPUBBYTES, "Nonce size is unsupported" ); if(m_ctx == nullptr) { m_ctx = sodium_malloc( sizeof(crypto_aead_aes256gcm_state) ); } crypto_aead_aes256gcm_beforenm( static_cast<crypto_aead_aes256gcm_state*>( m_ctx ), static_cast<const unsigned char*>( pKey ) ); return true; } bool AES_GCM_EncryptContext::Encrypt( const void *pPlaintextData, size_t cbPlaintextData, const void *pIV, void *pEncryptedDataAndTag, uint32 *pcbEncryptedDataAndTag, const void *pAdditionalAuthenticationData, size_t cbAuthenticationData ) { unsigned long long pcbEncryptedDataAndTag_longlong = *pcbEncryptedDataAndTag; crypto_aead_aes256gcm_encrypt_afternm( static_cast<unsigned char*>( pEncryptedDataAndTag ), &pcbEncryptedDataAndTag_longlong, static_cast<const unsigned char*>( pPlaintextData ), cbPlaintextData, static_cast<const unsigned char*>(pAdditionalAuthenticationData), cbAuthenticationData, nullptr, static_cast<const unsigned char*>( pIV ), static_cast<const crypto_aead_aes256gcm_state*>( m_ctx ) ); *pcbEncryptedDataAndTag = pcbEncryptedDataAndTag_longlong; return true; } bool AES_GCM_DecryptContext::Decrypt( const void *pEncryptedDataAndTag, size_t cbEncryptedDataAndTag, const void *pIV, void *pPlaintextData, uint32 *pcbPlaintextData, const void *pAdditionalAuthenticationData, size_t cbAuthenticationData ) { unsigned long long pcbPlaintextData_longlong; const int nDecryptResult = crypto_aead_aes256gcm_decrypt_afternm( static_cast<unsigned char*>( pPlaintextData ), &pcbPlaintextData_longlong, nullptr, static_cast<const unsigned char*>( pEncryptedDataAndTag ), cbEncryptedDataAndTag, static_cast<const unsigned char*>( pAdditionalAuthenticationData ), cbAuthenticationData, static_cast<const unsigned char*>( pIV ), static_cast<const crypto_aead_aes256gcm_state*>( m_ctx ) ); *pcbPlaintextData = pcbPlaintextData_longlong; return nDecryptResult == 0; } void CCrypto::Init() { // sodium_init is safe to call multiple times from multiple threads // so no need to do anything clever here. 
if(sodium_init() < 0) { AssertMsg( false, "libsodium didn't init" ); } } void CCrypto::GenerateRandomBlock( void *pubDest, int cubDest ) { VPROF_BUDGET( "CCrypto::GenerateRandomBlock", VPROF_BUDGETGROUP_ENCRYPTION ); AssertFatal( cubDest >= 0 ); randombytes_buf( pubDest, cubDest ); } void CCrypto::GenerateSHA256Digest( const void *pData, size_t cbData, SHA256Digest_t *pOutputDigest ) { VPROF_BUDGET( "CCrypto::GenerateSHA256Digest", VPROF_BUDGETGROUP_ENCRYPTION ); Assert( pData ); Assert( pOutputDigest ); crypto_hash_sha256( *pOutputDigest, static_cast<const unsigned char*>(pData), cbData ); } void CCrypto::GenerateHMAC256( const uint8 *pubData, uint32 cubData, const uint8 *pubKey, uint32 cubKey, SHA256Digest_t *pOutputDigest ) { VPROF_BUDGET( "CCrypto::GenerateHMAC256", VPROF_BUDGETGROUP_ENCRYPTION ); Assert( pubData ); Assert( cubData > 0 ); Assert( pubKey ); Assert( cubKey > 0 ); Assert( pOutputDigest ); Assert( sizeof(*pOutputDigest) == crypto_auth_hmacsha256_BYTES ); Assert( cubKey == crypto_auth_hmacsha256_KEYBYTES ); crypto_auth_hmacsha256( *pOutputDigest, pubData, cubData, pubKey ); } #endif
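The Encrypt/Decrypt wrappers above hand the caller's output buffer straight to libsodium without checking its capacity, which is the out-of-bounds write this row records (CWE-787): AES-256-GCM always appends a crypto_aead_aes256gcm_ABYTES (16-byte) tag, so the ciphertext buffer must hold cbPlaintextData + 16 bytes, and decryption can emit up to cbEncryptedDataAndTag - 16 bytes. Below is a caller-side sketch of that size relationship; the libsodium constants are real, the helper names are mine.

// Sketch of the AES-GCM output-size invariant the wrappers above rely on.
#include <sodium.h>
#include <cstddef>
#include <vector>

std::vector<unsigned char> MakeCiphertextBuffer(size_t cbPlaintext) {
    // The caller must provide at least this much room, otherwise the
    // encrypt call writes past the end of the buffer.
    return std::vector<unsigned char>(cbPlaintext + crypto_aead_aes256gcm_ABYTES);
}

std::vector<unsigned char> MakePlaintextBuffer(size_t cbCiphertextAndTag) {
    // Decryption produces at most cbCiphertextAndTag - ABYTES bytes.
    return std::vector<unsigned char>(
        cbCiphertextAndTag >= crypto_aead_aes256gcm_ABYTES
            ? cbCiphertextAndTag - crypto_aead_aes256gcm_ABYTES
            : 0);
}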
null
#include "crypto.h" #include <tier0/vprof.h> #include <tier0/dbg.h> #include "tier0/memdbgoff.h" #include <sodium.h> #include "tier0/memdbgon.h" #ifdef STEAMNETWORKINGSOCKETS_CRYPTO_LIBSODIUM SymmetricCryptContextBase::SymmetricCryptContextBase() : m_ctx(nullptr), m_cbIV(0), m_cbTag(0) { } void SymmetricCryptContextBase::Wipe() { sodium_free(m_ctx); m_ctx = nullptr; m_cbIV = 0; m_cbTag = 0; } bool AES_GCM_CipherContext::InitCipher( const void *pKey, size_t cbKey, size_t cbIV, size_t cbTag, bool bEncrypt ) { // Libsodium requires AES and CLMUL instructions for AES-GCM, available in // Intel "Westmere" and up. 90.41% of Steam users have this as of the // November 2019 survey. // Libsodium recommends ChaCha20-Poly1305 in software if you've not got AES support // in hardware. AssertMsg( crypto_aead_aes256gcm_is_available() == 1, "No hardware AES support on this CPU." ); AssertMsg( cbKey == crypto_aead_aes256gcm_KEYBYTES, "AES key sizes other than 256 are unsupported." ); AssertMsg( cbIV == crypto_aead_aes256gcm_NPUBBYTES, "Nonce size is unsupported" ); if(m_ctx == nullptr) { m_ctx = sodium_malloc( sizeof(crypto_aead_aes256gcm_state) ); } crypto_aead_aes256gcm_beforenm( static_cast<crypto_aead_aes256gcm_state*>( m_ctx ), static_cast<const unsigned char*>( pKey ) ); return true; } bool AES_GCM_EncryptContext::Encrypt( const void *pPlaintextData, size_t cbPlaintextData, const void *pIV, void *pEncryptedDataAndTag, uint32 *pcbEncryptedDataAndTag, const void *pAdditionalAuthenticationData, size_t cbAuthenticationData ) { // Make sure caller's buffer is big enough to hold the result. if ( cbPlaintextData + crypto_aead_aes256gcm_ABYTES > *pcbEncryptedDataAndTag ) { *pcbEncryptedDataAndTag = 0; return false; } unsigned long long cbEncryptedDataAndTag_longlong; crypto_aead_aes256gcm_encrypt_afternm( static_cast<unsigned char*>( pEncryptedDataAndTag ), &cbEncryptedDataAndTag_longlong, static_cast<const unsigned char*>( pPlaintextData ), cbPlaintextData, static_cast<const unsigned char*>(pAdditionalAuthenticationData), cbAuthenticationData, nullptr, static_cast<const unsigned char*>( pIV ), static_cast<const crypto_aead_aes256gcm_state*>( m_ctx ) ); *pcbEncryptedDataAndTag = cbEncryptedDataAndTag_longlong; return true; } bool AES_GCM_DecryptContext::Decrypt( const void *pEncryptedDataAndTag, size_t cbEncryptedDataAndTag, const void *pIV, void *pPlaintextData, uint32 *pcbPlaintextData, const void *pAdditionalAuthenticationData, size_t cbAuthenticationData ) { // Make sure caller's buffer is big enough to hold the result if ( cbEncryptedDataAndTag > *pcbPlaintextData + crypto_aead_aes256gcm_ABYTES ) { *pcbPlaintextData = 0; return false; } unsigned long long cbPlaintextData_longlong; const int nDecryptResult = crypto_aead_aes256gcm_decrypt_afternm( static_cast<unsigned char*>( pPlaintextData ), &cbPlaintextData_longlong, nullptr, static_cast<const unsigned char*>( pEncryptedDataAndTag ), cbEncryptedDataAndTag, static_cast<const unsigned char*>( pAdditionalAuthenticationData ), cbAuthenticationData, static_cast<const unsigned char*>( pIV ), static_cast<const crypto_aead_aes256gcm_state*>( m_ctx ) ); *pcbPlaintextData = cbPlaintextData_longlong; return nDecryptResult == 0; } void CCrypto::Init() { // sodium_init is safe to call multiple times from multiple threads // so no need to do anything clever here. 
if(sodium_init() < 0) { AssertMsg( false, "libsodium didn't init" ); } } void CCrypto::GenerateRandomBlock( void *pubDest, int cubDest ) { VPROF_BUDGET( "CCrypto::GenerateRandomBlock", VPROF_BUDGETGROUP_ENCRYPTION ); AssertFatal( cubDest >= 0 ); randombytes_buf( pubDest, cubDest ); } void CCrypto::GenerateSHA256Digest( const void *pData, size_t cbData, SHA256Digest_t *pOutputDigest ) { VPROF_BUDGET( "CCrypto::GenerateSHA256Digest", VPROF_BUDGETGROUP_ENCRYPTION ); Assert( pData ); Assert( pOutputDigest ); crypto_hash_sha256( *pOutputDigest, static_cast<const unsigned char*>(pData), cbData ); } void CCrypto::GenerateHMAC256( const uint8 *pubData, uint32 cubData, const uint8 *pubKey, uint32 cubKey, SHA256Digest_t *pOutputDigest ) { VPROF_BUDGET( "CCrypto::GenerateHMAC256", VPROF_BUDGETGROUP_ENCRYPTION ); Assert( pubData ); Assert( cubData > 0 ); Assert( pubKey ); Assert( cubKey > 0 ); Assert( pOutputDigest ); Assert( sizeof(*pOutputDigest) == crypto_auth_hmacsha256_BYTES ); Assert( cubKey == crypto_auth_hmacsha256_KEYBYTES ); crypto_auth_hmacsha256( *pOutputDigest, pubData, cubData, pubKey ); } #endif
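The fixed version above rejects undersized output buffers before calling libsodium: the caller passes its buffer capacity in *pcbEncryptedDataAndTag (or *pcbPlaintextData), and the wrapper fails cleanly instead of writing out of bounds. The following usage sketch shows that calling convention under stated assumptions: it assumes AES_GCM_EncryptContext inherits InitCipher from AES_GCM_CipherContext as in the file above, that uint32 is the usual 32-bit typedef, and that "crypto.h" is the header declaring these classes; the function name and buffer handling are illustrative only.

// Hedged usage sketch of the fixed Encrypt wrapper above.
#include "crypto.h"   // assumed: declares AES_GCM_EncryptContext
#include <sodium.h>
#include <cstddef>
#include <cstdint>

bool ExampleEncrypt(AES_GCM_EncryptContext &ctx,
                    const unsigned char key[crypto_aead_aes256gcm_KEYBYTES],
                    const unsigned char iv[crypto_aead_aes256gcm_NPUBBYTES],
                    const void *pMsg, size_t cbMsg,
                    void *pOut, uint32_t cbOutBuffer)
{
    if (!ctx.InitCipher(key, crypto_aead_aes256gcm_KEYBYTES,
                        crypto_aead_aes256gcm_NPUBBYTES,
                        crypto_aead_aes256gcm_ABYTES, /*bEncrypt=*/true))
        return false;

    // In: capacity of pOut.  Out: bytes actually written (cbMsg + tag) on success.
    // If cbOutBuffer < cbMsg + crypto_aead_aes256gcm_ABYTES, the fixed wrapper
    // returns false rather than overflowing the buffer.
    uint32_t cbOut = cbOutBuffer;
    return ctx.Encrypt(pMsg, cbMsg, iv, pOut, &cbOut,
                       /*pAdditionalAuthenticationData=*/nullptr,
                       /*cbAuthenticationData=*/0);
}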
null
234
CWE-787
CVE-2020-8935
/* * * Copyright 2019 Asylo authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "asylo/platform/host_call/trusted/host_calls.h" #include <errno.h> #include <ifaddrs.h> #include <net/if.h> #include <netdb.h> #include <signal.h> #include <sys/statfs.h> #include <algorithm> #include "asylo/platform/host_call/exit_handler_constants.h" #include "asylo/platform/host_call/serializer_functions.h" #include "asylo/platform/primitives/trusted_primitives.h" #include "asylo/platform/system_call/type_conversions/types_functions.h" using ::asylo::host_call::NonSystemCallDispatcher; using ::asylo::primitives::Extent; using ::asylo::primitives::MessageReader; using ::asylo::primitives::MessageWriter; using ::asylo::primitives::TrustedPrimitives; void CheckStatusAndParamCount(const asylo::primitives::PrimitiveStatus &status, const MessageReader &output, const char *name, int expected_params, bool match_exact_params) { if (!status.ok()) { std::string message = absl::StrCat("Host call '", name, "' failed."); TrustedPrimitives::BestEffortAbort(message.c_str()); } if (!match_exact_params) { if (output.size() < expected_params) { std::string message = absl::StrCat( "Host call '", name, "': Expected at least ", expected_params, " parameters on the MessageReader, found ", output.size()); TrustedPrimitives::BestEffortAbort(message.c_str()); } } else { if (output.size() != expected_params) { std::string message = absl::StrCat( "Host call '", name, "': Expected ", expected_params, " parameters on the MessageReader, found ", output.size()); TrustedPrimitives::BestEffortAbort(message.c_str()); } } } namespace { // A global passwd struct. The address of it is used as the return value of // getpwuid. struct passwd global_passwd; size_t CalculateTotalMessageSize(const struct msghdr *msg) { size_t total_message_size = 0; for (int i = 0; i < msg->msg_iovlen; ++i) { total_message_size += msg->msg_iov[i].iov_len; } return total_message_size; } #define PASSWD_HOLDER_FIELD_LENGTH 1024 // Struct for storing the buffers needed by struct passwd members. 
struct passwd_holder { char pw_name[PASSWD_HOLDER_FIELD_LENGTH]; char pw_passwd[PASSWD_HOLDER_FIELD_LENGTH]; uid_t pw_uid; gid_t pw_gid; char pw_gecos[PASSWD_HOLDER_FIELD_LENGTH]; char pw_dir[PASSWD_HOLDER_FIELD_LENGTH]; char pw_shell[PASSWD_HOLDER_FIELD_LENGTH]; }; bool DeserializePasswd(MessageReader *reader, struct passwd_holder *passwd_buffers) { if (!reader || !passwd_buffers) { return false; } if (reader->size() < 7) { return false; } auto pw_name_buf = reader->next(); auto pw_passwd_buf = reader->next(); auto pw_uid = reader->next<uid_t>(); auto pw_gid = reader->next<gid_t>(); auto pw_gecos_buf = reader->next(); auto pw_dir_buf = reader->next(); auto pw_shell_buf = reader->next(); strncpy(passwd_buffers->pw_name, pw_name_buf.As<char>(), std::min(sizeof(passwd_buffers->pw_name), pw_name_buf.size())); strncpy(passwd_buffers->pw_passwd, pw_passwd_buf.As<char>(), std::min(sizeof(passwd_buffers->pw_passwd), pw_passwd_buf.size())); passwd_buffers->pw_uid = pw_uid; passwd_buffers->pw_gid = pw_gid; strncpy(passwd_buffers->pw_gecos, pw_gecos_buf.As<char>(), std::min(sizeof(passwd_buffers->pw_gecos), pw_gecos_buf.size())); strncpy(passwd_buffers->pw_dir, pw_dir_buf.As<char>(), std::min(sizeof(passwd_buffers->pw_dir), pw_dir_buf.size())); strncpy(passwd_buffers->pw_shell, pw_shell_buf.As<char>(), std::min(sizeof(passwd_buffers->pw_shell), pw_shell_buf.size())); return true; } bool PasswdHolderToPasswd(struct passwd_holder *passwd_in, struct passwd *passwd_out) { if (!passwd_in || !passwd_out) { return false; } passwd_out->pw_name = passwd_in->pw_name; passwd_out->pw_passwd = passwd_in->pw_passwd; passwd_out->pw_uid = passwd_in->pw_uid; passwd_out->pw_gid = passwd_in->pw_gid; passwd_out->pw_gecos = passwd_in->pw_gecos; passwd_out->pw_dir = passwd_in->pw_dir; passwd_out->pw_shell = passwd_in->pw_shell; return true; } } // namespace extern "C" { int enc_untrusted_access(const char *path_name, int mode) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_access, path_name, mode); } pid_t enc_untrusted_getpid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getpid); } pid_t enc_untrusted_getppid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getppid); } pid_t enc_untrusted_setsid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_setsid); } uid_t enc_untrusted_getuid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getuid); } gid_t enc_untrusted_getgid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getgid); } uid_t enc_untrusted_geteuid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_geteuid); } gid_t enc_untrusted_getegid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getegid); } int enc_untrusted_kill(pid_t pid, int sig) { int klinux_sig = TokLinuxSignalNumber(sig); if (klinux_sig < 0) { errno = EINVAL; return -1; } return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_kill, pid, klinux_sig); } int enc_untrusted_link(const char *oldpath, const char *newpath) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_link, oldpath, newpath); } off_t enc_untrusted_lseek(int fd, off_t offset, int whence) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_lseek, fd, offset, whence); } int enc_untrusted_mkdir(const char *pathname, mode_t mode) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_mkdir, pathname, mode); } int enc_untrusted_open(const char 
*pathname, int flags, ...) { int mode = 0; if (flags & O_CREAT) { va_list ap; va_start(ap, flags); mode = va_arg(ap, mode_t); va_end(ap); } return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_open, pathname, TokLinuxFileStatusFlag(flags), TokLinuxFileModeFlag(mode)); } int enc_untrusted_unlink(const char *pathname) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_unlink, pathname); } int enc_untrusted_rename(const char *oldpath, const char *newpath) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_rename, oldpath, newpath); } ssize_t enc_untrusted_read(int fd, void *buf, size_t count) { return static_cast<ssize_t>(EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_read, fd, buf, count)); } ssize_t enc_untrusted_write(int fd, const void *buf, size_t count) { return static_cast<ssize_t>(EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_write, fd, buf, count)); } int enc_untrusted_symlink(const char *target, const char *linkpath) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_symlink, target, linkpath); } ssize_t enc_untrusted_readlink(const char *pathname, char *buf, size_t bufsiz) { return static_cast<ssize_t>(EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_readlink, pathname, buf, bufsiz)); } int enc_untrusted_truncate(const char *path, off_t length) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_truncate, path, length); } int enc_untrusted_ftruncate(int fd, off_t length) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_ftruncate, fd, length); } int enc_untrusted_rmdir(const char *path) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_rmdir, path); } int enc_untrusted_pipe2(int pipefd[2], int flags) { if (flags & ~(O_CLOEXEC | O_DIRECT | O_NONBLOCK)) { errno = EINVAL; return -1; } return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_pipe2, pipefd, TokLinuxFileStatusFlag(flags)); } int enc_untrusted_socket(int domain, int type, int protocol) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_socket, TokLinuxAfFamily(domain), TokLinuxSocketType(type), protocol); } int enc_untrusted_listen(int sockfd, int backlog) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_listen, sockfd, backlog); } int enc_untrusted_shutdown(int sockfd, int how) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_shutdown, sockfd, how); } ssize_t enc_untrusted_send(int sockfd, const void *buf, size_t len, int flags) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_sendto, sockfd, buf, len, flags, /*dest_addr=*/nullptr, /*addrlen=*/0); } int enc_untrusted_fcntl(int fd, int cmd, ... /* arg */) { // We do not currently support file locks in Asylo, so arg is not expected to // be a pointer to struct flock. 
int64_t arg = 0; va_list ap; va_start(ap, cmd); arg = va_arg(ap, int64_t); va_end(ap); int klinux_cmd = TokLinuxFcntlCommand(cmd); if (klinux_cmd == -1) { errno = EINVAL; return -1; } int intarg = arg; switch (cmd) { case F_SETFL: { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_fcntl, fd, klinux_cmd, TokLinuxFileStatusFlag(intarg)); } case F_SETFD: { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fcntl, fd, klinux_cmd, TokLinuxFDFlag(intarg)); } case F_GETFL: { int retval = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_fcntl, fd, klinux_cmd, arg); if (retval != -1) { retval = FromkLinuxFileStatusFlag(retval); } return retval; } case F_GETFD: { int retval = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_fcntl, fd, klinux_cmd, arg); if (retval != -1) { retval = FromkLinuxFDFlag(retval); } return retval; } case F_GETPIPE_SZ: case F_SETPIPE_SZ: { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fcntl, fd, klinux_cmd, arg); } // We do not handle the case for F_DUPFD. It is expected to be handled at // a higher abstraction, as we need not exit the enclave for duplicating // the file descriptor. default: { errno = EINVAL; return -1; } } } int enc_untrusted_chown(const char *pathname, uid_t owner, gid_t group) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_chown, pathname, owner, group); } int enc_untrusted_fchown(int fd, uid_t owner, gid_t group) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fchown, fd, owner, group); } int enc_untrusted_setsockopt(int sockfd, int level, int optname, const void *optval, socklen_t optlen) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_setsockopt, sockfd, level, TokLinuxOptionName(level, optname), optval, optlen); } int enc_untrusted_flock(int fd, int operation) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_flock, fd, TokLinuxFLockOperation(operation)); } int enc_untrusted_wait(int *wstatus) { int klinux_wstatus; pid_t ret = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_wait4, /*wpid=*/-1, &klinux_wstatus, /*options=*/0, /*rusage=*/nullptr); *wstatus = FromkLinuxToNewlibWstatus(klinux_wstatus); return ret; } int enc_untrusted_inotify_init1(int flags) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_inotify_init1, TokLinuxInotifyFlag(flags)); } int enc_untrusted_inotify_add_watch(int fd, const char *pathname, uint32_t mask) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_inotify_add_watch, fd, pathname, TokLinuxInotifyEventMask(mask)); } int enc_untrusted_inotify_rm_watch(int fd, int wd) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_inotify_rm_watch, fd, wd); } mode_t enc_untrusted_umask(mode_t mask) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_umask, mask); } int enc_untrusted_chmod(const char *path_name, mode_t mode) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_chmod, path_name, mode); } int enc_untrusted_fchmod(int fd, mode_t mode) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fchmod, fd, mode); } int enc_untrusted_sched_yield() { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_sched_yield); } int enc_untrusted_sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask) { klinux_cpu_set_t klinux_mask{}; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_sched_getaffinity, 
pid, static_cast<uint64_t>(cpusetsize), &klinux_mask); // On success, the raw getaffinity syscall returns the size of the cpumask_t // data type, To mimic the glibc behavior, we return 0 on success and -1 on // failure. See https://linux.die.net/man/2/sched_getaffinity, under "notes". if (result < 0) { return -1; } if (!FromkLinuxCpuSet(&klinux_mask, mask)) { errno = EFAULT; return -1; } return 0; } int enc_untrusted_pread64(int fd, void *buf, size_t count, off_t offset) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_pread64, fd, buf, count, offset); } int enc_untrusted_pwrite64(int fd, const void *buf, size_t count, off_t offset) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_pwrite64, fd, buf, count, offset); } int enc_untrusted_isatty(int fd) { MessageWriter input; input.Push(fd); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kIsAttyHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_isatty", 2); int result = output.next<int>(); // isatty() returns 1 if fd is an open file descriptor referring to a // terminal; otherwise 0 is returned, and errno is set to indicate the error. if (result == 0) { int klinux_errno = output.next<int>(); errno = FromkLinuxErrorNumber(klinux_errno); } return result; } int enc_untrusted_usleep(useconds_t usec) { MessageWriter input; input.Push(usec); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher( asylo::host_call::kUSleepHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_usleep", 2); int result = output.next<int>(); int klinux_errno = output.next<int>(); // usleep() returns 0 on success. On error, -1 is returned, with errno set to // indicate the cause of the error. 
if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } int enc_untrusted_fstat(int fd, struct stat *statbuf) { struct klinux_stat stat_kernel; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_fstat, fd, &stat_kernel); if (FromkLinuxStat(&stat_kernel, statbuf)) { statbuf->st_mode = FromkLinuxFileModeFlag(stat_kernel.klinux_st_mode); } return result; } int enc_untrusted_fstatfs(int fd, struct statfs *statbuf) { struct klinux_statfs statfs_kernel; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_fstatfs, fd, &statfs_kernel); if (FromkLinuxStatFs(&statfs_kernel, statbuf)) { statbuf->f_flags = FromkLinuxStatFsFlags(statfs_kernel.klinux_f_flags); } return result; } int enc_untrusted_lstat(const char *pathname, struct stat *statbuf) { struct klinux_stat stat_kernel; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_lstat, pathname, &stat_kernel); if (FromkLinuxStat(&stat_kernel, statbuf)) { statbuf->st_mode = FromkLinuxFileModeFlag(stat_kernel.klinux_st_mode); } return result; } int enc_untrusted_stat(const char *pathname, struct stat *statbuf) { struct klinux_stat stat_kernel; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_stat, pathname, &stat_kernel); if (FromkLinuxStat(&stat_kernel, statbuf)) { statbuf->st_mode = FromkLinuxFileModeFlag(stat_kernel.klinux_st_mode); } return result; } int enc_untrusted_statfs(const char *pathname, struct statfs *statbuf) { struct klinux_statfs statfs_kernel; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_statfs, pathname, &statfs_kernel); if (FromkLinuxStatFs(&statfs_kernel, statbuf)) { statbuf->f_flags = FromkLinuxStatFsFlags(statfs_kernel.klinux_f_flags); } return result; } ssize_t enc_untrusted_getxattr(const char *path, const char *name, void *value, size_t size) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getxattr, path, name, value, size); } ssize_t enc_untrusted_lgetxattr(const char *path, const char *name, void *value, size_t size) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_lgetxattr, path, name, value, size); } ssize_t enc_untrusted_fgetxattr(int fd, const char *name, void *value, size_t size) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fgetxattr, fd, name, value, size); } int enc_untrusted_setxattr(const char *path, const char *name, const void *value, size_t size, int flags) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_setxattr, path, name, value, size, flags); } int enc_untrusted_lsetxattr(const char *path, const char *name, const void *value, size_t size, int flags) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_lsetxattr, path, name, value, size, flags); } int enc_untrusted_fsetxattr(int fd, const char *name, const void *value, size_t size, int flags) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fsetxattr, fd, name, value, size, flags); } ssize_t enc_untrusted_listxattr(const char *path, char *list, size_t size) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_listxattr, path, list, size); } ssize_t enc_untrusted_llistxattr(const char *path, char *list, size_t size) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_llistxattr, path, list, size); } ssize_t enc_untrusted_flistxattr(int fd, char *list, size_t size) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_flistxattr, fd, 
list, size); } int64_t enc_untrusted_sysconf(int name) { int kLinux_name = TokLinuxSysconfConstant(name); if (kLinux_name == -1) { errno = EINVAL; return -1; } MessageWriter input; input.Push(kLinux_name); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher( asylo::host_call::kSysconfHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_sysconf", 2); int64_t result = output.next<int>(); int klinux_errno = output.next<int>(); if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } int enc_untrusted_close(int fd) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_close, fd); } void *enc_untrusted_realloc(void *ptr, size_t size) { MessageWriter input; input.Push(reinterpret_cast<uint64_t>(ptr)); input.Push(static_cast<uint64_t>(size)); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher( asylo::host_call::kReallocHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_realloc", 2); void *result = output.next<void *>(); int klinux_errno = output.next<int>(); // realloc only sets the errno (ENOMEM) when output pointer is null and a // non-zero |size| is provided. if (!result && size != 0) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } uint32_t enc_untrusted_sleep(uint32_t seconds) { MessageWriter input; input.Push<uint32_t>(seconds); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher(asylo::host_call::kSleepHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_sleep", 2); // Returns sleep's return value directly since it doesn't set errno. return output.next<uint32_t>(); } int enc_untrusted_nanosleep(const struct timespec *req, struct timespec *rem) { struct kLinux_timespec klinux_req; if (!TokLinuxtimespec(req, &klinux_req)) { errno = EINVAL; return -1; } struct kLinux_timespec klinux_rem; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_nanosleep, &klinux_req, &klinux_rem); FromkLinuxtimespec(&klinux_rem, rem); return result; } int enc_untrusted_clock_gettime(clockid_t clk_id, struct timespec *tp) { clockid_t klinux_clk_id = TokLinuxClockId(clk_id); if (klinux_clk_id == -1) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int64_t>(klinux_clk_id); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher( asylo::host_call::kClockGettimeHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_clock_gettime", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); struct kLinux_timespec klinux_tp = output.next<struct kLinux_timespec>(); // clock_gettime returns -1 on error and sets the errno. if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return -1; } FromkLinuxtimespec(&klinux_tp, tp); return result; } int enc_untrusted_clock_getcpuclockid(pid_t pid, clockid_t *clock_id) { MessageWriter input; input.Push<uint32_t>(pid); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher( asylo::host_call::kGetCpuClockIdHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getcpuclockid", 2); // clock_getcpuclockid returns an errno value directly, without setting errno. // The value must still be translated in order to be interpreted. 
int klinux_errno_result = output.next<int32_t>(); if (klinux_errno_result != 0) { return FromkLinuxErrorNumber(klinux_errno_result); } clockid_t klinux_clk_id = output.next<uint64_t>(); *clock_id = FromkLinuxClockId(klinux_clk_id); return 0; } int enc_untrusted_bind(int sockfd, const struct sockaddr *addr, socklen_t addrlen) { socklen_t klinux_sock_len = std::max(std::max(sizeof(klinux_sockaddr_un), sizeof(klinux_sockaddr_in)), sizeof(klinux_sockaddr_in6)); auto klinux_sock = absl::make_unique<char[]>(klinux_sock_len); if (!TokLinuxSockAddr(addr, addrlen, reinterpret_cast<klinux_sockaddr *>(klinux_sock.get()), &klinux_sock_len, TrustedPrimitives::BestEffortAbort)) { errno = EINVAL; return -1; } return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_bind, sockfd, klinux_sock.get(), klinux_sock_len); } int enc_untrusted_connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen) { socklen_t klinux_sock_len = std::max(std::max(sizeof(klinux_sockaddr_un), sizeof(klinux_sockaddr_in)), sizeof(klinux_sockaddr_in6)); auto klinux_sock = absl::make_unique<char[]>(klinux_sock_len); if (!TokLinuxSockAddr(addr, addrlen, reinterpret_cast<klinux_sockaddr *>(klinux_sock.get()), &klinux_sock_len, TrustedPrimitives::BestEffortAbort)) { errno = EINVAL; return -1; } return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_connect, sockfd, klinux_sock.get(), klinux_sock_len); } ssize_t enc_untrusted_sendmsg(int sockfd, const struct msghdr *msg, int flags) { size_t total_message_size = CalculateTotalMessageSize(msg); std::unique_ptr<char[]> msg_iov_buffer(new char[total_message_size]); size_t copied_bytes = 0; for (int i = 0; i < msg->msg_iovlen; ++i) { memcpy(msg_iov_buffer.get() + copied_bytes, msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len); copied_bytes += msg->msg_iov[i].iov_len; } MessageWriter input; input.Push(sockfd); input.PushByReference(Extent{msg->msg_name, msg->msg_namelen}); input.PushByReference(Extent{msg_iov_buffer.get(), total_message_size}); input.PushByReference(Extent{msg->msg_control, msg->msg_controllen}); input.Push(msg->msg_flags); input.Push(flags); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kSendMsgHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_sendmsg", 2); ssize_t result = output.next<ssize_t>(); int klinux_errno = output.next<int>(); // sendmsg() returns the number of characters sent. On error, -1 is returned, // with errno set to indicate the cause of the error. if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } ssize_t enc_untrusted_recvmsg(int sockfd, struct msghdr *msg, int flags) { size_t total_buffer_size = CalculateTotalMessageSize(msg); MessageWriter input; input.Push(sockfd); input.Push<uint64_t>(msg->msg_namelen); input.Push<uint64_t>(total_buffer_size); input.Push<uint64_t>(msg->msg_controllen); input.Push(msg->msg_flags); input.Push(flags); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kRecvMsgHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_recvmsg", 2, /*match_exact_params=*/false); ssize_t result = output.next<ssize_t>(); int klinux_errno = output.next<int>(); // recvmsg() returns the number of characters received. On error, -1 is // returned, with errno set to indicate the cause of the error. 
if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } if (result > total_buffer_size) { ::asylo::primitives::TrustedPrimitives::BestEffortAbort( "enc_untrusted_recvmsg: result exceeds requested"); } auto msg_name_extent = output.next(); // The returned |msg_namelen| should not exceed the buffer size. if (msg_name_extent.size() <= msg->msg_namelen) { msg->msg_namelen = msg_name_extent.size(); } memcpy(msg->msg_name, msg_name_extent.As<char>(), msg->msg_namelen); // A single buffer is passed from the untrusted side, copy it into the // scattered buffers inside the enclave. auto msg_iov_extent = output.next(); size_t total_bytes = msg_iov_extent.size(); size_t bytes_copied = 0; for (int i = 0; i < msg->msg_iovlen && bytes_copied < total_bytes; ++i) { size_t bytes_to_copy = std::min(msg->msg_iov[i].iov_len, total_bytes - bytes_copied); memcpy(msg->msg_iov[i].iov_base, msg_iov_extent.As<char>() + bytes_copied, bytes_to_copy); bytes_copied += bytes_to_copy; } auto msg_control_extent = output.next(); // The returned |msg_controllen| should not exceed the buffer size. if (msg_control_extent.size() <= msg->msg_controllen) { msg->msg_controllen = msg_control_extent.size(); } memcpy(msg->msg_control, msg_control_extent.As<char>(), msg->msg_controllen); return result; } int enc_untrusted_getsockname(int sockfd, struct sockaddr *addr, socklen_t *addrlen) { if (!addr || !addrlen) { errno = EFAULT; return -1; } // Guard against -1 being passed as addrlen even though it's unsigned. if (*addrlen == 0 || *addrlen > INT32_MAX) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(sockfd); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetSocknameHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getsockname", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); // getsockname() returns 0 on success. On error, -1 is returned, with errno // set to indicate the cause of the error. if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } auto klinux_sockaddr_buf = output.next(); const struct klinux_sockaddr *klinux_addr = klinux_sockaddr_buf.As<struct klinux_sockaddr>(); if (!FromkLinuxSockAddr(klinux_addr, klinux_sockaddr_buf.size(), addr, addrlen, TrustedPrimitives::BestEffortAbort)) { errno = EFAULT; return -1; } return result; } int enc_untrusted_accept(int sockfd, struct sockaddr *addr, socklen_t *addrlen) { MessageWriter input; input.Push<int>(sockfd); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kAcceptHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_accept", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); // accept() returns -1 on failure, with errno set to indicate the cause // of the error. if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } auto klinux_sockaddr_buf = output.next(); const struct klinux_sockaddr *klinux_addr = klinux_sockaddr_buf.As<struct klinux_sockaddr>(); FromkLinuxSockAddr(klinux_addr, klinux_sockaddr_buf.size(), addr, addrlen, TrustedPrimitives::BestEffortAbort); return result; } int enc_untrusted_getpeername(int sockfd, struct sockaddr *addr, socklen_t *addrlen) { if (!addr || !addrlen) { errno = EFAULT; return -1; } // Guard against -1 being passed as addrlen even though it's unsigned. 
if (*addrlen == 0 || *addrlen > INT32_MAX) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(sockfd); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetPeernameHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getpeername", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); // getpeername() returns -1 on failure, with errno set to indicate the cause // of the error. if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } auto klinux_sockaddr_buf = output.next(); const struct klinux_sockaddr *klinux_addr = klinux_sockaddr_buf.As<struct klinux_sockaddr>(); FromkLinuxSockAddr(klinux_addr, klinux_sockaddr_buf.size(), addr, addrlen, TrustedPrimitives::BestEffortAbort); return result; } ssize_t enc_untrusted_recvfrom(int sockfd, void *buf, size_t len, int flags, struct sockaddr *src_addr, socklen_t *addrlen) { int klinux_flags = TokLinuxRecvSendFlag(flags); if (klinux_flags == 0 && flags != 0) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(sockfd); input.Push<uint64_t>(len); input.Push<int>(klinux_flags); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kRecvFromHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_recvfrom", 4); int result = output.next<int>(); int klinux_errno = output.next<int>(); // recvfrom() returns -1 on failure, with errno set to indicate the cause // of the error. if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } if (result > len) { ::asylo::primitives::TrustedPrimitives::BestEffortAbort( "enc_untrusted_recvfrom: result exceeds requested"); } auto buffer_received = output.next(); memcpy(buf, buffer_received.data(), std::min(len, buffer_received.size())); // If |src_addr| is not NULL, and the underlying protocol provides the source // address, this source address is filled in. When |src_addr| is NULL, nothing // is filled in; in this case, |addrlen| is not used, and should also be NULL. if (src_addr != nullptr && addrlen != nullptr) { auto klinux_sockaddr_buf = output.next(); const struct klinux_sockaddr *klinux_addr = klinux_sockaddr_buf.As<struct klinux_sockaddr>(); FromkLinuxSockAddr(klinux_addr, klinux_sockaddr_buf.size(), src_addr, addrlen, TrustedPrimitives::BestEffortAbort); } return result; } int enc_untrusted_select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, struct timeval *timeout) { struct klinux_fd_set klinux_readfds, klinux_writefds, klinux_exceptfds; struct kLinux_timeval klinux_timeout; TokLinuxFdSet(readfds, &klinux_readfds); TokLinuxFdSet(writefds, &klinux_writefds); TokLinuxFdSet(exceptfds, &klinux_exceptfds); TokLinuxtimeval(timeout, &klinux_timeout); int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_select, nfds, &klinux_readfds, &klinux_writefds, &klinux_exceptfds, &klinux_timeout); FromkLinuxFdSet(&klinux_readfds, readfds); FromkLinuxFdSet(&klinux_writefds, writefds); FromkLinuxFdSet(&klinux_exceptfds, exceptfds); return result; } int enc_untrusted_gettimeofday(struct timeval *tv, struct timezone *tz) { struct kLinux_timeval ktv; TokLinuxtimeval(tv, &ktv); // We do not convert timezone to a klinux value since this struct is expected // to be identical across enclave boundary. Besides, the use of the timezone // structure is obsolete; the tz argument should normally be specified as // NULL. 
int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_gettimeofday, &ktv, tz); FromkLinuxtimeval(&ktv, tv); return result; } int enc_untrusted_fsync(int fd) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fsync, fd); } int enc_untrusted_raise(int sig) { int klinux_sig = TokLinuxSignalNumber(sig); if (klinux_sig < 0) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(klinux_sig); MessageReader output; const auto status = NonSystemCallDispatcher(::asylo::host_call::kRaiseHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_raise", 2); int result = output.next<int>(); int klinux_errno = output.next<int>(); if (result != 0) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } int enc_untrusted_getsockopt(int sockfd, int level, int optname, void *optval, socklen_t *optlen) { if (!optval || !optlen || *optlen == 0) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(sockfd); input.Push<int>(level); input.Push<int>(TokLinuxOptionName(level, optname)); input.PushByReference(Extent{reinterpret_cast<char *>(optval), *optlen}); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetSockOptHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getsockopt", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); Extent opt_received = output.next(); if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return -1; } // The returned |optlen| should not exceed the buffer size. if (opt_received.size() <= *optlen) { *optlen = opt_received.size(); } memcpy(optval, opt_received.data(), *optlen); return result; } int enc_untrusted_getitimer(int which, struct itimerval *curr_value) { struct klinux_itimerval klinux_curr_value {}; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_getitimer, TokLinuxItimerType(which), &klinux_curr_value); if (!curr_value || !FromkLinuxItimerval(&klinux_curr_value, curr_value)) { errno = EFAULT; return -1; } return result; } int enc_untrusted_setitimer(int which, const struct itimerval *new_value, struct itimerval *old_value) { struct klinux_itimerval klinux_new_value {}; struct klinux_itimerval klinux_old_value {}; if (!TokLinuxItimerval(new_value, &klinux_new_value)) { errno = EFAULT; return -1; } int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_setitimer, TokLinuxItimerType(which), &klinux_new_value, &klinux_old_value); if (old_value != nullptr && !FromkLinuxItimerval(&klinux_old_value, old_value)) { errno = EFAULT; return -1; } return result; } clock_t enc_untrusted_times(struct tms *buf) { struct kLinux_tms klinux_buf {}; int64_t result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_times, &klinux_buf); if (!FromkLinuxtms(&klinux_buf, buf)) { errno = EFAULT; return -1; } return static_cast<clock_t>(result); } int enc_untrusted_getaddrinfo(const char *node, const char *service, const struct addrinfo *hints, struct addrinfo **res) { MessageWriter input; input.PushByReference(Extent{node, (node != nullptr) ? strlen(node) + 1 : 0}); input.PushByReference( Extent{service, (service != nullptr) ? 
strlen(service) + 1 : 0}); if (hints != nullptr) { input.Push<int>(TokLinuxAddressInfoFlag(hints->ai_flags)); input.Push<int>(TokLinuxAfFamily(hints->ai_family)); input.Push<int>(TokLinuxSocketType(hints->ai_socktype)); input.Push<int>(hints->ai_protocol); } MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetAddrInfoHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getaddrinfo", 3, /*match_exact_params=*/false); int klinux_ret = output.next<int>(); int klinux_errno = output.next<int>(); int ret = FromkLinuxAddressInfoError(klinux_ret); if (ret != 0) { if (ret == EAI_SYSTEM) { errno = FromkLinuxErrorNumber(klinux_errno); } return ret; } if (!asylo::host_call::DeserializeAddrinfo( &output, res, TrustedPrimitives::BestEffortAbort)) { TrustedPrimitives::DebugPuts( "enc_untrusted_getaddrinfo: Invalid addrinfo in response."); return -1; } return 0; } void enc_freeaddrinfo(struct addrinfo *res) { struct addrinfo *prev_info = nullptr; for (struct addrinfo *info = res; info != nullptr; info = info->ai_next) { if (prev_info) free(prev_info); if (info->ai_addr) free(info->ai_addr); if (info->ai_canonname) free(info->ai_canonname); prev_info = info; } if (prev_info) free(prev_info); } int enc_untrusted_poll(struct pollfd *fds, nfds_t nfds, int timeout) { auto klinux_fds = absl::make_unique<struct klinux_pollfd[]>(nfds); for (int i = 0; i < nfds; ++i) { if (!TokLinuxPollfd(&fds[i], &klinux_fds[i])) { errno = EFAULT; return -1; } } int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_poll, klinux_fds.get(), static_cast<uint64_t>(nfds), timeout); if (result < 0) { return result; } for (int i = 0; i < nfds; ++i) { if (!FromkLinuxPollfd(&klinux_fds[i], &fds[i])) { errno = EFAULT; return -1; } } return result; } int enc_untrusted_epoll_create(int size) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_epoll_create, size); } int enc_untrusted_utimes(const char *filename, const struct timeval times[2]) { struct kLinux_timeval klinux_times[2]; if (!TokLinuxtimeval(&times[0], &klinux_times[0]) || !TokLinuxtimeval(&times[1], &klinux_times[1])) { errno = EBADE; return -1; } return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_utimes, filename, klinux_times); } int enc_untrusted_utime(const char *filename, const struct utimbuf *times) { struct kLinux_utimbuf klinux_times {}; // We do not check the return value of the conversion function since utimbuf // is allowed to be null. 
TokLinuxutimbuf(times, &klinux_times); return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_utime, filename, &klinux_times); } int enc_untrusted_inet_pton(int af, const char *src, void *dst) { if (!src || !dst) { return 0; } MessageWriter input; input.Push<int>(TokLinuxAfFamily(af)); input.PushByReference(Extent{ src, std::min(strlen(src) + 1, static_cast<size_t>(INET6_ADDRSTRLEN))}); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kInetPtonHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_inet_pton", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return -1; } auto klinux_addr_buffer = output.next(); size_t max_size = 0; if (af == AF_INET) { max_size = sizeof(struct in_addr); } else if (af == AF_INET6) { max_size = sizeof(struct in6_addr); } memcpy(dst, klinux_addr_buffer.data(), std::min(klinux_addr_buffer.size(), max_size)); return result; } const char *enc_untrusted_inet_ntop(int af, const void *src, char *dst, socklen_t size) { if (!src || !dst) { errno = EFAULT; return nullptr; } size_t src_size = 0; if (af == AF_INET) { src_size = sizeof(struct in_addr); } else if (af == AF_INET6) { src_size = sizeof(struct in6_addr); } else { errno = EAFNOSUPPORT; return nullptr; } MessageWriter input; input.Push<int>(TokLinuxAfFamily(af)); input.PushByReference(Extent{reinterpret_cast<const char *>(src), src_size}); input.Push(size); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kInetNtopHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_inet_ntop", 2); auto result = output.next(); int klinux_errno = output.next<int>(); if (result.empty()) { errno = FromkLinuxErrorNumber(klinux_errno); return nullptr; } memcpy(dst, result.data(), std::min(static_cast<size_t>(size), static_cast<size_t>(INET6_ADDRSTRLEN))); return dst; } int enc_untrusted_sigprocmask(int how, const sigset_t *set, sigset_t *oldset) { klinux_sigset_t klinux_set; if (!TokLinuxSigset(set, &klinux_set)) { errno = EINVAL; return -1; } int klinux_how = TokLinuxSigMaskAction(how); if (klinux_how == -1) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(klinux_how); input.Push<klinux_sigset_t>(klinux_set); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kSigprocmaskHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_sigprocmask", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); // sigprocmask() returns -1 on failure, with errno set to indicate the cause // of the error. 
if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } klinux_sigset_t klinux_oldset = output.next<klinux_sigset_t>(); if (oldset != nullptr) { if (!FromkLinuxSigset(&klinux_oldset, oldset)) { errno = EINVAL; return -1; } } return result; } unsigned int enc_untrusted_if_nametoindex(const char *ifname) { MessageWriter input; input.PushString(ifname); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kIfNameToIndexHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_nametoindex", 2); auto result = output.next<unsigned int>(); int klinux_errno = output.next<int>(); if (result == 0) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } char *enc_untrusted_if_indextoname(unsigned int ifindex, char *ifname) { if (!ifname) { return nullptr; } MessageWriter input; input.Push(ifindex); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kIfIndexToNameHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_indextoname", 2); Extent ifname_buffer = output.next(); memcpy(ifname, ifname_buffer.As<char>(), std::min(ifname_buffer.size(), static_cast<size_t>(IF_NAMESIZE))); int klinux_errno = output.next<int>(); if (ifname_buffer.empty()) { errno = FromkLinuxErrorNumber(klinux_errno); } return ifname; } int enc_untrusted_epoll_ctl(int epfd, int op, int fd, struct epoll_event *event) { struct klinux_epoll_event klinux_event_tmp {}; if (event != nullptr && !TokLinuxEpollEvent(event, &klinux_event_tmp)) { errno = EINVAL; return -1; } int klinux_op = TokLinuxEpollCtlOp(op); if (klinux_op == 0) { errno = EINVAL; return -1; } struct klinux_epoll_event *klinux_event = (event != nullptr) ? &klinux_event_tmp : nullptr; return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_epoll_ctl, epfd, klinux_op, fd, klinux_event); } int enc_untrusted_epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout) { if (maxevents <= 0) { errno = EINVAL; return -1; } auto klinux_events = absl::make_unique<struct klinux_epoll_event[]>(maxevents); int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_epoll_wait, epfd, klinux_events.get(), maxevents, timeout); // Only process epoll events if syscall was successful. if (result == -1) { // errno is already set by the system_call library at this point for a // return value of -1. 
return result; } if (result > maxevents) { TrustedPrimitives::BestEffortAbort( "enc_untrusted_epoll_wait: result found to be greater than maxevents " "supplied."); } for (int i = 0; i < result; i++) { if (!FromkLinuxEpollEvent(&klinux_events.get()[i], &events[i])) { errno = EBADE; return -1; } } return result; } int enc_untrusted_getifaddrs(struct ifaddrs **ifap) { MessageWriter input; MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetIfAddrsHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getifaddrs", 3, /*match_exact_params=*/false); int result = output.next<int>(); int klinux_errno = output.next<int>(); if (result != 0) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } if (!asylo::host_call::DeserializeIfAddrs( &output, ifap, TrustedPrimitives::BestEffortAbort)) { TrustedPrimitives::DebugPuts( "enc_untrusted_getifaddrs: Invalid ifaddrs in response."); return -1; } return 0; } void enc_freeifaddrs(struct ifaddrs *ifa) { asylo::host_call::FreeDeserializedIfAddrs(ifa); } int enc_untrusted_getrusage(int who, struct rusage *usage) { struct klinux_rusage klinux_usage {}; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_getrusage, TokLinuxRusageTarget(who), &klinux_usage); if (result != -1) { if (!FromkLinuxRusage(&klinux_usage, usage)) { errno = EINVAL; return -1; } } return result; } pid_t enc_untrusted_wait3(int *status, int options, struct rusage *rusage) { int klinux_status; struct klinux_rusage klinux_usage; pid_t result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_wait4, /*pid=*/-1, &klinux_status, TokLinuxWaitOption(options), &klinux_usage); if (status) { *status = FromkLinuxToNewlibWstatus(klinux_status); } if (rusage) { if (!FromkLinuxRusage(&klinux_usage, rusage)) { errno = EINVAL; return -1; } } return result; } pid_t enc_untrusted_waitpid(pid_t pid, int *status, int options) { int klinux_status; pid_t result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_wait4, pid, &klinux_status, TokLinuxWaitOption(options), /*rusage=*/nullptr); if (status) { *status = FromkLinuxToNewlibWstatus(klinux_status); } return result; } int enc_untrusted_uname(struct utsname *buf) { struct klinux_utsname klinux_buf {}; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_uname, &klinux_buf); if (result != 0) { return result; } if (!FromkLinuxUtsName(&klinux_buf, buf)) { TrustedPrimitives::BestEffortAbort( "enc_untrusted_uname: Returned an ill-formed utsname."); } return 0; } struct passwd *enc_untrusted_getpwuid(uid_t uid) { MessageWriter input; MessageReader output; input.Push<uid_t>(uid); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetPwUidHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getpwuid", 1, /*match_exact_params=*/false); int klinux_errno = output.next<int>(); if (output.size() == 1) { errno = FromkLinuxErrorNumber(klinux_errno); return nullptr; } // Store the struct passwd members in a static passwd_holder, and direct the // pointers in global_passwd to those members. 
static struct passwd_holder passwd_buffers; if (!DeserializePasswd(&output, &passwd_buffers) || !PasswdHolderToPasswd(&passwd_buffers, &global_passwd)) { errno = EFAULT; return nullptr; } return &global_passwd; } void enc_untrusted_hex_dump(const void *buf, size_t nbytes) { MessageWriter input; MessageReader output; input.PushByReference(Extent{reinterpret_cast<const char *>(buf), nbytes}); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kHexDumpHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_hex_dump", 2); } void enc_untrusted_syslog(int priority, const char *message, int len) { EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_syslog, TokLinuxSyslogPriority(priority), message, len); } void enc_untrusted_openlog(const char *ident, int option, int facility) { MessageWriter input; MessageReader output; input.PushString(ident); input.Push<int>(TokLinuxSyslogOption(option)); input.Push<int>(TokLinuxSyslogFacility(facility)); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kOpenLogHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_openlog", 1); } int enc_untrusted_inotify_read(int fd, size_t count, char **serialized_events, size_t *serialized_events_len) { MessageWriter input; MessageReader output; input.Push<int>(fd); input.Push<uint64_t>(count); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kInotifyReadHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_inotify_read", 2, /*match_exact_params=*/false); int result = output.next<int>(); int klinux_errno = output.next<int>(); if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return -1; } Extent serialized_buf = output.next(); *serialized_events_len = serialized_buf.size(); // The caller to this host call owns memory pointed by |*serialized_events|. *serialized_events = reinterpret_cast<char *>(malloc(*serialized_events_len)); if (!serialized_events) { errno = ENOMEM; return -1; } memcpy(*serialized_events, serialized_buf.As<char>(), *serialized_events_len); return result; } int enc_untrusted_ioctl1(int fd, uint64_t request) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_ioctl, fd, request); } } // extern "C"
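The host calls above repeatedly apply the same defensive idiom when copying host-provided data back into enclave memory: a size reported by the untrusted side is never trusted directly, the copy length is clamped to the caller's buffer capacity, and reported lengths (msg_namelen, msg_controllen, *optlen) are only ever shrunk, never grown. A minimal, self-contained sketch of that idiom follows; it is not Asylo code, and CopyClamped is a hypothetical helper name used only for illustration.

#include <algorithm>
#include <cstddef>
#include <cstring>

// Illustrative only: clamp an untrusted length before copying into a trusted
// buffer, and report back how many bytes were actually copied.
static size_t CopyClamped(void *dst, size_t dst_capacity,
                          const void *src, size_t src_size) {
  size_t n = std::min(dst_capacity, src_size);
  std::memcpy(dst, src, n);
  return n;  // The caller records n as the new length (e.g. msg->msg_namelen).
}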
null
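The file that follows assembles each enc_untrusted_* wrapper from one recurring round trip: serialize the arguments into a MessageWriter, dispatch to a host-side handler, validate the reply with CheckStatusAndParamCount, then read the return value and a kernel-Linux errno from the MessageReader. A minimal sketch of that shape is shown below; kExampleHandler and enc_untrusted_example are hypothetical names invented for illustration and do not exist in Asylo.

// Hypothetical illustration of the common host-call dispatch pattern.
int enc_untrusted_example(int fd, int value) {
  MessageWriter input;
  input.Push<int>(fd);     // Arguments are pushed in call order.
  input.Push<int>(value);

  MessageReader output;
  const auto status = NonSystemCallDispatcher(
      ::asylo::host_call::kExampleHandler, &input, &output);
  // Aborts on transport failure or an unexpected number of reply parameters.
  CheckStatusAndParamCount(status, output, "enc_untrusted_example", 2);

  int result = output.next<int>();        // Host-side return value.
  int klinux_errno = output.next<int>();  // Host-side errno (kLinux numbering).
  if (result == -1) {
    // Translate the host's errno into the enclave's errno domain.
    errno = FromkLinuxErrorNumber(klinux_errno);
  }
  return result;
}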
/* * * Copyright 2019 Asylo authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "asylo/platform/host_call/trusted/host_calls.h" #include <errno.h> #include <ifaddrs.h> #include <net/if.h> #include <netdb.h> #include <signal.h> #include <sys/statfs.h> #include <algorithm> #include "asylo/platform/host_call/exit_handler_constants.h" #include "asylo/platform/host_call/serializer_functions.h" #include "asylo/platform/primitives/trusted_primitives.h" #include "asylo/platform/system_call/type_conversions/types_functions.h" using ::asylo::host_call::NonSystemCallDispatcher; using ::asylo::primitives::Extent; using ::asylo::primitives::MessageReader; using ::asylo::primitives::MessageWriter; using ::asylo::primitives::TrustedPrimitives; void CheckStatusAndParamCount(const asylo::primitives::PrimitiveStatus &status, const MessageReader &output, const char *name, int expected_params, bool match_exact_params) { if (!status.ok()) { std::string message = absl::StrCat("Host call '", name, "' failed."); TrustedPrimitives::BestEffortAbort(message.c_str()); } if (!match_exact_params) { if (output.size() < expected_params) { std::string message = absl::StrCat( "Host call '", name, "': Expected at least ", expected_params, " parameters on the MessageReader, found ", output.size()); TrustedPrimitives::BestEffortAbort(message.c_str()); } } else { if (output.size() != expected_params) { std::string message = absl::StrCat( "Host call '", name, "': Expected ", expected_params, " parameters on the MessageReader, found ", output.size()); TrustedPrimitives::BestEffortAbort(message.c_str()); } } } namespace { // A global passwd struct. The address of it is used as the return value of // getpwuid. struct passwd global_passwd; size_t CalculateTotalMessageSize(const struct msghdr *msg) { size_t total_message_size = 0; for (int i = 0; i < msg->msg_iovlen; ++i) { total_message_size += msg->msg_iov[i].iov_len; } return total_message_size; } #define PASSWD_HOLDER_FIELD_LENGTH 1024 // Struct for storing the buffers needed by struct passwd members. 
struct passwd_holder { char pw_name[PASSWD_HOLDER_FIELD_LENGTH]; char pw_passwd[PASSWD_HOLDER_FIELD_LENGTH]; uid_t pw_uid; gid_t pw_gid; char pw_gecos[PASSWD_HOLDER_FIELD_LENGTH]; char pw_dir[PASSWD_HOLDER_FIELD_LENGTH]; char pw_shell[PASSWD_HOLDER_FIELD_LENGTH]; }; bool DeserializePasswd(MessageReader *reader, struct passwd_holder *passwd_buffers) { if (!reader || !passwd_buffers) { return false; } if (reader->size() < 7) { return false; } auto pw_name_buf = reader->next(); auto pw_passwd_buf = reader->next(); auto pw_uid = reader->next<uid_t>(); auto pw_gid = reader->next<gid_t>(); auto pw_gecos_buf = reader->next(); auto pw_dir_buf = reader->next(); auto pw_shell_buf = reader->next(); strncpy(passwd_buffers->pw_name, pw_name_buf.As<char>(), std::min(sizeof(passwd_buffers->pw_name), pw_name_buf.size())); strncpy(passwd_buffers->pw_passwd, pw_passwd_buf.As<char>(), std::min(sizeof(passwd_buffers->pw_passwd), pw_passwd_buf.size())); passwd_buffers->pw_uid = pw_uid; passwd_buffers->pw_gid = pw_gid; strncpy(passwd_buffers->pw_gecos, pw_gecos_buf.As<char>(), std::min(sizeof(passwd_buffers->pw_gecos), pw_gecos_buf.size())); strncpy(passwd_buffers->pw_dir, pw_dir_buf.As<char>(), std::min(sizeof(passwd_buffers->pw_dir), pw_dir_buf.size())); strncpy(passwd_buffers->pw_shell, pw_shell_buf.As<char>(), std::min(sizeof(passwd_buffers->pw_shell), pw_shell_buf.size())); return true; } bool PasswdHolderToPasswd(struct passwd_holder *passwd_in, struct passwd *passwd_out) { if (!passwd_in || !passwd_out) { return false; } passwd_out->pw_name = passwd_in->pw_name; passwd_out->pw_passwd = passwd_in->pw_passwd; passwd_out->pw_uid = passwd_in->pw_uid; passwd_out->pw_gid = passwd_in->pw_gid; passwd_out->pw_gecos = passwd_in->pw_gecos; passwd_out->pw_dir = passwd_in->pw_dir; passwd_out->pw_shell = passwd_in->pw_shell; return true; } } // namespace extern "C" { int enc_untrusted_access(const char *path_name, int mode) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_access, path_name, mode); } pid_t enc_untrusted_getpid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getpid); } pid_t enc_untrusted_getppid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getppid); } pid_t enc_untrusted_setsid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_setsid); } uid_t enc_untrusted_getuid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getuid); } gid_t enc_untrusted_getgid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getgid); } uid_t enc_untrusted_geteuid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_geteuid); } gid_t enc_untrusted_getegid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getegid); } int enc_untrusted_kill(pid_t pid, int sig) { int klinux_sig = TokLinuxSignalNumber(sig); if (klinux_sig < 0) { errno = EINVAL; return -1; } return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_kill, pid, klinux_sig); } int enc_untrusted_link(const char *oldpath, const char *newpath) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_link, oldpath, newpath); } off_t enc_untrusted_lseek(int fd, off_t offset, int whence) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_lseek, fd, offset, whence); } int enc_untrusted_mkdir(const char *pathname, mode_t mode) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_mkdir, pathname, mode); } int enc_untrusted_open(const char 
*pathname, int flags, ...) { int mode = 0; if (flags & O_CREAT) { va_list ap; va_start(ap, flags); mode = va_arg(ap, mode_t); va_end(ap); } return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_open, pathname, TokLinuxFileStatusFlag(flags), TokLinuxFileModeFlag(mode)); } int enc_untrusted_unlink(const char *pathname) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_unlink, pathname); } int enc_untrusted_rename(const char *oldpath, const char *newpath) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_rename, oldpath, newpath); } ssize_t enc_untrusted_read(int fd, void *buf, size_t count) { return static_cast<ssize_t>(EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_read, fd, buf, count)); } ssize_t enc_untrusted_write(int fd, const void *buf, size_t count) { return static_cast<ssize_t>(EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_write, fd, buf, count)); } int enc_untrusted_symlink(const char *target, const char *linkpath) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_symlink, target, linkpath); } ssize_t enc_untrusted_readlink(const char *pathname, char *buf, size_t bufsiz) { return static_cast<ssize_t>(EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_readlink, pathname, buf, bufsiz)); } int enc_untrusted_truncate(const char *path, off_t length) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_truncate, path, length); } int enc_untrusted_ftruncate(int fd, off_t length) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_ftruncate, fd, length); } int enc_untrusted_rmdir(const char *path) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_rmdir, path); } int enc_untrusted_pipe2(int pipefd[2], int flags) { if (flags & ~(O_CLOEXEC | O_DIRECT | O_NONBLOCK)) { errno = EINVAL; return -1; } return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_pipe2, pipefd, TokLinuxFileStatusFlag(flags)); } int enc_untrusted_socket(int domain, int type, int protocol) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_socket, TokLinuxAfFamily(domain), TokLinuxSocketType(type), protocol); } int enc_untrusted_listen(int sockfd, int backlog) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_listen, sockfd, backlog); } int enc_untrusted_shutdown(int sockfd, int how) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_shutdown, sockfd, how); } ssize_t enc_untrusted_send(int sockfd, const void *buf, size_t len, int flags) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_sendto, sockfd, buf, len, flags, /*dest_addr=*/nullptr, /*addrlen=*/0); } int enc_untrusted_fcntl(int fd, int cmd, ... /* arg */) { // We do not currently support file locks in Asylo, so arg is not expected to // be a pointer to struct flock. 
int64_t arg = 0; va_list ap; va_start(ap, cmd); arg = va_arg(ap, int64_t); va_end(ap); int klinux_cmd = TokLinuxFcntlCommand(cmd); if (klinux_cmd == -1) { errno = EINVAL; return -1; } int intarg = arg; switch (cmd) { case F_SETFL: { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_fcntl, fd, klinux_cmd, TokLinuxFileStatusFlag(intarg)); } case F_SETFD: { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fcntl, fd, klinux_cmd, TokLinuxFDFlag(intarg)); } case F_GETFL: { int retval = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_fcntl, fd, klinux_cmd, arg); if (retval != -1) { retval = FromkLinuxFileStatusFlag(retval); } return retval; } case F_GETFD: { int retval = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_fcntl, fd, klinux_cmd, arg); if (retval != -1) { retval = FromkLinuxFDFlag(retval); } return retval; } case F_GETPIPE_SZ: case F_SETPIPE_SZ: { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fcntl, fd, klinux_cmd, arg); } // We do not handle the case for F_DUPFD. It is expected to be handled at // a higher abstraction, as we need not exit the enclave for duplicating // the file descriptor. default: { errno = EINVAL; return -1; } } } int enc_untrusted_chown(const char *pathname, uid_t owner, gid_t group) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_chown, pathname, owner, group); } int enc_untrusted_fchown(int fd, uid_t owner, gid_t group) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fchown, fd, owner, group); } int enc_untrusted_setsockopt(int sockfd, int level, int optname, const void *optval, socklen_t optlen) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_setsockopt, sockfd, level, TokLinuxOptionName(level, optname), optval, optlen); } int enc_untrusted_flock(int fd, int operation) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_flock, fd, TokLinuxFLockOperation(operation)); } int enc_untrusted_wait(int *wstatus) { int klinux_wstatus; pid_t ret = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_wait4, /*wpid=*/-1, &klinux_wstatus, /*options=*/0, /*rusage=*/nullptr); *wstatus = FromkLinuxToNewlibWstatus(klinux_wstatus); return ret; } int enc_untrusted_inotify_init1(int flags) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_inotify_init1, TokLinuxInotifyFlag(flags)); } int enc_untrusted_inotify_add_watch(int fd, const char *pathname, uint32_t mask) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_inotify_add_watch, fd, pathname, TokLinuxInotifyEventMask(mask)); } int enc_untrusted_inotify_rm_watch(int fd, int wd) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_inotify_rm_watch, fd, wd); } mode_t enc_untrusted_umask(mode_t mask) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_umask, mask); } int enc_untrusted_chmod(const char *path_name, mode_t mode) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_chmod, path_name, mode); } int enc_untrusted_fchmod(int fd, mode_t mode) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fchmod, fd, mode); } int enc_untrusted_sched_yield() { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_sched_yield); } int enc_untrusted_sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask) { klinux_cpu_set_t klinux_mask{}; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_sched_getaffinity, 
pid, static_cast<uint64_t>(cpusetsize), &klinux_mask); // On success, the raw getaffinity syscall returns the size of the cpumask_t // data type, To mimic the glibc behavior, we return 0 on success and -1 on // failure. See https://linux.die.net/man/2/sched_getaffinity, under "notes". if (result < 0) { return -1; } if (!FromkLinuxCpuSet(&klinux_mask, mask)) { errno = EFAULT; return -1; } return 0; } int enc_untrusted_pread64(int fd, void *buf, size_t count, off_t offset) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_pread64, fd, buf, count, offset); } int enc_untrusted_pwrite64(int fd, const void *buf, size_t count, off_t offset) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_pwrite64, fd, buf, count, offset); } int enc_untrusted_isatty(int fd) { MessageWriter input; input.Push(fd); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kIsAttyHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_isatty", 2); int result = output.next<int>(); // isatty() returns 1 if fd is an open file descriptor referring to a // terminal; otherwise 0 is returned, and errno is set to indicate the error. if (result == 0) { int klinux_errno = output.next<int>(); errno = FromkLinuxErrorNumber(klinux_errno); } return result; } int enc_untrusted_usleep(useconds_t usec) { MessageWriter input; input.Push(usec); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher( asylo::host_call::kUSleepHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_usleep", 2); int result = output.next<int>(); int klinux_errno = output.next<int>(); // usleep() returns 0 on success. On error, -1 is returned, with errno set to // indicate the cause of the error. 
if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } int enc_untrusted_fstat(int fd, struct stat *statbuf) { struct klinux_stat stat_kernel; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_fstat, fd, &stat_kernel); if (FromkLinuxStat(&stat_kernel, statbuf)) { statbuf->st_mode = FromkLinuxFileModeFlag(stat_kernel.klinux_st_mode); } return result; } int enc_untrusted_fstatfs(int fd, struct statfs *statbuf) { struct klinux_statfs statfs_kernel; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_fstatfs, fd, &statfs_kernel); if (FromkLinuxStatFs(&statfs_kernel, statbuf)) { statbuf->f_flags = FromkLinuxStatFsFlags(statfs_kernel.klinux_f_flags); } return result; } int enc_untrusted_lstat(const char *pathname, struct stat *statbuf) { struct klinux_stat stat_kernel; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_lstat, pathname, &stat_kernel); if (FromkLinuxStat(&stat_kernel, statbuf)) { statbuf->st_mode = FromkLinuxFileModeFlag(stat_kernel.klinux_st_mode); } return result; } int enc_untrusted_stat(const char *pathname, struct stat *statbuf) { struct klinux_stat stat_kernel; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_stat, pathname, &stat_kernel); if (FromkLinuxStat(&stat_kernel, statbuf)) { statbuf->st_mode = FromkLinuxFileModeFlag(stat_kernel.klinux_st_mode); } return result; } int enc_untrusted_statfs(const char *pathname, struct statfs *statbuf) { struct klinux_statfs statfs_kernel; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_statfs, pathname, &statfs_kernel); if (FromkLinuxStatFs(&statfs_kernel, statbuf)) { statbuf->f_flags = FromkLinuxStatFsFlags(statfs_kernel.klinux_f_flags); } return result; } ssize_t enc_untrusted_getxattr(const char *path, const char *name, void *value, size_t size) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getxattr, path, name, value, size); } ssize_t enc_untrusted_lgetxattr(const char *path, const char *name, void *value, size_t size) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_lgetxattr, path, name, value, size); } ssize_t enc_untrusted_fgetxattr(int fd, const char *name, void *value, size_t size) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fgetxattr, fd, name, value, size); } int enc_untrusted_setxattr(const char *path, const char *name, const void *value, size_t size, int flags) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_setxattr, path, name, value, size, flags); } int enc_untrusted_lsetxattr(const char *path, const char *name, const void *value, size_t size, int flags) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_lsetxattr, path, name, value, size, flags); } int enc_untrusted_fsetxattr(int fd, const char *name, const void *value, size_t size, int flags) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fsetxattr, fd, name, value, size, flags); } ssize_t enc_untrusted_listxattr(const char *path, char *list, size_t size) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_listxattr, path, list, size); } ssize_t enc_untrusted_llistxattr(const char *path, char *list, size_t size) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_llistxattr, path, list, size); } ssize_t enc_untrusted_flistxattr(int fd, char *list, size_t size) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_flistxattr, fd, 
list, size); } int64_t enc_untrusted_sysconf(int name) { int kLinux_name = TokLinuxSysconfConstant(name); if (kLinux_name == -1) { errno = EINVAL; return -1; } MessageWriter input; input.Push(kLinux_name); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher( asylo::host_call::kSysconfHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_sysconf", 2); int64_t result = output.next<int>(); int klinux_errno = output.next<int>(); if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } int enc_untrusted_close(int fd) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_close, fd); } void *enc_untrusted_realloc(void *ptr, size_t size) { MessageWriter input; input.Push(reinterpret_cast<uint64_t>(ptr)); input.Push(static_cast<uint64_t>(size)); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher( asylo::host_call::kReallocHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_realloc", 2); void *result = output.next<void *>(); int klinux_errno = output.next<int>(); // realloc only sets the errno (ENOMEM) when output pointer is null and a // non-zero |size| is provided. if (!result && size != 0) { errno = FromkLinuxErrorNumber(klinux_errno); } if (!::asylo::primitives::TrustedPrimitives::IsOutsideEnclave(result, size)) { ::asylo::primitives::TrustedPrimitives::BestEffortAbort( "enc_untrusted_realloc: realloc result should be in untrusted " "memory"); } return result; } uint32_t enc_untrusted_sleep(uint32_t seconds) { MessageWriter input; input.Push<uint32_t>(seconds); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher(asylo::host_call::kSleepHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_sleep", 2); // Returns sleep's return value directly since it doesn't set errno. return output.next<uint32_t>(); } int enc_untrusted_nanosleep(const struct timespec *req, struct timespec *rem) { struct kLinux_timespec klinux_req; if (!TokLinuxtimespec(req, &klinux_req)) { errno = EINVAL; return -1; } struct kLinux_timespec klinux_rem; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_nanosleep, &klinux_req, &klinux_rem); FromkLinuxtimespec(&klinux_rem, rem); return result; } int enc_untrusted_clock_gettime(clockid_t clk_id, struct timespec *tp) { clockid_t klinux_clk_id = TokLinuxClockId(clk_id); if (klinux_clk_id == -1) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int64_t>(klinux_clk_id); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher( asylo::host_call::kClockGettimeHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_clock_gettime", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); struct kLinux_timespec klinux_tp = output.next<struct kLinux_timespec>(); // clock_gettime returns -1 on error and sets the errno. 
if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return -1; } FromkLinuxtimespec(&klinux_tp, tp); return result; } int enc_untrusted_clock_getcpuclockid(pid_t pid, clockid_t *clock_id) { MessageWriter input; input.Push<uint32_t>(pid); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher( asylo::host_call::kGetCpuClockIdHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getcpuclockid", 2); // clock_getcpuclockid returns an errno value directly, without setting errno. // The value must still be translated in order to be interpreted. int klinux_errno_result = output.next<int32_t>(); if (klinux_errno_result != 0) { return FromkLinuxErrorNumber(klinux_errno_result); } clockid_t klinux_clk_id = output.next<uint64_t>(); *clock_id = FromkLinuxClockId(klinux_clk_id); return 0; } int enc_untrusted_bind(int sockfd, const struct sockaddr *addr, socklen_t addrlen) { socklen_t klinux_sock_len = std::max(std::max(sizeof(klinux_sockaddr_un), sizeof(klinux_sockaddr_in)), sizeof(klinux_sockaddr_in6)); auto klinux_sock = absl::make_unique<char[]>(klinux_sock_len); if (!TokLinuxSockAddr(addr, addrlen, reinterpret_cast<klinux_sockaddr *>(klinux_sock.get()), &klinux_sock_len, TrustedPrimitives::BestEffortAbort)) { errno = EINVAL; return -1; } return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_bind, sockfd, klinux_sock.get(), klinux_sock_len); } int enc_untrusted_connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen) { socklen_t klinux_sock_len = std::max(std::max(sizeof(klinux_sockaddr_un), sizeof(klinux_sockaddr_in)), sizeof(klinux_sockaddr_in6)); auto klinux_sock = absl::make_unique<char[]>(klinux_sock_len); if (!TokLinuxSockAddr(addr, addrlen, reinterpret_cast<klinux_sockaddr *>(klinux_sock.get()), &klinux_sock_len, TrustedPrimitives::BestEffortAbort)) { errno = EINVAL; return -1; } return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_connect, sockfd, klinux_sock.get(), klinux_sock_len); } ssize_t enc_untrusted_sendmsg(int sockfd, const struct msghdr *msg, int flags) { size_t total_message_size = CalculateTotalMessageSize(msg); std::unique_ptr<char[]> msg_iov_buffer(new char[total_message_size]); size_t copied_bytes = 0; for (int i = 0; i < msg->msg_iovlen; ++i) { memcpy(msg_iov_buffer.get() + copied_bytes, msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len); copied_bytes += msg->msg_iov[i].iov_len; } MessageWriter input; input.Push(sockfd); input.PushByReference(Extent{msg->msg_name, msg->msg_namelen}); input.PushByReference(Extent{msg_iov_buffer.get(), total_message_size}); input.PushByReference(Extent{msg->msg_control, msg->msg_controllen}); input.Push(msg->msg_flags); input.Push(flags); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kSendMsgHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_sendmsg", 2); ssize_t result = output.next<ssize_t>(); int klinux_errno = output.next<int>(); // sendmsg() returns the number of characters sent. On error, -1 is returned, // with errno set to indicate the cause of the error. 
if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } ssize_t enc_untrusted_recvmsg(int sockfd, struct msghdr *msg, int flags) { size_t total_buffer_size = CalculateTotalMessageSize(msg); MessageWriter input; input.Push(sockfd); input.Push<uint64_t>(msg->msg_namelen); input.Push<uint64_t>(total_buffer_size); input.Push<uint64_t>(msg->msg_controllen); input.Push(msg->msg_flags); input.Push(flags); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kRecvMsgHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_recvmsg", 2, /*match_exact_params=*/false); ssize_t result = output.next<ssize_t>(); int klinux_errno = output.next<int>(); // recvmsg() returns the number of characters received. On error, -1 is // returned, with errno set to indicate the cause of the error. if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } if (result > total_buffer_size) { ::asylo::primitives::TrustedPrimitives::BestEffortAbort( "enc_untrusted_recvmsg: result exceeds requested"); } auto msg_name_extent = output.next(); // The returned |msg_namelen| should not exceed the buffer size. if (msg_name_extent.size() <= msg->msg_namelen) { msg->msg_namelen = msg_name_extent.size(); } memcpy(msg->msg_name, msg_name_extent.As<char>(), msg->msg_namelen); // A single buffer is passed from the untrusted side, copy it into the // scattered buffers inside the enclave. auto msg_iov_extent = output.next(); size_t total_bytes = msg_iov_extent.size(); size_t bytes_copied = 0; for (int i = 0; i < msg->msg_iovlen && bytes_copied < total_bytes; ++i) { size_t bytes_to_copy = std::min(msg->msg_iov[i].iov_len, total_bytes - bytes_copied); memcpy(msg->msg_iov[i].iov_base, msg_iov_extent.As<char>() + bytes_copied, bytes_to_copy); bytes_copied += bytes_to_copy; } auto msg_control_extent = output.next(); // The returned |msg_controllen| should not exceed the buffer size. if (msg_control_extent.size() <= msg->msg_controllen) { msg->msg_controllen = msg_control_extent.size(); } memcpy(msg->msg_control, msg_control_extent.As<char>(), msg->msg_controllen); return result; } int enc_untrusted_getsockname(int sockfd, struct sockaddr *addr, socklen_t *addrlen) { if (!addr || !addrlen) { errno = EFAULT; return -1; } // Guard against -1 being passed as addrlen even though it's unsigned. if (*addrlen == 0 || *addrlen > INT32_MAX) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(sockfd); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetSocknameHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getsockname", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); // getsockname() returns 0 on success. On error, -1 is returned, with errno // set to indicate the cause of the error. 
if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } auto klinux_sockaddr_buf = output.next(); const struct klinux_sockaddr *klinux_addr = klinux_sockaddr_buf.As<struct klinux_sockaddr>(); if (!FromkLinuxSockAddr(klinux_addr, klinux_sockaddr_buf.size(), addr, addrlen, TrustedPrimitives::BestEffortAbort)) { errno = EFAULT; return -1; } return result; } int enc_untrusted_accept(int sockfd, struct sockaddr *addr, socklen_t *addrlen) { MessageWriter input; input.Push<int>(sockfd); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kAcceptHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_accept", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); // accept() returns -1 on failure, with errno set to indicate the cause // of the error. if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } auto klinux_sockaddr_buf = output.next(); const struct klinux_sockaddr *klinux_addr = klinux_sockaddr_buf.As<struct klinux_sockaddr>(); FromkLinuxSockAddr(klinux_addr, klinux_sockaddr_buf.size(), addr, addrlen, TrustedPrimitives::BestEffortAbort); return result; } int enc_untrusted_getpeername(int sockfd, struct sockaddr *addr, socklen_t *addrlen) { if (!addr || !addrlen) { errno = EFAULT; return -1; } // Guard against -1 being passed as addrlen even though it's unsigned. if (*addrlen == 0 || *addrlen > INT32_MAX) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(sockfd); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetPeernameHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getpeername", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); // getpeername() returns -1 on failure, with errno set to indicate the cause // of the error. if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } auto klinux_sockaddr_buf = output.next(); const struct klinux_sockaddr *klinux_addr = klinux_sockaddr_buf.As<struct klinux_sockaddr>(); FromkLinuxSockAddr(klinux_addr, klinux_sockaddr_buf.size(), addr, addrlen, TrustedPrimitives::BestEffortAbort); return result; } ssize_t enc_untrusted_recvfrom(int sockfd, void *buf, size_t len, int flags, struct sockaddr *src_addr, socklen_t *addrlen) { int klinux_flags = TokLinuxRecvSendFlag(flags); if (klinux_flags == 0 && flags != 0) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(sockfd); input.Push<uint64_t>(len); input.Push<int>(klinux_flags); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kRecvFromHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_recvfrom", 4); int result = output.next<int>(); int klinux_errno = output.next<int>(); // recvfrom() returns -1 on failure, with errno set to indicate the cause // of the error. if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } if (result > len) { ::asylo::primitives::TrustedPrimitives::BestEffortAbort( "enc_untrusted_recvfrom: result exceeds requested"); } auto buffer_received = output.next(); memcpy(buf, buffer_received.data(), std::min(len, buffer_received.size())); // If |src_addr| is not NULL, and the underlying protocol provides the source // address, this source address is filled in. When |src_addr| is NULL, nothing // is filled in; in this case, |addrlen| is not used, and should also be NULL. 
if (src_addr != nullptr && addrlen != nullptr) { auto klinux_sockaddr_buf = output.next(); const struct klinux_sockaddr *klinux_addr = klinux_sockaddr_buf.As<struct klinux_sockaddr>(); FromkLinuxSockAddr(klinux_addr, klinux_sockaddr_buf.size(), src_addr, addrlen, TrustedPrimitives::BestEffortAbort); } return result; } int enc_untrusted_select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, struct timeval *timeout) { struct klinux_fd_set klinux_readfds, klinux_writefds, klinux_exceptfds; struct kLinux_timeval klinux_timeout; TokLinuxFdSet(readfds, &klinux_readfds); TokLinuxFdSet(writefds, &klinux_writefds); TokLinuxFdSet(exceptfds, &klinux_exceptfds); TokLinuxtimeval(timeout, &klinux_timeout); int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_select, nfds, &klinux_readfds, &klinux_writefds, &klinux_exceptfds, &klinux_timeout); FromkLinuxFdSet(&klinux_readfds, readfds); FromkLinuxFdSet(&klinux_writefds, writefds); FromkLinuxFdSet(&klinux_exceptfds, exceptfds); return result; } int enc_untrusted_gettimeofday(struct timeval *tv, struct timezone *tz) { struct kLinux_timeval ktv; TokLinuxtimeval(tv, &ktv); // We do not convert timezone to a klinux value since this struct is expected // to be identical across enclave boundary. Besides, the use of the timezone // structure is obsolete; the tz argument should normally be specified as // NULL. int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_gettimeofday, &ktv, tz); FromkLinuxtimeval(&ktv, tv); return result; } int enc_untrusted_fsync(int fd) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fsync, fd); } int enc_untrusted_raise(int sig) { int klinux_sig = TokLinuxSignalNumber(sig); if (klinux_sig < 0) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(klinux_sig); MessageReader output; const auto status = NonSystemCallDispatcher(::asylo::host_call::kRaiseHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_raise", 2); int result = output.next<int>(); int klinux_errno = output.next<int>(); if (result != 0) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } int enc_untrusted_getsockopt(int sockfd, int level, int optname, void *optval, socklen_t *optlen) { if (!optval || !optlen || *optlen == 0) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(sockfd); input.Push<int>(level); input.Push<int>(TokLinuxOptionName(level, optname)); input.PushByReference(Extent{reinterpret_cast<char *>(optval), *optlen}); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetSockOptHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getsockopt", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); Extent opt_received = output.next(); if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return -1; } // The returned |optlen| should not exceed the buffer size. 
if (opt_received.size() <= *optlen) { *optlen = opt_received.size(); } memcpy(optval, opt_received.data(), *optlen); return result; } int enc_untrusted_getitimer(int which, struct itimerval *curr_value) { struct klinux_itimerval klinux_curr_value {}; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_getitimer, TokLinuxItimerType(which), &klinux_curr_value); if (!curr_value || !FromkLinuxItimerval(&klinux_curr_value, curr_value)) { errno = EFAULT; return -1; } return result; } int enc_untrusted_setitimer(int which, const struct itimerval *new_value, struct itimerval *old_value) { struct klinux_itimerval klinux_new_value {}; struct klinux_itimerval klinux_old_value {}; if (!TokLinuxItimerval(new_value, &klinux_new_value)) { errno = EFAULT; return -1; } int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_setitimer, TokLinuxItimerType(which), &klinux_new_value, &klinux_old_value); if (old_value != nullptr && !FromkLinuxItimerval(&klinux_old_value, old_value)) { errno = EFAULT; return -1; } return result; } clock_t enc_untrusted_times(struct tms *buf) { struct kLinux_tms klinux_buf {}; int64_t result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_times, &klinux_buf); if (!FromkLinuxtms(&klinux_buf, buf)) { errno = EFAULT; return -1; } return static_cast<clock_t>(result); } int enc_untrusted_getaddrinfo(const char *node, const char *service, const struct addrinfo *hints, struct addrinfo **res) { MessageWriter input; input.PushByReference(Extent{node, (node != nullptr) ? strlen(node) + 1 : 0}); input.PushByReference( Extent{service, (service != nullptr) ? strlen(service) + 1 : 0}); if (hints != nullptr) { input.Push<int>(TokLinuxAddressInfoFlag(hints->ai_flags)); input.Push<int>(TokLinuxAfFamily(hints->ai_family)); input.Push<int>(TokLinuxSocketType(hints->ai_socktype)); input.Push<int>(hints->ai_protocol); } MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetAddrInfoHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getaddrinfo", 3, /*match_exact_params=*/false); int klinux_ret = output.next<int>(); int klinux_errno = output.next<int>(); int ret = FromkLinuxAddressInfoError(klinux_ret); if (ret != 0) { if (ret == EAI_SYSTEM) { errno = FromkLinuxErrorNumber(klinux_errno); } return ret; } if (!asylo::host_call::DeserializeAddrinfo( &output, res, TrustedPrimitives::BestEffortAbort)) { TrustedPrimitives::DebugPuts( "enc_untrusted_getaddrinfo: Invalid addrinfo in response."); return -1; } return 0; } void enc_freeaddrinfo(struct addrinfo *res) { struct addrinfo *prev_info = nullptr; for (struct addrinfo *info = res; info != nullptr; info = info->ai_next) { if (prev_info) free(prev_info); if (info->ai_addr) free(info->ai_addr); if (info->ai_canonname) free(info->ai_canonname); prev_info = info; } if (prev_info) free(prev_info); } int enc_untrusted_poll(struct pollfd *fds, nfds_t nfds, int timeout) { auto klinux_fds = absl::make_unique<struct klinux_pollfd[]>(nfds); for (int i = 0; i < nfds; ++i) { if (!TokLinuxPollfd(&fds[i], &klinux_fds[i])) { errno = EFAULT; return -1; } } int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_poll, klinux_fds.get(), static_cast<uint64_t>(nfds), timeout); if (result < 0) { return result; } for (int i = 0; i < nfds; ++i) { if (!FromkLinuxPollfd(&klinux_fds[i], &fds[i])) { errno = EFAULT; return -1; } } return result; } int enc_untrusted_epoll_create(int size) { return EnsureInitializedAndDispatchSyscall( 
asylo::system_call::kSYS_epoll_create, size); } int enc_untrusted_utimes(const char *filename, const struct timeval times[2]) { struct kLinux_timeval klinux_times[2]; if (!TokLinuxtimeval(&times[0], &klinux_times[0]) || !TokLinuxtimeval(&times[1], &klinux_times[1])) { errno = EBADE; return -1; } return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_utimes, filename, klinux_times); } int enc_untrusted_utime(const char *filename, const struct utimbuf *times) { struct kLinux_utimbuf klinux_times {}; // We do not check the return value of the conversion function since utimbuf // is allowed to be null. TokLinuxutimbuf(times, &klinux_times); return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_utime, filename, &klinux_times); } int enc_untrusted_inet_pton(int af, const char *src, void *dst) { if (!src || !dst) { return 0; } MessageWriter input; input.Push<int>(TokLinuxAfFamily(af)); input.PushByReference(Extent{ src, std::min(strlen(src) + 1, static_cast<size_t>(INET6_ADDRSTRLEN))}); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kInetPtonHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_inet_pton", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return -1; } auto klinux_addr_buffer = output.next(); size_t max_size = 0; if (af == AF_INET) { max_size = sizeof(struct in_addr); } else if (af == AF_INET6) { max_size = sizeof(struct in6_addr); } memcpy(dst, klinux_addr_buffer.data(), std::min(klinux_addr_buffer.size(), max_size)); return result; } const char *enc_untrusted_inet_ntop(int af, const void *src, char *dst, socklen_t size) { if (!src || !dst) { errno = EFAULT; return nullptr; } size_t src_size = 0; if (af == AF_INET) { src_size = sizeof(struct in_addr); } else if (af == AF_INET6) { src_size = sizeof(struct in6_addr); } else { errno = EAFNOSUPPORT; return nullptr; } MessageWriter input; input.Push<int>(TokLinuxAfFamily(af)); input.PushByReference(Extent{reinterpret_cast<const char *>(src), src_size}); input.Push(size); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kInetNtopHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_inet_ntop", 2); auto result = output.next(); int klinux_errno = output.next<int>(); if (result.empty()) { errno = FromkLinuxErrorNumber(klinux_errno); return nullptr; } memcpy(dst, result.data(), std::min(static_cast<size_t>(size), static_cast<size_t>(INET6_ADDRSTRLEN))); return dst; } int enc_untrusted_sigprocmask(int how, const sigset_t *set, sigset_t *oldset) { klinux_sigset_t klinux_set; if (!TokLinuxSigset(set, &klinux_set)) { errno = EINVAL; return -1; } int klinux_how = TokLinuxSigMaskAction(how); if (klinux_how == -1) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(klinux_how); input.Push<klinux_sigset_t>(klinux_set); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kSigprocmaskHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_sigprocmask", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); // sigprocmask() returns -1 on failure, with errno set to indicate the cause // of the error. 
if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } klinux_sigset_t klinux_oldset = output.next<klinux_sigset_t>(); if (oldset != nullptr) { if (!FromkLinuxSigset(&klinux_oldset, oldset)) { errno = EINVAL; return -1; } } return result; } unsigned int enc_untrusted_if_nametoindex(const char *ifname) { MessageWriter input; input.PushString(ifname); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kIfNameToIndexHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_nametoindex", 2); auto result = output.next<unsigned int>(); int klinux_errno = output.next<int>(); if (result == 0) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } char *enc_untrusted_if_indextoname(unsigned int ifindex, char *ifname) { if (!ifname) { return nullptr; } MessageWriter input; input.Push(ifindex); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kIfIndexToNameHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_indextoname", 2); Extent ifname_buffer = output.next(); memcpy(ifname, ifname_buffer.As<char>(), std::min(ifname_buffer.size(), static_cast<size_t>(IF_NAMESIZE))); int klinux_errno = output.next<int>(); if (ifname_buffer.empty()) { errno = FromkLinuxErrorNumber(klinux_errno); } return ifname; } int enc_untrusted_epoll_ctl(int epfd, int op, int fd, struct epoll_event *event) { struct klinux_epoll_event klinux_event_tmp {}; if (event != nullptr && !TokLinuxEpollEvent(event, &klinux_event_tmp)) { errno = EINVAL; return -1; } int klinux_op = TokLinuxEpollCtlOp(op); if (klinux_op == 0) { errno = EINVAL; return -1; } struct klinux_epoll_event *klinux_event = (event != nullptr) ? &klinux_event_tmp : nullptr; return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_epoll_ctl, epfd, klinux_op, fd, klinux_event); } int enc_untrusted_epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout) { if (maxevents <= 0) { errno = EINVAL; return -1; } auto klinux_events = absl::make_unique<struct klinux_epoll_event[]>(maxevents); int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_epoll_wait, epfd, klinux_events.get(), maxevents, timeout); // Only process epoll events if syscall was successful. if (result == -1) { // errno is already set by the system_call library at this point for a // return value of -1. 
return result; } if (result > maxevents) { TrustedPrimitives::BestEffortAbort( "enc_untrusted_epoll_wait: result found to be greater than maxevents " "supplied."); } for (int i = 0; i < result; i++) { if (!FromkLinuxEpollEvent(&klinux_events.get()[i], &events[i])) { errno = EBADE; return -1; } } return result; } int enc_untrusted_getifaddrs(struct ifaddrs **ifap) { MessageWriter input; MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetIfAddrsHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getifaddrs", 3, /*match_exact_params=*/false); int result = output.next<int>(); int klinux_errno = output.next<int>(); if (result != 0) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } if (!asylo::host_call::DeserializeIfAddrs( &output, ifap, TrustedPrimitives::BestEffortAbort)) { TrustedPrimitives::DebugPuts( "enc_untrusted_getifaddrs: Invalid ifaddrs in response."); return -1; } return 0; } void enc_freeifaddrs(struct ifaddrs *ifa) { asylo::host_call::FreeDeserializedIfAddrs(ifa); } int enc_untrusted_getrusage(int who, struct rusage *usage) { struct klinux_rusage klinux_usage {}; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_getrusage, TokLinuxRusageTarget(who), &klinux_usage); if (result != -1) { if (!FromkLinuxRusage(&klinux_usage, usage)) { errno = EINVAL; return -1; } } return result; } pid_t enc_untrusted_wait3(int *status, int options, struct rusage *rusage) { int klinux_status; struct klinux_rusage klinux_usage; pid_t result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_wait4, /*pid=*/-1, &klinux_status, TokLinuxWaitOption(options), &klinux_usage); if (status) { *status = FromkLinuxToNewlibWstatus(klinux_status); } if (rusage) { if (!FromkLinuxRusage(&klinux_usage, rusage)) { errno = EINVAL; return -1; } } return result; } pid_t enc_untrusted_waitpid(pid_t pid, int *status, int options) { int klinux_status; pid_t result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_wait4, pid, &klinux_status, TokLinuxWaitOption(options), /*rusage=*/nullptr); if (status) { *status = FromkLinuxToNewlibWstatus(klinux_status); } return result; } int enc_untrusted_uname(struct utsname *buf) { struct klinux_utsname klinux_buf {}; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_uname, &klinux_buf); if (result != 0) { return result; } if (!FromkLinuxUtsName(&klinux_buf, buf)) { TrustedPrimitives::BestEffortAbort( "enc_untrusted_uname: Returned an ill-formed utsname."); } return 0; } struct passwd *enc_untrusted_getpwuid(uid_t uid) { MessageWriter input; MessageReader output; input.Push<uid_t>(uid); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetPwUidHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getpwuid", 1, /*match_exact_params=*/false); int klinux_errno = output.next<int>(); if (output.size() == 1) { errno = FromkLinuxErrorNumber(klinux_errno); return nullptr; } // Store the struct passwd members in a static passwd_holder, and direct the // pointers in global_passwd to those members. 
static struct passwd_holder passwd_buffers; if (!DeserializePasswd(&output, &passwd_buffers) || !PasswdHolderToPasswd(&passwd_buffers, &global_passwd)) { errno = EFAULT; return nullptr; } return &global_passwd; } void enc_untrusted_hex_dump(const void *buf, size_t nbytes) { MessageWriter input; MessageReader output; input.PushByReference(Extent{reinterpret_cast<const char *>(buf), nbytes}); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kHexDumpHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_hex_dump", 2); } void enc_untrusted_syslog(int priority, const char *message, int len) { EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_syslog, TokLinuxSyslogPriority(priority), message, len); } void enc_untrusted_openlog(const char *ident, int option, int facility) { MessageWriter input; MessageReader output; input.PushString(ident); input.Push<int>(TokLinuxSyslogOption(option)); input.Push<int>(TokLinuxSyslogFacility(facility)); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kOpenLogHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_openlog", 1); } int enc_untrusted_inotify_read(int fd, size_t count, char **serialized_events, size_t *serialized_events_len) { MessageWriter input; MessageReader output; input.Push<int>(fd); input.Push<uint64_t>(count); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kInotifyReadHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_inotify_read", 2, /*match_exact_params=*/false); int result = output.next<int>(); int klinux_errno = output.next<int>(); if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return -1; } Extent serialized_buf = output.next(); *serialized_events_len = serialized_buf.size(); // The caller to this host call owns memory pointed by |*serialized_events|. *serialized_events = reinterpret_cast<char *>(malloc(*serialized_events_len)); if (!serialized_events) { errno = ENOMEM; return -1; } memcpy(*serialized_events, serialized_buf.As<char>(), *serialized_events_len); return result; } int enc_untrusted_ioctl1(int fd, uint64_t request) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_ioctl, fd, request); } } // extern "C"
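The wrappers in the listing above repeatedly copy host-supplied bytes into caller-provided buffers while clamping the copy length so the destination is never overrun: the socket-option copy at the top of the listing caps its memcpy at the caller's *optlen, and enc_untrusted_inet_ntop caps its copy at the caller's size (and INET6_ADDRSTRLEN). A minimal, self-contained sketch of that clamping pattern follows; CopyClamped and its parameters are illustrative names only, not part of the Asylo API.

#include <algorithm>
#include <cstddef>
#include <cstring>
#include <vector>

// Illustrative only: copy a host-returned payload into a caller buffer of
// capacity *len, never writing past the destination, and report the number
// of bytes actually copied back through *len.
static void CopyClamped(const std::vector<unsigned char> &payload,
                        void *dst, size_t *len) {
  size_t n = std::min(*len, payload.size());  // clamp to both sizes
  if (n != 0) {
    std::memcpy(dst, payload.data(), n);
  }
  *len = n;
}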
null
235
CWE-787
CVE-2020-8937
/* * * Copyright 2019 Asylo authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include <errno.h> #include "asylo/platform/host_call/exit_handler_constants.h" #include "asylo/platform/host_call/trusted/host_call_dispatcher.h" #include "asylo/platform/host_call/trusted/host_calls.h" #include "asylo/platform/primitives/trusted_primitives.h" #include "asylo/platform/system_call/type_conversions/types_functions.h" using ::asylo::host_call::NonSystemCallDispatcher; using ::asylo::primitives::MessageReader; using ::asylo::primitives::MessageWriter; using ::asylo::primitives::TrustedPrimitives; static constexpr int32_t kWaitQueueEnabled = 0; static constexpr int32_t kWaitQueueDisabled = 1; extern "C" { int enc_untrusted_sys_futex_wait(int32_t *futex, int32_t expected, int64_t timeout_microsec) { if (!TrustedPrimitives::IsOutsideEnclave(futex, sizeof(int32_t))) { TrustedPrimitives::BestEffortAbort( "enc_untrusted_sys_futex_wait: futex word should be in untrusted " "local memory."); } MessageWriter input; MessageReader output; input.Push<uint64_t>(reinterpret_cast<uint64_t>(futex)); input.Push<int32_t>(expected); input.Push<int64_t>(timeout_microsec); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kSysFutexWaitHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_sys_futex_wait", 2); int result = output.next<int>(); int klinux_errno = output.next<int>(); // If FUTEX_WAIT successfully causes the thread to be suspended in the kernel, // it returns a zero when the caller is woken up. Otherwise, it returns the // appropriate errno. if (result != 0) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } int enc_untrusted_sys_futex_wake(int32_t *futex, int32_t num) { if (!TrustedPrimitives::IsOutsideEnclave(futex, sizeof(int32_t))) { TrustedPrimitives::BestEffortAbort( "enc_untrusted_sys_futex_wake: futex word should be in untrusted " "local memory."); } MessageWriter input; MessageReader output; input.Push<uint64_t>(reinterpret_cast<uint64_t>(futex)); input.Push<int32_t>(num); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kSysFutexWakeHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_sys_futex_wake", 2); int result = output.next<int>(); int klinux_errno = output.next<int>(); if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } int32_t *enc_untrusted_create_wait_queue() { MessageWriter input; MessageReader output; input.Push<uint64_t>(sizeof(int32_t)); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kLocalLifetimeAllocHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_create_wait_queue", 2); int32_t *queue = reinterpret_cast<int32_t *>(output.next<uintptr_t>()); int klinux_errno = output.next<int>(); if (queue == nullptr) { errno = FromkLinuxErrorNumber(klinux_errno); } enc_untrusted_disable_waiting(queue); return queue; } void enc_untrusted_destroy_wait_queue(int32_t *const queue) { // This is a no op on purpose. 
Wait queue pointers are now // registered to be freed on enclave exit. } void enc_untrusted_thread_wait(int32_t *const queue, uint64_t timeout_microsec) { enc_untrusted_thread_wait_value(queue, kWaitQueueEnabled, timeout_microsec); } void enc_untrusted_notify(int32_t *const queue, int32_t num_threads) { enc_untrusted_sys_futex_wake(queue, num_threads); } void enc_untrusted_disable_waiting(int32_t *const queue) { enc_untrusted_wait_queue_set_value(queue, kWaitQueueDisabled); } void enc_untrusted_enable_waiting(int32_t *const queue) { enc_untrusted_wait_queue_set_value(queue, kWaitQueueEnabled); } void enc_untrusted_wait_queue_set_value(int32_t *const queue, int32_t value) { TrustedPrimitives::UntrustedLocalMemcpy(queue, &value, sizeof(int32_t)); } void enc_untrusted_thread_wait_value(int32_t *const queue, int32_t value, uint64_t timeout_microsec) { enc_untrusted_sys_futex_wait(queue, value, timeout_microsec); } } // extern "C"
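Taken together, these functions expose a small wait-queue API: the queue is a single int32_t word in untrusted memory, enc_untrusted_wait_queue_set_value() updates it through UntrustedLocalMemcpy(), and waiting and notification map onto the futex host calls above. The sketch below shows how a caller might drive that API; it is a usage illustration only (it assumes the host-call layer is initialized and re-declares just the functions it uses), not code from the Asylo sources.

#include <cstdint>

extern "C" {
// Declarations repeated here only to keep the sketch self-contained.
int32_t *enc_untrusted_create_wait_queue();
void enc_untrusted_enable_waiting(int32_t *queue);
void enc_untrusted_thread_wait(int32_t *queue, uint64_t timeout_microsec);
void enc_untrusted_notify(int32_t *queue, int32_t num_threads);
}

void WaitForWork() {
  int32_t *queue = enc_untrusted_create_wait_queue();  // created in the disabled state
  enc_untrusted_enable_waiting(queue);                 // allow threads to block on it
  // Blocks via FUTEX_WAIT on the untrusted word until notified or the
  // timeout (in microseconds) expires.
  enc_untrusted_thread_wait(queue, /*timeout_microsec=*/1000000);
}

void WakeOneWaiter(int32_t *queue) {
  enc_untrusted_notify(queue, /*num_threads=*/1);      // FUTEX_WAKE one waiter
}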
null
/* * * Copyright 2019 Asylo authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include <errno.h> #include "asylo/platform/host_call/exit_handler_constants.h" #include "asylo/platform/host_call/trusted/host_call_dispatcher.h" #include "asylo/platform/host_call/trusted/host_calls.h" #include "asylo/platform/primitives/trusted_primitives.h" #include "asylo/platform/system_call/type_conversions/types_functions.h" using ::asylo::host_call::NonSystemCallDispatcher; using ::asylo::primitives::MessageReader; using ::asylo::primitives::MessageWriter; using ::asylo::primitives::TrustedPrimitives; static constexpr int32_t kWaitQueueEnabled = 0; static constexpr int32_t kWaitQueueDisabled = 1; extern "C" { int enc_untrusted_sys_futex_wait(int32_t *futex, int32_t expected, int64_t timeout_microsec) { if (!TrustedPrimitives::IsOutsideEnclave(futex, sizeof(int32_t))) { TrustedPrimitives::BestEffortAbort( "enc_untrusted_sys_futex_wait: futex word should be in untrusted " "local memory."); } MessageWriter input; MessageReader output; input.Push<uint64_t>(reinterpret_cast<uint64_t>(futex)); input.Push<int32_t>(expected); input.Push<int64_t>(timeout_microsec); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kSysFutexWaitHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_sys_futex_wait", 2); int result = output.next<int>(); int klinux_errno = output.next<int>(); // If FUTEX_WAIT successfully causes the thread to be suspended in the kernel, // it returns a zero when the caller is woken up. Otherwise, it returns the // appropriate errno. 
if (result != 0) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } int enc_untrusted_sys_futex_wake(int32_t *futex, int32_t num) { if (!TrustedPrimitives::IsOutsideEnclave(futex, sizeof(int32_t))) { TrustedPrimitives::BestEffortAbort( "enc_untrusted_sys_futex_wake: futex word should be in untrusted " "local memory."); } MessageWriter input; MessageReader output; input.Push<uint64_t>(reinterpret_cast<uint64_t>(futex)); input.Push<int32_t>(num); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kSysFutexWakeHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_sys_futex_wake", 2); int result = output.next<int>(); int klinux_errno = output.next<int>(); if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } int32_t *enc_untrusted_create_wait_queue() { MessageWriter input; MessageReader output; input.Push<uint64_t>(sizeof(int32_t)); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kLocalLifetimeAllocHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_create_wait_queue", 2); int32_t *queue = reinterpret_cast<int32_t *>(output.next<uintptr_t>()); if (!TrustedPrimitives::IsOutsideEnclave(queue, sizeof(int32_t))) { TrustedPrimitives::BestEffortAbort( "enc_untrusted_create_wait_queue: queue should be in untrusted memory"); } int klinux_errno = output.next<int>(); if (queue == nullptr) { errno = FromkLinuxErrorNumber(klinux_errno); } enc_untrusted_disable_waiting(queue); return queue; } void enc_untrusted_destroy_wait_queue(int32_t *const queue) { // This is a no op on purpose. Wait queue pointers are now // registered to be freed on enclave exit. } void enc_untrusted_thread_wait(int32_t *const queue, uint64_t timeout_microsec) { enc_untrusted_thread_wait_value(queue, kWaitQueueEnabled, timeout_microsec); } void enc_untrusted_notify(int32_t *const queue, int32_t num_threads) { enc_untrusted_sys_futex_wake(queue, num_threads); } void enc_untrusted_disable_waiting(int32_t *const queue) { enc_untrusted_wait_queue_set_value(queue, kWaitQueueDisabled); } void enc_untrusted_enable_waiting(int32_t *const queue) { enc_untrusted_wait_queue_set_value(queue, kWaitQueueEnabled); } void enc_untrusted_wait_queue_set_value(int32_t *const queue, int32_t value) { TrustedPrimitives::UntrustedLocalMemcpy(queue, &value, sizeof(int32_t)); } void enc_untrusted_thread_wait_value(int32_t *const queue, int32_t value, uint64_t timeout_microsec) { enc_untrusted_sys_futex_wait(queue, value, timeout_microsec); } } // extern "C"
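In this listing, enc_untrusted_create_wait_queue() verifies TrustedPrimitives::IsOutsideEnclave(queue, sizeof(int32_t)) as soon as the pointer comes back from the host, calling TrustedPrimitives::BestEffortAbort() if the address overlaps trusted memory; only after that check is the queue word written through enc_untrusted_disable_waiting(). A minimal sketch of that validate-before-write pattern is given below; IsOutsideEnclaveStub and SetUntrustedWord are stand-in names, and the stub predicate is a placeholder for the real enclave-bounds check.

#include <cstdint>
#include <cstdlib>
#include <cstring>

// Placeholder predicate: a real enclave runtime would compare the range
// [addr, addr + size) against the enclave's own memory bounds.
static bool IsOutsideEnclaveStub(const void *addr, size_t size) {
  (void)addr;
  (void)size;
  return true;  // always "outside" in this sketch
}

// Never write through a host-provided pointer until it has been confirmed
// to lie entirely in untrusted memory.
static void SetUntrustedWord(int32_t *host_ptr, int32_t value) {
  if (host_ptr == nullptr ||
      !IsOutsideEnclaveStub(host_ptr, sizeof(int32_t))) {
    std::abort();  // hostile or corrupted host response; do not touch it
  }
  std::memcpy(host_ptr, &value, sizeof(value));
}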
null
236
CWE-787
CVE-2020-8938
/* * * Copyright 2019 Asylo authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "asylo/platform/system_call/type_conversions/manual_types_functions.h" #include <sched.h> #include <signal.h> #include <sys/stat.h> #include <sys/statfs.h> #include <sys/statvfs.h> #include <algorithm> #include <cstring> #include "absl/strings/str_cat.h" #include "asylo/platform/system_call/type_conversions/generated_types_functions.h" namespace { template <typename T, typename U> void ReinterpretCopySingle(T *dst, const U *src) { memcpy(dst, src, std::min(sizeof(T), sizeof(U))); } template <typename T, size_t M, typename U, size_t N> void ReinterpretCopyArray(T (&dst)[M], const U (&src)[N], size_t max_len = SIZE_MAX) { memcpy(dst, src, std::min(max_len, std::min(sizeof(T) * M, sizeof(U) * N))); } template <typename T> void InitializeToZeroSingle(T *ptr) { memset(ptr, 0, sizeof(T)); } template <typename T, size_t M> void InitializeToZeroArray(T (&ptr)[M]) { memset(ptr, 0, sizeof(T) * M); } // Helper for implementing standard POSIX semantics for returning sockaddr // structures. Copies the sockaddr in |source|, of length |source_len|, into the // buffer pointed to by |addr_dest|, which has |addrlen_dest| bytes available. // The copy is truncated if the destination buffer is too small. The number of // bytes in the un-truncated structure is written to addrlen_dest. void CopySockaddr(void *source, socklen_t source_len, void *addr_dest, socklen_t *addrlen_dest) { memcpy(addr_dest, source, std::min(*addrlen_dest, source_len)); *addrlen_dest = source_len; } inline void klinux_sigemptyset(klinux_sigset_t *klinux_set) { memset(klinux_set, 0, sizeof(klinux_sigset_t)); } inline int klinux_sigismember(const klinux_sigset_t *klinux_set, int klinux_sig) { uint64_t sig = klinux_sig - 1; return 1 & (klinux_set->klinux_val[0] >> sig); } inline void klinux_sigaddset(klinux_sigset_t *klinux_set, int klinux_sig) { uint64_t sig = klinux_sig - 1; klinux_set->klinux_val[0] |= 1UL << sig; } // Copies the C string |source_buf| into |dest_buf|. Only copies up to size-1 // non-null characters. Always terminates the copied string with a null byte on // a successful write. // // Fails if |source_buf| contains more than |size| bytes (including the // terminating null byte). bool CStringCopy(const char *source_buf, char *dest_buf, size_t size) { int ret = snprintf(dest_buf, size, "%s", source_buf); return ret >= 0 && static_cast<size_t>(ret) < size; } } // namespace int TokLinuxSocketType(int input) { int sock_type = input; int output = 0; if (sock_type & SOCK_NONBLOCK) { output |= kLinux_SOCK_NONBLOCK; sock_type &= ~SOCK_NONBLOCK; } if (sock_type & SOCK_CLOEXEC) { output |= kLinux_SOCK_CLOEXEC; sock_type &= ~SOCK_CLOEXEC; } if (!sock_type) { // Only SOCK_CLOEXEC or SOCK_NONBLOCK are present. 
return output; } switch (sock_type) { case SOCK_STREAM: output |= kLinux_SOCK_STREAM; break; case SOCK_DGRAM: output |= kLinux_SOCK_DGRAM; break; case SOCK_SEQPACKET: output |= kLinux_SOCK_SEQPACKET; break; case SOCK_RAW: output |= kLinux_SOCK_RAW; break; case SOCK_RDM: output |= kLinux_SOCK_RDM; break; case SOCK_PACKET: output |= kLinux_SOCK_PACKET; break; default: output = -1; // Unsupported } return output; } int FromkLinuxSocketType(int input) { int kLinux_sock_type = input; int output = 0; if (kLinux_sock_type & kLinux_SOCK_NONBLOCK) { output |= SOCK_NONBLOCK; kLinux_sock_type &= ~kLinux_SOCK_NONBLOCK; } if (kLinux_sock_type & kLinux_SOCK_CLOEXEC) { output |= SOCK_CLOEXEC; kLinux_sock_type &= ~kLinux_SOCK_CLOEXEC; } if (!kLinux_sock_type) { // Only kLinux_SOCK_CLOEXEC or kLinux_SOCK_NONBLOCK // are present. return output; } switch (kLinux_sock_type) { case kLinux_SOCK_STREAM: output |= SOCK_STREAM; break; case kLinux_SOCK_DGRAM: output |= SOCK_DGRAM; break; case kLinux_SOCK_SEQPACKET: output |= SOCK_SEQPACKET; break; case kLinux_SOCK_RAW: output |= SOCK_RAW; break; case kLinux_SOCK_RDM: output |= SOCK_RDM; break; case kLinux_SOCK_PACKET: output |= SOCK_PACKET; break; default: output = -1; // Unsupported } return output; } int TokLinuxOptionName(int level, int option_name) { if (level == IPPROTO_TCP) { return TokLinuxTcpOptionName(option_name); } else if (level == IPPROTO_IPV6) { return TokLinuxIpV6OptionName(option_name); } else if (level == SOL_SOCKET) { return TokLinuxSocketOptionName(option_name); } return -1; } int FromkLinuxOptionName(int level, int klinux_option_name) { if (level == IPPROTO_TCP) { return FromkLinuxTcpOptionName(klinux_option_name); } else if (level == IPPROTO_IPV6) { return TokLinuxIpV6OptionName(klinux_option_name); } else if (level == SOL_SOCKET) { return FromkLinuxSocketOptionName(klinux_option_name); } return -1; } bool FromkLinuxStat(const struct klinux_stat *input, struct stat *output) { if (!input || !output) return false; output->st_atime = input->klinux_st_atime; output->st_blksize = input->klinux_st_blksize; output->st_blocks = input->klinux_st_blocks; output->st_mtime = input->klinux_st_mtime; output->st_dev = input->klinux_st_dev; output->st_gid = input->klinux_st_gid; output->st_ino = input->klinux_st_ino; output->st_mode = input->klinux_st_mode; output->st_ctime = input->klinux_st_ctime; output->st_nlink = input->klinux_st_nlink; output->st_rdev = input->klinux_st_rdev; output->st_size = input->klinux_st_size; output->st_uid = input->klinux_st_uid; return true; } bool TokLinuxStat(const struct stat *input, struct klinux_stat *output) { if (!input || !output) return false; output->klinux_st_atime = input->st_atime; output->klinux_st_blksize = input->st_blksize; output->klinux_st_blocks = input->st_blocks; output->klinux_st_mtime = input->st_mtime; output->klinux_st_dev = input->st_dev; output->klinux_st_gid = input->st_gid; output->klinux_st_ino = input->st_ino; output->klinux_st_mode = input->st_mode; output->klinux_st_ctime = input->st_ctime; output->klinux_st_nlink = input->st_nlink; output->klinux_st_rdev = input->st_rdev; output->klinux_st_size = input->st_size; output->klinux_st_uid = input->st_uid; return true; } bool SockaddrTokLinuxSockaddrUn(const struct sockaddr *input, socklen_t input_addrlen, klinux_sockaddr_un *output) { if (!input || !output || input_addrlen == 0 || input->sa_family != AF_UNIX || input_addrlen < sizeof(output->klinux_sun_family)) { output = nullptr; return false; } struct sockaddr_un *sock_un = const_cast<struct 
sockaddr_un *>( reinterpret_cast<const struct sockaddr_un *>(input)); output->klinux_sun_family = kLinux_AF_UNIX; InitializeToZeroArray(output->klinux_sun_path); ReinterpretCopyArray(output->klinux_sun_path, sock_un->sun_path, input_addrlen - sizeof(input->sa_family)); return true; } bool SockaddrTokLinuxSockaddrIn(const struct sockaddr *input, socklen_t input_addrlen, klinux_sockaddr_in *output) { if (!input || !output || input_addrlen == 0 || input->sa_family != AF_INET || input_addrlen < sizeof(struct sockaddr_in)) { output = nullptr; return false; } struct sockaddr_in *sockaddr_in_from = const_cast<struct sockaddr_in *>( reinterpret_cast<const struct sockaddr_in *>(input)); output->klinux_sin_family = kLinux_AF_INET; output->klinux_sin_port = sockaddr_in_from->sin_port; InitializeToZeroSingle(&output->klinux_sin_addr); ReinterpretCopySingle(&output->klinux_sin_addr, &sockaddr_in_from->sin_addr); InitializeToZeroArray(output->klinux_sin_zero); ReinterpretCopyArray(output->klinux_sin_zero, sockaddr_in_from->sin_zero, std::min(sizeof(output->klinux_sin_zero), sizeof(sockaddr_in_from->sin_zero))); return true; } bool SockaddrTokLinuxSockaddrIn6(const struct sockaddr *input, socklen_t input_addrlen, klinux_sockaddr_in6 *output) { if (!input || !output || input_addrlen == 0 || input->sa_family != AF_INET6 || input_addrlen < sizeof(struct sockaddr_in6)) { output = nullptr; return false; } struct sockaddr_in6 *sockaddr_in6_from = const_cast<struct sockaddr_in6 *>( reinterpret_cast<const struct sockaddr_in6 *>(input)); output->klinux_sin6_family = kLinux_AF_INET6; output->klinux_sin6_flowinfo = sockaddr_in6_from->sin6_flowinfo; output->klinux_sin6_port = sockaddr_in6_from->sin6_port; output->klinux_sin6_scope_id = sockaddr_in6_from->sin6_scope_id; InitializeToZeroSingle(&output->klinux_sin6_addr); ReinterpretCopySingle(&output->klinux_sin6_addr, &sockaddr_in6_from->sin6_addr); return true; } bool FromkLinuxSockAddrUn(const struct klinux_sockaddr_un *input, struct sockaddr_un *output) { if (!input || !output) { return false; } output->sun_family = AF_UNIX; InitializeToZeroArray(output->sun_path); ReinterpretCopyArray( output->sun_path, input->klinux_sun_path, sizeof(struct klinux_sockaddr_un) - sizeof(input->klinux_sun_family)); return true; } bool FromkLinuxSockAddrIn(const struct klinux_sockaddr_in *input, struct sockaddr_in *output) { if (!input || !output) { return false; } output->sin_family = AF_INET; output->sin_port = input->klinux_sin_port; InitializeToZeroSingle(&output->sin_addr); ReinterpretCopySingle(&output->sin_addr, &input->klinux_sin_port); InitializeToZeroArray(output->sin_zero); ReinterpretCopyArray( output->sin_zero, input->klinux_sin_zero, std::min(sizeof(output->sin_zero), sizeof(input->klinux_sin_zero))); return true; } bool FromkLinuxSockAddrIn6(const struct klinux_sockaddr_in6 *input, struct sockaddr_in6 *output) { if (!input || !output) { return false; } output->sin6_family = AF_INET; output->sin6_port = input->klinux_sin6_port; output->sin6_scope_id = input->klinux_sin6_scope_id; output->sin6_flowinfo = input->klinux_sin6_flowinfo; InitializeToZeroSingle(&output->sin6_addr); ReinterpretCopySingle(&output->sin6_addr, &input->klinux_sin6_port); return true; } bool FromkLinuxStatFs(const struct klinux_statfs *input, struct statfs *output) { if (!input || !output) return false; output->f_type = input->klinux_f_type; output->f_bsize = input->klinux_f_bsize; output->f_blocks = input->klinux_f_blocks; output->f_bfree = input->klinux_f_bfree; output->f_bavail = 
input->klinux_f_bavail; output->f_files = input->klinux_f_files; output->f_ffree = input->klinux_f_ffree; output->f_fsid.__val[0] = input->klinux_f_fsid.__val[0]; output->f_fsid.__val[1] = input->klinux_f_fsid.__val[1]; output->f_namelen = input->klinux_f_namelen; output->f_frsize = input->klinux_f_frsize; output->f_flags = input->klinux_f_flags; memset(output->f_spare, 0, sizeof(output->f_spare)); return true; } bool TokLinuxStatFs(const struct statfs *input, struct klinux_statfs *output) { if (!input || !output) return false; output->klinux_f_bsize = input->f_bsize; output->klinux_f_frsize = input->f_frsize; output->klinux_f_blocks = input->f_blocks; output->klinux_f_bfree = input->f_bfree; output->klinux_f_bavail = input->f_bavail; output->klinux_f_files = input->f_files; output->klinux_f_ffree = input->f_ffree; output->klinux_f_fsid.__val[0] = input->f_fsid.__val[0]; output->klinux_f_fsid.__val[1] = input->f_fsid.__val[1]; output->klinux_f_namelen = input->f_namelen; output->klinux_f_frsize = input->f_frsize; output->klinux_f_flags = input->f_flags; memset(output->klinux_f_spare, 0, sizeof(output->klinux_f_spare)); return true; } int64_t FromkLinuxStatFsFlags(int64_t input) { int64_t result = 0; if (input & kLinux_ST_NOSUID) result |= ST_NOSUID; if (input & kLinux_ST_RDONLY) result |= ST_RDONLY; #if (defined(__USE_GNU) && __USE_GNU) || \ (defined(__GNU_VISIBLE) && __GNU_VISIBLE) if (input & kLinux_ST_MANDLOCK) result |= ST_MANDLOCK; if (input & kLinux_ST_NOATIME) result |= ST_NOATIME; if (input & kLinux_ST_NODEV) result |= ST_NODEV; if (input & kLinux_ST_NODIRATIME) result |= ST_NODIRATIME; if (input & kLinux_ST_NOEXEC) result |= ST_NOEXEC; if (input & kLinux_ST_RELATIME) result |= ST_RELATIME; if (input & kLinux_ST_SYNCHRONOUS) result |= ST_SYNCHRONOUS; #endif return result; } int64_t TokLinuxStatFsFlags(int64_t input) { int64_t result = 0; if (input & ST_NOSUID) result |= kLinux_ST_NOSUID; if (input & ST_RDONLY) result |= kLinux_ST_RDONLY; #if (defined(__USE_GNU) && __USE_GNU) || \ (defined(__GNU_VISIBLE) && __GNU_VISIBLE) if (input & ST_MANDLOCK) result |= kLinux_ST_MANDLOCK; if (input & ST_NOATIME) result |= kLinux_ST_NOATIME; if (input & ST_NODEV) result |= kLinux_ST_NODEV; if (input & ST_NODIRATIME) result |= kLinux_ST_NODIRATIME; if (input & ST_NOEXEC) result |= kLinux_ST_NOEXEC; if (input & ST_RELATIME) result |= kLinux_ST_RELATIME; if (input & ST_SYNCHRONOUS) result |= kLinux_ST_SYNCHRONOUS; #endif return result; } bool FromkLinuxSockAddr(const struct klinux_sockaddr *input, socklen_t input_len, struct sockaddr *output, socklen_t *output_len, void (*abort_handler)(const char *)) { if (!input || !output || !output_len || input_len == 0) { output = nullptr; return false; } int16_t klinux_family = input->klinux_sa_family; if (klinux_family == kLinux_AF_UNIX) { struct klinux_sockaddr_un *klinux_sockaddr_un_in = const_cast<struct klinux_sockaddr_un *>( reinterpret_cast<const struct klinux_sockaddr_un *>(input)); struct sockaddr_un sockaddr_un_out; sockaddr_un_out.sun_family = AF_UNIX; InitializeToZeroArray(sockaddr_un_out.sun_path); ReinterpretCopyArray( sockaddr_un_out.sun_path, klinux_sockaddr_un_in->klinux_sun_path, std::min(sizeof(sockaddr_un_out.sun_path), sizeof(klinux_sockaddr_un_in->klinux_sun_path))); CopySockaddr(&sockaddr_un_out, sizeof(sockaddr_un_out), output, output_len); } else if (klinux_family == kLinux_AF_INET) { struct klinux_sockaddr_in *klinux_sockaddr_in_in = const_cast<struct klinux_sockaddr_in *>( reinterpret_cast<const struct klinux_sockaddr_in *>(input)); 
struct sockaddr_in sockaddr_in_out; sockaddr_in_out.sin_family = AF_INET; sockaddr_in_out.sin_port = klinux_sockaddr_in_in->klinux_sin_port; InitializeToZeroSingle(&sockaddr_in_out.sin_addr); ReinterpretCopySingle(&sockaddr_in_out.sin_addr, &klinux_sockaddr_in_in->klinux_sin_addr); InitializeToZeroArray(sockaddr_in_out.sin_zero); ReinterpretCopyArray(sockaddr_in_out.sin_zero, klinux_sockaddr_in_in->klinux_sin_zero); CopySockaddr(&sockaddr_in_out, sizeof(sockaddr_in_out), output, output_len); } else if (klinux_family == kLinux_AF_INET6) { struct klinux_sockaddr_in6 *klinux_sockaddr_in6_in = const_cast<struct klinux_sockaddr_in6 *>( reinterpret_cast<const struct klinux_sockaddr_in6 *>(input)); struct sockaddr_in6 sockaddr_in6_out; sockaddr_in6_out.sin6_family = AF_INET6; sockaddr_in6_out.sin6_port = klinux_sockaddr_in6_in->klinux_sin6_port; sockaddr_in6_out.sin6_flowinfo = klinux_sockaddr_in6_in->klinux_sin6_flowinfo; sockaddr_in6_out.sin6_scope_id = klinux_sockaddr_in6_in->klinux_sin6_scope_id; InitializeToZeroSingle(&sockaddr_in6_out.sin6_addr); ReinterpretCopySingle(&sockaddr_in6_out.sin6_addr, &klinux_sockaddr_in6_in->klinux_sin6_addr); CopySockaddr(&sockaddr_in6_out, sizeof(sockaddr_in6_out), output, output_len); } else if (klinux_family == kLinux_AF_UNSPEC) { output = nullptr; *output_len = 0; } else { if (abort_handler != nullptr) { std::string message = absl::StrCat( "Type conversion error - Unsupported AF family: ", klinux_family); abort_handler(message.c_str()); } else { abort(); } } return true; } bool TokLinuxSockAddr(const struct sockaddr *input, socklen_t input_len, struct klinux_sockaddr *output, socklen_t *output_len, void (*abort_handler)(const char *)) { if (!input || input_len == 0 || !output || !output_len) { return false; } if (input->sa_family == AF_UNIX) { struct klinux_sockaddr_un klinux_sock_un; if (!SockaddrTokLinuxSockaddrUn(input, input_len, &klinux_sock_un)) { return false; } CopySockaddr(&klinux_sock_un, sizeof(klinux_sock_un), output, output_len); } else if (input->sa_family == AF_INET) { struct klinux_sockaddr_in klinux_sock_in; if (!SockaddrTokLinuxSockaddrIn(input, input_len, &klinux_sock_in)) { return false; } CopySockaddr(&klinux_sock_in, sizeof(klinux_sock_in), output, output_len); } else if (input->sa_family == AF_INET6) { struct klinux_sockaddr_in6 klinux_sock_in6; if (!SockaddrTokLinuxSockaddrIn6(input, input_len, &klinux_sock_in6)) { return false; } CopySockaddr(&klinux_sock_in6, sizeof(klinux_sock_in6), output, output_len); } else if (input->sa_family == AF_UNSPEC) { output = nullptr; *output_len = 0; } else { if (abort_handler != nullptr) { std::string message = absl::StrCat("Unsupported AF family encountered: ", input->sa_family); abort_handler(message.c_str()); } else { abort(); } } return true; } bool FromkLinuxFdSet(const struct klinux_fd_set *input, fd_set *output) { if (!input || !output) { output = nullptr; return false; } FD_ZERO(output); for (int fd = 0; fd < std::min(KLINUX_FD_SETSIZE, FD_SETSIZE); ++fd) { if (KLINUX_FD_ISSET(fd, input)) { FD_SET(fd, output); } } return true; } bool TokLinuxFdSet(const fd_set *input, struct klinux_fd_set *output) { if (!input || !output) { output = nullptr; return false; } KLINUX_FD_ZERO(output); for (int fd = 0; fd < std::min(FD_SETSIZE, KLINUX_FD_SETSIZE); ++fd) { if (FD_ISSET(fd, input)) { KLINUX_FD_SET(fd, output); } } return true; } int FromkLinuxSignalNumber(int input) { #if defined(SIGRTMIN) && defined(SIGRTMAX) if (input >= kLinux_SIGRTMIN && input <= kLinux_SIGRTMAX) { return SIGRTMIN + input - 
kLinux_SIGRTMIN; } #endif return FromkLinuxBaseSignalNumber(input); } int TokLinuxSignalNumber(int input) { #if defined(SIGRTMIN) && defined(SIGRTMAX) if (input >= SIGRTMIN && input <= SIGRTMAX) { return kLinux_SIGRTMIN + input - SIGRTMIN; } #endif return TokLinuxBaseSignalNumber(input); } bool TokLinuxSigset(const sigset_t *input, klinux_sigset_t *output) { if (!input || !output) { output = nullptr; return false; } klinux_sigemptyset(output); for (int sig = 1; sig < NSIG; sig++) { if (sigismember(input, sig)) { int klinux_sig = TokLinuxSignalNumber(sig); if (klinux_sig != -1) { klinux_sigaddset(output, klinux_sig); } } } return true; } bool FromkLinuxSigset(const klinux_sigset_t *input, sigset_t *output) { if (!input || !output) { output = nullptr; return false; } sigemptyset(output); for (int klinux_sig = 1; klinux_sig < kLinux_NSIG; klinux_sig++) { if (klinux_sigismember(input, klinux_sig)) { int sig = FromkLinuxSignalNumber(klinux_sig); if (sig != -1) { sigaddset(output, sig); } } } return true; } inline uint64_t kLinuxCpuWordNum(int cpu) { return cpu / (8 * sizeof(klinux_cpu_set_word)); } inline klinux_cpu_set_word kLinuxCpuBitNum(int cpu) { return cpu % (8 * sizeof(klinux_cpu_set_word)); } int kLinuxCpuSetCheckBit(int cpu, klinux_cpu_set_t *set) { return (set->words[kLinuxCpuWordNum(cpu)] & (static_cast<klinux_cpu_set_word>(1) << kLinuxCpuBitNum(cpu))) != 0; } bool FromkLinuxCpuSet(klinux_cpu_set_t *input, cpu_set_t *output) { if (!input || !output) { return false; } CPU_ZERO(output); for (int cpu = 0; cpu < KLINUX_CPU_SET_MAX_CPUS; cpu++) { if (kLinuxCpuSetCheckBit(cpu, input)) { CPU_SET(cpu, output); } } return true; } bool TokLinuxItimerval(const struct itimerval *input, struct klinux_itimerval *output) { if (!input || !output) { return false; } if (!TokLinuxtimeval(&input->it_interval, &output->klinux_it_interval) || !TokLinuxtimeval(&input->it_value, &output->klinux_it_value)) { return false; } return true; } bool FromkLinuxItimerval(const struct klinux_itimerval *input, struct itimerval *output) { if (!input || !output) { return false; } if (!FromkLinuxtimeval(&input->klinux_it_interval, &output->it_interval) || !FromkLinuxtimeval(&input->klinux_it_value, &output->it_value)) { return false; } return true; } bool TokLinuxPollfd(const struct pollfd *input, struct klinux_pollfd *output) { if (!input || !output) return false; output->klinux_fd = input->fd; output->klinux_events = TokLinuxPollEvent(input->events); output->klinux_revents = TokLinuxPollEvent(input->revents); return true; } bool FromkLinuxPollfd(const struct klinux_pollfd *input, struct pollfd *output) { if (!input || !output) return false; output->fd = input->klinux_fd; output->events = FromkLinuxPollEvent(input->klinux_events); output->revents = FromkLinuxPollEvent(input->klinux_revents); return true; } bool TokLinuxEpollEvent(const struct epoll_event *input, struct klinux_epoll_event *output) { if (!input || !output) return false; output->events = TokLinuxEpollEvents(input->events); if (input->events != 0 && output->events == 0) { return false; } output->data.u64 = input->data.u64; return true; } bool FromkLinuxEpollEvent(const struct klinux_epoll_event *input, struct epoll_event *output) { if (!input || !output) return false; output->events = FromkLinuxEpollEvents(input->events); if (input->events != 0 && output->events == 0) { return false; } output->data.u64 = input->data.u64; return true; } bool FromkLinuxRusage(const struct klinux_rusage *input, struct rusage *output) { if (!input || !output) { return false; } 
if (!FromkLinuxtimeval(&input->ru_stime, &output->ru_stime) || !FromkLinuxtimeval(&input->ru_utime, &output->ru_utime)) { return false; } return true; } bool TokLinuxRusage(const struct rusage *input, struct klinux_rusage *output) { if (!input || !output) { return false; } if (!TokLinuxtimeval(&input->ru_stime, &output->ru_stime) || !TokLinuxtimeval(&input->ru_utime, &output->ru_utime)) { return false; } return true; } int FromkLinuxToNewlibWstatus(int input) { int info = static_cast<int>(input >> 8 & 0xff) << 8; int code = input & 0x7f; if (KLINUX_WIFEXITED(input)) { code = 0; } else if (KLINUX_WIFSTOPPED(input)) { code = 0x7f; } return info + code; } bool FromkLinuxUtsName(const struct klinux_utsname *input, struct utsname *output) { if (!input || !output) { return false; } if (!CStringCopy(input->sysname, output->sysname, sizeof(output->sysname)) || !CStringCopy(input->nodename, output->nodename, sizeof(output->nodename)) || !CStringCopy(input->release, output->release, sizeof(output->release)) || !CStringCopy(input->version, output->version, sizeof(output->version)) || !CStringCopy(input->machine, output->machine, sizeof(output->machine))) { return false; } #if (defined(__USE_GNU) && __USE_GNU) || \ (defined(__GNU_VISIBLE) && __GNU_VISIBLE) if (!CStringCopy(input->domainname, output->domainname, sizeof(output->domainname))) { return false; } #else if (!CStringCopy(input->__domainname, output->domainname, sizeof(output->domainname))) { return false; } #endif return true; } // Priorities are encoded into a single 32-bit integer. The bottom 3 bits are // the level and the rest are the facility. int TokLinuxSyslogPriority(int input) { int syslog_level = input & 0x07; int syslog_facility = input & ~0x07; return TokLinuxSyslogLevel(syslog_level) | TokLinuxSyslogFacility(syslog_facility); } bool TokLinuxSiginfo(const siginfo_t *input, klinux_siginfo_t *output) { if (!input || !output) { return false; } output->si_signo = TokLinuxSignalNumber(input->si_signo); output->si_code = TokLinuxSignalNumber(input->si_code); return true; } bool FromkLinuxSiginfo(const klinux_siginfo_t *input, siginfo_t *output) { if (!input || !output) { return false; } output->si_signo = FromkLinuxSignalNumber(input->si_signo); output->si_code = FromkLinuxSignalNumber(input->si_code); return true; }
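The CStringCopy() helper defined near the top of this listing leans on snprintf() semantics: with a non-zero size, snprintf() writes at most size - 1 characters plus a terminating NUL and returns the length the full output would have had, so a return value of size or more signals that the source string did not fit. A short, self-contained equivalent with an example caller is sketched below; BoundedCopy and CopyNodeName are illustrative names only.

#include <cstddef>
#include <cstdio>

// Bounded C-string copy in the style of CStringCopy(): NUL-terminates the
// destination and reports failure when the source (including its NUL) does
// not fit in |size| bytes.
static bool BoundedCopy(const char *src, char *dst, size_t size) {
  int ret = std::snprintf(dst, size, "%s", src);
  return ret >= 0 && static_cast<size_t>(ret) < size;
}

// Example: refuse oversized host-supplied names instead of silently
// truncating them into a fixed-size utsname-style field.
static bool CopyNodeName(const char *host_name, char (&out)[65]) {
  return BoundedCopy(host_name, out, sizeof(out));
}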
null
/* * * Copyright 2019 Asylo authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "asylo/platform/system_call/type_conversions/manual_types_functions.h" #include <sched.h> #include <signal.h> #include <sys/stat.h> #include <sys/statfs.h> #include <sys/statvfs.h> #include <algorithm> #include <cstring> #include "absl/strings/str_cat.h" #include "asylo/platform/system_call/type_conversions/generated_types_functions.h" namespace { template <typename T, typename U> void ReinterpretCopySingle(T *dst, const U *src) { memcpy(dst, src, std::min(sizeof(T), sizeof(U))); } template <typename T, size_t M, typename U, size_t N> void ReinterpretCopyArray(T (&dst)[M], const U (&src)[N], size_t max_len = SIZE_MAX) { memcpy(dst, src, std::min(max_len, std::min(sizeof(T) * M, sizeof(U) * N))); } template <typename T> void InitializeToZeroSingle(T *ptr) { memset(ptr, 0, sizeof(T)); } template <typename T, size_t M> void InitializeToZeroArray(T (&ptr)[M]) { memset(ptr, 0, sizeof(T) * M); } // Helper for implementing standard POSIX semantics for returning sockaddr // structures. Copies the sockaddr in |source|, of length |source_len|, into the // buffer pointed to by |addr_dest|, which has |addrlen_dest| bytes available. // The copy is truncated if the destination buffer is too small. The number of // bytes in the un-truncated structure is written to addrlen_dest. void CopySockaddr(void *source, socklen_t source_len, void *addr_dest, socklen_t *addrlen_dest) { memcpy(addr_dest, source, std::min(*addrlen_dest, source_len)); *addrlen_dest = source_len; } inline void klinux_sigemptyset(klinux_sigset_t *klinux_set) { memset(klinux_set, 0, sizeof(klinux_sigset_t)); } inline int klinux_sigismember(const klinux_sigset_t *klinux_set, int klinux_sig) { uint64_t sig = klinux_sig - 1; return 1 & (klinux_set->klinux_val[0] >> sig); } inline void klinux_sigaddset(klinux_sigset_t *klinux_set, int klinux_sig) { uint64_t sig = klinux_sig - 1; klinux_set->klinux_val[0] |= 1UL << sig; } // Copies the C string |source_buf| into |dest_buf|. Only copies up to size-1 // non-null characters. Always terminates the copied string with a null byte on // a successful write. // // Fails if |source_buf| contains more than |size| bytes (including the // terminating null byte). bool CStringCopy(const char *source_buf, char *dest_buf, size_t size) { int ret = snprintf(dest_buf, size, "%s", source_buf); return ret >= 0 && static_cast<size_t>(ret) < size; } } // namespace int TokLinuxSocketType(int input) { int sock_type = input; int output = 0; if (sock_type & SOCK_NONBLOCK) { output |= kLinux_SOCK_NONBLOCK; sock_type &= ~SOCK_NONBLOCK; } if (sock_type & SOCK_CLOEXEC) { output |= kLinux_SOCK_CLOEXEC; sock_type &= ~SOCK_CLOEXEC; } if (!sock_type) { // Only SOCK_CLOEXEC or SOCK_NONBLOCK are present. 
return output; } switch (sock_type) { case SOCK_STREAM: output |= kLinux_SOCK_STREAM; break; case SOCK_DGRAM: output |= kLinux_SOCK_DGRAM; break; case SOCK_SEQPACKET: output |= kLinux_SOCK_SEQPACKET; break; case SOCK_RAW: output |= kLinux_SOCK_RAW; break; case SOCK_RDM: output |= kLinux_SOCK_RDM; break; case SOCK_PACKET: output |= kLinux_SOCK_PACKET; break; default: output = -1; // Unsupported } return output; } int FromkLinuxSocketType(int input) { int kLinux_sock_type = input; int output = 0; if (kLinux_sock_type & kLinux_SOCK_NONBLOCK) { output |= SOCK_NONBLOCK; kLinux_sock_type &= ~kLinux_SOCK_NONBLOCK; } if (kLinux_sock_type & kLinux_SOCK_CLOEXEC) { output |= SOCK_CLOEXEC; kLinux_sock_type &= ~kLinux_SOCK_CLOEXEC; } if (!kLinux_sock_type) { // Only kLinux_SOCK_CLOEXEC or kLinux_SOCK_NONBLOCK // are present. return output; } switch (kLinux_sock_type) { case kLinux_SOCK_STREAM: output |= SOCK_STREAM; break; case kLinux_SOCK_DGRAM: output |= SOCK_DGRAM; break; case kLinux_SOCK_SEQPACKET: output |= SOCK_SEQPACKET; break; case kLinux_SOCK_RAW: output |= SOCK_RAW; break; case kLinux_SOCK_RDM: output |= SOCK_RDM; break; case kLinux_SOCK_PACKET: output |= SOCK_PACKET; break; default: output = -1; // Unsupported } return output; } int TokLinuxOptionName(int level, int option_name) { if (level == IPPROTO_TCP) { return TokLinuxTcpOptionName(option_name); } else if (level == IPPROTO_IPV6) { return TokLinuxIpV6OptionName(option_name); } else if (level == SOL_SOCKET) { return TokLinuxSocketOptionName(option_name); } return -1; } int FromkLinuxOptionName(int level, int klinux_option_name) { if (level == IPPROTO_TCP) { return FromkLinuxTcpOptionName(klinux_option_name); } else if (level == IPPROTO_IPV6) { return TokLinuxIpV6OptionName(klinux_option_name); } else if (level == SOL_SOCKET) { return FromkLinuxSocketOptionName(klinux_option_name); } return -1; } bool FromkLinuxStat(const struct klinux_stat *input, struct stat *output) { if (!input || !output) return false; output->st_atime = input->klinux_st_atime; output->st_blksize = input->klinux_st_blksize; output->st_blocks = input->klinux_st_blocks; output->st_mtime = input->klinux_st_mtime; output->st_dev = input->klinux_st_dev; output->st_gid = input->klinux_st_gid; output->st_ino = input->klinux_st_ino; output->st_mode = input->klinux_st_mode; output->st_ctime = input->klinux_st_ctime; output->st_nlink = input->klinux_st_nlink; output->st_rdev = input->klinux_st_rdev; output->st_size = input->klinux_st_size; output->st_uid = input->klinux_st_uid; return true; } bool TokLinuxStat(const struct stat *input, struct klinux_stat *output) { if (!input || !output) return false; output->klinux_st_atime = input->st_atime; output->klinux_st_blksize = input->st_blksize; output->klinux_st_blocks = input->st_blocks; output->klinux_st_mtime = input->st_mtime; output->klinux_st_dev = input->st_dev; output->klinux_st_gid = input->st_gid; output->klinux_st_ino = input->st_ino; output->klinux_st_mode = input->st_mode; output->klinux_st_ctime = input->st_ctime; output->klinux_st_nlink = input->st_nlink; output->klinux_st_rdev = input->st_rdev; output->klinux_st_size = input->st_size; output->klinux_st_uid = input->st_uid; return true; } bool SockaddrTokLinuxSockaddrUn(const struct sockaddr *input, socklen_t input_addrlen, klinux_sockaddr_un *output) { if (!input || !output || input_addrlen == 0 || input->sa_family != AF_UNIX || input_addrlen < sizeof(output->klinux_sun_family)) { output = nullptr; return false; } struct sockaddr_un *sock_un = const_cast<struct 
sockaddr_un *>( reinterpret_cast<const struct sockaddr_un *>(input)); output->klinux_sun_family = kLinux_AF_UNIX; InitializeToZeroArray(output->klinux_sun_path); ReinterpretCopyArray(output->klinux_sun_path, sock_un->sun_path, input_addrlen - sizeof(input->sa_family)); return true; } bool SockaddrTokLinuxSockaddrIn(const struct sockaddr *input, socklen_t input_addrlen, klinux_sockaddr_in *output) { if (!input || !output || input_addrlen == 0 || input->sa_family != AF_INET || input_addrlen < sizeof(struct sockaddr_in)) { output = nullptr; return false; } struct sockaddr_in *sockaddr_in_from = const_cast<struct sockaddr_in *>( reinterpret_cast<const struct sockaddr_in *>(input)); output->klinux_sin_family = kLinux_AF_INET; output->klinux_sin_port = sockaddr_in_from->sin_port; InitializeToZeroSingle(&output->klinux_sin_addr); ReinterpretCopySingle(&output->klinux_sin_addr, &sockaddr_in_from->sin_addr); InitializeToZeroArray(output->klinux_sin_zero); ReinterpretCopyArray(output->klinux_sin_zero, sockaddr_in_from->sin_zero, std::min(sizeof(output->klinux_sin_zero), sizeof(sockaddr_in_from->sin_zero))); return true; } bool SockaddrTokLinuxSockaddrIn6(const struct sockaddr *input, socklen_t input_addrlen, klinux_sockaddr_in6 *output) { if (!input || !output || input_addrlen == 0 || input->sa_family != AF_INET6 || input_addrlen < sizeof(struct sockaddr_in6)) { output = nullptr; return false; } struct sockaddr_in6 *sockaddr_in6_from = const_cast<struct sockaddr_in6 *>( reinterpret_cast<const struct sockaddr_in6 *>(input)); output->klinux_sin6_family = kLinux_AF_INET6; output->klinux_sin6_flowinfo = sockaddr_in6_from->sin6_flowinfo; output->klinux_sin6_port = sockaddr_in6_from->sin6_port; output->klinux_sin6_scope_id = sockaddr_in6_from->sin6_scope_id; InitializeToZeroSingle(&output->klinux_sin6_addr); ReinterpretCopySingle(&output->klinux_sin6_addr, &sockaddr_in6_from->sin6_addr); return true; } bool FromkLinuxSockAddrUn(const struct klinux_sockaddr_un *input, struct sockaddr_un *output) { if (!input || !output) { return false; } output->sun_family = AF_UNIX; InitializeToZeroArray(output->sun_path); ReinterpretCopyArray( output->sun_path, input->klinux_sun_path, sizeof(struct klinux_sockaddr_un) - sizeof(input->klinux_sun_family)); return true; } bool FromkLinuxSockAddrIn(const struct klinux_sockaddr_in *input, struct sockaddr_in *output) { if (!input || !output) { return false; } output->sin_family = AF_INET; output->sin_port = input->klinux_sin_port; InitializeToZeroSingle(&output->sin_addr); ReinterpretCopySingle(&output->sin_addr, &input->klinux_sin_port); InitializeToZeroArray(output->sin_zero); ReinterpretCopyArray( output->sin_zero, input->klinux_sin_zero, std::min(sizeof(output->sin_zero), sizeof(input->klinux_sin_zero))); return true; } bool FromkLinuxSockAddrIn6(const struct klinux_sockaddr_in6 *input, struct sockaddr_in6 *output) { if (!input || !output) { return false; } output->sin6_family = AF_INET; output->sin6_port = input->klinux_sin6_port; output->sin6_scope_id = input->klinux_sin6_scope_id; output->sin6_flowinfo = input->klinux_sin6_flowinfo; InitializeToZeroSingle(&output->sin6_addr); ReinterpretCopySingle(&output->sin6_addr, &input->klinux_sin6_port); return true; } bool FromkLinuxStatFs(const struct klinux_statfs *input, struct statfs *output) { if (!input || !output) return false; output->f_type = input->klinux_f_type; output->f_bsize = input->klinux_f_bsize; output->f_blocks = input->klinux_f_blocks; output->f_bfree = input->klinux_f_bfree; output->f_bavail = 
input->klinux_f_bavail; output->f_files = input->klinux_f_files; output->f_ffree = input->klinux_f_ffree; output->f_fsid.__val[0] = input->klinux_f_fsid.__val[0]; output->f_fsid.__val[1] = input->klinux_f_fsid.__val[1]; output->f_namelen = input->klinux_f_namelen; output->f_frsize = input->klinux_f_frsize; output->f_flags = input->klinux_f_flags; memset(output->f_spare, 0, sizeof(output->f_spare)); return true; } bool TokLinuxStatFs(const struct statfs *input, struct klinux_statfs *output) { if (!input || !output) return false; output->klinux_f_bsize = input->f_bsize; output->klinux_f_frsize = input->f_frsize; output->klinux_f_blocks = input->f_blocks; output->klinux_f_bfree = input->f_bfree; output->klinux_f_bavail = input->f_bavail; output->klinux_f_files = input->f_files; output->klinux_f_ffree = input->f_ffree; output->klinux_f_fsid.__val[0] = input->f_fsid.__val[0]; output->klinux_f_fsid.__val[1] = input->f_fsid.__val[1]; output->klinux_f_namelen = input->f_namelen; output->klinux_f_frsize = input->f_frsize; output->klinux_f_flags = input->f_flags; memset(output->klinux_f_spare, 0, sizeof(output->klinux_f_spare)); return true; } int64_t FromkLinuxStatFsFlags(int64_t input) { int64_t result = 0; if (input & kLinux_ST_NOSUID) result |= ST_NOSUID; if (input & kLinux_ST_RDONLY) result |= ST_RDONLY; #if (defined(__USE_GNU) && __USE_GNU) || \ (defined(__GNU_VISIBLE) && __GNU_VISIBLE) if (input & kLinux_ST_MANDLOCK) result |= ST_MANDLOCK; if (input & kLinux_ST_NOATIME) result |= ST_NOATIME; if (input & kLinux_ST_NODEV) result |= ST_NODEV; if (input & kLinux_ST_NODIRATIME) result |= ST_NODIRATIME; if (input & kLinux_ST_NOEXEC) result |= ST_NOEXEC; if (input & kLinux_ST_RELATIME) result |= ST_RELATIME; if (input & kLinux_ST_SYNCHRONOUS) result |= ST_SYNCHRONOUS; #endif return result; } int64_t TokLinuxStatFsFlags(int64_t input) { int64_t result = 0; if (input & ST_NOSUID) result |= kLinux_ST_NOSUID; if (input & ST_RDONLY) result |= kLinux_ST_RDONLY; #if (defined(__USE_GNU) && __USE_GNU) || \ (defined(__GNU_VISIBLE) && __GNU_VISIBLE) if (input & ST_MANDLOCK) result |= kLinux_ST_MANDLOCK; if (input & ST_NOATIME) result |= kLinux_ST_NOATIME; if (input & ST_NODEV) result |= kLinux_ST_NODEV; if (input & ST_NODIRATIME) result |= kLinux_ST_NODIRATIME; if (input & ST_NOEXEC) result |= kLinux_ST_NOEXEC; if (input & ST_RELATIME) result |= kLinux_ST_RELATIME; if (input & ST_SYNCHRONOUS) result |= kLinux_ST_SYNCHRONOUS; #endif return result; } bool FromkLinuxSockAddr(const struct klinux_sockaddr *input, socklen_t input_len, struct sockaddr *output, socklen_t *output_len, void (*abort_handler)(const char *)) { if (!input || !output || !output_len || input_len == 0) { output = nullptr; return false; } int16_t klinux_family = input->klinux_sa_family; if (klinux_family == kLinux_AF_UNIX) { if (input_len < sizeof(struct klinux_sockaddr_un)) { return false; } struct klinux_sockaddr_un *klinux_sockaddr_un_in = const_cast<struct klinux_sockaddr_un *>( reinterpret_cast<const struct klinux_sockaddr_un *>(input)); struct sockaddr_un sockaddr_un_out; sockaddr_un_out.sun_family = AF_UNIX; InitializeToZeroArray(sockaddr_un_out.sun_path); ReinterpretCopyArray( sockaddr_un_out.sun_path, klinux_sockaddr_un_in->klinux_sun_path, std::min(sizeof(sockaddr_un_out.sun_path), sizeof(klinux_sockaddr_un_in->klinux_sun_path))); CopySockaddr(&sockaddr_un_out, sizeof(sockaddr_un_out), output, output_len); } else if (klinux_family == kLinux_AF_INET) { if (input_len < sizeof(struct klinux_sockaddr_in)) { return false; } struct 
klinux_sockaddr_in *klinux_sockaddr_in_in = const_cast<struct klinux_sockaddr_in *>( reinterpret_cast<const struct klinux_sockaddr_in *>(input)); struct sockaddr_in sockaddr_in_out; sockaddr_in_out.sin_family = AF_INET; sockaddr_in_out.sin_port = klinux_sockaddr_in_in->klinux_sin_port; InitializeToZeroSingle(&sockaddr_in_out.sin_addr); ReinterpretCopySingle(&sockaddr_in_out.sin_addr, &klinux_sockaddr_in_in->klinux_sin_addr); InitializeToZeroArray(sockaddr_in_out.sin_zero); ReinterpretCopyArray(sockaddr_in_out.sin_zero, klinux_sockaddr_in_in->klinux_sin_zero); CopySockaddr(&sockaddr_in_out, sizeof(sockaddr_in_out), output, output_len); } else if (klinux_family == kLinux_AF_INET6) { if (input_len < sizeof(struct klinux_sockaddr_in6)) { return false; } struct klinux_sockaddr_in6 *klinux_sockaddr_in6_in = const_cast<struct klinux_sockaddr_in6 *>( reinterpret_cast<const struct klinux_sockaddr_in6 *>(input)); struct sockaddr_in6 sockaddr_in6_out; sockaddr_in6_out.sin6_family = AF_INET6; sockaddr_in6_out.sin6_port = klinux_sockaddr_in6_in->klinux_sin6_port; sockaddr_in6_out.sin6_flowinfo = klinux_sockaddr_in6_in->klinux_sin6_flowinfo; sockaddr_in6_out.sin6_scope_id = klinux_sockaddr_in6_in->klinux_sin6_scope_id; InitializeToZeroSingle(&sockaddr_in6_out.sin6_addr); ReinterpretCopySingle(&sockaddr_in6_out.sin6_addr, &klinux_sockaddr_in6_in->klinux_sin6_addr); CopySockaddr(&sockaddr_in6_out, sizeof(sockaddr_in6_out), output, output_len); } else if (klinux_family == kLinux_AF_UNSPEC) { output = nullptr; *output_len = 0; } else { if (abort_handler != nullptr) { std::string message = absl::StrCat( "Type conversion error - Unsupported AF family: ", klinux_family); abort_handler(message.c_str()); } else { abort(); } } return true; } bool TokLinuxSockAddr(const struct sockaddr *input, socklen_t input_len, struct klinux_sockaddr *output, socklen_t *output_len, void (*abort_handler)(const char *)) { if (!input || input_len == 0 || !output || !output_len) { return false; } if (input->sa_family == AF_UNIX) { struct klinux_sockaddr_un klinux_sock_un; if (!SockaddrTokLinuxSockaddrUn(input, input_len, &klinux_sock_un)) { return false; } CopySockaddr(&klinux_sock_un, sizeof(klinux_sock_un), output, output_len); } else if (input->sa_family == AF_INET) { struct klinux_sockaddr_in klinux_sock_in; if (!SockaddrTokLinuxSockaddrIn(input, input_len, &klinux_sock_in)) { return false; } CopySockaddr(&klinux_sock_in, sizeof(klinux_sock_in), output, output_len); } else if (input->sa_family == AF_INET6) { struct klinux_sockaddr_in6 klinux_sock_in6; if (!SockaddrTokLinuxSockaddrIn6(input, input_len, &klinux_sock_in6)) { return false; } CopySockaddr(&klinux_sock_in6, sizeof(klinux_sock_in6), output, output_len); } else if (input->sa_family == AF_UNSPEC) { output = nullptr; *output_len = 0; } else { if (abort_handler != nullptr) { std::string message = absl::StrCat("Unsupported AF family encountered: ", input->sa_family); abort_handler(message.c_str()); } else { abort(); } } return true; } bool FromkLinuxFdSet(const struct klinux_fd_set *input, fd_set *output) { if (!input || !output) { output = nullptr; return false; } FD_ZERO(output); for (int fd = 0; fd < std::min(KLINUX_FD_SETSIZE, FD_SETSIZE); ++fd) { if (KLINUX_FD_ISSET(fd, input)) { FD_SET(fd, output); } } return true; } bool TokLinuxFdSet(const fd_set *input, struct klinux_fd_set *output) { if (!input || !output) { output = nullptr; return false; } KLINUX_FD_ZERO(output); for (int fd = 0; fd < std::min(FD_SETSIZE, KLINUX_FD_SETSIZE); ++fd) { if (FD_ISSET(fd, input)) { 
KLINUX_FD_SET(fd, output); } } return true; } int FromkLinuxSignalNumber(int input) { #if defined(SIGRTMIN) && defined(SIGRTMAX) if (input >= kLinux_SIGRTMIN && input <= kLinux_SIGRTMAX) { return SIGRTMIN + input - kLinux_SIGRTMIN; } #endif return FromkLinuxBaseSignalNumber(input); } int TokLinuxSignalNumber(int input) { #if defined(SIGRTMIN) && defined(SIGRTMAX) if (input >= SIGRTMIN && input <= SIGRTMAX) { return kLinux_SIGRTMIN + input - SIGRTMIN; } #endif return TokLinuxBaseSignalNumber(input); } bool TokLinuxSigset(const sigset_t *input, klinux_sigset_t *output) { if (!input || !output) { output = nullptr; return false; } klinux_sigemptyset(output); for (int sig = 1; sig < NSIG; sig++) { if (sigismember(input, sig)) { int klinux_sig = TokLinuxSignalNumber(sig); if (klinux_sig != -1) { klinux_sigaddset(output, klinux_sig); } } } return true; } bool FromkLinuxSigset(const klinux_sigset_t *input, sigset_t *output) { if (!input || !output) { output = nullptr; return false; } sigemptyset(output); for (int klinux_sig = 1; klinux_sig < kLinux_NSIG; klinux_sig++) { if (klinux_sigismember(input, klinux_sig)) { int sig = FromkLinuxSignalNumber(klinux_sig); if (sig != -1) { sigaddset(output, sig); } } } return true; } inline uint64_t kLinuxCpuWordNum(int cpu) { return cpu / (8 * sizeof(klinux_cpu_set_word)); } inline klinux_cpu_set_word kLinuxCpuBitNum(int cpu) { return cpu % (8 * sizeof(klinux_cpu_set_word)); } int kLinuxCpuSetCheckBit(int cpu, klinux_cpu_set_t *set) { return (set->words[kLinuxCpuWordNum(cpu)] & (static_cast<klinux_cpu_set_word>(1) << kLinuxCpuBitNum(cpu))) != 0; } bool FromkLinuxCpuSet(klinux_cpu_set_t *input, cpu_set_t *output) { if (!input || !output) { return false; } CPU_ZERO(output); for (int cpu = 0; cpu < KLINUX_CPU_SET_MAX_CPUS; cpu++) { if (kLinuxCpuSetCheckBit(cpu, input)) { CPU_SET(cpu, output); } } return true; } bool TokLinuxItimerval(const struct itimerval *input, struct klinux_itimerval *output) { if (!input || !output) { return false; } if (!TokLinuxtimeval(&input->it_interval, &output->klinux_it_interval) || !TokLinuxtimeval(&input->it_value, &output->klinux_it_value)) { return false; } return true; } bool FromkLinuxItimerval(const struct klinux_itimerval *input, struct itimerval *output) { if (!input || !output) { return false; } if (!FromkLinuxtimeval(&input->klinux_it_interval, &output->it_interval) || !FromkLinuxtimeval(&input->klinux_it_value, &output->it_value)) { return false; } return true; } bool TokLinuxPollfd(const struct pollfd *input, struct klinux_pollfd *output) { if (!input || !output) return false; output->klinux_fd = input->fd; output->klinux_events = TokLinuxPollEvent(input->events); output->klinux_revents = TokLinuxPollEvent(input->revents); return true; } bool FromkLinuxPollfd(const struct klinux_pollfd *input, struct pollfd *output) { if (!input || !output) return false; output->fd = input->klinux_fd; output->events = FromkLinuxPollEvent(input->klinux_events); output->revents = FromkLinuxPollEvent(input->klinux_revents); return true; } bool TokLinuxEpollEvent(const struct epoll_event *input, struct klinux_epoll_event *output) { if (!input || !output) return false; output->events = TokLinuxEpollEvents(input->events); if (input->events != 0 && output->events == 0) { return false; } output->data.u64 = input->data.u64; return true; } bool FromkLinuxEpollEvent(const struct klinux_epoll_event *input, struct epoll_event *output) { if (!input || !output) return false; output->events = FromkLinuxEpollEvents(input->events); if (input->events != 0 
&& output->events == 0) { return false; } output->data.u64 = input->data.u64; return true; } bool FromkLinuxRusage(const struct klinux_rusage *input, struct rusage *output) { if (!input || !output) { return false; } if (!FromkLinuxtimeval(&input->ru_stime, &output->ru_stime) || !FromkLinuxtimeval(&input->ru_utime, &output->ru_utime)) { return false; } return true; } bool TokLinuxRusage(const struct rusage *input, struct klinux_rusage *output) { if (!input || !output) { return false; } if (!TokLinuxtimeval(&input->ru_stime, &output->ru_stime) || !TokLinuxtimeval(&input->ru_utime, &output->ru_utime)) { return false; } return true; } int FromkLinuxToNewlibWstatus(int input) { int info = static_cast<int>(input >> 8 & 0xff) << 8; int code = input & 0x7f; if (KLINUX_WIFEXITED(input)) { code = 0; } else if (KLINUX_WIFSTOPPED(input)) { code = 0x7f; } return info + code; } bool FromkLinuxUtsName(const struct klinux_utsname *input, struct utsname *output) { if (!input || !output) { return false; } if (!CStringCopy(input->sysname, output->sysname, sizeof(output->sysname)) || !CStringCopy(input->nodename, output->nodename, sizeof(output->nodename)) || !CStringCopy(input->release, output->release, sizeof(output->release)) || !CStringCopy(input->version, output->version, sizeof(output->version)) || !CStringCopy(input->machine, output->machine, sizeof(output->machine))) { return false; } #if (defined(__USE_GNU) && __USE_GNU) || \ (defined(__GNU_VISIBLE) && __GNU_VISIBLE) if (!CStringCopy(input->domainname, output->domainname, sizeof(output->domainname))) { return false; } #else if (!CStringCopy(input->__domainname, output->domainname, sizeof(output->domainname))) { return false; } #endif return true; } // Priorities are encoded into a single 32-bit integer. The bottom 3 bits are // the level and the rest are the facility. int TokLinuxSyslogPriority(int input) { int syslog_level = input & 0x07; int syslog_facility = input & ~0x07; return TokLinuxSyslogLevel(syslog_level) | TokLinuxSyslogFacility(syslog_facility); } bool TokLinuxSiginfo(const siginfo_t *input, klinux_siginfo_t *output) { if (!input || !output) { return false; } output->si_signo = TokLinuxSignalNumber(input->si_signo); output->si_code = TokLinuxSignalNumber(input->si_code); return true; } bool FromkLinuxSiginfo(const klinux_siginfo_t *input, siginfo_t *output) { if (!input || !output) { return false; } output->si_signo = FromkLinuxSignalNumber(input->si_signo); output->si_code = FromkLinuxSignalNumber(input->si_code); return true; }
null
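The conversion record above states that a syslog priority packs the severity level into the bottom 3 bits and the facility into the remaining bits, and that TokLinuxSyslogPriority splits the value and re-maps each half separately. Below is a minimal, self-contained sketch of that split under stated assumptions: MapLevel and MapFacility are hypothetical pass-through stand-ins for the record's TokLinuxSyslogLevel and TokLinuxSyslogFacility mappers, which are not shown in this excerpt.

// Sketch: splitting a syslog priority into level and facility, as described
// in the conversion record above. MapLevel/MapFacility are hypothetical
// identity mappers; the real code translates each half between host and
// kLinux constants.
#include <cassert>

namespace {

int MapLevel(int level) { return level; }        // hypothetical stand-in
int MapFacility(int facility) { return facility; }  // hypothetical stand-in

int SplitAndMapPriority(int priority) {
  int level = priority & 0x07;      // bottom 3 bits: severity level
  int facility = priority & ~0x07;  // remaining bits: facility
  return MapLevel(level) | MapFacility(facility);
}

}  // namespace

int main() {
  // Facility 8 (1 << 3) combined with level 4 yields priority 12;
  // masking recovers the two halves, and recombining is lossless here.
  int priority = (1 << 3) | 4;
  assert((priority & 0x07) == 4);
  assert((priority & ~0x07) == 8);
  assert(SplitAndMapPriority(priority) == priority);
  return 0;
}

With real level/facility tables the two mapped halves would still be OR-ed back together exactly as in the record's TokLinuxSyslogPriority.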
237
CWE-787
CVE-2020-8944
/* * * Copyright 2017 Asylo authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Stubs invoked by edger8r-generated code for calls into the enclave. #include <cerrno> #include <cstring> #include <string> #include "absl/strings/str_cat.h" #include "asylo/enclave.pb.h" #include "asylo/util/logging.h" #include "asylo/platform/primitives/primitives.h" #include "asylo/platform/primitives/sgx/fork.h" #include "asylo/platform/primitives/sgx/generated_bridge_t.h" #include "asylo/platform/primitives/sgx/trusted_sgx.h" #include "asylo/platform/primitives/trusted_primitives.h" #include "asylo/util/posix_error_space.h" #include "asylo/util/status.h" #include "include/sgx_trts.h" // Edger8r does basic sanity checks for input and output pointers. The // parameters passed by the untrusted caller are copied by the edger8r-generated // code into trusted memory and then passed here. Consequently, there is no // possibility for TOCTOU attacks on these parameters. // Invokes the enclave snapshotting entry-point. Returns a non-zero error code // on failure. int ecall_take_snapshot(char **output, uint64_t *output_len) { int result = 0; size_t tmp_output_len; try { result = asylo::TakeSnapshot(output, &tmp_output_len); } catch (...) { LOG(FATAL) << "Uncaught exception in enclave"; } if (output_len) { *output_len = static_cast<uint64_t>(tmp_output_len); } return result; } // Invokes the enclave restoring entry-point. Returns a non-zero error code on // failure. int ecall_restore(const char *input, uint64_t input_len, char **output, uint64_t *output_len) { if (!asylo::primitives::TrustedPrimitives::IsOutsideEnclave(input, input_len) || !asylo::primitives::TrustedPrimitives::IsOutsideEnclave( output_len, sizeof(uint64_t))) { asylo::primitives::TrustedPrimitives::BestEffortAbort( "ecall_restore: input/output found to not be in untrusted memory."); } int result = 0; size_t tmp_output_len; try { result = asylo::Restore(input, static_cast<size_t>(input_len), output, &tmp_output_len); } catch (...) { LOG(FATAL) << "Uncaught exception in enclave"; } if (output_len) { *output_len = static_cast<uint64_t>(tmp_output_len); } return result; } // Invokes the enclave secure snapshot key transfer entry-point. Returns a // non-zero error code on failure. int ecall_transfer_secure_snapshot_key(const char *input, uint64_t input_len, char **output, uint64_t *output_len) { int result = 0; uint64_t bridge_output_len; try { result = asylo::TransferSecureSnapshotKey( input, static_cast<size_t>(input_len), output, &bridge_output_len); } catch (...) { LOG(FATAL) << "Uncaught exception in enclave"; } if (output_len) { *output_len = static_cast<size_t>(bridge_output_len); } return result; } // Invokes the trusted entry point designated by |selector|. Returns a // non-zero error code on failure. int ecall_dispatch_trusted_call(uint64_t selector, void *buffer) { return asylo::primitives::asylo_enclave_call(selector, buffer); } // Invokes the enclave signal handling entry-point. Returns a non-zero error // code on failure. 
int ecall_deliver_signal(int signum, int sigcode) { int result = 0; try { result = asylo::primitives::DeliverSignal(signum, sigcode); } catch (...) { LOG(FATAL) << "Uncaught exception in enclave"; } return result; }
null
/* * * Copyright 2017 Asylo authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Stubs invoked by edger8r-generated code for calls into the enclave. #include <cerrno> #include <cstring> #include <string> #include "absl/strings/str_cat.h" #include "asylo/enclave.pb.h" #include "asylo/util/logging.h" #include "asylo/platform/primitives/primitives.h" #include "asylo/platform/primitives/sgx/fork.h" #include "asylo/platform/primitives/sgx/generated_bridge_t.h" #include "asylo/platform/primitives/sgx/trusted_sgx.h" #include "asylo/platform/primitives/trusted_primitives.h" #include "asylo/util/posix_error_space.h" #include "asylo/util/status.h" #include "include/sgx_trts.h" // Edger8r does basic sanity checks for input and output pointers. The // parameters passed by the untrusted caller are copied by the edger8r-generated // code into trusted memory and then passed here. Consequently, there is no // possibility for TOCTOU attacks on these parameters. // Invokes the enclave snapshotting entry-point. Returns a non-zero error code // on failure. int ecall_take_snapshot(char **output, uint64_t *output_len) { int result = 0; size_t tmp_output_len; try { result = asylo::TakeSnapshot(output, &tmp_output_len); } catch (...) { LOG(FATAL) << "Uncaught exception in enclave"; } if (output_len) { *output_len = static_cast<uint64_t>(tmp_output_len); } return result; } // Invokes the enclave restoring entry-point. Returns a non-zero error code on // failure. int ecall_restore(const char *input, uint64_t input_len, char **output, uint64_t *output_len) { if (!asylo::primitives::TrustedPrimitives::IsOutsideEnclave(input, input_len) || !asylo::primitives::TrustedPrimitives::IsOutsideEnclave( output_len, sizeof(uint64_t)) || !asylo::primitives::TrustedPrimitives::IsOutsideEnclave(output, *output_len)) { asylo::primitives::TrustedPrimitives::BestEffortAbort( "ecall_restore: input/output found to not be in untrusted memory."); } int result = 0; size_t tmp_output_len; try { result = asylo::Restore(input, static_cast<size_t>(input_len), output, &tmp_output_len); } catch (...) { LOG(FATAL) << "Uncaught exception in enclave"; } if (output_len) { *output_len = static_cast<uint64_t>(tmp_output_len); } return result; } // Invokes the enclave secure snapshot key transfer entry-point. Returns a // non-zero error code on failure. int ecall_transfer_secure_snapshot_key(const char *input, uint64_t input_len, char **output, uint64_t *output_len) { int result = 0; uint64_t bridge_output_len; try { result = asylo::TransferSecureSnapshotKey( input, static_cast<size_t>(input_len), output, &bridge_output_len); } catch (...) { LOG(FATAL) << "Uncaught exception in enclave"; } if (output_len) { *output_len = static_cast<size_t>(bridge_output_len); } return result; } // Invokes the trusted entry point designated by |selector|. Returns a // non-zero error code on failure. 
int ecall_dispatch_trusted_call(uint64_t selector, void *buffer) { return asylo::primitives::asylo_enclave_call(selector, buffer); } // Invokes the enclave signal handling entry-point. Returns a non-zero error // code on failure. int ecall_deliver_signal(int signum, int sigcode) { int result = 0; try { result = asylo::primitives::DeliverSignal(signum, sigcode); } catch (...) { LOG(FATAL) << "Uncaught exception in enclave"; } return result; }
null
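Record 237's ground_truth differs from its source in ecall_restore: the patched version also requires the caller-supplied output buffer (output, for *output_len bytes) to lie outside the enclave before the restore path may write through it, in addition to the existing checks on input and output_len. The sketch below illustrates that range-check pattern only; IsOutsideEnclave and the enclave bounds here are hypothetical stand-ins for asylo::primitives::TrustedPrimitives::IsOutsideEnclave and the real SGX memory layout, and the argument list is simplified relative to the record's char **output signature.

// Sketch: rejecting caller-supplied buffers that overlap trusted memory
// before writing through them. The enclave range and IsOutsideEnclave are
// made up for illustration; the real check comes from the SGX runtime.
#include <cstdint>
#include <cstdio>

namespace {

// Hypothetical trusted region, for illustration only.
constexpr uintptr_t kEnclaveBase = 0x10000000;
constexpr uintptr_t kEnclaveSize = 0x00100000;

bool IsOutsideEnclave(const void *addr, uint64_t size) {
  uintptr_t start = reinterpret_cast<uintptr_t>(addr);
  uintptr_t end = start + size;
  if (end < start) return false;  // reject wrap-around
  // The buffer must not overlap [kEnclaveBase, kEnclaveBase + kEnclaveSize).
  return end <= kEnclaveBase || start >= kEnclaveBase + kEnclaveSize;
}

// Mirrors the shape of the patched ecall_restore checks: input, output_len,
// and the output buffer itself must all live in untrusted memory.
bool ValidateRestoreArgs(const char *input, uint64_t input_len,
                         const char *output, const uint64_t *output_len) {
  return IsOutsideEnclave(input, input_len) &&
         IsOutsideEnclave(output_len, sizeof(*output_len)) &&
         IsOutsideEnclave(output, *output_len);
}

}  // namespace

int main() {
  char buf[16] = {0};
  uint64_t len = sizeof(buf);
  // Stack addresses fall outside the made-up enclave range in this toy
  // setup, so the arguments pass validation.
  std::printf("valid: %d\n", ValidateRestoreArgs(buf, len, buf, &len));
  return 0;
}

The point of the added check is that a missing bound on the output buffer lets an untrusted caller steer the enclave's write destination, which is the out-of-bounds write (CWE-787) this record pairs with CVE-2020-8944.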
238
CWE-787
CVE-2020-9395
{ "packages": [ { "name": "realtek", "maintainer": "Realtek", "websiteURL": "http://www.amebaiot.com/", "email": "ameba.arduino@gmail.com", "help": { "online": "http://www.amebaiot.com/" }, "platforms": [ { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "2.0.6", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_1-2.0.6.tar.gz", "archiveFileName": "ameba_1-2.0.6.tar.gz", "checksum": "SHA-256:bc31dc587be5be4b11737097574210dbbbe8ed1a6185fb120edee1c645647fea", "size": "28113111", "boards": [ {"name": "Ameba RTL8195A"}, {"name": "Ameba RTL8710"}, {"name": "Ameba RTL8711AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "ameba_1_arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_1_tools", "version": "1.1.1" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "2.0.5", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-2.0.5.tar.gz", "archiveFileName": "ameba-2.0.5.tar.gz", "checksum": "SHA-256:d40bac42d72a7c1fa0292cc7e4c3528692d593c3887faa8cd12a016b18e5911e", "size": "28108728", "boards": [ {"name": "Ameba RTL8195A"}, {"name": "Ameba RTL8710"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.1.0" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "2.0.4", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-2.0.3.tar.gz", "archiveFileName": "ameba-2.0.3.tar.gz", "checksum": "SHA-256:3dcdf87ae84c261d1a0c226f1703249bd5052746ef3e8f945e5cdea419f6c286", "size": "28112187", "boards": [ {"name": "Ameba RTL8195A"}, {"name": "Ameba RTL8710"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.1.0" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "2.0.3", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-2.0.3.tar.gz", "archiveFileName": "ameba-2.0.3.tar.gz", "checksum": "SHA-256:3dcdf87ae84c261d1a0c226f1703249bd5052746ef3e8f945e5cdea419f6c286", "size": "28112187", "boards": [ {"name": "Ameba RTL8195A"}, {"name": "Ameba RTL8710"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.1.0" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "2.0.2", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-2.0.2.tar.gz", "archiveFileName": "ameba-2.0.2.tar.gz", "checksum": "SHA-256:6ab5e1f2a84b80e2f35f61660c248325c28251b9e087f3c18950976ea0af0143", "size": "27920875", "boards": [ {"name": "Ameba RTL8195A"}, {"name": "Ameba RTL8710"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", 
"version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.9" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "2.0.1", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-2.0.1.tar.gz", "archiveFileName": "ameba-2.0.1.tar.gz", "checksum": "SHA-256:7e4c013d178a4d3e02998951003208803339d17e417baa756e0a714c065c4bdf", "size": "27065302", "boards": [ {"name": "Ameba RTL8195A"}, {"name": "Ameba RTL8710"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.9" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "2.0.0", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-2.0.0.tar.gz", "archiveFileName": "ameba-2.0.0.tar.gz", "checksum": "SHA-256:b2c7edcf50bddce06cbd6c43cc6f6954c32ab9117b5eca25d1a0fded0f1ea647", "size": "27062841", "boards": [ {"name": "Ameba RTL8195A"}, {"name": "Ameba RTL8710"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.8" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.1.5", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.1.5.tar.gz", "archiveFileName": "ameba-1.1.5.tar.gz", "checksum": "SHA-256:960394cfd5dd7c1d38179325b2c87916c4ab0160997e11b66312228815528f2a", "size": "27062585", "boards": [ {"name": "Ameba RTL8195A"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.7" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.1.4", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.1.4.tar.gz", "archiveFileName": "ameba-1.1.4.tar.gz", "checksum": "SHA-256:4b5ef36fa619ff771cb7be5a3195252ab6e52336a89b2fd23207d2aa6cfa6843", "size": "16998391", "boards": [ {"name": "Ameba RTL8195A"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.7" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.1.3", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.1.3.tar.gz", "archiveFileName": "ameba-1.1.3.tar.gz", "checksum": "SHA-256:b8c9ad3cccb0ea97224c29e5f19204e54825aec053f844a5e18204e0c693ed0b", "size": "16982932", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.6" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.1.2", "category": 
"Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.1.2.tar.gz", "archiveFileName": "ameba-1.1.2.tar.gz", "checksum": "SHA-256:713fd098dd3507166f56c7883a28fa2860cb8a8697ca522cf6df8a4f01c6e26f", "size": "16971819", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.6" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.1.1", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.1.1.tar.gz", "archiveFileName": "ameba-1.1.1.tar.gz", "checksum": "SHA-256:f5ab800ad3b1c9801fe66558a7e3532bbd702308093a1e9a23c836127e3f86c2", "size": "16970352", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.6" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.1.0", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.1.0.tar.gz", "archiveFileName": "ameba-1.1.0.tar.gz", "checksum": "SHA-256:328f8ce8646e40a0c4074c9e181da298ab4cd9de6e53fef7a0b706b33f877d8f", "size": "16573295", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.6" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.0.9", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.0.9.tar.gz", "archiveFileName": "ameba-1.0.9.tar.gz", "checksum": "SHA-256:bc664ecb48e97db7cf1fae6b5ffc43f487e6c358cf5c0e609ef26fbc099d6735", "size": "16449042", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.6" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.0.8", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.0.8.tar.gz", "archiveFileName": "ameba-1.0.8.tar.gz", "checksum": "SHA-256:97461aefbd6eb3aac960ce01608592f4c73394e4dfdd52030c693cbf265c3ac2", "size": "16407607", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.6" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.0.7", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.0.7.tar.gz", "archiveFileName": "ameba-1.0.7.tar.gz", "checksum": 
"SHA-256:bd1379494d295a487ebfe6108436a464bb88f86375bce695c1fe9b51d7d42498", "size": "16394925", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.6" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.0.6", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.0.6.tar.gz", "archiveFileName": "ameba-1.0.6.tar.gz", "checksum": "SHA-256:541977c720cd079581a90d269eb8a79c997eb4af09da0c6bb76c833ff9154ef0", "size": "15448625", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.6" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.0.5", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.0.5.tar.gz", "archiveFileName": "ameba-1.0.5.tar.gz", "checksum": "SHA-256:5bb37c277940c88eff42c342991a1df64b40a19fe886e7c44a75c3d377313623", "size": "15450506", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.5" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.0.4", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.0.4.tar.gz", "archiveFileName": "ameba-1.0.4.tar.gz", "checksum": "SHA-256:83c453986960405965867baee5a22760f964d2dad22116e228527a612690c165", "size": "15436788", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.4" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.0.3", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.0.3.tar.gz", "archiveFileName": "ameba-1.0.3.tar.gz", "checksum": "SHA-256:1981c779613c7d7003532b6d0c08b65ae030613c33c16023bd5d51bf97acaf51", "size": "15416660", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.3" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.0.2", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.0.2.tar.gz", "archiveFileName": "ameba-1.0.2.tar.gz", "checksum": "SHA-256:73d2cfff3afd0a697506534caeccae9e9f6e3391cc29ff338aca34eaf89f7cbc", "size": "15363137", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", 
"version": "1.0.2" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.0.1", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.0.1.tar.gz", "archiveFileName": "ameba-1.0.1.tar.gz", "checksum": "SHA-256:c89f08a3fc64edee1ed9a266b7e41cc1b15241787b6860ac46e5cafe2bad8b9b", "size": "15363137", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.1" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.0.0", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.0.0.tar.gz", "archiveFileName": "ameba-1.0.0.tar.gz", "checksum": "SHA-256:9beb7abc8cd7c22f4f031daf8e58e07aa5b3fc2d6449a120c54b5c899f1012fb", "size": "15348074", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.0" } ] } ], "tools": [ { "name":"ameba_1_arm-none-eabi-gcc", "version":"4.8.3-2014q1", "systems":[ { "host":"i686-mingw32", "archiveFileName":"gcc-arm-none-eabi-4.8.3-2014q1-windows.tar.gz", "url":"http://downloads.arduino.cc/gcc-arm-none-eabi-4.8.3-2014q1-windows.tar.gz", "checksum":"SHA-256:fd8c111c861144f932728e00abd3f7d1107e186eb9cd6083a54c7236ea78b7c2", "size":"84537449" }, { "host":"x86_64-apple-darwin", "url":"http://downloads.arduino.cc/gcc-arm-none-eabi-4.8.3-2014q1-mac.tar.gz", "archiveFileName":"gcc-arm-none-eabi-4.8.3-2014q1-mac.tar.gz", "checksum":"SHA-256:3598acf21600f17a8e4a4e8e193dc422b894dc09384759b270b2ece5facb59c2", "size":"52518522" }, { "host":"x86_64-pc-linux-gnu", "url":"http://downloads.arduino.cc/gcc-arm-none-eabi-4.8.3-2014q1-linux64.tar.gz", "archiveFileName":"gcc-arm-none-eabi-4.8.3-2014q1-linux64.tar.gz", "checksum":"SHA-256:d23f6626148396d6ec42a5b4d928955a703e0757829195fa71a939e5b86eecf6", "size":"51395093" }, { "host":"i686-pc-linux-gnu", "url":"http://downloads.arduino.cc/gcc-arm-none-eabi-4.8.3-2014q1-linux32.tar.gz", "archiveFileName":"gcc-arm-none-eabi-4.8.3-2014q1-linux32.tar.gz", "checksum":"SHA-256:ba1994235f69c526c564f65343f22ddbc9822b2ea8c5ee07dd79d89f6ace2498", "size":"51029223" } ] }, { "name":"arm-none-eabi-gcc", "version":"4.8.3-2014q1", "systems":[ { "host":"i686-mingw32", "archiveFileName":"gcc-arm-none-eabi-4.8.3-2014q1-windows.tar.gz", "url":"http://downloads.arduino.cc/gcc-arm-none-eabi-4.8.3-2014q1-windows.tar.gz", "checksum":"SHA-256:fd8c111c861144f932728e00abd3f7d1107e186eb9cd6083a54c7236ea78b7c2", "size":"84537449" }, { "host":"x86_64-apple-darwin", "url":"http://downloads.arduino.cc/gcc-arm-none-eabi-4.8.3-2014q1-mac.tar.gz", "archiveFileName":"gcc-arm-none-eabi-4.8.3-2014q1-mac.tar.gz", "checksum":"SHA-256:3598acf21600f17a8e4a4e8e193dc422b894dc09384759b270b2ece5facb59c2", "size":"52518522" }, { "host":"x86_64-pc-linux-gnu", "url":"http://downloads.arduino.cc/gcc-arm-none-eabi-4.8.3-2014q1-linux64.tar.gz", "archiveFileName":"gcc-arm-none-eabi-4.8.3-2014q1-linux64.tar.gz", "checksum":"SHA-256:d23f6626148396d6ec42a5b4d928955a703e0757829195fa71a939e5b86eecf6", "size":"51395093" }, { "host":"i686-pc-linux-gnu", 
"url":"http://downloads.arduino.cc/gcc-arm-none-eabi-4.8.3-2014q1-linux32.tar.gz", "archiveFileName":"gcc-arm-none-eabi-4.8.3-2014q1-linux32.tar.gz", "checksum":"SHA-256:ba1994235f69c526c564f65343f22ddbc9822b2ea8c5ee07dd79d89f6ace2498", "size":"51029223" } ] }, { "name": "ameba_1_tools", "version": "1.1.1", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools_windows-1.1.1.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_windows-1.1.1.tar.gz", "checksum": "SHA-256:7cb9c430967c0213a87784ea0b015de7a83e79cc774d1f8032561104ea64d14a", "size": "989120" }, { "host": "x86_64-apple-darwin", "archiveFileName": "ameba_tools_macosx-1.1.1.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_macosx-1.1.1.tar.gz", "checksum": "SHA-256:2fa7690929c4f2b4fe862e550427e97e0bd48f09da9882118a0d746761665ec3", "size": "64965" }, { "host":"x86_64-pc-linux-gnu", "archiveFileName": "ameba_tools_linux-1.1.1.tar.gz", "url":"https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_linux-1.1.1.tar.gz", "checksum":"SHA-256:84945f9bc5efec558ee254952e1b03ad2afa775b7aa87f189051bfffb176e395", "size":"3302451" }, { "host":"i686-pc-linux-gnu", "archiveFileName": "ameba_tools_linux-1.1.1.tar.gz", "url":"https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_linux-1.1.1.tar.gz", "checksum":"SHA-256:84945f9bc5efec558ee254952e1b03ad2afa775b7aa87f189051bfffb176e395", "size":"3302451" } ] }, { "name": "ameba_tools", "version": "1.1.1", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools_windows-1.1.1.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_windows-1.1.1.tar.gz", "checksum": "SHA-256:10fa8d7bd50e2625a123c1547b643e4d5437e1b3f983d22de664a89cd681d877", "size": "988627" }, { "host": "x86_64-apple-darwin", "archiveFileName": "ameba_tools_macosx-1.1.1.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_macosx-1.1.1.tar.gz", "checksum": "SHA-256:2fa7690929c4f2b4fe862e550427e97e0bd48f09da9882118a0d746761665ec3", "size": "64965" }, { "host":"x86_64-pc-linux-gnu", "archiveFileName": "ameba_tools_linux-1.1.1.tar.gz", "url":"https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_linux-1.1.1.tar.gz", "checksum":"SHA-256:84945f9bc5efec558ee254952e1b03ad2afa775b7aa87f189051bfffb176e395", "size":"3302451" }, { "host":"i686-pc-linux-gnu", "archiveFileName": "ameba_tools_linux-1.1.1.tar.gz", "url":"https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_linux-1.1.1.tar.gz", "checksum":"SHA-256:84945f9bc5efec558ee254952e1b03ad2afa775b7aa87f189051bfffb176e395", "size":"3302451" } ] }, { "name": "ameba_tools", "version": "1.1.0", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools_windows-1.1.0.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_windows-1.1.0.tar.gz", "checksum": "SHA-256:731cd60b06616cc872d88a8262b297d3c5a10e353e54e240b160caa9bb0d3e80", "size": "784222" }, { "host": "x86_64-apple-darwin", "archiveFileName": "ameba_tools_macosx-1.1.0.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_macosx-1.1.0.tar.gz", "checksum": "SHA-256:727ddab27246e93ec1f4f7c1b877463b8ffc9add120e3ebaabb2fa601b68bf94", "size": "65604" }, { "host":"x86_64-pc-linux-gnu", 
"archiveFileName": "ameba_tools_linux-1.1.0.tar.gz", "url":"https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_linux-1.1.0.tar.gz", "checksum":"SHA-256:44baa47f871e3139c971ade778a277c365acbc2676e032c46bc8c31440bec6c8", "size":"3211616" }, { "host":"i686-pc-linux-gnu", "archiveFileName": "ameba_tools_linux-1.1.0.tar.gz", "url":"https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_linux-1.1.0.tar.gz", "checksum":"SHA-256:44baa47f871e3139c971ade778a277c365acbc2676e032c46bc8c31440bec6c8", "size":"3211616" } ] }, { "name": "ameba_tools", "version": "1.0.9", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools_windows-1.0.9.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_windows-1.0.9.tar.gz", "checksum": "SHA-256:ccb795361cf20a69d41ef97466ee127c4721cd0068f9569b72fdb965356db3ec", "size": "784212" }, { "host": "x86_64-apple-darwin", "archiveFileName": "ameba_tools_macosx-1.0.9.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_macosx-1.0.9.tar.gz", "checksum": "SHA-256:d4b938b34140d8bab16efc538f482bf61c22795ad690c9114215286e37ab7fdd", "size": "64855" }, { "host":"x86_64-pc-linux-gnu", "archiveFileName": "ameba_tools_linux-1.0.9-v2.tar.gz", "url":"https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_linux-1.0.9-v2.tar.gz", "checksum":"SHA-256:e55c49504521d4084c753983e3102fbedecf085d7aaba6fdbf3194ce52ce9ea5", "size":"3212475" }, { "host":"i686-pc-linux-gnu", "archiveFileName": "ameba_tools_linux-1.0.9-v2.tar.gz", "url":"https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_linux-1.0.9-v2.tar.gz", "checksum":"SHA-256:e55c49504521d4084c753983e3102fbedecf085d7aaba6fdbf3194ce52ce9ea5", "size":"3212475" } ] }, { "name": "ameba_tools", "version": "1.0.8", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools-1.0.8.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.8.tar.gz", "checksum": "SHA-256:8f2f84ee52862c101e61e3eabae7f20384e02d8640451d96a5357fa260df82c7", "size": "5527556" }, { "host": "x86_64-apple-darwin", "archiveFileName": "ameba_tools-1.0.8.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.8.tar.gz", "checksum": "SHA-256:8f2f84ee52862c101e61e3eabae7f20384e02d8640451d96a5357fa260df82c7", "size": "5527556" } ] }, { "name": "ameba_tools", "version": "1.0.7", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools-1.0.7.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.7.tar.gz", "checksum": "SHA-256:2e67bcfbcd31ce3cc02a8edb4f645ccb2dcd8b3b9ccb63f2d090e54225c94075", "size": "5540922" }, { "host": "x86_64-apple-darwin", "archiveFileName": "ameba_tools-1.0.7.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.7.tar.gz", "checksum": "SHA-256:2e67bcfbcd31ce3cc02a8edb4f645ccb2dcd8b3b9ccb63f2d090e54225c94075", "size": "5540922" } ] }, { "name": "ameba_tools", "version": "1.0.6", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools-1.0.6.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.6.tar.gz", "checksum": "SHA-256:ac6580447f73fd3d5f063b4396e0b75865b6067f4d47eee7c68e39a47e13f222", "size": "5502387" }, { "host": 
"x86_64-apple-darwin", "archiveFileName": "ameba_tools-1.0.6.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.6.tar.gz", "checksum": "SHA-256:ac6580447f73fd3d5f063b4396e0b75865b6067f4d47eee7c68e39a47e13f222", "size": "5502387" } ] }, { "name": "ameba_tools", "version": "1.0.5", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools-1.0.5.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.5.tar.gz", "checksum": "SHA-256:ac6580447f73fd3d5f063b4396e0b75865b6067f4d47eee7c68e39a47e13f222", "size": "5502387" }, { "host": "x86_64-apple-darwin", "archiveFileName": "ameba_tools-1.0.5.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.5.tar.gz", "checksum": "SHA-256:ac6580447f73fd3d5f063b4396e0b75865b6067f4d47eee7c68e39a47e13f222", "size": "5502387" } ] }, { "name": "ameba_tools", "version": "1.0.4", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools-1.0.4.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.4.tar.gz", "checksum": "SHA-256:ac6580447f73fd3d5f063b4396e0b75865b6067f4d47eee7c68e39a47e13f222", "size": "5502387" }, { "host": "x86_64-apple-darwin", "archiveFileName": "ameba_tools-1.0.4.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.4.tar.gz", "checksum": "SHA-256:ac6580447f73fd3d5f063b4396e0b75865b6067f4d47eee7c68e39a47e13f222", "size": "5502387" } ] }, { "name": "ameba_tools", "version": "1.0.3", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools-1.0.3.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.2.tar.gz", "checksum": "SHA-256:cb5115f389896786995b6d6d9edd9c25408310700dc6bed267325a02b1dc3e90", "size": "5108076" } ] }, { "name": "ameba_tools", "version": "1.0.2", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools-1.0.2.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.2.tar.gz", "checksum": "SHA-256:cb5115f389896786995b6d6d9edd9c25408310700dc6bed267325a02b1dc3e90", "size": "5108076" } ] }, { "name": "ameba_tools", "version": "1.0.1", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools-1.0.1.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.1.tar.gz", "checksum": "SHA-256:cb5115f389896786995b6d6d9edd9c25408310700dc6bed267325a02b1dc3e90", "size": "5108076" } ] }, { "name": "ameba_tools", "version": "1.0.0", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools-1.0.0.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.0.tar.gz", "checksum": "SHA-256:cb5115f389896786995b6d6d9edd9c25408310700dc6bed267325a02b1dc3e90", "size": "5108076" } ] } ] } ] }
null
{ "packages": [ { "name": "realtek", "maintainer": "Realtek", "websiteURL": "http://www.amebaiot.com/", "email": "ameba.arduino@gmail.com", "help": { "online": "http://www.amebaiot.com/" }, "platforms": [ { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "2.0.6-v2", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_1-2.0.6-v2.tar.gz", "archiveFileName": "ameba_1-2.0.6-v2.tar.gz", "checksum": "SHA-256:ae23b8a56008edafe70f8db3f304176fab014b7be8a5564ebeedaf0a5b02e3d1", "size": "28100307", "boards": [ {"name": "Ameba RTL8195A"}, {"name": "Ameba RTL8710"}, {"name": "Ameba RTL8711AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "ameba_1_arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_1_tools", "version": "1.1.1" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "2.0.6", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_1-2.0.6.tar.gz", "archiveFileName": "ameba_1-2.0.6.tar.gz", "checksum": "SHA-256:bc31dc587be5be4b11737097574210dbbbe8ed1a6185fb120edee1c645647fea", "size": "28113111", "boards": [ {"name": "Ameba RTL8195A"}, {"name": "Ameba RTL8710"}, {"name": "Ameba RTL8711AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "ameba_1_arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_1_tools", "version": "1.1.1" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "2.0.5", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-2.0.5.tar.gz", "archiveFileName": "ameba-2.0.5.tar.gz", "checksum": "SHA-256:d40bac42d72a7c1fa0292cc7e4c3528692d593c3887faa8cd12a016b18e5911e", "size": "28108728", "boards": [ {"name": "Ameba RTL8195A"}, {"name": "Ameba RTL8710"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.1.0" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "2.0.4", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-2.0.3.tar.gz", "archiveFileName": "ameba-2.0.3.tar.gz", "checksum": "SHA-256:3dcdf87ae84c261d1a0c226f1703249bd5052746ef3e8f945e5cdea419f6c286", "size": "28112187", "boards": [ {"name": "Ameba RTL8195A"}, {"name": "Ameba RTL8710"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.1.0" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "2.0.3", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-2.0.3.tar.gz", "archiveFileName": "ameba-2.0.3.tar.gz", "checksum": "SHA-256:3dcdf87ae84c261d1a0c226f1703249bd5052746ef3e8f945e5cdea419f6c286", "size": "28112187", "boards": [ {"name": "Ameba RTL8195A"}, {"name": "Ameba RTL8710"} ], "toolsDependencies": [ { 
"packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.1.0" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "2.0.2", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-2.0.2.tar.gz", "archiveFileName": "ameba-2.0.2.tar.gz", "checksum": "SHA-256:6ab5e1f2a84b80e2f35f61660c248325c28251b9e087f3c18950976ea0af0143", "size": "27920875", "boards": [ {"name": "Ameba RTL8195A"}, {"name": "Ameba RTL8710"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.9" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "2.0.1", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-2.0.1.tar.gz", "archiveFileName": "ameba-2.0.1.tar.gz", "checksum": "SHA-256:7e4c013d178a4d3e02998951003208803339d17e417baa756e0a714c065c4bdf", "size": "27065302", "boards": [ {"name": "Ameba RTL8195A"}, {"name": "Ameba RTL8710"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.9" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "2.0.0", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-2.0.0.tar.gz", "archiveFileName": "ameba-2.0.0.tar.gz", "checksum": "SHA-256:b2c7edcf50bddce06cbd6c43cc6f6954c32ab9117b5eca25d1a0fded0f1ea647", "size": "27062841", "boards": [ {"name": "Ameba RTL8195A"}, {"name": "Ameba RTL8710"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.8" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.1.5", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.1.5.tar.gz", "archiveFileName": "ameba-1.1.5.tar.gz", "checksum": "SHA-256:960394cfd5dd7c1d38179325b2c87916c4ab0160997e11b66312228815528f2a", "size": "27062585", "boards": [ {"name": "Ameba RTL8195A"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.7" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.1.4", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.1.4.tar.gz", "archiveFileName": "ameba-1.1.4.tar.gz", "checksum": "SHA-256:4b5ef36fa619ff771cb7be5a3195252ab6e52336a89b2fd23207d2aa6cfa6843", "size": "16998391", "boards": [ {"name": "Ameba RTL8195A"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.7" } ] }, { "name": "Realtek Ameba Boards (32-bits 
ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.1.3", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.1.3.tar.gz", "archiveFileName": "ameba-1.1.3.tar.gz", "checksum": "SHA-256:b8c9ad3cccb0ea97224c29e5f19204e54825aec053f844a5e18204e0c693ed0b", "size": "16982932", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.6" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.1.2", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.1.2.tar.gz", "archiveFileName": "ameba-1.1.2.tar.gz", "checksum": "SHA-256:713fd098dd3507166f56c7883a28fa2860cb8a8697ca522cf6df8a4f01c6e26f", "size": "16971819", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.6" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.1.1", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.1.1.tar.gz", "archiveFileName": "ameba-1.1.1.tar.gz", "checksum": "SHA-256:f5ab800ad3b1c9801fe66558a7e3532bbd702308093a1e9a23c836127e3f86c2", "size": "16970352", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.6" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.1.0", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.1.0.tar.gz", "archiveFileName": "ameba-1.1.0.tar.gz", "checksum": "SHA-256:328f8ce8646e40a0c4074c9e181da298ab4cd9de6e53fef7a0b706b33f877d8f", "size": "16573295", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.6" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.0.9", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.0.9.tar.gz", "archiveFileName": "ameba-1.0.9.tar.gz", "checksum": "SHA-256:bc664ecb48e97db7cf1fae6b5ffc43f487e6c358cf5c0e609ef26fbc099d6735", "size": "16449042", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.6" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.0.8", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.0.8.tar.gz", "archiveFileName": "ameba-1.0.8.tar.gz", 
"checksum": "SHA-256:97461aefbd6eb3aac960ce01608592f4c73394e4dfdd52030c693cbf265c3ac2", "size": "16407607", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.6" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.0.7", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.0.7.tar.gz", "archiveFileName": "ameba-1.0.7.tar.gz", "checksum": "SHA-256:bd1379494d295a487ebfe6108436a464bb88f86375bce695c1fe9b51d7d42498", "size": "16394925", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.6" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.0.6", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.0.6.tar.gz", "archiveFileName": "ameba-1.0.6.tar.gz", "checksum": "SHA-256:541977c720cd079581a90d269eb8a79c997eb4af09da0c6bb76c833ff9154ef0", "size": "15448625", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.6" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.0.5", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.0.5.tar.gz", "archiveFileName": "ameba-1.0.5.tar.gz", "checksum": "SHA-256:5bb37c277940c88eff42c342991a1df64b40a19fe886e7c44a75c3d377313623", "size": "15450506", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.5" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.0.4", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.0.4.tar.gz", "archiveFileName": "ameba-1.0.4.tar.gz", "checksum": "SHA-256:83c453986960405965867baee5a22760f964d2dad22116e228527a612690c165", "size": "15436788", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.4" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.0.3", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.0.3.tar.gz", "archiveFileName": "ameba-1.0.3.tar.gz", "checksum": "SHA-256:1981c779613c7d7003532b6d0c08b65ae030613c33c16023bd5d51bf97acaf51", "size": "15416660", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": 
"ameba_tools", "version": "1.0.3" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.0.2", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.0.2.tar.gz", "archiveFileName": "ameba-1.0.2.tar.gz", "checksum": "SHA-256:73d2cfff3afd0a697506534caeccae9e9f6e3391cc29ff338aca34eaf89f7cbc", "size": "15363137", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.2" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.0.1", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.0.1.tar.gz", "archiveFileName": "ameba-1.0.1.tar.gz", "checksum": "SHA-256:c89f08a3fc64edee1ed9a266b7e41cc1b15241787b6860ac46e5cafe2bad8b9b", "size": "15363137", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.1" } ] }, { "name": "Realtek Ameba Boards (32-bits ARM Cortex-M3)", "architecture": "Ameba1", "version": "1.0.0", "category": "Contributed", "help": { "online": "http://www.amebaiot.com/" }, "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba-1.0.0.tar.gz", "archiveFileName": "ameba-1.0.0.tar.gz", "checksum": "SHA-256:9beb7abc8cd7c22f4f031daf8e58e07aa5b3fc2d6449a120c54b5c899f1012fb", "size": "15348074", "boards": [ {"name": "Ameba 8195AM"} ], "toolsDependencies": [ { "packager": "realtek", "name": "arm-none-eabi-gcc", "version": "4.8.3-2014q1" }, { "packager": "realtek", "name": "ameba_tools", "version": "1.0.0" } ] } ], "tools": [ { "name":"ameba_1_arm-none-eabi-gcc", "version":"4.8.3-2014q1", "systems":[ { "host":"i686-mingw32", "archiveFileName":"gcc-arm-none-eabi-4.8.3-2014q1-windows.tar.gz", "url":"http://downloads.arduino.cc/gcc-arm-none-eabi-4.8.3-2014q1-windows.tar.gz", "checksum":"SHA-256:fd8c111c861144f932728e00abd3f7d1107e186eb9cd6083a54c7236ea78b7c2", "size":"84537449" }, { "host":"x86_64-apple-darwin", "url":"http://downloads.arduino.cc/gcc-arm-none-eabi-4.8.3-2014q1-mac.tar.gz", "archiveFileName":"gcc-arm-none-eabi-4.8.3-2014q1-mac.tar.gz", "checksum":"SHA-256:3598acf21600f17a8e4a4e8e193dc422b894dc09384759b270b2ece5facb59c2", "size":"52518522" }, { "host":"x86_64-pc-linux-gnu", "url":"http://downloads.arduino.cc/gcc-arm-none-eabi-4.8.3-2014q1-linux64.tar.gz", "archiveFileName":"gcc-arm-none-eabi-4.8.3-2014q1-linux64.tar.gz", "checksum":"SHA-256:d23f6626148396d6ec42a5b4d928955a703e0757829195fa71a939e5b86eecf6", "size":"51395093" }, { "host":"i686-pc-linux-gnu", "url":"http://downloads.arduino.cc/gcc-arm-none-eabi-4.8.3-2014q1-linux32.tar.gz", "archiveFileName":"gcc-arm-none-eabi-4.8.3-2014q1-linux32.tar.gz", "checksum":"SHA-256:ba1994235f69c526c564f65343f22ddbc9822b2ea8c5ee07dd79d89f6ace2498", "size":"51029223" } ] }, { "name":"arm-none-eabi-gcc", "version":"4.8.3-2014q1", "systems":[ { "host":"i686-mingw32", "archiveFileName":"gcc-arm-none-eabi-4.8.3-2014q1-windows.tar.gz", "url":"http://downloads.arduino.cc/gcc-arm-none-eabi-4.8.3-2014q1-windows.tar.gz", 
"checksum":"SHA-256:fd8c111c861144f932728e00abd3f7d1107e186eb9cd6083a54c7236ea78b7c2", "size":"84537449" }, { "host":"x86_64-apple-darwin", "url":"http://downloads.arduino.cc/gcc-arm-none-eabi-4.8.3-2014q1-mac.tar.gz", "archiveFileName":"gcc-arm-none-eabi-4.8.3-2014q1-mac.tar.gz", "checksum":"SHA-256:3598acf21600f17a8e4a4e8e193dc422b894dc09384759b270b2ece5facb59c2", "size":"52518522" }, { "host":"x86_64-pc-linux-gnu", "url":"http://downloads.arduino.cc/gcc-arm-none-eabi-4.8.3-2014q1-linux64.tar.gz", "archiveFileName":"gcc-arm-none-eabi-4.8.3-2014q1-linux64.tar.gz", "checksum":"SHA-256:d23f6626148396d6ec42a5b4d928955a703e0757829195fa71a939e5b86eecf6", "size":"51395093" }, { "host":"i686-pc-linux-gnu", "url":"http://downloads.arduino.cc/gcc-arm-none-eabi-4.8.3-2014q1-linux32.tar.gz", "archiveFileName":"gcc-arm-none-eabi-4.8.3-2014q1-linux32.tar.gz", "checksum":"SHA-256:ba1994235f69c526c564f65343f22ddbc9822b2ea8c5ee07dd79d89f6ace2498", "size":"51029223" } ] }, { "name": "ameba_1_tools", "version": "1.1.1", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools_windows-1.1.1.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_windows-1.1.1.tar.gz", "checksum": "SHA-256:7cb9c430967c0213a87784ea0b015de7a83e79cc774d1f8032561104ea64d14a", "size": "989120" }, { "host": "x86_64-apple-darwin", "archiveFileName": "ameba_tools_macosx-1.1.1.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_macosx-1.1.1.tar.gz", "checksum": "SHA-256:2fa7690929c4f2b4fe862e550427e97e0bd48f09da9882118a0d746761665ec3", "size": "64965" }, { "host":"x86_64-pc-linux-gnu", "archiveFileName": "ameba_tools_linux-1.1.1.tar.gz", "url":"https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_linux-1.1.1.tar.gz", "checksum":"SHA-256:84945f9bc5efec558ee254952e1b03ad2afa775b7aa87f189051bfffb176e395", "size":"3302451" }, { "host":"i686-pc-linux-gnu", "archiveFileName": "ameba_tools_linux-1.1.1.tar.gz", "url":"https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_linux-1.1.1.tar.gz", "checksum":"SHA-256:84945f9bc5efec558ee254952e1b03ad2afa775b7aa87f189051bfffb176e395", "size":"3302451" } ] }, { "name": "ameba_tools", "version": "1.1.1", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools_windows-1.1.1.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_windows-1.1.1.tar.gz", "checksum": "SHA-256:10fa8d7bd50e2625a123c1547b643e4d5437e1b3f983d22de664a89cd681d877", "size": "988627" }, { "host": "x86_64-apple-darwin", "archiveFileName": "ameba_tools_macosx-1.1.1.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_macosx-1.1.1.tar.gz", "checksum": "SHA-256:2fa7690929c4f2b4fe862e550427e97e0bd48f09da9882118a0d746761665ec3", "size": "64965" }, { "host":"x86_64-pc-linux-gnu", "archiveFileName": "ameba_tools_linux-1.1.1.tar.gz", "url":"https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_linux-1.1.1.tar.gz", "checksum":"SHA-256:84945f9bc5efec558ee254952e1b03ad2afa775b7aa87f189051bfffb176e395", "size":"3302451" }, { "host":"i686-pc-linux-gnu", "archiveFileName": "ameba_tools_linux-1.1.1.tar.gz", "url":"https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_linux-1.1.1.tar.gz", "checksum":"SHA-256:84945f9bc5efec558ee254952e1b03ad2afa775b7aa87f189051bfffb176e395", "size":"3302451" } ] }, { 
"name": "ameba_tools", "version": "1.1.0", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools_windows-1.1.0.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_windows-1.1.0.tar.gz", "checksum": "SHA-256:731cd60b06616cc872d88a8262b297d3c5a10e353e54e240b160caa9bb0d3e80", "size": "784222" }, { "host": "x86_64-apple-darwin", "archiveFileName": "ameba_tools_macosx-1.1.0.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_macosx-1.1.0.tar.gz", "checksum": "SHA-256:727ddab27246e93ec1f4f7c1b877463b8ffc9add120e3ebaabb2fa601b68bf94", "size": "65604" }, { "host":"x86_64-pc-linux-gnu", "archiveFileName": "ameba_tools_linux-1.1.0.tar.gz", "url":"https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_linux-1.1.0.tar.gz", "checksum":"SHA-256:44baa47f871e3139c971ade778a277c365acbc2676e032c46bc8c31440bec6c8", "size":"3211616" }, { "host":"i686-pc-linux-gnu", "archiveFileName": "ameba_tools_linux-1.1.0.tar.gz", "url":"https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_linux-1.1.0.tar.gz", "checksum":"SHA-256:44baa47f871e3139c971ade778a277c365acbc2676e032c46bc8c31440bec6c8", "size":"3211616" } ] }, { "name": "ameba_tools", "version": "1.0.9", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools_windows-1.0.9.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_windows-1.0.9.tar.gz", "checksum": "SHA-256:ccb795361cf20a69d41ef97466ee127c4721cd0068f9569b72fdb965356db3ec", "size": "784212" }, { "host": "x86_64-apple-darwin", "archiveFileName": "ameba_tools_macosx-1.0.9.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_macosx-1.0.9.tar.gz", "checksum": "SHA-256:d4b938b34140d8bab16efc538f482bf61c22795ad690c9114215286e37ab7fdd", "size": "64855" }, { "host":"x86_64-pc-linux-gnu", "archiveFileName": "ameba_tools_linux-1.0.9-v2.tar.gz", "url":"https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_linux-1.0.9-v2.tar.gz", "checksum":"SHA-256:e55c49504521d4084c753983e3102fbedecf085d7aaba6fdbf3194ce52ce9ea5", "size":"3212475" }, { "host":"i686-pc-linux-gnu", "archiveFileName": "ameba_tools_linux-1.0.9-v2.tar.gz", "url":"https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools_linux-1.0.9-v2.tar.gz", "checksum":"SHA-256:e55c49504521d4084c753983e3102fbedecf085d7aaba6fdbf3194ce52ce9ea5", "size":"3212475" } ] }, { "name": "ameba_tools", "version": "1.0.8", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools-1.0.8.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.8.tar.gz", "checksum": "SHA-256:8f2f84ee52862c101e61e3eabae7f20384e02d8640451d96a5357fa260df82c7", "size": "5527556" }, { "host": "x86_64-apple-darwin", "archiveFileName": "ameba_tools-1.0.8.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.8.tar.gz", "checksum": "SHA-256:8f2f84ee52862c101e61e3eabae7f20384e02d8640451d96a5357fa260df82c7", "size": "5527556" } ] }, { "name": "ameba_tools", "version": "1.0.7", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools-1.0.7.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.7.tar.gz", "checksum": 
"SHA-256:2e67bcfbcd31ce3cc02a8edb4f645ccb2dcd8b3b9ccb63f2d090e54225c94075", "size": "5540922" }, { "host": "x86_64-apple-darwin", "archiveFileName": "ameba_tools-1.0.7.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.7.tar.gz", "checksum": "SHA-256:2e67bcfbcd31ce3cc02a8edb4f645ccb2dcd8b3b9ccb63f2d090e54225c94075", "size": "5540922" } ] }, { "name": "ameba_tools", "version": "1.0.6", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools-1.0.6.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.6.tar.gz", "checksum": "SHA-256:ac6580447f73fd3d5f063b4396e0b75865b6067f4d47eee7c68e39a47e13f222", "size": "5502387" }, { "host": "x86_64-apple-darwin", "archiveFileName": "ameba_tools-1.0.6.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.6.tar.gz", "checksum": "SHA-256:ac6580447f73fd3d5f063b4396e0b75865b6067f4d47eee7c68e39a47e13f222", "size": "5502387" } ] }, { "name": "ameba_tools", "version": "1.0.5", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools-1.0.5.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.5.tar.gz", "checksum": "SHA-256:ac6580447f73fd3d5f063b4396e0b75865b6067f4d47eee7c68e39a47e13f222", "size": "5502387" }, { "host": "x86_64-apple-darwin", "archiveFileName": "ameba_tools-1.0.5.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.5.tar.gz", "checksum": "SHA-256:ac6580447f73fd3d5f063b4396e0b75865b6067f4d47eee7c68e39a47e13f222", "size": "5502387" } ] }, { "name": "ameba_tools", "version": "1.0.4", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools-1.0.4.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.4.tar.gz", "checksum": "SHA-256:ac6580447f73fd3d5f063b4396e0b75865b6067f4d47eee7c68e39a47e13f222", "size": "5502387" }, { "host": "x86_64-apple-darwin", "archiveFileName": "ameba_tools-1.0.4.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.4.tar.gz", "checksum": "SHA-256:ac6580447f73fd3d5f063b4396e0b75865b6067f4d47eee7c68e39a47e13f222", "size": "5502387" } ] }, { "name": "ameba_tools", "version": "1.0.3", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools-1.0.3.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.2.tar.gz", "checksum": "SHA-256:cb5115f389896786995b6d6d9edd9c25408310700dc6bed267325a02b1dc3e90", "size": "5108076" } ] }, { "name": "ameba_tools", "version": "1.0.2", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools-1.0.2.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.2.tar.gz", "checksum": "SHA-256:cb5115f389896786995b6d6d9edd9c25408310700dc6bed267325a02b1dc3e90", "size": "5108076" } ] }, { "name": "ameba_tools", "version": "1.0.1", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools-1.0.1.tar.gz", "url": "https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.1.tar.gz", "checksum": "SHA-256:cb5115f389896786995b6d6d9edd9c25408310700dc6bed267325a02b1dc3e90", "size": "5108076" } ] }, { "name": "ameba_tools", "version": "1.0.0", "systems": [ { "host": "i686-mingw32", "archiveFileName": "ameba_tools-1.0.0.tar.gz", "url": 
"https://github.com/ambiot/amb1_arduino/raw/master/Arduino_package/release/ameba_tools-1.0.0.tar.gz", "checksum": "SHA-256:cb5115f389896786995b6d6d9edd9c25408310700dc6bed267325a02b1dc3e90", "size": "5108076" } ] } ] } ] }
null
239
CWE-787
CVE-2021-20298
/////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2006, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////// //----------------------------------------------------------------------------- // // class B44Compressor // // This compressor is lossy for HALF channels; the compression rate // is fixed at 32/14 (approximately 2.28). FLOAT and UINT channels // are not compressed; their data are preserved exactly. // // Each HALF channel is split into blocks of 4 by 4 pixels. An // uncompressed block occupies 32 bytes, which are re-interpreted // as sixteen 16-bit unsigned integers, t[0] ... t[15]. Compression // shrinks the block to 14 bytes. The compressed 14-byte block // contains // // - t[0] // // - a 6-bit shift value // // - 15 densely packed 6-bit values, r[0] ... r[14], which are // computed by subtracting adjacent pixel values and right- // shifting the differences according to the stored shift value. // // Differences between adjacent pixels are computed according // to the following diagram: // // 0 --------> 1 --------> 2 --------> 3 // | 3 7 11 // | // | 0 // | // v // 4 --------> 5 --------> 6 --------> 7 // | 4 8 12 // | // | 1 // | // v // 8 --------> 9 --------> 10 --------> 11 // | 5 9 13 // | // | 2 // | // v // 12 --------> 13 --------> 14 --------> 15 // 6 10 14 // // Here // // 5 ---------> 6 // 8 // // means that r[8] is the difference between t[5] and t[6]. // // - optionally, a 4-by-4 pixel block where all pixels have the // same value can be treated as a special case, where the // compressed block contains only 3 instead of 14 bytes: // t[0], followed by an "impossible" 6-bit shift value and // two padding bits. // // This compressor can handle positive and negative pixel values. // NaNs and infinities are replaced with zeroes before compression. 
// //----------------------------------------------------------------------------- #include "ImfB44Compressor.h" #include "ImfHeader.h" #include "ImfChannelList.h" #include "ImfMisc.h" #include "ImfCheckedArithmetic.h" #include <ImathFun.h> #include <ImathBox.h> #include <Iex.h> #include <ImfIO.h> #include <ImfXdr.h> #include <string.h> #include <assert.h> #include <algorithm> #include "ImfNamespace.h" OPENEXR_IMF_INTERNAL_NAMESPACE_SOURCE_ENTER using IMATH_NAMESPACE::divp; using IMATH_NAMESPACE::modp; using IMATH_NAMESPACE::Box2i; using IMATH_NAMESPACE::V2i; using std::min; namespace { // // Lookup tables for // y = exp (x / 8) // and // x = 8 * log (y) // #include "b44ExpLogTable.h" inline void convertFromLinear (unsigned short s[16]) { for (int i = 0; i < 16; ++i) s[i] = expTable[s[i]]; } inline void convertToLinear (unsigned short s[16]) { for (int i = 0; i < 16; ++i) s[i] = logTable[s[i]]; } inline int shiftAndRound (int x, int shift) { // // Compute // // y = x * pow (2, -shift), // // then round y to the nearest integer. // In case of a tie, where y is exactly // halfway between two integers, round // to the even one. // x <<= 1; int a = (1 << shift) - 1; shift += 1; int b = (x >> shift) & 1; return (x + a + b) >> shift; } int pack (const unsigned short s[16], unsigned char b[14], bool optFlatFields, bool exactMax) { // // Pack a block of 4 by 4 16-bit pixels (32 bytes) into // either 14 or 3 bytes. // // // Integers s[0] ... s[15] represent floating-point numbers // in what is essentially a sign-magnitude format. Convert // s[0] .. s[15] into a new set of integers, t[0] ... t[15], // such that if t[i] is greater than t[j], the floating-point // number that corresponds to s[i] is always greater than // the floating-point number that corresponds to s[j]. // // Also, replace any bit patterns that represent NaNs or // infinities with bit patterns that represent floating-point // zeroes. // // bit pattern floating-point bit pattern // in s[i] value in t[i] // // 0x7fff NAN 0x8000 // 0x7ffe NAN 0x8000 // ... ... // 0x7c01 NAN 0x8000 // 0x7c00 +infinity 0x8000 // 0x7bff +HALF_MAX 0xfbff // 0x7bfe 0xfbfe // 0x7bfd 0xfbfd // ... ... // 0x0002 +2 * HALF_MIN 0x8002 // 0x0001 +HALF_MIN 0x8001 // 0x0000 +0.0 0x8000 // 0x8000 -0.0 0x7fff // 0x8001 -HALF_MIN 0x7ffe // 0x8002 -2 * HALF_MIN 0x7ffd // ... ... // 0xfbfd 0x0f02 // 0xfbfe 0x0401 // 0xfbff -HALF_MAX 0x0400 // 0xfc00 -infinity 0x8000 // 0xfc01 NAN 0x8000 // ... ... // 0xfffe NAN 0x8000 // 0xffff NAN 0x8000 // unsigned short t[16]; for (int i = 0; i < 16; ++i) { if ((s[i] & 0x7c00) == 0x7c00) t[i] = 0x8000; else if (s[i] & 0x8000) t[i] = ~s[i]; else t[i] = s[i] | 0x8000; } // // Find the maximum, tMax, of t[0] ... t[15]. // unsigned short tMax = 0; for (int i = 0; i < 16; ++i) if (tMax < t[i]) tMax = t[i]; // // Compute a set of running differences, r[0] ... r[14]: // Find a shift value such that after rounding off the // rightmost bits and shifting all differenes are between // -32 and +31. Then bias the differences so that they // end up between 0 and 63. // int shift = -1; int d[16]; int r[15]; int rMin; int rMax; const int bias = 0x20; do { shift += 1; // // Compute absolute differences, d[0] ... d[15], // between tMax and t[0] ... t[15]. // // Shift and round the absolute differences. // for (int i = 0; i < 16; ++i) d[i] = shiftAndRound (tMax - t[i], shift); // // Convert d[0] .. 
d[15] into running differences // r[ 0] = d[ 0] - d[ 4] + bias; r[ 1] = d[ 4] - d[ 8] + bias; r[ 2] = d[ 8] - d[12] + bias; r[ 3] = d[ 0] - d[ 1] + bias; r[ 4] = d[ 4] - d[ 5] + bias; r[ 5] = d[ 8] - d[ 9] + bias; r[ 6] = d[12] - d[13] + bias; r[ 7] = d[ 1] - d[ 2] + bias; r[ 8] = d[ 5] - d[ 6] + bias; r[ 9] = d[ 9] - d[10] + bias; r[10] = d[13] - d[14] + bias; r[11] = d[ 2] - d[ 3] + bias; r[12] = d[ 6] - d[ 7] + bias; r[13] = d[10] - d[11] + bias; r[14] = d[14] - d[15] + bias; rMin = r[0]; rMax = r[0]; for (int i = 1; i < 15; ++i) { if (rMin > r[i]) rMin = r[i]; if (rMax < r[i]) rMax = r[i]; } } while (rMin < 0 || rMax > 0x3f); if (rMin == bias && rMax == bias && optFlatFields) { // // Special case - all pixels have the same value. // We encode this in 3 instead of 14 bytes by // storing the value 0xfc in the third output byte, // which cannot occur in the 14-byte encoding. // b[0] = (t[0] >> 8); b[1] = (unsigned char) t[0]; b[2] = 0xfc; return 3; } if (exactMax) { // // Adjust t[0] so that the pixel whose value is equal // to tMax gets represented as accurately as possible. // t[0] = tMax - (d[0] << shift); } // // Pack t[0], shift and r[0] ... r[14] into 14 bytes: // b[ 0] = (t[0] >> 8); b[ 1] = (unsigned char) t[0]; b[ 2] = (unsigned char) ((shift << 2) | (r[ 0] >> 4)); b[ 3] = (unsigned char) ((r[ 0] << 4) | (r[ 1] >> 2)); b[ 4] = (unsigned char) ((r[ 1] << 6) | r[ 2] ); b[ 5] = (unsigned char) ((r[ 3] << 2) | (r[ 4] >> 4)); b[ 6] = (unsigned char) ((r[ 4] << 4) | (r[ 5] >> 2)); b[ 7] = (unsigned char) ((r[ 5] << 6) | r[ 6] ); b[ 8] = (unsigned char) ((r[ 7] << 2) | (r[ 8] >> 4)); b[ 9] = (unsigned char) ((r[ 8] << 4) | (r[ 9] >> 2)); b[10] = (unsigned char) ((r[ 9] << 6) | r[10] ); b[11] = (unsigned char) ((r[11] << 2) | (r[12] >> 4)); b[12] = (unsigned char) ((r[12] << 4) | (r[13] >> 2)); b[13] = (unsigned char) ((r[13] << 6) | r[14] ); return 14; } inline void unpack14 (const unsigned char b[14], unsigned short s[16]) { // // Unpack a 14-byte block into 4 by 4 16-bit pixels. // #if defined (DEBUG) assert (b[2] != 0xfc); #endif s[ 0] = (b[0] << 8) | b[1]; unsigned short shift = (b[ 2] >> 2); unsigned short bias = (0x20u << shift); s[ 4] = s[ 0] + ((((b[ 2] << 4) | (b[ 3] >> 4)) & 0x3fu) << shift) - bias; s[ 8] = s[ 4] + ((((b[ 3] << 2) | (b[ 4] >> 6)) & 0x3fu) << shift) - bias; s[12] = s[ 8] + ((b[ 4] & 0x3fu) << shift) - bias; s[ 1] = s[ 0] + ((unsigned int) (b[ 5] >> 2) << shift) - bias; s[ 5] = s[ 4] + ((((b[ 5] << 4) | (b[ 6] >> 4)) & 0x3fu) << shift) - bias; s[ 9] = s[ 8] + ((((b[ 6] << 2) | (b[ 7] >> 6)) & 0x3fu) << shift) - bias; s[13] = s[12] + ((b[ 7] & 0x3fu) << shift) - bias; s[ 2] = s[ 1] + ((unsigned int)(b[ 8] >> 2) << shift) - bias; s[ 6] = s[ 5] + ((((b[ 8] << 4) | (b[ 9] >> 4)) & 0x3fu) << shift) - bias; s[10] = s[ 9] + ((((b[ 9] << 2) | (b[10] >> 6)) & 0x3fu) << shift) - bias; s[14] = s[13] + ((b[10] & 0x3fu) << shift) - bias; s[ 3] = s[ 2] + ((unsigned int)(b[11] >> 2) << shift) - bias; s[ 7] = s[ 6] + ((((b[11] << 4) | (b[12] >> 4)) & 0x3fu) << shift) - bias; s[11] = s[10] + ((((b[12] << 2) | (b[13] >> 6)) & 0x3fu) << shift) - bias; s[15] = s[14] + ((b[13] & 0x3fu) << shift) - bias; for (int i = 0; i < 16; ++i) { if (s[i] & 0x8000) s[i] &= 0x7fff; else s[i] = ~s[i]; } } inline void unpack3 (const unsigned char b[3], unsigned short s[16]) { // // Unpack a 3-byte block into 4 by 4 identical 16-bit pixels. 
// #if defined (DEBUG) assert (b[2] == 0xfc); #endif s[0] = (b[0] << 8) | b[1]; if (s[0] & 0x8000) s[0] &= 0x7fff; else s[0] = ~s[0]; for (int i = 1; i < 16; ++i) s[i] = s[0]; } void notEnoughData () { throw IEX_NAMESPACE::InputExc ("Error decompressing data " "(input data are shorter than expected)."); } void tooMuchData () { throw IEX_NAMESPACE::InputExc ("Error decompressing data " "(input data are longer than expected)."); } } // namespace struct B44Compressor::ChannelData { unsigned short * start; unsigned short * end; int nx; int ny; int ys; PixelType type; bool pLinear; int size; }; B44Compressor::B44Compressor (const Header &hdr, size_t maxScanLineSize, size_t numScanLines, bool optFlatFields) : Compressor (hdr), _maxScanLineSize (maxScanLineSize), _optFlatFields (optFlatFields), _format (XDR), _numScanLines (numScanLines), _tmpBuffer (0), _outBuffer (0), _numChans (0), _channels (hdr.channels()), _channelData (0) { // TODO: Remove this when we can change the ABI (void)_maxScanLineSize; // // Allocate buffers for compressed an uncompressed pixel data, // allocate a set of ChannelData structs to help speed up the // compress() and uncompress() functions, below, and determine // if uncompressed pixel data should be in native or Xdr format. // _tmpBuffer = new unsigned short [checkArraySize (uiMult (maxScanLineSize, numScanLines), sizeof (unsigned short))]; const ChannelList &channels = header().channels(); int numHalfChans = 0; for (ChannelList::ConstIterator c = channels.begin(); c != channels.end(); ++c) { assert (pixelTypeSize (c.channel().type) % pixelTypeSize (HALF) == 0); ++_numChans; if (c.channel().type == HALF) ++numHalfChans; } // // Compressed data may be larger than the input data // size_t padding = 12 * numHalfChans * (numScanLines + 3) / 4; _outBuffer = new char [uiAdd (uiMult (maxScanLineSize, numScanLines), padding)]; _channelData = new ChannelData[_numChans]; int i = 0; for (ChannelList::ConstIterator c = channels.begin(); c != channels.end(); ++c, ++i) { _channelData[i].ys = c.channel().ySampling; _channelData[i].type = c.channel().type; _channelData[i].pLinear = c.channel().pLinear; _channelData[i].size = pixelTypeSize (c.channel().type) / pixelTypeSize (HALF); } const Box2i &dataWindow = hdr.dataWindow(); _minX = dataWindow.min.x; _maxX = dataWindow.max.x; _maxY = dataWindow.max.y; // // We can support uncompressed data in the machine's native // format only if all image channels are of type HALF. 
// assert (sizeof (unsigned short) == pixelTypeSize (HALF)); if (_numChans == numHalfChans) _format = NATIVE; } B44Compressor::~B44Compressor () { delete [] _tmpBuffer; delete [] _outBuffer; delete [] _channelData; } int B44Compressor::numScanLines () const { return _numScanLines; } Compressor::Format B44Compressor::format () const { return _format; } int B44Compressor::compress (const char *inPtr, int inSize, int minY, const char *&outPtr) { return compress (inPtr, inSize, Box2i (V2i (_minX, minY), V2i (_maxX, minY + numScanLines() - 1)), outPtr); } int B44Compressor::compressTile (const char *inPtr, int inSize, IMATH_NAMESPACE::Box2i range, const char *&outPtr) { return compress (inPtr, inSize, range, outPtr); } int B44Compressor::uncompress (const char *inPtr, int inSize, int minY, const char *&outPtr) { return uncompress (inPtr, inSize, Box2i (V2i (_minX, minY), V2i (_maxX, minY + numScanLines() - 1)), outPtr); } int B44Compressor::uncompressTile (const char *inPtr, int inSize, IMATH_NAMESPACE::Box2i range, const char *&outPtr) { return uncompress (inPtr, inSize, range, outPtr); } int B44Compressor::compress (const char *inPtr, int inSize, IMATH_NAMESPACE::Box2i range, const char *&outPtr) { // // Compress a block of pixel data: First copy the input pixels // from the input buffer into _tmpBuffer, rearranging them such // that blocks of 4x4 pixels of a single channel can be accessed // conveniently. Then compress each 4x4 block of HALF pixel data // and append the result to the output buffer. Copy UINT and // FLOAT data to the output buffer without compressing them. // outPtr = _outBuffer; if (inSize == 0) { // // Special case - empty input buffer. // return 0; } // // For each channel, detemine how many pixels are stored // in the input buffer, and where those pixels will be // placed in _tmpBuffer. // int minX = range.min.x; int maxX = min (range.max.x, _maxX); int minY = range.min.y; int maxY = min (range.max.y, _maxY); unsigned short *tmpBufferEnd = _tmpBuffer; int i = 0; for (ChannelList::ConstIterator c = _channels.begin(); c != _channels.end(); ++c, ++i) { ChannelData &cd = _channelData[i]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = numSamples (c.channel().xSampling, minX, maxX); cd.ny = numSamples (c.channel().ySampling, minY, maxY); tmpBufferEnd += cd.nx * cd.ny * cd.size; } if (_format == XDR) { // // The data in the input buffer are in the machine-independent // Xdr format. Copy the HALF channels into _tmpBuffer and // convert them back into native format for compression. // Copy UINT and FLOAT channels verbatim into _tmpBuffer. // for (int y = minY; y <= maxY; ++y) { for (int i = 0; i < _numChans; ++i) { ChannelData &cd = _channelData[i]; if (modp (y, cd.ys) != 0) continue; if (cd.type == HALF) { for (int x = cd.nx; x > 0; --x) { Xdr::read <CharPtrIO> (inPtr, *cd.end); ++cd.end; } } else { int n = cd.nx * cd.size; memcpy (cd.end, inPtr, n * sizeof (unsigned short)); inPtr += n * sizeof (unsigned short); cd.end += n; } } } } else { // // The input buffer contains only HALF channels, and they // are in native, machine-dependent format. Copy the pixels // into _tmpBuffer. 
// for (int y = minY; y <= maxY; ++y) { for (int i = 0; i < _numChans; ++i) { ChannelData &cd = _channelData[i]; #if defined (DEBUG) assert (cd.type == HALF); #endif if (modp (y, cd.ys) != 0) continue; int n = cd.nx * cd.size; memcpy (cd.end, inPtr, n * sizeof (unsigned short)); inPtr += n * sizeof (unsigned short); cd.end += n; } } } // // The pixels for each channel have been packed into a contiguous // block in _tmpBuffer. HALF channels are in native format; UINT // and FLOAT channels are in Xdr format. // #if defined (DEBUG) for (int i = 1; i < _numChans; ++i) assert (_channelData[i-1].end == _channelData[i].start); assert (_channelData[_numChans-1].end == tmpBufferEnd); #endif // // For each HALF channel, split the data in _tmpBuffer into 4x4 // pixel blocks. Compress each block and append the compressed // data to the output buffer. // // UINT and FLOAT channels are copied from _tmpBuffer into the // output buffer without further processing. // char *outEnd = _outBuffer; for (int i = 0; i < _numChans; ++i) { ChannelData &cd = _channelData[i]; if (cd.type != HALF) { // // UINT or FLOAT channel. // int n = cd.nx * cd.ny * cd.size * sizeof (unsigned short); memcpy (outEnd, cd.start, n); outEnd += n; continue; } // // HALF channel // for (int y = 0; y < cd.ny; y += 4) { // // Copy the next 4x4 pixel block into array s. // If the width, cd.nx, or the height, cd.ny, of // the pixel data in _tmpBuffer is not divisible // by 4, then pad the data by repeating the // rightmost column and the bottom row. // unsigned short *row0 = cd.start + y * cd.nx; unsigned short *row1 = row0 + cd.nx; unsigned short *row2 = row1 + cd.nx; unsigned short *row3 = row2 + cd.nx; if (y + 3 >= cd.ny) { if (y + 1 >= cd.ny) row1 = row0; if (y + 2 >= cd.ny) row2 = row1; row3 = row2; } for (int x = 0; x < cd.nx; x += 4) { unsigned short s[16]; if (x + 3 >= cd.nx) { int n = cd.nx - x; for (int i = 0; i < 4; ++i) { int j = min (i, n - 1); s[i + 0] = row0[j]; s[i + 4] = row1[j]; s[i + 8] = row2[j]; s[i + 12] = row3[j]; } } else { memcpy (&s[ 0], row0, 4 * sizeof (unsigned short)); memcpy (&s[ 4], row1, 4 * sizeof (unsigned short)); memcpy (&s[ 8], row2, 4 * sizeof (unsigned short)); memcpy (&s[12], row3, 4 * sizeof (unsigned short)); } row0 += 4; row1 += 4; row2 += 4; row3 += 4; // // Compress the contents of array s and append the // results to the output buffer. // if (cd.pLinear) convertFromLinear (s); outEnd += pack (s, (unsigned char *) outEnd, _optFlatFields, !cd.pLinear); } } } return outEnd - _outBuffer; } int B44Compressor::uncompress (const char *inPtr, int inSize, IMATH_NAMESPACE::Box2i range, const char *&outPtr) { // // This function is the reverse of the compress() function, // above. First all pixels are moved from the input buffer // into _tmpBuffer. UINT and FLOAT channels are copied // verbatim; HALF channels are uncompressed in blocks of // 4x4 pixels. Then the pixels in _tmpBuffer are copied // into the output buffer and rearranged such that the data // for for each scan line form a contiguous block. 
// outPtr = _outBuffer; if (inSize == 0) { return 0; } int minX = range.min.x; int maxX = min (range.max.x, _maxX); int minY = range.min.y; int maxY = min (range.max.y, _maxY); unsigned short *tmpBufferEnd = _tmpBuffer; int i = 0; for (ChannelList::ConstIterator c = _channels.begin(); c != _channels.end(); ++c, ++i) { ChannelData &cd = _channelData[i]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = numSamples (c.channel().xSampling, minX, maxX); cd.ny = numSamples (c.channel().ySampling, minY, maxY); tmpBufferEnd += cd.nx * cd.ny * cd.size; } for (int i = 0; i < _numChans; ++i) { ChannelData &cd = _channelData[i]; if (cd.type != HALF) { // // UINT or FLOAT channel. // int n = cd.nx * cd.ny * cd.size * sizeof (unsigned short); if (inSize < n) notEnoughData(); memcpy (cd.start, inPtr, n); inPtr += n; inSize -= n; continue; } // // HALF channel // for (int y = 0; y < cd.ny; y += 4) { unsigned short *row0 = cd.start + y * cd.nx; unsigned short *row1 = row0 + cd.nx; unsigned short *row2 = row1 + cd.nx; unsigned short *row3 = row2 + cd.nx; for (int x = 0; x < cd.nx; x += 4) { unsigned short s[16]; if (inSize < 3) notEnoughData(); // // If shift exponent is 63, call unpack14 (ignoring unused bits) // if (((const unsigned char *)inPtr)[2] >= (13<<2) ) { unpack3 ((const unsigned char *)inPtr, s); inPtr += 3; inSize -= 3; } else { if (inSize < 14) notEnoughData(); unpack14 ((const unsigned char *)inPtr, s); inPtr += 14; inSize -= 14; } if (cd.pLinear) convertToLinear (s); int n = (x + 3 < cd.nx)? 4 * sizeof (unsigned short) : (cd.nx - x) * sizeof (unsigned short); if (y + 3 < cd.ny) { memcpy (row0, &s[ 0], n); memcpy (row1, &s[ 4], n); memcpy (row2, &s[ 8], n); memcpy (row3, &s[12], n); } else { memcpy (row0, &s[ 0], n); if (y + 1 < cd.ny) memcpy (row1, &s[ 4], n); if (y + 2 < cd.ny) memcpy (row2, &s[ 8], n); } row0 += 4; row1 += 4; row2 += 4; row3 += 4; } } } char *outEnd = _outBuffer; if (_format == XDR) { for (int y = minY; y <= maxY; ++y) { for (int i = 0; i < _numChans; ++i) { ChannelData &cd = _channelData[i]; if (modp (y, cd.ys) != 0) continue; if (cd.type == HALF) { for (int x = cd.nx; x > 0; --x) { Xdr::write <CharPtrIO> (outEnd, *cd.end); ++cd.end; } } else { int n = cd.nx * cd.size; memcpy (outEnd, cd.end, n * sizeof (unsigned short)); outEnd += n * sizeof (unsigned short); cd.end += n; } } } } else { for (int y = minY; y <= maxY; ++y) { for (int i = 0; i < _numChans; ++i) { ChannelData &cd = _channelData[i]; #if defined (DEBUG) assert (cd.type == HALF); #endif if (modp (y, cd.ys) != 0) continue; int n = cd.nx * cd.size; memcpy (outEnd, cd.end, n * sizeof (unsigned short)); outEnd += n * sizeof (unsigned short); cd.end += n; } } } #if defined (DEBUG) for (int i = 1; i < _numChans; ++i) assert (_channelData[i-1].end == _channelData[i].start); assert (_channelData[_numChans-1].end == tmpBufferEnd); #endif if (inSize > 0) tooMuchData(); outPtr = _outBuffer; return outEnd - _outBuffer; } OPENEXR_IMF_INTERNAL_NAMESPACE_SOURCE_EXIT
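This copy of ImfB44Compressor.cpp and the patched copy that follows in the ground_truth column differ in how the constructor sizes _tmpBuffer: here the element count is uiMult (maxScanLineSize, numScanLines), while the patched version divides maxScanLineSize by sizeof (unsigned short) first, so the scratch buffer is counted in 16-bit elements rather than in bytes. A minimal, self-contained sketch of the two sizing expressions, using hypothetical values for maxScanLineSize and numScanLines and a stand-in for OpenEXR's checked multiply (checkedMult is not the real uiMult/checkArraySize API):

// Sketch only: contrasts the two _tmpBuffer sizing expressions found in the
// vulnerable and patched constructors. checkedMult stands in for the
// checked-arithmetic helpers used in the real file.
#include <cstddef>
#include <cstdio>
#include <stdexcept>

static std::size_t checkedMult (std::size_t a, std::size_t b)
{
    if (b != 0 && a > static_cast<std::size_t> (-1) / b)
        throw std::overflow_error ("size overflow");
    return a * b;
}

int main ()
{
    std::size_t maxScanLineSize = 4096; // hypothetical: bytes per scan line
    std::size_t numScanLines    = 32;   // hypothetical: scan lines per block

    // Sizing used above: one unsigned short per byte of the uncompressed block.
    std::size_t oldCount = checkedMult (maxScanLineSize, numScanLines);

    // Sizing used in the patched copy below: one unsigned short per two bytes,
    // i.e. per 16-bit element that compress()/uncompress() actually write.
    std::size_t newCount = checkedMult (maxScanLineSize / sizeof (unsigned short),
                                        numScanLines);

    std::printf ("original: %zu shorts (%zu bytes)\n",
                 oldCount, oldCount * sizeof (unsigned short));
    std::printf ("patched:  %zu shorts (%zu bytes)\n",
                 newCount, newCount * sizeof (unsigned short));
    return 0;
}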
null
/////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2006, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////// //----------------------------------------------------------------------------- // // class B44Compressor // // This compressor is lossy for HALF channels; the compression rate // is fixed at 32/14 (approximately 2.28). FLOAT and UINT channels // are not compressed; their data are preserved exactly. // // Each HALF channel is split into blocks of 4 by 4 pixels. An // uncompressed block occupies 32 bytes, which are re-interpreted // as sixteen 16-bit unsigned integers, t[0] ... t[15]. Compression // shrinks the block to 14 bytes. The compressed 14-byte block // contains // // - t[0] // // - a 6-bit shift value // // - 15 densely packed 6-bit values, r[0] ... r[14], which are // computed by subtracting adjacent pixel values and right- // shifting the differences according to the stored shift value. // // Differences between adjacent pixels are computed according // to the following diagram: // // 0 --------> 1 --------> 2 --------> 3 // | 3 7 11 // | // | 0 // | // v // 4 --------> 5 --------> 6 --------> 7 // | 4 8 12 // | // | 1 // | // v // 8 --------> 9 --------> 10 --------> 11 // | 5 9 13 // | // | 2 // | // v // 12 --------> 13 --------> 14 --------> 15 // 6 10 14 // // Here // // 5 ---------> 6 // 8 // // means that r[8] is the difference between t[5] and t[6]. // // - optionally, a 4-by-4 pixel block where all pixels have the // same value can be treated as a special case, where the // compressed block contains only 3 instead of 14 bytes: // t[0], followed by an "impossible" 6-bit shift value and // two padding bits. // // This compressor can handle positive and negative pixel values. // NaNs and infinities are replaced with zeroes before compression. 
// //----------------------------------------------------------------------------- #include "ImfB44Compressor.h" #include "ImfHeader.h" #include "ImfChannelList.h" #include "ImfMisc.h" #include "ImfCheckedArithmetic.h" #include <ImathFun.h> #include <ImathBox.h> #include <Iex.h> #include <ImfIO.h> #include <ImfXdr.h> #include <string.h> #include <assert.h> #include <algorithm> #include "ImfNamespace.h" OPENEXR_IMF_INTERNAL_NAMESPACE_SOURCE_ENTER using IMATH_NAMESPACE::divp; using IMATH_NAMESPACE::modp; using IMATH_NAMESPACE::Box2i; using IMATH_NAMESPACE::V2i; using std::min; namespace { // // Lookup tables for // y = exp (x / 8) // and // x = 8 * log (y) // #include "b44ExpLogTable.h" inline void convertFromLinear (unsigned short s[16]) { for (int i = 0; i < 16; ++i) s[i] = expTable[s[i]]; } inline void convertToLinear (unsigned short s[16]) { for (int i = 0; i < 16; ++i) s[i] = logTable[s[i]]; } inline int shiftAndRound (int x, int shift) { // // Compute // // y = x * pow (2, -shift), // // then round y to the nearest integer. // In case of a tie, where y is exactly // halfway between two integers, round // to the even one. // x <<= 1; int a = (1 << shift) - 1; shift += 1; int b = (x >> shift) & 1; return (x + a + b) >> shift; } int pack (const unsigned short s[16], unsigned char b[14], bool optFlatFields, bool exactMax) { // // Pack a block of 4 by 4 16-bit pixels (32 bytes) into // either 14 or 3 bytes. // // // Integers s[0] ... s[15] represent floating-point numbers // in what is essentially a sign-magnitude format. Convert // s[0] .. s[15] into a new set of integers, t[0] ... t[15], // such that if t[i] is greater than t[j], the floating-point // number that corresponds to s[i] is always greater than // the floating-point number that corresponds to s[j]. // // Also, replace any bit patterns that represent NaNs or // infinities with bit patterns that represent floating-point // zeroes. // // bit pattern floating-point bit pattern // in s[i] value in t[i] // // 0x7fff NAN 0x8000 // 0x7ffe NAN 0x8000 // ... ... // 0x7c01 NAN 0x8000 // 0x7c00 +infinity 0x8000 // 0x7bff +HALF_MAX 0xfbff // 0x7bfe 0xfbfe // 0x7bfd 0xfbfd // ... ... // 0x0002 +2 * HALF_MIN 0x8002 // 0x0001 +HALF_MIN 0x8001 // 0x0000 +0.0 0x8000 // 0x8000 -0.0 0x7fff // 0x8001 -HALF_MIN 0x7ffe // 0x8002 -2 * HALF_MIN 0x7ffd // ... ... // 0xfbfd 0x0f02 // 0xfbfe 0x0401 // 0xfbff -HALF_MAX 0x0400 // 0xfc00 -infinity 0x8000 // 0xfc01 NAN 0x8000 // ... ... // 0xfffe NAN 0x8000 // 0xffff NAN 0x8000 // unsigned short t[16]; for (int i = 0; i < 16; ++i) { if ((s[i] & 0x7c00) == 0x7c00) t[i] = 0x8000; else if (s[i] & 0x8000) t[i] = ~s[i]; else t[i] = s[i] | 0x8000; } // // Find the maximum, tMax, of t[0] ... t[15]. // unsigned short tMax = 0; for (int i = 0; i < 16; ++i) if (tMax < t[i]) tMax = t[i]; // // Compute a set of running differences, r[0] ... r[14]: // Find a shift value such that after rounding off the // rightmost bits and shifting all differenes are between // -32 and +31. Then bias the differences so that they // end up between 0 and 63. // int shift = -1; int d[16]; int r[15]; int rMin; int rMax; const int bias = 0x20; do { shift += 1; // // Compute absolute differences, d[0] ... d[15], // between tMax and t[0] ... t[15]. // // Shift and round the absolute differences. // for (int i = 0; i < 16; ++i) d[i] = shiftAndRound (tMax - t[i], shift); // // Convert d[0] .. 
d[15] into running differences // r[ 0] = d[ 0] - d[ 4] + bias; r[ 1] = d[ 4] - d[ 8] + bias; r[ 2] = d[ 8] - d[12] + bias; r[ 3] = d[ 0] - d[ 1] + bias; r[ 4] = d[ 4] - d[ 5] + bias; r[ 5] = d[ 8] - d[ 9] + bias; r[ 6] = d[12] - d[13] + bias; r[ 7] = d[ 1] - d[ 2] + bias; r[ 8] = d[ 5] - d[ 6] + bias; r[ 9] = d[ 9] - d[10] + bias; r[10] = d[13] - d[14] + bias; r[11] = d[ 2] - d[ 3] + bias; r[12] = d[ 6] - d[ 7] + bias; r[13] = d[10] - d[11] + bias; r[14] = d[14] - d[15] + bias; rMin = r[0]; rMax = r[0]; for (int i = 1; i < 15; ++i) { if (rMin > r[i]) rMin = r[i]; if (rMax < r[i]) rMax = r[i]; } } while (rMin < 0 || rMax > 0x3f); if (rMin == bias && rMax == bias && optFlatFields) { // // Special case - all pixels have the same value. // We encode this in 3 instead of 14 bytes by // storing the value 0xfc in the third output byte, // which cannot occur in the 14-byte encoding. // b[0] = (t[0] >> 8); b[1] = (unsigned char) t[0]; b[2] = 0xfc; return 3; } if (exactMax) { // // Adjust t[0] so that the pixel whose value is equal // to tMax gets represented as accurately as possible. // t[0] = tMax - (d[0] << shift); } // // Pack t[0], shift and r[0] ... r[14] into 14 bytes: // b[ 0] = (t[0] >> 8); b[ 1] = (unsigned char) t[0]; b[ 2] = (unsigned char) ((shift << 2) | (r[ 0] >> 4)); b[ 3] = (unsigned char) ((r[ 0] << 4) | (r[ 1] >> 2)); b[ 4] = (unsigned char) ((r[ 1] << 6) | r[ 2] ); b[ 5] = (unsigned char) ((r[ 3] << 2) | (r[ 4] >> 4)); b[ 6] = (unsigned char) ((r[ 4] << 4) | (r[ 5] >> 2)); b[ 7] = (unsigned char) ((r[ 5] << 6) | r[ 6] ); b[ 8] = (unsigned char) ((r[ 7] << 2) | (r[ 8] >> 4)); b[ 9] = (unsigned char) ((r[ 8] << 4) | (r[ 9] >> 2)); b[10] = (unsigned char) ((r[ 9] << 6) | r[10] ); b[11] = (unsigned char) ((r[11] << 2) | (r[12] >> 4)); b[12] = (unsigned char) ((r[12] << 4) | (r[13] >> 2)); b[13] = (unsigned char) ((r[13] << 6) | r[14] ); return 14; } inline void unpack14 (const unsigned char b[14], unsigned short s[16]) { // // Unpack a 14-byte block into 4 by 4 16-bit pixels. // #if defined (DEBUG) assert (b[2] != 0xfc); #endif s[ 0] = (b[0] << 8) | b[1]; unsigned short shift = (b[ 2] >> 2); unsigned short bias = (0x20u << shift); s[ 4] = s[ 0] + ((((b[ 2] << 4) | (b[ 3] >> 4)) & 0x3fu) << shift) - bias; s[ 8] = s[ 4] + ((((b[ 3] << 2) | (b[ 4] >> 6)) & 0x3fu) << shift) - bias; s[12] = s[ 8] + ((b[ 4] & 0x3fu) << shift) - bias; s[ 1] = s[ 0] + ((unsigned int) (b[ 5] >> 2) << shift) - bias; s[ 5] = s[ 4] + ((((b[ 5] << 4) | (b[ 6] >> 4)) & 0x3fu) << shift) - bias; s[ 9] = s[ 8] + ((((b[ 6] << 2) | (b[ 7] >> 6)) & 0x3fu) << shift) - bias; s[13] = s[12] + ((b[ 7] & 0x3fu) << shift) - bias; s[ 2] = s[ 1] + ((unsigned int)(b[ 8] >> 2) << shift) - bias; s[ 6] = s[ 5] + ((((b[ 8] << 4) | (b[ 9] >> 4)) & 0x3fu) << shift) - bias; s[10] = s[ 9] + ((((b[ 9] << 2) | (b[10] >> 6)) & 0x3fu) << shift) - bias; s[14] = s[13] + ((b[10] & 0x3fu) << shift) - bias; s[ 3] = s[ 2] + ((unsigned int)(b[11] >> 2) << shift) - bias; s[ 7] = s[ 6] + ((((b[11] << 4) | (b[12] >> 4)) & 0x3fu) << shift) - bias; s[11] = s[10] + ((((b[12] << 2) | (b[13] >> 6)) & 0x3fu) << shift) - bias; s[15] = s[14] + ((b[13] & 0x3fu) << shift) - bias; for (int i = 0; i < 16; ++i) { if (s[i] & 0x8000) s[i] &= 0x7fff; else s[i] = ~s[i]; } } inline void unpack3 (const unsigned char b[3], unsigned short s[16]) { // // Unpack a 3-byte block into 4 by 4 identical 16-bit pixels. 
// #if defined (DEBUG) assert (b[2] == 0xfc); #endif s[0] = (b[0] << 8) | b[1]; if (s[0] & 0x8000) s[0] &= 0x7fff; else s[0] = ~s[0]; for (int i = 1; i < 16; ++i) s[i] = s[0]; } void notEnoughData () { throw IEX_NAMESPACE::InputExc ("Error decompressing data " "(input data are shorter than expected)."); } void tooMuchData () { throw IEX_NAMESPACE::InputExc ("Error decompressing data " "(input data are longer than expected)."); } } // namespace struct B44Compressor::ChannelData { unsigned short * start; unsigned short * end; int nx; int ny; int ys; PixelType type; bool pLinear; int size; }; B44Compressor::B44Compressor (const Header &hdr, size_t maxScanLineSize, size_t numScanLines, bool optFlatFields) : Compressor (hdr), _maxScanLineSize (maxScanLineSize), _optFlatFields (optFlatFields), _format (XDR), _numScanLines (numScanLines), _tmpBuffer (0), _outBuffer (0), _numChans (0), _channels (hdr.channels()), _channelData (0) { // TODO: Remove this when we can change the ABI (void)_maxScanLineSize; // // Allocate buffers for compressed an uncompressed pixel data, // allocate a set of ChannelData structs to help speed up the // compress() and uncompress() functions, below, and determine // if uncompressed pixel data should be in native or Xdr format. // _tmpBuffer = new unsigned short [checkArraySize (uiMult (maxScanLineSize / sizeof(unsigned short), numScanLines), sizeof (unsigned short))]; const ChannelList &channels = header().channels(); int numHalfChans = 0; for (ChannelList::ConstIterator c = channels.begin(); c != channels.end(); ++c) { assert (pixelTypeSize (c.channel().type) % pixelTypeSize (HALF) == 0); ++_numChans; if (c.channel().type == HALF) ++numHalfChans; } // // Compressed data may be larger than the input data // size_t padding = 12 * numHalfChans * (numScanLines + 3) / 4; _outBuffer = new char [uiAdd (uiMult (maxScanLineSize, numScanLines), padding)]; _channelData = new ChannelData[_numChans]; int i = 0; for (ChannelList::ConstIterator c = channels.begin(); c != channels.end(); ++c, ++i) { _channelData[i].ys = c.channel().ySampling; _channelData[i].type = c.channel().type; _channelData[i].pLinear = c.channel().pLinear; _channelData[i].size = pixelTypeSize (c.channel().type) / pixelTypeSize (HALF); } const Box2i &dataWindow = hdr.dataWindow(); _minX = dataWindow.min.x; _maxX = dataWindow.max.x; _maxY = dataWindow.max.y; // // We can support uncompressed data in the machine's native // format only if all image channels are of type HALF. 
// assert (sizeof (unsigned short) == pixelTypeSize (HALF)); if (_numChans == numHalfChans) _format = NATIVE; } B44Compressor::~B44Compressor () { delete [] _tmpBuffer; delete [] _outBuffer; delete [] _channelData; } int B44Compressor::numScanLines () const { return _numScanLines; } Compressor::Format B44Compressor::format () const { return _format; } int B44Compressor::compress (const char *inPtr, int inSize, int minY, const char *&outPtr) { return compress (inPtr, inSize, Box2i (V2i (_minX, minY), V2i (_maxX, minY + numScanLines() - 1)), outPtr); } int B44Compressor::compressTile (const char *inPtr, int inSize, IMATH_NAMESPACE::Box2i range, const char *&outPtr) { return compress (inPtr, inSize, range, outPtr); } int B44Compressor::uncompress (const char *inPtr, int inSize, int minY, const char *&outPtr) { return uncompress (inPtr, inSize, Box2i (V2i (_minX, minY), V2i (_maxX, minY + numScanLines() - 1)), outPtr); } int B44Compressor::uncompressTile (const char *inPtr, int inSize, IMATH_NAMESPACE::Box2i range, const char *&outPtr) { return uncompress (inPtr, inSize, range, outPtr); } int B44Compressor::compress (const char *inPtr, int inSize, IMATH_NAMESPACE::Box2i range, const char *&outPtr) { // // Compress a block of pixel data: First copy the input pixels // from the input buffer into _tmpBuffer, rearranging them such // that blocks of 4x4 pixels of a single channel can be accessed // conveniently. Then compress each 4x4 block of HALF pixel data // and append the result to the output buffer. Copy UINT and // FLOAT data to the output buffer without compressing them. // outPtr = _outBuffer; if (inSize == 0) { // // Special case - empty input buffer. // return 0; } // // For each channel, detemine how many pixels are stored // in the input buffer, and where those pixels will be // placed in _tmpBuffer. // int minX = range.min.x; int maxX = min (range.max.x, _maxX); int minY = range.min.y; int maxY = min (range.max.y, _maxY); unsigned short *tmpBufferEnd = _tmpBuffer; int i = 0; for (ChannelList::ConstIterator c = _channels.begin(); c != _channels.end(); ++c, ++i) { ChannelData &cd = _channelData[i]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = numSamples (c.channel().xSampling, minX, maxX); cd.ny = numSamples (c.channel().ySampling, minY, maxY); tmpBufferEnd += cd.nx * cd.ny * cd.size; } if (_format == XDR) { // // The data in the input buffer are in the machine-independent // Xdr format. Copy the HALF channels into _tmpBuffer and // convert them back into native format for compression. // Copy UINT and FLOAT channels verbatim into _tmpBuffer. // for (int y = minY; y <= maxY; ++y) { for (int i = 0; i < _numChans; ++i) { ChannelData &cd = _channelData[i]; if (modp (y, cd.ys) != 0) continue; if (cd.type == HALF) { for (int x = cd.nx; x > 0; --x) { Xdr::read <CharPtrIO> (inPtr, *cd.end); ++cd.end; } } else { int n = cd.nx * cd.size; memcpy (cd.end, inPtr, n * sizeof (unsigned short)); inPtr += n * sizeof (unsigned short); cd.end += n; } } } } else { // // The input buffer contains only HALF channels, and they // are in native, machine-dependent format. Copy the pixels // into _tmpBuffer. 
// for (int y = minY; y <= maxY; ++y) { for (int i = 0; i < _numChans; ++i) { ChannelData &cd = _channelData[i]; #if defined (DEBUG) assert (cd.type == HALF); #endif if (modp (y, cd.ys) != 0) continue; int n = cd.nx * cd.size; memcpy (cd.end, inPtr, n * sizeof (unsigned short)); inPtr += n * sizeof (unsigned short); cd.end += n; } } } // // The pixels for each channel have been packed into a contiguous // block in _tmpBuffer. HALF channels are in native format; UINT // and FLOAT channels are in Xdr format. // #if defined (DEBUG) for (int i = 1; i < _numChans; ++i) assert (_channelData[i-1].end == _channelData[i].start); assert (_channelData[_numChans-1].end == tmpBufferEnd); #endif // // For each HALF channel, split the data in _tmpBuffer into 4x4 // pixel blocks. Compress each block and append the compressed // data to the output buffer. // // UINT and FLOAT channels are copied from _tmpBuffer into the // output buffer without further processing. // char *outEnd = _outBuffer; for (int i = 0; i < _numChans; ++i) { ChannelData &cd = _channelData[i]; if (cd.type != HALF) { // // UINT or FLOAT channel. // int n = cd.nx * cd.ny * cd.size * sizeof (unsigned short); memcpy (outEnd, cd.start, n); outEnd += n; continue; } // // HALF channel // for (int y = 0; y < cd.ny; y += 4) { // // Copy the next 4x4 pixel block into array s. // If the width, cd.nx, or the height, cd.ny, of // the pixel data in _tmpBuffer is not divisible // by 4, then pad the data by repeating the // rightmost column and the bottom row. // unsigned short *row0 = cd.start + y * cd.nx; unsigned short *row1 = row0 + cd.nx; unsigned short *row2 = row1 + cd.nx; unsigned short *row3 = row2 + cd.nx; if (y + 3 >= cd.ny) { if (y + 1 >= cd.ny) row1 = row0; if (y + 2 >= cd.ny) row2 = row1; row3 = row2; } for (int x = 0; x < cd.nx; x += 4) { unsigned short s[16]; if (x + 3 >= cd.nx) { int n = cd.nx - x; for (int i = 0; i < 4; ++i) { int j = min (i, n - 1); s[i + 0] = row0[j]; s[i + 4] = row1[j]; s[i + 8] = row2[j]; s[i + 12] = row3[j]; } } else { memcpy (&s[ 0], row0, 4 * sizeof (unsigned short)); memcpy (&s[ 4], row1, 4 * sizeof (unsigned short)); memcpy (&s[ 8], row2, 4 * sizeof (unsigned short)); memcpy (&s[12], row3, 4 * sizeof (unsigned short)); } row0 += 4; row1 += 4; row2 += 4; row3 += 4; // // Compress the contents of array s and append the // results to the output buffer. // if (cd.pLinear) convertFromLinear (s); outEnd += pack (s, (unsigned char *) outEnd, _optFlatFields, !cd.pLinear); } } } return outEnd - _outBuffer; } int B44Compressor::uncompress (const char *inPtr, int inSize, IMATH_NAMESPACE::Box2i range, const char *&outPtr) { // // This function is the reverse of the compress() function, // above. First all pixels are moved from the input buffer // into _tmpBuffer. UINT and FLOAT channels are copied // verbatim; HALF channels are uncompressed in blocks of // 4x4 pixels. Then the pixels in _tmpBuffer are copied // into the output buffer and rearranged such that the data // for for each scan line form a contiguous block. 
// outPtr = _outBuffer; if (inSize == 0) { return 0; } int minX = range.min.x; int maxX = min (range.max.x, _maxX); int minY = range.min.y; int maxY = min (range.max.y, _maxY); unsigned short *tmpBufferEnd = _tmpBuffer; int i = 0; for (ChannelList::ConstIterator c = _channels.begin(); c != _channels.end(); ++c, ++i) { ChannelData &cd = _channelData[i]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = numSamples (c.channel().xSampling, minX, maxX); cd.ny = numSamples (c.channel().ySampling, minY, maxY); tmpBufferEnd += cd.nx * cd.ny * cd.size; } for (int i = 0; i < _numChans; ++i) { ChannelData &cd = _channelData[i]; if (cd.type != HALF) { // // UINT or FLOAT channel. // int n = cd.nx * cd.ny * cd.size * sizeof (unsigned short); if (inSize < n) notEnoughData(); memcpy (cd.start, inPtr, n); inPtr += n; inSize -= n; continue; } // // HALF channel // for (int y = 0; y < cd.ny; y += 4) { unsigned short *row0 = cd.start + y * cd.nx; unsigned short *row1 = row0 + cd.nx; unsigned short *row2 = row1 + cd.nx; unsigned short *row3 = row2 + cd.nx; for (int x = 0; x < cd.nx; x += 4) { unsigned short s[16]; if (inSize < 3) notEnoughData(); // // If shift exponent is 63, call unpack14 (ignoring unused bits) // if (((const unsigned char *)inPtr)[2] >= (13<<2) ) { unpack3 ((const unsigned char *)inPtr, s); inPtr += 3; inSize -= 3; } else { if (inSize < 14) notEnoughData(); unpack14 ((const unsigned char *)inPtr, s); inPtr += 14; inSize -= 14; } if (cd.pLinear) convertToLinear (s); int n = (x + 3 < cd.nx)? 4 * sizeof (unsigned short) : (cd.nx - x) * sizeof (unsigned short); if (y + 3 < cd.ny) { memcpy (row0, &s[ 0], n); memcpy (row1, &s[ 4], n); memcpy (row2, &s[ 8], n); memcpy (row3, &s[12], n); } else { memcpy (row0, &s[ 0], n); if (y + 1 < cd.ny) memcpy (row1, &s[ 4], n); if (y + 2 < cd.ny) memcpy (row2, &s[ 8], n); } row0 += 4; row1 += 4; row2 += 4; row3 += 4; } } } char *outEnd = _outBuffer; if (_format == XDR) { for (int y = minY; y <= maxY; ++y) { for (int i = 0; i < _numChans; ++i) { ChannelData &cd = _channelData[i]; if (modp (y, cd.ys) != 0) continue; if (cd.type == HALF) { for (int x = cd.nx; x > 0; --x) { Xdr::write <CharPtrIO> (outEnd, *cd.end); ++cd.end; } } else { int n = cd.nx * cd.size; memcpy (outEnd, cd.end, n * sizeof (unsigned short)); outEnd += n * sizeof (unsigned short); cd.end += n; } } } } else { for (int y = minY; y <= maxY; ++y) { for (int i = 0; i < _numChans; ++i) { ChannelData &cd = _channelData[i]; #if defined (DEBUG) assert (cd.type == HALF); #endif if (modp (y, cd.ys) != 0) continue; int n = cd.nx * cd.size; memcpy (outEnd, cd.end, n * sizeof (unsigned short)); outEnd += n * sizeof (unsigned short); cd.end += n; } } } #if defined (DEBUG) for (int i = 1; i < _numChans; ++i) assert (_channelData[i-1].end == _channelData[i].start); assert (_channelData[_numChans-1].end == tmpBufferEnd); #endif if (inSize > 0) tooMuchData(); outPtr = _outBuffer; return outEnd - _outBuffer; } OPENEXR_IMF_INTERNAL_NAMESPACE_SOURCE_EXIT
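Both copies implement the 3-byte "flat field" special case described in the header comment: when all sixteen values in a block agree, pack() stores t[0] followed by the marker byte 0xfc, and unpack3() rebuilds sixteen identical pixels from those three bytes. A small self-contained round trip of that path, with the sign-magnitude remapping copied from the pack()/unpack3() fragments above (flatPack, flatUnpack and the sample value are illustrative only, not the library's API):

// Sketch only: round-trips the 3-byte flat-field block used by the code above.
// flatPack/flatUnpack mirror the relevant fragments of pack() and unpack3().
#include <cassert>
#include <cstdio>

static void flatPack (unsigned short s0, unsigned char b[3])
{
    // Same remapping as pack(): NaN/Inf patterns become 0x8000, negative
    // values are bit-inverted, positive values get the sign bit set.
    unsigned short t0;
    if ((s0 & 0x7c00) == 0x7c00) t0 = 0x8000;
    else if (s0 & 0x8000)        t0 = (unsigned short) ~s0;
    else                         t0 = (unsigned short) (s0 | 0x8000);

    b[0] = (unsigned char) (t0 >> 8);
    b[1] = (unsigned char) t0;
    b[2] = 0xfc;   // marker byte that cannot occur in a 14-byte block
}

static void flatUnpack (const unsigned char b[3], unsigned short s[16])
{
    // Same as unpack3(): undo the remapping, then replicate the value.
    unsigned short v = (unsigned short) ((b[0] << 8) | b[1]);
    v = (v & 0x8000) ? (unsigned short) (v & 0x7fff) : (unsigned short) ~v;

    for (int i = 0; i < 16; ++i)
        s[i] = v;
}

int main ()
{
    unsigned short in = 0x3c00;   // illustrative HALF bit pattern (1.0)
    unsigned char  b[3];
    unsigned short out[16];

    flatPack (in, b);
    flatUnpack (b, out);

    for (int i = 0; i < 16; ++i)
        assert (out[i] == in);

    std::printf ("0x%04x -> %02x %02x %02x -> 0x%04x\n",
                 in, b[0], b[1], b[2], out[0]);
    return 0;
}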
null
240
CWE-787
CVE-2021-23165
/* * PostScript + PDF output routines for HTMLDOC, a HTML document processing * program. * * Just in case you didn't notice it, this file is too big; it will be * broken into more manageable pieces once we make all of the output * "drivers" into classes... * * Copyright © 2011-2021 by Michael R Sweet. * Copyright © 1997-2010 by Easy Software Products. All rights reserved. * * This program is free software. Distribution and use rights are outlined in * the file "COPYING". */ /* * Include necessary headers. */ /* * The GCC compiler on HP-UX has a nasty habit of incorrectly "fixing" * the vmtypes.h header file provided with HP-UX. The following * conditional magic makes sure that "page_t" (which we use in our * code) is not defined... */ #ifdef __hpux # define page_t hpux_page_t #endif // __hpux /*#define DEBUG*/ #include "htmldoc.h" #include "markdown.h" #include "md5-private.h" #define md5_append _cupsMD5Append #define md5_finish _cupsMD5Finish #define md5_init _cupsMD5Init typedef unsigned char md5_byte_t; #define md5_state_t _cups_md5_state_t #include "rc4.h" #include <stdarg.h> #include <ctype.h> #include <time.h> #include <math.h> #ifdef WIN32 # include <io.h> #else # include <unistd.h> #endif // WIN32 #include <fcntl.h> #include <zlib.h> extern "C" { /* Workaround for JPEG header problems... */ #include <jpeglib.h> /* JPEG/JFIF image definitions */ } #ifdef __hpux # undef page_t #endif // __hpux /* * Output options... */ #define HTMLDOC_ASCII85 //#define HTMLDOC_INTERPOLATION #define HTMLDOC_PRODUCER "htmldoc " SVERSION " Copyright 2011-2019 by Michael R Sweet" /* * Constants... */ #define RENDER_TEXT 0 /* Text fragment */ #define RENDER_IMAGE 1 /* Image */ #define RENDER_BOX 2 /* Box */ #define RENDER_LINK 3 /* Hyperlink */ #define RENDER_BG 4 /* Background image */ /* * Structures... */ typedef struct render_str /**** Render entity structure ****/ { struct render_str *prev; /* Previous rendering entity */ struct render_str *next; /* Next rendering entity */ int type; /* Type of entity */ float x, /* Position in points */ y, /* ... */ width, /* Size in points */ height; /* ... */ union { struct { int typeface, /* Typeface for text */ style; /* Style of text */ float size; /* Size of text in points */ float spacing; /* Inter-character spacing */ float rgb[3]; /* Color of text */ uchar buffer[1]; /* String buffer */ } text; image_t *image; /* Image pointer */ float box[3]; /* Box color */ uchar link[1]; /* Link URL */ } data; } render_t; typedef struct /**** Named link position structure */ { short page, /* Page # */ top; /* Top position */ uchar name[124]; /* Reference name */ } link_t; typedef struct //// Page information { int width, // Width of page in points length, // Length of page in points left, // Left margin in points right, // Right margin in points top, // Top margin in points bottom, // Bottom margin in points duplex, // Duplex this page? landscape; // Landscape orientation? 
render_t *start, // First render element *end; // Last render element uchar *url, // URL/file *chapter, // Chapter text *heading; // Heading text tree_t *headnode; // Heading node uchar *header[3], // Headers for regular pages *header1[3], // Headers for first pages *footer[3]; // Footers for all pages char media_color[64], // Media color media_type[64]; // Media type int media_position; // Media position char page_text[64]; // Page number for TOC image_t *background_image; // Background image float background_color[3]; // Background color // Number-up support int nup; // Number up pages int outpage; // Output page # float outmatrix[2][3]; // Transform matrix } page_t; typedef struct //// Output page info { int nup; // Number up pages int pages[16]; // Pages on this output page int annot_object; // Annotation object } outpage_t; /* * Local globals... */ static time_t doc_time; // Current time static struct tm doc_date; // Current date static uchar *current_url = NULL; static int title_page; static int chapter, chapter_outstarts[MAX_CHAPTERS], chapter_outends[MAX_CHAPTERS], chapter_starts[MAX_CHAPTERS], chapter_ends[MAX_CHAPTERS]; static size_t num_headings = 0, alloc_headings = 0; static int *heading_pages = NULL, *heading_tops = NULL; static size_t num_pages = 0, alloc_pages = 0; static page_t *pages = NULL; static tree_t *current_heading; static size_t num_outpages = 0; static outpage_t *outpages = NULL; static size_t num_links = 0, alloc_links = 0; static link_t *links = NULL; static uchar list_types[16]; static int list_values[16]; static char stdout_filename[256]; static size_t num_objects = 0, alloc_objects = 0; static int *objects = NULL, root_object, info_object, outline_object, pages_object, names_object, encrypt_object, font_objects[TYPE_MAX * STYLE_MAX]; static uchar *doc_title = NULL; static image_t *logo_image = NULL; static float logo_width, logo_height; static image_t *lh_image = NULL; static float lh_width, lh_height; static image_t *hfimage[MAX_HF_IMAGES]; static float hfimage_width[MAX_HF_IMAGES], hfimage_height[MAX_HF_IMAGES]; static float maxhfheight; static image_t *background_image = NULL; static float background_color[3] = { 1.0, 1.0, 1.0 }, link_color[3] = { 0.0, 0.0, 1.0 }; static int render_typeface, render_style; static float render_size, render_rgb[3], render_x, render_y, render_startx, render_spacing; static int compressor_active = 0; static z_stream compressor; static uchar comp_buffer[8192]; static uchar encrypt_key[16]; static int encrypt_len; static rc4_context_t encrypt_state; static md5_byte_t file_id[16]; /* * Local functions... 
*/ extern "C" { typedef int (*compare_func_t)(const void *, const void *); } static void pspdf_debug_stats(); static void pspdf_transform_coords(page_t *p, float &x, float &y); static void pspdf_transform_page(int outpage, int pos, int page); static void pspdf_prepare_outpages(); static void pspdf_prepare_page(int page); static void pspdf_prepare_heading(int page, int print_page, uchar **format, int y, char *page_text, int page_len); static void ps_write_document(uchar *author, uchar *creator, uchar *copyright, uchar *keywords, uchar *subject, uchar *lang); static void ps_write_outpage(FILE *out, int outpage); static void ps_write_page(FILE *out, int page); static void ps_write_background(FILE *out); static void pdf_write_document(uchar *author, uchar *creator, uchar *copyright, uchar *keywords, uchar *subject, uchar *lang, tree_t *doc, tree_t *toc); static void pdf_write_outpage(FILE *out, int outpage); static void pdf_write_page(FILE *out, int page); static void pdf_write_resources(FILE *out, int page); #ifdef DEBUG_TOC static void pdf_text_contents(FILE *out, tree_t *toc, int indent = 0); #endif // DEBUG_TOC static void pdf_write_contents(FILE *out, tree_t *toc, int parent, int prev, int next, int *heading); static void pdf_write_files(FILE *out, tree_t *doc); static void pdf_write_links(FILE *out); static void pdf_write_names(FILE *out); static int pdf_count_headings(tree_t *toc); static int pdf_start_object(FILE *out, int array = 0); static void pdf_start_stream(FILE *out); static void pdf_end_object(FILE *out); static void encrypt_init(void); static void flate_open_stream(FILE *out); static void flate_close_stream(FILE *out); static void flate_puts(const char *s, FILE *out); static void flate_printf(FILE *out, const char *format, ...); static void flate_write(FILE *out, uchar *inbuf, int length, int flush=0); static void parse_contents(tree_t *t, float left, float width, float bottom, float length, float *y, int *page, int *heading, tree_t *chap); static void parse_doc(tree_t *t, float *left, float *right, float *bottom, float *top, float *x, float *y, int *page, tree_t *cpara, int *needspace); static void parse_heading(tree_t *t, float left, float width, float bottom, float length, float *x, float *y, int *page, int needspace); static void parse_paragraph(tree_t *t, float left, float width, float bottom, float length, float *x, float *y, int *page, int needspace); static void parse_pre(tree_t *t, float left, float width, float bottom, float length, float *x, float *y, int *page, int needspace); static void parse_table(tree_t *t, float left, float width, float bottom, float length, float *x, float *y, int *page, int needspace); static void parse_list(tree_t *t, float *left, float *width, float *bottom, float *length, float *x, float *y, int *page, int needspace); static void init_list(tree_t *t); static void parse_comment(tree_t *t, float *left, float *width, float *bottom, float *length, float *x, float *y, int *page, tree_t *para, int needspace); static void check_pages(int page); static void add_link(uchar *name, int page, int top); static link_t *find_link(uchar *name); static int compare_links(link_t *n1, link_t *n2); static void find_background(tree_t *t); static void write_background(int page, FILE *out); static render_t *new_render(int page, int type, double x, double y, double width, double height, void *data, render_t *insert = 0); static float get_cell_size(tree_t *t, float left, float right, float *minwidth, float *prefwidth, float *minheight); static float 
get_table_size(tree_t *t, float left, float right, float *minwidth, float *prefwidth, float *minheight); static tree_t *flatten_tree(tree_t *t); static float get_width(uchar *s, int typeface, int style, int size); static void update_image_size(tree_t *t); static uchar *get_title(tree_t *doc); static FILE *open_file(void); static void set_color(FILE *out, float *rgb); static void set_font(FILE *out, int typeface, int style, float size); static void set_pos(FILE *out, float x, float y); static void write_prolog(FILE *out, int pages, uchar *author, uchar *creator, uchar *copyright, uchar *keywords, uchar *subject); static void ps_hex(FILE *out, uchar *data, int length); #ifdef HTMLDOC_ASCII85 static void ps_ascii85(FILE *out, uchar *data, int length, int eod = 0); #endif // HTMLDOC_ASCII85 static void jpg_init(j_compress_ptr cinfo); static boolean jpg_empty(j_compress_ptr cinfo); static void jpg_term(j_compress_ptr cinfo); static void jpg_setup(FILE *out, image_t *img, j_compress_ptr cinfo); static int compare_rgb(unsigned *rgb1, unsigned *rgb2); static void write_image(FILE *out, render_t *r, int write_obj = 0); static void write_imagemask(FILE *out, render_t *r); static void write_string(FILE *out, uchar *s, int compress); static void write_text(FILE *out, render_t *r); static void write_trailer(FILE *out, int pages, uchar *lang); static int write_type1(FILE *out, typeface_t typeface, style_t style); static void write_utf16(FILE *out, uchar *s); /* * 'pspdf_export()' - Export PostScript/PDF file(s)... */ int pspdf_export(tree_t *document, /* I - Document to export */ tree_t *toc) /* I - Table of contents for document */ { int i, j; /* Looping vars */ const char *title_file; /* Location of title image/file */ uchar *author, /* Author of document */ *creator, /* HTML file creator (Netscape, etc) */ *copyright, /* File copyright */ *docnumber, /* Document number */ *keywords, /* Search keywords */ *subject, /* Subject */ *lang; /* Language */ tree_t *t; /* Title page document tree */ FILE *fp; /* Title page file */ float x, y, /* Current page position */ left, right, /* Left and right margins */ bottom, top, /* Bottom and top margins */ width, /* Width of , author, etc */ height; /* Height of area */ int page, /* Current page # */ pos, /* Current header/footer position */ heading, /* Current heading # */ toc_duplex, /* Duplex TOC pages? */ toc_landscape, /* Do TOC in landscape? */ toc_width, /* Width of TOC pages */ toc_length, /* Length of TOC pages */ toc_left, /* TOC page margins */ toc_right, toc_bottom, toc_top; image_t *timage; /* Title image */ float timage_width, /* Title image width */ timage_height; /* Title image height */ render_t *r; /* Rendering structure... */ float rgb[3]; /* Text color */ int needspace; /* Need whitespace */ /* * Figure out the printable area of the output page... */ if (Landscape) { PagePrintWidth = PageLength - PageLeft - PageRight; PagePrintLength = PageWidth - PageTop - PageBottom; } else { PagePrintWidth = PageWidth - PageLeft - PageRight; PagePrintLength = PageLength - PageTop - PageBottom; } toc_width = PageWidth; toc_length = PageLength; toc_left = PageLeft; toc_right = PageRight; toc_bottom = PageBottom; toc_top = PageTop; toc_landscape = Landscape; toc_duplex = PageDuplex; /* * Get the document title, author, etc... 
*/ doc_title = get_title(document); author = htmlGetMeta(document, (uchar *)"author"); creator = htmlGetMeta(document, (uchar *)"generator"); copyright = htmlGetMeta(document, (uchar *)"copyright"); docnumber = htmlGetMeta(document, (uchar *)"docnumber"); keywords = htmlGetMeta(document, (uchar *)"keywords"); subject = htmlGetMeta(document, (uchar *)"subject"); lang = htmlGetMeta(document, (uchar *)"lang"); logo_image = image_load(LogoImage, !OutputColor); lh_image = image_load(Letterhead, !OutputColor); maxhfheight = 0.0f; if (docnumber == NULL) docnumber = htmlGetMeta(document, (uchar *)"version"); if (lh_image != NULL) { lh_width = (float)(lh_image->width * PagePrintWidth / _htmlBrowserWidth); lh_height = (float)(lh_width * lh_image->height / lh_image->width); if (lh_height > maxhfheight) maxhfheight = lh_height; } else lh_width = lh_height = 0.0f; if (logo_image != NULL) { logo_width = (float)(logo_image->width * PagePrintWidth / _htmlBrowserWidth); logo_height = (float)(logo_width * logo_image->height / logo_image->width); if (logo_height > (2.0 * HeadFootSize)) { // Issue #273: too large logo image will overlap the body text, so cap // the height of the logo image to the header/footer size... // // Issue #303: regression prevents using header/footer images for special // underlining/etc. effects. logo_height = (float)(2.0 * HeadFootSize); logo_width = logo_height * logo_image->width / logo_image->height; } if (logo_height > maxhfheight) maxhfheight = logo_height; } else logo_width = logo_height = 0.0f; for (int hfi = 0; hfi < MAX_HF_IMAGES; hfi ++) { hfimage[hfi] = image_load(HFImage[hfi], !OutputColor); if (hfimage[hfi]) { hfimage_width[hfi] = (float)(hfimage[hfi]->width * PagePrintWidth / _htmlBrowserWidth); hfimage_height[hfi] = (float)(hfimage_width[hfi] * hfimage[hfi]->height / hfimage[hfi]->width); if (hfimage_height[hfi] > (2.0 * HeadFootSize)) { // Issue #273: too large logo image will overlap the body text, so cap // the height of the logo image to the header/footer size... // // Issue #303: regression prevents using header/footer images for special // underlining/etc. effects. hfimage_height[hfi] = (float)(2.0 * HeadFootSize); hfimage_width[hfi] = hfimage_height[hfi] * hfimage[hfi]->width / hfimage[hfi]->height; } if (hfimage_height[hfi] > maxhfheight) maxhfheight = hfimage_height[hfi]; } else hfimage_width[hfi] = hfimage_height[hfi] = 0.0f; } find_background(document); get_color((uchar *)LinkColor, link_color); /* * Initialize page rendering variables... */ num_pages = 0; alloc_pages = 0; pages = NULL; memset(list_types, 0267, sizeof(list_types)); memset(list_values, 0, sizeof(list_values)); memset(chapter_starts, -1, sizeof(chapter_starts)); memset(chapter_ends, -1, sizeof(chapter_starts)); /* * Get the current date, using the SOURCE_DATE_EPOCH environment variable, if * present, for the number of seconds since the epoch - this enables * reproducible builds (Issue #310). 
*/ const char *source_date_epoch = getenv("SOURCE_DATE_EPOCH"); if (!source_date_epoch || (doc_time = (time_t)strtol(source_date_epoch, NULL, 10)) <= 0) doc_time = time(NULL); gmtime_r(&doc_time, &doc_date); num_headings = 0; alloc_headings = 0; heading_pages = NULL; heading_tops = NULL; num_links = 0; alloc_links = 0; links = NULL; num_pages = 0; DEBUG_printf(("pspdf_export: TitlePage = %d, TitleImage = \"%s\"\n", TitlePage, TitleImage)); if (TitlePage) { const char *title_ext = file_extension(TitleImage); #ifdef WIN32 if (TitleImage[0] && stricmp(title_ext, "bmp") != 0 && stricmp(title_ext, "gif") != 0 && stricmp(title_ext, "jpg") != 0 && stricmp(title_ext, "png") != 0) #else if (TitleImage[0] && strcmp(title_ext, "bmp") != 0 && strcmp(title_ext, "gif") != 0 && strcmp(title_ext, "jpg") != 0 && strcmp(title_ext, "png") != 0) #endif // WIN32 { DEBUG_printf(("pspdf_export: Generating a titlepage using \"%s\"\n", TitleImage)); // Find the title file... if ((title_file = file_find(Path, TitleImage)) == NULL) { progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to find title file \"%s\"!", TitleImage); return (1); } // Write a title page from HTML source... if ((fp = fopen(title_file, "rb")) == NULL) { progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to open title file \"%s\" - %s!", TitleImage, strerror(errno)); return (1); } #ifdef _WIN32 if (!stricmp(title_ext, "md")) #else if (!strcmp(title_ext, "md")) #endif // _WIN32 t = mdReadFile(NULL, fp, file_directory(TitleImage)); else t = htmlReadFile(NULL, fp, file_directory(TitleImage)); htmlFixLinks(t, t, (uchar *)file_directory(TitleImage)); fclose(fp); page = 0; title_page = 1; current_heading = NULL; x = 0.0f; bottom = 0.0f; top = PagePrintLength; y = top; needspace = 0; left = 0.0f; right = PagePrintWidth; parse_doc(t, &left, &right, &bottom, &top, &x, &y, &page, NULL, &needspace); if (PageDuplex && (num_pages & 1)) check_pages(num_pages); htmlDeleteTree(t); } else { /* * Create a standard title page... 
*/ if ((timage = image_load(TitleImage, !OutputColor)) != NULL) { timage_width = (float)(timage->width * PagePrintWidth / _htmlBrowserWidth); timage_height = (float)(timage_width * timage->height / timage->width); } else timage_width = timage_height = 0.0f; check_pages(0); if (PageDuplex) check_pages(1); height = 0.0; if (timage != NULL) height += timage_height + _htmlSpacings[SIZE_P]; if (doc_title != NULL) height += _htmlSpacings[SIZE_H1] + _htmlSpacings[SIZE_P]; if (author != NULL) height += _htmlSpacings[SIZE_P]; if (docnumber != NULL) height += _htmlSpacings[SIZE_P]; if (copyright != NULL) height += _htmlSpacings[SIZE_P]; y = 0.5f * (PagePrintLength + height); if (timage != NULL) { new_render(0, RENDER_IMAGE, 0.5f * (PagePrintWidth - timage_width), y - timage_height, timage_width, timage_height, timage); y -= timage_height + _htmlSpacings[SIZE_P]; } get_color(_htmlTextColor, rgb); if (doc_title != NULL) { width = get_width(doc_title, _htmlHeadingFont, STYLE_BOLD, SIZE_H1); r = new_render(0, RENDER_TEXT, (PagePrintWidth - width) * 0.5f, y - _htmlSpacings[SIZE_H1], width, _htmlSizes[SIZE_H1], doc_title); r->data.text.typeface = _htmlHeadingFont; r->data.text.style = STYLE_BOLD; r->data.text.size = (float)_htmlSizes[SIZE_H1]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); y -= _htmlSpacings[SIZE_H1]; if (docnumber != NULL) { width = get_width(docnumber, _htmlBodyFont, STYLE_NORMAL, SIZE_P); r = new_render(0, RENDER_TEXT, (PagePrintWidth - width) * 0.5f, y - _htmlSpacings[SIZE_P], width, _htmlSizes[SIZE_P], docnumber); r->data.text.typeface = _htmlBodyFont; r->data.text.style = STYLE_NORMAL; r->data.text.size = (float)_htmlSizes[SIZE_P]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); y -= _htmlSpacings[SIZE_P]; } y -= _htmlSpacings[SIZE_P]; } if (author != NULL) { width = get_width(author, _htmlBodyFont, STYLE_NORMAL, SIZE_P); r = new_render(0, RENDER_TEXT, (PagePrintWidth - width) * 0.5f, y - _htmlSpacings[SIZE_P], width, _htmlSizes[SIZE_P], author); r->data.text.typeface = _htmlBodyFont; r->data.text.style = STYLE_NORMAL; r->data.text.size = (float)_htmlSizes[SIZE_P]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); y -= _htmlSpacings[SIZE_P]; } if (copyright != NULL) { width = get_width(copyright, _htmlBodyFont, STYLE_NORMAL, SIZE_P); r = new_render(0, RENDER_TEXT, (PagePrintWidth - width) * 0.5f, y - _htmlSpacings[SIZE_P], width, _htmlSizes[SIZE_P], copyright); r->data.text.typeface = _htmlBodyFont; r->data.text.style = STYLE_NORMAL; r->data.text.size = (float)_htmlSizes[SIZE_P]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); } } for (page = 0; page < (int)num_pages; page ++) strlcpy((char *)pages[page].page_text, (page & 1) ? "eltit" : "title", sizeof(pages[page].page_text)); } else page = 0; /* * Parse the document... */ if (OutputType == OUTPUT_BOOK) chapter = 0; else { chapter = 1; TocDocCount = 1; chapter_starts[1] = num_pages; } title_page = 0; current_heading = NULL; x = 0.0f; needspace = 0; left = 0.0f; right = PagePrintWidth; // Adjust top margin as needed... 
float adjust, image_adjust, temp_adjust; if (maxhfheight > HeadFootSize) image_adjust = (float)(maxhfheight + HeadFootSize); else image_adjust = (float)(2 * HeadFootSize); for (adjust = 0.0, pos = 0; pos < 3; pos ++) { if (Header[pos] && (strstr(Header[pos], "$IMAGE") != NULL || strstr(Header[pos], "$HFIMAGE") != NULL || strstr(Header[pos], "$LETTERHEAD") != NULL)) temp_adjust = image_adjust; else if (Header1[pos] && (strstr(Header1[pos], "$IMAGE") != NULL || strstr(Header1[pos], "$HFIMAGE") != NULL || strstr(Header1[pos], "$LETTERHEAD") != NULL)) temp_adjust = image_adjust; else if (Header[pos] || Header1[pos]) temp_adjust = (float)(2 * HeadFootSize); else temp_adjust = 0.0; if (temp_adjust > adjust) adjust = temp_adjust; } top = PagePrintLength - adjust; // Adjust bottom margin as needed... for (adjust = 0.0, pos = 0; pos < 3; pos ++) { if (Footer[pos] && (strstr(Footer[pos], "$IMAGE") != NULL || strstr(Footer[pos], "$HFIMAGE") != NULL || strstr(Footer[pos], "$LETTERHEAD") != NULL)) temp_adjust = image_adjust; else if (Footer[pos]) temp_adjust = (float)(2 * HeadFootSize); else temp_adjust = 0.0; if (temp_adjust > adjust) adjust = temp_adjust; } bottom = adjust; y = top; parse_doc(document, &left, &right, &bottom, &top, &x, &y, &page, NULL, &needspace); if (PageDuplex && (num_pages & 1)) { if (PSLevel == 0) chapter_ends[chapter] = num_pages - 1; check_pages(num_pages); if (PSLevel > 0) chapter_ends[chapter] = num_pages - 1; } else chapter_ends[chapter] = num_pages - 1; for (chapter = 1; chapter <= TocDocCount; chapter ++) for (page = chapter_starts[chapter]; page <= chapter_ends[chapter]; page ++) pspdf_prepare_page(page); /* * Parse the table-of-contents if necessary... */ if (TocLevels > 0 && num_headings > 0) { // Restore default page size, etc... PageWidth = toc_width; PageLength = toc_length; PageLeft = toc_left; PageRight = toc_right; PageBottom = toc_bottom; PageTop = toc_top; Landscape = toc_landscape; PageDuplex = toc_duplex; if (Landscape) { PagePrintWidth = PageLength - PageLeft - PageRight; PagePrintLength = PageWidth - PageTop - PageBottom; } else { PagePrintWidth = PageWidth - PageLeft - PageRight; PagePrintLength = PageLength - PageTop - PageBottom; } // Adjust top margin as needed... for (pos = 0; pos < 3; pos ++) if (TocHeader[pos]) break; if (pos == 3) top = PagePrintLength; else if (maxhfheight > HeadFootSize) top = (float)(PagePrintLength - maxhfheight - HeadFootSize); else top = (float)(PagePrintLength - 2 * HeadFootSize); // Adjust bottom margin as needed... for (pos = 0; pos < 3; pos ++) if (TocFooter[pos]) break; if (pos == 3) bottom = 0.0f; else if (maxhfheight > HeadFootSize) bottom = (float)(maxhfheight + HeadFootSize); else bottom = (float)(2 * HeadFootSize); y = 0.0; page = num_pages - 1; heading = 0; chapter_starts[0] = num_pages; chapter = 0; parse_contents(toc, 0, PagePrintWidth, bottom, top, &y, &page, &heading, 0); if (PageDuplex && (num_pages & 1)) check_pages(num_pages); chapter_ends[0] = num_pages - 1; for (page = chapter_starts[0]; page <= chapter_ends[0]; page ++) pspdf_prepare_page(page); } if (TocDocCount > MAX_CHAPTERS) TocDocCount = MAX_CHAPTERS; /* * Do we have any pages? */ if (num_pages > 0 && TocDocCount > 0) { /* * Yes, write the document to disk... 
*/ pspdf_prepare_outpages(); pspdf_debug_stats(); progress_error(HD_ERROR_NONE, "PAGES: %d", (int)num_outpages); if (PSLevel > 0) ps_write_document(author, creator, copyright, keywords, subject, lang); else pdf_write_document(author, creator, copyright, keywords, subject, lang, document, toc); } else { /* * No, show an error... */ pspdf_debug_stats(); progress_error(HD_ERROR_NO_PAGES, "Error: no pages generated! (did you remember to use webpage mode?"); } /* * Free memory... */ if (doc_title != NULL) free(doc_title); if (alloc_links) { free(links); num_links = 0; alloc_links = 0; links = NULL; } for (i = 0; i < (int)num_pages; i ++) { if ((i == 0 || pages[i].chapter != pages[i - 1].chapter) && pages[i].chapter) free(pages[i].chapter); if ((i == 0 || pages[i].heading != pages[i - 1].heading) && pages[i].heading) free(pages[i].heading); if (!pages[i].heading) continue; for (j = 0; j < 3; j ++) { if (!pages[i].header[j]) continue; if (i == 0 || pages[i].header[j] != pages[i - 1].header[j]) free(pages[i].header[j]); } for (j = 0; j < 3; j ++) { if (!pages[i].header1[j]) continue; if (i == 0 || pages[i].header1[j] != pages[i - 1].header1[j]) free(pages[i].header1[j]); } for (j = 0; j < 3; j ++) { if (!pages[i].footer[j]) continue; if (i == 0 || pages[i].footer[j] != pages[i - 1].footer[j]) free(pages[i].footer[j]); } } for (i = 0; i < 3; i ++) { Header[i] = NULL; Header1[i] = NULL; Footer[i] = NULL; TocHeader[i] = NULL; TocFooter[i] = NULL; } if (alloc_pages) { free(pages); free(outpages); num_pages = 0; alloc_pages = 0; pages = NULL; } if (alloc_headings) { free(heading_pages); free(heading_tops); num_headings = 0; alloc_headings = 0; heading_pages = NULL; heading_tops = NULL; } return (0); } // // 'pspdf_debug_stats()' - Display debug statistics for render memory use. // static void pspdf_debug_stats() { const char *debug; // HTMLDOC_DEBUG env var int i; // Looping var render_t *r; // Render node int bytes; // Number of bytes if ((debug = getenv("HTMLDOC_DEBUG")) == NULL || (strstr(debug, "all") == NULL && strstr(debug, "memory") == NULL)) return; bytes = alloc_headings * sizeof(int) * 2; bytes += alloc_pages * sizeof(page_t); for (i = 0; i < (int)num_pages; i ++) { for (r = pages[i].start; r != NULL; r = r->next) { bytes += sizeof(render_t); if (r->type == RENDER_TEXT) bytes += strlen((char *)r->data.text.buffer); } } bytes += num_outpages * sizeof(outpage_t); bytes += alloc_links * sizeof(link_t); bytes += alloc_objects * sizeof(int); progress_error(HD_ERROR_NONE, "DEBUG: Render Data = %d kbytes", (bytes + 1023) / 1024); } /* * 'pspdf_transform_coords()' - Transform page coordinates. */ static void pspdf_transform_coords(page_t *p, // I - Page float &x, // IO - X coordinate float &y) // IO - Y coordinate { float tx, ty; // Temporary X and Y tx = x; ty = y; x = tx * p->outmatrix[0][0] + ty * p->outmatrix[0][1] + p->outmatrix[0][2]; y = tx * p->outmatrix[1][0] + ty * p->outmatrix[1][1] + p->outmatrix[1][2]; } /* * 'pspdf_transform_page()' - Transform a page. 
*/ static void pspdf_transform_page(int outpage, // I - Output page int pos, // I - Position on page int page) // I - Input page { outpage_t *op; // Current output page page_t *bp; // Current base page page_t *p; // Current input page int x, y; // Position on output page double w, l, // Width and length of subpage tx, ty; // Translation values for subpage double pw, pl; // Printable width and length of full page DEBUG_printf(("pspdf_transform_page(outpage = %d, pos = %d, page = %d)\n", outpage, pos, page)); if (pos > 15) progress_error(HD_ERROR_INTERNAL_ERROR, "Internal error: pos = %d", pos); op = outpages + outpage; op->pages[pos] = page; bp = pages + op->pages[0]; p = pages + page; p->outpage = outpage; pw = bp->width; pl = bp->length; DEBUG_printf((" width = %d, length = %d\n", p->width, p->length)); switch (op->nup) { default : case 1 : p->outmatrix[0][0] = 1.0f; p->outmatrix[1][0] = 0.0f; p->outmatrix[0][1] = 0.0f; p->outmatrix[1][1] = 1.0f; p->outmatrix[0][2] = 0.0f; p->outmatrix[1][2] = 0.0f; break; case 2 : x = pos & 1; l = pw; w = l * p->width / p->length; if (w > (pl * 0.5f)) { w = pl * 0.5f; l = w * p->length / p->width; } tx = 0.5 * (pl * 0.5 - w); ty = 0.5 * (pw - l); p->outmatrix[0][0] = 0.0f; p->outmatrix[1][0] = (float)(w / p->width); p->outmatrix[0][1] = (float)(-w / p->width); p->outmatrix[1][1] = 0.0f; p->outmatrix[0][2] = (float)(ty + pl * w / p->width); p->outmatrix[1][2] = (float)(tx + x * pl / 2); break; case 4 : x = pos & 1; y = 1 - pos / 2; w = pw * 0.5; l = w * p->length / p->width; if (l > (pl * 0.5)) { l = pl * 0.5; w = l * p->width / p->length; } tx = 0.5 * (pw * 0.5 - w); ty = 0.5 * (pl * 0.5 - l); p->outmatrix[0][0] = (float)(w / p->width); p->outmatrix[1][0] = 0.0f; p->outmatrix[0][1] = 0.0f; p->outmatrix[1][1] = (float)(w / p->width); p->outmatrix[0][2] = (float)(tx + x * pw / 2); p->outmatrix[1][2] = (float)(ty + y * pl / 2); break; case 6 : x = pos % 3; y = pos / 3; l = pw * 0.5; w = l * p->width / p->length; if (w > (pl * 0.333f)) { w = pl * 0.333f; l = w * p->length / p->width; } tx = 0.5 * (pl * 0.333 - w); ty = 0.5 * (pw * 0.5 - l); p->outmatrix[0][0] = 0.0f; p->outmatrix[1][0] = (float)(w / p->width); p->outmatrix[0][1] = (float)(-w / p->width); p->outmatrix[1][1] = 0.0f; p->outmatrix[0][2] = (float)(ty + y * pw / 2 + pl * w / p->width); p->outmatrix[1][2] = (float)(tx + x * pl / 3); break; case 9 : x = pos % 3; y = 2 - pos / 3; w = pw * 0.333; l = w * p->length / p->width; if (l > (pl * 0.333)) { l = pl * 0.333; w = l * p->width / p->length; } tx = 0.5 * (pw * 0.333 - w); ty = 0.5 * (pl * 0.333 - l); p->outmatrix[0][0] = (float)(w / p->width); p->outmatrix[1][0] = 0.0f; p->outmatrix[0][1] = 0.0f; p->outmatrix[1][1] = (float)(w / p->width); p->outmatrix[0][2] = (float)(tx + x * pw / 3); p->outmatrix[1][2] = (float)(ty + y * pl / 3); break; case 16 : x = pos & 3; y = 3 - pos / 4; w = pw * 0.25; l = w * p->length / p->width; if (l > (pl * 0.25)) { l = pl * 0.25; w = l * p->width / p->length; } tx = 0.5 * (pw * 0.25 - w); ty = 0.5 * (pl * 0.25 - l); p->outmatrix[0][0] = (float)(w / p->width); p->outmatrix[1][0] = 0.0f; p->outmatrix[0][1] = 0.0f; p->outmatrix[1][1] = (float)(w / p->width); p->outmatrix[0][2] = (float)(tx + x * pw / 4); p->outmatrix[1][2] = (float)(ty + y * pl / 4); break; } } /* * 'pspdf_prepare_outpages()' - Prepare output pages... 
*/ static void pspdf_prepare_outpages() { int c, i, j; /* Looping vars */ int nup; /* Current number-up value */ page_t *page; /* Current page */ outpage_t *outpage; /* Current output page */ // Allocate an output page array... outpages = (outpage_t *)malloc(sizeof(outpage_t) * num_pages); memset(outpages, -1, sizeof(outpage_t) * num_pages); num_outpages = 0; outpage = outpages; // Handle the title page, as needed... if (TitlePage) { for (i = 0, j = 0, nup = -1, page = pages; i < chapter_starts[1]; i ++, page ++) { if (nup != page->nup) { if (j) { // Break the current output page... outpage ++; num_outpages ++; } nup = page->nup; j = 0; } if (!j) outpage->nup = nup; pspdf_transform_page(num_outpages, j, i); j ++; if (j >= nup) { j = 0; outpage ++; num_outpages ++; } } if (j) { // Break the current output page... outpage ++; num_outpages ++; } } // Loop through each chapter, adding pages as needed... if (OutputType == OUTPUT_BOOK && TocLevels > 0) c = 0; else c = 1; for (; c <= TocDocCount; c ++) { if (chapter_starts[c] < 0) continue; chapter_outstarts[c] = num_outpages; for (i = chapter_starts[c], j = 0, nup = -1, page = pages + i; i <= chapter_ends[c]; i ++, page ++) { if (nup != page->nup) { if (j) { // Break the current output page... outpage ++; num_outpages ++; } nup = page->nup; j = 0; } if (!j) outpage->nup = nup; pspdf_transform_page(num_outpages, j, i); j ++; if (j >= nup) { j = 0; outpage ++; num_outpages ++; } } if (j) { // Break the current output page... outpage ++; num_outpages ++; } chapter_outends[c] = num_outpages; } #ifdef DEBUG for (c = 0; c <= TocDocCount; c ++) printf("chapter_outstarts[%d] = %d, chapter_outends[%d] = %d\n", c, chapter_outstarts[c], c, chapter_outends[c]); printf("num_outpages = %d\n", (int)num_outpages); for (i = 0, outpage = outpages; i < (int)num_outpages; i ++, outpage ++) { printf("outpage[%d]:\tnup=%d, pages=[", i, outpage->nup); for (j = 0; j < outpage->nup; j ++) printf(" %d", outpage->pages[j]); puts(" ]"); page = pages + outpage->pages[0]; printf("\t\twidth = %d, length = %d\n", page->width, page->length); } for (c = 0; c <= TocDocCount; c ++) printf("chapter_starts[%d] = %d, chapter_ends[%d] = %d\n", c, chapter_starts[c], c, chapter_ends[c]); for (i = 0; i < (int)num_pages; i ++) printf("pages[%d]->outpage = %d\n", i, pages[i].outpage); for (i = 0; i < (int)num_headings; i ++) printf("heading_pages[%d] = %d\n", i, heading_pages[i]); for (i = 0; i < (int)num_links; i ++) printf("links[%d].name = \"%s\", page = %d\n", i, links[i].name, links[i].page); #endif // DEBUG } /* * 'pspdf_prepare_page()' - Add headers/footers to page before writing... */ static void pspdf_prepare_page(int page) /* I - Page number */ { int print_page; /* Printed page # */ char page_text[64]; /* Page number text */ int top; /* Top of page */ DEBUG_printf(("pspdf_prepare_page(%d)\n", page)); if (page < 0 || page >= num_pages) return; /* * Make a page number; use roman numerals for the table of contents * and arabic numbers for all others... */ if (chapter == 0 && OutputType == OUTPUT_BOOK) { print_page = page - chapter_starts[0] + 1; strlcpy(page_text, format_number(print_page, 'i'), sizeof(page_text)); } else if (chapter < 0) { print_page = 0; // Safe because page_text is more than 6 chars strlcpy(page_text, (page & 1) ? 
(char *)"eltit" : (char *)"title", sizeof(page_text)); } else { print_page = page - chapter_starts[1] + 1; strlcpy(page_text, format_number(print_page, '1'), sizeof(page_text)); } DEBUG_printf(("BEFORE page %d page_text is \"%s\"...\n", page, page_text)); DEBUG_printf((" header[0] = \"%s\"\n", pages[page].header[0])); DEBUG_printf((" header[1] = \"%s\"\n", pages[page].header[1])); DEBUG_printf((" header[2] = \"%s\"\n", pages[page].header[2])); /* * Add page headings... */ if (pages[page].landscape) { PagePrintWidth = pages[page].length - pages[page].right - pages[page].left; PagePrintLength = pages[page].width - pages[page].top - pages[page].bottom; } else { PagePrintWidth = pages[page].width - pages[page].right - pages[page].left; PagePrintLength = pages[page].length - pages[page].top - pages[page].bottom; } top = (int)(PagePrintLength - HeadFootSize); if (chapter == 0) { /* * Add table-of-contents header & footer... */ pspdf_prepare_heading(page, print_page, pages[page].header, top, page_text, sizeof(page_text)); pspdf_prepare_heading(page, print_page, pages[page].footer, 0, page_text, sizeof(page_text)); } else if (chapter > 0 && !title_page) { /* * Add chapter header & footer... */ if (page > chapter_starts[chapter] || OutputType != OUTPUT_BOOK) pspdf_prepare_heading(page, print_page, pages[page].header, top, page_text, sizeof(page_text)); else pspdf_prepare_heading(page, print_page, pages[page].header1, top, page_text, sizeof(page_text)); pspdf_prepare_heading(page, print_page, pages[page].footer, 0, page_text, sizeof(page_text)); } /* * Copy the page number for the TOC... */ strlcpy(pages[page].page_text, page_text, sizeof(pages[page].page_text)); DEBUG_printf(("AFTER page %d page_text is \"%s\"...\n", page, page_text)); } /* * 'pspdf_prepare_heading()' - Add headers/footers to page before writing... */ static void pspdf_prepare_heading(int page, // I - Page number int print_page, // I - Printed page number uchar **format, // I - Page headings int y, // I - Baseline of heading char *page_text, // O - Page number text int page_len) // I - Size of page text { int pos, // Position in heading dir; // Direction of page char *number; // Page number char buffer[1024], // String buffer *bufptr, // Pointer into buffer *formatptr; // Pointer into format string int formatlen; // Length of format command string render_t *temp; // Render structure for titles, etc. DEBUG_printf(("pspdf_prepare_heading(%d, %d, [\"%s\",\"%s\",\"%s\"], %d, %p, %d)\n", page, print_page, format[0], format[1], format[2], y, (void *)page_text, page_len)); /* * Add page headings... */ if (PageDuplex && (page & 1)) { dir = -1; format += 2; } else dir = 1; for (pos = 0; pos < 3; pos ++, format += dir) { /* * Add the appropriate object... */ if (!*format) continue; temp = NULL; if (strncasecmp((char *)*format, "$LOGOIMAGE", 10) == 0 && logo_image) { // Insert the logo image... if (y < (PagePrintLength / 2)) temp = new_render(page, RENDER_IMAGE, 0, y, logo_width, logo_height, logo_image); else // Offset from top temp = new_render(page, RENDER_IMAGE, 0, y + HeadFootSize - logo_height, logo_width, logo_height, logo_image); } else if (strncasecmp((char *)*format, "$LETTERHEAD", 11) == 0 && lh_image) { // Insert the logo image as a letterhead... 
if (y < (PagePrintLength / 2)) temp = new_render(page, RENDER_IMAGE, 0, y, lh_width, lh_height, lh_image); else // Offset from top temp = new_render(page, RENDER_IMAGE, 0, y + HeadFootSize - lh_height, lh_width, lh_height, lh_image); } else if (strncasecmp((char *)*format, "$HFIMAGE", 8) == 0) { int hfi; // Header/footer image index char *hfp; // Pointer into $HFIMAGE hfi = strtol((char*)((*format) + 8), &hfp, 10); if (hfi < 0 || hfi >= MAX_HF_IMAGES || !(isspace(*hfp) || !*hfp)) progress_error(HD_ERROR_BAD_HF_STRING, "Bad $HFIMAGE... substitution on page %d.", page + 1); else { if (y < (PagePrintLength / 2)) temp = new_render(page, RENDER_IMAGE, 0, y, hfimage_width[hfi], hfimage_height[hfi], hfimage[hfi]); else temp = new_render(page, RENDER_IMAGE, 0, y + HeadFootSize - hfimage_height[hfi], hfimage_width[hfi], hfimage_height[hfi], hfimage[hfi]); } } else { // Otherwise format the text... buffer[sizeof(buffer) - 1] = '\0'; for (bufptr = buffer, formatptr = (char *)*format; *formatptr;) { if (*formatptr == '$') { if (formatptr[1] == '$') { if (bufptr < (buffer + sizeof(buffer) - 1)) *bufptr++ = '$'; formatptr += 2; continue; } else if (!formatptr[1]) break; formatptr ++; for (formatlen = 1; isalpha(formatptr[formatlen]); formatlen ++); if (formatlen == 4 && strncasecmp(formatptr, "PAGE", 4) == 0) { if (formatptr[4] == '(' && formatptr[5] && formatptr[6] == ')') { number = format_number(print_page, formatptr[5]); formatptr += 7; } else { number = format_number(print_page, '1'); formatptr += 4; } strlcpy(bufptr, number, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } else if (formatlen == 5 && strncasecmp(formatptr, "PAGES", 5) == 0) { if (formatptr[5] == '(' && formatptr[6] && formatptr[7] == ')') { number = format_number(chapter_ends[TocDocCount] - chapter_starts[1] + 1, formatptr[6]); formatptr += 8; } else { number = format_number(chapter_ends[TocDocCount] - chapter_starts[1] + 1, '1'); formatptr += 5; } strlcpy(bufptr, number, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } else if (formatlen == 11 && strncasecmp(formatptr, "CHAPTERPAGE", 11) == 0) { int chapter_page; chapter_page = print_page - chapter_starts[::chapter] + chapter_starts[1]; if (formatptr[11] == '(' && formatptr[12] && formatptr[13] == ')') { number = format_number(chapter_page, formatptr[12]); formatptr += 14; } else { number = format_number(chapter_page, '1'); formatptr += 11; } strlcpy(bufptr, number, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } else if (formatlen == 12 && strncasecmp(formatptr, "CHAPTERPAGES", 12) == 0) { if (formatptr[12] == '(' && formatptr[13] && formatptr[14] == ')') { number = format_number(chapter_ends[::chapter] - chapter_starts[::chapter] + 1, formatptr[13]); formatptr += 15; } else { number = format_number(chapter_ends[::chapter] - chapter_starts[::chapter] + 1, '1'); formatptr += 12; } strlcpy(bufptr, number, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } else if (formatlen == 5 && strncasecmp(formatptr, "TITLE", 5) == 0) { formatptr += 5; if (doc_title) { strlcpy(bufptr, (char *)doc_title, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } } else if (formatlen == 7 && strncasecmp(formatptr, "CHAPTER", 7) == 0) { formatptr += 7; if (pages[page].chapter) { strlcpy(bufptr, (char *)(pages[page].chapter), sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } } else if (formatlen == 7 && strncasecmp(formatptr, "HEADING", 7) == 0) { formatptr += 7; if 
(pages[page].heading) { strlcpy(bufptr, (char *)(pages[page].heading), sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } } else if (formatlen == 4 && strncasecmp(formatptr, "TIME", 4) == 0) { formatptr += 4; strftime(bufptr, sizeof(buffer) - 1 - (size_t)(bufptr - buffer), "%X", &doc_date); bufptr += strlen(bufptr); } else if (formatlen == 4 && strncasecmp(formatptr, "DATE", 4) == 0) { formatptr += 4; strftime(bufptr, sizeof(buffer) - 1 - (size_t)(bufptr - buffer), "%x", &doc_date); bufptr += strlen(bufptr); } else if (formatlen == 3 && strncasecmp(formatptr, "URL", 3) == 0) { uchar *url = pages[page].url ? pages[page].url : (uchar *)"Unknown"; formatptr += 3; strlcpy(bufptr, (char *)url, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } else { progress_error(HD_ERROR_BAD_HF_STRING, "Bad header/footer $ command on page %d.", page + 1); strlcpy(bufptr, formatptr - 1, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); formatptr += formatlen; } } else if (bufptr < (buffer + sizeof(buffer) - 1)) *bufptr++ = *formatptr++; else break; } *bufptr = '\0'; temp = new_render(page, RENDER_TEXT, 0, y, get_width((uchar *)buffer, HeadFootType, HeadFootStyle, SIZE_P) * HeadFootSize / _htmlSizes[SIZE_P], HeadFootSize, (uchar *)buffer); if (strstr((char *)*format, "$PAGE") || strstr((char *)*format, "$CHAPTERPAGE")) strlcpy(page_text, buffer, (size_t)page_len); } if (temp == NULL) continue; /* * Justify the object... */ switch (pos) { case 0 : /* Left justified */ break; case 1 : /* Centered */ temp->x = (float)((PagePrintWidth - temp->width) * 0.5); break; case 2 : /* Right justified */ temp->x = PagePrintWidth - temp->width; break; } /* * Set the text font and color... */ if (temp->type == RENDER_TEXT) { temp->data.text.typeface = HeadFootType; temp->data.text.style = HeadFootStyle; temp->data.text.size = (float)HeadFootSize; get_color(_htmlTextColor, temp->data.text.rgb); } } } /* * 'ps_write_document()' - Write all render entities to PostScript file(s). */ static void ps_write_document(uchar *author, /* I - Author of document */ uchar *creator, /* I - Application that generated the HTML file */ uchar *copyright, /* I - Copyright (if any) on the document */ uchar *keywords, /* I - Search keywords */ uchar *subject, /* I - Subject */ uchar *lang) /* I - Language */ { FILE *out; /* Output file */ int page; /* Current page # */ int first; /* First chapter */ /* * Write the title page(s)... 
*/ chapter = -1; out = NULL; if (!OutputFiles) { out = open_file(); if (out == NULL) { progress_error(HD_ERROR_WRITE_ERROR, "Unable to open output file - %s\n", strerror(errno)); return; } write_prolog(out, num_outpages, author, creator, copyright, keywords, subject); } if (OutputType == OUTPUT_BOOK && TocLevels > 0) first = 0; else first = 1; if (TitlePage) { if (OutputFiles) { out = open_file(); write_prolog(out, chapter_outstarts[first], author, creator, copyright, keywords, subject); } for (page = 0; page < chapter_outstarts[first]; page ++) ps_write_outpage(out, page); if (OutputFiles) { write_trailer(out, 0, lang); progress_error(HD_ERROR_NONE, "BYTES: %ld", ftell(out)); fclose(out); } } for (chapter = first; chapter <= TocDocCount; chapter ++) { if (chapter_starts[chapter] < 0) continue; if (OutputFiles) { out = open_file(); if (out == NULL) { progress_error(HD_ERROR_WRITE_ERROR, "Unable to create output file - %s\n", strerror(errno)); return; } write_prolog(out, chapter_outends[chapter] - chapter_outstarts[chapter], author, creator, copyright, keywords, subject); } for (page = chapter_outstarts[chapter]; page < chapter_outends[chapter]; page ++) ps_write_outpage(out, page); /* * Close the output file as necessary... */ if (OutputFiles) { write_trailer(out, 0, lang); progress_error(HD_ERROR_NONE, "BYTES: %ld", ftell(out)); fclose(out); } } /* * Close the output file as necessary... */ if (!OutputFiles) { write_trailer(out, 0, lang); progress_error(HD_ERROR_NONE, "BYTES: %ld", ftell(out)); if (out != stdout) fclose(out); } if (Verbosity) progress_hide(); } /* * 'ps_write_outpage()' - Write an output page. */ static void ps_write_outpage(FILE *out, /* I - Output file */ int outpage) /* I - Output page number */ { int file_page; /* Current page # in document */ page_t *p; /* Current page */ outpage_t *op; /* Current output page */ int i; /* Looping var */ if (outpage < 0 || outpage >= (int)num_outpages) return; op = outpages + outpage; p = pages + op->pages[0]; DEBUG_printf(("ps_write_outpage(%p, %d)\n", (void *)out, outpage)); /* * Let the user know which page we are writing... */ if (Verbosity) { progress_show("Writing page %s...", p->page_text); progress_update(100 * outpage / (int)num_outpages); } /* * Figure out the page number in the file... */ if (OutputFiles && chapter >= 0) file_page = outpage - chapter_outstarts[chapter] + 1; else if (chapter < 0) file_page = outpage + 1; else if (chapter == 0) { if (TitlePage) file_page = outpage + 1; else file_page = outpage - chapter_outstarts[0] + 1; } else { if (TitlePage) file_page = outpage + 1; else file_page = outpage - chapter_outstarts[1] + 1; } /* * Output the page prolog... 
*/ fprintf(out, "%%%%Page: (%s) %d\n", p->page_text, file_page); if (op->nup == 1) { if (p->duplex && !(file_page & 1)) fprintf(out, "%%%%PageBoundingBox: %d %d %d %d\n", p->right, p->bottom, p->width - p->left, p->length - p->top); else fprintf(out, "%%%%PageBoundingBox: %d %d %d %d\n", p->left, p->bottom, p->width - p->right, p->length - p->top); } else fprintf(out, "%%%%PageBoundingBox: 0 0 %d %d\n", p->width, p->length); if (PSLevel > 1 && PSCommands) { fputs("%%BeginPageSetup\n", out); if (p->width == 612 && p->length == 792) fputs("%%BeginFeature: *PageSize Letter\n", out); else if (p->width == 612 && p->length == 1008) fputs("%%BeginFeature: *PageSize Legal\n", out); else if (p->width == 792 && p->length == 1224) fputs("%%BeginFeature: *PageSize Tabloid\n", out); else if (p->width == 842 && p->length == 1190) fputs("%%BeginFeature: *PageSize A3\n", out); else if (p->width == 595 && p->length == 842) fputs("%%BeginFeature: *PageSize A4\n", out); else fprintf(out, "%%%%BeginFeature: *PageSize w%dh%d\n", p->width, p->length); fprintf(out, "%d %d SetPageSize\n", p->width, p->length); fputs("%%EndFeature\n", out); if (p->duplex) { if (p->landscape) { fputs("%%BeginFeature: *Duplex DuplexTumble\n", out); fputs("true true SetDuplexMode\n", out); fputs("%%EndFeature\n", out); } else { fputs("%%BeginFeature: *Duplex DuplexNoTumble\n", out); fputs("true false SetDuplexMode\n", out); fputs("%%EndFeature\n", out); } } else { fputs("%%BeginFeature: *Duplex None\n", out); fputs("false false SetDuplexMode\n", out); fputs("%%EndFeature\n", out); } if (p->media_color[0]) { fprintf(out, "%%%%BeginFeature: *MediaColor %s\n", p->media_color); fprintf(out, "(%s) SetMediaColor\n", p->media_color); fputs("%%EndFeature\n", out); } if (p->media_position) { fprintf(out, "%%%%BeginFeature: *InputSlot Tray%d\n", p->media_position); fprintf(out, "%d SetMediaPosition\n", p->media_position); fputs("%%EndFeature\n", out); } if (p->media_type[0]) { fprintf(out, "%%%%BeginFeature: *MediaType %s\n", p->media_type); fprintf(out, "(%s) SetMediaType\n", p->media_type); fputs("%%EndFeature\n", out); } fputs("%%EndPageSetup\n", out); } /* * Render all of the pages... */ switch (op->nup) { case 1 : ps_write_page(out, op->pages[0]); break; default : for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; fprintf(out, "GS[%.3f %.3f %.3f %.3f %.3f %.3f]CM\n", p->outmatrix[0][0], p->outmatrix[1][0], p->outmatrix[0][1], p->outmatrix[1][1], p->outmatrix[0][2], p->outmatrix[1][2]); ps_write_page(out, op->pages[i]); fputs("GR\n", out); } break; } /* * Output the page trailer... */ fputs("SP\n", out); fflush(out); } /* * 'ps_write_page()' - Write all render entities on a page to a PostScript file. */ static void ps_write_page(FILE *out, /* I - Output file */ int page) /* I - Page number */ { render_t *r, /* Render pointer */ *next; /* Next render */ page_t *p; /* Current page */ const char *debug; /* HTMLDOC_DEBUG environment variable */ if (page < 0 || page >= (int)alloc_pages) return; p = pages + page; DEBUG_printf(("ps_write_page(%p, %d)\n", (void *)out, page)); /* * Clear the render cache... */ render_typeface = -1; render_style = -1; render_size = -1; render_rgb[0] = -1.0f; render_rgb[1] = -1.0f; render_rgb[2] = -1.0f; render_x = -1.0f; render_y = -1.0f; render_spacing = -1.0f; /* * Setup the page... 
*/ fputs("GS\n", out); if (p->landscape) { if (p->duplex && (page & 1)) fprintf(out, "0 %d T -90 RO\n", p->length); else fprintf(out, "%d 0 T 90 RO\n", p->width); } write_background(page, out); if (p->duplex && (page & 1)) fprintf(out, "%d %d T\n", p->right, p->bottom); else fprintf(out, "%d %d T\n", p->left, p->bottom); /* * Render all graphics elements... */ for (r = p->start; r != NULL; r = r->next) switch (r->type) { case RENDER_BOX : set_color(out, r->data.box); set_pos(out, r->x, r->y); if (r->height > 0.0f) fprintf(out, " %.1f %.1f F\n", r->width, r->height); else fprintf(out, " %.1f L\n", r->width); render_x = -1.0f; break; case RENDER_IMAGE : if (r->width > 0.01f && r->height > 0.01f) write_image(out, r); break; } /* * Render all text elements, freeing used memory as we go... */ for (r = p->start, next = NULL; r != NULL; r = next) { if (r->type == RENDER_TEXT) write_text(out, r); next = r->next; free(r); } p->start = NULL; if ((debug = getenv("HTMLDOC_DEBUG")) != NULL && strstr(debug, "margin")) { // Show printable area... fprintf(out, "1 0 1 C 0 0 %d %d B\n", p->width - p->right - p->left, p->length - p->top - p->bottom); } /* * Output the page trailer... */ fputs("GR\n", out); } /* * 'ps_write_background()' - Write a background image... */ static void ps_write_background(FILE *out) /* I - Output file */ { int y, /* Current line */ pwidth; /* Pixel width */ if (!background_image->pixels) image_load(background_image->filename, !OutputColor, 1); pwidth = background_image->width * background_image->depth; fputs("/BG[", out); for (y = 0; y < background_image->height; y ++) { putc('<', out); ps_hex(out, background_image->pixels + y * pwidth, pwidth); putc('>', out); } fputs("]def", out); image_unload(background_image); } /* * 'pdf_write_document()' - Write all render entities to a PDF file. */ static void pdf_write_document(uchar *author, // I - Author of document uchar *creator, // I - Application that generated the HTML file uchar *copyright, // I - Copyright (if any) on the document uchar *keywords, // I - Search keywords uchar *subject, // I - Subject uchar *lang, // I - Language tree_t *doc, // I - Document tree_t *toc) // I - Table of contents tree { int i; // Looping variable FILE *out; // Output file int outpage, // Current page # heading; // Current heading # int bytes; // Number of bytes char buffer[8192]; // Copy buffer int num_images; // Number of images in document image_t **images; // Pointers to images render_t temp; // Dummy rendering data... // Open the output file... out = open_file(); if (out == NULL) { progress_error(HD_ERROR_WRITE_ERROR, "Unable to write document file - %s\n", strerror(errno)); return; } // Clear the objects array... num_objects = 0; alloc_objects = 0; objects = NULL; // Write the prolog... write_prolog(out, num_outpages, author, creator, copyright, keywords, subject); // Write images as needed... num_images = image_getlist(&images); for (i = 0; i < num_images; i ++) { int hfi; // Header/footer image index for (hfi = 0; hfi < MAX_HF_IMAGES; hfi ++) if (images[i] == hfimage[hfi]) break; if (images[i]->use > 1 || images[i]->mask || (images[i]->width * images[i]->height * images[i]->depth) > 65536 || images[i] == background_image || images[i] == logo_image || hfi < MAX_HF_IMAGES) { progress_show("Writing image %d (%s)...", i + 1, images[i]->filename); progress_update(100 * i / num_images); temp.data.image = images[i]; write_image(out, &temp, 1); } } // Write links and target names... 
pdf_write_links(out); if (PDFVersion >= 12) pdf_write_names(out); // Verify that everything is working so far... pdf_start_object(out); if (pages_object != (int)num_objects) progress_error(HD_ERROR_INTERNAL_ERROR, "Internal error: pages_object != num_objects"); fputs("/Type/Pages", out); fprintf(out, "/Count %d", (int)num_outpages); fputs("/Kids[", out); for (outpage = 0; outpage < (int)num_outpages; outpage ++) fprintf(out, "%d 0 R\n", pages_object + outpage * 2 + 1); fputs("]", out); pdf_end_object(out); for (outpage = 0; outpage < (int)num_outpages; outpage ++) pdf_write_outpage(out, outpage); if (OutputType == OUTPUT_BOOK && TocLevels > 0) { /* * Write the outline tree using the table-of-contents... */ heading = 0; #ifdef DEBUG_TOC pdf_text_contents(out, toc); #endif // DEBUG_TOC pdf_write_contents(out, toc, 0, 0, 0, &heading); } else { /* * Write the outline tree using the HTML files. */ pdf_write_files(out, doc); } /* * Write the trailer and close the output file... */ write_trailer(out, 0, lang); progress_error(HD_ERROR_NONE, "BYTES: %ld", ftell(out)); if (CGIMode) { const char *meta_filename = (const char *)htmlGetMeta(doc, (uchar *)"HTMLDOC.filename"); const char *filename; if (meta_filename) { if ((filename = strrchr(meta_filename, '/')) != NULL) filename ++; else filename = meta_filename; } else filename = "htmldoc.pdf"; // In CGI mode, we only produce PDF output to stdout... printf("Content-Type: application/pdf\r\n" "Content-Length: %ld\r\n" "Content-Disposition: inline; filename=\"%s\"\r\n" "Accept-Ranges: none\r\n" "X-Creator: HTMLDOC " SVERSION "\r\n" "\r\n", ftell(out), filename); } fclose(out); // // If we are sending the output to stdout, copy the temp file now... // if (!OutputPath[0]) { #ifdef WIN32 // Make sure we are in binary mode... stupid Microsoft! setmode(1, O_BINARY); #elif defined(__EMX__) // OS/2 has a setmode for FILE's... fflush(stdout); _fsetmode(stdout, "b"); #endif // WIN32 || __EMX__ // Open the temporary file and copy it to stdout... out = fopen(stdout_filename, "rb"); while ((bytes = fread(buffer, 1, sizeof(buffer), out)) > 0) fwrite(buffer, 1, (size_t)bytes, stdout); // Close the temporary file (it is removed when the program exits...) fclose(out); } // Clear the objects array... if (alloc_objects) { free(objects); num_objects = 0; alloc_objects = 0; objects = NULL; } if (Verbosity) progress_hide(); } /* * 'pdf_write_resources()' - Write the resources dictionary for a page. 
*/ static void pdf_write_resources(FILE *out, /* I - Output file */ int outpage) /* I - Output page for resources */ { int i; /* Looping var */ outpage_t *op; /* Current output page */ page_t *p; /* Current page */ render_t *r; /* Render pointer */ int fonts_used[TYPE_MAX * STYLE_MAX]; /* Non-zero if the page uses a font */ int images_used; /* Non-zero if the page uses an image */ int text_used; /* Non-zero if the page uses text */ static const char *effects[] = /* Effects and their commands */ { "", "/S/Box/M/I", "/S/Box/M/O", "/S/Dissolve", "/S/Glitter/Di 270", "/S/Glitter/Di 315", "/S/Glitter/Di 0", "/S/Blinds/Dm/H", "/S/Split/Dm/H/M/I", "/S/Split/Dm/H/M/O", "/S/Blinds/Dm/V", "/S/Split/Dm/V/M/I", "/S/Split/Dm/V/M/O", "/S/Wipe/Di 270", "/S/Wipe/Di 180", "/S/Wipe/Di 0", "/S/Wipe/Di 90" }; memset(fonts_used, 0, sizeof(fonts_used)); images_used = background_image != NULL; text_used = 0; op = outpages + outpage; for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; for (r = p->start; r != NULL; r = r->next) if (r->type == RENDER_IMAGE) images_used = 1; else if (r->type == RENDER_TEXT) { text_used = 1; fonts_used[r->data.text.typeface * 4 + r->data.text.style] = 1; } } fputs("/Resources<<", out); if (!images_used) fputs("/ProcSet[/PDF/Text]", out); else if (PDFVersion >= 12) { if (OutputColor) fputs("/ProcSet[/PDF/Text/ImageB/ImageC/ImageI]", out); else fputs("/ProcSet[/PDF/Text/ImageB/ImageI]", out); } else { if (OutputColor) fputs("/ProcSet[/PDF/Text/ImageB/ImageC]", out); else fputs("/ProcSet[/PDF/Text/ImageB]", out); } if (text_used) { fputs("/Font<<", out); for (i = 0; i < (TYPE_MAX * STYLE_MAX); i ++) if (fonts_used[i]) fprintf(out, "/F%x %d 0 R", i, font_objects[i]); fputs(">>", out); } fputs("/XObject<<", out); for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; for (r = p->start; r != NULL; r = r->next) if (r->type == RENDER_IMAGE && r->data.image->obj) fprintf(out, "/I%d %d 0 R", r->data.image->obj, r->data.image->obj); } if (background_image) fprintf(out, "/I%d %d 0 R", background_image->obj, background_image->obj); fputs(">>>>", out); if (PDFEffect) fprintf(out, "/Dur %.0f/Trans<</Type/Trans/D %.1f%s>>", PDFPageDuration, PDFEffectDuration, effects[PDFEffect]); } /* * 'pdf_write_outpage()' - Write an output page. */ static void pdf_write_outpage(FILE *out, /* I - Output file */ int outpage) /* I - Output page number */ { int i; /* Looping var */ page_t *p; /* Current page */ outpage_t *op; /* Output page */ DEBUG_printf(("pdf_write_outpage(out = %p, outpage = %d)\n", (void *)out, outpage)); if (outpage < 0 || outpage >= (int)num_outpages) return; op = outpages + outpage; p = pages + op->pages[0]; DEBUG_printf(("op->pages[0] = %d (%dx%d)\n", op->pages[0], p->width, p->length)); /* * Let the user know which page we are writing... */ if (Verbosity) { progress_show("Writing page %s...", p->page_text); progress_update(100 * outpage / (int)num_outpages); } /* * Output the page prolog... */ pdf_start_object(out); fputs("/Type/Page", out); fprintf(out, "/Parent %d 0 R", pages_object); fprintf(out, "/Contents %d 0 R", (int)num_objects + 1); if (p->landscape) fprintf(out, "/MediaBox[0 0 %d %d]", p->length, p->width); else fprintf(out, "/MediaBox[0 0 %d %d]", p->width, p->length); pdf_write_resources(out, outpage); /* * Actions (links)... 
*/ if (op->annot_object > 0) fprintf(out, "/Annots %d 0 R", op->annot_object); pdf_end_object(out); pdf_start_object(out); if (Compression) fputs("/Filter/FlateDecode", out); pdf_start_stream(out); flate_open_stream(out); /* * Render all of the pages... */ switch (op->nup) { case 1 : pdf_write_page(out, op->pages[0]); break; default : for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; flate_printf(out, "q %.3f %.3f %.3f %.3f %.3f %.3f cm\n", p->outmatrix[0][0], p->outmatrix[1][0], p->outmatrix[0][1], p->outmatrix[1][1], p->outmatrix[0][2], p->outmatrix[1][2]); pdf_write_page(out, op->pages[i]); flate_puts("Q\n", out); } break; } /* * Close out the page... */ flate_close_stream(out); pdf_end_object(out); } /* * 'pdf_write_page()' - Write a page to a PDF file. */ static void pdf_write_page(FILE *out, /* I - Output file */ int page) /* I - Page number */ { render_t *r, /* Render pointer */ *next; /* Next render */ float box[3]; /* RGB color for boxes */ page_t *p; /* Current page */ const char *debug; /* HTMLDOC_DEBUG environment variable */ if (page < 0 || page >= (int)alloc_pages) return; p = pages + page; /* * Clear the render cache... */ render_rgb[0] = -1.0f; render_rgb[1] = -1.0f; render_rgb[2] = -1.0f; render_x = -1.0f; render_y = -1.0f; /* * Output the page header... */ flate_puts("q\n", out); write_background(page, out); if (p->duplex && (page & 1)) flate_printf(out, "1 0 0 1 %d %d cm\n", p->right, p->bottom); else flate_printf(out, "1 0 0 1 %d %d cm\n", p->left, p->bottom); /* * Render all graphics elements... */ box[0] = -1.0f; box[1] = -1.0f; box[2] = -1.0f; for (r = p->start; r != NULL; r = r->next) switch (r->type) { case RENDER_IMAGE : if (r->width > 0.01f && r->height > 0.01f) write_image(out, r); break; case RENDER_BOX : if (r->height == 0.0) { if (box[0] != r->data.box[0] || box[1] != r->data.box[1] || box[2] != r->data.box[2]) { box[0] = r->data.box[0]; box[1] = r->data.box[1]; box[2] = r->data.box[2]; if (OutputColor) flate_printf(out, "%.2f %.2f %.2f RG\n", box[0], box[1], box[2]); else flate_printf(out, "%.2f G\n", box[0] * 0.31f + box[1] * 0.61f + box[2] * 0.08f); } flate_printf(out, "%.1f %.1f m %.1f %.1f l S\n", r->x, r->y, r->x + r->width, r->y); } else { set_color(out, r->data.box); flate_printf(out, "%.1f %.1f %.1f %.1f re f\n", r->x, r->y, r->width, r->height); } break; } /* * Render all text elements, freeing used memory as we go... */ flate_puts("BT\n", out); render_typeface = -1; render_style = -1; render_size = -1; render_x = -1.0f; render_y = -1.0f; render_spacing = -1.0f; for (r = p->start, next = NULL; r != NULL; r = next) { if (r->type == RENDER_TEXT) write_text(out, r); next = r->next; free(r); } p->start = NULL; flate_puts("ET\n", out); if ((debug = getenv("HTMLDOC_DEBUG")) != NULL && strstr(debug, "margin")) { // Show printable area... flate_printf(out, "1 0 1 RG 0 0 %d %d re S\n", p->width - p->right - p->left, p->length - p->top - p->bottom); } /* * Output the page trailer... 
*/ flate_puts("Q\n", out); } #ifdef DEBUG_TOC static void pdf_text_contents(FILE *out, tree_t *toc, int indent) { static const char *spaces = " " " "; if (indent > 16) indent = 16; while (toc) { fprintf(out, "%% %s<%s>", spaces + 64 - 4 * indent, _htmlMarkups[toc->markup]); switch (toc->markup) { case MARKUP_A : tree_t *temp; for (temp = toc->child; temp; temp = temp->next) fputs((char *)temp->data, out); break; default : fputs("\n", out); pdf_text_contents(out, toc->child, indent + 1); fprintf(out, "%% %s", spaces + 64 - 4 * indent); break; } fprintf(out, "</%s>\n", _htmlMarkups[toc->markup]); toc = toc->next; } } #endif // DEBUG_TOC /* * 'pdf_write_contents()' - Write the table of contents as outline records to * a PDF file. */ static void pdf_write_contents(FILE *out, /* I - Output file */ tree_t *toc, /* I - Table of contents tree */ int parent, /* I - Parent outline object */ int prev, /* I - Previous outline object */ int next, /* I - Next outline object */ int *heading) /* IO - Current heading # */ { int i, /* Looping var */ thisobj, /* This object */ entry, /* TOC entry object */ count; /* Number of entries at this level */ uchar *text; /* Entry text */ tree_t *temp; /* Looping var */ int *entry_counts, /* Number of sub-entries for this entry */ *entry_objects; /* Objects for each entry */ tree_t **entries; /* Pointers to each entry */ float x, y; /* Position of link */ /* * Make an object for this entry... */ if (toc == NULL) { /* * This is for the Table of Contents page... */ thisobj = pdf_start_object(out); fprintf(out, "/Parent %d 0 R", parent); fputs("/Title", out); write_utf16(out, (uchar *)TocTitle); x = 0.0f; y = PagePrintLength + PageBottom; pspdf_transform_coords(pages + chapter_starts[0], x, y); fprintf(out, "/Dest[%d 0 R/XYZ %.0f %.0f 0]", pages_object + 2 * chapter_outstarts[0] + 1, x, y); if (prev > 0) fprintf(out, "/Prev %d 0 R", prev); if (next > 0) fprintf(out, "/Next %d 0 R", next); pdf_end_object(out); return; } /* * Allocate the arrays... Add 1 to hold the TOC at the top level... */ if ((entry_counts = (int *)calloc(sizeof(int), num_headings + 1)) == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d headings - %s", (int)num_headings, strerror(errno)); return; } if ((entry_objects = (int *)calloc(sizeof(int), num_headings + 1)) == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d headings - %s", (int)num_headings, strerror(errno)); free(entry_counts); return; } if ((entries = (tree_t **)calloc(sizeof(tree_t *), num_headings + 1)) == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d headings - %s", (int)num_headings, strerror(errno)); free(entry_objects); free(entry_counts); return; } if (parent == 0 && TocLevels > 0) { /* * Add the table of contents to the top-level contents... */ entries[0] = NULL; entry_objects[0] = num_objects + 2; entry = num_objects + 3; count = 1; } else { entry = num_objects + 2; count = 0; } /* * Find and count the children (entries)... 
*/ if (toc->markup == MARKUP_B && toc->next && toc->next->markup == MARKUP_UL) temp = toc->next->child; else if (toc->markup == MARKUP_LI && toc->last_child && toc->last_child->markup == MARKUP_UL) temp = toc->last_child->child; else temp = toc->child; for (; temp && count <= (int)num_headings; temp = temp->next) { if (temp->markup == MARKUP_B) { entries[count] = temp; entry_objects[count] = entry; if (temp->next && temp->next->markup == MARKUP_UL) entry_counts[count] = pdf_count_headings(temp->next->child); else entry_counts[count] = 0; entry += entry_counts[count] + 1; count ++; } else if (temp->markup == MARKUP_LI) { entries[count] = temp; entry_objects[count] = entry; if (temp->last_child && temp->last_child->markup == MARKUP_UL) entry_counts[count] = pdf_count_headings(temp->last_child); else entry_counts[count] = 0; entry += entry_counts[count] + 1; count ++; } } /* * Output the top-level object... */ thisobj = pdf_start_object(out); if (parent == 0) outline_object = thisobj; else fprintf(out, "/Parent %d 0 R", parent); if (count > 0) { fprintf(out, "/Count %d", parent == 0 ? count : -count); fprintf(out, "/First %d 0 R", entry_objects[0]); fprintf(out, "/Last %d 0 R", entry_objects[count - 1]); } if (parent > 0 && toc->child && toc->child->markup == MARKUP_A) { if ((text = htmlGetText(toc->child->child)) != NULL) { fputs("/Title", out); write_utf16(out, text); free(text); } i = heading_pages[*heading]; x = 0.0f; y = heading_tops[*heading] + pages[i].bottom; pspdf_transform_coords(pages + i, x, y); fprintf(out, "/Dest[%d 0 R/XYZ %.0f %.0f 0]", pages_object + 2 * pages[i].outpage + 1, x, y); (*heading) ++; } if (prev > 0) fprintf(out, "/Prev %d 0 R", prev); if (next > 0) fprintf(out, "/Next %d 0 R", next); pdf_end_object(out); for (i = 0; i < count ; i ++) pdf_write_contents(out, entries[i], thisobj, i > 0 ? entry_objects[i - 1] : 0, i < (count - 1) ? entry_objects[i + 1] : 0, heading); free(entry_objects); free(entry_counts); free(entries); } // // 'pdf_write_files()' - Write an outline of HTML files. // static void pdf_write_files(FILE *out, // I - Output file tree_t *doc) // I - Document tree { int i, // Looping var num_files, // Number of FILE elements alloc_text; // Allocated text? uchar *text; // Entry text tree_t *temp; // Current node link_t *link; // Link to file... float x, y; // Position of link // Figure out the number of (top-level) files in the document... for (num_files = 0, temp = doc; temp; temp = temp->next) if (temp->markup == MARKUP_FILE) num_files ++; if (num_files < 2) { // No files to outline... outline_object = 0; return; } // Write the outline dictionary... outline_object = pdf_start_object(out); fprintf(out, "/Count %d", num_files); fprintf(out, "/First %d 0 R", outline_object + 1); fprintf(out, "/Last %d 0 R", outline_object + num_files); pdf_end_object(out); // Now write the outline items... 
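  // Each top-level FILE node becomes one outline item object.  The items are
  // written consecutively right after the outline dictionary, so their object
  // numbers are simply outline_object + 1 ... outline_object + num_files and
  // the /Prev and /Next references can be computed arithmetically; for
  // example (numbers made up), with outline_object 20 and three files the
  // items are objects 21, 22, and 23, and item 22 gets /Prev 21 0 R and
  // /Next 23 0 R.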
for (i = 0, temp = doc; temp; temp = temp->next) if (temp->markup == MARKUP_FILE) { alloc_text = 0; if ((text = get_title(temp->child)) != NULL) alloc_text = 1; else if ((text = htmlGetVariable(temp, (uchar *)"_HD_FILENAME")) == NULL) text = (uchar *)"Unknown"; pdf_start_object(out); fprintf(out, "/Parent %d 0 R", outline_object); fputs("/Title", out); write_utf16(out, text); if (alloc_text) free(text); if ((link = find_link(htmlGetVariable(temp, (uchar *)"_HD_FILENAME"))) != NULL) { x = 0.0f; y = link->top + pages[link->page].bottom; pspdf_transform_coords(pages + link->page, x, y); fprintf(out, "/Dest[%d 0 R/XYZ %.0f %.0f 0]", pages_object + 2 * pages[link->page].outpage + 1, x, y); } if (i > 0) fprintf(out, "/Prev %d 0 R", outline_object + i); if (i < (num_files - 1)) fprintf(out, "/Next %d 0 R", outline_object + i + 2); pdf_end_object(out); i ++; } } /* * 'pdf_count_headings()' - Count the number of headings under this TOC * entry. */ static int /* O - Number of headings found */ pdf_count_headings(tree_t *toc) /* I - TOC entry */ { int headings; /* Number of headings */ for (headings = 0; toc != NULL; toc = toc->next) { if (toc->markup == MARKUP_A) headings ++; if (toc->child != NULL) headings += pdf_count_headings(toc->child); } return (headings); } /* * PDF object state variables... */ static int pdf_stream_length = 0; static int pdf_stream_start = 0; static int pdf_object_type = 0; /* * 'pdf_start_object()' - Start a new PDF object... */ static int // O - Object number pdf_start_object(FILE *out, // I - File to write to int array) // I - 1 = array, 0 = dictionary { int *temp; // Temporary integer pointer num_objects ++; // Allocate memory as necessary... if (num_objects >= alloc_objects) { alloc_objects += ALLOC_OBJECTS; if (alloc_objects == ALLOC_OBJECTS) temp = (int *)malloc(sizeof(int) * alloc_objects); else temp = (int *)realloc(objects, sizeof(int) * alloc_objects); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d objects - %s", (int)alloc_objects, strerror(errno)); alloc_objects -= ALLOC_OBJECTS; return (0); } objects = temp; } objects[num_objects] = ftell(out); fprintf(out, "%d 0 obj", (int)num_objects); pdf_object_type = array; fputs(pdf_object_type ? "[" : "<<", out); return (num_objects); } /* * 'pdf_start_stream()' - Start a new PDF stream... */ static void pdf_start_stream(FILE *out) // I - File to write to { // Write the "/Length " string, get the position, and then write 10 // zeroes to cover the maximum size of a stream. fputs("/Length ", out); pdf_stream_length = ftell(out); fputs("0000000000>>stream\n", out); pdf_stream_start = ftell(out); } /* * 'pdf_end_object()' - End a PDF object... */ static void pdf_end_object(FILE *out) // I - File to write to { int length; // Total length of stream if (pdf_stream_start) { // For streams, go back and update the length field in the // object dictionary... length = ftell(out) - pdf_stream_start; fseek(out, pdf_stream_length, SEEK_SET); fprintf(out, "%-10d", length); fseek(out, 0, SEEK_END); pdf_stream_start = 0; fputs("endstream\n", out); } else fputs(pdf_object_type ? "]" : ">>", out); fputs("endobj\n", out); } /* * 'pdf_write_links()' - Write annotation link objects for each page in the * document. 
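 *
 * Local links (targets inside the document) become a single /Link
 * annotation carrying a /Dest array that points at the target page.
 * Remote links become two objects: an action dictionary (/GoToR for
 * external PDF files, /Launch for other local files, /URI for web
 * addresses) followed by a /Link annotation whose /A entry references it.
 * Adjacent, identical links are merged first, and all annotations for an
 * output page are collected into one array object recorded in
 * outpages[].annot_object so pdf_write_outpage() can emit /Annots.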
*/

static void
pdf_write_links(FILE *out)              /* I - Output file */
{
  int           i,                      /* Looping var */
                outpage,                /* Current page */
                lobj,                   /* Current link */
                num_lobjs,              /* Number of links on this page */
                alloc_lobjs,            /* Number of links to allocate */
                *lobjs;                 /* Link objects */
  float         x, y;                   /* Position of last link */
  render_t      *r,                     /* Current render primitive */
                *rlast,                 /* Last render link primitive */
                *rprev;                 /* Previous render primitive */
  link_t        *link;                  /* Local link */
  page_t        *p;                     /* Current page */
  outpage_t     *op;                    /* Current output page */


 /*
  * First combine adjacent, identical links...
  */

  for (outpage = 0, op = outpages; outpage < (int)num_outpages; outpage ++, op ++)
  {
    for (i = 0; i < op->nup; i ++)
    {
      if (op->pages[i] < 0)
        break;

      p = pages + op->pages[i];

      for (r = p->start, x = 0.0f, y = 0.0f, rlast = NULL, rprev = NULL;
           r != NULL;
           rprev = r, r = r->next)
        if (r->type == RENDER_LINK)
        {
          if (fabs(r->x - x) < 0.1f && fabs(r->y - y) < 0.1f && rlast != NULL &&
              strcmp((const char *)rlast->data.link,
                     (const char *)r->data.link) == 0)
          {
            // Combine this primitive with the previous one in rlast...
            rlast->width = r->x + r->width - rlast->x;
            x            = rlast->x + rlast->width;

            // Delete this render primitive...
            rprev->next = r->next;
            free(r);
            r = rprev;
          }
          else
          {
            // Can't combine; just save this info for later use...
            rlast = r;
            x     = r->x + r->width;
            y     = r->y;
          }
        }
    }
  }

 /*
  * Setup the initial pages_object number...
  */

  pages_object = num_objects + 1;

 /*
  * Add space for named links in PDF 1.2 output...
  */

  if (PDFVersion >= 12)
    pages_object += num_links + 3;

 /*
  * Stop here if we won't be generating links in the output...
  */

  if (!Links)
    return;

 /*
  * Figure out how many link objects we'll have...
  * (Loop over the output pages; outpages[] only has num_outpages entries,
  * so it must not be indexed with num_pages.)
  */

  for (outpage = 0, op = outpages, alloc_lobjs = 0;
       outpage < (int)num_outpages;
       outpage ++, op ++)
  {
    num_lobjs = 0;

    for (i = 0; i < op->nup; i ++)
    {
      if (op->pages[i] < 0)
        break;

      p = pages + op->pages[i];

      for (r = p->start; r != NULL; r = r->next)
        if (r->type == RENDER_LINK)
        {
          if (find_link(r->data.link) != NULL)
            num_lobjs ++;
          else
            num_lobjs += 2;
        }
    }

    if (num_lobjs > 0)
      pages_object += num_lobjs + 1;

    if (num_lobjs > alloc_lobjs)
      alloc_lobjs = num_lobjs;
  }

  if (alloc_lobjs == 0)
    return;

 /*
  * Allocate memory for the links...
  */

  if ((lobjs = (int *)malloc(sizeof(int) * (size_t)alloc_lobjs)) == NULL)
  {
    progress_error(HD_ERROR_OUT_OF_MEMORY,
                   "Unable to allocate memory for %d link objects - %s",
                   alloc_lobjs, strerror(errno));
    return;
  }

 /*
  * Then generate annotation objects for all the links...
  */

  for (outpage = 0, op = outpages; outpage < (int)num_outpages; outpage ++, op ++)
  {
    num_lobjs = 0;

    for (i = 0; i < op->nup; i ++)
    {
      if (op->pages[i] < 0)
        break;

      p = pages + op->pages[i];

      for (r = p->start; r != NULL; r = r->next)
        if (r->type == RENDER_LINK)
        {
          if ((link = find_link(r->data.link)) != NULL)
          {
           /*
            * Local link...
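            *
            * For illustration only (coordinates and object numbers are made
            * up), a local link annotation written below has the form:
            *
            *   <</Subtype/Link/Rect[72.0 700.5 180.0 712.5]/Border[0 0 0]
            *     /Dest[15 0 R/XYZ 0 704 0]>>
            *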
*/ float x1, y1, x2, y2; lobjs[num_lobjs ++] = pdf_start_object(out); fputs("/Subtype/Link", out); if (PageDuplex && (op->pages[i] & 1)) { x1 = r->x + p->right; y1 = r->y + p->bottom - 2; x2 = r->x + r->width + p->right; y2 = r->y + r->height + p->bottom; } else { x1 = r->x + p->left; y1 = r->y + p->bottom - 2; x2 = r->x + r->width + p->left; y2 = r->y + r->height + p->bottom; } pspdf_transform_coords(p, x1, y1); pspdf_transform_coords(p, x2, y2); fprintf(out, "/Rect[%.1f %.1f %.1f %.1f]", x1, y1, x2, y2); fputs("/Border[0 0 0]", out); x1 = 0.0f; y1 = link->top + pages[link->page].bottom; pspdf_transform_coords(pages + link->page, x1, y1); fprintf(out, "/Dest[%d 0 R/XYZ %.0f %.0f 0]", pages_object + 2 * pages[link->page].outpage + 1, x1, y1); pdf_end_object(out); } else { /* * Remote link... */ pdf_start_object(out); if (PDFVersion >= 12 && file_method((char *)r->data.link) == NULL) { #ifdef WIN32 if (strcasecmp(file_extension((char *)r->data.link), "pdf") == 0) #else if (strcmp(file_extension((char *)r->data.link), "pdf") == 0) #endif /* WIN32 */ { /* * Link to external PDF file... */ const char *target = file_target((char *)r->data.link); fputs("/S/GoToR", out); if (target) { char url[1024], *urlptr; fputs("/D", out); write_string(out, (uchar *)target, 0); strlcpy(url, (char *)r->data.link, sizeof(url)); if ((urlptr = strrchr(url, '#')) != NULL) *urlptr = '\0'; fputs("/F", out); write_string(out, (uchar *)url, 0); } else { fputs("/D[0/XYZ null null 0]/F", out); write_string(out, r->data.link, 0); } } else { /* * Link to external filename... */ fputs("/S/Launch", out); fputs("/F", out); write_string(out, r->data.link, 0); if (StrictHTML) progress_error(HD_ERROR_UNRESOLVED_LINK, "Unable to resolve link to \"%s\"!", r->data.link); } } else { /* * Link to web file... */ fputs("/S/URI", out); fputs("/URI", out); write_string(out, r->data.link, 0); } pdf_end_object(out); lobjs[num_lobjs ++] = pdf_start_object(out); fputs("/Subtype/Link", out); if (PageDuplex && (outpage & 1)) fprintf(out, "/Rect[%.1f %.1f %.1f %.1f]", r->x + PageRight, r->y + PageBottom, r->x + r->width + PageRight, r->y + r->height + PageBottom); else fprintf(out, "/Rect[%.1f %.1f %.1f %.1f]", r->x + PageLeft, r->y + PageBottom - 2, r->x + r->width + PageLeft, r->y + r->height + PageBottom); fputs("/Border[0 0 0]", out); fprintf(out, "/A %d 0 R", (int)num_objects - 1); pdf_end_object(out); } } } if (num_lobjs > 0) { outpages[outpage].annot_object = pdf_start_object(out, 1); for (lobj = 0; lobj < num_lobjs; lobj ++) fprintf(out, "%d 0 R%s", lobjs[lobj], lobj < (num_lobjs - 1) ? "\n" : ""); pdf_end_object(out); } } free(lobjs); } /* * 'pdf_write_names()' - Write named destinations for each link. */ static void pdf_write_names(FILE *out) /* I - Output file */ { int i; /* Looping var */ uchar *s; /* Current character in name */ link_t *link; /* Local link */ /* * Convert all link names to lowercase... */ for (i = num_links, link = links; i > 0; i --, link ++) for (s = link->name; *s != '\0'; s ++) *s = (uchar)tolower(*s); /* * Write the root name tree entry... */ names_object = pdf_start_object(out); fprintf(out, "/Dests %d 0 R", (int)num_objects + 1); pdf_end_object(out); /* * Write the name tree child list... */ pdf_start_object(out); fprintf(out, "/Kids[%d 0 R]", (int)num_objects + 1); pdf_end_object(out); /* * Write the leaf node for the name tree... 
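 *
 * The named destinations form a small PDF name tree: the root written above
 * holds /Dests, its child holds /Kids with a single leaf, and the leaf below
 * carries /Limits (the first and last name) plus a /Names array of
 * name/object-reference pairs.  Each referenced destination object then
 * supplies /D[<page object> 0 R/XYZ x y 0].  The link names have already
 * been lowercased above.
 *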
*/ pdf_start_object(out); fputs("/Limits[", out); write_string(out, links[0].name, 0); write_string(out, links[num_links - 1].name, 0); fputs("]", out); fputs("/Names[", out); for (i = 1, link = links; i <= (int)num_links; i ++, link ++) { write_string(out, link->name, 0); fprintf(out, "%d 0 R", (int)num_objects + i); } fputs("]", out); pdf_end_object(out); for (i = num_links, link = links; i > 0; i --, link ++) { pdf_start_object(out); float x, y; x = 0.0f; y = link->top + pages[link->page].bottom; pspdf_transform_coords(pages + link->page, x, y); fprintf(out, "/D[%d 0 R/XYZ %.0f %.0f 0]", pages_object + 2 * pages[link->page].outpage + 1, x, y); pdf_end_object(out); } } /* * 'render_contents()' - Render a single heading. */ static void render_contents(tree_t *t, /* I - Tree to parse */ float left, /* I - Left margin */ float right, /* I - Printable width */ float bottom, /* I - Bottom margin */ float top, /* I - Printable top */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int heading, /* I - Heading # */ tree_t *chap) /* I - Chapter heading */ { float x, width, numberwidth, height, rgb[3]; int hpage; uchar number[1024], *nptr, *link; tree_t *flat, *temp, *next; render_t *r; float dot_width; DEBUG_printf(("render_contents(t=%p, left=%.1f, right=%.1f, bottom=%.1f, top=%.1f, y=%.1f, page=%d, heading=%d, chap=%p)\n", (void *)t, left, right, bottom, top, *y, *page, heading, (void *)chap)); if (!t) return; dot_width = _htmlSizes[SIZE_P] * _htmlWidths[t->typeface][t->style]['.'] * 0.001f; /* * Put the text... */ flat = flatten_tree(t->child->child); for (height = 0.0, temp = flat; temp != NULL; temp = temp->next) if (temp->height > height) height = temp->height; height *= _htmlSpacings[SIZE_P] / _htmlSizes[SIZE_P]; if (t->indent) x = left + 18.0f + 18.0f * t->indent; else x = left; *y -= height; /* * Get the width of the page number, leave room for three dots... */ if (heading >= 0 && heading < (int)num_headings) { hpage = heading_pages[heading]; numberwidth = (float)(get_width((uchar *)pages[hpage].page_text, t->typeface, t->style, t->size) + 3.0f * dot_width); } else { hpage = 0; numberwidth = 0.0f; } for (temp = flat; temp != NULL; temp = next) { rgb[0] = temp->red / 255.0f; rgb[1] = temp->green / 255.0f; rgb[2] = temp->blue / 255.0f; if ((x + temp->width) >= (right - numberwidth)) { /* * Too wide to fit, continue on the next line */ *y -= _htmlSpacings[SIZE_P]; x = left + 36.0f * t->indent; } if (*y < bottom) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); width = get_width((uchar *)TocTitle, _htmlHeadingFont, STYLE_BOLD, SIZE_H1); *y = (float)(top - _htmlSpacings[SIZE_H1]); x = (float)(left + 0.5f * (right - left - width)); r = new_render(*page, RENDER_TEXT, x, *y, 0, 0, TocTitle); r->data.text.typeface = _htmlHeadingFont; r->data.text.style = STYLE_BOLD; r->data.text.size = (float)_htmlSizes[SIZE_H1]; get_color(_htmlTextColor, r->data.text.rgb); *y -= _htmlSpacings[SIZE_H1]; if (t->indent) x = left + 18.0f + 18.0f * t->indent; else x = left; if (chap != t) { *y += height; render_contents(chap, left, right, bottom, top, y, page, -1, 0); *y -= _htmlSpacings[SIZE_P]; } } if (temp->link != NULL) { link = htmlGetVariable(temp->link, (uchar *)"HREF"); /* * Add a page link... 
*/ new_render(*page, RENDER_LINK, x, *y, temp->width, temp->height, link); if (PSLevel == 0 && Links) { memcpy(rgb, link_color, sizeof(rgb)); temp->red = (uchar)(link_color[0] * 255.0); temp->green = (uchar)(link_color[1] * 255.0); temp->blue = (uchar)(link_color[2] * 255.0); if (LinkStyle) new_render(*page, RENDER_BOX, x, *y - 1, temp->width, 0, link_color); } } if ((link = htmlGetVariable(temp, (uchar *)"ID")) != NULL) { /* * Add a target link... */ add_link(link, *page, (int)(*y + height)); } switch (temp->markup) { case MARKUP_A : if ((link = htmlGetVariable(temp, (uchar *)"NAME")) != NULL) { /* * Add a target link... */ add_link(link, *page, (int)(*y + height)); } break; case MARKUP_NONE : if (temp->data == NULL) break; if (temp->underline) new_render(*page, RENDER_BOX, x, *y - 1, temp->width, 0, rgb); if (temp->strikethrough) new_render(*page, RENDER_BOX, x, *y + temp->height * 0.25f, temp->width, 0, rgb); r = new_render(*page, RENDER_TEXT, x, *y, 0, 0, temp->data); r->data.text.typeface = temp->typeface; r->data.text.style = temp->style; r->data.text.size = (float)_htmlSizes[temp->size]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); if (temp->superscript) r->y += height - temp->height; else if (temp->subscript) r->y -= height * _htmlSizes[0] / _htmlSpacings[0] - temp->height; break; case MARKUP_IMG : update_image_size(temp); new_render(*page, RENDER_IMAGE, x, *y, temp->width, temp->height, image_find((char *)htmlGetVariable(temp, (uchar *)"REALSRC"))); break; default : break; } x += temp->width; next = temp->next; free(temp); } if (numberwidth > 0.0f) { /* * Draw dots leading up to the page number... */ width = (float)(numberwidth - 3.0 * dot_width + x); for (nptr = number; nptr < (number + sizeof(number) - 1) && width < right; width += dot_width) *nptr++ = '.'; if (nptr > number) nptr --; strlcpy((char *)nptr, pages[hpage].page_text, sizeof(number) - (size_t)(nptr - number)); r = new_render(*page, RENDER_TEXT, right - width + x, *y, 0, 0, number); r->data.text.typeface = t->typeface; r->data.text.style = t->style; r->data.text.size = (float)_htmlSizes[t->size]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); } } /* * 'count_headings()' - Count the number of headings in the TOC. */ static int count_headings(tree_t *t) // I - Tree to count { int count; // Number of headings... count = 0; while (t != NULL) { switch (t->markup) { case MARKUP_B : case MARKUP_LI : count ++; if (t->last_child && t->last_child->markup == MARKUP_UL) count += count_headings(t->last_child); break; default : count += count_headings(t->child); break; } t = t->next; } return (count); } /* * 'parse_contents()' - Parse the table of contents and produce a * rendering list... */ static void parse_contents(tree_t *t, /* I - Tree to parse */ float left, /* I - Left margin */ float right, /* I - Printable width */ float bottom, /* I - Bottom margin */ float top, /* I - Printable top */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int *heading, /* IO - Heading # */ tree_t *chap) /* I - Chapter heading */ { DEBUG_printf(("parse_contents(t=%p, left=%.1f, right=%.1f, bottom=%.1f, top=%.1f, y=%.1f, page=%d, heading=%d, chap=%p)\n", (void *)t, left, right, bottom, top, *y, *page, *heading, (void *)chap)); while (t != NULL) { switch (t->markup) { case MARKUP_B : /* Top-level TOC */ if (t->prev != NULL) /* Advance one line prior to top-levels... 
*/ *y -= _htmlSpacings[SIZE_P]; if (*y < (bottom + _htmlSpacings[SIZE_P] * 3)) *y = 0; // Force page break chap = t; case MARKUP_LI : /* Lower-level TOC */ DEBUG_printf(("parse_contents: heading=%d, page = %d\n", *heading, heading_pages[*heading])); /* * Put the text unless the author has flagged it otherwise... */ if (htmlGetVariable(t, (uchar *)"_HD_OMIT_TOC") == NULL) { render_contents(t, left, right, bottom, top, y, page, *heading, chap); /* * Update current headings for header/footer strings in TOC. */ check_pages(*page); if (t->markup == MARKUP_B && pages[*page].chapter == pages[*page - 1].chapter) pages[*page].chapter = htmlGetText(t->child->child); if (pages[*page].heading == pages[*page - 1].heading) pages[*page].heading = htmlGetText(t->child->child); /* * Next heading... */ (*heading) ++; if (t->last_child->markup == MARKUP_UL) parse_contents(t->last_child, left, right, bottom, top, y, page, heading, chap); } else if (t->next != NULL && t->next->markup == MARKUP_UL) { /* * Skip children of omitted heading... */ t = t->next; (*heading) += count_headings(t->child) + 1; } else (*heading) ++; break; default : parse_contents(t->child, left, right, bottom, top, y, page, heading, chap); break; } t = t->next; } } /* * 'parse_doc()' - Parse a document tree and produce rendering list output. */ static void parse_doc(tree_t *t, /* I - Tree to parse */ float *left, /* I - Left margin */ float *right, /* I - Printable width */ float *bottom, /* I - Bottom margin */ float *top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ tree_t *cpara, /* I - Current paragraph */ int *needspace) /* I - Need whitespace before this element */ { int i; /* Looping var */ tree_t *para, /* Phoney paragraph tree entry */ *temp; /* Paragraph entry */ var_t *var; /* Variable entry */ uchar *name; /* ID name */ uchar *style; /* STYLE attribute */ float width, /* Width of horizontal rule */ height, /* Height of rule */ rgb[3]; /* RGB color of rule */ DEBUG_printf(("parse_doc(t=%p, left=%.1f, right=%.1f, bottom=%.1f, top=%.1f, x=%.1f, y=%.1f, page=%d, cpara=%p, needspace=%d\n", (void *)t, *left, *right, *bottom, *top, *x, *y, *page, (void *)cpara, *needspace)); DEBUG_printf((" title_page = %d, chapter = %d\n", title_page, chapter)); if (cpara == NULL) para = htmlNewTree(NULL, MARKUP_P, NULL); else para = cpara; while (t != NULL) { if (t->markup == MARKUP_FILE) current_url = htmlGetVariable(t, (uchar *)"_HD_URL"); if (((t->markup == MARKUP_H1 && OutputType == OUTPUT_BOOK) || (t->markup == MARKUP_FILE && OutputType == OUTPUT_WEBPAGES)) && !title_page) { // New page on H1 in book mode or file in webpage mode... if (para->child != NULL && chapter > 0) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } if ((chapter > 0 && OutputType == OUTPUT_BOOK) || ((*page > 0 || *y < *top) && OutputType == OUTPUT_WEBPAGES)) { if (*y < *top) (*page) ++; if (PageDuplex && (*page & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); chapter_ends[chapter] = *page - 1; } // Make sure header and footer strings are correct... check_pages(*page); memcpy(pages[*page].header, Header, sizeof(pages[*page].header)); memcpy(pages[*page].header1, Header1, sizeof(pages[*page].header1)); memcpy(pages[*page].footer, Footer, sizeof(pages[*page].footer)); // Bump the chapter/file count... 
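  // chapter_starts[] and chapter_ends[] record the first and last page of
  // each chapter/file so the rest of the formatter can tell where each
  // chapter begins and ends.  The counter is clamped at MAX_CHAPTERS (with
  // an error) rather than writing past the end of those arrays.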
chapter ++; if (chapter >= MAX_CHAPTERS) { progress_error(HD_ERROR_TOO_MANY_CHAPTERS, "Too many chapters/files in document (%d > %d)!", chapter, MAX_CHAPTERS); chapter = MAX_CHAPTERS - 1; } else chapter_starts[chapter] = *page; if (chapter > TocDocCount) TocDocCount = chapter; *y = *top; *x = *left; *needspace = 0; } if ((name = htmlGetVariable(t, (uchar *)"ID")) != NULL) { /* * Add a link target using the ID=name variable... */ add_link(name, *page, (int)*y); } else if (t->markup == MARKUP_FILE) { /* * Add a file link... */ uchar newname[256], /* New filename */ *sep; /* "?" separator in links */ // Strip any trailing HTTP GET data stuff... strlcpy((char *)newname, (char *)htmlGetVariable(t, (uchar *)"_HD_FILENAME"), sizeof(newname)); if ((sep = (uchar *)strchr((char *)newname, '?')) != NULL) *sep = '\0'; // Add the link add_link(newname, *page, (int)*y); } if (chapter == 0 && !title_page) { // Need to handle page comments before the first heading... if (t->markup == MARKUP_COMMENT) parse_comment(t, left, right, bottom, top, x, y, page, para, *needspace); if (t->child != NULL) parse_doc(t->child, left, right, bottom, top, x, y, page, para, needspace); t = t->next; continue; } // Check for some basic stylesheet stuff... if ((style = htmlGetStyle(t, (uchar *)"page-break-before:")) != NULL && strcasecmp((char *)style, "avoid") != 0) { // Advance to the next page... (*page) ++; *x = *left; *y = *top; *needspace = 0; // See if we need to go to the next left/righthand page... if (PageDuplex && ((*page) & 1) && strcasecmp((char *)style, "right") == 0) (*page) ++; else if (PageDuplex && !((*page) & 1) && strcasecmp((char *)style, "left") == 0) (*page) ++; // Update the progress as necessary... if (Verbosity) progress_show("Formatting page %d", *page); } // Process the markup... switch (t->markup) { case MARKUP_IMG : update_image_size(t); case MARKUP_NONE : case MARKUP_BR : if (para->child == NULL) { if (t->parent == NULL) { para->halignment = ALIGN_LEFT; para->indent = 0; } else { para->halignment = t->parent->halignment; para->indent = t->parent->indent; } } // Skip heading whitespace... 
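  // Inline content (text, BR, IMG) is accumulated into the phoney "para"
  // tree and only measured and placed when a block-level element or the end
  // of the scope flushes it through parse_paragraph().  A text node holding
  // just a single space at the start of a paragraph is dropped here so
  // lines never begin with stray whitespace.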
if (para->child == NULL && t->markup == MARKUP_NONE && t->data != NULL && strcmp((char *)t->data, " ") == 0) break; if ((temp = htmlAddTree(para, t->markup, t->data)) != NULL) { temp->link = t->link; temp->width = t->width; temp->height = t->height; temp->typeface = t->typeface; temp->style = t->style; temp->size = t->size; temp->underline = t->underline; temp->strikethrough = t->strikethrough; temp->superscript = t->superscript; temp->subscript = t->subscript; temp->halignment = t->halignment; temp->valignment = t->valignment; temp->red = t->red; temp->green = t->green; temp->blue = t->blue; for (i = 0, var = t->vars; i < t->nvars; i ++, var ++) htmlSetVariable(temp, var->name, var->value); } break; case MARKUP_TABLE : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } parse_table(t, *left, *right, *bottom, *top, x, y, page, *needspace); *needspace = 0; break; case MARKUP_H1 : case MARKUP_H2 : case MARKUP_H3 : case MARKUP_H4 : case MARKUP_H5 : case MARKUP_H6 : case MARKUP_H7 : case MARKUP_H8 : case MARKUP_H9 : case MARKUP_H10 : case MARKUP_H11 : case MARKUP_H12 : case MARKUP_H13 : case MARKUP_H14 : case MARKUP_H15 : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 1; } parse_heading(t, *left, *right, *bottom, *top, x, y, page, *needspace); *needspace = 1; break; case MARKUP_BLOCKQUOTE : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 1; } *left += 36; *right -= 36; parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); *left -= 36; *right += 36; *x = *left; *needspace = 1; break; case MARKUP_CENTER : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 1; } parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); *x = *left; *needspace = 1; break; case MARKUP_P : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 1; } parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); *x = *left; *needspace = 1; break; case MARKUP_DIV : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } break; case MARKUP_PRE : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 1; } *left += 36.0f; *x = *left; parse_pre(t, *left, *right, *bottom, *top, x, y, page, *needspace); *left -= 36.0f; *x = *left; *needspace = 1; break; case MARKUP_DIR : case MARKUP_MENU : case MARKUP_UL : case MARKUP_OL : init_list(t); case MARKUP_DL : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, 
page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } if (t->indent == 1) *needspace = 1; *left += 36.0f; *x = *left; parse_doc(t->child, left, right, bottom, top, x, y, page, para, needspace); *left -= 36.0f; if (t->indent == 1) *needspace = 1; break; case MARKUP_LI : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 0; } parse_list(t, left, right, bottom, top, x, y, page, *needspace); *x = *left; *needspace = t->next && t->next->markup != MARKUP_LI && t->next->markup != MARKUP_UL && t->next->markup != MARKUP_OL; break; case MARKUP_DT : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 0; } *left -= 36.0f; *x = *left; parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); *left += 36.0f; *x = *left; *needspace = 0; break; case MARKUP_DD : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 0; } parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); *x = *left; *needspace = 0; break; case MARKUP_HR : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } if (htmlGetVariable(t, (uchar *)"BREAK") == NULL) { /* * Generate a horizontal rule... */ if ((name = htmlGetVariable(t, (uchar *)"WIDTH")) == NULL) width = *right - *left; else { if (strchr((char *)name, '%') != NULL) width = atoi((char *)name) * (*right - *left) / 100; else width = (float)(atoi((char *)name) * PagePrintWidth / _htmlBrowserWidth); } if ((name = htmlGetVariable(t, (uchar *)"SIZE")) == NULL) height = 2; else height = (float)(atoi((char *)name) * PagePrintWidth / _htmlBrowserWidth); switch (t->halignment) { case ALIGN_LEFT : *x = *left; break; case ALIGN_CENTER : *x = *left + (*right - *left - width) * 0.5f; break; case ALIGN_RIGHT : *x = *right - width; break; } if (*y < (*bottom + height + _htmlSpacings[SIZE_P])) { /* * Won't fit on this page... */ (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; } (*y) -= height + _htmlSpacings[SIZE_P]; rgb[0] = t->red / 255.0f; rgb[1] = t->green / 255.0f; rgb[2] = t->blue / 255.0f; new_render(*page, RENDER_BOX, *x, *y + _htmlSpacings[SIZE_P] * 0.5, width, height, rgb); } else { /* * <HR BREAK> generates a page break... */ (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; } *x = *left; *needspace = 0; break; case MARKUP_COMMENT : // Check comments for commands... parse_comment(t, left, right, bottom, top, x, y, page, para, *needspace); break; case MARKUP_HEAD : // Ignore document HEAD section case MARKUP_TITLE : // Ignore title and meta stuff case MARKUP_META : case MARKUP_SCRIPT : // Ignore script stuff case MARKUP_INPUT : // Ignore form stuff case MARKUP_SELECT : case MARKUP_OPTION : case MARKUP_TEXTAREA : break; case MARKUP_STYLE : break; case MARKUP_A : if (htmlGetVariable(t, (uchar *)"NAME") != NULL) { /* * Add this named destination to the paragraph tree... 
*/ if (para->child == NULL) { para->halignment = t->halignment; para->indent = t->indent; } if ((temp = htmlAddTree(para, t->markup, t->data)) != NULL) { temp->link = t->link; temp->width = t->width; temp->height = t->height; temp->typeface = t->typeface; temp->style = t->style; temp->size = t->size; temp->underline = t->underline; temp->strikethrough = t->strikethrough; temp->superscript = t->superscript; temp->subscript = t->subscript; temp->halignment = t->halignment; temp->valignment = t->valignment; temp->red = t->red; temp->green = t->green; temp->blue = t->blue; for (i = 0, var = t->vars; i < t->nvars; i ++, var ++) htmlSetVariable(temp, var->name, var->value); } } default : if (t->child != NULL) parse_doc(t->child, left, right, bottom, top, x, y, page, para, needspace); break; } // Check for some basic stylesheet stuff... if ((style = htmlGetStyle(t, (uchar *)"page-break-after:")) != NULL && strcasecmp((char *)style, "avoid") != 0) { // Advance to the next page... (*page) ++; *x = *left; *y = *top; *needspace = 0; // See if we need to go to the next left/righthand page... if (PageDuplex && ((*page) & 1) && strcasecmp((char *)style, "right") == 0) (*page) ++; else if (PageDuplex && !((*page) & 1) && strcasecmp((char *)style, "left") == 0) (*page) ++; // Update the progress as necessary... if (Verbosity) progress_show("Formatting page %d", *page); } // Move to the next node... t = t->next; } if (para->child != NULL && cpara != para) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 0; } if (cpara != para) htmlDeleteTree(para); DEBUG_printf(("LEAVING parse_doc(), x = %.1f, y = %.1f, page = %d\n", *x, *y, *page)); } /* * 'parse_heading()' - Parse a heading tree and produce rendering list output. */ static void parse_heading(tree_t *t, /* I - Tree to parse */ float left, /* I - Left margin */ float right, /* I - Printable width */ float bottom, /* I - Bottom margin */ float top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int needspace) /* I - Need whitespace? */ { int *temp; // Temporary integer array pointer DEBUG_printf(("parse_heading(t=%p, left=%.1f, right=%.1f, bottom=%.1f, top=%.1f, x=%.1f, y=%.1f, page=%d, needspace=%d\n", (void *)t, left, right, bottom, top, *x, *y, *page, needspace)); if (((t->markup - MARKUP_H1) < TocLevels || TocLevels == 0) && !title_page) current_heading = t->child; if (*y < (5 * _htmlSpacings[SIZE_P] + bottom)) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } check_pages(*page); if (t->markup == MARKUP_H1 && !title_page) pages[*page].chapter = htmlGetText(current_heading); if ((pages[*page].heading == NULL || t->markup == MARKUP_H1 || (*page > 0 && pages[*page].heading == pages[*page - 1].heading)) && !title_page) { pages[*page].heading = htmlGetText(current_heading); pages[*page].headnode = current_heading; } if ((t->markup - MARKUP_H1) < TocLevels && !title_page) { DEBUG_printf(("H%d: heading_pages[%d] = %d\n", t->markup - MARKUP_H1 + 1, (int)num_headings, *page - 1)); // See if we need to resize the headings arrays... 
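  // heading_pages[] and heading_tops[] grow in ALLOC_HEADINGS-sized chunks;
  // the newly added tail is zeroed, and if a realloc fails alloc_headings is
  // rolled back and the function returns without recording the heading.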
if (num_headings >= alloc_headings) { alloc_headings += ALLOC_HEADINGS; if (num_headings == 0) temp = (int *)malloc(sizeof(int) * alloc_headings); else temp = (int *)realloc(heading_pages, sizeof(int) * alloc_headings); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d headings - %s", (int)alloc_headings, strerror(errno)); alloc_headings -= ALLOC_HEADINGS; return; } memset(temp + alloc_headings - ALLOC_HEADINGS, 0, sizeof(int) * ALLOC_HEADINGS); heading_pages = temp; if (num_headings == 0) temp = (int *)malloc(sizeof(int) * alloc_headings); else temp = (int *)realloc(heading_tops, sizeof(int) * alloc_headings); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d headings - %s", (int)alloc_headings, strerror(errno)); alloc_headings -= ALLOC_HEADINGS; return; } memset(temp + alloc_headings - ALLOC_HEADINGS, 0, sizeof(int) * ALLOC_HEADINGS); heading_tops = temp; } heading_pages[num_headings] = *page; heading_tops[num_headings] = (int)(*y + 4 * _htmlSpacings[SIZE_P]); num_headings ++; } parse_paragraph(t, left, right, bottom, top, x, y, page, needspace); if (t->halignment == ALIGN_RIGHT && t->markup == MARKUP_H1 && OutputType == OUTPUT_BOOK && !title_page) { /* * Special case - chapter heading for users manual... */ *y = bottom + 0.5f * (top - bottom); } } #if defined(PARA_DEBUG) && !defined(DEBUG) # undef DEBUG_printf # undef DEBUG_puts # define DEBUG_printf(x) printf x # define DEBUG_puts(x) puts(x) #endif /* PARA_DEBUG && !defined(DEBUG) */ /* * 'parse_paragraph()' - Parse a paragraph tree and produce rendering list * output. */ static void parse_paragraph(tree_t *t, /* I - Tree to parse */ float left, /* I - Left margin */ float right, /* I - Printable width */ float bottom, /* I - Bottom margin */ float top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int needspace)/* I - Need whitespace? */ { int whitespace; /* Non-zero if a fragment ends in whitespace */ tree_t *flat, *start, *end, *prev, *temp; float width, height, offset, spacing, borderspace, temp_y, temp_width, temp_height; float format_width, image_y, image_left, image_right; int image_page = *page; float char_spacing; int num_chars; render_t *r; uchar *align, *hspace, *vspace, *link, *border; float rgb[3]; uchar line[10240], *lineptr, *dataptr; tree_t *linetype; float linex, linewidth; int firstline; DEBUG_printf(("parse_paragraph(t=%p, left=%.1f, right=%.1f, bottom=%.1f, top=%.1f, x=%.1f, y=%.1f, page=%d, needspace=%d\n", (void *)t, left, right, bottom, top, *x, *y, *page, needspace)); flat = flatten_tree(t->child); image_left = left; image_right = right; image_y = 0; if (flat == NULL) DEBUG_puts("parse_paragraph: flat == NULL!"); // Add leading whitespace... if (*y < top && needspace) *y -= _htmlSpacings[SIZE_P]; /* * First scan for images with left/right alignment tags... 
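 *
 * Images carrying ALIGN=LEFT or ALIGN=RIGHT are pulled out of the flattened
 * fragment list and rendered immediately at the current margin; the working
 * margins image_left/image_right are then narrowed by the image width plus
 * any BORDER and HSPACE so the following text wraps beside the image.
 * image_y remembers where the lowest floating image ends so the full
 * margins can be restored once the text has moved below it.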
*/ for (temp = flat, prev = NULL; temp != NULL;) { if (temp->markup == MARKUP_IMG) update_image_size(temp); if (temp->markup == MARKUP_IMG && (align = htmlGetVariable(temp, (uchar *)"ALIGN"))) { if ((border = htmlGetVariable(temp, (uchar *)"BORDER")) != NULL) borderspace = (float)atof((char *)border); else if (temp->link) borderspace = 1; else borderspace = 0; borderspace *= PagePrintWidth / _htmlBrowserWidth; if (strcasecmp((char *)align, "LEFT") == 0) { if ((vspace = htmlGetVariable(temp, (uchar *)"VSPACE")) != NULL) *y -= atoi((char *)vspace); if (*y < (bottom + temp->height + 2 * borderspace)) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } if (borderspace > 0.0f) { if (temp->link && PSLevel == 0) memcpy(rgb, link_color, sizeof(rgb)); else { rgb[0] = temp->red / 255.0f; rgb[1] = temp->green / 255.0f; rgb[2] = temp->blue / 255.0f; } // Top new_render(*page, RENDER_BOX, image_left, *y - borderspace, temp->width + 2 * borderspace, borderspace, rgb); // Left new_render(*page, RENDER_BOX, image_left, *y - temp->height - 2 * borderspace, borderspace, temp->height + 2 * borderspace, rgb); // Right new_render(*page, RENDER_BOX, image_left + temp->width + borderspace, *y - temp->height - 2 * borderspace, borderspace, temp->height + 2 * borderspace, rgb); // Bottom new_render(*page, RENDER_BOX, image_left, *y - temp->height - 2 * borderspace, temp->width + 2 * borderspace, borderspace, rgb); } *y -= borderspace; new_render(*page, RENDER_IMAGE, image_left + borderspace, *y - temp->height, temp->width, temp->height, image_find((char *)htmlGetVariable(temp, (uchar *)"REALSRC"))); if (temp->link && (link = htmlGetVariable(temp->link, (uchar *)"_HD_FULL_HREF")) != NULL) { /* * Add a page link... */ new_render(*page, RENDER_LINK, image_left + borderspace, *y - temp->height, temp->width, temp->height, link); } *y -= borderspace; if (vspace != NULL) *y -= atoi((char *)vspace); image_left += temp->width + 2 * borderspace; temp_y = *y - temp->height; image_page = *page; if (temp_y < image_y || image_y == 0) image_y = temp_y; if ((hspace = htmlGetVariable(temp, (uchar *)"HSPACE")) != NULL) image_left += atoi((char *)hspace); if (prev != NULL) prev->next = temp->next; else flat = temp->next; free(temp); temp = prev; } else if (strcasecmp((char *)align, "RIGHT") == 0) { if ((vspace = htmlGetVariable(temp, (uchar *)"VSPACE")) != NULL) *y -= atoi((char *)vspace); if (*y < (bottom + temp->height + 2 * borderspace)) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } image_right -= temp->width + 2 * borderspace; image_page = *page; if (borderspace > 0.0f) { if (temp->link && PSLevel == 0) memcpy(rgb, link_color, sizeof(rgb)); else { rgb[0] = temp->red / 255.0f; rgb[1] = temp->green / 255.0f; rgb[2] = temp->blue / 255.0f; } // Top new_render(*page, RENDER_BOX, image_right, *y - borderspace, temp->width + 2 * borderspace, borderspace, rgb); // Left new_render(*page, RENDER_BOX, image_right, *y - temp->height - 2 * borderspace, borderspace, temp->height + 2 * borderspace, rgb); // Right new_render(*page, RENDER_BOX, image_right + temp->width + borderspace, *y - temp->height - 2 * borderspace, borderspace, temp->height + 2 * borderspace, rgb); // Bottom new_render(*page, RENDER_BOX, image_right, *y - temp->height - 2 * borderspace, temp->width + 2 * borderspace, borderspace, rgb); } *y -= borderspace; new_render(*page, RENDER_IMAGE, image_right + borderspace, *y - temp->height, temp->width, temp->height, image_find((char *)htmlGetVariable(temp, 
(uchar *)"REALSRC"))); if (temp->link && (link = htmlGetVariable(temp->link, (uchar *)"_HD_FULL_HREF")) != NULL) { /* * Add a page link... */ new_render(*page, RENDER_LINK, image_right + borderspace, *y - temp->height, temp->width, temp->height, link); } *y -= borderspace; if (vspace != NULL) *y -= atoi((char *)vspace); temp_y = *y - temp->height; if (temp_y < image_y || image_y == 0) image_y = temp_y; if ((hspace = htmlGetVariable(temp, (uchar *)"HSPACE")) != NULL) image_right -= atoi((char *)hspace); if (prev != NULL) prev->next = temp->next; else flat = temp->next; free(temp); temp = prev; } } if (temp != NULL) { prev = temp; temp = temp->next; } else temp = flat; } /* * Then format the text and inline images... */ format_width = image_right - image_left; firstline = 1; DEBUG_printf(("format_width = %.1f\n", format_width)); // Make stupid compiler warnings go away (if you can't put // enough smarts in the compiler, don't add the warning!) offset = 0.0f; temp_width = 0.0f; temp_height = 0.0f; lineptr = NULL; linex = 0.0f; linewidth = 0.0f; while (flat != NULL) { start = flat; end = flat; width = 0.0; while (flat != NULL) { // Get fragments... temp_width = 0.0; temp = flat; whitespace = 0; while (temp != NULL && !whitespace) { if (temp->markup == MARKUP_NONE && temp->data[0] == ' ') { if (temp == start) temp_width -= _htmlWidths[temp->typeface][temp->style][' '] * _htmlSizes[temp->size] * 0.001f; else if (temp_width > 0.0f) whitespace = 1; } else whitespace = 0; if (whitespace) break; if (temp->markup == MARKUP_IMG) { if ((border = htmlGetVariable(temp, (uchar *)"BORDER")) != NULL) borderspace = (float)atof((char *)border); else if (temp->link) borderspace = 1; else borderspace = 0; borderspace *= PagePrintWidth / _htmlBrowserWidth; temp_width += 2 * borderspace; } prev = temp; temp = temp->next; temp_width += prev->width; if ((temp_width >= format_width && prev->markup == MARKUP_IMG) || prev->markup == MARKUP_BR) { break; } else if (prev->markup == MARKUP_NONE) { int ch = prev->data[strlen((char *)prev->data) - 1]; if (_htmlUTF8) ch = _htmlUnicode[ch]; if (ch == 173) break; } } if ((width + temp_width) <= format_width) { width += temp_width; end = temp; flat = temp; if (prev->markup == MARKUP_BR) break; } else if (width == 0.0) { width += temp_width; end = temp; flat = temp; break; } else break; } if (start == end) { end = start->next; flat = start->next; width = start->width; } for (height = 0.0, num_chars = 0, temp = prev = start; temp != end; temp = temp->next) { prev = temp; if (temp->markup == MARKUP_NONE) num_chars += strlen((char *)temp->data); if (temp->height > height) height = temp->height; } for (spacing = 0.0, temp = prev = start; temp != end; temp = temp->next) { prev = temp; if (temp->markup != MARKUP_IMG) temp_height = (float)(temp->height * _htmlSpacings[0] / _htmlSizes[0]); else { if ((border = htmlGetVariable(temp, (uchar *)"BORDER")) != NULL) borderspace = (float)atof((char *)border); else if (temp->link) borderspace = 1; else borderspace = 0; borderspace *= PagePrintWidth / _htmlBrowserWidth; temp_height = temp->height + 2 * borderspace; } if (temp_height > spacing) spacing = temp_height; } if (firstline && end != NULL && *y < (bottom + height + _htmlSpacings[t->size])) { // Go to next page since only 1 line will fit on this one... 
(*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } firstline = 0; if (height == 0.0f) height = spacing; for (temp = start; temp != end; temp = temp->next) if (temp->markup != MARKUP_A) break; if (temp != NULL && temp->markup == MARKUP_NONE && temp->data[0] == ' ') { // Drop leading space... for (dataptr = temp->data; *dataptr; dataptr ++) *dataptr = dataptr[1]; *dataptr = '\0'; temp_width = _htmlWidths[temp->typeface][temp->style][' '] * _htmlSizes[temp->size] * 0.001f; temp->width -= temp_width; num_chars --; } if (end != NULL) temp = end->prev; else temp = NULL; DEBUG_printf((" BEFORE page=%d, y=%.1f, height=%.1f, spacing=%.1f, bottom=%.1f\n", *page, *y, height, spacing, bottom)); if (*y < (spacing + bottom)) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } *y -= height; DEBUG_printf((" page=%d, y=%.1f, width=%.1f, height=%.1f\n", *page, *y, width, height)); if (Verbosity) progress_update(100 - (int)(100 * (*y) / PagePrintLength)); char_spacing = 0.0f; whitespace = 0; temp = start; linetype = NULL; rgb[0] = temp->red / 255.0f; rgb[1] = temp->green / 255.0f; rgb[2] = temp->blue / 255.0f; switch (t->halignment) { case ALIGN_LEFT : linex = image_left; break; case ALIGN_CENTER : linex = image_left + 0.5f * (format_width - width); break; case ALIGN_RIGHT : linex = image_right - width; break; case ALIGN_JUSTIFY : linex = image_left; if (flat != NULL && flat->prev->markup != MARKUP_BR && num_chars > 1) char_spacing = (format_width - width) / (num_chars - 1); break; } while (temp != end) { if (temp->link != NULL && PSLevel == 0 && Links && temp->markup == MARKUP_NONE) { temp->red = (uchar)(link_color[0] * 255.0); temp->green = (uchar)(link_color[1] * 255.0); temp->blue = (uchar)(link_color[2] * 255.0); } /* * See if we are doing a run of characters in a line and need to * output this run... */ if (linetype != NULL && (temp->markup != MARKUP_NONE || temp->typeface != linetype->typeface || temp->style != linetype->style || temp->size != linetype->size || temp->superscript != linetype->superscript || temp->subscript != linetype->subscript || temp->red != linetype->red || temp->green != linetype->green || temp->blue != linetype->blue)) { r = new_render(*page, RENDER_TEXT, linex - linewidth, *y, linewidth, linetype->height, line); r->data.text.typeface = linetype->typeface; r->data.text.style = linetype->style; r->data.text.size = (float)_htmlSizes[linetype->size]; r->data.text.spacing = char_spacing; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); if (linetype->superscript) r->y += height - linetype->height; else if (linetype->subscript) r->y -= height - linetype->height; free(linetype); linetype = NULL; } if ((link = htmlGetVariable(temp, (uchar *)"ID")) != NULL) { /* * Add a target link... */ add_link(link, *page, (int)(*y + height)); } switch (temp->markup) { case MARKUP_A : if ((link = htmlGetVariable(temp, (uchar *)"NAME")) != NULL) { /* * Add a target link... 
*/ add_link(link, *page, (int)(*y + height)); } default : temp_width = temp->width; break; case MARKUP_NONE : if (temp->data == NULL) break; if (((temp->width - right + left) > 0.001 || (temp->height - top + bottom) > 0.001) && OverflowErrors) progress_error(HD_ERROR_CONTENT_TOO_LARGE, "Text on page %d too large - " "truncation or overlapping may occur!", *page + 1); if (linetype == NULL) { linetype = temp; lineptr = line; linewidth = 0.0; rgb[0] = temp->red / 255.0f; rgb[1] = temp->green / 255.0f; rgb[2] = temp->blue / 255.0f; } strlcpy((char *)lineptr, (char *)temp->data, sizeof(line) - (size_t)(lineptr - line)); temp_width = temp->width + char_spacing * strlen((char *)lineptr); if (temp->underline || (temp->link && LinkStyle && PSLevel == 0)) new_render(*page, RENDER_BOX, linex, *y - 1, temp_width, 0, rgb); if (temp->strikethrough) new_render(*page, RENDER_BOX, linex, *y + temp->height * 0.25f, temp_width, 0, rgb); linewidth += temp_width; lineptr += strlen((char *)lineptr); if (lineptr > line && lineptr[-1] == ' ') whitespace = 1; else whitespace = 0; break; case MARKUP_IMG : if (((temp->width - right + left) > 0.001 || (temp->height - top + bottom) > 0.001) && OverflowErrors) { DEBUG_printf(("IMAGE: %.3fx%.3f > %.3fx%.3f\n", temp->width, temp->height, right - left, top - bottom)); progress_error(HD_ERROR_CONTENT_TOO_LARGE, "Image on page %d too large - " "truncation or overlapping may occur!", *page + 1); } if ((border = htmlGetVariable(temp, (uchar *)"BORDER")) != NULL) borderspace = (float)atof((char *)border); else if (temp->link) borderspace = 1; else borderspace = 0; borderspace *= PagePrintWidth / _htmlBrowserWidth; temp_width += 2 * borderspace; switch (temp->valignment) { case ALIGN_TOP : offset = height - temp->height - 2 * borderspace; break; case ALIGN_MIDDLE : offset = 0.5f * (height - temp->height) - borderspace; break; case ALIGN_BOTTOM : offset = 0.0f; } if (borderspace > 0.0f) { // Top new_render(*page, RENDER_BOX, linex, *y + offset + temp->height + borderspace, temp->width + 2 * borderspace, borderspace, rgb); // Left new_render(*page, RENDER_BOX, linex, *y + offset, borderspace, temp->height + 2 * borderspace, rgb); // Right new_render(*page, RENDER_BOX, linex + temp->width + borderspace, *y + offset, borderspace, temp->height + 2 * borderspace, rgb); // Bottom new_render(*page, RENDER_BOX, linex, *y + offset, temp->width + 2 * borderspace, borderspace, rgb); } new_render(*page, RENDER_IMAGE, linex + borderspace, *y + offset + borderspace, temp->width, temp->height, image_find((char *)htmlGetVariable(temp, (uchar *)"REALSRC"))); whitespace = 0; temp_width = temp->width + 2 * borderspace; break; } if (temp->link != NULL && (link = htmlGetVariable(temp->link, (uchar *)"_HD_FULL_HREF")) != NULL) { /* * Add a page link... */ new_render(*page, RENDER_LINK, linex, *y + offset, temp->width, temp->height, link); } linex += temp_width; prev = temp; temp = temp->next; if (prev != linetype) free(prev); } /* * See if we have a run of characters that hasn't been output... 
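  *
  * Consecutive text fragments that share typeface, style, size, color and
  * script position are batched into a single RENDER_TEXT primitive:
  * "linetype" points at the first fragment of the run and "line" collects
  * its bytes.  The run is flushed whenever one of those attributes changes,
  * and the check below flushes whatever run is still open at the end of the
  * line.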
*/ if (linetype != NULL) { r = new_render(*page, RENDER_TEXT, linex - linewidth, *y, linewidth, linetype->height, line); r->data.text.typeface = linetype->typeface; r->data.text.style = linetype->style; r->data.text.spacing = char_spacing; r->data.text.size = (float)_htmlSizes[linetype->size]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); if (linetype->superscript) r->y += height - linetype->height; else if (linetype->subscript) r->y -= height - linetype->height; free(linetype); } /* * Update the margins after we pass below the images... */ *y -= spacing - height; DEBUG_printf((" AFTER y=%.1f, bottom=%.1f\n", *y, bottom)); if (*y < bottom) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } if (*y < image_y || *page > image_page) { image_y = 0.0f; image_left = left; image_right = right; format_width = image_right - image_left; } } *x = left; if (*y > image_y && image_y > 0.0f && image_page == *page) *y = image_y; DEBUG_printf(("LEAVING parse_paragraph(), x = %.1f, y = %.1f, page = %d, image_y = %.1f\n", *x, *y, *page, image_y)); } #if defined(PARA_DEBUG) && !defined(DEBUG) # undef DEBUG_printf # undef DEBUG_puts # define DEBUG_printf(x) # define DEBUG_puts(x) #endif /* PARA_DEBUG && !DEBUG */ /* * 'parse_pre()' - Parse preformatted text and produce rendering list output. */ static void parse_pre(tree_t *t, /* I - Tree to parse */ float left, /* I - Left margin */ float right, /* I - Printable width */ float bottom, /* I - Bottom margin */ float top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int needspace) /* I - Need whitespace? */ { tree_t *flat, *start, *next; uchar *link, line[10240], *lineptr, *dataptr; int col; float width, height, rgb[3]; render_t *r; REF(right); DEBUG_printf(("parse_pre(t=%p, left=%.1f, right=%.1f, x=%.1f, y=%.1f, page=%d\n", (void *)t, left, right, *x, *y, *page)); if (t->child == NULL) return; if (*y < top && needspace) *y -= _htmlSpacings[SIZE_P]; flat = flatten_tree(t->child); if (flat == NULL) return; if (flat->markup == MARKUP_NONE && flat->data != NULL) { // Skip leading blank line, if present... for (dataptr = flat->data; isspace(*dataptr); dataptr ++); if (!*dataptr) { next = flat->next; free(flat); flat = next; } } while (flat != NULL) { for (height = 0.0f, start = flat; flat != NULL; flat = flat->next) { if (flat->height > height) height = flat->height; if (flat->markup == MARKUP_BR || (flat->markup == MARKUP_NONE && flat->data && flat->data[strlen((char *)flat->data) - 1] == '\n')) break; } if (flat) flat = flat->next; if (*y < (height + bottom)) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } *x = left; *y -= height; if (Verbosity) progress_update(100 - (int)(100 * (*y) / PagePrintLength)); col = 0; while (start != flat) { rgb[0] = start->red / 255.0f; rgb[1] = start->green / 255.0f; rgb[2] = start->blue / 255.0f; if (start->link && (link = htmlGetVariable(start->link, (uchar *)"_HD_FULL_HREF")) != NULL) { /* * Add a page link... */ new_render(*page, RENDER_LINK, *x, *y, start->width, start->height, link); if (PSLevel == 0 && Links) { memcpy(rgb, link_color, sizeof(rgb)); start->red = (uchar)(link_color[0] * 255.0); start->green = (uchar)(link_color[1] * 255.0); start->blue = (uchar)(link_color[2] * 255.0); if (LinkStyle) new_render(*page, RENDER_BOX, *x, *y - 1, start->width, 0, link_color); } } if ((link = htmlGetVariable(start, (uchar *)"ID")) != NULL) { /* * Add a target link... 
*/ add_link(link, *page, (int)(*y + height)); } switch (start->markup) { case MARKUP_COMMENT : parse_comment(start, &left, &right, &bottom, &top, x, y, page, NULL, 0); break; case MARKUP_A : if ((link = htmlGetVariable(start, (uchar *)"NAME")) != NULL) { /* * Add a target link... */ add_link(link, *page, (int)(*y + height)); } break; case MARKUP_NONE : for (lineptr = line, dataptr = start->data; *dataptr != '\0' && lineptr < (line + sizeof(line) - 1); dataptr ++) if (*dataptr == '\n') break; else if (*dataptr == '\t') { /* This code changed after 15 years to work around new compiler optimization bugs (Issue #349) */ int num_cols = 8 - (col & 7); memcpy(lineptr, " ", num_cols); lineptr += num_cols; col += num_cols; } else if (*dataptr != '\r') { *lineptr++ = *dataptr; col ++; } *lineptr = '\0'; width = get_width(line, start->typeface, start->style, start->size); r = new_render(*page, RENDER_TEXT, *x, *y, width, 0, line); r->data.text.typeface = start->typeface; r->data.text.style = start->style; r->data.text.size = (float)_htmlSizes[start->size]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); if (start->underline) new_render(*page, RENDER_BOX, *x, *y - 1, start->width, 0, rgb); if (start->strikethrough) new_render(*page, RENDER_BOX, *x, *y + start->height * 0.25f, start->width, 0, rgb); *x += start->width; break; case MARKUP_IMG : new_render(*page, RENDER_IMAGE, *x, *y, start->width, start->height, image_find((char *)htmlGetVariable(start, (uchar *)"REALSRC"))); *x += start->width; col ++; break; default : break; } next = start->next; free(start); start = next; } if ((*x - right) > 0.001 && OverflowErrors) progress_error(HD_ERROR_CONTENT_TOO_LARGE, "Preformatted text on page %d too long - " "truncation or overlapping may occur!", *page + 1); *y -= _htmlSpacings[t->size] - _htmlSizes[t->size]; } *x = left; } //#define TABLE_DEBUG 1 #ifdef TABLE_DEBUG # undef DEBUG_puts # define DEBUG_puts(x) puts(x) # define DEBUG 1 # undef DEBUG_printf # define DEBUG_printf(x) printf x #endif /* TABLE_DEBUG */ typedef struct { int debug; int num_cols, num_rows; float border, border_left, border_rgb[3], border_size, cellpadding, height; int col_spans[MAX_COLUMNS], row_spans[MAX_COLUMNS]; char col_fixed[MAX_COLUMNS], col_percent[MAX_COLUMNS]; float col_lefts[MAX_COLUMNS], col_rights[MAX_COLUMNS], col_widths[MAX_COLUMNS], col_swidths[MAX_COLUMNS], col_mins[MAX_COLUMNS], col_smins[MAX_COLUMNS], col_prefs[MAX_COLUMNS]; int cell_page[MAX_COLUMNS], // Start page for cell cell_endpage[MAX_COLUMNS]; // End page for cell float cell_y[MAX_COLUMNS], // Row for each cell cell_endy[MAX_COLUMNS], // Row for each cell cell_height[MAX_COLUMNS], // Height of each cell in a row span_heights[MAX_COLUMNS]; // Height of spans render_t *cell_bg[MAX_COLUMNS]; // Background rectangles render_t *cell_start[MAX_COLUMNS]; // Start of the content for a cell in the row render_t *cell_end[MAX_COLUMNS]; // End of the content for a cell in a row } hdtable_t; /* * 'render_table_row()' - Render a table row. 
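 * Each cell's contents are formatted with parse_doc(), the tallest cell (plus any
 * ROWSPAN carry-over) sets the row height, cell contents are then shifted for
 * vertical alignment, and finally the cell backgrounds and borders are drawn,
 * including the split top/middle/bottom borders for cells that cross a page
 * boundary.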
*/ static void render_table_row(hdtable_t &table, tree_t ***cells, int row, uchar *height_var, float left, // I - Left margin float right, // I - Printable width float bottom, // I - Bottom margin float top, // I - Printable top float *x, float *y, int *page) { int col, tcol, colspan, rowspan, tempspace; float width, temp_y; int temp_page; uchar *var; int do_valign; // True if we should do vertical alignment of cells int row_page; float row_y, row_starty, row_height, // Total height of the row temp_height; // Temporary holder uchar *bgcolor; float bgrgb[3]; do_valign = 1; row_height = 0.0f; row_page = *page; row_y = *y - table.cellpadding; row_starty = row_y; DEBUG_printf(("BEFORE row_y = %.1f, *y = %.1f, row_page = %d\n", row_y, *y, row_page)); for (col = 0, rowspan = 9999; col < table.num_cols; col += colspan) { if (table.row_spans[col] == 0) { if ((var = htmlGetVariable(cells[row][col], (uchar *)"ROWSPAN")) != NULL) table.row_spans[col] = atoi((char *)var); if (table.row_spans[col] <= 1) table.row_spans[col] = 0; if (table.row_spans[col] > (table.num_rows - row)) table.row_spans[col] = table.num_rows - row; table.span_heights[col] = 0.0f; } if (table.row_spans[col] < rowspan) rowspan = table.row_spans[col]; for (colspan = 1; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; } if (!rowspan) rowspan = 1; for (col = 0; col < table.num_cols;) { for (colspan = 1; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; colspan --; DEBUG_printf((" col = %d, colspan = %d, left = %.1f, right = %.1f, cell = %p\n", col, colspan, table.col_lefts[col], table.col_rights[col + colspan], (void *)cells[row][col])); *x = table.col_lefts[col]; temp_y = *y - table.cellpadding; temp_page = *page; tempspace = 0; if (row == 0 || cells[row][col] != cells[row - 1][col]) { check_pages(*page); if (cells[row][col] == NULL) bgcolor = NULL; else if ((bgcolor = htmlGetVariable(cells[row][col], (uchar *)"BGCOLOR")) != NULL) { memcpy(bgrgb, background_color, sizeof(bgrgb)); get_color(bgcolor, bgrgb, 0); width = table.col_rights[col + colspan] - table.col_lefts[col] + 2 * table.cellpadding; table.border_left = table.col_lefts[col] - table.cellpadding; table.cell_bg[col] = new_render(*page, RENDER_BOX, table.border_left, row_y, width + table.border, 0.0, bgrgb); } else { table.cell_bg[col] = NULL; new_render(*page, RENDER_TEXT, -1.0f, -1.0f, 0.0, 0.0, (void *)""); } DEBUG_printf(("cell_bg[%d] = %p, pages[%d].end = %p\n", col, (void *)table.cell_bg[col], *page, (void *)pages[*page].end)); table.cell_start[col] = pages[*page].end; table.cell_page[col] = temp_page; table.cell_y[col] = temp_y; if (table.debug) { check_pages(*page); render_t *r; char table_text[255]; snprintf(table_text, sizeof(table_text), "cell=%p [%d,%d]", (void *)cells[row][col], row, col); r = new_render(temp_page, RENDER_TEXT, *x, temp_y, get_width((uchar *)table_text, TYPE_COURIER, STYLE_NORMAL, 1), _htmlSizes[1], table_text); r->data.text.typeface = TYPE_COURIER; r->data.text.style = STYLE_NORMAL; r->data.text.size = (float)_htmlSizes[1]; } if (cells[row][col] != NULL && cells[row][col]->child != NULL) { DEBUG_printf((" parsing cell %d,%d; width = %.1f\n", row, col, table.col_rights[col + colspan] - table.col_lefts[col])); bottom += table.cellpadding; top -= table.cellpadding; parse_doc(cells[row][col]->child, table.col_lefts + col, table.col_rights + col + colspan, &bottom, &top, x, &temp_y, &temp_page, NULL, &tempspace); bottom -= table.cellpadding; 
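// (Undo the cell padding inset applied around the parse_doc() call above;
// temp_y/temp_page now mark where the cell content ended and are used just
// below to compute cell_height[] and cell_endpage[].)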
top += table.cellpadding; } table.cell_endpage[col] = temp_page; table.cell_endy[col] = temp_y; table.cell_height[col] = *y - table.cellpadding - temp_y; table.cell_end[col] = pages[*page].end; if (table.cell_start[col] == NULL) table.cell_start[col] = pages[*page].start; DEBUG_printf(("row = %d, col = %d, y = %.1f, cell_y = %.1f, cell_height = %.1f\n", row, col, *y - table.cellpadding, temp_y, table.cell_height[col])); DEBUG_printf(("cell_start[%d] = %p, cell_end[%d] = %p\n", col, (void *)table.cell_start[col], col, (void *)table.cell_end[col])); } if (table.row_spans[col] == 0 && table.cell_page[col] == table.cell_endpage[col] && table.cell_height[col] > row_height) row_height = table.cell_height[col]; if (table.row_spans[col] <= rowspan) { if (table.cell_page[col] != table.cell_endpage[col]) do_valign = 0; if (table.cell_endpage[col] > row_page) { row_page = table.cell_endpage[col]; row_y = table.cell_endy[col]; } else if (table.cell_endy[col] < row_y && table.cell_endpage[col] == row_page) row_y = table.cell_endy[col]; } DEBUG_printf(("**** col = %d, row = %d, row_y = %.1f, row_page = %d\n", col, row, row_y, row_page)); for (col ++; colspan > 0; colspan --, col ++) { table.cell_start[col] = NULL; table.cell_page[col] = table.cell_page[col - 1]; table.cell_y[col] = table.cell_y[col - 1]; table.cell_end[col] = NULL; table.cell_endpage[col] = table.cell_endpage[col - 1]; table.cell_endy[col] = table.cell_endy[col - 1]; table.cell_height[col] = table.cell_height[col - 1]; } } DEBUG_printf(("row = %d, row_y = %.1f, row_height = %.1f\n", row, row_y, row_height)); for (col = 0; col < table.num_cols; col += colspan) { for (colspan = 1; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; if (table.row_spans[col]) table.span_heights[col] += row_height; DEBUG_printf(("col = %d, cell_y = %.1f, cell_page = %d, cell_endpage = %d, row_spans = %d, span_heights = %.1f, cell_height = %.1f\n", col, table.cell_y[col], table.cell_page[col], table.cell_endpage[col], table.row_spans[col], table.span_heights[col], table.cell_height[col])); } for (col = 0; col < table.num_cols; col += colspan) { for (colspan = 1; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; if (table.row_spans[col] == rowspan && table.cell_page[col] == table.cell_endpage[col] && table.cell_height[col] > table.span_heights[col]) { temp_height = table.cell_height[col] - table.span_heights[col]; row_height += temp_height; DEBUG_printf(("Adjusting row-span height by %.1f, new row_height = %.1f\n", temp_height, row_height)); for (tcol = 0; tcol < table.num_cols; tcol ++) if (table.row_spans[tcol]) { table.span_heights[tcol] += temp_height; DEBUG_printf(("col = %d, span_heights = %.1f\n", tcol, table.span_heights[tcol])); } } } DEBUG_printf(("AFTER row = %d, row_page = %d, row_y = %.1f, row_height = %.1f, *y = %.1f, do_valign = %d\n", row, row_page, row_y, row_height, *y, do_valign)); /* * Do the vertical alignment */ if (do_valign) { height_var = NULL; if (cells[row][0] != NULL) { if ((height_var = htmlGetVariable(cells[row][0]->parent, (uchar *)"HEIGHT")) == NULL) for (col = 0; col < table.num_cols; col ++) if (htmlGetVariable(cells[row][col], (uchar *)"ROWSPAN") == NULL) if ((height_var = htmlGetVariable(cells[row][col], (uchar *)"HEIGHT")) != NULL) break; } if (height_var != NULL) { // Hardcode the row height... 
if (height_var[strlen((char *)height_var) - 1] == '%') temp_height = (float)(atof((char *)height_var) * 0.01f * PagePrintLength); else temp_height = (float)(atof((char *)height_var) * PagePrintWidth / _htmlBrowserWidth); if (table.height > 0 && temp_height > table.height) temp_height = table.height; temp_height -= 2 * table.cellpadding; if (temp_height > row_height) { // Only enforce the height if it is > the actual row height. row_height = temp_height; row_y = *y - temp_height; } } for (col = 0; col < table.num_cols; col += colspan + 1) { render_t *p; float delta_y; for (colspan = 1; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; colspan --; if (table.cell_start[col] == NULL || table.row_spans[col] > rowspan || cells[row][col] == NULL || cells[row][col]->child == NULL) continue; if (table.row_spans[col] == 1) { int tcol; float span_height = 0.0f; for (tcol = 0; tcol < table.num_cols; tcol ++) { if (table.row_spans[col] == 1 && table.span_heights[col] > span_height) span_height = table.span_heights[col]; } switch (cells[row][col]->valignment) { case ALIGN_MIDDLE : // delta_y = (table.span_heights[col] - table.cell_height[col]) * 0.5f; delta_y = (span_height - table.cell_height[col]) * 0.5f; break; case ALIGN_BOTTOM : // delta_y = table.span_heights[col] - table.cell_height[col]; delta_y = span_height - table.cell_height[col]; break; default : delta_y = 0.0f; break; } } else if (table.row_spans[col]) { delta_y = 0.0f; } else { switch (cells[row][col]->valignment) { case ALIGN_MIDDLE : delta_y = (row_height - table.cell_height[col]) * 0.5f; break; case ALIGN_BOTTOM : delta_y = row_height - table.cell_height[col]; break; default : delta_y = 0.0f; break; } } DEBUG_printf(("row = %d, col = %d, valign = %d, rowspans = %d, cell_height = %.1f, span_heights = %.1f, delta_y = %.1f\n", row, col, cells[row][col]->valignment, table.row_spans[col], table.cell_height[col], table.span_heights[col], delta_y)); if (delta_y > 0.0f) { if (table.cell_start[col] == table.cell_end[col]) p = table.cell_start[col]; else p = table.cell_start[col]->next; for (; p != NULL; p = p->next) { DEBUG_printf(("aligning %p (%s), y was %.1f, now %.1f\n", (void *)p, p->data.text.buffer, p->y, p->y - delta_y)); p->y -= delta_y; if (p == table.cell_end[col]) break; } } #ifdef DEBUG else { if (table.cell_start[col] == table.cell_end[col]) p = table.cell_start[col]; else p = table.cell_start[col]->next; for (; p != NULL; p = p->next) { printf("NOT aligning %p (%s)\n", (void *)p, p->data.text.buffer); if (p == table.cell_end[col]) break; } } #endif /* DEBUG */ } } // Update all current columns with ROWSPAN <= rowspan to use the same // end page and row... 
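// (Two passes: the first finds the furthest end page and the lowest Y reached by
// any cell finishing in this row, the second copies that end point back to every
// such cell so the whole row advances together.)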
for (col = 0, temp_page = -1, temp_y = 99999999; col < table.num_cols; col ++) if (table.row_spans[col] <= rowspan && cells[row][col] != NULL && cells[row][col]->child != NULL) { if (table.cell_endpage[col] > temp_page) { temp_page = table.cell_endpage[col]; temp_y = table.cell_endy[col]; } else if (table.cell_endpage[col] == temp_page && table.cell_endy[col] < temp_y) temp_y = table.cell_endy[col]; } for (col = 0; col < table.num_cols; col ++) if (table.row_spans[col] <= rowspan && cells[row][col] != NULL && cells[row][col]->child != NULL) { table.cell_endpage[col] = temp_page; table.cell_endy[col] = temp_y; } row_y -= table.cellpadding; table.border_left = table.col_lefts[0] - table.cellpadding; width = table.col_rights[table.num_cols - 1] - table.col_lefts[0] + 2 * table.cellpadding; for (bgcolor = NULL, col = 0; col < table.num_cols; col ++) if (table.row_spans[col] <= rowspan && cells[row][col] && !htmlGetVariable(cells[row][col], (uchar *)"ROWSPAN") && (bgcolor = htmlGetVariable(cells[row][col]->parent, (uchar *)"BGCOLOR")) != NULL) break; if (bgcolor) { memcpy(bgrgb, background_color, sizeof(bgrgb)); get_color(bgcolor, bgrgb, 0); if (row_page > *page) { // Draw background on multiple pages... // Bottom of first page... new_render(*page, RENDER_BOX, table.border_left, bottom, width, row_starty - bottom + table.cellpadding, bgrgb, pages[*page].start); // Intervening pages... for (temp_page = *page + 1; temp_page < row_page; temp_page ++) { new_render(temp_page, RENDER_BOX, table.border_left, bottom, width, top - bottom, bgrgb, pages[temp_page].start); } // Top of last page... check_pages(*page); new_render(row_page, RENDER_BOX, table.border_left, row_y, width, top - row_y, bgrgb, pages[row_page].start); } else { // Draw background in row... new_render(row_page, RENDER_BOX, table.border_left, row_y, width, row_height + 2 * table.cellpadding, bgrgb, pages[row_page].start); } } for (col = 0; col < table.num_cols; col += colspan + 1) { for (colspan = 0; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; else if (table.row_spans[col + colspan] > 0) { DEBUG_printf(("row = %d, col = %d, decrementing row_spans (%d) to %d...\n", row, col, table.row_spans[col + colspan], table.row_spans[col + colspan] - rowspan)); table.row_spans[col + colspan] -= rowspan; } colspan --; width = table.col_rights[col + colspan] - table.col_lefts[col] + 2 * table.cellpadding; if (cells[row][col] == NULL || cells[row][col]->child == NULL || table.row_spans[col] > 0) continue; DEBUG_printf(("DRAWING BORDER+BACKGROUND: col=%d, row=%d, cell_page=%d, cell_y=%.1f\n" " cell_endpage=%d, cell_endy=%.1f\n", col, row, table.cell_page[col], table.cell_y[col], table.cell_endpage[col], table.cell_endy[col])); if ((bgcolor = htmlGetVariable(cells[row][col], (uchar *)"BGCOLOR")) != NULL) { memcpy(bgrgb, background_color, sizeof(bgrgb)); get_color(bgcolor, bgrgb, 0); } table.border_left = table.col_lefts[col] - table.cellpadding; if (table.cell_page[col] != table.cell_endpage[col]) { /* * Crossing a page boundary... 
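 * The border is drawn in pieces: the top edge plus both sides on the page where
 * the cell starts, sides only on any intervening pages, and sides plus the bottom
 * edge on the page where it ends; the background box is repeated on each of
 * those pages as well.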
*/ if (table.border > 0) { /* * +---+---+---+ * | | | | */ // Top new_render(table.cell_page[col], RENDER_BOX, table.border_left, table.cell_y[col] + table.cellpadding, width + table.border, table.border, table.border_rgb); // Left new_render(table.cell_page[col], RENDER_BOX, table.border_left, bottom, table.border, table.cell_y[col] - bottom + table.cellpadding + table.border, table.border_rgb); // Right new_render(table.cell_page[col], RENDER_BOX, table.border_left + width, bottom, table.border, table.cell_y[col] - bottom + table.cellpadding + table.border, table.border_rgb); } if (bgcolor != NULL) { table.cell_bg[col]->y = bottom; table.cell_bg[col]->height = table.cell_y[col] - bottom + table.cellpadding + table.border; } for (temp_page = table.cell_page[col] + 1; temp_page < table.cell_endpage[col]; temp_page ++) { /* * | | | | * | | | | */ if (table.border > 0.0f) { // Left new_render(temp_page, RENDER_BOX, table.border_left, bottom, table.border, top - bottom, table.border_rgb); // Right new_render(temp_page, RENDER_BOX, table.border_left + width, bottom, table.border, top - bottom, table.border_rgb); } if (bgcolor != NULL) new_render(temp_page, RENDER_BOX, table.border_left, bottom, width + table.border, top - bottom, bgrgb, pages[temp_page].start); } if (table.border > 0.0f) { /* * | | | | * +---+---+---+ */ // Left new_render(table.cell_endpage[col], RENDER_BOX, table.border_left, row_y, table.border, top - row_y, table.border_rgb); // Right new_render(table.cell_endpage[col], RENDER_BOX, table.border_left + width, row_y, table.border, top - row_y, table.border_rgb); // Bottom new_render(table.cell_endpage[col], RENDER_BOX, table.border_left, row_y, width + table.border, table.border, table.border_rgb); } if (bgcolor != NULL) { check_pages(table.cell_endpage[col]); new_render(table.cell_endpage[col], RENDER_BOX, table.border_left, row_y, width + table.border, top - row_y, bgrgb, pages[table.cell_endpage[col]].start); } } else { /* * +---+---+---+ * | | | | * +---+---+---+ */ if (table.border > 0.0f) { // Top new_render(table.cell_page[col], RENDER_BOX, table.border_left, table.cell_y[col] + table.cellpadding, width + table.border, table.border, table.border_rgb); // Left new_render(table.cell_page[col], RENDER_BOX, table.border_left, row_y, table.border, table.cell_y[col] - row_y + table.cellpadding + table.border, table.border_rgb); // Right new_render(table.cell_page[col], RENDER_BOX, table.border_left + width, row_y, table.border, table.cell_y[col] - row_y + table.cellpadding + table.border, table.border_rgb); // Bottom new_render(table.cell_page[col], RENDER_BOX, table.border_left, row_y, width + table.border, table.border, table.border_rgb); } if (bgcolor != NULL) { table.cell_bg[col]->y = row_y; table.cell_bg[col]->height = table.cell_y[col] - row_y + table.cellpadding + table.border; } } } *page = row_page; *y = row_y; } /* * 'parse_table()' - Parse a table and produce rendering output. */ static void parse_table(tree_t *t, // I - Tree to parse float left, // I - Left margin float right, // I - Printable width float bottom, // I - Bottom margin float top, // I - Printable top float *x, // IO - X position float *y, // IO - Y position int *page, // IO - Page # int needspace) // I - Need whitespace? 
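// Overview: count the rows/columns and measure each cell, compute the column
// widths in several passes (see below), then render the rows one at a time,
// repeating any THEAD header row at the top of each new page.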
{ int col, row, header_row = -1, tcol, colspan, rowspan, alloc_rows, regular_cols; hdtable_t table; float col_width, col_min, col_pref, col_height, cellspacing, width, pref_width, span_width, regular_width, actual_width, table_width, min_width, temp_width, header_height = 0.0, table_y, temp_bottom, temp_top; int temp_page, table_page; uchar *var, *height_var, // Row HEIGHT variable *header_height_var = NULL; tree_t *temprow, *tempcol, *tempnext, ***cells, *caption; // Caption for bottom, if any float temp_height; // Temporary holder uchar *bgcolor; float bgrgb[3]; const char *htmldoc_debug; // HTMLDOC_DEBUG env var DEBUG_puts("\n\nTABLE"); DEBUG_printf(("parse_table(t=%p, left=%.1f, right=%.1f, x=%.1f, y=%.1f, page=%d\n", (void *)t, left, right, *x, *y, *page)); if (t->child == NULL) return; /* Empty table... */ memset(&table, 0, sizeof(table)); /* * Check debug mode... */ if ((htmldoc_debug = getenv("HTMLDOC_DEBUG")) != NULL && (strstr(htmldoc_debug, "table") || strstr(htmldoc_debug, "all"))) table.debug = 1; else table.debug = 0; /* * Figure out the # of rows, columns, and the desired widths... */ cells = NULL; if ((var = htmlGetVariable(t, (uchar *)"WIDTH")) != NULL) { if (var[strlen((char *)var) - 1] == '%') table_width = (float)(atof((char *)var) * (right - left) / 100.0f); else table_width = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); if (table_width < 0.0f || table_width > PagePrintWidth) table_width = right - left; } else table_width = right - left; if ((var = htmlGetVariable(t, (uchar *)"HEIGHT")) != NULL) { if (var[strlen((char *)var) - 1] == '%') table.height = (float)(atof((char *)var) * (top - bottom) / 100.0f); else table.height = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else table.height = -1.0f; DEBUG_printf(("table_width = %.1f\n", table_width)); if ((var = htmlGetVariable(t, (uchar *)"CELLPADDING")) != NULL) { if ((table.cellpadding = atoi((char *)var)) < 0.0f) table.cellpadding = 0.0f; else if (table.cellpadding > 20.0f) table.cellpadding = 20.0f; } else table.cellpadding = 1.0f; if ((var = htmlGetVariable(t, (uchar *)"CELLSPACING")) != NULL) { if ((cellspacing = atoi((char *)var)) < 0.0f) cellspacing = 0.0f; else if (cellspacing > 20.0f) cellspacing = 20.0f; } else cellspacing = 0.0f; if ((var = htmlGetVariable(t, (uchar *)"BORDER")) != NULL) { if ((table.border = (float)atof((char *)var)) <= 0.0 && var[0] != '0') table.border = 1.0f; else if (table.border > 20.0f) table.border = 20.0f; table.cellpadding += table.border; } else table.border = 0.0f; if (table.debug && table.border == 0.0f) table.border = 0.01f; table.border_rgb[0] = t->red / 255.0f; table.border_rgb[1] = t->green / 255.0f; table.border_rgb[2] = t->blue / 255.0f; if ((var = htmlGetVariable(t, (uchar *)"BORDERCOLOR")) != NULL) get_color(var, table.border_rgb, 0); if (table.border == 0.0f && table.cellpadding > 0.0f) { /* * Ah, the strange table formatting nightmare that is HTML. * Netscape and MSIE assign an invisible border width of 1 * pixel if no border is specified... 
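 * so one pixel is added to the cell padding below to match; like the other table
 * metrics it is then converted from browser pixels to points.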
*/ table.cellpadding += 1.0f; } table.border_size = table.border - 1.0f; cellspacing *= PagePrintWidth / _htmlBrowserWidth; table.cellpadding *= PagePrintWidth / _htmlBrowserWidth; table.border *= PagePrintWidth / _htmlBrowserWidth; table.border_size *= PagePrintWidth / _htmlBrowserWidth; DEBUG_printf(("border = %.1f, cellpadding = %.1f\n", table.border, table.cellpadding)); temp_bottom = bottom - table.cellpadding; temp_top = top + table.cellpadding; for (temprow = t->child, table.num_cols = 0, table.num_rows = 0, alloc_rows = 0, caption = NULL; temprow != NULL; temprow = tempnext) { tempnext = temprow->next; if (temprow->markup == MARKUP_CAPTION) { if ((var = htmlGetVariable(temprow, (uchar *)"ALIGN")) == NULL || strcasecmp((char *)var, "bottom")) { /* * Show caption at top... */ parse_paragraph(temprow, left, right, bottom, top, x, y, page, needspace); needspace = 1; } else { /* * Flag caption for bottom of table... */ caption = temprow; } } else if (temprow->markup == MARKUP_TR || ((temprow->markup == MARKUP_TBODY || temprow->markup == MARKUP_THEAD || temprow->markup == MARKUP_TFOOT) && temprow->child != NULL)) { if (temprow->markup == MARKUP_THEAD) header_row = table.num_rows; // Descend into table body as needed... if (temprow->markup == MARKUP_TBODY || temprow->markup == MARKUP_THEAD || temprow->markup == MARKUP_TFOOT) temprow = temprow->child; // Figure out the next row... if ((tempnext = temprow->next) == NULL) if (temprow->parent->markup == MARKUP_TBODY || temprow->parent->markup == MARKUP_THEAD || temprow->parent->markup == MARKUP_TFOOT) tempnext = temprow->parent->next; // Allocate memory for the table as needed... if (table.num_rows >= alloc_rows) { alloc_rows += ALLOC_ROWS; if (alloc_rows == ALLOC_ROWS) cells = (tree_t ***)malloc(sizeof(tree_t **) * (size_t)alloc_rows); else cells = (tree_t ***)realloc(cells, sizeof(tree_t **) * (size_t)alloc_rows); if (cells == (tree_t ***)0) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for table!"); return; } } if ((cells[table.num_rows] = (tree_t **)calloc(sizeof(tree_t *), MAX_COLUMNS)) == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for table!"); free(cells); return; } #ifdef DEBUG printf("BEFORE row %d: num_cols = %d\n", table.num_rows, table.num_cols); if (table.num_rows) for (col = 0; col < table.num_cols; col ++) printf(" col %d: row_spans[] = %d\n", col, table.row_spans[col]); #endif // DEBUG // Figure out the starting column... if (table.num_rows) { for (col = 0, rowspan = 9999; col < table.num_cols; col ++) if (table.row_spans[col] < rowspan) rowspan = table.row_spans[col]; for (col = 0; col < table.num_cols; col ++) table.row_spans[col] -= rowspan; for (col = 0; table.row_spans[col] && col < table.num_cols; col ++) cells[table.num_rows][col] = cells[table.num_rows - 1][col]; } else col = 0; for (tempcol = temprow->child; tempcol != NULL && col < MAX_COLUMNS; tempcol = tempcol->next) { if (tempcol->markup == MARKUP_TH && table.num_rows == 0) header_row = table.num_rows; if (tempcol->markup == MARKUP_TD || tempcol->markup == MARKUP_TH) { // Handle colspan and rowspan stuff... 
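// (COLSPAN is clamped to the range 1 .. MAX_COLUMNS - col; a ROWSPAN of 1 or
// less is stored as 0, meaning "no span", and the span count is copied to every
// column the cell covers.)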
if ((var = htmlGetVariable(tempcol, (uchar *)"COLSPAN")) != NULL) { if ((colspan = atoi((char *)var)) < 1) colspan = 1; else if (colspan > (MAX_COLUMNS - col)) colspan = MAX_COLUMNS - col; } else colspan = 1; if ((var = htmlGetVariable(tempcol, (uchar *)"ROWSPAN")) != NULL) { table.row_spans[col] = atoi((char *)var); if (table.row_spans[col] <= 1) table.row_spans[col] = 0; for (tcol = 1; tcol < colspan; tcol ++) table.row_spans[col + tcol] = table.row_spans[col]; } // Compute the cell size... col_width = get_cell_size(tempcol, 0.0f, table_width, &col_min, &col_pref, &col_height); if ((var = htmlGetVariable(tempcol, (uchar *)"WIDTH")) != NULL) { if (var[strlen((char *)var) - 1] == '%') { col_width -= 2.0 * table.cellpadding - cellspacing; if (colspan <= 1) table.col_percent[col] = 1; } else { col_width -= 2.0 * table.cellpadding; } if (col_width <= 0.0f) col_width = 0.0f; else if (col_width > PageWidth) col_width = PageWidth; } else col_width = 0.0f; tempcol->height = col_height; DEBUG_printf(("%d,%d: colsp=%d, rowsp=%d, width=%.1f, minw=%.1f, prefw=%.1f, minh=%.1f\n", col, table.num_rows, colspan, table.row_spans[col], col_width, col_min, col_pref, col_height)); // Add widths to columns... if (colspan > 1) { if (colspan > table.col_spans[col]) table.col_spans[col] = colspan; if (col_width > table.col_swidths[col]) table.col_swidths[col] = col_width; if (col_min > table.col_smins[col]) table.col_smins[col] = col_min; temp_width = col_width / colspan; for (int i = 0; i < colspan; i ++) { if (temp_width > table.col_widths[col + i]) table.col_widths[col + i] = temp_width; } } else { if (col_width > 0.0f) table.col_fixed[col] = 1; if (col_width > table.col_widths[col]) table.col_widths[col] = col_width; if (col_pref > table.col_prefs[col]) table.col_prefs[col] = col_pref; if (col_min > table.col_mins[col]) table.col_mins[col] = col_min; } while (colspan > 0 && col < MAX_COLUMNS) { cells[table.num_rows][col] = tempcol; col ++; colspan --; } while (table.row_spans[col] && col < table.num_cols) { cells[table.num_rows][col] = cells[table.num_rows - 1][col]; col ++; } } } DEBUG_printf(("header_row=%d\n", header_row)); if (col > table.num_cols) table.num_cols = col; #ifdef DEBUG printf("AFTER row %d: num_cols = %d\n", table.num_rows, table.num_cols); for (col = 0; col < table.num_cols; col ++) printf(" col %d: row_spans[] = %d\n", col, table.row_spans[col]); #endif // DEBUG table.num_rows ++; for (col = 0; col < table.num_cols; col ++) if (table.row_spans[col]) table.row_spans[col] --; } } /* * OK, some people apparently create HTML tables with no columns or * rows... If this happened, return immediately... */ if (table.num_cols == 0) return; /* * Now figure out the width of the table... */ if ((var = htmlGetVariable(t, (uchar *)"WIDTH")) != NULL) { if (var[strlen((char *)var) - 1] == '%') width = (float)(atof((char *)var) * (right - left) / 100.0f); else width = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else { for (col = 0, width = 0.0; col < table.num_cols; col ++) width += table.col_prefs[col]; width += (2 * table.cellpadding + cellspacing) * table.num_cols - cellspacing; if (width > (right - left)) width = right - left; } /* * Compute the width of each column based on the printable width. 
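 * This is done in five passes: (1) honor explicit column widths, (2) give the
 * remaining columns their preferred width when it fits and their minimum
 * otherwise, (3) widen columns as needed to satisfy COLSPAN'd cells, (4) spread
 * any leftover space across the non-fixed columns, and (5) squeeze everything
 * proportionally if the result is still wider than the table width.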
*/ DEBUG_printf(("\nTABLE: %dx%d\n\n", table.num_cols, table.num_rows)); actual_width = (2 * table.cellpadding + cellspacing) * table.num_cols - cellspacing; regular_width = (width - actual_width) / table.num_cols; DEBUG_printf((" width = %.1f, actual_width = %.1f, regular_width = %.1f\n\n", width, actual_width, regular_width)); DEBUG_puts(" Col Width Min Pref Fixed? Percent?"); DEBUG_puts(" --- ------ ------ ------ ------ --------"); #ifdef DEBUG for (col = 0; col < table.num_cols; col ++) printf(" %-3d %-6.1f %-6.1f %-6.1f %-6s %s\n", col, table.col_widths[col], table.col_mins[col], table.col_prefs[col], table.col_fixed[col] ? "YES" : "NO", table.col_percent[col] ? "YES" : "NO"); puts(""); #endif /* DEBUG */ /* * The first pass just handles columns with a specified width... */ DEBUG_puts("PASS 1: fixed width handling\n"); for (col = 0, regular_cols = 0; col < table.num_cols; col ++) if (table.col_widths[col] > 0.0f) { if (table.col_mins[col] > table.col_widths[col]) { DEBUG_printf((" updating column %d to width=%.1f\n", col, table.col_mins[col])); table.col_widths[col] = table.col_mins[col]; } actual_width += table.col_widths[col]; } else { regular_cols ++; actual_width += table.col_mins[col]; } DEBUG_printf((" actual_width = %.1f, regular_cols = %d\n\n", actual_width,regular_cols)); /* * Pass two uses the "preferred" width whenever possible, and the * minimum otherwise... */ DEBUG_puts("PASS 2: preferred width handling\n"); for (col = 0, pref_width = 0.0f; col < table.num_cols; col ++) if (table.col_widths[col] == 0.0f) pref_width += table.col_prefs[col] - table.col_mins[col]; DEBUG_printf((" pref_width = %.1f\n", pref_width)); if (pref_width > 0.0f) { if ((regular_width = (width - actual_width) / pref_width) < 0.0f) regular_width = 0.0f; else if (regular_width > 1.0f) regular_width = 1.0f; DEBUG_printf((" regular_width = %.1f\n", regular_width)); for (col = 0; col < table.num_cols; col ++) if (table.col_widths[col] == 0.0f) { pref_width = (table.col_prefs[col] - table.col_mins[col]) * regular_width; if ((actual_width + pref_width) > width) { if (col == (table.num_cols - 1) && (width - actual_width) >= table.col_mins[col]) table.col_widths[col] = width - actual_width; else table.col_widths[col] = table.col_mins[col]; } else table.col_widths[col] = pref_width + table.col_mins[col]; DEBUG_printf((" col_widths[%d] = %.1f\n", col, table.col_widths[col])); actual_width += table.col_widths[col] - table.col_mins[col]; } } else { /* * Assign min widths for all cells... */ for (col = 0; col < table.num_cols; col ++) if (table.col_widths[col] == 0.0f) table.col_widths[col] = table.col_mins[col]; } DEBUG_printf((" actual_width = %.1f\n\n", actual_width)); /* * Pass three enforces any hard or minimum widths for COLSPAN'd * columns... 
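 * (Illustrative example: if a cell spanning two non-fixed columns needs 200pt but
 * those columns currently add up to only 150pt, both are scaled up by 200/150;
 * fixed-width columns inside the span are left alone, so the extra width comes
 * from the others.)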
*/ DEBUG_puts("PASS 3: colspan handling\n\n"); for (col = 0; col < table.num_cols; col ++) { DEBUG_printf((" col %d, colspan %d\n", col, table.col_spans[col])); if (table.col_spans[col] > 1) { for (colspan = 0, span_width = 0.0f; colspan < table.col_spans[col]; colspan ++) span_width += table.col_widths[col + colspan]; pref_width = 0.0f; if (span_width < table.col_swidths[col]) pref_width = table.col_swidths[col]; if (span_width < table.col_smins[col] && pref_width < table.col_smins[col]) pref_width = table.col_smins[col]; for (colspan = 0; colspan < table.col_spans[col]; colspan ++) if (table.col_fixed[col + colspan]) { span_width -= table.col_widths[col + colspan]; pref_width -= table.col_widths[col + colspan]; } DEBUG_printf((" col_swidths=%.1f, col_smins=%.1f, span_width=%.1f, pref_width=%.1f\n", table.col_swidths[col], table.col_smins[col], span_width, pref_width)); if (pref_width > 0.0f && pref_width > span_width) { if (span_width >= 1.0f) { // Expand cells proportionately... regular_width = pref_width / span_width; for (colspan = 0; colspan < table.col_spans[col]; colspan ++) if (!table.col_fixed[col + colspan]) { actual_width -= table.col_widths[col + colspan]; table.col_widths[col + colspan] *= regular_width; actual_width += table.col_widths[col + colspan]; DEBUG_printf((" col_widths[%d] = %.1f\n", col + colspan, table.col_widths[col + colspan])); } } else { // Divide the space up equally between columns, since the // colspan area is always by itself... (this hack brought // to you by Yahoo! and their single cell tables with // colspan=2 :) regular_width = pref_width / table.col_spans[col]; for (colspan = 0; colspan < table.col_spans[col]; colspan ++) { actual_width += regular_width; table.col_widths[col + colspan] += regular_width; DEBUG_printf((" col_widths[%d] = %.1f\n", col, table.col_widths[col])); } } } } } DEBUG_printf((" actual_width = %.1f\n\n", actual_width)); /* * Pass four divides up the remaining space amongst the columns... */ DEBUG_puts("PASS 4: divide remaining space, if any...\n"); if (width > actual_width) { for (col = 0, colspan = 0; col < table.num_cols; col ++) if (!table.col_fixed[col] || table.col_percent[col]) colspan ++; if (colspan > 0) { regular_width = (width - actual_width) / table.num_cols; for (col = 0; col < table.num_cols; col ++) if (!table.col_fixed[col]) { table.col_widths[col] += regular_width; DEBUG_printf((" col_widths[%d] = %.1f\n", col, table.col_widths[col])); } } } else width = actual_width; DEBUG_puts(""); /* * The final pass is only run if the width > table_width... */ DEBUG_puts("PASS 5: Squeeze table as needed..."); if (width > table_width) { /* * Squeeze the table to fit the requested width or the printable width * as determined at the beginning... 
*/ for (col = 0, min_width = -cellspacing; col < table.num_cols; col ++) min_width += table.col_mins[col] + 2 * table.cellpadding + cellspacing; DEBUG_printf((" table_width = %.1f, width = %.1f, min_width = %.1f\n", table_width, width, min_width)); temp_width = table_width - min_width; if (temp_width < 0.0f) temp_width = 0.0f; width -= min_width; if (width < 1.0f) width = 1.0f; for (col = 0; col < table.num_cols; col ++) { table.col_widths[col] = table.col_mins[col] + temp_width * (table.col_widths[col] - table.col_mins[col]) / width; DEBUG_printf((" col_widths[%d] = %.1f\n", col, table.col_widths[col])); } for (col = 0, width = -cellspacing; col < table.num_cols; col ++) width += table.col_widths[col] + 2 * table.cellpadding + cellspacing; DEBUG_printf((" new width = %.1f, max width = %.1f\n", width, right - left)); } if ((width - right + left) > 0.001f && OverflowErrors) progress_error(HD_ERROR_CONTENT_TOO_LARGE, "Table on page %d too wide - truncation or overlapping may occur!", *page + 1); DEBUG_puts(""); DEBUG_printf(("Final table width = %.1f, alignment = %d\n", width, t->halignment)); switch (t->halignment) { case ALIGN_LEFT : *x = left + table.cellpadding; break; case ALIGN_CENTER : *x = left + 0.5f * (right - left - width) + table.cellpadding; break; case ALIGN_RIGHT : *x = right - width + table.cellpadding; break; } for (col = 0; col < table.num_cols; col ++) { table.col_lefts[col] = *x; table.col_rights[col] = *x + table.col_widths[col]; *x = table.col_rights[col] + 2 * table.cellpadding + cellspacing; DEBUG_printf(("left[%d] = %.1f, right[%d] = %.1f\n", col, table.col_lefts[col], col, table.col_rights[col])); } /* * Now render the whole table... */ if (*y < top && needspace) *y -= _htmlSpacings[SIZE_P]; if (table.debug) { check_pages(*page); render_t *r; char table_text[255]; snprintf(table_text, sizeof(table_text), "t=%p", (void *)t); r = new_render(*page, RENDER_TEXT, left, *y, get_width((uchar *)table_text, TYPE_COURIER, STYLE_NORMAL, 3), _htmlSizes[3], table_text); r->data.text.typeface = TYPE_COURIER; r->data.text.style = STYLE_NORMAL; r->data.text.size = (float)_htmlSizes[3]; } table_page = *page; table_y = *y; for (row = 0; row < table.num_rows; row ++) { height_var = NULL; if (cells[row][0] != NULL) { /* * Do page comments... */ if (cells[row][0]->parent->prev != NULL && cells[row][0]->parent->prev->markup == MARKUP_COMMENT) parse_comment(cells[row][0]->parent->prev, &left, &right, &temp_bottom, &temp_top, x, y, page, NULL, 0); /* * Get height... */ if ((height_var = htmlGetVariable(cells[row][0]->parent, (uchar *)"HEIGHT")) == NULL) for (col = 0; col < table.num_cols; col ++) if (htmlGetVariable(cells[row][col], (uchar *)"ROWSPAN") == NULL) if ((height_var = htmlGetVariable(cells[row][col], (uchar *)"HEIGHT")) != NULL) break; } if (height_var != NULL && row == header_row) header_height_var = height_var; if (cells[row][0] != NULL && height_var != NULL) { // Row height specified; make sure it'll fit... if (height_var[strlen((char *)height_var) - 1] == '%') temp_height = (float)(atof((char *)height_var) * 0.01f * (PagePrintLength - 2 * table.cellpadding)); else temp_height = (float)(atof((char *)height_var) * PagePrintWidth / _htmlBrowserWidth); if (table.height > 0.0f && temp_height > table.height) temp_height = table.height; temp_height -= 2 * table.cellpadding; } else { // Use min height computed from get_cell_size()... 
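// (No explicit height: estimate the row as the tallest non-ROWSPAN cell, at least
// one paragraph spacing, capped at the table HEIGHT if given or otherwise at 1/8
// of the page length; the estimate is only used for the page-break test below.)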
for (col = 0, temp_height = (float)_htmlSpacings[SIZE_P]; col < table.num_cols; col ++) if (cells[row][col] != NULL && cells[row][col]->height > temp_height && !htmlGetVariable(cells[row][col], (uchar *)"ROWSPAN")) temp_height = cells[row][col]->height; if (table.height > 0.0) { // Table height specified; make sure it'll fit... if (temp_height > table.height) temp_height = table.height; temp_height -= 2 * table.cellpadding; } else if (temp_height > (PageLength / 8.0) && height_var == NULL) temp_height = PageLength / 8.0; } DEBUG_printf(("BEFORE row = %d, temp_height = %.1f, *y = %.1f, *page = %d\n", row, temp_height, *y, *page)); if (*y < (bottom + 2 * table.cellpadding + temp_height) && temp_height <= (top - bottom - 2 * table.cellpadding)) { DEBUG_puts("NEW PAGE"); *y = top - header_height; (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); if (row > 0 && header_row >= 0) { // Render header row... render_table_row(table, cells, header_row, header_height_var, left, right, bottom, top, x, y, page); } } float start_y = *y; temp_page = *page; render_table_row(table, cells, row, height_var, left, right, bottom, top, x, y, page); if (header_row >= 0 && row == header_row) { header_height = *y - start_y; top += header_height; } else if (temp_page != *page && header_row >= 0) { // Render header row on new page(s)... do { float temp_y = top - header_height; temp_page ++; render_table_row(table, cells, header_row, header_height_var, left, right, bottom, top, x, &temp_y, &temp_page); } while (temp_page < *page); } if (row < (table.num_rows - 1)) (*y) -= cellspacing; DEBUG_printf(("END row = %d, *y = %.1f, *page = %d\n", row, *y, *page)); } top -= header_height; /* * Handle table background color... */ if ((bgcolor = htmlGetVariable(t, (uchar *)"BGCOLOR")) != NULL) { memcpy(bgrgb, background_color, sizeof(bgrgb)); get_color(bgcolor, bgrgb, 0); table.border_left = table.col_lefts[0] - table.cellpadding; width = table.col_rights[table.num_cols - 1] - table.col_lefts[0] + 2 * table.cellpadding; if (table_page != *page) { // Draw background on multiple pages... // Bottom of first page... new_render(table_page, RENDER_BOX, table.border_left, bottom, width, table_y - bottom, bgrgb, pages[table_page].start); // Intervening pages... for (temp_page = table_page + 1; temp_page < *page; temp_page ++) { new_render(temp_page, RENDER_BOX, table.border_left, bottom, width, top - bottom, bgrgb, pages[temp_page].start); } // Top of last page... check_pages(*page); new_render(*page, RENDER_BOX, table.border_left, *y, width, top - *y, bgrgb, pages[*page].start); } else { // Draw background in row... new_render(table_page, RENDER_BOX, table.border_left, *y, width, table_y - *y, bgrgb, pages[table_page].start); } } *x = left; if (caption) { /* * Show caption at bottom... */ parse_paragraph(caption, left, right, bottom, top, x, y, page, needspace); needspace = 1; } /* * Free memory for the table... */ if (table.num_rows > 0) { for (row = 0; row < table.num_rows; row ++) free(cells[row]); free(cells); } } #ifdef TABLE_DEBUG # undef DEBUG # undef DEBUG_puts # define DEBUG_puts(x) # undef DEBUG_printf # define DEBUG_printf(x) #endif /* TABLE_DEBUG */ /* * 'parse_list()' - Parse a list entry and produce rendering output. 
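 * The list item's contents are formatted first with parse_doc(); the bullet or
 * number is then drawn in the margin to the left of where the item began, or at
 * the top of the next page if the body wrapped and nothing remained on the
 * original page.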
*/ static void parse_list(tree_t *t, /* I - Tree to parse */ float *left, /* I - Left margin */ float *right, /* I - Printable width */ float *bottom, /* I - Bottom margin */ float *top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int needspace) /* I - Need whitespace? */ { uchar number[255]; /* List number (for numbered types) */ uchar *value; /* VALUE= variable */ int typeface; /* Typeface of list number */ float width; /* Width of list number */ render_t *r; /* Render primitive */ int oldpage; /* Old page value */ float oldy; /* Old Y value */ float tempx; /* Temporary X value */ DEBUG_printf(("parse_list(t=%p, left=%.1f, right=%.1f, x=%.1f, y=%.1f, page=%d\n", (void *)t, *left, *right, *x, *y, *page)); if (needspace && *y < *top) { *y -= _htmlSpacings[t->size]; needspace = 0; } check_pages(*page); oldy = *y; oldpage = *page; r = pages[*page].end; tempx = *x; if (t->indent == 0) { // Adjust left margin when no UL/OL/DL is being used... *left += _htmlSizes[t->size]; tempx += _htmlSizes[t->size]; } parse_doc(t->child, left, right, bottom, top, &tempx, y, page, NULL, &needspace); // Handle when paragraph wrapped to new page... if (*page != oldpage) { // First see if anything was added to the old page... if ((r != NULL && r->next == NULL) || pages[oldpage].end == NULL) { // No, put the symbol on the next page... oldpage = *page; oldy = *top; } } if ((value = htmlGetVariable(t, (uchar *)"VALUE")) != NULL) { if (isdigit(value[0])) list_values[t->indent] = atoi((char *)value); else if (isupper(value[0])) list_values[t->indent] = value[0] - 'A' + 1; else list_values[t->indent] = value[0] - 'a' + 1; } switch (list_types[t->indent]) { case 'a' : case 'A' : case '1' : case 'i' : case 'I' : strlcpy((char *)number, format_number(list_values[t->indent], (char)list_types[t->indent]), sizeof(number)); strlcat((char *)number, ". ", sizeof(number)); typeface = t->typeface; break; default : snprintf((char *)number, sizeof(number), "%c ", list_types[t->indent]); typeface = TYPE_SYMBOL; break; } width = get_width(number, typeface, t->style, t->size); r = new_render(oldpage, RENDER_TEXT, *left - width, oldy - _htmlSizes[t->size], width, _htmlSpacings[t->size], number); r->data.text.typeface = typeface; r->data.text.style = t->style; r->data.text.size = (float)_htmlSizes[t->size]; r->data.text.rgb[0] = t->red / 255.0f; r->data.text.rgb[1] = t->green / 255.0f; r->data.text.rgb[2] = t->blue / 255.0f; list_values[t->indent] ++; if (t->indent == 0) { // Adjust left margin when no UL/OL/DL is being used... *left -= _htmlSizes[t->size]; } } /* * 'init_list()' - Initialize the list type and value as necessary. 
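 * TYPE selects the bullet glyph or numbering style, VALUE/START seeds the
 * counter, unordered lists default to a symbol chosen from the nesting depth,
 * and ordered lists default to decimal numbering.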
*/ static void init_list(tree_t *t) /* I - List entry */ { uchar *type, /* TYPE= variable */ *value; /* VALUE= variable */ static uchar *symbols = (uchar *)"\327\267\250\340"; if ((type = htmlGetVariable(t, (uchar *)"TYPE")) != NULL) { if (strlen((char *)type) == 1) list_types[t->indent] = type[0]; else if (strcasecmp((char *)type, "disc") == 0 || strcasecmp((char *)type, "circle") == 0) list_types[t->indent] = symbols[1]; else list_types[t->indent] = symbols[2]; } else if (t->markup == MARKUP_UL) list_types[t->indent] = symbols[t->indent & 3]; else if (t->markup == MARKUP_OL) list_types[t->indent] = '1'; if ((value = htmlGetVariable(t, (uchar *)"VALUE")) == NULL) value = htmlGetVariable(t, (uchar *)"START"); if (value != NULL) { if (isdigit(value[0])) list_values[t->indent] = atoi((char *)value); else if (isupper(value[0])) list_values[t->indent] = value[0] - 'A' + 1; else list_values[t->indent] = value[0] - 'a' + 1; } else if (t->markup == MARKUP_OL) list_values[t->indent] = 1; } /* * 'parse_comment()' - Parse a comment for HTMLDOC comments. */ #ifdef COMMENT_DEBUG # undef DEBUG_puts # define DEBUG_puts(x) puts(x) # define DEBUG # undef DEBUG_printf # define DEBUG_printf(x) printf x #endif /* COMMENT_DEBUG */ static void parse_comment(tree_t *t, /* I - Tree to parse */ float *left, /* I - Left margin */ float *right, /* I - Printable width */ float *bottom, /* I - Bottom margin */ float *top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ tree_t *para, /* I - Current paragraph */ int needspace) /* I - Need whitespace? */ { int i; /* Looping var */ const char *comment; /* Comment text */ char *ptr, /* Pointer into value string */ buffer[1024]; /* Buffer for strings */ int pos, /* Position (left, center, right) */ tof; /* Top of form */ DEBUG_printf(("parse_comment(t=%p, left=%.1f, right=%.1f, bottom=%.1f, " "top=%.1f, x=%.1f, y=%.1f, page=%d, para=%p, needspace=%d\n", (void *)t, *left, *right, *bottom, *top, *x, *y, *page, (void *)para, needspace)); if (t->data == NULL) return; if (para != NULL && para->child != NULL && para->child->next == NULL && para->child->child == NULL && para->child->markup == MARKUP_NONE && strcmp((const char *)para->child->data, " ") == 0) { // Remove paragraph consisting solely of whitespace... htmlDeleteTree(para->child); para->child = para->last_child = NULL; } // Mark if we are at the top of form... tof = (*y >= *top); DEBUG_printf(("BEFORE tof=%d, *y=%.1f, *top=%.1f, *page=%d, t->data=\"%s\"\n", tof, *y, *top, *page, t->data)); DEBUG_printf((" PagePrintWidth = %d\n", PagePrintWidth)); DEBUG_printf(("PagePrintLength = %d\n", PagePrintLength)); DEBUG_printf((" PageWidth = %d\n", PageWidth)); DEBUG_printf((" PageLength = %d\n", PageLength)); DEBUG_printf((" PageLeft = %d\n", PageLeft)); DEBUG_printf((" PageBottom = %d\n", PageBottom)); DEBUG_printf((" PageRight = %d\n", PageRight)); DEBUG_printf((" PageTop = %d\n", PageTop)); DEBUG_printf((" Landscape = %d\n", Landscape)); for (comment = (const char *)t->data; *comment;) { // Skip leading whitespace... while (isspace(*comment)) comment ++; if (!*comment) break; if (strncasecmp(comment, "PAGE BREAK", 10) == 0 && (!comment[10] || isspace(comment[10]))) { /* * <!-- PAGE BREAK --> generates a page break... 
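 * (e.g. "Intro text<!-- PAGE BREAK -->Next chapter" flushes any pending
 * paragraph and continues formatting at the top of a new page.)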
*/ comment += 10; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; *y = *top; tof = 1; } else if (strncasecmp(comment, "NEW PAGE", 8) == 0 && (!comment[8] || isspace(comment[8]))) { /* * <!-- NEW PAGE --> generates a page break... */ comment += 8; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; *y = *top; tof = 1; } else if (strncasecmp(comment, "NEW SHEET", 9) == 0 && (!comment[9] || isspace(comment[9]))) { /* * <!-- NEW SHEET --> generate a page break to a new sheet... */ comment += 9; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } if (NumberUp == 1) { // NEW SHEET breaks to the next sheet of paper... (*page) ++; if (PageDuplex && ((*page) & 1)) (*page) ++; } else { // NEW SHEET breaks to the next side/sheet... (*page) ++; for (i = *page - 1; i >= 0; i --) if (pages[i].nup != NumberUp) break; i ++; for (i = *page - i; (i % NumberUp) != 0; i ++, (*page) ++); } if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; *y = *top; tof = 1; } else if (strncasecmp(comment, "HALF PAGE", 9) == 0 && (!comment[9] || isspace(comment[9]))) { /* * <!-- HALF PAGE --> Go to the next half page. If in the * top half of a page, go to the bottom half. If in the * bottom half, go to the next page. */ float halfway; comment += 9; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } halfway = 0.5f * (*top + *bottom); if (*y <= halfway) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; *y = *top; tof = 1; } else { *x = *left; *y = halfway; tof = 0; } } else if (strncasecmp(comment, "NEED ", 5) == 0) { /* * <!-- NEED amount --> generate a page break if there isn't * enough remaining space... */ comment += 5; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if ((*y - get_measurement(comment, (float)_htmlSpacings[SIZE_P])) < *bottom) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; // Skip amount... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA COLOR ", 12) == 0) { // Media color for page... comment += 12; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... 
tof = (*y >= *top); } if (!tof) { (*page) ++; if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; check_pages(*page); // Get color... if (*comment == '\"') { for (ptr = pages[*page].media_color, comment ++; *comment && *comment != '\"'; comment ++) if (ptr < (pages[*page].media_color + sizeof(pages[*page].media_color) - 1)) *ptr++ = *comment; if (*comment == '\"') comment ++; } else { for (ptr = pages[*page].media_color; *comment && !isspace(*comment); comment ++) if (ptr < (pages[*page].media_color + sizeof(pages[*page].media_color) - 1)) *ptr++ = *comment; } *ptr = '\0'; } else if (strncasecmp(comment, "MEDIA POSITION ", 15) == 0) { // Media position for page... comment += 15; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; check_pages(*page); pages[*page].media_position = atoi(comment); // Skip position... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA TYPE ", 11) == 0) { // Media type for page... comment += 11; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; check_pages(*page); // Get type... if (*comment == '\"') { for (ptr = pages[*page].media_type, comment ++; *comment && *comment != '\"'; comment ++) if (ptr < (pages[*page].media_type + sizeof(pages[*page].media_type) - 1)) *ptr++ = *comment; if (*comment == '\"') comment ++; } else { for (ptr = pages[*page].media_type; *comment && !isspace(*comment); comment ++) if (ptr < (pages[*page].media_type + sizeof(pages[*page].media_type) - 1)) *ptr++ = *comment; } *ptr = '\0'; } else if (strncasecmp(comment, "MEDIA SIZE ", 11) == 0) { // Media size... comment += 11; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; tof = 1; } if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); check_pages(*page); *right = PagePrintWidth - *right; *top = PagePrintLength - *top; set_page_size(comment); if (Landscape) { PagePrintWidth = PageLength - PageLeft - PageRight; PagePrintLength = PageWidth - PageTop - PageBottom; } else { PagePrintWidth = PageWidth - PageLeft - PageRight; PagePrintLength = PageLength - PageTop - PageBottom; } *right = PagePrintWidth - *right; *top = PagePrintLength - *top; *x = *left; *y = *top; pages[*page].width = PageWidth; pages[*page].length = PageLength; // Skip width... 
while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA LEFT ", 11) == 0) { // Left margin... comment += 11; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; check_pages(*page); *right = PagePrintWidth - *right; PageLeft = pages[*page].left = get_measurement(comment); if (Landscape) PagePrintWidth = PageLength - PageRight - PageLeft; else PagePrintWidth = PageWidth - PageRight - PageLeft; *right = PagePrintWidth - *right; // Skip left... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA RIGHT ", 12) == 0) { // Right margin... comment += 12; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; check_pages(*page); *right = PagePrintWidth - *right; PageRight = pages[*page].right = get_measurement(comment); if (Landscape) PagePrintWidth = PageLength - PageRight - PageLeft; else PagePrintWidth = PageWidth - PageRight - PageLeft; *right = PagePrintWidth - *right; // Skip right... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA BOTTOM ", 13) == 0) { // Bottom margin... comment += 13; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); tof = 1; } *x = *left; check_pages(*page); *top = PagePrintLength - *top; PageBottom = pages[*page].bottom = get_measurement(comment); if (Landscape) PagePrintLength = PageWidth - PageTop - PageBottom; else PagePrintLength = PageLength - PageTop - PageBottom; *top = PagePrintLength - *top; *y = *top; // Skip bottom... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA TOP ", 10) == 0) { // Top margin... comment += 10; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); tof = 1; } *x = *left; check_pages(*page); *top = PagePrintLength - *top; PageTop = pages[*page].top = get_measurement(comment); if (Landscape) PagePrintLength = PageWidth - PageTop - PageBottom; else PagePrintLength = PageLength - PageTop - PageBottom; *top = PagePrintLength - *top; *y = *top; // Skip top... 
while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA LANDSCAPE ", 16) == 0) { // Landscape on/off... comment += 16; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; tof = 1; } if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; check_pages(*page); if (strncasecmp(comment, "OFF", 3) == 0 || tolower(comment[0]) == 'n') { if (Landscape) { *right = PageLength - PageRight - *right; PagePrintWidth = PageWidth - PageRight - PageLeft; *right = PageWidth - PageRight - *right; *top = PageWidth - PageTop - *top; PagePrintLength = PageLength - PageTop - PageBottom; *top = PageLength - PageTop - *top; } Landscape = pages[*page].landscape = 0; } else if (strncasecmp(comment, "ON", 2) == 0 || tolower(comment[0]) == 'y') { if (!Landscape) { *top = PageLength - PageTop - *top; PagePrintLength = PageWidth - PageTop - PageBottom; *top = PageWidth - PageTop - *top; *right = PageWidth - PageRight - *right; PagePrintWidth = PageLength - PageRight - PageLeft; *right = PageLength - PageRight - *right; } Landscape = pages[*page].landscape = 1; } *y = *top; // Skip landscape... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA DUPLEX ", 13) == 0) { // Duplex printing on/off... comment += 13; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; *y = *top; tof = 1; } if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; check_pages(*page); if (strncasecmp(comment, "OFF", 3) == 0 || tolower(comment[0]) == 'n') PageDuplex = pages[*page].duplex = 0; else if (strncasecmp(comment, "ON", 2) == 0 || tolower(comment[0]) == 'y') { if ((*page) & 1) { (*page) ++; check_pages(*page); if (Verbosity) progress_show("Formatting page %d", *page); } PageDuplex = pages[*page].duplex = 1; } // Skip duplex... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "HEADER ", 7) == 0) { // Header string... comment += 7; while (isspace(*comment)) comment ++; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... 
tof = (*y >= *top); } if (strncasecmp(comment, "LEFT", 4) == 0 && isspace(comment[4])) { pos = 0; comment += 4; } else if (strncasecmp(comment, "CENTER", 6) == 0 && isspace(comment[6])) { pos = 1; comment += 6; } else if (strncasecmp(comment, "RIGHT", 5) == 0 && isspace(comment[5])) { pos = 2; comment += 5; } else { progress_error(HD_ERROR_BAD_COMMENT, "Bad HEADER position: \"%s\"", comment); return; } while (isspace(*comment)) comment ++; if (*comment != '\"') { progress_error(HD_ERROR_BAD_COMMENT, "Bad HEADER string: \"%s\"", comment); return; } for (ptr = buffer, comment ++; *comment && *comment != '\"'; comment ++) { if (*comment == '\\') comment ++; if (ptr < (buffer + sizeof(buffer) - 1)) *ptr++ = *comment; } if (*comment == '\"') comment ++; *ptr = '\0'; if (ptr > buffer) Header[pos] = strdup(buffer); else Header[pos] = NULL; if (tof) { DEBUG_printf(("Setting header %d for page %d to \"%s\"...\n", pos, *page, Header[pos] ? Header[pos] : "(null)")); check_pages(*page); pages[*page].header[pos] = (uchar *)Header[pos]; } // Adjust top margin as needed... float adjust, image_adjust, temp_adjust; if (maxhfheight > HeadFootSize) image_adjust = (float)(maxhfheight + HeadFootSize); else image_adjust = (float)(2 * HeadFootSize); for (adjust = 0.0, pos = 0; pos < 3; pos ++) { if (Header[pos] && (strstr(Header[pos], "$IMAGE") != NULL || strstr(Header[pos], "$HFIMAGE") != NULL)) temp_adjust = image_adjust; else if (Header1[pos] && (strstr(Header1[pos], "$IMAGE") != NULL || strstr(Header1[pos], "$HFIMAGE") != NULL)) temp_adjust = image_adjust; else if (Header[pos] || Header1[pos]) temp_adjust = (float)(2 * HeadFootSize); else temp_adjust = 0.0; if (temp_adjust > adjust) adjust = temp_adjust; } *top = PagePrintLength - adjust; if (tof) *y = *top; } else if (strncasecmp(comment, "HEADER1 ", 8) == 0) { // First page header string... comment += 8; while (isspace(*comment)) comment ++; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (strncasecmp(comment, "LEFT", 4) == 0 && isspace(comment[4])) { pos = 0; comment += 4; } else if (strncasecmp(comment, "CENTER", 6) == 0 && isspace(comment[6])) { pos = 1; comment += 6; } else if (strncasecmp(comment, "RIGHT", 5) == 0 && isspace(comment[5])) { pos = 2; comment += 5; } else { progress_error(HD_ERROR_BAD_COMMENT, "Bad HEADER1 position: \"%s\"", comment); return; } while (isspace(*comment)) comment ++; if (*comment != '\"') { progress_error(HD_ERROR_BAD_COMMENT, "Bad HEADER1 string: \"%s\"", comment); return; } for (ptr = buffer, comment ++; *comment && *comment != '\"'; comment ++) { if (*comment == '\\') comment ++; if (ptr < (buffer + sizeof(buffer) - 1)) *ptr++ = *comment; } if (*comment == '\"') comment ++; *ptr = '\0'; if (ptr > buffer) Header1[pos] = strdup(buffer); else Header1[pos] = NULL; // Adjust top margin as needed... 
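// The adjustment below mirrors the HEADER case above: each of the three
// slots contributes 2 * HeadFootSize normally, or maxhfheight + HeadFootSize
// when its string references $IMAGE/$HFIMAGE, and the largest value wins.
// Illustrative numbers: with HeadFootSize = 11pt and no header images the
// adjustment is 22pt, so *top becomes PagePrintLength - 22.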
float adjust, image_adjust, temp_adjust; if (maxhfheight > HeadFootSize) image_adjust = (float)(maxhfheight + HeadFootSize); else image_adjust = (float)(2 * HeadFootSize); for (adjust = 0.0, pos = 0; pos < 3; pos ++) { if (Header[pos] && (strstr(Header[pos], "$IMAGE") != NULL || strstr(Header[pos], "$HFIMAGE") != NULL)) temp_adjust = image_adjust; else if (Header1[pos] && (strstr(Header1[pos], "$IMAGE") != NULL || strstr(Header1[pos], "$HFIMAGE") != NULL)) temp_adjust = image_adjust; else if (Header[pos] || Header1[pos]) temp_adjust = (float)(2 * HeadFootSize); else temp_adjust = 0.0; if (temp_adjust > adjust) adjust = temp_adjust; } *top = PagePrintLength - adjust; if (tof) *y = *top; } else if (strncasecmp(comment, "FOOTER ", 7) == 0) { // Footer string... comment += 7; while (isspace(*comment)) comment ++; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (strncasecmp(comment, "LEFT", 4) == 0 && isspace(comment[4])) { pos = 0; comment += 4; } else if (strncasecmp(comment, "CENTER", 6) == 0 && isspace(comment[6])) { pos = 1; comment += 6; } else if (strncasecmp(comment, "RIGHT", 5) == 0 && isspace(comment[5])) { pos = 2; comment += 5; } else { progress_error(HD_ERROR_BAD_COMMENT, "Bad FOOTER position: \"%s\"", comment); return; } while (isspace(*comment)) comment ++; if (*comment != '\"') { progress_error(HD_ERROR_BAD_COMMENT, "Bad FOOTER string: \"%s\"", comment); return; } for (ptr = buffer, comment ++; *comment && *comment != '\"'; comment ++) { if (*comment == '\\') comment ++; if (ptr < (buffer + sizeof(buffer) - 1)) *ptr++ = *comment; } if (*comment == '\"') comment ++; *ptr = '\0'; if (ptr > buffer) Footer[pos] = strdup(buffer); else Footer[pos] = NULL; if (tof) { check_pages(*page); pages[*page].footer[pos] = (uchar *)Footer[pos]; } // Adjust bottom margin as needed... float adjust, image_adjust, temp_adjust; if (maxhfheight > HeadFootSize) image_adjust = (float)(maxhfheight + HeadFootSize); else image_adjust = (float)(2 * HeadFootSize); for (adjust = 0.0, pos = 0; pos < 3; pos ++) { if (Footer[pos] && (strstr(Footer[pos], "$IMAGE") != NULL || strstr(Footer[pos], "$HFIMAGE") != NULL)) temp_adjust = image_adjust; else if (Footer[pos]) temp_adjust = (float)(2 * HeadFootSize); else temp_adjust = 0.0; if (temp_adjust > adjust) adjust = temp_adjust; } *bottom = adjust; } else if (strncasecmp(comment, "NUMBER-UP ", 10) == 0) { // N-up printing... comment += 10; while (isspace(*comment)) comment ++; if (!*comment) break; NumberUp = strtol(comment, (char **)&comment, 10); if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... 
tof = (*y >= *top); } if (tof) { check_pages(*page); pages[*page].nup = NumberUp; } } else break; } DEBUG_printf(("LEAVING parse_comment() x=%.1f, y=%.1f, page=%d\n", *x, *y, *page)); DEBUG_printf((" PagePrintWidth = %d\n", PagePrintWidth)); DEBUG_printf(("PagePrintLength = %d\n", PagePrintLength)); DEBUG_printf((" PageWidth = %d\n", PageWidth)); DEBUG_printf((" PageLength = %d\n", PageLength)); DEBUG_printf((" PageLeft = %d\n", PageLeft)); DEBUG_printf((" PageBottom = %d\n", PageBottom)); DEBUG_printf((" PageRight = %d\n", PageRight)); DEBUG_printf((" PageTop = %d\n", PageTop)); DEBUG_printf((" Landscape = %d\n", Landscape)); } #ifdef COMMENT_DEBUG # undef DEBUG # undef DEBUG_puts # define DEBUG_puts(x) # undef DEBUG_printf # define DEBUG_printf(x) #endif /* COMMENT_DEBUG */ /* * 'find_background()' - Find the background image/color for the given document. */ static void find_background(tree_t *t) /* I - Document to search */ { uchar *var; /* BGCOLOR/BACKGROUND variable */ /* * First see if the --bodycolor or --bodyimage options have been * specified... */ if (BodyImage[0] != '\0') { background_image = image_load(BodyImage, !OutputColor); return; } else if (BodyColor[0] != '\0') { get_color((uchar *)BodyColor, background_color, 0); return; } /* * If not, search the document tree... */ while (t != NULL && background_image == NULL && background_color[0] == 1.0 && background_color[1] == 1.0 && background_color[2] == 1.0) { if (t->markup == MARKUP_BODY) { if ((var = htmlGetVariable(t, (uchar *)"BACKGROUND")) != NULL) background_image = image_load((char *)var, !OutputColor); if ((var = htmlGetVariable(t, (uchar *)"BGCOLOR")) != NULL) get_color(var, background_color, 0); } if (t->child != NULL) find_background(t->child); t = t->next; } } /* * 'write_background()' - Write the background image/color for to the current * page. 
*/ static void write_background(int page, /* I - Page we are writing for */ FILE *out) /* I - File to write to */ { float x, y; float width, height; int page_width, page_length; if (Landscape) { page_length = pages[page].width; page_width = pages[page].length; } else { page_width = pages[page].width; page_length = pages[page].length; } if (background_color[0] != 1.0 || background_color[1] != 1.0 || background_color[2] != 1.0) { if (PSLevel > 0) { render_x = -1.0; render_y = -1.0; set_color(out, background_color); fprintf(out, "0 0 M %d %d F\n", page_width, page_length); } else { set_color(out, background_color); flate_printf(out, "0 0 %d %d re f\n", page_width, page_length); } } if (background_image != NULL) { width = (float)(background_image->width * 72.0f / _htmlPPI); height = (float)(background_image->height * 72.0f / _htmlPPI); if (width < 1.0f) width = 1.0f; if (height < 1.0f) height = 1.0f; switch (PSLevel) { case 0 : for (x = 0.0; x < page_width; x += width) for (y = page_length; y >= 0.0f;) { y -= height; flate_printf(out, "q %.1f 0 0 %.1f %.1f %.1f cm", width, height, x, y); flate_printf(out, "/I%d Do\n", background_image->obj); flate_puts("Q\n", out); } break; default : fprintf(out, "0 %.1f %d{/y exch neg %d add def\n", height, page_length + (int)height - 1, page_length); fprintf(out, "0 %.1f %d{/x exch def\n", width, page_width); fprintf(out, "GS[%.1f 0 0 %.1f x y]CM/iy -1 def\n", width, height); fprintf(out, "%d %d 8[%d 0 0 %d 0 %d]", background_image->width, background_image->height, background_image->width, -background_image->height, background_image->height); fputs("{/iy iy 1 add def BG iy get}", out); if (background_image->depth == 1) fputs("image\n", out); else fputs("false 3 colorimage\n", out); fputs("GR}for}for\n", out); break; } } } /* * 'new_render()' - Allocate memory for a new rendering structure. */ static render_t * /* O - New render structure */ new_render(int page, /* I - Page number (0-n) */ int type, /* I - Type of render primitive */ double x, /* I - Horizontal position */ double y, /* I - Vertical position */ double width, /* I - Width */ double height, /* I - Height */ void *data, /* I - Data */ render_t *insert) /* I - Insert before here... */ { render_t *r; /* New render primitive */ size_t datalen = 0; /* Length of data */ static render_t dummy; /* Dummy var for errors... */ DEBUG_printf(("new_render(page=%d, type=%d, x=%.1f, y=%.1f, width=%.1f, height=%.1f, data=%p, insert=%p)\n", page, type, x, y, width, height, (void *)data, (void *)insert)); check_pages(page); if (page < 0 || page >= (int)alloc_pages) { progress_error(HD_ERROR_INTERNAL_ERROR, "Page number (%d) out of range (1...%d)\n", page + 1, (int)alloc_pages); memset(&dummy, 0, sizeof(dummy)); return (&dummy); } if ((type != RENDER_TEXT && type != RENDER_LINK) || data == NULL) r = (render_t *)calloc(sizeof(render_t), 1); else { datalen = strlen((char *)data); r = (render_t *)calloc(sizeof(render_t) + datalen, 1); } if (r == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory on page %d\n", (int)page + 1); memset(&dummy, 0, sizeof(dummy)); return (&dummy); } r->type = type; r->x = (float)x; r->y = (float)y; r->width = (float)width; r->height = (float)height; switch (type) { case RENDER_TEXT : if (data == NULL) { free(r); return (NULL); } // Safe because buffer is allocated... 
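// The copy below stays in bounds because r was allocated with
// calloc(sizeof(render_t) + datalen, 1) for text/link renders; the extra
// datalen bytes extend the trailing buffer member (assumed here to be a
// one-byte array at the end of render_t), and calloc's zero fill provides
// the NUL terminator.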
memcpy((char *)r->data.text.buffer, (char *)data, datalen); get_color(_htmlTextColor, r->data.text.rgb); break; case RENDER_IMAGE : if (data == NULL) { free(r); return (NULL); } r->data.image = (image_t *)data; break; case RENDER_BOX : memcpy(r->data.box, data, sizeof(r->data.box)); break; case RENDER_LINK : if (data == NULL) { free(r); return (NULL); } // Safe because buffer is allocated... memcpy((char *)r->data.link, (char *)data, datalen); break; } if (insert) { if (insert->prev) insert->prev->next = r; else pages[page].start = r; r->prev = insert->prev; r->next = insert; insert->prev = r; } else { if (pages[page].end != NULL) pages[page].end->next = r; else pages[page].start = r; r->next = NULL; r->prev = pages[page].end; pages[page].end = r; } DEBUG_printf((" returning r = %p\n", (void *)r)); return (r); } /* * 'check_pages()' - Allocate memory for more pages as needed... */ static void check_pages(int page) // I - Current page { page_t *temp; // Temporary page pointer DEBUG_printf(("check_pages(%d)\n", page)); // See if we need to allocate memory for the page... if (page >= (int)alloc_pages) { // Yes, allocate enough for ALLOC_PAGES more pages... while (page >= (int)alloc_pages) alloc_pages += ALLOC_PAGES; // Do the pages pointers... if (num_pages == 0) temp = (page_t *)malloc(sizeof(page_t) * alloc_pages); else temp = (page_t *)realloc(pages, sizeof(page_t) * alloc_pages); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d pages - %s", (int)alloc_pages, strerror(errno)); alloc_pages -= ALLOC_PAGES; return; } memset(temp + num_pages, 0, (alloc_pages - num_pages) * sizeof(page_t)); pages = temp; } // Initialize the page data as needed... for (temp = pages + num_pages; (int)num_pages <= page; num_pages ++, temp ++) { if (!temp->width) { if (num_pages == 0 || !temp[-1].width || !temp[-1].length || chapter == 0) { temp->width = PageWidth; temp->length = PageLength; temp->left = PageLeft; temp->right = PageRight; temp->top = PageTop; temp->bottom = PageBottom; temp->duplex = PageDuplex; temp->landscape = Landscape; temp->nup = NumberUp; } else { memcpy(temp, temp - 1, sizeof(page_t)); temp->start = NULL; temp->end = NULL; } temp->url = current_url; if (chapter == 0) { memcpy(temp->header, TocHeader, sizeof(temp->header)); memcpy(temp->footer, TocFooter, sizeof(temp->footer)); } else { memcpy(temp->header, Header, sizeof(temp->header)); memcpy(temp->header1, Header1, sizeof(temp->header1)); memcpy(temp->footer, Footer, sizeof(temp->footer)); if (current_heading != temp->headnode) { temp->heading = htmlGetText(current_heading); temp->headnode = current_heading; } } memcpy(temp->background_color, background_color, sizeof(temp->background_color)); temp->background_image = background_image; } } } /* * 'add_link()' - Add a named link... */ static void add_link(uchar *name, /* I - Name of link */ int page, /* I - Page # */ int top) /* I - Y position */ { link_t *temp; /* New name */ if (name == NULL) return; DEBUG_printf(("add_link(name=\"%s\", page=%d, top=%d)\n", name, page, top)); if ((temp = find_link(name)) != NULL) { temp->page = (short)page; temp->top = (short)top; } else { // See if we need to allocate memory for links... if (num_links >= alloc_links) { // Allocate more links... 
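// Grow the array in ALLOC_LINKS-sized chunks so repeated add_link() calls
// stay cheap; after the new entry is appended the array is re-sorted by
// name (see the qsort() below), which keeps the bsearch() in find_link()
// valid.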
alloc_links += ALLOC_LINKS; if (num_links == 0) temp = (link_t *)malloc(sizeof(link_t) * alloc_links); else temp = (link_t *)realloc(links, sizeof(link_t) * alloc_links); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d links - %s", (int)alloc_links, strerror(errno)); alloc_links -= ALLOC_LINKS; return; } links = temp; } // Add a new link... temp = links + num_links; num_links ++; strlcpy((char *)temp->name, (char *)name, sizeof(temp->name)); temp->page = (short)page; temp->top = (short)top; if (num_links > 1) qsort(links, num_links, sizeof(link_t), (compare_func_t)compare_links); } } /* * 'find_link()' - Find a named link... */ static link_t * find_link(uchar *name) /* I - Name to find */ { link_t key, /* Search key */ *match; /* Matching name entry */ if (name == NULL || num_links == 0) return (NULL); if (name[0] == '#') name ++; strlcpy((char *)key.name, (char *)name, sizeof(key.name)); match = (link_t *)bsearch(&key, links, num_links, sizeof(link_t), (compare_func_t)compare_links); return (match); } /* * 'compare_links()' - Compare two named links. */ static int /* O - 0 = equal, -1 or 1 = not equal */ compare_links(link_t *n1, /* I - First name */ link_t *n2) /* I - Second name */ { return (strcasecmp((char *)n1->name, (char *)n2->name)); } #ifdef TABLE_DEBUG # undef DEBUG_printf # undef DEBUG_puts # define DEBUG_printf(x) printf x # define DEBUG_puts(x) puts(x) #endif /* TABLE_DEBUG */ // // 'get_cell_size()' - Compute the minimum width of a cell. // static float // O - Required width of cell get_cell_size(tree_t *t, // I - Cell float left, // I - Left margin float right, // I - Right margin float *minwidth, // O - Minimum width float *prefwidth, // O - Preferred width float *minheight) // O - Minimum height { tree_t *temp, // Current tree entry *next; // Next tree entry uchar *var; // Attribute value int nowrap; // NOWRAP attribute? float width, // Width of cell frag_width, // Fragment required width frag_height, // Fragment height frag_pref, // Fragment preferred width frag_min, // Fragment minimum width minh, // Local minimum height minw, // Local minimum width prefw, // Local preferred width format_width; // Working format width for images DEBUG_printf(("get_cell_size(%p, %.1f, %.1f, %p, %p, %p)\n", (void *)t, left, right, (void *)minwidth, (void *)prefwidth, (void *)minheight)); // First see if the width has been specified for this cell... if ((var = htmlGetVariable(t, (uchar *)"WIDTH")) != NULL && (var[strlen((char *)var) - 1] != '%' || (right - left) > 0.0f)) { // Yes, use it! if (var[strlen((char *)var) - 1] == '%') width = (right - left) * atoi((char *)var) * 0.01f; else width = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else width = 0.0f; if ((format_width = right - left) <= 0.0f) format_width = PagePrintWidth; minw = 0.0f; prefw = 0.0f; // Then the height... if ((var = htmlGetVariable(t, (uchar *)"HEIGHT")) != NULL) { // Yes, use it! if (var[strlen((char *)var) - 1] == '%') minh = PagePrintLength * atoi((char *)var) * 0.01f; else minh = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else minh = 0.0f; nowrap = (htmlGetVariable(t, (uchar *)"NOWRAP") != NULL); DEBUG_printf(("nowrap = %d\n", nowrap)); for (temp = t->child, frag_width = 0.0f, frag_pref = 0.0f; temp != NULL; temp = next) { // Point to next markup, if any... next = temp->child; switch (temp->markup) { case MARKUP_TABLE : // Update widths... 
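// A nested table ends the current text fragment, so fold the fragment's
// running widths into minw/prefw first, then measure the table itself with
// get_table_size() and treat its minimum/preferred widths like any other
// unbreakable object in this cell.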
if (frag_pref > prefw) prefw = frag_pref; if (frag_width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for block...\n", frag_width, minw)); minw = frag_width; } if (nowrap && frag_pref > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for break...\n", frag_pref, minw)); minw = frag_pref; } // For nested tables, compute the width of the table. frag_width = get_table_size(temp, left, right, &frag_min, &frag_pref, &frag_height); if (frag_pref > prefw) prefw = frag_pref; if (frag_min > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for nested table...\n", frag_min, minw)); minw = frag_min; } frag_width = 0.0f; frag_pref = 0.0f; frag_min = 0.0f; next = NULL; break; case MARKUP_IMG : // Update the image width as needed... if (temp->markup == MARKUP_IMG) update_image_size(temp); case MARKUP_NONE : case MARKUP_SPACER : frag_height = temp->height; #ifdef TABLE_DEBUG2 if (temp->markup == MARKUP_NONE) printf("FRAG(%s) = %.1f\n", temp->data, temp->width); else if (temp->markup == MARKUP_SPACER) printf("SPACER = %.1f\n", temp->width); else printf("IMG(%s) = %.1f\n", htmlGetVariable(temp, (uchar *)"SRC"), temp->width); #endif // TABLE_DEBUG2 // Handle min/preferred widths separately... if (temp->width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for fragment...\n", temp->width, minw)); minw = temp->width; } if (temp->preformatted && temp->data != NULL && temp->data[strlen((char *)temp->data) - 1] == '\n') { // End of a line - check preferred width... frag_pref += temp->width + 1; if (frag_pref > prefw) prefw = frag_pref; if (temp->preformatted && frag_pref > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for preformatted...\n", frag_pref, minw)); minw = frag_pref; } frag_pref = 0.0f; } else if (temp->data != NULL) frag_pref += temp->width + 1; else if ((frag_pref + temp->width) > format_width) { // parse_paragraph() will force a break if (frag_pref > prefw) prefw = frag_pref; frag_pref = temp->width; } else frag_pref += temp->width; if (temp->preformatted && temp->data != NULL && temp->data[strlen((char *)temp->data) - 1] == '\n') { // Check required width... frag_width += temp->width + 1; if (frag_width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for block...\n", frag_width, minw)); minw = frag_width; } frag_width = 0.0f; } else if (!temp->preformatted && temp->data != NULL && (isspace(temp->data[0]) || (temp->data[0] && isspace(temp->data[strlen((char *)temp->data) - 1])))) { // Check required width... 
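// Leading or trailing whitespace in the fragment marks a break opportunity:
// the running frag_width is restarted (leading space) or flushed and reset
// (trailing space) so that minw only ever reflects the widest run of text
// that cannot be wrapped.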
if (isspace(temp->data[0])) frag_width = temp->width + 1; else frag_width += temp->width + 1; if (frag_width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for block...\n", frag_width, minw)); minw = frag_width; } if (!isspace(temp->data[0])) frag_width = 0.0f; DEBUG_printf(("frag_width=%.1f after whitespace processing...\n", frag_width)); } else if (temp->data != NULL) frag_width += temp->width + 1; else if ((frag_width + temp->width) > format_width) // parse_paragraph() will force a break frag_width = temp->width; else frag_width += temp->width; break; case MARKUP_ADDRESS : case MARKUP_BLOCKQUOTE : case MARKUP_BR : case MARKUP_CENTER : case MARKUP_DD : case MARKUP_DIV : case MARKUP_DT : case MARKUP_H1 : case MARKUP_H2 : case MARKUP_H3 : case MARKUP_H4 : case MARKUP_H5 : case MARKUP_H6 : case MARKUP_H7 : case MARKUP_H8 : case MARKUP_H9 : case MARKUP_H10 : case MARKUP_H11 : case MARKUP_H12 : case MARKUP_H13 : case MARKUP_H14 : case MARKUP_H15 : case MARKUP_HR : case MARKUP_LI : case MARKUP_P : case MARKUP_PRE : DEBUG_printf(("BREAK at %.1f\n", frag_pref)); if (frag_pref > prefw) prefw = frag_pref; if (frag_width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for block...\n", frag_width, minw)); minw = frag_width; } if (nowrap && frag_pref > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for break...\n", frag_pref, minw)); minw = frag_pref; } frag_pref = 0.0f; frag_width = 0.0f; default : frag_height = 0.0f; break; } // Update minimum height... if (frag_height > minh) minh = frag_height; // Update next pointer as needed... if (next == NULL) next = temp->next; if (next == NULL) { // This code is almost funny if you say it fast... :) for (next = temp->parent; next != NULL && next != t; next = next->parent) if (next->next != NULL) break; if (next == t) next = NULL; else if (next) next = next->next; } } // Check the last fragment's width... if (frag_pref > prefw) prefw = frag_pref; if (frag_width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for block...\n", frag_width, minw)); minw = frag_width; } // Handle the "NOWRAP" option... if (nowrap && prefw > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for NOWRAP...\n", prefw, minw)); minw = prefw; } // Return the required, minimum, and preferred size of the cell... *minwidth = minw; *prefwidth = prefw; *minheight = minh; DEBUG_printf(("get_cell_size(): width=%.1f, minw=%.1f, prefw=%.1f, minh=%.1f\n", width, minw, prefw, minh)); return (width); } // // 'get_table_size()' - Compute the minimum width of a table. 
// static float // O - Minimum width of table get_table_size(tree_t *t, // I - Table float left, // I - Left margin float right, // I - Right margin float *minwidth, // O - Minimum width float *prefwidth, // O - Preferred width float *minheight) // O - Minimum height { tree_t *temp, // Current tree entry *next; // Next tree entry uchar *var; // Attribute value float width, // Required width of table minw, // Minimum width of table minh, // Minimum height of table prefw, // Preferred width of table cell_width, // Cell required width cell_pref, // Cell preferred width cell_min, // Cell minimum width cell_height, // Cell minimum height row_width, // Row required width row_pref, // Row preferred width row_min, // Row minimum width row_height, // Row minimum height border, // Border around cells cellpadding, // Padding inside cells cellspacing; // Spacing around cells int columns, // Current number of columns max_columns, // Maximum columns rows; // Number of rows DEBUG_printf(("get_table_size(%p, %.1f, %.1f, %p, %p, %p)\n", (void *)t, left, right, (void *)minwidth, (void *)prefwidth, (void *)minheight)); // First see if the width has been specified for this table... if ((var = htmlGetVariable(t, (uchar *)"WIDTH")) != NULL && (var[strlen((char *)var) - 1] != '%' || (right - left) > 0.0f)) { // Yes, use it! if (var[strlen((char *)var) - 1] == '%') width = (right - left) * atoi((char *)var) * 0.01f; else width = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else width = 0.0f; minw = 0.0f; prefw = 0.0f; // Then the height... if ((var = htmlGetVariable(t, (uchar *)"HEIGHT")) != NULL) { // Yes, use it! if (var[strlen((char *)var) - 1] == '%') minh = PagePrintLength * atoi((char *)var) * 0.01f; else minh = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else minh = 0.0f; // Update the size as needed... for (temp = t->child, row_width = 0.0f, row_min = 0.0f, row_pref = 0.0f, row_height = 0.0f, columns = 0, rows = 0, max_columns = 0; temp != NULL; temp = next) { // Point to next markup, if any... next = temp->child; // Start a new row or add the cell width as needed... if (temp->markup == MARKUP_TR) { minh += row_height; row_width = 0.0f; row_pref = 0.0f; row_min = 0.0f; row_height = 0.0f; rows ++; columns = 0; } else if (temp->markup == MARKUP_TD || temp->markup == MARKUP_TH) { // Update columns... columns ++; if (columns > max_columns) max_columns = columns; // Get widths of cell... cell_width = get_cell_size(temp, left, right, &cell_min, &cell_pref, &cell_height); // Update row widths... row_width += cell_width; row_pref += cell_pref; row_min += cell_min; if (cell_height > row_height) row_height = cell_height; // Check current row widths against table... if (row_pref > prefw) prefw = row_pref; if (row_min > minw) minw = row_min; } // Update next pointer as needed... if (next == NULL) next = temp->next; if (next == NULL) { // This code is almost funny if you say it fast... :) for (next = temp->parent; next != NULL && next != t; next = next->parent) if (next->next != NULL) break; if (next == t) next = NULL; else if (next) next = next->next; } } // Make sure last row is counted in min height calcs. minh += row_height; // Add room for spacing and padding... 
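// Each column adds two cell paddings plus one cell spacing (minus the one
// extra spacing counted past the last column).  Worked example, assuming a
// 3-column table with no BORDER/CELLPADDING/CELLSPACING attributes:
// cellpadding starts at 1, gains the "invisible" 1-pixel border added below,
// and cellspacing stays 0, so the table gets 3 * (2*2 + 0) - 0 = 12 browser
// pixels of extra width, scaled to points by PagePrintWidth/_htmlBrowserWidth.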
if ((var = htmlGetVariable(t, (uchar *)"CELLPADDING")) != NULL) cellpadding = atoi((char *)var); else cellpadding = 1.0f; if ((var = htmlGetVariable(t, (uchar *)"CELLSPACING")) != NULL) cellspacing = atoi((char *)var); else cellspacing = 0.0f; if ((var = htmlGetVariable(t, (uchar *)"BORDER")) != NULL) { if ((border = (float)atof((char *)var)) == 0.0 && var[0] != '0') border = 1.0f; cellpadding += border; } else border = 0.0f; if (border == 0.0f && cellpadding > 0.0f) { /* * Ah, the strange table formatting nightmare that is HTML. * Netscape and MSIE assign an invisible border width of 1 * pixel if no border is specified... */ cellpadding += 1.0f; } cellspacing *= PagePrintWidth / _htmlBrowserWidth; cellpadding *= PagePrintWidth / _htmlBrowserWidth; DEBUG_printf(("ADDING %.1f for table space for %d columns...\n", max_columns * (2 * cellpadding + cellspacing) - cellspacing, max_columns)); if (width > 0.0f) width += max_columns * (2 * cellpadding + cellspacing) - cellspacing; minw += max_columns * (2 * cellpadding + cellspacing) - cellspacing; prefw += max_columns * (2 * cellpadding + cellspacing) - cellspacing; minh += rows * (2 * cellpadding + cellspacing) - cellspacing; // Return the required, minimum, and preferred size of the table... *minwidth = minw; *prefwidth = prefw; *minheight = minh; DEBUG_printf(("get_table_size(): width=%.1f, minw=%.1f, prefw=%.1f, minh=%.1f\n", width, minw, prefw, minh)); return (width); } #ifdef TABLE_DEBUG # undef DEBUG_printf # undef DEBUG_puts # define DEBUG_printf(x) # define DEBUG_puts(x) #endif /* TABLE_DEBUG */ /* * 'flatten_tree()' - Flatten an HTML tree to only include the text, image, * link, and break markups. */ static tree_t * /* O - Flattened markup tree */ flatten_tree(tree_t *t) /* I - Markup tree to flatten */ { tree_t *temp, /* New tree node */ *flat; /* Flattened tree */ flat = NULL; while (t != NULL) { switch (t->markup) { case MARKUP_NONE : if (t->data == NULL) break; case MARKUP_COMMENT : case MARKUP_BR : case MARKUP_SPACER : case MARKUP_IMG : temp = (tree_t *)calloc(sizeof(tree_t), 1); memcpy(temp, t, sizeof(tree_t)); temp->parent = NULL; temp->child = NULL; temp->prev = flat; temp->next = NULL; if (flat != NULL) flat->next = temp; flat = temp; if (temp->markup == MARKUP_IMG) update_image_size(temp); break; case MARKUP_A : if (htmlGetVariable(t, (uchar *)"NAME") != NULL) { temp = (tree_t *)calloc(sizeof(tree_t), 1); memcpy(temp, t, sizeof(tree_t)); temp->parent = NULL; temp->child = NULL; temp->prev = flat; temp->next = NULL; if (flat != NULL) flat->next = temp; flat = temp; } break; case MARKUP_P : case MARKUP_PRE : case MARKUP_H1 : case MARKUP_H2 : case MARKUP_H3 : case MARKUP_H4 : case MARKUP_H5 : case MARKUP_H6 : case MARKUP_H7 : case MARKUP_H8 : case MARKUP_H9 : case MARKUP_H10 : case MARKUP_H11 : case MARKUP_H12 : case MARKUP_H13 : case MARKUP_H14 : case MARKUP_H15 : case MARKUP_UL : case MARKUP_DIR : case MARKUP_MENU : case MARKUP_OL : case MARKUP_DL : case MARKUP_LI : case MARKUP_DD : case MARKUP_DT : case MARKUP_TR : case MARKUP_CAPTION : temp = (tree_t *)calloc(sizeof(tree_t), 1); temp->markup = MARKUP_BR; temp->parent = NULL; temp->child = NULL; temp->prev = flat; temp->next = NULL; if (flat != NULL) flat->next = temp; flat = temp; break; default : break; } if (t->child != NULL && t->markup != MARKUP_UNKNOWN) { temp = flatten_tree(t->child); if (temp != NULL) temp->prev = flat; if (flat != NULL) flat->next = temp; else flat = temp; } if (flat != NULL) while (flat->next != NULL) flat = flat->next; t = t->next; } if (flat == 
NULL) return (NULL); while (flat->prev != NULL) flat = flat->prev; return (flat); } /* * 'update_image_size()' - Update the size of an image based upon the * printable width. */ static void update_image_size(tree_t *t) /* I - Tree entry */ { image_t *img; /* Image file */ uchar *width, /* Width string */ *height; /* Height string */ width = htmlGetVariable(t, (uchar *)"WIDTH"); height = htmlGetVariable(t, (uchar *)"HEIGHT"); if (width != NULL && height != NULL) { if (width[strlen((char *)width) - 1] == '%') t->width = (float)(atof((char *)width) * PagePrintWidth / 100.0f); else t->width = (float)(atoi((char *)width) * PagePrintWidth / _htmlBrowserWidth); if (height[strlen((char *)height) - 1] == '%') t->height = (float)(atof((char *)height) * PagePrintWidth / 100.0f); else t->height = (float)(atoi((char *)height) * PagePrintWidth / _htmlBrowserWidth); return; } img = image_find((char *)htmlGetVariable(t, (uchar *)"REALSRC")); if (img == NULL) return; if (width != NULL) { if (width[strlen((char *)width) - 1] == '%') t->width = (float)(atof((char *)width) * PagePrintWidth / 100.0f); else t->width = (float)(atoi((char *)width) * PagePrintWidth / _htmlBrowserWidth); t->height = t->width * img->height / img->width; } else if (height != NULL) { if (height[strlen((char *)height) - 1] == '%') t->height = (float)(atof((char *)height) * PagePrintWidth / 100.0f); else t->height = (float)(atoi((char *)height) * PagePrintWidth / _htmlBrowserWidth); t->width = t->height * img->width / img->height; } else { t->width = (float)(img->width * PagePrintWidth / _htmlBrowserWidth); t->height = (float)(img->height * PagePrintWidth / _htmlBrowserWidth); } } /* * 'get_width()' - Get the width of a string in points. */ static float /* O - Width in points */ get_width(uchar *s, /* I - String to scan */ int typeface, /* I - Typeface code */ int style, /* I - Style code */ int size) /* I - Size */ { uchar *ptr; /* Current character */ int width; /* Current width */ DEBUG_printf(("get_width(\"%s\", %d, %d, %d)\n", s == NULL ? "(null)" : (const char *)s, typeface, style, size)); if (s == NULL) return (0.0); if (!_htmlWidthsLoaded[typeface][style]) htmlLoadFontWidths(typeface, style); for (width = 0, ptr = s; *ptr != '\0'; ptr ++) width += _htmlWidths[typeface][style][*ptr]; return (width * _htmlSizes[size] * 0.001f); } /* * 'get_title()' - Get the title string for a document. */ static uchar * /* O - Title string */ get_title(tree_t *doc) /* I - Document */ { uchar *temp; while (doc != NULL) { if (doc->markup == MARKUP_TITLE) return (htmlGetText(doc->child)); else if (doc->child != NULL) if ((temp = get_title(doc->child)) != NULL) return (temp); doc = doc->next; } return (NULL); } /* * 'open_file()' - Open an output file for the current chapter. */ static FILE * /* O - File pointer */ open_file(void) { char filename[255]; /* Filename */ if (OutputFiles && PSLevel > 0) { if (chapter == -1) snprintf(filename, sizeof(filename), "%s/cover.ps", OutputPath); else if (chapter == 0) snprintf(filename, sizeof(filename), "%s/contents.ps", OutputPath); else snprintf(filename, sizeof(filename), "%s/doc%d.ps", OutputPath, chapter); return (fopen(filename, "wb+")); } else if (OutputFiles) { snprintf(filename, sizeof(filename), "%s/doc.pdf", OutputPath); return (fopen(filename, "wb+")); } else if (OutputPath[0] != '\0') return (fopen(OutputPath, "wb+")); else if (PSLevel == 0) return (file_temp(stdout_filename, sizeof(stdout_filename))); else return (stdout); } /* * 'set_color()' - Set the current text color... 
*/ static void set_color(FILE *out, /* I - File to write to */ float *rgb) /* I - RGB color */ { if (rgb[0] == render_rgb[0] && rgb[1] == render_rgb[1] && rgb[2] == render_rgb[2]) return; render_rgb[0] = rgb[0]; render_rgb[1] = rgb[1]; render_rgb[2] = rgb[2]; if (OutputColor) { // Output RGB color... if (PSLevel > 0) fprintf(out, "%.2f %.2f %.2f C ", rgb[0], rgb[1], rgb[2]); else flate_printf(out, "%.2f %.2f %.2f rg ", rgb[0], rgb[1], rgb[2]); } else { // Output grayscale... if (PSLevel > 0) fprintf(out, "%.2f G ", rgb[0] * 0.31f + rgb[1] * 0.61f + rgb[2] * 0.08f); else flate_printf(out, "%.2f g ", rgb[0] * 0.31f + rgb[1] * 0.61f + rgb[2] * 0.08f); } } /* * 'set_font()' - Set the current text font. */ static void set_font(FILE *out, /* I - File to write to */ int typeface, /* I - Typeface code */ int style, /* I - Style code */ float size) /* I - Size */ { char sizes[255], /* Formatted string for size... */ *s; /* Pointer to end of string */ if (typeface == render_typeface && style == render_style && size == render_size) return; /* * Format size and strip trailing 0's and decimals... */ snprintf(sizes, sizeof(sizes), "%.1f", size); for (s = sizes + strlen(sizes) - 1; s > sizes && *s == '0'; s --) *s = '\0'; if (*s == '.') *s = '\0'; /* * Set the new typeface, style, and size. */ if (PSLevel > 0) { if (size != render_size) fprintf(out, "%s FS", sizes); fprintf(out, "/F%x SF ", typeface * 4 + style); } else flate_printf(out, "/F%x %s Tf ", typeface * 4 + style, sizes); render_typeface = typeface; render_style = style; render_size = size; } /* * 'set_pos()' - Set the current text position. */ static void set_pos(FILE *out, /* I - File to write to */ float x, /* I - X position */ float y) /* I - Y position */ { char xs[255], /* Formatted string for X... */ ys[255], /* Formatted string for Y... */ *s; /* Pointer to end of string */ if (fabs(render_x - x) < 0.1 && fabs(render_y - y) < 0.1) return; /* * Format X and Y... */ if (PSLevel > 0 || render_x == -1.0) { snprintf(xs, sizeof(xs), "%.3f", x); snprintf(ys, sizeof(ys), "%.3f", y); } else { snprintf(xs, sizeof(xs), "%.3f", x - render_startx); snprintf(ys, sizeof(ys), "%.3f", y - render_y); } /* * Strip trailing 0's and decimals... */ for (s = xs + strlen(xs) - 1; s > xs && *s == '0'; s --) *s = '\0'; if (*s == '.') *s = '\0'; for (s = ys + strlen(ys) - 1; s > ys && *s == '0'; s --) *s = '\0'; if (*s == '.') *s = '\0'; if (PSLevel > 0) fprintf(out, "%s %s M", xs, ys); else flate_printf(out, "%s %s Td", xs, ys); render_x = render_startx = x; render_y = y; } /* * 'ps_hex()' - Print binary data as a series of hexadecimal numbers. */ static void ps_hex(FILE *out, /* I - File to print to */ uchar *data, /* I - Data to print */ int length) /* I - Number of bytes to print */ { int col; static const char *hex = "0123456789ABCDEF"; col = 0; while (length > 0) { /* * Put the hex uchars out to the file; note that we don't use fprintf() * for speed reasons... */ putc(hex[*data >> 4], out); putc(hex[*data & 15], out); data ++; length --; col = (col + 1) % 40; if (col == 0) putc('\n', out); } if (col > 0) putc('\n', out); } #ifdef HTMLDOC_ASCII85 /* * 'ps_ascii85()' - Print binary data as a series of base-85 numbers. 
*/ static void ps_ascii85(FILE *out, /* I - File to print to */ uchar *data, /* I - Data to print */ int length, /* I - Number of bytes to print */ int eod) /* I - 1 = end-of-data */ { unsigned b = 0; /* Current 32-bit word */ uchar c[5]; /* Base-85 encoded characters */ static int col = 0; /* Column */ static uchar leftdata[4]; /* Leftover data at the end */ static int leftcount = 0; /* Size of leftover data */ length += leftcount; while (length > 3) { switch (leftcount) { case 0 : b = (unsigned)((((((data[0] << 8) | data[1]) << 8) | data[2]) << 8) | data[3]); break; case 1 : b = (unsigned)((((((leftdata[0] << 8) | data[0]) << 8) | data[1]) << 8) | data[2]); break; case 2 : b = (unsigned)((((((leftdata[0] << 8) | leftdata[1]) << 8) | data[0]) << 8) | data[1]); break; case 3 : b = (unsigned)((((((leftdata[0] << 8) | leftdata[1]) << 8) | leftdata[2]) << 8) | data[0]); break; } if (col >= 76) { col = 0; putc('\n', out); } if (b == 0) { putc('z', out); col ++; } else { c[4] = (b % 85) + '!'; b /= 85; c[3] = (b % 85) + '!'; b /= 85; c[2] = (b % 85) + '!'; b /= 85; c[1] = (b % 85) + '!'; b /= 85; c[0] = (uchar)(b + '!'); fwrite(c, 1, 5, out); col += 5; } data += 4 - leftcount; length -= 4 - leftcount; leftcount = 0; } if (length > 0) { // Copy any remainder into the leftdata array... if ((length - leftcount) > 0) memcpy(leftdata + leftcount, data, (size_t)(length - leftcount)); memset(leftdata + length, 0, (size_t)(4 - length)); leftcount = length; } if (eod) { // Do the end-of-data dance... if (col >= 76) { col = 0; putc('\n', out); } if (leftcount > 0) { // Write the remaining bytes as needed... b = (unsigned)((((((leftdata[0] << 8) | leftdata[1]) << 8) | leftdata[2]) << 8) | leftdata[3]); c[4] = (b % 85) + '!'; b /= 85; c[3] = (b % 85) + '!'; b /= 85; c[2] = (b % 85) + '!'; b /= 85; c[1] = (b % 85) + '!'; b /= 85; c[0] = (uchar)(b + '!'); fwrite(c, (size_t)(leftcount + 1), 1, out); leftcount = 0; } fputs("~>\n", out); col = 0; } } #endif // HTMLDOC_ASCII85 /* * JPEG library destination data manager. These routines direct * compressed data from libjpeg into the PDF or PostScript file. */ static FILE *jpg_file; /* JPEG file */ static uchar jpg_buf[8192]; /* JPEG buffer */ static jpeg_destination_mgr jpg_dest; /* JPEG destination manager */ static struct jpeg_error_mgr jerr; /* JPEG error handler */ /* * 'jpg_init()' - Initialize the JPEG destination. */ static void jpg_init(j_compress_ptr cinfo) /* I - Compressor info */ { (void)cinfo; jpg_dest.next_output_byte = jpg_buf; jpg_dest.free_in_buffer = sizeof(jpg_buf); } /* * 'jpg_empty()' - Empty the JPEG output buffer. */ static boolean /* O - True if buffer written OK */ jpg_empty(j_compress_ptr cinfo) /* I - Compressor info */ { (void)cinfo; if (PSLevel > 0) #ifdef HTMLDOC_ASCII85 ps_ascii85(jpg_file, jpg_buf, sizeof(jpg_buf)); #else ps_hex(jpg_file, jpg_buf, sizeof(jpg_buf)); #endif // HTMLDOC_ASCII85 else flate_write(jpg_file, jpg_buf, sizeof(jpg_buf)); jpg_dest.next_output_byte = jpg_buf; jpg_dest.free_in_buffer = sizeof(jpg_buf); return (TRUE); } /* * 'jpg_term()' - Write the last JPEG data to the file. */ static void jpg_term(j_compress_ptr cinfo) /* I - Compressor info */ { int nbytes; /* Number of bytes to write */ (void)cinfo; nbytes = sizeof(jpg_buf) - jpg_dest.free_in_buffer; if (PSLevel > 0) #ifdef HTMLDOC_ASCII85 ps_ascii85(jpg_file, jpg_buf, nbytes); #else ps_hex(jpg_file, jpg_buf, nbytes); #endif // HTMLDOC_ASCII85 else flate_write(jpg_file, jpg_buf, nbytes); } /* * 'jpg_setup()' - Setup the JPEG compressor for writing an image. 
*/ static void jpg_setup(FILE *out, /* I - Output file */ image_t *img, /* I - Output image */ j_compress_ptr cinfo) /* I - Compressor info */ { int i; // Looping var jpg_file = out; cinfo->err = jpeg_std_error(&jerr); jpeg_create_compress(cinfo); cinfo->dest = &jpg_dest; jpg_dest.init_destination = jpg_init; jpg_dest.empty_output_buffer = jpg_empty; jpg_dest.term_destination = jpg_term; cinfo->image_width = (JDIMENSION)img->width; cinfo->image_height = (JDIMENSION)img->height; cinfo->input_components = img->depth; cinfo->in_color_space = img->depth == 1 ? JCS_GRAYSCALE : JCS_RGB; jpeg_set_defaults(cinfo); jpeg_set_quality(cinfo, OutputJPEG, TRUE); // Update things when writing to PS files... if (PSLevel) { // Adobe uses sampling == 1 for (i = 0; i < img->depth; i ++) { cinfo->comp_info[i].h_samp_factor = 1; cinfo->comp_info[i].v_samp_factor = 1; } } cinfo->write_JFIF_header = FALSE; cinfo->write_Adobe_marker = TRUE; jpeg_start_compress(cinfo, TRUE); } /* * 'compare_rgb()' - Compare two RGB colors... */ static int /* O - -1 if rgb1<rgb2, etc. */ compare_rgb(unsigned *rgb1, /* I - First color */ unsigned *rgb2) /* I - Second color */ { return ((int)*rgb1 - (int)*rgb2); } /* * 'write_image()' - Write an image to the given output file... */ static void write_image(FILE *out, /* I - Output file */ render_t *r, /* I - Image to write */ int write_obj) /* I - Write an object? */ { int i, j, k, m, /* Looping vars */ ncolors; /* Number of colors */ uchar *pixel, /* Current pixel */ *indices, /* New indexed pixel array */ *indptr; /* Current index */ int indwidth, /* Width of indexed line */ indbits; /* Bits per index */ int max_colors; /* Max colors to use */ unsigned colors[256], /* Colormap values */ key, /* Color key */ *match; /* Matching color value */ uchar grays[256], /* Grayscale usage */ cmap[256][3]; /* Colormap */ image_t *img; /* Image */ struct jpeg_compress_struct cinfo; /* JPEG compressor */ uchar *data, /* PS Level 3 image data */ *dataptr, /* Pointer into image data */ *maskptr; /* Pointer into mask data */ /* * See if we can optimize the image as indexed without color loss... */ img = r->data.image; ncolors = 0; indices = NULL; indwidth = 0; if (!img->pixels && !img->obj) image_load(img->filename, !OutputColor, 1); // Note: Acrobat 6 tries to decrypt the colormap of indexed in-line images twice, which // is 1) not consistent with prior Acrobat releases and 2) in violation of their // PDF spec. The "img->use > 1 || !Encryption" test prevents the use of indexed // in-line images when encryption is enabled. // // We are filing a bug on this with Adobe, but if history is any indicator, we are // stuck with this workaround forever... if (PSLevel != 1 && PDFVersion >= 12 && img->obj == 0 && (img->use > 1 || !Encryption)) { if (img->depth == 1) { /* * Greyscale image... */ memset(grays, 0, sizeof(grays)); for (i = img->width * img->height, pixel = img->pixels; i > 0; i --, pixel ++) if (!grays[*pixel]) { if (ncolors >= 16) break; grays[*pixel] = 1; ncolors ++; } if (i == 0) { for (i = 0, j = 0; i < 256; i ++) if (grays[i]) { colors[j] = (unsigned)((((i << 8) | i) << 8) | i); grays[i] = (uchar)j; j ++; } } else ncolors = 0; } else { /* * Color image... 
*/ if (OutputJPEG && !Compression) max_colors = 16; else max_colors = 256; for (i = img->width * img->height, pixel = img->pixels, match = NULL; i > 0; i --, pixel += 3) { key = (unsigned)((((pixel[0] << 8) | pixel[1]) << 8) | pixel[2]); if (!match || *match != key) { if (ncolors > 0) match = (unsigned *)bsearch(&key, colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); else match = NULL; } if (match == NULL) { if (ncolors >= max_colors) break; colors[ncolors] = key; ncolors ++; if (ncolors > 1) qsort(colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); } } if (i > 0) ncolors = 0; } } if (ncolors > 0) { if (PSLevel == 3 && img->mask) indbits = 8; else if (ncolors <= 2) indbits = 1; else if (ncolors <= 4) indbits = 2; else if (ncolors <= 16) indbits = 4; else indbits = 8; indwidth = (img->width * indbits + 7) / 8; indices = (uchar *)calloc((size_t)indwidth, (size_t)(img->height + 1)); // height + 1 for PS odd-row-count bug if (img->depth == 1) { /* * Convert a grayscale image... */ switch (indbits) { case 1 : for (i = img->height, pixel = img->pixels, indptr = indices; i > 0; i --) { for (j = img->width, k = 7; j > 0; j --, k = (k + 7) & 7, pixel ++) switch (k) { case 7 : *indptr = (uchar)(grays[*pixel] << 7); break; default : *indptr |= (uchar)(grays[*pixel] << k); break; case 0 : *indptr++ |= (uchar)grays[*pixel]; break; } if (k != 7) indptr ++; } break; case 2 : for (i = img->height, pixel = img->pixels, indptr = indices; i > 0; i --) { for (j = img->width, k = 0; j > 0; j --, k = (k + 1) & 3, pixel ++) switch (k) { case 0 : *indptr = (uchar)(grays[*pixel] << 6); break; case 1 : *indptr |= (uchar)(grays[*pixel] << 4); break; case 2 : *indptr |= (uchar)(grays[*pixel] << 2); break; case 3 : *indptr++ |= (uchar)grays[*pixel]; break; } if (k) indptr ++; } break; case 4 : for (i = img->height, pixel = img->pixels, indptr = indices; i > 0; i --) { for (j = img->width, k = 0; j > 0; j --, k ^= 1, pixel ++) if (k) *indptr++ |= grays[*pixel]; else *indptr = (uchar)(grays[*pixel] << 4); if (k) indptr ++; } break; } } else { /* * Convert a color image... 
*/ switch (indbits) { case 1 : for (i = img->height, pixel = img->pixels, indptr = indices, match = colors; i > 0; i --) { for (j = img->width, k = 7; j > 0; j --, k = (k + 7) & 7, pixel += 3) { key = (unsigned)((((pixel[0] << 8) | pixel[1]) << 8) | pixel[2]); if (*match != key) match = (unsigned *)bsearch(&key, colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); m = match - colors; switch (k) { case 7 : *indptr = (uchar)(m << 7); break; default : *indptr |= (uchar)(m << k); break; case 0 : *indptr++ |= (uchar)m; break; } } if (k != 7) indptr ++; } break; case 2 : for (i = img->height, pixel = img->pixels, indptr = indices, match = colors; i > 0; i --) { for (j = img->width, k = 0; j > 0; j --, k = (k + 1) & 3, pixel += 3) { key = (unsigned)((((pixel[0] << 8) | pixel[1]) << 8) | pixel[2]); if (*match != key) match = (unsigned *)bsearch(&key, colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); m = match - colors; switch (k) { case 0 : *indptr = (uchar)(m << 6); break; case 1 : *indptr |= (uchar)(m << 4); break; case 2 : *indptr |= (uchar)(m << 2); break; case 3 : *indptr++ |= (uchar)m; break; } } if (k) indptr ++; } break; case 4 : for (i = img->height, pixel = img->pixels, indptr = indices, match = colors; i > 0; i --) { for (j = img->width, k = 0; j > 0; j --, k ^= 1, pixel += 3) { key = (unsigned)((((pixel[0] << 8) | pixel[1]) << 8) | pixel[2]); if (*match != key) match = (unsigned *)bsearch(&key, colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); m = match - colors; if (k) *indptr++ |= (uchar)m; else *indptr = (uchar)(m << 4); } if (k) indptr ++; } break; case 8 : for (i = img->height, pixel = img->pixels, indptr = indices, match = colors; i > 0; i --) { for (j = img->width; j > 0; j --, pixel += 3, indptr ++) { key = (unsigned)((((pixel[0] << 8) | pixel[1]) << 8) | pixel[2]); if (*match != key) match = (unsigned *)bsearch(&key, colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); *indptr = (uchar)(match - colors); } } break; } } } else indbits = 8; if (ncolors == 1) { /* * Adobe doesn't like 1 color images... */ ncolors = 2; colors[1] = 0; } /* * Now write the image... */ switch (PSLevel) { case 0 : /* PDF */ if (!write_obj) flate_printf(out, "q %.1f 0 0 %.1f %.1f %.1f cm\n", r->width, r->height, r->x, r->y); if (img->obj) { if (img->mask && PDFVersion < 13) write_imagemask(out, r); flate_printf(out, "/I%d Do Q\n", img->obj); break; } if (img->mask && write_obj && PDFVersion >= 13) { // We have a mask image, write it! pdf_start_object(out); fputs("/Type/XObject/Subtype/Image", out); fputs("/ColorSpace/DeviceGray", out); if (img->maskscale == 8) fprintf(out, "/Width %d/Height %d/BitsPerComponent 8", img->width, img->height); else fprintf(out, "/Width %d/Height %d/BitsPerComponent 1/ImageMask true", img->width * img->maskscale, img->height * img->maskscale); if (Compression) fputs("/Filter/FlateDecode", out); pdf_start_stream(out); flate_open_stream(out); if (img->maskscale == 8) flate_write(out, img->mask, img->width * img->height); else flate_write(out, img->mask, img->maskwidth * img->height * img->maskscale); flate_close_stream(out); pdf_end_object(out); } if (write_obj) { // Write an image object... 
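// Write the pixels as a reusable Image XObject: pdf_start_object() opens the
// dictionary, the stream carries the (optionally Flate/DCT-encoded) samples,
// and later draws reference the object by number via the "/I<obj> Do"
// sequence emitted in the PDF branch above, inside a q ... cm ... Q block
// that maps the unit square onto the render rectangle.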
img->obj = pdf_start_object(out); fputs("/Type/XObject/Subtype/Image", out); if (img->mask && PDFVersion >= 13) { if (img->maskscale == 8) fprintf(out, "/SMask %d 0 R", img->obj - 1); else fprintf(out, "/Mask %d 0 R", img->obj - 1); } if (ncolors > 0) { for (i = 0; i < ncolors; i ++) { cmap[i][0] = (uchar)(colors[i] >> 16); cmap[i][1] = (uchar)(colors[i] >> 8); cmap[i][2] = (uchar)colors[i]; } if (Encryption) { // Encrypt the colormap... encrypt_init(); rc4_encrypt(&encrypt_state, cmap[0], cmap[0], (unsigned)(ncolors * 3)); } fprintf(out, "/ColorSpace[/Indexed/DeviceRGB %d<", ncolors - 1); for (i = 0; i < ncolors; i ++) fprintf(out, "%02X%02X%02X", cmap[i][0], cmap[i][1], cmap[i][2]); fputs(">]", out); } else if (img->depth == 1) fputs("/ColorSpace/DeviceGray", out); else fputs("/ColorSpace/DeviceRGB", out); #ifdef HTMLDOC_INTERPOLATION if (ncolors != 2) fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION if (Compression && (ncolors || !OutputJPEG)) fputs("/Filter/FlateDecode", out); else if (OutputJPEG && ncolors == 0) { if (Compression) fputs("/Filter[/FlateDecode/DCTDecode]", out); else fputs("/Filter/DCTDecode", out); } fprintf(out, "/Width %d/Height %d/BitsPerComponent %d", img->width, img->height, indbits); pdf_start_stream(out); flate_open_stream(out); if (OutputJPEG && ncolors == 0) { jpg_setup(out, img, &cinfo); for (i = img->height, pixel = img->pixels; i > 0; i --, pixel += img->width * img->depth) jpeg_write_scanlines(&cinfo, &pixel, 1); jpeg_finish_compress(&cinfo); jpeg_destroy_compress(&cinfo); } else { if (ncolors > 0) flate_write(out, indices, indwidth * img->height); else flate_write(out, img->pixels, img->width * img->height * img->depth); } flate_close_stream(out); pdf_end_object(out); } else { // Put the image in-line... 
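// In-line images use the PDF BI ... ID ... EI operators with abbreviated
// dictionary keys (/W, /H, /BPC, /CS, /F), so the pixel data is embedded
// directly in the page's content stream instead of in a separate XObject.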
flate_puts("BI", out); if (ncolors > 0) { flate_printf(out, "/CS[/I/RGB %d<", ncolors - 1); for (i = 0; i < ncolors; i ++) flate_printf(out, "%02X%02X%02X", colors[i] >> 16, (colors[i] >> 8) & 255, colors[i] & 255); flate_puts(">]", out); } else if (img->depth == 1) flate_puts("/CS/G", out); else flate_puts("/CS/RGB", out); if (ncolors != 2) flate_puts("/I true", out); flate_printf(out, "/W %d/H %d/BPC %d", img->width, img->height, indbits); if (ncolors > 0) { flate_puts(" ID\n", out); flate_write(out, indices, indwidth * img->height, 1); } else if (OutputJPEG) { flate_puts("/F/DCT ID\n", out); jpg_setup(out, img, &cinfo); for (i = img->height, pixel = img->pixels; i > 0; i --, pixel += img->width * img->depth) jpeg_write_scanlines(&cinfo, &pixel, 1); jpeg_finish_compress(&cinfo); jpeg_destroy_compress(&cinfo); } else { flate_puts(" ID\n", out); flate_write(out, img->pixels, img->width * img->height * img->depth, 1); } flate_write(out, (uchar *)"\nEI\nQ\n", 6, 1); } break; case 1 : /* PostScript, Level 1 */ fputs("GS", out); fprintf(out, "[%.1f 0 0 %.1f %.1f %.1f]CM", r->width, r->height, r->x, r->y); if (img->mask) write_imagemask(out, r); fprintf(out, "/picture %d string def\n", img->width * img->depth); if (img->depth == 1) fprintf(out, "%d %d 8 [%d 0 0 %d 0 %d] {currentfile picture readhexstring pop} image\n", img->width, img->height, img->width, -img->height, img->height); else fprintf(out, "%d %d 8 [%d 0 0 %d 0 %d] {currentfile picture readhexstring pop} false 3 colorimage\n", img->width, img->height, img->width, -img->height, img->height); ps_hex(out, img->pixels, img->width * img->height * img->depth); fputs("GR\n", out); break; case 3 : /* PostScript, Level 3 */ // Fallthrough to Level 2 output if compression is disabled and // we aren't doing transparency... if ((Compression && (!OutputJPEG || ncolors > 0)) || (img->mask && img->maskscale == 8)) { fputs("GS", out); fprintf(out, "[%.1f 0 0 %.1f %.1f %.1f]CM", r->width, r->height, r->x, r->y); if (img->mask && img->maskscale != 8) write_imagemask(out, r); if (ncolors > 0) { if (ncolors <= 2) ncolors = 2; /* Adobe doesn't like 1 color images... 
*/ fprintf(out, "[/Indexed/DeviceRGB %d\n<", ncolors - 1); for (i = 0; i < ncolors; i ++) { fprintf(out, "%02X%02X%02X", colors[i] >> 16, (colors[i] >> 8) & 255, colors[i] & 255); if ((i % 13) == 12) putc('\n', out); } fputs(">]setcolorspace\n", out); if (img->mask && img->maskscale == 8) fprintf(out, "<<" "/ImageType 3" "/InterleaveType 1" "/MaskDict<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent 8" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[0 1]" ">>\n" "/DataDict", img->width, img->height, img->width, -img->height, img->height); fprintf(out, "<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent %d" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[0 %d]", img->width, img->height, indbits, img->width, -img->height, img->height, (1 << indbits) - 1); #ifdef HTMLDOC_INTERPOLATION if (ncolors != 2) fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION #ifdef HTMLDOC_ASCII85 fputs("/DataSource currentfile/ASCII85Decode filter", out); #else fputs("/DataSource currentfile/ASCIIHexDecode filter", out); #endif // HTMLDOC_ASCII85 if (Compression) fputs("/FlateDecode filter", out); fputs(">>\n", out); if (img->mask && img->maskscale == 8) fputs(">>\n", out); fputs("image\n", out); flate_open_stream(out); if (img->mask && img->maskscale == 8) { data = (uchar *)malloc((size_t)(img->width * 2)); for (i = 0, maskptr = img->mask, indptr = indices; i < img->height; i ++) { for (j = img->width, dataptr = data; j > 0; j --) { *dataptr++ = *maskptr++; *dataptr++ = *indptr++; } flate_write(out, data, img->width * 2); } free(data); } else flate_write(out, indices, indwidth * img->height); flate_close_stream(out); } else { if (img->depth == 1) fputs("/DeviceGray setcolorspace", out); else fputs("/DeviceRGB setcolorspace", out); if (img->mask && img->maskscale == 8) fprintf(out, "<<" "/ImageType 3" "/InterleaveType 1" "/MaskDict<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent 8" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[0 1]" ">>\n" "/DataDict", img->width, img->height, img->width, -img->height, img->height); fprintf(out, "<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent 8" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[%s]", img->width, img->height, img->width, -img->height, img->height, img->depth == 1 ? 
"0 1" : "0 1 0 1 0 1"); #ifdef HTMLDOC_INTERPOLATION fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION #ifdef HTMLDOC_ASCII85 fputs("/DataSource currentfile/ASCII85Decode filter", out); #else fputs("/DataSource currentfile/ASCIIHexDecode filter", out); #endif // HTMLDOC_ASCII85 if (Compression) fputs("/FlateDecode filter", out); fputs(">>\n", out); if (img->mask && img->maskscale == 8) fputs(">>\n", out); fputs("image\n", out); flate_open_stream(out); if (img->mask && img->maskscale == 8) { data = (uchar *)malloc((size_t)(img->width * (img->depth + 1))); for (i = 0, maskptr = img->mask, pixel = img->pixels; i < img->height; i ++) { if (img->depth == 1) { for (j = img->width, dataptr = data; j > 0; j --) { *dataptr++ = *maskptr++; *dataptr++ = *pixel++; } } else { for (j = img->width, dataptr = data; j > 0; j --) { *dataptr++ = *maskptr++; *dataptr++ = *pixel++; *dataptr++ = *pixel++; *dataptr++ = *pixel++; } } flate_write(out, data, img->width * (img->depth + 1)); } free(data); } else flate_write(out, img->pixels, img->width * img->height * img->depth); flate_close_stream(out); } fputs("GR\n", out); break; } case 2 : /* PostScript, Level 2 */ fputs("GS", out); fprintf(out, "[%.1f 0 0 %.1f %.1f %.1f]CM", r->width, r->height, r->x, r->y); if (img->mask) write_imagemask(out, r); if (ncolors > 0) { fprintf(out, "[/Indexed/DeviceRGB %d\n<", ncolors - 1); for (i = 0; i < ncolors; i ++) { fprintf(out, "%02X%02X%02X", colors[i] >> 16, (colors[i] >> 8) & 255, colors[i] & 255); if ((i % 13) == 12) putc('\n', out); } fputs(">]setcolorspace\n", out); fprintf(out, "<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent %d" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[0 %d]", img->width, img->height, indbits, img->width, -img->height, img->height, (1 << indbits) - 1); #ifdef HTMLDOC_INTERPOLATION if (ncolors != 2) fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION #ifdef HTMLDOC_ASCII85 fputs("/DataSource currentfile/ASCII85Decode filter>>image\n", out); ps_ascii85(out, indices, indwidth * img->height, 1); #else fputs("/DataSource currentfile/ASCIIHexDecode filter>>image\n", out); ps_hex(out, indices, indwidth * img->height); // End of data marker... fputs(">\n", out); #endif /* HTMLDOC_ASCII85 */ } else if (OutputJPEG) { if (img->depth == 1) fputs("/DeviceGray setcolorspace\n", out); else fputs("/DeviceRGB setcolorspace\n", out); fprintf(out, "<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent 8" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[%s]", img->width, img->height, img->width, -img->height, img->height, img->depth == 1 ? "0 1" : "0 1 0 1 0 1"); #ifdef HTMLDOC_INTERPOLATION fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION #ifdef HTMLDOC_ASCII85 fputs("/DataSource currentfile/ASCII85Decode filter/DCTDecode filter" ">>image\n", out); #else fputs("/DataSource currentfile/ASCIIHexDecode filter/DCTDecode filter" ">>image\n", out); #endif // HTMLDOC_ASCII85 jpg_setup(out, img, &cinfo); for (i = img->height, pixel = img->pixels; i > 0; i --, pixel += img->width * img->depth) jpeg_write_scanlines(&cinfo, &pixel, 1); jpeg_finish_compress(&cinfo); jpeg_destroy_compress(&cinfo); #ifdef HTMLDOC_ASCII85 ps_ascii85(out, (uchar *)"", 0, 1); #else // End of data marker... 
fputs(">\n", out); #endif // HTMLDOC_ASCII85 } else { if (img->depth == 1) fputs("/DeviceGray setcolorspace\n", out); else fputs("/DeviceRGB setcolorspace\n", out); fprintf(out, "<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent 8" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[%s]", img->width, img->height, img->width, -img->height, img->height, img->depth == 1 ? "0 1" : "0 1 0 1 0 1"); #ifdef HTMLDOC_INTERPOLATION fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION #ifdef HTMLDOC_ASCII85 fputs("/DataSource currentfile/ASCII85Decode filter" ">>image\n", out); ps_ascii85(out, img->pixels, img->width * img->height * img->depth, 1); #else fputs("/DataSource currentfile/ASCIIHexDecode filter" ">>image\n", out); ps_hex(out, img->pixels, img->width * img->depth * img->height); // End of data marker... fputs(">\n", out); #endif // HTMLDOC_ASCII85 } fputs("GR\n", out); break; } if (ncolors > 0) free(indices); image_unload(img); } /* * 'write_imagemask()' - Write an imagemask to the output file... */ static void write_imagemask(FILE *out, /* I - Output file */ render_t *r) /* I - Image to write */ { image_t *img; /* Current image */ int x, y; /* Position in mask image */ int startx, count; /* Start and count */ uchar *ptr, /* Pointer into mask image */ byte, /* Current byte */ bit; /* Current bit */ float scalex, scaley; /* 1/(w-1) and 1/(h-1) scaling factors */ int width, height; /* Scaled width and height */ img = r->data.image; width = img->width * img->maskscale; height = img->height * img->maskscale; scalex = 1.0f / width; scaley = 1.0f / height; switch (PSLevel) { case 0 : // PDF break; default : // PostScript fputs("\nnewpath\n", out); break; } for (y = 0; y < height; y ++) { for (x = 0, ptr = img->mask + (height - y - 1) * img->maskwidth, bit = 128, byte = *ptr++, startx = 0, count = 0; x < width; x ++) { if (!(bit & byte)) { if (!count) startx = x; count ++; } else if (count) { switch (PSLevel) { case 0 : // PDF flate_printf(out, "%.6f %.6f %.6f %.6f re\n", (float)startx * scalex, (float)y * scaley, (float)count * scalex, 1.0f * scaley); break; default : // PostScript fprintf(out, "%.6f %.6f %.6f %.6f re\n", (float)startx * scalex, (float)y * scaley, (float)count * scalex, 1.0f * scaley); break; } count = 0; } if (bit > 1) bit >>= 1; else { bit = 128; byte = *ptr++; } } if (count) { switch (PSLevel) { case 0 : // PDF flate_printf(out, "%.6f %.6f %.6f %.6f re\n", (float)startx * scalex, (float)y * scaley, (float)count * scalex, 1.0f * scaley); break; default : // PostScript fprintf(out, "%.6f %.6f %.6f %.6f re\n", (float)startx * scalex, (float)y * scaley, (float)count * scalex, 1.0f * scaley); break; } } } switch (PSLevel) { case 0 : // PDF flate_puts("W n\n", out); break; default : // PostScript fputs("clip\n", out); break; } } /* * 'write_prolog()' - Write the file prolog... 
*/ static void write_prolog(FILE *out, /* I - Output file */ int page_count, /* I - Number of pages (0 if not known) */ uchar *author, /* I - Author of document */ uchar *creator, /* I - Application that generated the HTML file */ uchar *copyright, /* I - Copyright (if any) on the document */ uchar *keywords, /* I - Search keywords */ uchar *subject) /* I - Subject */ { FILE *prolog; /* PostScript prolog file */ int i, j, /* Looping vars */ encoding_object; /* Font encoding object */ int page; /* Current page */ render_t *r; /* Current render data */ int fonts_used[TYPE_MAX][STYLE_MAX]; /* Whether or not a font is used */ int font_desc[TYPE_MAX][STYLE_MAX]; /* Font descriptor objects */ char temp[1024]; /* Temporary string */ md5_state_t md5; /* MD5 state */ md5_byte_t digest[16]; /* MD5 digest value */ rc4_context_t rc4; /* RC4 context */ uchar owner_pad[32], /* Padded owner password */ owner_key[32], /* Owner key */ user_pad[32], /* Padded user password */ user_key[32]; /* User key */ uchar perm_bytes[4]; /* Permission bytes */ unsigned perm_value; /* Permission value, unsigned */ static unsigned char pad[32] = { /* Padding for passwords */ 0x28, 0xbf, 0x4e, 0x5e, 0x4e, 0x75, 0x8a, 0x41, 0x64, 0x00, 0x4e, 0x56, 0xff, 0xfa, 0x01, 0x08, 0x2e, 0x2e, 0x00, 0xb6, 0xd0, 0x68, 0x3e, 0x80, 0x2f, 0x0c, 0xa9, 0xfe, 0x64, 0x53, 0x69, 0x7a }; /* * See what fonts are used... */ memset(fonts_used, 0, sizeof(fonts_used)); fonts_used[HeadFootType][HeadFootStyle] = 1; for (page = 0; page < (int)num_pages; page ++) for (r = pages[page].start; r != NULL; r = r->next) if (r->type == RENDER_TEXT) fonts_used[r->data.text.typeface][r->data.text.style] = 1; #ifdef DEBUG puts("The following fonts were used:"); for (i = 0; i < TYPE_MAX; i ++) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j]) printf(" %s\n", _htmlFonts[i][j]); #endif // DEBUG /* * Generate the heading... */ if (PSLevel > 0) { /* * Write PostScript prolog stuff... */ if (XRXComments) { int start, end; // Start and end of document pages... int count; // Number of exception pages in this range... // The following comments are Xerox job ticket information that // is used on the high-end Laser Printing Systems rather than // embedded commands... fputs("%XRXbegin: 001.0300\n", out); fputs("%XRXPDLformat: PS-Adobe\n", out); if (doc_title) fprintf(out, "%%XRXtitle: %s\n", doc_title); if (OutputFiles) { // Output a single chapter... if (chapter < 0) { start = 0; end = chapter_outstarts[1] - 1; } else { start = chapter_outstarts[chapter]; end = chapter_outends[chapter]; } } else { start = 0; end = 0; } if (pages[outpages[start].pages[0]].duplex) { if (pages[outpages[start].pages[0]].landscape) fputs("%XRXrequirements: duplex(tumble)\n", out); else fputs("%XRXrequirements: duplex\n", out); } else fputs("%XRXrequirements: simplex\n", out); fputs("%XRXdisposition: PRINT\n", out); fputs("%XRXsignature: False\n", out); fprintf(out, "%%XRXpaperType-size: %.0f %.0f\n", pages[outpages[start].pages[0]].width * 25.4f / 72.0f, pages[outpages[start].pages[0]].length * 25.4f / 72.0f); if (pages[outpages[start].pages[0]].media_type[0]) fprintf(out, "%%XRXpaperType-preFinish: %s 0 0\n", pages[start].media_type); if (pages[outpages[start].pages[0]].media_color[0]) fprintf(out, "%%XRXdocumentPaperColors: %c%s\n", tolower(pages[start].media_color[0]), pages[start].media_color + 1); if (OutputFiles) { // Handle document settings per-chapter... 
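      // Scan the chapter's output pages; whenever the page size, media type,
      // media color, or duplex setting differs from page 0, emit a single
      // %XRXpageExceptions range covering the consecutive pages that share
      // those settings, followed by a matching %XRXpageExceptions-plex line.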
for (i = start + 1; i < end; i += count) { if (pages[outpages[i].pages[0]].width != pages[0].width || pages[outpages[i].pages[0]].length != pages[0].length || strcmp(pages[outpages[i].pages[0]].media_type, pages[0].media_type) != 0 || strcmp(pages[outpages[i].pages[0]].media_color, pages[0].media_color) != 0 || pages[outpages[i].pages[0]].duplex != pages[0].duplex) { for (count = 1; (i + count) <= end; count ++) if (pages[outpages[i].pages[0]].width != pages[outpages[i + count].pages[0]].width || pages[outpages[i].pages[0]].length != pages[outpages[i + count].pages[0]].length || strcmp(pages[outpages[i].pages[0]].media_type, pages[outpages[i + count].pages[0]].media_type) != 0 || strcmp(pages[outpages[i].pages[0]].media_color, pages[outpages[i + count].pages[0]].media_color) != 0 || pages[outpages[i].pages[0]].duplex != pages[outpages[i + count].pages[0]].duplex) break; fprintf(out, "%%XRXpageExceptions: %d %d %.0f %.0f %c%s opaque %s 0 0\n", i + 1, i + count, pages[outpages[i].pages[0]].width * 25.4f / 72.0f, pages[outpages[i].pages[0]].length * 25.4f / 72.0f, tolower(pages[outpages[i].pages[0]].media_color[0]), pages[outpages[i].pages[0]].media_color + 1, pages[outpages[i].pages[0]].media_type[0] ? pages[outpages[i].pages[0]].media_type : "Plain"); if (pages[outpages[i].pages[0]].duplex && pages[outpages[i].pages[0]].landscape) fprintf(out, "%%XRXpageExceptions-plex: %d %d duplex(tumble)\n", i + 1, i + count); else if (pages[outpages[i].pages[0]].duplex) fprintf(out, "%%XRXpageExceptions-plex: %d %d duplex\n", i + 1, i + count); else fprintf(out, "%%XRXpageExceptions-plex: %d %d simplex\n", i + 1, i + count); } else count = 1; } } else { // All pages are in a single file... for (j = (TocLevels == 0); j <= TocDocCount; j ++) { start = chapter_outstarts[j]; end = chapter_outends[j]; for (i = start + 1; i < end; i += count) { if (pages[outpages[i].pages[0]].width != pages[0].width || pages[outpages[i].pages[0]].length != pages[0].length || strcmp(pages[outpages[i].pages[0]].media_type, pages[0].media_type) != 0 || strcmp(pages[outpages[i].pages[0]].media_color, pages[0].media_color) != 0 || pages[outpages[i].pages[0]].duplex != pages[0].duplex) { for (count = 1; (i + count) < end; count ++) if (pages[outpages[i].pages[0]].width != pages[outpages[i + count].pages[0]].width || pages[outpages[i].pages[0]].length != pages[outpages[i + count].pages[0]].length || strcmp(pages[outpages[i].pages[0]].media_type, pages[outpages[i + count].pages[0]].media_type) != 0 || strcmp(pages[outpages[i].pages[0]].media_color, pages[outpages[i + count].pages[0]].media_color) != 0 || pages[outpages[i].pages[0]].duplex != pages[outpages[i + count].pages[0]].duplex) break; fprintf(out, "%%XRXpageExceptions: %d %d %.0f %.0f %c%s opaque %s 0 0\n", i + 1, i + count, pages[outpages[i].pages[0]].width * 25.4f / 72.0f, pages[outpages[i].pages[0]].length * 25.4f / 72.0f, tolower(pages[outpages[i].pages[0]].media_color[0]), pages[outpages[i].pages[0]].media_color + 1, pages[outpages[i].pages[0]].media_type[0] ? 
pages[outpages[i].pages[0]].media_type : "Plain"); if (pages[outpages[i].pages[0]].duplex && pages[outpages[i].pages[0]].landscape) fprintf(out, "%%XRXpageExceptions-plex: %d %d duplex(tumble)\n", i + 1, i + count); else if (pages[outpages[i].pages[0]].duplex) fprintf(out, "%%XRXpageExceptions-plex: %d %d duplex\n", i + 1, i + count); else fprintf(out, "%%XRXpageExceptions-plex: %d %d simplex\n", i + 1, i + count); } else count = 1; } } } fputs("%XRXend\n", out); } fputs("%!PS-Adobe-3.0\n", out); if (Landscape) fprintf(out, "%%%%BoundingBox: 0 0 %d %d\n", PageLength, PageWidth); else fprintf(out, "%%%%BoundingBox: 0 0 %d %d\n", PageWidth, PageLength); fprintf(out,"%%%%LanguageLevel: %d\n", PSLevel); fputs("%%Creator: " HTMLDOC_PRODUCER "\n", out); fprintf(out, "%%%%CreationDate: D:%04d%02d%02d%02d%02d%02d+0000\n", doc_date.tm_year + 1900, doc_date.tm_mon + 1, doc_date.tm_mday, doc_date.tm_hour, doc_date.tm_min, doc_date.tm_sec); if (doc_title != NULL) fprintf(out, "%%%%Title: %s\n", doc_title); if (author != NULL) fprintf(out, "%%%%Author: %s\n", author); if (creator != NULL) fprintf(out, "%%%%Generator: %s\n", creator); if (copyright != NULL) fprintf(out, "%%%%Copyright: %s\n", copyright); if (keywords != NULL) fprintf(out, "%%%%Keywords: %s\n", keywords); if (subject != NULL) fprintf(out, "%%%%Subject: %s\n", keywords); if (page_count > 0) fprintf(out, "%%%%Pages: %d\n", page_count); else fputs("%%Pages: (atend)\n", out); if (!EmbedFonts) { fputs("%%DocumentNeededResources:\n", out); for (i = 0; i < TYPE_MAX; i ++) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j] && _htmlStandardFonts[i]) fprintf(out, "%%%%+ font %s\n", _htmlFonts[i][j]); } fputs("%%DocumentProvidedResources:\n", out); for (i = 0; i < TYPE_MAX; i ++) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j] && (EmbedFonts || !_htmlStandardFonts[i])) fprintf(out, "%%%%+ font %s\n", _htmlFonts[i][j]); fputs("%%DocumentData: Clean7bit\n", out); fputs("%%EndComments\n", out); fputs("%%BeginProlog\n", out); /* * Embed fonts? */ for (i = 0; i < TYPE_MAX; i ++) { if (EmbedFonts || !_htmlStandardFonts[i]) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j]) write_type1(out, (typeface_t)i, (style_t)j); } /* * Procedures used throughout the document... */ const char *version = SVERSION; fprintf(out, "%%%%BeginResource: procset htmldoc-page 1.8 %s\n", version + 4); fputs("/BD{bind def}bind def", out); fputs("/B{dup 0 exch rlineto exch 0 rlineto neg 0 exch rlineto\n" "closepath stroke}BD", out); fputs("/C{setrgbcolor}BD\n", out); fputs("/CM{concat}BD", out); fputs("/DF{findfont dup length dict begin{1 index/FID ne{def}{pop pop}\n" "ifelse}forall/Encoding fontencoding def currentdict end definefont pop}BD\n", out); fputs("/F{dup 0 exch rlineto exch 0 rlineto neg 0 exch rlineto closepath fill}BD\n", out); fputs("/FS{/hdFontSize exch def}BD", out); fputs("/G{setgray}BD\n", out); fputs("/GS{gsave}BD", out); fputs("/GR{grestore}BD", out); fputs("/J{0 exch ashow}BD\n", out); fputs("/L{0 rlineto stroke}BD", out); fputs("/M{moveto}BD", out); fputs("/re{4 2 roll moveto 1 index 0 rlineto 0 exch rlineto neg 0 rlineto closepath}BD\n", out); fputs("/RO{rotate}BD", out); fputs("/S{show}BD", out); fputs("/SC{dup scale}BD\n", out); fputs("/SF{findfont hdFontSize scalefont setfont}BD", out); fputs("/SP{showpage}BD", out); fputs("/T{translate}BD\n", out); fputs("%%EndResource\n", out); /* * Output the font encoding for the current character set... 
For now we * just support 8-bit fonts since true Unicode support needs a very large * number of extra fonts that aren't normally available on a PS printer. */ fputs("/fontencoding[\n", out); for (i = 0, j = 0; i < 256; i ++) { if (_htmlGlyphs[i]) j += strlen(_htmlGlyphs[i]) + 1; else j += 8; if (j > 80) { if (_htmlGlyphs[i]) j = strlen(_htmlGlyphs[i]) + 1; else j = 8; putc('\n', out); } putc('/', out); if (_htmlGlyphs[i]) fputs(_htmlGlyphs[i], out); else fputs(".notdef", out); } fputs("]def\n", out); /* * Fonts... */ for (i = 0; i < TYPE_MAX; i ++) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j]) { if (i < TYPE_SYMBOL) fprintf(out, "/F%x/%s DF\n", i * 4 + j, _htmlFonts[i][j]); else fprintf(out, "/F%x/%s findfont definefont pop\n", i * 4 + j, _htmlFonts[i][j]); } if (PSCommands) { snprintf(temp, sizeof(temp), "%s/data/prolog.ps", _htmlData); if ((prolog = fopen(temp, "rb")) != NULL) { while (fgets(temp, sizeof(temp), prolog) != NULL) fputs(temp, out); fclose(prolog); } else { progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to open data file \"%s\" - %s", temp, strerror(errno)); fprintf(out, "%%%%BeginResource: procset htmldoc-device 1.8 %s\n", version + 4); fputs("languagelevel 1 eq{/setpagedevice{pop}BD}if\n", out); fputs("/SetDuplexMode{<</Duplex 3 index/Tumble 5 index>>setpagedevice " "pop pop}BD\n", out); fputs("/SetMediaColor{pop}BD\n", out); fputs("/SetMediaType{pop}BD\n", out); fputs("/SetMediaPosition{pop}BD\n", out); fputs("/SetPageSize{2 array astore<</PageSize 2 index/ImageableArea " "null>>setpagedevice pop}BD\n", out); fputs("%%EndResource\n", out); } } if (background_image != NULL) ps_write_background(out); fputs("%%EndProlog\n", out); } else { /* * Write PDF prolog stuff... */ fprintf(out, "%%PDF-%.1f\n", 0.1 * PDFVersion); fputs("%\342\343\317\323\n", out); num_objects = 0; /* * Compute the file ID... */ md5_init(&md5); md5_append(&md5, (md5_byte_t *)OutputPath, sizeof(OutputPath)); md5_append(&md5, (md5_byte_t *)&doc_time, sizeof(doc_time)); md5_finish(&md5, file_id); /* * Setup encryption stuff as necessary... */ if (Encryption) { /* * Copy and pad the user password... */ strlcpy((char *)user_pad, UserPassword, sizeof(user_pad)); if ((i = strlen(UserPassword)) < 32) memcpy(user_pad + i, pad, (size_t)(32 - i)); if (OwnerPassword[0]) { /* * Copy and pad the owner password... */ strlcpy((char *)owner_pad, OwnerPassword, sizeof(owner_pad)); if ((i = strlen(OwnerPassword)) < 32) memcpy(owner_pad + i, pad, (size_t)(32 - i)); } else { /* * Generate a pseudo-random owner password... */ srand(time(NULL)); for (i = 0; i < 32; i ++) owner_pad[i] = (uchar)rand(); } /* * What is the key length? * * Acrobat 4.0 and earlier (PDF 1.3 and earlier) allow a maximum of * 40-bits. Acrobat 5.0 and newer support 128-bits. */ if (PDFVersion > 13) encrypt_len = 16; // 128 bits else encrypt_len = 5; // 40 bits /* * Compute the owner key... */ md5_init(&md5); md5_append(&md5, owner_pad, 32); md5_finish(&md5, digest); if (encrypt_len > 5) { // MD5 the result 50 more times... for (i = 0; i < 50; i ++) { md5_init(&md5); md5_append(&md5, digest, 16); md5_finish(&md5, digest); } // Copy the padded user password... memcpy(owner_key, user_pad, 32); // Encrypt the result 20 times... for (i = 0; i < 20; i ++) { // XOR each byte in the key with the loop counter... 
for (j = 0; j < encrypt_len; j ++) encrypt_key[j] = (uchar)(digest[j] ^ i); rc4_init(&rc4, encrypt_key, (size_t)encrypt_len); rc4_encrypt(&rc4, owner_key, owner_key, 32); } } else { rc4_init(&rc4, digest, (size_t)encrypt_len); rc4_encrypt(&rc4, user_pad, owner_key, 32); } /* * Figure out the permissions word; the new N-bit security * handler adds several new permission bits, which we must * simulate... */ perm_value = (unsigned)Permissions; if (encrypt_len > 5) { // N-bit encryption... if (!(perm_value & PDF_PERM_COPY)) perm_value &= (unsigned)~0x00240000; // Mask additional copy perms... } /* * Compute the encryption key... */ md5_init(&md5); md5_append(&md5, user_pad, 32); md5_append(&md5, owner_key, 32); perm_bytes[0] = (uchar)perm_value; perm_bytes[1] = (uchar)(perm_value >> 8); perm_bytes[2] = (uchar)(perm_value >> 16); perm_bytes[3] = (uchar)(perm_value >> 24); md5_append(&md5, perm_bytes, 4); md5_append(&md5, file_id, 16); md5_finish(&md5, digest); if (encrypt_len > 5) { // MD5 the result 50 times.. for (i = 0; i < 50; i ++) { md5_init(&md5); md5_append(&md5, digest, 16); md5_finish(&md5, digest); } } memcpy(encrypt_key, digest, (size_t)encrypt_len); /* * Compute the user key... */ if (encrypt_len > 5) { md5_init(&md5); md5_append(&md5, pad, 32); md5_append(&md5, file_id, 16); md5_finish(&md5, user_key); memset(user_key + 16, 0, 16); // Encrypt the result 20 times... for (i = 0; i < 20; i ++) { // XOR each byte in the key with the loop counter... for (j = 0; j < encrypt_len; j ++) digest[j] = (uchar)(encrypt_key[j] ^ i); rc4_init(&rc4, digest, (size_t)encrypt_len); rc4_encrypt(&rc4, user_key, user_key, 16); } } else { rc4_init(&rc4, encrypt_key, (size_t)encrypt_len); rc4_encrypt(&rc4, pad, user_key, 32); } /* * Write the encryption dictionary... */ encrypt_object = pdf_start_object(out); fputs("/Filter/Standard/O<", out); for (i = 0; i < 32; i ++) fprintf(out, "%02x", owner_key[i]); fputs(">/U<", out); for (i = 0; i < 32; i ++) fprintf(out, "%02x", user_key[i]); fputs(">", out); if (encrypt_len > 5) { // N-bit encryption... fprintf(out, "/P %d/V 2/R 3/Length %d", (int)perm_value, encrypt_len * 8); } else fprintf(out, "/P %d/V 1/R 2", (int)perm_value); pdf_end_object(out); } else encrypt_object = 0; /* * Write info object... */ info_object = pdf_start_object(out); fputs("/Producer", out); write_string(out, (uchar *)HTMLDOC_PRODUCER, 0); fputs("/CreationDate", out); snprintf(temp, sizeof(temp), "D:%04d%02d%02d%02d%02d%02d+0000", doc_date.tm_year + 1900, doc_date.tm_mon + 1, doc_date.tm_mday, doc_date.tm_hour, doc_date.tm_min, doc_date.tm_sec); write_string(out, (uchar *)temp, 0); if (doc_title != NULL) { fputs("/Title", out); write_utf16(out, doc_title); } if (author != NULL || copyright != NULL) { if (author && copyright) snprintf(temp, sizeof(temp), "%s, %s", author, copyright); else if (author) strlcpy(temp, (const char *)author, sizeof(temp)); else strlcpy(temp, (const char *)copyright, sizeof(temp)); fputs("/Author", out); write_utf16(out, (uchar *)temp); } if (creator != NULL) { fputs("/Creator", out); write_utf16(out, creator); } if (keywords != NULL) { fputs("/Keywords", out); write_utf16(out, keywords); } if (subject != NULL) { fputs("/Subject", out); write_utf16(out, subject); } pdf_end_object(out); /* * Write the font encoding for the selected character set. Note that * we *should* be able to use the WinAnsiEncoding value for ISO-8859-1 * to make smaller files, however Acrobat Exchange does not like it * despite the fact that it is defined in the PDF specification... 
*/ encoding_object = pdf_start_object(out); fputs("/Type/Encoding", out); fputs("/Differences[", out); for (i = 0, j = -1; i < 256; i ++) if (_htmlGlyphs[i]) { /* * Output a character index if we had blank ones... */ if (j != (i - 1)) fprintf(out, " %d", i); fprintf(out, "/%s", _htmlGlyphs[i]); j = i; } fputs("]", out); pdf_end_object(out); memset(font_desc, 0, sizeof(font_desc)); /* * Build font descriptors for the EmbedFonts fonts... */ for (i = 0; i < TYPE_MAX; i ++) if (EmbedFonts || !_htmlStandardFonts[i]) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j]) font_desc[i][j] = write_type1(out, (typeface_t )i, (style_t)j); for (i = 0; i < TYPE_MAX; i ++) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j]) { font_objects[i * STYLE_MAX + j] = pdf_start_object(out); fputs("/Type/Font", out); fputs("/Subtype/Type1", out); fprintf(out, "/BaseFont/%s", _htmlFonts[i][j]); if (font_desc[i][j]) { // Embed Type1 font... fputs("/FirstChar 0", out); fputs("/LastChar 255", out); fprintf(out, "/Widths %d 0 R", font_desc[i][j] + 1); fprintf(out, "/FontDescriptor %d 0 R", font_desc[i][j]); } if (i < TYPE_SYMBOL) /* Use native encoding for symbols */ fprintf(out, "/Encoding %d 0 R", encoding_object); pdf_end_object(out); } } } /* * 'write_string()' - Write a text entity. */ static void write_string(FILE *out, /* I - Output file */ uchar *s, /* I - String */ int compress) /* I - Compress output? */ { int i; /* Looping var */ if (Encryption && !compress && PSLevel == 0) { int len, // Length of string bytes; // Current bytes encrypted uchar news[1024]; // New string /* * Write an encrypted string... */ putc('<', out); encrypt_init(); for (len = strlen((char *)s); len > 0; len -= bytes, s += bytes) { if (len > (int)sizeof(news)) bytes = (int)sizeof(news); else bytes = len; rc4_encrypt(&encrypt_state, s, news, (size_t)bytes); for (i = 0; i < bytes; i ++) fprintf(out, "%02x", news[i]); } putc('>', out); } else { uchar nbsp = 160; // Non-breaking space char if (compress) flate_write(out, (uchar *)"(", 1); else putc('(', out); if (_htmlUTF8) nbsp = _htmlCharacters[160]; while (*s != '\0') { if (*s == nbsp) { /* &nbsp; */ if (compress) flate_write(out, (uchar *)" ", 1); else putc(' ', out); } else if (*s < 32 || *s > 126) { if (compress) flate_printf(out, "\\%o", *s); else fprintf(out, "\\%o", *s); } else if (compress) { if (*s == '(' || *s == ')' || *s == '\\') flate_write(out, (uchar *)"\\", 1); flate_write(out, s, 1); } else { if (*s == '(' || *s == ')' || *s == '\\') putc('\\', out); putc(*s, out); } s ++; } if (compress) flate_write(out, (uchar *)")", 1); else putc(')', out); } } /* * 'write_text()' - Write a text entity. */ static void write_text(FILE *out, /* I - Output file */ render_t *r) /* I - Text entity */ { uchar *ptr; /* Pointer into text */ // Quick optimization - don't output spaces... for (ptr = r->data.text.buffer; *ptr; ptr ++) if (!isspace(*ptr) && *ptr != 0xa0) break; if (!*ptr) return; // Not just whitespace - send it out... 
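  // Emit the text run: set the color, font, and position, then the character
  // spacing.  PostScript output uses the prolog's J (ashow) and S (show)
  // procedures; PDF output uses the Tc and Tj operators.  The string itself
  // goes through write_string() so parentheses, backslashes, and non-ASCII
  // bytes are escaped.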
set_color(out, r->data.text.rgb); set_font(out, r->data.text.typeface, r->data.text.style, r->data.text.size); set_pos(out, r->x, r->y); if (PSLevel > 0) { if (r->data.text.spacing > 0.0f) fprintf(out, " %.3f", r->data.text.spacing); } else if (r->data.text.spacing != render_spacing) flate_printf(out, " %.3f Tc", render_spacing = r->data.text.spacing); write_string(out, r->data.text.buffer, PSLevel == 0); if (PSLevel > 0) { if (r->data.text.spacing > 0.0f) fputs("J\n", out); else fputs("S\n", out); } else flate_puts("Tj\n", out); render_x += r->width; } /* * 'write_trailer()' - Write the file trailer. */ static void write_trailer(FILE *out, /* I - Output file */ int num_file_pages, /* I - Number of pages in file */ uchar *lang) /* I - Language */ { int i, j, k, /* Looping vars */ type, /* Type of number */ offset, /* Offset to xref table in PDF file */ start; /* Start page number */ page_t *page; /* Start page of chapter */ char prefix[64], /* Prefix string */ *prefptr; /* Pointer into prefix string */ static const char *modes[] = /* Page modes */ { "UseNone", "UseOutlines", "FullScreen" }; static const char *layouts[] = /* Page layouts */ { "SinglePage", "OneColumn", "TwoColumnLeft", "TwoColumnRight" }; if (PSLevel > 0) { /* * PostScript... */ fputs("%%Trailer\n", out); if (num_file_pages > 0) fprintf(out, "%%%%Pages: %d\n", num_file_pages); fputs("%%EOF\n", out); } else { /* * PDF... */ root_object = pdf_start_object(out); fputs("/Type/Catalog", out); fprintf(out, "/Pages %d 0 R", pages_object); if (PDFVersion >= 12) { if (names_object) fprintf(out, "/Names %d 0 R", names_object); fprintf(out, "/PageLayout/%s", layouts[PDFPageLayout]); } if (lang) fprintf(out, "/Lang(%s)", (char *)lang); if (outline_object > 0) fprintf(out, "/Outlines %d 0 R", outline_object); switch (PDFFirstPage) { case PDF_PAGE_1 : if (TitlePage) { fprintf(out, "/OpenAction[%d 0 R/XYZ null null 0]", pages_object + 1); break; } break; case PDF_TOC : if (TocLevels > 0) { fprintf(out, "/OpenAction[%d 0 R/XYZ null null 0]", pages_object + 2 * chapter_outstarts[0] + 1); break; } break; case PDF_CHAPTER_1 : fprintf(out, "/OpenAction[%d 0 R/XYZ null null 0]", pages_object + 2 * chapter_outstarts[1] + 1); break; } fprintf(out, "/PageMode/%s", modes[PDFPageMode]); if (PDFVersion > 12 && NumberUp == 1) { // Output the PageLabels tree... 
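    // Each /Nums entry maps a zero-based page index to a label dictionary:
    // /S selects the numbering style (D, r, R, a, or A), /St the starting
    // number, and /P an optional literal prefix.  Title pages get a
    // prefix-only label, the table of contents gets its own style, and each
    // chapter restarts its own numbering range.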
fputs("/PageLabels<</Nums[", out); for (i = 0; i < chapter_starts[1]; i ++) { fprintf(out, "%d<</P", i); if (i & 1) write_string(out, (uchar *)"eltit", 0); else write_string(out, (uchar *)"title", 0); fputs(">>", out); } if (TocLevels > 0 && OutputType == OUTPUT_BOOK) { type = 'r'; for (j = 0; j < 3; j ++) if ((TocHeader[j] && strstr(TocHeader[j], "$PAGE(1)")) || (TocFooter[j] && strstr(TocFooter[j], "$PAGE(1)"))) type = 'D'; else if ((TocHeader[j] && strstr(TocHeader[j], "$PAGE(I)")) || (TocFooter[j] && strstr(TocFooter[j], "$PAGE(I)"))) type = 'R'; else if ((TocHeader[j] && strstr(TocHeader[j], "$PAGE(a)")) || (TocFooter[j] && strstr(TocFooter[j], "$PAGE(a)"))) type = 'a'; else if ((TocHeader[j] && strstr(TocHeader[j], "$PAGE(A)")) || (TocFooter[j] && strstr(TocFooter[j], "$PAGE(A)"))) type = 'A'; fprintf(out, "%d<</S/%c>>", i, type); i += chapter_ends[0] - chapter_starts[0] + 1; } for (j = 1; j <= TocDocCount; j ++) { if (chapter_starts[j] < 0) continue; page = pages + chapter_starts[j]; start = chapter_starts[j] - chapter_starts[1] + 1; type = 'D'; prefix[0] = '\0'; for (k = 0; k < 3; k ++) { if (page->header[k] && strstr((char *)page->header[k], "PAGE")) strlcpy(prefix, (char *)page->header[k], sizeof(prefix)); else if (page->footer[k] && strstr((char *)page->footer[k], "PAGE")) strlcpy(prefix, (char *)page->footer[k], sizeof(prefix)); if ((page->header[k] && strstr((char *)page->header[k], "PAGE(i)")) || (page->footer[k] && strstr((char *)page->footer[k], "PAGE(i)"))) type = 'r'; else if ((page->header[k] && strstr((char *)page->header[k], "PAGE(I)")) || (page->footer[k] && strstr((char *)page->footer[k], "PAGE(I)"))) type = 'R'; else if ((page->header[k] && strstr((char *)page->header[k], "PAGE(a)")) || (page->footer[k] && strstr((char *)page->footer[k], "PAGE(a)"))) type = 'a'; else if ((page->header[k] && strstr((char *)page->header[k], "PAGE(A)")) || (page->footer[k] && strstr((char *)page->footer[k], "PAGE(A)"))) type = 'A'; if ((page->header[k] && strstr((char *)page->header[k], "$CHAPTERPAGE")) || (page->footer[k] && strstr((char *)page->footer[k], "$CHAPTERPAGE"))) start = 1; } if ((prefptr = strstr(prefix, "$PAGE")) == NULL) prefptr = strstr(prefix, "$CHAPTERPAGE"); fprintf(out, "%d<</S/%c/St %d", i, type, start); if (prefptr) { *prefptr = '\0'; fputs("/P", out); write_string(out, (uchar *)prefix, 0); } fputs(">>", out); i += chapter_ends[j] - chapter_starts[j] + 1; } fputs("]>>", out); } pdf_end_object(out); offset = ftell(out); fputs("xref\n", out); fprintf(out, "0 %d \n", (int)num_objects + 1); fputs("0000000000 65535 f \n", out); for (i = 1; i <= (int)num_objects; i ++) fprintf(out, "%010d 00000 n \n", objects[i]); fputs("trailer\n", out); fputs("<<", out); fprintf(out, "/Size %d", (int)num_objects + 1); fprintf(out, "/Root %d 0 R", root_object); fprintf(out, "/Info %d 0 R", info_object); fputs("/ID[<", out); for (i = 0; i < 16; i ++) fprintf(out, "%02x", file_id[i]); fputs("><", out); for (i = 0; i < 16; i ++) fprintf(out, "%02x", file_id[i]); fputs(">]", out); if (Encryption) fprintf(out, "/Encrypt %d 0 R", encrypt_object); fputs(">>\n", out); fputs("startxref\n", out); fprintf(out, "%d\n", offset); fputs("%%EOF\n", out); } } /* * 'write_type1()' - Write an embedded Type 1 font. 
*/ static int /* O - Object number */ write_type1(FILE *out, /* I - File to write to */ typeface_t typeface, /* I - Typeface */ style_t style) /* I - Style */ { char filename[1024]; /* PFA filename */ FILE *fp; /* PFA file */ int ch; /* Character value */ int width; /* Width value */ char glyph[64], /* Glyph name */ line[1024], /* Line from AFM file */ *lineptr, /* Pointer into line */ *dataptr; /* Pointer for data */ int ascent, /* Ascent above baseline */ cap_height, /* Ascent of CAPITALS */ x_height, /* Ascent of lowercase */ descent, /* Decent below baseline */ bbox[4], /* Bounding box */ italic_angle; /* Angle for italics */ int widths[256]; /* Character widths */ int length1, /* Length1 value for font */ length2, /* Length2 value for font */ length3; /* Length3 value for font */ static int tflags[] = /* PDF typeface flags */ { 33, /* Courier */ 34, /* Times-Roman */ 32, /* Helvetica */ 33, /* Monospace */ 34, /* Serif */ 32, /* Sans */ 4, /* Symbol */ 4 /* Dingbats */ }; static int sflags[] = /* PDF style flags */ { 0, /* Normal */ 0, /* Bold */ 64, /* Italic */ 64 /* Bold-Italic */ }; /* * This function writes a Type1 font, either as an object for PDF * output or as an in-line font in PostScript output. This is useful * because the Type1 fonts that Adobe ships typically do not include * the full set of characters required by some of the ISO character * sets. */ /* * Try to open the PFA file for the Type1 font... */ snprintf(filename, sizeof(filename), "%s/fonts/%s.pfa", _htmlData, _htmlFonts[typeface][style]); if ((fp = fopen(filename, "r")) == NULL) { #ifndef DEBUG progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to open font file %s!", filename); #endif /* !DEBUG */ return (0); } /* * Write the font (object)... */ if (PSLevel) { /* * Embed a Type1 font in the PostScript output... */ fprintf(out, "%%%%BeginResource: font %s\n", _htmlFonts[typeface][style]); line[0] = '\0'; while (fgets(line, sizeof(line), fp) != NULL) fputs(line, out); if (line[strlen(line) - 1] != '\n') fputs("\n", out); fputs("%%EndResource\n", out); fclose(fp); } else { /* * Embed a Type1 font object in the PDF output... 
*/ length1 = 0; length2 = 0; length3 = 0; while (fgets(line, sizeof(line), fp) != NULL) { length1 += strlen(line); if (strstr(line, "currentfile eexec") != NULL) break; } while (fgets(line, sizeof(line), fp) != NULL) { if (!strcmp(line, "00000000000000000000000000000000" "00000000000000000000000000000000\n")) break; length2 += (strlen(line) - 1) / 2; } length3 = strlen(line); while (fgets(line, sizeof(line), fp) != NULL) length3 += strlen(line); rewind(fp); pdf_start_object(out); fprintf(out, "/Length1 %d", length1); fprintf(out, "/Length2 %d", length2); fprintf(out, "/Length3 %d", length3); if (Compression) fputs("/Filter/FlateDecode", out); pdf_start_stream(out); flate_open_stream(out); while (fgets(line, sizeof(line), fp) != NULL) { flate_puts(line, out); if (strstr(line, "currentfile eexec") != NULL) break; } while (fgets(line, sizeof(line), fp) != NULL) { if (!strcmp(line, "00000000000000000000000000000000" "00000000000000000000000000000000\n")) break; for (lineptr = line, dataptr = line; isxdigit(*lineptr); lineptr += 2) { if (isdigit(lineptr[0])) ch = (lineptr[0] - '0') << 4; else ch = (tolower(lineptr[0] & 255) - 'a' + 10) << 4; if (isdigit(lineptr[1])) ch |= lineptr[1] - '0'; else ch |= tolower(lineptr[1] & 255) - 'a' + 10; *dataptr++ = (char)ch; } flate_write(out, (uchar *)line, dataptr - line); } flate_puts(line, out); while (fgets(line, sizeof(line), fp) != NULL) flate_puts(line, out); flate_close_stream(out); pdf_end_object(out); fclose(fp); /* * Try to open the AFM file for the Type1 font... */ snprintf(filename, sizeof(filename), "%s/fonts/%s.afm", _htmlData, _htmlFonts[typeface][style]); if ((fp = fopen(filename, "r")) == NULL) { #ifndef DEBUG progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to open font width file %s!", filename); #endif /* !DEBUG */ return (0); } /* * Set the default values (Courier)... */ for (ch = 0; ch < 256; ch ++) widths[ch] = 600; ascent = 629; cap_height = 562; x_height = 426; descent = -157; bbox[0] = -28; bbox[1] = -250; bbox[2] = 628; bbox[3] = 805; italic_angle = 0; /* * Read the AFM file... */ while (fgets(line, sizeof(line), fp) != NULL) { if (strncmp(line, "ItalicAngle ", 12) == 0) italic_angle = atoi(line + 12); else if (strncmp(line, "FontBBox ", 9) == 0) sscanf(line + 9, "%d%d%d%d", bbox + 0, bbox + 1, bbox + 2, bbox + 3); else if (strncmp(line, "CapHeight ", 10) == 0) cap_height = atoi(line + 10); else if (strncmp(line, "XHeight ", 8) == 0) x_height = atoi(line + 8); else if (strncmp(line, "Ascender ", 9) == 0) ascent = atoi(line + 9); else if (strncmp(line, "Descender ", 10) == 0) descent = atoi(line + 10); else if (strncmp(line, "C ", 2) == 0) { if (typeface < TYPE_SYMBOL) { /* * Handle encoding of Courier, Times, and Helvetica using * assigned charset... */ if (sscanf(line, "%*s%*s%*s%*s%d%*s%*s%63s", &width, glyph) != 2) continue; for (ch = 0; ch < 256; ch ++) if (_htmlGlyphs[ch] && strcmp(_htmlGlyphs[ch], glyph) == 0) break; if (ch < 256) widths[ch] = width; } else { /* * Symbol font uses its own encoding... */ if (sscanf(line, "%*s%d%*s%*s%d", &ch, &width) != 2) continue; if (ch >= 0 && ch < 256) widths[ch] = width; } } } fclose(fp); /* * Write the font descriptor... 
*/ pdf_start_object(out); fputs("/Type/FontDescriptor", out); fprintf(out, "/Ascent %d", ascent); fprintf(out, "/Descent %d", descent); fprintf(out, "/CapHeight %d", cap_height); fprintf(out, "/XHeight %d", x_height); fprintf(out, "/FontBBox[%d %d %d %d]", bbox[0], bbox[1], bbox[2], bbox[3]); fprintf(out, "/ItalicAngle %d", italic_angle); fprintf(out, "/StemV %d", widths['v']); fprintf(out, "/Flags %d", tflags[typeface] | sflags[style]); fprintf(out, "/FontName/%s", _htmlFonts[typeface][style]); fprintf(out, "/FontFile %d 0 R", (int)num_objects - 1); pdf_end_object(out); /* * Write the character widths... */ pdf_start_object(out, 1); fprintf(out, "%d", widths[0]); for (ch = 1; ch < 256; ch ++) fprintf(out, " %d", widths[ch]); pdf_end_object(out); } /* * Return the font descriptor... */ return (num_objects - 1); } /* * 'write_utf16()' - Write a UTF-16 string... */ static void write_utf16(FILE *out, // I - File to write to uchar *s) // I - String to write { uchar *sptr; // Pointer into string /* * We start by checking to see if the string is composed only of * ASCII characters; if so, we can just write a normal string... */ for (sptr = s; *sptr && !(*sptr & 0x80); sptr ++); if (!*sptr) { /* * Write an ASCII string... */ write_string(out, s, 0); } else if (Encryption) { /* * Convert the string to Unicode and encrypt... */ int ch; // Character value uchar unicode[2], // Unicode character enicode[2]; // Encrypted unicode character putc('<', out); encrypt_init(); unicode[0] = 0xfe; // Start with BOM unicode[1] = 0xff; rc4_encrypt(&encrypt_state, unicode, enicode, 2); fprintf(out, "%02x%02x", enicode[0], enicode[1]); for (sptr = s; *sptr; sptr ++) { ch = _htmlUnicode[*sptr]; unicode[0] = (uchar)(ch >> 8); unicode[1] = (uchar)ch; rc4_encrypt(&encrypt_state, unicode, enicode, 2); fprintf(out, "%02x%02x", enicode[0], enicode[1]); } putc('>', out); } else { /* * Convert the string to Unicode... */ fputs("<feff", out); // Start with BOM for (sptr = s; *sptr; sptr ++) fprintf(out, "%04x", _htmlUnicode[*sptr]); putc('>', out); } } /* * 'encrypt_init()' - Initialize the RC4 encryption context for the current * object. */ static void encrypt_init(void) { int i; /* Looping var */ uchar data[21], /* Key data */ *dataptr; /* Pointer to key data */ md5_state_t md5; /* MD5 state */ md5_byte_t digest[16]; /* MD5 digest value */ /* * Compute the key data for the MD5 hash. */ for (i = 0, dataptr = data; i < encrypt_len; i ++) *dataptr++ = encrypt_key[i]; *dataptr++ = (uchar)num_objects; *dataptr++ = (uchar)(num_objects >> 8); *dataptr++ = (uchar)(num_objects >> 16); *dataptr++ = 0; *dataptr++ = 0; /* * Hash it... */ md5_init(&md5); md5_append(&md5, data, encrypt_len + 5); md5_finish(&md5, digest); /* * Initialize the RC4 context using the first N+5 bytes of the digest... */ if (encrypt_len > 11) rc4_init(&encrypt_state, digest, 16); else rc4_init(&encrypt_state, digest, (size_t)(encrypt_len + 5)); } /* * 'flate_open_stream()' - Open a deflated output stream. */ static void flate_open_stream(FILE *out) /* I - Output file */ { if (Encryption && !PSLevel) encrypt_init(); if (!Compression) return; compressor_active = 1; compressor.zalloc = (alloc_func)0; compressor.zfree = (free_func)0; compressor.opaque = (voidpf)0; deflateInit(&compressor, Compression); compressor.next_out = (Bytef *)comp_buffer; compressor.avail_out = sizeof(comp_buffer); } /* * 'flate_close_stream()' - Close a deflated output stream. 
*/ static void flate_close_stream(FILE *out) /* I - Output file */ { int status; /* Deflate status */ if (!Compression) { #ifdef HTMLDOC_ASCII85 if (PSLevel) ps_ascii85(out, (uchar *)"", 0, 1); #endif // HTMLDOC_ASCII85 return; } while ((status = deflate(&compressor, Z_FINISH)) != Z_STREAM_END) { if (status < Z_OK && status != Z_BUF_ERROR) { progress_error(HD_ERROR_OUT_OF_MEMORY, "deflate() failed (%d)", status); return; } if (PSLevel) #ifdef HTMLDOC_ASCII85 ps_ascii85(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #else ps_hex(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #endif // HTMLDOC_ASCII85 else { if (Encryption) rc4_encrypt(&encrypt_state, comp_buffer, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); fwrite(comp_buffer, (size_t)((uchar *)compressor.next_out - (uchar *)comp_buffer), 1, out); } compressor.next_out = (Bytef *)comp_buffer; compressor.avail_out = sizeof(comp_buffer); } if ((uchar *)compressor.next_out > (uchar *)comp_buffer) { if (PSLevel) #ifdef HTMLDOC_ASCII85 ps_ascii85(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #else ps_hex(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #endif // HTMLDOC_ASCII85 else { if (Encryption) rc4_encrypt(&encrypt_state, comp_buffer, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); fwrite(comp_buffer, (size_t)((uchar *)compressor.next_out - (uchar *)comp_buffer), 1, out); } } deflateEnd(&compressor); compressor_active = 0; #ifdef HTMLDOC_ASCII85 if (PSLevel) ps_ascii85(out, (uchar *)"", 0, 1); #else if (PSLevel) { // End of data marker... fputs(">\n", out); } #endif // HTMLDOC_ASCII85 } /* * 'flate_puts()' - Write a character string to a compressed stream. */ static void flate_puts(const char *s, /* I - String to write */ FILE *out) /* I - Output file */ { flate_write(out, (uchar *)s, strlen(s)); } /* * 'flate_printf()' - Write a formatted character string to a compressed stream. */ static void flate_printf(FILE *out, /* I - Output file */ const char *format, /* I - Format string */ ...) /* I - Additional args as necessary */ { int length; /* Length of output string */ char buf[10240]; /* Output buffer */ va_list ap; /* Argument pointer */ va_start(ap, format); length = vsnprintf(buf, sizeof(buf), format, ap); va_end(ap); flate_write(out, (uchar *)buf, length); } /* * 'flate_write()' - Write data to a compressed stream. */ static void flate_write(FILE *out, /* I - Output file */ uchar *buf, /* I - Buffer */ int length, /* I - Number of bytes to write */ int flush) /* I - Flush when writing data? */ { int status; /* Deflate status */ if (compressor_active) { compressor.next_in = buf; compressor.avail_in = (unsigned)length; while (compressor.avail_in > 0) { if (compressor.avail_out < (int)(sizeof(comp_buffer) / 8)) { if (PSLevel) #ifdef HTMLDOC_ASCII85 ps_ascii85(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #else ps_hex(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #endif // HTMLDOC_ASCII85 else { if (Encryption) rc4_encrypt(&encrypt_state, comp_buffer, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); fwrite(comp_buffer, (size_t)((uchar *)compressor.next_out - (uchar *)comp_buffer), 1, out); } compressor.next_out = (Bytef *)comp_buffer; compressor.avail_out = sizeof(comp_buffer); } status = deflate(&compressor, flush ? 
Z_FULL_FLUSH : Z_NO_FLUSH); if (status < Z_OK && status != Z_BUF_ERROR) { progress_error(HD_ERROR_OUT_OF_MEMORY, "deflate() failed (%d)", status); return; } flush = 0; } } else if (Encryption && !PSLevel) { int i, // Looping var bytes; // Number of bytes to encrypt/write uchar newbuf[1024]; // New encrypted data buffer for (i = 0; i < length; i += sizeof(newbuf)) { if ((bytes = length - i) > (int)sizeof(newbuf)) bytes = sizeof(newbuf); rc4_encrypt(&encrypt_state, buf + i, newbuf, (size_t)bytes); fwrite(newbuf, (size_t)bytes, 1, out); } } else if (PSLevel) #ifdef HTMLDOC_ASCII85 ps_ascii85(out, buf, length); #else ps_hex(out, buf, length); #endif // HTMLDOC_ASCII85 else fwrite(buf, (size_t)length, 1, out); }
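/*
 * Illustrative sketch (not part of HTMLDOC): the flate_open_stream(),
 * flate_write(), and flate_close_stream() helpers above drive zlib's
 * streaming deflate API through a fixed-size buffer that is flushed to the
 * output file as it fills.  The minimal standalone program below shows the
 * same open/write/close pattern under the assumption that zlib is linked in;
 * the demo_* names are made up for this example, and the PostScript/PDF
 * encoding and RC4 encryption steps of the real code are intentionally
 * omitted.
 */

#include <stdio.h>
#include <string.h>
#include <zlib.h>

static z_stream demo_strm;              /* Compressor state */
static unsigned char demo_buf[8192];    /* Output buffer, flushed as it fills */

static void demo_flush(FILE *out)       /* Write whatever deflate produced so far */
{
  size_t n = (size_t)((unsigned char *)demo_strm.next_out - demo_buf);

  if (n > 0)
    fwrite(demo_buf, 1, n, out);

  demo_strm.next_out  = demo_buf;
  demo_strm.avail_out = sizeof(demo_buf);
}

static void demo_open(int level)        /* Mirrors flate_open_stream() */
{
  memset(&demo_strm, 0, sizeof(demo_strm));
  deflateInit(&demo_strm, level);

  demo_strm.next_out  = demo_buf;
  demo_strm.avail_out = sizeof(demo_buf);
}

static void demo_write(FILE *out, const unsigned char *data, unsigned length)
{                                       /* Mirrors flate_write() */
  int status;                           /* Deflate status */

  demo_strm.next_in  = (Bytef *)data;   /* C-style cast drops const, as zlib expects */
  demo_strm.avail_in = length;

  while (demo_strm.avail_in > 0)
  {
    if (demo_strm.avail_out < sizeof(demo_buf) / 8)
      demo_flush(out);                  /* Make room before compressing more */

    status = deflate(&demo_strm, Z_NO_FLUSH);

    if (status < Z_OK && status != Z_BUF_ERROR)
      break;                            /* Hard error; stop feeding input */
  }
}

static void demo_close(FILE *out)       /* Mirrors flate_close_stream() */
{
  int status;                           /* Deflate status */

  while ((status = deflate(&demo_strm, Z_FINISH)) != Z_STREAM_END)
  {
    if (status < Z_OK && status != Z_BUF_ERROR)
      break;                            /* Hard error; give up */

    demo_flush(out);
  }

  demo_flush(out);
  deflateEnd(&demo_strm);
}

int main(void)                          /* Compress a short string to stdout */
{
  const char *text = "Hello, deflate stream!";

  demo_open(Z_BEST_COMPRESSION);
  demo_write(stdout, (const unsigned char *)text, (unsigned)strlen(text));
  demo_close(stdout);                   /* Raw deflate stream goes to stdout */
  return 0;
}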
/* * PostScript + PDF output routines for HTMLDOC, a HTML document processing * program. * * Just in case you didn't notice it, this file is too big; it will be * broken into more manageable pieces once we make all of the output * "drivers" into classes... * * Copyright © 2011-2021 by Michael R Sweet. * Copyright © 1997-2010 by Easy Software Products. All rights reserved. * * This program is free software. Distribution and use rights are outlined in * the file "COPYING". */ /* * Include necessary headers. */ /* * The GCC compiler on HP-UX has a nasty habit of incorrectly "fixing" * the vmtypes.h header file provided with HP-UX. The following * conditional magic makes sure that "page_t" (which we use in our * code) is not defined... */ #ifdef __hpux # define page_t hpux_page_t #endif // __hpux /*#define DEBUG*/ #include "htmldoc.h" #include "markdown.h" #include "md5-private.h" #define md5_append _cupsMD5Append #define md5_finish _cupsMD5Finish #define md5_init _cupsMD5Init typedef unsigned char md5_byte_t; #define md5_state_t _cups_md5_state_t #include "rc4.h" #include <stdarg.h> #include <ctype.h> #include <time.h> #include <math.h> #ifdef WIN32 # include <io.h> #else # include <unistd.h> #endif // WIN32 #include <fcntl.h> #include <zlib.h> extern "C" { /* Workaround for JPEG header problems... */ #include <jpeglib.h> /* JPEG/JFIF image definitions */ } #ifdef __hpux # undef page_t #endif // __hpux /* * Output options... */ #define HTMLDOC_ASCII85 //#define HTMLDOC_INTERPOLATION #define HTMLDOC_PRODUCER "htmldoc " SVERSION " Copyright 2011-2019 by Michael R Sweet" /* * Constants... */ #define RENDER_TEXT 0 /* Text fragment */ #define RENDER_IMAGE 1 /* Image */ #define RENDER_BOX 2 /* Box */ #define RENDER_LINK 3 /* Hyperlink */ #define RENDER_BG 4 /* Background image */ /* * Structures... */ typedef struct render_str /**** Render entity structure ****/ { struct render_str *prev; /* Previous rendering entity */ struct render_str *next; /* Next rendering entity */ int type; /* Type of entity */ float x, /* Position in points */ y, /* ... */ width, /* Size in points */ height; /* ... */ union { struct { int typeface, /* Typeface for text */ style; /* Style of text */ float size; /* Size of text in points */ float spacing; /* Inter-character spacing */ float rgb[3]; /* Color of text */ uchar buffer[1]; /* String buffer */ } text; image_t *image; /* Image pointer */ float box[3]; /* Box color */ uchar link[1]; /* Link URL */ } data; } render_t; typedef struct /**** Named link position structure */ { short page, /* Page # */ top; /* Top position */ uchar name[124]; /* Reference name */ } link_t; typedef struct //// Page information { int width, // Width of page in points length, // Length of page in points left, // Left margin in points right, // Right margin in points top, // Top margin in points bottom, // Bottom margin in points duplex, // Duplex this page? landscape; // Landscape orientation? 
render_t *start, // First render element *end; // Last render element uchar *url, // URL/file *chapter, // Chapter text *heading; // Heading text tree_t *headnode; // Heading node uchar *header[3], // Headers for regular pages *header1[3], // Headers for first pages *footer[3]; // Footers for all pages char media_color[64], // Media color media_type[64]; // Media type int media_position; // Media position char page_text[64]; // Page number for TOC image_t *background_image; // Background image float background_color[3]; // Background color // Number-up support int nup; // Number up pages int outpage; // Output page # float outmatrix[2][3]; // Transform matrix } page_t; typedef struct //// Output page info { int nup; // Number up pages int pages[16]; // Pages on this output page int annot_object; // Annotation object } outpage_t; /* * Local globals... */ static time_t doc_time; // Current time static struct tm doc_date; // Current date static uchar *current_url = NULL; static int title_page; static int chapter, chapter_outstarts[MAX_CHAPTERS], chapter_outends[MAX_CHAPTERS], chapter_starts[MAX_CHAPTERS], chapter_ends[MAX_CHAPTERS]; static size_t num_headings = 0, alloc_headings = 0; static int *heading_pages = NULL, *heading_tops = NULL; static size_t num_pages = 0, alloc_pages = 0; static page_t *pages = NULL; static tree_t *current_heading; static size_t num_outpages = 0; static outpage_t *outpages = NULL; static size_t num_links = 0, alloc_links = 0; static link_t *links = NULL; static uchar list_types[16]; static int list_values[16]; static char stdout_filename[256]; static size_t num_objects = 0, alloc_objects = 0; static int *objects = NULL, root_object, info_object, outline_object, pages_object, names_object, encrypt_object, font_objects[TYPE_MAX * STYLE_MAX]; static uchar *doc_title = NULL; static image_t *logo_image = NULL; static float logo_width, logo_height; static image_t *lh_image = NULL; static float lh_width, lh_height; static image_t *hfimage[MAX_HF_IMAGES]; static float hfimage_width[MAX_HF_IMAGES], hfimage_height[MAX_HF_IMAGES]; static float maxhfheight; static image_t *background_image = NULL; static float background_color[3] = { 1.0, 1.0, 1.0 }, link_color[3] = { 0.0, 0.0, 1.0 }; static int render_typeface, render_style; static float render_size, render_rgb[3], render_x, render_y, render_startx, render_spacing; static int compressor_active = 0; static z_stream compressor; static uchar comp_buffer[8192]; static uchar encrypt_key[16]; static int encrypt_len; static rc4_context_t encrypt_state; static md5_byte_t file_id[16]; /* * Local functions... 
*/ extern "C" { typedef int (*compare_func_t)(const void *, const void *); } static void pspdf_debug_stats(); static void pspdf_transform_coords(page_t *p, float &x, float &y); static void pspdf_transform_page(int outpage, int pos, int page); static void pspdf_prepare_outpages(); static void pspdf_prepare_page(int page); static void pspdf_prepare_heading(int page, int print_page, uchar **format, int y, char *page_text, int page_len); static void ps_write_document(uchar *author, uchar *creator, uchar *copyright, uchar *keywords, uchar *subject, uchar *lang); static void ps_write_outpage(FILE *out, int outpage); static void ps_write_page(FILE *out, int page); static void ps_write_background(FILE *out); static void pdf_write_document(uchar *author, uchar *creator, uchar *copyright, uchar *keywords, uchar *subject, uchar *lang, tree_t *doc, tree_t *toc); static void pdf_write_outpage(FILE *out, int outpage); static void pdf_write_page(FILE *out, int page); static void pdf_write_resources(FILE *out, int page); #ifdef DEBUG_TOC static void pdf_text_contents(FILE *out, tree_t *toc, int indent = 0); #endif // DEBUG_TOC static void pdf_write_contents(FILE *out, tree_t *toc, int parent, int prev, int next, int *heading); static void pdf_write_files(FILE *out, tree_t *doc); static void pdf_write_links(FILE *out); static void pdf_write_names(FILE *out); static int pdf_count_headings(tree_t *toc); static int pdf_start_object(FILE *out, int array = 0); static void pdf_start_stream(FILE *out); static void pdf_end_object(FILE *out); static void encrypt_init(void); static void flate_open_stream(FILE *out); static void flate_close_stream(FILE *out); static void flate_puts(const char *s, FILE *out); static void flate_printf(FILE *out, const char *format, ...); static void flate_write(FILE *out, uchar *inbuf, int length, int flush=0); static void parse_contents(tree_t *t, float left, float width, float bottom, float length, float *y, int *page, int *heading, tree_t *chap); static void parse_doc(tree_t *t, float *left, float *right, float *bottom, float *top, float *x, float *y, int *page, tree_t *cpara, int *needspace); static void parse_heading(tree_t *t, float left, float width, float bottom, float length, float *x, float *y, int *page, int needspace); static void parse_paragraph(tree_t *t, float left, float width, float bottom, float length, float *x, float *y, int *page, int needspace); static void parse_pre(tree_t *t, float left, float width, float bottom, float length, float *x, float *y, int *page, int needspace); static void parse_table(tree_t *t, float left, float width, float bottom, float length, float *x, float *y, int *page, int needspace); static void parse_list(tree_t *t, float *left, float *width, float *bottom, float *length, float *x, float *y, int *page, int needspace); static void init_list(tree_t *t); static void parse_comment(tree_t *t, float *left, float *width, float *bottom, float *length, float *x, float *y, int *page, tree_t *para, int needspace); static void check_pages(int page); static void add_link(uchar *name, int page, int top); static link_t *find_link(uchar *name); static int compare_links(link_t *n1, link_t *n2); static void find_background(tree_t *t); static void write_background(int page, FILE *out); static render_t *new_render(int page, int type, double x, double y, double width, double height, void *data, render_t *insert = 0); static float get_cell_size(tree_t *t, float left, float right, float *minwidth, float *prefwidth, float *minheight); static float 
get_table_size(tree_t *t, float left, float right, float *minwidth, float *prefwidth, float *minheight); static tree_t *flatten_tree(tree_t *t); static float get_width(uchar *s, int typeface, int style, int size); static void update_image_size(tree_t *t); static uchar *get_title(tree_t *doc); static FILE *open_file(void); static void set_color(FILE *out, float *rgb); static void set_font(FILE *out, int typeface, int style, float size); static void set_pos(FILE *out, float x, float y); static void write_prolog(FILE *out, int pages, uchar *author, uchar *creator, uchar *copyright, uchar *keywords, uchar *subject); static void ps_hex(FILE *out, uchar *data, int length); #ifdef HTMLDOC_ASCII85 static void ps_ascii85(FILE *out, uchar *data, int length, int eod = 0); #endif // HTMLDOC_ASCII85 static void jpg_init(j_compress_ptr cinfo); static boolean jpg_empty(j_compress_ptr cinfo); static void jpg_term(j_compress_ptr cinfo); static void jpg_setup(FILE *out, image_t *img, j_compress_ptr cinfo); static int compare_rgb(unsigned *rgb1, unsigned *rgb2); static void write_image(FILE *out, render_t *r, int write_obj = 0); static void write_imagemask(FILE *out, render_t *r); static void write_string(FILE *out, uchar *s, int compress); static void write_text(FILE *out, render_t *r); static void write_trailer(FILE *out, int pages, uchar *lang); static int write_type1(FILE *out, typeface_t typeface, style_t style); static void write_utf16(FILE *out, uchar *s); /* * 'pspdf_export()' - Export PostScript/PDF file(s)... */ int pspdf_export(tree_t *document, /* I - Document to export */ tree_t *toc) /* I - Table of contents for document */ { int i, j; /* Looping vars */ const char *title_file; /* Location of title image/file */ uchar *author, /* Author of document */ *creator, /* HTML file creator (Netscape, etc) */ *copyright, /* File copyright */ *docnumber, /* Document number */ *keywords, /* Search keywords */ *subject, /* Subject */ *lang; /* Language */ tree_t *t; /* Title page document tree */ FILE *fp; /* Title page file */ float x, y, /* Current page position */ left, right, /* Left and right margins */ bottom, top, /* Bottom and top margins */ width, /* Width of , author, etc */ height; /* Height of area */ int page, /* Current page # */ pos, /* Current header/footer position */ heading, /* Current heading # */ toc_duplex, /* Duplex TOC pages? */ toc_landscape, /* Do TOC in landscape? */ toc_width, /* Width of TOC pages */ toc_length, /* Length of TOC pages */ toc_left, /* TOC page margins */ toc_right, toc_bottom, toc_top; image_t *timage; /* Title image */ float timage_width, /* Title image width */ timage_height; /* Title image height */ render_t *r; /* Rendering structure... */ float rgb[3]; /* Text color */ int needspace; /* Need whitespace */ /* * Figure out the printable area of the output page... */ if (Landscape) { PagePrintWidth = PageLength - PageLeft - PageRight; PagePrintLength = PageWidth - PageTop - PageBottom; } else { PagePrintWidth = PageWidth - PageLeft - PageRight; PagePrintLength = PageLength - PageTop - PageBottom; } toc_width = PageWidth; toc_length = PageLength; toc_left = PageLeft; toc_right = PageRight; toc_bottom = PageBottom; toc_top = PageTop; toc_landscape = Landscape; toc_duplex = PageDuplex; /* * Get the document title, author, etc... 
*/ doc_title = get_title(document); author = htmlGetMeta(document, (uchar *)"author"); creator = htmlGetMeta(document, (uchar *)"generator"); copyright = htmlGetMeta(document, (uchar *)"copyright"); docnumber = htmlGetMeta(document, (uchar *)"docnumber"); keywords = htmlGetMeta(document, (uchar *)"keywords"); subject = htmlGetMeta(document, (uchar *)"subject"); lang = htmlGetMeta(document, (uchar *)"lang"); logo_image = image_load(LogoImage, !OutputColor); lh_image = image_load(Letterhead, !OutputColor); maxhfheight = 0.0f; if (docnumber == NULL) docnumber = htmlGetMeta(document, (uchar *)"version"); if (lh_image != NULL) { lh_width = (float)(lh_image->width * PagePrintWidth / _htmlBrowserWidth); lh_height = (float)(lh_width * lh_image->height / lh_image->width); if (lh_height > maxhfheight) maxhfheight = lh_height; } else lh_width = lh_height = 0.0f; if (logo_image != NULL) { logo_width = (float)(logo_image->width * PagePrintWidth / _htmlBrowserWidth); logo_height = (float)(logo_width * logo_image->height / logo_image->width); if (logo_height > (2.0 * HeadFootSize)) { // Issue #273: too large logo image will overlap the body text, so cap // the height of the logo image to the header/footer size... // // Issue #303: regression prevents using header/footer images for special // underlining/etc. effects. logo_height = (float)(2.0 * HeadFootSize); logo_width = logo_height * logo_image->width / logo_image->height; } if (logo_height > maxhfheight) maxhfheight = logo_height; } else logo_width = logo_height = 0.0f; for (int hfi = 0; hfi < MAX_HF_IMAGES; hfi ++) { hfimage[hfi] = image_load(HFImage[hfi], !OutputColor); if (hfimage[hfi]) { hfimage_width[hfi] = (float)(hfimage[hfi]->width * PagePrintWidth / _htmlBrowserWidth); hfimage_height[hfi] = (float)(hfimage_width[hfi] * hfimage[hfi]->height / hfimage[hfi]->width); if (hfimage_height[hfi] > (2.0 * HeadFootSize)) { // Issue #273: too large logo image will overlap the body text, so cap // the height of the logo image to the header/footer size... // // Issue #303: regression prevents using header/footer images for special // underlining/etc. effects. hfimage_height[hfi] = (float)(2.0 * HeadFootSize); hfimage_width[hfi] = hfimage_height[hfi] * hfimage[hfi]->width / hfimage[hfi]->height; } if (hfimage_height[hfi] > maxhfheight) maxhfheight = hfimage_height[hfi]; } else hfimage_width[hfi] = hfimage_height[hfi] = 0.0f; } find_background(document); get_color((uchar *)LinkColor, link_color); /* * Initialize page rendering variables... */ num_pages = 0; alloc_pages = 0; pages = NULL; memset(list_types, 0267, sizeof(list_types)); memset(list_values, 0, sizeof(list_values)); memset(chapter_starts, -1, sizeof(chapter_starts)); memset(chapter_ends, -1, sizeof(chapter_starts)); /* * Get the current date, using the SOURCE_DATE_EPOCH environment variable, if * present, for the number of seconds since the epoch - this enables * reproducible builds (Issue #310). 
*/ const char *source_date_epoch = getenv("SOURCE_DATE_EPOCH"); if (!source_date_epoch || (doc_time = (time_t)strtol(source_date_epoch, NULL, 10)) <= 0) doc_time = time(NULL); gmtime_r(&doc_time, &doc_date); num_headings = 0; alloc_headings = 0; heading_pages = NULL; heading_tops = NULL; num_links = 0; alloc_links = 0; links = NULL; num_pages = 0; DEBUG_printf(("pspdf_export: TitlePage = %d, TitleImage = \"%s\"\n", TitlePage, TitleImage)); if (TitlePage) { const char *title_ext = file_extension(TitleImage); #ifdef WIN32 if (TitleImage[0] && stricmp(title_ext, "bmp") != 0 && stricmp(title_ext, "gif") != 0 && stricmp(title_ext, "jpg") != 0 && stricmp(title_ext, "png") != 0) #else if (TitleImage[0] && strcmp(title_ext, "bmp") != 0 && strcmp(title_ext, "gif") != 0 && strcmp(title_ext, "jpg") != 0 && strcmp(title_ext, "png") != 0) #endif // WIN32 { DEBUG_printf(("pspdf_export: Generating a titlepage using \"%s\"\n", TitleImage)); // Find the title file... if ((title_file = file_find(Path, TitleImage)) == NULL) { progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to find title file \"%s\"!", TitleImage); return (1); } // Write a title page from HTML source... if ((fp = fopen(title_file, "rb")) == NULL) { progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to open title file \"%s\" - %s!", TitleImage, strerror(errno)); return (1); } #ifdef _WIN32 if (!stricmp(title_ext, "md")) #else if (!strcmp(title_ext, "md")) #endif // _WIN32 t = mdReadFile(NULL, fp, file_directory(TitleImage)); else t = htmlReadFile(NULL, fp, file_directory(TitleImage)); htmlFixLinks(t, t, (uchar *)file_directory(TitleImage)); fclose(fp); page = 0; title_page = 1; current_heading = NULL; x = 0.0f; bottom = 0.0f; top = PagePrintLength; y = top; needspace = 0; left = 0.0f; right = PagePrintWidth; parse_doc(t, &left, &right, &bottom, &top, &x, &y, &page, NULL, &needspace); if (PageDuplex && (num_pages & 1)) check_pages(num_pages); htmlDeleteTree(t); } else { /* * Create a standard title page... 
*/ if ((timage = image_load(TitleImage, !OutputColor)) != NULL) { timage_width = (float)(timage->width * PagePrintWidth / _htmlBrowserWidth); timage_height = (float)(timage_width * timage->height / timage->width); } else timage_width = timage_height = 0.0f; check_pages(0); if (PageDuplex) check_pages(1); height = 0.0; if (timage != NULL) height += timage_height + _htmlSpacings[SIZE_P]; if (doc_title != NULL) height += _htmlSpacings[SIZE_H1] + _htmlSpacings[SIZE_P]; if (author != NULL) height += _htmlSpacings[SIZE_P]; if (docnumber != NULL) height += _htmlSpacings[SIZE_P]; if (copyright != NULL) height += _htmlSpacings[SIZE_P]; y = 0.5f * (PagePrintLength + height); if (timage != NULL) { new_render(0, RENDER_IMAGE, 0.5f * (PagePrintWidth - timage_width), y - timage_height, timage_width, timage_height, timage); y -= timage_height + _htmlSpacings[SIZE_P]; } get_color(_htmlTextColor, rgb); if (doc_title != NULL) { width = get_width(doc_title, _htmlHeadingFont, STYLE_BOLD, SIZE_H1); r = new_render(0, RENDER_TEXT, (PagePrintWidth - width) * 0.5f, y - _htmlSpacings[SIZE_H1], width, _htmlSizes[SIZE_H1], doc_title); r->data.text.typeface = _htmlHeadingFont; r->data.text.style = STYLE_BOLD; r->data.text.size = (float)_htmlSizes[SIZE_H1]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); y -= _htmlSpacings[SIZE_H1]; if (docnumber != NULL) { width = get_width(docnumber, _htmlBodyFont, STYLE_NORMAL, SIZE_P); r = new_render(0, RENDER_TEXT, (PagePrintWidth - width) * 0.5f, y - _htmlSpacings[SIZE_P], width, _htmlSizes[SIZE_P], docnumber); r->data.text.typeface = _htmlBodyFont; r->data.text.style = STYLE_NORMAL; r->data.text.size = (float)_htmlSizes[SIZE_P]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); y -= _htmlSpacings[SIZE_P]; } y -= _htmlSpacings[SIZE_P]; } if (author != NULL) { width = get_width(author, _htmlBodyFont, STYLE_NORMAL, SIZE_P); r = new_render(0, RENDER_TEXT, (PagePrintWidth - width) * 0.5f, y - _htmlSpacings[SIZE_P], width, _htmlSizes[SIZE_P], author); r->data.text.typeface = _htmlBodyFont; r->data.text.style = STYLE_NORMAL; r->data.text.size = (float)_htmlSizes[SIZE_P]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); y -= _htmlSpacings[SIZE_P]; } if (copyright != NULL) { width = get_width(copyright, _htmlBodyFont, STYLE_NORMAL, SIZE_P); r = new_render(0, RENDER_TEXT, (PagePrintWidth - width) * 0.5f, y - _htmlSpacings[SIZE_P], width, _htmlSizes[SIZE_P], copyright); r->data.text.typeface = _htmlBodyFont; r->data.text.style = STYLE_NORMAL; r->data.text.size = (float)_htmlSizes[SIZE_P]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); } } for (page = 0; page < (int)num_pages; page ++) strlcpy((char *)pages[page].page_text, (page & 1) ? "eltit" : "title", sizeof(pages[page].page_text)); } else page = 0; /* * Parse the document... */ if (OutputType == OUTPUT_BOOK) chapter = 0; else { chapter = 1; TocDocCount = 1; chapter_starts[1] = num_pages; } title_page = 0; current_heading = NULL; x = 0.0f; needspace = 0; left = 0.0f; right = PagePrintWidth; // Adjust top margin as needed... 
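// Reserve space for the tallest header element: headers that use the $IMAGE,
// $HFIMAGE, or $LETTERHEAD substitutions get maxhfheight + HeadFootSize (or
// 2 * HeadFootSize when no header/footer image is taller than HeadFootSize),
// while plain text headers get 2 * HeadFootSize; the footer margin just below
// is computed the same way.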
float adjust, image_adjust, temp_adjust; if (maxhfheight > HeadFootSize) image_adjust = (float)(maxhfheight + HeadFootSize); else image_adjust = (float)(2 * HeadFootSize); for (adjust = 0.0, pos = 0; pos < 3; pos ++) { if (Header[pos] && (strstr(Header[pos], "$IMAGE") != NULL || strstr(Header[pos], "$HFIMAGE") != NULL || strstr(Header[pos], "$LETTERHEAD") != NULL)) temp_adjust = image_adjust; else if (Header1[pos] && (strstr(Header1[pos], "$IMAGE") != NULL || strstr(Header1[pos], "$HFIMAGE") != NULL || strstr(Header1[pos], "$LETTERHEAD") != NULL)) temp_adjust = image_adjust; else if (Header[pos] || Header1[pos]) temp_adjust = (float)(2 * HeadFootSize); else temp_adjust = 0.0; if (temp_adjust > adjust) adjust = temp_adjust; } top = PagePrintLength - adjust; // Adjust bottom margin as needed... for (adjust = 0.0, pos = 0; pos < 3; pos ++) { if (Footer[pos] && (strstr(Footer[pos], "$IMAGE") != NULL || strstr(Footer[pos], "$HFIMAGE") != NULL || strstr(Footer[pos], "$LETTERHEAD") != NULL)) temp_adjust = image_adjust; else if (Footer[pos]) temp_adjust = (float)(2 * HeadFootSize); else temp_adjust = 0.0; if (temp_adjust > adjust) adjust = temp_adjust; } bottom = adjust; y = top; parse_doc(document, &left, &right, &bottom, &top, &x, &y, &page, NULL, &needspace); if (PageDuplex && (num_pages & 1)) { if (PSLevel == 0) chapter_ends[chapter] = num_pages - 1; check_pages(num_pages); if (PSLevel > 0) chapter_ends[chapter] = num_pages - 1; } else chapter_ends[chapter] = num_pages - 1; for (chapter = 1; chapter <= TocDocCount; chapter ++) for (page = chapter_starts[chapter]; page <= chapter_ends[chapter]; page ++) pspdf_prepare_page(page); /* * Parse the table-of-contents if necessary... */ if (TocLevels > 0 && num_headings > 0) { // Restore default page size, etc... PageWidth = toc_width; PageLength = toc_length; PageLeft = toc_left; PageRight = toc_right; PageBottom = toc_bottom; PageTop = toc_top; Landscape = toc_landscape; PageDuplex = toc_duplex; if (Landscape) { PagePrintWidth = PageLength - PageLeft - PageRight; PagePrintLength = PageWidth - PageTop - PageBottom; } else { PagePrintWidth = PageWidth - PageLeft - PageRight; PagePrintLength = PageLength - PageTop - PageBottom; } // Adjust top margin as needed... for (pos = 0; pos < 3; pos ++) if (TocHeader[pos]) break; if (pos == 3) top = PagePrintLength; else if (maxhfheight > HeadFootSize) top = (float)(PagePrintLength - maxhfheight - HeadFootSize); else top = (float)(PagePrintLength - 2 * HeadFootSize); // Adjust bottom margin as needed... for (pos = 0; pos < 3; pos ++) if (TocFooter[pos]) break; if (pos == 3) bottom = 0.0f; else if (maxhfheight > HeadFootSize) bottom = (float)(maxhfheight + HeadFootSize); else bottom = (float)(2 * HeadFootSize); y = 0.0; page = num_pages - 1; heading = 0; chapter_starts[0] = num_pages; chapter = 0; parse_contents(toc, 0, PagePrintWidth, bottom, top, &y, &page, &heading, 0); if (PageDuplex && (num_pages & 1)) check_pages(num_pages); chapter_ends[0] = num_pages - 1; for (page = chapter_starts[0]; page <= chapter_ends[0]; page ++) pspdf_prepare_page(page); } if (TocDocCount > MAX_CHAPTERS) TocDocCount = MAX_CHAPTERS; /* * Do we have any pages? */ if (num_pages > 0 && TocDocCount > 0) { /* * Yes, write the document to disk... 
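 * PostScript output is produced by ps_write_document() and PDF output by
 * pdf_write_document(); both consume the outpages array built by
 * pspdf_prepare_outpages() just below.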
*/ pspdf_prepare_outpages(); pspdf_debug_stats(); progress_error(HD_ERROR_NONE, "PAGES: %d", (int)num_outpages); if (PSLevel > 0) ps_write_document(author, creator, copyright, keywords, subject, lang); else pdf_write_document(author, creator, copyright, keywords, subject, lang, document, toc); } else { /* * No, show an error... */ pspdf_debug_stats(); progress_error(HD_ERROR_NO_PAGES, "Error: no pages generated! (did you remember to use webpage mode?"); } /* * Free memory... */ if (doc_title != NULL) free(doc_title); if (alloc_links) { free(links); num_links = 0; alloc_links = 0; links = NULL; } for (i = 0; i < (int)num_pages; i ++) { if ((i == 0 || pages[i].chapter != pages[i - 1].chapter) && pages[i].chapter) free(pages[i].chapter); if ((i == 0 || pages[i].heading != pages[i - 1].heading) && pages[i].heading) free(pages[i].heading); if (!pages[i].heading) continue; for (j = 0; j < 3; j ++) { if (!pages[i].header[j]) continue; if (i == 0 || pages[i].header[j] != pages[i - 1].header[j]) free(pages[i].header[j]); } for (j = 0; j < 3; j ++) { if (!pages[i].header1[j]) continue; if (i == 0 || pages[i].header1[j] != pages[i - 1].header1[j]) free(pages[i].header1[j]); } for (j = 0; j < 3; j ++) { if (!pages[i].footer[j]) continue; if (i == 0 || pages[i].footer[j] != pages[i - 1].footer[j]) free(pages[i].footer[j]); } } for (i = 0; i < 3; i ++) { Header[i] = NULL; Header1[i] = NULL; Footer[i] = NULL; TocHeader[i] = NULL; TocFooter[i] = NULL; } if (alloc_pages) { free(pages); free(outpages); num_pages = 0; alloc_pages = 0; pages = NULL; } if (alloc_headings) { free(heading_pages); free(heading_tops); num_headings = 0; alloc_headings = 0; heading_pages = NULL; heading_tops = NULL; } return (0); } // // 'pspdf_debug_stats()' - Display debug statistics for render memory use. // static void pspdf_debug_stats() { const char *debug; // HTMLDOC_DEBUG env var int i; // Looping var render_t *r; // Render node int bytes; // Number of bytes if ((debug = getenv("HTMLDOC_DEBUG")) == NULL || (strstr(debug, "all") == NULL && strstr(debug, "memory") == NULL)) return; bytes = alloc_headings * sizeof(int) * 2; bytes += alloc_pages * sizeof(page_t); for (i = 0; i < (int)num_pages; i ++) { for (r = pages[i].start; r != NULL; r = r->next) { bytes += sizeof(render_t); if (r->type == RENDER_TEXT) bytes += strlen((char *)r->data.text.buffer); } } bytes += num_outpages * sizeof(outpage_t); bytes += alloc_links * sizeof(link_t); bytes += alloc_objects * sizeof(int); progress_error(HD_ERROR_NONE, "DEBUG: Render Data = %d kbytes", (bytes + 1023) / 1024); } /* * 'pspdf_transform_coords()' - Transform page coordinates. */ static void pspdf_transform_coords(page_t *p, // I - Page float &x, // IO - X coordinate float &y) // IO - Y coordinate { float tx, ty; // Temporary X and Y tx = x; ty = y; x = tx * p->outmatrix[0][0] + ty * p->outmatrix[0][1] + p->outmatrix[0][2]; y = tx * p->outmatrix[1][0] + ty * p->outmatrix[1][1] + p->outmatrix[1][2]; } /* * 'pspdf_transform_page()' - Transform a page. 
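 * Builds the 2x3 matrix that scales and positions input page 'page' into slot
 * 'pos' of an n-up output page; for 4-up, for instance, the page is scaled to
 * at most half the base page width and offset by (x * pw / 2, y * pl / 2).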
*/ static void pspdf_transform_page(int outpage, // I - Output page int pos, // I - Position on page int page) // I - Input page { outpage_t *op; // Current output page page_t *bp; // Current base page page_t *p; // Current input page int x, y; // Position on output page double w, l, // Width and length of subpage tx, ty; // Translation values for subpage double pw, pl; // Printable width and length of full page DEBUG_printf(("pspdf_transform_page(outpage = %d, pos = %d, page = %d)\n", outpage, pos, page)); if (pos > 15) progress_error(HD_ERROR_INTERNAL_ERROR, "Internal error: pos = %d", pos); op = outpages + outpage; op->pages[pos] = page; bp = pages + op->pages[0]; p = pages + page; p->outpage = outpage; pw = bp->width; pl = bp->length; DEBUG_printf((" width = %d, length = %d\n", p->width, p->length)); switch (op->nup) { default : case 1 : p->outmatrix[0][0] = 1.0f; p->outmatrix[1][0] = 0.0f; p->outmatrix[0][1] = 0.0f; p->outmatrix[1][1] = 1.0f; p->outmatrix[0][2] = 0.0f; p->outmatrix[1][2] = 0.0f; break; case 2 : x = pos & 1; l = pw; w = l * p->width / p->length; if (w > (pl * 0.5f)) { w = pl * 0.5f; l = w * p->length / p->width; } tx = 0.5 * (pl * 0.5 - w); ty = 0.5 * (pw - l); p->outmatrix[0][0] = 0.0f; p->outmatrix[1][0] = (float)(w / p->width); p->outmatrix[0][1] = (float)(-w / p->width); p->outmatrix[1][1] = 0.0f; p->outmatrix[0][2] = (float)(ty + pl * w / p->width); p->outmatrix[1][2] = (float)(tx + x * pl / 2); break; case 4 : x = pos & 1; y = 1 - pos / 2; w = pw * 0.5; l = w * p->length / p->width; if (l > (pl * 0.5)) { l = pl * 0.5; w = l * p->width / p->length; } tx = 0.5 * (pw * 0.5 - w); ty = 0.5 * (pl * 0.5 - l); p->outmatrix[0][0] = (float)(w / p->width); p->outmatrix[1][0] = 0.0f; p->outmatrix[0][1] = 0.0f; p->outmatrix[1][1] = (float)(w / p->width); p->outmatrix[0][2] = (float)(tx + x * pw / 2); p->outmatrix[1][2] = (float)(ty + y * pl / 2); break; case 6 : x = pos % 3; y = pos / 3; l = pw * 0.5; w = l * p->width / p->length; if (w > (pl * 0.333f)) { w = pl * 0.333f; l = w * p->length / p->width; } tx = 0.5 * (pl * 0.333 - w); ty = 0.5 * (pw * 0.5 - l); p->outmatrix[0][0] = 0.0f; p->outmatrix[1][0] = (float)(w / p->width); p->outmatrix[0][1] = (float)(-w / p->width); p->outmatrix[1][1] = 0.0f; p->outmatrix[0][2] = (float)(ty + y * pw / 2 + pl * w / p->width); p->outmatrix[1][2] = (float)(tx + x * pl / 3); break; case 9 : x = pos % 3; y = 2 - pos / 3; w = pw * 0.333; l = w * p->length / p->width; if (l > (pl * 0.333)) { l = pl * 0.333; w = l * p->width / p->length; } tx = 0.5 * (pw * 0.333 - w); ty = 0.5 * (pl * 0.333 - l); p->outmatrix[0][0] = (float)(w / p->width); p->outmatrix[1][0] = 0.0f; p->outmatrix[0][1] = 0.0f; p->outmatrix[1][1] = (float)(w / p->width); p->outmatrix[0][2] = (float)(tx + x * pw / 3); p->outmatrix[1][2] = (float)(ty + y * pl / 3); break; case 16 : x = pos & 3; y = 3 - pos / 4; w = pw * 0.25; l = w * p->length / p->width; if (l > (pl * 0.25)) { l = pl * 0.25; w = l * p->width / p->length; } tx = 0.5 * (pw * 0.25 - w); ty = 0.5 * (pl * 0.25 - l); p->outmatrix[0][0] = (float)(w / p->width); p->outmatrix[1][0] = 0.0f; p->outmatrix[0][1] = 0.0f; p->outmatrix[1][1] = (float)(w / p->width); p->outmatrix[0][2] = (float)(tx + x * pw / 4); p->outmatrix[1][2] = (float)(ty + y * pl / 4); break; } } /* * 'pspdf_prepare_outpages()' - Prepare output pages... 
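 * Packs input pages into output pages according to each page's number-up
 * value, walking the title pages first and then every chapter, and starting a
 * new output page whenever the nup value changes.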
*/ static void pspdf_prepare_outpages() { int c, i, j; /* Looping vars */ int nup; /* Current number-up value */ page_t *page; /* Current page */ outpage_t *outpage; /* Current output page */ // Allocate an output page array... outpages = (outpage_t *)malloc(sizeof(outpage_t) * num_pages); memset(outpages, -1, sizeof(outpage_t) * num_pages); num_outpages = 0; outpage = outpages; // Handle the title page, as needed... if (TitlePage) { for (i = 0, j = 0, nup = -1, page = pages; i < chapter_starts[1]; i ++, page ++) { if (nup != page->nup) { if (j) { // Break the current output page... outpage ++; num_outpages ++; } nup = page->nup; j = 0; } if (!j) outpage->nup = nup; pspdf_transform_page(num_outpages, j, i); j ++; if (j >= nup) { j = 0; outpage ++; num_outpages ++; } } if (j) { // Break the current output page... outpage ++; num_outpages ++; } } // Loop through each chapter, adding pages as needed... if (OutputType == OUTPUT_BOOK && TocLevels > 0) c = 0; else c = 1; for (; c <= TocDocCount; c ++) { if (chapter_starts[c] < 0) continue; chapter_outstarts[c] = num_outpages; for (i = chapter_starts[c], j = 0, nup = -1, page = pages + i; i <= chapter_ends[c] && num_outpages < num_pages; i ++, page ++) { if (nup != page->nup) { if (j) { // Break the current output page... outpage ++; num_outpages ++; } nup = page->nup; j = 0; } if (!j) outpage->nup = nup; pspdf_transform_page(num_outpages, j, i); j ++; if (j >= nup) { j = 0; outpage ++; num_outpages ++; } } if (j) { // Break the current output page... outpage ++; num_outpages ++; } chapter_outends[c] = num_outpages; } #ifdef DEBUG for (c = 0; c <= TocDocCount; c ++) printf("chapter_outstarts[%d] = %d, chapter_outends[%d] = %d\n", c, chapter_outstarts[c], c, chapter_outends[c]); printf("num_outpages = %d\n", (int)num_outpages); for (i = 0, outpage = outpages; i < (int)num_outpages; i ++, outpage ++) { printf("outpage[%d]:\tnup=%d, pages=[", i, outpage->nup); for (j = 0; j < outpage->nup; j ++) printf(" %d", outpage->pages[j]); puts(" ]"); page = pages + outpage->pages[0]; printf("\t\twidth = %d, length = %d\n", page->width, page->length); } for (c = 0; c <= TocDocCount; c ++) printf("chapter_starts[%d] = %d, chapter_ends[%d] = %d\n", c, chapter_starts[c], c, chapter_ends[c]); for (i = 0; i < (int)num_pages; i ++) printf("pages[%d]->outpage = %d\n", i, pages[i].outpage); for (i = 0; i < (int)num_headings; i ++) printf("heading_pages[%d] = %d\n", i, heading_pages[i]); for (i = 0; i < (int)num_links; i ++) printf("links[%d].name = \"%s\", page = %d\n", i, links[i].name, links[i].page); #endif // DEBUG } /* * 'pspdf_prepare_page()' - Add headers/footers to page before writing... */ static void pspdf_prepare_page(int page) /* I - Page number */ { int print_page; /* Printed page # */ char page_text[64]; /* Page number text */ int top; /* Top of page */ DEBUG_printf(("pspdf_prepare_page(%d)\n", page)); if (page < 0 || page >= num_pages) return; /* * Make a page number; use roman numerals for the table of contents * and arabic numbers for all others... */ if (chapter == 0 && OutputType == OUTPUT_BOOK) { print_page = page - chapter_starts[0] + 1; strlcpy(page_text, format_number(print_page, 'i'), sizeof(page_text)); } else if (chapter < 0) { print_page = 0; // Safe because page_text is more than 6 chars strlcpy(page_text, (page & 1) ? 
(char *)"eltit" : (char *)"title", sizeof(page_text)); } else { print_page = page - chapter_starts[1] + 1; strlcpy(page_text, format_number(print_page, '1'), sizeof(page_text)); } DEBUG_printf(("BEFORE page %d page_text is \"%s\"...\n", page, page_text)); DEBUG_printf((" header[0] = \"%s\"\n", pages[page].header[0])); DEBUG_printf((" header[1] = \"%s\"\n", pages[page].header[1])); DEBUG_printf((" header[2] = \"%s\"\n", pages[page].header[2])); /* * Add page headings... */ if (pages[page].landscape) { PagePrintWidth = pages[page].length - pages[page].right - pages[page].left; PagePrintLength = pages[page].width - pages[page].top - pages[page].bottom; } else { PagePrintWidth = pages[page].width - pages[page].right - pages[page].left; PagePrintLength = pages[page].length - pages[page].top - pages[page].bottom; } top = (int)(PagePrintLength - HeadFootSize); if (chapter == 0) { /* * Add table-of-contents header & footer... */ pspdf_prepare_heading(page, print_page, pages[page].header, top, page_text, sizeof(page_text)); pspdf_prepare_heading(page, print_page, pages[page].footer, 0, page_text, sizeof(page_text)); } else if (chapter > 0 && !title_page) { /* * Add chapter header & footer... */ if (page > chapter_starts[chapter] || OutputType != OUTPUT_BOOK) pspdf_prepare_heading(page, print_page, pages[page].header, top, page_text, sizeof(page_text)); else pspdf_prepare_heading(page, print_page, pages[page].header1, top, page_text, sizeof(page_text)); pspdf_prepare_heading(page, print_page, pages[page].footer, 0, page_text, sizeof(page_text)); } /* * Copy the page number for the TOC... */ strlcpy(pages[page].page_text, page_text, sizeof(pages[page].page_text)); DEBUG_printf(("AFTER page %d page_text is \"%s\"...\n", page, page_text)); } /* * 'pspdf_prepare_heading()' - Add headers/footers to page before writing... */ static void pspdf_prepare_heading(int page, // I - Page number int print_page, // I - Printed page number uchar **format, // I - Page headings int y, // I - Baseline of heading char *page_text, // O - Page number text int page_len) // I - Size of page text { int pos, // Position in heading dir; // Direction of page char *number; // Page number char buffer[1024], // String buffer *bufptr, // Pointer into buffer *formatptr; // Pointer into format string int formatlen; // Length of format command string render_t *temp; // Render structure for titles, etc. DEBUG_printf(("pspdf_prepare_heading(%d, %d, [\"%s\",\"%s\",\"%s\"], %d, %p, %d)\n", page, print_page, format[0], format[1], format[2], y, (void *)page_text, page_len)); /* * Add page headings... */ if (PageDuplex && (page & 1)) { dir = -1; format += 2; } else dir = 1; for (pos = 0; pos < 3; pos ++, format += dir) { /* * Add the appropriate object... */ if (!*format) continue; temp = NULL; if (strncasecmp((char *)*format, "$LOGOIMAGE", 10) == 0 && logo_image) { // Insert the logo image... if (y < (PagePrintLength / 2)) temp = new_render(page, RENDER_IMAGE, 0, y, logo_width, logo_height, logo_image); else // Offset from top temp = new_render(page, RENDER_IMAGE, 0, y + HeadFootSize - logo_height, logo_width, logo_height, logo_image); } else if (strncasecmp((char *)*format, "$LETTERHEAD", 11) == 0 && lh_image) { // Insert the logo image as a letterhead... 
if (y < (PagePrintLength / 2)) temp = new_render(page, RENDER_IMAGE, 0, y, lh_width, lh_height, lh_image); else // Offset from top temp = new_render(page, RENDER_IMAGE, 0, y + HeadFootSize - lh_height, lh_width, lh_height, lh_image); } else if (strncasecmp((char *)*format, "$HFIMAGE", 8) == 0) { int hfi; // Header/footer image index char *hfp; // Pointer into $HFIMAGE hfi = strtol((char*)((*format) + 8), &hfp, 10); if (hfi < 0 || hfi >= MAX_HF_IMAGES || !(isspace(*hfp) || !*hfp)) progress_error(HD_ERROR_BAD_HF_STRING, "Bad $HFIMAGE... substitution on page %d.", page + 1); else { if (y < (PagePrintLength / 2)) temp = new_render(page, RENDER_IMAGE, 0, y, hfimage_width[hfi], hfimage_height[hfi], hfimage[hfi]); else temp = new_render(page, RENDER_IMAGE, 0, y + HeadFootSize - hfimage_height[hfi], hfimage_width[hfi], hfimage_height[hfi], hfimage[hfi]); } } else { // Otherwise format the text... buffer[sizeof(buffer) - 1] = '\0'; for (bufptr = buffer, formatptr = (char *)*format; *formatptr;) { if (*formatptr == '$') { if (formatptr[1] == '$') { if (bufptr < (buffer + sizeof(buffer) - 1)) *bufptr++ = '$'; formatptr += 2; continue; } else if (!formatptr[1]) break; formatptr ++; for (formatlen = 1; isalpha(formatptr[formatlen]); formatlen ++); if (formatlen == 4 && strncasecmp(formatptr, "PAGE", 4) == 0) { if (formatptr[4] == '(' && formatptr[5] && formatptr[6] == ')') { number = format_number(print_page, formatptr[5]); formatptr += 7; } else { number = format_number(print_page, '1'); formatptr += 4; } strlcpy(bufptr, number, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } else if (formatlen == 5 && strncasecmp(formatptr, "PAGES", 5) == 0) { if (formatptr[5] == '(' && formatptr[6] && formatptr[7] == ')') { number = format_number(chapter_ends[TocDocCount] - chapter_starts[1] + 1, formatptr[6]); formatptr += 8; } else { number = format_number(chapter_ends[TocDocCount] - chapter_starts[1] + 1, '1'); formatptr += 5; } strlcpy(bufptr, number, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } else if (formatlen == 11 && strncasecmp(formatptr, "CHAPTERPAGE", 11) == 0) { int chapter_page; chapter_page = print_page - chapter_starts[::chapter] + chapter_starts[1]; if (formatptr[11] == '(' && formatptr[12] && formatptr[13] == ')') { number = format_number(chapter_page, formatptr[12]); formatptr += 14; } else { number = format_number(chapter_page, '1'); formatptr += 11; } strlcpy(bufptr, number, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } else if (formatlen == 12 && strncasecmp(formatptr, "CHAPTERPAGES", 12) == 0) { if (formatptr[12] == '(' && formatptr[13] && formatptr[14] == ')') { number = format_number(chapter_ends[::chapter] - chapter_starts[::chapter] + 1, formatptr[13]); formatptr += 15; } else { number = format_number(chapter_ends[::chapter] - chapter_starts[::chapter] + 1, '1'); formatptr += 12; } strlcpy(bufptr, number, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } else if (formatlen == 5 && strncasecmp(formatptr, "TITLE", 5) == 0) { formatptr += 5; if (doc_title) { strlcpy(bufptr, (char *)doc_title, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } } else if (formatlen == 7 && strncasecmp(formatptr, "CHAPTER", 7) == 0) { formatptr += 7; if (pages[page].chapter) { strlcpy(bufptr, (char *)(pages[page].chapter), sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } } else if (formatlen == 7 && strncasecmp(formatptr, "HEADING", 7) == 0) { formatptr += 7; if 
(pages[page].heading) { strlcpy(bufptr, (char *)(pages[page].heading), sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } } else if (formatlen == 4 && strncasecmp(formatptr, "TIME", 4) == 0) { formatptr += 4; strftime(bufptr, sizeof(buffer) - 1 - (size_t)(bufptr - buffer), "%X", &doc_date); bufptr += strlen(bufptr); } else if (formatlen == 4 && strncasecmp(formatptr, "DATE", 4) == 0) { formatptr += 4; strftime(bufptr, sizeof(buffer) - 1 - (size_t)(bufptr - buffer), "%x", &doc_date); bufptr += strlen(bufptr); } else if (formatlen == 3 && strncasecmp(formatptr, "URL", 3) == 0) { uchar *url = pages[page].url ? pages[page].url : (uchar *)"Unknown"; formatptr += 3; strlcpy(bufptr, (char *)url, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } else { progress_error(HD_ERROR_BAD_HF_STRING, "Bad header/footer $ command on page %d.", page + 1); strlcpy(bufptr, formatptr - 1, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); formatptr += formatlen; } } else if (bufptr < (buffer + sizeof(buffer) - 1)) *bufptr++ = *formatptr++; else break; } *bufptr = '\0'; temp = new_render(page, RENDER_TEXT, 0, y, get_width((uchar *)buffer, HeadFootType, HeadFootStyle, SIZE_P) * HeadFootSize / _htmlSizes[SIZE_P], HeadFootSize, (uchar *)buffer); if (strstr((char *)*format, "$PAGE") || strstr((char *)*format, "$CHAPTERPAGE")) strlcpy(page_text, buffer, (size_t)page_len); } if (temp == NULL) continue; /* * Justify the object... */ switch (pos) { case 0 : /* Left justified */ break; case 1 : /* Centered */ temp->x = (float)((PagePrintWidth - temp->width) * 0.5); break; case 2 : /* Right justified */ temp->x = PagePrintWidth - temp->width; break; } /* * Set the text font and color... */ if (temp->type == RENDER_TEXT) { temp->data.text.typeface = HeadFootType; temp->data.text.style = HeadFootStyle; temp->data.text.size = (float)HeadFootSize; get_color(_htmlTextColor, temp->data.text.rgb); } } } /* * 'ps_write_document()' - Write all render entities to PostScript file(s). */ static void ps_write_document(uchar *author, /* I - Author of document */ uchar *creator, /* I - Application that generated the HTML file */ uchar *copyright, /* I - Copyright (if any) on the document */ uchar *keywords, /* I - Search keywords */ uchar *subject, /* I - Subject */ uchar *lang) /* I - Language */ { FILE *out; /* Output file */ int page; /* Current page # */ int first; /* First chapter */ /* * Write the title page(s)... 
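 * In single-file mode one prolog and trailer wrap the whole job; with
 * OutputFiles set, the title pages and each chapter are written to separate
 * files, each with its own prolog and trailer.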
*/ chapter = -1; out = NULL; if (!OutputFiles) { out = open_file(); if (out == NULL) { progress_error(HD_ERROR_WRITE_ERROR, "Unable to open output file - %s\n", strerror(errno)); return; } write_prolog(out, num_outpages, author, creator, copyright, keywords, subject); } if (OutputType == OUTPUT_BOOK && TocLevels > 0) first = 0; else first = 1; if (TitlePage) { if (OutputFiles) { out = open_file(); write_prolog(out, chapter_outstarts[first], author, creator, copyright, keywords, subject); } for (page = 0; page < chapter_outstarts[first]; page ++) ps_write_outpage(out, page); if (OutputFiles) { write_trailer(out, 0, lang); progress_error(HD_ERROR_NONE, "BYTES: %ld", ftell(out)); fclose(out); } } for (chapter = first; chapter <= TocDocCount; chapter ++) { if (chapter_starts[chapter] < 0) continue; if (OutputFiles) { out = open_file(); if (out == NULL) { progress_error(HD_ERROR_WRITE_ERROR, "Unable to create output file - %s\n", strerror(errno)); return; } write_prolog(out, chapter_outends[chapter] - chapter_outstarts[chapter], author, creator, copyright, keywords, subject); } for (page = chapter_outstarts[chapter]; page < chapter_outends[chapter]; page ++) ps_write_outpage(out, page); /* * Close the output file as necessary... */ if (OutputFiles) { write_trailer(out, 0, lang); progress_error(HD_ERROR_NONE, "BYTES: %ld", ftell(out)); fclose(out); } } /* * Close the output file as necessary... */ if (!OutputFiles) { write_trailer(out, 0, lang); progress_error(HD_ERROR_NONE, "BYTES: %ld", ftell(out)); if (out != stdout) fclose(out); } if (Verbosity) progress_hide(); } /* * 'ps_write_outpage()' - Write an output page. */ static void ps_write_outpage(FILE *out, /* I - Output file */ int outpage) /* I - Output page number */ { int file_page; /* Current page # in document */ page_t *p; /* Current page */ outpage_t *op; /* Current output page */ int i; /* Looping var */ if (outpage < 0 || outpage >= (int)num_outpages) return; op = outpages + outpage; p = pages + op->pages[0]; DEBUG_printf(("ps_write_outpage(%p, %d)\n", (void *)out, outpage)); /* * Let the user know which page we are writing... */ if (Verbosity) { progress_show("Writing page %s...", p->page_text); progress_update(100 * outpage / (int)num_outpages); } /* * Figure out the page number in the file... */ if (OutputFiles && chapter >= 0) file_page = outpage - chapter_outstarts[chapter] + 1; else if (chapter < 0) file_page = outpage + 1; else if (chapter == 0) { if (TitlePage) file_page = outpage + 1; else file_page = outpage - chapter_outstarts[0] + 1; } else { if (TitlePage) file_page = outpage + 1; else file_page = outpage - chapter_outstarts[1] + 1; } /* * Output the page prolog... 
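 * The prolog is the DSC %%Page and %%PageBoundingBox comments plus, for
 * Level 2/3 output with PSCommands enabled, PageSize, Duplex, and media
 * selection features.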
*/ fprintf(out, "%%%%Page: (%s) %d\n", p->page_text, file_page); if (op->nup == 1) { if (p->duplex && !(file_page & 1)) fprintf(out, "%%%%PageBoundingBox: %d %d %d %d\n", p->right, p->bottom, p->width - p->left, p->length - p->top); else fprintf(out, "%%%%PageBoundingBox: %d %d %d %d\n", p->left, p->bottom, p->width - p->right, p->length - p->top); } else fprintf(out, "%%%%PageBoundingBox: 0 0 %d %d\n", p->width, p->length); if (PSLevel > 1 && PSCommands) { fputs("%%BeginPageSetup\n", out); if (p->width == 612 && p->length == 792) fputs("%%BeginFeature: *PageSize Letter\n", out); else if (p->width == 612 && p->length == 1008) fputs("%%BeginFeature: *PageSize Legal\n", out); else if (p->width == 792 && p->length == 1224) fputs("%%BeginFeature: *PageSize Tabloid\n", out); else if (p->width == 842 && p->length == 1190) fputs("%%BeginFeature: *PageSize A3\n", out); else if (p->width == 595 && p->length == 842) fputs("%%BeginFeature: *PageSize A4\n", out); else fprintf(out, "%%%%BeginFeature: *PageSize w%dh%d\n", p->width, p->length); fprintf(out, "%d %d SetPageSize\n", p->width, p->length); fputs("%%EndFeature\n", out); if (p->duplex) { if (p->landscape) { fputs("%%BeginFeature: *Duplex DuplexTumble\n", out); fputs("true true SetDuplexMode\n", out); fputs("%%EndFeature\n", out); } else { fputs("%%BeginFeature: *Duplex DuplexNoTumble\n", out); fputs("true false SetDuplexMode\n", out); fputs("%%EndFeature\n", out); } } else { fputs("%%BeginFeature: *Duplex None\n", out); fputs("false false SetDuplexMode\n", out); fputs("%%EndFeature\n", out); } if (p->media_color[0]) { fprintf(out, "%%%%BeginFeature: *MediaColor %s\n", p->media_color); fprintf(out, "(%s) SetMediaColor\n", p->media_color); fputs("%%EndFeature\n", out); } if (p->media_position) { fprintf(out, "%%%%BeginFeature: *InputSlot Tray%d\n", p->media_position); fprintf(out, "%d SetMediaPosition\n", p->media_position); fputs("%%EndFeature\n", out); } if (p->media_type[0]) { fprintf(out, "%%%%BeginFeature: *MediaType %s\n", p->media_type); fprintf(out, "(%s) SetMediaType\n", p->media_type); fputs("%%EndFeature\n", out); } fputs("%%EndPageSetup\n", out); } /* * Render all of the pages... */ switch (op->nup) { case 1 : ps_write_page(out, op->pages[0]); break; default : for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; fprintf(out, "GS[%.3f %.3f %.3f %.3f %.3f %.3f]CM\n", p->outmatrix[0][0], p->outmatrix[1][0], p->outmatrix[0][1], p->outmatrix[1][1], p->outmatrix[0][2], p->outmatrix[1][2]); ps_write_page(out, op->pages[i]); fputs("GR\n", out); } break; } /* * Output the page trailer... */ fputs("SP\n", out); fflush(out); } /* * 'ps_write_page()' - Write all render entities on a page to a PostScript file. */ static void ps_write_page(FILE *out, /* I - Output file */ int page) /* I - Page number */ { render_t *r, /* Render pointer */ *next; /* Next render */ page_t *p; /* Current page */ const char *debug; /* HTMLDOC_DEBUG environment variable */ if (page < 0 || page >= (int)alloc_pages) return; p = pages + page; DEBUG_printf(("ps_write_page(%p, %d)\n", (void *)out, page)); /* * Clear the render cache... */ render_typeface = -1; render_style = -1; render_size = -1; render_rgb[0] = -1.0f; render_rgb[1] = -1.0f; render_rgb[2] = -1.0f; render_x = -1.0f; render_y = -1.0f; render_spacing = -1.0f; /* * Setup the page... 
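 * Landscape pages are rotated with a translate and +/-90 degree rotate, the
 * background is drawn, and the origin is moved to the printable corner before
 * any boxes, images, or text are emitted.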
*/ fputs("GS\n", out); if (p->landscape) { if (p->duplex && (page & 1)) fprintf(out, "0 %d T -90 RO\n", p->length); else fprintf(out, "%d 0 T 90 RO\n", p->width); } write_background(page, out); if (p->duplex && (page & 1)) fprintf(out, "%d %d T\n", p->right, p->bottom); else fprintf(out, "%d %d T\n", p->left, p->bottom); /* * Render all graphics elements... */ for (r = p->start; r != NULL; r = r->next) switch (r->type) { case RENDER_BOX : set_color(out, r->data.box); set_pos(out, r->x, r->y); if (r->height > 0.0f) fprintf(out, " %.1f %.1f F\n", r->width, r->height); else fprintf(out, " %.1f L\n", r->width); render_x = -1.0f; break; case RENDER_IMAGE : if (r->width > 0.01f && r->height > 0.01f) write_image(out, r); break; } /* * Render all text elements, freeing used memory as we go... */ for (r = p->start, next = NULL; r != NULL; r = next) { if (r->type == RENDER_TEXT) write_text(out, r); next = r->next; free(r); } p->start = NULL; if ((debug = getenv("HTMLDOC_DEBUG")) != NULL && strstr(debug, "margin")) { // Show printable area... fprintf(out, "1 0 1 C 0 0 %d %d B\n", p->width - p->right - p->left, p->length - p->top - p->bottom); } /* * Output the page trailer... */ fputs("GR\n", out); } /* * 'ps_write_background()' - Write a background image... */ static void ps_write_background(FILE *out) /* I - Output file */ { int y, /* Current line */ pwidth; /* Pixel width */ if (!background_image->pixels) image_load(background_image->filename, !OutputColor, 1); pwidth = background_image->width * background_image->depth; fputs("/BG[", out); for (y = 0; y < background_image->height; y ++) { putc('<', out); ps_hex(out, background_image->pixels + y * pwidth, pwidth); putc('>', out); } fputs("]def", out); image_unload(background_image); } /* * 'pdf_write_document()' - Write all render entities to a PDF file. */ static void pdf_write_document(uchar *author, // I - Author of document uchar *creator, // I - Application that generated the HTML file uchar *copyright, // I - Copyright (if any) on the document uchar *keywords, // I - Search keywords uchar *subject, // I - Subject uchar *lang, // I - Language tree_t *doc, // I - Document tree_t *toc) // I - Table of contents tree { int i; // Looping variable FILE *out; // Output file int outpage, // Current page # heading; // Current heading # int bytes; // Number of bytes char buffer[8192]; // Copy buffer int num_images; // Number of images in document image_t **images; // Pointers to images render_t temp; // Dummy rendering data... // Open the output file... out = open_file(); if (out == NULL) { progress_error(HD_ERROR_WRITE_ERROR, "Unable to write document file - %s\n", strerror(errno)); return; } // Clear the objects array... num_objects = 0; alloc_objects = 0; objects = NULL; // Write the prolog... write_prolog(out, num_outpages, author, creator, copyright, keywords, subject); // Write images as needed... num_images = image_getlist(&images); for (i = 0; i < num_images; i ++) { int hfi; // Header/footer image index for (hfi = 0; hfi < MAX_HF_IMAGES; hfi ++) if (images[i] == hfimage[hfi]) break; if (images[i]->use > 1 || images[i]->mask || (images[i]->width * images[i]->height * images[i]->depth) > 65536 || images[i] == background_image || images[i] == logo_image || hfi < MAX_HF_IMAGES) { progress_show("Writing image %d (%s)...", i + 1, images[i]->filename); progress_update(100 * i / num_images); temp.data.image = images[i]; write_image(out, &temp, 1); } } // Write links and target names... 
pdf_write_links(out); if (PDFVersion >= 12) pdf_write_names(out); // Verify that everything is working so far... pdf_start_object(out); if (pages_object != (int)num_objects) progress_error(HD_ERROR_INTERNAL_ERROR, "Internal error: pages_object != num_objects"); fputs("/Type/Pages", out); fprintf(out, "/Count %d", (int)num_outpages); fputs("/Kids[", out); for (outpage = 0; outpage < (int)num_outpages; outpage ++) fprintf(out, "%d 0 R\n", pages_object + outpage * 2 + 1); fputs("]", out); pdf_end_object(out); for (outpage = 0; outpage < (int)num_outpages; outpage ++) pdf_write_outpage(out, outpage); if (OutputType == OUTPUT_BOOK && TocLevels > 0) { /* * Write the outline tree using the table-of-contents... */ heading = 0; #ifdef DEBUG_TOC pdf_text_contents(out, toc); #endif // DEBUG_TOC pdf_write_contents(out, toc, 0, 0, 0, &heading); } else { /* * Write the outline tree using the HTML files. */ pdf_write_files(out, doc); } /* * Write the trailer and close the output file... */ write_trailer(out, 0, lang); progress_error(HD_ERROR_NONE, "BYTES: %ld", ftell(out)); if (CGIMode) { const char *meta_filename = (const char *)htmlGetMeta(doc, (uchar *)"HTMLDOC.filename"); const char *filename; if (meta_filename) { if ((filename = strrchr(meta_filename, '/')) != NULL) filename ++; else filename = meta_filename; } else filename = "htmldoc.pdf"; // In CGI mode, we only produce PDF output to stdout... printf("Content-Type: application/pdf\r\n" "Content-Length: %ld\r\n" "Content-Disposition: inline; filename=\"%s\"\r\n" "Accept-Ranges: none\r\n" "X-Creator: HTMLDOC " SVERSION "\r\n" "\r\n", ftell(out), filename); } fclose(out); // // If we are sending the output to stdout, copy the temp file now... // if (!OutputPath[0]) { #ifdef WIN32 // Make sure we are in binary mode... stupid Microsoft! setmode(1, O_BINARY); #elif defined(__EMX__) // OS/2 has a setmode for FILE's... fflush(stdout); _fsetmode(stdout, "b"); #endif // WIN32 || __EMX__ // Open the temporary file and copy it to stdout... out = fopen(stdout_filename, "rb"); while ((bytes = fread(buffer, 1, sizeof(buffer), out)) > 0) fwrite(buffer, 1, (size_t)bytes, stdout); // Close the temporary file (it is removed when the program exits...) fclose(out); } // Clear the objects array... if (alloc_objects) { free(objects); num_objects = 0; alloc_objects = 0; objects = NULL; } if (Verbosity) progress_hide(); } /* * 'pdf_write_resources()' - Write the resources dictionary for a page. 
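 * Scans every render primitive on this output page to decide which ProcSet
 * entries, fonts, and image XObjects the /Resources dictionary must reference;
 * a /Trans page transition is appended when PDFEffect is set.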
*/ static void pdf_write_resources(FILE *out, /* I - Output file */ int outpage) /* I - Output page for resources */ { int i; /* Looping var */ outpage_t *op; /* Current output page */ page_t *p; /* Current page */ render_t *r; /* Render pointer */ int fonts_used[TYPE_MAX * STYLE_MAX]; /* Non-zero if the page uses a font */ int images_used; /* Non-zero if the page uses an image */ int text_used; /* Non-zero if the page uses text */ static const char *effects[] = /* Effects and their commands */ { "", "/S/Box/M/I", "/S/Box/M/O", "/S/Dissolve", "/S/Glitter/Di 270", "/S/Glitter/Di 315", "/S/Glitter/Di 0", "/S/Blinds/Dm/H", "/S/Split/Dm/H/M/I", "/S/Split/Dm/H/M/O", "/S/Blinds/Dm/V", "/S/Split/Dm/V/M/I", "/S/Split/Dm/V/M/O", "/S/Wipe/Di 270", "/S/Wipe/Di 180", "/S/Wipe/Di 0", "/S/Wipe/Di 90" }; memset(fonts_used, 0, sizeof(fonts_used)); images_used = background_image != NULL; text_used = 0; op = outpages + outpage; for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; for (r = p->start; r != NULL; r = r->next) if (r->type == RENDER_IMAGE) images_used = 1; else if (r->type == RENDER_TEXT) { text_used = 1; fonts_used[r->data.text.typeface * 4 + r->data.text.style] = 1; } } fputs("/Resources<<", out); if (!images_used) fputs("/ProcSet[/PDF/Text]", out); else if (PDFVersion >= 12) { if (OutputColor) fputs("/ProcSet[/PDF/Text/ImageB/ImageC/ImageI]", out); else fputs("/ProcSet[/PDF/Text/ImageB/ImageI]", out); } else { if (OutputColor) fputs("/ProcSet[/PDF/Text/ImageB/ImageC]", out); else fputs("/ProcSet[/PDF/Text/ImageB]", out); } if (text_used) { fputs("/Font<<", out); for (i = 0; i < (TYPE_MAX * STYLE_MAX); i ++) if (fonts_used[i]) fprintf(out, "/F%x %d 0 R", i, font_objects[i]); fputs(">>", out); } fputs("/XObject<<", out); for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; for (r = p->start; r != NULL; r = r->next) if (r->type == RENDER_IMAGE && r->data.image->obj) fprintf(out, "/I%d %d 0 R", r->data.image->obj, r->data.image->obj); } if (background_image) fprintf(out, "/I%d %d 0 R", background_image->obj, background_image->obj); fputs(">>>>", out); if (PDFEffect) fprintf(out, "/Dur %.0f/Trans<</Type/Trans/D %.1f%s>>", PDFPageDuration, PDFEffectDuration, effects[PDFEffect]); } /* * 'pdf_write_outpage()' - Write an output page. */ static void pdf_write_outpage(FILE *out, /* I - Output file */ int outpage) /* I - Output page number */ { int i; /* Looping var */ page_t *p; /* Current page */ outpage_t *op; /* Output page */ DEBUG_printf(("pdf_write_outpage(out = %p, outpage = %d)\n", (void *)out, outpage)); if (outpage < 0 || outpage >= (int)num_outpages) return; op = outpages + outpage; p = pages + op->pages[0]; DEBUG_printf(("op->pages[0] = %d (%dx%d)\n", op->pages[0], p->width, p->length)); /* * Let the user know which page we are writing... */ if (Verbosity) { progress_show("Writing page %s...", p->page_text); progress_update(100 * outpage / (int)num_outpages); } /* * Output the page prolog... */ pdf_start_object(out); fputs("/Type/Page", out); fprintf(out, "/Parent %d 0 R", pages_object); fprintf(out, "/Contents %d 0 R", (int)num_objects + 1); if (p->landscape) fprintf(out, "/MediaBox[0 0 %d %d]", p->length, p->width); else fprintf(out, "/MediaBox[0 0 %d %d]", p->width, p->length); pdf_write_resources(out, outpage); /* * Actions (links)... 
*/ if (op->annot_object > 0) fprintf(out, "/Annots %d 0 R", op->annot_object); pdf_end_object(out); pdf_start_object(out); if (Compression) fputs("/Filter/FlateDecode", out); pdf_start_stream(out); flate_open_stream(out); /* * Render all of the pages... */ switch (op->nup) { case 1 : pdf_write_page(out, op->pages[0]); break; default : for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; flate_printf(out, "q %.3f %.3f %.3f %.3f %.3f %.3f cm\n", p->outmatrix[0][0], p->outmatrix[1][0], p->outmatrix[0][1], p->outmatrix[1][1], p->outmatrix[0][2], p->outmatrix[1][2]); pdf_write_page(out, op->pages[i]); flate_puts("Q\n", out); } break; } /* * Close out the page... */ flate_close_stream(out); pdf_end_object(out); } /* * 'pdf_write_page()' - Write a page to a PDF file. */ static void pdf_write_page(FILE *out, /* I - Output file */ int page) /* I - Page number */ { render_t *r, /* Render pointer */ *next; /* Next render */ float box[3]; /* RGB color for boxes */ page_t *p; /* Current page */ const char *debug; /* HTMLDOC_DEBUG environment variable */ if (page < 0 || page >= (int)alloc_pages) return; p = pages + page; /* * Clear the render cache... */ render_rgb[0] = -1.0f; render_rgb[1] = -1.0f; render_rgb[2] = -1.0f; render_x = -1.0f; render_y = -1.0f; /* * Output the page header... */ flate_puts("q\n", out); write_background(page, out); if (p->duplex && (page & 1)) flate_printf(out, "1 0 0 1 %d %d cm\n", p->right, p->bottom); else flate_printf(out, "1 0 0 1 %d %d cm\n", p->left, p->bottom); /* * Render all graphics elements... */ box[0] = -1.0f; box[1] = -1.0f; box[2] = -1.0f; for (r = p->start; r != NULL; r = r->next) switch (r->type) { case RENDER_IMAGE : if (r->width > 0.01f && r->height > 0.01f) write_image(out, r); break; case RENDER_BOX : if (r->height == 0.0) { if (box[0] != r->data.box[0] || box[1] != r->data.box[1] || box[2] != r->data.box[2]) { box[0] = r->data.box[0]; box[1] = r->data.box[1]; box[2] = r->data.box[2]; if (OutputColor) flate_printf(out, "%.2f %.2f %.2f RG\n", box[0], box[1], box[2]); else flate_printf(out, "%.2f G\n", box[0] * 0.31f + box[1] * 0.61f + box[2] * 0.08f); } flate_printf(out, "%.1f %.1f m %.1f %.1f l S\n", r->x, r->y, r->x + r->width, r->y); } else { set_color(out, r->data.box); flate_printf(out, "%.1f %.1f %.1f %.1f re f\n", r->x, r->y, r->width, r->height); } break; } /* * Render all text elements, freeing used memory as we go... */ flate_puts("BT\n", out); render_typeface = -1; render_style = -1; render_size = -1; render_x = -1.0f; render_y = -1.0f; render_spacing = -1.0f; for (r = p->start, next = NULL; r != NULL; r = next) { if (r->type == RENDER_TEXT) write_text(out, r); next = r->next; free(r); } p->start = NULL; flate_puts("ET\n", out); if ((debug = getenv("HTMLDOC_DEBUG")) != NULL && strstr(debug, "margin")) { // Show printable area... flate_printf(out, "1 0 1 RG 0 0 %d %d re S\n", p->width - p->right - p->left, p->length - p->top - p->bottom); } /* * Output the page trailer... 
*/ flate_puts("Q\n", out); } #ifdef DEBUG_TOC static void pdf_text_contents(FILE *out, tree_t *toc, int indent) { static const char *spaces = " " " "; if (indent > 16) indent = 16; while (toc) { fprintf(out, "%% %s<%s>", spaces + 64 - 4 * indent, _htmlMarkups[toc->markup]); switch (toc->markup) { case MARKUP_A : tree_t *temp; for (temp = toc->child; temp; temp = temp->next) fputs((char *)temp->data, out); break; default : fputs("\n", out); pdf_text_contents(out, toc->child, indent + 1); fprintf(out, "%% %s", spaces + 64 - 4 * indent); break; } fprintf(out, "</%s>\n", _htmlMarkups[toc->markup]); toc = toc->next; } } #endif // DEBUG_TOC /* * 'pdf_write_contents()' - Write the table of contents as outline records to * a PDF file. */ static void pdf_write_contents(FILE *out, /* I - Output file */ tree_t *toc, /* I - Table of contents tree */ int parent, /* I - Parent outline object */ int prev, /* I - Previous outline object */ int next, /* I - Next outline object */ int *heading) /* IO - Current heading # */ { int i, /* Looping var */ thisobj, /* This object */ entry, /* TOC entry object */ count; /* Number of entries at this level */ uchar *text; /* Entry text */ tree_t *temp; /* Looping var */ int *entry_counts, /* Number of sub-entries for this entry */ *entry_objects; /* Objects for each entry */ tree_t **entries; /* Pointers to each entry */ float x, y; /* Position of link */ /* * Make an object for this entry... */ if (toc == NULL) { /* * This is for the Table of Contents page... */ thisobj = pdf_start_object(out); fprintf(out, "/Parent %d 0 R", parent); fputs("/Title", out); write_utf16(out, (uchar *)TocTitle); x = 0.0f; y = PagePrintLength + PageBottom; pspdf_transform_coords(pages + chapter_starts[0], x, y); fprintf(out, "/Dest[%d 0 R/XYZ %.0f %.0f 0]", pages_object + 2 * chapter_outstarts[0] + 1, x, y); if (prev > 0) fprintf(out, "/Prev %d 0 R", prev); if (next > 0) fprintf(out, "/Next %d 0 R", next); pdf_end_object(out); return; } /* * Allocate the arrays... Add 1 to hold the TOC at the top level... */ if ((entry_counts = (int *)calloc(sizeof(int), num_headings + 1)) == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d headings - %s", (int)num_headings, strerror(errno)); return; } if ((entry_objects = (int *)calloc(sizeof(int), num_headings + 1)) == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d headings - %s", (int)num_headings, strerror(errno)); free(entry_counts); return; } if ((entries = (tree_t **)calloc(sizeof(tree_t *), num_headings + 1)) == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d headings - %s", (int)num_headings, strerror(errno)); free(entry_objects); free(entry_counts); return; } if (parent == 0 && TocLevels > 0) { /* * Add the table of contents to the top-level contents... */ entries[0] = NULL; entry_objects[0] = num_objects + 2; entry = num_objects + 3; count = 1; } else { entry = num_objects + 2; count = 0; } /* * Find and count the children (entries)... 
*/ if (toc->markup == MARKUP_B && toc->next && toc->next->markup == MARKUP_UL) temp = toc->next->child; else if (toc->markup == MARKUP_LI && toc->last_child && toc->last_child->markup == MARKUP_UL) temp = toc->last_child->child; else temp = toc->child; for (; temp && count <= (int)num_headings; temp = temp->next) { if (temp->markup == MARKUP_B) { entries[count] = temp; entry_objects[count] = entry; if (temp->next && temp->next->markup == MARKUP_UL) entry_counts[count] = pdf_count_headings(temp->next->child); else entry_counts[count] = 0; entry += entry_counts[count] + 1; count ++; } else if (temp->markup == MARKUP_LI) { entries[count] = temp; entry_objects[count] = entry; if (temp->last_child && temp->last_child->markup == MARKUP_UL) entry_counts[count] = pdf_count_headings(temp->last_child); else entry_counts[count] = 0; entry += entry_counts[count] + 1; count ++; } } /* * Output the top-level object... */ thisobj = pdf_start_object(out); if (parent == 0) outline_object = thisobj; else fprintf(out, "/Parent %d 0 R", parent); if (count > 0) { fprintf(out, "/Count %d", parent == 0 ? count : -count); fprintf(out, "/First %d 0 R", entry_objects[0]); fprintf(out, "/Last %d 0 R", entry_objects[count - 1]); } if (parent > 0 && toc->child && toc->child->markup == MARKUP_A) { if ((text = htmlGetText(toc->child->child)) != NULL) { fputs("/Title", out); write_utf16(out, text); free(text); } i = heading_pages[*heading]; x = 0.0f; y = heading_tops[*heading] + pages[i].bottom; pspdf_transform_coords(pages + i, x, y); fprintf(out, "/Dest[%d 0 R/XYZ %.0f %.0f 0]", pages_object + 2 * pages[i].outpage + 1, x, y); (*heading) ++; } if (prev > 0) fprintf(out, "/Prev %d 0 R", prev); if (next > 0) fprintf(out, "/Next %d 0 R", next); pdf_end_object(out); for (i = 0; i < count ; i ++) pdf_write_contents(out, entries[i], thisobj, i > 0 ? entry_objects[i - 1] : 0, i < (count - 1) ? entry_objects[i + 1] : 0, heading); free(entry_objects); free(entry_counts); free(entries); } // // 'pdf_write_files()' - Write an outline of HTML files. // static void pdf_write_files(FILE *out, // I - Output file tree_t *doc) // I - Document tree { int i, // Looping var num_files, // Number of FILE elements alloc_text; // Allocated text? uchar *text; // Entry text tree_t *temp; // Current node link_t *link; // Link to file... float x, y; // Position of link // Figure out the number of (top-level) files in the document... for (num_files = 0, temp = doc; temp; temp = temp->next) if (temp->markup == MARKUP_FILE) num_files ++; if (num_files < 2) { // No files to outline... outline_object = 0; return; } // Write the outline dictionary... outline_object = pdf_start_object(out); fprintf(out, "/Count %d", num_files); fprintf(out, "/First %d 0 R", outline_object + 1); fprintf(out, "/Last %d 0 R", outline_object + num_files); pdf_end_object(out); // Now write the outline items... 
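// One outline item per top-level MARKUP_FILE node: /Prev and /Next chain the
// items, and /Dest points at the file's first page when its _HD_FILENAME
// target is found in the link table.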
for (i = 0, temp = doc; temp; temp = temp->next) if (temp->markup == MARKUP_FILE) { alloc_text = 0; if ((text = get_title(temp->child)) != NULL) alloc_text = 1; else if ((text = htmlGetVariable(temp, (uchar *)"_HD_FILENAME")) == NULL) text = (uchar *)"Unknown"; pdf_start_object(out); fprintf(out, "/Parent %d 0 R", outline_object); fputs("/Title", out); write_utf16(out, text); if (alloc_text) free(text); if ((link = find_link(htmlGetVariable(temp, (uchar *)"_HD_FILENAME"))) != NULL) { x = 0.0f; y = link->top + pages[link->page].bottom; pspdf_transform_coords(pages + link->page, x, y); fprintf(out, "/Dest[%d 0 R/XYZ %.0f %.0f 0]", pages_object + 2 * pages[link->page].outpage + 1, x, y); } if (i > 0) fprintf(out, "/Prev %d 0 R", outline_object + i); if (i < (num_files - 1)) fprintf(out, "/Next %d 0 R", outline_object + i + 2); pdf_end_object(out); i ++; } } /* * 'pdf_count_headings()' - Count the number of headings under this TOC * entry. */ static int /* O - Number of headings found */ pdf_count_headings(tree_t *toc) /* I - TOC entry */ { int headings; /* Number of headings */ for (headings = 0; toc != NULL; toc = toc->next) { if (toc->markup == MARKUP_A) headings ++; if (toc->child != NULL) headings += pdf_count_headings(toc->child); } return (headings); } /* * PDF object state variables... */ static int pdf_stream_length = 0; static int pdf_stream_start = 0; static int pdf_object_type = 0; /* * 'pdf_start_object()' - Start a new PDF object... */ static int // O - Object number pdf_start_object(FILE *out, // I - File to write to int array) // I - 1 = array, 0 = dictionary { int *temp; // Temporary integer pointer num_objects ++; // Allocate memory as necessary... if (num_objects >= alloc_objects) { alloc_objects += ALLOC_OBJECTS; if (alloc_objects == ALLOC_OBJECTS) temp = (int *)malloc(sizeof(int) * alloc_objects); else temp = (int *)realloc(objects, sizeof(int) * alloc_objects); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d objects - %s", (int)alloc_objects, strerror(errno)); alloc_objects -= ALLOC_OBJECTS; return (0); } objects = temp; } objects[num_objects] = ftell(out); fprintf(out, "%d 0 obj", (int)num_objects); pdf_object_type = array; fputs(pdf_object_type ? "[" : "<<", out); return (num_objects); } /* * 'pdf_start_stream()' - Start a new PDF stream... */ static void pdf_start_stream(FILE *out) // I - File to write to { // Write the "/Length " string, get the position, and then write 10 // zeroes to cover the maximum size of a stream. fputs("/Length ", out); pdf_stream_length = ftell(out); fputs("0000000000>>stream\n", out); pdf_stream_start = ftell(out); } /* * 'pdf_end_object()' - End a PDF object... */ static void pdf_end_object(FILE *out) // I - File to write to { int length; // Total length of stream if (pdf_stream_start) { // For streams, go back and update the length field in the // object dictionary... length = ftell(out) - pdf_stream_start; fseek(out, pdf_stream_length, SEEK_SET); fprintf(out, "%-10d", length); fseek(out, 0, SEEK_END); pdf_stream_start = 0; fputs("endstream\n", out); } else fputs(pdf_object_type ? "]" : ">>", out); fputs("endobj\n", out); } /* * 'pdf_write_links()' - Write annotation link objects for each page in the * document. 
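 * Adjacent RENDER_LINK primitives on the same baseline that point at the same
 * target are merged first, so a link wrapped across several words yields a
 * single annotation rectangle; annotation and action objects are then written
 * for every output page.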
*/ static void pdf_write_links(FILE *out) /* I - Output file */ { int i, /* Looping var */ outpage, /* Current page */ lobj, /* Current link */ num_lobjs, /* Number of links on this page */ alloc_lobjs, /* Number of links to allocate */ *lobjs; /* Link objects */ float x, y; /* Position of last link */ render_t *r, /* Current render primitive */ *rlast, /* Last render link primitive */ *rprev; /* Previous render primitive */ link_t *link; /* Local link */ page_t *p; /* Current page */ outpage_t *op; /* Current output page */ /* * First combine adjacent, identical links... */ for (outpage = 0, op = outpages; outpage < (int)num_outpages; outpage ++, op ++) { for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; for (r = p->start, x = 0.0f, y = 0.0f, rlast = NULL, rprev = NULL; r != NULL; rprev = r, r = r->next) if (r->type == RENDER_LINK) { if (fabs(r->x - x) < 0.1f && fabs(r->y - y) < 0.1f && rlast != NULL && strcmp((const char *)rlast->data.link, (const char *)r->data.link) == 0) { // Combine this primitive with the previous one in rlast... rlast->width = r->x + r->width - rlast->x; x = rlast->x + rlast->width; // Delete this render primitive... rprev->next = r->next; free(r); r = rprev; } else { // Can't combine; just save this info for later use... rlast = r; x = r->x + r->width; y = r->y; } } } } /* * Setup the initial pages_object number... */ pages_object = num_objects + 1; /* * Add space for named links in PDF 1.2 output... */ if (PDFVersion >= 12) pages_object += num_links + 3; /* * Stop here if we won't be generating links in the output... */ if (!Links) return; /* * Figure out how many link objects we'll have... */ for (outpage = 0, op = outpages, alloc_lobjs = 0; outpage < (int)num_pages; outpage ++, op ++) { num_lobjs = 0; for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; for (r = p->start; r != NULL; r = r->next) if (r->type == RENDER_LINK) { if (find_link(r->data.link) != NULL) num_lobjs ++; else num_lobjs += 2; } } if (num_lobjs > 0) pages_object += num_lobjs + 1; if (num_lobjs > alloc_lobjs) alloc_lobjs = num_lobjs; } if (alloc_lobjs == 0) return; /* * Allocate memory for the links... */ if ((lobjs = (int *)malloc(sizeof(int) * (size_t)alloc_lobjs)) == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d link objects - %s", alloc_lobjs, strerror(errno)); return; } /* * Then generate annotation objects for all the links... */ for (outpage = 0, op = outpages; outpage < (int)num_pages; outpage ++, op ++) { num_lobjs = 0; for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; for (r = p->start; r != NULL; r = r->next) if (r->type == RENDER_LINK) { if ((link = find_link(r->data.link)) != NULL) { /* * Local link... 
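 * Local links become /Dest entries that jump directly to the target page; the
 * remote branch below emits GoToR (external PDF), Launch (other local files),
 * or URI actions instead.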
*/ float x1, y1, x2, y2; lobjs[num_lobjs ++] = pdf_start_object(out); fputs("/Subtype/Link", out); if (PageDuplex && (op->pages[i] & 1)) { x1 = r->x + p->right; y1 = r->y + p->bottom - 2; x2 = r->x + r->width + p->right; y2 = r->y + r->height + p->bottom; } else { x1 = r->x + p->left; y1 = r->y + p->bottom - 2; x2 = r->x + r->width + p->left; y2 = r->y + r->height + p->bottom; } pspdf_transform_coords(p, x1, y1); pspdf_transform_coords(p, x2, y2); fprintf(out, "/Rect[%.1f %.1f %.1f %.1f]", x1, y1, x2, y2); fputs("/Border[0 0 0]", out); x1 = 0.0f; y1 = link->top + pages[link->page].bottom; pspdf_transform_coords(pages + link->page, x1, y1); fprintf(out, "/Dest[%d 0 R/XYZ %.0f %.0f 0]", pages_object + 2 * pages[link->page].outpage + 1, x1, y1); pdf_end_object(out); } else { /* * Remote link... */ pdf_start_object(out); if (PDFVersion >= 12 && file_method((char *)r->data.link) == NULL) { #ifdef WIN32 if (strcasecmp(file_extension((char *)r->data.link), "pdf") == 0) #else if (strcmp(file_extension((char *)r->data.link), "pdf") == 0) #endif /* WIN32 */ { /* * Link to external PDF file... */ const char *target = file_target((char *)r->data.link); fputs("/S/GoToR", out); if (target) { char url[1024], *urlptr; fputs("/D", out); write_string(out, (uchar *)target, 0); strlcpy(url, (char *)r->data.link, sizeof(url)); if ((urlptr = strrchr(url, '#')) != NULL) *urlptr = '\0'; fputs("/F", out); write_string(out, (uchar *)url, 0); } else { fputs("/D[0/XYZ null null 0]/F", out); write_string(out, r->data.link, 0); } } else { /* * Link to external filename... */ fputs("/S/Launch", out); fputs("/F", out); write_string(out, r->data.link, 0); if (StrictHTML) progress_error(HD_ERROR_UNRESOLVED_LINK, "Unable to resolve link to \"%s\"!", r->data.link); } } else { /* * Link to web file... */ fputs("/S/URI", out); fputs("/URI", out); write_string(out, r->data.link, 0); } pdf_end_object(out); lobjs[num_lobjs ++] = pdf_start_object(out); fputs("/Subtype/Link", out); if (PageDuplex && (outpage & 1)) fprintf(out, "/Rect[%.1f %.1f %.1f %.1f]", r->x + PageRight, r->y + PageBottom, r->x + r->width + PageRight, r->y + r->height + PageBottom); else fprintf(out, "/Rect[%.1f %.1f %.1f %.1f]", r->x + PageLeft, r->y + PageBottom - 2, r->x + r->width + PageLeft, r->y + r->height + PageBottom); fputs("/Border[0 0 0]", out); fprintf(out, "/A %d 0 R", (int)num_objects - 1); pdf_end_object(out); } } } if (num_lobjs > 0) { outpages[outpage].annot_object = pdf_start_object(out, 1); for (lobj = 0; lobj < num_lobjs; lobj ++) fprintf(out, "%d 0 R%s", lobjs[lobj], lobj < (num_lobjs - 1) ? "\n" : ""); pdf_end_object(out); } } free(lobjs); } /* * 'pdf_write_names()' - Write named destinations for each link. */ static void pdf_write_names(FILE *out) /* I - Output file */ { int i; /* Looping var */ uchar *s; /* Current character in name */ link_t *link; /* Local link */ /* * Convert all link names to lowercase... */ for (i = num_links, link = links; i > 0; i --, link ++) for (s = link->name; *s != '\0'; s ++) *s = (uchar)tolower(*s); /* * Write the root name tree entry... */ names_object = pdf_start_object(out); fprintf(out, "/Dests %d 0 R", (int)num_objects + 1); pdf_end_object(out); /* * Write the name tree child list... */ pdf_start_object(out); fprintf(out, "/Kids[%d 0 R]", (int)num_objects + 1); pdf_end_object(out); /* * Write the leaf node for the name tree... 
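 * (a single node whose /Limits array holds the first and last link names,
 * lowercased above, and whose /Names array pairs every link name with the
 * indirect destination object emitted in the loop that follows)...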
*/ pdf_start_object(out); fputs("/Limits[", out); write_string(out, links[0].name, 0); write_string(out, links[num_links - 1].name, 0); fputs("]", out); fputs("/Names[", out); for (i = 1, link = links; i <= (int)num_links; i ++, link ++) { write_string(out, link->name, 0); fprintf(out, "%d 0 R", (int)num_objects + i); } fputs("]", out); pdf_end_object(out); for (i = num_links, link = links; i > 0; i --, link ++) { pdf_start_object(out); float x, y; x = 0.0f; y = link->top + pages[link->page].bottom; pspdf_transform_coords(pages + link->page, x, y); fprintf(out, "/D[%d 0 R/XYZ %.0f %.0f 0]", pages_object + 2 * pages[link->page].outpage + 1, x, y); pdf_end_object(out); } } /* * 'render_contents()' - Render a single heading. */ static void render_contents(tree_t *t, /* I - Tree to parse */ float left, /* I - Left margin */ float right, /* I - Printable width */ float bottom, /* I - Bottom margin */ float top, /* I - Printable top */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int heading, /* I - Heading # */ tree_t *chap) /* I - Chapter heading */ { float x, width, numberwidth, height, rgb[3]; int hpage; uchar number[1024], *nptr, *link; tree_t *flat, *temp, *next; render_t *r; float dot_width; DEBUG_printf(("render_contents(t=%p, left=%.1f, right=%.1f, bottom=%.1f, top=%.1f, y=%.1f, page=%d, heading=%d, chap=%p)\n", (void *)t, left, right, bottom, top, *y, *page, heading, (void *)chap)); if (!t) return; dot_width = _htmlSizes[SIZE_P] * _htmlWidths[t->typeface][t->style]['.'] * 0.001f; /* * Put the text... */ flat = flatten_tree(t->child->child); for (height = 0.0, temp = flat; temp != NULL; temp = temp->next) if (temp->height > height) height = temp->height; height *= _htmlSpacings[SIZE_P] / _htmlSizes[SIZE_P]; if (t->indent) x = left + 18.0f + 18.0f * t->indent; else x = left; *y -= height; /* * Get the width of the page number, leave room for three dots... */ if (heading >= 0 && heading < (int)num_headings) { hpage = heading_pages[heading]; numberwidth = (float)(get_width((uchar *)pages[hpage].page_text, t->typeface, t->style, t->size) + 3.0f * dot_width); } else { hpage = 0; numberwidth = 0.0f; } for (temp = flat; temp != NULL; temp = next) { rgb[0] = temp->red / 255.0f; rgb[1] = temp->green / 255.0f; rgb[2] = temp->blue / 255.0f; if ((x + temp->width) >= (right - numberwidth)) { /* * Too wide to fit, continue on the next line */ *y -= _htmlSpacings[SIZE_P]; x = left + 36.0f * t->indent; } if (*y < bottom) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); width = get_width((uchar *)TocTitle, _htmlHeadingFont, STYLE_BOLD, SIZE_H1); *y = (float)(top - _htmlSpacings[SIZE_H1]); x = (float)(left + 0.5f * (right - left - width)); r = new_render(*page, RENDER_TEXT, x, *y, 0, 0, TocTitle); r->data.text.typeface = _htmlHeadingFont; r->data.text.style = STYLE_BOLD; r->data.text.size = (float)_htmlSizes[SIZE_H1]; get_color(_htmlTextColor, r->data.text.rgb); *y -= _htmlSpacings[SIZE_H1]; if (t->indent) x = left + 18.0f + 18.0f * t->indent; else x = left; if (chap != t) { *y += height; render_contents(chap, left, right, bottom, top, y, page, -1, 0); *y -= _htmlSpacings[SIZE_P]; } } if (temp->link != NULL) { link = htmlGetVariable(temp->link, (uchar *)"HREF"); /* * Add a page link... 
*/ new_render(*page, RENDER_LINK, x, *y, temp->width, temp->height, link); if (PSLevel == 0 && Links) { memcpy(rgb, link_color, sizeof(rgb)); temp->red = (uchar)(link_color[0] * 255.0); temp->green = (uchar)(link_color[1] * 255.0); temp->blue = (uchar)(link_color[2] * 255.0); if (LinkStyle) new_render(*page, RENDER_BOX, x, *y - 1, temp->width, 0, link_color); } } if ((link = htmlGetVariable(temp, (uchar *)"ID")) != NULL) { /* * Add a target link... */ add_link(link, *page, (int)(*y + height)); } switch (temp->markup) { case MARKUP_A : if ((link = htmlGetVariable(temp, (uchar *)"NAME")) != NULL) { /* * Add a target link... */ add_link(link, *page, (int)(*y + height)); } break; case MARKUP_NONE : if (temp->data == NULL) break; if (temp->underline) new_render(*page, RENDER_BOX, x, *y - 1, temp->width, 0, rgb); if (temp->strikethrough) new_render(*page, RENDER_BOX, x, *y + temp->height * 0.25f, temp->width, 0, rgb); r = new_render(*page, RENDER_TEXT, x, *y, 0, 0, temp->data); r->data.text.typeface = temp->typeface; r->data.text.style = temp->style; r->data.text.size = (float)_htmlSizes[temp->size]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); if (temp->superscript) r->y += height - temp->height; else if (temp->subscript) r->y -= height * _htmlSizes[0] / _htmlSpacings[0] - temp->height; break; case MARKUP_IMG : update_image_size(temp); new_render(*page, RENDER_IMAGE, x, *y, temp->width, temp->height, image_find((char *)htmlGetVariable(temp, (uchar *)"REALSRC"))); break; default : break; } x += temp->width; next = temp->next; free(temp); } if (numberwidth > 0.0f) { /* * Draw dots leading up to the page number... */ width = (float)(numberwidth - 3.0 * dot_width + x); for (nptr = number; nptr < (number + sizeof(number) - 1) && width < right; width += dot_width) *nptr++ = '.'; if (nptr > number) nptr --; strlcpy((char *)nptr, pages[hpage].page_text, sizeof(number) - (size_t)(nptr - number)); r = new_render(*page, RENDER_TEXT, right - width + x, *y, 0, 0, number); r->data.text.typeface = t->typeface; r->data.text.style = t->style; r->data.text.size = (float)_htmlSizes[t->size]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); } } /* * 'count_headings()' - Count the number of headings in the TOC. */ static int count_headings(tree_t *t) // I - Tree to count { int count; // Number of headings... count = 0; while (t != NULL) { switch (t->markup) { case MARKUP_B : case MARKUP_LI : count ++; if (t->last_child && t->last_child->markup == MARKUP_UL) count += count_headings(t->last_child); break; default : count += count_headings(t->child); break; } t = t->next; } return (count); } /* * 'parse_contents()' - Parse the table of contents and produce a * rendering list... */ static void parse_contents(tree_t *t, /* I - Tree to parse */ float left, /* I - Left margin */ float right, /* I - Printable width */ float bottom, /* I - Bottom margin */ float top, /* I - Printable top */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int *heading, /* IO - Heading # */ tree_t *chap) /* I - Chapter heading */ { DEBUG_printf(("parse_contents(t=%p, left=%.1f, right=%.1f, bottom=%.1f, top=%.1f, y=%.1f, page=%d, heading=%d, chap=%p)\n", (void *)t, left, right, bottom, top, *y, *page, *heading, (void *)chap)); while (t != NULL) { switch (t->markup) { case MARKUP_B : /* Top-level TOC */ if (t->prev != NULL) /* Advance one line prior to top-levels... 
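          (one extra paragraph spacing); if fewer than three line spacings
          remain above the bottom margin, *y is zeroed below to force a
          page break before this entry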
*/ *y -= _htmlSpacings[SIZE_P]; if (*y < (bottom + _htmlSpacings[SIZE_P] * 3)) *y = 0; // Force page break chap = t; case MARKUP_LI : /* Lower-level TOC */ DEBUG_printf(("parse_contents: heading=%d, page = %d\n", *heading, heading_pages[*heading])); /* * Put the text unless the author has flagged it otherwise... */ if (htmlGetVariable(t, (uchar *)"_HD_OMIT_TOC") == NULL) { render_contents(t, left, right, bottom, top, y, page, *heading, chap); /* * Update current headings for header/footer strings in TOC. */ check_pages(*page); if (t->markup == MARKUP_B && pages[*page].chapter == pages[*page - 1].chapter) pages[*page].chapter = htmlGetText(t->child->child); if (pages[*page].heading == pages[*page - 1].heading) pages[*page].heading = htmlGetText(t->child->child); /* * Next heading... */ (*heading) ++; if (t->last_child->markup == MARKUP_UL) parse_contents(t->last_child, left, right, bottom, top, y, page, heading, chap); } else if (t->next != NULL && t->next->markup == MARKUP_UL) { /* * Skip children of omitted heading... */ t = t->next; (*heading) += count_headings(t->child) + 1; } else (*heading) ++; break; default : parse_contents(t->child, left, right, bottom, top, y, page, heading, chap); break; } t = t->next; } } /* * 'parse_doc()' - Parse a document tree and produce rendering list output. */ static void parse_doc(tree_t *t, /* I - Tree to parse */ float *left, /* I - Left margin */ float *right, /* I - Printable width */ float *bottom, /* I - Bottom margin */ float *top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ tree_t *cpara, /* I - Current paragraph */ int *needspace) /* I - Need whitespace before this element */ { int i; /* Looping var */ tree_t *para, /* Phoney paragraph tree entry */ *temp; /* Paragraph entry */ var_t *var; /* Variable entry */ uchar *name; /* ID name */ uchar *style; /* STYLE attribute */ float width, /* Width of horizontal rule */ height, /* Height of rule */ rgb[3]; /* RGB color of rule */ DEBUG_printf(("parse_doc(t=%p, left=%.1f, right=%.1f, bottom=%.1f, top=%.1f, x=%.1f, y=%.1f, page=%d, cpara=%p, needspace=%d\n", (void *)t, *left, *right, *bottom, *top, *x, *y, *page, (void *)cpara, *needspace)); DEBUG_printf((" title_page = %d, chapter = %d\n", title_page, chapter)); if (cpara == NULL) para = htmlNewTree(NULL, MARKUP_P, NULL); else para = cpara; while (t != NULL) { if (t->markup == MARKUP_FILE) current_url = htmlGetVariable(t, (uchar *)"_HD_URL"); if (((t->markup == MARKUP_H1 && OutputType == OUTPUT_BOOK) || (t->markup == MARKUP_FILE && OutputType == OUTPUT_WEBPAGES)) && !title_page) { // New page on H1 in book mode or file in webpage mode... if (para->child != NULL && chapter > 0) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } if ((chapter > 0 && OutputType == OUTPUT_BOOK) || ((*page > 0 || *y < *top) && OutputType == OUTPUT_WEBPAGES)) { if (*y < *top) (*page) ++; if (PageDuplex && (*page & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); chapter_ends[chapter] = *page - 1; } // Make sure header and footer strings are correct... check_pages(*page); memcpy(pages[*page].header, Header, sizeof(pages[*page].header)); memcpy(pages[*page].header1, Header1, sizeof(pages[*page].header1)); memcpy(pages[*page].footer, Footer, sizeof(pages[*page].footer)); // Bump the chapter/file count... 
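      // The chapter index is clamped to MAX_CHAPTERS - 1 (with an error
      // message); for a valid index chapter_starts[] records the first page
      // of the new chapter, and TocDocCount tracks the highest chapter/file
      // number seen so far.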
chapter ++; if (chapter >= MAX_CHAPTERS) { progress_error(HD_ERROR_TOO_MANY_CHAPTERS, "Too many chapters/files in document (%d > %d)!", chapter, MAX_CHAPTERS); chapter = MAX_CHAPTERS - 1; } else chapter_starts[chapter] = *page; if (chapter > TocDocCount) TocDocCount = chapter; *y = *top; *x = *left; *needspace = 0; } if ((name = htmlGetVariable(t, (uchar *)"ID")) != NULL) { /* * Add a link target using the ID=name variable... */ add_link(name, *page, (int)*y); } else if (t->markup == MARKUP_FILE) { /* * Add a file link... */ uchar newname[256], /* New filename */ *sep; /* "?" separator in links */ // Strip any trailing HTTP GET data stuff... strlcpy((char *)newname, (char *)htmlGetVariable(t, (uchar *)"_HD_FILENAME"), sizeof(newname)); if ((sep = (uchar *)strchr((char *)newname, '?')) != NULL) *sep = '\0'; // Add the link add_link(newname, *page, (int)*y); } if (chapter == 0 && !title_page) { // Need to handle page comments before the first heading... if (t->markup == MARKUP_COMMENT) parse_comment(t, left, right, bottom, top, x, y, page, para, *needspace); if (t->child != NULL) parse_doc(t->child, left, right, bottom, top, x, y, page, para, needspace); t = t->next; continue; } // Check for some basic stylesheet stuff... if ((style = htmlGetStyle(t, (uchar *)"page-break-before:")) != NULL && strcasecmp((char *)style, "avoid") != 0) { // Advance to the next page... (*page) ++; *x = *left; *y = *top; *needspace = 0; // See if we need to go to the next left/righthand page... if (PageDuplex && ((*page) & 1) && strcasecmp((char *)style, "right") == 0) (*page) ++; else if (PageDuplex && !((*page) & 1) && strcasecmp((char *)style, "left") == 0) (*page) ++; // Update the progress as necessary... if (Verbosity) progress_show("Formatting page %d", *page); } // Process the markup... switch (t->markup) { case MARKUP_IMG : update_image_size(t); case MARKUP_NONE : case MARKUP_BR : if (para->child == NULL) { if (t->parent == NULL) { para->halignment = ALIGN_LEFT; para->indent = 0; } else { para->halignment = t->parent->halignment; para->indent = t->parent->indent; } } // Skip heading whitespace... 
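          // (a MARKUP_NONE fragment consisting of a single space is dropped
          // while the pending paragraph is still empty, so a block never
          // starts with a stray leading space)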
if (para->child == NULL && t->markup == MARKUP_NONE && t->data != NULL && strcmp((char *)t->data, " ") == 0) break; if ((temp = htmlAddTree(para, t->markup, t->data)) != NULL) { temp->link = t->link; temp->width = t->width; temp->height = t->height; temp->typeface = t->typeface; temp->style = t->style; temp->size = t->size; temp->underline = t->underline; temp->strikethrough = t->strikethrough; temp->superscript = t->superscript; temp->subscript = t->subscript; temp->halignment = t->halignment; temp->valignment = t->valignment; temp->red = t->red; temp->green = t->green; temp->blue = t->blue; for (i = 0, var = t->vars; i < t->nvars; i ++, var ++) htmlSetVariable(temp, var->name, var->value); } break; case MARKUP_TABLE : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } parse_table(t, *left, *right, *bottom, *top, x, y, page, *needspace); *needspace = 0; break; case MARKUP_H1 : case MARKUP_H2 : case MARKUP_H3 : case MARKUP_H4 : case MARKUP_H5 : case MARKUP_H6 : case MARKUP_H7 : case MARKUP_H8 : case MARKUP_H9 : case MARKUP_H10 : case MARKUP_H11 : case MARKUP_H12 : case MARKUP_H13 : case MARKUP_H14 : case MARKUP_H15 : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 1; } parse_heading(t, *left, *right, *bottom, *top, x, y, page, *needspace); *needspace = 1; break; case MARKUP_BLOCKQUOTE : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 1; } *left += 36; *right -= 36; parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); *left -= 36; *right += 36; *x = *left; *needspace = 1; break; case MARKUP_CENTER : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 1; } parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); *x = *left; *needspace = 1; break; case MARKUP_P : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 1; } parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); *x = *left; *needspace = 1; break; case MARKUP_DIV : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } break; case MARKUP_PRE : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 1; } *left += 36.0f; *x = *left; parse_pre(t, *left, *right, *bottom, *top, x, y, page, *needspace); *left -= 36.0f; *x = *left; *needspace = 1; break; case MARKUP_DIR : case MARKUP_MENU : case MARKUP_UL : case MARKUP_OL : init_list(t); case MARKUP_DL : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, 
page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } if (t->indent == 1) *needspace = 1; *left += 36.0f; *x = *left; parse_doc(t->child, left, right, bottom, top, x, y, page, para, needspace); *left -= 36.0f; if (t->indent == 1) *needspace = 1; break; case MARKUP_LI : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 0; } parse_list(t, left, right, bottom, top, x, y, page, *needspace); *x = *left; *needspace = t->next && t->next->markup != MARKUP_LI && t->next->markup != MARKUP_UL && t->next->markup != MARKUP_OL; break; case MARKUP_DT : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 0; } *left -= 36.0f; *x = *left; parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); *left += 36.0f; *x = *left; *needspace = 0; break; case MARKUP_DD : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 0; } parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); *x = *left; *needspace = 0; break; case MARKUP_HR : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } if (htmlGetVariable(t, (uchar *)"BREAK") == NULL) { /* * Generate a horizontal rule... */ if ((name = htmlGetVariable(t, (uchar *)"WIDTH")) == NULL) width = *right - *left; else { if (strchr((char *)name, '%') != NULL) width = atoi((char *)name) * (*right - *left) / 100; else width = (float)(atoi((char *)name) * PagePrintWidth / _htmlBrowserWidth); } if ((name = htmlGetVariable(t, (uchar *)"SIZE")) == NULL) height = 2; else height = (float)(atoi((char *)name) * PagePrintWidth / _htmlBrowserWidth); switch (t->halignment) { case ALIGN_LEFT : *x = *left; break; case ALIGN_CENTER : *x = *left + (*right - *left - width) * 0.5f; break; case ALIGN_RIGHT : *x = *right - width; break; } if (*y < (*bottom + height + _htmlSpacings[SIZE_P])) { /* * Won't fit on this page... */ (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; } (*y) -= height + _htmlSpacings[SIZE_P]; rgb[0] = t->red / 255.0f; rgb[1] = t->green / 255.0f; rgb[2] = t->blue / 255.0f; new_render(*page, RENDER_BOX, *x, *y + _htmlSpacings[SIZE_P] * 0.5, width, height, rgb); } else { /* * <HR BREAK> generates a page break... */ (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; } *x = *left; *needspace = 0; break; case MARKUP_COMMENT : // Check comments for commands... parse_comment(t, left, right, bottom, top, x, y, page, para, *needspace); break; case MARKUP_HEAD : // Ignore document HEAD section case MARKUP_TITLE : // Ignore title and meta stuff case MARKUP_META : case MARKUP_SCRIPT : // Ignore script stuff case MARKUP_INPUT : // Ignore form stuff case MARKUP_SELECT : case MARKUP_OPTION : case MARKUP_TEXTAREA : break; case MARKUP_STYLE : break; case MARKUP_A : if (htmlGetVariable(t, (uchar *)"NAME") != NULL) { /* * Add this named destination to the paragraph tree... 
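            * (the <A NAME="..."> node is copied into the pending paragraph
            * so that its link target is added at the exact position where
            * that paragraph is eventually rendered)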
*/ if (para->child == NULL) { para->halignment = t->halignment; para->indent = t->indent; } if ((temp = htmlAddTree(para, t->markup, t->data)) != NULL) { temp->link = t->link; temp->width = t->width; temp->height = t->height; temp->typeface = t->typeface; temp->style = t->style; temp->size = t->size; temp->underline = t->underline; temp->strikethrough = t->strikethrough; temp->superscript = t->superscript; temp->subscript = t->subscript; temp->halignment = t->halignment; temp->valignment = t->valignment; temp->red = t->red; temp->green = t->green; temp->blue = t->blue; for (i = 0, var = t->vars; i < t->nvars; i ++, var ++) htmlSetVariable(temp, var->name, var->value); } } default : if (t->child != NULL) parse_doc(t->child, left, right, bottom, top, x, y, page, para, needspace); break; } // Check for some basic stylesheet stuff... if ((style = htmlGetStyle(t, (uchar *)"page-break-after:")) != NULL && strcasecmp((char *)style, "avoid") != 0) { // Advance to the next page... (*page) ++; *x = *left; *y = *top; *needspace = 0; // See if we need to go to the next left/righthand page... if (PageDuplex && ((*page) & 1) && strcasecmp((char *)style, "right") == 0) (*page) ++; else if (PageDuplex && !((*page) & 1) && strcasecmp((char *)style, "left") == 0) (*page) ++; // Update the progress as necessary... if (Verbosity) progress_show("Formatting page %d", *page); } // Move to the next node... t = t->next; } if (para->child != NULL && cpara != para) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 0; } if (cpara != para) htmlDeleteTree(para); DEBUG_printf(("LEAVING parse_doc(), x = %.1f, y = %.1f, page = %d\n", *x, *y, *page)); } /* * 'parse_heading()' - Parse a heading tree and produce rendering list output. */ static void parse_heading(tree_t *t, /* I - Tree to parse */ float left, /* I - Left margin */ float right, /* I - Printable width */ float bottom, /* I - Bottom margin */ float top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int needspace) /* I - Need whitespace? */ { int *temp; // Temporary integer array pointer DEBUG_printf(("parse_heading(t=%p, left=%.1f, right=%.1f, bottom=%.1f, top=%.1f, x=%.1f, y=%.1f, page=%d, needspace=%d\n", (void *)t, left, right, bottom, top, *x, *y, *page, needspace)); if (((t->markup - MARKUP_H1) < TocLevels || TocLevels == 0) && !title_page) current_heading = t->child; if (*y < (5 * _htmlSpacings[SIZE_P] + bottom)) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } check_pages(*page); if (t->markup == MARKUP_H1 && !title_page) pages[*page].chapter = htmlGetText(current_heading); if ((pages[*page].heading == NULL || t->markup == MARKUP_H1 || (*page > 0 && pages[*page].heading == pages[*page - 1].heading)) && !title_page) { pages[*page].heading = htmlGetText(current_heading); pages[*page].headnode = current_heading; } if ((t->markup - MARKUP_H1) < TocLevels && !title_page) { DEBUG_printf(("H%d: heading_pages[%d] = %d\n", t->markup - MARKUP_H1 + 1, (int)num_headings, *page - 1)); // See if we need to resize the headings arrays... 
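    // heading_pages[] and heading_tops[] are parallel arrays grown in
    // ALLOC_HEADINGS increments; newly added entries are zeroed before the
    // current heading's page number and top position are recorded below.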
if (num_headings >= alloc_headings) { alloc_headings += ALLOC_HEADINGS; if (num_headings == 0) temp = (int *)malloc(sizeof(int) * alloc_headings); else temp = (int *)realloc(heading_pages, sizeof(int) * alloc_headings); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d headings - %s", (int)alloc_headings, strerror(errno)); alloc_headings -= ALLOC_HEADINGS; return; } memset(temp + alloc_headings - ALLOC_HEADINGS, 0, sizeof(int) * ALLOC_HEADINGS); heading_pages = temp; if (num_headings == 0) temp = (int *)malloc(sizeof(int) * alloc_headings); else temp = (int *)realloc(heading_tops, sizeof(int) * alloc_headings); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d headings - %s", (int)alloc_headings, strerror(errno)); alloc_headings -= ALLOC_HEADINGS; return; } memset(temp + alloc_headings - ALLOC_HEADINGS, 0, sizeof(int) * ALLOC_HEADINGS); heading_tops = temp; } heading_pages[num_headings] = *page; heading_tops[num_headings] = (int)(*y + 4 * _htmlSpacings[SIZE_P]); num_headings ++; } parse_paragraph(t, left, right, bottom, top, x, y, page, needspace); if (t->halignment == ALIGN_RIGHT && t->markup == MARKUP_H1 && OutputType == OUTPUT_BOOK && !title_page) { /* * Special case - chapter heading for users manual... */ *y = bottom + 0.5f * (top - bottom); } } #if defined(PARA_DEBUG) && !defined(DEBUG) # undef DEBUG_printf # undef DEBUG_puts # define DEBUG_printf(x) printf x # define DEBUG_puts(x) puts(x) #endif /* PARA_DEBUG && !defined(DEBUG) */ /* * 'parse_paragraph()' - Parse a paragraph tree and produce rendering list * output. */ static void parse_paragraph(tree_t *t, /* I - Tree to parse */ float left, /* I - Left margin */ float right, /* I - Printable width */ float bottom, /* I - Bottom margin */ float top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int needspace)/* I - Need whitespace? */ { int whitespace; /* Non-zero if a fragment ends in whitespace */ tree_t *flat, *start, *end, *prev, *temp; float width, height, offset, spacing, borderspace, temp_y, temp_width, temp_height; float format_width, image_y, image_left, image_right; int image_page = *page; float char_spacing; int num_chars; render_t *r; uchar *align, *hspace, *vspace, *link, *border; float rgb[3]; uchar line[10240], *lineptr, *dataptr; tree_t *linetype; float linex, linewidth; int firstline; DEBUG_printf(("parse_paragraph(t=%p, left=%.1f, right=%.1f, bottom=%.1f, top=%.1f, x=%.1f, y=%.1f, page=%d, needspace=%d\n", (void *)t, left, right, bottom, top, *x, *y, *page, needspace)); flat = flatten_tree(t->child); image_left = left; image_right = right; image_y = 0; if (flat == NULL) DEBUG_puts("parse_paragraph: flat == NULL!"); // Add leading whitespace... if (*y < top && needspace) *y -= _htmlSpacings[SIZE_P]; /* * First scan for images with left/right alignment tags... 
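  * (an IMG with ALIGN=LEFT or ALIGN=RIGHT is rendered immediately, the
  * image_left/image_right margins are narrowed by its width plus any
  * BORDER/HSPACE/VSPACE, and the node is unlinked from the flattened list;
  * image_y remembers where the float ends so the full margins can be
  * restored once the text moves past it)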
*/ for (temp = flat, prev = NULL; temp != NULL;) { if (temp->markup == MARKUP_IMG) update_image_size(temp); if (temp->markup == MARKUP_IMG && (align = htmlGetVariable(temp, (uchar *)"ALIGN"))) { if ((border = htmlGetVariable(temp, (uchar *)"BORDER")) != NULL) borderspace = (float)atof((char *)border); else if (temp->link) borderspace = 1; else borderspace = 0; borderspace *= PagePrintWidth / _htmlBrowserWidth; if (strcasecmp((char *)align, "LEFT") == 0) { if ((vspace = htmlGetVariable(temp, (uchar *)"VSPACE")) != NULL) *y -= atoi((char *)vspace); if (*y < (bottom + temp->height + 2 * borderspace)) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } if (borderspace > 0.0f) { if (temp->link && PSLevel == 0) memcpy(rgb, link_color, sizeof(rgb)); else { rgb[0] = temp->red / 255.0f; rgb[1] = temp->green / 255.0f; rgb[2] = temp->blue / 255.0f; } // Top new_render(*page, RENDER_BOX, image_left, *y - borderspace, temp->width + 2 * borderspace, borderspace, rgb); // Left new_render(*page, RENDER_BOX, image_left, *y - temp->height - 2 * borderspace, borderspace, temp->height + 2 * borderspace, rgb); // Right new_render(*page, RENDER_BOX, image_left + temp->width + borderspace, *y - temp->height - 2 * borderspace, borderspace, temp->height + 2 * borderspace, rgb); // Bottom new_render(*page, RENDER_BOX, image_left, *y - temp->height - 2 * borderspace, temp->width + 2 * borderspace, borderspace, rgb); } *y -= borderspace; new_render(*page, RENDER_IMAGE, image_left + borderspace, *y - temp->height, temp->width, temp->height, image_find((char *)htmlGetVariable(temp, (uchar *)"REALSRC"))); if (temp->link && (link = htmlGetVariable(temp->link, (uchar *)"_HD_FULL_HREF")) != NULL) { /* * Add a page link... */ new_render(*page, RENDER_LINK, image_left + borderspace, *y - temp->height, temp->width, temp->height, link); } *y -= borderspace; if (vspace != NULL) *y -= atoi((char *)vspace); image_left += temp->width + 2 * borderspace; temp_y = *y - temp->height; image_page = *page; if (temp_y < image_y || image_y == 0) image_y = temp_y; if ((hspace = htmlGetVariable(temp, (uchar *)"HSPACE")) != NULL) image_left += atoi((char *)hspace); if (prev != NULL) prev->next = temp->next; else flat = temp->next; free(temp); temp = prev; } else if (strcasecmp((char *)align, "RIGHT") == 0) { if ((vspace = htmlGetVariable(temp, (uchar *)"VSPACE")) != NULL) *y -= atoi((char *)vspace); if (*y < (bottom + temp->height + 2 * borderspace)) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } image_right -= temp->width + 2 * borderspace; image_page = *page; if (borderspace > 0.0f) { if (temp->link && PSLevel == 0) memcpy(rgb, link_color, sizeof(rgb)); else { rgb[0] = temp->red / 255.0f; rgb[1] = temp->green / 255.0f; rgb[2] = temp->blue / 255.0f; } // Top new_render(*page, RENDER_BOX, image_right, *y - borderspace, temp->width + 2 * borderspace, borderspace, rgb); // Left new_render(*page, RENDER_BOX, image_right, *y - temp->height - 2 * borderspace, borderspace, temp->height + 2 * borderspace, rgb); // Right new_render(*page, RENDER_BOX, image_right + temp->width + borderspace, *y - temp->height - 2 * borderspace, borderspace, temp->height + 2 * borderspace, rgb); // Bottom new_render(*page, RENDER_BOX, image_right, *y - temp->height - 2 * borderspace, temp->width + 2 * borderspace, borderspace, rgb); } *y -= borderspace; new_render(*page, RENDER_IMAGE, image_right + borderspace, *y - temp->height, temp->width, temp->height, image_find((char *)htmlGetVariable(temp, 
(uchar *)"REALSRC"))); if (temp->link && (link = htmlGetVariable(temp->link, (uchar *)"_HD_FULL_HREF")) != NULL) { /* * Add a page link... */ new_render(*page, RENDER_LINK, image_right + borderspace, *y - temp->height, temp->width, temp->height, link); } *y -= borderspace; if (vspace != NULL) *y -= atoi((char *)vspace); temp_y = *y - temp->height; if (temp_y < image_y || image_y == 0) image_y = temp_y; if ((hspace = htmlGetVariable(temp, (uchar *)"HSPACE")) != NULL) image_right -= atoi((char *)hspace); if (prev != NULL) prev->next = temp->next; else flat = temp->next; free(temp); temp = prev; } } if (temp != NULL) { prev = temp; temp = temp->next; } else temp = flat; } /* * Then format the text and inline images... */ format_width = image_right - image_left; firstline = 1; DEBUG_printf(("format_width = %.1f\n", format_width)); // Make stupid compiler warnings go away (if you can't put // enough smarts in the compiler, don't add the warning!) offset = 0.0f; temp_width = 0.0f; temp_height = 0.0f; lineptr = NULL; linex = 0.0f; linewidth = 0.0f; while (flat != NULL) { start = flat; end = flat; width = 0.0; while (flat != NULL) { // Get fragments... temp_width = 0.0; temp = flat; whitespace = 0; while (temp != NULL && !whitespace) { if (temp->markup == MARKUP_NONE && temp->data[0] == ' ') { if (temp == start) temp_width -= _htmlWidths[temp->typeface][temp->style][' '] * _htmlSizes[temp->size] * 0.001f; else if (temp_width > 0.0f) whitespace = 1; } else whitespace = 0; if (whitespace) break; if (temp->markup == MARKUP_IMG) { if ((border = htmlGetVariable(temp, (uchar *)"BORDER")) != NULL) borderspace = (float)atof((char *)border); else if (temp->link) borderspace = 1; else borderspace = 0; borderspace *= PagePrintWidth / _htmlBrowserWidth; temp_width += 2 * borderspace; } prev = temp; temp = temp->next; temp_width += prev->width; if ((temp_width >= format_width && prev->markup == MARKUP_IMG) || prev->markup == MARKUP_BR) { break; } else if (prev->markup == MARKUP_NONE) { int ch = prev->data[strlen((char *)prev->data) - 1]; if (_htmlUTF8) ch = _htmlUnicode[ch]; if (ch == 173) break; } } if ((width + temp_width) <= format_width) { width += temp_width; end = temp; flat = temp; if (prev->markup == MARKUP_BR) break; } else if (width == 0.0) { width += temp_width; end = temp; flat = temp; break; } else break; } if (start == end) { end = start->next; flat = start->next; width = start->width; } for (height = 0.0, num_chars = 0, temp = prev = start; temp != end; temp = temp->next) { prev = temp; if (temp->markup == MARKUP_NONE) num_chars += strlen((char *)temp->data); if (temp->height > height) height = temp->height; } for (spacing = 0.0, temp = prev = start; temp != end; temp = temp->next) { prev = temp; if (temp->markup != MARKUP_IMG) temp_height = (float)(temp->height * _htmlSpacings[0] / _htmlSizes[0]); else { if ((border = htmlGetVariable(temp, (uchar *)"BORDER")) != NULL) borderspace = (float)atof((char *)border); else if (temp->link) borderspace = 1; else borderspace = 0; borderspace *= PagePrintWidth / _htmlBrowserWidth; temp_height = temp->height + 2 * borderspace; } if (temp_height > spacing) spacing = temp_height; } if (firstline && end != NULL && *y < (bottom + height + _htmlSpacings[t->size])) { // Go to next page since only 1 line will fit on this one... 
(*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } firstline = 0; if (height == 0.0f) height = spacing; for (temp = start; temp != end; temp = temp->next) if (temp->markup != MARKUP_A) break; if (temp != NULL && temp->markup == MARKUP_NONE && temp->data[0] == ' ') { // Drop leading space... for (dataptr = temp->data; *dataptr; dataptr ++) *dataptr = dataptr[1]; *dataptr = '\0'; temp_width = _htmlWidths[temp->typeface][temp->style][' '] * _htmlSizes[temp->size] * 0.001f; temp->width -= temp_width; num_chars --; } if (end != NULL) temp = end->prev; else temp = NULL; DEBUG_printf((" BEFORE page=%d, y=%.1f, height=%.1f, spacing=%.1f, bottom=%.1f\n", *page, *y, height, spacing, bottom)); if (*y < (spacing + bottom)) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } *y -= height; DEBUG_printf((" page=%d, y=%.1f, width=%.1f, height=%.1f\n", *page, *y, width, height)); if (Verbosity) progress_update(100 - (int)(100 * (*y) / PagePrintLength)); char_spacing = 0.0f; whitespace = 0; temp = start; linetype = NULL; rgb[0] = temp->red / 255.0f; rgb[1] = temp->green / 255.0f; rgb[2] = temp->blue / 255.0f; switch (t->halignment) { case ALIGN_LEFT : linex = image_left; break; case ALIGN_CENTER : linex = image_left + 0.5f * (format_width - width); break; case ALIGN_RIGHT : linex = image_right - width; break; case ALIGN_JUSTIFY : linex = image_left; if (flat != NULL && flat->prev->markup != MARKUP_BR && num_chars > 1) char_spacing = (format_width - width) / (num_chars - 1); break; } while (temp != end) { if (temp->link != NULL && PSLevel == 0 && Links && temp->markup == MARKUP_NONE) { temp->red = (uchar)(link_color[0] * 255.0); temp->green = (uchar)(link_color[1] * 255.0); temp->blue = (uchar)(link_color[2] * 255.0); } /* * See if we are doing a run of characters in a line and need to * output this run... */ if (linetype != NULL && (temp->markup != MARKUP_NONE || temp->typeface != linetype->typeface || temp->style != linetype->style || temp->size != linetype->size || temp->superscript != linetype->superscript || temp->subscript != linetype->subscript || temp->red != linetype->red || temp->green != linetype->green || temp->blue != linetype->blue)) { r = new_render(*page, RENDER_TEXT, linex - linewidth, *y, linewidth, linetype->height, line); r->data.text.typeface = linetype->typeface; r->data.text.style = linetype->style; r->data.text.size = (float)_htmlSizes[linetype->size]; r->data.text.spacing = char_spacing; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); if (linetype->superscript) r->y += height - linetype->height; else if (linetype->subscript) r->y -= height - linetype->height; free(linetype); linetype = NULL; } if ((link = htmlGetVariable(temp, (uchar *)"ID")) != NULL) { /* * Add a target link... */ add_link(link, *page, (int)(*y + height)); } switch (temp->markup) { case MARKUP_A : if ((link = htmlGetVariable(temp, (uchar *)"NAME")) != NULL) { /* * Add a target link... 
*/ add_link(link, *page, (int)(*y + height)); } default : temp_width = temp->width; break; case MARKUP_NONE : if (temp->data == NULL) break; if (((temp->width - right + left) > 0.001 || (temp->height - top + bottom) > 0.001) && OverflowErrors) progress_error(HD_ERROR_CONTENT_TOO_LARGE, "Text on page %d too large - " "truncation or overlapping may occur!", *page + 1); if (linetype == NULL) { linetype = temp; lineptr = line; linewidth = 0.0; rgb[0] = temp->red / 255.0f; rgb[1] = temp->green / 255.0f; rgb[2] = temp->blue / 255.0f; } strlcpy((char *)lineptr, (char *)temp->data, sizeof(line) - (size_t)(lineptr - line)); temp_width = temp->width + char_spacing * strlen((char *)lineptr); if (temp->underline || (temp->link && LinkStyle && PSLevel == 0)) new_render(*page, RENDER_BOX, linex, *y - 1, temp_width, 0, rgb); if (temp->strikethrough) new_render(*page, RENDER_BOX, linex, *y + temp->height * 0.25f, temp_width, 0, rgb); linewidth += temp_width; lineptr += strlen((char *)lineptr); if (lineptr > line && lineptr[-1] == ' ') whitespace = 1; else whitespace = 0; break; case MARKUP_IMG : if (((temp->width - right + left) > 0.001 || (temp->height - top + bottom) > 0.001) && OverflowErrors) { DEBUG_printf(("IMAGE: %.3fx%.3f > %.3fx%.3f\n", temp->width, temp->height, right - left, top - bottom)); progress_error(HD_ERROR_CONTENT_TOO_LARGE, "Image on page %d too large - " "truncation or overlapping may occur!", *page + 1); } if ((border = htmlGetVariable(temp, (uchar *)"BORDER")) != NULL) borderspace = (float)atof((char *)border); else if (temp->link) borderspace = 1; else borderspace = 0; borderspace *= PagePrintWidth / _htmlBrowserWidth; temp_width += 2 * borderspace; switch (temp->valignment) { case ALIGN_TOP : offset = height - temp->height - 2 * borderspace; break; case ALIGN_MIDDLE : offset = 0.5f * (height - temp->height) - borderspace; break; case ALIGN_BOTTOM : offset = 0.0f; } if (borderspace > 0.0f) { // Top new_render(*page, RENDER_BOX, linex, *y + offset + temp->height + borderspace, temp->width + 2 * borderspace, borderspace, rgb); // Left new_render(*page, RENDER_BOX, linex, *y + offset, borderspace, temp->height + 2 * borderspace, rgb); // Right new_render(*page, RENDER_BOX, linex + temp->width + borderspace, *y + offset, borderspace, temp->height + 2 * borderspace, rgb); // Bottom new_render(*page, RENDER_BOX, linex, *y + offset, temp->width + 2 * borderspace, borderspace, rgb); } new_render(*page, RENDER_IMAGE, linex + borderspace, *y + offset + borderspace, temp->width, temp->height, image_find((char *)htmlGetVariable(temp, (uchar *)"REALSRC"))); whitespace = 0; temp_width = temp->width + 2 * borderspace; break; } if (temp->link != NULL && (link = htmlGetVariable(temp->link, (uchar *)"_HD_FULL_HREF")) != NULL) { /* * Add a page link... */ new_render(*page, RENDER_LINK, linex, *y + offset, temp->width, temp->height, link); } linex += temp_width; prev = temp; temp = temp->next; if (prev != linetype) free(prev); } /* * See if we have a run of characters that hasn't been output... 
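  * (any text still buffered in line[] is flushed as one RENDER_TEXT
  * primitive using the typeface, style, size, and color captured in
  * linetype, with superscript/subscript offsets applied)...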
*/ if (linetype != NULL) { r = new_render(*page, RENDER_TEXT, linex - linewidth, *y, linewidth, linetype->height, line); r->data.text.typeface = linetype->typeface; r->data.text.style = linetype->style; r->data.text.spacing = char_spacing; r->data.text.size = (float)_htmlSizes[linetype->size]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); if (linetype->superscript) r->y += height - linetype->height; else if (linetype->subscript) r->y -= height - linetype->height; free(linetype); } /* * Update the margins after we pass below the images... */ *y -= spacing - height; DEBUG_printf((" AFTER y=%.1f, bottom=%.1f\n", *y, bottom)); if (*y < bottom) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } if (*y < image_y || *page > image_page) { image_y = 0.0f; image_left = left; image_right = right; format_width = image_right - image_left; } } *x = left; if (*y > image_y && image_y > 0.0f && image_page == *page) *y = image_y; DEBUG_printf(("LEAVING parse_paragraph(), x = %.1f, y = %.1f, page = %d, image_y = %.1f\n", *x, *y, *page, image_y)); } #if defined(PARA_DEBUG) && !defined(DEBUG) # undef DEBUG_printf # undef DEBUG_puts # define DEBUG_printf(x) # define DEBUG_puts(x) #endif /* PARA_DEBUG && !DEBUG */ /* * 'parse_pre()' - Parse preformatted text and produce rendering list output. */ static void parse_pre(tree_t *t, /* I - Tree to parse */ float left, /* I - Left margin */ float right, /* I - Printable width */ float bottom, /* I - Bottom margin */ float top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int needspace) /* I - Need whitespace? */ { tree_t *flat, *start, *next; uchar *link, line[10240], *lineptr, *dataptr; int col; float width, height, rgb[3]; render_t *r; REF(right); DEBUG_printf(("parse_pre(t=%p, left=%.1f, right=%.1f, x=%.1f, y=%.1f, page=%d\n", (void *)t, left, right, *x, *y, *page)); if (t->child == NULL) return; if (*y < top && needspace) *y -= _htmlSpacings[SIZE_P]; flat = flatten_tree(t->child); if (flat == NULL) return; if (flat->markup == MARKUP_NONE && flat->data != NULL) { // Skip leading blank line, if present... for (dataptr = flat->data; isspace(*dataptr); dataptr ++); if (!*dataptr) { next = flat->next; free(flat); flat = next; } } while (flat != NULL) { for (height = 0.0f, start = flat; flat != NULL; flat = flat->next) { if (flat->height > height) height = flat->height; if (flat->markup == MARKUP_BR || (flat->markup == MARKUP_NONE && flat->data && flat->data[strlen((char *)flat->data) - 1] == '\n')) break; } if (flat) flat = flat->next; if (*y < (height + bottom)) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } *x = left; *y -= height; if (Verbosity) progress_update(100 - (int)(100 * (*y) / PagePrintLength)); col = 0; while (start != flat) { rgb[0] = start->red / 255.0f; rgb[1] = start->green / 255.0f; rgb[2] = start->blue / 255.0f; if (start->link && (link = htmlGetVariable(start->link, (uchar *)"_HD_FULL_HREF")) != NULL) { /* * Add a page link... */ new_render(*page, RENDER_LINK, *x, *y, start->width, start->height, link); if (PSLevel == 0 && Links) { memcpy(rgb, link_color, sizeof(rgb)); start->red = (uchar)(link_color[0] * 255.0); start->green = (uchar)(link_color[1] * 255.0); start->blue = (uchar)(link_color[2] * 255.0); if (LinkStyle) new_render(*page, RENDER_BOX, *x, *y - 1, start->width, 0, link_color); } } if ((link = htmlGetVariable(start, (uchar *)"ID")) != NULL) { /* * Add a target link... 
*/ add_link(link, *page, (int)(*y + height)); } switch (start->markup) { case MARKUP_COMMENT : parse_comment(start, &left, &right, &bottom, &top, x, y, page, NULL, 0); break; case MARKUP_A : if ((link = htmlGetVariable(start, (uchar *)"NAME")) != NULL) { /* * Add a target link... */ add_link(link, *page, (int)(*y + height)); } break; case MARKUP_NONE : for (lineptr = line, dataptr = start->data; *dataptr != '\0' && lineptr < (line + sizeof(line) - 1); dataptr ++) if (*dataptr == '\n') break; else if (*dataptr == '\t') { /* This code changed after 15 years to work around new compiler optimization bugs (Issue #349) */ int num_cols = 8 - (col & 7); memcpy(lineptr, " ", num_cols); lineptr += num_cols; col += num_cols; } else if (*dataptr != '\r') { *lineptr++ = *dataptr; col ++; } *lineptr = '\0'; width = get_width(line, start->typeface, start->style, start->size); r = new_render(*page, RENDER_TEXT, *x, *y, width, 0, line); r->data.text.typeface = start->typeface; r->data.text.style = start->style; r->data.text.size = (float)_htmlSizes[start->size]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); if (start->underline) new_render(*page, RENDER_BOX, *x, *y - 1, start->width, 0, rgb); if (start->strikethrough) new_render(*page, RENDER_BOX, *x, *y + start->height * 0.25f, start->width, 0, rgb); *x += start->width; break; case MARKUP_IMG : new_render(*page, RENDER_IMAGE, *x, *y, start->width, start->height, image_find((char *)htmlGetVariable(start, (uchar *)"REALSRC"))); *x += start->width; col ++; break; default : break; } next = start->next; free(start); start = next; } if ((*x - right) > 0.001 && OverflowErrors) progress_error(HD_ERROR_CONTENT_TOO_LARGE, "Preformatted text on page %d too long - " "truncation or overlapping may occur!", *page + 1); *y -= _htmlSpacings[t->size] - _htmlSizes[t->size]; } *x = left; } //#define TABLE_DEBUG 1 #ifdef TABLE_DEBUG # undef DEBUG_puts # define DEBUG_puts(x) puts(x) # define DEBUG 1 # undef DEBUG_printf # define DEBUG_printf(x) printf x #endif /* TABLE_DEBUG */ typedef struct { int debug; int num_cols, num_rows; float border, border_left, border_rgb[3], border_size, cellpadding, height; int col_spans[MAX_COLUMNS], row_spans[MAX_COLUMNS]; char col_fixed[MAX_COLUMNS], col_percent[MAX_COLUMNS]; float col_lefts[MAX_COLUMNS], col_rights[MAX_COLUMNS], col_widths[MAX_COLUMNS], col_swidths[MAX_COLUMNS], col_mins[MAX_COLUMNS], col_smins[MAX_COLUMNS], col_prefs[MAX_COLUMNS]; int cell_page[MAX_COLUMNS], // Start page for cell cell_endpage[MAX_COLUMNS]; // End page for cell float cell_y[MAX_COLUMNS], // Row for each cell cell_endy[MAX_COLUMNS], // Row for each cell cell_height[MAX_COLUMNS], // Height of each cell in a row span_heights[MAX_COLUMNS]; // Height of spans render_t *cell_bg[MAX_COLUMNS]; // Background rectangles render_t *cell_start[MAX_COLUMNS]; // Start of the content for a cell in the row render_t *cell_end[MAX_COLUMNS]; // End of the content for a cell in a row } hdtable_t; /* * 'render_table_row()' - Render a table row. 
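 *
 * Each cell's content is measured by running parse_doc() between that
 * cell's column margins, recording its start/end page and y position.
 * The row height is derived from the tallest non-spanning cell (or a
 * HEIGHT attribute), cell contents are then shifted for VALIGN middle or
 * bottom alignment, and finally the row and cell backgrounds and borders
 * are drawn, split across pages when a cell crosses a page boundary.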
*/ static void render_table_row(hdtable_t &table, tree_t ***cells, int row, uchar *height_var, float left, // I - Left margin float right, // I - Printable width float bottom, // I - Bottom margin float top, // I - Printable top float *x, float *y, int *page) { int col, tcol, colspan, rowspan, tempspace; float width, temp_y; int temp_page; uchar *var; int do_valign; // True if we should do vertical alignment of cells int row_page; float row_y, row_starty, row_height, // Total height of the row temp_height; // Temporary holder uchar *bgcolor; float bgrgb[3]; do_valign = 1; row_height = 0.0f; row_page = *page; row_y = *y - table.cellpadding; row_starty = row_y; DEBUG_printf(("BEFORE row_y = %.1f, *y = %.1f, row_page = %d\n", row_y, *y, row_page)); for (col = 0, rowspan = 9999; col < table.num_cols; col += colspan) { if (table.row_spans[col] == 0) { if ((var = htmlGetVariable(cells[row][col], (uchar *)"ROWSPAN")) != NULL) table.row_spans[col] = atoi((char *)var); if (table.row_spans[col] <= 1) table.row_spans[col] = 0; if (table.row_spans[col] > (table.num_rows - row)) table.row_spans[col] = table.num_rows - row; table.span_heights[col] = 0.0f; } if (table.row_spans[col] < rowspan) rowspan = table.row_spans[col]; for (colspan = 1; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; } if (!rowspan) rowspan = 1; for (col = 0; col < table.num_cols;) { for (colspan = 1; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; colspan --; DEBUG_printf((" col = %d, colspan = %d, left = %.1f, right = %.1f, cell = %p\n", col, colspan, table.col_lefts[col], table.col_rights[col + colspan], (void *)cells[row][col])); *x = table.col_lefts[col]; temp_y = *y - table.cellpadding; temp_page = *page; tempspace = 0; if (row == 0 || cells[row][col] != cells[row - 1][col]) { check_pages(*page); if (cells[row][col] == NULL) bgcolor = NULL; else if ((bgcolor = htmlGetVariable(cells[row][col], (uchar *)"BGCOLOR")) != NULL) { memcpy(bgrgb, background_color, sizeof(bgrgb)); get_color(bgcolor, bgrgb, 0); width = table.col_rights[col + colspan] - table.col_lefts[col] + 2 * table.cellpadding; table.border_left = table.col_lefts[col] - table.cellpadding; table.cell_bg[col] = new_render(*page, RENDER_BOX, table.border_left, row_y, width + table.border, 0.0, bgrgb); } else { table.cell_bg[col] = NULL; new_render(*page, RENDER_TEXT, -1.0f, -1.0f, 0.0, 0.0, (void *)""); } DEBUG_printf(("cell_bg[%d] = %p, pages[%d].end = %p\n", col, (void *)table.cell_bg[col], *page, (void *)pages[*page].end)); table.cell_start[col] = pages[*page].end; table.cell_page[col] = temp_page; table.cell_y[col] = temp_y; if (table.debug) { check_pages(*page); render_t *r; char table_text[255]; snprintf(table_text, sizeof(table_text), "cell=%p [%d,%d]", (void *)cells[row][col], row, col); r = new_render(temp_page, RENDER_TEXT, *x, temp_y, get_width((uchar *)table_text, TYPE_COURIER, STYLE_NORMAL, 1), _htmlSizes[1], table_text); r->data.text.typeface = TYPE_COURIER; r->data.text.style = STYLE_NORMAL; r->data.text.size = (float)_htmlSizes[1]; } if (cells[row][col] != NULL && cells[row][col]->child != NULL) { DEBUG_printf((" parsing cell %d,%d; width = %.1f\n", row, col, table.col_rights[col + colspan] - table.col_lefts[col])); bottom += table.cellpadding; top -= table.cellpadding; parse_doc(cells[row][col]->child, table.col_lefts + col, table.col_rights + col + colspan, &bottom, &top, x, &temp_y, &temp_page, NULL, &tempspace); bottom -= table.cellpadding; 
top += table.cellpadding; } table.cell_endpage[col] = temp_page; table.cell_endy[col] = temp_y; table.cell_height[col] = *y - table.cellpadding - temp_y; table.cell_end[col] = pages[*page].end; if (table.cell_start[col] == NULL) table.cell_start[col] = pages[*page].start; DEBUG_printf(("row = %d, col = %d, y = %.1f, cell_y = %.1f, cell_height = %.1f\n", row, col, *y - table.cellpadding, temp_y, table.cell_height[col])); DEBUG_printf(("cell_start[%d] = %p, cell_end[%d] = %p\n", col, (void *)table.cell_start[col], col, (void *)table.cell_end[col])); } if (table.row_spans[col] == 0 && table.cell_page[col] == table.cell_endpage[col] && table.cell_height[col] > row_height) row_height = table.cell_height[col]; if (table.row_spans[col] <= rowspan) { if (table.cell_page[col] != table.cell_endpage[col]) do_valign = 0; if (table.cell_endpage[col] > row_page) { row_page = table.cell_endpage[col]; row_y = table.cell_endy[col]; } else if (table.cell_endy[col] < row_y && table.cell_endpage[col] == row_page) row_y = table.cell_endy[col]; } DEBUG_printf(("**** col = %d, row = %d, row_y = %.1f, row_page = %d\n", col, row, row_y, row_page)); for (col ++; colspan > 0; colspan --, col ++) { table.cell_start[col] = NULL; table.cell_page[col] = table.cell_page[col - 1]; table.cell_y[col] = table.cell_y[col - 1]; table.cell_end[col] = NULL; table.cell_endpage[col] = table.cell_endpage[col - 1]; table.cell_endy[col] = table.cell_endy[col - 1]; table.cell_height[col] = table.cell_height[col - 1]; } } DEBUG_printf(("row = %d, row_y = %.1f, row_height = %.1f\n", row, row_y, row_height)); for (col = 0; col < table.num_cols; col += colspan) { for (colspan = 1; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; if (table.row_spans[col]) table.span_heights[col] += row_height; DEBUG_printf(("col = %d, cell_y = %.1f, cell_page = %d, cell_endpage = %d, row_spans = %d, span_heights = %.1f, cell_height = %.1f\n", col, table.cell_y[col], table.cell_page[col], table.cell_endpage[col], table.row_spans[col], table.span_heights[col], table.cell_height[col])); } for (col = 0; col < table.num_cols; col += colspan) { for (colspan = 1; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; if (table.row_spans[col] == rowspan && table.cell_page[col] == table.cell_endpage[col] && table.cell_height[col] > table.span_heights[col]) { temp_height = table.cell_height[col] - table.span_heights[col]; row_height += temp_height; DEBUG_printf(("Adjusting row-span height by %.1f, new row_height = %.1f\n", temp_height, row_height)); for (tcol = 0; tcol < table.num_cols; tcol ++) if (table.row_spans[tcol]) { table.span_heights[tcol] += temp_height; DEBUG_printf(("col = %d, span_heights = %.1f\n", tcol, table.span_heights[tcol])); } } } DEBUG_printf(("AFTER row = %d, row_page = %d, row_y = %.1f, row_height = %.1f, *y = %.1f, do_valign = %d\n", row, row_page, row_y, row_height, *y, do_valign)); /* * Do the vertical alignment */ if (do_valign) { height_var = NULL; if (cells[row][0] != NULL) { if ((height_var = htmlGetVariable(cells[row][0]->parent, (uchar *)"HEIGHT")) == NULL) for (col = 0; col < table.num_cols; col ++) if (htmlGetVariable(cells[row][col], (uchar *)"ROWSPAN") == NULL) if ((height_var = htmlGetVariable(cells[row][col], (uchar *)"HEIGHT")) != NULL) break; } if (height_var != NULL) { // Hardcode the row height... 
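      // A trailing '%' means a percentage of the page print length;
      // otherwise the HEIGHT value is treated as pixels and scaled by
      // PagePrintWidth / _htmlBrowserWidth.  After subtracting the cell
      // padding, the value only ever grows the row height; it never
      // shrinks a row that is already taller.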
if (height_var[strlen((char *)height_var) - 1] == '%') temp_height = (float)(atof((char *)height_var) * 0.01f * PagePrintLength); else temp_height = (float)(atof((char *)height_var) * PagePrintWidth / _htmlBrowserWidth); if (table.height > 0 && temp_height > table.height) temp_height = table.height; temp_height -= 2 * table.cellpadding; if (temp_height > row_height) { // Only enforce the height if it is > the actual row height. row_height = temp_height; row_y = *y - temp_height; } } for (col = 0; col < table.num_cols; col += colspan + 1) { render_t *p; float delta_y; for (colspan = 1; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; colspan --; if (table.cell_start[col] == NULL || table.row_spans[col] > rowspan || cells[row][col] == NULL || cells[row][col]->child == NULL) continue; if (table.row_spans[col] == 1) { int tcol; float span_height = 0.0f; for (tcol = 0; tcol < table.num_cols; tcol ++) { if (table.row_spans[col] == 1 && table.span_heights[col] > span_height) span_height = table.span_heights[col]; } switch (cells[row][col]->valignment) { case ALIGN_MIDDLE : // delta_y = (table.span_heights[col] - table.cell_height[col]) * 0.5f; delta_y = (span_height - table.cell_height[col]) * 0.5f; break; case ALIGN_BOTTOM : // delta_y = table.span_heights[col] - table.cell_height[col]; delta_y = span_height - table.cell_height[col]; break; default : delta_y = 0.0f; break; } } else if (table.row_spans[col]) { delta_y = 0.0f; } else { switch (cells[row][col]->valignment) { case ALIGN_MIDDLE : delta_y = (row_height - table.cell_height[col]) * 0.5f; break; case ALIGN_BOTTOM : delta_y = row_height - table.cell_height[col]; break; default : delta_y = 0.0f; break; } } DEBUG_printf(("row = %d, col = %d, valign = %d, rowspans = %d, cell_height = %.1f, span_heights = %.1f, delta_y = %.1f\n", row, col, cells[row][col]->valignment, table.row_spans[col], table.cell_height[col], table.span_heights[col], delta_y)); if (delta_y > 0.0f) { if (table.cell_start[col] == table.cell_end[col]) p = table.cell_start[col]; else p = table.cell_start[col]->next; for (; p != NULL; p = p->next) { DEBUG_printf(("aligning %p (%s), y was %.1f, now %.1f\n", (void *)p, p->data.text.buffer, p->y, p->y - delta_y)); p->y -= delta_y; if (p == table.cell_end[col]) break; } } #ifdef DEBUG else { if (table.cell_start[col] == table.cell_end[col]) p = table.cell_start[col]; else p = table.cell_start[col]->next; for (; p != NULL; p = p->next) { printf("NOT aligning %p (%s)\n", (void *)p, p->data.text.buffer); if (p == table.cell_end[col]) break; } } #endif /* DEBUG */ } } // Update all current columns with ROWSPAN <= rowspan to use the same // end page and row... 
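  // (the deepest end page and lowest y position among the finished cells
  // become the shared end point, so the row bottom, backgrounds, and
  // borders drawn below line up across every column)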
for (col = 0, temp_page = -1, temp_y = 99999999; col < table.num_cols; col ++) if (table.row_spans[col] <= rowspan && cells[row][col] != NULL && cells[row][col]->child != NULL) { if (table.cell_endpage[col] > temp_page) { temp_page = table.cell_endpage[col]; temp_y = table.cell_endy[col]; } else if (table.cell_endpage[col] == temp_page && table.cell_endy[col] < temp_y) temp_y = table.cell_endy[col]; } for (col = 0; col < table.num_cols; col ++) if (table.row_spans[col] <= rowspan && cells[row][col] != NULL && cells[row][col]->child != NULL) { table.cell_endpage[col] = temp_page; table.cell_endy[col] = temp_y; } row_y -= table.cellpadding; table.border_left = table.col_lefts[0] - table.cellpadding; width = table.col_rights[table.num_cols - 1] - table.col_lefts[0] + 2 * table.cellpadding; for (bgcolor = NULL, col = 0; col < table.num_cols; col ++) if (table.row_spans[col] <= rowspan && cells[row][col] && !htmlGetVariable(cells[row][col], (uchar *)"ROWSPAN") && (bgcolor = htmlGetVariable(cells[row][col]->parent, (uchar *)"BGCOLOR")) != NULL) break; if (bgcolor) { memcpy(bgrgb, background_color, sizeof(bgrgb)); get_color(bgcolor, bgrgb, 0); if (row_page > *page) { // Draw background on multiple pages... // Bottom of first page... new_render(*page, RENDER_BOX, table.border_left, bottom, width, row_starty - bottom + table.cellpadding, bgrgb, pages[*page].start); // Intervening pages... for (temp_page = *page + 1; temp_page < row_page; temp_page ++) { new_render(temp_page, RENDER_BOX, table.border_left, bottom, width, top - bottom, bgrgb, pages[temp_page].start); } // Top of last page... check_pages(*page); new_render(row_page, RENDER_BOX, table.border_left, row_y, width, top - row_y, bgrgb, pages[row_page].start); } else { // Draw background in row... new_render(row_page, RENDER_BOX, table.border_left, row_y, width, row_height + 2 * table.cellpadding, bgrgb, pages[row_page].start); } } for (col = 0; col < table.num_cols; col += colspan + 1) { for (colspan = 0; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; else if (table.row_spans[col + colspan] > 0) { DEBUG_printf(("row = %d, col = %d, decrementing row_spans (%d) to %d...\n", row, col, table.row_spans[col + colspan], table.row_spans[col + colspan] - rowspan)); table.row_spans[col + colspan] -= rowspan; } colspan --; width = table.col_rights[col + colspan] - table.col_lefts[col] + 2 * table.cellpadding; if (cells[row][col] == NULL || cells[row][col]->child == NULL || table.row_spans[col] > 0) continue; DEBUG_printf(("DRAWING BORDER+BACKGROUND: col=%d, row=%d, cell_page=%d, cell_y=%.1f\n" " cell_endpage=%d, cell_endy=%.1f\n", col, row, table.cell_page[col], table.cell_y[col], table.cell_endpage[col], table.cell_endy[col])); if ((bgcolor = htmlGetVariable(cells[row][col], (uchar *)"BGCOLOR")) != NULL) { memcpy(bgrgb, background_color, sizeof(bgrgb)); get_color(bgcolor, bgrgb, 0); } table.border_left = table.col_lefts[col] - table.cellpadding; if (table.cell_page[col] != table.cell_endpage[col]) { /* * Crossing a page boundary... 
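       * (the border and background are drawn in three parts: from the top
       * of the cell down to the bottom margin on the starting page, the
       * full printable height on any intervening pages, and from the top
       * margin down to the bottom of the row on the ending page)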
*/ if (table.border > 0) { /* * +---+---+---+ * | | | | */ // Top new_render(table.cell_page[col], RENDER_BOX, table.border_left, table.cell_y[col] + table.cellpadding, width + table.border, table.border, table.border_rgb); // Left new_render(table.cell_page[col], RENDER_BOX, table.border_left, bottom, table.border, table.cell_y[col] - bottom + table.cellpadding + table.border, table.border_rgb); // Right new_render(table.cell_page[col], RENDER_BOX, table.border_left + width, bottom, table.border, table.cell_y[col] - bottom + table.cellpadding + table.border, table.border_rgb); } if (bgcolor != NULL) { table.cell_bg[col]->y = bottom; table.cell_bg[col]->height = table.cell_y[col] - bottom + table.cellpadding + table.border; } for (temp_page = table.cell_page[col] + 1; temp_page < table.cell_endpage[col]; temp_page ++) { /* * | | | | * | | | | */ if (table.border > 0.0f) { // Left new_render(temp_page, RENDER_BOX, table.border_left, bottom, table.border, top - bottom, table.border_rgb); // Right new_render(temp_page, RENDER_BOX, table.border_left + width, bottom, table.border, top - bottom, table.border_rgb); } if (bgcolor != NULL) new_render(temp_page, RENDER_BOX, table.border_left, bottom, width + table.border, top - bottom, bgrgb, pages[temp_page].start); } if (table.border > 0.0f) { /* * | | | | * +---+---+---+ */ // Left new_render(table.cell_endpage[col], RENDER_BOX, table.border_left, row_y, table.border, top - row_y, table.border_rgb); // Right new_render(table.cell_endpage[col], RENDER_BOX, table.border_left + width, row_y, table.border, top - row_y, table.border_rgb); // Bottom new_render(table.cell_endpage[col], RENDER_BOX, table.border_left, row_y, width + table.border, table.border, table.border_rgb); } if (bgcolor != NULL) { check_pages(table.cell_endpage[col]); new_render(table.cell_endpage[col], RENDER_BOX, table.border_left, row_y, width + table.border, top - row_y, bgrgb, pages[table.cell_endpage[col]].start); } } else { /* * +---+---+---+ * | | | | * +---+---+---+ */ if (table.border > 0.0f) { // Top new_render(table.cell_page[col], RENDER_BOX, table.border_left, table.cell_y[col] + table.cellpadding, width + table.border, table.border, table.border_rgb); // Left new_render(table.cell_page[col], RENDER_BOX, table.border_left, row_y, table.border, table.cell_y[col] - row_y + table.cellpadding + table.border, table.border_rgb); // Right new_render(table.cell_page[col], RENDER_BOX, table.border_left + width, row_y, table.border, table.cell_y[col] - row_y + table.cellpadding + table.border, table.border_rgb); // Bottom new_render(table.cell_page[col], RENDER_BOX, table.border_left, row_y, width + table.border, table.border, table.border_rgb); } if (bgcolor != NULL) { table.cell_bg[col]->y = row_y; table.cell_bg[col]->height = table.cell_y[col] - row_y + table.cellpadding + table.border; } } } *page = row_page; *y = row_y; } /* * 'parse_table()' - Parse a table and produce rendering output. */ static void parse_table(tree_t *t, // I - Tree to parse float left, // I - Left margin float right, // I - Printable width float bottom, // I - Bottom margin float top, // I - Printable top float *x, // IO - X position float *y, // IO - Y position int *page, // IO - Page # int needspace) // I - Need whitespace? 
{ int col, row, header_row = -1, tcol, colspan, rowspan, alloc_rows, regular_cols; hdtable_t table; float col_width, col_min, col_pref, col_height, cellspacing, width, pref_width, span_width, regular_width, actual_width, table_width, min_width, temp_width, header_height = 0.0, table_y, temp_bottom, temp_top; int temp_page, table_page; uchar *var, *height_var, // Row HEIGHT variable *header_height_var = NULL; tree_t *temprow, *tempcol, *tempnext, ***cells, *caption; // Caption for bottom, if any float temp_height; // Temporary holder uchar *bgcolor; float bgrgb[3]; const char *htmldoc_debug; // HTMLDOC_DEBUG env var DEBUG_puts("\n\nTABLE"); DEBUG_printf(("parse_table(t=%p, left=%.1f, right=%.1f, x=%.1f, y=%.1f, page=%d\n", (void *)t, left, right, *x, *y, *page)); if (t->child == NULL) return; /* Empty table... */ memset(&table, 0, sizeof(table)); /* * Check debug mode... */ if ((htmldoc_debug = getenv("HTMLDOC_DEBUG")) != NULL && (strstr(htmldoc_debug, "table") || strstr(htmldoc_debug, "all"))) table.debug = 1; else table.debug = 0; /* * Figure out the # of rows, columns, and the desired widths... */ cells = NULL; if ((var = htmlGetVariable(t, (uchar *)"WIDTH")) != NULL) { if (var[strlen((char *)var) - 1] == '%') table_width = (float)(atof((char *)var) * (right - left) / 100.0f); else table_width = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); if (table_width < 0.0f || table_width > PagePrintWidth) table_width = right - left; } else table_width = right - left; if ((var = htmlGetVariable(t, (uchar *)"HEIGHT")) != NULL) { if (var[strlen((char *)var) - 1] == '%') table.height = (float)(atof((char *)var) * (top - bottom) / 100.0f); else table.height = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else table.height = -1.0f; DEBUG_printf(("table_width = %.1f\n", table_width)); if ((var = htmlGetVariable(t, (uchar *)"CELLPADDING")) != NULL) { if ((table.cellpadding = atoi((char *)var)) < 0.0f) table.cellpadding = 0.0f; else if (table.cellpadding > 20.0f) table.cellpadding = 20.0f; } else table.cellpadding = 1.0f; if ((var = htmlGetVariable(t, (uchar *)"CELLSPACING")) != NULL) { if ((cellspacing = atoi((char *)var)) < 0.0f) cellspacing = 0.0f; else if (cellspacing > 20.0f) cellspacing = 20.0f; } else cellspacing = 0.0f; if ((var = htmlGetVariable(t, (uchar *)"BORDER")) != NULL) { if ((table.border = (float)atof((char *)var)) <= 0.0 && var[0] != '0') table.border = 1.0f; else if (table.border > 20.0f) table.border = 20.0f; table.cellpadding += table.border; } else table.border = 0.0f; if (table.debug && table.border == 0.0f) table.border = 0.01f; table.border_rgb[0] = t->red / 255.0f; table.border_rgb[1] = t->green / 255.0f; table.border_rgb[2] = t->blue / 255.0f; if ((var = htmlGetVariable(t, (uchar *)"BORDERCOLOR")) != NULL) get_color(var, table.border_rgb, 0); if (table.border == 0.0f && table.cellpadding > 0.0f) { /* * Ah, the strange table formatting nightmare that is HTML. * Netscape and MSIE assign an invisible border width of 1 * pixel if no border is specified... 
*/ table.cellpadding += 1.0f; } table.border_size = table.border - 1.0f; cellspacing *= PagePrintWidth / _htmlBrowserWidth; table.cellpadding *= PagePrintWidth / _htmlBrowserWidth; table.border *= PagePrintWidth / _htmlBrowserWidth; table.border_size *= PagePrintWidth / _htmlBrowserWidth; DEBUG_printf(("border = %.1f, cellpadding = %.1f\n", table.border, table.cellpadding)); temp_bottom = bottom - table.cellpadding; temp_top = top + table.cellpadding; for (temprow = t->child, table.num_cols = 0, table.num_rows = 0, alloc_rows = 0, caption = NULL; temprow != NULL; temprow = tempnext) { tempnext = temprow->next; if (temprow->markup == MARKUP_CAPTION) { if ((var = htmlGetVariable(temprow, (uchar *)"ALIGN")) == NULL || strcasecmp((char *)var, "bottom")) { /* * Show caption at top... */ parse_paragraph(temprow, left, right, bottom, top, x, y, page, needspace); needspace = 1; } else { /* * Flag caption for bottom of table... */ caption = temprow; } } else if (temprow->markup == MARKUP_TR || ((temprow->markup == MARKUP_TBODY || temprow->markup == MARKUP_THEAD || temprow->markup == MARKUP_TFOOT) && temprow->child != NULL)) { if (temprow->markup == MARKUP_THEAD) header_row = table.num_rows; // Descend into table body as needed... if (temprow->markup == MARKUP_TBODY || temprow->markup == MARKUP_THEAD || temprow->markup == MARKUP_TFOOT) temprow = temprow->child; // Figure out the next row... if ((tempnext = temprow->next) == NULL) if (temprow->parent->markup == MARKUP_TBODY || temprow->parent->markup == MARKUP_THEAD || temprow->parent->markup == MARKUP_TFOOT) tempnext = temprow->parent->next; // Allocate memory for the table as needed... if (table.num_rows >= alloc_rows) { alloc_rows += ALLOC_ROWS; if (alloc_rows == ALLOC_ROWS) cells = (tree_t ***)malloc(sizeof(tree_t **) * (size_t)alloc_rows); else cells = (tree_t ***)realloc(cells, sizeof(tree_t **) * (size_t)alloc_rows); if (cells == (tree_t ***)0) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for table!"); return; } } if ((cells[table.num_rows] = (tree_t **)calloc(sizeof(tree_t *), MAX_COLUMNS)) == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for table!"); free(cells); return; } #ifdef DEBUG printf("BEFORE row %d: num_cols = %d\n", table.num_rows, table.num_cols); if (table.num_rows) for (col = 0; col < table.num_cols; col ++) printf(" col %d: row_spans[] = %d\n", col, table.row_spans[col]); #endif // DEBUG // Figure out the starting column... if (table.num_rows) { for (col = 0, rowspan = 9999; col < table.num_cols; col ++) if (table.row_spans[col] < rowspan) rowspan = table.row_spans[col]; for (col = 0; col < table.num_cols; col ++) table.row_spans[col] -= rowspan; for (col = 0; table.row_spans[col] && col < table.num_cols; col ++) cells[table.num_rows][col] = cells[table.num_rows - 1][col]; } else col = 0; for (tempcol = temprow->child; tempcol != NULL && col < MAX_COLUMNS; tempcol = tempcol->next) { if (tempcol->markup == MARKUP_TH && table.num_rows == 0) header_row = table.num_rows; if (tempcol->markup == MARKUP_TD || tempcol->markup == MARKUP_TH) { // Handle colspan and rowspan stuff... 
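	// COLSPAN is clamped to the range 1 .. (MAX_COLUMNS - col); a ROWSPAN
	// of 1 or less is treated as no span at all, and a real ROWSPAN is
	// copied to every column the cell covers.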
if ((var = htmlGetVariable(tempcol, (uchar *)"COLSPAN")) != NULL) { if ((colspan = atoi((char *)var)) < 1) colspan = 1; else if (colspan > (MAX_COLUMNS - col)) colspan = MAX_COLUMNS - col; } else colspan = 1; if ((var = htmlGetVariable(tempcol, (uchar *)"ROWSPAN")) != NULL) { table.row_spans[col] = atoi((char *)var); if (table.row_spans[col] <= 1) table.row_spans[col] = 0; for (tcol = 1; tcol < colspan; tcol ++) table.row_spans[col + tcol] = table.row_spans[col]; } // Compute the cell size... col_width = get_cell_size(tempcol, 0.0f, table_width, &col_min, &col_pref, &col_height); if ((var = htmlGetVariable(tempcol, (uchar *)"WIDTH")) != NULL) { if (var[strlen((char *)var) - 1] == '%') { col_width -= 2.0 * table.cellpadding - cellspacing; if (colspan <= 1) table.col_percent[col] = 1; } else { col_width -= 2.0 * table.cellpadding; } if (col_width <= 0.0f) col_width = 0.0f; else if (col_width > PageWidth) col_width = PageWidth; } else col_width = 0.0f; tempcol->height = col_height; DEBUG_printf(("%d,%d: colsp=%d, rowsp=%d, width=%.1f, minw=%.1f, prefw=%.1f, minh=%.1f\n", col, table.num_rows, colspan, table.row_spans[col], col_width, col_min, col_pref, col_height)); // Add widths to columns... if (colspan > 1) { if (colspan > table.col_spans[col]) table.col_spans[col] = colspan; if (col_width > table.col_swidths[col]) table.col_swidths[col] = col_width; if (col_min > table.col_smins[col]) table.col_smins[col] = col_min; temp_width = col_width / colspan; for (int i = 0; i < colspan; i ++) { if (temp_width > table.col_widths[col + i]) table.col_widths[col + i] = temp_width; } } else { if (col_width > 0.0f) table.col_fixed[col] = 1; if (col_width > table.col_widths[col]) table.col_widths[col] = col_width; if (col_pref > table.col_prefs[col]) table.col_prefs[col] = col_pref; if (col_min > table.col_mins[col]) table.col_mins[col] = col_min; } while (colspan > 0 && col < MAX_COLUMNS) { cells[table.num_rows][col] = tempcol; col ++; colspan --; } while (table.row_spans[col] && col < table.num_cols) { cells[table.num_rows][col] = cells[table.num_rows - 1][col]; col ++; } } } DEBUG_printf(("header_row=%d\n", header_row)); if (col > table.num_cols) table.num_cols = col; #ifdef DEBUG printf("AFTER row %d: num_cols = %d\n", table.num_rows, table.num_cols); for (col = 0; col < table.num_cols; col ++) printf(" col %d: row_spans[] = %d\n", col, table.row_spans[col]); #endif // DEBUG table.num_rows ++; for (col = 0; col < table.num_cols; col ++) if (table.row_spans[col]) table.row_spans[col] --; } } /* * OK, some people apparently create HTML tables with no columns or * rows... If this happened, return immediately... */ if (table.num_cols == 0) return; /* * Now figure out the width of the table... */ if ((var = htmlGetVariable(t, (uchar *)"WIDTH")) != NULL) { if (var[strlen((char *)var) - 1] == '%') width = (float)(atof((char *)var) * (right - left) / 100.0f); else width = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else { for (col = 0, width = 0.0; col < table.num_cols; col ++) width += table.col_prefs[col]; width += (2 * table.cellpadding + cellspacing) * table.num_cols - cellspacing; if (width > (right - left)) width = right - left; } /* * Compute the width of each column based on the printable width. 
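 *
 * The widths are assigned in up to five passes:
 *
 *   1. Columns with an explicit width get at least that width (or their
 *      minimum width, whichever is larger).
 *   2. The remaining columns get their preferred width when it fits,
 *      otherwise their minimum width.
 *   3. Hard/minimum widths of COLSPAN'd cells are enforced across the
 *      spanned columns.
 *   4. Any space still left over is divided among the non-fixed columns.
 *   5. If the result is wider than the requested table width, every
 *      column is squeezed back toward its minimum width.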
*/ DEBUG_printf(("\nTABLE: %dx%d\n\n", table.num_cols, table.num_rows)); actual_width = (2 * table.cellpadding + cellspacing) * table.num_cols - cellspacing; regular_width = (width - actual_width) / table.num_cols; DEBUG_printf((" width = %.1f, actual_width = %.1f, regular_width = %.1f\n\n", width, actual_width, regular_width)); DEBUG_puts(" Col Width Min Pref Fixed? Percent?"); DEBUG_puts(" --- ------ ------ ------ ------ --------"); #ifdef DEBUG for (col = 0; col < table.num_cols; col ++) printf(" %-3d %-6.1f %-6.1f %-6.1f %-6s %s\n", col, table.col_widths[col], table.col_mins[col], table.col_prefs[col], table.col_fixed[col] ? "YES" : "NO", table.col_percent[col] ? "YES" : "NO"); puts(""); #endif /* DEBUG */ /* * The first pass just handles columns with a specified width... */ DEBUG_puts("PASS 1: fixed width handling\n"); for (col = 0, regular_cols = 0; col < table.num_cols; col ++) if (table.col_widths[col] > 0.0f) { if (table.col_mins[col] > table.col_widths[col]) { DEBUG_printf((" updating column %d to width=%.1f\n", col, table.col_mins[col])); table.col_widths[col] = table.col_mins[col]; } actual_width += table.col_widths[col]; } else { regular_cols ++; actual_width += table.col_mins[col]; } DEBUG_printf((" actual_width = %.1f, regular_cols = %d\n\n", actual_width,regular_cols)); /* * Pass two uses the "preferred" width whenever possible, and the * minimum otherwise... */ DEBUG_puts("PASS 2: preferred width handling\n"); for (col = 0, pref_width = 0.0f; col < table.num_cols; col ++) if (table.col_widths[col] == 0.0f) pref_width += table.col_prefs[col] - table.col_mins[col]; DEBUG_printf((" pref_width = %.1f\n", pref_width)); if (pref_width > 0.0f) { if ((regular_width = (width - actual_width) / pref_width) < 0.0f) regular_width = 0.0f; else if (regular_width > 1.0f) regular_width = 1.0f; DEBUG_printf((" regular_width = %.1f\n", regular_width)); for (col = 0; col < table.num_cols; col ++) if (table.col_widths[col] == 0.0f) { pref_width = (table.col_prefs[col] - table.col_mins[col]) * regular_width; if ((actual_width + pref_width) > width) { if (col == (table.num_cols - 1) && (width - actual_width) >= table.col_mins[col]) table.col_widths[col] = width - actual_width; else table.col_widths[col] = table.col_mins[col]; } else table.col_widths[col] = pref_width + table.col_mins[col]; DEBUG_printf((" col_widths[%d] = %.1f\n", col, table.col_widths[col])); actual_width += table.col_widths[col] - table.col_mins[col]; } } else { /* * Assign min widths for all cells... */ for (col = 0; col < table.num_cols; col ++) if (table.col_widths[col] == 0.0f) table.col_widths[col] = table.col_mins[col]; } DEBUG_printf((" actual_width = %.1f\n\n", actual_width)); /* * Pass three enforces any hard or minimum widths for COLSPAN'd * columns... 
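 *
 * When the spanned columns together are narrower than the spanning cell
 * needs, the non-fixed columns are widened proportionally; if the span
 * width is essentially zero, the shortfall is split evenly between the
 * columns instead.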
*/ DEBUG_puts("PASS 3: colspan handling\n\n"); for (col = 0; col < table.num_cols; col ++) { DEBUG_printf((" col %d, colspan %d\n", col, table.col_spans[col])); if (table.col_spans[col] > 1) { for (colspan = 0, span_width = 0.0f; colspan < table.col_spans[col]; colspan ++) span_width += table.col_widths[col + colspan]; pref_width = 0.0f; if (span_width < table.col_swidths[col]) pref_width = table.col_swidths[col]; if (span_width < table.col_smins[col] && pref_width < table.col_smins[col]) pref_width = table.col_smins[col]; for (colspan = 0; colspan < table.col_spans[col]; colspan ++) if (table.col_fixed[col + colspan]) { span_width -= table.col_widths[col + colspan]; pref_width -= table.col_widths[col + colspan]; } DEBUG_printf((" col_swidths=%.1f, col_smins=%.1f, span_width=%.1f, pref_width=%.1f\n", table.col_swidths[col], table.col_smins[col], span_width, pref_width)); if (pref_width > 0.0f && pref_width > span_width) { if (span_width >= 1.0f) { // Expand cells proportionately... regular_width = pref_width / span_width; for (colspan = 0; colspan < table.col_spans[col]; colspan ++) if (!table.col_fixed[col + colspan]) { actual_width -= table.col_widths[col + colspan]; table.col_widths[col + colspan] *= regular_width; actual_width += table.col_widths[col + colspan]; DEBUG_printf((" col_widths[%d] = %.1f\n", col + colspan, table.col_widths[col + colspan])); } } else { // Divide the space up equally between columns, since the // colspan area is always by itself... (this hack brought // to you by Yahoo! and their single cell tables with // colspan=2 :) regular_width = pref_width / table.col_spans[col]; for (colspan = 0; colspan < table.col_spans[col]; colspan ++) { actual_width += regular_width; table.col_widths[col + colspan] += regular_width; DEBUG_printf((" col_widths[%d] = %.1f\n", col, table.col_widths[col])); } } } } } DEBUG_printf((" actual_width = %.1f\n\n", actual_width)); /* * Pass four divides up the remaining space amongst the columns... */ DEBUG_puts("PASS 4: divide remaining space, if any...\n"); if (width > actual_width) { for (col = 0, colspan = 0; col < table.num_cols; col ++) if (!table.col_fixed[col] || table.col_percent[col]) colspan ++; if (colspan > 0) { regular_width = (width - actual_width) / table.num_cols; for (col = 0; col < table.num_cols; col ++) if (!table.col_fixed[col]) { table.col_widths[col] += regular_width; DEBUG_printf((" col_widths[%d] = %.1f\n", col, table.col_widths[col])); } } } else width = actual_width; DEBUG_puts(""); /* * The final pass is only run if the width > table_width... */ DEBUG_puts("PASS 5: Squeeze table as needed..."); if (width > table_width) { /* * Squeeze the table to fit the requested width or the printable width * as determined at the beginning... 
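 *
 * Each column keeps its minimum width; only the portion above the
 * minimum is scaled down, in proportion to the excess it originally
 * claimed.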
*/ for (col = 0, min_width = -cellspacing; col < table.num_cols; col ++) min_width += table.col_mins[col] + 2 * table.cellpadding + cellspacing; DEBUG_printf((" table_width = %.1f, width = %.1f, min_width = %.1f\n", table_width, width, min_width)); temp_width = table_width - min_width; if (temp_width < 0.0f) temp_width = 0.0f; width -= min_width; if (width < 1.0f) width = 1.0f; for (col = 0; col < table.num_cols; col ++) { table.col_widths[col] = table.col_mins[col] + temp_width * (table.col_widths[col] - table.col_mins[col]) / width; DEBUG_printf((" col_widths[%d] = %.1f\n", col, table.col_widths[col])); } for (col = 0, width = -cellspacing; col < table.num_cols; col ++) width += table.col_widths[col] + 2 * table.cellpadding + cellspacing; DEBUG_printf((" new width = %.1f, max width = %.1f\n", width, right - left)); } if ((width - right + left) > 0.001f && OverflowErrors) progress_error(HD_ERROR_CONTENT_TOO_LARGE, "Table on page %d too wide - truncation or overlapping may occur!", *page + 1); DEBUG_puts(""); DEBUG_printf(("Final table width = %.1f, alignment = %d\n", width, t->halignment)); switch (t->halignment) { case ALIGN_LEFT : *x = left + table.cellpadding; break; case ALIGN_CENTER : *x = left + 0.5f * (right - left - width) + table.cellpadding; break; case ALIGN_RIGHT : *x = right - width + table.cellpadding; break; } for (col = 0; col < table.num_cols; col ++) { table.col_lefts[col] = *x; table.col_rights[col] = *x + table.col_widths[col]; *x = table.col_rights[col] + 2 * table.cellpadding + cellspacing; DEBUG_printf(("left[%d] = %.1f, right[%d] = %.1f\n", col, table.col_lefts[col], col, table.col_rights[col])); } /* * Now render the whole table... */ if (*y < top && needspace) *y -= _htmlSpacings[SIZE_P]; if (table.debug) { check_pages(*page); render_t *r; char table_text[255]; snprintf(table_text, sizeof(table_text), "t=%p", (void *)t); r = new_render(*page, RENDER_TEXT, left, *y, get_width((uchar *)table_text, TYPE_COURIER, STYLE_NORMAL, 3), _htmlSizes[3], table_text); r->data.text.typeface = TYPE_COURIER; r->data.text.style = STYLE_NORMAL; r->data.text.size = (float)_htmlSizes[3]; } table_page = *page; table_y = *y; for (row = 0; row < table.num_rows; row ++) { height_var = NULL; if (cells[row][0] != NULL) { /* * Do page comments... */ if (cells[row][0]->parent->prev != NULL && cells[row][0]->parent->prev->markup == MARKUP_COMMENT) parse_comment(cells[row][0]->parent->prev, &left, &right, &temp_bottom, &temp_top, x, y, page, NULL, 0); /* * Get height... */ if ((height_var = htmlGetVariable(cells[row][0]->parent, (uchar *)"HEIGHT")) == NULL) for (col = 0; col < table.num_cols; col ++) if (htmlGetVariable(cells[row][col], (uchar *)"ROWSPAN") == NULL) if ((height_var = htmlGetVariable(cells[row][col], (uchar *)"HEIGHT")) != NULL) break; } if (height_var != NULL && row == header_row) header_height_var = height_var; if (cells[row][0] != NULL && height_var != NULL) { // Row height specified; make sure it'll fit... if (height_var[strlen((char *)height_var) - 1] == '%') temp_height = (float)(atof((char *)height_var) * 0.01f * (PagePrintLength - 2 * table.cellpadding)); else temp_height = (float)(atof((char *)height_var) * PagePrintWidth / _htmlBrowserWidth); if (table.height > 0.0f && temp_height > table.height) temp_height = table.height; temp_height -= 2 * table.cellpadding; } else { // Use min height computed from get_cell_size()... 
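      // Without an explicit HEIGHT the row defaults to the tallest
      // non-ROWSPAN cell in the row (but at least one paragraph spacing),
      // capped at the table height if one was given or at 1/8th of the
      // page length otherwise.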
for (col = 0, temp_height = (float)_htmlSpacings[SIZE_P]; col < table.num_cols; col ++) if (cells[row][col] != NULL && cells[row][col]->height > temp_height && !htmlGetVariable(cells[row][col], (uchar *)"ROWSPAN")) temp_height = cells[row][col]->height; if (table.height > 0.0) { // Table height specified; make sure it'll fit... if (temp_height > table.height) temp_height = table.height; temp_height -= 2 * table.cellpadding; } else if (temp_height > (PageLength / 8.0) && height_var == NULL) temp_height = PageLength / 8.0; } DEBUG_printf(("BEFORE row = %d, temp_height = %.1f, *y = %.1f, *page = %d\n", row, temp_height, *y, *page)); if (*y < (bottom + 2 * table.cellpadding + temp_height) && temp_height <= (top - bottom - 2 * table.cellpadding)) { DEBUG_puts("NEW PAGE"); *y = top - header_height; (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); if (row > 0 && header_row >= 0) { // Render header row... render_table_row(table, cells, header_row, header_height_var, left, right, bottom, top, x, y, page); } } float start_y = *y; temp_page = *page; render_table_row(table, cells, row, height_var, left, right, bottom, top, x, y, page); if (header_row >= 0 && row == header_row) { header_height = *y - start_y; top += header_height; } else if (temp_page != *page && header_row >= 0) { // Render header row on new page(s)... do { float temp_y = top - header_height; temp_page ++; render_table_row(table, cells, header_row, header_height_var, left, right, bottom, top, x, &temp_y, &temp_page); } while (temp_page < *page); } if (row < (table.num_rows - 1)) (*y) -= cellspacing; DEBUG_printf(("END row = %d, *y = %.1f, *page = %d\n", row, *y, *page)); } top -= header_height; /* * Handle table background color... */ if ((bgcolor = htmlGetVariable(t, (uchar *)"BGCOLOR")) != NULL) { memcpy(bgrgb, background_color, sizeof(bgrgb)); get_color(bgcolor, bgrgb, 0); table.border_left = table.col_lefts[0] - table.cellpadding; width = table.col_rights[table.num_cols - 1] - table.col_lefts[0] + 2 * table.cellpadding; if (table_page != *page) { // Draw background on multiple pages... // Bottom of first page... new_render(table_page, RENDER_BOX, table.border_left, bottom, width, table_y - bottom, bgrgb, pages[table_page].start); // Intervening pages... for (temp_page = table_page + 1; temp_page < *page; temp_page ++) { new_render(temp_page, RENDER_BOX, table.border_left, bottom, width, top - bottom, bgrgb, pages[temp_page].start); } // Top of last page... check_pages(*page); new_render(*page, RENDER_BOX, table.border_left, *y, width, top - *y, bgrgb, pages[*page].start); } else { // Draw background in row... new_render(table_page, RENDER_BOX, table.border_left, *y, width, table_y - *y, bgrgb, pages[table_page].start); } } *x = left; if (caption) { /* * Show caption at bottom... */ parse_paragraph(caption, left, right, bottom, top, x, y, page, needspace); needspace = 1; } /* * Free memory for the table... */ if (table.num_rows > 0) { for (row = 0; row < table.num_rows; row ++) free(cells[row]); free(cells); } } #ifdef TABLE_DEBUG # undef DEBUG # undef DEBUG_puts # define DEBUG_puts(x) # undef DEBUG_printf # define DEBUG_printf(x) #endif /* TABLE_DEBUG */ /* * 'parse_list()' - Parse a list entry and produce rendering output. 
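 *
 * The item body is rendered first; the number or bullet is then drawn to
 * the left of the item at its starting position (or at the top of the
 * next page when the item wrapped and nothing remained on the old page).
 * List types 'a', 'A', '1', 'i' and 'I' produce a formatted number
 * followed by ". "; any other type is drawn as a single Symbol-font
 * glyph.  A VALUE attribute on the entry resets the running counter.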
*/ static void parse_list(tree_t *t, /* I - Tree to parse */ float *left, /* I - Left margin */ float *right, /* I - Printable width */ float *bottom, /* I - Bottom margin */ float *top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int needspace) /* I - Need whitespace? */ { uchar number[255]; /* List number (for numbered types) */ uchar *value; /* VALUE= variable */ int typeface; /* Typeface of list number */ float width; /* Width of list number */ render_t *r; /* Render primitive */ int oldpage; /* Old page value */ float oldy; /* Old Y value */ float tempx; /* Temporary X value */ DEBUG_printf(("parse_list(t=%p, left=%.1f, right=%.1f, x=%.1f, y=%.1f, page=%d\n", (void *)t, *left, *right, *x, *y, *page)); if (needspace && *y < *top) { *y -= _htmlSpacings[t->size]; needspace = 0; } check_pages(*page); oldy = *y; oldpage = *page; r = pages[*page].end; tempx = *x; if (t->indent == 0) { // Adjust left margin when no UL/OL/DL is being used... *left += _htmlSizes[t->size]; tempx += _htmlSizes[t->size]; } parse_doc(t->child, left, right, bottom, top, &tempx, y, page, NULL, &needspace); // Handle when paragraph wrapped to new page... if (*page != oldpage) { // First see if anything was added to the old page... if ((r != NULL && r->next == NULL) || pages[oldpage].end == NULL) { // No, put the symbol on the next page... oldpage = *page; oldy = *top; } } if ((value = htmlGetVariable(t, (uchar *)"VALUE")) != NULL) { if (isdigit(value[0])) list_values[t->indent] = atoi((char *)value); else if (isupper(value[0])) list_values[t->indent] = value[0] - 'A' + 1; else list_values[t->indent] = value[0] - 'a' + 1; } switch (list_types[t->indent]) { case 'a' : case 'A' : case '1' : case 'i' : case 'I' : strlcpy((char *)number, format_number(list_values[t->indent], (char)list_types[t->indent]), sizeof(number)); strlcat((char *)number, ". ", sizeof(number)); typeface = t->typeface; break; default : snprintf((char *)number, sizeof(number), "%c ", list_types[t->indent]); typeface = TYPE_SYMBOL; break; } width = get_width(number, typeface, t->style, t->size); r = new_render(oldpage, RENDER_TEXT, *left - width, oldy - _htmlSizes[t->size], width, _htmlSpacings[t->size], number); r->data.text.typeface = typeface; r->data.text.style = t->style; r->data.text.size = (float)_htmlSizes[t->size]; r->data.text.rgb[0] = t->red / 255.0f; r->data.text.rgb[1] = t->green / 255.0f; r->data.text.rgb[2] = t->blue / 255.0f; list_values[t->indent] ++; if (t->indent == 0) { // Adjust left margin when no UL/OL/DL is being used... *left -= _htmlSizes[t->size]; } } /* * 'init_list()' - Initialize the list type and value as necessary. 
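 *
 * The list type comes from the TYPE attribute: a single character is used
 * literally, "disc" and "circle" map to one bullet glyph and any other
 * name to another; without TYPE, UL entries get a symbol chosen by
 * nesting depth and OL entries default to '1'.  The starting value comes
 * from VALUE or START and may be a number, an uppercase letter, or a
 * lowercase letter.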
*/ static void init_list(tree_t *t) /* I - List entry */ { uchar *type, /* TYPE= variable */ *value; /* VALUE= variable */ static uchar *symbols = (uchar *)"\327\267\250\340"; if ((type = htmlGetVariable(t, (uchar *)"TYPE")) != NULL) { if (strlen((char *)type) == 1) list_types[t->indent] = type[0]; else if (strcasecmp((char *)type, "disc") == 0 || strcasecmp((char *)type, "circle") == 0) list_types[t->indent] = symbols[1]; else list_types[t->indent] = symbols[2]; } else if (t->markup == MARKUP_UL) list_types[t->indent] = symbols[t->indent & 3]; else if (t->markup == MARKUP_OL) list_types[t->indent] = '1'; if ((value = htmlGetVariable(t, (uchar *)"VALUE")) == NULL) value = htmlGetVariable(t, (uchar *)"START"); if (value != NULL) { if (isdigit(value[0])) list_values[t->indent] = atoi((char *)value); else if (isupper(value[0])) list_values[t->indent] = value[0] - 'A' + 1; else list_values[t->indent] = value[0] - 'a' + 1; } else if (t->markup == MARKUP_OL) list_values[t->indent] = 1; } /* * 'parse_comment()' - Parse a comment for HTMLDOC comments. */ #ifdef COMMENT_DEBUG # undef DEBUG_puts # define DEBUG_puts(x) puts(x) # define DEBUG # undef DEBUG_printf # define DEBUG_printf(x) printf x #endif /* COMMENT_DEBUG */ static void parse_comment(tree_t *t, /* I - Tree to parse */ float *left, /* I - Left margin */ float *right, /* I - Printable width */ float *bottom, /* I - Bottom margin */ float *top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ tree_t *para, /* I - Current paragraph */ int needspace) /* I - Need whitespace? */ { int i; /* Looping var */ const char *comment; /* Comment text */ char *ptr, /* Pointer into value string */ buffer[1024]; /* Buffer for strings */ int pos, /* Position (left, center, right) */ tof; /* Top of form */ DEBUG_printf(("parse_comment(t=%p, left=%.1f, right=%.1f, bottom=%.1f, " "top=%.1f, x=%.1f, y=%.1f, page=%d, para=%p, needspace=%d\n", (void *)t, *left, *right, *bottom, *top, *x, *y, *page, (void *)para, needspace)); if (t->data == NULL) return; if (para != NULL && para->child != NULL && para->child->next == NULL && para->child->child == NULL && para->child->markup == MARKUP_NONE && strcmp((const char *)para->child->data, " ") == 0) { // Remove paragraph consisting solely of whitespace... htmlDeleteTree(para->child); para->child = para->last_child = NULL; } // Mark if we are at the top of form... tof = (*y >= *top); DEBUG_printf(("BEFORE tof=%d, *y=%.1f, *top=%.1f, *page=%d, t->data=\"%s\"\n", tof, *y, *top, *page, t->data)); DEBUG_printf((" PagePrintWidth = %d\n", PagePrintWidth)); DEBUG_printf(("PagePrintLength = %d\n", PagePrintLength)); DEBUG_printf((" PageWidth = %d\n", PageWidth)); DEBUG_printf((" PageLength = %d\n", PageLength)); DEBUG_printf((" PageLeft = %d\n", PageLeft)); DEBUG_printf((" PageBottom = %d\n", PageBottom)); DEBUG_printf((" PageRight = %d\n", PageRight)); DEBUG_printf((" PageTop = %d\n", PageTop)); DEBUG_printf((" Landscape = %d\n", Landscape)); for (comment = (const char *)t->data; *comment;) { // Skip leading whitespace... while (isspace(*comment)) comment ++; if (!*comment) break; if (strncasecmp(comment, "PAGE BREAK", 10) == 0 && (!comment[10] || isspace(comment[10]))) { /* * <!-- PAGE BREAK --> generates a page break... 
*/ comment += 10; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; *y = *top; tof = 1; } else if (strncasecmp(comment, "NEW PAGE", 8) == 0 && (!comment[8] || isspace(comment[8]))) { /* * <!-- NEW PAGE --> generates a page break... */ comment += 8; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; *y = *top; tof = 1; } else if (strncasecmp(comment, "NEW SHEET", 9) == 0 && (!comment[9] || isspace(comment[9]))) { /* * <!-- NEW SHEET --> generate a page break to a new sheet... */ comment += 9; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } if (NumberUp == 1) { // NEW SHEET breaks to the next sheet of paper... (*page) ++; if (PageDuplex && ((*page) & 1)) (*page) ++; } else { // NEW SHEET breaks to the next side/sheet... (*page) ++; for (i = *page - 1; i >= 0; i --) if (pages[i].nup != NumberUp) break; i ++; for (i = *page - i; (i % NumberUp) != 0; i ++, (*page) ++); } if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; *y = *top; tof = 1; } else if (strncasecmp(comment, "HALF PAGE", 9) == 0 && (!comment[9] || isspace(comment[9]))) { /* * <!-- HALF PAGE --> Go to the next half page. If in the * top half of a page, go to the bottom half. If in the * bottom half, go to the next page. */ float halfway; comment += 9; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } halfway = 0.5f * (*top + *bottom); if (*y <= halfway) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; *y = *top; tof = 1; } else { *x = *left; *y = halfway; tof = 0; } } else if (strncasecmp(comment, "NEED ", 5) == 0) { /* * <!-- NEED amount --> generate a page break if there isn't * enough remaining space... */ comment += 5; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if ((*y - get_measurement(comment, (float)_htmlSpacings[SIZE_P])) < *bottom) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; // Skip amount... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA COLOR ", 12) == 0) { // Media color for page... comment += 12; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... 
tof = (*y >= *top); } if (!tof) { (*page) ++; if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; check_pages(*page); // Get color... if (*comment == '\"') { for (ptr = pages[*page].media_color, comment ++; *comment && *comment != '\"'; comment ++) if (ptr < (pages[*page].media_color + sizeof(pages[*page].media_color) - 1)) *ptr++ = *comment; if (*comment == '\"') comment ++; } else { for (ptr = pages[*page].media_color; *comment && !isspace(*comment); comment ++) if (ptr < (pages[*page].media_color + sizeof(pages[*page].media_color) - 1)) *ptr++ = *comment; } *ptr = '\0'; } else if (strncasecmp(comment, "MEDIA POSITION ", 15) == 0) { // Media position for page... comment += 15; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; check_pages(*page); pages[*page].media_position = atoi(comment); // Skip position... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA TYPE ", 11) == 0) { // Media type for page... comment += 11; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; check_pages(*page); // Get type... if (*comment == '\"') { for (ptr = pages[*page].media_type, comment ++; *comment && *comment != '\"'; comment ++) if (ptr < (pages[*page].media_type + sizeof(pages[*page].media_type) - 1)) *ptr++ = *comment; if (*comment == '\"') comment ++; } else { for (ptr = pages[*page].media_type; *comment && !isspace(*comment); comment ++) if (ptr < (pages[*page].media_type + sizeof(pages[*page].media_type) - 1)) *ptr++ = *comment; } *ptr = '\0'; } else if (strncasecmp(comment, "MEDIA SIZE ", 11) == 0) { // Media size... comment += 11; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; tof = 1; } if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); check_pages(*page); *right = PagePrintWidth - *right; *top = PagePrintLength - *top; set_page_size(comment); if (Landscape) { PagePrintWidth = PageLength - PageLeft - PageRight; PagePrintLength = PageWidth - PageTop - PageBottom; } else { PagePrintWidth = PageWidth - PageLeft - PageRight; PagePrintLength = PageLength - PageTop - PageBottom; } *right = PagePrintWidth - *right; *top = PagePrintLength - *top; *x = *left; *y = *top; pages[*page].width = PageWidth; pages[*page].length = PageLength; // Skip width... 
while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA LEFT ", 11) == 0) { // Left margin... comment += 11; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; check_pages(*page); *right = PagePrintWidth - *right; PageLeft = pages[*page].left = get_measurement(comment); if (Landscape) PagePrintWidth = PageLength - PageRight - PageLeft; else PagePrintWidth = PageWidth - PageRight - PageLeft; *right = PagePrintWidth - *right; // Skip left... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA RIGHT ", 12) == 0) { // Right margin... comment += 12; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; check_pages(*page); *right = PagePrintWidth - *right; PageRight = pages[*page].right = get_measurement(comment); if (Landscape) PagePrintWidth = PageLength - PageRight - PageLeft; else PagePrintWidth = PageWidth - PageRight - PageLeft; *right = PagePrintWidth - *right; // Skip right... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA BOTTOM ", 13) == 0) { // Bottom margin... comment += 13; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); tof = 1; } *x = *left; check_pages(*page); *top = PagePrintLength - *top; PageBottom = pages[*page].bottom = get_measurement(comment); if (Landscape) PagePrintLength = PageWidth - PageTop - PageBottom; else PagePrintLength = PageLength - PageTop - PageBottom; *top = PagePrintLength - *top; *y = *top; // Skip bottom... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA TOP ", 10) == 0) { // Top margin... comment += 10; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); tof = 1; } *x = *left; check_pages(*page); *top = PagePrintLength - *top; PageTop = pages[*page].top = get_measurement(comment); if (Landscape) PagePrintLength = PageWidth - PageTop - PageBottom; else PagePrintLength = PageLength - PageTop - PageBottom; *top = PagePrintLength - *top; *y = *top; // Skip top... 
while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA LANDSCAPE ", 16) == 0) { // Landscape on/off... comment += 16; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; tof = 1; } if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; check_pages(*page); if (strncasecmp(comment, "OFF", 3) == 0 || tolower(comment[0]) == 'n') { if (Landscape) { *right = PageLength - PageRight - *right; PagePrintWidth = PageWidth - PageRight - PageLeft; *right = PageWidth - PageRight - *right; *top = PageWidth - PageTop - *top; PagePrintLength = PageLength - PageTop - PageBottom; *top = PageLength - PageTop - *top; } Landscape = pages[*page].landscape = 0; } else if (strncasecmp(comment, "ON", 2) == 0 || tolower(comment[0]) == 'y') { if (!Landscape) { *top = PageLength - PageTop - *top; PagePrintLength = PageWidth - PageTop - PageBottom; *top = PageWidth - PageTop - *top; *right = PageWidth - PageRight - *right; PagePrintWidth = PageLength - PageRight - PageLeft; *right = PageLength - PageRight - *right; } Landscape = pages[*page].landscape = 1; } *y = *top; // Skip landscape... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA DUPLEX ", 13) == 0) { // Duplex printing on/off... comment += 13; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; *y = *top; tof = 1; } if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; check_pages(*page); if (strncasecmp(comment, "OFF", 3) == 0 || tolower(comment[0]) == 'n') PageDuplex = pages[*page].duplex = 0; else if (strncasecmp(comment, "ON", 2) == 0 || tolower(comment[0]) == 'y') { if ((*page) & 1) { (*page) ++; check_pages(*page); if (Verbosity) progress_show("Formatting page %d", *page); } PageDuplex = pages[*page].duplex = 1; } // Skip duplex... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "HEADER ", 7) == 0) { // Header string... comment += 7; while (isspace(*comment)) comment ++; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... 
tof = (*y >= *top); } if (strncasecmp(comment, "LEFT", 4) == 0 && isspace(comment[4])) { pos = 0; comment += 4; } else if (strncasecmp(comment, "CENTER", 6) == 0 && isspace(comment[6])) { pos = 1; comment += 6; } else if (strncasecmp(comment, "RIGHT", 5) == 0 && isspace(comment[5])) { pos = 2; comment += 5; } else { progress_error(HD_ERROR_BAD_COMMENT, "Bad HEADER position: \"%s\"", comment); return; } while (isspace(*comment)) comment ++; if (*comment != '\"') { progress_error(HD_ERROR_BAD_COMMENT, "Bad HEADER string: \"%s\"", comment); return; } for (ptr = buffer, comment ++; *comment && *comment != '\"'; comment ++) { if (*comment == '\\') comment ++; if (ptr < (buffer + sizeof(buffer) - 1)) *ptr++ = *comment; } if (*comment == '\"') comment ++; *ptr = '\0'; if (ptr > buffer) Header[pos] = strdup(buffer); else Header[pos] = NULL; if (tof) { DEBUG_printf(("Setting header %d for page %d to \"%s\"...\n", pos, *page, Header[pos] ? Header[pos] : "(null)")); check_pages(*page); pages[*page].header[pos] = (uchar *)Header[pos]; } // Adjust top margin as needed... float adjust, image_adjust, temp_adjust; if (maxhfheight > HeadFootSize) image_adjust = (float)(maxhfheight + HeadFootSize); else image_adjust = (float)(2 * HeadFootSize); for (adjust = 0.0, pos = 0; pos < 3; pos ++) { if (Header[pos] && (strstr(Header[pos], "$IMAGE") != NULL || strstr(Header[pos], "$HFIMAGE") != NULL)) temp_adjust = image_adjust; else if (Header1[pos] && (strstr(Header1[pos], "$IMAGE") != NULL || strstr(Header1[pos], "$HFIMAGE") != NULL)) temp_adjust = image_adjust; else if (Header[pos] || Header1[pos]) temp_adjust = (float)(2 * HeadFootSize); else temp_adjust = 0.0; if (temp_adjust > adjust) adjust = temp_adjust; } *top = PagePrintLength - adjust; if (tof) *y = *top; } else if (strncasecmp(comment, "HEADER1 ", 8) == 0) { // First page header string... comment += 8; while (isspace(*comment)) comment ++; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (strncasecmp(comment, "LEFT", 4) == 0 && isspace(comment[4])) { pos = 0; comment += 4; } else if (strncasecmp(comment, "CENTER", 6) == 0 && isspace(comment[6])) { pos = 1; comment += 6; } else if (strncasecmp(comment, "RIGHT", 5) == 0 && isspace(comment[5])) { pos = 2; comment += 5; } else { progress_error(HD_ERROR_BAD_COMMENT, "Bad HEADER1 position: \"%s\"", comment); return; } while (isspace(*comment)) comment ++; if (*comment != '\"') { progress_error(HD_ERROR_BAD_COMMENT, "Bad HEADER1 string: \"%s\"", comment); return; } for (ptr = buffer, comment ++; *comment && *comment != '\"'; comment ++) { if (*comment == '\\') comment ++; if (ptr < (buffer + sizeof(buffer) - 1)) *ptr++ = *comment; } if (*comment == '\"') comment ++; *ptr = '\0'; if (ptr > buffer) Header1[pos] = strdup(buffer); else Header1[pos] = NULL; // Adjust top margin as needed... 
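      // The space reserved at the top of the page is the largest
      // requirement of the three positions: headers containing $IMAGE or
      // $HFIMAGE reserve room for the tallest header/footer image plus one
      // line, plain text headers reserve two HeadFootSize lines, and empty
      // positions reserve nothing; the new top margin is PagePrintLength
      // minus that amount.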
float adjust, image_adjust, temp_adjust; if (maxhfheight > HeadFootSize) image_adjust = (float)(maxhfheight + HeadFootSize); else image_adjust = (float)(2 * HeadFootSize); for (adjust = 0.0, pos = 0; pos < 3; pos ++) { if (Header[pos] && (strstr(Header[pos], "$IMAGE") != NULL || strstr(Header[pos], "$HFIMAGE") != NULL)) temp_adjust = image_adjust; else if (Header1[pos] && (strstr(Header1[pos], "$IMAGE") != NULL || strstr(Header1[pos], "$HFIMAGE") != NULL)) temp_adjust = image_adjust; else if (Header[pos] || Header1[pos]) temp_adjust = (float)(2 * HeadFootSize); else temp_adjust = 0.0; if (temp_adjust > adjust) adjust = temp_adjust; } *top = PagePrintLength - adjust; if (tof) *y = *top; } else if (strncasecmp(comment, "FOOTER ", 7) == 0) { // Footer string... comment += 7; while (isspace(*comment)) comment ++; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (strncasecmp(comment, "LEFT", 4) == 0 && isspace(comment[4])) { pos = 0; comment += 4; } else if (strncasecmp(comment, "CENTER", 6) == 0 && isspace(comment[6])) { pos = 1; comment += 6; } else if (strncasecmp(comment, "RIGHT", 5) == 0 && isspace(comment[5])) { pos = 2; comment += 5; } else { progress_error(HD_ERROR_BAD_COMMENT, "Bad FOOTER position: \"%s\"", comment); return; } while (isspace(*comment)) comment ++; if (*comment != '\"') { progress_error(HD_ERROR_BAD_COMMENT, "Bad FOOTER string: \"%s\"", comment); return; } for (ptr = buffer, comment ++; *comment && *comment != '\"'; comment ++) { if (*comment == '\\') comment ++; if (ptr < (buffer + sizeof(buffer) - 1)) *ptr++ = *comment; } if (*comment == '\"') comment ++; *ptr = '\0'; if (ptr > buffer) Footer[pos] = strdup(buffer); else Footer[pos] = NULL; if (tof) { check_pages(*page); pages[*page].footer[pos] = (uchar *)Footer[pos]; } // Adjust bottom margin as needed... float adjust, image_adjust, temp_adjust; if (maxhfheight > HeadFootSize) image_adjust = (float)(maxhfheight + HeadFootSize); else image_adjust = (float)(2 * HeadFootSize); for (adjust = 0.0, pos = 0; pos < 3; pos ++) { if (Footer[pos] && (strstr(Footer[pos], "$IMAGE") != NULL || strstr(Footer[pos], "$HFIMAGE") != NULL)) temp_adjust = image_adjust; else if (Footer[pos]) temp_adjust = (float)(2 * HeadFootSize); else temp_adjust = 0.0; if (temp_adjust > adjust) adjust = temp_adjust; } *bottom = adjust; } else if (strncasecmp(comment, "NUMBER-UP ", 10) == 0) { // N-up printing... comment += 10; while (isspace(*comment)) comment ++; if (!*comment) break; NumberUp = strtol(comment, (char **)&comment, 10); if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... 
tof = (*y >= *top); } if (tof) { check_pages(*page); pages[*page].nup = NumberUp; } } else break; } DEBUG_printf(("LEAVING parse_comment() x=%.1f, y=%.1f, page=%d\n", *x, *y, *page)); DEBUG_printf((" PagePrintWidth = %d\n", PagePrintWidth)); DEBUG_printf(("PagePrintLength = %d\n", PagePrintLength)); DEBUG_printf((" PageWidth = %d\n", PageWidth)); DEBUG_printf((" PageLength = %d\n", PageLength)); DEBUG_printf((" PageLeft = %d\n", PageLeft)); DEBUG_printf((" PageBottom = %d\n", PageBottom)); DEBUG_printf((" PageRight = %d\n", PageRight)); DEBUG_printf((" PageTop = %d\n", PageTop)); DEBUG_printf((" Landscape = %d\n", Landscape)); } #ifdef COMMENT_DEBUG # undef DEBUG # undef DEBUG_puts # define DEBUG_puts(x) # undef DEBUG_printf # define DEBUG_printf(x) #endif /* COMMENT_DEBUG */ /* * 'find_background()' - Find the background image/color for the given document. */ static void find_background(tree_t *t) /* I - Document to search */ { uchar *var; /* BGCOLOR/BACKGROUND variable */ /* * First see if the --bodycolor or --bodyimage options have been * specified... */ if (BodyImage[0] != '\0') { background_image = image_load(BodyImage, !OutputColor); return; } else if (BodyColor[0] != '\0') { get_color((uchar *)BodyColor, background_color, 0); return; } /* * If not, search the document tree... */ while (t != NULL && background_image == NULL && background_color[0] == 1.0 && background_color[1] == 1.0 && background_color[2] == 1.0) { if (t->markup == MARKUP_BODY) { if ((var = htmlGetVariable(t, (uchar *)"BACKGROUND")) != NULL) background_image = image_load((char *)var, !OutputColor); if ((var = htmlGetVariable(t, (uchar *)"BGCOLOR")) != NULL) get_color(var, background_color, 0); } if (t->child != NULL) find_background(t->child); t = t->next; } } /* * 'write_background()' - Write the background image/color for to the current * page. 
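 *
 * A non-white background color is filled as a full-page rectangle; a
 * background image is tiled across the page, using the image XObject for
 * PDF output or an inline PostScript image loop when PSLevel > 0.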
*/ static void write_background(int page, /* I - Page we are writing for */ FILE *out) /* I - File to write to */ { float x, y; float width, height; int page_width, page_length; if (Landscape) { page_length = pages[page].width; page_width = pages[page].length; } else { page_width = pages[page].width; page_length = pages[page].length; } if (background_color[0] != 1.0 || background_color[1] != 1.0 || background_color[2] != 1.0) { if (PSLevel > 0) { render_x = -1.0; render_y = -1.0; set_color(out, background_color); fprintf(out, "0 0 M %d %d F\n", page_width, page_length); } else { set_color(out, background_color); flate_printf(out, "0 0 %d %d re f\n", page_width, page_length); } } if (background_image != NULL) { width = (float)(background_image->width * 72.0f / _htmlPPI); height = (float)(background_image->height * 72.0f / _htmlPPI); if (width < 1.0f) width = 1.0f; if (height < 1.0f) height = 1.0f; switch (PSLevel) { case 0 : for (x = 0.0; x < page_width; x += width) for (y = page_length; y >= 0.0f;) { y -= height; flate_printf(out, "q %.1f 0 0 %.1f %.1f %.1f cm", width, height, x, y); flate_printf(out, "/I%d Do\n", background_image->obj); flate_puts("Q\n", out); } break; default : fprintf(out, "0 %.1f %d{/y exch neg %d add def\n", height, page_length + (int)height - 1, page_length); fprintf(out, "0 %.1f %d{/x exch def\n", width, page_width); fprintf(out, "GS[%.1f 0 0 %.1f x y]CM/iy -1 def\n", width, height); fprintf(out, "%d %d 8[%d 0 0 %d 0 %d]", background_image->width, background_image->height, background_image->width, -background_image->height, background_image->height); fputs("{/iy iy 1 add def BG iy get}", out); if (background_image->depth == 1) fputs("image\n", out); else fputs("false 3 colorimage\n", out); fputs("GR}for}for\n", out); break; } } } /* * 'new_render()' - Allocate memory for a new rendering structure. */ static render_t * /* O - New render structure */ new_render(int page, /* I - Page number (0-n) */ int type, /* I - Type of render primitive */ double x, /* I - Horizontal position */ double y, /* I - Vertical position */ double width, /* I - Width */ double height, /* I - Height */ void *data, /* I - Data */ render_t *insert) /* I - Insert before here... */ { render_t *r; /* New render primitive */ size_t datalen = 0; /* Length of data */ static render_t dummy; /* Dummy var for errors... */ DEBUG_printf(("new_render(page=%d, type=%d, x=%.1f, y=%.1f, width=%.1f, height=%.1f, data=%p, insert=%p)\n", page, type, x, y, width, height, (void *)data, (void *)insert)); check_pages(page); if (page < 0 || page >= (int)alloc_pages) { progress_error(HD_ERROR_INTERNAL_ERROR, "Page number (%d) out of range (1...%d)\n", page + 1, (int)alloc_pages); memset(&dummy, 0, sizeof(dummy)); return (&dummy); } if ((type != RENDER_TEXT && type != RENDER_LINK) || data == NULL) r = (render_t *)calloc(sizeof(render_t), 1); else { datalen = strlen((char *)data); r = (render_t *)calloc(sizeof(render_t) + datalen, 1); } if (r == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory on page %d\n", (int)page + 1); memset(&dummy, 0, sizeof(dummy)); return (&dummy); } r->type = type; r->x = (float)x; r->y = (float)y; r->width = (float)width; r->height = (float)height; switch (type) { case RENDER_TEXT : if (data == NULL) { free(r); return (NULL); } // Safe because buffer is allocated... 
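        // For TEXT and LINK primitives the structure was allocated with
        // "datalen" extra bytes above, so copying datalen bytes here stays
        // within the allocation; the calloc() zero fill provides the
        // terminating nul.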
memcpy((char *)r->data.text.buffer, (char *)data, datalen); get_color(_htmlTextColor, r->data.text.rgb); break; case RENDER_IMAGE : if (data == NULL) { free(r); return (NULL); } r->data.image = (image_t *)data; break; case RENDER_BOX : memcpy(r->data.box, data, sizeof(r->data.box)); break; case RENDER_LINK : if (data == NULL) { free(r); return (NULL); } // Safe because buffer is allocated... memcpy((char *)r->data.link, (char *)data, datalen); break; } if (insert) { if (insert->prev) insert->prev->next = r; else pages[page].start = r; r->prev = insert->prev; r->next = insert; insert->prev = r; } else { if (pages[page].end != NULL) pages[page].end->next = r; else pages[page].start = r; r->next = NULL; r->prev = pages[page].end; pages[page].end = r; } DEBUG_printf((" returning r = %p\n", (void *)r)); return (r); } /* * 'check_pages()' - Allocate memory for more pages as needed... */ static void check_pages(int page) // I - Current page { page_t *temp; // Temporary page pointer DEBUG_printf(("check_pages(%d)\n", page)); // See if we need to allocate memory for the page... if (page >= (int)alloc_pages) { // Yes, allocate enough for ALLOC_PAGES more pages... while (page >= (int)alloc_pages) alloc_pages += ALLOC_PAGES; // Do the pages pointers... if (num_pages == 0) temp = (page_t *)malloc(sizeof(page_t) * alloc_pages); else temp = (page_t *)realloc(pages, sizeof(page_t) * alloc_pages); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d pages - %s", (int)alloc_pages, strerror(errno)); alloc_pages -= ALLOC_PAGES; return; } memset(temp + num_pages, 0, (alloc_pages - num_pages) * sizeof(page_t)); pages = temp; } // Initialize the page data as needed... for (temp = pages + num_pages; (int)num_pages <= page; num_pages ++, temp ++) { if (!temp->width) { if (num_pages == 0 || !temp[-1].width || !temp[-1].length || chapter == 0) { temp->width = PageWidth; temp->length = PageLength; temp->left = PageLeft; temp->right = PageRight; temp->top = PageTop; temp->bottom = PageBottom; temp->duplex = PageDuplex; temp->landscape = Landscape; temp->nup = NumberUp; } else { memcpy(temp, temp - 1, sizeof(page_t)); temp->start = NULL; temp->end = NULL; } temp->url = current_url; if (chapter == 0) { memcpy(temp->header, TocHeader, sizeof(temp->header)); memcpy(temp->footer, TocFooter, sizeof(temp->footer)); } else { memcpy(temp->header, Header, sizeof(temp->header)); memcpy(temp->header1, Header1, sizeof(temp->header1)); memcpy(temp->footer, Footer, sizeof(temp->footer)); if (current_heading != temp->headnode) { temp->heading = htmlGetText(current_heading); temp->headnode = current_heading; } } memcpy(temp->background_color, background_color, sizeof(temp->background_color)); temp->background_image = background_image; } } } /* * 'add_link()' - Add a named link... */ static void add_link(uchar *name, /* I - Name of link */ int page, /* I - Page # */ int top) /* I - Y position */ { link_t *temp; /* New name */ if (name == NULL) return; DEBUG_printf(("add_link(name=\"%s\", page=%d, top=%d)\n", name, page, top)); if ((temp = find_link(name)) != NULL) { temp->page = (short)page; temp->top = (short)top; } else { // See if we need to allocate memory for links... if (num_links >= alloc_links) { // Allocate more links... 
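// The links array grows in ALLOC_LINKS increments and is re-sorted after
// every insertion so find_link() can locate entries with bsearch().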
alloc_links += ALLOC_LINKS; if (num_links == 0) temp = (link_t *)malloc(sizeof(link_t) * alloc_links); else temp = (link_t *)realloc(links, sizeof(link_t) * alloc_links); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d links - %s", (int)alloc_links, strerror(errno)); alloc_links -= ALLOC_LINKS; return; } links = temp; } // Add a new link... temp = links + num_links; num_links ++; strlcpy((char *)temp->name, (char *)name, sizeof(temp->name)); temp->page = (short)page; temp->top = (short)top; if (num_links > 1) qsort(links, num_links, sizeof(link_t), (compare_func_t)compare_links); } } /* * 'find_link()' - Find a named link... */ static link_t * find_link(uchar *name) /* I - Name to find */ { link_t key, /* Search key */ *match; /* Matching name entry */ if (name == NULL || num_links == 0) return (NULL); if (name[0] == '#') name ++; strlcpy((char *)key.name, (char *)name, sizeof(key.name)); match = (link_t *)bsearch(&key, links, num_links, sizeof(link_t), (compare_func_t)compare_links); return (match); } /* * 'compare_links()' - Compare two named links. */ static int /* O - 0 = equal, -1 or 1 = not equal */ compare_links(link_t *n1, /* I - First name */ link_t *n2) /* I - Second name */ { return (strcasecmp((char *)n1->name, (char *)n2->name)); } #ifdef TABLE_DEBUG # undef DEBUG_printf # undef DEBUG_puts # define DEBUG_printf(x) printf x # define DEBUG_puts(x) puts(x) #endif /* TABLE_DEBUG */ // // 'get_cell_size()' - Compute the minimum width of a cell. // static float // O - Required width of cell get_cell_size(tree_t *t, // I - Cell float left, // I - Left margin float right, // I - Right margin float *minwidth, // O - Minimum width float *prefwidth, // O - Preferred width float *minheight) // O - Minimum height { tree_t *temp, // Current tree entry *next; // Next tree entry uchar *var; // Attribute value int nowrap; // NOWRAP attribute? float width, // Width of cell frag_width, // Fragment required width frag_height, // Fragment height frag_pref, // Fragment preferred width frag_min, // Fragment minimum width minh, // Local minimum height minw, // Local minimum width prefw, // Local preferred width format_width; // Working format width for images DEBUG_printf(("get_cell_size(%p, %.1f, %.1f, %p, %p, %p)\n", (void *)t, left, right, (void *)minwidth, (void *)prefwidth, (void *)minheight)); // First see if the width has been specified for this cell... if ((var = htmlGetVariable(t, (uchar *)"WIDTH")) != NULL && (var[strlen((char *)var) - 1] != '%' || (right - left) > 0.0f)) { // Yes, use it! if (var[strlen((char *)var) - 1] == '%') width = (right - left) * atoi((char *)var) * 0.01f; else width = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else width = 0.0f; if ((format_width = right - left) <= 0.0f) format_width = PagePrintWidth; minw = 0.0f; prefw = 0.0f; // Then the height... if ((var = htmlGetVariable(t, (uchar *)"HEIGHT")) != NULL) { // Yes, use it! if (var[strlen((char *)var) - 1] == '%') minh = PagePrintLength * atoi((char *)var) * 0.01f; else minh = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else minh = 0.0f; nowrap = (htmlGetVariable(t, (uchar *)"NOWRAP") != NULL); DEBUG_printf(("nowrap = %d\n", nowrap)); for (temp = t->child, frag_width = 0.0f, frag_pref = 0.0f; temp != NULL; temp = next) { // Point to next markup, if any... next = temp->child; switch (temp->markup) { case MARKUP_TABLE : // Update widths... 
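// A nested table ends the current text fragment: flush the accumulated
// fragment widths first, then size the inner table via get_table_size().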
if (frag_pref > prefw) prefw = frag_pref; if (frag_width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for block...\n", frag_width, minw)); minw = frag_width; } if (nowrap && frag_pref > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for break...\n", frag_pref, minw)); minw = frag_pref; } // For nested tables, compute the width of the table. frag_width = get_table_size(temp, left, right, &frag_min, &frag_pref, &frag_height); if (frag_pref > prefw) prefw = frag_pref; if (frag_min > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for nested table...\n", frag_min, minw)); minw = frag_min; } frag_width = 0.0f; frag_pref = 0.0f; frag_min = 0.0f; next = NULL; break; case MARKUP_IMG : // Update the image width as needed... if (temp->markup == MARKUP_IMG) update_image_size(temp); case MARKUP_NONE : case MARKUP_SPACER : frag_height = temp->height; #ifdef TABLE_DEBUG2 if (temp->markup == MARKUP_NONE) printf("FRAG(%s) = %.1f\n", temp->data, temp->width); else if (temp->markup == MARKUP_SPACER) printf("SPACER = %.1f\n", temp->width); else printf("IMG(%s) = %.1f\n", htmlGetVariable(temp, (uchar *)"SRC"), temp->width); #endif // TABLE_DEBUG2 // Handle min/preferred widths separately... if (temp->width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for fragment...\n", temp->width, minw)); minw = temp->width; } if (temp->preformatted && temp->data != NULL && temp->data[strlen((char *)temp->data) - 1] == '\n') { // End of a line - check preferred width... frag_pref += temp->width + 1; if (frag_pref > prefw) prefw = frag_pref; if (temp->preformatted && frag_pref > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for preformatted...\n", frag_pref, minw)); minw = frag_pref; } frag_pref = 0.0f; } else if (temp->data != NULL) frag_pref += temp->width + 1; else if ((frag_pref + temp->width) > format_width) { // parse_paragraph() will force a break if (frag_pref > prefw) prefw = frag_pref; frag_pref = temp->width; } else frag_pref += temp->width; if (temp->preformatted && temp->data != NULL && temp->data[strlen((char *)temp->data) - 1] == '\n') { // Check required width... frag_width += temp->width + 1; if (frag_width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for block...\n", frag_width, minw)); minw = frag_width; } frag_width = 0.0f; } else if (!temp->preformatted && temp->data != NULL && (isspace(temp->data[0]) || (temp->data[0] && isspace(temp->data[strlen((char *)temp->data) - 1])))) { // Check required width... 
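// Leading whitespace starts a new required-width fragment and trailing
// whitespace ends one, so the minimum width tracks the longest
// unbreakable run of text.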
if (isspace(temp->data[0])) frag_width = temp->width + 1; else frag_width += temp->width + 1; if (frag_width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for block...\n", frag_width, minw)); minw = frag_width; } if (!isspace(temp->data[0])) frag_width = 0.0f; DEBUG_printf(("frag_width=%.1f after whitespace processing...\n", frag_width)); } else if (temp->data != NULL) frag_width += temp->width + 1; else if ((frag_width + temp->width) > format_width) // parse_paragraph() will force a break frag_width = temp->width; else frag_width += temp->width; break; case MARKUP_ADDRESS : case MARKUP_BLOCKQUOTE : case MARKUP_BR : case MARKUP_CENTER : case MARKUP_DD : case MARKUP_DIV : case MARKUP_DT : case MARKUP_H1 : case MARKUP_H2 : case MARKUP_H3 : case MARKUP_H4 : case MARKUP_H5 : case MARKUP_H6 : case MARKUP_H7 : case MARKUP_H8 : case MARKUP_H9 : case MARKUP_H10 : case MARKUP_H11 : case MARKUP_H12 : case MARKUP_H13 : case MARKUP_H14 : case MARKUP_H15 : case MARKUP_HR : case MARKUP_LI : case MARKUP_P : case MARKUP_PRE : DEBUG_printf(("BREAK at %.1f\n", frag_pref)); if (frag_pref > prefw) prefw = frag_pref; if (frag_width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for block...\n", frag_width, minw)); minw = frag_width; } if (nowrap && frag_pref > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for break...\n", frag_pref, minw)); minw = frag_pref; } frag_pref = 0.0f; frag_width = 0.0f; default : frag_height = 0.0f; break; } // Update minimum height... if (frag_height > minh) minh = frag_height; // Update next pointer as needed... if (next == NULL) next = temp->next; if (next == NULL) { // This code is almost funny if you say it fast... :) for (next = temp->parent; next != NULL && next != t; next = next->parent) if (next->next != NULL) break; if (next == t) next = NULL; else if (next) next = next->next; } } // Check the last fragment's width... if (frag_pref > prefw) prefw = frag_pref; if (frag_width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for block...\n", frag_width, minw)); minw = frag_width; } // Handle the "NOWRAP" option... if (nowrap && prefw > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for NOWRAP...\n", prefw, minw)); minw = prefw; } // Return the required, minimum, and preferred size of the cell... *minwidth = minw; *prefwidth = prefw; *minheight = minh; DEBUG_printf(("get_cell_size(): width=%.1f, minw=%.1f, prefw=%.1f, minh=%.1f\n", width, minw, prefw, minh)); return (width); } // // 'get_table_size()' - Compute the minimum width of a table. 
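//
// Walks the table's TR/TD/TH children, sizing each cell with
// get_cell_size() and accumulating per-row required, minimum, and
// preferred widths plus row heights, then adds room for the BORDER,
// CELLPADDING, and CELLSPACING attributes.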
// static float // O - Minimum width of table get_table_size(tree_t *t, // I - Table float left, // I - Left margin float right, // I - Right margin float *minwidth, // O - Minimum width float *prefwidth, // O - Preferred width float *minheight) // O - Minimum height { tree_t *temp, // Current tree entry *next; // Next tree entry uchar *var; // Attribute value float width, // Required width of table minw, // Minimum width of table minh, // Minimum height of table prefw, // Preferred width of table cell_width, // Cell required width cell_pref, // Cell preferred width cell_min, // Cell minimum width cell_height, // Cell minimum height row_width, // Row required width row_pref, // Row preferred width row_min, // Row minimum width row_height, // Row minimum height border, // Border around cells cellpadding, // Padding inside cells cellspacing; // Spacing around cells int columns, // Current number of columns max_columns, // Maximum columns rows; // Number of rows DEBUG_printf(("get_table_size(%p, %.1f, %.1f, %p, %p, %p)\n", (void *)t, left, right, (void *)minwidth, (void *)prefwidth, (void *)minheight)); // First see if the width has been specified for this table... if ((var = htmlGetVariable(t, (uchar *)"WIDTH")) != NULL && (var[strlen((char *)var) - 1] != '%' || (right - left) > 0.0f)) { // Yes, use it! if (var[strlen((char *)var) - 1] == '%') width = (right - left) * atoi((char *)var) * 0.01f; else width = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else width = 0.0f; minw = 0.0f; prefw = 0.0f; // Then the height... if ((var = htmlGetVariable(t, (uchar *)"HEIGHT")) != NULL) { // Yes, use it! if (var[strlen((char *)var) - 1] == '%') minh = PagePrintLength * atoi((char *)var) * 0.01f; else minh = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else minh = 0.0f; // Update the size as needed... for (temp = t->child, row_width = 0.0f, row_min = 0.0f, row_pref = 0.0f, row_height = 0.0f, columns = 0, rows = 0, max_columns = 0; temp != NULL; temp = next) { // Point to next markup, if any... next = temp->child; // Start a new row or add the cell width as needed... if (temp->markup == MARKUP_TR) { minh += row_height; row_width = 0.0f; row_pref = 0.0f; row_min = 0.0f; row_height = 0.0f; rows ++; columns = 0; } else if (temp->markup == MARKUP_TD || temp->markup == MARKUP_TH) { // Update columns... columns ++; if (columns > max_columns) max_columns = columns; // Get widths of cell... cell_width = get_cell_size(temp, left, right, &cell_min, &cell_pref, &cell_height); // Update row widths... row_width += cell_width; row_pref += cell_pref; row_min += cell_min; if (cell_height > row_height) row_height = cell_height; // Check current row widths against table... if (row_pref > prefw) prefw = row_pref; if (row_min > minw) minw = row_min; } // Update next pointer as needed... if (next == NULL) next = temp->next; if (next == NULL) { // This code is almost funny if you say it fast... :) for (next = temp->parent; next != NULL && next != t; next = next->parent) if (next->next != NULL) break; if (next == t) next = NULL; else if (next) next = next->next; } } // Make sure last row is counted in min height calcs. minh += row_height; // Add room for spacing and padding... 
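// Each column contributes 2 * cellpadding + cellspacing (less one trailing
// cellspacing for the table as a whole); a nonzero BORDER is folded into
// cellpadding, and when no border is given (and cellpadding is nonzero) an
// extra pixel of padding mimics the invisible 1-pixel border Netscape and
// MSIE assume.  The pixel values are scaled to points by
// PagePrintWidth / _htmlBrowserWidth.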
if ((var = htmlGetVariable(t, (uchar *)"CELLPADDING")) != NULL) cellpadding = atoi((char *)var); else cellpadding = 1.0f; if ((var = htmlGetVariable(t, (uchar *)"CELLSPACING")) != NULL) cellspacing = atoi((char *)var); else cellspacing = 0.0f; if ((var = htmlGetVariable(t, (uchar *)"BORDER")) != NULL) { if ((border = (float)atof((char *)var)) == 0.0 && var[0] != '0') border = 1.0f; cellpadding += border; } else border = 0.0f; if (border == 0.0f && cellpadding > 0.0f) { /* * Ah, the strange table formatting nightmare that is HTML. * Netscape and MSIE assign an invisible border width of 1 * pixel if no border is specified... */ cellpadding += 1.0f; } cellspacing *= PagePrintWidth / _htmlBrowserWidth; cellpadding *= PagePrintWidth / _htmlBrowserWidth; DEBUG_printf(("ADDING %.1f for table space for %d columns...\n", max_columns * (2 * cellpadding + cellspacing) - cellspacing, max_columns)); if (width > 0.0f) width += max_columns * (2 * cellpadding + cellspacing) - cellspacing; minw += max_columns * (2 * cellpadding + cellspacing) - cellspacing; prefw += max_columns * (2 * cellpadding + cellspacing) - cellspacing; minh += rows * (2 * cellpadding + cellspacing) - cellspacing; // Return the required, minimum, and preferred size of the table... *minwidth = minw; *prefwidth = prefw; *minheight = minh; DEBUG_printf(("get_table_size(): width=%.1f, minw=%.1f, prefw=%.1f, minh=%.1f\n", width, minw, prefw, minh)); return (width); } #ifdef TABLE_DEBUG # undef DEBUG_printf # undef DEBUG_puts # define DEBUG_printf(x) # define DEBUG_puts(x) #endif /* TABLE_DEBUG */ /* * 'flatten_tree()' - Flatten an HTML tree to only include the text, image, * link, and break markups. */ static tree_t * /* O - Flattened markup tree */ flatten_tree(tree_t *t) /* I - Markup tree to flatten */ { tree_t *temp, /* New tree node */ *flat; /* Flattened tree */ flat = NULL; while (t != NULL) { switch (t->markup) { case MARKUP_NONE : if (t->data == NULL) break; case MARKUP_COMMENT : case MARKUP_BR : case MARKUP_SPACER : case MARKUP_IMG : temp = (tree_t *)calloc(sizeof(tree_t), 1); memcpy(temp, t, sizeof(tree_t)); temp->parent = NULL; temp->child = NULL; temp->prev = flat; temp->next = NULL; if (flat != NULL) flat->next = temp; flat = temp; if (temp->markup == MARKUP_IMG) update_image_size(temp); break; case MARKUP_A : if (htmlGetVariable(t, (uchar *)"NAME") != NULL) { temp = (tree_t *)calloc(sizeof(tree_t), 1); memcpy(temp, t, sizeof(tree_t)); temp->parent = NULL; temp->child = NULL; temp->prev = flat; temp->next = NULL; if (flat != NULL) flat->next = temp; flat = temp; } break; case MARKUP_P : case MARKUP_PRE : case MARKUP_H1 : case MARKUP_H2 : case MARKUP_H3 : case MARKUP_H4 : case MARKUP_H5 : case MARKUP_H6 : case MARKUP_H7 : case MARKUP_H8 : case MARKUP_H9 : case MARKUP_H10 : case MARKUP_H11 : case MARKUP_H12 : case MARKUP_H13 : case MARKUP_H14 : case MARKUP_H15 : case MARKUP_UL : case MARKUP_DIR : case MARKUP_MENU : case MARKUP_OL : case MARKUP_DL : case MARKUP_LI : case MARKUP_DD : case MARKUP_DT : case MARKUP_TR : case MARKUP_CAPTION : temp = (tree_t *)calloc(sizeof(tree_t), 1); temp->markup = MARKUP_BR; temp->parent = NULL; temp->child = NULL; temp->prev = flat; temp->next = NULL; if (flat != NULL) flat->next = temp; flat = temp; break; default : break; } if (t->child != NULL && t->markup != MARKUP_UNKNOWN) { temp = flatten_tree(t->child); if (temp != NULL) temp->prev = flat; if (flat != NULL) flat->next = temp; else flat = temp; } if (flat != NULL) while (flat->next != NULL) flat = flat->next; t = t->next; } if (flat == 
NULL) return (NULL); while (flat->prev != NULL) flat = flat->prev; return (flat); } /* * 'update_image_size()' - Update the size of an image based upon the * printable width. */ static void update_image_size(tree_t *t) /* I - Tree entry */ { image_t *img; /* Image file */ uchar *width, /* Width string */ *height; /* Height string */ width = htmlGetVariable(t, (uchar *)"WIDTH"); height = htmlGetVariable(t, (uchar *)"HEIGHT"); if (width != NULL && height != NULL) { if (width[strlen((char *)width) - 1] == '%') t->width = (float)(atof((char *)width) * PagePrintWidth / 100.0f); else t->width = (float)(atoi((char *)width) * PagePrintWidth / _htmlBrowserWidth); if (height[strlen((char *)height) - 1] == '%') t->height = (float)(atof((char *)height) * PagePrintWidth / 100.0f); else t->height = (float)(atoi((char *)height) * PagePrintWidth / _htmlBrowserWidth); return; } img = image_find((char *)htmlGetVariable(t, (uchar *)"REALSRC")); if (img == NULL) return; if (width != NULL) { if (width[strlen((char *)width) - 1] == '%') t->width = (float)(atof((char *)width) * PagePrintWidth / 100.0f); else t->width = (float)(atoi((char *)width) * PagePrintWidth / _htmlBrowserWidth); t->height = t->width * img->height / img->width; } else if (height != NULL) { if (height[strlen((char *)height) - 1] == '%') t->height = (float)(atof((char *)height) * PagePrintWidth / 100.0f); else t->height = (float)(atoi((char *)height) * PagePrintWidth / _htmlBrowserWidth); t->width = t->height * img->width / img->height; } else { t->width = (float)(img->width * PagePrintWidth / _htmlBrowserWidth); t->height = (float)(img->height * PagePrintWidth / _htmlBrowserWidth); } } /* * 'get_width()' - Get the width of a string in points. */ static float /* O - Width in points */ get_width(uchar *s, /* I - String to scan */ int typeface, /* I - Typeface code */ int style, /* I - Style code */ int size) /* I - Size */ { uchar *ptr; /* Current character */ int width; /* Current width */ DEBUG_printf(("get_width(\"%s\", %d, %d, %d)\n", s == NULL ? "(null)" : (const char *)s, typeface, style, size)); if (s == NULL) return (0.0); if (!_htmlWidthsLoaded[typeface][style]) htmlLoadFontWidths(typeface, style); for (width = 0, ptr = s; *ptr != '\0'; ptr ++) width += _htmlWidths[typeface][style][*ptr]; return (width * _htmlSizes[size] * 0.001f); } /* * 'get_title()' - Get the title string for a document. */ static uchar * /* O - Title string */ get_title(tree_t *doc) /* I - Document */ { uchar *temp; while (doc != NULL) { if (doc->markup == MARKUP_TITLE) return (htmlGetText(doc->child)); else if (doc->child != NULL) if ((temp = get_title(doc->child)) != NULL) return (temp); doc = doc->next; } return (NULL); } /* * 'open_file()' - Open an output file for the current chapter. */ static FILE * /* O - File pointer */ open_file(void) { char filename[255]; /* Filename */ if (OutputFiles && PSLevel > 0) { if (chapter == -1) snprintf(filename, sizeof(filename), "%s/cover.ps", OutputPath); else if (chapter == 0) snprintf(filename, sizeof(filename), "%s/contents.ps", OutputPath); else snprintf(filename, sizeof(filename), "%s/doc%d.ps", OutputPath, chapter); return (fopen(filename, "wb+")); } else if (OutputFiles) { snprintf(filename, sizeof(filename), "%s/doc.pdf", OutputPath); return (fopen(filename, "wb+")); } else if (OutputPath[0] != '\0') return (fopen(OutputPath, "wb+")); else if (PSLevel == 0) return (file_temp(stdout_filename, sizeof(stdout_filename))); else return (stdout); } /* * 'set_color()' - Set the current text color... 
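 *
 * The current color is cached in render_rgb so redundant operators are
 * not emitted; when OutputColor is off, the RGB value is collapsed to a
 * single gray level using approximate luminance weights (0.31/0.61/0.08).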
*/ static void set_color(FILE *out, /* I - File to write to */ float *rgb) /* I - RGB color */ { if (rgb[0] == render_rgb[0] && rgb[1] == render_rgb[1] && rgb[2] == render_rgb[2]) return; render_rgb[0] = rgb[0]; render_rgb[1] = rgb[1]; render_rgb[2] = rgb[2]; if (OutputColor) { // Output RGB color... if (PSLevel > 0) fprintf(out, "%.2f %.2f %.2f C ", rgb[0], rgb[1], rgb[2]); else flate_printf(out, "%.2f %.2f %.2f rg ", rgb[0], rgb[1], rgb[2]); } else { // Output grayscale... if (PSLevel > 0) fprintf(out, "%.2f G ", rgb[0] * 0.31f + rgb[1] * 0.61f + rgb[2] * 0.08f); else flate_printf(out, "%.2f g ", rgb[0] * 0.31f + rgb[1] * 0.61f + rgb[2] * 0.08f); } } /* * 'set_font()' - Set the current text font. */ static void set_font(FILE *out, /* I - File to write to */ int typeface, /* I - Typeface code */ int style, /* I - Style code */ float size) /* I - Size */ { char sizes[255], /* Formatted string for size... */ *s; /* Pointer to end of string */ if (typeface == render_typeface && style == render_style && size == render_size) return; /* * Format size and strip trailing 0's and decimals... */ snprintf(sizes, sizeof(sizes), "%.1f", size); for (s = sizes + strlen(sizes) - 1; s > sizes && *s == '0'; s --) *s = '\0'; if (*s == '.') *s = '\0'; /* * Set the new typeface, style, and size. */ if (PSLevel > 0) { if (size != render_size) fprintf(out, "%s FS", sizes); fprintf(out, "/F%x SF ", typeface * 4 + style); } else flate_printf(out, "/F%x %s Tf ", typeface * 4 + style, sizes); render_typeface = typeface; render_style = style; render_size = size; } /* * 'set_pos()' - Set the current text position. */ static void set_pos(FILE *out, /* I - File to write to */ float x, /* I - X position */ float y) /* I - Y position */ { char xs[255], /* Formatted string for X... */ ys[255], /* Formatted string for Y... */ *s; /* Pointer to end of string */ if (fabs(render_x - x) < 0.1 && fabs(render_y - y) < 0.1) return; /* * Format X and Y... */ if (PSLevel > 0 || render_x == -1.0) { snprintf(xs, sizeof(xs), "%.3f", x); snprintf(ys, sizeof(ys), "%.3f", y); } else { snprintf(xs, sizeof(xs), "%.3f", x - render_startx); snprintf(ys, sizeof(ys), "%.3f", y - render_y); } /* * Strip trailing 0's and decimals... */ for (s = xs + strlen(xs) - 1; s > xs && *s == '0'; s --) *s = '\0'; if (*s == '.') *s = '\0'; for (s = ys + strlen(ys) - 1; s > ys && *s == '0'; s --) *s = '\0'; if (*s == '.') *s = '\0'; if (PSLevel > 0) fprintf(out, "%s %s M", xs, ys); else flate_printf(out, "%s %s Td", xs, ys); render_x = render_startx = x; render_y = y; } /* * 'ps_hex()' - Print binary data as a series of hexadecimal numbers. */ static void ps_hex(FILE *out, /* I - File to print to */ uchar *data, /* I - Data to print */ int length) /* I - Number of bytes to print */ { int col; static const char *hex = "0123456789ABCDEF"; col = 0; while (length > 0) { /* * Put the hex uchars out to the file; note that we don't use fprintf() * for speed reasons... */ putc(hex[*data >> 4], out); putc(hex[*data & 15], out); data ++; length --; col = (col + 1) % 40; if (col == 0) putc('\n', out); } if (col > 0) putc('\n', out); } #ifdef HTMLDOC_ASCII85 /* * 'ps_ascii85()' - Print binary data as a series of base-85 numbers. 
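 *
 * Each group of four input bytes becomes a 32-bit word written as five
 * base-85 digits ('!'..'u'); an all-zero word is abbreviated to 'z',
 * leftover bytes are carried between calls, and eod flushes the final
 * partial group and writes the "~>" end-of-data marker.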
*/ static void ps_ascii85(FILE *out, /* I - File to print to */ uchar *data, /* I - Data to print */ int length, /* I - Number of bytes to print */ int eod) /* I - 1 = end-of-data */ { unsigned b = 0; /* Current 32-bit word */ uchar c[5]; /* Base-85 encoded characters */ static int col = 0; /* Column */ static uchar leftdata[4]; /* Leftover data at the end */ static int leftcount = 0; /* Size of leftover data */ length += leftcount; while (length > 3) { switch (leftcount) { case 0 : b = (unsigned)((((((data[0] << 8) | data[1]) << 8) | data[2]) << 8) | data[3]); break; case 1 : b = (unsigned)((((((leftdata[0] << 8) | data[0]) << 8) | data[1]) << 8) | data[2]); break; case 2 : b = (unsigned)((((((leftdata[0] << 8) | leftdata[1]) << 8) | data[0]) << 8) | data[1]); break; case 3 : b = (unsigned)((((((leftdata[0] << 8) | leftdata[1]) << 8) | leftdata[2]) << 8) | data[0]); break; } if (col >= 76) { col = 0; putc('\n', out); } if (b == 0) { putc('z', out); col ++; } else { c[4] = (b % 85) + '!'; b /= 85; c[3] = (b % 85) + '!'; b /= 85; c[2] = (b % 85) + '!'; b /= 85; c[1] = (b % 85) + '!'; b /= 85; c[0] = (uchar)(b + '!'); fwrite(c, 1, 5, out); col += 5; } data += 4 - leftcount; length -= 4 - leftcount; leftcount = 0; } if (length > 0) { // Copy any remainder into the leftdata array... if ((length - leftcount) > 0) memcpy(leftdata + leftcount, data, (size_t)(length - leftcount)); memset(leftdata + length, 0, (size_t)(4 - length)); leftcount = length; } if (eod) { // Do the end-of-data dance... if (col >= 76) { col = 0; putc('\n', out); } if (leftcount > 0) { // Write the remaining bytes as needed... b = (unsigned)((((((leftdata[0] << 8) | leftdata[1]) << 8) | leftdata[2]) << 8) | leftdata[3]); c[4] = (b % 85) + '!'; b /= 85; c[3] = (b % 85) + '!'; b /= 85; c[2] = (b % 85) + '!'; b /= 85; c[1] = (b % 85) + '!'; b /= 85; c[0] = (uchar)(b + '!'); fwrite(c, (size_t)(leftcount + 1), 1, out); leftcount = 0; } fputs("~>\n", out); col = 0; } } #endif // HTMLDOC_ASCII85 /* * JPEG library destination data manager. These routines direct * compressed data from libjpeg into the PDF or PostScript file. */ static FILE *jpg_file; /* JPEG file */ static uchar jpg_buf[8192]; /* JPEG buffer */ static jpeg_destination_mgr jpg_dest; /* JPEG destination manager */ static struct jpeg_error_mgr jerr; /* JPEG error handler */ /* * 'jpg_init()' - Initialize the JPEG destination. */ static void jpg_init(j_compress_ptr cinfo) /* I - Compressor info */ { (void)cinfo; jpg_dest.next_output_byte = jpg_buf; jpg_dest.free_in_buffer = sizeof(jpg_buf); } /* * 'jpg_empty()' - Empty the JPEG output buffer. */ static boolean /* O - True if buffer written OK */ jpg_empty(j_compress_ptr cinfo) /* I - Compressor info */ { (void)cinfo; if (PSLevel > 0) #ifdef HTMLDOC_ASCII85 ps_ascii85(jpg_file, jpg_buf, sizeof(jpg_buf)); #else ps_hex(jpg_file, jpg_buf, sizeof(jpg_buf)); #endif // HTMLDOC_ASCII85 else flate_write(jpg_file, jpg_buf, sizeof(jpg_buf)); jpg_dest.next_output_byte = jpg_buf; jpg_dest.free_in_buffer = sizeof(jpg_buf); return (TRUE); } /* * 'jpg_term()' - Write the last JPEG data to the file. */ static void jpg_term(j_compress_ptr cinfo) /* I - Compressor info */ { int nbytes; /* Number of bytes to write */ (void)cinfo; nbytes = sizeof(jpg_buf) - jpg_dest.free_in_buffer; if (PSLevel > 0) #ifdef HTMLDOC_ASCII85 ps_ascii85(jpg_file, jpg_buf, nbytes); #else ps_hex(jpg_file, jpg_buf, nbytes); #endif // HTMLDOC_ASCII85 else flate_write(jpg_file, jpg_buf, nbytes); } /* * 'jpg_setup()' - Setup the JPEG compressor for writing an image. 
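 *
 * Installs jpg_init()/jpg_empty()/jpg_term() as a custom libjpeg
 * destination manager so the compressed data is written directly into
 * the PostScript or PDF stream, and uses 1x1 sampling factors for
 * PostScript output to match Adobe's JPEG usage.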
*/ static void jpg_setup(FILE *out, /* I - Output file */ image_t *img, /* I - Output image */ j_compress_ptr cinfo) /* I - Compressor info */ { int i; // Looping var jpg_file = out; cinfo->err = jpeg_std_error(&jerr); jpeg_create_compress(cinfo); cinfo->dest = &jpg_dest; jpg_dest.init_destination = jpg_init; jpg_dest.empty_output_buffer = jpg_empty; jpg_dest.term_destination = jpg_term; cinfo->image_width = (JDIMENSION)img->width; cinfo->image_height = (JDIMENSION)img->height; cinfo->input_components = img->depth; cinfo->in_color_space = img->depth == 1 ? JCS_GRAYSCALE : JCS_RGB; jpeg_set_defaults(cinfo); jpeg_set_quality(cinfo, OutputJPEG, TRUE); // Update things when writing to PS files... if (PSLevel) { // Adobe uses sampling == 1 for (i = 0; i < img->depth; i ++) { cinfo->comp_info[i].h_samp_factor = 1; cinfo->comp_info[i].v_samp_factor = 1; } } cinfo->write_JFIF_header = FALSE; cinfo->write_Adobe_marker = TRUE; jpeg_start_compress(cinfo, TRUE); } /* * 'compare_rgb()' - Compare two RGB colors... */ static int /* O - -1 if rgb1<rgb2, etc. */ compare_rgb(unsigned *rgb1, /* I - First color */ unsigned *rgb2) /* I - Second color */ { return ((int)*rgb1 - (int)*rgb2); } /* * 'write_image()' - Write an image to the given output file... */ static void write_image(FILE *out, /* I - Output file */ render_t *r, /* I - Image to write */ int write_obj) /* I - Write an object? */ { int i, j, k, m, /* Looping vars */ ncolors; /* Number of colors */ uchar *pixel, /* Current pixel */ *indices, /* New indexed pixel array */ *indptr; /* Current index */ int indwidth, /* Width of indexed line */ indbits; /* Bits per index */ int max_colors; /* Max colors to use */ unsigned colors[256], /* Colormap values */ key, /* Color key */ *match; /* Matching color value */ uchar grays[256], /* Grayscale usage */ cmap[256][3]; /* Colormap */ image_t *img; /* Image */ struct jpeg_compress_struct cinfo; /* JPEG compressor */ uchar *data, /* PS Level 3 image data */ *dataptr, /* Pointer into image data */ *maskptr; /* Pointer into mask data */ /* * See if we can optimize the image as indexed without color loss... */ img = r->data.image; ncolors = 0; indices = NULL; indwidth = 0; if (!img->pixels && !img->obj) image_load(img->filename, !OutputColor, 1); // Note: Acrobat 6 tries to decrypt the colormap of indexed in-line images twice, which // is 1) not consistent with prior Acrobat releases and 2) in violation of their // PDF spec. The "img->use > 1 || !Encryption" test prevents the use of indexed // in-line images when encryption is enabled. // // We are filing a bug on this with Adobe, but if history is any indicator, we are // stuck with this workaround forever... if (PSLevel != 1 && PDFVersion >= 12 && img->obj == 0 && (img->use > 1 || !Encryption)) { if (img->depth == 1) { /* * Greyscale image... */ memset(grays, 0, sizeof(grays)); for (i = img->width * img->height, pixel = img->pixels; i > 0; i --, pixel ++) if (!grays[*pixel]) { if (ncolors >= 16) break; grays[*pixel] = 1; ncolors ++; } if (i == 0) { for (i = 0, j = 0; i < 256; i ++) if (grays[i]) { colors[j] = (unsigned)((((i << 8) | i) << 8) | i); grays[i] = (uchar)j; j ++; } } else ncolors = 0; } else { /* * Color image... 
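 *
 * Build a sorted palette of unique 24-bit values using bsearch()/qsort();
 * if more than max_colors are found (16 when JPEG output is enabled
 * without Flate compression, 256 otherwise), fall back to truecolor.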
*/ if (OutputJPEG && !Compression) max_colors = 16; else max_colors = 256; for (i = img->width * img->height, pixel = img->pixels, match = NULL; i > 0; i --, pixel += 3) { key = (unsigned)((((pixel[0] << 8) | pixel[1]) << 8) | pixel[2]); if (!match || *match != key) { if (ncolors > 0) match = (unsigned *)bsearch(&key, colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); else match = NULL; } if (match == NULL) { if (ncolors >= max_colors) break; colors[ncolors] = key; ncolors ++; if (ncolors > 1) qsort(colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); } } if (i > 0) ncolors = 0; } } if (ncolors > 0) { if (PSLevel == 3 && img->mask) indbits = 8; else if (ncolors <= 2) indbits = 1; else if (ncolors <= 4) indbits = 2; else if (ncolors <= 16) indbits = 4; else indbits = 8; indwidth = (img->width * indbits + 7) / 8; indices = (uchar *)calloc((size_t)indwidth, (size_t)(img->height + 1)); // height + 1 for PS odd-row-count bug if (img->depth == 1) { /* * Convert a grayscale image... */ switch (indbits) { case 1 : for (i = img->height, pixel = img->pixels, indptr = indices; i > 0; i --) { for (j = img->width, k = 7; j > 0; j --, k = (k + 7) & 7, pixel ++) switch (k) { case 7 : *indptr = (uchar)(grays[*pixel] << 7); break; default : *indptr |= (uchar)(grays[*pixel] << k); break; case 0 : *indptr++ |= (uchar)grays[*pixel]; break; } if (k != 7) indptr ++; } break; case 2 : for (i = img->height, pixel = img->pixels, indptr = indices; i > 0; i --) { for (j = img->width, k = 0; j > 0; j --, k = (k + 1) & 3, pixel ++) switch (k) { case 0 : *indptr = (uchar)(grays[*pixel] << 6); break; case 1 : *indptr |= (uchar)(grays[*pixel] << 4); break; case 2 : *indptr |= (uchar)(grays[*pixel] << 2); break; case 3 : *indptr++ |= (uchar)grays[*pixel]; break; } if (k) indptr ++; } break; case 4 : for (i = img->height, pixel = img->pixels, indptr = indices; i > 0; i --) { for (j = img->width, k = 0; j > 0; j --, k ^= 1, pixel ++) if (k) *indptr++ |= grays[*pixel]; else *indptr = (uchar)(grays[*pixel] << 4); if (k) indptr ++; } break; } } else { /* * Convert a color image... 
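 *
 * Pack the palette indices at 1, 2, 4, or 8 bits per pixel, padding each
 * row out to a whole byte, mirroring the grayscale packing above.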
*/ switch (indbits) { case 1 : for (i = img->height, pixel = img->pixels, indptr = indices, match = colors; i > 0; i --) { for (j = img->width, k = 7; j > 0; j --, k = (k + 7) & 7, pixel += 3) { key = (unsigned)((((pixel[0] << 8) | pixel[1]) << 8) | pixel[2]); if (*match != key) match = (unsigned *)bsearch(&key, colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); m = match - colors; switch (k) { case 7 : *indptr = (uchar)(m << 7); break; default : *indptr |= (uchar)(m << k); break; case 0 : *indptr++ |= (uchar)m; break; } } if (k != 7) indptr ++; } break; case 2 : for (i = img->height, pixel = img->pixels, indptr = indices, match = colors; i > 0; i --) { for (j = img->width, k = 0; j > 0; j --, k = (k + 1) & 3, pixel += 3) { key = (unsigned)((((pixel[0] << 8) | pixel[1]) << 8) | pixel[2]); if (*match != key) match = (unsigned *)bsearch(&key, colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); m = match - colors; switch (k) { case 0 : *indptr = (uchar)(m << 6); break; case 1 : *indptr |= (uchar)(m << 4); break; case 2 : *indptr |= (uchar)(m << 2); break; case 3 : *indptr++ |= (uchar)m; break; } } if (k) indptr ++; } break; case 4 : for (i = img->height, pixel = img->pixels, indptr = indices, match = colors; i > 0; i --) { for (j = img->width, k = 0; j > 0; j --, k ^= 1, pixel += 3) { key = (unsigned)((((pixel[0] << 8) | pixel[1]) << 8) | pixel[2]); if (*match != key) match = (unsigned *)bsearch(&key, colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); m = match - colors; if (k) *indptr++ |= (uchar)m; else *indptr = (uchar)(m << 4); } if (k) indptr ++; } break; case 8 : for (i = img->height, pixel = img->pixels, indptr = indices, match = colors; i > 0; i --) { for (j = img->width; j > 0; j --, pixel += 3, indptr ++) { key = (unsigned)((((pixel[0] << 8) | pixel[1]) << 8) | pixel[2]); if (*match != key) match = (unsigned *)bsearch(&key, colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); *indptr = (uchar)(match - colors); } } break; } } } else indbits = 8; if (ncolors == 1) { /* * Adobe doesn't like 1 color images... */ ncolors = 2; colors[1] = 0; } /* * Now write the image... */ switch (PSLevel) { case 0 : /* PDF */ if (!write_obj) flate_printf(out, "q %.1f 0 0 %.1f %.1f %.1f cm\n", r->width, r->height, r->x, r->y); if (img->obj) { if (img->mask && PDFVersion < 13) write_imagemask(out, r); flate_printf(out, "/I%d Do Q\n", img->obj); break; } if (img->mask && write_obj && PDFVersion >= 13) { // We have a mask image, write it! pdf_start_object(out); fputs("/Type/XObject/Subtype/Image", out); fputs("/ColorSpace/DeviceGray", out); if (img->maskscale == 8) fprintf(out, "/Width %d/Height %d/BitsPerComponent 8", img->width, img->height); else fprintf(out, "/Width %d/Height %d/BitsPerComponent 1/ImageMask true", img->width * img->maskscale, img->height * img->maskscale); if (Compression) fputs("/Filter/FlateDecode", out); pdf_start_stream(out); flate_open_stream(out); if (img->maskscale == 8) flate_write(out, img->mask, img->width * img->height); else flate_write(out, img->mask, img->maskwidth * img->height * img->maskscale); flate_close_stream(out); pdf_end_object(out); } if (write_obj) { // Write an image object... 
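// The image becomes a PDF XObject: an Indexed/DeviceRGB colormap
// (RC4-encrypted when encryption is enabled) or plain DeviceGray/DeviceRGB,
// an /SMask or /Mask reference for transparency, and sample data
// compressed with Flate and/or DCT as configured.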
img->obj = pdf_start_object(out); fputs("/Type/XObject/Subtype/Image", out); if (img->mask && PDFVersion >= 13) { if (img->maskscale == 8) fprintf(out, "/SMask %d 0 R", img->obj - 1); else fprintf(out, "/Mask %d 0 R", img->obj - 1); } if (ncolors > 0) { for (i = 0; i < ncolors; i ++) { cmap[i][0] = (uchar)(colors[i] >> 16); cmap[i][1] = (uchar)(colors[i] >> 8); cmap[i][2] = (uchar)colors[i]; } if (Encryption) { // Encrypt the colormap... encrypt_init(); rc4_encrypt(&encrypt_state, cmap[0], cmap[0], (unsigned)(ncolors * 3)); } fprintf(out, "/ColorSpace[/Indexed/DeviceRGB %d<", ncolors - 1); for (i = 0; i < ncolors; i ++) fprintf(out, "%02X%02X%02X", cmap[i][0], cmap[i][1], cmap[i][2]); fputs(">]", out); } else if (img->depth == 1) fputs("/ColorSpace/DeviceGray", out); else fputs("/ColorSpace/DeviceRGB", out); #ifdef HTMLDOC_INTERPOLATION if (ncolors != 2) fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION if (Compression && (ncolors || !OutputJPEG)) fputs("/Filter/FlateDecode", out); else if (OutputJPEG && ncolors == 0) { if (Compression) fputs("/Filter[/FlateDecode/DCTDecode]", out); else fputs("/Filter/DCTDecode", out); } fprintf(out, "/Width %d/Height %d/BitsPerComponent %d", img->width, img->height, indbits); pdf_start_stream(out); flate_open_stream(out); if (OutputJPEG && ncolors == 0) { jpg_setup(out, img, &cinfo); for (i = img->height, pixel = img->pixels; i > 0; i --, pixel += img->width * img->depth) jpeg_write_scanlines(&cinfo, &pixel, 1); jpeg_finish_compress(&cinfo); jpeg_destroy_compress(&cinfo); } else { if (ncolors > 0) flate_write(out, indices, indwidth * img->height); else flate_write(out, img->pixels, img->width * img->height * img->depth); } flate_close_stream(out); pdf_end_object(out); } else { // Put the image in-line... 
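// Emit the image in-line in the page stream using the abbreviated
// BI ... ID ... EI syntax with /CS, /W, /H, and /BPC keys (plus /F/DCT
// when the data is JPEG-compressed).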
flate_puts("BI", out); if (ncolors > 0) { flate_printf(out, "/CS[/I/RGB %d<", ncolors - 1); for (i = 0; i < ncolors; i ++) flate_printf(out, "%02X%02X%02X", colors[i] >> 16, (colors[i] >> 8) & 255, colors[i] & 255); flate_puts(">]", out); } else if (img->depth == 1) flate_puts("/CS/G", out); else flate_puts("/CS/RGB", out); if (ncolors != 2) flate_puts("/I true", out); flate_printf(out, "/W %d/H %d/BPC %d", img->width, img->height, indbits); if (ncolors > 0) { flate_puts(" ID\n", out); flate_write(out, indices, indwidth * img->height, 1); } else if (OutputJPEG) { flate_puts("/F/DCT ID\n", out); jpg_setup(out, img, &cinfo); for (i = img->height, pixel = img->pixels; i > 0; i --, pixel += img->width * img->depth) jpeg_write_scanlines(&cinfo, &pixel, 1); jpeg_finish_compress(&cinfo); jpeg_destroy_compress(&cinfo); } else { flate_puts(" ID\n", out); flate_write(out, img->pixels, img->width * img->height * img->depth, 1); } flate_write(out, (uchar *)"\nEI\nQ\n", 6, 1); } break; case 1 : /* PostScript, Level 1 */ fputs("GS", out); fprintf(out, "[%.1f 0 0 %.1f %.1f %.1f]CM", r->width, r->height, r->x, r->y); if (img->mask) write_imagemask(out, r); fprintf(out, "/picture %d string def\n", img->width * img->depth); if (img->depth == 1) fprintf(out, "%d %d 8 [%d 0 0 %d 0 %d] {currentfile picture readhexstring pop} image\n", img->width, img->height, img->width, -img->height, img->height); else fprintf(out, "%d %d 8 [%d 0 0 %d 0 %d] {currentfile picture readhexstring pop} false 3 colorimage\n", img->width, img->height, img->width, -img->height, img->height); ps_hex(out, img->pixels, img->width * img->height * img->depth); fputs("GR\n", out); break; case 3 : /* PostScript, Level 3 */ // Fallthrough to Level 2 output if compression is disabled and // we aren't doing transparency... if ((Compression && (!OutputJPEG || ncolors > 0)) || (img->mask && img->maskscale == 8)) { fputs("GS", out); fprintf(out, "[%.1f 0 0 %.1f %.1f %.1f]CM", r->width, r->height, r->x, r->y); if (img->mask && img->maskscale != 8) write_imagemask(out, r); if (ncolors > 0) { if (ncolors <= 2) ncolors = 2; /* Adobe doesn't like 1 color images... 
*/ fprintf(out, "[/Indexed/DeviceRGB %d\n<", ncolors - 1); for (i = 0; i < ncolors; i ++) { fprintf(out, "%02X%02X%02X", colors[i] >> 16, (colors[i] >> 8) & 255, colors[i] & 255); if ((i % 13) == 12) putc('\n', out); } fputs(">]setcolorspace\n", out); if (img->mask && img->maskscale == 8) fprintf(out, "<<" "/ImageType 3" "/InterleaveType 1" "/MaskDict<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent 8" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[0 1]" ">>\n" "/DataDict", img->width, img->height, img->width, -img->height, img->height); fprintf(out, "<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent %d" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[0 %d]", img->width, img->height, indbits, img->width, -img->height, img->height, (1 << indbits) - 1); #ifdef HTMLDOC_INTERPOLATION if (ncolors != 2) fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION #ifdef HTMLDOC_ASCII85 fputs("/DataSource currentfile/ASCII85Decode filter", out); #else fputs("/DataSource currentfile/ASCIIHexDecode filter", out); #endif // HTMLDOC_ASCII85 if (Compression) fputs("/FlateDecode filter", out); fputs(">>\n", out); if (img->mask && img->maskscale == 8) fputs(">>\n", out); fputs("image\n", out); flate_open_stream(out); if (img->mask && img->maskscale == 8) { data = (uchar *)malloc((size_t)(img->width * 2)); for (i = 0, maskptr = img->mask, indptr = indices; i < img->height; i ++) { for (j = img->width, dataptr = data; j > 0; j --) { *dataptr++ = *maskptr++; *dataptr++ = *indptr++; } flate_write(out, data, img->width * 2); } free(data); } else flate_write(out, indices, indwidth * img->height); flate_close_stream(out); } else { if (img->depth == 1) fputs("/DeviceGray setcolorspace", out); else fputs("/DeviceRGB setcolorspace", out); if (img->mask && img->maskscale == 8) fprintf(out, "<<" "/ImageType 3" "/InterleaveType 1" "/MaskDict<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent 8" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[0 1]" ">>\n" "/DataDict", img->width, img->height, img->width, -img->height, img->height); fprintf(out, "<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent 8" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[%s]", img->width, img->height, img->width, -img->height, img->height, img->depth == 1 ? 
"0 1" : "0 1 0 1 0 1"); #ifdef HTMLDOC_INTERPOLATION fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION #ifdef HTMLDOC_ASCII85 fputs("/DataSource currentfile/ASCII85Decode filter", out); #else fputs("/DataSource currentfile/ASCIIHexDecode filter", out); #endif // HTMLDOC_ASCII85 if (Compression) fputs("/FlateDecode filter", out); fputs(">>\n", out); if (img->mask && img->maskscale == 8) fputs(">>\n", out); fputs("image\n", out); flate_open_stream(out); if (img->mask && img->maskscale == 8) { data = (uchar *)malloc((size_t)(img->width * (img->depth + 1))); for (i = 0, maskptr = img->mask, pixel = img->pixels; i < img->height; i ++) { if (img->depth == 1) { for (j = img->width, dataptr = data; j > 0; j --) { *dataptr++ = *maskptr++; *dataptr++ = *pixel++; } } else { for (j = img->width, dataptr = data; j > 0; j --) { *dataptr++ = *maskptr++; *dataptr++ = *pixel++; *dataptr++ = *pixel++; *dataptr++ = *pixel++; } } flate_write(out, data, img->width * (img->depth + 1)); } free(data); } else flate_write(out, img->pixels, img->width * img->height * img->depth); flate_close_stream(out); } fputs("GR\n", out); break; } case 2 : /* PostScript, Level 2 */ fputs("GS", out); fprintf(out, "[%.1f 0 0 %.1f %.1f %.1f]CM", r->width, r->height, r->x, r->y); if (img->mask) write_imagemask(out, r); if (ncolors > 0) { fprintf(out, "[/Indexed/DeviceRGB %d\n<", ncolors - 1); for (i = 0; i < ncolors; i ++) { fprintf(out, "%02X%02X%02X", colors[i] >> 16, (colors[i] >> 8) & 255, colors[i] & 255); if ((i % 13) == 12) putc('\n', out); } fputs(">]setcolorspace\n", out); fprintf(out, "<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent %d" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[0 %d]", img->width, img->height, indbits, img->width, -img->height, img->height, (1 << indbits) - 1); #ifdef HTMLDOC_INTERPOLATION if (ncolors != 2) fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION #ifdef HTMLDOC_ASCII85 fputs("/DataSource currentfile/ASCII85Decode filter>>image\n", out); ps_ascii85(out, indices, indwidth * img->height, 1); #else fputs("/DataSource currentfile/ASCIIHexDecode filter>>image\n", out); ps_hex(out, indices, indwidth * img->height); // End of data marker... fputs(">\n", out); #endif /* HTMLDOC_ASCII85 */ } else if (OutputJPEG) { if (img->depth == 1) fputs("/DeviceGray setcolorspace\n", out); else fputs("/DeviceRGB setcolorspace\n", out); fprintf(out, "<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent 8" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[%s]", img->width, img->height, img->width, -img->height, img->height, img->depth == 1 ? "0 1" : "0 1 0 1 0 1"); #ifdef HTMLDOC_INTERPOLATION fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION #ifdef HTMLDOC_ASCII85 fputs("/DataSource currentfile/ASCII85Decode filter/DCTDecode filter" ">>image\n", out); #else fputs("/DataSource currentfile/ASCIIHexDecode filter/DCTDecode filter" ">>image\n", out); #endif // HTMLDOC_ASCII85 jpg_setup(out, img, &cinfo); for (i = img->height, pixel = img->pixels; i > 0; i --, pixel += img->width * img->depth) jpeg_write_scanlines(&cinfo, &pixel, 1); jpeg_finish_compress(&cinfo); jpeg_destroy_compress(&cinfo); #ifdef HTMLDOC_ASCII85 ps_ascii85(out, (uchar *)"", 0, 1); #else // End of data marker... 
fputs(">\n", out); #endif // HTMLDOC_ASCII85 } else { if (img->depth == 1) fputs("/DeviceGray setcolorspace\n", out); else fputs("/DeviceRGB setcolorspace\n", out); fprintf(out, "<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent 8" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[%s]", img->width, img->height, img->width, -img->height, img->height, img->depth == 1 ? "0 1" : "0 1 0 1 0 1"); #ifdef HTMLDOC_INTERPOLATION fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION #ifdef HTMLDOC_ASCII85 fputs("/DataSource currentfile/ASCII85Decode filter" ">>image\n", out); ps_ascii85(out, img->pixels, img->width * img->height * img->depth, 1); #else fputs("/DataSource currentfile/ASCIIHexDecode filter" ">>image\n", out); ps_hex(out, img->pixels, img->width * img->depth * img->height); // End of data marker... fputs(">\n", out); #endif // HTMLDOC_ASCII85 } fputs("GR\n", out); break; } if (ncolors > 0) free(indices); image_unload(img); } /* * 'write_imagemask()' - Write an imagemask to the output file... */ static void write_imagemask(FILE *out, /* I - Output file */ render_t *r) /* I - Image to write */ { image_t *img; /* Current image */ int x, y; /* Position in mask image */ int startx, count; /* Start and count */ uchar *ptr, /* Pointer into mask image */ byte, /* Current byte */ bit; /* Current bit */ float scalex, scaley; /* 1/(w-1) and 1/(h-1) scaling factors */ int width, height; /* Scaled width and height */ img = r->data.image; width = img->width * img->maskscale; height = img->height * img->maskscale; scalex = 1.0f / width; scaley = 1.0f / height; switch (PSLevel) { case 0 : // PDF break; default : // PostScript fputs("\nnewpath\n", out); break; } for (y = 0; y < height; y ++) { for (x = 0, ptr = img->mask + (height - y - 1) * img->maskwidth, bit = 128, byte = *ptr++, startx = 0, count = 0; x < width; x ++) { if (!(bit & byte)) { if (!count) startx = x; count ++; } else if (count) { switch (PSLevel) { case 0 : // PDF flate_printf(out, "%.6f %.6f %.6f %.6f re\n", (float)startx * scalex, (float)y * scaley, (float)count * scalex, 1.0f * scaley); break; default : // PostScript fprintf(out, "%.6f %.6f %.6f %.6f re\n", (float)startx * scalex, (float)y * scaley, (float)count * scalex, 1.0f * scaley); break; } count = 0; } if (bit > 1) bit >>= 1; else { bit = 128; byte = *ptr++; } } if (count) { switch (PSLevel) { case 0 : // PDF flate_printf(out, "%.6f %.6f %.6f %.6f re\n", (float)startx * scalex, (float)y * scaley, (float)count * scalex, 1.0f * scaley); break; default : // PostScript fprintf(out, "%.6f %.6f %.6f %.6f re\n", (float)startx * scalex, (float)y * scaley, (float)count * scalex, 1.0f * scaley); break; } } } switch (PSLevel) { case 0 : // PDF flate_puts("W n\n", out); break; default : // PostScript fputs("clip\n", out); break; } } /* * 'write_prolog()' - Write the file prolog... 
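 *
 * For PostScript output this writes the DSC header, embedded fonts, the
 * font encoding vector, and the htmldoc-page procset; for PDF output it
 * writes the file header, the optional RC4 encryption dictionary, and
 * the document information dictionary.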
*/ static void write_prolog(FILE *out, /* I - Output file */ int page_count, /* I - Number of pages (0 if not known) */ uchar *author, /* I - Author of document */ uchar *creator, /* I - Application that generated the HTML file */ uchar *copyright, /* I - Copyright (if any) on the document */ uchar *keywords, /* I - Search keywords */ uchar *subject) /* I - Subject */ { FILE *prolog; /* PostScript prolog file */ int i, j, /* Looping vars */ encoding_object; /* Font encoding object */ int page; /* Current page */ render_t *r; /* Current render data */ int fonts_used[TYPE_MAX][STYLE_MAX]; /* Whether or not a font is used */ int font_desc[TYPE_MAX][STYLE_MAX]; /* Font descriptor objects */ char temp[1024]; /* Temporary string */ md5_state_t md5; /* MD5 state */ md5_byte_t digest[16]; /* MD5 digest value */ rc4_context_t rc4; /* RC4 context */ uchar owner_pad[32], /* Padded owner password */ owner_key[32], /* Owner key */ user_pad[32], /* Padded user password */ user_key[32]; /* User key */ uchar perm_bytes[4]; /* Permission bytes */ unsigned perm_value; /* Permission value, unsigned */ static unsigned char pad[32] = { /* Padding for passwords */ 0x28, 0xbf, 0x4e, 0x5e, 0x4e, 0x75, 0x8a, 0x41, 0x64, 0x00, 0x4e, 0x56, 0xff, 0xfa, 0x01, 0x08, 0x2e, 0x2e, 0x00, 0xb6, 0xd0, 0x68, 0x3e, 0x80, 0x2f, 0x0c, 0xa9, 0xfe, 0x64, 0x53, 0x69, 0x7a }; /* * See what fonts are used... */ memset(fonts_used, 0, sizeof(fonts_used)); fonts_used[HeadFootType][HeadFootStyle] = 1; for (page = 0; page < (int)num_pages; page ++) for (r = pages[page].start; r != NULL; r = r->next) if (r->type == RENDER_TEXT) fonts_used[r->data.text.typeface][r->data.text.style] = 1; #ifdef DEBUG puts("The following fonts were used:"); for (i = 0; i < TYPE_MAX; i ++) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j]) printf(" %s\n", _htmlFonts[i][j]); #endif // DEBUG /* * Generate the heading... */ if (PSLevel > 0) { /* * Write PostScript prolog stuff... */ if (XRXComments) { int start, end; // Start and end of document pages... int count; // Number of exception pages in this range... // The following comments are Xerox job ticket information that // is used on the high-end Laser Printing Systems rather than // embedded commands... fputs("%XRXbegin: 001.0300\n", out); fputs("%XRXPDLformat: PS-Adobe\n", out); if (doc_title) fprintf(out, "%%XRXtitle: %s\n", doc_title); if (OutputFiles) { // Output a single chapter... if (chapter < 0) { start = 0; end = chapter_outstarts[1] - 1; } else { start = chapter_outstarts[chapter]; end = chapter_outends[chapter]; } } else { start = 0; end = 0; } if (pages[outpages[start].pages[0]].duplex) { if (pages[outpages[start].pages[0]].landscape) fputs("%XRXrequirements: duplex(tumble)\n", out); else fputs("%XRXrequirements: duplex\n", out); } else fputs("%XRXrequirements: simplex\n", out); fputs("%XRXdisposition: PRINT\n", out); fputs("%XRXsignature: False\n", out); fprintf(out, "%%XRXpaperType-size: %.0f %.0f\n", pages[outpages[start].pages[0]].width * 25.4f / 72.0f, pages[outpages[start].pages[0]].length * 25.4f / 72.0f); if (pages[outpages[start].pages[0]].media_type[0]) fprintf(out, "%%XRXpaperType-preFinish: %s 0 0\n", pages[start].media_type); if (pages[outpages[start].pages[0]].media_color[0]) fprintf(out, "%%XRXdocumentPaperColors: %c%s\n", tolower(pages[start].media_color[0]), pages[start].media_color + 1); if (OutputFiles) { // Handle document settings per-chapter... 
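// Emit one %XRXpageExceptions record per run of consecutive pages whose
// size, media type, media color, or duplex setting differs from the
// first page of the document.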
for (i = start + 1; i < end; i += count) { if (pages[outpages[i].pages[0]].width != pages[0].width || pages[outpages[i].pages[0]].length != pages[0].length || strcmp(pages[outpages[i].pages[0]].media_type, pages[0].media_type) != 0 || strcmp(pages[outpages[i].pages[0]].media_color, pages[0].media_color) != 0 || pages[outpages[i].pages[0]].duplex != pages[0].duplex) { for (count = 1; (i + count) <= end; count ++) if (pages[outpages[i].pages[0]].width != pages[outpages[i + count].pages[0]].width || pages[outpages[i].pages[0]].length != pages[outpages[i + count].pages[0]].length || strcmp(pages[outpages[i].pages[0]].media_type, pages[outpages[i + count].pages[0]].media_type) != 0 || strcmp(pages[outpages[i].pages[0]].media_color, pages[outpages[i + count].pages[0]].media_color) != 0 || pages[outpages[i].pages[0]].duplex != pages[outpages[i + count].pages[0]].duplex) break; fprintf(out, "%%XRXpageExceptions: %d %d %.0f %.0f %c%s opaque %s 0 0\n", i + 1, i + count, pages[outpages[i].pages[0]].width * 25.4f / 72.0f, pages[outpages[i].pages[0]].length * 25.4f / 72.0f, tolower(pages[outpages[i].pages[0]].media_color[0]), pages[outpages[i].pages[0]].media_color + 1, pages[outpages[i].pages[0]].media_type[0] ? pages[outpages[i].pages[0]].media_type : "Plain"); if (pages[outpages[i].pages[0]].duplex && pages[outpages[i].pages[0]].landscape) fprintf(out, "%%XRXpageExceptions-plex: %d %d duplex(tumble)\n", i + 1, i + count); else if (pages[outpages[i].pages[0]].duplex) fprintf(out, "%%XRXpageExceptions-plex: %d %d duplex\n", i + 1, i + count); else fprintf(out, "%%XRXpageExceptions-plex: %d %d simplex\n", i + 1, i + count); } else count = 1; } } else { // All pages are in a single file... for (j = (TocLevels == 0); j <= TocDocCount; j ++) { start = chapter_outstarts[j]; end = chapter_outends[j]; for (i = start + 1; i < end; i += count) { if (pages[outpages[i].pages[0]].width != pages[0].width || pages[outpages[i].pages[0]].length != pages[0].length || strcmp(pages[outpages[i].pages[0]].media_type, pages[0].media_type) != 0 || strcmp(pages[outpages[i].pages[0]].media_color, pages[0].media_color) != 0 || pages[outpages[i].pages[0]].duplex != pages[0].duplex) { for (count = 1; (i + count) < end; count ++) if (pages[outpages[i].pages[0]].width != pages[outpages[i + count].pages[0]].width || pages[outpages[i].pages[0]].length != pages[outpages[i + count].pages[0]].length || strcmp(pages[outpages[i].pages[0]].media_type, pages[outpages[i + count].pages[0]].media_type) != 0 || strcmp(pages[outpages[i].pages[0]].media_color, pages[outpages[i + count].pages[0]].media_color) != 0 || pages[outpages[i].pages[0]].duplex != pages[outpages[i + count].pages[0]].duplex) break; fprintf(out, "%%XRXpageExceptions: %d %d %.0f %.0f %c%s opaque %s 0 0\n", i + 1, i + count, pages[outpages[i].pages[0]].width * 25.4f / 72.0f, pages[outpages[i].pages[0]].length * 25.4f / 72.0f, tolower(pages[outpages[i].pages[0]].media_color[0]), pages[outpages[i].pages[0]].media_color + 1, pages[outpages[i].pages[0]].media_type[0] ? 
pages[outpages[i].pages[0]].media_type : "Plain"); if (pages[outpages[i].pages[0]].duplex && pages[outpages[i].pages[0]].landscape) fprintf(out, "%%XRXpageExceptions-plex: %d %d duplex(tumble)\n", i + 1, i + count); else if (pages[outpages[i].pages[0]].duplex) fprintf(out, "%%XRXpageExceptions-plex: %d %d duplex\n", i + 1, i + count); else fprintf(out, "%%XRXpageExceptions-plex: %d %d simplex\n", i + 1, i + count); } else count = 1; } } } fputs("%XRXend\n", out); } fputs("%!PS-Adobe-3.0\n", out); if (Landscape) fprintf(out, "%%%%BoundingBox: 0 0 %d %d\n", PageLength, PageWidth); else fprintf(out, "%%%%BoundingBox: 0 0 %d %d\n", PageWidth, PageLength); fprintf(out,"%%%%LanguageLevel: %d\n", PSLevel); fputs("%%Creator: " HTMLDOC_PRODUCER "\n", out); fprintf(out, "%%%%CreationDate: D:%04d%02d%02d%02d%02d%02d+0000\n", doc_date.tm_year + 1900, doc_date.tm_mon + 1, doc_date.tm_mday, doc_date.tm_hour, doc_date.tm_min, doc_date.tm_sec); if (doc_title != NULL) fprintf(out, "%%%%Title: %s\n", doc_title); if (author != NULL) fprintf(out, "%%%%Author: %s\n", author); if (creator != NULL) fprintf(out, "%%%%Generator: %s\n", creator); if (copyright != NULL) fprintf(out, "%%%%Copyright: %s\n", copyright); if (keywords != NULL) fprintf(out, "%%%%Keywords: %s\n", keywords); if (subject != NULL) fprintf(out, "%%%%Subject: %s\n", keywords); if (page_count > 0) fprintf(out, "%%%%Pages: %d\n", page_count); else fputs("%%Pages: (atend)\n", out); if (!EmbedFonts) { fputs("%%DocumentNeededResources:\n", out); for (i = 0; i < TYPE_MAX; i ++) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j] && _htmlStandardFonts[i]) fprintf(out, "%%%%+ font %s\n", _htmlFonts[i][j]); } fputs("%%DocumentProvidedResources:\n", out); for (i = 0; i < TYPE_MAX; i ++) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j] && (EmbedFonts || !_htmlStandardFonts[i])) fprintf(out, "%%%%+ font %s\n", _htmlFonts[i][j]); fputs("%%DocumentData: Clean7bit\n", out); fputs("%%EndComments\n", out); fputs("%%BeginProlog\n", out); /* * Embed fonts? */ for (i = 0; i < TYPE_MAX; i ++) { if (EmbedFonts || !_htmlStandardFonts[i]) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j]) write_type1(out, (typeface_t)i, (style_t)j); } /* * Procedures used throughout the document... */ const char *version = SVERSION; fprintf(out, "%%%%BeginResource: procset htmldoc-page 1.8 %s\n", version + 4); fputs("/BD{bind def}bind def", out); fputs("/B{dup 0 exch rlineto exch 0 rlineto neg 0 exch rlineto\n" "closepath stroke}BD", out); fputs("/C{setrgbcolor}BD\n", out); fputs("/CM{concat}BD", out); fputs("/DF{findfont dup length dict begin{1 index/FID ne{def}{pop pop}\n" "ifelse}forall/Encoding fontencoding def currentdict end definefont pop}BD\n", out); fputs("/F{dup 0 exch rlineto exch 0 rlineto neg 0 exch rlineto closepath fill}BD\n", out); fputs("/FS{/hdFontSize exch def}BD", out); fputs("/G{setgray}BD\n", out); fputs("/GS{gsave}BD", out); fputs("/GR{grestore}BD", out); fputs("/J{0 exch ashow}BD\n", out); fputs("/L{0 rlineto stroke}BD", out); fputs("/M{moveto}BD", out); fputs("/re{4 2 roll moveto 1 index 0 rlineto 0 exch rlineto neg 0 rlineto closepath}BD\n", out); fputs("/RO{rotate}BD", out); fputs("/S{show}BD", out); fputs("/SC{dup scale}BD\n", out); fputs("/SF{findfont hdFontSize scalefont setfont}BD", out); fputs("/SP{showpage}BD", out); fputs("/T{translate}BD\n", out); fputs("%%EndResource\n", out); /* * Output the font encoding for the current character set... 
For now we * just support 8-bit fonts since true Unicode support needs a very large * number of extra fonts that aren't normally available on a PS printer. */ fputs("/fontencoding[\n", out); for (i = 0, j = 0; i < 256; i ++) { if (_htmlGlyphs[i]) j += strlen(_htmlGlyphs[i]) + 1; else j += 8; if (j > 80) { if (_htmlGlyphs[i]) j = strlen(_htmlGlyphs[i]) + 1; else j = 8; putc('\n', out); } putc('/', out); if (_htmlGlyphs[i]) fputs(_htmlGlyphs[i], out); else fputs(".notdef", out); } fputs("]def\n", out); /* * Fonts... */ for (i = 0; i < TYPE_MAX; i ++) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j]) { if (i < TYPE_SYMBOL) fprintf(out, "/F%x/%s DF\n", i * 4 + j, _htmlFonts[i][j]); else fprintf(out, "/F%x/%s findfont definefont pop\n", i * 4 + j, _htmlFonts[i][j]); } if (PSCommands) { snprintf(temp, sizeof(temp), "%s/data/prolog.ps", _htmlData); if ((prolog = fopen(temp, "rb")) != NULL) { while (fgets(temp, sizeof(temp), prolog) != NULL) fputs(temp, out); fclose(prolog); } else { progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to open data file \"%s\" - %s", temp, strerror(errno)); fprintf(out, "%%%%BeginResource: procset htmldoc-device 1.8 %s\n", version + 4); fputs("languagelevel 1 eq{/setpagedevice{pop}BD}if\n", out); fputs("/SetDuplexMode{<</Duplex 3 index/Tumble 5 index>>setpagedevice " "pop pop}BD\n", out); fputs("/SetMediaColor{pop}BD\n", out); fputs("/SetMediaType{pop}BD\n", out); fputs("/SetMediaPosition{pop}BD\n", out); fputs("/SetPageSize{2 array astore<</PageSize 2 index/ImageableArea " "null>>setpagedevice pop}BD\n", out); fputs("%%EndResource\n", out); } } if (background_image != NULL) ps_write_background(out); fputs("%%EndProlog\n", out); } else { /* * Write PDF prolog stuff... */ fprintf(out, "%%PDF-%.1f\n", 0.1 * PDFVersion); fputs("%\342\343\317\323\n", out); num_objects = 0; /* * Compute the file ID... */ md5_init(&md5); md5_append(&md5, (md5_byte_t *)OutputPath, sizeof(OutputPath)); md5_append(&md5, (md5_byte_t *)&doc_time, sizeof(doc_time)); md5_finish(&md5, file_id); /* * Setup encryption stuff as necessary... */ if (Encryption) { /* * Copy and pad the user password... */ strlcpy((char *)user_pad, UserPassword, sizeof(user_pad)); if ((i = strlen(UserPassword)) < 32) memcpy(user_pad + i, pad, (size_t)(32 - i)); if (OwnerPassword[0]) { /* * Copy and pad the owner password... */ strlcpy((char *)owner_pad, OwnerPassword, sizeof(owner_pad)); if ((i = strlen(OwnerPassword)) < 32) memcpy(owner_pad + i, pad, (size_t)(32 - i)); } else { /* * Generate a pseudo-random owner password... */ srand(time(NULL)); for (i = 0; i < 32; i ++) owner_pad[i] = (uchar)rand(); } /* * What is the key length? * * Acrobat 4.0 and earlier (PDF 1.3 and earlier) allow a maximum of * 40-bits. Acrobat 5.0 and newer support 128-bits. */ if (PDFVersion > 13) encrypt_len = 16; // 128 bits else encrypt_len = 5; // 40 bits /* * Compute the owner key... */ md5_init(&md5); md5_append(&md5, owner_pad, 32); md5_finish(&md5, digest); if (encrypt_len > 5) { // MD5 the result 50 more times... for (i = 0; i < 50; i ++) { md5_init(&md5); md5_append(&md5, digest, 16); md5_finish(&md5, digest); } // Copy the padded user password... memcpy(owner_key, user_pad, 32); // Encrypt the result 20 times... for (i = 0; i < 20; i ++) { // XOR each byte in the key with the loop counter... 
for (j = 0; j < encrypt_len; j ++) encrypt_key[j] = (uchar)(digest[j] ^ i); rc4_init(&rc4, encrypt_key, (size_t)encrypt_len); rc4_encrypt(&rc4, owner_key, owner_key, 32); } } else { rc4_init(&rc4, digest, (size_t)encrypt_len); rc4_encrypt(&rc4, user_pad, owner_key, 32); } /* * Figure out the permissions word; the new N-bit security * handler adds several new permission bits, which we must * simulate... */ perm_value = (unsigned)Permissions; if (encrypt_len > 5) { // N-bit encryption... if (!(perm_value & PDF_PERM_COPY)) perm_value &= (unsigned)~0x00240000; // Mask additional copy perms... } /* * Compute the encryption key... */ md5_init(&md5); md5_append(&md5, user_pad, 32); md5_append(&md5, owner_key, 32); perm_bytes[0] = (uchar)perm_value; perm_bytes[1] = (uchar)(perm_value >> 8); perm_bytes[2] = (uchar)(perm_value >> 16); perm_bytes[3] = (uchar)(perm_value >> 24); md5_append(&md5, perm_bytes, 4); md5_append(&md5, file_id, 16); md5_finish(&md5, digest); if (encrypt_len > 5) { // MD5 the result 50 times.. for (i = 0; i < 50; i ++) { md5_init(&md5); md5_append(&md5, digest, 16); md5_finish(&md5, digest); } } memcpy(encrypt_key, digest, (size_t)encrypt_len); /* * Compute the user key... */ if (encrypt_len > 5) { md5_init(&md5); md5_append(&md5, pad, 32); md5_append(&md5, file_id, 16); md5_finish(&md5, user_key); memset(user_key + 16, 0, 16); // Encrypt the result 20 times... for (i = 0; i < 20; i ++) { // XOR each byte in the key with the loop counter... for (j = 0; j < encrypt_len; j ++) digest[j] = (uchar)(encrypt_key[j] ^ i); rc4_init(&rc4, digest, (size_t)encrypt_len); rc4_encrypt(&rc4, user_key, user_key, 16); } } else { rc4_init(&rc4, encrypt_key, (size_t)encrypt_len); rc4_encrypt(&rc4, pad, user_key, 32); } /* * Write the encryption dictionary... */ encrypt_object = pdf_start_object(out); fputs("/Filter/Standard/O<", out); for (i = 0; i < 32; i ++) fprintf(out, "%02x", owner_key[i]); fputs(">/U<", out); for (i = 0; i < 32; i ++) fprintf(out, "%02x", user_key[i]); fputs(">", out); if (encrypt_len > 5) { // N-bit encryption... fprintf(out, "/P %d/V 2/R 3/Length %d", (int)perm_value, encrypt_len * 8); } else fprintf(out, "/P %d/V 1/R 2", (int)perm_value); pdf_end_object(out); } else encrypt_object = 0; /* * Write info object... */ info_object = pdf_start_object(out); fputs("/Producer", out); write_string(out, (uchar *)HTMLDOC_PRODUCER, 0); fputs("/CreationDate", out); snprintf(temp, sizeof(temp), "D:%04d%02d%02d%02d%02d%02d+0000", doc_date.tm_year + 1900, doc_date.tm_mon + 1, doc_date.tm_mday, doc_date.tm_hour, doc_date.tm_min, doc_date.tm_sec); write_string(out, (uchar *)temp, 0); if (doc_title != NULL) { fputs("/Title", out); write_utf16(out, doc_title); } if (author != NULL || copyright != NULL) { if (author && copyright) snprintf(temp, sizeof(temp), "%s, %s", author, copyright); else if (author) strlcpy(temp, (const char *)author, sizeof(temp)); else strlcpy(temp, (const char *)copyright, sizeof(temp)); fputs("/Author", out); write_utf16(out, (uchar *)temp); } if (creator != NULL) { fputs("/Creator", out); write_utf16(out, creator); } if (keywords != NULL) { fputs("/Keywords", out); write_utf16(out, keywords); } if (subject != NULL) { fputs("/Subject", out); write_utf16(out, subject); } pdf_end_object(out); /* * Write the font encoding for the selected character set. Note that * we *should* be able to use the WinAnsiEncoding value for ISO-8859-1 * to make smaller files, however Acrobat Exchange does not like it * despite the fact that it is defined in the PDF specification... 
*/ encoding_object = pdf_start_object(out); fputs("/Type/Encoding", out); fputs("/Differences[", out); for (i = 0, j = -1; i < 256; i ++) if (_htmlGlyphs[i]) { /* * Output a character index if we had blank ones... */ if (j != (i - 1)) fprintf(out, " %d", i); fprintf(out, "/%s", _htmlGlyphs[i]); j = i; } fputs("]", out); pdf_end_object(out); memset(font_desc, 0, sizeof(font_desc)); /* * Build font descriptors for the EmbedFonts fonts... */ for (i = 0; i < TYPE_MAX; i ++) if (EmbedFonts || !_htmlStandardFonts[i]) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j]) font_desc[i][j] = write_type1(out, (typeface_t )i, (style_t)j); for (i = 0; i < TYPE_MAX; i ++) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j]) { font_objects[i * STYLE_MAX + j] = pdf_start_object(out); fputs("/Type/Font", out); fputs("/Subtype/Type1", out); fprintf(out, "/BaseFont/%s", _htmlFonts[i][j]); if (font_desc[i][j]) { // Embed Type1 font... fputs("/FirstChar 0", out); fputs("/LastChar 255", out); fprintf(out, "/Widths %d 0 R", font_desc[i][j] + 1); fprintf(out, "/FontDescriptor %d 0 R", font_desc[i][j]); } if (i < TYPE_SYMBOL) /* Use native encoding for symbols */ fprintf(out, "/Encoding %d 0 R", encoding_object); pdf_end_object(out); } } } /* * 'write_string()' - Write a text entity. */ static void write_string(FILE *out, /* I - Output file */ uchar *s, /* I - String */ int compress) /* I - Compress output? */ { int i; /* Looping var */ if (Encryption && !compress && PSLevel == 0) { int len, // Length of string bytes; // Current bytes encrypted uchar news[1024]; // New string /* * Write an encrypted string... */ putc('<', out); encrypt_init(); for (len = strlen((char *)s); len > 0; len -= bytes, s += bytes) { if (len > (int)sizeof(news)) bytes = (int)sizeof(news); else bytes = len; rc4_encrypt(&encrypt_state, s, news, (size_t)bytes); for (i = 0; i < bytes; i ++) fprintf(out, "%02x", news[i]); } putc('>', out); } else { uchar nbsp = 160; // Non-breaking space char if (compress) flate_write(out, (uchar *)"(", 1); else putc('(', out); if (_htmlUTF8) nbsp = _htmlCharacters[160]; while (*s != '\0') { if (*s == nbsp) { /* &nbsp; */ if (compress) flate_write(out, (uchar *)" ", 1); else putc(' ', out); } else if (*s < 32 || *s > 126) { if (compress) flate_printf(out, "\\%o", *s); else fprintf(out, "\\%o", *s); } else if (compress) { if (*s == '(' || *s == ')' || *s == '\\') flate_write(out, (uchar *)"\\", 1); flate_write(out, s, 1); } else { if (*s == '(' || *s == ')' || *s == '\\') putc('\\', out); putc(*s, out); } s ++; } if (compress) flate_write(out, (uchar *)")", 1); else putc(')', out); } } /* * 'write_text()' - Write a text entity. */ static void write_text(FILE *out, /* I - Output file */ render_t *r) /* I - Text entity */ { uchar *ptr; /* Pointer into text */ // Quick optimization - don't output spaces... for (ptr = r->data.text.buffer; *ptr; ptr ++) if (!isspace(*ptr) && *ptr != 0xa0) break; if (!*ptr) return; // Not just whitespace - send it out... 
set_color(out, r->data.text.rgb); set_font(out, r->data.text.typeface, r->data.text.style, r->data.text.size); set_pos(out, r->x, r->y); if (PSLevel > 0) { if (r->data.text.spacing > 0.0f) fprintf(out, " %.3f", r->data.text.spacing); } else if (r->data.text.spacing != render_spacing) flate_printf(out, " %.3f Tc", render_spacing = r->data.text.spacing); write_string(out, r->data.text.buffer, PSLevel == 0); if (PSLevel > 0) { if (r->data.text.spacing > 0.0f) fputs("J\n", out); else fputs("S\n", out); } else flate_puts("Tj\n", out); render_x += r->width; } /* * 'write_trailer()' - Write the file trailer. */ static void write_trailer(FILE *out, /* I - Output file */ int num_file_pages, /* I - Number of pages in file */ uchar *lang) /* I - Language */ { int i, j, k, /* Looping vars */ type, /* Type of number */ offset, /* Offset to xref table in PDF file */ start; /* Start page number */ page_t *page; /* Start page of chapter */ char prefix[64], /* Prefix string */ *prefptr; /* Pointer into prefix string */ static const char *modes[] = /* Page modes */ { "UseNone", "UseOutlines", "FullScreen" }; static const char *layouts[] = /* Page layouts */ { "SinglePage", "OneColumn", "TwoColumnLeft", "TwoColumnRight" }; if (PSLevel > 0) { /* * PostScript... */ fputs("%%Trailer\n", out); if (num_file_pages > 0) fprintf(out, "%%%%Pages: %d\n", num_file_pages); fputs("%%EOF\n", out); } else { /* * PDF... */ root_object = pdf_start_object(out); fputs("/Type/Catalog", out); fprintf(out, "/Pages %d 0 R", pages_object); if (PDFVersion >= 12) { if (names_object) fprintf(out, "/Names %d 0 R", names_object); fprintf(out, "/PageLayout/%s", layouts[PDFPageLayout]); } if (lang) fprintf(out, "/Lang(%s)", (char *)lang); if (outline_object > 0) fprintf(out, "/Outlines %d 0 R", outline_object); switch (PDFFirstPage) { case PDF_PAGE_1 : if (TitlePage) { fprintf(out, "/OpenAction[%d 0 R/XYZ null null 0]", pages_object + 1); break; } break; case PDF_TOC : if (TocLevels > 0) { fprintf(out, "/OpenAction[%d 0 R/XYZ null null 0]", pages_object + 2 * chapter_outstarts[0] + 1); break; } break; case PDF_CHAPTER_1 : fprintf(out, "/OpenAction[%d 0 R/XYZ null null 0]", pages_object + 2 * chapter_outstarts[1] + 1); break; } fprintf(out, "/PageMode/%s", modes[PDFPageMode]); if (PDFVersion > 12 && NumberUp == 1) { // Output the PageLabels tree... 
fputs("/PageLabels<</Nums[", out); for (i = 0; i < chapter_starts[1]; i ++) { fprintf(out, "%d<</P", i); if (i & 1) write_string(out, (uchar *)"eltit", 0); else write_string(out, (uchar *)"title", 0); fputs(">>", out); } if (TocLevels > 0 && OutputType == OUTPUT_BOOK) { type = 'r'; for (j = 0; j < 3; j ++) if ((TocHeader[j] && strstr(TocHeader[j], "$PAGE(1)")) || (TocFooter[j] && strstr(TocFooter[j], "$PAGE(1)"))) type = 'D'; else if ((TocHeader[j] && strstr(TocHeader[j], "$PAGE(I)")) || (TocFooter[j] && strstr(TocFooter[j], "$PAGE(I)"))) type = 'R'; else if ((TocHeader[j] && strstr(TocHeader[j], "$PAGE(a)")) || (TocFooter[j] && strstr(TocFooter[j], "$PAGE(a)"))) type = 'a'; else if ((TocHeader[j] && strstr(TocHeader[j], "$PAGE(A)")) || (TocFooter[j] && strstr(TocFooter[j], "$PAGE(A)"))) type = 'A'; fprintf(out, "%d<</S/%c>>", i, type); i += chapter_ends[0] - chapter_starts[0] + 1; } for (j = 1; j <= TocDocCount; j ++) { if (chapter_starts[j] < 0) continue; page = pages + chapter_starts[j]; start = chapter_starts[j] - chapter_starts[1] + 1; type = 'D'; prefix[0] = '\0'; for (k = 0; k < 3; k ++) { if (page->header[k] && strstr((char *)page->header[k], "PAGE")) strlcpy(prefix, (char *)page->header[k], sizeof(prefix)); else if (page->footer[k] && strstr((char *)page->footer[k], "PAGE")) strlcpy(prefix, (char *)page->footer[k], sizeof(prefix)); if ((page->header[k] && strstr((char *)page->header[k], "PAGE(i)")) || (page->footer[k] && strstr((char *)page->footer[k], "PAGE(i)"))) type = 'r'; else if ((page->header[k] && strstr((char *)page->header[k], "PAGE(I)")) || (page->footer[k] && strstr((char *)page->footer[k], "PAGE(I)"))) type = 'R'; else if ((page->header[k] && strstr((char *)page->header[k], "PAGE(a)")) || (page->footer[k] && strstr((char *)page->footer[k], "PAGE(a)"))) type = 'a'; else if ((page->header[k] && strstr((char *)page->header[k], "PAGE(A)")) || (page->footer[k] && strstr((char *)page->footer[k], "PAGE(A)"))) type = 'A'; if ((page->header[k] && strstr((char *)page->header[k], "$CHAPTERPAGE")) || (page->footer[k] && strstr((char *)page->footer[k], "$CHAPTERPAGE"))) start = 1; } if ((prefptr = strstr(prefix, "$PAGE")) == NULL) prefptr = strstr(prefix, "$CHAPTERPAGE"); fprintf(out, "%d<</S/%c/St %d", i, type, start); if (prefptr) { *prefptr = '\0'; fputs("/P", out); write_string(out, (uchar *)prefix, 0); } fputs(">>", out); i += chapter_ends[j] - chapter_starts[j] + 1; } fputs("]>>", out); } pdf_end_object(out); offset = ftell(out); fputs("xref\n", out); fprintf(out, "0 %d \n", (int)num_objects + 1); fputs("0000000000 65535 f \n", out); for (i = 1; i <= (int)num_objects; i ++) fprintf(out, "%010d 00000 n \n", objects[i]); fputs("trailer\n", out); fputs("<<", out); fprintf(out, "/Size %d", (int)num_objects + 1); fprintf(out, "/Root %d 0 R", root_object); fprintf(out, "/Info %d 0 R", info_object); fputs("/ID[<", out); for (i = 0; i < 16; i ++) fprintf(out, "%02x", file_id[i]); fputs("><", out); for (i = 0; i < 16; i ++) fprintf(out, "%02x", file_id[i]); fputs(">]", out); if (Encryption) fprintf(out, "/Encrypt %d 0 R", encrypt_object); fputs(">>\n", out); fputs("startxref\n", out); fprintf(out, "%d\n", offset); fputs("%%EOF\n", out); } } /* * 'write_type1()' - Write an embedded Type 1 font. 
*/ static int /* O - Object number */ write_type1(FILE *out, /* I - File to write to */ typeface_t typeface, /* I - Typeface */ style_t style) /* I - Style */ { char filename[1024]; /* PFA filename */ FILE *fp; /* PFA file */ int ch; /* Character value */ int width; /* Width value */ char glyph[64], /* Glyph name */ line[1024], /* Line from AFM file */ *lineptr, /* Pointer into line */ *dataptr; /* Pointer for data */ int ascent, /* Ascent above baseline */ cap_height, /* Ascent of CAPITALS */ x_height, /* Ascent of lowercase */ descent, /* Decent below baseline */ bbox[4], /* Bounding box */ italic_angle; /* Angle for italics */ int widths[256]; /* Character widths */ int length1, /* Length1 value for font */ length2, /* Length2 value for font */ length3; /* Length3 value for font */ static int tflags[] = /* PDF typeface flags */ { 33, /* Courier */ 34, /* Times-Roman */ 32, /* Helvetica */ 33, /* Monospace */ 34, /* Serif */ 32, /* Sans */ 4, /* Symbol */ 4 /* Dingbats */ }; static int sflags[] = /* PDF style flags */ { 0, /* Normal */ 0, /* Bold */ 64, /* Italic */ 64 /* Bold-Italic */ }; /* * This function writes a Type1 font, either as an object for PDF * output or as an in-line font in PostScript output. This is useful * because the Type1 fonts that Adobe ships typically do not include * the full set of characters required by some of the ISO character * sets. */ /* * Try to open the PFA file for the Type1 font... */ snprintf(filename, sizeof(filename), "%s/fonts/%s.pfa", _htmlData, _htmlFonts[typeface][style]); if ((fp = fopen(filename, "r")) == NULL) { #ifndef DEBUG progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to open font file %s!", filename); #endif /* !DEBUG */ return (0); } /* * Write the font (object)... */ if (PSLevel) { /* * Embed a Type1 font in the PostScript output... */ fprintf(out, "%%%%BeginResource: font %s\n", _htmlFonts[typeface][style]); line[0] = '\0'; while (fgets(line, sizeof(line), fp) != NULL) fputs(line, out); if (line[strlen(line) - 1] != '\n') fputs("\n", out); fputs("%%EndResource\n", out); fclose(fp); } else { /* * Embed a Type1 font object in the PDF output... 
*/ length1 = 0; length2 = 0; length3 = 0; while (fgets(line, sizeof(line), fp) != NULL) { length1 += strlen(line); if (strstr(line, "currentfile eexec") != NULL) break; } while (fgets(line, sizeof(line), fp) != NULL) { if (!strcmp(line, "00000000000000000000000000000000" "00000000000000000000000000000000\n")) break; length2 += (strlen(line) - 1) / 2; } length3 = strlen(line); while (fgets(line, sizeof(line), fp) != NULL) length3 += strlen(line); rewind(fp); pdf_start_object(out); fprintf(out, "/Length1 %d", length1); fprintf(out, "/Length2 %d", length2); fprintf(out, "/Length3 %d", length3); if (Compression) fputs("/Filter/FlateDecode", out); pdf_start_stream(out); flate_open_stream(out); while (fgets(line, sizeof(line), fp) != NULL) { flate_puts(line, out); if (strstr(line, "currentfile eexec") != NULL) break; } while (fgets(line, sizeof(line), fp) != NULL) { if (!strcmp(line, "00000000000000000000000000000000" "00000000000000000000000000000000\n")) break; for (lineptr = line, dataptr = line; isxdigit(*lineptr); lineptr += 2) { if (isdigit(lineptr[0])) ch = (lineptr[0] - '0') << 4; else ch = (tolower(lineptr[0] & 255) - 'a' + 10) << 4; if (isdigit(lineptr[1])) ch |= lineptr[1] - '0'; else ch |= tolower(lineptr[1] & 255) - 'a' + 10; *dataptr++ = (char)ch; } flate_write(out, (uchar *)line, dataptr - line); } flate_puts(line, out); while (fgets(line, sizeof(line), fp) != NULL) flate_puts(line, out); flate_close_stream(out); pdf_end_object(out); fclose(fp); /* * Try to open the AFM file for the Type1 font... */ snprintf(filename, sizeof(filename), "%s/fonts/%s.afm", _htmlData, _htmlFonts[typeface][style]); if ((fp = fopen(filename, "r")) == NULL) { #ifndef DEBUG progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to open font width file %s!", filename); #endif /* !DEBUG */ return (0); } /* * Set the default values (Courier)... */ for (ch = 0; ch < 256; ch ++) widths[ch] = 600; ascent = 629; cap_height = 562; x_height = 426; descent = -157; bbox[0] = -28; bbox[1] = -250; bbox[2] = 628; bbox[3] = 805; italic_angle = 0; /* * Read the AFM file... */ while (fgets(line, sizeof(line), fp) != NULL) { if (strncmp(line, "ItalicAngle ", 12) == 0) italic_angle = atoi(line + 12); else if (strncmp(line, "FontBBox ", 9) == 0) sscanf(line + 9, "%d%d%d%d", bbox + 0, bbox + 1, bbox + 2, bbox + 3); else if (strncmp(line, "CapHeight ", 10) == 0) cap_height = atoi(line + 10); else if (strncmp(line, "XHeight ", 8) == 0) x_height = atoi(line + 8); else if (strncmp(line, "Ascender ", 9) == 0) ascent = atoi(line + 9); else if (strncmp(line, "Descender ", 10) == 0) descent = atoi(line + 10); else if (strncmp(line, "C ", 2) == 0) { if (typeface < TYPE_SYMBOL) { /* * Handle encoding of Courier, Times, and Helvetica using * assigned charset... */ if (sscanf(line, "%*s%*s%*s%*s%d%*s%*s%63s", &width, glyph) != 2) continue; for (ch = 0; ch < 256; ch ++) if (_htmlGlyphs[ch] && strcmp(_htmlGlyphs[ch], glyph) == 0) break; if (ch < 256) widths[ch] = width; } else { /* * Symbol font uses its own encoding... */ if (sscanf(line, "%*s%d%*s%*s%d", &ch, &width) != 2) continue; if (ch >= 0 && ch < 256) widths[ch] = width; } } } fclose(fp); /* * Write the font descriptor... 
*/ pdf_start_object(out); fputs("/Type/FontDescriptor", out); fprintf(out, "/Ascent %d", ascent); fprintf(out, "/Descent %d", descent); fprintf(out, "/CapHeight %d", cap_height); fprintf(out, "/XHeight %d", x_height); fprintf(out, "/FontBBox[%d %d %d %d]", bbox[0], bbox[1], bbox[2], bbox[3]); fprintf(out, "/ItalicAngle %d", italic_angle); fprintf(out, "/StemV %d", widths['v']); fprintf(out, "/Flags %d", tflags[typeface] | sflags[style]); fprintf(out, "/FontName/%s", _htmlFonts[typeface][style]); fprintf(out, "/FontFile %d 0 R", (int)num_objects - 1); pdf_end_object(out); /* * Write the character widths... */ pdf_start_object(out, 1); fprintf(out, "%d", widths[0]); for (ch = 1; ch < 256; ch ++) fprintf(out, " %d", widths[ch]); pdf_end_object(out); } /* * Return the font descriptor... */ return (num_objects - 1); } /* * 'write_utf16()' - Write a UTF-16 string... */ static void write_utf16(FILE *out, // I - File to write to uchar *s) // I - String to write { uchar *sptr; // Pointer into string /* * We start by checking to see if the string is composed only of * ASCII characters; if so, we can just write a normal string... */ for (sptr = s; *sptr && !(*sptr & 0x80); sptr ++); if (!*sptr) { /* * Write an ASCII string... */ write_string(out, s, 0); } else if (Encryption) { /* * Convert the string to Unicode and encrypt... */ int ch; // Character value uchar unicode[2], // Unicode character enicode[2]; // Encrypted unicode character putc('<', out); encrypt_init(); unicode[0] = 0xfe; // Start with BOM unicode[1] = 0xff; rc4_encrypt(&encrypt_state, unicode, enicode, 2); fprintf(out, "%02x%02x", enicode[0], enicode[1]); for (sptr = s; *sptr; sptr ++) { ch = _htmlUnicode[*sptr]; unicode[0] = (uchar)(ch >> 8); unicode[1] = (uchar)ch; rc4_encrypt(&encrypt_state, unicode, enicode, 2); fprintf(out, "%02x%02x", enicode[0], enicode[1]); } putc('>', out); } else { /* * Convert the string to Unicode... */ fputs("<feff", out); // Start with BOM for (sptr = s; *sptr; sptr ++) fprintf(out, "%04x", _htmlUnicode[*sptr]); putc('>', out); } } /* * 'encrypt_init()' - Initialize the RC4 encryption context for the current * object. */ static void encrypt_init(void) { int i; /* Looping var */ uchar data[21], /* Key data */ *dataptr; /* Pointer to key data */ md5_state_t md5; /* MD5 state */ md5_byte_t digest[16]; /* MD5 digest value */ /* * Compute the key data for the MD5 hash. */ for (i = 0, dataptr = data; i < encrypt_len; i ++) *dataptr++ = encrypt_key[i]; *dataptr++ = (uchar)num_objects; *dataptr++ = (uchar)(num_objects >> 8); *dataptr++ = (uchar)(num_objects >> 16); *dataptr++ = 0; *dataptr++ = 0; /* * Hash it... */ md5_init(&md5); md5_append(&md5, data, encrypt_len + 5); md5_finish(&md5, digest); /* * Initialize the RC4 context using the first N+5 bytes of the digest... */ if (encrypt_len > 11) rc4_init(&encrypt_state, digest, 16); else rc4_init(&encrypt_state, digest, (size_t)(encrypt_len + 5)); } /* * 'flate_open_stream()' - Open a deflated output stream. */ static void flate_open_stream(FILE *out) /* I - Output file */ { if (Encryption && !PSLevel) encrypt_init(); if (!Compression) return; compressor_active = 1; compressor.zalloc = (alloc_func)0; compressor.zfree = (free_func)0; compressor.opaque = (voidpf)0; deflateInit(&compressor, Compression); compressor.next_out = (Bytef *)comp_buffer; compressor.avail_out = sizeof(comp_buffer); } /* * 'flate_close_stream()' - Close a deflated output stream. 
*/ static void flate_close_stream(FILE *out) /* I - Output file */ { int status; /* Deflate status */ if (!Compression) { #ifdef HTMLDOC_ASCII85 if (PSLevel) ps_ascii85(out, (uchar *)"", 0, 1); #endif // HTMLDOC_ASCII85 return; } while ((status = deflate(&compressor, Z_FINISH)) != Z_STREAM_END) { if (status < Z_OK && status != Z_BUF_ERROR) { progress_error(HD_ERROR_OUT_OF_MEMORY, "deflate() failed (%d)", status); return; } if (PSLevel) #ifdef HTMLDOC_ASCII85 ps_ascii85(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #else ps_hex(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #endif // HTMLDOC_ASCII85 else { if (Encryption) rc4_encrypt(&encrypt_state, comp_buffer, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); fwrite(comp_buffer, (size_t)((uchar *)compressor.next_out - (uchar *)comp_buffer), 1, out); } compressor.next_out = (Bytef *)comp_buffer; compressor.avail_out = sizeof(comp_buffer); } if ((uchar *)compressor.next_out > (uchar *)comp_buffer) { if (PSLevel) #ifdef HTMLDOC_ASCII85 ps_ascii85(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #else ps_hex(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #endif // HTMLDOC_ASCII85 else { if (Encryption) rc4_encrypt(&encrypt_state, comp_buffer, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); fwrite(comp_buffer, (size_t)((uchar *)compressor.next_out - (uchar *)comp_buffer), 1, out); } } deflateEnd(&compressor); compressor_active = 0; #ifdef HTMLDOC_ASCII85 if (PSLevel) ps_ascii85(out, (uchar *)"", 0, 1); #else if (PSLevel) { // End of data marker... fputs(">\n", out); } #endif // HTMLDOC_ASCII85 } /* * 'flate_puts()' - Write a character string to a compressed stream. */ static void flate_puts(const char *s, /* I - String to write */ FILE *out) /* I - Output file */ { flate_write(out, (uchar *)s, strlen(s)); } /* * 'flate_printf()' - Write a formatted character string to a compressed stream. */ static void flate_printf(FILE *out, /* I - Output file */ const char *format, /* I - Format string */ ...) /* I - Additional args as necessary */ { int length; /* Length of output string */ char buf[10240]; /* Output buffer */ va_list ap; /* Argument pointer */ va_start(ap, format); length = vsnprintf(buf, sizeof(buf), format, ap); va_end(ap); flate_write(out, (uchar *)buf, length); } /* * 'flate_write()' - Write data to a compressed stream. */ static void flate_write(FILE *out, /* I - Output file */ uchar *buf, /* I - Buffer */ int length, /* I - Number of bytes to write */ int flush) /* I - Flush when writing data? */ { int status; /* Deflate status */ if (compressor_active) { compressor.next_in = buf; compressor.avail_in = (unsigned)length; while (compressor.avail_in > 0) { if (compressor.avail_out < (int)(sizeof(comp_buffer) / 8)) { if (PSLevel) #ifdef HTMLDOC_ASCII85 ps_ascii85(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #else ps_hex(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #endif // HTMLDOC_ASCII85 else { if (Encryption) rc4_encrypt(&encrypt_state, comp_buffer, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); fwrite(comp_buffer, (size_t)((uchar *)compressor.next_out - (uchar *)comp_buffer), 1, out); } compressor.next_out = (Bytef *)comp_buffer; compressor.avail_out = sizeof(comp_buffer); } status = deflate(&compressor, flush ? 
Z_FULL_FLUSH : Z_NO_FLUSH); if (status < Z_OK && status != Z_BUF_ERROR) { progress_error(HD_ERROR_OUT_OF_MEMORY, "deflate() failed (%d)", status); return; } flush = 0; } } else if (Encryption && !PSLevel) { int i, // Looping var bytes; // Number of bytes to encrypt/write uchar newbuf[1024]; // New encrypted data buffer for (i = 0; i < length; i += sizeof(newbuf)) { if ((bytes = length - i) > (int)sizeof(newbuf)) bytes = sizeof(newbuf); rc4_encrypt(&encrypt_state, buf + i, newbuf, (size_t)bytes); fwrite(newbuf, (size_t)bytes, 1, out); } } else if (PSLevel) #ifdef HTMLDOC_ASCII85 ps_ascii85(out, buf, length); #else ps_hex(out, buf, length); #endif // HTMLDOC_ASCII85 else fwrite(buf, (size_t)length, 1, out); }
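The write_trailer() code above finishes the PDF by emitting the cross-reference table built from the byte offsets that pdf_start_object() recorded for every object. The following standalone sketch (illustrative only, not HTMLDOC's implementation; the helper names simply mirror the ones used above) shows that same bookkeeping pattern end to end by writing a minimal blank one-page PDF: remember each object's ftell() offset, emit fixed-width 20-byte xref entries, and point startxref at the table.

/*
 * Minimal xref/trailer sketch (helper names mirror those above; this is
 * not HTMLDOC code). Writes a blank one-page PDF to show how object
 * offsets recorded at pdf_start_object() time become the fixed-width
 * entries of the cross-reference table.
 */

#include <stdio.h>

#define MAX_OBJECTS 16

static long objects[MAX_OBJECTS + 1];	/* Byte offset of each object */
static int  num_objects = 0;		/* Number of objects written */

static int				/* O - New object number */
pdf_start_object(FILE *out)		/* I - Output file */
{
  num_objects ++;
  objects[num_objects] = ftell(out);	/* Remember where this object starts */
  fprintf(out, "%d 0 obj\n", num_objects);
  return (num_objects);
}

static void
pdf_end_object(FILE *out)		/* I - Output file */
{
  fputs("\nendobj\n", out);
}

int
main(void)
{
  FILE	*out;				/* Output file */
  int	i, pages, root;			/* Looping var and object numbers */
  long	xref_offset;			/* Offset of the xref table */

  if ((out = fopen("minimal.pdf", "wb")) == NULL)
    return (1);

  fputs("%PDF-1.4\n", out);

  pages = pdf_start_object(out);	/* Object 1: page tree (its kid is the next object) */
  fprintf(out, "<</Type/Pages/Kids[%d 0 R]/Count 1>>", pages + 1);
  pdf_end_object(out);

  pdf_start_object(out);		/* Object 2: a blank page (no /Contents, for brevity) */
  fprintf(out, "<</Type/Page/Parent %d 0 R/MediaBox[0 0 612 792]>>", pages);
  pdf_end_object(out);

  root = pdf_start_object(out);		/* Object 3: document catalog */
  fprintf(out, "<</Type/Catalog/Pages %d 0 R>>", pages);
  pdf_end_object(out);

 /*
  * The xref table: one free entry for object 0, then one 20-byte entry
  * per object giving its absolute byte offset in the file...
  */

  xref_offset = ftell(out);
  fputs("xref\n", out);
  fprintf(out, "0 %d\n", num_objects + 1);
  fputs("0000000000 65535 f \n", out);
  for (i = 1; i <= num_objects; i ++)
    fprintf(out, "%010ld 00000 n \n", objects[i]);

  fputs("trailer\n", out);
  fprintf(out, "<</Size %d/Root %d 0 R>>\n", num_objects + 1, root);
  fputs("startxref\n", out);
  fprintf(out, "%ld\n", xref_offset);
  fputs("%%EOF\n", out);

  fclose(out);
  return (0);
}

The fixed 20-byte entry width (10-digit offset, space, 5-digit generation, space, keyword, two-character line end) is what lets a PDF reader seek directly to any object from the table, which is why both the free entry and the in-use entries in write_trailer() are padded to exactly the same width.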
null
241
CWE-787
CVE-2021-23206
# Changes in HTMLDOC v1.9.12 - Fixed a crash bug with "data:" URIs and EPUB output (Issue #410) - Fixed a crash bug with bogus table attributes (Issue #417) - Fixed a crash bug with malformed URIs (Issue #418) - Fixed a crash bug with malformed GIF files (Issue #423) - Fixed some issues reported by Coverity. # Changes in HTMLDOC v1.9.11 - Added high-resolution desktop icons for Linux. - Updated the internal HTTP library to fix truncation of redirection URLs (Issue #396) - Fixed a regression in the handling of character entities for UTF-8 input (Issue #401) - The `--numbered` option did not work when the table-of-contents was disabled (Issue #405) # Changes in HTMLDOC v1.9.10 - Updated local zlib to v1.2.11. - Updated local libpng to v1.6.37. - Fixed packaging issues on macOS and Windows (Issue #377, Issue #386) - Now ignore sRGB profile errors in PNG files (Issue #390) - The GUI would crash when saving (Issue #391) - Page comments are now allowed in `pre` text (Issue #394) # Changes in HTMLDOC v1.9.9 - Fixed a redirection issue - some sites (incorrectly) provide an incomplete Location: URL in the HTTP response. - Fixed https: support on newer versions of Windows (Issue #378) - Fixed a problem with remote URLs containing spaces (Issue #379) - Fixed a UTF-8 processing bug for Markdown files (Issue #383) - Added support for `<FONT FACE="monospace">` (Issue #385) # Changes in HTMLDOC v1.9.8 - Added support for a `HTMLDOC.filename` META keyword that controls the filename reported in CGI mode; the default remains "htmldoc.pdf" (Issue #367) - Fixed a paragraph formatting issue with large inline images (Issue #369) - Fixed a buffer underflow issue (Issue #370) - Fixed PDF page numbers (Issue #371) - Added support for a new `L` header/footer format (`$LETTERHEAD`), which inserts a letterhead image at its full size (Issue #372, Issue #373, Issue #375) - Updated the build documentation (Issue #374) # Changes in HTMLDOC v1.9.7 - Refactored the PRE rendering code to work around compiler optimization bugs (Issue #349) - Added support for links with targets (Issue #351) - Fixed a table rowspan + valign bug (Issue #360) # Changes in HTMLDOC v1.9.6 - Added support for data URIs (Issue #340) - HTMLDOC no longer includes a PDF table of contents when converting a single web page (Issue #344) - Updated the markdown support with external links, additional inline markup, and hard line breaks. - Links in markdown text no longer render with a leading space as part of the link (Issue #346) - Fixed a buffer underflow bug discovered by AddressSanitizer. - Fixed a bug in UTF-8 support (Issue #348) - PDF output now includes the base language of the input document(s) (Issue #350) - Optimized the loading of font widths (Issue #354) - Optimized PDF page resources (Issue #356) - Optimized the base memory used for font widths (Issue #357) - Added proper `&shy;` support (Issue #361) - Title files can now be markdown. # Changes in HTMLDOC v1.9.5 - The GUI did not support EPUB output. - Empty markdown table cells were not rendered in PDF or PostScript output. - The automatically-generated title page now supports both "docnumber" and "version" metadata. - Added support for dc:subject and dc:language metadata in EPUB output from the HTML keywords and lang values. - Added support for the subject and language metadata in markdown input. - Fixed a buffer underflow bug (Issue #338) - `htmldoc --help` now reports whether HTTPS URLs are supported (Issue #339) - Fixed an issue with HTML title pages and EPUB output. 
# Changes in HTMLDOC v1.9.4 - Inline fixed-width text is no longer reduced in size automatically (Issue #309) - Optimized initialization of font width data (Issue #334) # Changes in HTMLDOC v1.9.3 - Fixed formatting bugs with aligned images (Issue #322, Issue #324) - Fixed support for three digit "#RGB" color values (Issue #323) - Fixed character set support for markdown metadata. - Updated libpng to v1.6.34 (Issue #326) - The makefiles did not use the CPPFLAGS value (Issue #328) # Changes in HTMLDOC v1.9.2 - Added Markdown table support. - Fixed parsing of TBODY, TFOOT, and THEAD elements in HTML files. # Changes in HTMLDOC v1.9.1 - Fixed monospace font size issue (Issue #309) - Added support for reproducible builds (Issue #310) - Added limited support for the HTML 4.0 SPAN element (Issue #311) - Added (extremely limited) UTF-8 support for input files (Issue #314) - Fixed buffer underflow for (invalid) short HTML comments (Issue #316) - Now indent PRE text, by popular request. - EPUB output now makes sure that `<element property>` is written as `<element property="property">`. - Now support both NAME and ID for table-of-contents targets. # Changes in HTMLDOC v1.9 - Added support for repeating a single header row for tables that span multiple pages (Issue #16) - Added support for embedding the current filename/URL in the header or footer (Issue #50) - Added EPUB support (Issue #301) - Added Markdown support (Issue #302) - Fixed a regression in header/footer image scaling (Issue #303) - Documentation updates (Issue #305) - Compiler fixes (Issue #304, Issue #306) - Fixed a bug when running HTMLDOC as a macOS application. - Updated the bundled libpng to v1.6.29. # Changes in HTMLDOC v1.8.30 - Updated documentation to reflect new project page on Github. - Dropped old CDE and IRIX desktop integration files. - Cleaned up the GUI and adopted new default text editors for Linux and macOS. - PAGE BREAK comments at the end of a file in web page mode would lose the first page (Issue #251) - Fixed the scaling of header/footer images to limit them to the height of the header or footer (Issue #273) - Fixed an issue with the top-level makefile not exiting with an error as needed (Issue #282) - Fixed a URL referencing bug when the same hostname but a different port was used (Issue #290) - Fixed build issue on macOS (Issue #291) - Fixed handling of indexed+alpha PNG images (Issue #295) # Changes in HTMLDOC v1.8.29 - Updated local PNG library to version 1.6.20. - Updated local JPEG library to version 9b. - Dropped support for OpenSSL. - Added configure script support for libjpeg-turbo. - Updated HTTP code to latest CUPS/ippsample sources. - Duplex PDF output incorrectly forced an even number of pages - The table of contents showed the wrong page numbers after headings containing the "_HD_OMIT_TOC" attribute. - Fixed reported build issues - The configure script's --enable-local* options did not work. # Changes in HTMLDOC v1.8.28 - Updated local zlib to version 1.2.8. - Updated local PNG library to version 1.6.8. - Updated local JPEG library to version 9. - Updated default PDF version to 1.4. - SECURITY: Fixed three buffer overflow issues when reading AFM files and parsing page sizes. 
- Fixed incompatibility with Fortify's version of strcpy, which does not work properly with variable-length arrays - Fixed compilation against PNG library 1.5 or later - Fixed documentation errors - Marked Zapf-Dingbats as a standard font - Fixed GPL license text in GUI - Fixed a table formatting problem when a column has multiple colspan values - Fixed parsing of HTML comments - Fixed potential out-of-bounds read in table-of-contents rendering code - Fixed handling of image URLs with ampersands in them - Fixed top/bottom margins for logo and header/footer images - Fixed image alignment bug - Fixed X11 build problem # Changes in HTMLDOC v1.8.27 - Fixed a crash bug that appeared when more than 10 blank pages were present in a document - Color changes were not reflected in PRE text - Remote URLs did not always work on older operating systems - Image filenames using % escapes were not decoded properly. - Rows using BGCOLOR that spanned across multiple pages did not render properly - Rows no longer start on a new page due to a cell with both HEIGHT and ROWSPAN specified - CMYK JPEG images caused HTMLDOC to crash - Table cell width calculations didn't always account for the proper minimum width - Images were not copied when generating indexed HTML output to a directory - Changing the bottom margin resulted in text that was formatted below the bottom margin. - The Monospace-Oblique font was not embedded properly in PDF files. # Changes in HTMLDOC v1.8.26 - Outline and keyword strings in PDF files are now stored as Unicode - The Flate compression code could get in an infinite loop if it ran out of memory - Book files saved from the GUI did not handle filenames with spaces - Fixed and re-enabled the ASCII85Device filter support in PostScript Level 2/3 output - Character entities in the first word of a file were not rendered properly - Fixed-size table columns were incorrectly resized when a table width was also specified and there was extra space to distribute - Text could "walk" up or down when in-line images were used - Row backgrounds incorrectly replaced cell backgrounds when the first cell in a row used ROWSPAN - HTMLDOC did not correctly parse FONT FACE attributes - Images in Level 2/3 PostScript output did not work on some printers - The GUI did not use the first page header # Changes in HTMLDOC v1.8.25 - Added "--overflow" and "--no-overflow" command-line options to show or hide the content-too-large errors; the default is "--no-overflow". - Added "--header1" command-line option and "HEADER1" page comments to set the page header for the first page of each chapter. - Added "timing" and "remotebytes" debug data generation. - Added DejaVu font collection to better support Cyrillic and Greek text; the new fonts are available under the generic names "monospace", "sans", and "serif". - Added "--referer" command-line option and corresponding CGI-mode support to pass Referer: information in HTTP requests - On Windows, HTMLDOC now logs CGI mode errors to a file called "htmldoc.log" in the Windows temporary directory. - HTMLDOC no longer uses Base-85 encoding for image data when producing Level 2 and 3 PostScript output. It appears that many printers and PostScript interpreters cannot properly decode this data when the original image data is not a multiple of 8 bits. - HTMLDOC now renders STRONG elements in boldface instead of bold-italic to match the W3C recommendations. 
- HTMLDOC now automatically inserts a TR element before a TD or TH element as needed to improve web site compatibility; this also triggers a HTML error in --strict mode. - "$HFIMAGEn" didn't work in a header/footer string. - HTMLDOC could crash when rendering a table. - Book files were not used in CGI mode - Cookies were not sent in HTTP requests - Table cells were not aligned properly when the ROWSPAN attribute was set to 1 - HTMLDOC crashed when rendering unresolved hyperlinks in aligned images - Documented the HTMLDOC_NOCGI environment variable - HTMLDOC sometimes crashed when rendering tables with background colors - HTMLDOC would crash when writing encrypted strings longer than 1024 bytes - HTMLDOC didn't set the data directory when running in CGI mode on Windows. - HTMLDOC could crash when loading the Symbol.afm file - HTMLDOC did not always honor HEIGHT attributes in table rows. - Tables with a mix of colspan and rowspan sometimes caused cells to be moved vertically outside the cell.
null
# Changes in HTMLDOC v1.9.12 - Fixed a crash bug with "data:" URIs and EPUB output (Issue #410) - Fixed crash bugs with bogus table attributes (Issue #416, Issue #417) - Fixed a crash bug with malformed URIs (Issue #418) - Fixed a crash bug with malformed GIF files (Issue #423) - Fixed some issues reported by Coverity. # Changes in HTMLDOC v1.9.11 - Added high-resolution desktop icons for Linux. - Updated the internal HTTP library to fix truncation of redirection URLs (Issue #396) - Fixed a regression in the handling of character entities for UTF-8 input (Issue #401) - The `--numbered` option did not work when the table-of-contents was disabled (Issue #405) # Changes in HTMLDOC v1.9.10 - Updated local zlib to v1.2.11. - Updated local libpng to v1.6.37. - Fixed packaging issues on macOS and Windows (Issue #377, Issue #386) - Now ignore sRGB profile errors in PNG files (Issue #390) - The GUI would crash when saving (Issue #391) - Page comments are now allowed in `pre` text (Issue #394) # Changes in HTMLDOC v1.9.9 - Fixed a redirection issue - some sites (incorrectly) provide an incomplete Location: URL in the HTTP response. - Fixed https: support on newer versions of Windows (Issue #378) - Fixed a problem with remote URLs containing spaces (Issue #379) - Fixed a UTF-8 processing bug for Markdown files (Issue #383) - Added support for `<FONT FACE="monospace">` (Issue #385) # Changes in HTMLDOC v1.9.8 - Added support for a `HTMLDOC.filename` META keyword that controls the filename reported in CGI mode; the default remains "htmldoc.pdf" (Issue #367) - Fixed a paragraph formatting issue with large inline images (Issue #369) - Fixed a buffer underflow issue (Issue #370) - Fixed PDF page numbers (Issue #371) - Added support for a new `L` header/footer format (`$LETTERHEAD`), which inserts a letterhead image at its full size (Issue #372, Issue #373, Issue #375) - Updated the build documentation (Issue #374) # Changes in HTMLDOC v1.9.7 - Refactored the PRE rendering code to work around compiler optimization bugs (Issue #349) - Added support for links with targets (Issue #351) - Fixed a table rowspan + valign bug (Issue #360) # Changes in HTMLDOC v1.9.6 - Added support for data URIs (Issue #340) - HTMLDOC no longer includes a PDF table of contents when converting a single web page (Issue #344) - Updated the markdown support with external links, additional inline markup, and hard line breaks. - Links in markdown text no longer render with a leading space as part of the link (Issue #346) - Fixed a buffer underflow bug discovered by AddressSanitizer. - Fixed a bug in UTF-8 support (Issue #348) - PDF output now includes the base language of the input document(s) (Issue #350) - Optimized the loading of font widths (Issue #354) - Optimized PDF page resources (Issue #356) - Optimized the base memory used for font widths (Issue #357) - Added proper `&shy;` support (Issue #361) - Title files can now be markdown. # Changes in HTMLDOC v1.9.5 - The GUI did not support EPUB output. - Empty markdown table cells were not rendered in PDF or PostScript output. - The automatically-generated title page now supports both "docnumber" and "version" metadata. - Added support for dc:subject and dc:language metadata in EPUB output from the HTML keywords and lang values. - Added support for the subject and language metadata in markdown input. - Fixed a buffer underflow bug (Issue #338) - `htmldoc --help` now reports whether HTTPS URLs are supported (Issue #339) - Fixed an issue with HTML title pages and EPUB output. 
# Changes in HTMLDOC v1.9.4 - Inline fixed-width text is no longer reduced in size automatically (Issue #309) - Optimized initialization of font width data (Issue #334) # Changes in HTMLDOC v1.9.3 - Fixed formatting bugs with aligned images (Issue #322, Issue #324) - Fixed support for three digit "#RGB" color values (Issue #323) - Fixed character set support for markdown metadata. - Updated libpng to v1.6.34 (Issue #326) - The makefiles did not use the CPPFLAGS value (Issue #328) # Changes in HTMLDOC v1.9.2 - Added Markdown table support. - Fixed parsing of TBODY, TFOOT, and THEAD elements in HTML files. # Changes in HTMLDOC v1.9.1 - Fixed monospace font size issue (Issue #309) - Added support for reproducible builds (Issue #310) - Added limited support for the HTML 4.0 SPAN element (Issue #311) - Added (extremely limited) UTF-8 support for input files (Issue #314) - Fixed buffer underflow for (invalid) short HTML comments (Issue #316) - Now indent PRE text, by popular request. - EPUB output now makes sure that `<element property>` is written as `<element property="property">`. - Now support both NAME and ID for table-of-contents targets. # Changes in HTMLDOC v1.9 - Added support for repeating a single header row for tables that span multiple pages (Issue #16) - Added support for embedding the current filename/URL in the header or footer (Issue #50) - Added EPUB support (Issue #301) - Added Markdown support (Issue #302) - Fixed a regression in header/footer image scaling (Issue #303) - Documentation updates (Issue #305) - Compiler fixes (Issue #304, Issue #306) - Fixed a bug when running HTMLDOC as a macOS application. - Updated the bundled libpng to v1.6.29. # Changes in HTMLDOC v1.8.30 - Updated documentation to reflect new project page on Github. - Dropped old CDE and IRIX desktop integration files. - Cleaned up the GUI and adopted new default text editors for Linux and macOS. - PAGE BREAK comments at the end of a file in web page mode would lose the first page (Issue #251) - Fixed the scaling of header/footer images to limit them to the height of the header or footer (Issue #273) - Fixed an issue with the top-level makefile not exiting with an error as needed (Issue #282) - Fixed a URL referencing bug when the same hostname but a different port was used (Issue #290) - Fixed build issue on macOS (Issue #291) - Fixed handling of indexed+alpha PNG images (Issue #295) # Changes in HTMLDOC v1.8.29 - Updated local PNG library to version 1.6.20. - Updated local JPEG library to version 9b. - Dropped support for OpenSSL. - Added configure script support for libjpeg-turbo. - Updated HTTP code to latest CUPS/ippsample sources. - Duplex PDF output incorrectly forced an even number of pages - The table of contents showed the wrong page numbers after headings containing the "_HD_OMIT_TOC" attribute. - Fixed reported build issues - The configure script's --enable-local* options did not work. # Changes in HTMLDOC v1.8.28 - Updated local zlib to version 1.2.8. - Updated local PNG library to version 1.6.8. - Updated local JPEG library to version 9. - Updated default PDF version to 1.4. - SECURITY: Fixed three buffer overflow issues when reading AFM files and parsing page sizes. 
- Fixed incompatibility with Fortify's version of strcpy, which does not work properly with variable-length arrays - Fixed compilation against PNG library 1.5 or later - Fixed documentation errors - Marked Zapf-Dingbats as a standard font - Fixed GPL license text in GUI - Fixed a table formatting problem when a column has multiple colspan values - Fixed parsing of HTML comments - Fixed potential out-of-bounds read in table-of-contents rendering code - Fixed handling of image URLs with ampersands in them - Fixed top/bottom margins for logo and header/footer images - Fixed image alignment bug - Fixed X11 build problem # Changes in HTMLDOC v1.8.27 - Fixed a crash bug that appeared when more than 10 blank pages were present in a document - Color changes were not reflected in PRE text - Remote URLs did not always work on older operating systems - Image filenames using % escapes were not decoded properly. - Rows using BGCOLOR that spanned across multiple pages did not render properly - Rows no longer start on a new page due to a cell with both HEIGHT and ROWSPAN specified - CMYK JPEG images caused HTMLDOC to crash - Table cell width calculations didn't always account for the proper minimum width - Images were not copied when generating indexed HTML output to a directory - Changing the bottom margin resulted in text that was formatted below the bottom margin. - The Monospace-Oblique font was not embedded properly in PDF files. # Changes in HTMLDOC v1.8.26 - Outline and keyword strings in PDF files are now stored as Unicode - The Flate compression code could get in an infinite loop if it ran out of memory - Book files saved from the GUI did not handle filenames with spaces - Fixed and re-enabled the ASCII85Device filter support in PostScript Level 2/3 output - Character entities in the first word of a file were not rendered properly - Fixed-size table columns were incorrectly resized when a table width was also specified and there was extra space to distribute - Text could "walk" up or down when in-line images were used - Row backgrounds incorrectly replaced cell backgrounds when the first cell in a row used ROWSPAN - HTMLDOC did not correctly parse FONT FACE attributes - Images in Level 2/3 PostScript output did not work on some printers - The GUI did not use the first page header # Changes in HTMLDOC v1.8.25 - Added "--overflow" and "--no-overflow" command-line options to show or hide the content-too-large errors; the default is "--no-overflow". - Added "--header1" command-line option and "HEADER1" page comments to set the page header for the first page of each chapter. - Added "timing" and "remotebytes" debug data generation. - Added DejaVu font collection to better support Cyrillic and Greek text; the new fonts are available under the generic names "monospace", "sans", and "serif". - Added "--referer" command-line option and corresponding CGI-mode support to pass Referer: information in HTTP requests - On Windows, HTMLDOC now logs CGI mode errors to a file called "htmldoc.log" in the Windows temporary directory. - HTMLDOC no longer uses Base-85 encoding for image data when producing Level 2 and 3 PostScript output. It appears that many printers and PostScript interpreters cannot properly decode this data when the original image data is not a multiple of 8 bits. - HTMLDOC now renders STRONG elements in boldface instead of bold-italic to match the W3C recommendations. 
- HTMLDOC now automatically inserts a TR element before a TD or TH element as needed to improve web site compatibility; this also triggers a HTML error in --strict mode. - "$HFIMAGEn" didn't work in a header/footer string. - HTMLDOC could crash when rendering a table. - Book files were not used in CGI mode - Cookies were not sent in HTTP requests - Table cells were not aligned properly when the ROWSPAN attribute was set to 1 - HTMLDOC crashed when rendering unresolved hyperlinks in aligned images - Documented the HTMLDOC_NOCGI environment variable - HTMLDOC sometimes crashed when rendering tables with background colors - HTMLDOC would crash when writing encrypted strings longer than 1024 bytes - HTMLDOC didn't set the data directory when running in CGI mode on Windows. - HTMLDOC could crash when loading the Symbol.afm file - HTMLDOC did not always honor HEIGHT attributes in table rows. - Tables with a mix of colspan and rowspan sometimes caused cells to be moved vertically outside the cell.
null
242
CWE-787
CVE-2021-24036
/* * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS #endif #include <folly/io/IOBuf.h> #include <cassert> #include <cstdint> #include <cstdlib> #include <stdexcept> #include <folly/Conv.h> #include <folly/Likely.h> #include <folly/Memory.h> #include <folly/ScopeGuard.h> #include <folly/hash/SpookyHashV2.h> #include <folly/io/Cursor.h> #include <folly/lang/Align.h> #include <folly/lang/Exception.h> #include <folly/memory/Malloc.h> #include <folly/memory/SanitizeAddress.h> /* * Callbacks that will be invoked when IOBuf allocates or frees memory. * Note that io_buf_alloc_cb() will also be invoked when IOBuf takes ownership * of a malloc-allocated buffer, even if it was allocated earlier by another * part of the code. * * By default these are unimplemented, but programs can define these functions * to perform their own custom logic on memory allocation. This is intended * primarily to help programs track memory usage and possibly take action * when thresholds are hit. Callers should generally avoid performing any * expensive work in these callbacks, since they may be called from arbitrary * locations in the code that use IOBuf, possibly while holding locks. */ #if FOLLY_HAVE_WEAK_SYMBOLS FOLLY_ATTR_WEAK void io_buf_alloc_cb(void* /*ptr*/, size_t /*size*/) noexcept; FOLLY_ATTR_WEAK void io_buf_free_cb(void* /*ptr*/, size_t /*size*/) noexcept; #else static void (*io_buf_alloc_cb)(void* /*ptr*/, size_t /*size*/) noexcept = nullptr; static void (*io_buf_free_cb)(void* /*ptr*/, size_t /*size*/) noexcept = nullptr; #endif using std::unique_ptr; namespace { enum : uint16_t { kHeapMagic = 0xa5a5, // This memory segment contains an IOBuf that is still in use kIOBufInUse = 0x01, // This memory segment contains buffer data that is still in use kDataInUse = 0x02, // This memory segment contains a SharedInfo that is still in use kSharedInfoInUse = 0x04, }; enum : std::size_t { // When create() is called for buffers less than kDefaultCombinedBufSize, // we allocate a single combined memory segment for the IOBuf and the data // together. See the comments for createCombined()/createSeparate() for more // details. // // (The size of 1k is largely just a guess here. We could could probably do // benchmarks of real applications to see if adjusting this number makes a // difference. Callers that know their exact use case can also explicitly // call createCombined() or createSeparate().) kDefaultCombinedBufSize = 1024 }; // Helper function for IOBuf::takeOwnership() // The user's free function is not allowed to throw. // (We are already in the middle of throwing an exception, so // we cannot let this exception go unhandled.) 
void takeOwnershipError( bool freeOnError, void* buf, folly::IOBuf::FreeFunction freeFn, void* userData) noexcept { if (!freeOnError) { return; } if (!freeFn) { free(buf); return; } freeFn(buf, userData); } } // namespace namespace folly { // use free for size >= 4GB // since we can store only 32 bits in the size var struct IOBuf::HeapPrefix { HeapPrefix(uint16_t flg, size_t sz) : magic(kHeapMagic), flags(flg), size((sz == ((size_t)(uint32_t)sz)) ? static_cast<uint32_t>(sz) : 0) {} ~HeapPrefix() { // Reset magic to 0 on destruction. This is solely for debugging purposes // to help catch bugs where someone tries to use HeapStorage after it has // been deleted. magic = 0; } uint16_t magic; std::atomic<uint16_t> flags; uint32_t size; }; struct IOBuf::HeapStorage { HeapPrefix prefix; // The IOBuf is last in the HeapStorage object. // This way operator new will work even if allocating a subclass of IOBuf // that requires more space. folly::IOBuf buf; }; struct IOBuf::HeapFullStorage { // Make sure jemalloc allocates from the 64-byte class. Putting this here // because HeapStorage is private so it can't be at namespace level. static_assert(sizeof(HeapStorage) <= 64, "IOBuf may not grow over 56 bytes!"); HeapStorage hs; SharedInfo shared; folly::max_align_t align; }; IOBuf::SharedInfo::SharedInfo() : freeFn(nullptr), userData(nullptr), useHeapFullStorage(false) { // Use relaxed memory ordering here. Since we are creating a new SharedInfo, // no other threads should be referring to it yet. refcount.store(1, std::memory_order_relaxed); } IOBuf::SharedInfo::SharedInfo(FreeFunction fn, void* arg, bool hfs) : freeFn(fn), userData(arg), useHeapFullStorage(hfs) { // Use relaxed memory ordering here. Since we are creating a new SharedInfo, // no other threads should be referring to it yet. refcount.store(1, std::memory_order_relaxed); } void IOBuf::SharedInfo::invokeAndDeleteEachObserver( SharedInfoObserverEntryBase* observerListHead, ObserverCb cb) noexcept { if (observerListHead && cb) { // break the chain observerListHead->prev->next = nullptr; auto entry = observerListHead; while (entry) { auto tmp = entry->next; cb(*entry); delete entry; entry = tmp; } } } void IOBuf::SharedInfo::releaseStorage(SharedInfo* info) noexcept { if (info->useHeapFullStorage) { auto storageAddr = reinterpret_cast<uint8_t*>(info) - offsetof(HeapFullStorage, shared); auto storage = reinterpret_cast<HeapFullStorage*>(storageAddr); info->~SharedInfo(); IOBuf::releaseStorage(&storage->hs, kSharedInfoInUse); } } void* IOBuf::operator new(size_t size) { size_t fullSize = offsetof(HeapStorage, buf) + size; auto storage = static_cast<HeapStorage*>(checkedMalloc(fullSize)); new (&storage->prefix) HeapPrefix(kIOBufInUse, fullSize); if (io_buf_alloc_cb) { io_buf_alloc_cb(storage, fullSize); } return &(storage->buf); } void* IOBuf::operator new(size_t /* size */, void* ptr) { return ptr; } void IOBuf::operator delete(void* ptr) { auto storageAddr = static_cast<uint8_t*>(ptr) - offsetof(HeapStorage, buf); auto storage = reinterpret_cast<HeapStorage*>(storageAddr); releaseStorage(storage, kIOBufInUse); } void IOBuf::operator delete(void* /* ptr */, void* /* placement */) { // Provide matching operator for `IOBuf::new` to avoid MSVC compilation // warning (C4291) about memory leak when exception is thrown in the // constructor. } void IOBuf::releaseStorage(HeapStorage* storage, uint16_t freeFlags) noexcept { CHECK_EQ(storage->prefix.magic, static_cast<uint16_t>(kHeapMagic)); // Use relaxed memory order here. 
If we are unlucky and happen to get // out-of-date data the compare_exchange_weak() call below will catch // it and load new data with memory_order_acq_rel. auto flags = storage->prefix.flags.load(std::memory_order_acquire); DCHECK_EQ((flags & freeFlags), freeFlags); while (true) { auto newFlags = uint16_t(flags & ~freeFlags); if (newFlags == 0) { // save the size size_t size = storage->prefix.size; // The storage space is now unused. Free it. storage->prefix.HeapPrefix::~HeapPrefix(); if (FOLLY_LIKELY(size)) { if (io_buf_free_cb) { io_buf_free_cb(storage, size); } sizedFree(storage, size); } else { free(storage); } return; } // This storage segment still contains portions that are in use. // Just clear the flags specified in freeFlags for now. auto ret = storage->prefix.flags.compare_exchange_weak( flags, newFlags, std::memory_order_acq_rel); if (ret) { // We successfully updated the flags. return; } // We failed to update the flags. Some other thread probably updated them // and cleared some of the other bits. Continue around the loop to see if // we are the last user now, or if we need to try updating the flags again. } } void IOBuf::freeInternalBuf(void* /* buf */, void* userData) noexcept { auto storage = static_cast<HeapStorage*>(userData); releaseStorage(storage, kDataInUse); } IOBuf::IOBuf(CreateOp, std::size_t capacity) : next_(this), prev_(this), data_(nullptr), length_(0), flagsAndSharedInfo_(0) { SharedInfo* info; allocExtBuffer(capacity, &buf_, &info, &capacity_); setSharedInfo(info); data_ = buf_; } IOBuf::IOBuf( CopyBufferOp /* op */, const void* buf, std::size_t size, std::size_t headroom, std::size_t minTailroom) : IOBuf(CREATE, headroom + size + minTailroom) { advance(headroom); if (size > 0) { assert(buf != nullptr); memcpy(writableData(), buf, size); append(size); } } IOBuf::IOBuf( CopyBufferOp op, ByteRange br, std::size_t headroom, std::size_t minTailroom) : IOBuf(op, br.data(), br.size(), headroom, minTailroom) {} unique_ptr<IOBuf> IOBuf::create(std::size_t capacity) { // For smaller-sized buffers, allocate the IOBuf, SharedInfo, and the buffer // all with a single allocation. // // We don't do this for larger buffers since it can be wasteful if the user // needs to reallocate the buffer but keeps using the same IOBuf object. // In this case we can't free the data space until the IOBuf is also // destroyed. Callers can explicitly call createCombined() or // createSeparate() if they know their use case better, and know if they are // likely to reallocate the buffer later. if (capacity <= kDefaultCombinedBufSize) { return createCombined(capacity); } // if we have nallocx, we want to allocate the capacity and the overhead in // a single allocation only if we do not cross into the next allocation class // for some buffer sizes, this can use about 25% extra memory if (canNallocx()) { auto mallocSize = goodMallocSize(capacity); // round capacity to a multiple of 8 size_t minSize = ((capacity + 7) & ~7) + sizeof(SharedInfo); // if we do not have space for the overhead, allocate the mem separateley if (mallocSize < minSize) { auto* buf = checkedMalloc(mallocSize); return takeOwnership(SIZED_FREE, buf, mallocSize, 0, 0); } } return createSeparate(capacity); } unique_ptr<IOBuf> IOBuf::createCombined(std::size_t capacity) { // To save a memory allocation, allocate space for the IOBuf object, the // SharedInfo struct, and the data itself all with a single call to malloc(). 
size_t requiredStorage = offsetof(HeapFullStorage, align) + capacity; size_t mallocSize = goodMallocSize(requiredStorage); auto storage = static_cast<HeapFullStorage*>(checkedMalloc(mallocSize)); new (&storage->hs.prefix) HeapPrefix(kIOBufInUse | kDataInUse, mallocSize); new (&storage->shared) SharedInfo(freeInternalBuf, storage); if (io_buf_alloc_cb) { io_buf_alloc_cb(storage, mallocSize); } auto bufAddr = reinterpret_cast<uint8_t*>(&storage->align); uint8_t* storageEnd = reinterpret_cast<uint8_t*>(storage) + mallocSize; auto actualCapacity = size_t(storageEnd - bufAddr); unique_ptr<IOBuf> ret(new (&storage->hs.buf) IOBuf( InternalConstructor(), packFlagsAndSharedInfo(0, &storage->shared), bufAddr, actualCapacity, bufAddr, 0)); return ret; } unique_ptr<IOBuf> IOBuf::createSeparate(std::size_t capacity) { return std::make_unique<IOBuf>(CREATE, capacity); } unique_ptr<IOBuf> IOBuf::createChain( size_t totalCapacity, std::size_t maxBufCapacity) { unique_ptr<IOBuf> out = create(std::min(totalCapacity, size_t(maxBufCapacity))); size_t allocatedCapacity = out->capacity(); while (allocatedCapacity < totalCapacity) { unique_ptr<IOBuf> newBuf = create( std::min(totalCapacity - allocatedCapacity, size_t(maxBufCapacity))); allocatedCapacity += newBuf->capacity(); out->prependChain(std::move(newBuf)); } return out; } size_t IOBuf::goodSize(size_t minCapacity, CombinedOption combined) { if (combined == CombinedOption::DEFAULT) { combined = minCapacity <= kDefaultCombinedBufSize ? CombinedOption::COMBINED : CombinedOption::SEPARATE; } size_t overhead; if (combined == CombinedOption::COMBINED) { overhead = offsetof(HeapFullStorage, align); } else { // Pad minCapacity to a multiple of 8 minCapacity = (minCapacity + 7) & ~7; overhead = sizeof(SharedInfo); } size_t goodSize = folly::goodMallocSize(minCapacity + overhead); return goodSize - overhead; } IOBuf::IOBuf( TakeOwnershipOp, void* buf, std::size_t capacity, std::size_t offset, std::size_t length, FreeFunction freeFn, void* userData, bool freeOnError) : next_(this), prev_(this), data_(static_cast<uint8_t*>(buf) + offset), buf_(static_cast<uint8_t*>(buf)), length_(length), capacity_(capacity), flagsAndSharedInfo_( packFlagsAndSharedInfo(kFlagFreeSharedInfo, nullptr)) { // do not allow only user data without a freeFn // since we use that for folly::sizedFree DCHECK(!userData || (userData && freeFn)); auto rollback = makeGuard([&] { // takeOwnershipError(freeOnError, buf, freeFn, userData); }); setSharedInfo(new SharedInfo(freeFn, userData)); rollback.dismiss(); } IOBuf::IOBuf( TakeOwnershipOp, SizedFree, void* buf, std::size_t capacity, std::size_t offset, std::size_t length, bool freeOnError) : next_(this), prev_(this), data_(static_cast<uint8_t*>(buf) + offset), buf_(static_cast<uint8_t*>(buf)), length_(length), capacity_(capacity), flagsAndSharedInfo_( packFlagsAndSharedInfo(kFlagFreeSharedInfo, nullptr)) { auto rollback = makeGuard([&] { // takeOwnershipError(freeOnError, buf, nullptr, nullptr); }); setSharedInfo(new SharedInfo(nullptr, reinterpret_cast<void*>(capacity))); rollback.dismiss(); if (io_buf_alloc_cb && capacity) { io_buf_alloc_cb(buf, capacity); } } unique_ptr<IOBuf> IOBuf::takeOwnership( void* buf, std::size_t capacity, std::size_t offset, std::size_t length, FreeFunction freeFn, void* userData, bool freeOnError, TakeOwnershipOption option) { // do not allow only user data without a freeFn // since we use that for folly::sizedFree DCHECK( !userData || (userData && freeFn) || (userData && !freeFn && (option == 
TakeOwnershipOption::STORE_SIZE))); HeapFullStorage* storage = nullptr; auto rollback = makeGuard([&] { if (storage) { free(storage); } takeOwnershipError(freeOnError, buf, freeFn, userData); }); size_t requiredStorage = sizeof(HeapFullStorage); size_t mallocSize = goodMallocSize(requiredStorage); storage = static_cast<HeapFullStorage*>(checkedMalloc(mallocSize)); new (&storage->hs.prefix) HeapPrefix(kIOBufInUse | kSharedInfoInUse, mallocSize); new (&storage->shared) SharedInfo(freeFn, userData, true /*useHeapFullStorage*/); auto result = unique_ptr<IOBuf>(new (&storage->hs.buf) IOBuf( InternalConstructor(), packFlagsAndSharedInfo(0, &storage->shared), static_cast<uint8_t*>(buf), capacity, static_cast<uint8_t*>(buf) + offset, length)); rollback.dismiss(); if (io_buf_alloc_cb) { io_buf_alloc_cb(storage, mallocSize); if (userData && !freeFn && (option == TakeOwnershipOption::STORE_SIZE)) { // Even though we did not allocate the buffer, call io_buf_alloc_cb() // since we will call io_buf_free_cb() on destruction, and we want these // calls to be 1:1. io_buf_alloc_cb(buf, capacity); } } return result; } IOBuf::IOBuf(WrapBufferOp, const void* buf, std::size_t capacity) noexcept : IOBuf( InternalConstructor(), 0, // We cast away the const-ness of the buffer here. // This is okay since IOBuf users must use unshare() to create a copy // of this buffer before writing to the buffer. static_cast<uint8_t*>(const_cast<void*>(buf)), capacity, static_cast<uint8_t*>(const_cast<void*>(buf)), capacity) {} IOBuf::IOBuf(WrapBufferOp op, ByteRange br) noexcept : IOBuf(op, br.data(), br.size()) {} unique_ptr<IOBuf> IOBuf::wrapBuffer(const void* buf, std::size_t capacity) { return std::make_unique<IOBuf>(WRAP_BUFFER, buf, capacity); } IOBuf IOBuf::wrapBufferAsValue(const void* buf, std::size_t capacity) noexcept { return IOBuf(WrapBufferOp::WRAP_BUFFER, buf, capacity); } IOBuf::IOBuf() noexcept = default; IOBuf::IOBuf(IOBuf&& other) noexcept : data_(other.data_), buf_(other.buf_), length_(other.length_), capacity_(other.capacity_), flagsAndSharedInfo_(other.flagsAndSharedInfo_) { // Reset other so it is a clean state to be destroyed. other.data_ = nullptr; other.buf_ = nullptr; other.length_ = 0; other.capacity_ = 0; other.flagsAndSharedInfo_ = 0; // If other was part of the chain, assume ownership of the rest of its chain. // (It's only valid to perform move assignment on the head of a chain.) if (other.next_ != &other) { next_ = other.next_; next_->prev_ = this; other.next_ = &other; prev_ = other.prev_; prev_->next_ = this; other.prev_ = &other; } // Sanity check to make sure that other is in a valid state to be destroyed. DCHECK_EQ(other.prev_, &other); DCHECK_EQ(other.next_, &other); } IOBuf::IOBuf(const IOBuf& other) { *this = other.cloneAsValue(); } IOBuf::IOBuf( InternalConstructor, uintptr_t flagsAndSharedInfo, uint8_t* buf, std::size_t capacity, uint8_t* data, std::size_t length) noexcept : next_(this), prev_(this), data_(data), buf_(buf), length_(length), capacity_(capacity), flagsAndSharedInfo_(flagsAndSharedInfo) { assert(data >= buf); assert(data + length <= buf + capacity); CHECK(!folly::asan_region_is_poisoned(buf, capacity)); } IOBuf::~IOBuf() { // Destroying an IOBuf destroys the entire chain. // Users of IOBuf should only explicitly delete the head of any chain. // The other elements in the chain will be automatically destroyed. while (next_ != this) { // Since unlink() returns unique_ptr() and we don't store it, // it will automatically delete the unlinked element. 
(void)next_->unlink(); } decrementRefcount(); } IOBuf& IOBuf::operator=(IOBuf&& other) noexcept { if (this == &other) { return *this; } // If we are part of a chain, delete the rest of the chain. while (next_ != this) { // Since unlink() returns unique_ptr() and we don't store it, // it will automatically delete the unlinked element. (void)next_->unlink(); } // Decrement our refcount on the current buffer decrementRefcount(); // Take ownership of the other buffer's data data_ = other.data_; buf_ = other.buf_; length_ = other.length_; capacity_ = other.capacity_; flagsAndSharedInfo_ = other.flagsAndSharedInfo_; // Reset other so it is a clean state to be destroyed. other.data_ = nullptr; other.buf_ = nullptr; other.length_ = 0; other.capacity_ = 0; other.flagsAndSharedInfo_ = 0; // If other was part of the chain, assume ownership of the rest of its chain. // (It's only valid to perform move assignment on the head of a chain.) if (other.next_ != &other) { next_ = other.next_; next_->prev_ = this; other.next_ = &other; prev_ = other.prev_; prev_->next_ = this; other.prev_ = &other; } // Sanity check to make sure that other is in a valid state to be destroyed. DCHECK_EQ(other.prev_, &other); DCHECK_EQ(other.next_, &other); return *this; } IOBuf& IOBuf::operator=(const IOBuf& other) { if (this != &other) { *this = IOBuf(other); } return *this; } bool IOBuf::empty() const { const IOBuf* current = this; do { if (current->length() != 0) { return false; } current = current->next_; } while (current != this); return true; } size_t IOBuf::countChainElements() const { size_t numElements = 1; for (IOBuf* current = next_; current != this; current = current->next_) { ++numElements; } return numElements; } std::size_t IOBuf::computeChainDataLength() const { std::size_t fullLength = length_; for (IOBuf* current = next_; current != this; current = current->next_) { fullLength += current->length_; } return fullLength; } std::size_t IOBuf::computeChainCapacity() const { std::size_t fullCapacity = capacity_; for (IOBuf* current = next_; current != this; current = current->next_) { fullCapacity += current->capacity_; } return fullCapacity; } void IOBuf::prependChain(unique_ptr<IOBuf>&& iobuf) { // Take ownership of the specified IOBuf IOBuf* other = iobuf.release(); // Remember the pointer to the tail of the other chain IOBuf* otherTail = other->prev_; // Hook up prev_->next_ to point at the start of the other chain, // and other->prev_ to point at prev_ prev_->next_ = other; other->prev_ = prev_; // Hook up otherTail->next_ to point at us, // and prev_ to point back at otherTail, otherTail->next_ = this; prev_ = otherTail; } unique_ptr<IOBuf> IOBuf::clone() const { auto tmp = cloneOne(); for (IOBuf* current = next_; current != this; current = current->next_) { tmp->prependChain(current->cloneOne()); } return tmp; } unique_ptr<IOBuf> IOBuf::cloneOne() const { if (SharedInfo* info = sharedInfo()) { info->refcount.fetch_add(1, std::memory_order_acq_rel); } return std::unique_ptr<IOBuf>(new IOBuf( InternalConstructor(), flagsAndSharedInfo_, buf_, capacity_, data_, length_)); } unique_ptr<IOBuf> IOBuf::cloneCoalesced() const { return std::make_unique<IOBuf>(cloneCoalescedAsValue()); } unique_ptr<IOBuf> IOBuf::cloneCoalescedWithHeadroomTailroom( std::size_t newHeadroom, std::size_t newTailroom) const { return std::make_unique<IOBuf>( cloneCoalescedAsValueWithHeadroomTailroom(newHeadroom, newTailroom)); } IOBuf IOBuf::cloneAsValue() const { auto tmp = cloneOneAsValue(); for (IOBuf* current = next_; current != this; 
current = current->next_) { tmp.prependChain(current->cloneOne()); } return tmp; } IOBuf IOBuf::cloneOneAsValue() const { if (SharedInfo* info = sharedInfo()) { info->refcount.fetch_add(1, std::memory_order_acq_rel); } return IOBuf( InternalConstructor(), flagsAndSharedInfo_, buf_, capacity_, data_, length_); } IOBuf IOBuf::cloneCoalescedAsValue() const { const std::size_t newHeadroom = headroom(); const std::size_t newTailroom = prev()->tailroom(); return cloneCoalescedAsValueWithHeadroomTailroom(newHeadroom, newTailroom); } IOBuf IOBuf::cloneCoalescedAsValueWithHeadroomTailroom( std::size_t newHeadroom, std::size_t newTailroom) const { if (!isChained() && newHeadroom <= headroom() && newTailroom <= tailroom()) { return cloneOneAsValue(); } // Coalesce into newBuf const std::size_t newLength = computeChainDataLength(); const std::size_t newCapacity = newLength + newHeadroom + newTailroom; IOBuf newBuf{CREATE, newCapacity}; newBuf.advance(newHeadroom); auto current = this; do { if (current->length() > 0) { DCHECK_NOTNULL(current->data()); DCHECK_LE(current->length(), newBuf.tailroom()); memcpy(newBuf.writableTail(), current->data(), current->length()); newBuf.append(current->length()); } current = current->next(); } while (current != this); DCHECK_EQ(newLength, newBuf.length()); DCHECK_EQ(newHeadroom, newBuf.headroom()); DCHECK_LE(newTailroom, newBuf.tailroom()); return newBuf; } void IOBuf::unshareOneSlow() { // Allocate a new buffer for the data uint8_t* buf; SharedInfo* sharedInfo; std::size_t actualCapacity; allocExtBuffer(capacity_, &buf, &sharedInfo, &actualCapacity); // Copy the data // Maintain the same amount of headroom. Since we maintained the same // minimum capacity we also maintain at least the same amount of tailroom. std::size_t headlen = headroom(); if (length_ > 0) { assert(data_ != nullptr); memcpy(buf + headlen, data_, length_); } // Release our reference on the old buffer decrementRefcount(); // Make sure flags are all cleared. setFlagsAndSharedInfo(0, sharedInfo); // Update the buffer pointers to point to the new buffer data_ = buf + headlen; buf_ = buf; } void IOBuf::unshareChained() { // unshareChained() should only be called if we are part of a chain of // multiple IOBufs. The caller should have already verified this. assert(isChained()); IOBuf* current = this; while (true) { if (current->isSharedOne()) { // we have to unshare break; } current = current->next_; if (current == this) { // None of the IOBufs in the chain are shared, // so return without doing anything return; } } // We have to unshare. Let coalesceSlow() do the work. coalesceSlow(); } void IOBuf::markExternallyShared() { IOBuf* current = this; do { current->markExternallySharedOne(); current = current->next_; } while (current != this); } void IOBuf::makeManagedChained() { assert(isChained()); IOBuf* current = this; while (true) { current->makeManagedOne(); current = current->next_; if (current == this) { break; } } } void IOBuf::coalesceSlow() { // coalesceSlow() should only be called if we are part of a chain of multiple // IOBufs. The caller should have already verified this. DCHECK(isChained()); // Compute the length of the entire chain std::size_t newLength = 0; IOBuf* end = this; do { newLength += end->length_; end = end->next_; } while (end != this); coalesceAndReallocate(newLength, end); // We should be only element left in the chain now DCHECK(!isChained()); } void IOBuf::coalesceSlow(size_t maxLength) { // coalesceSlow() should only be called if we are part of a chain of multiple // IOBufs. 
The caller should have already verified this. DCHECK(isChained()); DCHECK_LT(length_, maxLength); // Compute the length of the entire chain std::size_t newLength = 0; IOBuf* end = this; while (true) { newLength += end->length_; end = end->next_; if (newLength >= maxLength) { break; } if (end == this) { throw_exception<std::overflow_error>( "attempted to coalesce more data than " "available"); } } coalesceAndReallocate(newLength, end); // We should have the requested length now DCHECK_GE(length_, maxLength); } void IOBuf::coalesceAndReallocate( size_t newHeadroom, size_t newLength, IOBuf* end, size_t newTailroom) { std::size_t newCapacity = newLength + newHeadroom + newTailroom; // Allocate space for the coalesced buffer. // We always convert to an external buffer, even if we happened to be an // internal buffer before. uint8_t* newBuf; SharedInfo* newInfo; std::size_t actualCapacity; allocExtBuffer(newCapacity, &newBuf, &newInfo, &actualCapacity); // Copy the data into the new buffer uint8_t* newData = newBuf + newHeadroom; uint8_t* p = newData; IOBuf* current = this; size_t remaining = newLength; do { if (current->length_ > 0) { assert(current->length_ <= remaining); assert(current->data_ != nullptr); remaining -= current->length_; memcpy(p, current->data_, current->length_); p += current->length_; } current = current->next_; } while (current != end); assert(remaining == 0); // Point at the new buffer decrementRefcount(); // Make sure flags are all cleared. setFlagsAndSharedInfo(0, newInfo); capacity_ = actualCapacity; buf_ = newBuf; data_ = newData; length_ = newLength; // Separate from the rest of our chain. // Since we don't store the unique_ptr returned by separateChain(), // this will immediately delete the returned subchain. if (isChained()) { (void)separateChain(next_, current->prev_); } } void IOBuf::decrementRefcount() noexcept { // Externally owned buffers don't have a SharedInfo object and aren't managed // by the reference count SharedInfo* info = sharedInfo(); if (!info) { return; } // Avoid doing atomic decrement if the refcount is 1. // This is safe, because it means that we're the last reference and destroying // the object. Anything trying to copy it is already undefined behavior. if (info->refcount.load(std::memory_order_acquire) > 1) { // Decrement the refcount uint32_t newcnt = info->refcount.fetch_sub(1, std::memory_order_acq_rel); // Note that fetch_sub() returns the value before we decremented. // If it is 1, we were the only remaining user; if it is greater there are // still other users. if (newcnt > 1) { return; } } // save the useHeapFullStorage flag here since // freeExtBuffer can delete the sharedInfo() bool useHeapFullStorage = info->useHeapFullStorage; // We were the last user. Free the buffer freeExtBuffer(); // Free the SharedInfo if it was allocated separately. // // This is only used by takeOwnership(). // // To avoid this special case handling in decrementRefcount(), we could have // takeOwnership() set a custom freeFn() that calls the user's free function // then frees the SharedInfo object. (This would require that // takeOwnership() store the user's free function with its allocated // SharedInfo object.) However, handling this specially with a flag seems // like it shouldn't be problematic. 
if (flags() & kFlagFreeSharedInfo) { delete info; } else { if (useHeapFullStorage) { SharedInfo::releaseStorage(info); } } } void IOBuf::reserveSlow(std::size_t minHeadroom, std::size_t minTailroom) { size_t newCapacity = (size_t)length_ + minHeadroom + minTailroom; DCHECK_LT(newCapacity, UINT32_MAX); // reserveSlow() is dangerous if anyone else is sharing the buffer, as we may // reallocate and free the original buffer. It should only ever be called if // we are the only user of the buffer. DCHECK(!isSharedOne()); // We'll need to reallocate the buffer. // There are a few options. // - If we have enough total room, move the data around in the buffer // and adjust the data_ pointer. // - If we're using an internal buffer, we'll switch to an external // buffer with enough headroom and tailroom. // - If we have enough headroom (headroom() >= minHeadroom) but not too much // (so we don't waste memory), we can try one of two things, depending on // whether we use jemalloc or not: // - If using jemalloc, we can try to expand in place, avoiding a memcpy() // - If not using jemalloc and we don't have too much to copy, // we'll use realloc() (note that realloc might have to copy // headroom + data + tailroom, see smartRealloc in folly/memory/Malloc.h) // - Otherwise, bite the bullet and reallocate. if (headroom() + tailroom() >= minHeadroom + minTailroom) { uint8_t* newData = writableBuffer() + minHeadroom; memmove(newData, data_, length_); data_ = newData; return; } size_t newAllocatedCapacity = 0; uint8_t* newBuffer = nullptr; std::size_t newHeadroom = 0; std::size_t oldHeadroom = headroom(); // If we have a buffer allocated with malloc and we just need more tailroom, // try to use realloc()/xallocx() to grow the buffer in place. SharedInfo* info = sharedInfo(); bool useHeapFullStorage = info && info->useHeapFullStorage; if (info && (info->freeFn == nullptr) && length_ != 0 && oldHeadroom >= minHeadroom) { size_t headSlack = oldHeadroom - minHeadroom; newAllocatedCapacity = goodExtBufferSize(newCapacity + headSlack); if (usingJEMalloc()) { // We assume that tailroom is more useful and more important than // headroom (not least because realloc / xallocx allow us to grow the // buffer at the tail, but not at the head) So, if we have more headroom // than we need, we consider that "wasted". We arbitrarily define "too // much" headroom to be 25% of the capacity. if (headSlack * 4 <= newCapacity) { size_t allocatedCapacity = capacity() + sizeof(SharedInfo); void* p = buf_; if (allocatedCapacity >= jemallocMinInPlaceExpandable) { if (xallocx(p, newAllocatedCapacity, 0, 0) == newAllocatedCapacity) { if (io_buf_free_cb) { io_buf_free_cb(p, reinterpret_cast<size_t>(info->userData)); } newBuffer = static_cast<uint8_t*>(p); newHeadroom = oldHeadroom; // update the userData info->userData = reinterpret_cast<void*>(newAllocatedCapacity); if (io_buf_alloc_cb) { io_buf_alloc_cb(newBuffer, newAllocatedCapacity); } } // if xallocx failed, do nothing, fall back to malloc/memcpy/free } } } else { // Not using jemalloc size_t copySlack = capacity() - length_; if (copySlack * 2 <= length_) { void* p = realloc(buf_, newAllocatedCapacity); if (UNLIKELY(p == nullptr)) { throw_exception<std::bad_alloc>(); } newBuffer = static_cast<uint8_t*>(p); newHeadroom = oldHeadroom; } } } // None of the previous reallocation strategies worked (or we're using // an internal buffer). malloc/copy/free. 
if (newBuffer == nullptr) { newAllocatedCapacity = goodExtBufferSize(newCapacity); newBuffer = static_cast<uint8_t*>(checkedMalloc(newAllocatedCapacity)); if (length_ > 0) { assert(data_ != nullptr); memcpy(newBuffer + minHeadroom, data_, length_); } if (sharedInfo()) { freeExtBuffer(); } newHeadroom = minHeadroom; } std::size_t cap; initExtBuffer(newBuffer, newAllocatedCapacity, &info, &cap); if (flags() & kFlagFreeSharedInfo) { delete sharedInfo(); } else { if (useHeapFullStorage) { SharedInfo::releaseStorage(sharedInfo()); } } setFlagsAndSharedInfo(0, info); capacity_ = cap; buf_ = newBuffer; data_ = newBuffer + newHeadroom; // length_ is unchanged } // The user's free function should never throw. Otherwise we might throw from // the IOBuf destructor. Other code paths like coalesce() also assume that // decrementRefcount() cannot throw. void IOBuf::freeExtBuffer() noexcept { SharedInfo* info = sharedInfo(); DCHECK(info); // save the observerListHead // since the SharedInfo can be freed auto observerListHead = info->observerListHead; info->observerListHead = nullptr; if (info->freeFn) { info->freeFn(buf_, info->userData); } else { // this will invoke free if info->userData is 0 size_t size = reinterpret_cast<size_t>(info->userData); if (size) { if (io_buf_free_cb) { io_buf_free_cb(buf_, size); } folly::sizedFree(buf_, size); } else { free(buf_); } } SharedInfo::invokeAndDeleteEachObserver( observerListHead, [](auto& entry) { entry.afterFreeExtBuffer(); }); if (kIsMobile) { buf_ = nullptr; } } void IOBuf::allocExtBuffer( std::size_t minCapacity, uint8_t** bufReturn, SharedInfo** infoReturn, std::size_t* capacityReturn) { size_t mallocSize = goodExtBufferSize(minCapacity); auto buf = static_cast<uint8_t*>(checkedMalloc(mallocSize)); initExtBuffer(buf, mallocSize, infoReturn, capacityReturn); // the userData and the freeFn are nullptr here // just store the mallocSize in userData (*infoReturn)->userData = reinterpret_cast<void*>(mallocSize); if (io_buf_alloc_cb) { io_buf_alloc_cb(buf, mallocSize); } *bufReturn = buf; } size_t IOBuf::goodExtBufferSize(std::size_t minCapacity) { // Determine how much space we should allocate. We'll store the SharedInfo // for the external buffer just after the buffer itself. (We store it just // after the buffer rather than just before so that the code can still just // use free(buf_) to free the buffer.) size_t minSize = static_cast<size_t>(minCapacity) + sizeof(SharedInfo); // Add room for padding so that the SharedInfo will be aligned on an 8-byte // boundary. minSize = (minSize + 7) & ~7; // Use goodMallocSize() to bump up the capacity to a decent size to request // from malloc, so we can use all of the space that malloc will probably give // us anyway. return goodMallocSize(minSize); } void IOBuf::initExtBuffer( uint8_t* buf, size_t mallocSize, SharedInfo** infoReturn, std::size_t* capacityReturn) { // Find the SharedInfo storage at the end of the buffer // and construct the SharedInfo. uint8_t* infoStart = (buf + mallocSize) - sizeof(SharedInfo); auto sharedInfo = new (infoStart) SharedInfo; *capacityReturn = std::size_t(infoStart - buf); *infoReturn = sharedInfo; } fbstring IOBuf::moveToFbString() { // we need to save useHeapFullStorage and the observerListHead since // sharedInfo() may not be valid after fbstring str bool useHeapFullStorage = false; SharedInfoObserverEntryBase* observerListHead = nullptr; // malloc-allocated buffers are just fine, everything else needs // to be turned into one. 
if (!sharedInfo() || // user owned, not ours to give up sharedInfo()->freeFn || // not malloc()-ed headroom() != 0 || // malloc()-ed block doesn't start at beginning tailroom() == 0 || // no room for NUL terminator isShared() || // shared isChained()) { // chained // We might as well get rid of all head and tailroom if we're going // to reallocate; we need 1 byte for NUL terminator. coalesceAndReallocate(0, computeChainDataLength(), this, 1); } else { auto info = sharedInfo(); if (info) { // if we do not call coalesceAndReallocate // we might need to call SharedInfo::releaseStorage() // and/or SharedInfo::invokeAndDeleteEachObserver() useHeapFullStorage = info->useHeapFullStorage; // save the observerListHead // the coalesceAndReallocate path will call // decrementRefcount and freeExtBuffer if needed // so the observer lis notification is needed here observerListHead = info->observerListHead; info->observerListHead = nullptr; } } // Ensure NUL terminated *writableTail() = 0; fbstring str( reinterpret_cast<char*>(writableData()), length(), capacity(), AcquireMallocatedString()); if (io_buf_free_cb && sharedInfo() && sharedInfo()->userData) { io_buf_free_cb( writableData(), reinterpret_cast<size_t>(sharedInfo()->userData)); } SharedInfo::invokeAndDeleteEachObserver( observerListHead, [](auto& entry) { entry.afterReleaseExtBuffer(); }); if (flags() & kFlagFreeSharedInfo) { delete sharedInfo(); } else { if (useHeapFullStorage) { SharedInfo::releaseStorage(sharedInfo()); } } // Reset to a state where we can be deleted cleanly flagsAndSharedInfo_ = 0; buf_ = nullptr; clear(); return str; } IOBuf::Iterator IOBuf::cbegin() const { return Iterator(this, this); } IOBuf::Iterator IOBuf::cend() const { return Iterator(nullptr, nullptr); } folly::fbvector<struct iovec> IOBuf::getIov() const { folly::fbvector<struct iovec> iov; iov.reserve(countChainElements()); appendToIov(&iov); return iov; } void IOBuf::appendToIov(folly::fbvector<struct iovec>* iov) const { IOBuf const* p = this; do { // some code can get confused by empty iovs, so skip them if (p->length() > 0) { iov->push_back({(void*)p->data(), folly::to<size_t>(p->length())}); } p = p->next(); } while (p != this); } unique_ptr<IOBuf> IOBuf::wrapIov(const iovec* vec, size_t count) { unique_ptr<IOBuf> result = nullptr; for (size_t i = 0; i < count; ++i) { size_t len = vec[i].iov_len; void* data = vec[i].iov_base; if (len > 0) { auto buf = wrapBuffer(data, len); if (!result) { result = std::move(buf); } else { result->prependChain(std::move(buf)); } } } if (UNLIKELY(result == nullptr)) { return create(0); } return result; } std::unique_ptr<IOBuf> IOBuf::takeOwnershipIov( const iovec* vec, size_t count, FreeFunction freeFn, void* userData, bool freeOnError) { unique_ptr<IOBuf> result = nullptr; for (size_t i = 0; i < count; ++i) { size_t len = vec[i].iov_len; void* data = vec[i].iov_base; if (len > 0) { auto buf = takeOwnership(data, len, freeFn, userData, freeOnError); if (!result) { result = std::move(buf); } else { result->prependChain(std::move(buf)); } } } if (UNLIKELY(result == nullptr)) { return create(0); } return result; } IOBuf::FillIovResult IOBuf::fillIov(struct iovec* iov, size_t len) const { IOBuf const* p = this; size_t i = 0; size_t totalBytes = 0; while (i < len) { // some code can get confused by empty iovs, so skip them if (p->length() > 0) { iov[i].iov_base = const_cast<uint8_t*>(p->data()); iov[i].iov_len = p->length(); totalBytes += p->length(); i++; } p = p->next(); if (p == this) { return {i, totalBytes}; } } return {0, 0}; } 
uint32_t IOBuf::approximateShareCountOne() const { if (UNLIKELY(!sharedInfo())) { return 1U; } return sharedInfo()->refcount.load(std::memory_order_acquire); } size_t IOBufHash::operator()(const IOBuf& buf) const noexcept { folly::hash::SpookyHashV2 hasher; hasher.Init(0, 0); io::Cursor cursor(&buf); for (;;) { auto b = cursor.peekBytes(); if (b.empty()) { break; } hasher.Update(b.data(), b.size()); cursor.skip(b.size()); } uint64_t h1; uint64_t h2; hasher.Final(&h1, &h2); return static_cast<std::size_t>(h1); } ordering IOBufCompare::impl(const IOBuf& a, const IOBuf& b) const noexcept { io::Cursor ca(&a); io::Cursor cb(&b); for (;;) { auto ba = ca.peekBytes(); auto bb = cb.peekBytes(); if (ba.empty() || bb.empty()) { return to_ordering(int(bb.empty()) - int(ba.empty())); } const size_t n = std::min(ba.size(), bb.size()); DCHECK_GT(n, 0u); const ordering r = to_ordering(std::memcmp(ba.data(), bb.data(), n)); if (r != ordering::eq) { return r; } // Cursor::skip() may throw if n is too large, but n is not too large here ca.skip(n); cb.skip(n); } } } // namespace folly
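The file above co-locates the IOBuf object, its buffer data, and/or its SharedInfo in one heap segment and releases that segment cooperatively: IOBuf::releaseStorage() clears this owner's HeapPrefix flag bits (kIOBufInUse, kDataInUse, kSharedInfoInUse) and frees the memory only when no bit remains set. Below is a minimal standalone sketch of that protocol, not folly's code; Storage and release() are simplified stand-ins for HeapStorage/HeapPrefix and releaseStorage(), and the segment is assumed to come from plain malloc().

#include <atomic>
#include <cstdint>
#include <cstdlib>

namespace sketch {

// Stand-in for HeapStorage/HeapPrefix: one heap segment, one atomic word whose
// set bits record which co-located objects (IOBuf, data, SharedInfo) are still
// alive.
struct Storage {
  std::atomic<uint16_t> flags;
};

// Clear this owner's bits; whoever clears the last bit frees the segment.
inline void release(Storage* storage, uint16_t freeFlags) {
  auto flags = storage->flags.load(std::memory_order_acquire);
  while (true) {
    auto newFlags = uint16_t(flags & ~freeFlags);
    if (newFlags == 0) {
      // Every other owner already published its cleared bits, so nobody else
      // can touch this segment again: free it.
      std::free(storage);
      return;
    }
    // Other owners remain; just publish the cleared bits. On failure the CAS
    // reloads `flags` with the competing thread's update and we try again.
    if (storage->flags.compare_exchange_weak(
            flags, newFlags, std::memory_order_acq_rel)) {
      return;
    }
  }
}

} // namespace sketch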
null
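The copy that follows is the hardened revision of the same file: it adds kMaxIOBufSize (SIZE_MAX >> 1) and rejects oversized or overflowing capacities in operator new, create(), createCombined(), takeOwnership(), and reserveSlow(), the last via folly::checked_add from <folly/lang/CheckedMath.h>. The sketch below shows that guard in isolation; checkedAdd and reserveCapacityOrThrow are simplified, hypothetical stand-ins for the real helpers, and the unguarded expression quoted in the comment is the one used by the copy above.

#include <cstddef>
#include <limits>
#include <new>

namespace sketch {

// Mirrors the cap added in the patched copy: capacities above this are refused.
constexpr std::size_t kMaxIOBufSize =
    std::numeric_limits<std::size_t>::max() >> 1;

// Overflow-checked addition in the spirit of folly::checked_add: stores a + b
// in *out and returns true, or returns false if the sum would wrap around.
inline bool checkedAdd(std::size_t* out, std::size_t a, std::size_t b) {
  if (a > std::numeric_limits<std::size_t>::max() - b) {
    return false;
  }
  *out = a + b;
  return true;
}

// The unpatched reserveSlow() above computes
//   size_t newCapacity = (size_t)length_ + minHeadroom + minTailroom;
// with no guard, so very large headroom/tailroom requests can wrap newCapacity
// around, the allocation comes out too small, and the later memcpy()/memmove()
// writes past its end. The patched copy below does the equivalent of:
inline std::size_t reserveCapacityOrThrow(std::size_t length,
                                          std::size_t minHeadroom,
                                          std::size_t minTailroom) {
  std::size_t newCapacity = length;
  if (!checkedAdd(&newCapacity, newCapacity, minHeadroom) ||
      !checkedAdd(&newCapacity, newCapacity, minTailroom) ||
      newCapacity > kMaxIOBufSize) {
    throw std::bad_alloc(); // refuse the request instead of under-allocating
  }
  return newCapacity;
}

} // namespace sketch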
/* * Copyright (c) Facebook, Inc. and its affiliates. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS #endif #include <folly/io/IOBuf.h> #include <cassert> #include <cstdint> #include <cstdlib> #include <limits> #include <stdexcept> #include <folly/Conv.h> #include <folly/Likely.h> #include <folly/Memory.h> #include <folly/ScopeGuard.h> #include <folly/hash/SpookyHashV2.h> #include <folly/io/Cursor.h> #include <folly/lang/Align.h> #include <folly/lang/CheckedMath.h> #include <folly/lang/Exception.h> #include <folly/memory/Malloc.h> #include <folly/memory/SanitizeAddress.h> /* * Callbacks that will be invoked when IOBuf allocates or frees memory. * Note that io_buf_alloc_cb() will also be invoked when IOBuf takes ownership * of a malloc-allocated buffer, even if it was allocated earlier by another * part of the code. * * By default these are unimplemented, but programs can define these functions * to perform their own custom logic on memory allocation. This is intended * primarily to help programs track memory usage and possibly take action * when thresholds are hit. Callers should generally avoid performing any * expensive work in these callbacks, since they may be called from arbitrary * locations in the code that use IOBuf, possibly while holding locks. */ #if FOLLY_HAVE_WEAK_SYMBOLS FOLLY_ATTR_WEAK void io_buf_alloc_cb(void* /*ptr*/, size_t /*size*/) noexcept; FOLLY_ATTR_WEAK void io_buf_free_cb(void* /*ptr*/, size_t /*size*/) noexcept; #else static void (*io_buf_alloc_cb)(void* /*ptr*/, size_t /*size*/) noexcept = nullptr; static void (*io_buf_free_cb)(void* /*ptr*/, size_t /*size*/) noexcept = nullptr; #endif using std::unique_ptr; namespace { enum : uint16_t { kHeapMagic = 0xa5a5, // This memory segment contains an IOBuf that is still in use kIOBufInUse = 0x01, // This memory segment contains buffer data that is still in use kDataInUse = 0x02, // This memory segment contains a SharedInfo that is still in use kSharedInfoInUse = 0x04, }; enum : std::size_t { // When create() is called for buffers less than kDefaultCombinedBufSize, // we allocate a single combined memory segment for the IOBuf and the data // together. See the comments for createCombined()/createSeparate() for more // details. // // (The size of 1k is largely just a guess here. We could could probably do // benchmarks of real applications to see if adjusting this number makes a // difference. Callers that know their exact use case can also explicitly // call createCombined() or createSeparate().) kDefaultCombinedBufSize = 1024, kMaxIOBufSize = std::numeric_limits<size_t>::max() >> 1, }; // Helper function for IOBuf::takeOwnership() // The user's free function is not allowed to throw. // (We are already in the middle of throwing an exception, so // we cannot let this exception go unhandled.) 
void takeOwnershipError( bool freeOnError, void* buf, folly::IOBuf::FreeFunction freeFn, void* userData) noexcept { if (!freeOnError) { return; } if (!freeFn) { free(buf); return; } freeFn(buf, userData); } } // namespace namespace folly { // use free for size >= 4GB // since we can store only 32 bits in the size var struct IOBuf::HeapPrefix { HeapPrefix(uint16_t flg, size_t sz) : magic(kHeapMagic), flags(flg), size((sz == ((size_t)(uint32_t)sz)) ? static_cast<uint32_t>(sz) : 0) {} ~HeapPrefix() { // Reset magic to 0 on destruction. This is solely for debugging purposes // to help catch bugs where someone tries to use HeapStorage after it has // been deleted. magic = 0; } uint16_t magic; std::atomic<uint16_t> flags; uint32_t size; }; struct IOBuf::HeapStorage { HeapPrefix prefix; // The IOBuf is last in the HeapStorage object. // This way operator new will work even if allocating a subclass of IOBuf // that requires more space. folly::IOBuf buf; }; struct IOBuf::HeapFullStorage { // Make sure jemalloc allocates from the 64-byte class. Putting this here // because HeapStorage is private so it can't be at namespace level. static_assert(sizeof(HeapStorage) <= 64, "IOBuf may not grow over 56 bytes!"); HeapStorage hs; SharedInfo shared; folly::max_align_t align; }; IOBuf::SharedInfo::SharedInfo() : freeFn(nullptr), userData(nullptr), useHeapFullStorage(false) { // Use relaxed memory ordering here. Since we are creating a new SharedInfo, // no other threads should be referring to it yet. refcount.store(1, std::memory_order_relaxed); } IOBuf::SharedInfo::SharedInfo(FreeFunction fn, void* arg, bool hfs) : freeFn(fn), userData(arg), useHeapFullStorage(hfs) { // Use relaxed memory ordering here. Since we are creating a new SharedInfo, // no other threads should be referring to it yet. refcount.store(1, std::memory_order_relaxed); } void IOBuf::SharedInfo::invokeAndDeleteEachObserver( SharedInfoObserverEntryBase* observerListHead, ObserverCb cb) noexcept { if (observerListHead && cb) { // break the chain observerListHead->prev->next = nullptr; auto entry = observerListHead; while (entry) { auto tmp = entry->next; cb(*entry); delete entry; entry = tmp; } } } void IOBuf::SharedInfo::releaseStorage(SharedInfo* info) noexcept { if (info->useHeapFullStorage) { auto storageAddr = reinterpret_cast<uint8_t*>(info) - offsetof(HeapFullStorage, shared); auto storage = reinterpret_cast<HeapFullStorage*>(storageAddr); info->~SharedInfo(); IOBuf::releaseStorage(&storage->hs, kSharedInfoInUse); } } void* IOBuf::operator new(size_t size) { if (size > kMaxIOBufSize) { throw_exception<std::bad_alloc>(); } size_t fullSize = offsetof(HeapStorage, buf) + size; auto storage = static_cast<HeapStorage*>(checkedMalloc(fullSize)); new (&storage->prefix) HeapPrefix(kIOBufInUse, fullSize); if (io_buf_alloc_cb) { io_buf_alloc_cb(storage, fullSize); } return &(storage->buf); } void* IOBuf::operator new(size_t /* size */, void* ptr) { return ptr; } void IOBuf::operator delete(void* ptr) { auto storageAddr = static_cast<uint8_t*>(ptr) - offsetof(HeapStorage, buf); auto storage = reinterpret_cast<HeapStorage*>(storageAddr); releaseStorage(storage, kIOBufInUse); } void IOBuf::operator delete(void* /* ptr */, void* /* placement */) { // Provide matching operator for `IOBuf::new` to avoid MSVC compilation // warning (C4291) about memory leak when exception is thrown in the // constructor. 
} void IOBuf::releaseStorage(HeapStorage* storage, uint16_t freeFlags) noexcept { CHECK_EQ(storage->prefix.magic, static_cast<uint16_t>(kHeapMagic)); // Use relaxed memory order here. If we are unlucky and happen to get // out-of-date data the compare_exchange_weak() call below will catch // it and load new data with memory_order_acq_rel. auto flags = storage->prefix.flags.load(std::memory_order_acquire); DCHECK_EQ((flags & freeFlags), freeFlags); while (true) { auto newFlags = uint16_t(flags & ~freeFlags); if (newFlags == 0) { // save the size size_t size = storage->prefix.size; // The storage space is now unused. Free it. storage->prefix.HeapPrefix::~HeapPrefix(); if (FOLLY_LIKELY(size)) { if (io_buf_free_cb) { io_buf_free_cb(storage, size); } sizedFree(storage, size); } else { free(storage); } return; } // This storage segment still contains portions that are in use. // Just clear the flags specified in freeFlags for now. auto ret = storage->prefix.flags.compare_exchange_weak( flags, newFlags, std::memory_order_acq_rel); if (ret) { // We successfully updated the flags. return; } // We failed to update the flags. Some other thread probably updated them // and cleared some of the other bits. Continue around the loop to see if // we are the last user now, or if we need to try updating the flags again. } } void IOBuf::freeInternalBuf(void* /* buf */, void* userData) noexcept { auto storage = static_cast<HeapStorage*>(userData); releaseStorage(storage, kDataInUse); } IOBuf::IOBuf(CreateOp, std::size_t capacity) : next_(this), prev_(this), data_(nullptr), length_(0), flagsAndSharedInfo_(0) { SharedInfo* info; allocExtBuffer(capacity, &buf_, &info, &capacity_); setSharedInfo(info); data_ = buf_; } IOBuf::IOBuf( CopyBufferOp /* op */, const void* buf, std::size_t size, std::size_t headroom, std::size_t minTailroom) : IOBuf(CREATE, headroom + size + minTailroom) { advance(headroom); if (size > 0) { assert(buf != nullptr); memcpy(writableData(), buf, size); append(size); } } IOBuf::IOBuf( CopyBufferOp op, ByteRange br, std::size_t headroom, std::size_t minTailroom) : IOBuf(op, br.data(), br.size(), headroom, minTailroom) {} unique_ptr<IOBuf> IOBuf::create(std::size_t capacity) { if (capacity > kMaxIOBufSize) { throw_exception<std::bad_alloc>(); } // For smaller-sized buffers, allocate the IOBuf, SharedInfo, and the buffer // all with a single allocation. // // We don't do this for larger buffers since it can be wasteful if the user // needs to reallocate the buffer but keeps using the same IOBuf object. // In this case we can't free the data space until the IOBuf is also // destroyed. Callers can explicitly call createCombined() or // createSeparate() if they know their use case better, and know if they are // likely to reallocate the buffer later. 
if (capacity <= kDefaultCombinedBufSize) { return createCombined(capacity); } // if we have nallocx, we want to allocate the capacity and the overhead in // a single allocation only if we do not cross into the next allocation class // for some buffer sizes, this can use about 25% extra memory if (canNallocx()) { auto mallocSize = goodMallocSize(capacity); // round capacity to a multiple of 8 size_t minSize = ((capacity + 7) & ~7) + sizeof(SharedInfo); // if we do not have space for the overhead, allocate the mem separateley if (mallocSize < minSize) { auto* buf = checkedMalloc(mallocSize); return takeOwnership(SIZED_FREE, buf, mallocSize, 0, 0); } } return createSeparate(capacity); } unique_ptr<IOBuf> IOBuf::createCombined(std::size_t capacity) { if (capacity > kMaxIOBufSize) { throw_exception<std::bad_alloc>(); } // To save a memory allocation, allocate space for the IOBuf object, the // SharedInfo struct, and the data itself all with a single call to malloc(). size_t requiredStorage = offsetof(HeapFullStorage, align) + capacity; size_t mallocSize = goodMallocSize(requiredStorage); auto storage = static_cast<HeapFullStorage*>(checkedMalloc(mallocSize)); new (&storage->hs.prefix) HeapPrefix(kIOBufInUse | kDataInUse, mallocSize); new (&storage->shared) SharedInfo(freeInternalBuf, storage); if (io_buf_alloc_cb) { io_buf_alloc_cb(storage, mallocSize); } auto bufAddr = reinterpret_cast<uint8_t*>(&storage->align); uint8_t* storageEnd = reinterpret_cast<uint8_t*>(storage) + mallocSize; auto actualCapacity = size_t(storageEnd - bufAddr); unique_ptr<IOBuf> ret(new (&storage->hs.buf) IOBuf( InternalConstructor(), packFlagsAndSharedInfo(0, &storage->shared), bufAddr, actualCapacity, bufAddr, 0)); return ret; } unique_ptr<IOBuf> IOBuf::createSeparate(std::size_t capacity) { return std::make_unique<IOBuf>(CREATE, capacity); } unique_ptr<IOBuf> IOBuf::createChain( size_t totalCapacity, std::size_t maxBufCapacity) { unique_ptr<IOBuf> out = create(std::min(totalCapacity, size_t(maxBufCapacity))); size_t allocatedCapacity = out->capacity(); while (allocatedCapacity < totalCapacity) { unique_ptr<IOBuf> newBuf = create( std::min(totalCapacity - allocatedCapacity, size_t(maxBufCapacity))); allocatedCapacity += newBuf->capacity(); out->prependChain(std::move(newBuf)); } return out; } size_t IOBuf::goodSize(size_t minCapacity, CombinedOption combined) { if (combined == CombinedOption::DEFAULT) { combined = minCapacity <= kDefaultCombinedBufSize ? 
CombinedOption::COMBINED : CombinedOption::SEPARATE; } size_t overhead; if (combined == CombinedOption::COMBINED) { overhead = offsetof(HeapFullStorage, align); } else { // Pad minCapacity to a multiple of 8 minCapacity = (minCapacity + 7) & ~7; overhead = sizeof(SharedInfo); } size_t goodSize = folly::goodMallocSize(minCapacity + overhead); return goodSize - overhead; } IOBuf::IOBuf( TakeOwnershipOp, void* buf, std::size_t capacity, std::size_t offset, std::size_t length, FreeFunction freeFn, void* userData, bool freeOnError) : next_(this), prev_(this), data_(static_cast<uint8_t*>(buf) + offset), buf_(static_cast<uint8_t*>(buf)), length_(length), capacity_(capacity), flagsAndSharedInfo_( packFlagsAndSharedInfo(kFlagFreeSharedInfo, nullptr)) { // do not allow only user data without a freeFn // since we use that for folly::sizedFree DCHECK(!userData || (userData && freeFn)); auto rollback = makeGuard([&] { // takeOwnershipError(freeOnError, buf, freeFn, userData); }); setSharedInfo(new SharedInfo(freeFn, userData)); rollback.dismiss(); } IOBuf::IOBuf( TakeOwnershipOp, SizedFree, void* buf, std::size_t capacity, std::size_t offset, std::size_t length, bool freeOnError) : next_(this), prev_(this), data_(static_cast<uint8_t*>(buf) + offset), buf_(static_cast<uint8_t*>(buf)), length_(length), capacity_(capacity), flagsAndSharedInfo_( packFlagsAndSharedInfo(kFlagFreeSharedInfo, nullptr)) { auto rollback = makeGuard([&] { // takeOwnershipError(freeOnError, buf, nullptr, nullptr); }); setSharedInfo(new SharedInfo(nullptr, reinterpret_cast<void*>(capacity))); rollback.dismiss(); if (io_buf_alloc_cb && capacity) { io_buf_alloc_cb(buf, capacity); } } unique_ptr<IOBuf> IOBuf::takeOwnership( void* buf, std::size_t capacity, std::size_t offset, std::size_t length, FreeFunction freeFn, void* userData, bool freeOnError, TakeOwnershipOption option) { if (capacity > kMaxIOBufSize) { throw_exception<std::bad_alloc>(); } // do not allow only user data without a freeFn // since we use that for folly::sizedFree DCHECK( !userData || (userData && freeFn) || (userData && !freeFn && (option == TakeOwnershipOption::STORE_SIZE))); HeapFullStorage* storage = nullptr; auto rollback = makeGuard([&] { if (storage) { free(storage); } takeOwnershipError(freeOnError, buf, freeFn, userData); }); size_t requiredStorage = sizeof(HeapFullStorage); size_t mallocSize = goodMallocSize(requiredStorage); storage = static_cast<HeapFullStorage*>(checkedMalloc(mallocSize)); new (&storage->hs.prefix) HeapPrefix(kIOBufInUse | kSharedInfoInUse, mallocSize); new (&storage->shared) SharedInfo(freeFn, userData, true /*useHeapFullStorage*/); auto result = unique_ptr<IOBuf>(new (&storage->hs.buf) IOBuf( InternalConstructor(), packFlagsAndSharedInfo(0, &storage->shared), static_cast<uint8_t*>(buf), capacity, static_cast<uint8_t*>(buf) + offset, length)); rollback.dismiss(); if (io_buf_alloc_cb) { io_buf_alloc_cb(storage, mallocSize); if (userData && !freeFn && (option == TakeOwnershipOption::STORE_SIZE)) { // Even though we did not allocate the buffer, call io_buf_alloc_cb() // since we will call io_buf_free_cb() on destruction, and we want these // calls to be 1:1. io_buf_alloc_cb(buf, capacity); } } return result; } IOBuf::IOBuf(WrapBufferOp, const void* buf, std::size_t capacity) noexcept : IOBuf( InternalConstructor(), 0, // We cast away the const-ness of the buffer here. // This is okay since IOBuf users must use unshare() to create a copy // of this buffer before writing to the buffer. 
static_cast<uint8_t*>(const_cast<void*>(buf)), capacity, static_cast<uint8_t*>(const_cast<void*>(buf)), capacity) {} IOBuf::IOBuf(WrapBufferOp op, ByteRange br) noexcept : IOBuf(op, br.data(), br.size()) {} unique_ptr<IOBuf> IOBuf::wrapBuffer(const void* buf, std::size_t capacity) { return std::make_unique<IOBuf>(WRAP_BUFFER, buf, capacity); } IOBuf IOBuf::wrapBufferAsValue(const void* buf, std::size_t capacity) noexcept { return IOBuf(WrapBufferOp::WRAP_BUFFER, buf, capacity); } IOBuf::IOBuf() noexcept = default; IOBuf::IOBuf(IOBuf&& other) noexcept : data_(other.data_), buf_(other.buf_), length_(other.length_), capacity_(other.capacity_), flagsAndSharedInfo_(other.flagsAndSharedInfo_) { // Reset other so it is a clean state to be destroyed. other.data_ = nullptr; other.buf_ = nullptr; other.length_ = 0; other.capacity_ = 0; other.flagsAndSharedInfo_ = 0; // If other was part of the chain, assume ownership of the rest of its chain. // (It's only valid to perform move assignment on the head of a chain.) if (other.next_ != &other) { next_ = other.next_; next_->prev_ = this; other.next_ = &other; prev_ = other.prev_; prev_->next_ = this; other.prev_ = &other; } // Sanity check to make sure that other is in a valid state to be destroyed. DCHECK_EQ(other.prev_, &other); DCHECK_EQ(other.next_, &other); } IOBuf::IOBuf(const IOBuf& other) { *this = other.cloneAsValue(); } IOBuf::IOBuf( InternalConstructor, uintptr_t flagsAndSharedInfo, uint8_t* buf, std::size_t capacity, uint8_t* data, std::size_t length) noexcept : next_(this), prev_(this), data_(data), buf_(buf), length_(length), capacity_(capacity), flagsAndSharedInfo_(flagsAndSharedInfo) { assert(data >= buf); assert(data + length <= buf + capacity); CHECK(!folly::asan_region_is_poisoned(buf, capacity)); } IOBuf::~IOBuf() { // Destroying an IOBuf destroys the entire chain. // Users of IOBuf should only explicitly delete the head of any chain. // The other elements in the chain will be automatically destroyed. while (next_ != this) { // Since unlink() returns unique_ptr() and we don't store it, // it will automatically delete the unlinked element. (void)next_->unlink(); } decrementRefcount(); } IOBuf& IOBuf::operator=(IOBuf&& other) noexcept { if (this == &other) { return *this; } // If we are part of a chain, delete the rest of the chain. while (next_ != this) { // Since unlink() returns unique_ptr() and we don't store it, // it will automatically delete the unlinked element. (void)next_->unlink(); } // Decrement our refcount on the current buffer decrementRefcount(); // Take ownership of the other buffer's data data_ = other.data_; buf_ = other.buf_; length_ = other.length_; capacity_ = other.capacity_; flagsAndSharedInfo_ = other.flagsAndSharedInfo_; // Reset other so it is a clean state to be destroyed. other.data_ = nullptr; other.buf_ = nullptr; other.length_ = 0; other.capacity_ = 0; other.flagsAndSharedInfo_ = 0; // If other was part of the chain, assume ownership of the rest of its chain. // (It's only valid to perform move assignment on the head of a chain.) if (other.next_ != &other) { next_ = other.next_; next_->prev_ = this; other.next_ = &other; prev_ = other.prev_; prev_->next_ = this; other.prev_ = &other; } // Sanity check to make sure that other is in a valid state to be destroyed. 
DCHECK_EQ(other.prev_, &other); DCHECK_EQ(other.next_, &other); return *this; } IOBuf& IOBuf::operator=(const IOBuf& other) { if (this != &other) { *this = IOBuf(other); } return *this; } bool IOBuf::empty() const { const IOBuf* current = this; do { if (current->length() != 0) { return false; } current = current->next_; } while (current != this); return true; } size_t IOBuf::countChainElements() const { size_t numElements = 1; for (IOBuf* current = next_; current != this; current = current->next_) { ++numElements; } return numElements; } std::size_t IOBuf::computeChainDataLength() const { std::size_t fullLength = length_; for (IOBuf* current = next_; current != this; current = current->next_) { fullLength += current->length_; } return fullLength; } std::size_t IOBuf::computeChainCapacity() const { std::size_t fullCapacity = capacity_; for (IOBuf* current = next_; current != this; current = current->next_) { fullCapacity += current->capacity_; } return fullCapacity; } void IOBuf::prependChain(unique_ptr<IOBuf>&& iobuf) { // Take ownership of the specified IOBuf IOBuf* other = iobuf.release(); // Remember the pointer to the tail of the other chain IOBuf* otherTail = other->prev_; // Hook up prev_->next_ to point at the start of the other chain, // and other->prev_ to point at prev_ prev_->next_ = other; other->prev_ = prev_; // Hook up otherTail->next_ to point at us, // and prev_ to point back at otherTail, otherTail->next_ = this; prev_ = otherTail; } unique_ptr<IOBuf> IOBuf::clone() const { auto tmp = cloneOne(); for (IOBuf* current = next_; current != this; current = current->next_) { tmp->prependChain(current->cloneOne()); } return tmp; } unique_ptr<IOBuf> IOBuf::cloneOne() const { if (SharedInfo* info = sharedInfo()) { info->refcount.fetch_add(1, std::memory_order_acq_rel); } return std::unique_ptr<IOBuf>(new IOBuf( InternalConstructor(), flagsAndSharedInfo_, buf_, capacity_, data_, length_)); } unique_ptr<IOBuf> IOBuf::cloneCoalesced() const { return std::make_unique<IOBuf>(cloneCoalescedAsValue()); } unique_ptr<IOBuf> IOBuf::cloneCoalescedWithHeadroomTailroom( std::size_t newHeadroom, std::size_t newTailroom) const { return std::make_unique<IOBuf>( cloneCoalescedAsValueWithHeadroomTailroom(newHeadroom, newTailroom)); } IOBuf IOBuf::cloneAsValue() const { auto tmp = cloneOneAsValue(); for (IOBuf* current = next_; current != this; current = current->next_) { tmp.prependChain(current->cloneOne()); } return tmp; } IOBuf IOBuf::cloneOneAsValue() const { if (SharedInfo* info = sharedInfo()) { info->refcount.fetch_add(1, std::memory_order_acq_rel); } return IOBuf( InternalConstructor(), flagsAndSharedInfo_, buf_, capacity_, data_, length_); } IOBuf IOBuf::cloneCoalescedAsValue() const { const std::size_t newHeadroom = headroom(); const std::size_t newTailroom = prev()->tailroom(); return cloneCoalescedAsValueWithHeadroomTailroom(newHeadroom, newTailroom); } IOBuf IOBuf::cloneCoalescedAsValueWithHeadroomTailroom( std::size_t newHeadroom, std::size_t newTailroom) const { if (!isChained() && newHeadroom <= headroom() && newTailroom <= tailroom()) { return cloneOneAsValue(); } // Coalesce into newBuf const std::size_t newLength = computeChainDataLength(); const std::size_t newCapacity = newLength + newHeadroom + newTailroom; IOBuf newBuf{CREATE, newCapacity}; newBuf.advance(newHeadroom); auto current = this; do { if (current->length() > 0) { DCHECK_NOTNULL(current->data()); DCHECK_LE(current->length(), newBuf.tailroom()); memcpy(newBuf.writableTail(), current->data(), current->length()); 
newBuf.append(current->length()); } current = current->next(); } while (current != this); DCHECK_EQ(newLength, newBuf.length()); DCHECK_EQ(newHeadroom, newBuf.headroom()); DCHECK_LE(newTailroom, newBuf.tailroom()); return newBuf; } void IOBuf::unshareOneSlow() { // Allocate a new buffer for the data uint8_t* buf; SharedInfo* sharedInfo; std::size_t actualCapacity; allocExtBuffer(capacity_, &buf, &sharedInfo, &actualCapacity); // Copy the data // Maintain the same amount of headroom. Since we maintained the same // minimum capacity we also maintain at least the same amount of tailroom. std::size_t headlen = headroom(); if (length_ > 0) { assert(data_ != nullptr); memcpy(buf + headlen, data_, length_); } // Release our reference on the old buffer decrementRefcount(); // Make sure flags are all cleared. setFlagsAndSharedInfo(0, sharedInfo); // Update the buffer pointers to point to the new buffer data_ = buf + headlen; buf_ = buf; } void IOBuf::unshareChained() { // unshareChained() should only be called if we are part of a chain of // multiple IOBufs. The caller should have already verified this. assert(isChained()); IOBuf* current = this; while (true) { if (current->isSharedOne()) { // we have to unshare break; } current = current->next_; if (current == this) { // None of the IOBufs in the chain are shared, // so return without doing anything return; } } // We have to unshare. Let coalesceSlow() do the work. coalesceSlow(); } void IOBuf::markExternallyShared() { IOBuf* current = this; do { current->markExternallySharedOne(); current = current->next_; } while (current != this); } void IOBuf::makeManagedChained() { assert(isChained()); IOBuf* current = this; while (true) { current->makeManagedOne(); current = current->next_; if (current == this) { break; } } } void IOBuf::coalesceSlow() { // coalesceSlow() should only be called if we are part of a chain of multiple // IOBufs. The caller should have already verified this. DCHECK(isChained()); // Compute the length of the entire chain std::size_t newLength = 0; IOBuf* end = this; do { newLength += end->length_; end = end->next_; } while (end != this); coalesceAndReallocate(newLength, end); // We should be only element left in the chain now DCHECK(!isChained()); } void IOBuf::coalesceSlow(size_t maxLength) { // coalesceSlow() should only be called if we are part of a chain of multiple // IOBufs. The caller should have already verified this. DCHECK(isChained()); DCHECK_LT(length_, maxLength); // Compute the length of the entire chain std::size_t newLength = 0; IOBuf* end = this; while (true) { newLength += end->length_; end = end->next_; if (newLength >= maxLength) { break; } if (end == this) { throw_exception<std::overflow_error>( "attempted to coalesce more data than " "available"); } } coalesceAndReallocate(newLength, end); // We should have the requested length now DCHECK_GE(length_, maxLength); } void IOBuf::coalesceAndReallocate( size_t newHeadroom, size_t newLength, IOBuf* end, size_t newTailroom) { std::size_t newCapacity = newLength + newHeadroom + newTailroom; // Allocate space for the coalesced buffer. // We always convert to an external buffer, even if we happened to be an // internal buffer before. 
uint8_t* newBuf; SharedInfo* newInfo; std::size_t actualCapacity; allocExtBuffer(newCapacity, &newBuf, &newInfo, &actualCapacity); // Copy the data into the new buffer uint8_t* newData = newBuf + newHeadroom; uint8_t* p = newData; IOBuf* current = this; size_t remaining = newLength; do { if (current->length_ > 0) { assert(current->length_ <= remaining); assert(current->data_ != nullptr); remaining -= current->length_; memcpy(p, current->data_, current->length_); p += current->length_; } current = current->next_; } while (current != end); assert(remaining == 0); // Point at the new buffer decrementRefcount(); // Make sure flags are all cleared. setFlagsAndSharedInfo(0, newInfo); capacity_ = actualCapacity; buf_ = newBuf; data_ = newData; length_ = newLength; // Separate from the rest of our chain. // Since we don't store the unique_ptr returned by separateChain(), // this will immediately delete the returned subchain. if (isChained()) { (void)separateChain(next_, current->prev_); } } void IOBuf::decrementRefcount() noexcept { // Externally owned buffers don't have a SharedInfo object and aren't managed // by the reference count SharedInfo* info = sharedInfo(); if (!info) { return; } // Avoid doing atomic decrement if the refcount is 1. // This is safe, because it means that we're the last reference and destroying // the object. Anything trying to copy it is already undefined behavior. if (info->refcount.load(std::memory_order_acquire) > 1) { // Decrement the refcount uint32_t newcnt = info->refcount.fetch_sub(1, std::memory_order_acq_rel); // Note that fetch_sub() returns the value before we decremented. // If it is 1, we were the only remaining user; if it is greater there are // still other users. if (newcnt > 1) { return; } } // save the useHeapFullStorage flag here since // freeExtBuffer can delete the sharedInfo() bool useHeapFullStorage = info->useHeapFullStorage; // We were the last user. Free the buffer freeExtBuffer(); // Free the SharedInfo if it was allocated separately. // // This is only used by takeOwnership(). // // To avoid this special case handling in decrementRefcount(), we could have // takeOwnership() set a custom freeFn() that calls the user's free function // then frees the SharedInfo object. (This would require that // takeOwnership() store the user's free function with its allocated // SharedInfo object.) However, handling this specially with a flag seems // like it shouldn't be problematic. if (flags() & kFlagFreeSharedInfo) { delete info; } else { if (useHeapFullStorage) { SharedInfo::releaseStorage(info); } } } void IOBuf::reserveSlow(std::size_t minHeadroom, std::size_t minTailroom) { size_t newCapacity = length_; if (!checked_add(&newCapacity, newCapacity, minHeadroom) || !checked_add(&newCapacity, newCapacity, minTailroom) || newCapacity > kMaxIOBufSize) { // overflow throw_exception<std::bad_alloc>(); } // reserveSlow() is dangerous if anyone else is sharing the buffer, as we may // reallocate and free the original buffer. It should only ever be called if // we are the only user of the buffer. DCHECK(!isSharedOne()); // We'll need to reallocate the buffer. // There are a few options. // - If we have enough total room, move the data around in the buffer // and adjust the data_ pointer. // - If we're using an internal buffer, we'll switch to an external // buffer with enough headroom and tailroom. 
// - If we have enough headroom (headroom() >= minHeadroom) but not too much // (so we don't waste memory), we can try one of two things, depending on // whether we use jemalloc or not: // - If using jemalloc, we can try to expand in place, avoiding a memcpy() // - If not using jemalloc and we don't have too much to copy, // we'll use realloc() (note that realloc might have to copy // headroom + data + tailroom, see smartRealloc in folly/memory/Malloc.h) // - Otherwise, bite the bullet and reallocate. if (headroom() + tailroom() >= minHeadroom + minTailroom) { uint8_t* newData = writableBuffer() + minHeadroom; memmove(newData, data_, length_); data_ = newData; return; } size_t newAllocatedCapacity = 0; uint8_t* newBuffer = nullptr; std::size_t newHeadroom = 0; std::size_t oldHeadroom = headroom(); // If we have a buffer allocated with malloc and we just need more tailroom, // try to use realloc()/xallocx() to grow the buffer in place. SharedInfo* info = sharedInfo(); bool useHeapFullStorage = info && info->useHeapFullStorage; if (info && (info->freeFn == nullptr) && length_ != 0 && oldHeadroom >= minHeadroom) { size_t headSlack = oldHeadroom - minHeadroom; newAllocatedCapacity = goodExtBufferSize(newCapacity + headSlack); if (usingJEMalloc()) { // We assume that tailroom is more useful and more important than // headroom (not least because realloc / xallocx allow us to grow the // buffer at the tail, but not at the head) So, if we have more headroom // than we need, we consider that "wasted". We arbitrarily define "too // much" headroom to be 25% of the capacity. if (headSlack * 4 <= newCapacity) { size_t allocatedCapacity = capacity() + sizeof(SharedInfo); void* p = buf_; if (allocatedCapacity >= jemallocMinInPlaceExpandable) { if (xallocx(p, newAllocatedCapacity, 0, 0) == newAllocatedCapacity) { if (io_buf_free_cb) { io_buf_free_cb(p, reinterpret_cast<size_t>(info->userData)); } newBuffer = static_cast<uint8_t*>(p); newHeadroom = oldHeadroom; // update the userData info->userData = reinterpret_cast<void*>(newAllocatedCapacity); if (io_buf_alloc_cb) { io_buf_alloc_cb(newBuffer, newAllocatedCapacity); } } // if xallocx failed, do nothing, fall back to malloc/memcpy/free } } } else { // Not using jemalloc size_t copySlack = capacity() - length_; if (copySlack * 2 <= length_) { void* p = realloc(buf_, newAllocatedCapacity); if (UNLIKELY(p == nullptr)) { throw_exception<std::bad_alloc>(); } newBuffer = static_cast<uint8_t*>(p); newHeadroom = oldHeadroom; } } } // None of the previous reallocation strategies worked (or we're using // an internal buffer). malloc/copy/free. if (newBuffer == nullptr) { newAllocatedCapacity = goodExtBufferSize(newCapacity); newBuffer = static_cast<uint8_t*>(checkedMalloc(newAllocatedCapacity)); if (length_ > 0) { assert(data_ != nullptr); memcpy(newBuffer + minHeadroom, data_, length_); } if (sharedInfo()) { freeExtBuffer(); } newHeadroom = minHeadroom; } std::size_t cap; initExtBuffer(newBuffer, newAllocatedCapacity, &info, &cap); if (flags() & kFlagFreeSharedInfo) { delete sharedInfo(); } else { if (useHeapFullStorage) { SharedInfo::releaseStorage(sharedInfo()); } } setFlagsAndSharedInfo(0, info); capacity_ = cap; buf_ = newBuffer; data_ = newBuffer + newHeadroom; // length_ is unchanged } // The user's free function should never throw. Otherwise we might throw from // the IOBuf destructor. Other code paths like coalesce() also assume that // decrementRefcount() cannot throw. 
void IOBuf::freeExtBuffer() noexcept { SharedInfo* info = sharedInfo(); DCHECK(info); // save the observerListHead // since the SharedInfo can be freed auto observerListHead = info->observerListHead; info->observerListHead = nullptr; if (info->freeFn) { info->freeFn(buf_, info->userData); } else { // this will invoke free if info->userData is 0 size_t size = reinterpret_cast<size_t>(info->userData); if (size) { if (io_buf_free_cb) { io_buf_free_cb(buf_, size); } folly::sizedFree(buf_, size); } else { free(buf_); } } SharedInfo::invokeAndDeleteEachObserver( observerListHead, [](auto& entry) { entry.afterFreeExtBuffer(); }); if (kIsMobile) { buf_ = nullptr; } } void IOBuf::allocExtBuffer( std::size_t minCapacity, uint8_t** bufReturn, SharedInfo** infoReturn, std::size_t* capacityReturn) { if (minCapacity > kMaxIOBufSize) { throw_exception<std::bad_alloc>(); } size_t mallocSize = goodExtBufferSize(minCapacity); auto buf = static_cast<uint8_t*>(checkedMalloc(mallocSize)); initExtBuffer(buf, mallocSize, infoReturn, capacityReturn); // the userData and the freeFn are nullptr here // just store the mallocSize in userData (*infoReturn)->userData = reinterpret_cast<void*>(mallocSize); if (io_buf_alloc_cb) { io_buf_alloc_cb(buf, mallocSize); } *bufReturn = buf; } size_t IOBuf::goodExtBufferSize(std::size_t minCapacity) { if (minCapacity > kMaxIOBufSize) { throw_exception<std::bad_alloc>(); } // Determine how much space we should allocate. We'll store the SharedInfo // for the external buffer just after the buffer itself. (We store it just // after the buffer rather than just before so that the code can still just // use free(buf_) to free the buffer.) size_t minSize = static_cast<size_t>(minCapacity) + sizeof(SharedInfo); // Add room for padding so that the SharedInfo will be aligned on an 8-byte // boundary. minSize = (minSize + 7) & ~7; // Use goodMallocSize() to bump up the capacity to a decent size to request // from malloc, so we can use all of the space that malloc will probably give // us anyway. return goodMallocSize(minSize); } void IOBuf::initExtBuffer( uint8_t* buf, size_t mallocSize, SharedInfo** infoReturn, std::size_t* capacityReturn) { // Find the SharedInfo storage at the end of the buffer // and construct the SharedInfo. uint8_t* infoStart = (buf + mallocSize) - sizeof(SharedInfo); auto sharedInfo = new (infoStart) SharedInfo; *capacityReturn = std::size_t(infoStart - buf); *infoReturn = sharedInfo; } fbstring IOBuf::moveToFbString() { // we need to save useHeapFullStorage and the observerListHead since // sharedInfo() may not be valid after fbstring str bool useHeapFullStorage = false; SharedInfoObserverEntryBase* observerListHead = nullptr; // malloc-allocated buffers are just fine, everything else needs // to be turned into one. if (!sharedInfo() || // user owned, not ours to give up sharedInfo()->freeFn || // not malloc()-ed headroom() != 0 || // malloc()-ed block doesn't start at beginning tailroom() == 0 || // no room for NUL terminator isShared() || // shared isChained()) { // chained // We might as well get rid of all head and tailroom if we're going // to reallocate; we need 1 byte for NUL terminator. 
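    // Arguments: newHeadroom = 0, newLength = whole chain, end = this,
    // newTailroom = 1 -- the single spare tail byte becomes the NUL
    // terminator written just before the fbstring is constructed below.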
coalesceAndReallocate(0, computeChainDataLength(), this, 1); } else { auto info = sharedInfo(); if (info) { // if we do not call coalesceAndReallocate // we might need to call SharedInfo::releaseStorage() // and/or SharedInfo::invokeAndDeleteEachObserver() useHeapFullStorage = info->useHeapFullStorage; // save the observerListHead // the coalesceAndReallocate path will call // decrementRefcount and freeExtBuffer if needed // so the observer lis notification is needed here observerListHead = info->observerListHead; info->observerListHead = nullptr; } } // Ensure NUL terminated *writableTail() = 0; fbstring str( reinterpret_cast<char*>(writableData()), length(), capacity(), AcquireMallocatedString()); if (io_buf_free_cb && sharedInfo() && sharedInfo()->userData) { io_buf_free_cb( writableData(), reinterpret_cast<size_t>(sharedInfo()->userData)); } SharedInfo::invokeAndDeleteEachObserver( observerListHead, [](auto& entry) { entry.afterReleaseExtBuffer(); }); if (flags() & kFlagFreeSharedInfo) { delete sharedInfo(); } else { if (useHeapFullStorage) { SharedInfo::releaseStorage(sharedInfo()); } } // Reset to a state where we can be deleted cleanly flagsAndSharedInfo_ = 0; buf_ = nullptr; clear(); return str; } IOBuf::Iterator IOBuf::cbegin() const { return Iterator(this, this); } IOBuf::Iterator IOBuf::cend() const { return Iterator(nullptr, nullptr); } folly::fbvector<struct iovec> IOBuf::getIov() const { folly::fbvector<struct iovec> iov; iov.reserve(countChainElements()); appendToIov(&iov); return iov; } void IOBuf::appendToIov(folly::fbvector<struct iovec>* iov) const { IOBuf const* p = this; do { // some code can get confused by empty iovs, so skip them if (p->length() > 0) { iov->push_back({(void*)p->data(), folly::to<size_t>(p->length())}); } p = p->next(); } while (p != this); } unique_ptr<IOBuf> IOBuf::wrapIov(const iovec* vec, size_t count) { unique_ptr<IOBuf> result = nullptr; for (size_t i = 0; i < count; ++i) { size_t len = vec[i].iov_len; void* data = vec[i].iov_base; if (len > 0) { auto buf = wrapBuffer(data, len); if (!result) { result = std::move(buf); } else { result->prependChain(std::move(buf)); } } } if (UNLIKELY(result == nullptr)) { return create(0); } return result; } std::unique_ptr<IOBuf> IOBuf::takeOwnershipIov( const iovec* vec, size_t count, FreeFunction freeFn, void* userData, bool freeOnError) { unique_ptr<IOBuf> result = nullptr; for (size_t i = 0; i < count; ++i) { size_t len = vec[i].iov_len; void* data = vec[i].iov_base; if (len > 0) { auto buf = takeOwnership(data, len, freeFn, userData, freeOnError); if (!result) { result = std::move(buf); } else { result->prependChain(std::move(buf)); } } } if (UNLIKELY(result == nullptr)) { return create(0); } return result; } IOBuf::FillIovResult IOBuf::fillIov(struct iovec* iov, size_t len) const { IOBuf const* p = this; size_t i = 0; size_t totalBytes = 0; while (i < len) { // some code can get confused by empty iovs, so skip them if (p->length() > 0) { iov[i].iov_base = const_cast<uint8_t*>(p->data()); iov[i].iov_len = p->length(); totalBytes += p->length(); i++; } p = p->next(); if (p == this) { return {i, totalBytes}; } } return {0, 0}; } uint32_t IOBuf::approximateShareCountOne() const { if (UNLIKELY(!sharedInfo())) { return 1U; } return sharedInfo()->refcount.load(std::memory_order_acquire); } size_t IOBufHash::operator()(const IOBuf& buf) const noexcept { folly::hash::SpookyHashV2 hasher; hasher.Init(0, 0); io::Cursor cursor(&buf); for (;;) { auto b = cursor.peekBytes(); if (b.empty()) { break; } 
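    // peekBytes() exposes the unread bytes of the current chain element
    // without copying; an empty view means the cursor has reached the end
    // of the chain, otherwise hash those bytes and skip past them.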
hasher.Update(b.data(), b.size()); cursor.skip(b.size()); } uint64_t h1; uint64_t h2; hasher.Final(&h1, &h2); return static_cast<std::size_t>(h1); } ordering IOBufCompare::impl(const IOBuf& a, const IOBuf& b) const noexcept { io::Cursor ca(&a); io::Cursor cb(&b); for (;;) { auto ba = ca.peekBytes(); auto bb = cb.peekBytes(); if (ba.empty() || bb.empty()) { return to_ordering(int(bb.empty()) - int(ba.empty())); } const size_t n = std::min(ba.size(), bb.size()); DCHECK_GT(n, 0u); const ordering r = to_ordering(std::memcmp(ba.data(), bb.data(), n)); if (r != ordering::eq) { return r; } // Cursor::skip() may throw if n is too large, but n is not too large here ca.skip(n); cb.skip(n); } } } // namespace folly
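// A minimal usage sketch, not part of the file above: it assumes only the
// public folly::IOBuf calls whose internals are implemented there
// (create(), append(), prependChain(), cloneCoalesced()); the helper name
// buildAndFlatten() is illustrative.
#include <folly/io/IOBuf.h>
#include <cstring>
#include <memory>

std::unique_ptr<folly::IOBuf> buildAndFlatten() {
  auto head = folly::IOBuf::create(16);           // empty buffer, 16B capacity
  std::memcpy(head->writableTail(), "hello ", 6);
  head->append(6);                                // bytes now live in [data, tail)

  auto tail = folly::IOBuf::create(16);
  std::memcpy(tail->writableTail(), "world", 5);
  tail->append(5);
  head->prependChain(std::move(tail));            // head now owns a 2-element chain

  // cloneCoalesced() walks the chain exactly as cloneCoalescedAsValue...()
  // above and returns one contiguous buffer holding all 11 bytes.
  return head->cloneCoalesced();
}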
null
243
CWE-787
CVE-2021-26259
/* * PostScript + PDF output routines for HTMLDOC, a HTML document processing * program. * * Just in case you didn't notice it, this file is too big; it will be * broken into more manageable pieces once we make all of the output * "drivers" into classes... * * Copyright © 2011-2021 by Michael R Sweet. * Copyright © 1997-2010 by Easy Software Products. All rights reserved. * * This program is free software. Distribution and use rights are outlined in * the file "COPYING". */ /* * Include necessary headers. */ /* * The GCC compiler on HP-UX has a nasty habit of incorrectly "fixing" * the vmtypes.h header file provided with HP-UX. The following * conditional magic makes sure that "page_t" (which we use in our * code) is not defined... */ #ifdef __hpux # define page_t hpux_page_t #endif // __hpux /*#define DEBUG*/ #include "htmldoc.h" #include "markdown.h" #include "md5-private.h" #define md5_append _cupsMD5Append #define md5_finish _cupsMD5Finish #define md5_init _cupsMD5Init typedef unsigned char md5_byte_t; #define md5_state_t _cups_md5_state_t #include "rc4.h" #include <stdarg.h> #include <ctype.h> #include <time.h> #include <math.h> #ifdef WIN32 # include <io.h> #else # include <unistd.h> #endif // WIN32 #include <fcntl.h> #include <zlib.h> extern "C" { /* Workaround for JPEG header problems... */ #include <jpeglib.h> /* JPEG/JFIF image definitions */ } #ifdef __hpux # undef page_t #endif // __hpux /* * Output options... */ #define HTMLDOC_ASCII85 //#define HTMLDOC_INTERPOLATION #define HTMLDOC_PRODUCER "htmldoc " SVERSION " Copyright 2011-2019 by Michael R Sweet" /* * Constants... */ #define RENDER_TEXT 0 /* Text fragment */ #define RENDER_IMAGE 1 /* Image */ #define RENDER_BOX 2 /* Box */ #define RENDER_LINK 3 /* Hyperlink */ #define RENDER_BG 4 /* Background image */ /* * Structures... */ typedef struct render_str /**** Render entity structure ****/ { struct render_str *prev; /* Previous rendering entity */ struct render_str *next; /* Next rendering entity */ int type; /* Type of entity */ float x, /* Position in points */ y, /* ... */ width, /* Size in points */ height; /* ... */ union { struct { int typeface, /* Typeface for text */ style; /* Style of text */ float size; /* Size of text in points */ float spacing; /* Inter-character spacing */ float rgb[3]; /* Color of text */ uchar buffer[1]; /* String buffer */ } text; image_t *image; /* Image pointer */ float box[3]; /* Box color */ uchar link[1]; /* Link URL */ } data; } render_t; typedef struct /**** Named link position structure */ { short page, /* Page # */ top; /* Top position */ uchar name[124]; /* Reference name */ } link_t; typedef struct //// Page information { int width, // Width of page in points length, // Length of page in points left, // Left margin in points right, // Right margin in points top, // Top margin in points bottom, // Bottom margin in points duplex, // Duplex this page? landscape; // Landscape orientation? 
render_t *start, // First render element *end; // Last render element uchar *url, // URL/file *chapter, // Chapter text *heading; // Heading text tree_t *headnode; // Heading node uchar *header[3], // Headers for regular pages *header1[3], // Headers for first pages *footer[3]; // Footers for all pages char media_color[64], // Media color media_type[64]; // Media type int media_position; // Media position char page_text[64]; // Page number for TOC image_t *background_image; // Background image float background_color[3]; // Background color // Number-up support int nup; // Number up pages int outpage; // Output page # float outmatrix[2][3]; // Transform matrix } page_t; typedef struct //// Output page info { int nup; // Number up pages int pages[16]; // Pages on this output page int annot_object; // Annotation object } outpage_t; /* * Local globals... */ static time_t doc_time; // Current time static struct tm doc_date; // Current date static uchar *current_url = NULL; static int title_page; static int chapter, chapter_outstarts[MAX_CHAPTERS], chapter_outends[MAX_CHAPTERS], chapter_starts[MAX_CHAPTERS], chapter_ends[MAX_CHAPTERS]; static size_t num_headings = 0, alloc_headings = 0; static int *heading_pages = NULL, *heading_tops = NULL; static size_t num_pages = 0, alloc_pages = 0; static page_t *pages = NULL; static tree_t *current_heading; static size_t num_outpages = 0; static outpage_t *outpages = NULL; static size_t num_links = 0, alloc_links = 0; static link_t *links = NULL; static uchar list_types[16]; static int list_values[16]; static char stdout_filename[256]; static size_t num_objects = 0, alloc_objects = 0; static int *objects = NULL, root_object, info_object, outline_object, pages_object, names_object, encrypt_object, font_objects[TYPE_MAX * STYLE_MAX]; static uchar *doc_title = NULL; static image_t *logo_image = NULL; static float logo_width, logo_height; static image_t *lh_image = NULL; static float lh_width, lh_height; static image_t *hfimage[MAX_HF_IMAGES]; static float hfimage_width[MAX_HF_IMAGES], hfimage_height[MAX_HF_IMAGES]; static float maxhfheight; static image_t *background_image = NULL; static float background_color[3] = { 1.0, 1.0, 1.0 }, link_color[3] = { 0.0, 0.0, 1.0 }; static int render_typeface, render_style; static float render_size, render_rgb[3], render_x, render_y, render_startx, render_spacing; static int compressor_active = 0; static z_stream compressor; static uchar comp_buffer[8192]; static uchar encrypt_key[16]; static int encrypt_len; static rc4_context_t encrypt_state; static md5_byte_t file_id[16]; /* * Local functions... 
*/ extern "C" { typedef int (*compare_func_t)(const void *, const void *); } static void pspdf_debug_stats(); static void pspdf_transform_coords(page_t *p, float &x, float &y); static void pspdf_transform_page(int outpage, int pos, int page); static void pspdf_prepare_outpages(); static void pspdf_prepare_page(int page); static void pspdf_prepare_heading(int page, int print_page, uchar **format, int y, char *page_text, int page_len); static void ps_write_document(uchar *author, uchar *creator, uchar *copyright, uchar *keywords, uchar *subject, uchar *lang); static void ps_write_outpage(FILE *out, int outpage); static void ps_write_page(FILE *out, int page); static void ps_write_background(FILE *out); static void pdf_write_document(uchar *author, uchar *creator, uchar *copyright, uchar *keywords, uchar *subject, uchar *lang, tree_t *doc, tree_t *toc); static void pdf_write_outpage(FILE *out, int outpage); static void pdf_write_page(FILE *out, int page); static void pdf_write_resources(FILE *out, int page); #ifdef DEBUG_TOC static void pdf_text_contents(FILE *out, tree_t *toc, int indent = 0); #endif // DEBUG_TOC static void pdf_write_contents(FILE *out, tree_t *toc, int parent, int prev, int next, int *heading); static void pdf_write_files(FILE *out, tree_t *doc); static void pdf_write_links(FILE *out); static void pdf_write_names(FILE *out); static int pdf_count_headings(tree_t *toc); static int pdf_start_object(FILE *out, int array = 0); static void pdf_start_stream(FILE *out); static void pdf_end_object(FILE *out); static void encrypt_init(void); static void flate_open_stream(FILE *out); static void flate_close_stream(FILE *out); static void flate_puts(const char *s, FILE *out); static void flate_printf(FILE *out, const char *format, ...); static void flate_write(FILE *out, uchar *inbuf, int length, int flush=0); static void parse_contents(tree_t *t, float left, float width, float bottom, float length, float *y, int *page, int *heading, tree_t *chap); static void parse_doc(tree_t *t, float *left, float *right, float *bottom, float *top, float *x, float *y, int *page, tree_t *cpara, int *needspace); static void parse_heading(tree_t *t, float left, float width, float bottom, float length, float *x, float *y, int *page, int needspace); static void parse_paragraph(tree_t *t, float left, float width, float bottom, float length, float *x, float *y, int *page, int needspace); static void parse_pre(tree_t *t, float left, float width, float bottom, float length, float *x, float *y, int *page, int needspace); static void parse_table(tree_t *t, float left, float width, float bottom, float length, float *x, float *y, int *page, int needspace); static void parse_list(tree_t *t, float *left, float *width, float *bottom, float *length, float *x, float *y, int *page, int needspace); static void init_list(tree_t *t); static void parse_comment(tree_t *t, float *left, float *width, float *bottom, float *length, float *x, float *y, int *page, tree_t *para, int needspace); static void check_pages(int page); static void add_link(uchar *name, int page, int top); static link_t *find_link(uchar *name); static int compare_links(link_t *n1, link_t *n2); static void find_background(tree_t *t); static void write_background(int page, FILE *out); static render_t *new_render(int page, int type, double x, double y, double width, double height, void *data, render_t *insert = 0); static float get_cell_size(tree_t *t, float left, float right, float *minwidth, float *prefwidth, float *minheight); static float 
get_table_size(tree_t *t, float left, float right, float *minwidth, float *prefwidth, float *minheight); static tree_t *flatten_tree(tree_t *t); static float get_width(uchar *s, int typeface, int style, int size); static void update_image_size(tree_t *t); static uchar *get_title(tree_t *doc); static FILE *open_file(void); static void set_color(FILE *out, float *rgb); static void set_font(FILE *out, int typeface, int style, float size); static void set_pos(FILE *out, float x, float y); static void write_prolog(FILE *out, int pages, uchar *author, uchar *creator, uchar *copyright, uchar *keywords, uchar *subject); static void ps_hex(FILE *out, uchar *data, int length); #ifdef HTMLDOC_ASCII85 static void ps_ascii85(FILE *out, uchar *data, int length, int eod = 0); #endif // HTMLDOC_ASCII85 static void jpg_init(j_compress_ptr cinfo); static boolean jpg_empty(j_compress_ptr cinfo); static void jpg_term(j_compress_ptr cinfo); static void jpg_setup(FILE *out, image_t *img, j_compress_ptr cinfo); static int compare_rgb(unsigned *rgb1, unsigned *rgb2); static void write_image(FILE *out, render_t *r, int write_obj = 0); static void write_imagemask(FILE *out, render_t *r); static void write_string(FILE *out, uchar *s, int compress); static void write_text(FILE *out, render_t *r); static void write_trailer(FILE *out, int pages, uchar *lang); static int write_type1(FILE *out, typeface_t typeface, style_t style); static void write_utf16(FILE *out, uchar *s); /* * 'pspdf_export()' - Export PostScript/PDF file(s)... */ int pspdf_export(tree_t *document, /* I - Document to export */ tree_t *toc) /* I - Table of contents for document */ { int i, j; /* Looping vars */ const char *title_file; /* Location of title image/file */ uchar *author, /* Author of document */ *creator, /* HTML file creator (Netscape, etc) */ *copyright, /* File copyright */ *docnumber, /* Document number */ *keywords, /* Search keywords */ *subject, /* Subject */ *lang; /* Language */ tree_t *t; /* Title page document tree */ FILE *fp; /* Title page file */ float x, y, /* Current page position */ left, right, /* Left and right margins */ bottom, top, /* Bottom and top margins */ width, /* Width of , author, etc */ height; /* Height of area */ int page, /* Current page # */ pos, /* Current header/footer position */ heading, /* Current heading # */ toc_duplex, /* Duplex TOC pages? */ toc_landscape, /* Do TOC in landscape? */ toc_width, /* Width of TOC pages */ toc_length, /* Length of TOC pages */ toc_left, /* TOC page margins */ toc_right, toc_bottom, toc_top; image_t *timage; /* Title image */ float timage_width, /* Title image width */ timage_height; /* Title image height */ render_t *r; /* Rendering structure... */ float rgb[3]; /* Text color */ int needspace; /* Need whitespace */ /* * Figure out the printable area of the output page... */ if (Landscape) { PagePrintWidth = PageLength - PageLeft - PageRight; PagePrintLength = PageWidth - PageTop - PageBottom; } else { PagePrintWidth = PageWidth - PageLeft - PageRight; PagePrintLength = PageLength - PageTop - PageBottom; } toc_width = PageWidth; toc_length = PageLength; toc_left = PageLeft; toc_right = PageRight; toc_bottom = PageBottom; toc_top = PageTop; toc_landscape = Landscape; toc_duplex = PageDuplex; /* * Get the document title, author, etc... 
*/ doc_title = get_title(document); author = htmlGetMeta(document, (uchar *)"author"); creator = htmlGetMeta(document, (uchar *)"generator"); copyright = htmlGetMeta(document, (uchar *)"copyright"); docnumber = htmlGetMeta(document, (uchar *)"docnumber"); keywords = htmlGetMeta(document, (uchar *)"keywords"); subject = htmlGetMeta(document, (uchar *)"subject"); lang = htmlGetMeta(document, (uchar *)"lang"); logo_image = image_load(LogoImage, !OutputColor); lh_image = image_load(Letterhead, !OutputColor); maxhfheight = 0.0f; if (docnumber == NULL) docnumber = htmlGetMeta(document, (uchar *)"version"); if (lh_image != NULL) { lh_width = (float)(lh_image->width * PagePrintWidth / _htmlBrowserWidth); lh_height = (float)(lh_width * lh_image->height / lh_image->width); if (lh_height > maxhfheight) maxhfheight = lh_height; } else lh_width = lh_height = 0.0f; if (logo_image != NULL) { logo_width = (float)(logo_image->width * PagePrintWidth / _htmlBrowserWidth); logo_height = (float)(logo_width * logo_image->height / logo_image->width); if (logo_height > (2.0 * HeadFootSize)) { // Issue #273: too large logo image will overlap the body text, so cap // the height of the logo image to the header/footer size... // // Issue #303: regression prevents using header/footer images for special // underlining/etc. effects. logo_height = (float)(2.0 * HeadFootSize); logo_width = logo_height * logo_image->width / logo_image->height; } if (logo_height > maxhfheight) maxhfheight = logo_height; } else logo_width = logo_height = 0.0f; for (int hfi = 0; hfi < MAX_HF_IMAGES; hfi ++) { hfimage[hfi] = image_load(HFImage[hfi], !OutputColor); if (hfimage[hfi]) { hfimage_width[hfi] = (float)(hfimage[hfi]->width * PagePrintWidth / _htmlBrowserWidth); hfimage_height[hfi] = (float)(hfimage_width[hfi] * hfimage[hfi]->height / hfimage[hfi]->width); if (hfimage_height[hfi] > (2.0 * HeadFootSize)) { // Issue #273: too large logo image will overlap the body text, so cap // the height of the logo image to the header/footer size... // // Issue #303: regression prevents using header/footer images for special // underlining/etc. effects. hfimage_height[hfi] = (float)(2.0 * HeadFootSize); hfimage_width[hfi] = hfimage_height[hfi] * hfimage[hfi]->width / hfimage[hfi]->height; } if (hfimage_height[hfi] > maxhfheight) maxhfheight = hfimage_height[hfi]; } else hfimage_width[hfi] = hfimage_height[hfi] = 0.0f; } find_background(document); get_color((uchar *)LinkColor, link_color); /* * Initialize page rendering variables... */ num_pages = 0; alloc_pages = 0; pages = NULL; memset(list_types, 0267, sizeof(list_types)); memset(list_values, 0, sizeof(list_values)); memset(chapter_starts, -1, sizeof(chapter_starts)); memset(chapter_ends, -1, sizeof(chapter_starts)); /* * Get the current date, using the SOURCE_DATE_EPOCH environment variable, if * present, for the number of seconds since the epoch - this enables * reproducible builds (Issue #310). 
*/ const char *source_date_epoch = getenv("SOURCE_DATE_EPOCH"); if (!source_date_epoch || (doc_time = (time_t)strtol(source_date_epoch, NULL, 10)) <= 0) doc_time = time(NULL); gmtime_r(&doc_time, &doc_date); num_headings = 0; alloc_headings = 0; heading_pages = NULL; heading_tops = NULL; num_links = 0; alloc_links = 0; links = NULL; num_pages = 0; DEBUG_printf(("pspdf_export: TitlePage = %d, TitleImage = \"%s\"\n", TitlePage, TitleImage)); if (TitlePage) { const char *title_ext = file_extension(TitleImage); #ifdef WIN32 if (TitleImage[0] && stricmp(title_ext, "bmp") != 0 && stricmp(title_ext, "gif") != 0 && stricmp(title_ext, "jpg") != 0 && stricmp(title_ext, "png") != 0) #else if (TitleImage[0] && strcmp(title_ext, "bmp") != 0 && strcmp(title_ext, "gif") != 0 && strcmp(title_ext, "jpg") != 0 && strcmp(title_ext, "png") != 0) #endif // WIN32 { DEBUG_printf(("pspdf_export: Generating a titlepage using \"%s\"\n", TitleImage)); // Find the title file... if ((title_file = file_find(Path, TitleImage)) == NULL) { progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to find title file \"%s\"!", TitleImage); return (1); } // Write a title page from HTML source... if ((fp = fopen(title_file, "rb")) == NULL) { progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to open title file \"%s\" - %s!", TitleImage, strerror(errno)); return (1); } #ifdef _WIN32 if (!stricmp(title_ext, "md")) #else if (!strcmp(title_ext, "md")) #endif // _WIN32 t = mdReadFile(NULL, fp, file_directory(TitleImage)); else t = htmlReadFile(NULL, fp, file_directory(TitleImage)); htmlFixLinks(t, t, (uchar *)file_directory(TitleImage)); fclose(fp); page = 0; title_page = 1; current_heading = NULL; x = 0.0f; bottom = 0.0f; top = PagePrintLength; y = top; needspace = 0; left = 0.0f; right = PagePrintWidth; parse_doc(t, &left, &right, &bottom, &top, &x, &y, &page, NULL, &needspace); if (PageDuplex && (num_pages & 1)) check_pages(num_pages); htmlDeleteTree(t); } else { /* * Create a standard title page... 
*/ if ((timage = image_load(TitleImage, !OutputColor)) != NULL) { timage_width = (float)(timage->width * PagePrintWidth / _htmlBrowserWidth); timage_height = (float)(timage_width * timage->height / timage->width); } else timage_width = timage_height = 0.0f; check_pages(0); if (PageDuplex) check_pages(1); height = 0.0; if (timage != NULL) height += timage_height + _htmlSpacings[SIZE_P]; if (doc_title != NULL) height += _htmlSpacings[SIZE_H1] + _htmlSpacings[SIZE_P]; if (author != NULL) height += _htmlSpacings[SIZE_P]; if (docnumber != NULL) height += _htmlSpacings[SIZE_P]; if (copyright != NULL) height += _htmlSpacings[SIZE_P]; y = 0.5f * (PagePrintLength + height); if (timage != NULL) { new_render(0, RENDER_IMAGE, 0.5f * (PagePrintWidth - timage_width), y - timage_height, timage_width, timage_height, timage); y -= timage_height + _htmlSpacings[SIZE_P]; } get_color(_htmlTextColor, rgb); if (doc_title != NULL) { width = get_width(doc_title, _htmlHeadingFont, STYLE_BOLD, SIZE_H1); r = new_render(0, RENDER_TEXT, (PagePrintWidth - width) * 0.5f, y - _htmlSpacings[SIZE_H1], width, _htmlSizes[SIZE_H1], doc_title); r->data.text.typeface = _htmlHeadingFont; r->data.text.style = STYLE_BOLD; r->data.text.size = (float)_htmlSizes[SIZE_H1]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); y -= _htmlSpacings[SIZE_H1]; if (docnumber != NULL) { width = get_width(docnumber, _htmlBodyFont, STYLE_NORMAL, SIZE_P); r = new_render(0, RENDER_TEXT, (PagePrintWidth - width) * 0.5f, y - _htmlSpacings[SIZE_P], width, _htmlSizes[SIZE_P], docnumber); r->data.text.typeface = _htmlBodyFont; r->data.text.style = STYLE_NORMAL; r->data.text.size = (float)_htmlSizes[SIZE_P]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); y -= _htmlSpacings[SIZE_P]; } y -= _htmlSpacings[SIZE_P]; } if (author != NULL) { width = get_width(author, _htmlBodyFont, STYLE_NORMAL, SIZE_P); r = new_render(0, RENDER_TEXT, (PagePrintWidth - width) * 0.5f, y - _htmlSpacings[SIZE_P], width, _htmlSizes[SIZE_P], author); r->data.text.typeface = _htmlBodyFont; r->data.text.style = STYLE_NORMAL; r->data.text.size = (float)_htmlSizes[SIZE_P]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); y -= _htmlSpacings[SIZE_P]; } if (copyright != NULL) { width = get_width(copyright, _htmlBodyFont, STYLE_NORMAL, SIZE_P); r = new_render(0, RENDER_TEXT, (PagePrintWidth - width) * 0.5f, y - _htmlSpacings[SIZE_P], width, _htmlSizes[SIZE_P], copyright); r->data.text.typeface = _htmlBodyFont; r->data.text.style = STYLE_NORMAL; r->data.text.size = (float)_htmlSizes[SIZE_P]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); } } for (page = 0; page < (int)num_pages; page ++) strlcpy((char *)pages[page].page_text, (page & 1) ? "eltit" : "title", sizeof(pages[page].page_text)); } else page = 0; /* * Parse the document... */ if (OutputType == OUTPUT_BOOK) chapter = 0; else { chapter = 1; TocDocCount = 1; chapter_starts[1] = num_pages; } title_page = 0; current_heading = NULL; x = 0.0f; needspace = 0; left = 0.0f; right = PagePrintWidth; // Adjust top margin as needed... 
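  // A header that expands to an image ($IMAGE, $HFIMAGE or $LETTERHEAD)
  // reserves room for the tallest header/footer image plus one text line
  // (never less than two HeadFootSize lines); a plain text header only
  // reserves two HeadFootSize lines.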
float adjust, image_adjust, temp_adjust; if (maxhfheight > HeadFootSize) image_adjust = (float)(maxhfheight + HeadFootSize); else image_adjust = (float)(2 * HeadFootSize); for (adjust = 0.0, pos = 0; pos < 3; pos ++) { if (Header[pos] && (strstr(Header[pos], "$IMAGE") != NULL || strstr(Header[pos], "$HFIMAGE") != NULL || strstr(Header[pos], "$LETTERHEAD") != NULL)) temp_adjust = image_adjust; else if (Header1[pos] && (strstr(Header1[pos], "$IMAGE") != NULL || strstr(Header1[pos], "$HFIMAGE") != NULL || strstr(Header1[pos], "$LETTERHEAD") != NULL)) temp_adjust = image_adjust; else if (Header[pos] || Header1[pos]) temp_adjust = (float)(2 * HeadFootSize); else temp_adjust = 0.0; if (temp_adjust > adjust) adjust = temp_adjust; } top = PagePrintLength - adjust; // Adjust bottom margin as needed... for (adjust = 0.0, pos = 0; pos < 3; pos ++) { if (Footer[pos] && (strstr(Footer[pos], "$IMAGE") != NULL || strstr(Footer[pos], "$HFIMAGE") != NULL || strstr(Footer[pos], "$LETTERHEAD") != NULL)) temp_adjust = image_adjust; else if (Footer[pos]) temp_adjust = (float)(2 * HeadFootSize); else temp_adjust = 0.0; if (temp_adjust > adjust) adjust = temp_adjust; } bottom = adjust; y = top; parse_doc(document, &left, &right, &bottom, &top, &x, &y, &page, NULL, &needspace); if (PageDuplex && (num_pages & 1)) { if (PSLevel == 0) chapter_ends[chapter] = num_pages - 1; check_pages(num_pages); if (PSLevel > 0) chapter_ends[chapter] = num_pages - 1; } else chapter_ends[chapter] = num_pages - 1; for (chapter = 1; chapter <= TocDocCount; chapter ++) for (page = chapter_starts[chapter]; page <= chapter_ends[chapter]; page ++) pspdf_prepare_page(page); /* * Parse the table-of-contents if necessary... */ if (TocLevels > 0 && num_headings > 0) { // Restore default page size, etc... PageWidth = toc_width; PageLength = toc_length; PageLeft = toc_left; PageRight = toc_right; PageBottom = toc_bottom; PageTop = toc_top; Landscape = toc_landscape; PageDuplex = toc_duplex; if (Landscape) { PagePrintWidth = PageLength - PageLeft - PageRight; PagePrintLength = PageWidth - PageTop - PageBottom; } else { PagePrintWidth = PageWidth - PageLeft - PageRight; PagePrintLength = PageLength - PageTop - PageBottom; } // Adjust top margin as needed... for (pos = 0; pos < 3; pos ++) if (TocHeader[pos]) break; if (pos == 3) top = PagePrintLength; else if (maxhfheight > HeadFootSize) top = (float)(PagePrintLength - maxhfheight - HeadFootSize); else top = (float)(PagePrintLength - 2 * HeadFootSize); // Adjust bottom margin as needed... for (pos = 0; pos < 3; pos ++) if (TocFooter[pos]) break; if (pos == 3) bottom = 0.0f; else if (maxhfheight > HeadFootSize) bottom = (float)(maxhfheight + HeadFootSize); else bottom = (float)(2 * HeadFootSize); y = 0.0; page = num_pages - 1; heading = 0; chapter_starts[0] = num_pages; chapter = 0; parse_contents(toc, 0, PagePrintWidth, bottom, top, &y, &page, &heading, 0); if (PageDuplex && (num_pages & 1)) check_pages(num_pages); chapter_ends[0] = num_pages - 1; for (page = chapter_starts[0]; page <= chapter_ends[0]; page ++) pspdf_prepare_page(page); } if (TocDocCount > MAX_CHAPTERS) TocDocCount = MAX_CHAPTERS; /* * Do we have any pages? */ if (num_pages > 0 && TocDocCount > 0) { /* * Yes, write the document to disk... 
*/ pspdf_prepare_outpages(); pspdf_debug_stats(); progress_error(HD_ERROR_NONE, "PAGES: %d", (int)num_outpages); if (PSLevel > 0) ps_write_document(author, creator, copyright, keywords, subject, lang); else pdf_write_document(author, creator, copyright, keywords, subject, lang, document, toc); } else { /* * No, show an error... */ pspdf_debug_stats(); progress_error(HD_ERROR_NO_PAGES, "Error: no pages generated! (did you remember to use webpage mode?"); } /* * Free memory... */ if (doc_title != NULL) free(doc_title); if (alloc_links) { free(links); num_links = 0; alloc_links = 0; links = NULL; } for (i = 0; i < (int)num_pages; i ++) { if ((i == 0 || pages[i].chapter != pages[i - 1].chapter) && pages[i].chapter) free(pages[i].chapter); if ((i == 0 || pages[i].heading != pages[i - 1].heading) && pages[i].heading) free(pages[i].heading); if (!pages[i].heading) continue; for (j = 0; j < 3; j ++) { if (!pages[i].header[j]) continue; if (i == 0 || pages[i].header[j] != pages[i - 1].header[j]) free(pages[i].header[j]); } for (j = 0; j < 3; j ++) { if (!pages[i].header1[j]) continue; if (i == 0 || pages[i].header1[j] != pages[i - 1].header1[j]) free(pages[i].header1[j]); } for (j = 0; j < 3; j ++) { if (!pages[i].footer[j]) continue; if (i == 0 || pages[i].footer[j] != pages[i - 1].footer[j]) free(pages[i].footer[j]); } } for (i = 0; i < 3; i ++) { Header[i] = NULL; Header1[i] = NULL; Footer[i] = NULL; TocHeader[i] = NULL; TocFooter[i] = NULL; } if (alloc_pages) { free(pages); free(outpages); num_pages = 0; alloc_pages = 0; pages = NULL; } if (alloc_headings) { free(heading_pages); free(heading_tops); num_headings = 0; alloc_headings = 0; heading_pages = NULL; heading_tops = NULL; } return (0); } // // 'pspdf_debug_stats()' - Display debug statistics for render memory use. // static void pspdf_debug_stats() { const char *debug; // HTMLDOC_DEBUG env var int i; // Looping var render_t *r; // Render node int bytes; // Number of bytes if ((debug = getenv("HTMLDOC_DEBUG")) == NULL || (strstr(debug, "all") == NULL && strstr(debug, "memory") == NULL)) return; bytes = alloc_headings * sizeof(int) * 2; bytes += alloc_pages * sizeof(page_t); for (i = 0; i < (int)num_pages; i ++) { for (r = pages[i].start; r != NULL; r = r->next) { bytes += sizeof(render_t); if (r->type == RENDER_TEXT) bytes += strlen((char *)r->data.text.buffer); } } bytes += num_outpages * sizeof(outpage_t); bytes += alloc_links * sizeof(link_t); bytes += alloc_objects * sizeof(int); progress_error(HD_ERROR_NONE, "DEBUG: Render Data = %d kbytes", (bytes + 1023) / 1024); } /* * 'pspdf_transform_coords()' - Transform page coordinates. */ static void pspdf_transform_coords(page_t *p, // I - Page float &x, // IO - X coordinate float &y) // IO - Y coordinate { float tx, ty; // Temporary X and Y tx = x; ty = y; x = tx * p->outmatrix[0][0] + ty * p->outmatrix[0][1] + p->outmatrix[0][2]; y = tx * p->outmatrix[1][0] + ty * p->outmatrix[1][1] + p->outmatrix[1][2]; } /* * 'pspdf_transform_page()' - Transform a page. 
*/ static void pspdf_transform_page(int outpage, // I - Output page int pos, // I - Position on page int page) // I - Input page { outpage_t *op; // Current output page page_t *bp; // Current base page page_t *p; // Current input page int x, y; // Position on output page double w, l, // Width and length of subpage tx, ty; // Translation values for subpage double pw, pl; // Printable width and length of full page DEBUG_printf(("pspdf_transform_page(outpage = %d, pos = %d, page = %d)\n", outpage, pos, page)); if (pos > 15) progress_error(HD_ERROR_INTERNAL_ERROR, "Internal error: pos = %d", pos); op = outpages + outpage; op->pages[pos] = page; bp = pages + op->pages[0]; p = pages + page; p->outpage = outpage; pw = bp->width; pl = bp->length; DEBUG_printf((" width = %d, length = %d\n", p->width, p->length)); switch (op->nup) { default : case 1 : p->outmatrix[0][0] = 1.0f; p->outmatrix[1][0] = 0.0f; p->outmatrix[0][1] = 0.0f; p->outmatrix[1][1] = 1.0f; p->outmatrix[0][2] = 0.0f; p->outmatrix[1][2] = 0.0f; break; case 2 : x = pos & 1; l = pw; w = l * p->width / p->length; if (w > (pl * 0.5f)) { w = pl * 0.5f; l = w * p->length / p->width; } tx = 0.5 * (pl * 0.5 - w); ty = 0.5 * (pw - l); p->outmatrix[0][0] = 0.0f; p->outmatrix[1][0] = (float)(w / p->width); p->outmatrix[0][1] = (float)(-w / p->width); p->outmatrix[1][1] = 0.0f; p->outmatrix[0][2] = (float)(ty + pl * w / p->width); p->outmatrix[1][2] = (float)(tx + x * pl / 2); break; case 4 : x = pos & 1; y = 1 - pos / 2; w = pw * 0.5; l = w * p->length / p->width; if (l > (pl * 0.5)) { l = pl * 0.5; w = l * p->width / p->length; } tx = 0.5 * (pw * 0.5 - w); ty = 0.5 * (pl * 0.5 - l); p->outmatrix[0][0] = (float)(w / p->width); p->outmatrix[1][0] = 0.0f; p->outmatrix[0][1] = 0.0f; p->outmatrix[1][1] = (float)(w / p->width); p->outmatrix[0][2] = (float)(tx + x * pw / 2); p->outmatrix[1][2] = (float)(ty + y * pl / 2); break; case 6 : x = pos % 3; y = pos / 3; l = pw * 0.5; w = l * p->width / p->length; if (w > (pl * 0.333f)) { w = pl * 0.333f; l = w * p->length / p->width; } tx = 0.5 * (pl * 0.333 - w); ty = 0.5 * (pw * 0.5 - l); p->outmatrix[0][0] = 0.0f; p->outmatrix[1][0] = (float)(w / p->width); p->outmatrix[0][1] = (float)(-w / p->width); p->outmatrix[1][1] = 0.0f; p->outmatrix[0][2] = (float)(ty + y * pw / 2 + pl * w / p->width); p->outmatrix[1][2] = (float)(tx + x * pl / 3); break; case 9 : x = pos % 3; y = 2 - pos / 3; w = pw * 0.333; l = w * p->length / p->width; if (l > (pl * 0.333)) { l = pl * 0.333; w = l * p->width / p->length; } tx = 0.5 * (pw * 0.333 - w); ty = 0.5 * (pl * 0.333 - l); p->outmatrix[0][0] = (float)(w / p->width); p->outmatrix[1][0] = 0.0f; p->outmatrix[0][1] = 0.0f; p->outmatrix[1][1] = (float)(w / p->width); p->outmatrix[0][2] = (float)(tx + x * pw / 3); p->outmatrix[1][2] = (float)(ty + y * pl / 3); break; case 16 : x = pos & 3; y = 3 - pos / 4; w = pw * 0.25; l = w * p->length / p->width; if (l > (pl * 0.25)) { l = pl * 0.25; w = l * p->width / p->length; } tx = 0.5 * (pw * 0.25 - w); ty = 0.5 * (pl * 0.25 - l); p->outmatrix[0][0] = (float)(w / p->width); p->outmatrix[1][0] = 0.0f; p->outmatrix[0][1] = 0.0f; p->outmatrix[1][1] = (float)(w / p->width); p->outmatrix[0][2] = (float)(tx + x * pw / 4); p->outmatrix[1][2] = (float)(ty + y * pl / 4); break; } } /* * 'pspdf_prepare_outpages()' - Prepare output pages... 
*/ static void pspdf_prepare_outpages() { int c, i, j; /* Looping vars */ int nup; /* Current number-up value */ page_t *page; /* Current page */ outpage_t *outpage; /* Current output page */ // Allocate an output page array... outpages = (outpage_t *)malloc(sizeof(outpage_t) * num_pages); memset(outpages, -1, sizeof(outpage_t) * num_pages); num_outpages = 0; outpage = outpages; // Handle the title page, as needed... if (TitlePage) { for (i = 0, j = 0, nup = -1, page = pages; i < chapter_starts[1]; i ++, page ++) { if (nup != page->nup) { if (j) { // Break the current output page... outpage ++; num_outpages ++; } nup = page->nup; j = 0; } if (!j) outpage->nup = nup; pspdf_transform_page(num_outpages, j, i); j ++; if (j >= nup) { j = 0; outpage ++; num_outpages ++; } } if (j) { // Break the current output page... outpage ++; num_outpages ++; } } // Loop through each chapter, adding pages as needed... if (OutputType == OUTPUT_BOOK && TocLevels > 0) c = 0; else c = 1; for (; c <= TocDocCount; c ++) { if (chapter_starts[c] < 0) continue; chapter_outstarts[c] = num_outpages; for (i = chapter_starts[c], j = 0, nup = -1, page = pages + i; i <= chapter_ends[c]; i ++, page ++) { if (nup != page->nup) { if (j) { // Break the current output page... outpage ++; num_outpages ++; } nup = page->nup; j = 0; } if (!j) outpage->nup = nup; pspdf_transform_page(num_outpages, j, i); j ++; if (j >= nup) { j = 0; outpage ++; num_outpages ++; } } if (j) { // Break the current output page... outpage ++; num_outpages ++; } chapter_outends[c] = num_outpages; } #ifdef DEBUG for (c = 0; c <= TocDocCount; c ++) printf("chapter_outstarts[%d] = %d, chapter_outends[%d] = %d\n", c, chapter_outstarts[c], c, chapter_outends[c]); printf("num_outpages = %d\n", (int)num_outpages); for (i = 0, outpage = outpages; i < (int)num_outpages; i ++, outpage ++) { printf("outpage[%d]:\tnup=%d, pages=[", i, outpage->nup); for (j = 0; j < outpage->nup; j ++) printf(" %d", outpage->pages[j]); puts(" ]"); page = pages + outpage->pages[0]; printf("\t\twidth = %d, length = %d\n", page->width, page->length); } for (c = 0; c <= TocDocCount; c ++) printf("chapter_starts[%d] = %d, chapter_ends[%d] = %d\n", c, chapter_starts[c], c, chapter_ends[c]); for (i = 0; i < (int)num_pages; i ++) printf("pages[%d]->outpage = %d\n", i, pages[i].outpage); for (i = 0; i < (int)num_headings; i ++) printf("heading_pages[%d] = %d\n", i, heading_pages[i]); for (i = 0; i < (int)num_links; i ++) printf("links[%d].name = \"%s\", page = %d\n", i, links[i].name, links[i].page); #endif // DEBUG } /* * 'pspdf_prepare_page()' - Add headers/footers to page before writing... */ static void pspdf_prepare_page(int page) /* I - Page number */ { int print_page; /* Printed page # */ char page_text[64]; /* Page number text */ int top; /* Top of page */ DEBUG_printf(("pspdf_prepare_page(%d)\n", page)); /* * Make a page number; use roman numerals for the table of contents * and arabic numbers for all others... */ if (chapter == 0 && OutputType == OUTPUT_BOOK) { print_page = page - chapter_starts[0] + 1; strlcpy(page_text, format_number(print_page, 'i'), sizeof(page_text)); } else if (chapter < 0) { print_page = 0; // Safe because page_text is more than 6 chars strlcpy(page_text, (page & 1) ? 
(char *)"eltit" : (char *)"title", sizeof(page_text)); } else { print_page = page - chapter_starts[1] + 1; strlcpy(page_text, format_number(print_page, '1'), sizeof(page_text)); } DEBUG_printf(("BEFORE page %d page_text is \"%s\"...\n", page, page_text)); DEBUG_printf((" header[0] = \"%s\"\n", pages[page].header[0])); DEBUG_printf((" header[1] = \"%s\"\n", pages[page].header[1])); DEBUG_printf((" header[2] = \"%s\"\n", pages[page].header[2])); /* * Add page headings... */ if (pages[page].landscape) { PagePrintWidth = pages[page].length - pages[page].right - pages[page].left; PagePrintLength = pages[page].width - pages[page].top - pages[page].bottom; } else { PagePrintWidth = pages[page].width - pages[page].right - pages[page].left; PagePrintLength = pages[page].length - pages[page].top - pages[page].bottom; } top = (int)(PagePrintLength - HeadFootSize); if (chapter == 0) { /* * Add table-of-contents header & footer... */ pspdf_prepare_heading(page, print_page, pages[page].header, top, page_text, sizeof(page_text)); pspdf_prepare_heading(page, print_page, pages[page].footer, 0, page_text, sizeof(page_text)); } else if (chapter > 0 && !title_page) { /* * Add chapter header & footer... */ if (page > chapter_starts[chapter] || OutputType != OUTPUT_BOOK) pspdf_prepare_heading(page, print_page, pages[page].header, top, page_text, sizeof(page_text)); else pspdf_prepare_heading(page, print_page, pages[page].header1, top, page_text, sizeof(page_text)); pspdf_prepare_heading(page, print_page, pages[page].footer, 0, page_text, sizeof(page_text)); } /* * Copy the page number for the TOC... */ strlcpy(pages[page].page_text, page_text, sizeof(pages[page].page_text)); DEBUG_printf(("AFTER page %d page_text is \"%s\"...\n", page, page_text)); } /* * 'pspdf_prepare_heading()' - Add headers/footers to page before writing... */ static void pspdf_prepare_heading(int page, // I - Page number int print_page, // I - Printed page number uchar **format, // I - Page headings int y, // I - Baseline of heading char *page_text, // O - Page number text int page_len) // I - Size of page text { int pos, // Position in heading dir; // Direction of page char *number; // Page number char buffer[1024], // String buffer *bufptr, // Pointer into buffer *formatptr; // Pointer into format string int formatlen; // Length of format command string render_t *temp; // Render structure for titles, etc. DEBUG_printf(("pspdf_prepare_heading(%d, %d, [\"%s\",\"%s\",\"%s\"], %d, %p, %d)\n", page, print_page, format[0], format[1], format[2], y, (void *)page_text, page_len)); /* * Add page headings... */ if (PageDuplex && (page & 1)) { dir = -1; format += 2; } else dir = 1; for (pos = 0; pos < 3; pos ++, format += dir) { /* * Add the appropriate object... */ if (!*format) continue; temp = NULL; if (strncasecmp((char *)*format, "$LOGOIMAGE", 10) == 0 && logo_image) { // Insert the logo image... if (y < (PagePrintLength / 2)) temp = new_render(page, RENDER_IMAGE, 0, y, logo_width, logo_height, logo_image); else // Offset from top temp = new_render(page, RENDER_IMAGE, 0, y + HeadFootSize - logo_height, logo_width, logo_height, logo_image); } else if (strncasecmp((char *)*format, "$LETTERHEAD", 11) == 0 && lh_image) { // Insert the logo image as a letterhead... 
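      // Same placement rule as $LOGOIMAGE above: a baseline in the lower half
      // of the page is a footer, so the image sits on the baseline; otherwise
      // it is a header and hangs down from the top of the printable area.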
if (y < (PagePrintLength / 2)) temp = new_render(page, RENDER_IMAGE, 0, y, lh_width, lh_height, lh_image); else // Offset from top temp = new_render(page, RENDER_IMAGE, 0, y + HeadFootSize - lh_height, lh_width, lh_height, lh_image); } else if (strncasecmp((char *)*format, "$HFIMAGE", 8) == 0) { int hfi; // Header/footer image index char *hfp; // Pointer into $HFIMAGE hfi = strtol((char*)((*format) + 8), &hfp, 10); if (hfi < 0 || hfi >= MAX_HF_IMAGES || !(isspace(*hfp) || !*hfp)) progress_error(HD_ERROR_BAD_HF_STRING, "Bad $HFIMAGE... substitution on page %d.", page + 1); else { if (y < (PagePrintLength / 2)) temp = new_render(page, RENDER_IMAGE, 0, y, hfimage_width[hfi], hfimage_height[hfi], hfimage[hfi]); else temp = new_render(page, RENDER_IMAGE, 0, y + HeadFootSize - hfimage_height[hfi], hfimage_width[hfi], hfimage_height[hfi], hfimage[hfi]); } } else { // Otherwise format the text... buffer[sizeof(buffer) - 1] = '\0'; for (bufptr = buffer, formatptr = (char *)*format; *formatptr;) { if (*formatptr == '$') { if (formatptr[1] == '$') { if (bufptr < (buffer + sizeof(buffer) - 1)) *bufptr++ = '$'; formatptr += 2; continue; } else if (!formatptr[1]) break; formatptr ++; for (formatlen = 1; isalpha(formatptr[formatlen]); formatlen ++); if (formatlen == 4 && strncasecmp(formatptr, "PAGE", 4) == 0) { if (formatptr[4] == '(' && formatptr[5] && formatptr[6] == ')') { number = format_number(print_page, formatptr[5]); formatptr += 7; } else { number = format_number(print_page, '1'); formatptr += 4; } strlcpy(bufptr, number, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } else if (formatlen == 5 && strncasecmp(formatptr, "PAGES", 5) == 0) { if (formatptr[5] == '(' && formatptr[6] && formatptr[7] == ')') { number = format_number(chapter_ends[TocDocCount] - chapter_starts[1] + 1, formatptr[6]); formatptr += 8; } else { number = format_number(chapter_ends[TocDocCount] - chapter_starts[1] + 1, '1'); formatptr += 5; } strlcpy(bufptr, number, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } else if (formatlen == 11 && strncasecmp(formatptr, "CHAPTERPAGE", 11) == 0) { int chapter_page; chapter_page = print_page - chapter_starts[::chapter] + chapter_starts[1]; if (formatptr[11] == '(' && formatptr[12] && formatptr[13] == ')') { number = format_number(chapter_page, formatptr[12]); formatptr += 14; } else { number = format_number(chapter_page, '1'); formatptr += 11; } strlcpy(bufptr, number, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } else if (formatlen == 12 && strncasecmp(formatptr, "CHAPTERPAGES", 12) == 0) { if (formatptr[12] == '(' && formatptr[13] && formatptr[14] == ')') { number = format_number(chapter_ends[::chapter] - chapter_starts[::chapter] + 1, formatptr[13]); formatptr += 15; } else { number = format_number(chapter_ends[::chapter] - chapter_starts[::chapter] + 1, '1'); formatptr += 12; } strlcpy(bufptr, number, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } else if (formatlen == 5 && strncasecmp(formatptr, "TITLE", 5) == 0) { formatptr += 5; if (doc_title) { strlcpy(bufptr, (char *)doc_title, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } } else if (formatlen == 7 && strncasecmp(formatptr, "CHAPTER", 7) == 0) { formatptr += 7; if (pages[page].chapter) { strlcpy(bufptr, (char *)(pages[page].chapter), sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } } else if (formatlen == 7 && strncasecmp(formatptr, "HEADING", 7) == 0) { formatptr += 7; if 
(pages[page].heading) { strlcpy(bufptr, (char *)(pages[page].heading), sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } } else if (formatlen == 4 && strncasecmp(formatptr, "TIME", 4) == 0) { formatptr += 4; strftime(bufptr, sizeof(buffer) - 1 - (size_t)(bufptr - buffer), "%X", &doc_date); bufptr += strlen(bufptr); } else if (formatlen == 4 && strncasecmp(formatptr, "DATE", 4) == 0) { formatptr += 4; strftime(bufptr, sizeof(buffer) - 1 - (size_t)(bufptr - buffer), "%x", &doc_date); bufptr += strlen(bufptr); } else if (formatlen == 3 && strncasecmp(formatptr, "URL", 3) == 0) { uchar *url = pages[page].url ? pages[page].url : (uchar *)"Unknown"; formatptr += 3; strlcpy(bufptr, (char *)url, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } else { progress_error(HD_ERROR_BAD_HF_STRING, "Bad header/footer $ command on page %d.", page + 1); strlcpy(bufptr, formatptr - 1, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); formatptr += formatlen; } } else if (bufptr < (buffer + sizeof(buffer) - 1)) *bufptr++ = *formatptr++; else break; } *bufptr = '\0'; temp = new_render(page, RENDER_TEXT, 0, y, get_width((uchar *)buffer, HeadFootType, HeadFootStyle, SIZE_P) * HeadFootSize / _htmlSizes[SIZE_P], HeadFootSize, (uchar *)buffer); if (strstr((char *)*format, "$PAGE") || strstr((char *)*format, "$CHAPTERPAGE")) strlcpy(page_text, buffer, (size_t)page_len); } if (temp == NULL) continue; /* * Justify the object... */ switch (pos) { case 0 : /* Left justified */ break; case 1 : /* Centered */ temp->x = (float)((PagePrintWidth - temp->width) * 0.5); break; case 2 : /* Right justified */ temp->x = PagePrintWidth - temp->width; break; } /* * Set the text font and color... */ if (temp->type == RENDER_TEXT) { temp->data.text.typeface = HeadFootType; temp->data.text.style = HeadFootStyle; temp->data.text.size = (float)HeadFootSize; get_color(_htmlTextColor, temp->data.text.rgb); } } } /* * 'ps_write_document()' - Write all render entities to PostScript file(s). */ static void ps_write_document(uchar *author, /* I - Author of document */ uchar *creator, /* I - Application that generated the HTML file */ uchar *copyright, /* I - Copyright (if any) on the document */ uchar *keywords, /* I - Search keywords */ uchar *subject, /* I - Subject */ uchar *lang) /* I - Language */ { FILE *out; /* Output file */ int page; /* Current page # */ int first; /* First chapter */ /* * Write the title page(s)... 
*/ chapter = -1; out = NULL; if (!OutputFiles) { out = open_file(); if (out == NULL) { progress_error(HD_ERROR_WRITE_ERROR, "Unable to open output file - %s\n", strerror(errno)); return; } write_prolog(out, num_outpages, author, creator, copyright, keywords, subject); } if (OutputType == OUTPUT_BOOK && TocLevels > 0) first = 0; else first = 1; if (TitlePage) { if (OutputFiles) { out = open_file(); write_prolog(out, chapter_outstarts[first], author, creator, copyright, keywords, subject); } for (page = 0; page < chapter_outstarts[first]; page ++) ps_write_outpage(out, page); if (OutputFiles) { write_trailer(out, 0, lang); progress_error(HD_ERROR_NONE, "BYTES: %ld", ftell(out)); fclose(out); } } for (chapter = first; chapter <= TocDocCount; chapter ++) { if (chapter_starts[chapter] < 0) continue; if (OutputFiles) { out = open_file(); if (out == NULL) { progress_error(HD_ERROR_WRITE_ERROR, "Unable to create output file - %s\n", strerror(errno)); return; } write_prolog(out, chapter_outends[chapter] - chapter_outstarts[chapter], author, creator, copyright, keywords, subject); } for (page = chapter_outstarts[chapter]; page < chapter_outends[chapter]; page ++) ps_write_outpage(out, page); /* * Close the output file as necessary... */ if (OutputFiles) { write_trailer(out, 0, lang); progress_error(HD_ERROR_NONE, "BYTES: %ld", ftell(out)); fclose(out); } } /* * Close the output file as necessary... */ if (!OutputFiles) { write_trailer(out, 0, lang); progress_error(HD_ERROR_NONE, "BYTES: %ld", ftell(out)); if (out != stdout) fclose(out); } if (Verbosity) progress_hide(); } /* * 'ps_write_outpage()' - Write an output page. */ static void ps_write_outpage(FILE *out, /* I - Output file */ int outpage) /* I - Output page number */ { int file_page; /* Current page # in document */ page_t *p; /* Current page */ outpage_t *op; /* Current output page */ int i; /* Looping var */ if (outpage < 0 || outpage >= (int)num_outpages) return; op = outpages + outpage; p = pages + op->pages[0]; DEBUG_printf(("ps_write_outpage(%p, %d)\n", (void *)out, outpage)); /* * Let the user know which page we are writing... */ if (Verbosity) { progress_show("Writing page %s...", p->page_text); progress_update(100 * outpage / (int)num_outpages); } /* * Figure out the page number in the file... */ if (OutputFiles && chapter >= 0) file_page = outpage - chapter_outstarts[chapter] + 1; else if (chapter < 0) file_page = outpage + 1; else if (chapter == 0) { if (TitlePage) file_page = outpage + 1; else file_page = outpage - chapter_outstarts[0] + 1; } else { if (TitlePage) file_page = outpage + 1; else file_page = outpage - chapter_outstarts[1] + 1; } /* * Output the page prolog... 
*/ fprintf(out, "%%%%Page: (%s) %d\n", p->page_text, file_page); if (op->nup == 1) { if (p->duplex && !(file_page & 1)) fprintf(out, "%%%%PageBoundingBox: %d %d %d %d\n", p->right, p->bottom, p->width - p->left, p->length - p->top); else fprintf(out, "%%%%PageBoundingBox: %d %d %d %d\n", p->left, p->bottom, p->width - p->right, p->length - p->top); } else fprintf(out, "%%%%PageBoundingBox: 0 0 %d %d\n", p->width, p->length); if (PSLevel > 1 && PSCommands) { fputs("%%BeginPageSetup\n", out); if (p->width == 612 && p->length == 792) fputs("%%BeginFeature: *PageSize Letter\n", out); else if (p->width == 612 && p->length == 1008) fputs("%%BeginFeature: *PageSize Legal\n", out); else if (p->width == 792 && p->length == 1224) fputs("%%BeginFeature: *PageSize Tabloid\n", out); else if (p->width == 842 && p->length == 1190) fputs("%%BeginFeature: *PageSize A3\n", out); else if (p->width == 595 && p->length == 842) fputs("%%BeginFeature: *PageSize A4\n", out); else fprintf(out, "%%%%BeginFeature: *PageSize w%dh%d\n", p->width, p->length); fprintf(out, "%d %d SetPageSize\n", p->width, p->length); fputs("%%EndFeature\n", out); if (p->duplex) { if (p->landscape) { fputs("%%BeginFeature: *Duplex DuplexTumble\n", out); fputs("true true SetDuplexMode\n", out); fputs("%%EndFeature\n", out); } else { fputs("%%BeginFeature: *Duplex DuplexNoTumble\n", out); fputs("true false SetDuplexMode\n", out); fputs("%%EndFeature\n", out); } } else { fputs("%%BeginFeature: *Duplex None\n", out); fputs("false false SetDuplexMode\n", out); fputs("%%EndFeature\n", out); } if (p->media_color[0]) { fprintf(out, "%%%%BeginFeature: *MediaColor %s\n", p->media_color); fprintf(out, "(%s) SetMediaColor\n", p->media_color); fputs("%%EndFeature\n", out); } if (p->media_position) { fprintf(out, "%%%%BeginFeature: *InputSlot Tray%d\n", p->media_position); fprintf(out, "%d SetMediaPosition\n", p->media_position); fputs("%%EndFeature\n", out); } if (p->media_type[0]) { fprintf(out, "%%%%BeginFeature: *MediaType %s\n", p->media_type); fprintf(out, "(%s) SetMediaType\n", p->media_type); fputs("%%EndFeature\n", out); } fputs("%%EndPageSetup\n", out); } /* * Render all of the pages... */ switch (op->nup) { case 1 : ps_write_page(out, op->pages[0]); break; default : for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; fprintf(out, "GS[%.3f %.3f %.3f %.3f %.3f %.3f]CM\n", p->outmatrix[0][0], p->outmatrix[1][0], p->outmatrix[0][1], p->outmatrix[1][1], p->outmatrix[0][2], p->outmatrix[1][2]); ps_write_page(out, op->pages[i]); fputs("GR\n", out); } break; } /* * Output the page trailer... */ fputs("SP\n", out); fflush(out); } /* * 'ps_write_page()' - Write all render entities on a page to a PostScript file. */ static void ps_write_page(FILE *out, /* I - Output file */ int page) /* I - Page number */ { render_t *r, /* Render pointer */ *next; /* Next render */ page_t *p; /* Current page */ const char *debug; /* HTMLDOC_DEBUG environment variable */ if (page < 0 || page >= (int)alloc_pages) return; p = pages + page; DEBUG_printf(("ps_write_page(%p, %d)\n", (void *)out, page)); /* * Clear the render cache... */ render_typeface = -1; render_style = -1; render_size = -1; render_rgb[0] = -1.0f; render_rgb[1] = -1.0f; render_rgb[2] = -1.0f; render_x = -1.0f; render_y = -1.0f; render_spacing = -1.0f; /* * Setup the page... 
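 *
 * Each page is bracketed by GS/GR: an optional rotation for landscape
 * pages, the background, and a translation to the page margins, after
 * which boxes and images are drawn and then the text pass frees every
 * render node.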
*/ fputs("GS\n", out); if (p->landscape) { if (p->duplex && (page & 1)) fprintf(out, "0 %d T -90 RO\n", p->length); else fprintf(out, "%d 0 T 90 RO\n", p->width); } write_background(page, out); if (p->duplex && (page & 1)) fprintf(out, "%d %d T\n", p->right, p->bottom); else fprintf(out, "%d %d T\n", p->left, p->bottom); /* * Render all graphics elements... */ for (r = p->start; r != NULL; r = r->next) switch (r->type) { case RENDER_BOX : set_color(out, r->data.box); set_pos(out, r->x, r->y); if (r->height > 0.0f) fprintf(out, " %.1f %.1f F\n", r->width, r->height); else fprintf(out, " %.1f L\n", r->width); render_x = -1.0f; break; case RENDER_IMAGE : if (r->width > 0.01f && r->height > 0.01f) write_image(out, r); break; } /* * Render all text elements, freeing used memory as we go... */ for (r = p->start, next = NULL; r != NULL; r = next) { if (r->type == RENDER_TEXT) write_text(out, r); next = r->next; free(r); } p->start = NULL; if ((debug = getenv("HTMLDOC_DEBUG")) != NULL && strstr(debug, "margin")) { // Show printable area... fprintf(out, "1 0 1 C 0 0 %d %d B\n", p->width - p->right - p->left, p->length - p->top - p->bottom); } /* * Output the page trailer... */ fputs("GR\n", out); } /* * 'ps_write_background()' - Write a background image... */ static void ps_write_background(FILE *out) /* I - Output file */ { int y, /* Current line */ pwidth; /* Pixel width */ if (!background_image->pixels) image_load(background_image->filename, !OutputColor, 1); pwidth = background_image->width * background_image->depth; fputs("/BG[", out); for (y = 0; y < background_image->height; y ++) { putc('<', out); ps_hex(out, background_image->pixels + y * pwidth, pwidth); putc('>', out); } fputs("]def", out); image_unload(background_image); } /* * 'pdf_write_document()' - Write all render entities to a PDF file. */ static void pdf_write_document(uchar *author, // I - Author of document uchar *creator, // I - Application that generated the HTML file uchar *copyright, // I - Copyright (if any) on the document uchar *keywords, // I - Search keywords uchar *subject, // I - Subject uchar *lang, // I - Language tree_t *doc, // I - Document tree_t *toc) // I - Table of contents tree { int i; // Looping variable FILE *out; // Output file int outpage, // Current page # heading; // Current heading # int bytes; // Number of bytes char buffer[8192]; // Copy buffer int num_images; // Number of images in document image_t **images; // Pointers to images render_t temp; // Dummy rendering data... // Open the output file... out = open_file(); if (out == NULL) { progress_error(HD_ERROR_WRITE_ERROR, "Unable to write document file - %s\n", strerror(errno)); return; } // Clear the objects array... num_objects = 0; alloc_objects = 0; objects = NULL; // Write the prolog... write_prolog(out, num_outpages, author, creator, copyright, keywords, subject); // Write images as needed... num_images = image_getlist(&images); for (i = 0; i < num_images; i ++) { int hfi; // Header/footer image index for (hfi = 0; hfi < MAX_HF_IMAGES; hfi ++) if (images[i] == hfimage[hfi]) break; if (images[i]->use > 1 || images[i]->mask || (images[i]->width * images[i]->height * images[i]->depth) > 65536 || images[i] == background_image || images[i] == logo_image || hfi < MAX_HF_IMAGES) { progress_show("Writing image %d (%s)...", i + 1, images[i]->filename); progress_update(100 * i / num_images); temp.data.image = images[i]; write_image(out, &temp, 1); } } // Write links and target names... 
pdf_write_links(out); if (PDFVersion >= 12) pdf_write_names(out); // Verify that everything is working so far... pdf_start_object(out); if (pages_object != (int)num_objects) progress_error(HD_ERROR_INTERNAL_ERROR, "Internal error: pages_object != num_objects"); fputs("/Type/Pages", out); fprintf(out, "/Count %d", (int)num_outpages); fputs("/Kids[", out); for (outpage = 0; outpage < (int)num_outpages; outpage ++) fprintf(out, "%d 0 R\n", pages_object + outpage * 2 + 1); fputs("]", out); pdf_end_object(out); for (outpage = 0; outpage < (int)num_outpages; outpage ++) pdf_write_outpage(out, outpage); if (OutputType == OUTPUT_BOOK && TocLevels > 0) { /* * Write the outline tree using the table-of-contents... */ heading = 0; #ifdef DEBUG_TOC pdf_text_contents(out, toc); #endif // DEBUG_TOC pdf_write_contents(out, toc, 0, 0, 0, &heading); } else { /* * Write the outline tree using the HTML files. */ pdf_write_files(out, doc); } /* * Write the trailer and close the output file... */ write_trailer(out, 0, lang); progress_error(HD_ERROR_NONE, "BYTES: %ld", ftell(out)); if (CGIMode) { const char *meta_filename = (const char *)htmlGetMeta(doc, (uchar *)"HTMLDOC.filename"); const char *filename; if (meta_filename) { if ((filename = strrchr(meta_filename, '/')) != NULL) filename ++; else filename = meta_filename; } else filename = "htmldoc.pdf"; // In CGI mode, we only produce PDF output to stdout... printf("Content-Type: application/pdf\r\n" "Content-Length: %ld\r\n" "Content-Disposition: inline; filename=\"%s\"\r\n" "Accept-Ranges: none\r\n" "X-Creator: HTMLDOC " SVERSION "\r\n" "\r\n", ftell(out), filename); } fclose(out); // // If we are sending the output to stdout, copy the temp file now... // if (!OutputPath[0]) { #ifdef WIN32 // Make sure we are in binary mode... stupid Microsoft! setmode(1, O_BINARY); #elif defined(__EMX__) // OS/2 has a setmode for FILE's... fflush(stdout); _fsetmode(stdout, "b"); #endif // WIN32 || __EMX__ // Open the temporary file and copy it to stdout... out = fopen(stdout_filename, "rb"); while ((bytes = fread(buffer, 1, sizeof(buffer), out)) > 0) fwrite(buffer, 1, (size_t)bytes, stdout); // Close the temporary file (it is removed when the program exits...) fclose(out); } // Clear the objects array... if (alloc_objects) { free(objects); num_objects = 0; alloc_objects = 0; objects = NULL; } if (Verbosity) progress_hide(); } /* * 'pdf_write_resources()' - Write the resources dictionary for a page. 
*/ static void pdf_write_resources(FILE *out, /* I - Output file */ int outpage) /* I - Output page for resources */ { int i; /* Looping var */ outpage_t *op; /* Current output page */ page_t *p; /* Current page */ render_t *r; /* Render pointer */ int fonts_used[TYPE_MAX * STYLE_MAX]; /* Non-zero if the page uses a font */ int images_used; /* Non-zero if the page uses an image */ int text_used; /* Non-zero if the page uses text */ static const char *effects[] = /* Effects and their commands */ { "", "/S/Box/M/I", "/S/Box/M/O", "/S/Dissolve", "/S/Glitter/Di 270", "/S/Glitter/Di 315", "/S/Glitter/Di 0", "/S/Blinds/Dm/H", "/S/Split/Dm/H/M/I", "/S/Split/Dm/H/M/O", "/S/Blinds/Dm/V", "/S/Split/Dm/V/M/I", "/S/Split/Dm/V/M/O", "/S/Wipe/Di 270", "/S/Wipe/Di 180", "/S/Wipe/Di 0", "/S/Wipe/Di 90" }; memset(fonts_used, 0, sizeof(fonts_used)); images_used = background_image != NULL; text_used = 0; op = outpages + outpage; for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; for (r = p->start; r != NULL; r = r->next) if (r->type == RENDER_IMAGE) images_used = 1; else if (r->type == RENDER_TEXT) { text_used = 1; fonts_used[r->data.text.typeface * 4 + r->data.text.style] = 1; } } fputs("/Resources<<", out); if (!images_used) fputs("/ProcSet[/PDF/Text]", out); else if (PDFVersion >= 12) { if (OutputColor) fputs("/ProcSet[/PDF/Text/ImageB/ImageC/ImageI]", out); else fputs("/ProcSet[/PDF/Text/ImageB/ImageI]", out); } else { if (OutputColor) fputs("/ProcSet[/PDF/Text/ImageB/ImageC]", out); else fputs("/ProcSet[/PDF/Text/ImageB]", out); } if (text_used) { fputs("/Font<<", out); for (i = 0; i < (TYPE_MAX * STYLE_MAX); i ++) if (fonts_used[i]) fprintf(out, "/F%x %d 0 R", i, font_objects[i]); fputs(">>", out); } fputs("/XObject<<", out); for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; for (r = p->start; r != NULL; r = r->next) if (r->type == RENDER_IMAGE && r->data.image->obj) fprintf(out, "/I%d %d 0 R", r->data.image->obj, r->data.image->obj); } if (background_image) fprintf(out, "/I%d %d 0 R", background_image->obj, background_image->obj); fputs(">>>>", out); if (PDFEffect) fprintf(out, "/Dur %.0f/Trans<</Type/Trans/D %.1f%s>>", PDFPageDuration, PDFEffectDuration, effects[PDFEffect]); } /* * 'pdf_write_outpage()' - Write an output page. */ static void pdf_write_outpage(FILE *out, /* I - Output file */ int outpage) /* I - Output page number */ { int i; /* Looping var */ page_t *p; /* Current page */ outpage_t *op; /* Output page */ DEBUG_printf(("pdf_write_outpage(out = %p, outpage = %d)\n", (void *)out, outpage)); if (outpage < 0 || outpage >= (int)num_outpages) return; op = outpages + outpage; p = pages + op->pages[0]; DEBUG_printf(("op->pages[0] = %d (%dx%d)\n", op->pages[0], p->width, p->length)); /* * Let the user know which page we are writing... */ if (Verbosity) { progress_show("Writing page %s...", p->page_text); progress_update(100 * outpage / (int)num_outpages); } /* * Output the page prolog... */ pdf_start_object(out); fputs("/Type/Page", out); fprintf(out, "/Parent %d 0 R", pages_object); fprintf(out, "/Contents %d 0 R", (int)num_objects + 1); if (p->landscape) fprintf(out, "/MediaBox[0 0 %d %d]", p->length, p->width); else fprintf(out, "/MediaBox[0 0 %d %d]", p->width, p->length); pdf_write_resources(out, outpage); /* * Actions (links)... 
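 *
 * The page object references its annotation array (when present) and a
 * separate content stream, which is Flate-compressed when Compression is
 * enabled.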
*/ if (op->annot_object > 0) fprintf(out, "/Annots %d 0 R", op->annot_object); pdf_end_object(out); pdf_start_object(out); if (Compression) fputs("/Filter/FlateDecode", out); pdf_start_stream(out); flate_open_stream(out); /* * Render all of the pages... */ switch (op->nup) { case 1 : pdf_write_page(out, op->pages[0]); break; default : for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; flate_printf(out, "q %.3f %.3f %.3f %.3f %.3f %.3f cm\n", p->outmatrix[0][0], p->outmatrix[1][0], p->outmatrix[0][1], p->outmatrix[1][1], p->outmatrix[0][2], p->outmatrix[1][2]); pdf_write_page(out, op->pages[i]); flate_puts("Q\n", out); } break; } /* * Close out the page... */ flate_close_stream(out); pdf_end_object(out); } /* * 'pdf_write_page()' - Write a page to a PDF file. */ static void pdf_write_page(FILE *out, /* I - Output file */ int page) /* I - Page number */ { render_t *r, /* Render pointer */ *next; /* Next render */ float box[3]; /* RGB color for boxes */ page_t *p; /* Current page */ const char *debug; /* HTMLDOC_DEBUG environment variable */ if (page < 0 || page >= (int)alloc_pages) return; p = pages + page; /* * Clear the render cache... */ render_rgb[0] = -1.0f; render_rgb[1] = -1.0f; render_rgb[2] = -1.0f; render_x = -1.0f; render_y = -1.0f; /* * Output the page header... */ flate_puts("q\n", out); write_background(page, out); if (p->duplex && (page & 1)) flate_printf(out, "1 0 0 1 %d %d cm\n", p->right, p->bottom); else flate_printf(out, "1 0 0 1 %d %d cm\n", p->left, p->bottom); /* * Render all graphics elements... */ box[0] = -1.0f; box[1] = -1.0f; box[2] = -1.0f; for (r = p->start; r != NULL; r = r->next) switch (r->type) { case RENDER_IMAGE : if (r->width > 0.01f && r->height > 0.01f) write_image(out, r); break; case RENDER_BOX : if (r->height == 0.0) { if (box[0] != r->data.box[0] || box[1] != r->data.box[1] || box[2] != r->data.box[2]) { box[0] = r->data.box[0]; box[1] = r->data.box[1]; box[2] = r->data.box[2]; if (OutputColor) flate_printf(out, "%.2f %.2f %.2f RG\n", box[0], box[1], box[2]); else flate_printf(out, "%.2f G\n", box[0] * 0.31f + box[1] * 0.61f + box[2] * 0.08f); } flate_printf(out, "%.1f %.1f m %.1f %.1f l S\n", r->x, r->y, r->x + r->width, r->y); } else { set_color(out, r->data.box); flate_printf(out, "%.1f %.1f %.1f %.1f re f\n", r->x, r->y, r->width, r->height); } break; } /* * Render all text elements, freeing used memory as we go... */ flate_puts("BT\n", out); render_typeface = -1; render_style = -1; render_size = -1; render_x = -1.0f; render_y = -1.0f; render_spacing = -1.0f; for (r = p->start, next = NULL; r != NULL; r = next) { if (r->type == RENDER_TEXT) write_text(out, r); next = r->next; free(r); } p->start = NULL; flate_puts("ET\n", out); if ((debug = getenv("HTMLDOC_DEBUG")) != NULL && strstr(debug, "margin")) { // Show printable area... flate_printf(out, "1 0 1 RG 0 0 %d %d re S\n", p->width - p->right - p->left, p->length - p->top - p->bottom); } /* * Output the page trailer... 
*/ flate_puts("Q\n", out); } #ifdef DEBUG_TOC static void pdf_text_contents(FILE *out, tree_t *toc, int indent) { static const char *spaces = " " " "; if (indent > 16) indent = 16; while (toc) { fprintf(out, "%% %s<%s>", spaces + 64 - 4 * indent, _htmlMarkups[toc->markup]); switch (toc->markup) { case MARKUP_A : tree_t *temp; for (temp = toc->child; temp; temp = temp->next) fputs((char *)temp->data, out); break; default : fputs("\n", out); pdf_text_contents(out, toc->child, indent + 1); fprintf(out, "%% %s", spaces + 64 - 4 * indent); break; } fprintf(out, "</%s>\n", _htmlMarkups[toc->markup]); toc = toc->next; } } #endif // DEBUG_TOC /* * 'pdf_write_contents()' - Write the table of contents as outline records to * a PDF file. */ static void pdf_write_contents(FILE *out, /* I - Output file */ tree_t *toc, /* I - Table of contents tree */ int parent, /* I - Parent outline object */ int prev, /* I - Previous outline object */ int next, /* I - Next outline object */ int *heading) /* IO - Current heading # */ { int i, /* Looping var */ thisobj, /* This object */ entry, /* TOC entry object */ count; /* Number of entries at this level */ uchar *text; /* Entry text */ tree_t *temp; /* Looping var */ int *entry_counts, /* Number of sub-entries for this entry */ *entry_objects; /* Objects for each entry */ tree_t **entries; /* Pointers to each entry */ float x, y; /* Position of link */ /* * Make an object for this entry... */ if (toc == NULL) { /* * This is for the Table of Contents page... */ thisobj = pdf_start_object(out); fprintf(out, "/Parent %d 0 R", parent); fputs("/Title", out); write_utf16(out, (uchar *)TocTitle); x = 0.0f; y = PagePrintLength + PageBottom; pspdf_transform_coords(pages + chapter_starts[0], x, y); fprintf(out, "/Dest[%d 0 R/XYZ %.0f %.0f 0]", pages_object + 2 * chapter_outstarts[0] + 1, x, y); if (prev > 0) fprintf(out, "/Prev %d 0 R", prev); if (next > 0) fprintf(out, "/Next %d 0 R", next); pdf_end_object(out); return; } /* * Allocate the arrays... Add 1 to hold the TOC at the top level... */ if ((entry_counts = (int *)calloc(sizeof(int), num_headings + 1)) == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d headings - %s", (int)num_headings, strerror(errno)); return; } if ((entry_objects = (int *)calloc(sizeof(int), num_headings + 1)) == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d headings - %s", (int)num_headings, strerror(errno)); free(entry_counts); return; } if ((entries = (tree_t **)calloc(sizeof(tree_t *), num_headings + 1)) == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d headings - %s", (int)num_headings, strerror(errno)); free(entry_objects); free(entry_counts); return; } if (parent == 0 && TocLevels > 0) { /* * Add the table of contents to the top-level contents... */ entries[0] = NULL; entry_objects[0] = num_objects + 2; entry = num_objects + 3; count = 1; } else { entry = num_objects + 2; count = 0; } /* * Find and count the children (entries)... 
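 *
 * Top-level entries are <B> nodes followed by a <UL> of children; lower
 * levels are <LI> nodes whose last child is the nested <UL>.  Object
 * numbers are assigned up front so /First, /Last, /Prev, and /Next can be
 * written before the child objects exist.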
*/ if (toc->markup == MARKUP_B && toc->next && toc->next->markup == MARKUP_UL) temp = toc->next->child; else if (toc->markup == MARKUP_LI && toc->last_child && toc->last_child->markup == MARKUP_UL) temp = toc->last_child->child; else temp = toc->child; for (; temp && count <= (int)num_headings; temp = temp->next) { if (temp->markup == MARKUP_B) { entries[count] = temp; entry_objects[count] = entry; if (temp->next && temp->next->markup == MARKUP_UL) entry_counts[count] = pdf_count_headings(temp->next->child); else entry_counts[count] = 0; entry += entry_counts[count] + 1; count ++; } else if (temp->markup == MARKUP_LI) { entries[count] = temp; entry_objects[count] = entry; if (temp->last_child && temp->last_child->markup == MARKUP_UL) entry_counts[count] = pdf_count_headings(temp->last_child); else entry_counts[count] = 0; entry += entry_counts[count] + 1; count ++; } } /* * Output the top-level object... */ thisobj = pdf_start_object(out); if (parent == 0) outline_object = thisobj; else fprintf(out, "/Parent %d 0 R", parent); if (count > 0) { fprintf(out, "/Count %d", parent == 0 ? count : -count); fprintf(out, "/First %d 0 R", entry_objects[0]); fprintf(out, "/Last %d 0 R", entry_objects[count - 1]); } if (parent > 0 && toc->child && toc->child->markup == MARKUP_A) { if ((text = htmlGetText(toc->child->child)) != NULL) { fputs("/Title", out); write_utf16(out, text); free(text); } i = heading_pages[*heading]; x = 0.0f; y = heading_tops[*heading] + pages[i].bottom; pspdf_transform_coords(pages + i, x, y); fprintf(out, "/Dest[%d 0 R/XYZ %.0f %.0f 0]", pages_object + 2 * pages[i].outpage + 1, x, y); (*heading) ++; } if (prev > 0) fprintf(out, "/Prev %d 0 R", prev); if (next > 0) fprintf(out, "/Next %d 0 R", next); pdf_end_object(out); for (i = 0; i < count ; i ++) pdf_write_contents(out, entries[i], thisobj, i > 0 ? entry_objects[i - 1] : 0, i < (count - 1) ? entry_objects[i + 1] : 0, heading); free(entry_objects); free(entry_counts); free(entries); } // // 'pdf_write_files()' - Write an outline of HTML files. // static void pdf_write_files(FILE *out, // I - Output file tree_t *doc) // I - Document tree { int i, // Looping var num_files, // Number of FILE elements alloc_text; // Allocated text? uchar *text; // Entry text tree_t *temp; // Current node link_t *link; // Link to file... float x, y; // Position of link // Figure out the number of (top-level) files in the document... for (num_files = 0, temp = doc; temp; temp = temp->next) if (temp->markup == MARKUP_FILE) num_files ++; if (num_files < 2) { // No files to outline... outline_object = 0; return; } // Write the outline dictionary... outline_object = pdf_start_object(out); fprintf(out, "/Count %d", num_files); fprintf(out, "/First %d 0 R", outline_object + 1); fprintf(out, "/Last %d 0 R", outline_object + num_files); pdf_end_object(out); // Now write the outline items... 
for (i = 0, temp = doc; temp; temp = temp->next) if (temp->markup == MARKUP_FILE) { alloc_text = 0; if ((text = get_title(temp->child)) != NULL) alloc_text = 1; else if ((text = htmlGetVariable(temp, (uchar *)"_HD_FILENAME")) == NULL) text = (uchar *)"Unknown"; pdf_start_object(out); fprintf(out, "/Parent %d 0 R", outline_object); fputs("/Title", out); write_utf16(out, text); if (alloc_text) free(text); if ((link = find_link(htmlGetVariable(temp, (uchar *)"_HD_FILENAME"))) != NULL) { x = 0.0f; y = link->top + pages[link->page].bottom; pspdf_transform_coords(pages + link->page, x, y); fprintf(out, "/Dest[%d 0 R/XYZ %.0f %.0f 0]", pages_object + 2 * pages[link->page].outpage + 1, x, y); } if (i > 0) fprintf(out, "/Prev %d 0 R", outline_object + i); if (i < (num_files - 1)) fprintf(out, "/Next %d 0 R", outline_object + i + 2); pdf_end_object(out); i ++; } } /* * 'pdf_count_headings()' - Count the number of headings under this TOC * entry. */ static int /* O - Number of headings found */ pdf_count_headings(tree_t *toc) /* I - TOC entry */ { int headings; /* Number of headings */ for (headings = 0; toc != NULL; toc = toc->next) { if (toc->markup == MARKUP_A) headings ++; if (toc->child != NULL) headings += pdf_count_headings(toc->child); } return (headings); } /* * PDF object state variables... */ static int pdf_stream_length = 0; static int pdf_stream_start = 0; static int pdf_object_type = 0; /* * 'pdf_start_object()' - Start a new PDF object... */ static int // O - Object number pdf_start_object(FILE *out, // I - File to write to int array) // I - 1 = array, 0 = dictionary { int *temp; // Temporary integer pointer num_objects ++; // Allocate memory as necessary... if (num_objects >= alloc_objects) { alloc_objects += ALLOC_OBJECTS; if (alloc_objects == ALLOC_OBJECTS) temp = (int *)malloc(sizeof(int) * alloc_objects); else temp = (int *)realloc(objects, sizeof(int) * alloc_objects); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d objects - %s", (int)alloc_objects, strerror(errno)); alloc_objects -= ALLOC_OBJECTS; return (0); } objects = temp; } objects[num_objects] = ftell(out); fprintf(out, "%d 0 obj", (int)num_objects); pdf_object_type = array; fputs(pdf_object_type ? "[" : "<<", out); return (num_objects); } /* * 'pdf_start_stream()' - Start a new PDF stream... */ static void pdf_start_stream(FILE *out) // I - File to write to { // Write the "/Length " string, get the position, and then write 10 // zeroes to cover the maximum size of a stream. fputs("/Length ", out); pdf_stream_length = ftell(out); fputs("0000000000>>stream\n", out); pdf_stream_start = ftell(out); } /* * 'pdf_end_object()' - End a PDF object... */ static void pdf_end_object(FILE *out) // I - File to write to { int length; // Total length of stream if (pdf_stream_start) { // For streams, go back and update the length field in the // object dictionary... length = ftell(out) - pdf_stream_start; fseek(out, pdf_stream_length, SEEK_SET); fprintf(out, "%-10d", length); fseek(out, 0, SEEK_END); pdf_stream_start = 0; fputs("endstream\n", out); } else fputs(pdf_object_type ? "]" : ">>", out); fputs("endobj\n", out); } /* * 'pdf_write_links()' - Write annotation link objects for each page in the * document. 
*/ static void pdf_write_links(FILE *out) /* I - Output file */ { int i, /* Looping var */ outpage, /* Current page */ lobj, /* Current link */ num_lobjs, /* Number of links on this page */ alloc_lobjs, /* Number of links to allocate */ *lobjs; /* Link objects */ float x, y; /* Position of last link */ render_t *r, /* Current render primitive */ *rlast, /* Last render link primitive */ *rprev; /* Previous render primitive */ link_t *link; /* Local link */ page_t *p; /* Current page */ outpage_t *op; /* Current output page */ /* * First combine adjacent, identical links... */ for (outpage = 0, op = outpages; outpage < (int)num_outpages; outpage ++, op ++) { for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; for (r = p->start, x = 0.0f, y = 0.0f, rlast = NULL, rprev = NULL; r != NULL; rprev = r, r = r->next) if (r->type == RENDER_LINK) { if (fabs(r->x - x) < 0.1f && fabs(r->y - y) < 0.1f && rlast != NULL && strcmp((const char *)rlast->data.link, (const char *)r->data.link) == 0) { // Combine this primitive with the previous one in rlast... rlast->width = r->x + r->width - rlast->x; x = rlast->x + rlast->width; // Delete this render primitive... rprev->next = r->next; free(r); r = rprev; } else { // Can't combine; just save this info for later use... rlast = r; x = r->x + r->width; y = r->y; } } } } /* * Setup the initial pages_object number... */ pages_object = num_objects + 1; /* * Add space for named links in PDF 1.2 output... */ if (PDFVersion >= 12) pages_object += num_links + 3; /* * Stop here if we won't be generating links in the output... */ if (!Links) return; /* * Figure out how many link objects we'll have... */ for (outpage = 0, op = outpages, alloc_lobjs = 0; outpage < (int)num_pages; outpage ++, op ++) { num_lobjs = 0; for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; for (r = p->start; r != NULL; r = r->next) if (r->type == RENDER_LINK) { if (find_link(r->data.link) != NULL) num_lobjs ++; else num_lobjs += 2; } } if (num_lobjs > 0) pages_object += num_lobjs + 1; if (num_lobjs > alloc_lobjs) alloc_lobjs = num_lobjs; } if (alloc_lobjs == 0) return; /* * Allocate memory for the links... */ if ((lobjs = (int *)malloc(sizeof(int) * (size_t)alloc_lobjs)) == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d link objects - %s", alloc_lobjs, strerror(errno)); return; } /* * Then generate annotation objects for all the links... */ for (outpage = 0, op = outpages; outpage < (int)num_pages; outpage ++, op ++) { num_lobjs = 0; for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; for (r = p->start; r != NULL; r = r->next) if (r->type == RENDER_LINK) { if ((link = find_link(r->data.link)) != NULL) { /* * Local link... 
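 *
 * Local links become /Subtype/Link annotations whose /Rect covers the
 * rendered text and whose /Dest jumps to the page containing the named
 * target.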
*/ float x1, y1, x2, y2; lobjs[num_lobjs ++] = pdf_start_object(out); fputs("/Subtype/Link", out); if (PageDuplex && (op->pages[i] & 1)) { x1 = r->x + p->right; y1 = r->y + p->bottom - 2; x2 = r->x + r->width + p->right; y2 = r->y + r->height + p->bottom; } else { x1 = r->x + p->left; y1 = r->y + p->bottom - 2; x2 = r->x + r->width + p->left; y2 = r->y + r->height + p->bottom; } pspdf_transform_coords(p, x1, y1); pspdf_transform_coords(p, x2, y2); fprintf(out, "/Rect[%.1f %.1f %.1f %.1f]", x1, y1, x2, y2); fputs("/Border[0 0 0]", out); x1 = 0.0f; y1 = link->top + pages[link->page].bottom; pspdf_transform_coords(pages + link->page, x1, y1); fprintf(out, "/Dest[%d 0 R/XYZ %.0f %.0f 0]", pages_object + 2 * pages[link->page].outpage + 1, x1, y1); pdf_end_object(out); } else { /* * Remote link... */ pdf_start_object(out); if (PDFVersion >= 12 && file_method((char *)r->data.link) == NULL) { #ifdef WIN32 if (strcasecmp(file_extension((char *)r->data.link), "pdf") == 0) #else if (strcmp(file_extension((char *)r->data.link), "pdf") == 0) #endif /* WIN32 */ { /* * Link to external PDF file... */ const char *target = file_target((char *)r->data.link); fputs("/S/GoToR", out); if (target) { char url[1024], *urlptr; fputs("/D", out); write_string(out, (uchar *)target, 0); strlcpy(url, (char *)r->data.link, sizeof(url)); if ((urlptr = strrchr(url, '#')) != NULL) *urlptr = '\0'; fputs("/F", out); write_string(out, (uchar *)url, 0); } else { fputs("/D[0/XYZ null null 0]/F", out); write_string(out, r->data.link, 0); } } else { /* * Link to external filename... */ fputs("/S/Launch", out); fputs("/F", out); write_string(out, r->data.link, 0); if (StrictHTML) progress_error(HD_ERROR_UNRESOLVED_LINK, "Unable to resolve link to \"%s\"!", r->data.link); } } else { /* * Link to web file... */ fputs("/S/URI", out); fputs("/URI", out); write_string(out, r->data.link, 0); } pdf_end_object(out); lobjs[num_lobjs ++] = pdf_start_object(out); fputs("/Subtype/Link", out); if (PageDuplex && (outpage & 1)) fprintf(out, "/Rect[%.1f %.1f %.1f %.1f]", r->x + PageRight, r->y + PageBottom, r->x + r->width + PageRight, r->y + r->height + PageBottom); else fprintf(out, "/Rect[%.1f %.1f %.1f %.1f]", r->x + PageLeft, r->y + PageBottom - 2, r->x + r->width + PageLeft, r->y + r->height + PageBottom); fputs("/Border[0 0 0]", out); fprintf(out, "/A %d 0 R", (int)num_objects - 1); pdf_end_object(out); } } } if (num_lobjs > 0) { outpages[outpage].annot_object = pdf_start_object(out, 1); for (lobj = 0; lobj < num_lobjs; lobj ++) fprintf(out, "%d 0 R%s", lobjs[lobj], lobj < (num_lobjs - 1) ? "\n" : ""); pdf_end_object(out); } } free(lobjs); } /* * 'pdf_write_names()' - Write named destinations for each link. */ static void pdf_write_names(FILE *out) /* I - Output file */ { int i; /* Looping var */ uchar *s; /* Current character in name */ link_t *link; /* Local link */ /* * Convert all link names to lowercase... */ for (i = num_links, link = links; i > 0; i --, link ++) for (s = link->name; *s != '\0'; s ++) *s = (uchar)tolower(*s); /* * Write the root name tree entry... */ names_object = pdf_start_object(out); fprintf(out, "/Dests %d 0 R", (int)num_objects + 1); pdf_end_object(out); /* * Write the name tree child list... */ pdf_start_object(out); fprintf(out, "/Kids[%d 0 R]", (int)num_objects + 1); pdf_end_object(out); /* * Write the leaf node for the name tree... 
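 *
 * The leaf lists /Limits (the first and last link names, lowercased
 * above) and the /Names array of name/object pairs; one small destination
 * object with a /D [page /XYZ x y 0] entry follows for each link.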
*/ pdf_start_object(out); fputs("/Limits[", out); write_string(out, links[0].name, 0); write_string(out, links[num_links - 1].name, 0); fputs("]", out); fputs("/Names[", out); for (i = 1, link = links; i <= (int)num_links; i ++, link ++) { write_string(out, link->name, 0); fprintf(out, "%d 0 R", (int)num_objects + i); } fputs("]", out); pdf_end_object(out); for (i = num_links, link = links; i > 0; i --, link ++) { pdf_start_object(out); float x, y; x = 0.0f; y = link->top + pages[link->page].bottom; pspdf_transform_coords(pages + link->page, x, y); fprintf(out, "/D[%d 0 R/XYZ %.0f %.0f 0]", pages_object + 2 * pages[link->page].outpage + 1, x, y); pdf_end_object(out); } } /* * 'render_contents()' - Render a single heading. */ static void render_contents(tree_t *t, /* I - Tree to parse */ float left, /* I - Left margin */ float right, /* I - Printable width */ float bottom, /* I - Bottom margin */ float top, /* I - Printable top */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int heading, /* I - Heading # */ tree_t *chap) /* I - Chapter heading */ { float x, width, numberwidth, height, rgb[3]; int hpage; uchar number[1024], *nptr, *link; tree_t *flat, *temp, *next; render_t *r; float dot_width; DEBUG_printf(("render_contents(t=%p, left=%.1f, right=%.1f, bottom=%.1f, top=%.1f, y=%.1f, page=%d, heading=%d, chap=%p)\n", (void *)t, left, right, bottom, top, *y, *page, heading, (void *)chap)); if (!t) return; dot_width = _htmlSizes[SIZE_P] * _htmlWidths[t->typeface][t->style]['.'] * 0.001f; /* * Put the text... */ flat = flatten_tree(t->child->child); for (height = 0.0, temp = flat; temp != NULL; temp = temp->next) if (temp->height > height) height = temp->height; height *= _htmlSpacings[SIZE_P] / _htmlSizes[SIZE_P]; if (t->indent) x = left + 18.0f + 18.0f * t->indent; else x = left; *y -= height; /* * Get the width of the page number, leave room for three dots... */ if (heading >= 0 && heading < (int)num_headings) { hpage = heading_pages[heading]; numberwidth = (float)(get_width((uchar *)pages[hpage].page_text, t->typeface, t->style, t->size) + 3.0f * dot_width); } else { hpage = 0; numberwidth = 0.0f; } for (temp = flat; temp != NULL; temp = next) { rgb[0] = temp->red / 255.0f; rgb[1] = temp->green / 255.0f; rgb[2] = temp->blue / 255.0f; if ((x + temp->width) >= (right - numberwidth)) { /* * Too wide to fit, continue on the next line */ *y -= _htmlSpacings[SIZE_P]; x = left + 36.0f * t->indent; } if (*y < bottom) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); width = get_width((uchar *)TocTitle, _htmlHeadingFont, STYLE_BOLD, SIZE_H1); *y = (float)(top - _htmlSpacings[SIZE_H1]); x = (float)(left + 0.5f * (right - left - width)); r = new_render(*page, RENDER_TEXT, x, *y, 0, 0, TocTitle); r->data.text.typeface = _htmlHeadingFont; r->data.text.style = STYLE_BOLD; r->data.text.size = (float)_htmlSizes[SIZE_H1]; get_color(_htmlTextColor, r->data.text.rgb); *y -= _htmlSpacings[SIZE_H1]; if (t->indent) x = left + 18.0f + 18.0f * t->indent; else x = left; if (chap != t) { *y += height; render_contents(chap, left, right, bottom, top, y, page, -1, 0); *y -= _htmlSpacings[SIZE_P]; } } if (temp->link != NULL) { link = htmlGetVariable(temp->link, (uchar *)"HREF"); /* * Add a page link... 
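 *
 * For PDF output (PSLevel == 0) the entry text is also recolored with
 * link_color and, when LinkStyle is set, underlined with a thin box.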
*/ new_render(*page, RENDER_LINK, x, *y, temp->width, temp->height, link); if (PSLevel == 0 && Links) { memcpy(rgb, link_color, sizeof(rgb)); temp->red = (uchar)(link_color[0] * 255.0); temp->green = (uchar)(link_color[1] * 255.0); temp->blue = (uchar)(link_color[2] * 255.0); if (LinkStyle) new_render(*page, RENDER_BOX, x, *y - 1, temp->width, 0, link_color); } } if ((link = htmlGetVariable(temp, (uchar *)"ID")) != NULL) { /* * Add a target link... */ add_link(link, *page, (int)(*y + height)); } switch (temp->markup) { case MARKUP_A : if ((link = htmlGetVariable(temp, (uchar *)"NAME")) != NULL) { /* * Add a target link... */ add_link(link, *page, (int)(*y + height)); } break; case MARKUP_NONE : if (temp->data == NULL) break; if (temp->underline) new_render(*page, RENDER_BOX, x, *y - 1, temp->width, 0, rgb); if (temp->strikethrough) new_render(*page, RENDER_BOX, x, *y + temp->height * 0.25f, temp->width, 0, rgb); r = new_render(*page, RENDER_TEXT, x, *y, 0, 0, temp->data); r->data.text.typeface = temp->typeface; r->data.text.style = temp->style; r->data.text.size = (float)_htmlSizes[temp->size]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); if (temp->superscript) r->y += height - temp->height; else if (temp->subscript) r->y -= height * _htmlSizes[0] / _htmlSpacings[0] - temp->height; break; case MARKUP_IMG : update_image_size(temp); new_render(*page, RENDER_IMAGE, x, *y, temp->width, temp->height, image_find((char *)htmlGetVariable(temp, (uchar *)"REALSRC"))); break; default : break; } x += temp->width; next = temp->next; free(temp); } if (numberwidth > 0.0f) { /* * Draw dots leading up to the page number... */ width = (float)(numberwidth - 3.0 * dot_width + x); for (nptr = number; nptr < (number + sizeof(number) - 1) && width < right; width += dot_width) *nptr++ = '.'; if (nptr > number) nptr --; strlcpy((char *)nptr, pages[hpage].page_text, sizeof(number) - (size_t)(nptr - number)); r = new_render(*page, RENDER_TEXT, right - width + x, *y, 0, 0, number); r->data.text.typeface = t->typeface; r->data.text.style = t->style; r->data.text.size = (float)_htmlSizes[t->size]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); } } /* * 'count_headings()' - Count the number of headings in the TOC. */ static int count_headings(tree_t *t) // I - Tree to count { int count; // Number of headings... count = 0; while (t != NULL) { switch (t->markup) { case MARKUP_B : case MARKUP_LI : count ++; if (t->last_child && t->last_child->markup == MARKUP_UL) count += count_headings(t->last_child); break; default : count += count_headings(t->child); break; } t = t->next; } return (count); } /* * 'parse_contents()' - Parse the table of contents and produce a * rendering list... */ static void parse_contents(tree_t *t, /* I - Tree to parse */ float left, /* I - Left margin */ float right, /* I - Printable width */ float bottom, /* I - Bottom margin */ float top, /* I - Printable top */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int *heading, /* IO - Heading # */ tree_t *chap) /* I - Chapter heading */ { DEBUG_printf(("parse_contents(t=%p, left=%.1f, right=%.1f, bottom=%.1f, top=%.1f, y=%.1f, page=%d, heading=%d, chap=%p)\n", (void *)t, left, right, bottom, top, *y, *page, *heading, (void *)chap)); while (t != NULL) { switch (t->markup) { case MARKUP_B : /* Top-level TOC */ if (t->prev != NULL) /* Advance one line prior to top-levels... 
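 *
 * A top-level entry also forces a page break (by zeroing the Y position)
 * when fewer than three line-heights remain above the bottom margin.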
*/ *y -= _htmlSpacings[SIZE_P]; if (*y < (bottom + _htmlSpacings[SIZE_P] * 3)) *y = 0; // Force page break chap = t; case MARKUP_LI : /* Lower-level TOC */ DEBUG_printf(("parse_contents: heading=%d, page = %d\n", *heading, heading_pages[*heading])); /* * Put the text unless the author has flagged it otherwise... */ if (htmlGetVariable(t, (uchar *)"_HD_OMIT_TOC") == NULL) { render_contents(t, left, right, bottom, top, y, page, *heading, chap); /* * Update current headings for header/footer strings in TOC. */ check_pages(*page); if (t->markup == MARKUP_B && pages[*page].chapter == pages[*page - 1].chapter) pages[*page].chapter = htmlGetText(t->child->child); if (pages[*page].heading == pages[*page - 1].heading) pages[*page].heading = htmlGetText(t->child->child); /* * Next heading... */ (*heading) ++; if (t->last_child->markup == MARKUP_UL) parse_contents(t->last_child, left, right, bottom, top, y, page, heading, chap); } else if (t->next != NULL && t->next->markup == MARKUP_UL) { /* * Skip children of omitted heading... */ t = t->next; (*heading) += count_headings(t->child) + 1; } else (*heading) ++; break; default : parse_contents(t->child, left, right, bottom, top, y, page, heading, chap); break; } t = t->next; } } /* * 'parse_doc()' - Parse a document tree and produce rendering list output. */ static void parse_doc(tree_t *t, /* I - Tree to parse */ float *left, /* I - Left margin */ float *right, /* I - Printable width */ float *bottom, /* I - Bottom margin */ float *top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ tree_t *cpara, /* I - Current paragraph */ int *needspace) /* I - Need whitespace before this element */ { int i; /* Looping var */ tree_t *para, /* Phoney paragraph tree entry */ *temp; /* Paragraph entry */ var_t *var; /* Variable entry */ uchar *name; /* ID name */ uchar *style; /* STYLE attribute */ float width, /* Width of horizontal rule */ height, /* Height of rule */ rgb[3]; /* RGB color of rule */ DEBUG_printf(("parse_doc(t=%p, left=%.1f, right=%.1f, bottom=%.1f, top=%.1f, x=%.1f, y=%.1f, page=%d, cpara=%p, needspace=%d\n", (void *)t, *left, *right, *bottom, *top, *x, *y, *page, (void *)cpara, *needspace)); DEBUG_printf((" title_page = %d, chapter = %d\n", title_page, chapter)); if (cpara == NULL) para = htmlNewTree(NULL, MARKUP_P, NULL); else para = cpara; while (t != NULL) { if (t->markup == MARKUP_FILE) current_url = htmlGetVariable(t, (uchar *)"_HD_URL"); if (((t->markup == MARKUP_H1 && OutputType == OUTPUT_BOOK) || (t->markup == MARKUP_FILE && OutputType == OUTPUT_WEBPAGES)) && !title_page) { // New page on H1 in book mode or file in webpage mode... if (para->child != NULL && chapter > 0) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } if ((chapter > 0 && OutputType == OUTPUT_BOOK) || ((*page > 0 || *y < *top) && OutputType == OUTPUT_WEBPAGES)) { if (*y < *top) (*page) ++; if (PageDuplex && (*page & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); chapter_ends[chapter] = *page - 1; } // Make sure header and footer strings are correct... check_pages(*page); memcpy(pages[*page].header, Header, sizeof(pages[*page].header)); memcpy(pages[*page].header1, Header1, sizeof(pages[*page].header1)); memcpy(pages[*page].footer, Footer, sizeof(pages[*page].footer)); // Bump the chapter/file count... 
chapter ++; if (chapter >= MAX_CHAPTERS) { progress_error(HD_ERROR_TOO_MANY_CHAPTERS, "Too many chapters/files in document (%d > %d)!", chapter, MAX_CHAPTERS); chapter = MAX_CHAPTERS - 1; } else chapter_starts[chapter] = *page; if (chapter > TocDocCount) TocDocCount = chapter; *y = *top; *x = *left; *needspace = 0; } if ((name = htmlGetVariable(t, (uchar *)"ID")) != NULL) { /* * Add a link target using the ID=name variable... */ add_link(name, *page, (int)*y); } else if (t->markup == MARKUP_FILE) { /* * Add a file link... */ uchar newname[256], /* New filename */ *sep; /* "?" separator in links */ // Strip any trailing HTTP GET data stuff... strlcpy((char *)newname, (char *)htmlGetVariable(t, (uchar *)"_HD_FILENAME"), sizeof(newname)); if ((sep = (uchar *)strchr((char *)newname, '?')) != NULL) *sep = '\0'; // Add the link add_link(newname, *page, (int)*y); } if (chapter == 0 && !title_page) { // Need to handle page comments before the first heading... if (t->markup == MARKUP_COMMENT) parse_comment(t, left, right, bottom, top, x, y, page, para, *needspace); if (t->child != NULL) parse_doc(t->child, left, right, bottom, top, x, y, page, para, needspace); t = t->next; continue; } // Check for some basic stylesheet stuff... if ((style = htmlGetStyle(t, (uchar *)"page-break-before:")) != NULL && strcasecmp((char *)style, "avoid") != 0) { // Advance to the next page... (*page) ++; *x = *left; *y = *top; *needspace = 0; // See if we need to go to the next left/righthand page... if (PageDuplex && ((*page) & 1) && strcasecmp((char *)style, "right") == 0) (*page) ++; else if (PageDuplex && !((*page) & 1) && strcasecmp((char *)style, "left") == 0) (*page) ++; // Update the progress as necessary... if (Verbosity) progress_show("Formatting page %d", *page); } // Process the markup... switch (t->markup) { case MARKUP_IMG : update_image_size(t); case MARKUP_NONE : case MARKUP_BR : if (para->child == NULL) { if (t->parent == NULL) { para->halignment = ALIGN_LEFT; para->indent = 0; } else { para->halignment = t->parent->halignment; para->indent = t->parent->indent; } } // Skip heading whitespace... 
if (para->child == NULL && t->markup == MARKUP_NONE && t->data != NULL && strcmp((char *)t->data, " ") == 0) break; if ((temp = htmlAddTree(para, t->markup, t->data)) != NULL) { temp->link = t->link; temp->width = t->width; temp->height = t->height; temp->typeface = t->typeface; temp->style = t->style; temp->size = t->size; temp->underline = t->underline; temp->strikethrough = t->strikethrough; temp->superscript = t->superscript; temp->subscript = t->subscript; temp->halignment = t->halignment; temp->valignment = t->valignment; temp->red = t->red; temp->green = t->green; temp->blue = t->blue; for (i = 0, var = t->vars; i < t->nvars; i ++, var ++) htmlSetVariable(temp, var->name, var->value); } break; case MARKUP_TABLE : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } parse_table(t, *left, *right, *bottom, *top, x, y, page, *needspace); *needspace = 0; break; case MARKUP_H1 : case MARKUP_H2 : case MARKUP_H3 : case MARKUP_H4 : case MARKUP_H5 : case MARKUP_H6 : case MARKUP_H7 : case MARKUP_H8 : case MARKUP_H9 : case MARKUP_H10 : case MARKUP_H11 : case MARKUP_H12 : case MARKUP_H13 : case MARKUP_H14 : case MARKUP_H15 : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 1; } parse_heading(t, *left, *right, *bottom, *top, x, y, page, *needspace); *needspace = 1; break; case MARKUP_BLOCKQUOTE : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 1; } *left += 36; *right -= 36; parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); *left -= 36; *right += 36; *x = *left; *needspace = 1; break; case MARKUP_CENTER : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 1; } parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); *x = *left; *needspace = 1; break; case MARKUP_P : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 1; } parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); *x = *left; *needspace = 1; break; case MARKUP_DIV : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } break; case MARKUP_PRE : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 1; } *left += 36.0f; *x = *left; parse_pre(t, *left, *right, *bottom, *top, x, y, page, *needspace); *left -= 36.0f; *x = *left; *needspace = 1; break; case MARKUP_DIR : case MARKUP_MENU : case MARKUP_UL : case MARKUP_OL : init_list(t); case MARKUP_DL : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, 
page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } if (t->indent == 1) *needspace = 1; *left += 36.0f; *x = *left; parse_doc(t->child, left, right, bottom, top, x, y, page, para, needspace); *left -= 36.0f; if (t->indent == 1) *needspace = 1; break; case MARKUP_LI : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 0; } parse_list(t, left, right, bottom, top, x, y, page, *needspace); *x = *left; *needspace = t->next && t->next->markup != MARKUP_LI && t->next->markup != MARKUP_UL && t->next->markup != MARKUP_OL; break; case MARKUP_DT : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 0; } *left -= 36.0f; *x = *left; parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); *left += 36.0f; *x = *left; *needspace = 0; break; case MARKUP_DD : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 0; } parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); *x = *left; *needspace = 0; break; case MARKUP_HR : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } if (htmlGetVariable(t, (uchar *)"BREAK") == NULL) { /* * Generate a horizontal rule... */ if ((name = htmlGetVariable(t, (uchar *)"WIDTH")) == NULL) width = *right - *left; else { if (strchr((char *)name, '%') != NULL) width = atoi((char *)name) * (*right - *left) / 100; else width = (float)(atoi((char *)name) * PagePrintWidth / _htmlBrowserWidth); } if ((name = htmlGetVariable(t, (uchar *)"SIZE")) == NULL) height = 2; else height = (float)(atoi((char *)name) * PagePrintWidth / _htmlBrowserWidth); switch (t->halignment) { case ALIGN_LEFT : *x = *left; break; case ALIGN_CENTER : *x = *left + (*right - *left - width) * 0.5f; break; case ALIGN_RIGHT : *x = *right - width; break; } if (*y < (*bottom + height + _htmlSpacings[SIZE_P])) { /* * Won't fit on this page... */ (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; } (*y) -= height + _htmlSpacings[SIZE_P]; rgb[0] = t->red / 255.0f; rgb[1] = t->green / 255.0f; rgb[2] = t->blue / 255.0f; new_render(*page, RENDER_BOX, *x, *y + _htmlSpacings[SIZE_P] * 0.5, width, height, rgb); } else { /* * <HR BREAK> generates a page break... */ (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; } *x = *left; *needspace = 0; break; case MARKUP_COMMENT : // Check comments for commands... parse_comment(t, left, right, bottom, top, x, y, page, para, *needspace); break; case MARKUP_HEAD : // Ignore document HEAD section case MARKUP_TITLE : // Ignore title and meta stuff case MARKUP_META : case MARKUP_SCRIPT : // Ignore script stuff case MARKUP_INPUT : // Ignore form stuff case MARKUP_SELECT : case MARKUP_OPTION : case MARKUP_TEXTAREA : break; case MARKUP_STYLE : break; case MARKUP_A : if (htmlGetVariable(t, (uchar *)"NAME") != NULL) { /* * Add this named destination to the paragraph tree... 
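 *
 * The <A NAME=...> node is copied into the pending paragraph so the
 * target is registered at the spot where the paragraph is eventually
 * rendered.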
*/ if (para->child == NULL) { para->halignment = t->halignment; para->indent = t->indent; } if ((temp = htmlAddTree(para, t->markup, t->data)) != NULL) { temp->link = t->link; temp->width = t->width; temp->height = t->height; temp->typeface = t->typeface; temp->style = t->style; temp->size = t->size; temp->underline = t->underline; temp->strikethrough = t->strikethrough; temp->superscript = t->superscript; temp->subscript = t->subscript; temp->halignment = t->halignment; temp->valignment = t->valignment; temp->red = t->red; temp->green = t->green; temp->blue = t->blue; for (i = 0, var = t->vars; i < t->nvars; i ++, var ++) htmlSetVariable(temp, var->name, var->value); } } default : if (t->child != NULL) parse_doc(t->child, left, right, bottom, top, x, y, page, para, needspace); break; } // Check for some basic stylesheet stuff... if ((style = htmlGetStyle(t, (uchar *)"page-break-after:")) != NULL && strcasecmp((char *)style, "avoid") != 0) { // Advance to the next page... (*page) ++; *x = *left; *y = *top; *needspace = 0; // See if we need to go to the next left/righthand page... if (PageDuplex && ((*page) & 1) && strcasecmp((char *)style, "right") == 0) (*page) ++; else if (PageDuplex && !((*page) & 1) && strcasecmp((char *)style, "left") == 0) (*page) ++; // Update the progress as necessary... if (Verbosity) progress_show("Formatting page %d", *page); } // Move to the next node... t = t->next; } if (para->child != NULL && cpara != para) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 0; } if (cpara != para) htmlDeleteTree(para); DEBUG_printf(("LEAVING parse_doc(), x = %.1f, y = %.1f, page = %d\n", *x, *y, *page)); } /* * 'parse_heading()' - Parse a heading tree and produce rendering list output. */ static void parse_heading(tree_t *t, /* I - Tree to parse */ float left, /* I - Left margin */ float right, /* I - Printable width */ float bottom, /* I - Bottom margin */ float top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int needspace) /* I - Need whitespace? */ { int *temp; // Temporary integer array pointer DEBUG_printf(("parse_heading(t=%p, left=%.1f, right=%.1f, bottom=%.1f, top=%.1f, x=%.1f, y=%.1f, page=%d, needspace=%d\n", (void *)t, left, right, bottom, top, *x, *y, *page, needspace)); if (((t->markup - MARKUP_H1) < TocLevels || TocLevels == 0) && !title_page) current_heading = t->child; if (*y < (5 * _htmlSpacings[SIZE_P] + bottom)) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } check_pages(*page); if (t->markup == MARKUP_H1 && !title_page) pages[*page].chapter = htmlGetText(current_heading); if ((pages[*page].heading == NULL || t->markup == MARKUP_H1 || (*page > 0 && pages[*page].heading == pages[*page - 1].heading)) && !title_page) { pages[*page].heading = htmlGetText(current_heading); pages[*page].headnode = current_heading; } if ((t->markup - MARKUP_H1) < TocLevels && !title_page) { DEBUG_printf(("H%d: heading_pages[%d] = %d\n", t->markup - MARKUP_H1 + 1, (int)num_headings, *page - 1)); // See if we need to resize the headings arrays... 
if (num_headings >= alloc_headings) { alloc_headings += ALLOC_HEADINGS; if (num_headings == 0) temp = (int *)malloc(sizeof(int) * alloc_headings); else temp = (int *)realloc(heading_pages, sizeof(int) * alloc_headings); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d headings - %s", (int)alloc_headings, strerror(errno)); alloc_headings -= ALLOC_HEADINGS; return; } memset(temp + alloc_headings - ALLOC_HEADINGS, 0, sizeof(int) * ALLOC_HEADINGS); heading_pages = temp; if (num_headings == 0) temp = (int *)malloc(sizeof(int) * alloc_headings); else temp = (int *)realloc(heading_tops, sizeof(int) * alloc_headings); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d headings - %s", (int)alloc_headings, strerror(errno)); alloc_headings -= ALLOC_HEADINGS; return; } memset(temp + alloc_headings - ALLOC_HEADINGS, 0, sizeof(int) * ALLOC_HEADINGS); heading_tops = temp; } heading_pages[num_headings] = *page; heading_tops[num_headings] = (int)(*y + 4 * _htmlSpacings[SIZE_P]); num_headings ++; } parse_paragraph(t, left, right, bottom, top, x, y, page, needspace); if (t->halignment == ALIGN_RIGHT && t->markup == MARKUP_H1 && OutputType == OUTPUT_BOOK && !title_page) { /* * Special case - chapter heading for users manual... */ *y = bottom + 0.5f * (top - bottom); } } #if defined(PARA_DEBUG) && !defined(DEBUG) # undef DEBUG_printf # undef DEBUG_puts # define DEBUG_printf(x) printf x # define DEBUG_puts(x) puts(x) #endif /* PARA_DEBUG && !defined(DEBUG) */ /* * 'parse_paragraph()' - Parse a paragraph tree and produce rendering list * output. */ static void parse_paragraph(tree_t *t, /* I - Tree to parse */ float left, /* I - Left margin */ float right, /* I - Printable width */ float bottom, /* I - Bottom margin */ float top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int needspace)/* I - Need whitespace? */ { int whitespace; /* Non-zero if a fragment ends in whitespace */ tree_t *flat, *start, *end, *prev, *temp; float width, height, offset, spacing, borderspace, temp_y, temp_width, temp_height; float format_width, image_y, image_left, image_right; int image_page = *page; float char_spacing; int num_chars; render_t *r; uchar *align, *hspace, *vspace, *link, *border; float rgb[3]; uchar line[10240], *lineptr, *dataptr; tree_t *linetype; float linex, linewidth; int firstline; DEBUG_printf(("parse_paragraph(t=%p, left=%.1f, right=%.1f, bottom=%.1f, top=%.1f, x=%.1f, y=%.1f, page=%d, needspace=%d\n", (void *)t, left, right, bottom, top, *x, *y, *page, needspace)); flat = flatten_tree(t->child); image_left = left; image_right = right; image_y = 0; if (flat == NULL) DEBUG_puts("parse_paragraph: flat == NULL!"); // Add leading whitespace... if (*y < top && needspace) *y -= _htmlSpacings[SIZE_P]; /* * First scan for images with left/right alignment tags... 
*/ for (temp = flat, prev = NULL; temp != NULL;) { if (temp->markup == MARKUP_IMG) update_image_size(temp); if (temp->markup == MARKUP_IMG && (align = htmlGetVariable(temp, (uchar *)"ALIGN"))) { if ((border = htmlGetVariable(temp, (uchar *)"BORDER")) != NULL) borderspace = (float)atof((char *)border); else if (temp->link) borderspace = 1; else borderspace = 0; borderspace *= PagePrintWidth / _htmlBrowserWidth; if (strcasecmp((char *)align, "LEFT") == 0) { if ((vspace = htmlGetVariable(temp, (uchar *)"VSPACE")) != NULL) *y -= atoi((char *)vspace); if (*y < (bottom + temp->height + 2 * borderspace)) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } if (borderspace > 0.0f) { if (temp->link && PSLevel == 0) memcpy(rgb, link_color, sizeof(rgb)); else { rgb[0] = temp->red / 255.0f; rgb[1] = temp->green / 255.0f; rgb[2] = temp->blue / 255.0f; } // Top new_render(*page, RENDER_BOX, image_left, *y - borderspace, temp->width + 2 * borderspace, borderspace, rgb); // Left new_render(*page, RENDER_BOX, image_left, *y - temp->height - 2 * borderspace, borderspace, temp->height + 2 * borderspace, rgb); // Right new_render(*page, RENDER_BOX, image_left + temp->width + borderspace, *y - temp->height - 2 * borderspace, borderspace, temp->height + 2 * borderspace, rgb); // Bottom new_render(*page, RENDER_BOX, image_left, *y - temp->height - 2 * borderspace, temp->width + 2 * borderspace, borderspace, rgb); } *y -= borderspace; new_render(*page, RENDER_IMAGE, image_left + borderspace, *y - temp->height, temp->width, temp->height, image_find((char *)htmlGetVariable(temp, (uchar *)"REALSRC"))); if (temp->link && (link = htmlGetVariable(temp->link, (uchar *)"_HD_FULL_HREF")) != NULL) { /* * Add a page link... */ new_render(*page, RENDER_LINK, image_left + borderspace, *y - temp->height, temp->width, temp->height, link); } *y -= borderspace; if (vspace != NULL) *y -= atoi((char *)vspace); image_left += temp->width + 2 * borderspace; temp_y = *y - temp->height; image_page = *page; if (temp_y < image_y || image_y == 0) image_y = temp_y; if ((hspace = htmlGetVariable(temp, (uchar *)"HSPACE")) != NULL) image_left += atoi((char *)hspace); if (prev != NULL) prev->next = temp->next; else flat = temp->next; free(temp); temp = prev; } else if (strcasecmp((char *)align, "RIGHT") == 0) { if ((vspace = htmlGetVariable(temp, (uchar *)"VSPACE")) != NULL) *y -= atoi((char *)vspace); if (*y < (bottom + temp->height + 2 * borderspace)) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } image_right -= temp->width + 2 * borderspace; image_page = *page; if (borderspace > 0.0f) { if (temp->link && PSLevel == 0) memcpy(rgb, link_color, sizeof(rgb)); else { rgb[0] = temp->red / 255.0f; rgb[1] = temp->green / 255.0f; rgb[2] = temp->blue / 255.0f; } // Top new_render(*page, RENDER_BOX, image_right, *y - borderspace, temp->width + 2 * borderspace, borderspace, rgb); // Left new_render(*page, RENDER_BOX, image_right, *y - temp->height - 2 * borderspace, borderspace, temp->height + 2 * borderspace, rgb); // Right new_render(*page, RENDER_BOX, image_right + temp->width + borderspace, *y - temp->height - 2 * borderspace, borderspace, temp->height + 2 * borderspace, rgb); // Bottom new_render(*page, RENDER_BOX, image_right, *y - temp->height - 2 * borderspace, temp->width + 2 * borderspace, borderspace, rgb); } *y -= borderspace; new_render(*page, RENDER_IMAGE, image_right + borderspace, *y - temp->height, temp->width, temp->height, image_find((char *)htmlGetVariable(temp, 
(uchar *)"REALSRC"))); if (temp->link && (link = htmlGetVariable(temp->link, (uchar *)"_HD_FULL_HREF")) != NULL) { /* * Add a page link... */ new_render(*page, RENDER_LINK, image_right + borderspace, *y - temp->height, temp->width, temp->height, link); } *y -= borderspace; if (vspace != NULL) *y -= atoi((char *)vspace); temp_y = *y - temp->height; if (temp_y < image_y || image_y == 0) image_y = temp_y; if ((hspace = htmlGetVariable(temp, (uchar *)"HSPACE")) != NULL) image_right -= atoi((char *)hspace); if (prev != NULL) prev->next = temp->next; else flat = temp->next; free(temp); temp = prev; } } if (temp != NULL) { prev = temp; temp = temp->next; } else temp = flat; } /* * Then format the text and inline images... */ format_width = image_right - image_left; firstline = 1; DEBUG_printf(("format_width = %.1f\n", format_width)); // Make stupid compiler warnings go away (if you can't put // enough smarts in the compiler, don't add the warning!) offset = 0.0f; temp_width = 0.0f; temp_height = 0.0f; lineptr = NULL; linex = 0.0f; linewidth = 0.0f; while (flat != NULL) { start = flat; end = flat; width = 0.0; while (flat != NULL) { // Get fragments... temp_width = 0.0; temp = flat; whitespace = 0; while (temp != NULL && !whitespace) { if (temp->markup == MARKUP_NONE && temp->data[0] == ' ') { if (temp == start) temp_width -= _htmlWidths[temp->typeface][temp->style][' '] * _htmlSizes[temp->size] * 0.001f; else if (temp_width > 0.0f) whitespace = 1; } else whitespace = 0; if (whitespace) break; if (temp->markup == MARKUP_IMG) { if ((border = htmlGetVariable(temp, (uchar *)"BORDER")) != NULL) borderspace = (float)atof((char *)border); else if (temp->link) borderspace = 1; else borderspace = 0; borderspace *= PagePrintWidth / _htmlBrowserWidth; temp_width += 2 * borderspace; } prev = temp; temp = temp->next; temp_width += prev->width; if ((temp_width >= format_width && prev->markup == MARKUP_IMG) || prev->markup == MARKUP_BR) { break; } else if (prev->markup == MARKUP_NONE) { int ch = prev->data[strlen((char *)prev->data) - 1]; if (_htmlUTF8) ch = _htmlUnicode[ch]; if (ch == 173) break; } } if ((width + temp_width) <= format_width) { width += temp_width; end = temp; flat = temp; if (prev->markup == MARKUP_BR) break; } else if (width == 0.0) { width += temp_width; end = temp; flat = temp; break; } else break; } if (start == end) { end = start->next; flat = start->next; width = start->width; } for (height = 0.0, num_chars = 0, temp = prev = start; temp != end; temp = temp->next) { prev = temp; if (temp->markup == MARKUP_NONE) num_chars += strlen((char *)temp->data); if (temp->height > height) height = temp->height; } for (spacing = 0.0, temp = prev = start; temp != end; temp = temp->next) { prev = temp; if (temp->markup != MARKUP_IMG) temp_height = (float)(temp->height * _htmlSpacings[0] / _htmlSizes[0]); else { if ((border = htmlGetVariable(temp, (uchar *)"BORDER")) != NULL) borderspace = (float)atof((char *)border); else if (temp->link) borderspace = 1; else borderspace = 0; borderspace *= PagePrintWidth / _htmlBrowserWidth; temp_height = temp->height + 2 * borderspace; } if (temp_height > spacing) spacing = temp_height; } if (firstline && end != NULL && *y < (bottom + height + _htmlSpacings[t->size])) { // Go to next page since only 1 line will fit on this one... 
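// (The first line plus the paragraph spacing will not fit above the
// bottom margin, so the whole paragraph starts on a fresh page.)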
(*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } firstline = 0; if (height == 0.0f) height = spacing; for (temp = start; temp != end; temp = temp->next) if (temp->markup != MARKUP_A) break; if (temp != NULL && temp->markup == MARKUP_NONE && temp->data[0] == ' ') { // Drop leading space... for (dataptr = temp->data; *dataptr; dataptr ++) *dataptr = dataptr[1]; *dataptr = '\0'; temp_width = _htmlWidths[temp->typeface][temp->style][' '] * _htmlSizes[temp->size] * 0.001f; temp->width -= temp_width; num_chars --; } if (end != NULL) temp = end->prev; else temp = NULL; DEBUG_printf((" BEFORE page=%d, y=%.1f, height=%.1f, spacing=%.1f, bottom=%.1f\n", *page, *y, height, spacing, bottom)); if (*y < (spacing + bottom)) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } *y -= height; DEBUG_printf((" page=%d, y=%.1f, width=%.1f, height=%.1f\n", *page, *y, width, height)); if (Verbosity) progress_update(100 - (int)(100 * (*y) / PagePrintLength)); char_spacing = 0.0f; whitespace = 0; temp = start; linetype = NULL; rgb[0] = temp->red / 255.0f; rgb[1] = temp->green / 255.0f; rgb[2] = temp->blue / 255.0f; switch (t->halignment) { case ALIGN_LEFT : linex = image_left; break; case ALIGN_CENTER : linex = image_left + 0.5f * (format_width - width); break; case ALIGN_RIGHT : linex = image_right - width; break; case ALIGN_JUSTIFY : linex = image_left; if (flat != NULL && flat->prev->markup != MARKUP_BR && num_chars > 1) char_spacing = (format_width - width) / (num_chars - 1); break; } while (temp != end) { if (temp->link != NULL && PSLevel == 0 && Links && temp->markup == MARKUP_NONE) { temp->red = (uchar)(link_color[0] * 255.0); temp->green = (uchar)(link_color[1] * 255.0); temp->blue = (uchar)(link_color[2] * 255.0); } /* * See if we are doing a run of characters in a line and need to * output this run... */ if (linetype != NULL && (temp->markup != MARKUP_NONE || temp->typeface != linetype->typeface || temp->style != linetype->style || temp->size != linetype->size || temp->superscript != linetype->superscript || temp->subscript != linetype->subscript || temp->red != linetype->red || temp->green != linetype->green || temp->blue != linetype->blue)) { r = new_render(*page, RENDER_TEXT, linex - linewidth, *y, linewidth, linetype->height, line); r->data.text.typeface = linetype->typeface; r->data.text.style = linetype->style; r->data.text.size = (float)_htmlSizes[linetype->size]; r->data.text.spacing = char_spacing; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); if (linetype->superscript) r->y += height - linetype->height; else if (linetype->subscript) r->y -= height - linetype->height; free(linetype); linetype = NULL; } if ((link = htmlGetVariable(temp, (uchar *)"ID")) != NULL) { /* * Add a target link... */ add_link(link, *page, (int)(*y + height)); } switch (temp->markup) { case MARKUP_A : if ((link = htmlGetVariable(temp, (uchar *)"NAME")) != NULL) { /* * Add a target link... 
*/ add_link(link, *page, (int)(*y + height)); } default : temp_width = temp->width; break; case MARKUP_NONE : if (temp->data == NULL) break; if (((temp->width - right + left) > 0.001 || (temp->height - top + bottom) > 0.001) && OverflowErrors) progress_error(HD_ERROR_CONTENT_TOO_LARGE, "Text on page %d too large - " "truncation or overlapping may occur!", *page + 1); if (linetype == NULL) { linetype = temp; lineptr = line; linewidth = 0.0; rgb[0] = temp->red / 255.0f; rgb[1] = temp->green / 255.0f; rgb[2] = temp->blue / 255.0f; } strlcpy((char *)lineptr, (char *)temp->data, sizeof(line) - (size_t)(lineptr - line)); temp_width = temp->width + char_spacing * strlen((char *)lineptr); if (temp->underline || (temp->link && LinkStyle && PSLevel == 0)) new_render(*page, RENDER_BOX, linex, *y - 1, temp_width, 0, rgb); if (temp->strikethrough) new_render(*page, RENDER_BOX, linex, *y + temp->height * 0.25f, temp_width, 0, rgb); linewidth += temp_width; lineptr += strlen((char *)lineptr); if (lineptr > line && lineptr[-1] == ' ') whitespace = 1; else whitespace = 0; break; case MARKUP_IMG : if (((temp->width - right + left) > 0.001 || (temp->height - top + bottom) > 0.001) && OverflowErrors) { DEBUG_printf(("IMAGE: %.3fx%.3f > %.3fx%.3f\n", temp->width, temp->height, right - left, top - bottom)); progress_error(HD_ERROR_CONTENT_TOO_LARGE, "Image on page %d too large - " "truncation or overlapping may occur!", *page + 1); } if ((border = htmlGetVariable(temp, (uchar *)"BORDER")) != NULL) borderspace = (float)atof((char *)border); else if (temp->link) borderspace = 1; else borderspace = 0; borderspace *= PagePrintWidth / _htmlBrowserWidth; temp_width += 2 * borderspace; switch (temp->valignment) { case ALIGN_TOP : offset = height - temp->height - 2 * borderspace; break; case ALIGN_MIDDLE : offset = 0.5f * (height - temp->height) - borderspace; break; case ALIGN_BOTTOM : offset = 0.0f; } if (borderspace > 0.0f) { // Top new_render(*page, RENDER_BOX, linex, *y + offset + temp->height + borderspace, temp->width + 2 * borderspace, borderspace, rgb); // Left new_render(*page, RENDER_BOX, linex, *y + offset, borderspace, temp->height + 2 * borderspace, rgb); // Right new_render(*page, RENDER_BOX, linex + temp->width + borderspace, *y + offset, borderspace, temp->height + 2 * borderspace, rgb); // Bottom new_render(*page, RENDER_BOX, linex, *y + offset, temp->width + 2 * borderspace, borderspace, rgb); } new_render(*page, RENDER_IMAGE, linex + borderspace, *y + offset + borderspace, temp->width, temp->height, image_find((char *)htmlGetVariable(temp, (uchar *)"REALSRC"))); whitespace = 0; temp_width = temp->width + 2 * borderspace; break; } if (temp->link != NULL && (link = htmlGetVariable(temp->link, (uchar *)"_HD_FULL_HREF")) != NULL) { /* * Add a page link... */ new_render(*page, RENDER_LINK, linex, *y + offset, temp->width, temp->height, link); } linex += temp_width; prev = temp; temp = temp->next; if (prev != linetype) free(prev); } /* * See if we have a run of characters that hasn't been output... 
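 *
 * Consecutive text fragments that share the same typeface, style, size,
 * color and super/subscript state are accumulated in line[] and emitted as
 * a single RENDER_TEXT primitive; this flushes whatever run is still
 * pending at the end of the line.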
*/ if (linetype != NULL) { r = new_render(*page, RENDER_TEXT, linex - linewidth, *y, linewidth, linetype->height, line); r->data.text.typeface = linetype->typeface; r->data.text.style = linetype->style; r->data.text.spacing = char_spacing; r->data.text.size = (float)_htmlSizes[linetype->size]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); if (linetype->superscript) r->y += height - linetype->height; else if (linetype->subscript) r->y -= height - linetype->height; free(linetype); } /* * Update the margins after we pass below the images... */ *y -= spacing - height; DEBUG_printf((" AFTER y=%.1f, bottom=%.1f\n", *y, bottom)); if (*y < bottom) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } if (*y < image_y || *page > image_page) { image_y = 0.0f; image_left = left; image_right = right; format_width = image_right - image_left; } } *x = left; if (*y > image_y && image_y > 0.0f && image_page == *page) *y = image_y; DEBUG_printf(("LEAVING parse_paragraph(), x = %.1f, y = %.1f, page = %d, image_y = %.1f\n", *x, *y, *page, image_y)); } #if defined(PARA_DEBUG) && !defined(DEBUG) # undef DEBUG_printf # undef DEBUG_puts # define DEBUG_printf(x) # define DEBUG_puts(x) #endif /* PARA_DEBUG && !DEBUG */ /* * 'parse_pre()' - Parse preformatted text and produce rendering list output. */ static void parse_pre(tree_t *t, /* I - Tree to parse */ float left, /* I - Left margin */ float right, /* I - Printable width */ float bottom, /* I - Bottom margin */ float top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int needspace) /* I - Need whitespace? */ { tree_t *flat, *start, *next; uchar *link, line[10240], *lineptr, *dataptr; int col; float width, height, rgb[3]; render_t *r; REF(right); DEBUG_printf(("parse_pre(t=%p, left=%.1f, right=%.1f, x=%.1f, y=%.1f, page=%d\n", (void *)t, left, right, *x, *y, *page)); if (t->child == NULL) return; if (*y < top && needspace) *y -= _htmlSpacings[SIZE_P]; flat = flatten_tree(t->child); if (flat == NULL) return; if (flat->markup == MARKUP_NONE && flat->data != NULL) { // Skip leading blank line, if present... for (dataptr = flat->data; isspace(*dataptr); dataptr ++); if (!*dataptr) { next = flat->next; free(flat); flat = next; } } while (flat != NULL) { for (height = 0.0f, start = flat; flat != NULL; flat = flat->next) { if (flat->height > height) height = flat->height; if (flat->markup == MARKUP_BR || (flat->markup == MARKUP_NONE && flat->data && flat->data[strlen((char *)flat->data) - 1] == '\n')) break; } if (flat) flat = flat->next; if (*y < (height + bottom)) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } *x = left; *y -= height; if (Verbosity) progress_update(100 - (int)(100 * (*y) / PagePrintLength)); col = 0; while (start != flat) { rgb[0] = start->red / 255.0f; rgb[1] = start->green / 255.0f; rgb[2] = start->blue / 255.0f; if (start->link && (link = htmlGetVariable(start->link, (uchar *)"_HD_FULL_HREF")) != NULL) { /* * Add a page link... */ new_render(*page, RENDER_LINK, *x, *y, start->width, start->height, link); if (PSLevel == 0 && Links) { memcpy(rgb, link_color, sizeof(rgb)); start->red = (uchar)(link_color[0] * 255.0); start->green = (uchar)(link_color[1] * 255.0); start->blue = (uchar)(link_color[2] * 255.0); if (LinkStyle) new_render(*page, RENDER_BOX, *x, *y - 1, start->width, 0, link_color); } } if ((link = htmlGetVariable(start, (uchar *)"ID")) != NULL) { /* * Add a target link... 
*/ add_link(link, *page, (int)(*y + height)); } switch (start->markup) { case MARKUP_COMMENT : parse_comment(start, &left, &right, &bottom, &top, x, y, page, NULL, 0); break; case MARKUP_A : if ((link = htmlGetVariable(start, (uchar *)"NAME")) != NULL) { /* * Add a target link... */ add_link(link, *page, (int)(*y + height)); } break; case MARKUP_NONE : for (lineptr = line, dataptr = start->data; *dataptr != '\0' && lineptr < (line + sizeof(line) - 1); dataptr ++) if (*dataptr == '\n') break; else if (*dataptr == '\t') { /* This code changed after 15 years to work around new compiler optimization bugs (Issue #349) */ int num_cols = 8 - (col & 7); memcpy(lineptr, " ", num_cols); lineptr += num_cols; col += num_cols; } else if (*dataptr != '\r') { *lineptr++ = *dataptr; col ++; } *lineptr = '\0'; width = get_width(line, start->typeface, start->style, start->size); r = new_render(*page, RENDER_TEXT, *x, *y, width, 0, line); r->data.text.typeface = start->typeface; r->data.text.style = start->style; r->data.text.size = (float)_htmlSizes[start->size]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); if (start->underline) new_render(*page, RENDER_BOX, *x, *y - 1, start->width, 0, rgb); if (start->strikethrough) new_render(*page, RENDER_BOX, *x, *y + start->height * 0.25f, start->width, 0, rgb); *x += start->width; break; case MARKUP_IMG : new_render(*page, RENDER_IMAGE, *x, *y, start->width, start->height, image_find((char *)htmlGetVariable(start, (uchar *)"REALSRC"))); *x += start->width; col ++; break; default : break; } next = start->next; free(start); start = next; } if ((*x - right) > 0.001 && OverflowErrors) progress_error(HD_ERROR_CONTENT_TOO_LARGE, "Preformatted text on page %d too long - " "truncation or overlapping may occur!", *page + 1); *y -= _htmlSpacings[t->size] - _htmlSizes[t->size]; } *x = left; } //#define TABLE_DEBUG 1 #ifdef TABLE_DEBUG # undef DEBUG_puts # define DEBUG_puts(x) puts(x) # define DEBUG 1 # undef DEBUG_printf # define DEBUG_printf(x) printf x #endif /* TABLE_DEBUG */ typedef struct { int debug; int num_cols, num_rows; float border, border_left, border_rgb[3], border_size, cellpadding, height; int col_spans[MAX_COLUMNS], row_spans[MAX_COLUMNS]; char col_fixed[MAX_COLUMNS], col_percent[MAX_COLUMNS]; float col_lefts[MAX_COLUMNS], col_rights[MAX_COLUMNS], col_widths[MAX_COLUMNS], col_swidths[MAX_COLUMNS], col_mins[MAX_COLUMNS], col_smins[MAX_COLUMNS], col_prefs[MAX_COLUMNS]; int cell_page[MAX_COLUMNS], // Start page for cell cell_endpage[MAX_COLUMNS]; // End page for cell float cell_y[MAX_COLUMNS], // Row for each cell cell_endy[MAX_COLUMNS], // Row for each cell cell_height[MAX_COLUMNS], // Height of each cell in a row span_heights[MAX_COLUMNS]; // Height of spans render_t *cell_bg[MAX_COLUMNS]; // Background rectangles render_t *cell_start[MAX_COLUMNS]; // Start of the content for a cell in the row render_t *cell_end[MAX_COLUMNS]; // End of the content for a cell in a row } hdtable_t; /* * 'render_table_row()' - Render a table row. 
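 *
 * Lays out a single table row: each cell's content is parsed into the
 * render list, the row height is computed (carrying ROWSPAN heights forward
 * in span_heights[]), vertical alignment is applied by shifting the cell's
 * render primitives, and finally the cell borders and background boxes are
 * drawn, split across pages when a cell crosses a page boundary.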
*/ static void render_table_row(hdtable_t &table, tree_t ***cells, int row, uchar *height_var, float left, // I - Left margin float right, // I - Printable width float bottom, // I - Bottom margin float top, // I - Printable top float *x, float *y, int *page) { int col, tcol, colspan, rowspan, tempspace; float width, temp_y; int temp_page; uchar *var; int do_valign; // True if we should do vertical alignment of cells int row_page; float row_y, row_starty, row_height, // Total height of the row temp_height; // Temporary holder uchar *bgcolor; float bgrgb[3]; do_valign = 1; row_height = 0.0f; row_page = *page; row_y = *y - table.cellpadding; row_starty = row_y; DEBUG_printf(("BEFORE row_y = %.1f, *y = %.1f, row_page = %d\n", row_y, *y, row_page)); for (col = 0, rowspan = 9999; col < table.num_cols; col += colspan) { if (table.row_spans[col] == 0) { if ((var = htmlGetVariable(cells[row][col], (uchar *)"ROWSPAN")) != NULL) table.row_spans[col] = atoi((char *)var); if (table.row_spans[col] == 1) table.row_spans[col] = 0; if (table.row_spans[col] > (table.num_rows - row)) table.row_spans[col] = table.num_rows - row; table.span_heights[col] = 0.0f; } if (table.row_spans[col] < rowspan) rowspan = table.row_spans[col]; for (colspan = 1; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; } if (!rowspan) rowspan = 1; for (col = 0; col < table.num_cols;) { for (colspan = 1; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; colspan --; DEBUG_printf((" col = %d, colspan = %d, left = %.1f, right = %.1f, cell = %p\n", col, colspan, table.col_lefts[col], table.col_rights[col + colspan], (void *)cells[row][col])); *x = table.col_lefts[col]; temp_y = *y - table.cellpadding; temp_page = *page; tempspace = 0; if (row == 0 || cells[row][col] != cells[row - 1][col]) { check_pages(*page); if (cells[row][col] == NULL) bgcolor = NULL; else if ((bgcolor = htmlGetVariable(cells[row][col], (uchar *)"BGCOLOR")) != NULL) { memcpy(bgrgb, background_color, sizeof(bgrgb)); get_color(bgcolor, bgrgb, 0); width = table.col_rights[col + colspan] - table.col_lefts[col] + 2 * table.cellpadding; table.border_left = table.col_lefts[col] - table.cellpadding; table.cell_bg[col] = new_render(*page, RENDER_BOX, table.border_left, row_y, width + table.border, 0.0, bgrgb); } else { table.cell_bg[col] = NULL; new_render(*page, RENDER_TEXT, -1.0f, -1.0f, 0.0, 0.0, (void *)""); } DEBUG_printf(("cell_bg[%d] = %p, pages[%d].end = %p\n", col, (void *)table.cell_bg[col], *page, (void *)pages[*page].end)); table.cell_start[col] = pages[*page].end; table.cell_page[col] = temp_page; table.cell_y[col] = temp_y; if (table.debug) { check_pages(*page); render_t *r; char table_text[255]; snprintf(table_text, sizeof(table_text), "cell=%p [%d,%d]", (void *)cells[row][col], row, col); r = new_render(temp_page, RENDER_TEXT, *x, temp_y, get_width((uchar *)table_text, TYPE_COURIER, STYLE_NORMAL, 1), _htmlSizes[1], table_text); r->data.text.typeface = TYPE_COURIER; r->data.text.style = STYLE_NORMAL; r->data.text.size = (float)_htmlSizes[1]; } if (cells[row][col] != NULL && cells[row][col]->child != NULL) { DEBUG_printf((" parsing cell %d,%d; width = %.1f\n", row, col, table.col_rights[col + colspan] - table.col_lefts[col])); bottom += table.cellpadding; top -= table.cellpadding; parse_doc(cells[row][col]->child, table.col_lefts + col, table.col_rights + col + colspan, &bottom, &top, x, &temp_y, &temp_page, NULL, &tempspace); bottom -= table.cellpadding; 
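// (cellpadding was added to bottom/top around the parse_doc() call above so
// the cell content stays clear of the cell border; both are restored here.)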
top += table.cellpadding; } table.cell_endpage[col] = temp_page; table.cell_endy[col] = temp_y; table.cell_height[col] = *y - table.cellpadding - temp_y; table.cell_end[col] = pages[*page].end; if (table.cell_start[col] == NULL) table.cell_start[col] = pages[*page].start; DEBUG_printf(("row = %d, col = %d, y = %.1f, cell_y = %.1f, cell_height = %.1f\n", row, col, *y - table.cellpadding, temp_y, table.cell_height[col])); DEBUG_printf(("cell_start[%d] = %p, cell_end[%d] = %p\n", col, (void *)table.cell_start[col], col, (void *)table.cell_end[col])); } if (table.row_spans[col] == 0 && table.cell_page[col] == table.cell_endpage[col] && table.cell_height[col] > row_height) row_height = table.cell_height[col]; if (table.row_spans[col] <= rowspan) { if (table.cell_page[col] != table.cell_endpage[col]) do_valign = 0; if (table.cell_endpage[col] > row_page) { row_page = table.cell_endpage[col]; row_y = table.cell_endy[col]; } else if (table.cell_endy[col] < row_y && table.cell_endpage[col] == row_page) row_y = table.cell_endy[col]; } DEBUG_printf(("**** col = %d, row = %d, row_y = %.1f, row_page = %d\n", col, row, row_y, row_page)); for (col ++; colspan > 0; colspan --, col ++) { table.cell_start[col] = NULL; table.cell_page[col] = table.cell_page[col - 1]; table.cell_y[col] = table.cell_y[col - 1]; table.cell_end[col] = NULL; table.cell_endpage[col] = table.cell_endpage[col - 1]; table.cell_endy[col] = table.cell_endy[col - 1]; table.cell_height[col] = table.cell_height[col - 1]; } } DEBUG_printf(("row = %d, row_y = %.1f, row_height = %.1f\n", row, row_y, row_height)); for (col = 0; col < table.num_cols; col += colspan) { for (colspan = 1; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; if (table.row_spans[col]) table.span_heights[col] += row_height; DEBUG_printf(("col = %d, cell_y = %.1f, cell_page = %d, cell_endpage = %d, row_spans = %d, span_heights = %.1f, cell_height = %.1f\n", col, table.cell_y[col], table.cell_page[col], table.cell_endpage[col], table.row_spans[col], table.span_heights[col], table.cell_height[col])); } for (col = 0; col < table.num_cols; col += colspan) { for (colspan = 1; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; if (table.row_spans[col] == rowspan && table.cell_page[col] == table.cell_endpage[col] && table.cell_height[col] > table.span_heights[col]) { temp_height = table.cell_height[col] - table.span_heights[col]; row_height += temp_height; DEBUG_printf(("Adjusting row-span height by %.1f, new row_height = %.1f\n", temp_height, row_height)); for (tcol = 0; tcol < table.num_cols; tcol ++) if (table.row_spans[tcol]) { table.span_heights[tcol] += temp_height; DEBUG_printf(("col = %d, span_heights = %.1f\n", tcol, table.span_heights[tcol])); } } } DEBUG_printf(("AFTER row = %d, row_page = %d, row_y = %.1f, row_height = %.1f, *y = %.1f, do_valign = %d\n", row, row_page, row_y, row_height, *y, do_valign)); /* * Do the vertical alignment */ if (do_valign) { height_var = NULL; if (cells[row][0] != NULL) { if ((height_var = htmlGetVariable(cells[row][0]->parent, (uchar *)"HEIGHT")) == NULL) for (col = 0; col < table.num_cols; col ++) if (htmlGetVariable(cells[row][col], (uchar *)"ROWSPAN") == NULL) if ((height_var = htmlGetVariable(cells[row][col], (uchar *)"HEIGHT")) != NULL) break; } if (height_var != NULL) { // Hardcode the row height... 
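// A HEIGHT value ending in '%' is taken as a fraction of PagePrintLength;
// otherwise it is treated as pixels and scaled by PagePrintWidth /
// _htmlBrowserWidth, then clipped to the table height and reduced by the
// cell padding before being compared with the computed row height.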
if (height_var[strlen((char *)height_var) - 1] == '%') temp_height = (float)(atof((char *)height_var) * 0.01f * PagePrintLength); else temp_height = (float)(atof((char *)height_var) * PagePrintWidth / _htmlBrowserWidth); if (table.height > 0 && temp_height > table.height) temp_height = table.height; temp_height -= 2 * table.cellpadding; if (temp_height > row_height) { // Only enforce the height if it is > the actual row height. row_height = temp_height; row_y = *y - temp_height; } } for (col = 0; col < table.num_cols; col += colspan + 1) { render_t *p; float delta_y; for (colspan = 1; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; colspan --; if (table.cell_start[col] == NULL || table.row_spans[col] > rowspan || cells[row][col] == NULL || cells[row][col]->child == NULL) continue; if (table.row_spans[col] == 1) { int tcol; float span_height = 0.0f; for (tcol = 0; tcol < table.num_cols; tcol ++) { if (table.row_spans[col] == 1 && table.span_heights[col] > span_height) span_height = table.span_heights[col]; } switch (cells[row][col]->valignment) { case ALIGN_MIDDLE : // delta_y = (table.span_heights[col] - table.cell_height[col]) * 0.5f; delta_y = (span_height - table.cell_height[col]) * 0.5f; break; case ALIGN_BOTTOM : // delta_y = table.span_heights[col] - table.cell_height[col]; delta_y = span_height - table.cell_height[col]; break; default : delta_y = 0.0f; break; } } else if (table.row_spans[col]) { delta_y = 0.0f; } else { switch (cells[row][col]->valignment) { case ALIGN_MIDDLE : delta_y = (row_height - table.cell_height[col]) * 0.5f; break; case ALIGN_BOTTOM : delta_y = row_height - table.cell_height[col]; break; default : delta_y = 0.0f; break; } } DEBUG_printf(("row = %d, col = %d, valign = %d, rowspans = %d, cell_height = %.1f, span_heights = %.1f, delta_y = %.1f\n", row, col, cells[row][col]->valignment, table.row_spans[col], table.cell_height[col], table.span_heights[col], delta_y)); if (delta_y > 0.0f) { if (table.cell_start[col] == table.cell_end[col]) p = table.cell_start[col]; else p = table.cell_start[col]->next; for (; p != NULL; p = p->next) { DEBUG_printf(("aligning %p (%s), y was %.1f, now %.1f\n", (void *)p, p->data.text.buffer, p->y, p->y - delta_y)); p->y -= delta_y; if (p == table.cell_end[col]) break; } } #ifdef DEBUG else { if (table.cell_start[col] == table.cell_end[col]) p = table.cell_start[col]; else p = table.cell_start[col]->next; for (; p != NULL; p = p->next) { printf("NOT aligning %p (%s)\n", (void *)p, p->data.text.buffer); if (p == table.cell_end[col]) break; } } #endif /* DEBUG */ } } // Update all current columns with ROWSPAN <= rowspan to use the same // end page and row... 
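// Give every cell in this row whose ROWSPAN has been satisfied the same
// ending page and Y position (the lowest point reached by any of them) so
// that the border/background pass below can treat the row as one unit.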
for (col = 0, temp_page = -1, temp_y = 99999999; col < table.num_cols; col ++) if (table.row_spans[col] <= rowspan && cells[row][col] != NULL && cells[row][col]->child != NULL) { if (table.cell_endpage[col] > temp_page) { temp_page = table.cell_endpage[col]; temp_y = table.cell_endy[col]; } else if (table.cell_endpage[col] == temp_page && table.cell_endy[col] < temp_y) temp_y = table.cell_endy[col]; } for (col = 0; col < table.num_cols; col ++) if (table.row_spans[col] <= rowspan && cells[row][col] != NULL && cells[row][col]->child != NULL) { table.cell_endpage[col] = temp_page; table.cell_endy[col] = temp_y; } row_y -= table.cellpadding; table.border_left = table.col_lefts[0] - table.cellpadding; width = table.col_rights[table.num_cols - 1] - table.col_lefts[0] + 2 * table.cellpadding; for (bgcolor = NULL, col = 0; col < table.num_cols; col ++) if (table.row_spans[col] <= rowspan && cells[row][col] && !htmlGetVariable(cells[row][col], (uchar *)"ROWSPAN") && (bgcolor = htmlGetVariable(cells[row][col]->parent, (uchar *)"BGCOLOR")) != NULL) break; if (bgcolor) { memcpy(bgrgb, background_color, sizeof(bgrgb)); get_color(bgcolor, bgrgb, 0); if (row_page > *page) { // Draw background on multiple pages... // Bottom of first page... new_render(*page, RENDER_BOX, table.border_left, bottom, width, row_starty - bottom + table.cellpadding, bgrgb, pages[*page].start); // Intervening pages... for (temp_page = *page + 1; temp_page < row_page; temp_page ++) { new_render(temp_page, RENDER_BOX, table.border_left, bottom, width, top - bottom, bgrgb, pages[temp_page].start); } // Top of last page... check_pages(*page); new_render(row_page, RENDER_BOX, table.border_left, row_y, width, top - row_y, bgrgb, pages[row_page].start); } else { // Draw background in row... new_render(row_page, RENDER_BOX, table.border_left, row_y, width, row_height + 2 * table.cellpadding, bgrgb, pages[row_page].start); } } for (col = 0; col < table.num_cols; col += colspan + 1) { for (colspan = 0; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; else if (table.row_spans[col + colspan] > 0) { DEBUG_printf(("row = %d, col = %d, decrementing row_spans (%d) to %d...\n", row, col, table.row_spans[col + colspan], table.row_spans[col + colspan] - rowspan)); table.row_spans[col + colspan] -= rowspan; } colspan --; width = table.col_rights[col + colspan] - table.col_lefts[col] + 2 * table.cellpadding; if (cells[row][col] == NULL || cells[row][col]->child == NULL || table.row_spans[col] > 0) continue; DEBUG_printf(("DRAWING BORDER+BACKGROUND: col=%d, row=%d, cell_page=%d, cell_y=%.1f\n" " cell_endpage=%d, cell_endy=%.1f\n", col, row, table.cell_page[col], table.cell_y[col], table.cell_endpage[col], table.cell_endy[col])); if ((bgcolor = htmlGetVariable(cells[row][col], (uchar *)"BGCOLOR")) != NULL) { memcpy(bgrgb, background_color, sizeof(bgrgb)); get_color(bgcolor, bgrgb, 0); } table.border_left = table.col_lefts[col] - table.cellpadding; if (table.cell_page[col] != table.cell_endpage[col]) { /* * Crossing a page boundary... 
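 *
 * The cell's frame is emitted in three parts: top edge plus side strips on
 * the page where the cell starts, full-height side strips on any pages in
 * between, and side strips plus the bottom edge on the page where it ends;
 * the background box is stretched or re-issued to match each part.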
*/ if (table.border > 0) { /* * +---+---+---+ * | | | | */ // Top new_render(table.cell_page[col], RENDER_BOX, table.border_left, table.cell_y[col] + table.cellpadding, width + table.border, table.border, table.border_rgb); // Left new_render(table.cell_page[col], RENDER_BOX, table.border_left, bottom, table.border, table.cell_y[col] - bottom + table.cellpadding + table.border, table.border_rgb); // Right new_render(table.cell_page[col], RENDER_BOX, table.border_left + width, bottom, table.border, table.cell_y[col] - bottom + table.cellpadding + table.border, table.border_rgb); } if (bgcolor != NULL) { table.cell_bg[col]->y = bottom; table.cell_bg[col]->height = table.cell_y[col] - bottom + table.cellpadding + table.border; } for (temp_page = table.cell_page[col] + 1; temp_page < table.cell_endpage[col]; temp_page ++) { /* * | | | | * | | | | */ if (table.border > 0.0f) { // Left new_render(temp_page, RENDER_BOX, table.border_left, bottom, table.border, top - bottom, table.border_rgb); // Right new_render(temp_page, RENDER_BOX, table.border_left + width, bottom, table.border, top - bottom, table.border_rgb); } if (bgcolor != NULL) new_render(temp_page, RENDER_BOX, table.border_left, bottom, width + table.border, top - bottom, bgrgb, pages[temp_page].start); } if (table.border > 0.0f) { /* * | | | | * +---+---+---+ */ // Left new_render(table.cell_endpage[col], RENDER_BOX, table.border_left, row_y, table.border, top - row_y, table.border_rgb); // Right new_render(table.cell_endpage[col], RENDER_BOX, table.border_left + width, row_y, table.border, top - row_y, table.border_rgb); // Bottom new_render(table.cell_endpage[col], RENDER_BOX, table.border_left, row_y, width + table.border, table.border, table.border_rgb); } if (bgcolor != NULL) { check_pages(table.cell_endpage[col]); new_render(table.cell_endpage[col], RENDER_BOX, table.border_left, row_y, width + table.border, top - row_y, bgrgb, pages[table.cell_endpage[col]].start); } } else { /* * +---+---+---+ * | | | | * +---+---+---+ */ if (table.border > 0.0f) { // Top new_render(table.cell_page[col], RENDER_BOX, table.border_left, table.cell_y[col] + table.cellpadding, width + table.border, table.border, table.border_rgb); // Left new_render(table.cell_page[col], RENDER_BOX, table.border_left, row_y, table.border, table.cell_y[col] - row_y + table.cellpadding + table.border, table.border_rgb); // Right new_render(table.cell_page[col], RENDER_BOX, table.border_left + width, row_y, table.border, table.cell_y[col] - row_y + table.cellpadding + table.border, table.border_rgb); // Bottom new_render(table.cell_page[col], RENDER_BOX, table.border_left, row_y, width + table.border, table.border, table.border_rgb); } if (bgcolor != NULL) { table.cell_bg[col]->y = row_y; table.cell_bg[col]->height = table.cell_y[col] - row_y + table.cellpadding + table.border; } } } *page = row_page; *y = row_y; } /* * 'parse_table()' - Parse a table and produce rendering output. */ static void parse_table(tree_t *t, // I - Tree to parse float left, // I - Left margin float right, // I - Printable width float bottom, // I - Bottom margin float top, // I - Printable top float *x, // IO - X position float *y, // IO - Y position int *page, // IO - Page # int needspace) // I - Need whitespace? 
{ int col, row, header_row = -1, tcol, colspan, rowspan, alloc_rows, regular_cols; hdtable_t table; float col_width, col_min, col_pref, col_height, cellspacing, width, pref_width, span_width, regular_width, actual_width, table_width, min_width, temp_width, header_height = 0.0, table_y, temp_bottom, temp_top; int temp_page, table_page; uchar *var, *height_var, // Row HEIGHT variable *header_height_var = NULL; tree_t *temprow, *tempcol, *tempnext, ***cells, *caption; // Caption for bottom, if any float temp_height; // Temporary holder uchar *bgcolor; float bgrgb[3]; const char *htmldoc_debug; // HTMLDOC_DEBUG env var DEBUG_puts("\n\nTABLE"); DEBUG_printf(("parse_table(t=%p, left=%.1f, right=%.1f, x=%.1f, y=%.1f, page=%d\n", (void *)t, left, right, *x, *y, *page)); if (t->child == NULL) return; /* Empty table... */ memset(&table, 0, sizeof(table)); /* * Check debug mode... */ if ((htmldoc_debug = getenv("HTMLDOC_DEBUG")) != NULL && (strstr(htmldoc_debug, "table") || strstr(htmldoc_debug, "all"))) table.debug = 1; else table.debug = 0; /* * Figure out the # of rows, columns, and the desired widths... */ cells = NULL; if ((var = htmlGetVariable(t, (uchar *)"WIDTH")) != NULL) { if (var[strlen((char *)var) - 1] == '%') table_width = (float)(atof((char *)var) * (right - left) / 100.0f); else table_width = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else table_width = right - left; if ((var = htmlGetVariable(t, (uchar *)"HEIGHT")) != NULL) { if (var[strlen((char *)var) - 1] == '%') table.height = (float)(atof((char *)var) * (top - bottom) / 100.0f); else table.height = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else table.height = -1.0f; DEBUG_printf(("table_width = %.1f\n", table_width)); if ((var = htmlGetVariable(t, (uchar *)"CELLPADDING")) != NULL) table.cellpadding = atoi((char *)var); else table.cellpadding = 1.0f; if ((var = htmlGetVariable(t, (uchar *)"CELLSPACING")) != NULL) cellspacing = atoi((char *)var); else cellspacing = 0.0f; if ((var = htmlGetVariable(t, (uchar *)"BORDER")) != NULL) { if ((table.border = (float)atof((char *)var)) == 0.0 && var[0] != '0') table.border = 1.0f; table.cellpadding += table.border; } else table.border = 0.0f; if (table.debug && table.border == 0.0f) table.border = 0.01f; table.border_rgb[0] = t->red / 255.0f; table.border_rgb[1] = t->green / 255.0f; table.border_rgb[2] = t->blue / 255.0f; if ((var = htmlGetVariable(t, (uchar *)"BORDERCOLOR")) != NULL) get_color(var, table.border_rgb, 0); if (table.border == 0.0f && table.cellpadding > 0.0f) { /* * Ah, the strange table formatting nightmare that is HTML. * Netscape and MSIE assign an invisible border width of 1 * pixel if no border is specified... */ table.cellpadding += 1.0f; } table.border_size = table.border - 1.0f; cellspacing *= PagePrintWidth / _htmlBrowserWidth; table.cellpadding *= PagePrintWidth / _htmlBrowserWidth; table.border *= PagePrintWidth / _htmlBrowserWidth; table.border_size *= PagePrintWidth / _htmlBrowserWidth; DEBUG_printf(("border = %.1f, cellpadding = %.1f\n", table.border, table.cellpadding)); temp_bottom = bottom - table.cellpadding; temp_top = top + table.cellpadding; for (temprow = t->child, table.num_cols = 0, table.num_rows = 0, alloc_rows = 0, caption = NULL; temprow != NULL; temprow = tempnext) { tempnext = temprow->next; if (temprow->markup == MARKUP_CAPTION) { if ((var = htmlGetVariable(temprow, (uchar *)"ALIGN")) == NULL || strcasecmp((char *)var, "bottom")) { /* * Show caption at top... 
*/ parse_paragraph(temprow, left, right, bottom, top, x, y, page, needspace); needspace = 1; } else { /* * Flag caption for bottom of table... */ caption = temprow; } } else if (temprow->markup == MARKUP_TR || ((temprow->markup == MARKUP_TBODY || temprow->markup == MARKUP_THEAD || temprow->markup == MARKUP_TFOOT) && temprow->child != NULL)) { if (temprow->markup == MARKUP_THEAD) header_row = table.num_rows; // Descend into table body as needed... if (temprow->markup == MARKUP_TBODY || temprow->markup == MARKUP_THEAD || temprow->markup == MARKUP_TFOOT) temprow = temprow->child; // Figure out the next row... if ((tempnext = temprow->next) == NULL) if (temprow->parent->markup == MARKUP_TBODY || temprow->parent->markup == MARKUP_THEAD || temprow->parent->markup == MARKUP_TFOOT) tempnext = temprow->parent->next; // Allocate memory for the table as needed... if (table.num_rows >= alloc_rows) { alloc_rows += ALLOC_ROWS; if (alloc_rows == ALLOC_ROWS) cells = (tree_t ***)malloc(sizeof(tree_t **) * (size_t)alloc_rows); else cells = (tree_t ***)realloc(cells, sizeof(tree_t **) * (size_t)alloc_rows); if (cells == (tree_t ***)0) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for table!"); return; } } if ((cells[table.num_rows] = (tree_t **)calloc(sizeof(tree_t *), MAX_COLUMNS)) == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for table!"); free(cells); return; } #ifdef DEBUG printf("BEFORE row %d: num_cols = %d\n", table.num_rows, table.num_cols); if (table.num_rows) for (col = 0; col < table.num_cols; col ++) printf(" col %d: row_spans[] = %d\n", col, table.row_spans[col]); #endif // DEBUG // Figure out the starting column... if (table.num_rows) { for (col = 0, rowspan = 9999; col < table.num_cols; col ++) if (table.row_spans[col] < rowspan) rowspan = table.row_spans[col]; for (col = 0; col < table.num_cols; col ++) table.row_spans[col] -= rowspan; for (col = 0; table.row_spans[col] && col < table.num_cols; col ++) cells[table.num_rows][col] = cells[table.num_rows - 1][col]; } else col = 0; for (tempcol = temprow->child; tempcol != NULL && col < MAX_COLUMNS; tempcol = tempcol->next) { if (tempcol->markup == MARKUP_TH && table.num_rows == 0) header_row = table.num_rows; if (tempcol->markup == MARKUP_TD || tempcol->markup == MARKUP_TH) { // Handle colspan and rowspan stuff... if ((var = htmlGetVariable(tempcol, (uchar *)"COLSPAN")) != NULL) colspan = atoi((char *)var); else colspan = 1; if ((var = htmlGetVariable(tempcol, (uchar *)"ROWSPAN")) != NULL) { table.row_spans[col] = atoi((char *)var); if (table.row_spans[col] == 1) table.row_spans[col] = 0; for (tcol = 1; tcol < colspan; tcol ++) table.row_spans[col + tcol] = table.row_spans[col]; } // Compute the cell size... col_width = get_cell_size(tempcol, 0.0f, table_width, &col_min, &col_pref, &col_height); if ((var = htmlGetVariable(tempcol, (uchar *)"WIDTH")) != NULL) { if (var[strlen((char *)var) - 1] == '%') { col_width -= 2.0 * table.cellpadding - cellspacing; if (colspan <= 1) table.col_percent[col] = 1; } else { col_width -= 2.0 * table.cellpadding; } } else col_width = 0.0f; tempcol->height = col_height; DEBUG_printf(("%d,%d: colsp=%d, rowsp=%d, width=%.1f, minw=%.1f, prefw=%.1f, minh=%.1f\n", col, table.num_rows, colspan, table.row_spans[col], col_width, col_min, col_pref, col_height)); // Add widths to columns... 
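// A cell spanning several columns records its width and minimum in
// col_swidths[]/col_smins[] for the later COLSPAN pass and spreads
// col_width evenly over the spanned columns; a single-column cell updates
// col_widths[], col_prefs[] and col_mins[] directly.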
if (colspan > 1) { if (colspan > table.col_spans[col]) table.col_spans[col] = colspan; if (col_width > table.col_swidths[col]) table.col_swidths[col] = col_width; if (col_min > table.col_smins[col]) table.col_smins[col] = col_min; temp_width = col_width / colspan; for (int i = 0; i < colspan; i ++) { if (temp_width > table.col_widths[col + i]) table.col_widths[col + i] = temp_width; } } else { if (col_width > 0.0f) table.col_fixed[col] = 1; if (col_width > table.col_widths[col]) table.col_widths[col] = col_width; if (col_pref > table.col_prefs[col]) table.col_prefs[col] = col_pref; if (col_min > table.col_mins[col]) table.col_mins[col] = col_min; } while (colspan > 0 && col < MAX_COLUMNS) { cells[table.num_rows][col] = tempcol; col ++; colspan --; } while (table.row_spans[col] && col < table.num_cols) { cells[table.num_rows][col] = cells[table.num_rows - 1][col]; col ++; } } } DEBUG_printf(("header_row=%d\n", header_row)); if (col > table.num_cols) table.num_cols = col; #ifdef DEBUG printf("AFTER row %d: num_cols = %d\n", table.num_rows, table.num_cols); for (col = 0; col < table.num_cols; col ++) printf(" col %d: row_spans[] = %d\n", col, table.row_spans[col]); #endif // DEBUG table.num_rows ++; for (col = 0; col < table.num_cols; col ++) if (table.row_spans[col]) table.row_spans[col] --; } } /* * OK, some people apparently create HTML tables with no columns or * rows... If this happened, return immediately... */ if (table.num_cols == 0) return; /* * Now figure out the width of the table... */ if ((var = htmlGetVariable(t, (uchar *)"WIDTH")) != NULL) { if (var[strlen((char *)var) - 1] == '%') width = (float)(atof((char *)var) * (right - left) / 100.0f); else width = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else { for (col = 0, width = 0.0; col < table.num_cols; col ++) width += table.col_prefs[col]; width += (2 * table.cellpadding + cellspacing) * table.num_cols - cellspacing; if (width > (right - left)) width = right - left; } /* * Compute the width of each column based on the printable width. */ DEBUG_printf(("\nTABLE: %dx%d\n\n", table.num_cols, table.num_rows)); actual_width = (2 * table.cellpadding + cellspacing) * table.num_cols - cellspacing; regular_width = (width - actual_width) / table.num_cols; DEBUG_printf((" width = %.1f, actual_width = %.1f, regular_width = %.1f\n\n", width, actual_width, regular_width)); DEBUG_puts(" Col Width Min Pref Fixed? Percent?"); DEBUG_puts(" --- ------ ------ ------ ------ --------"); #ifdef DEBUG for (col = 0; col < table.num_cols; col ++) printf(" %-3d %-6.1f %-6.1f %-6.1f %-6s %s\n", col, table.col_widths[col], table.col_mins[col], table.col_prefs[col], table.col_fixed[col] ? "YES" : "NO", table.col_percent[col] ? "YES" : "NO"); puts(""); #endif /* DEBUG */ /* * The first pass just handles columns with a specified width... */ DEBUG_puts("PASS 1: fixed width handling\n"); for (col = 0, regular_cols = 0; col < table.num_cols; col ++) if (table.col_widths[col] > 0.0f) { if (table.col_mins[col] > table.col_widths[col]) { DEBUG_printf((" updating column %d to width=%.1f\n", col, table.col_mins[col])); table.col_widths[col] = table.col_mins[col]; } actual_width += table.col_widths[col]; } else { regular_cols ++; actual_width += table.col_mins[col]; } DEBUG_printf((" actual_width = %.1f, regular_cols = %d\n\n", actual_width,regular_cols)); /* * Pass two uses the "preferred" width whenever possible, and the * minimum otherwise... 
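 *
 * Each still-unsized column receives its minimum width plus a share of the
 * remaining space in proportion to (pref - min); roughly:
 *
 *   col_width = col_min + (col_pref - col_min) *
 *               clamp((width - actual_width) / sum(col_pref - col_min), 0, 1)
 *
 * with a final guard that falls back to the minimum width when the running
 * total would exceed the requested table width.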
*/ DEBUG_puts("PASS 2: preferred width handling\n"); for (col = 0, pref_width = 0.0f; col < table.num_cols; col ++) if (table.col_widths[col] == 0.0f) pref_width += table.col_prefs[col] - table.col_mins[col]; DEBUG_printf((" pref_width = %.1f\n", pref_width)); if (pref_width > 0.0f) { if ((regular_width = (width - actual_width) / pref_width) < 0.0f) regular_width = 0.0f; else if (regular_width > 1.0f) regular_width = 1.0f; DEBUG_printf((" regular_width = %.1f\n", regular_width)); for (col = 0; col < table.num_cols; col ++) if (table.col_widths[col] == 0.0f) { pref_width = (table.col_prefs[col] - table.col_mins[col]) * regular_width; if ((actual_width + pref_width) > width) { if (col == (table.num_cols - 1) && (width - actual_width) >= table.col_mins[col]) table.col_widths[col] = width - actual_width; else table.col_widths[col] = table.col_mins[col]; } else table.col_widths[col] = pref_width + table.col_mins[col]; DEBUG_printf((" col_widths[%d] = %.1f\n", col, table.col_widths[col])); actual_width += table.col_widths[col] - table.col_mins[col]; } } else { /* * Assign min widths for all cells... */ for (col = 0; col < table.num_cols; col ++) if (table.col_widths[col] == 0.0f) table.col_widths[col] = table.col_mins[col]; } DEBUG_printf((" actual_width = %.1f\n\n", actual_width)); /* * Pass three enforces any hard or minimum widths for COLSPAN'd * columns... */ DEBUG_puts("PASS 3: colspan handling\n\n"); for (col = 0; col < table.num_cols; col ++) { DEBUG_printf((" col %d, colspan %d\n", col, table.col_spans[col])); if (table.col_spans[col] > 1) { for (colspan = 0, span_width = 0.0f; colspan < table.col_spans[col]; colspan ++) span_width += table.col_widths[col + colspan]; pref_width = 0.0f; if (span_width < table.col_swidths[col]) pref_width = table.col_swidths[col]; if (span_width < table.col_smins[col] && pref_width < table.col_smins[col]) pref_width = table.col_smins[col]; for (colspan = 0; colspan < table.col_spans[col]; colspan ++) if (table.col_fixed[col + colspan]) { span_width -= table.col_widths[col + colspan]; pref_width -= table.col_widths[col + colspan]; } DEBUG_printf((" col_swidths=%.1f, col_smins=%.1f, span_width=%.1f, pref_width=%.1f\n", table.col_swidths[col], table.col_smins[col], span_width, pref_width)); if (pref_width > 0.0f && pref_width > span_width) { if (span_width >= 1.0f) { // Expand cells proportionately... regular_width = pref_width / span_width; for (colspan = 0; colspan < table.col_spans[col]; colspan ++) if (!table.col_fixed[col + colspan]) { actual_width -= table.col_widths[col + colspan]; table.col_widths[col + colspan] *= regular_width; actual_width += table.col_widths[col + colspan]; DEBUG_printf((" col_widths[%d] = %.1f\n", col + colspan, table.col_widths[col + colspan])); } } else { // Divide the space up equally between columns, since the // colspan area is always by itself... (this hack brought // to you by Yahoo! and their single cell tables with // colspan=2 :) regular_width = pref_width / table.col_spans[col]; for (colspan = 0; colspan < table.col_spans[col]; colspan ++) { actual_width += regular_width; table.col_widths[col + colspan] += regular_width; DEBUG_printf((" col_widths[%d] = %.1f\n", col, table.col_widths[col])); } } } } } DEBUG_printf((" actual_width = %.1f\n\n", actual_width)); /* * Pass four divides up the remaining space amongst the columns... 
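 *
 * Any width still unassigned is divided by the number of columns and the
 * per-column share is added to every column that is not fixed-width; if the
 * columns already add up to more than the requested width, the table width
 * is simply raised to the actual total.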
*/ DEBUG_puts("PASS 4: divide remaining space, if any...\n"); if (width > actual_width) { for (col = 0, colspan = 0; col < table.num_cols; col ++) if (!table.col_fixed[col] || table.col_percent[col]) colspan ++; if (colspan > 0) { regular_width = (width - actual_width) / table.num_cols; for (col = 0; col < table.num_cols; col ++) if (!table.col_fixed[col]) { table.col_widths[col] += regular_width; DEBUG_printf((" col_widths[%d] = %.1f\n", col, table.col_widths[col])); } } } else width = actual_width; DEBUG_puts(""); /* * The final pass is only run if the width > table_width... */ DEBUG_puts("PASS 5: Squeeze table as needed..."); if (width > table_width) { /* * Squeeze the table to fit the requested width or the printable width * as determined at the beginning... */ for (col = 0, min_width = -cellspacing; col < table.num_cols; col ++) min_width += table.col_mins[col] + 2 * table.cellpadding + cellspacing; DEBUG_printf((" table_width = %.1f, width = %.1f, min_width = %.1f\n", table_width, width, min_width)); temp_width = table_width - min_width; if (temp_width < 0.0f) temp_width = 0.0f; width -= min_width; if (width < 1.0f) width = 1.0f; for (col = 0; col < table.num_cols; col ++) { table.col_widths[col] = table.col_mins[col] + temp_width * (table.col_widths[col] - table.col_mins[col]) / width; DEBUG_printf((" col_widths[%d] = %.1f\n", col, table.col_widths[col])); } for (col = 0, width = -cellspacing; col < table.num_cols; col ++) width += table.col_widths[col] + 2 * table.cellpadding + cellspacing; DEBUG_printf((" new width = %.1f, max width = %.1f\n", width, right - left)); } if ((width - right + left) > 0.001f && OverflowErrors) progress_error(HD_ERROR_CONTENT_TOO_LARGE, "Table on page %d too wide - truncation or overlapping may occur!", *page + 1); DEBUG_puts(""); DEBUG_printf(("Final table width = %.1f, alignment = %d\n", width, t->halignment)); switch (t->halignment) { case ALIGN_LEFT : *x = left + table.cellpadding; break; case ALIGN_CENTER : *x = left + 0.5f * (right - left - width) + table.cellpadding; break; case ALIGN_RIGHT : *x = right - width + table.cellpadding; break; } for (col = 0; col < table.num_cols; col ++) { table.col_lefts[col] = *x; table.col_rights[col] = *x + table.col_widths[col]; *x = table.col_rights[col] + 2 * table.cellpadding + cellspacing; DEBUG_printf(("left[%d] = %.1f, right[%d] = %.1f\n", col, table.col_lefts[col], col, table.col_rights[col])); } /* * Now render the whole table... */ if (*y < top && needspace) *y -= _htmlSpacings[SIZE_P]; if (table.debug) { check_pages(*page); render_t *r; char table_text[255]; snprintf(table_text, sizeof(table_text), "t=%p", (void *)t); r = new_render(*page, RENDER_TEXT, left, *y, get_width((uchar *)table_text, TYPE_COURIER, STYLE_NORMAL, 3), _htmlSizes[3], table_text); r->data.text.typeface = TYPE_COURIER; r->data.text.style = STYLE_NORMAL; r->data.text.size = (float)_htmlSizes[3]; } table_page = *page; table_y = *y; for (row = 0; row < table.num_rows; row ++) { height_var = NULL; if (cells[row][0] != NULL) { /* * Do page comments... */ if (cells[row][0]->parent->prev != NULL && cells[row][0]->parent->prev->markup == MARKUP_COMMENT) parse_comment(cells[row][0]->parent->prev, &left, &right, &temp_bottom, &temp_top, x, y, page, NULL, 0); /* * Get height... 
*/ if ((height_var = htmlGetVariable(cells[row][0]->parent, (uchar *)"HEIGHT")) == NULL) for (col = 0; col < table.num_cols; col ++) if (htmlGetVariable(cells[row][col], (uchar *)"ROWSPAN") == NULL) if ((height_var = htmlGetVariable(cells[row][col], (uchar *)"HEIGHT")) != NULL) break; } if (height_var != NULL && row == header_row) header_height_var = height_var; if (cells[row][0] != NULL && height_var != NULL) { // Row height specified; make sure it'll fit... if (height_var[strlen((char *)height_var) - 1] == '%') temp_height = (float)(atof((char *)height_var) * 0.01f * (PagePrintLength - 2 * table.cellpadding)); else temp_height = (float)(atof((char *)height_var) * PagePrintWidth / _htmlBrowserWidth); if (table.height > 0.0f && temp_height > table.height) temp_height = table.height; temp_height -= 2 * table.cellpadding; } else { // Use min height computed from get_cell_size()... for (col = 0, temp_height = (float)_htmlSpacings[SIZE_P]; col < table.num_cols; col ++) if (cells[row][col] != NULL && cells[row][col]->height > temp_height && !htmlGetVariable(cells[row][col], (uchar *)"ROWSPAN")) temp_height = cells[row][col]->height; if (table.height > 0.0) { // Table height specified; make sure it'll fit... if (temp_height > table.height) temp_height = table.height; temp_height -= 2 * table.cellpadding; } else if (temp_height > (PageLength / 8.0) && height_var == NULL) temp_height = PageLength / 8.0; } DEBUG_printf(("BEFORE row = %d, temp_height = %.1f, *y = %.1f, *page = %d\n", row, temp_height, *y, *page)); if (*y < (bottom + 2 * table.cellpadding + temp_height) && temp_height <= (top - bottom - 2 * table.cellpadding)) { DEBUG_puts("NEW PAGE"); *y = top - header_height; (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); if (row > 0 && header_row >= 0) { // Render header row... render_table_row(table, cells, header_row, header_height_var, left, right, bottom, top, x, y, page); } } float start_y = *y; temp_page = *page; render_table_row(table, cells, row, height_var, left, right, bottom, top, x, y, page); if (header_row >= 0 && row == header_row) { header_height = *y - start_y; top += header_height; } else if (temp_page != *page && header_row >= 0) { // Render header row on new page(s)... do { float temp_y = top - header_height; temp_page ++; render_table_row(table, cells, header_row, header_height_var, left, right, bottom, top, x, &temp_y, &temp_page); } while (temp_page < *page); } if (row < (table.num_rows - 1)) (*y) -= cellspacing; DEBUG_printf(("END row = %d, *y = %.1f, *page = %d\n", row, *y, *page)); } top -= header_height; /* * Handle table background color... */ if ((bgcolor = htmlGetVariable(t, (uchar *)"BGCOLOR")) != NULL) { memcpy(bgrgb, background_color, sizeof(bgrgb)); get_color(bgcolor, bgrgb, 0); table.border_left = table.col_lefts[0] - table.cellpadding; width = table.col_rights[table.num_cols - 1] - table.col_lefts[0] + 2 * table.cellpadding; if (table_page != *page) { // Draw background on multiple pages... // Bottom of first page... new_render(table_page, RENDER_BOX, table.border_left, bottom, width, table_y - bottom, bgrgb, pages[table_page].start); // Intervening pages... for (temp_page = table_page + 1; temp_page < *page; temp_page ++) { new_render(temp_page, RENDER_BOX, table.border_left, bottom, width, top - bottom, bgrgb, pages[temp_page].start); } // Top of last page... check_pages(*page); new_render(*page, RENDER_BOX, table.border_left, *y, width, top - *y, bgrgb, pages[*page].start); } else { // Draw background in row... 
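// (The whole table fits on one page, so a single box from the current *y up
// to the table's starting table_y is enough for the background.)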
new_render(table_page, RENDER_BOX, table.border_left, *y, width, table_y - *y, bgrgb, pages[table_page].start); } } *x = left; if (caption) { /* * Show caption at bottom... */ parse_paragraph(caption, left, right, bottom, top, x, y, page, needspace); needspace = 1; } /* * Free memory for the table... */ if (table.num_rows > 0) { for (row = 0; row < table.num_rows; row ++) free(cells[row]); free(cells); } } #ifdef TABLE_DEBUG # undef DEBUG # undef DEBUG_puts # define DEBUG_puts(x) # undef DEBUG_printf # define DEBUG_printf(x) #endif /* TABLE_DEBUG */ /* * 'parse_list()' - Parse a list entry and produce rendering output. */ static void parse_list(tree_t *t, /* I - Tree to parse */ float *left, /* I - Left margin */ float *right, /* I - Printable width */ float *bottom, /* I - Bottom margin */ float *top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int needspace) /* I - Need whitespace? */ { uchar number[255]; /* List number (for numbered types) */ uchar *value; /* VALUE= variable */ int typeface; /* Typeface of list number */ float width; /* Width of list number */ render_t *r; /* Render primitive */ int oldpage; /* Old page value */ float oldy; /* Old Y value */ float tempx; /* Temporary X value */ DEBUG_printf(("parse_list(t=%p, left=%.1f, right=%.1f, x=%.1f, y=%.1f, page=%d\n", (void *)t, *left, *right, *x, *y, *page)); if (needspace && *y < *top) { *y -= _htmlSpacings[t->size]; needspace = 0; } check_pages(*page); oldy = *y; oldpage = *page; r = pages[*page].end; tempx = *x; if (t->indent == 0) { // Adjust left margin when no UL/OL/DL is being used... *left += _htmlSizes[t->size]; tempx += _htmlSizes[t->size]; } parse_doc(t->child, left, right, bottom, top, &tempx, y, page, NULL, &needspace); // Handle when paragraph wrapped to new page... if (*page != oldpage) { // First see if anything was added to the old page... if ((r != NULL && r->next == NULL) || pages[oldpage].end == NULL) { // No, put the symbol on the next page... oldpage = *page; oldy = *top; } } if ((value = htmlGetVariable(t, (uchar *)"VALUE")) != NULL) { if (isdigit(value[0])) list_values[t->indent] = atoi((char *)value); else if (isupper(value[0])) list_values[t->indent] = value[0] - 'A' + 1; else list_values[t->indent] = value[0] - 'a' + 1; } switch (list_types[t->indent]) { case 'a' : case 'A' : case '1' : case 'i' : case 'I' : strlcpy((char *)number, format_number(list_values[t->indent], (char)list_types[t->indent]), sizeof(number)); strlcat((char *)number, ". ", sizeof(number)); typeface = t->typeface; break; default : snprintf((char *)number, sizeof(number), "%c ", list_types[t->indent]); typeface = TYPE_SYMBOL; break; } width = get_width(number, typeface, t->style, t->size); r = new_render(oldpage, RENDER_TEXT, *left - width, oldy - _htmlSizes[t->size], width, _htmlSpacings[t->size], number); r->data.text.typeface = typeface; r->data.text.style = t->style; r->data.text.size = (float)_htmlSizes[t->size]; r->data.text.rgb[0] = t->red / 255.0f; r->data.text.rgb[1] = t->green / 255.0f; r->data.text.rgb[2] = t->blue / 255.0f; list_values[t->indent] ++; if (t->indent == 0) { // Adjust left margin when no UL/OL/DL is being used... *left -= _htmlSizes[t->size]; } } /* * 'init_list()' - Initialize the list type and value as necessary. 
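 *
 * A single-character TYPE is used verbatim; "disc" and "circle" map to a
 * bullet symbol, anything else to a third glyph.  A UL without TYPE picks a
 * symbol by nesting depth and an OL defaults to decimal numbering.  VALUE
 * (or START) seeds list_values[]: digits via atoi(), letters as their
 * 1-based position in the alphabet; an OL with neither starts at 1.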
*/ static void init_list(tree_t *t) /* I - List entry */ { uchar *type, /* TYPE= variable */ *value; /* VALUE= variable */ static uchar *symbols = (uchar *)"\327\267\250\340"; if ((type = htmlGetVariable(t, (uchar *)"TYPE")) != NULL) { if (strlen((char *)type) == 1) list_types[t->indent] = type[0]; else if (strcasecmp((char *)type, "disc") == 0 || strcasecmp((char *)type, "circle") == 0) list_types[t->indent] = symbols[1]; else list_types[t->indent] = symbols[2]; } else if (t->markup == MARKUP_UL) list_types[t->indent] = symbols[t->indent & 3]; else if (t->markup == MARKUP_OL) list_types[t->indent] = '1'; if ((value = htmlGetVariable(t, (uchar *)"VALUE")) == NULL) value = htmlGetVariable(t, (uchar *)"START"); if (value != NULL) { if (isdigit(value[0])) list_values[t->indent] = atoi((char *)value); else if (isupper(value[0])) list_values[t->indent] = value[0] - 'A' + 1; else list_values[t->indent] = value[0] - 'a' + 1; } else if (t->markup == MARKUP_OL) list_values[t->indent] = 1; } /* * 'parse_comment()' - Parse a comment for HTMLDOC comments. */ #ifdef COMMENT_DEBUG # undef DEBUG_puts # define DEBUG_puts(x) puts(x) # define DEBUG # undef DEBUG_printf # define DEBUG_printf(x) printf x #endif /* COMMENT_DEBUG */ static void parse_comment(tree_t *t, /* I - Tree to parse */ float *left, /* I - Left margin */ float *right, /* I - Printable width */ float *bottom, /* I - Bottom margin */ float *top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ tree_t *para, /* I - Current paragraph */ int needspace) /* I - Need whitespace? */ { int i; /* Looping var */ const char *comment; /* Comment text */ char *ptr, /* Pointer into value string */ buffer[1024]; /* Buffer for strings */ int pos, /* Position (left, center, right) */ tof; /* Top of form */ DEBUG_printf(("parse_comment(t=%p, left=%.1f, right=%.1f, bottom=%.1f, " "top=%.1f, x=%.1f, y=%.1f, page=%d, para=%p, needspace=%d\n", (void *)t, *left, *right, *bottom, *top, *x, *y, *page, (void *)para, needspace)); if (t->data == NULL) return; if (para != NULL && para->child != NULL && para->child->next == NULL && para->child->child == NULL && para->child->markup == MARKUP_NONE && strcmp((const char *)para->child->data, " ") == 0) { // Remove paragraph consisting solely of whitespace... htmlDeleteTree(para->child); para->child = para->last_child = NULL; } // Mark if we are at the top of form... tof = (*y >= *top); DEBUG_printf(("BEFORE tof=%d, *y=%.1f, *top=%.1f, *page=%d, t->data=\"%s\"\n", tof, *y, *top, *page, t->data)); DEBUG_printf((" PagePrintWidth = %d\n", PagePrintWidth)); DEBUG_printf(("PagePrintLength = %d\n", PagePrintLength)); DEBUG_printf((" PageWidth = %d\n", PageWidth)); DEBUG_printf((" PageLength = %d\n", PageLength)); DEBUG_printf((" PageLeft = %d\n", PageLeft)); DEBUG_printf((" PageBottom = %d\n", PageBottom)); DEBUG_printf((" PageRight = %d\n", PageRight)); DEBUG_printf((" PageTop = %d\n", PageTop)); DEBUG_printf((" Landscape = %d\n", Landscape)); for (comment = (const char *)t->data; *comment;) { // Skip leading whitespace... while (isspace(*comment)) comment ++; if (!*comment) break; if (strncasecmp(comment, "PAGE BREAK", 10) == 0 && (!comment[10] || isspace(comment[10]))) { /* * <!-- PAGE BREAK --> generates a page break... 
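 *
 * Any paragraph collected so far is flushed with parse_paragraph() before
 * the break, then the position is reset to the top-left of the new page.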
*/ comment += 10; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; *y = *top; tof = 1; } else if (strncasecmp(comment, "NEW PAGE", 8) == 0 && (!comment[8] || isspace(comment[8]))) { /* * <!-- NEW PAGE --> generates a page break... */ comment += 8; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; *y = *top; tof = 1; } else if (strncasecmp(comment, "NEW SHEET", 9) == 0 && (!comment[9] || isspace(comment[9]))) { /* * <!-- NEW SHEET --> generate a page break to a new sheet... */ comment += 9; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } if (NumberUp == 1) { // NEW SHEET breaks to the next sheet of paper... (*page) ++; if (PageDuplex && ((*page) & 1)) (*page) ++; } else { // NEW SHEET breaks to the next side/sheet... (*page) ++; for (i = *page - 1; i >= 0; i --) if (pages[i].nup != NumberUp) break; i ++; for (i = *page - i; (i % NumberUp) != 0; i ++, (*page) ++); } if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; *y = *top; tof = 1; } else if (strncasecmp(comment, "HALF PAGE", 9) == 0 && (!comment[9] || isspace(comment[9]))) { /* * <!-- HALF PAGE --> Go to the next half page. If in the * top half of a page, go to the bottom half. If in the * bottom half, go to the next page. */ float halfway; comment += 9; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } halfway = 0.5f * (*top + *bottom); if (*y <= halfway) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; *y = *top; tof = 1; } else { *x = *left; *y = halfway; tof = 0; } } else if (strncasecmp(comment, "NEED ", 5) == 0) { /* * <!-- NEED amount --> generate a page break if there isn't * enough remaining space... */ comment += 5; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if ((*y - get_measurement(comment, (float)_htmlSpacings[SIZE_P])) < *bottom) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; // Skip amount... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA COLOR ", 12) == 0) { // Media color for page... comment += 12; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... 
tof = (*y >= *top); } if (!tof) { (*page) ++; if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; check_pages(*page); // Get color... if (*comment == '\"') { for (ptr = pages[*page].media_color, comment ++; *comment && *comment != '\"'; comment ++) if (ptr < (pages[*page].media_color + sizeof(pages[*page].media_color) - 1)) *ptr++ = *comment; if (*comment == '\"') comment ++; } else { for (ptr = pages[*page].media_color; *comment && !isspace(*comment); comment ++) if (ptr < (pages[*page].media_color + sizeof(pages[*page].media_color) - 1)) *ptr++ = *comment; } *ptr = '\0'; } else if (strncasecmp(comment, "MEDIA POSITION ", 15) == 0) { // Media position for page... comment += 15; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; check_pages(*page); pages[*page].media_position = atoi(comment); // Skip position... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA TYPE ", 11) == 0) { // Media type for page... comment += 11; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; check_pages(*page); // Get type... if (*comment == '\"') { for (ptr = pages[*page].media_type, comment ++; *comment && *comment != '\"'; comment ++) if (ptr < (pages[*page].media_type + sizeof(pages[*page].media_type) - 1)) *ptr++ = *comment; if (*comment == '\"') comment ++; } else { for (ptr = pages[*page].media_type; *comment && !isspace(*comment); comment ++) if (ptr < (pages[*page].media_type + sizeof(pages[*page].media_type) - 1)) *ptr++ = *comment; } *ptr = '\0'; } else if (strncasecmp(comment, "MEDIA SIZE ", 11) == 0) { // Media size... comment += 11; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; tof = 1; } if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); check_pages(*page); *right = PagePrintWidth - *right; *top = PagePrintLength - *top; set_page_size(comment); if (Landscape) { PagePrintWidth = PageLength - PageLeft - PageRight; PagePrintLength = PageWidth - PageTop - PageBottom; } else { PagePrintWidth = PageWidth - PageLeft - PageRight; PagePrintLength = PageLength - PageTop - PageBottom; } *right = PagePrintWidth - *right; *top = PagePrintLength - *top; *x = *left; *y = *top; pages[*page].width = PageWidth; pages[*page].length = PageLength; // Skip width... 
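      // Note: *right and *top are temporarily expressed as distances from
      // the right and top edges of the printable area around the
      // set_page_size() call above, so the requested margins survive the
      // change in media size.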
while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA LEFT ", 11) == 0) { // Left margin... comment += 11; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; check_pages(*page); *right = PagePrintWidth - *right; PageLeft = pages[*page].left = get_measurement(comment); if (Landscape) PagePrintWidth = PageLength - PageRight - PageLeft; else PagePrintWidth = PageWidth - PageRight - PageLeft; *right = PagePrintWidth - *right; // Skip left... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA RIGHT ", 12) == 0) { // Right margin... comment += 12; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; check_pages(*page); *right = PagePrintWidth - *right; PageRight = pages[*page].right = get_measurement(comment); if (Landscape) PagePrintWidth = PageLength - PageRight - PageLeft; else PagePrintWidth = PageWidth - PageRight - PageLeft; *right = PagePrintWidth - *right; // Skip right... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA BOTTOM ", 13) == 0) { // Bottom margin... comment += 13; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); tof = 1; } *x = *left; check_pages(*page); *top = PagePrintLength - *top; PageBottom = pages[*page].bottom = get_measurement(comment); if (Landscape) PagePrintLength = PageWidth - PageTop - PageBottom; else PagePrintLength = PageLength - PageTop - PageBottom; *top = PagePrintLength - *top; *y = *top; // Skip bottom... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA TOP ", 10) == 0) { // Top margin... comment += 10; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); tof = 1; } *x = *left; check_pages(*page); *top = PagePrintLength - *top; PageTop = pages[*page].top = get_measurement(comment); if (Landscape) PagePrintLength = PageWidth - PageTop - PageBottom; else PagePrintLength = PageLength - PageTop - PageBottom; *top = PagePrintLength - *top; *y = *top; // Skip top... 
while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA LANDSCAPE ", 16) == 0) { // Landscape on/off... comment += 16; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; tof = 1; } if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; check_pages(*page); if (strncasecmp(comment, "OFF", 3) == 0 || tolower(comment[0]) == 'n') { if (Landscape) { *right = PageLength - PageRight - *right; PagePrintWidth = PageWidth - PageRight - PageLeft; *right = PageWidth - PageRight - *right; *top = PageWidth - PageTop - *top; PagePrintLength = PageLength - PageTop - PageBottom; *top = PageLength - PageTop - *top; } Landscape = pages[*page].landscape = 0; } else if (strncasecmp(comment, "ON", 2) == 0 || tolower(comment[0]) == 'y') { if (!Landscape) { *top = PageLength - PageTop - *top; PagePrintLength = PageWidth - PageTop - PageBottom; *top = PageWidth - PageTop - *top; *right = PageWidth - PageRight - *right; PagePrintWidth = PageLength - PageRight - PageLeft; *right = PageLength - PageRight - *right; } Landscape = pages[*page].landscape = 1; } *y = *top; // Skip landscape... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA DUPLEX ", 13) == 0) { // Duplex printing on/off... comment += 13; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; *y = *top; tof = 1; } if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; check_pages(*page); if (strncasecmp(comment, "OFF", 3) == 0 || tolower(comment[0]) == 'n') PageDuplex = pages[*page].duplex = 0; else if (strncasecmp(comment, "ON", 2) == 0 || tolower(comment[0]) == 'y') { if ((*page) & 1) { (*page) ++; check_pages(*page); if (Verbosity) progress_show("Formatting page %d", *page); } PageDuplex = pages[*page].duplex = 1; } // Skip duplex... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "HEADER ", 7) == 0) { // Header string... comment += 7; while (isspace(*comment)) comment ++; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... 
tof = (*y >= *top); } if (strncasecmp(comment, "LEFT", 4) == 0 && isspace(comment[4])) { pos = 0; comment += 4; } else if (strncasecmp(comment, "CENTER", 6) == 0 && isspace(comment[6])) { pos = 1; comment += 6; } else if (strncasecmp(comment, "RIGHT", 5) == 0 && isspace(comment[5])) { pos = 2; comment += 5; } else { progress_error(HD_ERROR_BAD_COMMENT, "Bad HEADER position: \"%s\"", comment); return; } while (isspace(*comment)) comment ++; if (*comment != '\"') { progress_error(HD_ERROR_BAD_COMMENT, "Bad HEADER string: \"%s\"", comment); return; } for (ptr = buffer, comment ++; *comment && *comment != '\"'; comment ++) { if (*comment == '\\') comment ++; if (ptr < (buffer + sizeof(buffer) - 1)) *ptr++ = *comment; } if (*comment == '\"') comment ++; *ptr = '\0'; if (ptr > buffer) Header[pos] = strdup(buffer); else Header[pos] = NULL; if (tof) { DEBUG_printf(("Setting header %d for page %d to \"%s\"...\n", pos, *page, Header[pos] ? Header[pos] : "(null)")); check_pages(*page); pages[*page].header[pos] = (uchar *)Header[pos]; } // Adjust top margin as needed... float adjust, image_adjust, temp_adjust; if (maxhfheight > HeadFootSize) image_adjust = (float)(maxhfheight + HeadFootSize); else image_adjust = (float)(2 * HeadFootSize); for (adjust = 0.0, pos = 0; pos < 3; pos ++) { if (Header[pos] && (strstr(Header[pos], "$IMAGE") != NULL || strstr(Header[pos], "$HFIMAGE") != NULL)) temp_adjust = image_adjust; else if (Header1[pos] && (strstr(Header1[pos], "$IMAGE") != NULL || strstr(Header1[pos], "$HFIMAGE") != NULL)) temp_adjust = image_adjust; else if (Header[pos] || Header1[pos]) temp_adjust = (float)(2 * HeadFootSize); else temp_adjust = 0.0; if (temp_adjust > adjust) adjust = temp_adjust; } *top = PagePrintLength - adjust; if (tof) *y = *top; } else if (strncasecmp(comment, "HEADER1 ", 8) == 0) { // First page header string... comment += 8; while (isspace(*comment)) comment ++; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (strncasecmp(comment, "LEFT", 4) == 0 && isspace(comment[4])) { pos = 0; comment += 4; } else if (strncasecmp(comment, "CENTER", 6) == 0 && isspace(comment[6])) { pos = 1; comment += 6; } else if (strncasecmp(comment, "RIGHT", 5) == 0 && isspace(comment[5])) { pos = 2; comment += 5; } else { progress_error(HD_ERROR_BAD_COMMENT, "Bad HEADER1 position: \"%s\"", comment); return; } while (isspace(*comment)) comment ++; if (*comment != '\"') { progress_error(HD_ERROR_BAD_COMMENT, "Bad HEADER1 string: \"%s\"", comment); return; } for (ptr = buffer, comment ++; *comment && *comment != '\"'; comment ++) { if (*comment == '\\') comment ++; if (ptr < (buffer + sizeof(buffer) - 1)) *ptr++ = *comment; } if (*comment == '\"') comment ++; *ptr = '\0'; if (ptr > buffer) Header1[pos] = strdup(buffer); else Header1[pos] = NULL; // Adjust top margin as needed... 
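      // Each header position reserves 2 * HeadFootSize points, or the
      // header/footer image height plus HeadFootSize when a taller
      // $IMAGE/$HFIMAGE is used; the largest reservation across the three
      // positions becomes the new top margin.  Illustrative example (not
      // from the source): HeadFootSize = 11 and no header images gives
      // adjust = 22, so *top = PagePrintLength - 22.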
float adjust, image_adjust, temp_adjust; if (maxhfheight > HeadFootSize) image_adjust = (float)(maxhfheight + HeadFootSize); else image_adjust = (float)(2 * HeadFootSize); for (adjust = 0.0, pos = 0; pos < 3; pos ++) { if (Header[pos] && (strstr(Header[pos], "$IMAGE") != NULL || strstr(Header[pos], "$HFIMAGE") != NULL)) temp_adjust = image_adjust; else if (Header1[pos] && (strstr(Header1[pos], "$IMAGE") != NULL || strstr(Header1[pos], "$HFIMAGE") != NULL)) temp_adjust = image_adjust; else if (Header[pos] || Header1[pos]) temp_adjust = (float)(2 * HeadFootSize); else temp_adjust = 0.0; if (temp_adjust > adjust) adjust = temp_adjust; } *top = PagePrintLength - adjust; if (tof) *y = *top; } else if (strncasecmp(comment, "FOOTER ", 7) == 0) { // Footer string... comment += 7; while (isspace(*comment)) comment ++; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (strncasecmp(comment, "LEFT", 4) == 0 && isspace(comment[4])) { pos = 0; comment += 4; } else if (strncasecmp(comment, "CENTER", 6) == 0 && isspace(comment[6])) { pos = 1; comment += 6; } else if (strncasecmp(comment, "RIGHT", 5) == 0 && isspace(comment[5])) { pos = 2; comment += 5; } else { progress_error(HD_ERROR_BAD_COMMENT, "Bad FOOTER position: \"%s\"", comment); return; } while (isspace(*comment)) comment ++; if (*comment != '\"') { progress_error(HD_ERROR_BAD_COMMENT, "Bad FOOTER string: \"%s\"", comment); return; } for (ptr = buffer, comment ++; *comment && *comment != '\"'; comment ++) { if (*comment == '\\') comment ++; if (ptr < (buffer + sizeof(buffer) - 1)) *ptr++ = *comment; } if (*comment == '\"') comment ++; *ptr = '\0'; if (ptr > buffer) Footer[pos] = strdup(buffer); else Footer[pos] = NULL; if (tof) { check_pages(*page); pages[*page].footer[pos] = (uchar *)Footer[pos]; } // Adjust bottom margin as needed... float adjust, image_adjust, temp_adjust; if (maxhfheight > HeadFootSize) image_adjust = (float)(maxhfheight + HeadFootSize); else image_adjust = (float)(2 * HeadFootSize); for (adjust = 0.0, pos = 0; pos < 3; pos ++) { if (Footer[pos] && (strstr(Footer[pos], "$IMAGE") != NULL || strstr(Footer[pos], "$HFIMAGE") != NULL)) temp_adjust = image_adjust; else if (Footer[pos]) temp_adjust = (float)(2 * HeadFootSize); else temp_adjust = 0.0; if (temp_adjust > adjust) adjust = temp_adjust; } *bottom = adjust; } else if (strncasecmp(comment, "NUMBER-UP ", 10) == 0) { // N-up printing... comment += 10; while (isspace(*comment)) comment ++; if (!*comment) break; NumberUp = strtol(comment, (char **)&comment, 10); if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... 
tof = (*y >= *top); } if (tof) { check_pages(*page); pages[*page].nup = NumberUp; } } else break; } DEBUG_printf(("LEAVING parse_comment() x=%.1f, y=%.1f, page=%d\n", *x, *y, *page)); DEBUG_printf((" PagePrintWidth = %d\n", PagePrintWidth)); DEBUG_printf(("PagePrintLength = %d\n", PagePrintLength)); DEBUG_printf((" PageWidth = %d\n", PageWidth)); DEBUG_printf((" PageLength = %d\n", PageLength)); DEBUG_printf((" PageLeft = %d\n", PageLeft)); DEBUG_printf((" PageBottom = %d\n", PageBottom)); DEBUG_printf((" PageRight = %d\n", PageRight)); DEBUG_printf((" PageTop = %d\n", PageTop)); DEBUG_printf((" Landscape = %d\n", Landscape)); } #ifdef COMMENT_DEBUG # undef DEBUG # undef DEBUG_puts # define DEBUG_puts(x) # undef DEBUG_printf # define DEBUG_printf(x) #endif /* COMMENT_DEBUG */ /* * 'find_background()' - Find the background image/color for the given document. */ static void find_background(tree_t *t) /* I - Document to search */ { uchar *var; /* BGCOLOR/BACKGROUND variable */ /* * First see if the --bodycolor or --bodyimage options have been * specified... */ if (BodyImage[0] != '\0') { background_image = image_load(BodyImage, !OutputColor); return; } else if (BodyColor[0] != '\0') { get_color((uchar *)BodyColor, background_color, 0); return; } /* * If not, search the document tree... */ while (t != NULL && background_image == NULL && background_color[0] == 1.0 && background_color[1] == 1.0 && background_color[2] == 1.0) { if (t->markup == MARKUP_BODY) { if ((var = htmlGetVariable(t, (uchar *)"BACKGROUND")) != NULL) background_image = image_load((char *)var, !OutputColor); if ((var = htmlGetVariable(t, (uchar *)"BGCOLOR")) != NULL) get_color(var, background_color, 0); } if (t->child != NULL) find_background(t->child); t = t->next; } } /* * 'write_background()' - Write the background image/color for to the current * page. 
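 *
 * A non-white background color is painted first; a background image is
 * then tiled across the page, with explicit "cm ... Do" sequences for PDF
 * output and emitted for-loops for PostScript output.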
*/ static void write_background(int page, /* I - Page we are writing for */ FILE *out) /* I - File to write to */ { float x, y; float width, height; int page_width, page_length; if (Landscape) { page_length = pages[page].width; page_width = pages[page].length; } else { page_width = pages[page].width; page_length = pages[page].length; } if (background_color[0] != 1.0 || background_color[1] != 1.0 || background_color[2] != 1.0) { if (PSLevel > 0) { render_x = -1.0; render_y = -1.0; set_color(out, background_color); fprintf(out, "0 0 M %d %d F\n", page_width, page_length); } else { set_color(out, background_color); flate_printf(out, "0 0 %d %d re f\n", page_width, page_length); } } if (background_image != NULL) { width = (float)(background_image->width * 72.0f / _htmlPPI); height = (float)(background_image->height * 72.0f / _htmlPPI); if (width < 1.0f) width = 1.0f; if (height < 1.0f) height = 1.0f; switch (PSLevel) { case 0 : for (x = 0.0; x < page_width; x += width) for (y = page_length; y >= 0.0f;) { y -= height; flate_printf(out, "q %.1f 0 0 %.1f %.1f %.1f cm", width, height, x, y); flate_printf(out, "/I%d Do\n", background_image->obj); flate_puts("Q\n", out); } break; default : fprintf(out, "0 %.1f %d{/y exch neg %d add def\n", height, page_length + (int)height - 1, page_length); fprintf(out, "0 %.1f %d{/x exch def\n", width, page_width); fprintf(out, "GS[%.1f 0 0 %.1f x y]CM/iy -1 def\n", width, height); fprintf(out, "%d %d 8[%d 0 0 %d 0 %d]", background_image->width, background_image->height, background_image->width, -background_image->height, background_image->height); fputs("{/iy iy 1 add def BG iy get}", out); if (background_image->depth == 1) fputs("image\n", out); else fputs("false 3 colorimage\n", out); fputs("GR}for}for\n", out); break; } } } /* * 'new_render()' - Allocate memory for a new rendering structure. */ static render_t * /* O - New render structure */ new_render(int page, /* I - Page number (0-n) */ int type, /* I - Type of render primitive */ double x, /* I - Horizontal position */ double y, /* I - Vertical position */ double width, /* I - Width */ double height, /* I - Height */ void *data, /* I - Data */ render_t *insert) /* I - Insert before here... */ { render_t *r; /* New render primitive */ size_t datalen = 0; /* Length of data */ static render_t dummy; /* Dummy var for errors... */ DEBUG_printf(("new_render(page=%d, type=%d, x=%.1f, y=%.1f, width=%.1f, height=%.1f, data=%p, insert=%p)\n", page, type, x, y, width, height, (void *)data, (void *)insert)); check_pages(page); if (page < 0 || page >= (int)alloc_pages) { progress_error(HD_ERROR_INTERNAL_ERROR, "Page number (%d) out of range (1...%d)\n", page + 1, (int)alloc_pages); memset(&dummy, 0, sizeof(dummy)); return (&dummy); } if ((type != RENDER_TEXT && type != RENDER_LINK) || data == NULL) r = (render_t *)calloc(sizeof(render_t), 1); else { datalen = strlen((char *)data); r = (render_t *)calloc(sizeof(render_t) + datalen, 1); } if (r == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory on page %d\n", (int)page + 1); memset(&dummy, 0, sizeof(dummy)); return (&dummy); } r->type = type; r->x = (float)x; r->y = (float)y; r->width = (float)width; r->height = (float)height; switch (type) { case RENDER_TEXT : if (data == NULL) { free(r); return (NULL); } // Safe because buffer is allocated... 
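	  // (r was calloc'ed above with an extra datalen bytes, so the
	  // copied text plus its NUL terminator from the zero fill fits in
	  // the trailing text buffer.)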
memcpy((char *)r->data.text.buffer, (char *)data, datalen); get_color(_htmlTextColor, r->data.text.rgb); break; case RENDER_IMAGE : if (data == NULL) { free(r); return (NULL); } r->data.image = (image_t *)data; break; case RENDER_BOX : memcpy(r->data.box, data, sizeof(r->data.box)); break; case RENDER_LINK : if (data == NULL) { free(r); return (NULL); } // Safe because buffer is allocated... memcpy((char *)r->data.link, (char *)data, datalen); break; } if (insert) { if (insert->prev) insert->prev->next = r; else pages[page].start = r; r->prev = insert->prev; r->next = insert; insert->prev = r; } else { if (pages[page].end != NULL) pages[page].end->next = r; else pages[page].start = r; r->next = NULL; r->prev = pages[page].end; pages[page].end = r; } DEBUG_printf((" returning r = %p\n", (void *)r)); return (r); } /* * 'check_pages()' - Allocate memory for more pages as needed... */ static void check_pages(int page) // I - Current page { page_t *temp; // Temporary page pointer DEBUG_printf(("check_pages(%d)\n", page)); // See if we need to allocate memory for the page... if (page >= (int)alloc_pages) { // Yes, allocate enough for ALLOC_PAGES more pages... while (page >= (int)alloc_pages) alloc_pages += ALLOC_PAGES; // Do the pages pointers... if (num_pages == 0) temp = (page_t *)malloc(sizeof(page_t) * alloc_pages); else temp = (page_t *)realloc(pages, sizeof(page_t) * alloc_pages); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d pages - %s", (int)alloc_pages, strerror(errno)); alloc_pages -= ALLOC_PAGES; return; } memset(temp + num_pages, 0, (alloc_pages - num_pages) * sizeof(page_t)); pages = temp; } // Initialize the page data as needed... for (temp = pages + num_pages; (int)num_pages <= page; num_pages ++, temp ++) { if (!temp->width) { if (num_pages == 0 || !temp[-1].width || !temp[-1].length || chapter == 0) { temp->width = PageWidth; temp->length = PageLength; temp->left = PageLeft; temp->right = PageRight; temp->top = PageTop; temp->bottom = PageBottom; temp->duplex = PageDuplex; temp->landscape = Landscape; temp->nup = NumberUp; } else { memcpy(temp, temp - 1, sizeof(page_t)); temp->start = NULL; temp->end = NULL; } temp->url = current_url; if (chapter == 0) { memcpy(temp->header, TocHeader, sizeof(temp->header)); memcpy(temp->footer, TocFooter, sizeof(temp->footer)); } else { memcpy(temp->header, Header, sizeof(temp->header)); memcpy(temp->header1, Header1, sizeof(temp->header1)); memcpy(temp->footer, Footer, sizeof(temp->footer)); if (current_heading != temp->headnode) { temp->heading = htmlGetText(current_heading); temp->headnode = current_heading; } } memcpy(temp->background_color, background_color, sizeof(temp->background_color)); temp->background_image = background_image; } } } /* * 'add_link()' - Add a named link... */ static void add_link(uchar *name, /* I - Name of link */ int page, /* I - Page # */ int top) /* I - Y position */ { link_t *temp; /* New name */ if (name == NULL) return; DEBUG_printf(("add_link(name=\"%s\", page=%d, top=%d)\n", name, page, top)); if ((temp = find_link(name)) != NULL) { temp->page = (short)page; temp->top = (short)top; } else { // See if we need to allocate memory for links... if (num_links >= alloc_links) { // Allocate more links... 
alloc_links += ALLOC_LINKS; if (num_links == 0) temp = (link_t *)malloc(sizeof(link_t) * alloc_links); else temp = (link_t *)realloc(links, sizeof(link_t) * alloc_links); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d links - %s", (int)alloc_links, strerror(errno)); alloc_links -= ALLOC_LINKS; return; } links = temp; } // Add a new link... temp = links + num_links; num_links ++; strlcpy((char *)temp->name, (char *)name, sizeof(temp->name)); temp->page = (short)page; temp->top = (short)top; if (num_links > 1) qsort(links, num_links, sizeof(link_t), (compare_func_t)compare_links); } } /* * 'find_link()' - Find a named link... */ static link_t * find_link(uchar *name) /* I - Name to find */ { link_t key, /* Search key */ *match; /* Matching name entry */ if (name == NULL || num_links == 0) return (NULL); if (name[0] == '#') name ++; strlcpy((char *)key.name, (char *)name, sizeof(key.name)); match = (link_t *)bsearch(&key, links, num_links, sizeof(link_t), (compare_func_t)compare_links); return (match); } /* * 'compare_links()' - Compare two named links. */ static int /* O - 0 = equal, -1 or 1 = not equal */ compare_links(link_t *n1, /* I - First name */ link_t *n2) /* I - Second name */ { return (strcasecmp((char *)n1->name, (char *)n2->name)); } #ifdef TABLE_DEBUG # undef DEBUG_printf # undef DEBUG_puts # define DEBUG_printf(x) printf x # define DEBUG_puts(x) puts(x) #endif /* TABLE_DEBUG */ // // 'get_cell_size()' - Compute the minimum width of a cell. // static float // O - Required width of cell get_cell_size(tree_t *t, // I - Cell float left, // I - Left margin float right, // I - Right margin float *minwidth, // O - Minimum width float *prefwidth, // O - Preferred width float *minheight) // O - Minimum height { tree_t *temp, // Current tree entry *next; // Next tree entry uchar *var; // Attribute value int nowrap; // NOWRAP attribute? float width, // Width of cell frag_width, // Fragment required width frag_height, // Fragment height frag_pref, // Fragment preferred width frag_min, // Fragment minimum width minh, // Local minimum height minw, // Local minimum width prefw, // Local preferred width format_width; // Working format width for images DEBUG_printf(("get_cell_size(%p, %.1f, %.1f, %p, %p, %p)\n", (void *)t, left, right, (void *)minwidth, (void *)prefwidth, (void *)minheight)); // First see if the width has been specified for this cell... if ((var = htmlGetVariable(t, (uchar *)"WIDTH")) != NULL && (var[strlen((char *)var) - 1] != '%' || (right - left) > 0.0f)) { // Yes, use it! if (var[strlen((char *)var) - 1] == '%') width = (right - left) * atoi((char *)var) * 0.01f; else width = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else width = 0.0f; if ((format_width = right - left) <= 0.0f) format_width = PagePrintWidth; minw = 0.0f; prefw = 0.0f; // Then the height... if ((var = htmlGetVariable(t, (uchar *)"HEIGHT")) != NULL) { // Yes, use it! if (var[strlen((char *)var) - 1] == '%') minh = PagePrintLength * atoi((char *)var) * 0.01f; else minh = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else minh = 0.0f; nowrap = (htmlGetVariable(t, (uchar *)"NOWRAP") != NULL); DEBUG_printf(("nowrap = %d\n", nowrap)); for (temp = t->child, frag_width = 0.0f, frag_pref = 0.0f; temp != NULL; temp = next) { // Point to next markup, if any... next = temp->child; switch (temp->markup) { case MARKUP_TABLE : // Update widths... 
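	  // A nested table ends the current text fragment; fold the
	  // fragment's running widths into the cell's minimum/preferred
	  // widths before measuring the table itself with get_table_size().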
if (frag_pref > prefw) prefw = frag_pref; if (frag_width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for block...\n", frag_width, minw)); minw = frag_width; } if (nowrap && frag_pref > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for break...\n", frag_pref, minw)); minw = frag_pref; } // For nested tables, compute the width of the table. frag_width = get_table_size(temp, left, right, &frag_min, &frag_pref, &frag_height); if (frag_pref > prefw) prefw = frag_pref; if (frag_min > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for nested table...\n", frag_min, minw)); minw = frag_min; } frag_width = 0.0f; frag_pref = 0.0f; frag_min = 0.0f; next = NULL; break; case MARKUP_IMG : // Update the image width as needed... if (temp->markup == MARKUP_IMG) update_image_size(temp); case MARKUP_NONE : case MARKUP_SPACER : frag_height = temp->height; #ifdef TABLE_DEBUG2 if (temp->markup == MARKUP_NONE) printf("FRAG(%s) = %.1f\n", temp->data, temp->width); else if (temp->markup == MARKUP_SPACER) printf("SPACER = %.1f\n", temp->width); else printf("IMG(%s) = %.1f\n", htmlGetVariable(temp, (uchar *)"SRC"), temp->width); #endif // TABLE_DEBUG2 // Handle min/preferred widths separately... if (temp->width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for fragment...\n", temp->width, minw)); minw = temp->width; } if (temp->preformatted && temp->data != NULL && temp->data[strlen((char *)temp->data) - 1] == '\n') { // End of a line - check preferred width... frag_pref += temp->width + 1; if (frag_pref > prefw) prefw = frag_pref; if (temp->preformatted && frag_pref > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for preformatted...\n", frag_pref, minw)); minw = frag_pref; } frag_pref = 0.0f; } else if (temp->data != NULL) frag_pref += temp->width + 1; else if ((frag_pref + temp->width) > format_width) { // parse_paragraph() will force a break if (frag_pref > prefw) prefw = frag_pref; frag_pref = temp->width; } else frag_pref += temp->width; if (temp->preformatted && temp->data != NULL && temp->data[strlen((char *)temp->data) - 1] == '\n') { // Check required width... frag_width += temp->width + 1; if (frag_width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for block...\n", frag_width, minw)); minw = frag_width; } frag_width = 0.0f; } else if (!temp->preformatted && temp->data != NULL && (isspace(temp->data[0]) || (temp->data[0] && isspace(temp->data[strlen((char *)temp->data) - 1])))) { // Check required width... 
if (isspace(temp->data[0])) frag_width = temp->width + 1; else frag_width += temp->width + 1; if (frag_width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for block...\n", frag_width, minw)); minw = frag_width; } if (!isspace(temp->data[0])) frag_width = 0.0f; DEBUG_printf(("frag_width=%.1f after whitespace processing...\n", frag_width)); } else if (temp->data != NULL) frag_width += temp->width + 1; else if ((frag_width + temp->width) > format_width) // parse_paragraph() will force a break frag_width = temp->width; else frag_width += temp->width; break; case MARKUP_ADDRESS : case MARKUP_BLOCKQUOTE : case MARKUP_BR : case MARKUP_CENTER : case MARKUP_DD : case MARKUP_DIV : case MARKUP_DT : case MARKUP_H1 : case MARKUP_H2 : case MARKUP_H3 : case MARKUP_H4 : case MARKUP_H5 : case MARKUP_H6 : case MARKUP_H7 : case MARKUP_H8 : case MARKUP_H9 : case MARKUP_H10 : case MARKUP_H11 : case MARKUP_H12 : case MARKUP_H13 : case MARKUP_H14 : case MARKUP_H15 : case MARKUP_HR : case MARKUP_LI : case MARKUP_P : case MARKUP_PRE : DEBUG_printf(("BREAK at %.1f\n", frag_pref)); if (frag_pref > prefw) prefw = frag_pref; if (frag_width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for block...\n", frag_width, minw)); minw = frag_width; } if (nowrap && frag_pref > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for break...\n", frag_pref, minw)); minw = frag_pref; } frag_pref = 0.0f; frag_width = 0.0f; default : frag_height = 0.0f; break; } // Update minimum height... if (frag_height > minh) minh = frag_height; // Update next pointer as needed... if (next == NULL) next = temp->next; if (next == NULL) { // This code is almost funny if you say it fast... :) for (next = temp->parent; next != NULL && next != t; next = next->parent) if (next->next != NULL) break; if (next == t) next = NULL; else if (next) next = next->next; } } // Check the last fragment's width... if (frag_pref > prefw) prefw = frag_pref; if (frag_width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for block...\n", frag_width, minw)); minw = frag_width; } // Handle the "NOWRAP" option... if (nowrap && prefw > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for NOWRAP...\n", prefw, minw)); minw = prefw; } // Return the required, minimum, and preferred size of the cell... *minwidth = minw; *prefwidth = prefw; *minheight = minh; DEBUG_printf(("get_cell_size(): width=%.1f, minw=%.1f, prefw=%.1f, minh=%.1f\n", width, minw, prefw, minh)); return (width); } // // 'get_table_size()' - Compute the minimum width of a table. 
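// Each row's summed cell minimum/preferred widths become candidates for
// the table's minimum/preferred width; cellpadding, cellspacing, and
// border are added afterwards.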
// static float // O - Minimum width of table get_table_size(tree_t *t, // I - Table float left, // I - Left margin float right, // I - Right margin float *minwidth, // O - Minimum width float *prefwidth, // O - Preferred width float *minheight) // O - Minimum height { tree_t *temp, // Current tree entry *next; // Next tree entry uchar *var; // Attribute value float width, // Required width of table minw, // Minimum width of table minh, // Minimum height of table prefw, // Preferred width of table cell_width, // Cell required width cell_pref, // Cell preferred width cell_min, // Cell minimum width cell_height, // Cell minimum height row_width, // Row required width row_pref, // Row preferred width row_min, // Row minimum width row_height, // Row minimum height border, // Border around cells cellpadding, // Padding inside cells cellspacing; // Spacing around cells int columns, // Current number of columns max_columns, // Maximum columns rows; // Number of rows DEBUG_printf(("get_table_size(%p, %.1f, %.1f, %p, %p, %p)\n", (void *)t, left, right, (void *)minwidth, (void *)prefwidth, (void *)minheight)); // First see if the width has been specified for this table... if ((var = htmlGetVariable(t, (uchar *)"WIDTH")) != NULL && (var[strlen((char *)var) - 1] != '%' || (right - left) > 0.0f)) { // Yes, use it! if (var[strlen((char *)var) - 1] == '%') width = (right - left) * atoi((char *)var) * 0.01f; else width = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else width = 0.0f; minw = 0.0f; prefw = 0.0f; // Then the height... if ((var = htmlGetVariable(t, (uchar *)"HEIGHT")) != NULL) { // Yes, use it! if (var[strlen((char *)var) - 1] == '%') minh = PagePrintLength * atoi((char *)var) * 0.01f; else minh = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else minh = 0.0f; // Update the size as needed... for (temp = t->child, row_width = 0.0f, row_min = 0.0f, row_pref = 0.0f, row_height = 0.0f, columns = 0, rows = 0, max_columns = 0; temp != NULL; temp = next) { // Point to next markup, if any... next = temp->child; // Start a new row or add the cell width as needed... if (temp->markup == MARKUP_TR) { minh += row_height; row_width = 0.0f; row_pref = 0.0f; row_min = 0.0f; row_height = 0.0f; rows ++; columns = 0; } else if (temp->markup == MARKUP_TD || temp->markup == MARKUP_TH) { // Update columns... columns ++; if (columns > max_columns) max_columns = columns; // Get widths of cell... cell_width = get_cell_size(temp, left, right, &cell_min, &cell_pref, &cell_height); // Update row widths... row_width += cell_width; row_pref += cell_pref; row_min += cell_min; if (cell_height > row_height) row_height = cell_height; // Check current row widths against table... if (row_pref > prefw) prefw = row_pref; if (row_min > minw) minw = row_min; } // Update next pointer as needed... if (next == NULL) next = temp->next; if (next == NULL) { // This code is almost funny if you say it fast... :) for (next = temp->parent; next != NULL && next != t; next = next->parent) if (next->next != NULL) break; if (next == t) next = NULL; else if (next) next = next->next; } } // Make sure last row is counted in min height calcs. minh += row_height; // Add room for spacing and padding... 
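  // The extra width is max_columns * (2 * cellpadding + cellspacing) -
  // cellspacing, scaled from browser pixels to points.  Illustrative
  // example (not from the source): 3 columns with an effective
  // cellpadding of 2 and cellspacing of 2 add 3 * (2 * 2 + 2) - 2 = 16
  // browser pixels before scaling.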
if ((var = htmlGetVariable(t, (uchar *)"CELLPADDING")) != NULL) cellpadding = atoi((char *)var); else cellpadding = 1.0f; if ((var = htmlGetVariable(t, (uchar *)"CELLSPACING")) != NULL) cellspacing = atoi((char *)var); else cellspacing = 0.0f; if ((var = htmlGetVariable(t, (uchar *)"BORDER")) != NULL) { if ((border = (float)atof((char *)var)) == 0.0 && var[0] != '0') border = 1.0f; cellpadding += border; } else border = 0.0f; if (border == 0.0f && cellpadding > 0.0f) { /* * Ah, the strange table formatting nightmare that is HTML. * Netscape and MSIE assign an invisible border width of 1 * pixel if no border is specified... */ cellpadding += 1.0f; } cellspacing *= PagePrintWidth / _htmlBrowserWidth; cellpadding *= PagePrintWidth / _htmlBrowserWidth; DEBUG_printf(("ADDING %.1f for table space for %d columns...\n", max_columns * (2 * cellpadding + cellspacing) - cellspacing, max_columns)); if (width > 0.0f) width += max_columns * (2 * cellpadding + cellspacing) - cellspacing; minw += max_columns * (2 * cellpadding + cellspacing) - cellspacing; prefw += max_columns * (2 * cellpadding + cellspacing) - cellspacing; minh += rows * (2 * cellpadding + cellspacing) - cellspacing; // Return the required, minimum, and preferred size of the table... *minwidth = minw; *prefwidth = prefw; *minheight = minh; DEBUG_printf(("get_table_size(): width=%.1f, minw=%.1f, prefw=%.1f, minh=%.1f\n", width, minw, prefw, minh)); return (width); } #ifdef TABLE_DEBUG # undef DEBUG_printf # undef DEBUG_puts # define DEBUG_printf(x) # define DEBUG_puts(x) #endif /* TABLE_DEBUG */ /* * 'flatten_tree()' - Flatten an HTML tree to only include the text, image, * link, and break markups. */ static tree_t * /* O - Flattened markup tree */ flatten_tree(tree_t *t) /* I - Markup tree to flatten */ { tree_t *temp, /* New tree node */ *flat; /* Flattened tree */ flat = NULL; while (t != NULL) { switch (t->markup) { case MARKUP_NONE : if (t->data == NULL) break; case MARKUP_COMMENT : case MARKUP_BR : case MARKUP_SPACER : case MARKUP_IMG : temp = (tree_t *)calloc(sizeof(tree_t), 1); memcpy(temp, t, sizeof(tree_t)); temp->parent = NULL; temp->child = NULL; temp->prev = flat; temp->next = NULL; if (flat != NULL) flat->next = temp; flat = temp; if (temp->markup == MARKUP_IMG) update_image_size(temp); break; case MARKUP_A : if (htmlGetVariable(t, (uchar *)"NAME") != NULL) { temp = (tree_t *)calloc(sizeof(tree_t), 1); memcpy(temp, t, sizeof(tree_t)); temp->parent = NULL; temp->child = NULL; temp->prev = flat; temp->next = NULL; if (flat != NULL) flat->next = temp; flat = temp; } break; case MARKUP_P : case MARKUP_PRE : case MARKUP_H1 : case MARKUP_H2 : case MARKUP_H3 : case MARKUP_H4 : case MARKUP_H5 : case MARKUP_H6 : case MARKUP_H7 : case MARKUP_H8 : case MARKUP_H9 : case MARKUP_H10 : case MARKUP_H11 : case MARKUP_H12 : case MARKUP_H13 : case MARKUP_H14 : case MARKUP_H15 : case MARKUP_UL : case MARKUP_DIR : case MARKUP_MENU : case MARKUP_OL : case MARKUP_DL : case MARKUP_LI : case MARKUP_DD : case MARKUP_DT : case MARKUP_TR : case MARKUP_CAPTION : temp = (tree_t *)calloc(sizeof(tree_t), 1); temp->markup = MARKUP_BR; temp->parent = NULL; temp->child = NULL; temp->prev = flat; temp->next = NULL; if (flat != NULL) flat->next = temp; flat = temp; break; default : break; } if (t->child != NULL && t->markup != MARKUP_UNKNOWN) { temp = flatten_tree(t->child); if (temp != NULL) temp->prev = flat; if (flat != NULL) flat->next = temp; else flat = temp; } if (flat != NULL) while (flat->next != NULL) flat = flat->next; t = t->next; } if (flat == 
NULL) return (NULL); while (flat->prev != NULL) flat = flat->prev; return (flat); } /* * 'update_image_size()' - Update the size of an image based upon the * printable width. */ static void update_image_size(tree_t *t) /* I - Tree entry */ { image_t *img; /* Image file */ uchar *width, /* Width string */ *height; /* Height string */ width = htmlGetVariable(t, (uchar *)"WIDTH"); height = htmlGetVariable(t, (uchar *)"HEIGHT"); if (width != NULL && height != NULL) { if (width[strlen((char *)width) - 1] == '%') t->width = (float)(atof((char *)width) * PagePrintWidth / 100.0f); else t->width = (float)(atoi((char *)width) * PagePrintWidth / _htmlBrowserWidth); if (height[strlen((char *)height) - 1] == '%') t->height = (float)(atof((char *)height) * PagePrintWidth / 100.0f); else t->height = (float)(atoi((char *)height) * PagePrintWidth / _htmlBrowserWidth); return; } img = image_find((char *)htmlGetVariable(t, (uchar *)"REALSRC")); if (img == NULL) return; if (width != NULL) { if (width[strlen((char *)width) - 1] == '%') t->width = (float)(atof((char *)width) * PagePrintWidth / 100.0f); else t->width = (float)(atoi((char *)width) * PagePrintWidth / _htmlBrowserWidth); t->height = t->width * img->height / img->width; } else if (height != NULL) { if (height[strlen((char *)height) - 1] == '%') t->height = (float)(atof((char *)height) * PagePrintWidth / 100.0f); else t->height = (float)(atoi((char *)height) * PagePrintWidth / _htmlBrowserWidth); t->width = t->height * img->width / img->height; } else { t->width = (float)(img->width * PagePrintWidth / _htmlBrowserWidth); t->height = (float)(img->height * PagePrintWidth / _htmlBrowserWidth); } } /* * 'get_width()' - Get the width of a string in points. */ static float /* O - Width in points */ get_width(uchar *s, /* I - String to scan */ int typeface, /* I - Typeface code */ int style, /* I - Style code */ int size) /* I - Size */ { uchar *ptr; /* Current character */ int width; /* Current width */ DEBUG_printf(("get_width(\"%s\", %d, %d, %d)\n", s == NULL ? "(null)" : (const char *)s, typeface, style, size)); if (s == NULL) return (0.0); if (!_htmlWidthsLoaded[typeface][style]) htmlLoadFontWidths(typeface, style); for (width = 0, ptr = s; *ptr != '\0'; ptr ++) width += _htmlWidths[typeface][style][*ptr]; return (width * _htmlSizes[size] * 0.001f); } /* * 'get_title()' - Get the title string for a document. */ static uchar * /* O - Title string */ get_title(tree_t *doc) /* I - Document */ { uchar *temp; while (doc != NULL) { if (doc->markup == MARKUP_TITLE) return (htmlGetText(doc->child)); else if (doc->child != NULL) if ((temp = get_title(doc->child)) != NULL) return (temp); doc = doc->next; } return (NULL); } /* * 'open_file()' - Open an output file for the current chapter. */ static FILE * /* O - File pointer */ open_file(void) { char filename[255]; /* Filename */ if (OutputFiles && PSLevel > 0) { if (chapter == -1) snprintf(filename, sizeof(filename), "%s/cover.ps", OutputPath); else if (chapter == 0) snprintf(filename, sizeof(filename), "%s/contents.ps", OutputPath); else snprintf(filename, sizeof(filename), "%s/doc%d.ps", OutputPath, chapter); return (fopen(filename, "wb+")); } else if (OutputFiles) { snprintf(filename, sizeof(filename), "%s/doc.pdf", OutputPath); return (fopen(filename, "wb+")); } else if (OutputPath[0] != '\0') return (fopen(OutputPath, "wb+")); else if (PSLevel == 0) return (file_temp(stdout_filename, sizeof(stdout_filename))); else return (stdout); } /* * 'set_color()' - Set the current text color... 
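 *
 * The last color written is cached in render_rgb[] so repeated calls with
 * the same color produce no output; when color output is disabled the
 * value is collapsed to gray with 0.31/0.61/0.08 weights.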
*/ static void set_color(FILE *out, /* I - File to write to */ float *rgb) /* I - RGB color */ { if (rgb[0] == render_rgb[0] && rgb[1] == render_rgb[1] && rgb[2] == render_rgb[2]) return; render_rgb[0] = rgb[0]; render_rgb[1] = rgb[1]; render_rgb[2] = rgb[2]; if (OutputColor) { // Output RGB color... if (PSLevel > 0) fprintf(out, "%.2f %.2f %.2f C ", rgb[0], rgb[1], rgb[2]); else flate_printf(out, "%.2f %.2f %.2f rg ", rgb[0], rgb[1], rgb[2]); } else { // Output grayscale... if (PSLevel > 0) fprintf(out, "%.2f G ", rgb[0] * 0.31f + rgb[1] * 0.61f + rgb[2] * 0.08f); else flate_printf(out, "%.2f g ", rgb[0] * 0.31f + rgb[1] * 0.61f + rgb[2] * 0.08f); } } /* * 'set_font()' - Set the current text font. */ static void set_font(FILE *out, /* I - File to write to */ int typeface, /* I - Typeface code */ int style, /* I - Style code */ float size) /* I - Size */ { char sizes[255], /* Formatted string for size... */ *s; /* Pointer to end of string */ if (typeface == render_typeface && style == render_style && size == render_size) return; /* * Format size and strip trailing 0's and decimals... */ snprintf(sizes, sizeof(sizes), "%.1f", size); for (s = sizes + strlen(sizes) - 1; s > sizes && *s == '0'; s --) *s = '\0'; if (*s == '.') *s = '\0'; /* * Set the new typeface, style, and size. */ if (PSLevel > 0) { if (size != render_size) fprintf(out, "%s FS", sizes); fprintf(out, "/F%x SF ", typeface * 4 + style); } else flate_printf(out, "/F%x %s Tf ", typeface * 4 + style, sizes); render_typeface = typeface; render_style = style; render_size = size; } /* * 'set_pos()' - Set the current text position. */ static void set_pos(FILE *out, /* I - File to write to */ float x, /* I - X position */ float y) /* I - Y position */ { char xs[255], /* Formatted string for X... */ ys[255], /* Formatted string for Y... */ *s; /* Pointer to end of string */ if (fabs(render_x - x) < 0.1 && fabs(render_y - y) < 0.1) return; /* * Format X and Y... */ if (PSLevel > 0 || render_x == -1.0) { snprintf(xs, sizeof(xs), "%.3f", x); snprintf(ys, sizeof(ys), "%.3f", y); } else { snprintf(xs, sizeof(xs), "%.3f", x - render_startx); snprintf(ys, sizeof(ys), "%.3f", y - render_y); } /* * Strip trailing 0's and decimals... */ for (s = xs + strlen(xs) - 1; s > xs && *s == '0'; s --) *s = '\0'; if (*s == '.') *s = '\0'; for (s = ys + strlen(ys) - 1; s > ys && *s == '0'; s --) *s = '\0'; if (*s == '.') *s = '\0'; if (PSLevel > 0) fprintf(out, "%s %s M", xs, ys); else flate_printf(out, "%s %s Td", xs, ys); render_x = render_startx = x; render_y = y; } /* * 'ps_hex()' - Print binary data as a series of hexadecimal numbers. */ static void ps_hex(FILE *out, /* I - File to print to */ uchar *data, /* I - Data to print */ int length) /* I - Number of bytes to print */ { int col; static const char *hex = "0123456789ABCDEF"; col = 0; while (length > 0) { /* * Put the hex uchars out to the file; note that we don't use fprintf() * for speed reasons... */ putc(hex[*data >> 4], out); putc(hex[*data & 15], out); data ++; length --; col = (col + 1) % 40; if (col == 0) putc('\n', out); } if (col > 0) putc('\n', out); } #ifdef HTMLDOC_ASCII85 /* * 'ps_ascii85()' - Print binary data as a series of base-85 numbers. 
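 *
 * Standard ASCII85: each group of 4 input bytes forms a 32-bit word
 * written as 5 characters in the range '!'..'u', with an all-zero word
 * abbreviated to 'z'.  Partial groups are buffered in leftdata[] between
 * calls and flushed, followed by the "~>" end marker, when eod is set.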
*/ static void ps_ascii85(FILE *out, /* I - File to print to */ uchar *data, /* I - Data to print */ int length, /* I - Number of bytes to print */ int eod) /* I - 1 = end-of-data */ { unsigned b = 0; /* Current 32-bit word */ uchar c[5]; /* Base-85 encoded characters */ static int col = 0; /* Column */ static uchar leftdata[4]; /* Leftover data at the end */ static int leftcount = 0; /* Size of leftover data */ length += leftcount; while (length > 3) { switch (leftcount) { case 0 : b = (unsigned)((((((data[0] << 8) | data[1]) << 8) | data[2]) << 8) | data[3]); break; case 1 : b = (unsigned)((((((leftdata[0] << 8) | data[0]) << 8) | data[1]) << 8) | data[2]); break; case 2 : b = (unsigned)((((((leftdata[0] << 8) | leftdata[1]) << 8) | data[0]) << 8) | data[1]); break; case 3 : b = (unsigned)((((((leftdata[0] << 8) | leftdata[1]) << 8) | leftdata[2]) << 8) | data[0]); break; } if (col >= 76) { col = 0; putc('\n', out); } if (b == 0) { putc('z', out); col ++; } else { c[4] = (b % 85) + '!'; b /= 85; c[3] = (b % 85) + '!'; b /= 85; c[2] = (b % 85) + '!'; b /= 85; c[1] = (b % 85) + '!'; b /= 85; c[0] = (uchar)(b + '!'); fwrite(c, 1, 5, out); col += 5; } data += 4 - leftcount; length -= 4 - leftcount; leftcount = 0; } if (length > 0) { // Copy any remainder into the leftdata array... if ((length - leftcount) > 0) memcpy(leftdata + leftcount, data, (size_t)(length - leftcount)); memset(leftdata + length, 0, (size_t)(4 - length)); leftcount = length; } if (eod) { // Do the end-of-data dance... if (col >= 76) { col = 0; putc('\n', out); } if (leftcount > 0) { // Write the remaining bytes as needed... b = (unsigned)((((((leftdata[0] << 8) | leftdata[1]) << 8) | leftdata[2]) << 8) | leftdata[3]); c[4] = (b % 85) + '!'; b /= 85; c[3] = (b % 85) + '!'; b /= 85; c[2] = (b % 85) + '!'; b /= 85; c[1] = (b % 85) + '!'; b /= 85; c[0] = (uchar)(b + '!'); fwrite(c, (size_t)(leftcount + 1), 1, out); leftcount = 0; } fputs("~>\n", out); col = 0; } } #endif // HTMLDOC_ASCII85 /* * JPEG library destination data manager. These routines direct * compressed data from libjpeg into the PDF or PostScript file. */ static FILE *jpg_file; /* JPEG file */ static uchar jpg_buf[8192]; /* JPEG buffer */ static jpeg_destination_mgr jpg_dest; /* JPEG destination manager */ static struct jpeg_error_mgr jerr; /* JPEG error handler */ /* * 'jpg_init()' - Initialize the JPEG destination. */ static void jpg_init(j_compress_ptr cinfo) /* I - Compressor info */ { (void)cinfo; jpg_dest.next_output_byte = jpg_buf; jpg_dest.free_in_buffer = sizeof(jpg_buf); } /* * 'jpg_empty()' - Empty the JPEG output buffer. */ static boolean /* O - True if buffer written OK */ jpg_empty(j_compress_ptr cinfo) /* I - Compressor info */ { (void)cinfo; if (PSLevel > 0) #ifdef HTMLDOC_ASCII85 ps_ascii85(jpg_file, jpg_buf, sizeof(jpg_buf)); #else ps_hex(jpg_file, jpg_buf, sizeof(jpg_buf)); #endif // HTMLDOC_ASCII85 else flate_write(jpg_file, jpg_buf, sizeof(jpg_buf)); jpg_dest.next_output_byte = jpg_buf; jpg_dest.free_in_buffer = sizeof(jpg_buf); return (TRUE); } /* * 'jpg_term()' - Write the last JPEG data to the file. */ static void jpg_term(j_compress_ptr cinfo) /* I - Compressor info */ { int nbytes; /* Number of bytes to write */ (void)cinfo; nbytes = sizeof(jpg_buf) - jpg_dest.free_in_buffer; if (PSLevel > 0) #ifdef HTMLDOC_ASCII85 ps_ascii85(jpg_file, jpg_buf, nbytes); #else ps_hex(jpg_file, jpg_buf, nbytes); #endif // HTMLDOC_ASCII85 else flate_write(jpg_file, jpg_buf, nbytes); } /* * 'jpg_setup()' - Setup the JPEG compressor for writing an image. 
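 *
 * The custom destination manager (jpg_init/jpg_empty/jpg_term) routes
 * libjpeg's compressed output into the current PDF or PostScript stream.
 * The JFIF header is suppressed, an Adobe marker is written, and for
 * PostScript output the sampling factors are forced to 1.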
*/ static void jpg_setup(FILE *out, /* I - Output file */ image_t *img, /* I - Output image */ j_compress_ptr cinfo) /* I - Compressor info */ { int i; // Looping var jpg_file = out; cinfo->err = jpeg_std_error(&jerr); jpeg_create_compress(cinfo); cinfo->dest = &jpg_dest; jpg_dest.init_destination = jpg_init; jpg_dest.empty_output_buffer = jpg_empty; jpg_dest.term_destination = jpg_term; cinfo->image_width = (JDIMENSION)img->width; cinfo->image_height = (JDIMENSION)img->height; cinfo->input_components = img->depth; cinfo->in_color_space = img->depth == 1 ? JCS_GRAYSCALE : JCS_RGB; jpeg_set_defaults(cinfo); jpeg_set_quality(cinfo, OutputJPEG, TRUE); // Update things when writing to PS files... if (PSLevel) { // Adobe uses sampling == 1 for (i = 0; i < img->depth; i ++) { cinfo->comp_info[i].h_samp_factor = 1; cinfo->comp_info[i].v_samp_factor = 1; } } cinfo->write_JFIF_header = FALSE; cinfo->write_Adobe_marker = TRUE; jpeg_start_compress(cinfo, TRUE); } /* * 'compare_rgb()' - Compare two RGB colors... */ static int /* O - -1 if rgb1<rgb2, etc. */ compare_rgb(unsigned *rgb1, /* I - First color */ unsigned *rgb2) /* I - Second color */ { return ((int)*rgb1 - (int)*rgb2); } /* * 'write_image()' - Write an image to the given output file... */ static void write_image(FILE *out, /* I - Output file */ render_t *r, /* I - Image to write */ int write_obj) /* I - Write an object? */ { int i, j, k, m, /* Looping vars */ ncolors; /* Number of colors */ uchar *pixel, /* Current pixel */ *indices, /* New indexed pixel array */ *indptr; /* Current index */ int indwidth, /* Width of indexed line */ indbits; /* Bits per index */ int max_colors; /* Max colors to use */ unsigned colors[256], /* Colormap values */ key, /* Color key */ *match; /* Matching color value */ uchar grays[256], /* Grayscale usage */ cmap[256][3]; /* Colormap */ image_t *img; /* Image */ struct jpeg_compress_struct cinfo; /* JPEG compressor */ uchar *data, /* PS Level 3 image data */ *dataptr, /* Pointer into image data */ *maskptr; /* Pointer into mask data */ /* * See if we can optimize the image as indexed without color loss... */ img = r->data.image; ncolors = 0; indices = NULL; indwidth = 0; if (!img->pixels && !img->obj) image_load(img->filename, !OutputColor, 1); // Note: Acrobat 6 tries to decrypt the colormap of indexed in-line images twice, which // is 1) not consistent with prior Acrobat releases and 2) in violation of their // PDF spec. The "img->use > 1 || !Encryption" test prevents the use of indexed // in-line images when encryption is enabled. // // We are filing a bug on this with Adobe, but if history is any indicator, we are // stuck with this workaround forever... if (PSLevel != 1 && PDFVersion >= 12 && img->obj == 0 && (img->use > 1 || !Encryption)) { if (img->depth == 1) { /* * Greyscale image... */ memset(grays, 0, sizeof(grays)); for (i = img->width * img->height, pixel = img->pixels; i > 0; i --, pixel ++) if (!grays[*pixel]) { if (ncolors >= 16) break; grays[*pixel] = 1; ncolors ++; } if (i == 0) { for (i = 0, j = 0; i < 256; i ++) if (grays[i]) { colors[j] = (unsigned)((((i << 8) | i) << 8) | i); grays[i] = (uchar)j; j ++; } } else ncolors = 0; } else { /* * Color image... 
*/ if (OutputJPEG && !Compression) max_colors = 16; else max_colors = 256; for (i = img->width * img->height, pixel = img->pixels, match = NULL; i > 0; i --, pixel += 3) { key = (unsigned)((((pixel[0] << 8) | pixel[1]) << 8) | pixel[2]); if (!match || *match != key) { if (ncolors > 0) match = (unsigned *)bsearch(&key, colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); else match = NULL; } if (match == NULL) { if (ncolors >= max_colors) break; colors[ncolors] = key; ncolors ++; if (ncolors > 1) qsort(colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); } } if (i > 0) ncolors = 0; } } if (ncolors > 0) { if (PSLevel == 3 && img->mask) indbits = 8; else if (ncolors <= 2) indbits = 1; else if (ncolors <= 4) indbits = 2; else if (ncolors <= 16) indbits = 4; else indbits = 8; indwidth = (img->width * indbits + 7) / 8; indices = (uchar *)calloc((size_t)indwidth, (size_t)(img->height + 1)); // height + 1 for PS odd-row-count bug if (img->depth == 1) { /* * Convert a grayscale image... */ switch (indbits) { case 1 : for (i = img->height, pixel = img->pixels, indptr = indices; i > 0; i --) { for (j = img->width, k = 7; j > 0; j --, k = (k + 7) & 7, pixel ++) switch (k) { case 7 : *indptr = (uchar)(grays[*pixel] << 7); break; default : *indptr |= (uchar)(grays[*pixel] << k); break; case 0 : *indptr++ |= (uchar)grays[*pixel]; break; } if (k != 7) indptr ++; } break; case 2 : for (i = img->height, pixel = img->pixels, indptr = indices; i > 0; i --) { for (j = img->width, k = 0; j > 0; j --, k = (k + 1) & 3, pixel ++) switch (k) { case 0 : *indptr = (uchar)(grays[*pixel] << 6); break; case 1 : *indptr |= (uchar)(grays[*pixel] << 4); break; case 2 : *indptr |= (uchar)(grays[*pixel] << 2); break; case 3 : *indptr++ |= (uchar)grays[*pixel]; break; } if (k) indptr ++; } break; case 4 : for (i = img->height, pixel = img->pixels, indptr = indices; i > 0; i --) { for (j = img->width, k = 0; j > 0; j --, k ^= 1, pixel ++) if (k) *indptr++ |= grays[*pixel]; else *indptr = (uchar)(grays[*pixel] << 4); if (k) indptr ++; } break; } } else { /* * Convert a color image... 
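 *
 * Each pixel's RGB value is looked up in the sorted colors[] array and
 * the resulting index is packed indbits at a time: 8, 4, 2, or 1 pixels
 * per byte for 1-, 2-, 4-, and 8-bit indices, with the first pixel of
 * each byte in the most-significant bits.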
*/ switch (indbits) { case 1 : for (i = img->height, pixel = img->pixels, indptr = indices, match = colors; i > 0; i --) { for (j = img->width, k = 7; j > 0; j --, k = (k + 7) & 7, pixel += 3) { key = (unsigned)((((pixel[0] << 8) | pixel[1]) << 8) | pixel[2]); if (*match != key) match = (unsigned *)bsearch(&key, colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); m = match - colors; switch (k) { case 7 : *indptr = (uchar)(m << 7); break; default : *indptr |= (uchar)(m << k); break; case 0 : *indptr++ |= (uchar)m; break; } } if (k != 7) indptr ++; } break; case 2 : for (i = img->height, pixel = img->pixels, indptr = indices, match = colors; i > 0; i --) { for (j = img->width, k = 0; j > 0; j --, k = (k + 1) & 3, pixel += 3) { key = (unsigned)((((pixel[0] << 8) | pixel[1]) << 8) | pixel[2]); if (*match != key) match = (unsigned *)bsearch(&key, colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); m = match - colors; switch (k) { case 0 : *indptr = (uchar)(m << 6); break; case 1 : *indptr |= (uchar)(m << 4); break; case 2 : *indptr |= (uchar)(m << 2); break; case 3 : *indptr++ |= (uchar)m; break; } } if (k) indptr ++; } break; case 4 : for (i = img->height, pixel = img->pixels, indptr = indices, match = colors; i > 0; i --) { for (j = img->width, k = 0; j > 0; j --, k ^= 1, pixel += 3) { key = (unsigned)((((pixel[0] << 8) | pixel[1]) << 8) | pixel[2]); if (*match != key) match = (unsigned *)bsearch(&key, colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); m = match - colors; if (k) *indptr++ |= (uchar)m; else *indptr = (uchar)(m << 4); } if (k) indptr ++; } break; case 8 : for (i = img->height, pixel = img->pixels, indptr = indices, match = colors; i > 0; i --) { for (j = img->width; j > 0; j --, pixel += 3, indptr ++) { key = (unsigned)((((pixel[0] << 8) | pixel[1]) << 8) | pixel[2]); if (*match != key) match = (unsigned *)bsearch(&key, colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); *indptr = (uchar)(match - colors); } } break; } } } else indbits = 8; if (ncolors == 1) { /* * Adobe doesn't like 1 color images... */ ncolors = 2; colors[1] = 0; } /* * Now write the image... */ switch (PSLevel) { case 0 : /* PDF */ if (!write_obj) flate_printf(out, "q %.1f 0 0 %.1f %.1f %.1f cm\n", r->width, r->height, r->x, r->y); if (img->obj) { if (img->mask && PDFVersion < 13) write_imagemask(out, r); flate_printf(out, "/I%d Do Q\n", img->obj); break; } if (img->mask && write_obj && PDFVersion >= 13) { // We have a mask image, write it! pdf_start_object(out); fputs("/Type/XObject/Subtype/Image", out); fputs("/ColorSpace/DeviceGray", out); if (img->maskscale == 8) fprintf(out, "/Width %d/Height %d/BitsPerComponent 8", img->width, img->height); else fprintf(out, "/Width %d/Height %d/BitsPerComponent 1/ImageMask true", img->width * img->maskscale, img->height * img->maskscale); if (Compression) fputs("/Filter/FlateDecode", out); pdf_start_stream(out); flate_open_stream(out); if (img->maskscale == 8) flate_write(out, img->mask, img->width * img->height); else flate_write(out, img->mask, img->maskwidth * img->height * img->maskscale); flate_close_stream(out); pdf_end_object(out); } if (write_obj) { // Write an image object... 
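	/*
	 * Hedged sketch of the image XObject the code below emits; the object
	 * number, sizes, palette, and filters are illustrative only and depend
	 * on the image and the compression/JPEG settings:
	 *
	 *   42 0 obj
	 *   <</Type/XObject/Subtype/Image
	 *     /ColorSpace[/Indexed/DeviceRGB 15<...hex palette digits...>]
	 *     /Filter/FlateDecode
	 *     /Width 640/Height 480/BitsPerComponent 4
	 *     ...stream length bookkeeping from pdf_start_stream()...
	 *   stream
	 *   ...compressed index (or raw/JPEG pixel) data...
	 *   endstream
	 *   endobj
	 */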
img->obj = pdf_start_object(out); fputs("/Type/XObject/Subtype/Image", out); if (img->mask && PDFVersion >= 13) { if (img->maskscale == 8) fprintf(out, "/SMask %d 0 R", img->obj - 1); else fprintf(out, "/Mask %d 0 R", img->obj - 1); } if (ncolors > 0) { for (i = 0; i < ncolors; i ++) { cmap[i][0] = (uchar)(colors[i] >> 16); cmap[i][1] = (uchar)(colors[i] >> 8); cmap[i][2] = (uchar)colors[i]; } if (Encryption) { // Encrypt the colormap... encrypt_init(); rc4_encrypt(&encrypt_state, cmap[0], cmap[0], (unsigned)(ncolors * 3)); } fprintf(out, "/ColorSpace[/Indexed/DeviceRGB %d<", ncolors - 1); for (i = 0; i < ncolors; i ++) fprintf(out, "%02X%02X%02X", cmap[i][0], cmap[i][1], cmap[i][2]); fputs(">]", out); } else if (img->depth == 1) fputs("/ColorSpace/DeviceGray", out); else fputs("/ColorSpace/DeviceRGB", out); #ifdef HTMLDOC_INTERPOLATION if (ncolors != 2) fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION if (Compression && (ncolors || !OutputJPEG)) fputs("/Filter/FlateDecode", out); else if (OutputJPEG && ncolors == 0) { if (Compression) fputs("/Filter[/FlateDecode/DCTDecode]", out); else fputs("/Filter/DCTDecode", out); } fprintf(out, "/Width %d/Height %d/BitsPerComponent %d", img->width, img->height, indbits); pdf_start_stream(out); flate_open_stream(out); if (OutputJPEG && ncolors == 0) { jpg_setup(out, img, &cinfo); for (i = img->height, pixel = img->pixels; i > 0; i --, pixel += img->width * img->depth) jpeg_write_scanlines(&cinfo, &pixel, 1); jpeg_finish_compress(&cinfo); jpeg_destroy_compress(&cinfo); } else { if (ncolors > 0) flate_write(out, indices, indwidth * img->height); else flate_write(out, img->pixels, img->width * img->height * img->depth); } flate_close_stream(out); pdf_end_object(out); } else { // Put the image in-line... 
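	/*
	 * Hedged sketch of the in-line image written below, using the abbreviated
	 * BI/ID/EI keys; the numbers and palette are sample values only:
	 *
	 *   BI/CS[/I/RGB 15<...hex palette...>]/I true/W 640/H 480/BPC 4 ID
	 *   ...binary index or pixel data (optionally flate/DCT encoded)...
	 *   EI
	 *   Q
	 */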
flate_puts("BI", out); if (ncolors > 0) { flate_printf(out, "/CS[/I/RGB %d<", ncolors - 1); for (i = 0; i < ncolors; i ++) flate_printf(out, "%02X%02X%02X", colors[i] >> 16, (colors[i] >> 8) & 255, colors[i] & 255); flate_puts(">]", out); } else if (img->depth == 1) flate_puts("/CS/G", out); else flate_puts("/CS/RGB", out); if (ncolors != 2) flate_puts("/I true", out); flate_printf(out, "/W %d/H %d/BPC %d", img->width, img->height, indbits); if (ncolors > 0) { flate_puts(" ID\n", out); flate_write(out, indices, indwidth * img->height, 1); } else if (OutputJPEG) { flate_puts("/F/DCT ID\n", out); jpg_setup(out, img, &cinfo); for (i = img->height, pixel = img->pixels; i > 0; i --, pixel += img->width * img->depth) jpeg_write_scanlines(&cinfo, &pixel, 1); jpeg_finish_compress(&cinfo); jpeg_destroy_compress(&cinfo); } else { flate_puts(" ID\n", out); flate_write(out, img->pixels, img->width * img->height * img->depth, 1); } flate_write(out, (uchar *)"\nEI\nQ\n", 6, 1); } break; case 1 : /* PostScript, Level 1 */ fputs("GS", out); fprintf(out, "[%.1f 0 0 %.1f %.1f %.1f]CM", r->width, r->height, r->x, r->y); if (img->mask) write_imagemask(out, r); fprintf(out, "/picture %d string def\n", img->width * img->depth); if (img->depth == 1) fprintf(out, "%d %d 8 [%d 0 0 %d 0 %d] {currentfile picture readhexstring pop} image\n", img->width, img->height, img->width, -img->height, img->height); else fprintf(out, "%d %d 8 [%d 0 0 %d 0 %d] {currentfile picture readhexstring pop} false 3 colorimage\n", img->width, img->height, img->width, -img->height, img->height); ps_hex(out, img->pixels, img->width * img->height * img->depth); fputs("GR\n", out); break; case 3 : /* PostScript, Level 3 */ // Fallthrough to Level 2 output if compression is disabled and // we aren't doing transparency... if ((Compression && (!OutputJPEG || ncolors > 0)) || (img->mask && img->maskscale == 8)) { fputs("GS", out); fprintf(out, "[%.1f 0 0 %.1f %.1f %.1f]CM", r->width, r->height, r->x, r->y); if (img->mask && img->maskscale != 8) write_imagemask(out, r); if (ncolors > 0) { if (ncolors <= 2) ncolors = 2; /* Adobe doesn't like 1 color images... 
*/ fprintf(out, "[/Indexed/DeviceRGB %d\n<", ncolors - 1); for (i = 0; i < ncolors; i ++) { fprintf(out, "%02X%02X%02X", colors[i] >> 16, (colors[i] >> 8) & 255, colors[i] & 255); if ((i % 13) == 12) putc('\n', out); } fputs(">]setcolorspace\n", out); if (img->mask && img->maskscale == 8) fprintf(out, "<<" "/ImageType 3" "/InterleaveType 1" "/MaskDict<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent 8" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[0 1]" ">>\n" "/DataDict", img->width, img->height, img->width, -img->height, img->height); fprintf(out, "<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent %d" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[0 %d]", img->width, img->height, indbits, img->width, -img->height, img->height, (1 << indbits) - 1); #ifdef HTMLDOC_INTERPOLATION if (ncolors != 2) fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION #ifdef HTMLDOC_ASCII85 fputs("/DataSource currentfile/ASCII85Decode filter", out); #else fputs("/DataSource currentfile/ASCIIHexDecode filter", out); #endif // HTMLDOC_ASCII85 if (Compression) fputs("/FlateDecode filter", out); fputs(">>\n", out); if (img->mask && img->maskscale == 8) fputs(">>\n", out); fputs("image\n", out); flate_open_stream(out); if (img->mask && img->maskscale == 8) { data = (uchar *)malloc((size_t)(img->width * 2)); for (i = 0, maskptr = img->mask, indptr = indices; i < img->height; i ++) { for (j = img->width, dataptr = data; j > 0; j --) { *dataptr++ = *maskptr++; *dataptr++ = *indptr++; } flate_write(out, data, img->width * 2); } free(data); } else flate_write(out, indices, indwidth * img->height); flate_close_stream(out); } else { if (img->depth == 1) fputs("/DeviceGray setcolorspace", out); else fputs("/DeviceRGB setcolorspace", out); if (img->mask && img->maskscale == 8) fprintf(out, "<<" "/ImageType 3" "/InterleaveType 1" "/MaskDict<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent 8" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[0 1]" ">>\n" "/DataDict", img->width, img->height, img->width, -img->height, img->height); fprintf(out, "<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent 8" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[%s]", img->width, img->height, img->width, -img->height, img->height, img->depth == 1 ? 
"0 1" : "0 1 0 1 0 1"); #ifdef HTMLDOC_INTERPOLATION fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION #ifdef HTMLDOC_ASCII85 fputs("/DataSource currentfile/ASCII85Decode filter", out); #else fputs("/DataSource currentfile/ASCIIHexDecode filter", out); #endif // HTMLDOC_ASCII85 if (Compression) fputs("/FlateDecode filter", out); fputs(">>\n", out); if (img->mask && img->maskscale == 8) fputs(">>\n", out); fputs("image\n", out); flate_open_stream(out); if (img->mask && img->maskscale == 8) { data = (uchar *)malloc((size_t)(img->width * (img->depth + 1))); for (i = 0, maskptr = img->mask, pixel = img->pixels; i < img->height; i ++) { if (img->depth == 1) { for (j = img->width, dataptr = data; j > 0; j --) { *dataptr++ = *maskptr++; *dataptr++ = *pixel++; } } else { for (j = img->width, dataptr = data; j > 0; j --) { *dataptr++ = *maskptr++; *dataptr++ = *pixel++; *dataptr++ = *pixel++; *dataptr++ = *pixel++; } } flate_write(out, data, img->width * (img->depth + 1)); } free(data); } else flate_write(out, img->pixels, img->width * img->height * img->depth); flate_close_stream(out); } fputs("GR\n", out); break; } case 2 : /* PostScript, Level 2 */ fputs("GS", out); fprintf(out, "[%.1f 0 0 %.1f %.1f %.1f]CM", r->width, r->height, r->x, r->y); if (img->mask) write_imagemask(out, r); if (ncolors > 0) { fprintf(out, "[/Indexed/DeviceRGB %d\n<", ncolors - 1); for (i = 0; i < ncolors; i ++) { fprintf(out, "%02X%02X%02X", colors[i] >> 16, (colors[i] >> 8) & 255, colors[i] & 255); if ((i % 13) == 12) putc('\n', out); } fputs(">]setcolorspace\n", out); fprintf(out, "<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent %d" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[0 %d]", img->width, img->height, indbits, img->width, -img->height, img->height, (1 << indbits) - 1); #ifdef HTMLDOC_INTERPOLATION if (ncolors != 2) fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION #ifdef HTMLDOC_ASCII85 fputs("/DataSource currentfile/ASCII85Decode filter>>image\n", out); ps_ascii85(out, indices, indwidth * img->height, 1); #else fputs("/DataSource currentfile/ASCIIHexDecode filter>>image\n", out); ps_hex(out, indices, indwidth * img->height); // End of data marker... fputs(">\n", out); #endif /* HTMLDOC_ASCII85 */ } else if (OutputJPEG) { if (img->depth == 1) fputs("/DeviceGray setcolorspace\n", out); else fputs("/DeviceRGB setcolorspace\n", out); fprintf(out, "<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent 8" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[%s]", img->width, img->height, img->width, -img->height, img->height, img->depth == 1 ? "0 1" : "0 1 0 1 0 1"); #ifdef HTMLDOC_INTERPOLATION fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION #ifdef HTMLDOC_ASCII85 fputs("/DataSource currentfile/ASCII85Decode filter/DCTDecode filter" ">>image\n", out); #else fputs("/DataSource currentfile/ASCIIHexDecode filter/DCTDecode filter" ">>image\n", out); #endif // HTMLDOC_ASCII85 jpg_setup(out, img, &cinfo); for (i = img->height, pixel = img->pixels; i > 0; i --, pixel += img->width * img->depth) jpeg_write_scanlines(&cinfo, &pixel, 1); jpeg_finish_compress(&cinfo); jpeg_destroy_compress(&cinfo); #ifdef HTMLDOC_ASCII85 ps_ascii85(out, (uchar *)"", 0, 1); #else // End of data marker... 
fputs(">\n", out); #endif // HTMLDOC_ASCII85 } else { if (img->depth == 1) fputs("/DeviceGray setcolorspace\n", out); else fputs("/DeviceRGB setcolorspace\n", out); fprintf(out, "<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent 8" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[%s]", img->width, img->height, img->width, -img->height, img->height, img->depth == 1 ? "0 1" : "0 1 0 1 0 1"); #ifdef HTMLDOC_INTERPOLATION fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION #ifdef HTMLDOC_ASCII85 fputs("/DataSource currentfile/ASCII85Decode filter" ">>image\n", out); ps_ascii85(out, img->pixels, img->width * img->height * img->depth, 1); #else fputs("/DataSource currentfile/ASCIIHexDecode filter" ">>image\n", out); ps_hex(out, img->pixels, img->width * img->depth * img->height); // End of data marker... fputs(">\n", out); #endif // HTMLDOC_ASCII85 } fputs("GR\n", out); break; } if (ncolors > 0) free(indices); image_unload(img); } /* * 'write_imagemask()' - Write an imagemask to the output file... */ static void write_imagemask(FILE *out, /* I - Output file */ render_t *r) /* I - Image to write */ { image_t *img; /* Current image */ int x, y; /* Position in mask image */ int startx, count; /* Start and count */ uchar *ptr, /* Pointer into mask image */ byte, /* Current byte */ bit; /* Current bit */ float scalex, scaley; /* 1/(w-1) and 1/(h-1) scaling factors */ int width, height; /* Scaled width and height */ img = r->data.image; width = img->width * img->maskscale; height = img->height * img->maskscale; scalex = 1.0f / width; scaley = 1.0f / height; switch (PSLevel) { case 0 : // PDF break; default : // PostScript fputs("\nnewpath\n", out); break; } for (y = 0; y < height; y ++) { for (x = 0, ptr = img->mask + (height - y - 1) * img->maskwidth, bit = 128, byte = *ptr++, startx = 0, count = 0; x < width; x ++) { if (!(bit & byte)) { if (!count) startx = x; count ++; } else if (count) { switch (PSLevel) { case 0 : // PDF flate_printf(out, "%.6f %.6f %.6f %.6f re\n", (float)startx * scalex, (float)y * scaley, (float)count * scalex, 1.0f * scaley); break; default : // PostScript fprintf(out, "%.6f %.6f %.6f %.6f re\n", (float)startx * scalex, (float)y * scaley, (float)count * scalex, 1.0f * scaley); break; } count = 0; } if (bit > 1) bit >>= 1; else { bit = 128; byte = *ptr++; } } if (count) { switch (PSLevel) { case 0 : // PDF flate_printf(out, "%.6f %.6f %.6f %.6f re\n", (float)startx * scalex, (float)y * scaley, (float)count * scalex, 1.0f * scaley); break; default : // PostScript fprintf(out, "%.6f %.6f %.6f %.6f re\n", (float)startx * scalex, (float)y * scaley, (float)count * scalex, 1.0f * scaley); break; } } } switch (PSLevel) { case 0 : // PDF flate_puts("W n\n", out); break; default : // PostScript fputs("clip\n", out); break; } } /* * 'write_prolog()' - Write the file prolog... 
*/ static void write_prolog(FILE *out, /* I - Output file */ int page_count, /* I - Number of pages (0 if not known) */ uchar *author, /* I - Author of document */ uchar *creator, /* I - Application that generated the HTML file */ uchar *copyright, /* I - Copyright (if any) on the document */ uchar *keywords, /* I - Search keywords */ uchar *subject) /* I - Subject */ { FILE *prolog; /* PostScript prolog file */ int i, j, /* Looping vars */ encoding_object; /* Font encoding object */ int page; /* Current page */ render_t *r; /* Current render data */ int fonts_used[TYPE_MAX][STYLE_MAX]; /* Whether or not a font is used */ int font_desc[TYPE_MAX][STYLE_MAX]; /* Font descriptor objects */ char temp[1024]; /* Temporary string */ md5_state_t md5; /* MD5 state */ md5_byte_t digest[16]; /* MD5 digest value */ rc4_context_t rc4; /* RC4 context */ uchar owner_pad[32], /* Padded owner password */ owner_key[32], /* Owner key */ user_pad[32], /* Padded user password */ user_key[32]; /* User key */ uchar perm_bytes[4]; /* Permission bytes */ unsigned perm_value; /* Permission value, unsigned */ static unsigned char pad[32] = { /* Padding for passwords */ 0x28, 0xbf, 0x4e, 0x5e, 0x4e, 0x75, 0x8a, 0x41, 0x64, 0x00, 0x4e, 0x56, 0xff, 0xfa, 0x01, 0x08, 0x2e, 0x2e, 0x00, 0xb6, 0xd0, 0x68, 0x3e, 0x80, 0x2f, 0x0c, 0xa9, 0xfe, 0x64, 0x53, 0x69, 0x7a }; /* * See what fonts are used... */ memset(fonts_used, 0, sizeof(fonts_used)); fonts_used[HeadFootType][HeadFootStyle] = 1; for (page = 0; page < (int)num_pages; page ++) for (r = pages[page].start; r != NULL; r = r->next) if (r->type == RENDER_TEXT) fonts_used[r->data.text.typeface][r->data.text.style] = 1; #ifdef DEBUG puts("The following fonts were used:"); for (i = 0; i < TYPE_MAX; i ++) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j]) printf(" %s\n", _htmlFonts[i][j]); #endif // DEBUG /* * Generate the heading... */ if (PSLevel > 0) { /* * Write PostScript prolog stuff... */ if (XRXComments) { int start, end; // Start and end of document pages... int count; // Number of exception pages in this range... // The following comments are Xerox job ticket information that // is used on the high-end Laser Printing Systems rather than // embedded commands... fputs("%XRXbegin: 001.0300\n", out); fputs("%XRXPDLformat: PS-Adobe\n", out); if (doc_title) fprintf(out, "%%XRXtitle: %s\n", doc_title); if (OutputFiles) { // Output a single chapter... if (chapter < 0) { start = 0; end = chapter_outstarts[1] - 1; } else { start = chapter_outstarts[chapter]; end = chapter_outends[chapter]; } } else { start = 0; end = 0; } if (pages[outpages[start].pages[0]].duplex) { if (pages[outpages[start].pages[0]].landscape) fputs("%XRXrequirements: duplex(tumble)\n", out); else fputs("%XRXrequirements: duplex\n", out); } else fputs("%XRXrequirements: simplex\n", out); fputs("%XRXdisposition: PRINT\n", out); fputs("%XRXsignature: False\n", out); fprintf(out, "%%XRXpaperType-size: %.0f %.0f\n", pages[outpages[start].pages[0]].width * 25.4f / 72.0f, pages[outpages[start].pages[0]].length * 25.4f / 72.0f); if (pages[outpages[start].pages[0]].media_type[0]) fprintf(out, "%%XRXpaperType-preFinish: %s 0 0\n", pages[start].media_type); if (pages[outpages[start].pages[0]].media_color[0]) fprintf(out, "%%XRXdocumentPaperColors: %c%s\n", tolower(pages[start].media_color[0]), pages[start].media_color + 1); if (OutputFiles) { // Handle document settings per-chapter... 
for (i = start + 1; i < end; i += count) { if (pages[outpages[i].pages[0]].width != pages[0].width || pages[outpages[i].pages[0]].length != pages[0].length || strcmp(pages[outpages[i].pages[0]].media_type, pages[0].media_type) != 0 || strcmp(pages[outpages[i].pages[0]].media_color, pages[0].media_color) != 0 || pages[outpages[i].pages[0]].duplex != pages[0].duplex) { for (count = 1; (i + count) <= end; count ++) if (pages[outpages[i].pages[0]].width != pages[outpages[i + count].pages[0]].width || pages[outpages[i].pages[0]].length != pages[outpages[i + count].pages[0]].length || strcmp(pages[outpages[i].pages[0]].media_type, pages[outpages[i + count].pages[0]].media_type) != 0 || strcmp(pages[outpages[i].pages[0]].media_color, pages[outpages[i + count].pages[0]].media_color) != 0 || pages[outpages[i].pages[0]].duplex != pages[outpages[i + count].pages[0]].duplex) break; fprintf(out, "%%XRXpageExceptions: %d %d %.0f %.0f %c%s opaque %s 0 0\n", i + 1, i + count, pages[outpages[i].pages[0]].width * 25.4f / 72.0f, pages[outpages[i].pages[0]].length * 25.4f / 72.0f, tolower(pages[outpages[i].pages[0]].media_color[0]), pages[outpages[i].pages[0]].media_color + 1, pages[outpages[i].pages[0]].media_type[0] ? pages[outpages[i].pages[0]].media_type : "Plain"); if (pages[outpages[i].pages[0]].duplex && pages[outpages[i].pages[0]].landscape) fprintf(out, "%%XRXpageExceptions-plex: %d %d duplex(tumble)\n", i + 1, i + count); else if (pages[outpages[i].pages[0]].duplex) fprintf(out, "%%XRXpageExceptions-plex: %d %d duplex\n", i + 1, i + count); else fprintf(out, "%%XRXpageExceptions-plex: %d %d simplex\n", i + 1, i + count); } else count = 1; } } else { // All pages are in a single file... for (j = (TocLevels == 0); j <= TocDocCount; j ++) { start = chapter_outstarts[j]; end = chapter_outends[j]; for (i = start + 1; i < end; i += count) { if (pages[outpages[i].pages[0]].width != pages[0].width || pages[outpages[i].pages[0]].length != pages[0].length || strcmp(pages[outpages[i].pages[0]].media_type, pages[0].media_type) != 0 || strcmp(pages[outpages[i].pages[0]].media_color, pages[0].media_color) != 0 || pages[outpages[i].pages[0]].duplex != pages[0].duplex) { for (count = 1; (i + count) < end; count ++) if (pages[outpages[i].pages[0]].width != pages[outpages[i + count].pages[0]].width || pages[outpages[i].pages[0]].length != pages[outpages[i + count].pages[0]].length || strcmp(pages[outpages[i].pages[0]].media_type, pages[outpages[i + count].pages[0]].media_type) != 0 || strcmp(pages[outpages[i].pages[0]].media_color, pages[outpages[i + count].pages[0]].media_color) != 0 || pages[outpages[i].pages[0]].duplex != pages[outpages[i + count].pages[0]].duplex) break; fprintf(out, "%%XRXpageExceptions: %d %d %.0f %.0f %c%s opaque %s 0 0\n", i + 1, i + count, pages[outpages[i].pages[0]].width * 25.4f / 72.0f, pages[outpages[i].pages[0]].length * 25.4f / 72.0f, tolower(pages[outpages[i].pages[0]].media_color[0]), pages[outpages[i].pages[0]].media_color + 1, pages[outpages[i].pages[0]].media_type[0] ? 
pages[outpages[i].pages[0]].media_type : "Plain"); if (pages[outpages[i].pages[0]].duplex && pages[outpages[i].pages[0]].landscape) fprintf(out, "%%XRXpageExceptions-plex: %d %d duplex(tumble)\n", i + 1, i + count); else if (pages[outpages[i].pages[0]].duplex) fprintf(out, "%%XRXpageExceptions-plex: %d %d duplex\n", i + 1, i + count); else fprintf(out, "%%XRXpageExceptions-plex: %d %d simplex\n", i + 1, i + count); } else count = 1; } } } fputs("%XRXend\n", out); } fputs("%!PS-Adobe-3.0\n", out); if (Landscape) fprintf(out, "%%%%BoundingBox: 0 0 %d %d\n", PageLength, PageWidth); else fprintf(out, "%%%%BoundingBox: 0 0 %d %d\n", PageWidth, PageLength); fprintf(out,"%%%%LanguageLevel: %d\n", PSLevel); fputs("%%Creator: " HTMLDOC_PRODUCER "\n", out); fprintf(out, "%%%%CreationDate: D:%04d%02d%02d%02d%02d%02d+0000\n", doc_date.tm_year + 1900, doc_date.tm_mon + 1, doc_date.tm_mday, doc_date.tm_hour, doc_date.tm_min, doc_date.tm_sec); if (doc_title != NULL) fprintf(out, "%%%%Title: %s\n", doc_title); if (author != NULL) fprintf(out, "%%%%Author: %s\n", author); if (creator != NULL) fprintf(out, "%%%%Generator: %s\n", creator); if (copyright != NULL) fprintf(out, "%%%%Copyright: %s\n", copyright); if (keywords != NULL) fprintf(out, "%%%%Keywords: %s\n", keywords); if (subject != NULL) fprintf(out, "%%%%Subject: %s\n", keywords); if (page_count > 0) fprintf(out, "%%%%Pages: %d\n", page_count); else fputs("%%Pages: (atend)\n", out); if (!EmbedFonts) { fputs("%%DocumentNeededResources:\n", out); for (i = 0; i < TYPE_MAX; i ++) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j] && _htmlStandardFonts[i]) fprintf(out, "%%%%+ font %s\n", _htmlFonts[i][j]); } fputs("%%DocumentProvidedResources:\n", out); for (i = 0; i < TYPE_MAX; i ++) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j] && (EmbedFonts || !_htmlStandardFonts[i])) fprintf(out, "%%%%+ font %s\n", _htmlFonts[i][j]); fputs("%%DocumentData: Clean7bit\n", out); fputs("%%EndComments\n", out); fputs("%%BeginProlog\n", out); /* * Embed fonts? */ for (i = 0; i < TYPE_MAX; i ++) { if (EmbedFonts || !_htmlStandardFonts[i]) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j]) write_type1(out, (typeface_t)i, (style_t)j); } /* * Procedures used throughout the document... */ const char *version = SVERSION; fprintf(out, "%%%%BeginResource: procset htmldoc-page 1.8 %s\n", version + 4); fputs("/BD{bind def}bind def", out); fputs("/B{dup 0 exch rlineto exch 0 rlineto neg 0 exch rlineto\n" "closepath stroke}BD", out); fputs("/C{setrgbcolor}BD\n", out); fputs("/CM{concat}BD", out); fputs("/DF{findfont dup length dict begin{1 index/FID ne{def}{pop pop}\n" "ifelse}forall/Encoding fontencoding def currentdict end definefont pop}BD\n", out); fputs("/F{dup 0 exch rlineto exch 0 rlineto neg 0 exch rlineto closepath fill}BD\n", out); fputs("/FS{/hdFontSize exch def}BD", out); fputs("/G{setgray}BD\n", out); fputs("/GS{gsave}BD", out); fputs("/GR{grestore}BD", out); fputs("/J{0 exch ashow}BD\n", out); fputs("/L{0 rlineto stroke}BD", out); fputs("/M{moveto}BD", out); fputs("/re{4 2 roll moveto 1 index 0 rlineto 0 exch rlineto neg 0 rlineto closepath}BD\n", out); fputs("/RO{rotate}BD", out); fputs("/S{show}BD", out); fputs("/SC{dup scale}BD\n", out); fputs("/SF{findfont hdFontSize scalefont setfont}BD", out); fputs("/SP{showpage}BD", out); fputs("/T{translate}BD\n", out); fputs("%%EndResource\n", out); /* * Output the font encoding for the current character set... 
For now we * just support 8-bit fonts since true Unicode support needs a very large * number of extra fonts that aren't normally available on a PS printer. */ fputs("/fontencoding[\n", out); for (i = 0, j = 0; i < 256; i ++) { if (_htmlGlyphs[i]) j += strlen(_htmlGlyphs[i]) + 1; else j += 8; if (j > 80) { if (_htmlGlyphs[i]) j = strlen(_htmlGlyphs[i]) + 1; else j = 8; putc('\n', out); } putc('/', out); if (_htmlGlyphs[i]) fputs(_htmlGlyphs[i], out); else fputs(".notdef", out); } fputs("]def\n", out); /* * Fonts... */ for (i = 0; i < TYPE_MAX; i ++) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j]) { if (i < TYPE_SYMBOL) fprintf(out, "/F%x/%s DF\n", i * 4 + j, _htmlFonts[i][j]); else fprintf(out, "/F%x/%s findfont definefont pop\n", i * 4 + j, _htmlFonts[i][j]); } if (PSCommands) { snprintf(temp, sizeof(temp), "%s/data/prolog.ps", _htmlData); if ((prolog = fopen(temp, "rb")) != NULL) { while (fgets(temp, sizeof(temp), prolog) != NULL) fputs(temp, out); fclose(prolog); } else { progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to open data file \"%s\" - %s", temp, strerror(errno)); fprintf(out, "%%%%BeginResource: procset htmldoc-device 1.8 %s\n", version + 4); fputs("languagelevel 1 eq{/setpagedevice{pop}BD}if\n", out); fputs("/SetDuplexMode{<</Duplex 3 index/Tumble 5 index>>setpagedevice " "pop pop}BD\n", out); fputs("/SetMediaColor{pop}BD\n", out); fputs("/SetMediaType{pop}BD\n", out); fputs("/SetMediaPosition{pop}BD\n", out); fputs("/SetPageSize{2 array astore<</PageSize 2 index/ImageableArea " "null>>setpagedevice pop}BD\n", out); fputs("%%EndResource\n", out); } } if (background_image != NULL) ps_write_background(out); fputs("%%EndProlog\n", out); } else { /* * Write PDF prolog stuff... */ fprintf(out, "%%PDF-%.1f\n", 0.1 * PDFVersion); fputs("%\342\343\317\323\n", out); num_objects = 0; /* * Compute the file ID... */ md5_init(&md5); md5_append(&md5, (md5_byte_t *)OutputPath, sizeof(OutputPath)); md5_append(&md5, (md5_byte_t *)&doc_time, sizeof(doc_time)); md5_finish(&md5, file_id); /* * Setup encryption stuff as necessary... */ if (Encryption) { /* * Copy and pad the user password... */ strlcpy((char *)user_pad, UserPassword, sizeof(user_pad)); if ((i = strlen(UserPassword)) < 32) memcpy(user_pad + i, pad, (size_t)(32 - i)); if (OwnerPassword[0]) { /* * Copy and pad the owner password... */ strlcpy((char *)owner_pad, OwnerPassword, sizeof(owner_pad)); if ((i = strlen(OwnerPassword)) < 32) memcpy(owner_pad + i, pad, (size_t)(32 - i)); } else { /* * Generate a pseudo-random owner password... */ srand(time(NULL)); for (i = 0; i < 32; i ++) owner_pad[i] = (uchar)rand(); } /* * What is the key length? * * Acrobat 4.0 and earlier (PDF 1.3 and earlier) allow a maximum of * 40-bits. Acrobat 5.0 and newer support 128-bits. */ if (PDFVersion > 13) encrypt_len = 16; // 128 bits else encrypt_len = 5; // 40 bits /* * Compute the owner key... */ md5_init(&md5); md5_append(&md5, owner_pad, 32); md5_finish(&md5, digest); if (encrypt_len > 5) { // MD5 the result 50 more times... for (i = 0; i < 50; i ++) { md5_init(&md5); md5_append(&md5, digest, 16); md5_finish(&md5, digest); } // Copy the padded user password... memcpy(owner_key, user_pad, 32); // Encrypt the result 20 times... for (i = 0; i < 20; i ++) { // XOR each byte in the key with the loop counter... 
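	/*
	 * (This matches the revision 3 /O computation in the PDF 1.4 reference:
	 * the padded user password is RC4-encrypted 20 times, and for pass "i"
	 * every byte of the MD5-derived key is XORed with "i" before the pass,
	 * so pass 0 uses the key unmodified.)
	 */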
for (j = 0; j < encrypt_len; j ++) encrypt_key[j] = (uchar)(digest[j] ^ i); rc4_init(&rc4, encrypt_key, (size_t)encrypt_len); rc4_encrypt(&rc4, owner_key, owner_key, 32); } } else { rc4_init(&rc4, digest, (size_t)encrypt_len); rc4_encrypt(&rc4, user_pad, owner_key, 32); } /* * Figure out the permissions word; the new N-bit security * handler adds several new permission bits, which we must * simulate... */ perm_value = (unsigned)Permissions; if (encrypt_len > 5) { // N-bit encryption... if (!(perm_value & PDF_PERM_COPY)) perm_value &= (unsigned)~0x00240000; // Mask additional copy perms... } /* * Compute the encryption key... */ md5_init(&md5); md5_append(&md5, user_pad, 32); md5_append(&md5, owner_key, 32); perm_bytes[0] = (uchar)perm_value; perm_bytes[1] = (uchar)(perm_value >> 8); perm_bytes[2] = (uchar)(perm_value >> 16); perm_bytes[3] = (uchar)(perm_value >> 24); md5_append(&md5, perm_bytes, 4); md5_append(&md5, file_id, 16); md5_finish(&md5, digest); if (encrypt_len > 5) { // MD5 the result 50 times.. for (i = 0; i < 50; i ++) { md5_init(&md5); md5_append(&md5, digest, 16); md5_finish(&md5, digest); } } memcpy(encrypt_key, digest, (size_t)encrypt_len); /* * Compute the user key... */ if (encrypt_len > 5) { md5_init(&md5); md5_append(&md5, pad, 32); md5_append(&md5, file_id, 16); md5_finish(&md5, user_key); memset(user_key + 16, 0, 16); // Encrypt the result 20 times... for (i = 0; i < 20; i ++) { // XOR each byte in the key with the loop counter... for (j = 0; j < encrypt_len; j ++) digest[j] = (uchar)(encrypt_key[j] ^ i); rc4_init(&rc4, digest, (size_t)encrypt_len); rc4_encrypt(&rc4, user_key, user_key, 16); } } else { rc4_init(&rc4, encrypt_key, (size_t)encrypt_len); rc4_encrypt(&rc4, pad, user_key, 32); } /* * Write the encryption dictionary... */ encrypt_object = pdf_start_object(out); fputs("/Filter/Standard/O<", out); for (i = 0; i < 32; i ++) fprintf(out, "%02x", owner_key[i]); fputs(">/U<", out); for (i = 0; i < 32; i ++) fprintf(out, "%02x", user_key[i]); fputs(">", out); if (encrypt_len > 5) { // N-bit encryption... fprintf(out, "/P %d/V 2/R 3/Length %d", (int)perm_value, encrypt_len * 8); } else fprintf(out, "/P %d/V 1/R 2", (int)perm_value); pdf_end_object(out); } else encrypt_object = 0; /* * Write info object... */ info_object = pdf_start_object(out); fputs("/Producer", out); write_string(out, (uchar *)HTMLDOC_PRODUCER, 0); fputs("/CreationDate", out); snprintf(temp, sizeof(temp), "D:%04d%02d%02d%02d%02d%02d+0000", doc_date.tm_year + 1900, doc_date.tm_mon + 1, doc_date.tm_mday, doc_date.tm_hour, doc_date.tm_min, doc_date.tm_sec); write_string(out, (uchar *)temp, 0); if (doc_title != NULL) { fputs("/Title", out); write_utf16(out, doc_title); } if (author != NULL || copyright != NULL) { if (author && copyright) snprintf(temp, sizeof(temp), "%s, %s", author, copyright); else if (author) strlcpy(temp, (const char *)author, sizeof(temp)); else strlcpy(temp, (const char *)copyright, sizeof(temp)); fputs("/Author", out); write_utf16(out, (uchar *)temp); } if (creator != NULL) { fputs("/Creator", out); write_utf16(out, creator); } if (keywords != NULL) { fputs("/Keywords", out); write_utf16(out, keywords); } if (subject != NULL) { fputs("/Subject", out); write_utf16(out, subject); } pdf_end_object(out); /* * Write the font encoding for the selected character set. Note that * we *should* be able to use the WinAnsiEncoding value for ISO-8859-1 * to make smaller files, however Acrobat Exchange does not like it * despite the fact that it is defined in the PDF specification... 
*/ encoding_object = pdf_start_object(out); fputs("/Type/Encoding", out); fputs("/Differences[", out); for (i = 0, j = -1; i < 256; i ++) if (_htmlGlyphs[i]) { /* * Output a character index if we had blank ones... */ if (j != (i - 1)) fprintf(out, " %d", i); fprintf(out, "/%s", _htmlGlyphs[i]); j = i; } fputs("]", out); pdf_end_object(out); memset(font_desc, 0, sizeof(font_desc)); /* * Build font descriptors for the EmbedFonts fonts... */ for (i = 0; i < TYPE_MAX; i ++) if (EmbedFonts || !_htmlStandardFonts[i]) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j]) font_desc[i][j] = write_type1(out, (typeface_t )i, (style_t)j); for (i = 0; i < TYPE_MAX; i ++) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j]) { font_objects[i * STYLE_MAX + j] = pdf_start_object(out); fputs("/Type/Font", out); fputs("/Subtype/Type1", out); fprintf(out, "/BaseFont/%s", _htmlFonts[i][j]); if (font_desc[i][j]) { // Embed Type1 font... fputs("/FirstChar 0", out); fputs("/LastChar 255", out); fprintf(out, "/Widths %d 0 R", font_desc[i][j] + 1); fprintf(out, "/FontDescriptor %d 0 R", font_desc[i][j]); } if (i < TYPE_SYMBOL) /* Use native encoding for symbols */ fprintf(out, "/Encoding %d 0 R", encoding_object); pdf_end_object(out); } } } /* * 'write_string()' - Write a text entity. */ static void write_string(FILE *out, /* I - Output file */ uchar *s, /* I - String */ int compress) /* I - Compress output? */ { int i; /* Looping var */ if (Encryption && !compress && PSLevel == 0) { int len, // Length of string bytes; // Current bytes encrypted uchar news[1024]; // New string /* * Write an encrypted string... */ putc('<', out); encrypt_init(); for (len = strlen((char *)s); len > 0; len -= bytes, s += bytes) { if (len > (int)sizeof(news)) bytes = (int)sizeof(news); else bytes = len; rc4_encrypt(&encrypt_state, s, news, (size_t)bytes); for (i = 0; i < bytes; i ++) fprintf(out, "%02x", news[i]); } putc('>', out); } else { uchar nbsp = 160; // Non-breaking space char if (compress) flate_write(out, (uchar *)"(", 1); else putc('(', out); if (_htmlUTF8) nbsp = _htmlCharacters[160]; while (*s != '\0') { if (*s == nbsp) { /* &nbsp; */ if (compress) flate_write(out, (uchar *)" ", 1); else putc(' ', out); } else if (*s < 32 || *s > 126) { if (compress) flate_printf(out, "\\%o", *s); else fprintf(out, "\\%o", *s); } else if (compress) { if (*s == '(' || *s == ')' || *s == '\\') flate_write(out, (uchar *)"\\", 1); flate_write(out, s, 1); } else { if (*s == '(' || *s == ')' || *s == '\\') putc('\\', out); putc(*s, out); } s ++; } if (compress) flate_write(out, (uchar *)")", 1); else putc(')', out); } } /* * 'write_text()' - Write a text entity. */ static void write_text(FILE *out, /* I - Output file */ render_t *r) /* I - Text entity */ { uchar *ptr; /* Pointer into text */ // Quick optimization - don't output spaces... for (ptr = r->data.text.buffer; *ptr; ptr ++) if (!isspace(*ptr) && *ptr != 0xa0) break; if (!*ptr) return; // Not just whitespace - send it out... 
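  /*
   * Emit the color, font, and position, then draw the string: for PDF output
   * this is an optional "Tc" character-spacing change followed by "Tj", while
   * PostScript output uses the prolog procedures "J" (ashow with extra
   * spacing) or "S" (show) defined in write_prolog().
   */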
set_color(out, r->data.text.rgb); set_font(out, r->data.text.typeface, r->data.text.style, r->data.text.size); set_pos(out, r->x, r->y); if (PSLevel > 0) { if (r->data.text.spacing > 0.0f) fprintf(out, " %.3f", r->data.text.spacing); } else if (r->data.text.spacing != render_spacing) flate_printf(out, " %.3f Tc", render_spacing = r->data.text.spacing); write_string(out, r->data.text.buffer, PSLevel == 0); if (PSLevel > 0) { if (r->data.text.spacing > 0.0f) fputs("J\n", out); else fputs("S\n", out); } else flate_puts("Tj\n", out); render_x += r->width; } /* * 'write_trailer()' - Write the file trailer. */ static void write_trailer(FILE *out, /* I - Output file */ int num_file_pages, /* I - Number of pages in file */ uchar *lang) /* I - Language */ { int i, j, k, /* Looping vars */ type, /* Type of number */ offset, /* Offset to xref table in PDF file */ start; /* Start page number */ page_t *page; /* Start page of chapter */ char prefix[64], /* Prefix string */ *prefptr; /* Pointer into prefix string */ static const char *modes[] = /* Page modes */ { "UseNone", "UseOutlines", "FullScreen" }; static const char *layouts[] = /* Page layouts */ { "SinglePage", "OneColumn", "TwoColumnLeft", "TwoColumnRight" }; if (PSLevel > 0) { /* * PostScript... */ fputs("%%Trailer\n", out); if (num_file_pages > 0) fprintf(out, "%%%%Pages: %d\n", num_file_pages); fputs("%%EOF\n", out); } else { /* * PDF... */ root_object = pdf_start_object(out); fputs("/Type/Catalog", out); fprintf(out, "/Pages %d 0 R", pages_object); if (PDFVersion >= 12) { if (names_object) fprintf(out, "/Names %d 0 R", names_object); fprintf(out, "/PageLayout/%s", layouts[PDFPageLayout]); } if (lang) fprintf(out, "/Lang(%s)", (char *)lang); if (outline_object > 0) fprintf(out, "/Outlines %d 0 R", outline_object); switch (PDFFirstPage) { case PDF_PAGE_1 : if (TitlePage) { fprintf(out, "/OpenAction[%d 0 R/XYZ null null 0]", pages_object + 1); break; } break; case PDF_TOC : if (TocLevels > 0) { fprintf(out, "/OpenAction[%d 0 R/XYZ null null 0]", pages_object + 2 * chapter_outstarts[0] + 1); break; } break; case PDF_CHAPTER_1 : fprintf(out, "/OpenAction[%d 0 R/XYZ null null 0]", pages_object + 2 * chapter_outstarts[1] + 1); break; } fprintf(out, "/PageMode/%s", modes[PDFPageMode]); if (PDFVersion > 12 && NumberUp == 1) { // Output the PageLabels tree... 
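      /*
       * Hedged sketch of the /PageLabels number tree written below; the page
       * indices and labels are sample values only (title pages get literal
       * labels, the TOC defaults to roman numerals, and each chapter restarts
       * its decimal numbering, optionally with a prefix from the header or
       * footer format):
       *
       *   /PageLabels<</Nums[
       *     0<</P(title)>>
       *     2<</S/r>>
       *     4<</S/D/St 1/P(chapter-)>>
       *   ]>>
       */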
fputs("/PageLabels<</Nums[", out); for (i = 0; i < chapter_starts[1]; i ++) { fprintf(out, "%d<</P", i); if (i & 1) write_string(out, (uchar *)"eltit", 0); else write_string(out, (uchar *)"title", 0); fputs(">>", out); } if (TocLevels > 0 && OutputType == OUTPUT_BOOK) { type = 'r'; for (j = 0; j < 3; j ++) if ((TocHeader[j] && strstr(TocHeader[j], "$PAGE(1)")) || (TocFooter[j] && strstr(TocFooter[j], "$PAGE(1)"))) type = 'D'; else if ((TocHeader[j] && strstr(TocHeader[j], "$PAGE(I)")) || (TocFooter[j] && strstr(TocFooter[j], "$PAGE(I)"))) type = 'R'; else if ((TocHeader[j] && strstr(TocHeader[j], "$PAGE(a)")) || (TocFooter[j] && strstr(TocFooter[j], "$PAGE(a)"))) type = 'a'; else if ((TocHeader[j] && strstr(TocHeader[j], "$PAGE(A)")) || (TocFooter[j] && strstr(TocFooter[j], "$PAGE(A)"))) type = 'A'; fprintf(out, "%d<</S/%c>>", i, type); i += chapter_ends[0] - chapter_starts[0] + 1; } for (j = 1; j <= TocDocCount; j ++) { page = pages + chapter_starts[j]; start = chapter_starts[j] - chapter_starts[1] + 1; type = 'D'; prefix[0] = '\0'; for (k = 0; k < 3; k ++) { if (page->header[k] && strstr((char *)page->header[k], "PAGE")) strlcpy(prefix, (char *)page->header[k], sizeof(prefix)); else if (page->footer[k] && strstr((char *)page->footer[k], "PAGE")) strlcpy(prefix, (char *)page->footer[k], sizeof(prefix)); if ((page->header[k] && strstr((char *)page->header[k], "PAGE(i)")) || (page->footer[k] && strstr((char *)page->footer[k], "PAGE(i)"))) type = 'r'; else if ((page->header[k] && strstr((char *)page->header[k], "PAGE(I)")) || (page->footer[k] && strstr((char *)page->footer[k], "PAGE(I)"))) type = 'R'; else if ((page->header[k] && strstr((char *)page->header[k], "PAGE(a)")) || (page->footer[k] && strstr((char *)page->footer[k], "PAGE(a)"))) type = 'a'; else if ((page->header[k] && strstr((char *)page->header[k], "PAGE(A)")) || (page->footer[k] && strstr((char *)page->footer[k], "PAGE(A)"))) type = 'A'; if ((page->header[k] && strstr((char *)page->header[k], "$CHAPTERPAGE")) || (page->footer[k] && strstr((char *)page->footer[k], "$CHAPTERPAGE"))) start = 1; } if ((prefptr = strstr(prefix, "$PAGE")) == NULL) prefptr = strstr(prefix, "$CHAPTERPAGE"); fprintf(out, "%d<</S/%c/St %d", i, type, start); if (prefptr) { *prefptr = '\0'; fputs("/P", out); write_string(out, (uchar *)prefix, 0); } fputs(">>", out); i += chapter_ends[j] - chapter_starts[j] + 1; } fputs("]>>", out); } pdf_end_object(out); offset = ftell(out); fputs("xref\n", out); fprintf(out, "0 %d \n", (int)num_objects + 1); fputs("0000000000 65535 f \n", out); for (i = 1; i <= (int)num_objects; i ++) fprintf(out, "%010d 00000 n \n", objects[i]); fputs("trailer\n", out); fputs("<<", out); fprintf(out, "/Size %d", (int)num_objects + 1); fprintf(out, "/Root %d 0 R", root_object); fprintf(out, "/Info %d 0 R", info_object); fputs("/ID[<", out); for (i = 0; i < 16; i ++) fprintf(out, "%02x", file_id[i]); fputs("><", out); for (i = 0; i < 16; i ++) fprintf(out, "%02x", file_id[i]); fputs(">]", out); if (Encryption) fprintf(out, "/Encrypt %d 0 R", encrypt_object); fputs(">>\n", out); fputs("startxref\n", out); fprintf(out, "%d\n", offset); fputs("%%EOF\n", out); } } /* * 'write_type1()' - Write an embedded Type 1 font. 
*/ static int /* O - Object number */ write_type1(FILE *out, /* I - File to write to */ typeface_t typeface, /* I - Typeface */ style_t style) /* I - Style */ { char filename[1024]; /* PFA filename */ FILE *fp; /* PFA file */ int ch; /* Character value */ int width; /* Width value */ char glyph[64], /* Glyph name */ line[1024], /* Line from AFM file */ *lineptr, /* Pointer into line */ *dataptr; /* Pointer for data */ int ascent, /* Ascent above baseline */ cap_height, /* Ascent of CAPITALS */ x_height, /* Ascent of lowercase */ descent, /* Decent below baseline */ bbox[4], /* Bounding box */ italic_angle; /* Angle for italics */ int widths[256]; /* Character widths */ int length1, /* Length1 value for font */ length2, /* Length2 value for font */ length3; /* Length3 value for font */ static int tflags[] = /* PDF typeface flags */ { 33, /* Courier */ 34, /* Times-Roman */ 32, /* Helvetica */ 33, /* Monospace */ 34, /* Serif */ 32, /* Sans */ 4, /* Symbol */ 4 /* Dingbats */ }; static int sflags[] = /* PDF style flags */ { 0, /* Normal */ 0, /* Bold */ 64, /* Italic */ 64 /* Bold-Italic */ }; /* * This function writes a Type1 font, either as an object for PDF * output or as an in-line font in PostScript output. This is useful * because the Type1 fonts that Adobe ships typically do not include * the full set of characters required by some of the ISO character * sets. */ /* * Try to open the PFA file for the Type1 font... */ snprintf(filename, sizeof(filename), "%s/fonts/%s.pfa", _htmlData, _htmlFonts[typeface][style]); if ((fp = fopen(filename, "r")) == NULL) { #ifndef DEBUG progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to open font file %s!", filename); #endif /* !DEBUG */ return (0); } /* * Write the font (object)... */ if (PSLevel) { /* * Embed a Type1 font in the PostScript output... */ fprintf(out, "%%%%BeginResource: font %s\n", _htmlFonts[typeface][style]); line[0] = '\0'; while (fgets(line, sizeof(line), fp) != NULL) fputs(line, out); if (line[strlen(line) - 1] != '\n') fputs("\n", out); fputs("%%EndResource\n", out); fclose(fp); } else { /* * Embed a Type1 font object in the PDF output... 
*/ length1 = 0; length2 = 0; length3 = 0; while (fgets(line, sizeof(line), fp) != NULL) { length1 += strlen(line); if (strstr(line, "currentfile eexec") != NULL) break; } while (fgets(line, sizeof(line), fp) != NULL) { if (!strcmp(line, "00000000000000000000000000000000" "00000000000000000000000000000000\n")) break; length2 += (strlen(line) - 1) / 2; } length3 = strlen(line); while (fgets(line, sizeof(line), fp) != NULL) length3 += strlen(line); rewind(fp); pdf_start_object(out); fprintf(out, "/Length1 %d", length1); fprintf(out, "/Length2 %d", length2); fprintf(out, "/Length3 %d", length3); if (Compression) fputs("/Filter/FlateDecode", out); pdf_start_stream(out); flate_open_stream(out); while (fgets(line, sizeof(line), fp) != NULL) { flate_puts(line, out); if (strstr(line, "currentfile eexec") != NULL) break; } while (fgets(line, sizeof(line), fp) != NULL) { if (!strcmp(line, "00000000000000000000000000000000" "00000000000000000000000000000000\n")) break; for (lineptr = line, dataptr = line; isxdigit(*lineptr); lineptr += 2) { if (isdigit(lineptr[0])) ch = (lineptr[0] - '0') << 4; else ch = (tolower(lineptr[0] & 255) - 'a' + 10) << 4; if (isdigit(lineptr[1])) ch |= lineptr[1] - '0'; else ch |= tolower(lineptr[1] & 255) - 'a' + 10; *dataptr++ = (char)ch; } flate_write(out, (uchar *)line, dataptr - line); } flate_puts(line, out); while (fgets(line, sizeof(line), fp) != NULL) flate_puts(line, out); flate_close_stream(out); pdf_end_object(out); fclose(fp); /* * Try to open the AFM file for the Type1 font... */ snprintf(filename, sizeof(filename), "%s/fonts/%s.afm", _htmlData, _htmlFonts[typeface][style]); if ((fp = fopen(filename, "r")) == NULL) { #ifndef DEBUG progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to open font width file %s!", filename); #endif /* !DEBUG */ return (0); } /* * Set the default values (Courier)... */ for (ch = 0; ch < 256; ch ++) widths[ch] = 600; ascent = 629; cap_height = 562; x_height = 426; descent = -157; bbox[0] = -28; bbox[1] = -250; bbox[2] = 628; bbox[3] = 805; italic_angle = 0; /* * Read the AFM file... */ while (fgets(line, sizeof(line), fp) != NULL) { if (strncmp(line, "ItalicAngle ", 12) == 0) italic_angle = atoi(line + 12); else if (strncmp(line, "FontBBox ", 9) == 0) sscanf(line + 9, "%d%d%d%d", bbox + 0, bbox + 1, bbox + 2, bbox + 3); else if (strncmp(line, "CapHeight ", 10) == 0) cap_height = atoi(line + 10); else if (strncmp(line, "XHeight ", 8) == 0) x_height = atoi(line + 8); else if (strncmp(line, "Ascender ", 9) == 0) ascent = atoi(line + 9); else if (strncmp(line, "Descender ", 10) == 0) descent = atoi(line + 10); else if (strncmp(line, "C ", 2) == 0) { if (typeface < TYPE_SYMBOL) { /* * Handle encoding of Courier, Times, and Helvetica using * assigned charset... */ if (sscanf(line, "%*s%*s%*s%*s%d%*s%*s%63s", &width, glyph) != 2) continue; for (ch = 0; ch < 256; ch ++) if (_htmlGlyphs[ch] && strcmp(_htmlGlyphs[ch], glyph) == 0) break; if (ch < 256) widths[ch] = width; } else { /* * Symbol font uses its own encoding... */ if (sscanf(line, "%*s%d%*s%*s%d", &ch, &width) != 2) continue; if (ch >= 0 && ch < 256) widths[ch] = width; } } } fclose(fp); /* * Write the font descriptor... 
*/ pdf_start_object(out); fputs("/Type/FontDescriptor", out); fprintf(out, "/Ascent %d", ascent); fprintf(out, "/Descent %d", descent); fprintf(out, "/CapHeight %d", cap_height); fprintf(out, "/XHeight %d", x_height); fprintf(out, "/FontBBox[%d %d %d %d]", bbox[0], bbox[1], bbox[2], bbox[3]); fprintf(out, "/ItalicAngle %d", italic_angle); fprintf(out, "/StemV %d", widths['v']); fprintf(out, "/Flags %d", tflags[typeface] | sflags[style]); fprintf(out, "/FontName/%s", _htmlFonts[typeface][style]); fprintf(out, "/FontFile %d 0 R", (int)num_objects - 1); pdf_end_object(out); /* * Write the character widths... */ pdf_start_object(out, 1); fprintf(out, "%d", widths[0]); for (ch = 1; ch < 256; ch ++) fprintf(out, " %d", widths[ch]); pdf_end_object(out); } /* * Return the font descriptor... */ return (num_objects - 1); } /* * 'write_utf16()' - Write a UTF-16 string... */ static void write_utf16(FILE *out, // I - File to write to uchar *s) // I - String to write { uchar *sptr; // Pointer into string /* * We start by checking to see if the string is composed only of * ASCII characters; if so, we can just write a normal string... */ for (sptr = s; *sptr && !(*sptr & 0x80); sptr ++); if (!*sptr) { /* * Write an ASCII string... */ write_string(out, s, 0); } else if (Encryption) { /* * Convert the string to Unicode and encrypt... */ int ch; // Character value uchar unicode[2], // Unicode character enicode[2]; // Encrypted unicode character putc('<', out); encrypt_init(); unicode[0] = 0xfe; // Start with BOM unicode[1] = 0xff; rc4_encrypt(&encrypt_state, unicode, enicode, 2); fprintf(out, "%02x%02x", enicode[0], enicode[1]); for (sptr = s; *sptr; sptr ++) { ch = _htmlUnicode[*sptr]; unicode[0] = (uchar)(ch >> 8); unicode[1] = (uchar)ch; rc4_encrypt(&encrypt_state, unicode, enicode, 2); fprintf(out, "%02x%02x", enicode[0], enicode[1]); } putc('>', out); } else { /* * Convert the string to Unicode... */ fputs("<feff", out); // Start with BOM for (sptr = s; *sptr; sptr ++) fprintf(out, "%04x", _htmlUnicode[*sptr]); putc('>', out); } } /* * 'encrypt_init()' - Initialize the RC4 encryption context for the current * object. */ static void encrypt_init(void) { int i; /* Looping var */ uchar data[21], /* Key data */ *dataptr; /* Pointer to key data */ md5_state_t md5; /* MD5 state */ md5_byte_t digest[16]; /* MD5 digest value */ /* * Compute the key data for the MD5 hash. */ for (i = 0, dataptr = data; i < encrypt_len; i ++) *dataptr++ = encrypt_key[i]; *dataptr++ = (uchar)num_objects; *dataptr++ = (uchar)(num_objects >> 8); *dataptr++ = (uchar)(num_objects >> 16); *dataptr++ = 0; *dataptr++ = 0; /* * Hash it... */ md5_init(&md5); md5_append(&md5, data, encrypt_len + 5); md5_finish(&md5, digest); /* * Initialize the RC4 context using the first N+5 bytes of the digest... */ if (encrypt_len > 11) rc4_init(&encrypt_state, digest, 16); else rc4_init(&encrypt_state, digest, (size_t)(encrypt_len + 5)); } /* * 'flate_open_stream()' - Open a deflated output stream. */ static void flate_open_stream(FILE *out) /* I - Output file */ { if (Encryption && !PSLevel) encrypt_init(); if (!Compression) return; compressor_active = 1; compressor.zalloc = (alloc_func)0; compressor.zfree = (free_func)0; compressor.opaque = (voidpf)0; deflateInit(&compressor, Compression); compressor.next_out = (Bytef *)comp_buffer; compressor.avail_out = sizeof(comp_buffer); } /* * 'flate_close_stream()' - Close a deflated output stream. 
*/ static void flate_close_stream(FILE *out) /* I - Output file */ { int status; /* Deflate status */ if (!Compression) { #ifdef HTMLDOC_ASCII85 if (PSLevel) ps_ascii85(out, (uchar *)"", 0, 1); #endif // HTMLDOC_ASCII85 return; } while ((status = deflate(&compressor, Z_FINISH)) != Z_STREAM_END) { if (status < Z_OK && status != Z_BUF_ERROR) { progress_error(HD_ERROR_OUT_OF_MEMORY, "deflate() failed (%d)", status); return; } if (PSLevel) #ifdef HTMLDOC_ASCII85 ps_ascii85(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #else ps_hex(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #endif // HTMLDOC_ASCII85 else { if (Encryption) rc4_encrypt(&encrypt_state, comp_buffer, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); fwrite(comp_buffer, (size_t)((uchar *)compressor.next_out - (uchar *)comp_buffer), 1, out); } compressor.next_out = (Bytef *)comp_buffer; compressor.avail_out = sizeof(comp_buffer); } if ((uchar *)compressor.next_out > (uchar *)comp_buffer) { if (PSLevel) #ifdef HTMLDOC_ASCII85 ps_ascii85(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #else ps_hex(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #endif // HTMLDOC_ASCII85 else { if (Encryption) rc4_encrypt(&encrypt_state, comp_buffer, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); fwrite(comp_buffer, (size_t)((uchar *)compressor.next_out - (uchar *)comp_buffer), 1, out); } } deflateEnd(&compressor); compressor_active = 0; #ifdef HTMLDOC_ASCII85 if (PSLevel) ps_ascii85(out, (uchar *)"", 0, 1); #else if (PSLevel) { // End of data marker... fputs(">\n", out); } #endif // HTMLDOC_ASCII85 } /* * 'flate_puts()' - Write a character string to a compressed stream. */ static void flate_puts(const char *s, /* I - String to write */ FILE *out) /* I - Output file */ { flate_write(out, (uchar *)s, strlen(s)); } /* * 'flate_printf()' - Write a formatted character string to a compressed stream. */ static void flate_printf(FILE *out, /* I - Output file */ const char *format, /* I - Format string */ ...) /* I - Additional args as necessary */ { int length; /* Length of output string */ char buf[10240]; /* Output buffer */ va_list ap; /* Argument pointer */ va_start(ap, format); length = vsnprintf(buf, sizeof(buf), format, ap); va_end(ap); flate_write(out, (uchar *)buf, length); } /* * 'flate_write()' - Write data to a compressed stream. */ static void flate_write(FILE *out, /* I - Output file */ uchar *buf, /* I - Buffer */ int length, /* I - Number of bytes to write */ int flush) /* I - Flush when writing data? */ { int status; /* Deflate status */ if (compressor_active) { compressor.next_in = buf; compressor.avail_in = (unsigned)length; while (compressor.avail_in > 0) { if (compressor.avail_out < (int)(sizeof(comp_buffer) / 8)) { if (PSLevel) #ifdef HTMLDOC_ASCII85 ps_ascii85(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #else ps_hex(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #endif // HTMLDOC_ASCII85 else { if (Encryption) rc4_encrypt(&encrypt_state, comp_buffer, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); fwrite(comp_buffer, (size_t)((uchar *)compressor.next_out - (uchar *)comp_buffer), 1, out); } compressor.next_out = (Bytef *)comp_buffer; compressor.avail_out = sizeof(comp_buffer); } status = deflate(&compressor, flush ? 
Z_FULL_FLUSH : Z_NO_FLUSH); if (status < Z_OK && status != Z_BUF_ERROR) { progress_error(HD_ERROR_OUT_OF_MEMORY, "deflate() failed (%d)", status); return; } flush = 0; } } else if (Encryption && !PSLevel) { int i, // Looping var bytes; // Number of bytes to encrypt/write uchar newbuf[1024]; // New encrypted data buffer for (i = 0; i < length; i += sizeof(newbuf)) { if ((bytes = length - i) > (int)sizeof(newbuf)) bytes = sizeof(newbuf); rc4_encrypt(&encrypt_state, buf + i, newbuf, (size_t)bytes); fwrite(newbuf, (size_t)bytes, 1, out); } } else if (PSLevel) #ifdef HTMLDOC_ASCII85 ps_ascii85(out, buf, length); #else ps_hex(out, buf, length); #endif // HTMLDOC_ASCII85 else fwrite(buf, (size_t)length, 1, out); }
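/*
 * A minimal usage sketch of the stream helpers above, assuming the usual
 * object-writing sequence in this file; "write_blob_object" and "payload"
 * are hypothetical names used only for illustration:
 */
#if 0 /* illustrative sketch, not compiled */
static void
write_blob_object(FILE  *out,		/* I - Output file */
                  uchar *payload,	/* I - Hypothetical stream data */
                  int   payload_len)	/* I - Number of bytes of data */
{
  pdf_start_object(out);		// Open the object dictionary

  if (Compression)
    fputs("/Filter/FlateDecode", out);	// Stream will be deflated

  pdf_start_stream(out);		// Emit the stream header
  flate_open_stream(out);		// Set up zlib and/or RC4 state

  flate_write(out, payload, payload_len);

  flate_close_stream(out);		// Flush deflate and end-of-data markers
  pdf_end_object(out);			// Finish the object bookkeeping
}
#endif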
null
/* * PostScript + PDF output routines for HTMLDOC, a HTML document processing * program. * * Just in case you didn't notice it, this file is too big; it will be * broken into more manageable pieces once we make all of the output * "drivers" into classes... * * Copyright © 2011-2021 by Michael R Sweet. * Copyright © 1997-2010 by Easy Software Products. All rights reserved. * * This program is free software. Distribution and use rights are outlined in * the file "COPYING". */ /* * Include necessary headers. */ /* * The GCC compiler on HP-UX has a nasty habit of incorrectly "fixing" * the vmtypes.h header file provided with HP-UX. The following * conditional magic makes sure that "page_t" (which we use in our * code) is not defined... */ #ifdef __hpux # define page_t hpux_page_t #endif // __hpux /*#define DEBUG*/ #include "htmldoc.h" #include "markdown.h" #include "md5-private.h" #define md5_append _cupsMD5Append #define md5_finish _cupsMD5Finish #define md5_init _cupsMD5Init typedef unsigned char md5_byte_t; #define md5_state_t _cups_md5_state_t #include "rc4.h" #include <stdarg.h> #include <ctype.h> #include <time.h> #include <math.h> #ifdef WIN32 # include <io.h> #else # include <unistd.h> #endif // WIN32 #include <fcntl.h> #include <zlib.h> extern "C" { /* Workaround for JPEG header problems... */ #include <jpeglib.h> /* JPEG/JFIF image definitions */ } #ifdef __hpux # undef page_t #endif // __hpux /* * Output options... */ #define HTMLDOC_ASCII85 //#define HTMLDOC_INTERPOLATION #define HTMLDOC_PRODUCER "htmldoc " SVERSION " Copyright 2011-2019 by Michael R Sweet" /* * Constants... */ #define RENDER_TEXT 0 /* Text fragment */ #define RENDER_IMAGE 1 /* Image */ #define RENDER_BOX 2 /* Box */ #define RENDER_LINK 3 /* Hyperlink */ #define RENDER_BG 4 /* Background image */ /* * Structures... */ typedef struct render_str /**** Render entity structure ****/ { struct render_str *prev; /* Previous rendering entity */ struct render_str *next; /* Next rendering entity */ int type; /* Type of entity */ float x, /* Position in points */ y, /* ... */ width, /* Size in points */ height; /* ... */ union { struct { int typeface, /* Typeface for text */ style; /* Style of text */ float size; /* Size of text in points */ float spacing; /* Inter-character spacing */ float rgb[3]; /* Color of text */ uchar buffer[1]; /* String buffer */ } text; image_t *image; /* Image pointer */ float box[3]; /* Box color */ uchar link[1]; /* Link URL */ } data; } render_t; typedef struct /**** Named link position structure */ { short page, /* Page # */ top; /* Top position */ uchar name[124]; /* Reference name */ } link_t; typedef struct //// Page information { int width, // Width of page in points length, // Length of page in points left, // Left margin in points right, // Right margin in points top, // Top margin in points bottom, // Bottom margin in points duplex, // Duplex this page? landscape; // Landscape orientation? 
render_t *start, // First render element *end; // Last render element uchar *url, // URL/file *chapter, // Chapter text *heading; // Heading text tree_t *headnode; // Heading node uchar *header[3], // Headers for regular pages *header1[3], // Headers for first pages *footer[3]; // Footers for all pages char media_color[64], // Media color media_type[64]; // Media type int media_position; // Media position char page_text[64]; // Page number for TOC image_t *background_image; // Background image float background_color[3]; // Background color // Number-up support int nup; // Number up pages int outpage; // Output page # float outmatrix[2][3]; // Transform matrix } page_t; typedef struct //// Output page info { int nup; // Number up pages int pages[16]; // Pages on this output page int annot_object; // Annotation object } outpage_t; /* * Local globals... */ static time_t doc_time; // Current time static struct tm doc_date; // Current date static uchar *current_url = NULL; static int title_page; static int chapter, chapter_outstarts[MAX_CHAPTERS], chapter_outends[MAX_CHAPTERS], chapter_starts[MAX_CHAPTERS], chapter_ends[MAX_CHAPTERS]; static size_t num_headings = 0, alloc_headings = 0; static int *heading_pages = NULL, *heading_tops = NULL; static size_t num_pages = 0, alloc_pages = 0; static page_t *pages = NULL; static tree_t *current_heading; static size_t num_outpages = 0; static outpage_t *outpages = NULL; static size_t num_links = 0, alloc_links = 0; static link_t *links = NULL; static uchar list_types[16]; static int list_values[16]; static char stdout_filename[256]; static size_t num_objects = 0, alloc_objects = 0; static int *objects = NULL, root_object, info_object, outline_object, pages_object, names_object, encrypt_object, font_objects[TYPE_MAX * STYLE_MAX]; static uchar *doc_title = NULL; static image_t *logo_image = NULL; static float logo_width, logo_height; static image_t *lh_image = NULL; static float lh_width, lh_height; static image_t *hfimage[MAX_HF_IMAGES]; static float hfimage_width[MAX_HF_IMAGES], hfimage_height[MAX_HF_IMAGES]; static float maxhfheight; static image_t *background_image = NULL; static float background_color[3] = { 1.0, 1.0, 1.0 }, link_color[3] = { 0.0, 0.0, 1.0 }; static int render_typeface, render_style; static float render_size, render_rgb[3], render_x, render_y, render_startx, render_spacing; static int compressor_active = 0; static z_stream compressor; static uchar comp_buffer[8192]; static uchar encrypt_key[16]; static int encrypt_len; static rc4_context_t encrypt_state; static md5_byte_t file_id[16]; /* * Local functions... 
*/ extern "C" { typedef int (*compare_func_t)(const void *, const void *); } static void pspdf_debug_stats(); static void pspdf_transform_coords(page_t *p, float &x, float &y); static void pspdf_transform_page(int outpage, int pos, int page); static void pspdf_prepare_outpages(); static void pspdf_prepare_page(int page); static void pspdf_prepare_heading(int page, int print_page, uchar **format, int y, char *page_text, int page_len); static void ps_write_document(uchar *author, uchar *creator, uchar *copyright, uchar *keywords, uchar *subject, uchar *lang); static void ps_write_outpage(FILE *out, int outpage); static void ps_write_page(FILE *out, int page); static void ps_write_background(FILE *out); static void pdf_write_document(uchar *author, uchar *creator, uchar *copyright, uchar *keywords, uchar *subject, uchar *lang, tree_t *doc, tree_t *toc); static void pdf_write_outpage(FILE *out, int outpage); static void pdf_write_page(FILE *out, int page); static void pdf_write_resources(FILE *out, int page); #ifdef DEBUG_TOC static void pdf_text_contents(FILE *out, tree_t *toc, int indent = 0); #endif // DEBUG_TOC static void pdf_write_contents(FILE *out, tree_t *toc, int parent, int prev, int next, int *heading); static void pdf_write_files(FILE *out, tree_t *doc); static void pdf_write_links(FILE *out); static void pdf_write_names(FILE *out); static int pdf_count_headings(tree_t *toc); static int pdf_start_object(FILE *out, int array = 0); static void pdf_start_stream(FILE *out); static void pdf_end_object(FILE *out); static void encrypt_init(void); static void flate_open_stream(FILE *out); static void flate_close_stream(FILE *out); static void flate_puts(const char *s, FILE *out); static void flate_printf(FILE *out, const char *format, ...); static void flate_write(FILE *out, uchar *inbuf, int length, int flush=0); static void parse_contents(tree_t *t, float left, float width, float bottom, float length, float *y, int *page, int *heading, tree_t *chap); static void parse_doc(tree_t *t, float *left, float *right, float *bottom, float *top, float *x, float *y, int *page, tree_t *cpara, int *needspace); static void parse_heading(tree_t *t, float left, float width, float bottom, float length, float *x, float *y, int *page, int needspace); static void parse_paragraph(tree_t *t, float left, float width, float bottom, float length, float *x, float *y, int *page, int needspace); static void parse_pre(tree_t *t, float left, float width, float bottom, float length, float *x, float *y, int *page, int needspace); static void parse_table(tree_t *t, float left, float width, float bottom, float length, float *x, float *y, int *page, int needspace); static void parse_list(tree_t *t, float *left, float *width, float *bottom, float *length, float *x, float *y, int *page, int needspace); static void init_list(tree_t *t); static void parse_comment(tree_t *t, float *left, float *width, float *bottom, float *length, float *x, float *y, int *page, tree_t *para, int needspace); static void check_pages(int page); static void add_link(uchar *name, int page, int top); static link_t *find_link(uchar *name); static int compare_links(link_t *n1, link_t *n2); static void find_background(tree_t *t); static void write_background(int page, FILE *out); static render_t *new_render(int page, int type, double x, double y, double width, double height, void *data, render_t *insert = 0); static float get_cell_size(tree_t *t, float left, float right, float *minwidth, float *prefwidth, float *minheight); static float 
get_table_size(tree_t *t, float left, float right, float *minwidth, float *prefwidth, float *minheight); static tree_t *flatten_tree(tree_t *t); static float get_width(uchar *s, int typeface, int style, int size); static void update_image_size(tree_t *t); static uchar *get_title(tree_t *doc); static FILE *open_file(void); static void set_color(FILE *out, float *rgb); static void set_font(FILE *out, int typeface, int style, float size); static void set_pos(FILE *out, float x, float y); static void write_prolog(FILE *out, int pages, uchar *author, uchar *creator, uchar *copyright, uchar *keywords, uchar *subject); static void ps_hex(FILE *out, uchar *data, int length); #ifdef HTMLDOC_ASCII85 static void ps_ascii85(FILE *out, uchar *data, int length, int eod = 0); #endif // HTMLDOC_ASCII85 static void jpg_init(j_compress_ptr cinfo); static boolean jpg_empty(j_compress_ptr cinfo); static void jpg_term(j_compress_ptr cinfo); static void jpg_setup(FILE *out, image_t *img, j_compress_ptr cinfo); static int compare_rgb(unsigned *rgb1, unsigned *rgb2); static void write_image(FILE *out, render_t *r, int write_obj = 0); static void write_imagemask(FILE *out, render_t *r); static void write_string(FILE *out, uchar *s, int compress); static void write_text(FILE *out, render_t *r); static void write_trailer(FILE *out, int pages, uchar *lang); static int write_type1(FILE *out, typeface_t typeface, style_t style); static void write_utf16(FILE *out, uchar *s); /* * 'pspdf_export()' - Export PostScript/PDF file(s)... */ int pspdf_export(tree_t *document, /* I - Document to export */ tree_t *toc) /* I - Table of contents for document */ { int i, j; /* Looping vars */ const char *title_file; /* Location of title image/file */ uchar *author, /* Author of document */ *creator, /* HTML file creator (Netscape, etc) */ *copyright, /* File copyright */ *docnumber, /* Document number */ *keywords, /* Search keywords */ *subject, /* Subject */ *lang; /* Language */ tree_t *t; /* Title page document tree */ FILE *fp; /* Title page file */ float x, y, /* Current page position */ left, right, /* Left and right margins */ bottom, top, /* Bottom and top margins */ width, /* Width of , author, etc */ height; /* Height of area */ int page, /* Current page # */ pos, /* Current header/footer position */ heading, /* Current heading # */ toc_duplex, /* Duplex TOC pages? */ toc_landscape, /* Do TOC in landscape? */ toc_width, /* Width of TOC pages */ toc_length, /* Length of TOC pages */ toc_left, /* TOC page margins */ toc_right, toc_bottom, toc_top; image_t *timage; /* Title image */ float timage_width, /* Title image width */ timage_height; /* Title image height */ render_t *r; /* Rendering structure... */ float rgb[3]; /* Text color */ int needspace; /* Need whitespace */ /* * Figure out the printable area of the output page... */ if (Landscape) { PagePrintWidth = PageLength - PageLeft - PageRight; PagePrintLength = PageWidth - PageTop - PageBottom; } else { PagePrintWidth = PageWidth - PageLeft - PageRight; PagePrintLength = PageLength - PageTop - PageBottom; } toc_width = PageWidth; toc_length = PageLength; toc_left = PageLeft; toc_right = PageRight; toc_bottom = PageBottom; toc_top = PageTop; toc_landscape = Landscape; toc_duplex = PageDuplex; /* * Get the document title, author, etc... 
*/ doc_title = get_title(document); author = htmlGetMeta(document, (uchar *)"author"); creator = htmlGetMeta(document, (uchar *)"generator"); copyright = htmlGetMeta(document, (uchar *)"copyright"); docnumber = htmlGetMeta(document, (uchar *)"docnumber"); keywords = htmlGetMeta(document, (uchar *)"keywords"); subject = htmlGetMeta(document, (uchar *)"subject"); lang = htmlGetMeta(document, (uchar *)"lang"); logo_image = image_load(LogoImage, !OutputColor); lh_image = image_load(Letterhead, !OutputColor); maxhfheight = 0.0f; if (docnumber == NULL) docnumber = htmlGetMeta(document, (uchar *)"version"); if (lh_image != NULL) { lh_width = (float)(lh_image->width * PagePrintWidth / _htmlBrowserWidth); lh_height = (float)(lh_width * lh_image->height / lh_image->width); if (lh_height > maxhfheight) maxhfheight = lh_height; } else lh_width = lh_height = 0.0f; if (logo_image != NULL) { logo_width = (float)(logo_image->width * PagePrintWidth / _htmlBrowserWidth); logo_height = (float)(logo_width * logo_image->height / logo_image->width); if (logo_height > (2.0 * HeadFootSize)) { // Issue #273: too large logo image will overlap the body text, so cap // the height of the logo image to the header/footer size... // // Issue #303: regression prevents using header/footer images for special // underlining/etc. effects. logo_height = (float)(2.0 * HeadFootSize); logo_width = logo_height * logo_image->width / logo_image->height; } if (logo_height > maxhfheight) maxhfheight = logo_height; } else logo_width = logo_height = 0.0f; for (int hfi = 0; hfi < MAX_HF_IMAGES; hfi ++) { hfimage[hfi] = image_load(HFImage[hfi], !OutputColor); if (hfimage[hfi]) { hfimage_width[hfi] = (float)(hfimage[hfi]->width * PagePrintWidth / _htmlBrowserWidth); hfimage_height[hfi] = (float)(hfimage_width[hfi] * hfimage[hfi]->height / hfimage[hfi]->width); if (hfimage_height[hfi] > (2.0 * HeadFootSize)) { // Issue #273: too large logo image will overlap the body text, so cap // the height of the logo image to the header/footer size... // // Issue #303: regression prevents using header/footer images for special // underlining/etc. effects. hfimage_height[hfi] = (float)(2.0 * HeadFootSize); hfimage_width[hfi] = hfimage_height[hfi] * hfimage[hfi]->width / hfimage[hfi]->height; } if (hfimage_height[hfi] > maxhfheight) maxhfheight = hfimage_height[hfi]; } else hfimage_width[hfi] = hfimage_height[hfi] = 0.0f; } find_background(document); get_color((uchar *)LinkColor, link_color); /* * Initialize page rendering variables... */ num_pages = 0; alloc_pages = 0; pages = NULL; memset(list_types, 0267, sizeof(list_types)); memset(list_values, 0, sizeof(list_values)); memset(chapter_starts, -1, sizeof(chapter_starts)); memset(chapter_ends, -1, sizeof(chapter_starts)); /* * Get the current date, using the SOURCE_DATE_EPOCH environment variable, if * present, for the number of seconds since the epoch - this enables * reproducible builds (Issue #310). 
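 *
 * For example, exporting SOURCE_DATE_EPOCH=1609459200 before running
 * htmldoc pins the embedded date to 2021-01-01 00:00:00 UTC no matter
 * when the file is generated; an unset or non-positive value falls back
 * to the current time below.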
*/ const char *source_date_epoch = getenv("SOURCE_DATE_EPOCH"); if (!source_date_epoch || (doc_time = (time_t)strtol(source_date_epoch, NULL, 10)) <= 0) doc_time = time(NULL); gmtime_r(&doc_time, &doc_date); num_headings = 0; alloc_headings = 0; heading_pages = NULL; heading_tops = NULL; num_links = 0; alloc_links = 0; links = NULL; num_pages = 0; DEBUG_printf(("pspdf_export: TitlePage = %d, TitleImage = \"%s\"\n", TitlePage, TitleImage)); if (TitlePage) { const char *title_ext = file_extension(TitleImage); #ifdef WIN32 if (TitleImage[0] && stricmp(title_ext, "bmp") != 0 && stricmp(title_ext, "gif") != 0 && stricmp(title_ext, "jpg") != 0 && stricmp(title_ext, "png") != 0) #else if (TitleImage[0] && strcmp(title_ext, "bmp") != 0 && strcmp(title_ext, "gif") != 0 && strcmp(title_ext, "jpg") != 0 && strcmp(title_ext, "png") != 0) #endif // WIN32 { DEBUG_printf(("pspdf_export: Generating a titlepage using \"%s\"\n", TitleImage)); // Find the title file... if ((title_file = file_find(Path, TitleImage)) == NULL) { progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to find title file \"%s\"!", TitleImage); return (1); } // Write a title page from HTML source... if ((fp = fopen(title_file, "rb")) == NULL) { progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to open title file \"%s\" - %s!", TitleImage, strerror(errno)); return (1); } #ifdef _WIN32 if (!stricmp(title_ext, "md")) #else if (!strcmp(title_ext, "md")) #endif // _WIN32 t = mdReadFile(NULL, fp, file_directory(TitleImage)); else t = htmlReadFile(NULL, fp, file_directory(TitleImage)); htmlFixLinks(t, t, (uchar *)file_directory(TitleImage)); fclose(fp); page = 0; title_page = 1; current_heading = NULL; x = 0.0f; bottom = 0.0f; top = PagePrintLength; y = top; needspace = 0; left = 0.0f; right = PagePrintWidth; parse_doc(t, &left, &right, &bottom, &top, &x, &y, &page, NULL, &needspace); if (PageDuplex && (num_pages & 1)) check_pages(num_pages); htmlDeleteTree(t); } else { /* * Create a standard title page... 
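 *
 * The standard title page is centered vertically: the combined height of
 * the title image, title, document number, author, and copyright lines is
 * summed first, rendering then starts at y = (PagePrintLength + height) / 2
 * and works downward element by element using the _htmlSpacings[] values.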
*/ if ((timage = image_load(TitleImage, !OutputColor)) != NULL) { timage_width = (float)(timage->width * PagePrintWidth / _htmlBrowserWidth); timage_height = (float)(timage_width * timage->height / timage->width); } else timage_width = timage_height = 0.0f; check_pages(0); if (PageDuplex) check_pages(1); height = 0.0; if (timage != NULL) height += timage_height + _htmlSpacings[SIZE_P]; if (doc_title != NULL) height += _htmlSpacings[SIZE_H1] + _htmlSpacings[SIZE_P]; if (author != NULL) height += _htmlSpacings[SIZE_P]; if (docnumber != NULL) height += _htmlSpacings[SIZE_P]; if (copyright != NULL) height += _htmlSpacings[SIZE_P]; y = 0.5f * (PagePrintLength + height); if (timage != NULL) { new_render(0, RENDER_IMAGE, 0.5f * (PagePrintWidth - timage_width), y - timage_height, timage_width, timage_height, timage); y -= timage_height + _htmlSpacings[SIZE_P]; } get_color(_htmlTextColor, rgb); if (doc_title != NULL) { width = get_width(doc_title, _htmlHeadingFont, STYLE_BOLD, SIZE_H1); r = new_render(0, RENDER_TEXT, (PagePrintWidth - width) * 0.5f, y - _htmlSpacings[SIZE_H1], width, _htmlSizes[SIZE_H1], doc_title); r->data.text.typeface = _htmlHeadingFont; r->data.text.style = STYLE_BOLD; r->data.text.size = (float)_htmlSizes[SIZE_H1]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); y -= _htmlSpacings[SIZE_H1]; if (docnumber != NULL) { width = get_width(docnumber, _htmlBodyFont, STYLE_NORMAL, SIZE_P); r = new_render(0, RENDER_TEXT, (PagePrintWidth - width) * 0.5f, y - _htmlSpacings[SIZE_P], width, _htmlSizes[SIZE_P], docnumber); r->data.text.typeface = _htmlBodyFont; r->data.text.style = STYLE_NORMAL; r->data.text.size = (float)_htmlSizes[SIZE_P]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); y -= _htmlSpacings[SIZE_P]; } y -= _htmlSpacings[SIZE_P]; } if (author != NULL) { width = get_width(author, _htmlBodyFont, STYLE_NORMAL, SIZE_P); r = new_render(0, RENDER_TEXT, (PagePrintWidth - width) * 0.5f, y - _htmlSpacings[SIZE_P], width, _htmlSizes[SIZE_P], author); r->data.text.typeface = _htmlBodyFont; r->data.text.style = STYLE_NORMAL; r->data.text.size = (float)_htmlSizes[SIZE_P]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); y -= _htmlSpacings[SIZE_P]; } if (copyright != NULL) { width = get_width(copyright, _htmlBodyFont, STYLE_NORMAL, SIZE_P); r = new_render(0, RENDER_TEXT, (PagePrintWidth - width) * 0.5f, y - _htmlSpacings[SIZE_P], width, _htmlSizes[SIZE_P], copyright); r->data.text.typeface = _htmlBodyFont; r->data.text.style = STYLE_NORMAL; r->data.text.size = (float)_htmlSizes[SIZE_P]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); } } for (page = 0; page < (int)num_pages; page ++) strlcpy((char *)pages[page].page_text, (page & 1) ? "eltit" : "title", sizeof(pages[page].page_text)); } else page = 0; /* * Parse the document... */ if (OutputType == OUTPUT_BOOK) chapter = 0; else { chapter = 1; TocDocCount = 1; chapter_starts[1] = num_pages; } title_page = 0; current_heading = NULL; x = 0.0f; needspace = 0; left = 0.0f; right = PagePrintWidth; // Adjust top margin as needed... 
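// The rule implemented below: if any header string references $IMAGE,
// $HFIMAGE, or $LETTERHEAD, reserve the larger of (tallest header/footer
// image + HeadFootSize) and 2 * HeadFootSize; any other non-empty header
// reserves 2 * HeadFootSize; no header reserves nothing.  For example,
// with an 11-point HeadFootSize and no header images, 22 points are kept
// free at the top of the body area.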
float adjust, image_adjust, temp_adjust; if (maxhfheight > HeadFootSize) image_adjust = (float)(maxhfheight + HeadFootSize); else image_adjust = (float)(2 * HeadFootSize); for (adjust = 0.0, pos = 0; pos < 3; pos ++) { if (Header[pos] && (strstr(Header[pos], "$IMAGE") != NULL || strstr(Header[pos], "$HFIMAGE") != NULL || strstr(Header[pos], "$LETTERHEAD") != NULL)) temp_adjust = image_adjust; else if (Header1[pos] && (strstr(Header1[pos], "$IMAGE") != NULL || strstr(Header1[pos], "$HFIMAGE") != NULL || strstr(Header1[pos], "$LETTERHEAD") != NULL)) temp_adjust = image_adjust; else if (Header[pos] || Header1[pos]) temp_adjust = (float)(2 * HeadFootSize); else temp_adjust = 0.0; if (temp_adjust > adjust) adjust = temp_adjust; } top = PagePrintLength - adjust; // Adjust bottom margin as needed... for (adjust = 0.0, pos = 0; pos < 3; pos ++) { if (Footer[pos] && (strstr(Footer[pos], "$IMAGE") != NULL || strstr(Footer[pos], "$HFIMAGE") != NULL || strstr(Footer[pos], "$LETTERHEAD") != NULL)) temp_adjust = image_adjust; else if (Footer[pos]) temp_adjust = (float)(2 * HeadFootSize); else temp_adjust = 0.0; if (temp_adjust > adjust) adjust = temp_adjust; } bottom = adjust; y = top; parse_doc(document, &left, &right, &bottom, &top, &x, &y, &page, NULL, &needspace); if (PageDuplex && (num_pages & 1)) { if (PSLevel == 0) chapter_ends[chapter] = num_pages - 1; check_pages(num_pages); if (PSLevel > 0) chapter_ends[chapter] = num_pages - 1; } else chapter_ends[chapter] = num_pages - 1; for (chapter = 1; chapter <= TocDocCount; chapter ++) for (page = chapter_starts[chapter]; page <= chapter_ends[chapter]; page ++) pspdf_prepare_page(page); /* * Parse the table-of-contents if necessary... */ if (TocLevels > 0 && num_headings > 0) { // Restore default page size, etc... PageWidth = toc_width; PageLength = toc_length; PageLeft = toc_left; PageRight = toc_right; PageBottom = toc_bottom; PageTop = toc_top; Landscape = toc_landscape; PageDuplex = toc_duplex; if (Landscape) { PagePrintWidth = PageLength - PageLeft - PageRight; PagePrintLength = PageWidth - PageTop - PageBottom; } else { PagePrintWidth = PageWidth - PageLeft - PageRight; PagePrintLength = PageLength - PageTop - PageBottom; } // Adjust top margin as needed... for (pos = 0; pos < 3; pos ++) if (TocHeader[pos]) break; if (pos == 3) top = PagePrintLength; else if (maxhfheight > HeadFootSize) top = (float)(PagePrintLength - maxhfheight - HeadFootSize); else top = (float)(PagePrintLength - 2 * HeadFootSize); // Adjust bottom margin as needed... for (pos = 0; pos < 3; pos ++) if (TocFooter[pos]) break; if (pos == 3) bottom = 0.0f; else if (maxhfheight > HeadFootSize) bottom = (float)(maxhfheight + HeadFootSize); else bottom = (float)(2 * HeadFootSize); y = 0.0; page = num_pages - 1; heading = 0; chapter_starts[0] = num_pages; chapter = 0; parse_contents(toc, 0, PagePrintWidth, bottom, top, &y, &page, &heading, 0); if (PageDuplex && (num_pages & 1)) check_pages(num_pages); chapter_ends[0] = num_pages - 1; for (page = chapter_starts[0]; page <= chapter_ends[0]; page ++) pspdf_prepare_page(page); } if (TocDocCount > MAX_CHAPTERS) TocDocCount = MAX_CHAPTERS; /* * Do we have any pages? */ if (num_pages > 0 && TocDocCount > 0) { /* * Yes, write the document to disk... 
*/ pspdf_prepare_outpages(); pspdf_debug_stats(); progress_error(HD_ERROR_NONE, "PAGES: %d", (int)num_outpages); if (PSLevel > 0) ps_write_document(author, creator, copyright, keywords, subject, lang); else pdf_write_document(author, creator, copyright, keywords, subject, lang, document, toc); } else { /* * No, show an error... */ pspdf_debug_stats(); progress_error(HD_ERROR_NO_PAGES, "Error: no pages generated! (did you remember to use webpage mode?"); } /* * Free memory... */ if (doc_title != NULL) free(doc_title); if (alloc_links) { free(links); num_links = 0; alloc_links = 0; links = NULL; } for (i = 0; i < (int)num_pages; i ++) { if ((i == 0 || pages[i].chapter != pages[i - 1].chapter) && pages[i].chapter) free(pages[i].chapter); if ((i == 0 || pages[i].heading != pages[i - 1].heading) && pages[i].heading) free(pages[i].heading); if (!pages[i].heading) continue; for (j = 0; j < 3; j ++) { if (!pages[i].header[j]) continue; if (i == 0 || pages[i].header[j] != pages[i - 1].header[j]) free(pages[i].header[j]); } for (j = 0; j < 3; j ++) { if (!pages[i].header1[j]) continue; if (i == 0 || pages[i].header1[j] != pages[i - 1].header1[j]) free(pages[i].header1[j]); } for (j = 0; j < 3; j ++) { if (!pages[i].footer[j]) continue; if (i == 0 || pages[i].footer[j] != pages[i - 1].footer[j]) free(pages[i].footer[j]); } } for (i = 0; i < 3; i ++) { Header[i] = NULL; Header1[i] = NULL; Footer[i] = NULL; TocHeader[i] = NULL; TocFooter[i] = NULL; } if (alloc_pages) { free(pages); free(outpages); num_pages = 0; alloc_pages = 0; pages = NULL; } if (alloc_headings) { free(heading_pages); free(heading_tops); num_headings = 0; alloc_headings = 0; heading_pages = NULL; heading_tops = NULL; } return (0); } // // 'pspdf_debug_stats()' - Display debug statistics for render memory use. // static void pspdf_debug_stats() { const char *debug; // HTMLDOC_DEBUG env var int i; // Looping var render_t *r; // Render node int bytes; // Number of bytes if ((debug = getenv("HTMLDOC_DEBUG")) == NULL || (strstr(debug, "all") == NULL && strstr(debug, "memory") == NULL)) return; bytes = alloc_headings * sizeof(int) * 2; bytes += alloc_pages * sizeof(page_t); for (i = 0; i < (int)num_pages; i ++) { for (r = pages[i].start; r != NULL; r = r->next) { bytes += sizeof(render_t); if (r->type == RENDER_TEXT) bytes += strlen((char *)r->data.text.buffer); } } bytes += num_outpages * sizeof(outpage_t); bytes += alloc_links * sizeof(link_t); bytes += alloc_objects * sizeof(int); progress_error(HD_ERROR_NONE, "DEBUG: Render Data = %d kbytes", (bytes + 1023) / 1024); } /* * 'pspdf_transform_coords()' - Transform page coordinates. */ static void pspdf_transform_coords(page_t *p, // I - Page float &x, // IO - X coordinate float &y) // IO - Y coordinate { float tx, ty; // Temporary X and Y tx = x; ty = y; x = tx * p->outmatrix[0][0] + ty * p->outmatrix[0][1] + p->outmatrix[0][2]; y = tx * p->outmatrix[1][0] + ty * p->outmatrix[1][1] + p->outmatrix[1][2]; } /* * 'pspdf_transform_page()' - Transform a page. 
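 *
 * A minimal sketch of the 2x3 "outmatrix" used by the N-up code (a
 * hypothetical standalone helper, not part of the original source; only
 * the matrix layout matches pspdf_transform_coords() above):
 */

// Apply m to (x, y): x' = m[0][0]*x + m[0][1]*y + m[0][2], same idea for y'.
static inline void
sketch_apply_outmatrix(const float m[2][3],     // I - Transform matrix
                       float       &x,          // IO - X coordinate
                       float       &y)          // IO - Y coordinate
{
  float tx = x, ty = y;                         // Untransformed values

  x = tx * m[0][0] + ty * m[0][1] + m[0][2];
  y = tx * m[1][0] + ty * m[1][1] + m[1][2];
}

// Build a pure scale + translate matrix as used by the upright layouts
// (4-, 9-, and 16-up) below; the rotated layouts (2- and 6-up) instead put
// +/- the scale factor in the off-diagonal entries.
static inline void
sketch_scale_translate(float m[2][3],           // O - Transform matrix
                       float scale,             // I - Uniform scale factor
                       float dx,                // I - X translation
                       float dy)                // I - Y translation
{
  m[0][0] = scale; m[0][1] = 0.0f;  m[0][2] = dx;
  m[1][0] = 0.0f;  m[1][1] = scale; m[1][2] = dy;
}

/*
 * The original pspdf_transform_page() implementation follows.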
*/ static void pspdf_transform_page(int outpage, // I - Output page int pos, // I - Position on page int page) // I - Input page { outpage_t *op; // Current output page page_t *bp; // Current base page page_t *p; // Current input page int x, y; // Position on output page double w, l, // Width and length of subpage tx, ty; // Translation values for subpage double pw, pl; // Printable width and length of full page DEBUG_printf(("pspdf_transform_page(outpage = %d, pos = %d, page = %d)\n", outpage, pos, page)); if (pos > 15) progress_error(HD_ERROR_INTERNAL_ERROR, "Internal error: pos = %d", pos); op = outpages + outpage; op->pages[pos] = page; bp = pages + op->pages[0]; p = pages + page; p->outpage = outpage; pw = bp->width; pl = bp->length; DEBUG_printf((" width = %d, length = %d\n", p->width, p->length)); switch (op->nup) { default : case 1 : p->outmatrix[0][0] = 1.0f; p->outmatrix[1][0] = 0.0f; p->outmatrix[0][1] = 0.0f; p->outmatrix[1][1] = 1.0f; p->outmatrix[0][2] = 0.0f; p->outmatrix[1][2] = 0.0f; break; case 2 : x = pos & 1; l = pw; w = l * p->width / p->length; if (w > (pl * 0.5f)) { w = pl * 0.5f; l = w * p->length / p->width; } tx = 0.5 * (pl * 0.5 - w); ty = 0.5 * (pw - l); p->outmatrix[0][0] = 0.0f; p->outmatrix[1][0] = (float)(w / p->width); p->outmatrix[0][1] = (float)(-w / p->width); p->outmatrix[1][1] = 0.0f; p->outmatrix[0][2] = (float)(ty + pl * w / p->width); p->outmatrix[1][2] = (float)(tx + x * pl / 2); break; case 4 : x = pos & 1; y = 1 - pos / 2; w = pw * 0.5; l = w * p->length / p->width; if (l > (pl * 0.5)) { l = pl * 0.5; w = l * p->width / p->length; } tx = 0.5 * (pw * 0.5 - w); ty = 0.5 * (pl * 0.5 - l); p->outmatrix[0][0] = (float)(w / p->width); p->outmatrix[1][0] = 0.0f; p->outmatrix[0][1] = 0.0f; p->outmatrix[1][1] = (float)(w / p->width); p->outmatrix[0][2] = (float)(tx + x * pw / 2); p->outmatrix[1][2] = (float)(ty + y * pl / 2); break; case 6 : x = pos % 3; y = pos / 3; l = pw * 0.5; w = l * p->width / p->length; if (w > (pl * 0.333f)) { w = pl * 0.333f; l = w * p->length / p->width; } tx = 0.5 * (pl * 0.333 - w); ty = 0.5 * (pw * 0.5 - l); p->outmatrix[0][0] = 0.0f; p->outmatrix[1][0] = (float)(w / p->width); p->outmatrix[0][1] = (float)(-w / p->width); p->outmatrix[1][1] = 0.0f; p->outmatrix[0][2] = (float)(ty + y * pw / 2 + pl * w / p->width); p->outmatrix[1][2] = (float)(tx + x * pl / 3); break; case 9 : x = pos % 3; y = 2 - pos / 3; w = pw * 0.333; l = w * p->length / p->width; if (l > (pl * 0.333)) { l = pl * 0.333; w = l * p->width / p->length; } tx = 0.5 * (pw * 0.333 - w); ty = 0.5 * (pl * 0.333 - l); p->outmatrix[0][0] = (float)(w / p->width); p->outmatrix[1][0] = 0.0f; p->outmatrix[0][1] = 0.0f; p->outmatrix[1][1] = (float)(w / p->width); p->outmatrix[0][2] = (float)(tx + x * pw / 3); p->outmatrix[1][2] = (float)(ty + y * pl / 3); break; case 16 : x = pos & 3; y = 3 - pos / 4; w = pw * 0.25; l = w * p->length / p->width; if (l > (pl * 0.25)) { l = pl * 0.25; w = l * p->width / p->length; } tx = 0.5 * (pw * 0.25 - w); ty = 0.5 * (pl * 0.25 - l); p->outmatrix[0][0] = (float)(w / p->width); p->outmatrix[1][0] = 0.0f; p->outmatrix[0][1] = 0.0f; p->outmatrix[1][1] = (float)(w / p->width); p->outmatrix[0][2] = (float)(tx + x * pw / 4); p->outmatrix[1][2] = (float)(ty + y * pl / 4); break; } } /* * 'pspdf_prepare_outpages()' - Prepare output pages... 
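 *
 * The grouping rule used below, in isolation: consecutive input pages
 * share an output page until either "nup" of them have been placed or the
 * page's number-up setting changes.  A hypothetical sketch (not part of
 * the original source):
 */

static inline int                               // O - Number of output pages
sketch_count_outpages(const int *page_nup,      // I - Per-page number-up values
                      int       count)          // I - Number of input pages
{
  int num_out = 0;                      // Output pages so far
  int placed  = 0;                      // Input pages on the current output page
  int nup     = -1;                     // Number-up of the current output page

  for (int i = 0; i < count; i ++)
  {
    if (page_nup[i] != nup)
    {
      if (placed)
        num_out ++;                     // Break the current output page...

      nup    = page_nup[i];
      placed = 0;
    }

    placed ++;

    if (placed >= nup)
    {
      placed = 0;
      num_out ++;
    }
  }

  if (placed)
    num_out ++;                         // Flush a partially filled page...

  return (num_out);
}

/*
 * The original pspdf_prepare_outpages() implementation follows.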
*/ static void pspdf_prepare_outpages() { int c, i, j; /* Looping vars */ int nup; /* Current number-up value */ page_t *page; /* Current page */ outpage_t *outpage; /* Current output page */ // Allocate an output page array... outpages = (outpage_t *)malloc(sizeof(outpage_t) * num_pages); memset(outpages, -1, sizeof(outpage_t) * num_pages); num_outpages = 0; outpage = outpages; // Handle the title page, as needed... if (TitlePage) { for (i = 0, j = 0, nup = -1, page = pages; i < chapter_starts[1]; i ++, page ++) { if (nup != page->nup) { if (j) { // Break the current output page... outpage ++; num_outpages ++; } nup = page->nup; j = 0; } if (!j) outpage->nup = nup; pspdf_transform_page(num_outpages, j, i); j ++; if (j >= nup) { j = 0; outpage ++; num_outpages ++; } } if (j) { // Break the current output page... outpage ++; num_outpages ++; } } // Loop through each chapter, adding pages as needed... if (OutputType == OUTPUT_BOOK && TocLevels > 0) c = 0; else c = 1; for (; c <= TocDocCount; c ++) { if (chapter_starts[c] < 0) continue; chapter_outstarts[c] = num_outpages; for (i = chapter_starts[c], j = 0, nup = -1, page = pages + i; i <= chapter_ends[c]; i ++, page ++) { if (nup != page->nup) { if (j) { // Break the current output page... outpage ++; num_outpages ++; } nup = page->nup; j = 0; } if (!j) outpage->nup = nup; pspdf_transform_page(num_outpages, j, i); j ++; if (j >= nup) { j = 0; outpage ++; num_outpages ++; } } if (j) { // Break the current output page... outpage ++; num_outpages ++; } chapter_outends[c] = num_outpages; } #ifdef DEBUG for (c = 0; c <= TocDocCount; c ++) printf("chapter_outstarts[%d] = %d, chapter_outends[%d] = %d\n", c, chapter_outstarts[c], c, chapter_outends[c]); printf("num_outpages = %d\n", (int)num_outpages); for (i = 0, outpage = outpages; i < (int)num_outpages; i ++, outpage ++) { printf("outpage[%d]:\tnup=%d, pages=[", i, outpage->nup); for (j = 0; j < outpage->nup; j ++) printf(" %d", outpage->pages[j]); puts(" ]"); page = pages + outpage->pages[0]; printf("\t\twidth = %d, length = %d\n", page->width, page->length); } for (c = 0; c <= TocDocCount; c ++) printf("chapter_starts[%d] = %d, chapter_ends[%d] = %d\n", c, chapter_starts[c], c, chapter_ends[c]); for (i = 0; i < (int)num_pages; i ++) printf("pages[%d]->outpage = %d\n", i, pages[i].outpage); for (i = 0; i < (int)num_headings; i ++) printf("heading_pages[%d] = %d\n", i, heading_pages[i]); for (i = 0; i < (int)num_links; i ++) printf("links[%d].name = \"%s\", page = %d\n", i, links[i].name, links[i].page); #endif // DEBUG } /* * 'pspdf_prepare_page()' - Add headers/footers to page before writing... */ static void pspdf_prepare_page(int page) /* I - Page number */ { int print_page; /* Printed page # */ char page_text[64]; /* Page number text */ int top; /* Top of page */ DEBUG_printf(("pspdf_prepare_page(%d)\n", page)); /* * Make a page number; use roman numerals for the table of contents * and arabic numbers for all others... */ if (chapter == 0 && OutputType == OUTPUT_BOOK) { print_page = page - chapter_starts[0] + 1; strlcpy(page_text, format_number(print_page, 'i'), sizeof(page_text)); } else if (chapter < 0) { print_page = 0; // Safe because page_text is more than 6 chars strlcpy(page_text, (page & 1) ? 
(char *)"eltit" : (char *)"title", sizeof(page_text)); } else { print_page = page - chapter_starts[1] + 1; strlcpy(page_text, format_number(print_page, '1'), sizeof(page_text)); } DEBUG_printf(("BEFORE page %d page_text is \"%s\"...\n", page, page_text)); DEBUG_printf((" header[0] = \"%s\"\n", pages[page].header[0])); DEBUG_printf((" header[1] = \"%s\"\n", pages[page].header[1])); DEBUG_printf((" header[2] = \"%s\"\n", pages[page].header[2])); /* * Add page headings... */ if (pages[page].landscape) { PagePrintWidth = pages[page].length - pages[page].right - pages[page].left; PagePrintLength = pages[page].width - pages[page].top - pages[page].bottom; } else { PagePrintWidth = pages[page].width - pages[page].right - pages[page].left; PagePrintLength = pages[page].length - pages[page].top - pages[page].bottom; } top = (int)(PagePrintLength - HeadFootSize); if (chapter == 0) { /* * Add table-of-contents header & footer... */ pspdf_prepare_heading(page, print_page, pages[page].header, top, page_text, sizeof(page_text)); pspdf_prepare_heading(page, print_page, pages[page].footer, 0, page_text, sizeof(page_text)); } else if (chapter > 0 && !title_page) { /* * Add chapter header & footer... */ if (page > chapter_starts[chapter] || OutputType != OUTPUT_BOOK) pspdf_prepare_heading(page, print_page, pages[page].header, top, page_text, sizeof(page_text)); else pspdf_prepare_heading(page, print_page, pages[page].header1, top, page_text, sizeof(page_text)); pspdf_prepare_heading(page, print_page, pages[page].footer, 0, page_text, sizeof(page_text)); } /* * Copy the page number for the TOC... */ strlcpy(pages[page].page_text, page_text, sizeof(pages[page].page_text)); DEBUG_printf(("AFTER page %d page_text is \"%s\"...\n", page, page_text)); } /* * 'pspdf_prepare_heading()' - Add headers/footers to page before writing... */ static void pspdf_prepare_heading(int page, // I - Page number int print_page, // I - Printed page number uchar **format, // I - Page headings int y, // I - Baseline of heading char *page_text, // O - Page number text int page_len) // I - Size of page text { int pos, // Position in heading dir; // Direction of page char *number; // Page number char buffer[1024], // String buffer *bufptr, // Pointer into buffer *formatptr; // Pointer into format string int formatlen; // Length of format command string render_t *temp; // Render structure for titles, etc. DEBUG_printf(("pspdf_prepare_heading(%d, %d, [\"%s\",\"%s\",\"%s\"], %d, %p, %d)\n", page, print_page, format[0], format[1], format[2], y, (void *)page_text, page_len)); /* * Add page headings... */ if (PageDuplex && (page & 1)) { dir = -1; format += 2; } else dir = 1; for (pos = 0; pos < 3; pos ++, format += dir) { /* * Add the appropriate object... */ if (!*format) continue; temp = NULL; if (strncasecmp((char *)*format, "$LOGOIMAGE", 10) == 0 && logo_image) { // Insert the logo image... if (y < (PagePrintLength / 2)) temp = new_render(page, RENDER_IMAGE, 0, y, logo_width, logo_height, logo_image); else // Offset from top temp = new_render(page, RENDER_IMAGE, 0, y + HeadFootSize - logo_height, logo_width, logo_height, logo_image); } else if (strncasecmp((char *)*format, "$LETTERHEAD", 11) == 0 && lh_image) { // Insert the logo image as a letterhead... 
if (y < (PagePrintLength / 2)) temp = new_render(page, RENDER_IMAGE, 0, y, lh_width, lh_height, lh_image); else // Offset from top temp = new_render(page, RENDER_IMAGE, 0, y + HeadFootSize - lh_height, lh_width, lh_height, lh_image); } else if (strncasecmp((char *)*format, "$HFIMAGE", 8) == 0) { int hfi; // Header/footer image index char *hfp; // Pointer into $HFIMAGE hfi = strtol((char*)((*format) + 8), &hfp, 10); if (hfi < 0 || hfi >= MAX_HF_IMAGES || !(isspace(*hfp) || !*hfp)) progress_error(HD_ERROR_BAD_HF_STRING, "Bad $HFIMAGE... substitution on page %d.", page + 1); else { if (y < (PagePrintLength / 2)) temp = new_render(page, RENDER_IMAGE, 0, y, hfimage_width[hfi], hfimage_height[hfi], hfimage[hfi]); else temp = new_render(page, RENDER_IMAGE, 0, y + HeadFootSize - hfimage_height[hfi], hfimage_width[hfi], hfimage_height[hfi], hfimage[hfi]); } } else { // Otherwise format the text... buffer[sizeof(buffer) - 1] = '\0'; for (bufptr = buffer, formatptr = (char *)*format; *formatptr;) { if (*formatptr == '$') { if (formatptr[1] == '$') { if (bufptr < (buffer + sizeof(buffer) - 1)) *bufptr++ = '$'; formatptr += 2; continue; } else if (!formatptr[1]) break; formatptr ++; for (formatlen = 1; isalpha(formatptr[formatlen]); formatlen ++); if (formatlen == 4 && strncasecmp(formatptr, "PAGE", 4) == 0) { if (formatptr[4] == '(' && formatptr[5] && formatptr[6] == ')') { number = format_number(print_page, formatptr[5]); formatptr += 7; } else { number = format_number(print_page, '1'); formatptr += 4; } strlcpy(bufptr, number, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } else if (formatlen == 5 && strncasecmp(formatptr, "PAGES", 5) == 0) { if (formatptr[5] == '(' && formatptr[6] && formatptr[7] == ')') { number = format_number(chapter_ends[TocDocCount] - chapter_starts[1] + 1, formatptr[6]); formatptr += 8; } else { number = format_number(chapter_ends[TocDocCount] - chapter_starts[1] + 1, '1'); formatptr += 5; } strlcpy(bufptr, number, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } else if (formatlen == 11 && strncasecmp(formatptr, "CHAPTERPAGE", 11) == 0) { int chapter_page; chapter_page = print_page - chapter_starts[::chapter] + chapter_starts[1]; if (formatptr[11] == '(' && formatptr[12] && formatptr[13] == ')') { number = format_number(chapter_page, formatptr[12]); formatptr += 14; } else { number = format_number(chapter_page, '1'); formatptr += 11; } strlcpy(bufptr, number, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } else if (formatlen == 12 && strncasecmp(formatptr, "CHAPTERPAGES", 12) == 0) { if (formatptr[12] == '(' && formatptr[13] && formatptr[14] == ')') { number = format_number(chapter_ends[::chapter] - chapter_starts[::chapter] + 1, formatptr[13]); formatptr += 15; } else { number = format_number(chapter_ends[::chapter] - chapter_starts[::chapter] + 1, '1'); formatptr += 12; } strlcpy(bufptr, number, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } else if (formatlen == 5 && strncasecmp(formatptr, "TITLE", 5) == 0) { formatptr += 5; if (doc_title) { strlcpy(bufptr, (char *)doc_title, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } } else if (formatlen == 7 && strncasecmp(formatptr, "CHAPTER", 7) == 0) { formatptr += 7; if (pages[page].chapter) { strlcpy(bufptr, (char *)(pages[page].chapter), sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } } else if (formatlen == 7 && strncasecmp(formatptr, "HEADING", 7) == 0) { formatptr += 7; if 
(pages[page].heading) { strlcpy(bufptr, (char *)(pages[page].heading), sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } } else if (formatlen == 4 && strncasecmp(formatptr, "TIME", 4) == 0) { formatptr += 4; strftime(bufptr, sizeof(buffer) - 1 - (size_t)(bufptr - buffer), "%X", &doc_date); bufptr += strlen(bufptr); } else if (formatlen == 4 && strncasecmp(formatptr, "DATE", 4) == 0) { formatptr += 4; strftime(bufptr, sizeof(buffer) - 1 - (size_t)(bufptr - buffer), "%x", &doc_date); bufptr += strlen(bufptr); } else if (formatlen == 3 && strncasecmp(formatptr, "URL", 3) == 0) { uchar *url = pages[page].url ? pages[page].url : (uchar *)"Unknown"; formatptr += 3; strlcpy(bufptr, (char *)url, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); } else { progress_error(HD_ERROR_BAD_HF_STRING, "Bad header/footer $ command on page %d.", page + 1); strlcpy(bufptr, formatptr - 1, sizeof(buffer) - (size_t)(bufptr - buffer)); bufptr += strlen(bufptr); formatptr += formatlen; } } else if (bufptr < (buffer + sizeof(buffer) - 1)) *bufptr++ = *formatptr++; else break; } *bufptr = '\0'; temp = new_render(page, RENDER_TEXT, 0, y, get_width((uchar *)buffer, HeadFootType, HeadFootStyle, SIZE_P) * HeadFootSize / _htmlSizes[SIZE_P], HeadFootSize, (uchar *)buffer); if (strstr((char *)*format, "$PAGE") || strstr((char *)*format, "$CHAPTERPAGE")) strlcpy(page_text, buffer, (size_t)page_len); } if (temp == NULL) continue; /* * Justify the object... */ switch (pos) { case 0 : /* Left justified */ break; case 1 : /* Centered */ temp->x = (float)((PagePrintWidth - temp->width) * 0.5); break; case 2 : /* Right justified */ temp->x = PagePrintWidth - temp->width; break; } /* * Set the text font and color... */ if (temp->type == RENDER_TEXT) { temp->data.text.typeface = HeadFootType; temp->data.text.style = HeadFootStyle; temp->data.text.size = (float)HeadFootSize; get_color(_htmlTextColor, temp->data.text.rgb); } } } /* * 'ps_write_document()' - Write all render entities to PostScript file(s). */ static void ps_write_document(uchar *author, /* I - Author of document */ uchar *creator, /* I - Application that generated the HTML file */ uchar *copyright, /* I - Copyright (if any) on the document */ uchar *keywords, /* I - Search keywords */ uchar *subject, /* I - Subject */ uchar *lang) /* I - Language */ { FILE *out; /* Output file */ int page; /* Current page # */ int first; /* First chapter */ /* * Write the title page(s)... 
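 *
 * When OutputFiles is set, the title/TOC section and each chapter are
 * written to separate PostScript files, each with its own prolog and
 * trailer; otherwise a single file gets one prolog covering all of the
 * output pages.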
*/ chapter = -1; out = NULL; if (!OutputFiles) { out = open_file(); if (out == NULL) { progress_error(HD_ERROR_WRITE_ERROR, "Unable to open output file - %s\n", strerror(errno)); return; } write_prolog(out, num_outpages, author, creator, copyright, keywords, subject); } if (OutputType == OUTPUT_BOOK && TocLevels > 0) first = 0; else first = 1; if (TitlePage) { if (OutputFiles) { out = open_file(); write_prolog(out, chapter_outstarts[first], author, creator, copyright, keywords, subject); } for (page = 0; page < chapter_outstarts[first]; page ++) ps_write_outpage(out, page); if (OutputFiles) { write_trailer(out, 0, lang); progress_error(HD_ERROR_NONE, "BYTES: %ld", ftell(out)); fclose(out); } } for (chapter = first; chapter <= TocDocCount; chapter ++) { if (chapter_starts[chapter] < 0) continue; if (OutputFiles) { out = open_file(); if (out == NULL) { progress_error(HD_ERROR_WRITE_ERROR, "Unable to create output file - %s\n", strerror(errno)); return; } write_prolog(out, chapter_outends[chapter] - chapter_outstarts[chapter], author, creator, copyright, keywords, subject); } for (page = chapter_outstarts[chapter]; page < chapter_outends[chapter]; page ++) ps_write_outpage(out, page); /* * Close the output file as necessary... */ if (OutputFiles) { write_trailer(out, 0, lang); progress_error(HD_ERROR_NONE, "BYTES: %ld", ftell(out)); fclose(out); } } /* * Close the output file as necessary... */ if (!OutputFiles) { write_trailer(out, 0, lang); progress_error(HD_ERROR_NONE, "BYTES: %ld", ftell(out)); if (out != stdout) fclose(out); } if (Verbosity) progress_hide(); } /* * 'ps_write_outpage()' - Write an output page. */ static void ps_write_outpage(FILE *out, /* I - Output file */ int outpage) /* I - Output page number */ { int file_page; /* Current page # in document */ page_t *p; /* Current page */ outpage_t *op; /* Current output page */ int i; /* Looping var */ if (outpage < 0 || outpage >= (int)num_outpages) return; op = outpages + outpage; p = pages + op->pages[0]; DEBUG_printf(("ps_write_outpage(%p, %d)\n", (void *)out, outpage)); /* * Let the user know which page we are writing... */ if (Verbosity) { progress_show("Writing page %s...", p->page_text); progress_update(100 * outpage / (int)num_outpages); } /* * Figure out the page number in the file... */ if (OutputFiles && chapter >= 0) file_page = outpage - chapter_outstarts[chapter] + 1; else if (chapter < 0) file_page = outpage + 1; else if (chapter == 0) { if (TitlePage) file_page = outpage + 1; else file_page = outpage - chapter_outstarts[0] + 1; } else { if (TitlePage) file_page = outpage + 1; else file_page = outpage - chapter_outstarts[1] + 1; } /* * Output the page prolog... 
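 *
 * For a one-up portrait US Letter page with 36-point margins, writing
 * Level 2 output with printer commands (PSCommands) enabled, the prolog
 * below comes out roughly as (illustrative, not captured output):
 *
 *   %%Page: (1) 1
 *   %%PageBoundingBox: 36 36 576 756
 *   %%BeginPageSetup
 *   %%BeginFeature: *PageSize Letter
 *   612 792 SetPageSize
 *   %%EndFeature
 *   %%BeginFeature: *Duplex None
 *   false false SetDuplexMode
 *   %%EndFeature
 *   %%EndPageSetup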
*/ fprintf(out, "%%%%Page: (%s) %d\n", p->page_text, file_page); if (op->nup == 1) { if (p->duplex && !(file_page & 1)) fprintf(out, "%%%%PageBoundingBox: %d %d %d %d\n", p->right, p->bottom, p->width - p->left, p->length - p->top); else fprintf(out, "%%%%PageBoundingBox: %d %d %d %d\n", p->left, p->bottom, p->width - p->right, p->length - p->top); } else fprintf(out, "%%%%PageBoundingBox: 0 0 %d %d\n", p->width, p->length); if (PSLevel > 1 && PSCommands) { fputs("%%BeginPageSetup\n", out); if (p->width == 612 && p->length == 792) fputs("%%BeginFeature: *PageSize Letter\n", out); else if (p->width == 612 && p->length == 1008) fputs("%%BeginFeature: *PageSize Legal\n", out); else if (p->width == 792 && p->length == 1224) fputs("%%BeginFeature: *PageSize Tabloid\n", out); else if (p->width == 842 && p->length == 1190) fputs("%%BeginFeature: *PageSize A3\n", out); else if (p->width == 595 && p->length == 842) fputs("%%BeginFeature: *PageSize A4\n", out); else fprintf(out, "%%%%BeginFeature: *PageSize w%dh%d\n", p->width, p->length); fprintf(out, "%d %d SetPageSize\n", p->width, p->length); fputs("%%EndFeature\n", out); if (p->duplex) { if (p->landscape) { fputs("%%BeginFeature: *Duplex DuplexTumble\n", out); fputs("true true SetDuplexMode\n", out); fputs("%%EndFeature\n", out); } else { fputs("%%BeginFeature: *Duplex DuplexNoTumble\n", out); fputs("true false SetDuplexMode\n", out); fputs("%%EndFeature\n", out); } } else { fputs("%%BeginFeature: *Duplex None\n", out); fputs("false false SetDuplexMode\n", out); fputs("%%EndFeature\n", out); } if (p->media_color[0]) { fprintf(out, "%%%%BeginFeature: *MediaColor %s\n", p->media_color); fprintf(out, "(%s) SetMediaColor\n", p->media_color); fputs("%%EndFeature\n", out); } if (p->media_position) { fprintf(out, "%%%%BeginFeature: *InputSlot Tray%d\n", p->media_position); fprintf(out, "%d SetMediaPosition\n", p->media_position); fputs("%%EndFeature\n", out); } if (p->media_type[0]) { fprintf(out, "%%%%BeginFeature: *MediaType %s\n", p->media_type); fprintf(out, "(%s) SetMediaType\n", p->media_type); fputs("%%EndFeature\n", out); } fputs("%%EndPageSetup\n", out); } /* * Render all of the pages... */ switch (op->nup) { case 1 : ps_write_page(out, op->pages[0]); break; default : for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; fprintf(out, "GS[%.3f %.3f %.3f %.3f %.3f %.3f]CM\n", p->outmatrix[0][0], p->outmatrix[1][0], p->outmatrix[0][1], p->outmatrix[1][1], p->outmatrix[0][2], p->outmatrix[1][2]); ps_write_page(out, op->pages[i]); fputs("GR\n", out); } break; } /* * Output the page trailer... */ fputs("SP\n", out); fflush(out); } /* * 'ps_write_page()' - Write all render entities on a page to a PostScript file. */ static void ps_write_page(FILE *out, /* I - Output file */ int page) /* I - Page number */ { render_t *r, /* Render pointer */ *next; /* Next render */ page_t *p; /* Current page */ const char *debug; /* HTMLDOC_DEBUG environment variable */ if (page < 0 || page >= (int)alloc_pages) return; p = pages + page; DEBUG_printf(("ps_write_page(%p, %d)\n", (void *)out, page)); /* * Clear the render cache... */ render_typeface = -1; render_style = -1; render_size = -1; render_rgb[0] = -1.0f; render_rgb[1] = -1.0f; render_rgb[2] = -1.0f; render_x = -1.0f; render_y = -1.0f; render_spacing = -1.0f; /* * Setup the page... 
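 *
 * The two-letter names used below (GS, GR, T, RO, CM, SP, F, L, B, C and
 * friends) are shorthand procedures defined by the PostScript prolog that
 * write_prolog() emits, e.g. GS/GR for gsave/grestore, T for translate,
 * RO for rotate, and SP for showpage.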
*/ fputs("GS\n", out); if (p->landscape) { if (p->duplex && (page & 1)) fprintf(out, "0 %d T -90 RO\n", p->length); else fprintf(out, "%d 0 T 90 RO\n", p->width); } write_background(page, out); if (p->duplex && (page & 1)) fprintf(out, "%d %d T\n", p->right, p->bottom); else fprintf(out, "%d %d T\n", p->left, p->bottom); /* * Render all graphics elements... */ for (r = p->start; r != NULL; r = r->next) switch (r->type) { case RENDER_BOX : set_color(out, r->data.box); set_pos(out, r->x, r->y); if (r->height > 0.0f) fprintf(out, " %.1f %.1f F\n", r->width, r->height); else fprintf(out, " %.1f L\n", r->width); render_x = -1.0f; break; case RENDER_IMAGE : if (r->width > 0.01f && r->height > 0.01f) write_image(out, r); break; } /* * Render all text elements, freeing used memory as we go... */ for (r = p->start, next = NULL; r != NULL; r = next) { if (r->type == RENDER_TEXT) write_text(out, r); next = r->next; free(r); } p->start = NULL; if ((debug = getenv("HTMLDOC_DEBUG")) != NULL && strstr(debug, "margin")) { // Show printable area... fprintf(out, "1 0 1 C 0 0 %d %d B\n", p->width - p->right - p->left, p->length - p->top - p->bottom); } /* * Output the page trailer... */ fputs("GR\n", out); } /* * 'ps_write_background()' - Write a background image... */ static void ps_write_background(FILE *out) /* I - Output file */ { int y, /* Current line */ pwidth; /* Pixel width */ if (!background_image->pixels) image_load(background_image->filename, !OutputColor, 1); pwidth = background_image->width * background_image->depth; fputs("/BG[", out); for (y = 0; y < background_image->height; y ++) { putc('<', out); ps_hex(out, background_image->pixels + y * pwidth, pwidth); putc('>', out); } fputs("]def", out); image_unload(background_image); } /* * 'pdf_write_document()' - Write all render entities to a PDF file. */ static void pdf_write_document(uchar *author, // I - Author of document uchar *creator, // I - Application that generated the HTML file uchar *copyright, // I - Copyright (if any) on the document uchar *keywords, // I - Search keywords uchar *subject, // I - Subject uchar *lang, // I - Language tree_t *doc, // I - Document tree_t *toc) // I - Table of contents tree { int i; // Looping variable FILE *out; // Output file int outpage, // Current page # heading; // Current heading # int bytes; // Number of bytes char buffer[8192]; // Copy buffer int num_images; // Number of images in document image_t **images; // Pointers to images render_t temp; // Dummy rendering data... // Open the output file... out = open_file(); if (out == NULL) { progress_error(HD_ERROR_WRITE_ERROR, "Unable to write document file - %s\n", strerror(errno)); return; } // Clear the objects array... num_objects = 0; alloc_objects = 0; objects = NULL; // Write the prolog... write_prolog(out, num_outpages, author, creator, copyright, keywords, subject); // Write images as needed... num_images = image_getlist(&images); for (i = 0; i < num_images; i ++) { int hfi; // Header/footer image index for (hfi = 0; hfi < MAX_HF_IMAGES; hfi ++) if (images[i] == hfimage[hfi]) break; if (images[i]->use > 1 || images[i]->mask || (images[i]->width * images[i]->height * images[i]->depth) > 65536 || images[i] == background_image || images[i] == logo_image || hfi < MAX_HF_IMAGES) { progress_show("Writing image %d (%s)...", i + 1, images[i]->filename); progress_update(100 * i / num_images); temp.data.image = images[i]; write_image(out, &temp, 1); } } // Write links and target names... 
pdf_write_links(out); if (PDFVersion >= 12) pdf_write_names(out); // Verify that everything is working so far... pdf_start_object(out); if (pages_object != (int)num_objects) progress_error(HD_ERROR_INTERNAL_ERROR, "Internal error: pages_object != num_objects"); fputs("/Type/Pages", out); fprintf(out, "/Count %d", (int)num_outpages); fputs("/Kids[", out); for (outpage = 0; outpage < (int)num_outpages; outpage ++) fprintf(out, "%d 0 R\n", pages_object + outpage * 2 + 1); fputs("]", out); pdf_end_object(out); for (outpage = 0; outpage < (int)num_outpages; outpage ++) pdf_write_outpage(out, outpage); if (OutputType == OUTPUT_BOOK && TocLevels > 0) { /* * Write the outline tree using the table-of-contents... */ heading = 0; #ifdef DEBUG_TOC pdf_text_contents(out, toc); #endif // DEBUG_TOC pdf_write_contents(out, toc, 0, 0, 0, &heading); } else { /* * Write the outline tree using the HTML files. */ pdf_write_files(out, doc); } /* * Write the trailer and close the output file... */ write_trailer(out, 0, lang); progress_error(HD_ERROR_NONE, "BYTES: %ld", ftell(out)); if (CGIMode) { const char *meta_filename = (const char *)htmlGetMeta(doc, (uchar *)"HTMLDOC.filename"); const char *filename; if (meta_filename) { if ((filename = strrchr(meta_filename, '/')) != NULL) filename ++; else filename = meta_filename; } else filename = "htmldoc.pdf"; // In CGI mode, we only produce PDF output to stdout... printf("Content-Type: application/pdf\r\n" "Content-Length: %ld\r\n" "Content-Disposition: inline; filename=\"%s\"\r\n" "Accept-Ranges: none\r\n" "X-Creator: HTMLDOC " SVERSION "\r\n" "\r\n", ftell(out), filename); } fclose(out); // // If we are sending the output to stdout, copy the temp file now... // if (!OutputPath[0]) { #ifdef WIN32 // Make sure we are in binary mode... stupid Microsoft! setmode(1, O_BINARY); #elif defined(__EMX__) // OS/2 has a setmode for FILE's... fflush(stdout); _fsetmode(stdout, "b"); #endif // WIN32 || __EMX__ // Open the temporary file and copy it to stdout... out = fopen(stdout_filename, "rb"); while ((bytes = fread(buffer, 1, sizeof(buffer), out)) > 0) fwrite(buffer, 1, (size_t)bytes, stdout); // Close the temporary file (it is removed when the program exits...) fclose(out); } // Clear the objects array... if (alloc_objects) { free(objects); num_objects = 0; alloc_objects = 0; objects = NULL; } if (Verbosity) progress_hide(); } /* * 'pdf_write_resources()' - Write the resources dictionary for a page. 
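 *
 * For a color page that uses only the first font and one image XObject
 * stored in PDF object 42, the dictionary written here comes out roughly
 * as (illustrative object numbers, not captured output):
 *
 *   /Resources<</ProcSet[/PDF/Text/ImageB/ImageC/ImageI]
 *   /Font<</F0 9 0 R>>/XObject<</I42 42 0 R>>>>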
*/ static void pdf_write_resources(FILE *out, /* I - Output file */ int outpage) /* I - Output page for resources */ { int i; /* Looping var */ outpage_t *op; /* Current output page */ page_t *p; /* Current page */ render_t *r; /* Render pointer */ int fonts_used[TYPE_MAX * STYLE_MAX]; /* Non-zero if the page uses a font */ int images_used; /* Non-zero if the page uses an image */ int text_used; /* Non-zero if the page uses text */ static const char *effects[] = /* Effects and their commands */ { "", "/S/Box/M/I", "/S/Box/M/O", "/S/Dissolve", "/S/Glitter/Di 270", "/S/Glitter/Di 315", "/S/Glitter/Di 0", "/S/Blinds/Dm/H", "/S/Split/Dm/H/M/I", "/S/Split/Dm/H/M/O", "/S/Blinds/Dm/V", "/S/Split/Dm/V/M/I", "/S/Split/Dm/V/M/O", "/S/Wipe/Di 270", "/S/Wipe/Di 180", "/S/Wipe/Di 0", "/S/Wipe/Di 90" }; memset(fonts_used, 0, sizeof(fonts_used)); images_used = background_image != NULL; text_used = 0; op = outpages + outpage; for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; for (r = p->start; r != NULL; r = r->next) if (r->type == RENDER_IMAGE) images_used = 1; else if (r->type == RENDER_TEXT) { text_used = 1; fonts_used[r->data.text.typeface * 4 + r->data.text.style] = 1; } } fputs("/Resources<<", out); if (!images_used) fputs("/ProcSet[/PDF/Text]", out); else if (PDFVersion >= 12) { if (OutputColor) fputs("/ProcSet[/PDF/Text/ImageB/ImageC/ImageI]", out); else fputs("/ProcSet[/PDF/Text/ImageB/ImageI]", out); } else { if (OutputColor) fputs("/ProcSet[/PDF/Text/ImageB/ImageC]", out); else fputs("/ProcSet[/PDF/Text/ImageB]", out); } if (text_used) { fputs("/Font<<", out); for (i = 0; i < (TYPE_MAX * STYLE_MAX); i ++) if (fonts_used[i]) fprintf(out, "/F%x %d 0 R", i, font_objects[i]); fputs(">>", out); } fputs("/XObject<<", out); for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; for (r = p->start; r != NULL; r = r->next) if (r->type == RENDER_IMAGE && r->data.image->obj) fprintf(out, "/I%d %d 0 R", r->data.image->obj, r->data.image->obj); } if (background_image) fprintf(out, "/I%d %d 0 R", background_image->obj, background_image->obj); fputs(">>>>", out); if (PDFEffect) fprintf(out, "/Dur %.0f/Trans<</Type/Trans/D %.1f%s>>", PDFPageDuration, PDFEffectDuration, effects[PDFEffect]); } /* * 'pdf_write_outpage()' - Write an output page. */ static void pdf_write_outpage(FILE *out, /* I - Output file */ int outpage) /* I - Output page number */ { int i; /* Looping var */ page_t *p; /* Current page */ outpage_t *op; /* Output page */ DEBUG_printf(("pdf_write_outpage(out = %p, outpage = %d)\n", (void *)out, outpage)); if (outpage < 0 || outpage >= (int)num_outpages) return; op = outpages + outpage; p = pages + op->pages[0]; DEBUG_printf(("op->pages[0] = %d (%dx%d)\n", op->pages[0], p->width, p->length)); /* * Let the user know which page we are writing... */ if (Verbosity) { progress_show("Writing page %s...", p->page_text); progress_update(100 * outpage / (int)num_outpages); } /* * Output the page prolog... */ pdf_start_object(out); fputs("/Type/Page", out); fprintf(out, "/Parent %d 0 R", pages_object); fprintf(out, "/Contents %d 0 R", (int)num_objects + 1); if (p->landscape) fprintf(out, "/MediaBox[0 0 %d %d]", p->length, p->width); else fprintf(out, "/MediaBox[0 0 %d %d]", p->width, p->length); pdf_write_resources(out, outpage); /* * Actions (links)... 
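 *
 * Putting the pieces together, a typical page object written by this
 * function looks roughly like (illustrative object numbers):
 *
 *   16 0 obj<</Type/Page/Parent 8 0 R/Contents 17 0 R
 *   /MediaBox[0 0 612 792]/Resources<<...>>/Annots 23 0 R>>endobj
 *
 * followed by the (optionally Flate-compressed) content stream in
 * object 17.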
*/ if (op->annot_object > 0) fprintf(out, "/Annots %d 0 R", op->annot_object); pdf_end_object(out); pdf_start_object(out); if (Compression) fputs("/Filter/FlateDecode", out); pdf_start_stream(out); flate_open_stream(out); /* * Render all of the pages... */ switch (op->nup) { case 1 : pdf_write_page(out, op->pages[0]); break; default : for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; flate_printf(out, "q %.3f %.3f %.3f %.3f %.3f %.3f cm\n", p->outmatrix[0][0], p->outmatrix[1][0], p->outmatrix[0][1], p->outmatrix[1][1], p->outmatrix[0][2], p->outmatrix[1][2]); pdf_write_page(out, op->pages[i]); flate_puts("Q\n", out); } break; } /* * Close out the page... */ flate_close_stream(out); pdf_end_object(out); } /* * 'pdf_write_page()' - Write a page to a PDF file. */ static void pdf_write_page(FILE *out, /* I - Output file */ int page) /* I - Page number */ { render_t *r, /* Render pointer */ *next; /* Next render */ float box[3]; /* RGB color for boxes */ page_t *p; /* Current page */ const char *debug; /* HTMLDOC_DEBUG environment variable */ if (page < 0 || page >= (int)alloc_pages) return; p = pages + page; /* * Clear the render cache... */ render_rgb[0] = -1.0f; render_rgb[1] = -1.0f; render_rgb[2] = -1.0f; render_x = -1.0f; render_y = -1.0f; /* * Output the page header... */ flate_puts("q\n", out); write_background(page, out); if (p->duplex && (page & 1)) flate_printf(out, "1 0 0 1 %d %d cm\n", p->right, p->bottom); else flate_printf(out, "1 0 0 1 %d %d cm\n", p->left, p->bottom); /* * Render all graphics elements... */ box[0] = -1.0f; box[1] = -1.0f; box[2] = -1.0f; for (r = p->start; r != NULL; r = r->next) switch (r->type) { case RENDER_IMAGE : if (r->width > 0.01f && r->height > 0.01f) write_image(out, r); break; case RENDER_BOX : if (r->height == 0.0) { if (box[0] != r->data.box[0] || box[1] != r->data.box[1] || box[2] != r->data.box[2]) { box[0] = r->data.box[0]; box[1] = r->data.box[1]; box[2] = r->data.box[2]; if (OutputColor) flate_printf(out, "%.2f %.2f %.2f RG\n", box[0], box[1], box[2]); else flate_printf(out, "%.2f G\n", box[0] * 0.31f + box[1] * 0.61f + box[2] * 0.08f); } flate_printf(out, "%.1f %.1f m %.1f %.1f l S\n", r->x, r->y, r->x + r->width, r->y); } else { set_color(out, r->data.box); flate_printf(out, "%.1f %.1f %.1f %.1f re f\n", r->x, r->y, r->width, r->height); } break; } /* * Render all text elements, freeing used memory as we go... */ flate_puts("BT\n", out); render_typeface = -1; render_style = -1; render_size = -1; render_x = -1.0f; render_y = -1.0f; render_spacing = -1.0f; for (r = p->start, next = NULL; r != NULL; r = next) { if (r->type == RENDER_TEXT) write_text(out, r); next = r->next; free(r); } p->start = NULL; flate_puts("ET\n", out); if ((debug = getenv("HTMLDOC_DEBUG")) != NULL && strstr(debug, "margin")) { // Show printable area... flate_printf(out, "1 0 1 RG 0 0 %d %d re S\n", p->width - p->right - p->left, p->length - p->top - p->bottom); } /* * Output the page trailer... 
*/ flate_puts("Q\n", out); } #ifdef DEBUG_TOC static void pdf_text_contents(FILE *out, tree_t *toc, int indent) { static const char *spaces = " " " "; if (indent > 16) indent = 16; while (toc) { fprintf(out, "%% %s<%s>", spaces + 64 - 4 * indent, _htmlMarkups[toc->markup]); switch (toc->markup) { case MARKUP_A : tree_t *temp; for (temp = toc->child; temp; temp = temp->next) fputs((char *)temp->data, out); break; default : fputs("\n", out); pdf_text_contents(out, toc->child, indent + 1); fprintf(out, "%% %s", spaces + 64 - 4 * indent); break; } fprintf(out, "</%s>\n", _htmlMarkups[toc->markup]); toc = toc->next; } } #endif // DEBUG_TOC /* * 'pdf_write_contents()' - Write the table of contents as outline records to * a PDF file. */ static void pdf_write_contents(FILE *out, /* I - Output file */ tree_t *toc, /* I - Table of contents tree */ int parent, /* I - Parent outline object */ int prev, /* I - Previous outline object */ int next, /* I - Next outline object */ int *heading) /* IO - Current heading # */ { int i, /* Looping var */ thisobj, /* This object */ entry, /* TOC entry object */ count; /* Number of entries at this level */ uchar *text; /* Entry text */ tree_t *temp; /* Looping var */ int *entry_counts, /* Number of sub-entries for this entry */ *entry_objects; /* Objects for each entry */ tree_t **entries; /* Pointers to each entry */ float x, y; /* Position of link */ /* * Make an object for this entry... */ if (toc == NULL) { /* * This is for the Table of Contents page... */ thisobj = pdf_start_object(out); fprintf(out, "/Parent %d 0 R", parent); fputs("/Title", out); write_utf16(out, (uchar *)TocTitle); x = 0.0f; y = PagePrintLength + PageBottom; pspdf_transform_coords(pages + chapter_starts[0], x, y); fprintf(out, "/Dest[%d 0 R/XYZ %.0f %.0f 0]", pages_object + 2 * chapter_outstarts[0] + 1, x, y); if (prev > 0) fprintf(out, "/Prev %d 0 R", prev); if (next > 0) fprintf(out, "/Next %d 0 R", next); pdf_end_object(out); return; } /* * Allocate the arrays... Add 1 to hold the TOC at the top level... */ if ((entry_counts = (int *)calloc(sizeof(int), num_headings + 1)) == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d headings - %s", (int)num_headings, strerror(errno)); return; } if ((entry_objects = (int *)calloc(sizeof(int), num_headings + 1)) == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d headings - %s", (int)num_headings, strerror(errno)); free(entry_counts); return; } if ((entries = (tree_t **)calloc(sizeof(tree_t *), num_headings + 1)) == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d headings - %s", (int)num_headings, strerror(errno)); free(entry_objects); free(entry_counts); return; } if (parent == 0 && TocLevels > 0) { /* * Add the table of contents to the top-level contents... */ entries[0] = NULL; entry_objects[0] = num_objects + 2; entry = num_objects + 3; count = 1; } else { entry = num_objects + 2; count = 0; } /* * Find and count the children (entries)... 
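 *
 * In the TOC tree a chapter entry is a <B> element whose following <UL>
 * holds its sub-entries, and a sub-entry is an <LI> whose trailing <UL>
 * holds deeper levels.  Object numbers are pre-assigned here (one object
 * per entry plus room for its descendants) so the /First, /Last, /Prev,
 * and /Next references can be written before the child objects themselves
 * exist in the file.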
*/ if (toc->markup == MARKUP_B && toc->next && toc->next->markup == MARKUP_UL) temp = toc->next->child; else if (toc->markup == MARKUP_LI && toc->last_child && toc->last_child->markup == MARKUP_UL) temp = toc->last_child->child; else temp = toc->child; for (; temp && count <= (int)num_headings; temp = temp->next) { if (temp->markup == MARKUP_B) { entries[count] = temp; entry_objects[count] = entry; if (temp->next && temp->next->markup == MARKUP_UL) entry_counts[count] = pdf_count_headings(temp->next->child); else entry_counts[count] = 0; entry += entry_counts[count] + 1; count ++; } else if (temp->markup == MARKUP_LI) { entries[count] = temp; entry_objects[count] = entry; if (temp->last_child && temp->last_child->markup == MARKUP_UL) entry_counts[count] = pdf_count_headings(temp->last_child); else entry_counts[count] = 0; entry += entry_counts[count] + 1; count ++; } } /* * Output the top-level object... */ thisobj = pdf_start_object(out); if (parent == 0) outline_object = thisobj; else fprintf(out, "/Parent %d 0 R", parent); if (count > 0) { fprintf(out, "/Count %d", parent == 0 ? count : -count); fprintf(out, "/First %d 0 R", entry_objects[0]); fprintf(out, "/Last %d 0 R", entry_objects[count - 1]); } if (parent > 0 && toc->child && toc->child->markup == MARKUP_A) { if ((text = htmlGetText(toc->child->child)) != NULL) { fputs("/Title", out); write_utf16(out, text); free(text); } i = heading_pages[*heading]; x = 0.0f; y = heading_tops[*heading] + pages[i].bottom; pspdf_transform_coords(pages + i, x, y); fprintf(out, "/Dest[%d 0 R/XYZ %.0f %.0f 0]", pages_object + 2 * pages[i].outpage + 1, x, y); (*heading) ++; } if (prev > 0) fprintf(out, "/Prev %d 0 R", prev); if (next > 0) fprintf(out, "/Next %d 0 R", next); pdf_end_object(out); for (i = 0; i < count ; i ++) pdf_write_contents(out, entries[i], thisobj, i > 0 ? entry_objects[i - 1] : 0, i < (count - 1) ? entry_objects[i + 1] : 0, heading); free(entry_objects); free(entry_counts); free(entries); } // // 'pdf_write_files()' - Write an outline of HTML files. // static void pdf_write_files(FILE *out, // I - Output file tree_t *doc) // I - Document tree { int i, // Looping var num_files, // Number of FILE elements alloc_text; // Allocated text? uchar *text; // Entry text tree_t *temp; // Current node link_t *link; // Link to file... float x, y; // Position of link // Figure out the number of (top-level) files in the document... for (num_files = 0, temp = doc; temp; temp = temp->next) if (temp->markup == MARKUP_FILE) num_files ++; if (num_files < 2) { // No files to outline... outline_object = 0; return; } // Write the outline dictionary... outline_object = pdf_start_object(out); fprintf(out, "/Count %d", num_files); fprintf(out, "/First %d 0 R", outline_object + 1); fprintf(out, "/Last %d 0 R", outline_object + num_files); pdf_end_object(out); // Now write the outline items... 
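  // The per-file items occupy the object numbers immediately after the
  // outline dictionary (outline_object + 1 ... outline_object + num_files),
  // which is why /Prev below is written as "outline_object + i" and /Next
  // as "outline_object + i + 2" for the i-th file.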
for (i = 0, temp = doc; temp; temp = temp->next) if (temp->markup == MARKUP_FILE) { alloc_text = 0; if ((text = get_title(temp->child)) != NULL) alloc_text = 1; else if ((text = htmlGetVariable(temp, (uchar *)"_HD_FILENAME")) == NULL) text = (uchar *)"Unknown"; pdf_start_object(out); fprintf(out, "/Parent %d 0 R", outline_object); fputs("/Title", out); write_utf16(out, text); if (alloc_text) free(text); if ((link = find_link(htmlGetVariable(temp, (uchar *)"_HD_FILENAME"))) != NULL) { x = 0.0f; y = link->top + pages[link->page].bottom; pspdf_transform_coords(pages + link->page, x, y); fprintf(out, "/Dest[%d 0 R/XYZ %.0f %.0f 0]", pages_object + 2 * pages[link->page].outpage + 1, x, y); } if (i > 0) fprintf(out, "/Prev %d 0 R", outline_object + i); if (i < (num_files - 1)) fprintf(out, "/Next %d 0 R", outline_object + i + 2); pdf_end_object(out); i ++; } } /* * 'pdf_count_headings()' - Count the number of headings under this TOC * entry. */ static int /* O - Number of headings found */ pdf_count_headings(tree_t *toc) /* I - TOC entry */ { int headings; /* Number of headings */ for (headings = 0; toc != NULL; toc = toc->next) { if (toc->markup == MARKUP_A) headings ++; if (toc->child != NULL) headings += pdf_count_headings(toc->child); } return (headings); } /* * PDF object state variables... */ static int pdf_stream_length = 0; static int pdf_stream_start = 0; static int pdf_object_type = 0; /* * 'pdf_start_object()' - Start a new PDF object... */ static int // O - Object number pdf_start_object(FILE *out, // I - File to write to int array) // I - 1 = array, 0 = dictionary { int *temp; // Temporary integer pointer num_objects ++; // Allocate memory as necessary... if (num_objects >= alloc_objects) { alloc_objects += ALLOC_OBJECTS; if (alloc_objects == ALLOC_OBJECTS) temp = (int *)malloc(sizeof(int) * alloc_objects); else temp = (int *)realloc(objects, sizeof(int) * alloc_objects); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d objects - %s", (int)alloc_objects, strerror(errno)); alloc_objects -= ALLOC_OBJECTS; return (0); } objects = temp; } objects[num_objects] = ftell(out); fprintf(out, "%d 0 obj", (int)num_objects); pdf_object_type = array; fputs(pdf_object_type ? "[" : "<<", out); return (num_objects); } /* * 'pdf_start_stream()' - Start a new PDF stream... */ static void pdf_start_stream(FILE *out) // I - File to write to { // Write the "/Length " string, get the position, and then write 10 // zeroes to cover the maximum size of a stream. fputs("/Length ", out); pdf_stream_length = ftell(out); fputs("0000000000>>stream\n", out); pdf_stream_start = ftell(out); } /* * 'pdf_end_object()' - End a PDF object... */ static void pdf_end_object(FILE *out) // I - File to write to { int length; // Total length of stream if (pdf_stream_start) { // For streams, go back and update the length field in the // object dictionary... length = ftell(out) - pdf_stream_start; fseek(out, pdf_stream_length, SEEK_SET); fprintf(out, "%-10d", length); fseek(out, 0, SEEK_END); pdf_stream_start = 0; fputs("endstream\n", out); } else fputs(pdf_object_type ? "]" : ">>", out); fputs("endobj\n", out); } /* * 'pdf_write_links()' - Write annotation link objects for each page in the * document. 
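 * The function makes three passes over the rendered pages: it first
 * merges adjacent RENDER_LINK primitives that point at the same target,
 * then counts how many annotation objects each output page will need
 * (so pages_object can be computed in advance), and finally emits the
 * link annotation dictionaries themselves.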
*/ static void pdf_write_links(FILE *out) /* I - Output file */ { int i, /* Looping var */ outpage, /* Current page */ lobj, /* Current link */ num_lobjs, /* Number of links on this page */ alloc_lobjs, /* Number of links to allocate */ *lobjs; /* Link objects */ float x, y; /* Position of last link */ render_t *r, /* Current render primitive */ *rlast, /* Last render link primitive */ *rprev; /* Previous render primitive */ link_t *link; /* Local link */ page_t *p; /* Current page */ outpage_t *op; /* Current output page */ /* * First combine adjacent, identical links... */ for (outpage = 0, op = outpages; outpage < (int)num_outpages; outpage ++, op ++) { for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; for (r = p->start, x = 0.0f, y = 0.0f, rlast = NULL, rprev = NULL; r != NULL; rprev = r, r = r->next) if (r->type == RENDER_LINK) { if (fabs(r->x - x) < 0.1f && fabs(r->y - y) < 0.1f && rlast != NULL && strcmp((const char *)rlast->data.link, (const char *)r->data.link) == 0) { // Combine this primitive with the previous one in rlast... rlast->width = r->x + r->width - rlast->x; x = rlast->x + rlast->width; // Delete this render primitive... rprev->next = r->next; free(r); r = rprev; } else { // Can't combine; just save this info for later use... rlast = r; x = r->x + r->width; y = r->y; } } } } /* * Setup the initial pages_object number... */ pages_object = num_objects + 1; /* * Add space for named links in PDF 1.2 output... */ if (PDFVersion >= 12) pages_object += num_links + 3; /* * Stop here if we won't be generating links in the output... */ if (!Links) return; /* * Figure out how many link objects we'll have... */ for (outpage = 0, op = outpages, alloc_lobjs = 0; outpage < (int)num_pages; outpage ++, op ++) { num_lobjs = 0; for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; for (r = p->start; r != NULL; r = r->next) if (r->type == RENDER_LINK) { if (find_link(r->data.link) != NULL) num_lobjs ++; else num_lobjs += 2; } } if (num_lobjs > 0) pages_object += num_lobjs + 1; if (num_lobjs > alloc_lobjs) alloc_lobjs = num_lobjs; } if (alloc_lobjs == 0) return; /* * Allocate memory for the links... */ if ((lobjs = (int *)malloc(sizeof(int) * (size_t)alloc_lobjs)) == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d link objects - %s", alloc_lobjs, strerror(errno)); return; } /* * Then generate annotation objects for all the links... */ for (outpage = 0, op = outpages; outpage < (int)num_pages; outpage ++, op ++) { num_lobjs = 0; for (i = 0; i < op->nup; i ++) { if (op->pages[i] < 0) break; p = pages + op->pages[i]; for (r = p->start; r != NULL; r = r->next) if (r->type == RENDER_LINK) { if ((link = find_link(r->data.link)) != NULL) { /* * Local link... 
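 * A link whose target is inside this document becomes a single
 * /Subtype/Link annotation: the /Rect is the transformed bounding box of
 * the rendered link text and /Dest points at the page object of the
 * destination.  The emitted dictionary looks roughly like this (object
 * numbers and coordinates illustrative only):
 *
 *   42 0 obj
 *   <</Subtype/Link/Rect[72.0 700.5 150.2 712.5]/Border[0 0 0]
 *     /Dest[12 0 R/XYZ 0 792 0]>>
 *   endobj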
*/ float x1, y1, x2, y2; lobjs[num_lobjs ++] = pdf_start_object(out); fputs("/Subtype/Link", out); if (PageDuplex && (op->pages[i] & 1)) { x1 = r->x + p->right; y1 = r->y + p->bottom - 2; x2 = r->x + r->width + p->right; y2 = r->y + r->height + p->bottom; } else { x1 = r->x + p->left; y1 = r->y + p->bottom - 2; x2 = r->x + r->width + p->left; y2 = r->y + r->height + p->bottom; } pspdf_transform_coords(p, x1, y1); pspdf_transform_coords(p, x2, y2); fprintf(out, "/Rect[%.1f %.1f %.1f %.1f]", x1, y1, x2, y2); fputs("/Border[0 0 0]", out); x1 = 0.0f; y1 = link->top + pages[link->page].bottom; pspdf_transform_coords(pages + link->page, x1, y1); fprintf(out, "/Dest[%d 0 R/XYZ %.0f %.0f 0]", pages_object + 2 * pages[link->page].outpage + 1, x1, y1); pdf_end_object(out); } else { /* * Remote link... */ pdf_start_object(out); if (PDFVersion >= 12 && file_method((char *)r->data.link) == NULL) { #ifdef WIN32 if (strcasecmp(file_extension((char *)r->data.link), "pdf") == 0) #else if (strcmp(file_extension((char *)r->data.link), "pdf") == 0) #endif /* WIN32 */ { /* * Link to external PDF file... */ const char *target = file_target((char *)r->data.link); fputs("/S/GoToR", out); if (target) { char url[1024], *urlptr; fputs("/D", out); write_string(out, (uchar *)target, 0); strlcpy(url, (char *)r->data.link, sizeof(url)); if ((urlptr = strrchr(url, '#')) != NULL) *urlptr = '\0'; fputs("/F", out); write_string(out, (uchar *)url, 0); } else { fputs("/D[0/XYZ null null 0]/F", out); write_string(out, r->data.link, 0); } } else { /* * Link to external filename... */ fputs("/S/Launch", out); fputs("/F", out); write_string(out, r->data.link, 0); if (StrictHTML) progress_error(HD_ERROR_UNRESOLVED_LINK, "Unable to resolve link to \"%s\"!", r->data.link); } } else { /* * Link to web file... */ fputs("/S/URI", out); fputs("/URI", out); write_string(out, r->data.link, 0); } pdf_end_object(out); lobjs[num_lobjs ++] = pdf_start_object(out); fputs("/Subtype/Link", out); if (PageDuplex && (outpage & 1)) fprintf(out, "/Rect[%.1f %.1f %.1f %.1f]", r->x + PageRight, r->y + PageBottom, r->x + r->width + PageRight, r->y + r->height + PageBottom); else fprintf(out, "/Rect[%.1f %.1f %.1f %.1f]", r->x + PageLeft, r->y + PageBottom - 2, r->x + r->width + PageLeft, r->y + r->height + PageBottom); fputs("/Border[0 0 0]", out); fprintf(out, "/A %d 0 R", (int)num_objects - 1); pdf_end_object(out); } } } if (num_lobjs > 0) { outpages[outpage].annot_object = pdf_start_object(out, 1); for (lobj = 0; lobj < num_lobjs; lobj ++) fprintf(out, "%d 0 R%s", lobjs[lobj], lobj < (num_lobjs - 1) ? "\n" : ""); pdf_end_object(out); } } free(lobjs); } /* * 'pdf_write_names()' - Write named destinations for each link. */ static void pdf_write_names(FILE *out) /* I - Output file */ { int i; /* Looping var */ uchar *s; /* Current character in name */ link_t *link; /* Local link */ /* * Convert all link names to lowercase... */ for (i = num_links, link = links; i > 0; i --, link ++) for (s = link->name; *s != '\0'; s ++) *s = (uchar)tolower(*s); /* * Write the root name tree entry... */ names_object = pdf_start_object(out); fprintf(out, "/Dests %d 0 R", (int)num_objects + 1); pdf_end_object(out); /* * Write the name tree child list... */ pdf_start_object(out); fprintf(out, "/Kids[%d 0 R]", (int)num_objects + 1); pdf_end_object(out); /* * Write the leaf node for the name tree... 
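 * The single leaf node holds a /Limits array with the first and last
 * (lower-cased) link names and a /Names array pairing every name with
 * the destination object written for it below.  Roughly (names and
 * object numbers illustrative only):
 *
 *   <</Limits[(chapter1)(summary)]
 *     /Names[(chapter1) 20 0 R (figures) 21 0 R (summary) 22 0 R]>>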
*/ pdf_start_object(out); fputs("/Limits[", out); write_string(out, links[0].name, 0); write_string(out, links[num_links - 1].name, 0); fputs("]", out); fputs("/Names[", out); for (i = 1, link = links; i <= (int)num_links; i ++, link ++) { write_string(out, link->name, 0); fprintf(out, "%d 0 R", (int)num_objects + i); } fputs("]", out); pdf_end_object(out); for (i = num_links, link = links; i > 0; i --, link ++) { pdf_start_object(out); float x, y; x = 0.0f; y = link->top + pages[link->page].bottom; pspdf_transform_coords(pages + link->page, x, y); fprintf(out, "/D[%d 0 R/XYZ %.0f %.0f 0]", pages_object + 2 * pages[link->page].outpage + 1, x, y); pdf_end_object(out); } } /* * 'render_contents()' - Render a single heading. */ static void render_contents(tree_t *t, /* I - Tree to parse */ float left, /* I - Left margin */ float right, /* I - Printable width */ float bottom, /* I - Bottom margin */ float top, /* I - Printable top */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int heading, /* I - Heading # */ tree_t *chap) /* I - Chapter heading */ { float x, width, numberwidth, height, rgb[3]; int hpage; uchar number[1024], *nptr, *link; tree_t *flat, *temp, *next; render_t *r; float dot_width; DEBUG_printf(("render_contents(t=%p, left=%.1f, right=%.1f, bottom=%.1f, top=%.1f, y=%.1f, page=%d, heading=%d, chap=%p)\n", (void *)t, left, right, bottom, top, *y, *page, heading, (void *)chap)); if (!t) return; dot_width = _htmlSizes[SIZE_P] * _htmlWidths[t->typeface][t->style]['.'] * 0.001f; /* * Put the text... */ flat = flatten_tree(t->child->child); for (height = 0.0, temp = flat; temp != NULL; temp = temp->next) if (temp->height > height) height = temp->height; height *= _htmlSpacings[SIZE_P] / _htmlSizes[SIZE_P]; if (t->indent) x = left + 18.0f + 18.0f * t->indent; else x = left; *y -= height; /* * Get the width of the page number, leave room for three dots... */ if (heading >= 0 && heading < (int)num_headings) { hpage = heading_pages[heading]; numberwidth = (float)(get_width((uchar *)pages[hpage].page_text, t->typeface, t->style, t->size) + 3.0f * dot_width); } else { hpage = 0; numberwidth = 0.0f; } for (temp = flat; temp != NULL; temp = next) { rgb[0] = temp->red / 255.0f; rgb[1] = temp->green / 255.0f; rgb[2] = temp->blue / 255.0f; if ((x + temp->width) >= (right - numberwidth)) { /* * Too wide to fit, continue on the next line */ *y -= _htmlSpacings[SIZE_P]; x = left + 36.0f * t->indent; } if (*y < bottom) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); width = get_width((uchar *)TocTitle, _htmlHeadingFont, STYLE_BOLD, SIZE_H1); *y = (float)(top - _htmlSpacings[SIZE_H1]); x = (float)(left + 0.5f * (right - left - width)); r = new_render(*page, RENDER_TEXT, x, *y, 0, 0, TocTitle); r->data.text.typeface = _htmlHeadingFont; r->data.text.style = STYLE_BOLD; r->data.text.size = (float)_htmlSizes[SIZE_H1]; get_color(_htmlTextColor, r->data.text.rgb); *y -= _htmlSpacings[SIZE_H1]; if (t->indent) x = left + 18.0f + 18.0f * t->indent; else x = left; if (chap != t) { *y += height; render_contents(chap, left, right, bottom, top, y, page, -1, 0); *y -= _htmlSpacings[SIZE_P]; } } if (temp->link != NULL) { link = htmlGetVariable(temp->link, (uchar *)"HREF"); /* * Add a page link... 
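 * A RENDER_LINK primitive records the clickable area for this TOC
 * entry; for PDF output (PSLevel == 0) the entry text is also recolored
 * to link_color and, if LinkStyle is set, underlined with a thin
 * RENDER_BOX.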
*/ new_render(*page, RENDER_LINK, x, *y, temp->width, temp->height, link); if (PSLevel == 0 && Links) { memcpy(rgb, link_color, sizeof(rgb)); temp->red = (uchar)(link_color[0] * 255.0); temp->green = (uchar)(link_color[1] * 255.0); temp->blue = (uchar)(link_color[2] * 255.0); if (LinkStyle) new_render(*page, RENDER_BOX, x, *y - 1, temp->width, 0, link_color); } } if ((link = htmlGetVariable(temp, (uchar *)"ID")) != NULL) { /* * Add a target link... */ add_link(link, *page, (int)(*y + height)); } switch (temp->markup) { case MARKUP_A : if ((link = htmlGetVariable(temp, (uchar *)"NAME")) != NULL) { /* * Add a target link... */ add_link(link, *page, (int)(*y + height)); } break; case MARKUP_NONE : if (temp->data == NULL) break; if (temp->underline) new_render(*page, RENDER_BOX, x, *y - 1, temp->width, 0, rgb); if (temp->strikethrough) new_render(*page, RENDER_BOX, x, *y + temp->height * 0.25f, temp->width, 0, rgb); r = new_render(*page, RENDER_TEXT, x, *y, 0, 0, temp->data); r->data.text.typeface = temp->typeface; r->data.text.style = temp->style; r->data.text.size = (float)_htmlSizes[temp->size]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); if (temp->superscript) r->y += height - temp->height; else if (temp->subscript) r->y -= height * _htmlSizes[0] / _htmlSpacings[0] - temp->height; break; case MARKUP_IMG : update_image_size(temp); new_render(*page, RENDER_IMAGE, x, *y, temp->width, temp->height, image_find((char *)htmlGetVariable(temp, (uchar *)"REALSRC"))); break; default : break; } x += temp->width; next = temp->next; free(temp); } if (numberwidth > 0.0f) { /* * Draw dots leading up to the page number... */ width = (float)(numberwidth - 3.0 * dot_width + x); for (nptr = number; nptr < (number + sizeof(number) - 1) && width < right; width += dot_width) *nptr++ = '.'; if (nptr > number) nptr --; strlcpy((char *)nptr, pages[hpage].page_text, sizeof(number) - (size_t)(nptr - number)); r = new_render(*page, RENDER_TEXT, right - width + x, *y, 0, 0, number); r->data.text.typeface = t->typeface; r->data.text.style = t->style; r->data.text.size = (float)_htmlSizes[t->size]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); } } /* * 'count_headings()' - Count the number of headings in the TOC. */ static int count_headings(tree_t *t) // I - Tree to count { int count; // Number of headings... count = 0; while (t != NULL) { switch (t->markup) { case MARKUP_B : case MARKUP_LI : count ++; if (t->last_child && t->last_child->markup == MARKUP_UL) count += count_headings(t->last_child); break; default : count += count_headings(t->child); break; } t = t->next; } return (count); } /* * 'parse_contents()' - Parse the table of contents and produce a * rendering list... */ static void parse_contents(tree_t *t, /* I - Tree to parse */ float left, /* I - Left margin */ float right, /* I - Printable width */ float bottom, /* I - Bottom margin */ float top, /* I - Printable top */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int *heading, /* IO - Heading # */ tree_t *chap) /* I - Chapter heading */ { DEBUG_printf(("parse_contents(t=%p, left=%.1f, right=%.1f, bottom=%.1f, top=%.1f, y=%.1f, page=%d, heading=%d, chap=%p)\n", (void *)t, left, right, bottom, top, *y, *page, *heading, (void *)chap)); while (t != NULL) { switch (t->markup) { case MARKUP_B : /* Top-level TOC */ if (t->prev != NULL) /* Advance one line prior to top-levels... 
*/ *y -= _htmlSpacings[SIZE_P]; if (*y < (bottom + _htmlSpacings[SIZE_P] * 3)) *y = 0; // Force page break chap = t; case MARKUP_LI : /* Lower-level TOC */ DEBUG_printf(("parse_contents: heading=%d, page = %d\n", *heading, heading_pages[*heading])); /* * Put the text unless the author has flagged it otherwise... */ if (htmlGetVariable(t, (uchar *)"_HD_OMIT_TOC") == NULL) { render_contents(t, left, right, bottom, top, y, page, *heading, chap); /* * Update current headings for header/footer strings in TOC. */ check_pages(*page); if (t->markup == MARKUP_B && pages[*page].chapter == pages[*page - 1].chapter) pages[*page].chapter = htmlGetText(t->child->child); if (pages[*page].heading == pages[*page - 1].heading) pages[*page].heading = htmlGetText(t->child->child); /* * Next heading... */ (*heading) ++; if (t->last_child->markup == MARKUP_UL) parse_contents(t->last_child, left, right, bottom, top, y, page, heading, chap); } else if (t->next != NULL && t->next->markup == MARKUP_UL) { /* * Skip children of omitted heading... */ t = t->next; (*heading) += count_headings(t->child) + 1; } else (*heading) ++; break; default : parse_contents(t->child, left, right, bottom, top, y, page, heading, chap); break; } t = t->next; } } /* * 'parse_doc()' - Parse a document tree and produce rendering list output. */ static void parse_doc(tree_t *t, /* I - Tree to parse */ float *left, /* I - Left margin */ float *right, /* I - Printable width */ float *bottom, /* I - Bottom margin */ float *top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ tree_t *cpara, /* I - Current paragraph */ int *needspace) /* I - Need whitespace before this element */ { int i; /* Looping var */ tree_t *para, /* Phoney paragraph tree entry */ *temp; /* Paragraph entry */ var_t *var; /* Variable entry */ uchar *name; /* ID name */ uchar *style; /* STYLE attribute */ float width, /* Width of horizontal rule */ height, /* Height of rule */ rgb[3]; /* RGB color of rule */ DEBUG_printf(("parse_doc(t=%p, left=%.1f, right=%.1f, bottom=%.1f, top=%.1f, x=%.1f, y=%.1f, page=%d, cpara=%p, needspace=%d\n", (void *)t, *left, *right, *bottom, *top, *x, *y, *page, (void *)cpara, *needspace)); DEBUG_printf((" title_page = %d, chapter = %d\n", title_page, chapter)); if (cpara == NULL) para = htmlNewTree(NULL, MARKUP_P, NULL); else para = cpara; while (t != NULL) { if (t->markup == MARKUP_FILE) current_url = htmlGetVariable(t, (uchar *)"_HD_URL"); if (((t->markup == MARKUP_H1 && OutputType == OUTPUT_BOOK) || (t->markup == MARKUP_FILE && OutputType == OUTPUT_WEBPAGES)) && !title_page) { // New page on H1 in book mode or file in webpage mode... if (para->child != NULL && chapter > 0) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } if ((chapter > 0 && OutputType == OUTPUT_BOOK) || ((*page > 0 || *y < *top) && OutputType == OUTPUT_WEBPAGES)) { if (*y < *top) (*page) ++; if (PageDuplex && (*page & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); chapter_ends[chapter] = *page - 1; } // Make sure header and footer strings are correct... check_pages(*page); memcpy(pages[*page].header, Header, sizeof(pages[*page].header)); memcpy(pages[*page].header1, Header1, sizeof(pages[*page].header1)); memcpy(pages[*page].footer, Footer, sizeof(pages[*page].footer)); // Bump the chapter/file count... 
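      // Each <H1> in book mode (or each file in webpage mode) starts a new
      // chapter; going past MAX_CHAPTERS is reported as an error and the
      // index is clamped so chapter_starts[] is never written out of range.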
chapter ++; if (chapter >= MAX_CHAPTERS) { progress_error(HD_ERROR_TOO_MANY_CHAPTERS, "Too many chapters/files in document (%d > %d)!", chapter, MAX_CHAPTERS); chapter = MAX_CHAPTERS - 1; } else chapter_starts[chapter] = *page; if (chapter > TocDocCount) TocDocCount = chapter; *y = *top; *x = *left; *needspace = 0; } if ((name = htmlGetVariable(t, (uchar *)"ID")) != NULL) { /* * Add a link target using the ID=name variable... */ add_link(name, *page, (int)*y); } else if (t->markup == MARKUP_FILE) { /* * Add a file link... */ uchar newname[256], /* New filename */ *sep; /* "?" separator in links */ // Strip any trailing HTTP GET data stuff... strlcpy((char *)newname, (char *)htmlGetVariable(t, (uchar *)"_HD_FILENAME"), sizeof(newname)); if ((sep = (uchar *)strchr((char *)newname, '?')) != NULL) *sep = '\0'; // Add the link add_link(newname, *page, (int)*y); } if (chapter == 0 && !title_page) { // Need to handle page comments before the first heading... if (t->markup == MARKUP_COMMENT) parse_comment(t, left, right, bottom, top, x, y, page, para, *needspace); if (t->child != NULL) parse_doc(t->child, left, right, bottom, top, x, y, page, para, needspace); t = t->next; continue; } // Check for some basic stylesheet stuff... if ((style = htmlGetStyle(t, (uchar *)"page-break-before:")) != NULL && strcasecmp((char *)style, "avoid") != 0) { // Advance to the next page... (*page) ++; *x = *left; *y = *top; *needspace = 0; // See if we need to go to the next left/righthand page... if (PageDuplex && ((*page) & 1) && strcasecmp((char *)style, "right") == 0) (*page) ++; else if (PageDuplex && !((*page) & 1) && strcasecmp((char *)style, "left") == 0) (*page) ++; // Update the progress as necessary... if (Verbosity) progress_show("Formatting page %d", *page); } // Process the markup... switch (t->markup) { case MARKUP_IMG : update_image_size(t); case MARKUP_NONE : case MARKUP_BR : if (para->child == NULL) { if (t->parent == NULL) { para->halignment = ALIGN_LEFT; para->indent = 0; } else { para->halignment = t->parent->halignment; para->indent = t->parent->indent; } } // Skip heading whitespace... 
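        // Text, <BR> and inline image fragments are not rendered here;
        // they are copied (with their formatting attributes) into the
        // temporary "para" tree and laid out later by parse_paragraph().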
if (para->child == NULL && t->markup == MARKUP_NONE && t->data != NULL && strcmp((char *)t->data, " ") == 0) break; if ((temp = htmlAddTree(para, t->markup, t->data)) != NULL) { temp->link = t->link; temp->width = t->width; temp->height = t->height; temp->typeface = t->typeface; temp->style = t->style; temp->size = t->size; temp->underline = t->underline; temp->strikethrough = t->strikethrough; temp->superscript = t->superscript; temp->subscript = t->subscript; temp->halignment = t->halignment; temp->valignment = t->valignment; temp->red = t->red; temp->green = t->green; temp->blue = t->blue; for (i = 0, var = t->vars; i < t->nvars; i ++, var ++) htmlSetVariable(temp, var->name, var->value); } break; case MARKUP_TABLE : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } parse_table(t, *left, *right, *bottom, *top, x, y, page, *needspace); *needspace = 0; break; case MARKUP_H1 : case MARKUP_H2 : case MARKUP_H3 : case MARKUP_H4 : case MARKUP_H5 : case MARKUP_H6 : case MARKUP_H7 : case MARKUP_H8 : case MARKUP_H9 : case MARKUP_H10 : case MARKUP_H11 : case MARKUP_H12 : case MARKUP_H13 : case MARKUP_H14 : case MARKUP_H15 : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 1; } parse_heading(t, *left, *right, *bottom, *top, x, y, page, *needspace); *needspace = 1; break; case MARKUP_BLOCKQUOTE : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 1; } *left += 36; *right -= 36; parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); *left -= 36; *right += 36; *x = *left; *needspace = 1; break; case MARKUP_CENTER : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 1; } parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); *x = *left; *needspace = 1; break; case MARKUP_P : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 1; } parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); *x = *left; *needspace = 1; break; case MARKUP_DIV : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } break; case MARKUP_PRE : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 1; } *left += 36.0f; *x = *left; parse_pre(t, *left, *right, *bottom, *top, x, y, page, *needspace); *left -= 36.0f; *x = *left; *needspace = 1; break; case MARKUP_DIR : case MARKUP_MENU : case MARKUP_UL : case MARKUP_OL : init_list(t); case MARKUP_DL : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, 
page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } if (t->indent == 1) *needspace = 1; *left += 36.0f; *x = *left; parse_doc(t->child, left, right, bottom, top, x, y, page, para, needspace); *left -= 36.0f; if (t->indent == 1) *needspace = 1; break; case MARKUP_LI : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 0; } parse_list(t, left, right, bottom, top, x, y, page, *needspace); *x = *left; *needspace = t->next && t->next->markup != MARKUP_LI && t->next->markup != MARKUP_UL && t->next->markup != MARKUP_OL; break; case MARKUP_DT : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 0; } *left -= 36.0f; *x = *left; parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); *left += 36.0f; *x = *left; *needspace = 0; break; case MARKUP_DD : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 0; } parse_doc(t->child, left, right, bottom, top, x, y, page, NULL, needspace); *x = *left; *needspace = 0; break; case MARKUP_HR : if (para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } if (htmlGetVariable(t, (uchar *)"BREAK") == NULL) { /* * Generate a horizontal rule... */ if ((name = htmlGetVariable(t, (uchar *)"WIDTH")) == NULL) width = *right - *left; else { if (strchr((char *)name, '%') != NULL) width = atoi((char *)name) * (*right - *left) / 100; else width = (float)(atoi((char *)name) * PagePrintWidth / _htmlBrowserWidth); } if ((name = htmlGetVariable(t, (uchar *)"SIZE")) == NULL) height = 2; else height = (float)(atoi((char *)name) * PagePrintWidth / _htmlBrowserWidth); switch (t->halignment) { case ALIGN_LEFT : *x = *left; break; case ALIGN_CENTER : *x = *left + (*right - *left - width) * 0.5f; break; case ALIGN_RIGHT : *x = *right - width; break; } if (*y < (*bottom + height + _htmlSpacings[SIZE_P])) { /* * Won't fit on this page... */ (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; } (*y) -= height + _htmlSpacings[SIZE_P]; rgb[0] = t->red / 255.0f; rgb[1] = t->green / 255.0f; rgb[2] = t->blue / 255.0f; new_render(*page, RENDER_BOX, *x, *y + _htmlSpacings[SIZE_P] * 0.5, width, height, rgb); } else { /* * <HR BREAK> generates a page break... */ (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; } *x = *left; *needspace = 0; break; case MARKUP_COMMENT : // Check comments for commands... parse_comment(t, left, right, bottom, top, x, y, page, para, *needspace); break; case MARKUP_HEAD : // Ignore document HEAD section case MARKUP_TITLE : // Ignore title and meta stuff case MARKUP_META : case MARKUP_SCRIPT : // Ignore script stuff case MARKUP_INPUT : // Ignore form stuff case MARKUP_SELECT : case MARKUP_OPTION : case MARKUP_TEXTAREA : break; case MARKUP_STYLE : break; case MARKUP_A : if (htmlGetVariable(t, (uchar *)"NAME") != NULL) { /* * Add this named destination to the paragraph tree... 
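 * The <A NAME="..."> node is copied into the pending paragraph so that,
 * when the paragraph is actually rendered, add_link() can record the
 * destination at the exact page and vertical position of the anchor.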
*/ if (para->child == NULL) { para->halignment = t->halignment; para->indent = t->indent; } if ((temp = htmlAddTree(para, t->markup, t->data)) != NULL) { temp->link = t->link; temp->width = t->width; temp->height = t->height; temp->typeface = t->typeface; temp->style = t->style; temp->size = t->size; temp->underline = t->underline; temp->strikethrough = t->strikethrough; temp->superscript = t->superscript; temp->subscript = t->subscript; temp->halignment = t->halignment; temp->valignment = t->valignment; temp->red = t->red; temp->green = t->green; temp->blue = t->blue; for (i = 0, var = t->vars; i < t->nvars; i ++, var ++) htmlSetVariable(temp, var->name, var->value); } } default : if (t->child != NULL) parse_doc(t->child, left, right, bottom, top, x, y, page, para, needspace); break; } // Check for some basic stylesheet stuff... if ((style = htmlGetStyle(t, (uchar *)"page-break-after:")) != NULL && strcasecmp((char *)style, "avoid") != 0) { // Advance to the next page... (*page) ++; *x = *left; *y = *top; *needspace = 0; // See if we need to go to the next left/righthand page... if (PageDuplex && ((*page) & 1) && strcasecmp((char *)style, "right") == 0) (*page) ++; else if (PageDuplex && !((*page) & 1) && strcasecmp((char *)style, "left") == 0) (*page) ++; // Update the progress as necessary... if (Verbosity) progress_show("Formatting page %d", *page); } // Move to the next node... t = t->next; } if (para->child != NULL && cpara != para) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, *needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; *needspace = 0; } if (cpara != para) htmlDeleteTree(para); DEBUG_printf(("LEAVING parse_doc(), x = %.1f, y = %.1f, page = %d\n", *x, *y, *page)); } /* * 'parse_heading()' - Parse a heading tree and produce rendering list output. */ static void parse_heading(tree_t *t, /* I - Tree to parse */ float left, /* I - Left margin */ float right, /* I - Printable width */ float bottom, /* I - Bottom margin */ float top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int needspace) /* I - Need whitespace? */ { int *temp; // Temporary integer array pointer DEBUG_printf(("parse_heading(t=%p, left=%.1f, right=%.1f, bottom=%.1f, top=%.1f, x=%.1f, y=%.1f, page=%d, needspace=%d\n", (void *)t, left, right, bottom, top, *x, *y, *page, needspace)); if (((t->markup - MARKUP_H1) < TocLevels || TocLevels == 0) && !title_page) current_heading = t->child; if (*y < (5 * _htmlSpacings[SIZE_P] + bottom)) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } check_pages(*page); if (t->markup == MARKUP_H1 && !title_page) pages[*page].chapter = htmlGetText(current_heading); if ((pages[*page].heading == NULL || t->markup == MARKUP_H1 || (*page > 0 && pages[*page].heading == pages[*page - 1].heading)) && !title_page) { pages[*page].heading = htmlGetText(current_heading); pages[*page].headnode = current_heading; } if ((t->markup - MARKUP_H1) < TocLevels && !title_page) { DEBUG_printf(("H%d: heading_pages[%d] = %d\n", t->markup - MARKUP_H1 + 1, (int)num_headings, *page - 1)); // See if we need to resize the headings arrays... 
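  // heading_pages[] and heading_tops[] grow in ALLOC_HEADINGS chunks;
  // newly added slots are zeroed, and on a failed realloc the allocation
  // count is rolled back so the arrays stay consistent.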
if (num_headings >= alloc_headings) { alloc_headings += ALLOC_HEADINGS; if (num_headings == 0) temp = (int *)malloc(sizeof(int) * alloc_headings); else temp = (int *)realloc(heading_pages, sizeof(int) * alloc_headings); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d headings - %s", (int)alloc_headings, strerror(errno)); alloc_headings -= ALLOC_HEADINGS; return; } memset(temp + alloc_headings - ALLOC_HEADINGS, 0, sizeof(int) * ALLOC_HEADINGS); heading_pages = temp; if (num_headings == 0) temp = (int *)malloc(sizeof(int) * alloc_headings); else temp = (int *)realloc(heading_tops, sizeof(int) * alloc_headings); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d headings - %s", (int)alloc_headings, strerror(errno)); alloc_headings -= ALLOC_HEADINGS; return; } memset(temp + alloc_headings - ALLOC_HEADINGS, 0, sizeof(int) * ALLOC_HEADINGS); heading_tops = temp; } heading_pages[num_headings] = *page; heading_tops[num_headings] = (int)(*y + 4 * _htmlSpacings[SIZE_P]); num_headings ++; } parse_paragraph(t, left, right, bottom, top, x, y, page, needspace); if (t->halignment == ALIGN_RIGHT && t->markup == MARKUP_H1 && OutputType == OUTPUT_BOOK && !title_page) { /* * Special case - chapter heading for users manual... */ *y = bottom + 0.5f * (top - bottom); } } #if defined(PARA_DEBUG) && !defined(DEBUG) # undef DEBUG_printf # undef DEBUG_puts # define DEBUG_printf(x) printf x # define DEBUG_puts(x) puts(x) #endif /* PARA_DEBUG && !defined(DEBUG) */ /* * 'parse_paragraph()' - Parse a paragraph tree and produce rendering list * output. */ static void parse_paragraph(tree_t *t, /* I - Tree to parse */ float left, /* I - Left margin */ float right, /* I - Printable width */ float bottom, /* I - Bottom margin */ float top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int needspace)/* I - Need whitespace? */ { int whitespace; /* Non-zero if a fragment ends in whitespace */ tree_t *flat, *start, *end, *prev, *temp; float width, height, offset, spacing, borderspace, temp_y, temp_width, temp_height; float format_width, image_y, image_left, image_right; int image_page = *page; float char_spacing; int num_chars; render_t *r; uchar *align, *hspace, *vspace, *link, *border; float rgb[3]; uchar line[10240], *lineptr, *dataptr; tree_t *linetype; float linex, linewidth; int firstline; DEBUG_printf(("parse_paragraph(t=%p, left=%.1f, right=%.1f, bottom=%.1f, top=%.1f, x=%.1f, y=%.1f, page=%d, needspace=%d\n", (void *)t, left, right, bottom, top, *x, *y, *page, needspace)); flat = flatten_tree(t->child); image_left = left; image_right = right; image_y = 0; if (flat == NULL) DEBUG_puts("parse_paragraph: flat == NULL!"); // Add leading whitespace... if (*y < top && needspace) *y -= _htmlSpacings[SIZE_P]; /* * First scan for images with left/right alignment tags... 
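 * Images with ALIGN="LEFT" or ALIGN="RIGHT" are unlinked from the
 * flattened fragment list and rendered immediately against the current
 * margins; image_left/image_right are then pulled inward so the text
 * that follows is formatted beside the image until the line position
 * drops below image_y again.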
*/ for (temp = flat, prev = NULL; temp != NULL;) { if (temp->markup == MARKUP_IMG) update_image_size(temp); if (temp->markup == MARKUP_IMG && (align = htmlGetVariable(temp, (uchar *)"ALIGN"))) { if ((border = htmlGetVariable(temp, (uchar *)"BORDER")) != NULL) borderspace = (float)atof((char *)border); else if (temp->link) borderspace = 1; else borderspace = 0; borderspace *= PagePrintWidth / _htmlBrowserWidth; if (strcasecmp((char *)align, "LEFT") == 0) { if ((vspace = htmlGetVariable(temp, (uchar *)"VSPACE")) != NULL) *y -= atoi((char *)vspace); if (*y < (bottom + temp->height + 2 * borderspace)) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } if (borderspace > 0.0f) { if (temp->link && PSLevel == 0) memcpy(rgb, link_color, sizeof(rgb)); else { rgb[0] = temp->red / 255.0f; rgb[1] = temp->green / 255.0f; rgb[2] = temp->blue / 255.0f; } // Top new_render(*page, RENDER_BOX, image_left, *y - borderspace, temp->width + 2 * borderspace, borderspace, rgb); // Left new_render(*page, RENDER_BOX, image_left, *y - temp->height - 2 * borderspace, borderspace, temp->height + 2 * borderspace, rgb); // Right new_render(*page, RENDER_BOX, image_left + temp->width + borderspace, *y - temp->height - 2 * borderspace, borderspace, temp->height + 2 * borderspace, rgb); // Bottom new_render(*page, RENDER_BOX, image_left, *y - temp->height - 2 * borderspace, temp->width + 2 * borderspace, borderspace, rgb); } *y -= borderspace; new_render(*page, RENDER_IMAGE, image_left + borderspace, *y - temp->height, temp->width, temp->height, image_find((char *)htmlGetVariable(temp, (uchar *)"REALSRC"))); if (temp->link && (link = htmlGetVariable(temp->link, (uchar *)"_HD_FULL_HREF")) != NULL) { /* * Add a page link... */ new_render(*page, RENDER_LINK, image_left + borderspace, *y - temp->height, temp->width, temp->height, link); } *y -= borderspace; if (vspace != NULL) *y -= atoi((char *)vspace); image_left += temp->width + 2 * borderspace; temp_y = *y - temp->height; image_page = *page; if (temp_y < image_y || image_y == 0) image_y = temp_y; if ((hspace = htmlGetVariable(temp, (uchar *)"HSPACE")) != NULL) image_left += atoi((char *)hspace); if (prev != NULL) prev->next = temp->next; else flat = temp->next; free(temp); temp = prev; } else if (strcasecmp((char *)align, "RIGHT") == 0) { if ((vspace = htmlGetVariable(temp, (uchar *)"VSPACE")) != NULL) *y -= atoi((char *)vspace); if (*y < (bottom + temp->height + 2 * borderspace)) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } image_right -= temp->width + 2 * borderspace; image_page = *page; if (borderspace > 0.0f) { if (temp->link && PSLevel == 0) memcpy(rgb, link_color, sizeof(rgb)); else { rgb[0] = temp->red / 255.0f; rgb[1] = temp->green / 255.0f; rgb[2] = temp->blue / 255.0f; } // Top new_render(*page, RENDER_BOX, image_right, *y - borderspace, temp->width + 2 * borderspace, borderspace, rgb); // Left new_render(*page, RENDER_BOX, image_right, *y - temp->height - 2 * borderspace, borderspace, temp->height + 2 * borderspace, rgb); // Right new_render(*page, RENDER_BOX, image_right + temp->width + borderspace, *y - temp->height - 2 * borderspace, borderspace, temp->height + 2 * borderspace, rgb); // Bottom new_render(*page, RENDER_BOX, image_right, *y - temp->height - 2 * borderspace, temp->width + 2 * borderspace, borderspace, rgb); } *y -= borderspace; new_render(*page, RENDER_IMAGE, image_right + borderspace, *y - temp->height, temp->width, temp->height, image_find((char *)htmlGetVariable(temp, 
(uchar *)"REALSRC"))); if (temp->link && (link = htmlGetVariable(temp->link, (uchar *)"_HD_FULL_HREF")) != NULL) { /* * Add a page link... */ new_render(*page, RENDER_LINK, image_right + borderspace, *y - temp->height, temp->width, temp->height, link); } *y -= borderspace; if (vspace != NULL) *y -= atoi((char *)vspace); temp_y = *y - temp->height; if (temp_y < image_y || image_y == 0) image_y = temp_y; if ((hspace = htmlGetVariable(temp, (uchar *)"HSPACE")) != NULL) image_right -= atoi((char *)hspace); if (prev != NULL) prev->next = temp->next; else flat = temp->next; free(temp); temp = prev; } } if (temp != NULL) { prev = temp; temp = temp->next; } else temp = flat; } /* * Then format the text and inline images... */ format_width = image_right - image_left; firstline = 1; DEBUG_printf(("format_width = %.1f\n", format_width)); // Make stupid compiler warnings go away (if you can't put // enough smarts in the compiler, don't add the warning!) offset = 0.0f; temp_width = 0.0f; temp_height = 0.0f; lineptr = NULL; linex = 0.0f; linewidth = 0.0f; while (flat != NULL) { start = flat; end = flat; width = 0.0; while (flat != NULL) { // Get fragments... temp_width = 0.0; temp = flat; whitespace = 0; while (temp != NULL && !whitespace) { if (temp->markup == MARKUP_NONE && temp->data[0] == ' ') { if (temp == start) temp_width -= _htmlWidths[temp->typeface][temp->style][' '] * _htmlSizes[temp->size] * 0.001f; else if (temp_width > 0.0f) whitespace = 1; } else whitespace = 0; if (whitespace) break; if (temp->markup == MARKUP_IMG) { if ((border = htmlGetVariable(temp, (uchar *)"BORDER")) != NULL) borderspace = (float)atof((char *)border); else if (temp->link) borderspace = 1; else borderspace = 0; borderspace *= PagePrintWidth / _htmlBrowserWidth; temp_width += 2 * borderspace; } prev = temp; temp = temp->next; temp_width += prev->width; if ((temp_width >= format_width && prev->markup == MARKUP_IMG) || prev->markup == MARKUP_BR) { break; } else if (prev->markup == MARKUP_NONE) { int ch = prev->data[strlen((char *)prev->data) - 1]; if (_htmlUTF8) ch = _htmlUnicode[ch]; if (ch == 173) break; } } if ((width + temp_width) <= format_width) { width += temp_width; end = temp; flat = temp; if (prev->markup == MARKUP_BR) break; } else if (width == 0.0) { width += temp_width; end = temp; flat = temp; break; } else break; } if (start == end) { end = start->next; flat = start->next; width = start->width; } for (height = 0.0, num_chars = 0, temp = prev = start; temp != end; temp = temp->next) { prev = temp; if (temp->markup == MARKUP_NONE) num_chars += strlen((char *)temp->data); if (temp->height > height) height = temp->height; } for (spacing = 0.0, temp = prev = start; temp != end; temp = temp->next) { prev = temp; if (temp->markup != MARKUP_IMG) temp_height = (float)(temp->height * _htmlSpacings[0] / _htmlSizes[0]); else { if ((border = htmlGetVariable(temp, (uchar *)"BORDER")) != NULL) borderspace = (float)atof((char *)border); else if (temp->link) borderspace = 1; else borderspace = 0; borderspace *= PagePrintWidth / _htmlBrowserWidth; temp_height = temp->height + 2 * borderspace; } if (temp_height > spacing) spacing = temp_height; } if (firstline && end != NULL && *y < (bottom + height + _htmlSpacings[t->size])) { // Go to next page since only 1 line will fit on this one... 
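      // (Only done for the first line of the block, so a paragraph never
      // leaves a single orphan line at the bottom of a page.)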
(*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } firstline = 0; if (height == 0.0f) height = spacing; for (temp = start; temp != end; temp = temp->next) if (temp->markup != MARKUP_A) break; if (temp != NULL && temp->markup == MARKUP_NONE && temp->data[0] == ' ') { // Drop leading space... for (dataptr = temp->data; *dataptr; dataptr ++) *dataptr = dataptr[1]; *dataptr = '\0'; temp_width = _htmlWidths[temp->typeface][temp->style][' '] * _htmlSizes[temp->size] * 0.001f; temp->width -= temp_width; num_chars --; } if (end != NULL) temp = end->prev; else temp = NULL; DEBUG_printf((" BEFORE page=%d, y=%.1f, height=%.1f, spacing=%.1f, bottom=%.1f\n", *page, *y, height, spacing, bottom)); if (*y < (spacing + bottom)) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } *y -= height; DEBUG_printf((" page=%d, y=%.1f, width=%.1f, height=%.1f\n", *page, *y, width, height)); if (Verbosity) progress_update(100 - (int)(100 * (*y) / PagePrintLength)); char_spacing = 0.0f; whitespace = 0; temp = start; linetype = NULL; rgb[0] = temp->red / 255.0f; rgb[1] = temp->green / 255.0f; rgb[2] = temp->blue / 255.0f; switch (t->halignment) { case ALIGN_LEFT : linex = image_left; break; case ALIGN_CENTER : linex = image_left + 0.5f * (format_width - width); break; case ALIGN_RIGHT : linex = image_right - width; break; case ALIGN_JUSTIFY : linex = image_left; if (flat != NULL && flat->prev->markup != MARKUP_BR && num_chars > 1) char_spacing = (format_width - width) / (num_chars - 1); break; } while (temp != end) { if (temp->link != NULL && PSLevel == 0 && Links && temp->markup == MARKUP_NONE) { temp->red = (uchar)(link_color[0] * 255.0); temp->green = (uchar)(link_color[1] * 255.0); temp->blue = (uchar)(link_color[2] * 255.0); } /* * See if we are doing a run of characters in a line and need to * output this run... */ if (linetype != NULL && (temp->markup != MARKUP_NONE || temp->typeface != linetype->typeface || temp->style != linetype->style || temp->size != linetype->size || temp->superscript != linetype->superscript || temp->subscript != linetype->subscript || temp->red != linetype->red || temp->green != linetype->green || temp->blue != linetype->blue)) { r = new_render(*page, RENDER_TEXT, linex - linewidth, *y, linewidth, linetype->height, line); r->data.text.typeface = linetype->typeface; r->data.text.style = linetype->style; r->data.text.size = (float)_htmlSizes[linetype->size]; r->data.text.spacing = char_spacing; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); if (linetype->superscript) r->y += height - linetype->height; else if (linetype->subscript) r->y -= height - linetype->height; free(linetype); linetype = NULL; } if ((link = htmlGetVariable(temp, (uchar *)"ID")) != NULL) { /* * Add a target link... */ add_link(link, *page, (int)(*y + height)); } switch (temp->markup) { case MARKUP_A : if ((link = htmlGetVariable(temp, (uchar *)"NAME")) != NULL) { /* * Add a target link... 
*/ add_link(link, *page, (int)(*y + height)); } default : temp_width = temp->width; break; case MARKUP_NONE : if (temp->data == NULL) break; if (((temp->width - right + left) > 0.001 || (temp->height - top + bottom) > 0.001) && OverflowErrors) progress_error(HD_ERROR_CONTENT_TOO_LARGE, "Text on page %d too large - " "truncation or overlapping may occur!", *page + 1); if (linetype == NULL) { linetype = temp; lineptr = line; linewidth = 0.0; rgb[0] = temp->red / 255.0f; rgb[1] = temp->green / 255.0f; rgb[2] = temp->blue / 255.0f; } strlcpy((char *)lineptr, (char *)temp->data, sizeof(line) - (size_t)(lineptr - line)); temp_width = temp->width + char_spacing * strlen((char *)lineptr); if (temp->underline || (temp->link && LinkStyle && PSLevel == 0)) new_render(*page, RENDER_BOX, linex, *y - 1, temp_width, 0, rgb); if (temp->strikethrough) new_render(*page, RENDER_BOX, linex, *y + temp->height * 0.25f, temp_width, 0, rgb); linewidth += temp_width; lineptr += strlen((char *)lineptr); if (lineptr > line && lineptr[-1] == ' ') whitespace = 1; else whitespace = 0; break; case MARKUP_IMG : if (((temp->width - right + left) > 0.001 || (temp->height - top + bottom) > 0.001) && OverflowErrors) { DEBUG_printf(("IMAGE: %.3fx%.3f > %.3fx%.3f\n", temp->width, temp->height, right - left, top - bottom)); progress_error(HD_ERROR_CONTENT_TOO_LARGE, "Image on page %d too large - " "truncation or overlapping may occur!", *page + 1); } if ((border = htmlGetVariable(temp, (uchar *)"BORDER")) != NULL) borderspace = (float)atof((char *)border); else if (temp->link) borderspace = 1; else borderspace = 0; borderspace *= PagePrintWidth / _htmlBrowserWidth; temp_width += 2 * borderspace; switch (temp->valignment) { case ALIGN_TOP : offset = height - temp->height - 2 * borderspace; break; case ALIGN_MIDDLE : offset = 0.5f * (height - temp->height) - borderspace; break; case ALIGN_BOTTOM : offset = 0.0f; } if (borderspace > 0.0f) { // Top new_render(*page, RENDER_BOX, linex, *y + offset + temp->height + borderspace, temp->width + 2 * borderspace, borderspace, rgb); // Left new_render(*page, RENDER_BOX, linex, *y + offset, borderspace, temp->height + 2 * borderspace, rgb); // Right new_render(*page, RENDER_BOX, linex + temp->width + borderspace, *y + offset, borderspace, temp->height + 2 * borderspace, rgb); // Bottom new_render(*page, RENDER_BOX, linex, *y + offset, temp->width + 2 * borderspace, borderspace, rgb); } new_render(*page, RENDER_IMAGE, linex + borderspace, *y + offset + borderspace, temp->width, temp->height, image_find((char *)htmlGetVariable(temp, (uchar *)"REALSRC"))); whitespace = 0; temp_width = temp->width + 2 * borderspace; break; } if (temp->link != NULL && (link = htmlGetVariable(temp->link, (uchar *)"_HD_FULL_HREF")) != NULL) { /* * Add a page link... */ new_render(*page, RENDER_LINK, linex, *y + offset, temp->width, temp->height, link); } linex += temp_width; prev = temp; temp = temp->next; if (prev != linetype) free(prev); } /* * See if we have a run of characters that hasn't been output... 
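 * Any text still buffered in line[] is flushed as one final RENDER_TEXT
 * primitive carrying the accumulated width, character spacing and
 * super-/subscript offset for the run.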
*/ if (linetype != NULL) { r = new_render(*page, RENDER_TEXT, linex - linewidth, *y, linewidth, linetype->height, line); r->data.text.typeface = linetype->typeface; r->data.text.style = linetype->style; r->data.text.spacing = char_spacing; r->data.text.size = (float)_htmlSizes[linetype->size]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); if (linetype->superscript) r->y += height - linetype->height; else if (linetype->subscript) r->y -= height - linetype->height; free(linetype); } /* * Update the margins after we pass below the images... */ *y -= spacing - height; DEBUG_printf((" AFTER y=%.1f, bottom=%.1f\n", *y, bottom)); if (*y < bottom) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } if (*y < image_y || *page > image_page) { image_y = 0.0f; image_left = left; image_right = right; format_width = image_right - image_left; } } *x = left; if (*y > image_y && image_y > 0.0f && image_page == *page) *y = image_y; DEBUG_printf(("LEAVING parse_paragraph(), x = %.1f, y = %.1f, page = %d, image_y = %.1f\n", *x, *y, *page, image_y)); } #if defined(PARA_DEBUG) && !defined(DEBUG) # undef DEBUG_printf # undef DEBUG_puts # define DEBUG_printf(x) # define DEBUG_puts(x) #endif /* PARA_DEBUG && !DEBUG */ /* * 'parse_pre()' - Parse preformatted text and produce rendering list output. */ static void parse_pre(tree_t *t, /* I - Tree to parse */ float left, /* I - Left margin */ float right, /* I - Printable width */ float bottom, /* I - Bottom margin */ float top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int needspace) /* I - Need whitespace? */ { tree_t *flat, *start, *next; uchar *link, line[10240], *lineptr, *dataptr; int col; float width, height, rgb[3]; render_t *r; REF(right); DEBUG_printf(("parse_pre(t=%p, left=%.1f, right=%.1f, x=%.1f, y=%.1f, page=%d\n", (void *)t, left, right, *x, *y, *page)); if (t->child == NULL) return; if (*y < top && needspace) *y -= _htmlSpacings[SIZE_P]; flat = flatten_tree(t->child); if (flat == NULL) return; if (flat->markup == MARKUP_NONE && flat->data != NULL) { // Skip leading blank line, if present... for (dataptr = flat->data; isspace(*dataptr); dataptr ++); if (!*dataptr) { next = flat->next; free(flat); flat = next; } } while (flat != NULL) { for (height = 0.0f, start = flat; flat != NULL; flat = flat->next) { if (flat->height > height) height = flat->height; if (flat->markup == MARKUP_BR || (flat->markup == MARKUP_NONE && flat->data && flat->data[strlen((char *)flat->data) - 1] == '\n')) break; } if (flat) flat = flat->next; if (*y < (height + bottom)) { (*page) ++; *y = top; if (Verbosity) progress_show("Formatting page %d", *page); } *x = left; *y -= height; if (Verbosity) progress_update(100 - (int)(100 * (*y) / PagePrintLength)); col = 0; while (start != flat) { rgb[0] = start->red / 255.0f; rgb[1] = start->green / 255.0f; rgb[2] = start->blue / 255.0f; if (start->link && (link = htmlGetVariable(start->link, (uchar *)"_HD_FULL_HREF")) != NULL) { /* * Add a page link... */ new_render(*page, RENDER_LINK, *x, *y, start->width, start->height, link); if (PSLevel == 0 && Links) { memcpy(rgb, link_color, sizeof(rgb)); start->red = (uchar)(link_color[0] * 255.0); start->green = (uchar)(link_color[1] * 255.0); start->blue = (uchar)(link_color[2] * 255.0); if (LinkStyle) new_render(*page, RENDER_BOX, *x, *y - 1, start->width, 0, link_color); } } if ((link = htmlGetVariable(start, (uchar *)"ID")) != NULL) { /* * Add a target link... 
*/ add_link(link, *page, (int)(*y + height)); } switch (start->markup) { case MARKUP_COMMENT : parse_comment(start, &left, &right, &bottom, &top, x, y, page, NULL, 0); break; case MARKUP_A : if ((link = htmlGetVariable(start, (uchar *)"NAME")) != NULL) { /* * Add a target link... */ add_link(link, *page, (int)(*y + height)); } break; case MARKUP_NONE : for (lineptr = line, dataptr = start->data; *dataptr != '\0' && lineptr < (line + sizeof(line) - 1); dataptr ++) if (*dataptr == '\n') break; else if (*dataptr == '\t') { /* This code changed after 15 years to work around new compiler optimization bugs (Issue #349) */ int num_cols = 8 - (col & 7); memcpy(lineptr, " ", num_cols); lineptr += num_cols; col += num_cols; } else if (*dataptr != '\r') { *lineptr++ = *dataptr; col ++; } *lineptr = '\0'; width = get_width(line, start->typeface, start->style, start->size); r = new_render(*page, RENDER_TEXT, *x, *y, width, 0, line); r->data.text.typeface = start->typeface; r->data.text.style = start->style; r->data.text.size = (float)_htmlSizes[start->size]; memcpy(r->data.text.rgb, rgb, sizeof(rgb)); if (start->underline) new_render(*page, RENDER_BOX, *x, *y - 1, start->width, 0, rgb); if (start->strikethrough) new_render(*page, RENDER_BOX, *x, *y + start->height * 0.25f, start->width, 0, rgb); *x += start->width; break; case MARKUP_IMG : new_render(*page, RENDER_IMAGE, *x, *y, start->width, start->height, image_find((char *)htmlGetVariable(start, (uchar *)"REALSRC"))); *x += start->width; col ++; break; default : break; } next = start->next; free(start); start = next; } if ((*x - right) > 0.001 && OverflowErrors) progress_error(HD_ERROR_CONTENT_TOO_LARGE, "Preformatted text on page %d too long - " "truncation or overlapping may occur!", *page + 1); *y -= _htmlSpacings[t->size] - _htmlSizes[t->size]; } *x = left; } //#define TABLE_DEBUG 1 #ifdef TABLE_DEBUG # undef DEBUG_puts # define DEBUG_puts(x) puts(x) # define DEBUG 1 # undef DEBUG_printf # define DEBUG_printf(x) printf x #endif /* TABLE_DEBUG */ typedef struct { int debug; int num_cols, num_rows; float border, border_left, border_rgb[3], border_size, cellpadding, height; int col_spans[MAX_COLUMNS], row_spans[MAX_COLUMNS]; char col_fixed[MAX_COLUMNS], col_percent[MAX_COLUMNS]; float col_lefts[MAX_COLUMNS], col_rights[MAX_COLUMNS], col_widths[MAX_COLUMNS], col_swidths[MAX_COLUMNS], col_mins[MAX_COLUMNS], col_smins[MAX_COLUMNS], col_prefs[MAX_COLUMNS]; int cell_page[MAX_COLUMNS], // Start page for cell cell_endpage[MAX_COLUMNS]; // End page for cell float cell_y[MAX_COLUMNS], // Row for each cell cell_endy[MAX_COLUMNS], // Row for each cell cell_height[MAX_COLUMNS], // Height of each cell in a row span_heights[MAX_COLUMNS]; // Height of spans render_t *cell_bg[MAX_COLUMNS]; // Background rectangles render_t *cell_start[MAX_COLUMNS]; // Start of the content for a cell in the row render_t *cell_end[MAX_COLUMNS]; // End of the content for a cell in a row } hdtable_t; /* * 'render_table_row()' - Render a table row. 
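 * The row is rendered one cell at a time: ROWSPAN/COLSPAN bookkeeping is
 * set up first, each cell's contents are laid out with parse_doc() into
 * its column slot (between col_lefts[] and col_rights[]), and the start
 * and end page/position of every cell are recorded so the vertical
 * alignment and cell background/border drawing can be completed once the
 * final row height is known.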
*/ static void render_table_row(hdtable_t &table, tree_t ***cells, int row, uchar *height_var, float left, // I - Left margin float right, // I - Printable width float bottom, // I - Bottom margin float top, // I - Printable top float *x, float *y, int *page) { int col, tcol, colspan, rowspan, tempspace; float width, temp_y; int temp_page; uchar *var; int do_valign; // True if we should do vertical alignment of cells int row_page; float row_y, row_starty, row_height, // Total height of the row temp_height; // Temporary holder uchar *bgcolor; float bgrgb[3]; do_valign = 1; row_height = 0.0f; row_page = *page; row_y = *y - table.cellpadding; row_starty = row_y; DEBUG_printf(("BEFORE row_y = %.1f, *y = %.1f, row_page = %d\n", row_y, *y, row_page)); for (col = 0, rowspan = 9999; col < table.num_cols; col += colspan) { if (table.row_spans[col] == 0) { if ((var = htmlGetVariable(cells[row][col], (uchar *)"ROWSPAN")) != NULL) table.row_spans[col] = atoi((char *)var); if (table.row_spans[col] == 1) table.row_spans[col] = 0; if (table.row_spans[col] > (table.num_rows - row)) table.row_spans[col] = table.num_rows - row; table.span_heights[col] = 0.0f; } if (table.row_spans[col] < rowspan) rowspan = table.row_spans[col]; for (colspan = 1; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; } if (!rowspan) rowspan = 1; for (col = 0; col < table.num_cols;) { for (colspan = 1; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; colspan --; DEBUG_printf((" col = %d, colspan = %d, left = %.1f, right = %.1f, cell = %p\n", col, colspan, table.col_lefts[col], table.col_rights[col + colspan], (void *)cells[row][col])); *x = table.col_lefts[col]; temp_y = *y - table.cellpadding; temp_page = *page; tempspace = 0; if (row == 0 || cells[row][col] != cells[row - 1][col]) { check_pages(*page); if (cells[row][col] == NULL) bgcolor = NULL; else if ((bgcolor = htmlGetVariable(cells[row][col], (uchar *)"BGCOLOR")) != NULL) { memcpy(bgrgb, background_color, sizeof(bgrgb)); get_color(bgcolor, bgrgb, 0); width = table.col_rights[col + colspan] - table.col_lefts[col] + 2 * table.cellpadding; table.border_left = table.col_lefts[col] - table.cellpadding; table.cell_bg[col] = new_render(*page, RENDER_BOX, table.border_left, row_y, width + table.border, 0.0, bgrgb); } else { table.cell_bg[col] = NULL; new_render(*page, RENDER_TEXT, -1.0f, -1.0f, 0.0, 0.0, (void *)""); } DEBUG_printf(("cell_bg[%d] = %p, pages[%d].end = %p\n", col, (void *)table.cell_bg[col], *page, (void *)pages[*page].end)); table.cell_start[col] = pages[*page].end; table.cell_page[col] = temp_page; table.cell_y[col] = temp_y; if (table.debug) { check_pages(*page); render_t *r; char table_text[255]; snprintf(table_text, sizeof(table_text), "cell=%p [%d,%d]", (void *)cells[row][col], row, col); r = new_render(temp_page, RENDER_TEXT, *x, temp_y, get_width((uchar *)table_text, TYPE_COURIER, STYLE_NORMAL, 1), _htmlSizes[1], table_text); r->data.text.typeface = TYPE_COURIER; r->data.text.style = STYLE_NORMAL; r->data.text.size = (float)_htmlSizes[1]; } if (cells[row][col] != NULL && cells[row][col]->child != NULL) { DEBUG_printf((" parsing cell %d,%d; width = %.1f\n", row, col, table.col_rights[col + colspan] - table.col_lefts[col])); bottom += table.cellpadding; top -= table.cellpadding; parse_doc(cells[row][col]->child, table.col_lefts + col, table.col_rights + col + colspan, &bottom, &top, x, &temp_y, &temp_page, NULL, &tempspace); bottom -= table.cellpadding; 
top += table.cellpadding; } table.cell_endpage[col] = temp_page; table.cell_endy[col] = temp_y; table.cell_height[col] = *y - table.cellpadding - temp_y; table.cell_end[col] = pages[*page].end; if (table.cell_start[col] == NULL) table.cell_start[col] = pages[*page].start; DEBUG_printf(("row = %d, col = %d, y = %.1f, cell_y = %.1f, cell_height = %.1f\n", row, col, *y - table.cellpadding, temp_y, table.cell_height[col])); DEBUG_printf(("cell_start[%d] = %p, cell_end[%d] = %p\n", col, (void *)table.cell_start[col], col, (void *)table.cell_end[col])); } if (table.row_spans[col] == 0 && table.cell_page[col] == table.cell_endpage[col] && table.cell_height[col] > row_height) row_height = table.cell_height[col]; if (table.row_spans[col] <= rowspan) { if (table.cell_page[col] != table.cell_endpage[col]) do_valign = 0; if (table.cell_endpage[col] > row_page) { row_page = table.cell_endpage[col]; row_y = table.cell_endy[col]; } else if (table.cell_endy[col] < row_y && table.cell_endpage[col] == row_page) row_y = table.cell_endy[col]; } DEBUG_printf(("**** col = %d, row = %d, row_y = %.1f, row_page = %d\n", col, row, row_y, row_page)); for (col ++; colspan > 0; colspan --, col ++) { table.cell_start[col] = NULL; table.cell_page[col] = table.cell_page[col - 1]; table.cell_y[col] = table.cell_y[col - 1]; table.cell_end[col] = NULL; table.cell_endpage[col] = table.cell_endpage[col - 1]; table.cell_endy[col] = table.cell_endy[col - 1]; table.cell_height[col] = table.cell_height[col - 1]; } } DEBUG_printf(("row = %d, row_y = %.1f, row_height = %.1f\n", row, row_y, row_height)); for (col = 0; col < table.num_cols; col += colspan) { for (colspan = 1; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; if (table.row_spans[col]) table.span_heights[col] += row_height; DEBUG_printf(("col = %d, cell_y = %.1f, cell_page = %d, cell_endpage = %d, row_spans = %d, span_heights = %.1f, cell_height = %.1f\n", col, table.cell_y[col], table.cell_page[col], table.cell_endpage[col], table.row_spans[col], table.span_heights[col], table.cell_height[col])); } for (col = 0; col < table.num_cols; col += colspan) { for (colspan = 1; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; if (table.row_spans[col] == rowspan && table.cell_page[col] == table.cell_endpage[col] && table.cell_height[col] > table.span_heights[col]) { temp_height = table.cell_height[col] - table.span_heights[col]; row_height += temp_height; DEBUG_printf(("Adjusting row-span height by %.1f, new row_height = %.1f\n", temp_height, row_height)); for (tcol = 0; tcol < table.num_cols; tcol ++) if (table.row_spans[tcol]) { table.span_heights[tcol] += temp_height; DEBUG_printf(("col = %d, span_heights = %.1f\n", tcol, table.span_heights[tcol])); } } } DEBUG_printf(("AFTER row = %d, row_page = %d, row_y = %.1f, row_height = %.1f, *y = %.1f, do_valign = %d\n", row, row_page, row_y, row_height, *y, do_valign)); /* * Do the vertical alignment */ if (do_valign) { height_var = NULL; if (cells[row][0] != NULL) { if ((height_var = htmlGetVariable(cells[row][0]->parent, (uchar *)"HEIGHT")) == NULL) for (col = 0; col < table.num_cols; col ++) if (htmlGetVariable(cells[row][col], (uchar *)"ROWSPAN") == NULL) if ((height_var = htmlGetVariable(cells[row][col], (uchar *)"HEIGHT")) != NULL) break; } if (height_var != NULL) { // Hardcode the row height... 
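     /*
      * A trailing '%' means a fraction of the printable page length; any
      * other value is taken as browser pixels and scaled to points using
      * PagePrintWidth / _htmlBrowserWidth.  The result is clamped to the
      * table HEIGHT (if given) and only enforced when it exceeds the row
      * height computed from the cell contents.
      */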
if (height_var[strlen((char *)height_var) - 1] == '%') temp_height = (float)(atof((char *)height_var) * 0.01f * PagePrintLength); else temp_height = (float)(atof((char *)height_var) * PagePrintWidth / _htmlBrowserWidth); if (table.height > 0 && temp_height > table.height) temp_height = table.height; temp_height -= 2 * table.cellpadding; if (temp_height > row_height) { // Only enforce the height if it is > the actual row height. row_height = temp_height; row_y = *y - temp_height; } } for (col = 0; col < table.num_cols; col += colspan + 1) { render_t *p; float delta_y; for (colspan = 1; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; colspan --; if (table.cell_start[col] == NULL || table.row_spans[col] > rowspan || cells[row][col] == NULL || cells[row][col]->child == NULL) continue; if (table.row_spans[col] == 1) { int tcol; float span_height = 0.0f; for (tcol = 0; tcol < table.num_cols; tcol ++) { if (table.row_spans[col] == 1 && table.span_heights[col] > span_height) span_height = table.span_heights[col]; } switch (cells[row][col]->valignment) { case ALIGN_MIDDLE : // delta_y = (table.span_heights[col] - table.cell_height[col]) * 0.5f; delta_y = (span_height - table.cell_height[col]) * 0.5f; break; case ALIGN_BOTTOM : // delta_y = table.span_heights[col] - table.cell_height[col]; delta_y = span_height - table.cell_height[col]; break; default : delta_y = 0.0f; break; } } else if (table.row_spans[col]) { delta_y = 0.0f; } else { switch (cells[row][col]->valignment) { case ALIGN_MIDDLE : delta_y = (row_height - table.cell_height[col]) * 0.5f; break; case ALIGN_BOTTOM : delta_y = row_height - table.cell_height[col]; break; default : delta_y = 0.0f; break; } } DEBUG_printf(("row = %d, col = %d, valign = %d, rowspans = %d, cell_height = %.1f, span_heights = %.1f, delta_y = %.1f\n", row, col, cells[row][col]->valignment, table.row_spans[col], table.cell_height[col], table.span_heights[col], delta_y)); if (delta_y > 0.0f) { if (table.cell_start[col] == table.cell_end[col]) p = table.cell_start[col]; else p = table.cell_start[col]->next; for (; p != NULL; p = p->next) { DEBUG_printf(("aligning %p (%s), y was %.1f, now %.1f\n", (void *)p, p->data.text.buffer, p->y, p->y - delta_y)); p->y -= delta_y; if (p == table.cell_end[col]) break; } } #ifdef DEBUG else { if (table.cell_start[col] == table.cell_end[col]) p = table.cell_start[col]; else p = table.cell_start[col]->next; for (; p != NULL; p = p->next) { printf("NOT aligning %p (%s)\n", (void *)p, p->data.text.buffer); if (p == table.cell_end[col]) break; } } #endif /* DEBUG */ } } // Update all current columns with ROWSPAN <= rowspan to use the same // end page and row... 
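  /*
   * Two passes: the first finds the furthest end page (and the lowest y on
   * that page) among the cells that finish in this row; the second copies
   * that end page/position back to each of those cells.
   */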
for (col = 0, temp_page = -1, temp_y = 99999999; col < table.num_cols; col ++) if (table.row_spans[col] <= rowspan && cells[row][col] != NULL && cells[row][col]->child != NULL) { if (table.cell_endpage[col] > temp_page) { temp_page = table.cell_endpage[col]; temp_y = table.cell_endy[col]; } else if (table.cell_endpage[col] == temp_page && table.cell_endy[col] < temp_y) temp_y = table.cell_endy[col]; } for (col = 0; col < table.num_cols; col ++) if (table.row_spans[col] <= rowspan && cells[row][col] != NULL && cells[row][col]->child != NULL) { table.cell_endpage[col] = temp_page; table.cell_endy[col] = temp_y; } row_y -= table.cellpadding; table.border_left = table.col_lefts[0] - table.cellpadding; width = table.col_rights[table.num_cols - 1] - table.col_lefts[0] + 2 * table.cellpadding; for (bgcolor = NULL, col = 0; col < table.num_cols; col ++) if (table.row_spans[col] <= rowspan && cells[row][col] && !htmlGetVariable(cells[row][col], (uchar *)"ROWSPAN") && (bgcolor = htmlGetVariable(cells[row][col]->parent, (uchar *)"BGCOLOR")) != NULL) break; if (bgcolor) { memcpy(bgrgb, background_color, sizeof(bgrgb)); get_color(bgcolor, bgrgb, 0); if (row_page > *page) { // Draw background on multiple pages... // Bottom of first page... new_render(*page, RENDER_BOX, table.border_left, bottom, width, row_starty - bottom + table.cellpadding, bgrgb, pages[*page].start); // Intervening pages... for (temp_page = *page + 1; temp_page < row_page; temp_page ++) { new_render(temp_page, RENDER_BOX, table.border_left, bottom, width, top - bottom, bgrgb, pages[temp_page].start); } // Top of last page... check_pages(*page); new_render(row_page, RENDER_BOX, table.border_left, row_y, width, top - row_y, bgrgb, pages[row_page].start); } else { // Draw background in row... new_render(row_page, RENDER_BOX, table.border_left, row_y, width, row_height + 2 * table.cellpadding, bgrgb, pages[row_page].start); } } for (col = 0; col < table.num_cols; col += colspan + 1) { for (colspan = 0; (col + colspan) < table.num_cols; colspan ++) if (cells[row][col] != cells[row][col + colspan]) break; else if (table.row_spans[col + colspan] > 0) { DEBUG_printf(("row = %d, col = %d, decrementing row_spans (%d) to %d...\n", row, col, table.row_spans[col + colspan], table.row_spans[col + colspan] - rowspan)); table.row_spans[col + colspan] -= rowspan; } colspan --; width = table.col_rights[col + colspan] - table.col_lefts[col] + 2 * table.cellpadding; if (cells[row][col] == NULL || cells[row][col]->child == NULL || table.row_spans[col] > 0) continue; DEBUG_printf(("DRAWING BORDER+BACKGROUND: col=%d, row=%d, cell_page=%d, cell_y=%.1f\n" " cell_endpage=%d, cell_endy=%.1f\n", col, row, table.cell_page[col], table.cell_y[col], table.cell_endpage[col], table.cell_endy[col])); if ((bgcolor = htmlGetVariable(cells[row][col], (uchar *)"BGCOLOR")) != NULL) { memcpy(bgrgb, background_color, sizeof(bgrgb)); get_color(bgcolor, bgrgb, 0); } table.border_left = table.col_lefts[col] - table.cellpadding; if (table.cell_page[col] != table.cell_endpage[col]) { /* * Crossing a page boundary... 
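       *
       * The cell starts on one page and ends on another, so the frame is
       * drawn in three parts: the top edge plus both sides down to the
       * bottom margin on the first page, both sides only on any
       * intervening pages, and both sides plus the bottom edge on the
       * last page.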
*/ if (table.border > 0) { /* * +---+---+---+ * | | | | */ // Top new_render(table.cell_page[col], RENDER_BOX, table.border_left, table.cell_y[col] + table.cellpadding, width + table.border, table.border, table.border_rgb); // Left new_render(table.cell_page[col], RENDER_BOX, table.border_left, bottom, table.border, table.cell_y[col] - bottom + table.cellpadding + table.border, table.border_rgb); // Right new_render(table.cell_page[col], RENDER_BOX, table.border_left + width, bottom, table.border, table.cell_y[col] - bottom + table.cellpadding + table.border, table.border_rgb); } if (bgcolor != NULL) { table.cell_bg[col]->y = bottom; table.cell_bg[col]->height = table.cell_y[col] - bottom + table.cellpadding + table.border; } for (temp_page = table.cell_page[col] + 1; temp_page < table.cell_endpage[col]; temp_page ++) { /* * | | | | * | | | | */ if (table.border > 0.0f) { // Left new_render(temp_page, RENDER_BOX, table.border_left, bottom, table.border, top - bottom, table.border_rgb); // Right new_render(temp_page, RENDER_BOX, table.border_left + width, bottom, table.border, top - bottom, table.border_rgb); } if (bgcolor != NULL) new_render(temp_page, RENDER_BOX, table.border_left, bottom, width + table.border, top - bottom, bgrgb, pages[temp_page].start); } if (table.border > 0.0f) { /* * | | | | * +---+---+---+ */ // Left new_render(table.cell_endpage[col], RENDER_BOX, table.border_left, row_y, table.border, top - row_y, table.border_rgb); // Right new_render(table.cell_endpage[col], RENDER_BOX, table.border_left + width, row_y, table.border, top - row_y, table.border_rgb); // Bottom new_render(table.cell_endpage[col], RENDER_BOX, table.border_left, row_y, width + table.border, table.border, table.border_rgb); } if (bgcolor != NULL) { check_pages(table.cell_endpage[col]); new_render(table.cell_endpage[col], RENDER_BOX, table.border_left, row_y, width + table.border, top - row_y, bgrgb, pages[table.cell_endpage[col]].start); } } else { /* * +---+---+---+ * | | | | * +---+---+---+ */ if (table.border > 0.0f) { // Top new_render(table.cell_page[col], RENDER_BOX, table.border_left, table.cell_y[col] + table.cellpadding, width + table.border, table.border, table.border_rgb); // Left new_render(table.cell_page[col], RENDER_BOX, table.border_left, row_y, table.border, table.cell_y[col] - row_y + table.cellpadding + table.border, table.border_rgb); // Right new_render(table.cell_page[col], RENDER_BOX, table.border_left + width, row_y, table.border, table.cell_y[col] - row_y + table.cellpadding + table.border, table.border_rgb); // Bottom new_render(table.cell_page[col], RENDER_BOX, table.border_left, row_y, width + table.border, table.border, table.border_rgb); } if (bgcolor != NULL) { table.cell_bg[col]->y = row_y; table.cell_bg[col]->height = table.cell_y[col] - row_y + table.cellpadding + table.border; } } } *page = row_page; *y = row_y; } /* * 'parse_table()' - Parse a table and produce rendering output. */ static void parse_table(tree_t *t, // I - Tree to parse float left, // I - Left margin float right, // I - Printable width float bottom, // I - Bottom margin float top, // I - Printable top float *x, // IO - X position float *y, // IO - Y position int *page, // IO - Page # int needspace) // I - Need whitespace? 
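/*
 * Rough outline of the algorithm below: scan the tree to count rows and
 * columns and collect per-column width/min/pref data, run several passes
 * to assign final column widths within the requested table width, render
 * each row with render_table_row() (repeating any THEAD header row on new
 * pages), then draw the table background and any bottom caption before
 * freeing the cell array.
 */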
{ int col, row, header_row = -1, tcol, colspan, rowspan, alloc_rows, regular_cols; hdtable_t table; float col_width, col_min, col_pref, col_height, cellspacing, width, pref_width, span_width, regular_width, actual_width, table_width, min_width, temp_width, header_height = 0.0, table_y, temp_bottom, temp_top; int temp_page, table_page; uchar *var, *height_var, // Row HEIGHT variable *header_height_var = NULL; tree_t *temprow, *tempcol, *tempnext, ***cells, *caption; // Caption for bottom, if any float temp_height; // Temporary holder uchar *bgcolor; float bgrgb[3]; const char *htmldoc_debug; // HTMLDOC_DEBUG env var DEBUG_puts("\n\nTABLE"); DEBUG_printf(("parse_table(t=%p, left=%.1f, right=%.1f, x=%.1f, y=%.1f, page=%d\n", (void *)t, left, right, *x, *y, *page)); if (t->child == NULL) return; /* Empty table... */ memset(&table, 0, sizeof(table)); /* * Check debug mode... */ if ((htmldoc_debug = getenv("HTMLDOC_DEBUG")) != NULL && (strstr(htmldoc_debug, "table") || strstr(htmldoc_debug, "all"))) table.debug = 1; else table.debug = 0; /* * Figure out the # of rows, columns, and the desired widths... */ cells = NULL; if ((var = htmlGetVariable(t, (uchar *)"WIDTH")) != NULL) { if (var[strlen((char *)var) - 1] == '%') table_width = (float)(atof((char *)var) * (right - left) / 100.0f); else table_width = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); if (table_width < 0.0f || table_width > PagePrintWidth) table_width = right - left; } else table_width = right - left; if ((var = htmlGetVariable(t, (uchar *)"HEIGHT")) != NULL) { if (var[strlen((char *)var) - 1] == '%') table.height = (float)(atof((char *)var) * (top - bottom) / 100.0f); else table.height = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else table.height = -1.0f; DEBUG_printf(("table_width = %.1f\n", table_width)); if ((var = htmlGetVariable(t, (uchar *)"CELLPADDING")) != NULL) { if ((table.cellpadding = atoi((char *)var)) < 0.0f) table.cellpadding = 0.0f; else if (table.cellpadding > 20.0f) table.cellpadding = 20.0f; } else table.cellpadding = 1.0f; if ((var = htmlGetVariable(t, (uchar *)"CELLSPACING")) != NULL) { if ((cellspacing = atoi((char *)var)) < 0.0f) cellspacing = 0.0f; else if (cellspacing > 20.0f) cellspacing = 20.0f; } else cellspacing = 0.0f; if ((var = htmlGetVariable(t, (uchar *)"BORDER")) != NULL) { if ((table.border = (float)atof((char *)var)) <= 0.0 && var[0] != '0') table.border = 1.0f; else if (table.border > 20.0f) table.border = 20.0f; table.cellpadding += table.border; } else table.border = 0.0f; if (table.debug && table.border == 0.0f) table.border = 0.01f; table.border_rgb[0] = t->red / 255.0f; table.border_rgb[1] = t->green / 255.0f; table.border_rgb[2] = t->blue / 255.0f; if ((var = htmlGetVariable(t, (uchar *)"BORDERCOLOR")) != NULL) get_color(var, table.border_rgb, 0); if (table.border == 0.0f && table.cellpadding > 0.0f) { /* * Ah, the strange table formatting nightmare that is HTML. * Netscape and MSIE assign an invisible border width of 1 * pixel if no border is specified... 
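   * We mimic that here by adding one browser pixel's worth of extra
   * cellpadding (it is scaled to points together with the rest below).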
*/ table.cellpadding += 1.0f; } table.border_size = table.border - 1.0f; cellspacing *= PagePrintWidth / _htmlBrowserWidth; table.cellpadding *= PagePrintWidth / _htmlBrowserWidth; table.border *= PagePrintWidth / _htmlBrowserWidth; table.border_size *= PagePrintWidth / _htmlBrowserWidth; DEBUG_printf(("border = %.1f, cellpadding = %.1f\n", table.border, table.cellpadding)); temp_bottom = bottom - table.cellpadding; temp_top = top + table.cellpadding; for (temprow = t->child, table.num_cols = 0, table.num_rows = 0, alloc_rows = 0, caption = NULL; temprow != NULL; temprow = tempnext) { tempnext = temprow->next; if (temprow->markup == MARKUP_CAPTION) { if ((var = htmlGetVariable(temprow, (uchar *)"ALIGN")) == NULL || strcasecmp((char *)var, "bottom")) { /* * Show caption at top... */ parse_paragraph(temprow, left, right, bottom, top, x, y, page, needspace); needspace = 1; } else { /* * Flag caption for bottom of table... */ caption = temprow; } } else if (temprow->markup == MARKUP_TR || ((temprow->markup == MARKUP_TBODY || temprow->markup == MARKUP_THEAD || temprow->markup == MARKUP_TFOOT) && temprow->child != NULL)) { if (temprow->markup == MARKUP_THEAD) header_row = table.num_rows; // Descend into table body as needed... if (temprow->markup == MARKUP_TBODY || temprow->markup == MARKUP_THEAD || temprow->markup == MARKUP_TFOOT) temprow = temprow->child; // Figure out the next row... if ((tempnext = temprow->next) == NULL) if (temprow->parent->markup == MARKUP_TBODY || temprow->parent->markup == MARKUP_THEAD || temprow->parent->markup == MARKUP_TFOOT) tempnext = temprow->parent->next; // Allocate memory for the table as needed... if (table.num_rows >= alloc_rows) { alloc_rows += ALLOC_ROWS; if (alloc_rows == ALLOC_ROWS) cells = (tree_t ***)malloc(sizeof(tree_t **) * (size_t)alloc_rows); else cells = (tree_t ***)realloc(cells, sizeof(tree_t **) * (size_t)alloc_rows); if (cells == (tree_t ***)0) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for table!"); return; } } if ((cells[table.num_rows] = (tree_t **)calloc(sizeof(tree_t *), MAX_COLUMNS)) == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for table!"); free(cells); return; } #ifdef DEBUG printf("BEFORE row %d: num_cols = %d\n", table.num_rows, table.num_cols); if (table.num_rows) for (col = 0; col < table.num_cols; col ++) printf(" col %d: row_spans[] = %d\n", col, table.row_spans[col]); #endif // DEBUG // Figure out the starting column... if (table.num_rows) { for (col = 0, rowspan = 9999; col < table.num_cols; col ++) if (table.row_spans[col] < rowspan) rowspan = table.row_spans[col]; for (col = 0; col < table.num_cols; col ++) table.row_spans[col] -= rowspan; for (col = 0; table.row_spans[col] && col < table.num_cols; col ++) cells[table.num_rows][col] = cells[table.num_rows - 1][col]; } else col = 0; for (tempcol = temprow->child; tempcol != NULL && col < MAX_COLUMNS; tempcol = tempcol->next) { if (tempcol->markup == MARKUP_TH && table.num_rows == 0) header_row = table.num_rows; if (tempcol->markup == MARKUP_TD || tempcol->markup == MARKUP_TH) { // Handle colspan and rowspan stuff... if ((var = htmlGetVariable(tempcol, (uchar *)"COLSPAN")) != NULL) colspan = atoi((char *)var); else colspan = 1; if ((var = htmlGetVariable(tempcol, (uchar *)"ROWSPAN")) != NULL) { table.row_spans[col] = atoi((char *)var); if (table.row_spans[col] == 1) table.row_spans[col] = 0; for (tcol = 1; tcol < colspan; tcol ++) table.row_spans[col + tcol] = table.row_spans[col]; } // Compute the cell size... 
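	/*
	 * get_cell_size() yields the cell's width plus its minimum and
	 * preferred widths and its minimum height.  The width is kept only
	 * when the cell carries an explicit WIDTH attribute (a trailing '%'
	 * marks the column as percentage-based); without one, col_width is
	 * reset to zero and only the min/pref values are used.
	 */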
col_width = get_cell_size(tempcol, 0.0f, table_width, &col_min, &col_pref, &col_height); if ((var = htmlGetVariable(tempcol, (uchar *)"WIDTH")) != NULL) { if (var[strlen((char *)var) - 1] == '%') { col_width -= 2.0 * table.cellpadding - cellspacing; if (colspan <= 1) table.col_percent[col] = 1; } else { col_width -= 2.0 * table.cellpadding; } } else col_width = 0.0f; tempcol->height = col_height; DEBUG_printf(("%d,%d: colsp=%d, rowsp=%d, width=%.1f, minw=%.1f, prefw=%.1f, minh=%.1f\n", col, table.num_rows, colspan, table.row_spans[col], col_width, col_min, col_pref, col_height)); // Add widths to columns... if (colspan > 1) { if (colspan > table.col_spans[col]) table.col_spans[col] = colspan; if (col_width > table.col_swidths[col]) table.col_swidths[col] = col_width; if (col_min > table.col_smins[col]) table.col_smins[col] = col_min; temp_width = col_width / colspan; for (int i = 0; i < colspan; i ++) { if (temp_width > table.col_widths[col + i]) table.col_widths[col + i] = temp_width; } } else { if (col_width > 0.0f) table.col_fixed[col] = 1; if (col_width > table.col_widths[col]) table.col_widths[col] = col_width; if (col_pref > table.col_prefs[col]) table.col_prefs[col] = col_pref; if (col_min > table.col_mins[col]) table.col_mins[col] = col_min; } while (colspan > 0 && col < MAX_COLUMNS) { cells[table.num_rows][col] = tempcol; col ++; colspan --; } while (table.row_spans[col] && col < table.num_cols) { cells[table.num_rows][col] = cells[table.num_rows - 1][col]; col ++; } } } DEBUG_printf(("header_row=%d\n", header_row)); if (col > table.num_cols) table.num_cols = col; #ifdef DEBUG printf("AFTER row %d: num_cols = %d\n", table.num_rows, table.num_cols); for (col = 0; col < table.num_cols; col ++) printf(" col %d: row_spans[] = %d\n", col, table.row_spans[col]); #endif // DEBUG table.num_rows ++; for (col = 0; col < table.num_cols; col ++) if (table.row_spans[col]) table.row_spans[col] --; } } /* * OK, some people apparently create HTML tables with no columns or * rows... If this happened, return immediately... */ if (table.num_cols == 0) return; /* * Now figure out the width of the table... */ if ((var = htmlGetVariable(t, (uchar *)"WIDTH")) != NULL) { if (var[strlen((char *)var) - 1] == '%') width = (float)(atof((char *)var) * (right - left) / 100.0f); else width = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else { for (col = 0, width = 0.0; col < table.num_cols; col ++) width += table.col_prefs[col]; width += (2 * table.cellpadding + cellspacing) * table.num_cols - cellspacing; if (width > (right - left)) width = right - left; } /* * Compute the width of each column based on the printable width. */ DEBUG_printf(("\nTABLE: %dx%d\n\n", table.num_cols, table.num_rows)); actual_width = (2 * table.cellpadding + cellspacing) * table.num_cols - cellspacing; regular_width = (width - actual_width) / table.num_cols; DEBUG_printf((" width = %.1f, actual_width = %.1f, regular_width = %.1f\n\n", width, actual_width, regular_width)); DEBUG_puts(" Col Width Min Pref Fixed? Percent?"); DEBUG_puts(" --- ------ ------ ------ ------ --------"); #ifdef DEBUG for (col = 0; col < table.num_cols; col ++) printf(" %-3d %-6.1f %-6.1f %-6.1f %-6s %s\n", col, table.col_widths[col], table.col_mins[col], table.col_prefs[col], table.col_fixed[col] ? "YES" : "NO", table.col_percent[col] ? "YES" : "NO"); puts(""); #endif /* DEBUG */ /* * The first pass just handles columns with a specified width... 
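 *
 * Columns with an explicit width use max(width, minimum width); all other
 * columns are counted as "regular" and contribute their minimum width for
 * now.  As a rough illustration (ignoring padding and spacing): with a
 * 400pt table, one fixed 150pt column, and two free columns of 50pt
 * minimum each, actual_width is 250pt after this pass and the remaining
 * 150pt is handed out by the later passes.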
*/ DEBUG_puts("PASS 1: fixed width handling\n"); for (col = 0, regular_cols = 0; col < table.num_cols; col ++) if (table.col_widths[col] > 0.0f) { if (table.col_mins[col] > table.col_widths[col]) { DEBUG_printf((" updating column %d to width=%.1f\n", col, table.col_mins[col])); table.col_widths[col] = table.col_mins[col]; } actual_width += table.col_widths[col]; } else { regular_cols ++; actual_width += table.col_mins[col]; } DEBUG_printf((" actual_width = %.1f, regular_cols = %d\n\n", actual_width,regular_cols)); /* * Pass two uses the "preferred" width whenever possible, and the * minimum otherwise... */ DEBUG_puts("PASS 2: preferred width handling\n"); for (col = 0, pref_width = 0.0f; col < table.num_cols; col ++) if (table.col_widths[col] == 0.0f) pref_width += table.col_prefs[col] - table.col_mins[col]; DEBUG_printf((" pref_width = %.1f\n", pref_width)); if (pref_width > 0.0f) { if ((regular_width = (width - actual_width) / pref_width) < 0.0f) regular_width = 0.0f; else if (regular_width > 1.0f) regular_width = 1.0f; DEBUG_printf((" regular_width = %.1f\n", regular_width)); for (col = 0; col < table.num_cols; col ++) if (table.col_widths[col] == 0.0f) { pref_width = (table.col_prefs[col] - table.col_mins[col]) * regular_width; if ((actual_width + pref_width) > width) { if (col == (table.num_cols - 1) && (width - actual_width) >= table.col_mins[col]) table.col_widths[col] = width - actual_width; else table.col_widths[col] = table.col_mins[col]; } else table.col_widths[col] = pref_width + table.col_mins[col]; DEBUG_printf((" col_widths[%d] = %.1f\n", col, table.col_widths[col])); actual_width += table.col_widths[col] - table.col_mins[col]; } } else { /* * Assign min widths for all cells... */ for (col = 0; col < table.num_cols; col ++) if (table.col_widths[col] == 0.0f) table.col_widths[col] = table.col_mins[col]; } DEBUG_printf((" actual_width = %.1f\n\n", actual_width)); /* * Pass three enforces any hard or minimum widths for COLSPAN'd * columns... */ DEBUG_puts("PASS 3: colspan handling\n\n"); for (col = 0; col < table.num_cols; col ++) { DEBUG_printf((" col %d, colspan %d\n", col, table.col_spans[col])); if (table.col_spans[col] > 1) { for (colspan = 0, span_width = 0.0f; colspan < table.col_spans[col]; colspan ++) span_width += table.col_widths[col + colspan]; pref_width = 0.0f; if (span_width < table.col_swidths[col]) pref_width = table.col_swidths[col]; if (span_width < table.col_smins[col] && pref_width < table.col_smins[col]) pref_width = table.col_smins[col]; for (colspan = 0; colspan < table.col_spans[col]; colspan ++) if (table.col_fixed[col + colspan]) { span_width -= table.col_widths[col + colspan]; pref_width -= table.col_widths[col + colspan]; } DEBUG_printf((" col_swidths=%.1f, col_smins=%.1f, span_width=%.1f, pref_width=%.1f\n", table.col_swidths[col], table.col_smins[col], span_width, pref_width)); if (pref_width > 0.0f && pref_width > span_width) { if (span_width >= 1.0f) { // Expand cells proportionately... regular_width = pref_width / span_width; for (colspan = 0; colspan < table.col_spans[col]; colspan ++) if (!table.col_fixed[col + colspan]) { actual_width -= table.col_widths[col + colspan]; table.col_widths[col + colspan] *= regular_width; actual_width += table.col_widths[col + colspan]; DEBUG_printf((" col_widths[%d] = %.1f\n", col + colspan, table.col_widths[col + colspan])); } } else { // Divide the space up equally between columns, since the // colspan area is always by itself... (this hack brought // to you by Yahoo! 
and their single cell tables with // colspan=2 :) regular_width = pref_width / table.col_spans[col]; for (colspan = 0; colspan < table.col_spans[col]; colspan ++) { actual_width += regular_width; table.col_widths[col + colspan] += regular_width; DEBUG_printf((" col_widths[%d] = %.1f\n", col, table.col_widths[col])); } } } } } DEBUG_printf((" actual_width = %.1f\n\n", actual_width)); /* * Pass four divides up the remaining space amongst the columns... */ DEBUG_puts("PASS 4: divide remaining space, if any...\n"); if (width > actual_width) { for (col = 0, colspan = 0; col < table.num_cols; col ++) if (!table.col_fixed[col] || table.col_percent[col]) colspan ++; if (colspan > 0) { regular_width = (width - actual_width) / table.num_cols; for (col = 0; col < table.num_cols; col ++) if (!table.col_fixed[col]) { table.col_widths[col] += regular_width; DEBUG_printf((" col_widths[%d] = %.1f\n", col, table.col_widths[col])); } } } else width = actual_width; DEBUG_puts(""); /* * The final pass is only run if the width > table_width... */ DEBUG_puts("PASS 5: Squeeze table as needed..."); if (width > table_width) { /* * Squeeze the table to fit the requested width or the printable width * as determined at the beginning... */ for (col = 0, min_width = -cellspacing; col < table.num_cols; col ++) min_width += table.col_mins[col] + 2 * table.cellpadding + cellspacing; DEBUG_printf((" table_width = %.1f, width = %.1f, min_width = %.1f\n", table_width, width, min_width)); temp_width = table_width - min_width; if (temp_width < 0.0f) temp_width = 0.0f; width -= min_width; if (width < 1.0f) width = 1.0f; for (col = 0; col < table.num_cols; col ++) { table.col_widths[col] = table.col_mins[col] + temp_width * (table.col_widths[col] - table.col_mins[col]) / width; DEBUG_printf((" col_widths[%d] = %.1f\n", col, table.col_widths[col])); } for (col = 0, width = -cellspacing; col < table.num_cols; col ++) width += table.col_widths[col] + 2 * table.cellpadding + cellspacing; DEBUG_printf((" new width = %.1f, max width = %.1f\n", width, right - left)); } if ((width - right + left) > 0.001f && OverflowErrors) progress_error(HD_ERROR_CONTENT_TOO_LARGE, "Table on page %d too wide - truncation or overlapping may occur!", *page + 1); DEBUG_puts(""); DEBUG_printf(("Final table width = %.1f, alignment = %d\n", width, t->halignment)); switch (t->halignment) { case ALIGN_LEFT : *x = left + table.cellpadding; break; case ALIGN_CENTER : *x = left + 0.5f * (right - left - width) + table.cellpadding; break; case ALIGN_RIGHT : *x = right - width + table.cellpadding; break; } for (col = 0; col < table.num_cols; col ++) { table.col_lefts[col] = *x; table.col_rights[col] = *x + table.col_widths[col]; *x = table.col_rights[col] + 2 * table.cellpadding + cellspacing; DEBUG_printf(("left[%d] = %.1f, right[%d] = %.1f\n", col, table.col_lefts[col], col, table.col_rights[col])); } /* * Now render the whole table... */ if (*y < top && needspace) *y -= _htmlSpacings[SIZE_P]; if (table.debug) { check_pages(*page); render_t *r; char table_text[255]; snprintf(table_text, sizeof(table_text), "t=%p", (void *)t); r = new_render(*page, RENDER_TEXT, left, *y, get_width((uchar *)table_text, TYPE_COURIER, STYLE_NORMAL, 3), _htmlSizes[3], table_text); r->data.text.typeface = TYPE_COURIER; r->data.text.style = STYLE_NORMAL; r->data.text.size = (float)_htmlSizes[3]; } table_page = *page; table_y = *y; for (row = 0; row < table.num_rows; row ++) { height_var = NULL; if (cells[row][0] != NULL) { /* * Do page comments... 
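      *
      * A comment node immediately preceding this row (for example a
      * "<!-- PAGE BREAK -->" or "<!-- MEDIA ... -->" directive) is
      * processed here so that it takes effect before the row itself is
      * rendered.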
*/ if (cells[row][0]->parent->prev != NULL && cells[row][0]->parent->prev->markup == MARKUP_COMMENT) parse_comment(cells[row][0]->parent->prev, &left, &right, &temp_bottom, &temp_top, x, y, page, NULL, 0); /* * Get height... */ if ((height_var = htmlGetVariable(cells[row][0]->parent, (uchar *)"HEIGHT")) == NULL) for (col = 0; col < table.num_cols; col ++) if (htmlGetVariable(cells[row][col], (uchar *)"ROWSPAN") == NULL) if ((height_var = htmlGetVariable(cells[row][col], (uchar *)"HEIGHT")) != NULL) break; } if (height_var != NULL && row == header_row) header_height_var = height_var; if (cells[row][0] != NULL && height_var != NULL) { // Row height specified; make sure it'll fit... if (height_var[strlen((char *)height_var) - 1] == '%') temp_height = (float)(atof((char *)height_var) * 0.01f * (PagePrintLength - 2 * table.cellpadding)); else temp_height = (float)(atof((char *)height_var) * PagePrintWidth / _htmlBrowserWidth); if (table.height > 0.0f && temp_height > table.height) temp_height = table.height; temp_height -= 2 * table.cellpadding; } else { // Use min height computed from get_cell_size()... for (col = 0, temp_height = (float)_htmlSpacings[SIZE_P]; col < table.num_cols; col ++) if (cells[row][col] != NULL && cells[row][col]->height > temp_height && !htmlGetVariable(cells[row][col], (uchar *)"ROWSPAN")) temp_height = cells[row][col]->height; if (table.height > 0.0) { // Table height specified; make sure it'll fit... if (temp_height > table.height) temp_height = table.height; temp_height -= 2 * table.cellpadding; } else if (temp_height > (PageLength / 8.0) && height_var == NULL) temp_height = PageLength / 8.0; } DEBUG_printf(("BEFORE row = %d, temp_height = %.1f, *y = %.1f, *page = %d\n", row, temp_height, *y, *page)); if (*y < (bottom + 2 * table.cellpadding + temp_height) && temp_height <= (top - bottom - 2 * table.cellpadding)) { DEBUG_puts("NEW PAGE"); *y = top - header_height; (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); if (row > 0 && header_row >= 0) { // Render header row... render_table_row(table, cells, header_row, header_height_var, left, right, bottom, top, x, y, page); } } float start_y = *y; temp_page = *page; render_table_row(table, cells, row, height_var, left, right, bottom, top, x, y, page); if (header_row >= 0 && row == header_row) { header_height = *y - start_y; top += header_height; } else if (temp_page != *page && header_row >= 0) { // Render header row on new page(s)... do { float temp_y = top - header_height; temp_page ++; render_table_row(table, cells, header_row, header_height_var, left, right, bottom, top, x, &temp_y, &temp_page); } while (temp_page < *page); } if (row < (table.num_rows - 1)) (*y) -= cellspacing; DEBUG_printf(("END row = %d, *y = %.1f, *page = %d\n", row, *y, *page)); } top -= header_height; /* * Handle table background color... */ if ((bgcolor = htmlGetVariable(t, (uchar *)"BGCOLOR")) != NULL) { memcpy(bgrgb, background_color, sizeof(bgrgb)); get_color(bgcolor, bgrgb, 0); table.border_left = table.col_lefts[0] - table.cellpadding; width = table.col_rights[table.num_cols - 1] - table.col_lefts[0] + 2 * table.cellpadding; if (table_page != *page) { // Draw background on multiple pages... // Bottom of first page... new_render(table_page, RENDER_BOX, table.border_left, bottom, width, table_y - bottom, bgrgb, pages[table_page].start); // Intervening pages... 
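      /* Every full page between the first and the last gets a background
         box covering the table width over the whole printable height. */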
for (temp_page = table_page + 1; temp_page < *page; temp_page ++) { new_render(temp_page, RENDER_BOX, table.border_left, bottom, width, top - bottom, bgrgb, pages[temp_page].start); } // Top of last page... check_pages(*page); new_render(*page, RENDER_BOX, table.border_left, *y, width, top - *y, bgrgb, pages[*page].start); } else { // Draw background in row... new_render(table_page, RENDER_BOX, table.border_left, *y, width, table_y - *y, bgrgb, pages[table_page].start); } } *x = left; if (caption) { /* * Show caption at bottom... */ parse_paragraph(caption, left, right, bottom, top, x, y, page, needspace); needspace = 1; } /* * Free memory for the table... */ if (table.num_rows > 0) { for (row = 0; row < table.num_rows; row ++) free(cells[row]); free(cells); } } #ifdef TABLE_DEBUG # undef DEBUG # undef DEBUG_puts # define DEBUG_puts(x) # undef DEBUG_printf # define DEBUG_printf(x) #endif /* TABLE_DEBUG */ /* * 'parse_list()' - Parse a list entry and produce rendering output. */ static void parse_list(tree_t *t, /* I - Tree to parse */ float *left, /* I - Left margin */ float *right, /* I - Printable width */ float *bottom, /* I - Bottom margin */ float *top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ int needspace) /* I - Need whitespace? */ { uchar number[255]; /* List number (for numbered types) */ uchar *value; /* VALUE= variable */ int typeface; /* Typeface of list number */ float width; /* Width of list number */ render_t *r; /* Render primitive */ int oldpage; /* Old page value */ float oldy; /* Old Y value */ float tempx; /* Temporary X value */ DEBUG_printf(("parse_list(t=%p, left=%.1f, right=%.1f, x=%.1f, y=%.1f, page=%d\n", (void *)t, *left, *right, *x, *y, *page)); if (needspace && *y < *top) { *y -= _htmlSpacings[t->size]; needspace = 0; } check_pages(*page); oldy = *y; oldpage = *page; r = pages[*page].end; tempx = *x; if (t->indent == 0) { // Adjust left margin when no UL/OL/DL is being used... *left += _htmlSizes[t->size]; tempx += _htmlSizes[t->size]; } parse_doc(t->child, left, right, bottom, top, &tempx, y, page, NULL, &needspace); // Handle when paragraph wrapped to new page... if (*page != oldpage) { // First see if anything was added to the old page... if ((r != NULL && r->next == NULL) || pages[oldpage].end == NULL) { // No, put the symbol on the next page... oldpage = *page; oldy = *top; } } if ((value = htmlGetVariable(t, (uchar *)"VALUE")) != NULL) { if (isdigit(value[0])) list_values[t->indent] = atoi((char *)value); else if (isupper(value[0])) list_values[t->indent] = value[0] - 'A' + 1; else list_values[t->indent] = value[0] - 'a' + 1; } switch (list_types[t->indent]) { case 'a' : case 'A' : case '1' : case 'i' : case 'I' : strlcpy((char *)number, format_number(list_values[t->indent], (char)list_types[t->indent]), sizeof(number)); strlcat((char *)number, ". 
", sizeof(number)); typeface = t->typeface; break; default : snprintf((char *)number, sizeof(number), "%c ", list_types[t->indent]); typeface = TYPE_SYMBOL; break; } width = get_width(number, typeface, t->style, t->size); r = new_render(oldpage, RENDER_TEXT, *left - width, oldy - _htmlSizes[t->size], width, _htmlSpacings[t->size], number); r->data.text.typeface = typeface; r->data.text.style = t->style; r->data.text.size = (float)_htmlSizes[t->size]; r->data.text.rgb[0] = t->red / 255.0f; r->data.text.rgb[1] = t->green / 255.0f; r->data.text.rgb[2] = t->blue / 255.0f; list_values[t->indent] ++; if (t->indent == 0) { // Adjust left margin when no UL/OL/DL is being used... *left -= _htmlSizes[t->size]; } } /* * 'init_list()' - Initialize the list type and value as necessary. */ static void init_list(tree_t *t) /* I - List entry */ { uchar *type, /* TYPE= variable */ *value; /* VALUE= variable */ static uchar *symbols = (uchar *)"\327\267\250\340"; if ((type = htmlGetVariable(t, (uchar *)"TYPE")) != NULL) { if (strlen((char *)type) == 1) list_types[t->indent] = type[0]; else if (strcasecmp((char *)type, "disc") == 0 || strcasecmp((char *)type, "circle") == 0) list_types[t->indent] = symbols[1]; else list_types[t->indent] = symbols[2]; } else if (t->markup == MARKUP_UL) list_types[t->indent] = symbols[t->indent & 3]; else if (t->markup == MARKUP_OL) list_types[t->indent] = '1'; if ((value = htmlGetVariable(t, (uchar *)"VALUE")) == NULL) value = htmlGetVariable(t, (uchar *)"START"); if (value != NULL) { if (isdigit(value[0])) list_values[t->indent] = atoi((char *)value); else if (isupper(value[0])) list_values[t->indent] = value[0] - 'A' + 1; else list_values[t->indent] = value[0] - 'a' + 1; } else if (t->markup == MARKUP_OL) list_values[t->indent] = 1; } /* * 'parse_comment()' - Parse a comment for HTMLDOC comments. */ #ifdef COMMENT_DEBUG # undef DEBUG_puts # define DEBUG_puts(x) puts(x) # define DEBUG # undef DEBUG_printf # define DEBUG_printf(x) printf x #endif /* COMMENT_DEBUG */ static void parse_comment(tree_t *t, /* I - Tree to parse */ float *left, /* I - Left margin */ float *right, /* I - Printable width */ float *bottom, /* I - Bottom margin */ float *top, /* I - Printable top */ float *x, /* IO - X position */ float *y, /* IO - Y position */ int *page, /* IO - Page # */ tree_t *para, /* I - Current paragraph */ int needspace) /* I - Need whitespace? */ { int i; /* Looping var */ const char *comment; /* Comment text */ char *ptr, /* Pointer into value string */ buffer[1024]; /* Buffer for strings */ int pos, /* Position (left, center, right) */ tof; /* Top of form */ DEBUG_printf(("parse_comment(t=%p, left=%.1f, right=%.1f, bottom=%.1f, " "top=%.1f, x=%.1f, y=%.1f, page=%d, para=%p, needspace=%d\n", (void *)t, *left, *right, *bottom, *top, *x, *y, *page, (void *)para, needspace)); if (t->data == NULL) return; if (para != NULL && para->child != NULL && para->child->next == NULL && para->child->child == NULL && para->child->markup == MARKUP_NONE && strcmp((const char *)para->child->data, " ") == 0) { // Remove paragraph consisting solely of whitespace... htmlDeleteTree(para->child); para->child = para->last_child = NULL; } // Mark if we are at the top of form... 
tof = (*y >= *top); DEBUG_printf(("BEFORE tof=%d, *y=%.1f, *top=%.1f, *page=%d, t->data=\"%s\"\n", tof, *y, *top, *page, t->data)); DEBUG_printf((" PagePrintWidth = %d\n", PagePrintWidth)); DEBUG_printf(("PagePrintLength = %d\n", PagePrintLength)); DEBUG_printf((" PageWidth = %d\n", PageWidth)); DEBUG_printf((" PageLength = %d\n", PageLength)); DEBUG_printf((" PageLeft = %d\n", PageLeft)); DEBUG_printf((" PageBottom = %d\n", PageBottom)); DEBUG_printf((" PageRight = %d\n", PageRight)); DEBUG_printf((" PageTop = %d\n", PageTop)); DEBUG_printf((" Landscape = %d\n", Landscape)); for (comment = (const char *)t->data; *comment;) { // Skip leading whitespace... while (isspace(*comment)) comment ++; if (!*comment) break; if (strncasecmp(comment, "PAGE BREAK", 10) == 0 && (!comment[10] || isspace(comment[10]))) { /* * <!-- PAGE BREAK --> generates a page break... */ comment += 10; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; *y = *top; tof = 1; } else if (strncasecmp(comment, "NEW PAGE", 8) == 0 && (!comment[8] || isspace(comment[8]))) { /* * <!-- NEW PAGE --> generates a page break... */ comment += 8; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; *y = *top; tof = 1; } else if (strncasecmp(comment, "NEW SHEET", 9) == 0 && (!comment[9] || isspace(comment[9]))) { /* * <!-- NEW SHEET --> generate a page break to a new sheet... */ comment += 9; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } if (NumberUp == 1) { // NEW SHEET breaks to the next sheet of paper... (*page) ++; if (PageDuplex && ((*page) & 1)) (*page) ++; } else { // NEW SHEET breaks to the next side/sheet... (*page) ++; for (i = *page - 1; i >= 0; i --) if (pages[i].nup != NumberUp) break; i ++; for (i = *page - i; (i % NumberUp) != 0; i ++, (*page) ++); } if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; *y = *top; tof = 1; } else if (strncasecmp(comment, "HALF PAGE", 9) == 0 && (!comment[9] || isspace(comment[9]))) { /* * <!-- HALF PAGE --> Go to the next half page. If in the * top half of a page, go to the bottom half. If in the * bottom half, go to the next page. */ float halfway; comment += 9; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; } halfway = 0.5f * (*top + *bottom); if (*y <= halfway) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; *y = *top; tof = 1; } else { *x = *left; *y = halfway; tof = 0; } } else if (strncasecmp(comment, "NEED ", 5) == 0) { /* * <!-- NEED amount --> generate a page break if there isn't * enough remaining space... */ comment += 5; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... 
tof = (*y >= *top); } if ((*y - get_measurement(comment, (float)_htmlSpacings[SIZE_P])) < *bottom) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; // Skip amount... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA COLOR ", 12) == 0) { // Media color for page... comment += 12; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; check_pages(*page); // Get color... if (*comment == '\"') { for (ptr = pages[*page].media_color, comment ++; *comment && *comment != '\"'; comment ++) if (ptr < (pages[*page].media_color + sizeof(pages[*page].media_color) - 1)) *ptr++ = *comment; if (*comment == '\"') comment ++; } else { for (ptr = pages[*page].media_color; *comment && !isspace(*comment); comment ++) if (ptr < (pages[*page].media_color + sizeof(pages[*page].media_color) - 1)) *ptr++ = *comment; } *ptr = '\0'; } else if (strncasecmp(comment, "MEDIA POSITION ", 15) == 0) { // Media position for page... comment += 15; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; check_pages(*page); pages[*page].media_position = atoi(comment); // Skip position... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA TYPE ", 11) == 0) { // Media type for page... comment += 11; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; check_pages(*page); // Get type... if (*comment == '\"') { for (ptr = pages[*page].media_type, comment ++; *comment && *comment != '\"'; comment ++) if (ptr < (pages[*page].media_type + sizeof(pages[*page].media_type) - 1)) *ptr++ = *comment; if (*comment == '\"') comment ++; } else { for (ptr = pages[*page].media_type; *comment && !isspace(*comment); comment ++) if (ptr < (pages[*page].media_type + sizeof(pages[*page].media_type) - 1)) *ptr++ = *comment; } *ptr = '\0'; } else if (strncasecmp(comment, "MEDIA SIZE ", 11) == 0) { // Media size... comment += 11; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... 
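      /*
       * MEDIA SIZE switches to a new sheet and re-derives the page metrics
       * with set_page_size().  The *right and *top margins are temporarily
       * converted into distances from the page edge (PagePrintWidth -
       * *right, etc.) so that they can be re-expressed relative to the new
       * page dimensions afterwards.
       */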
tof = (*y >= *top); } if (!tof) { (*page) ++; tof = 1; } if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); check_pages(*page); *right = PagePrintWidth - *right; *top = PagePrintLength - *top; set_page_size(comment); if (Landscape) { PagePrintWidth = PageLength - PageLeft - PageRight; PagePrintLength = PageWidth - PageTop - PageBottom; } else { PagePrintWidth = PageWidth - PageLeft - PageRight; PagePrintLength = PageLength - PageTop - PageBottom; } *right = PagePrintWidth - *right; *top = PagePrintLength - *top; *x = *left; *y = *top; pages[*page].width = PageWidth; pages[*page].length = PageLength; // Skip width... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA LEFT ", 11) == 0) { // Left margin... comment += 11; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; check_pages(*page); *right = PagePrintWidth - *right; PageLeft = pages[*page].left = get_measurement(comment); if (Landscape) PagePrintWidth = PageLength - PageRight - PageLeft; else PagePrintWidth = PageWidth - PageRight - PageLeft; *right = PagePrintWidth - *right; // Skip left... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA RIGHT ", 12) == 0) { // Right margin... comment += 12; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *y = *top; tof = 1; } *x = *left; check_pages(*page); *right = PagePrintWidth - *right; PageRight = pages[*page].right = get_measurement(comment); if (Landscape) PagePrintWidth = PageLength - PageRight - PageLeft; else PagePrintWidth = PageWidth - PageRight - PageLeft; *right = PagePrintWidth - *right; // Skip right... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA BOTTOM ", 13) == 0) { // Bottom margin... comment += 13; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); tof = 1; } *x = *left; check_pages(*page); *top = PagePrintLength - *top; PageBottom = pages[*page].bottom = get_measurement(comment); if (Landscape) PagePrintLength = PageWidth - PageTop - PageBottom; else PagePrintLength = PageLength - PageTop - PageBottom; *top = PagePrintLength - *top; *y = *top; // Skip bottom... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA TOP ", 10) == 0) { // Top margin... 
comment += 10; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); tof = 1; } *x = *left; check_pages(*page); *top = PagePrintLength - *top; PageTop = pages[*page].top = get_measurement(comment); if (Landscape) PagePrintLength = PageWidth - PageTop - PageBottom; else PagePrintLength = PageLength - PageTop - PageBottom; *top = PagePrintLength - *top; *y = *top; // Skip top... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA LANDSCAPE ", 16) == 0) { // Landscape on/off... comment += 16; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; tof = 1; } if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; check_pages(*page); if (strncasecmp(comment, "OFF", 3) == 0 || tolower(comment[0]) == 'n') { if (Landscape) { *right = PageLength - PageRight - *right; PagePrintWidth = PageWidth - PageRight - PageLeft; *right = PageWidth - PageRight - *right; *top = PageWidth - PageTop - *top; PagePrintLength = PageLength - PageTop - PageBottom; *top = PageLength - PageTop - *top; } Landscape = pages[*page].landscape = 0; } else if (strncasecmp(comment, "ON", 2) == 0 || tolower(comment[0]) == 'y') { if (!Landscape) { *top = PageLength - PageTop - *top; PagePrintLength = PageWidth - PageTop - PageBottom; *top = PageWidth - PageTop - *top; *right = PageWidth - PageRight - *right; PagePrintWidth = PageLength - PageRight - PageLeft; *right = PageLength - PageRight - *right; } Landscape = pages[*page].landscape = 1; } *y = *top; // Skip landscape... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "MEDIA DUPLEX ", 13) == 0) { // Duplex printing on/off... comment += 13; while (isspace(*comment)) comment ++; if (!*comment) break; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (!tof) { (*page) ++; *y = *top; tof = 1; } if (PageDuplex && ((*page) & 1)) (*page) ++; if (Verbosity) progress_show("Formatting page %d", *page); *x = *left; check_pages(*page); if (strncasecmp(comment, "OFF", 3) == 0 || tolower(comment[0]) == 'n') PageDuplex = pages[*page].duplex = 0; else if (strncasecmp(comment, "ON", 2) == 0 || tolower(comment[0]) == 'y') { if ((*page) & 1) { (*page) ++; check_pages(*page); if (Verbosity) progress_show("Formatting page %d", *page); } PageDuplex = pages[*page].duplex = 1; } // Skip duplex... while (*comment && !isspace(*comment)) comment ++; } else if (strncasecmp(comment, "HEADER ", 7) == 0) { // Header string... 
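      /*
       * Syntax: <!-- HEADER LEFT|CENTER|RIGHT "text" -->.  The position
       * selects Header[0..2]; the quoted text may use backslash escapes,
       * and an empty string clears that header slot.  Strings containing
       * $IMAGE or $HFIMAGE reserve extra room for the header image when
       * the top margin is adjusted below.
       */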
comment += 7; while (isspace(*comment)) comment ++; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (strncasecmp(comment, "LEFT", 4) == 0 && isspace(comment[4])) { pos = 0; comment += 4; } else if (strncasecmp(comment, "CENTER", 6) == 0 && isspace(comment[6])) { pos = 1; comment += 6; } else if (strncasecmp(comment, "RIGHT", 5) == 0 && isspace(comment[5])) { pos = 2; comment += 5; } else { progress_error(HD_ERROR_BAD_COMMENT, "Bad HEADER position: \"%s\"", comment); return; } while (isspace(*comment)) comment ++; if (*comment != '\"') { progress_error(HD_ERROR_BAD_COMMENT, "Bad HEADER string: \"%s\"", comment); return; } for (ptr = buffer, comment ++; *comment && *comment != '\"'; comment ++) { if (*comment == '\\') comment ++; if (ptr < (buffer + sizeof(buffer) - 1)) *ptr++ = *comment; } if (*comment == '\"') comment ++; *ptr = '\0'; if (ptr > buffer) Header[pos] = strdup(buffer); else Header[pos] = NULL; if (tof) { DEBUG_printf(("Setting header %d for page %d to \"%s\"...\n", pos, *page, Header[pos] ? Header[pos] : "(null)")); check_pages(*page); pages[*page].header[pos] = (uchar *)Header[pos]; } // Adjust top margin as needed... float adjust, image_adjust, temp_adjust; if (maxhfheight > HeadFootSize) image_adjust = (float)(maxhfheight + HeadFootSize); else image_adjust = (float)(2 * HeadFootSize); for (adjust = 0.0, pos = 0; pos < 3; pos ++) { if (Header[pos] && (strstr(Header[pos], "$IMAGE") != NULL || strstr(Header[pos], "$HFIMAGE") != NULL)) temp_adjust = image_adjust; else if (Header1[pos] && (strstr(Header1[pos], "$IMAGE") != NULL || strstr(Header1[pos], "$HFIMAGE") != NULL)) temp_adjust = image_adjust; else if (Header[pos] || Header1[pos]) temp_adjust = (float)(2 * HeadFootSize); else temp_adjust = 0.0; if (temp_adjust > adjust) adjust = temp_adjust; } *top = PagePrintLength - adjust; if (tof) *y = *top; } else if (strncasecmp(comment, "HEADER1 ", 8) == 0) { // First page header string... comment += 8; while (isspace(*comment)) comment ++; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (strncasecmp(comment, "LEFT", 4) == 0 && isspace(comment[4])) { pos = 0; comment += 4; } else if (strncasecmp(comment, "CENTER", 6) == 0 && isspace(comment[6])) { pos = 1; comment += 6; } else if (strncasecmp(comment, "RIGHT", 5) == 0 && isspace(comment[5])) { pos = 2; comment += 5; } else { progress_error(HD_ERROR_BAD_COMMENT, "Bad HEADER1 position: \"%s\"", comment); return; } while (isspace(*comment)) comment ++; if (*comment != '\"') { progress_error(HD_ERROR_BAD_COMMENT, "Bad HEADER1 string: \"%s\"", comment); return; } for (ptr = buffer, comment ++; *comment && *comment != '\"'; comment ++) { if (*comment == '\\') comment ++; if (ptr < (buffer + sizeof(buffer) - 1)) *ptr++ = *comment; } if (*comment == '\"') comment ++; *ptr = '\0'; if (ptr > buffer) Header1[pos] = strdup(buffer); else Header1[pos] = NULL; // Adjust top margin as needed... 
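      /*
       * Recompute the top margin: any header slot that embeds the
       * header/footer image needs room for it (maxhfheight + HeadFootSize,
       * never less than 2 * HeadFootSize), a plain text header needs
       * 2 * HeadFootSize, and empty slots need none; the largest
       * requirement wins.
       */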
float adjust, image_adjust, temp_adjust; if (maxhfheight > HeadFootSize) image_adjust = (float)(maxhfheight + HeadFootSize); else image_adjust = (float)(2 * HeadFootSize); for (adjust = 0.0, pos = 0; pos < 3; pos ++) { if (Header[pos] && (strstr(Header[pos], "$IMAGE") != NULL || strstr(Header[pos], "$HFIMAGE") != NULL)) temp_adjust = image_adjust; else if (Header1[pos] && (strstr(Header1[pos], "$IMAGE") != NULL || strstr(Header1[pos], "$HFIMAGE") != NULL)) temp_adjust = image_adjust; else if (Header[pos] || Header1[pos]) temp_adjust = (float)(2 * HeadFootSize); else temp_adjust = 0.0; if (temp_adjust > adjust) adjust = temp_adjust; } *top = PagePrintLength - adjust; if (tof) *y = *top; } else if (strncasecmp(comment, "FOOTER ", 7) == 0) { // Footer string... comment += 7; while (isspace(*comment)) comment ++; if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... tof = (*y >= *top); } if (strncasecmp(comment, "LEFT", 4) == 0 && isspace(comment[4])) { pos = 0; comment += 4; } else if (strncasecmp(comment, "CENTER", 6) == 0 && isspace(comment[6])) { pos = 1; comment += 6; } else if (strncasecmp(comment, "RIGHT", 5) == 0 && isspace(comment[5])) { pos = 2; comment += 5; } else { progress_error(HD_ERROR_BAD_COMMENT, "Bad FOOTER position: \"%s\"", comment); return; } while (isspace(*comment)) comment ++; if (*comment != '\"') { progress_error(HD_ERROR_BAD_COMMENT, "Bad FOOTER string: \"%s\"", comment); return; } for (ptr = buffer, comment ++; *comment && *comment != '\"'; comment ++) { if (*comment == '\\') comment ++; if (ptr < (buffer + sizeof(buffer) - 1)) *ptr++ = *comment; } if (*comment == '\"') comment ++; *ptr = '\0'; if (ptr > buffer) Footer[pos] = strdup(buffer); else Footer[pos] = NULL; if (tof) { check_pages(*page); pages[*page].footer[pos] = (uchar *)Footer[pos]; } // Adjust bottom margin as needed... float adjust, image_adjust, temp_adjust; if (maxhfheight > HeadFootSize) image_adjust = (float)(maxhfheight + HeadFootSize); else image_adjust = (float)(2 * HeadFootSize); for (adjust = 0.0, pos = 0; pos < 3; pos ++) { if (Footer[pos] && (strstr(Footer[pos], "$IMAGE") != NULL || strstr(Footer[pos], "$HFIMAGE") != NULL)) temp_adjust = image_adjust; else if (Footer[pos]) temp_adjust = (float)(2 * HeadFootSize); else temp_adjust = 0.0; if (temp_adjust > adjust) adjust = temp_adjust; } *bottom = adjust; } else if (strncasecmp(comment, "NUMBER-UP ", 10) == 0) { // N-up printing... comment += 10; while (isspace(*comment)) comment ++; if (!*comment) break; NumberUp = strtol(comment, (char **)&comment, 10); if (para != NULL && para->child != NULL) { parse_paragraph(para, *left, *right, *bottom, *top, x, y, page, needspace); htmlDeleteTree(para->child); para->child = para->last_child = NULL; // Mark if we are still at the top of form... 
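// (tof is non-zero while nothing has been placed on the current page yet, i.e. the write position is still at or above the top margin.)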
tof = (*y >= *top); } if (tof) { check_pages(*page); pages[*page].nup = NumberUp; } } else break; } DEBUG_printf(("LEAVING parse_comment() x=%.1f, y=%.1f, page=%d\n", *x, *y, *page)); DEBUG_printf((" PagePrintWidth = %d\n", PagePrintWidth)); DEBUG_printf(("PagePrintLength = %d\n", PagePrintLength)); DEBUG_printf((" PageWidth = %d\n", PageWidth)); DEBUG_printf((" PageLength = %d\n", PageLength)); DEBUG_printf((" PageLeft = %d\n", PageLeft)); DEBUG_printf((" PageBottom = %d\n", PageBottom)); DEBUG_printf((" PageRight = %d\n", PageRight)); DEBUG_printf((" PageTop = %d\n", PageTop)); DEBUG_printf((" Landscape = %d\n", Landscape)); } #ifdef COMMENT_DEBUG # undef DEBUG # undef DEBUG_puts # define DEBUG_puts(x) # undef DEBUG_printf # define DEBUG_printf(x) #endif /* COMMENT_DEBUG */ /* * 'find_background()' - Find the background image/color for the given document. */ static void find_background(tree_t *t) /* I - Document to search */ { uchar *var; /* BGCOLOR/BACKGROUND variable */ /* * First see if the --bodycolor or --bodyimage options have been * specified... */ if (BodyImage[0] != '\0') { background_image = image_load(BodyImage, !OutputColor); return; } else if (BodyColor[0] != '\0') { get_color((uchar *)BodyColor, background_color, 0); return; } /* * If not, search the document tree... */ while (t != NULL && background_image == NULL && background_color[0] == 1.0 && background_color[1] == 1.0 && background_color[2] == 1.0) { if (t->markup == MARKUP_BODY) { if ((var = htmlGetVariable(t, (uchar *)"BACKGROUND")) != NULL) background_image = image_load((char *)var, !OutputColor); if ((var = htmlGetVariable(t, (uchar *)"BGCOLOR")) != NULL) get_color(var, background_color, 0); } if (t->child != NULL) find_background(t->child); t = t->next; } } /* * 'write_background()' - Write the background image/color for the current * page.
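* (Fills the page with background_color and/or tiles background_image across it, using PDF or PostScript operators depending on PSLevel.)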
*/ static void write_background(int page, /* I - Page we are writing for */ FILE *out) /* I - File to write to */ { float x, y; float width, height; int page_width, page_length; if (Landscape) { page_length = pages[page].width; page_width = pages[page].length; } else { page_width = pages[page].width; page_length = pages[page].length; } if (background_color[0] != 1.0 || background_color[1] != 1.0 || background_color[2] != 1.0) { if (PSLevel > 0) { render_x = -1.0; render_y = -1.0; set_color(out, background_color); fprintf(out, "0 0 M %d %d F\n", page_width, page_length); } else { set_color(out, background_color); flate_printf(out, "0 0 %d %d re f\n", page_width, page_length); } } if (background_image != NULL) { width = (float)(background_image->width * 72.0f / _htmlPPI); height = (float)(background_image->height * 72.0f / _htmlPPI); if (width < 1.0f) width = 1.0f; if (height < 1.0f) height = 1.0f; switch (PSLevel) { case 0 : for (x = 0.0; x < page_width; x += width) for (y = page_length; y >= 0.0f;) { y -= height; flate_printf(out, "q %.1f 0 0 %.1f %.1f %.1f cm", width, height, x, y); flate_printf(out, "/I%d Do\n", background_image->obj); flate_puts("Q\n", out); } break; default : fprintf(out, "0 %.1f %d{/y exch neg %d add def\n", height, page_length + (int)height - 1, page_length); fprintf(out, "0 %.1f %d{/x exch def\n", width, page_width); fprintf(out, "GS[%.1f 0 0 %.1f x y]CM/iy -1 def\n", width, height); fprintf(out, "%d %d 8[%d 0 0 %d 0 %d]", background_image->width, background_image->height, background_image->width, -background_image->height, background_image->height); fputs("{/iy iy 1 add def BG iy get}", out); if (background_image->depth == 1) fputs("image\n", out); else fputs("false 3 colorimage\n", out); fputs("GR}for}for\n", out); break; } } } /* * 'new_render()' - Allocate memory for a new rendering structure. */ static render_t * /* O - New render structure */ new_render(int page, /* I - Page number (0-n) */ int type, /* I - Type of render primitive */ double x, /* I - Horizontal position */ double y, /* I - Vertical position */ double width, /* I - Width */ double height, /* I - Height */ void *data, /* I - Data */ render_t *insert) /* I - Insert before here... */ { render_t *r; /* New render primitive */ size_t datalen = 0; /* Length of data */ static render_t dummy; /* Dummy var for errors... */ DEBUG_printf(("new_render(page=%d, type=%d, x=%.1f, y=%.1f, width=%.1f, height=%.1f, data=%p, insert=%p)\n", page, type, x, y, width, height, (void *)data, (void *)insert)); check_pages(page); if (page < 0 || page >= (int)alloc_pages) { progress_error(HD_ERROR_INTERNAL_ERROR, "Page number (%d) out of range (1...%d)\n", page + 1, (int)alloc_pages); memset(&dummy, 0, sizeof(dummy)); return (&dummy); } if ((type != RENDER_TEXT && type != RENDER_LINK) || data == NULL) r = (render_t *)calloc(sizeof(render_t), 1); else { datalen = strlen((char *)data); r = (render_t *)calloc(sizeof(render_t) + datalen, 1); } if (r == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory on page %d\n", (int)page + 1); memset(&dummy, 0, sizeof(dummy)); return (&dummy); } r->type = type; r->x = (float)x; r->y = (float)y; r->width = (float)width; r->height = (float)height; switch (type) { case RENDER_TEXT : if (data == NULL) { free(r); return (NULL); } // Safe because buffer is allocated... 
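// (The calloc() above sized this node as sizeof(render_t) + datalen and zero-filled it, which is what makes the unbounded memcpy below safe.)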
memcpy((char *)r->data.text.buffer, (char *)data, datalen); get_color(_htmlTextColor, r->data.text.rgb); break; case RENDER_IMAGE : if (data == NULL) { free(r); return (NULL); } r->data.image = (image_t *)data; break; case RENDER_BOX : memcpy(r->data.box, data, sizeof(r->data.box)); break; case RENDER_LINK : if (data == NULL) { free(r); return (NULL); } // Safe because buffer is allocated... memcpy((char *)r->data.link, (char *)data, datalen); break; } if (insert) { if (insert->prev) insert->prev->next = r; else pages[page].start = r; r->prev = insert->prev; r->next = insert; insert->prev = r; } else { if (pages[page].end != NULL) pages[page].end->next = r; else pages[page].start = r; r->next = NULL; r->prev = pages[page].end; pages[page].end = r; } DEBUG_printf((" returning r = %p\n", (void *)r)); return (r); } /* * 'check_pages()' - Allocate memory for more pages as needed... */ static void check_pages(int page) // I - Current page { page_t *temp; // Temporary page pointer DEBUG_printf(("check_pages(%d)\n", page)); // See if we need to allocate memory for the page... if (page >= (int)alloc_pages) { // Yes, allocate enough for ALLOC_PAGES more pages... while (page >= (int)alloc_pages) alloc_pages += ALLOC_PAGES; // Do the pages pointers... if (num_pages == 0) temp = (page_t *)malloc(sizeof(page_t) * alloc_pages); else temp = (page_t *)realloc(pages, sizeof(page_t) * alloc_pages); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d pages - %s", (int)alloc_pages, strerror(errno)); alloc_pages -= ALLOC_PAGES; return; } memset(temp + num_pages, 0, (alloc_pages - num_pages) * sizeof(page_t)); pages = temp; } // Initialize the page data as needed... for (temp = pages + num_pages; (int)num_pages <= page; num_pages ++, temp ++) { if (!temp->width) { if (num_pages == 0 || !temp[-1].width || !temp[-1].length || chapter == 0) { temp->width = PageWidth; temp->length = PageLength; temp->left = PageLeft; temp->right = PageRight; temp->top = PageTop; temp->bottom = PageBottom; temp->duplex = PageDuplex; temp->landscape = Landscape; temp->nup = NumberUp; } else { memcpy(temp, temp - 1, sizeof(page_t)); temp->start = NULL; temp->end = NULL; } temp->url = current_url; if (chapter == 0) { memcpy(temp->header, TocHeader, sizeof(temp->header)); memcpy(temp->footer, TocFooter, sizeof(temp->footer)); } else { memcpy(temp->header, Header, sizeof(temp->header)); memcpy(temp->header1, Header1, sizeof(temp->header1)); memcpy(temp->footer, Footer, sizeof(temp->footer)); if (current_heading != temp->headnode) { temp->heading = htmlGetText(current_heading); temp->headnode = current_heading; } } memcpy(temp->background_color, background_color, sizeof(temp->background_color)); temp->background_image = background_image; } } } /* * 'add_link()' - Add a named link... */ static void add_link(uchar *name, /* I - Name of link */ int page, /* I - Page # */ int top) /* I - Y position */ { link_t *temp; /* New name */ if (name == NULL) return; DEBUG_printf(("add_link(name=\"%s\", page=%d, top=%d)\n", name, page, top)); if ((temp = find_link(name)) != NULL) { temp->page = (short)page; temp->top = (short)top; } else { // See if we need to allocate memory for links... if (num_links >= alloc_links) { // Allocate more links... 
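// Grow the links array in ALLOC_LINKS increments; if realloc() fails, the counter is rolled back and the new link is dropped.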
alloc_links += ALLOC_LINKS; if (num_links == 0) temp = (link_t *)malloc(sizeof(link_t) * alloc_links); else temp = (link_t *)realloc(links, sizeof(link_t) * alloc_links); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d links - %s", (int)alloc_links, strerror(errno)); alloc_links -= ALLOC_LINKS; return; } links = temp; } // Add a new link... temp = links + num_links; num_links ++; strlcpy((char *)temp->name, (char *)name, sizeof(temp->name)); temp->page = (short)page; temp->top = (short)top; if (num_links > 1) qsort(links, num_links, sizeof(link_t), (compare_func_t)compare_links); } } /* * 'find_link()' - Find a named link... */ static link_t * find_link(uchar *name) /* I - Name to find */ { link_t key, /* Search key */ *match; /* Matching name entry */ if (name == NULL || num_links == 0) return (NULL); if (name[0] == '#') name ++; strlcpy((char *)key.name, (char *)name, sizeof(key.name)); match = (link_t *)bsearch(&key, links, num_links, sizeof(link_t), (compare_func_t)compare_links); return (match); } /* * 'compare_links()' - Compare two named links. */ static int /* O - 0 = equal, -1 or 1 = not equal */ compare_links(link_t *n1, /* I - First name */ link_t *n2) /* I - Second name */ { return (strcasecmp((char *)n1->name, (char *)n2->name)); } #ifdef TABLE_DEBUG # undef DEBUG_printf # undef DEBUG_puts # define DEBUG_printf(x) printf x # define DEBUG_puts(x) puts(x) #endif /* TABLE_DEBUG */ // // 'get_cell_size()' - Compute the minimum width of a cell. // static float // O - Required width of cell get_cell_size(tree_t *t, // I - Cell float left, // I - Left margin float right, // I - Right margin float *minwidth, // O - Minimum width float *prefwidth, // O - Preferred width float *minheight) // O - Minimum height { tree_t *temp, // Current tree entry *next; // Next tree entry uchar *var; // Attribute value int nowrap; // NOWRAP attribute? float width, // Width of cell frag_width, // Fragment required width frag_height, // Fragment height frag_pref, // Fragment preferred width frag_min, // Fragment minimum width minh, // Local minimum height minw, // Local minimum width prefw, // Local preferred width format_width; // Working format width for images DEBUG_printf(("get_cell_size(%p, %.1f, %.1f, %p, %p, %p)\n", (void *)t, left, right, (void *)minwidth, (void *)prefwidth, (void *)minheight)); // First see if the width has been specified for this cell... if ((var = htmlGetVariable(t, (uchar *)"WIDTH")) != NULL && (var[strlen((char *)var) - 1] != '%' || (right - left) > 0.0f)) { // Yes, use it! if (var[strlen((char *)var) - 1] == '%') width = (right - left) * atoi((char *)var) * 0.01f; else width = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else width = 0.0f; if ((format_width = right - left) <= 0.0f) format_width = PagePrintWidth; minw = 0.0f; prefw = 0.0f; // Then the height... if ((var = htmlGetVariable(t, (uchar *)"HEIGHT")) != NULL) { // Yes, use it! if (var[strlen((char *)var) - 1] == '%') minh = PagePrintLength * atoi((char *)var) * 0.01f; else minh = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else minh = 0.0f; nowrap = (htmlGetVariable(t, (uchar *)"NOWRAP") != NULL); DEBUG_printf(("nowrap = %d\n", nowrap)); for (temp = t->child, frag_width = 0.0f, frag_pref = 0.0f; temp != NULL; temp = next) { // Point to next markup, if any... next = temp->child; switch (temp->markup) { case MARKUP_TABLE : // Update widths... 
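// Flush the current text fragment into the preferred/minimum widths before measuring the nested table below.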
if (frag_pref > prefw) prefw = frag_pref; if (frag_width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for block...\n", frag_width, minw)); minw = frag_width; } if (nowrap && frag_pref > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for break...\n", frag_pref, minw)); minw = frag_pref; } // For nested tables, compute the width of the table. frag_width = get_table_size(temp, left, right, &frag_min, &frag_pref, &frag_height); if (frag_pref > prefw) prefw = frag_pref; if (frag_min > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for nested table...\n", frag_min, minw)); minw = frag_min; } frag_width = 0.0f; frag_pref = 0.0f; frag_min = 0.0f; next = NULL; break; case MARKUP_IMG : // Update the image width as needed... if (temp->markup == MARKUP_IMG) update_image_size(temp); case MARKUP_NONE : case MARKUP_SPACER : frag_height = temp->height; #ifdef TABLE_DEBUG2 if (temp->markup == MARKUP_NONE) printf("FRAG(%s) = %.1f\n", temp->data, temp->width); else if (temp->markup == MARKUP_SPACER) printf("SPACER = %.1f\n", temp->width); else printf("IMG(%s) = %.1f\n", htmlGetVariable(temp, (uchar *)"SRC"), temp->width); #endif // TABLE_DEBUG2 // Handle min/preferred widths separately... if (temp->width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for fragment...\n", temp->width, minw)); minw = temp->width; } if (temp->preformatted && temp->data != NULL && temp->data[strlen((char *)temp->data) - 1] == '\n') { // End of a line - check preferred width... frag_pref += temp->width + 1; if (frag_pref > prefw) prefw = frag_pref; if (temp->preformatted && frag_pref > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for preformatted...\n", frag_pref, minw)); minw = frag_pref; } frag_pref = 0.0f; } else if (temp->data != NULL) frag_pref += temp->width + 1; else if ((frag_pref + temp->width) > format_width) { // parse_paragraph() will force a break if (frag_pref > prefw) prefw = frag_pref; frag_pref = temp->width; } else frag_pref += temp->width; if (temp->preformatted && temp->data != NULL && temp->data[strlen((char *)temp->data) - 1] == '\n') { // Check required width... frag_width += temp->width + 1; if (frag_width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for block...\n", frag_width, minw)); minw = frag_width; } frag_width = 0.0f; } else if (!temp->preformatted && temp->data != NULL && (isspace(temp->data[0]) || (temp->data[0] && isspace(temp->data[strlen((char *)temp->data) - 1])))) { // Check required width... 
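// Leading whitespace starts a new required-width fragment; trailing whitespace closes the current one.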
if (isspace(temp->data[0])) frag_width = temp->width + 1; else frag_width += temp->width + 1; if (frag_width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for block...\n", frag_width, minw)); minw = frag_width; } if (!isspace(temp->data[0])) frag_width = 0.0f; DEBUG_printf(("frag_width=%.1f after whitespace processing...\n", frag_width)); } else if (temp->data != NULL) frag_width += temp->width + 1; else if ((frag_width + temp->width) > format_width) // parse_paragraph() will force a break frag_width = temp->width; else frag_width += temp->width; break; case MARKUP_ADDRESS : case MARKUP_BLOCKQUOTE : case MARKUP_BR : case MARKUP_CENTER : case MARKUP_DD : case MARKUP_DIV : case MARKUP_DT : case MARKUP_H1 : case MARKUP_H2 : case MARKUP_H3 : case MARKUP_H4 : case MARKUP_H5 : case MARKUP_H6 : case MARKUP_H7 : case MARKUP_H8 : case MARKUP_H9 : case MARKUP_H10 : case MARKUP_H11 : case MARKUP_H12 : case MARKUP_H13 : case MARKUP_H14 : case MARKUP_H15 : case MARKUP_HR : case MARKUP_LI : case MARKUP_P : case MARKUP_PRE : DEBUG_printf(("BREAK at %.1f\n", frag_pref)); if (frag_pref > prefw) prefw = frag_pref; if (frag_width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for block...\n", frag_width, minw)); minw = frag_width; } if (nowrap && frag_pref > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for break...\n", frag_pref, minw)); minw = frag_pref; } frag_pref = 0.0f; frag_width = 0.0f; default : frag_height = 0.0f; break; } // Update minimum height... if (frag_height > minh) minh = frag_height; // Update next pointer as needed... if (next == NULL) next = temp->next; if (next == NULL) { // This code is almost funny if you say it fast... :) for (next = temp->parent; next != NULL && next != t; next = next->parent) if (next->next != NULL) break; if (next == t) next = NULL; else if (next) next = next->next; } } // Check the last fragment's width... if (frag_pref > prefw) prefw = frag_pref; if (frag_width > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for block...\n", frag_width, minw)); minw = frag_width; } // Handle the "NOWRAP" option... if (nowrap && prefw > minw) { DEBUG_printf(("Setting minw to %.1f (was %.1f) for NOWRAP...\n", prefw, minw)); minw = prefw; } // Return the required, minimum, and preferred size of the cell... *minwidth = minw; *prefwidth = prefw; *minheight = minh; DEBUG_printf(("get_cell_size(): width=%.1f, minw=%.1f, prefw=%.1f, minh=%.1f\n", width, minw, prefw, minh)); return (width); } // // 'get_table_size()' - Compute the minimum width of a table. 
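// (Returns any WIDTH attribute value; *minwidth, *prefwidth, and *minheight accumulate the per-row cell sizes plus border, padding, and spacing.)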
// static float // O - Minimum width of table get_table_size(tree_t *t, // I - Table float left, // I - Left margin float right, // I - Right margin float *minwidth, // O - Minimum width float *prefwidth, // O - Preferred width float *minheight) // O - Minimum height { tree_t *temp, // Current tree entry *next; // Next tree entry uchar *var; // Attribute value float width, // Required width of table minw, // Minimum width of table minh, // Minimum height of table prefw, // Preferred width of table cell_width, // Cell required width cell_pref, // Cell preferred width cell_min, // Cell minimum width cell_height, // Cell minimum height row_width, // Row required width row_pref, // Row preferred width row_min, // Row minimum width row_height, // Row minimum height border, // Border around cells cellpadding, // Padding inside cells cellspacing; // Spacing around cells int columns, // Current number of columns max_columns, // Maximum columns rows; // Number of rows DEBUG_printf(("get_table_size(%p, %.1f, %.1f, %p, %p, %p)\n", (void *)t, left, right, (void *)minwidth, (void *)prefwidth, (void *)minheight)); // First see if the width has been specified for this table... if ((var = htmlGetVariable(t, (uchar *)"WIDTH")) != NULL && (var[strlen((char *)var) - 1] != '%' || (right - left) > 0.0f)) { // Yes, use it! if (var[strlen((char *)var) - 1] == '%') width = (right - left) * atoi((char *)var) * 0.01f; else width = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else width = 0.0f; minw = 0.0f; prefw = 0.0f; // Then the height... if ((var = htmlGetVariable(t, (uchar *)"HEIGHT")) != NULL) { // Yes, use it! if (var[strlen((char *)var) - 1] == '%') minh = PagePrintLength * atoi((char *)var) * 0.01f; else minh = (float)(atoi((char *)var) * PagePrintWidth / _htmlBrowserWidth); } else minh = 0.0f; // Update the size as needed... for (temp = t->child, row_width = 0.0f, row_min = 0.0f, row_pref = 0.0f, row_height = 0.0f, columns = 0, rows = 0, max_columns = 0; temp != NULL; temp = next) { // Point to next markup, if any... next = temp->child; // Start a new row or add the cell width as needed... if (temp->markup == MARKUP_TR) { minh += row_height; row_width = 0.0f; row_pref = 0.0f; row_min = 0.0f; row_height = 0.0f; rows ++; columns = 0; } else if (temp->markup == MARKUP_TD || temp->markup == MARKUP_TH) { // Update columns... columns ++; if (columns > max_columns) max_columns = columns; // Get widths of cell... cell_width = get_cell_size(temp, left, right, &cell_min, &cell_pref, &cell_height); // Update row widths... row_width += cell_width; row_pref += cell_pref; row_min += cell_min; if (cell_height > row_height) row_height = cell_height; // Check current row widths against table... if (row_pref > prefw) prefw = row_pref; if (row_min > minw) minw = row_min; } // Update next pointer as needed... if (next == NULL) next = temp->next; if (next == NULL) { // This code is almost funny if you say it fast... :) for (next = temp->parent; next != NULL && next != t; next = next->parent) if (next->next != NULL) break; if (next == t) next = NULL; else if (next) next = next->next; } } // Make sure last row is counted in min height calcs. minh += row_height; // Add room for spacing and padding... 
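// Each column contributes 2 * cellpadding + cellspacing, with one trailing cellspacing subtracted; BORDER (or the browsers' implicit 1-pixel border) is folded into cellpadding.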
if ((var = htmlGetVariable(t, (uchar *)"CELLPADDING")) != NULL) cellpadding = atoi((char *)var); else cellpadding = 1.0f; if ((var = htmlGetVariable(t, (uchar *)"CELLSPACING")) != NULL) cellspacing = atoi((char *)var); else cellspacing = 0.0f; if ((var = htmlGetVariable(t, (uchar *)"BORDER")) != NULL) { if ((border = (float)atof((char *)var)) == 0.0 && var[0] != '0') border = 1.0f; cellpadding += border; } else border = 0.0f; if (border == 0.0f && cellpadding > 0.0f) { /* * Ah, the strange table formatting nightmare that is HTML. * Netscape and MSIE assign an invisible border width of 1 * pixel if no border is specified... */ cellpadding += 1.0f; } cellspacing *= PagePrintWidth / _htmlBrowserWidth; cellpadding *= PagePrintWidth / _htmlBrowserWidth; DEBUG_printf(("ADDING %.1f for table space for %d columns...\n", max_columns * (2 * cellpadding + cellspacing) - cellspacing, max_columns)); if (width > 0.0f) width += max_columns * (2 * cellpadding + cellspacing) - cellspacing; minw += max_columns * (2 * cellpadding + cellspacing) - cellspacing; prefw += max_columns * (2 * cellpadding + cellspacing) - cellspacing; minh += rows * (2 * cellpadding + cellspacing) - cellspacing; // Return the required, minimum, and preferred size of the table... *minwidth = minw; *prefwidth = prefw; *minheight = minh; DEBUG_printf(("get_table_size(): width=%.1f, minw=%.1f, prefw=%.1f, minh=%.1f\n", width, minw, prefw, minh)); return (width); } #ifdef TABLE_DEBUG # undef DEBUG_printf # undef DEBUG_puts # define DEBUG_printf(x) # define DEBUG_puts(x) #endif /* TABLE_DEBUG */ /* * 'flatten_tree()' - Flatten an HTML tree to only include the text, image, * link, and break markups. */ static tree_t * /* O - Flattened markup tree */ flatten_tree(tree_t *t) /* I - Markup tree to flatten */ { tree_t *temp, /* New tree node */ *flat; /* Flattened tree */ flat = NULL; while (t != NULL) { switch (t->markup) { case MARKUP_NONE : if (t->data == NULL) break; case MARKUP_COMMENT : case MARKUP_BR : case MARKUP_SPACER : case MARKUP_IMG : temp = (tree_t *)calloc(sizeof(tree_t), 1); memcpy(temp, t, sizeof(tree_t)); temp->parent = NULL; temp->child = NULL; temp->prev = flat; temp->next = NULL; if (flat != NULL) flat->next = temp; flat = temp; if (temp->markup == MARKUP_IMG) update_image_size(temp); break; case MARKUP_A : if (htmlGetVariable(t, (uchar *)"NAME") != NULL) { temp = (tree_t *)calloc(sizeof(tree_t), 1); memcpy(temp, t, sizeof(tree_t)); temp->parent = NULL; temp->child = NULL; temp->prev = flat; temp->next = NULL; if (flat != NULL) flat->next = temp; flat = temp; } break; case MARKUP_P : case MARKUP_PRE : case MARKUP_H1 : case MARKUP_H2 : case MARKUP_H3 : case MARKUP_H4 : case MARKUP_H5 : case MARKUP_H6 : case MARKUP_H7 : case MARKUP_H8 : case MARKUP_H9 : case MARKUP_H10 : case MARKUP_H11 : case MARKUP_H12 : case MARKUP_H13 : case MARKUP_H14 : case MARKUP_H15 : case MARKUP_UL : case MARKUP_DIR : case MARKUP_MENU : case MARKUP_OL : case MARKUP_DL : case MARKUP_LI : case MARKUP_DD : case MARKUP_DT : case MARKUP_TR : case MARKUP_CAPTION : temp = (tree_t *)calloc(sizeof(tree_t), 1); temp->markup = MARKUP_BR; temp->parent = NULL; temp->child = NULL; temp->prev = flat; temp->next = NULL; if (flat != NULL) flat->next = temp; flat = temp; break; default : break; } if (t->child != NULL && t->markup != MARKUP_UNKNOWN) { temp = flatten_tree(t->child); if (temp != NULL) temp->prev = flat; if (flat != NULL) flat->next = temp; else flat = temp; } if (flat != NULL) while (flat->next != NULL) flat = flat->next; t = t->next; } if (flat == 
NULL) return (NULL); while (flat->prev != NULL) flat = flat->prev; return (flat); } /* * 'update_image_size()' - Update the size of an image based upon the * printable width. */ static void update_image_size(tree_t *t) /* I - Tree entry */ { image_t *img; /* Image file */ uchar *width, /* Width string */ *height; /* Height string */ width = htmlGetVariable(t, (uchar *)"WIDTH"); height = htmlGetVariable(t, (uchar *)"HEIGHT"); if (width != NULL && height != NULL) { if (width[strlen((char *)width) - 1] == '%') t->width = (float)(atof((char *)width) * PagePrintWidth / 100.0f); else t->width = (float)(atoi((char *)width) * PagePrintWidth / _htmlBrowserWidth); if (height[strlen((char *)height) - 1] == '%') t->height = (float)(atof((char *)height) * PagePrintWidth / 100.0f); else t->height = (float)(atoi((char *)height) * PagePrintWidth / _htmlBrowserWidth); return; } img = image_find((char *)htmlGetVariable(t, (uchar *)"REALSRC")); if (img == NULL) return; if (width != NULL) { if (width[strlen((char *)width) - 1] == '%') t->width = (float)(atof((char *)width) * PagePrintWidth / 100.0f); else t->width = (float)(atoi((char *)width) * PagePrintWidth / _htmlBrowserWidth); t->height = t->width * img->height / img->width; } else if (height != NULL) { if (height[strlen((char *)height) - 1] == '%') t->height = (float)(atof((char *)height) * PagePrintWidth / 100.0f); else t->height = (float)(atoi((char *)height) * PagePrintWidth / _htmlBrowserWidth); t->width = t->height * img->width / img->height; } else { t->width = (float)(img->width * PagePrintWidth / _htmlBrowserWidth); t->height = (float)(img->height * PagePrintWidth / _htmlBrowserWidth); } } /* * 'get_width()' - Get the width of a string in points. */ static float /* O - Width in points */ get_width(uchar *s, /* I - String to scan */ int typeface, /* I - Typeface code */ int style, /* I - Style code */ int size) /* I - Size */ { uchar *ptr; /* Current character */ int width; /* Current width */ DEBUG_printf(("get_width(\"%s\", %d, %d, %d)\n", s == NULL ? "(null)" : (const char *)s, typeface, style, size)); if (s == NULL) return (0.0); if (!_htmlWidthsLoaded[typeface][style]) htmlLoadFontWidths(typeface, style); for (width = 0, ptr = s; *ptr != '\0'; ptr ++) width += _htmlWidths[typeface][style][*ptr]; return (width * _htmlSizes[size] * 0.001f); } /* * 'get_title()' - Get the title string for a document. */ static uchar * /* O - Title string */ get_title(tree_t *doc) /* I - Document */ { uchar *temp; while (doc != NULL) { if (doc->markup == MARKUP_TITLE) return (htmlGetText(doc->child)); else if (doc->child != NULL) if ((temp = get_title(doc->child)) != NULL) return (temp); doc = doc->next; } return (NULL); } /* * 'open_file()' - Open an output file for the current chapter. */ static FILE * /* O - File pointer */ open_file(void) { char filename[255]; /* Filename */ if (OutputFiles && PSLevel > 0) { if (chapter == -1) snprintf(filename, sizeof(filename), "%s/cover.ps", OutputPath); else if (chapter == 0) snprintf(filename, sizeof(filename), "%s/contents.ps", OutputPath); else snprintf(filename, sizeof(filename), "%s/doc%d.ps", OutputPath, chapter); return (fopen(filename, "wb+")); } else if (OutputFiles) { snprintf(filename, sizeof(filename), "%s/doc.pdf", OutputPath); return (fopen(filename, "wb+")); } else if (OutputPath[0] != '\0') return (fopen(OutputPath, "wb+")); else if (PSLevel == 0) return (file_temp(stdout_filename, sizeof(stdout_filename))); else return (stdout); } /* * 'set_color()' - Set the current text color... 
*/ static void set_color(FILE *out, /* I - File to write to */ float *rgb) /* I - RGB color */ { if (rgb[0] == render_rgb[0] && rgb[1] == render_rgb[1] && rgb[2] == render_rgb[2]) return; render_rgb[0] = rgb[0]; render_rgb[1] = rgb[1]; render_rgb[2] = rgb[2]; if (OutputColor) { // Output RGB color... if (PSLevel > 0) fprintf(out, "%.2f %.2f %.2f C ", rgb[0], rgb[1], rgb[2]); else flate_printf(out, "%.2f %.2f %.2f rg ", rgb[0], rgb[1], rgb[2]); } else { // Output grayscale... if (PSLevel > 0) fprintf(out, "%.2f G ", rgb[0] * 0.31f + rgb[1] * 0.61f + rgb[2] * 0.08f); else flate_printf(out, "%.2f g ", rgb[0] * 0.31f + rgb[1] * 0.61f + rgb[2] * 0.08f); } } /* * 'set_font()' - Set the current text font. */ static void set_font(FILE *out, /* I - File to write to */ int typeface, /* I - Typeface code */ int style, /* I - Style code */ float size) /* I - Size */ { char sizes[255], /* Formatted string for size... */ *s; /* Pointer to end of string */ if (typeface == render_typeface && style == render_style && size == render_size) return; /* * Format size and strip trailing 0's and decimals... */ snprintf(sizes, sizeof(sizes), "%.1f", size); for (s = sizes + strlen(sizes) - 1; s > sizes && *s == '0'; s --) *s = '\0'; if (*s == '.') *s = '\0'; /* * Set the new typeface, style, and size. */ if (PSLevel > 0) { if (size != render_size) fprintf(out, "%s FS", sizes); fprintf(out, "/F%x SF ", typeface * 4 + style); } else flate_printf(out, "/F%x %s Tf ", typeface * 4 + style, sizes); render_typeface = typeface; render_style = style; render_size = size; } /* * 'set_pos()' - Set the current text position. */ static void set_pos(FILE *out, /* I - File to write to */ float x, /* I - X position */ float y) /* I - Y position */ { char xs[255], /* Formatted string for X... */ ys[255], /* Formatted string for Y... */ *s; /* Pointer to end of string */ if (fabs(render_x - x) < 0.1 && fabs(render_y - y) < 0.1) return; /* * Format X and Y... */ if (PSLevel > 0 || render_x == -1.0) { snprintf(xs, sizeof(xs), "%.3f", x); snprintf(ys, sizeof(ys), "%.3f", y); } else { snprintf(xs, sizeof(xs), "%.3f", x - render_startx); snprintf(ys, sizeof(ys), "%.3f", y - render_y); } /* * Strip trailing 0's and decimals... */ for (s = xs + strlen(xs) - 1; s > xs && *s == '0'; s --) *s = '\0'; if (*s == '.') *s = '\0'; for (s = ys + strlen(ys) - 1; s > ys && *s == '0'; s --) *s = '\0'; if (*s == '.') *s = '\0'; if (PSLevel > 0) fprintf(out, "%s %s M", xs, ys); else flate_printf(out, "%s %s Td", xs, ys); render_x = render_startx = x; render_y = y; } /* * 'ps_hex()' - Print binary data as a series of hexadecimal numbers. */ static void ps_hex(FILE *out, /* I - File to print to */ uchar *data, /* I - Data to print */ int length) /* I - Number of bytes to print */ { int col; static const char *hex = "0123456789ABCDEF"; col = 0; while (length > 0) { /* * Put the hex uchars out to the file; note that we don't use fprintf() * for speed reasons... */ putc(hex[*data >> 4], out); putc(hex[*data & 15], out); data ++; length --; col = (col + 1) % 40; if (col == 0) putc('\n', out); } if (col > 0) putc('\n', out); } #ifdef HTMLDOC_ASCII85 /* * 'ps_ascii85()' - Print binary data as a series of base-85 numbers. 
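* Four input bytes form one 32-bit value written as five base-85 digits ('!' through 'u'); an all-zero group is abbreviated to 'z', leftover bytes are buffered between calls, and eod terminates the stream with "~>".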
*/ static void ps_ascii85(FILE *out, /* I - File to print to */ uchar *data, /* I - Data to print */ int length, /* I - Number of bytes to print */ int eod) /* I - 1 = end-of-data */ { unsigned b = 0; /* Current 32-bit word */ uchar c[5]; /* Base-85 encoded characters */ static int col = 0; /* Column */ static uchar leftdata[4]; /* Leftover data at the end */ static int leftcount = 0; /* Size of leftover data */ length += leftcount; while (length > 3) { switch (leftcount) { case 0 : b = (unsigned)((((((data[0] << 8) | data[1]) << 8) | data[2]) << 8) | data[3]); break; case 1 : b = (unsigned)((((((leftdata[0] << 8) | data[0]) << 8) | data[1]) << 8) | data[2]); break; case 2 : b = (unsigned)((((((leftdata[0] << 8) | leftdata[1]) << 8) | data[0]) << 8) | data[1]); break; case 3 : b = (unsigned)((((((leftdata[0] << 8) | leftdata[1]) << 8) | leftdata[2]) << 8) | data[0]); break; } if (col >= 76) { col = 0; putc('\n', out); } if (b == 0) { putc('z', out); col ++; } else { c[4] = (b % 85) + '!'; b /= 85; c[3] = (b % 85) + '!'; b /= 85; c[2] = (b % 85) + '!'; b /= 85; c[1] = (b % 85) + '!'; b /= 85; c[0] = (uchar)(b + '!'); fwrite(c, 1, 5, out); col += 5; } data += 4 - leftcount; length -= 4 - leftcount; leftcount = 0; } if (length > 0) { // Copy any remainder into the leftdata array... if ((length - leftcount) > 0) memcpy(leftdata + leftcount, data, (size_t)(length - leftcount)); memset(leftdata + length, 0, (size_t)(4 - length)); leftcount = length; } if (eod) { // Do the end-of-data dance... if (col >= 76) { col = 0; putc('\n', out); } if (leftcount > 0) { // Write the remaining bytes as needed... b = (unsigned)((((((leftdata[0] << 8) | leftdata[1]) << 8) | leftdata[2]) << 8) | leftdata[3]); c[4] = (b % 85) + '!'; b /= 85; c[3] = (b % 85) + '!'; b /= 85; c[2] = (b % 85) + '!'; b /= 85; c[1] = (b % 85) + '!'; b /= 85; c[0] = (uchar)(b + '!'); fwrite(c, (size_t)(leftcount + 1), 1, out); leftcount = 0; } fputs("~>\n", out); col = 0; } } #endif // HTMLDOC_ASCII85 /* * JPEG library destination data manager. These routines direct * compressed data from libjpeg into the PDF or PostScript file. */ static FILE *jpg_file; /* JPEG file */ static uchar jpg_buf[8192]; /* JPEG buffer */ static jpeg_destination_mgr jpg_dest; /* JPEG destination manager */ static struct jpeg_error_mgr jerr; /* JPEG error handler */ /* * 'jpg_init()' - Initialize the JPEG destination. */ static void jpg_init(j_compress_ptr cinfo) /* I - Compressor info */ { (void)cinfo; jpg_dest.next_output_byte = jpg_buf; jpg_dest.free_in_buffer = sizeof(jpg_buf); } /* * 'jpg_empty()' - Empty the JPEG output buffer. */ static boolean /* O - True if buffer written OK */ jpg_empty(j_compress_ptr cinfo) /* I - Compressor info */ { (void)cinfo; if (PSLevel > 0) #ifdef HTMLDOC_ASCII85 ps_ascii85(jpg_file, jpg_buf, sizeof(jpg_buf)); #else ps_hex(jpg_file, jpg_buf, sizeof(jpg_buf)); #endif // HTMLDOC_ASCII85 else flate_write(jpg_file, jpg_buf, sizeof(jpg_buf)); jpg_dest.next_output_byte = jpg_buf; jpg_dest.free_in_buffer = sizeof(jpg_buf); return (TRUE); } /* * 'jpg_term()' - Write the last JPEG data to the file. */ static void jpg_term(j_compress_ptr cinfo) /* I - Compressor info */ { int nbytes; /* Number of bytes to write */ (void)cinfo; nbytes = sizeof(jpg_buf) - jpg_dest.free_in_buffer; if (PSLevel > 0) #ifdef HTMLDOC_ASCII85 ps_ascii85(jpg_file, jpg_buf, nbytes); #else ps_hex(jpg_file, jpg_buf, nbytes); #endif // HTMLDOC_ASCII85 else flate_write(jpg_file, jpg_buf, nbytes); } /* * 'jpg_setup()' - Setup the JPEG compressor for writing an image. 
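* (Routes libjpeg output through the jpg_dest destination manager into the current PDF/PS stream; quality comes from OutputJPEG, and 1:1 sampling is forced for PostScript output.)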
*/ static void jpg_setup(FILE *out, /* I - Output file */ image_t *img, /* I - Output image */ j_compress_ptr cinfo) /* I - Compressor info */ { int i; // Looping var jpg_file = out; cinfo->err = jpeg_std_error(&jerr); jpeg_create_compress(cinfo); cinfo->dest = &jpg_dest; jpg_dest.init_destination = jpg_init; jpg_dest.empty_output_buffer = jpg_empty; jpg_dest.term_destination = jpg_term; cinfo->image_width = (JDIMENSION)img->width; cinfo->image_height = (JDIMENSION)img->height; cinfo->input_components = img->depth; cinfo->in_color_space = img->depth == 1 ? JCS_GRAYSCALE : JCS_RGB; jpeg_set_defaults(cinfo); jpeg_set_quality(cinfo, OutputJPEG, TRUE); // Update things when writing to PS files... if (PSLevel) { // Adobe uses sampling == 1 for (i = 0; i < img->depth; i ++) { cinfo->comp_info[i].h_samp_factor = 1; cinfo->comp_info[i].v_samp_factor = 1; } } cinfo->write_JFIF_header = FALSE; cinfo->write_Adobe_marker = TRUE; jpeg_start_compress(cinfo, TRUE); } /* * 'compare_rgb()' - Compare two RGB colors... */ static int /* O - -1 if rgb1<rgb2, etc. */ compare_rgb(unsigned *rgb1, /* I - First color */ unsigned *rgb2) /* I - Second color */ { return ((int)*rgb1 - (int)*rgb2); } /* * 'write_image()' - Write an image to the given output file... */ static void write_image(FILE *out, /* I - Output file */ render_t *r, /* I - Image to write */ int write_obj) /* I - Write an object? */ { int i, j, k, m, /* Looping vars */ ncolors; /* Number of colors */ uchar *pixel, /* Current pixel */ *indices, /* New indexed pixel array */ *indptr; /* Current index */ int indwidth, /* Width of indexed line */ indbits; /* Bits per index */ int max_colors; /* Max colors to use */ unsigned colors[256], /* Colormap values */ key, /* Color key */ *match; /* Matching color value */ uchar grays[256], /* Grayscale usage */ cmap[256][3]; /* Colormap */ image_t *img; /* Image */ struct jpeg_compress_struct cinfo; /* JPEG compressor */ uchar *data, /* PS Level 3 image data */ *dataptr, /* Pointer into image data */ *maskptr; /* Pointer into mask data */ /* * See if we can optimize the image as indexed without color loss... */ img = r->data.image; ncolors = 0; indices = NULL; indwidth = 0; if (!img->pixels && !img->obj) image_load(img->filename, !OutputColor, 1); // Note: Acrobat 6 tries to decrypt the colormap of indexed in-line images twice, which // is 1) not consistent with prior Acrobat releases and 2) in violation of their // PDF spec. The "img->use > 1 || !Encryption" test prevents the use of indexed // in-line images when encryption is enabled. // // We are filing a bug on this with Adobe, but if history is any indicator, we are // stuck with this workaround forever... if (PSLevel != 1 && PDFVersion >= 12 && img->obj == 0 && (img->use > 1 || !Encryption)) { if (img->depth == 1) { /* * Greyscale image... */ memset(grays, 0, sizeof(grays)); for (i = img->width * img->height, pixel = img->pixels; i > 0; i --, pixel ++) if (!grays[*pixel]) { if (ncolors >= 16) break; grays[*pixel] = 1; ncolors ++; } if (i == 0) { for (i = 0, j = 0; i < 256; i ++) if (grays[i]) { colors[j] = (unsigned)((((i << 8) | i) << 8) | i); grays[i] = (uchar)j; j ++; } } else ncolors = 0; } else { /* * Color image... 
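* Build a sorted palette of up to max_colors distinct RGB values; if the image uses more, ncolors is reset to 0 and the image is written unindexed.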
*/ if (OutputJPEG && !Compression) max_colors = 16; else max_colors = 256; for (i = img->width * img->height, pixel = img->pixels, match = NULL; i > 0; i --, pixel += 3) { key = (unsigned)((((pixel[0] << 8) | pixel[1]) << 8) | pixel[2]); if (!match || *match != key) { if (ncolors > 0) match = (unsigned *)bsearch(&key, colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); else match = NULL; } if (match == NULL) { if (ncolors >= max_colors) break; colors[ncolors] = key; ncolors ++; if (ncolors > 1) qsort(colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); } } if (i > 0) ncolors = 0; } } if (ncolors > 0) { if (PSLevel == 3 && img->mask) indbits = 8; else if (ncolors <= 2) indbits = 1; else if (ncolors <= 4) indbits = 2; else if (ncolors <= 16) indbits = 4; else indbits = 8; indwidth = (img->width * indbits + 7) / 8; indices = (uchar *)calloc((size_t)indwidth, (size_t)(img->height + 1)); // height + 1 for PS odd-row-count bug if (img->depth == 1) { /* * Convert a grayscale image... */ switch (indbits) { case 1 : for (i = img->height, pixel = img->pixels, indptr = indices; i > 0; i --) { for (j = img->width, k = 7; j > 0; j --, k = (k + 7) & 7, pixel ++) switch (k) { case 7 : *indptr = (uchar)(grays[*pixel] << 7); break; default : *indptr |= (uchar)(grays[*pixel] << k); break; case 0 : *indptr++ |= (uchar)grays[*pixel]; break; } if (k != 7) indptr ++; } break; case 2 : for (i = img->height, pixel = img->pixels, indptr = indices; i > 0; i --) { for (j = img->width, k = 0; j > 0; j --, k = (k + 1) & 3, pixel ++) switch (k) { case 0 : *indptr = (uchar)(grays[*pixel] << 6); break; case 1 : *indptr |= (uchar)(grays[*pixel] << 4); break; case 2 : *indptr |= (uchar)(grays[*pixel] << 2); break; case 3 : *indptr++ |= (uchar)grays[*pixel]; break; } if (k) indptr ++; } break; case 4 : for (i = img->height, pixel = img->pixels, indptr = indices; i > 0; i --) { for (j = img->width, k = 0; j > 0; j --, k ^= 1, pixel ++) if (k) *indptr++ |= grays[*pixel]; else *indptr = (uchar)(grays[*pixel] << 4); if (k) indptr ++; } break; } } else { /* * Convert a color image... 
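* Pack each pixel's palette index at 1, 2, 4, or 8 bits per pixel, padding every scanline to a byte boundary.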
*/ switch (indbits) { case 1 : for (i = img->height, pixel = img->pixels, indptr = indices, match = colors; i > 0; i --) { for (j = img->width, k = 7; j > 0; j --, k = (k + 7) & 7, pixel += 3) { key = (unsigned)((((pixel[0] << 8) | pixel[1]) << 8) | pixel[2]); if (*match != key) match = (unsigned *)bsearch(&key, colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); m = match - colors; switch (k) { case 7 : *indptr = (uchar)(m << 7); break; default : *indptr |= (uchar)(m << k); break; case 0 : *indptr++ |= (uchar)m; break; } } if (k != 7) indptr ++; } break; case 2 : for (i = img->height, pixel = img->pixels, indptr = indices, match = colors; i > 0; i --) { for (j = img->width, k = 0; j > 0; j --, k = (k + 1) & 3, pixel += 3) { key = (unsigned)((((pixel[0] << 8) | pixel[1]) << 8) | pixel[2]); if (*match != key) match = (unsigned *)bsearch(&key, colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); m = match - colors; switch (k) { case 0 : *indptr = (uchar)(m << 6); break; case 1 : *indptr |= (uchar)(m << 4); break; case 2 : *indptr |= (uchar)(m << 2); break; case 3 : *indptr++ |= (uchar)m; break; } } if (k) indptr ++; } break; case 4 : for (i = img->height, pixel = img->pixels, indptr = indices, match = colors; i > 0; i --) { for (j = img->width, k = 0; j > 0; j --, k ^= 1, pixel += 3) { key = (unsigned)((((pixel[0] << 8) | pixel[1]) << 8) | pixel[2]); if (*match != key) match = (unsigned *)bsearch(&key, colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); m = match - colors; if (k) *indptr++ |= (uchar)m; else *indptr = (uchar)(m << 4); } if (k) indptr ++; } break; case 8 : for (i = img->height, pixel = img->pixels, indptr = indices, match = colors; i > 0; i --) { for (j = img->width; j > 0; j --, pixel += 3, indptr ++) { key = (unsigned)((((pixel[0] << 8) | pixel[1]) << 8) | pixel[2]); if (*match != key) match = (unsigned *)bsearch(&key, colors, (size_t)ncolors, sizeof(unsigned), (compare_func_t)compare_rgb); *indptr = (uchar)(match - colors); } } break; } } } else indbits = 8; if (ncolors == 1) { /* * Adobe doesn't like 1 color images... */ ncolors = 2; colors[1] = 0; } /* * Now write the image... */ switch (PSLevel) { case 0 : /* PDF */ if (!write_obj) flate_printf(out, "q %.1f 0 0 %.1f %.1f %.1f cm\n", r->width, r->height, r->x, r->y); if (img->obj) { if (img->mask && PDFVersion < 13) write_imagemask(out, r); flate_printf(out, "/I%d Do Q\n", img->obj); break; } if (img->mask && write_obj && PDFVersion >= 13) { // We have a mask image, write it! pdf_start_object(out); fputs("/Type/XObject/Subtype/Image", out); fputs("/ColorSpace/DeviceGray", out); if (img->maskscale == 8) fprintf(out, "/Width %d/Height %d/BitsPerComponent 8", img->width, img->height); else fprintf(out, "/Width %d/Height %d/BitsPerComponent 1/ImageMask true", img->width * img->maskscale, img->height * img->maskscale); if (Compression) fputs("/Filter/FlateDecode", out); pdf_start_stream(out); flate_open_stream(out); if (img->maskscale == 8) flate_write(out, img->mask, img->width * img->height); else flate_write(out, img->mask, img->maskwidth * img->height * img->maskscale); flate_close_stream(out); pdf_end_object(out); } if (write_obj) { // Write an image object... 
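// The image becomes a reusable XObject; for PDF 1.3+ the mask object written above is referenced via /SMask (8-bit) or /Mask (stencil), and the data is Flate- and/or DCT-encoded.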
img->obj = pdf_start_object(out); fputs("/Type/XObject/Subtype/Image", out); if (img->mask && PDFVersion >= 13) { if (img->maskscale == 8) fprintf(out, "/SMask %d 0 R", img->obj - 1); else fprintf(out, "/Mask %d 0 R", img->obj - 1); } if (ncolors > 0) { for (i = 0; i < ncolors; i ++) { cmap[i][0] = (uchar)(colors[i] >> 16); cmap[i][1] = (uchar)(colors[i] >> 8); cmap[i][2] = (uchar)colors[i]; } if (Encryption) { // Encrypt the colormap... encrypt_init(); rc4_encrypt(&encrypt_state, cmap[0], cmap[0], (unsigned)(ncolors * 3)); } fprintf(out, "/ColorSpace[/Indexed/DeviceRGB %d<", ncolors - 1); for (i = 0; i < ncolors; i ++) fprintf(out, "%02X%02X%02X", cmap[i][0], cmap[i][1], cmap[i][2]); fputs(">]", out); } else if (img->depth == 1) fputs("/ColorSpace/DeviceGray", out); else fputs("/ColorSpace/DeviceRGB", out); #ifdef HTMLDOC_INTERPOLATION if (ncolors != 2) fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION if (Compression && (ncolors || !OutputJPEG)) fputs("/Filter/FlateDecode", out); else if (OutputJPEG && ncolors == 0) { if (Compression) fputs("/Filter[/FlateDecode/DCTDecode]", out); else fputs("/Filter/DCTDecode", out); } fprintf(out, "/Width %d/Height %d/BitsPerComponent %d", img->width, img->height, indbits); pdf_start_stream(out); flate_open_stream(out); if (OutputJPEG && ncolors == 0) { jpg_setup(out, img, &cinfo); for (i = img->height, pixel = img->pixels; i > 0; i --, pixel += img->width * img->depth) jpeg_write_scanlines(&cinfo, &pixel, 1); jpeg_finish_compress(&cinfo); jpeg_destroy_compress(&cinfo); } else { if (ncolors > 0) flate_write(out, indices, indwidth * img->height); else flate_write(out, img->pixels, img->width * img->height * img->depth); } flate_close_stream(out); pdf_end_object(out); } else { // Put the image in-line... 
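// In-line images use the abbreviated BI ... ID ... EI syntax with short key names (/CS, /W, /H, /BPC, /F).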
flate_puts("BI", out); if (ncolors > 0) { flate_printf(out, "/CS[/I/RGB %d<", ncolors - 1); for (i = 0; i < ncolors; i ++) flate_printf(out, "%02X%02X%02X", colors[i] >> 16, (colors[i] >> 8) & 255, colors[i] & 255); flate_puts(">]", out); } else if (img->depth == 1) flate_puts("/CS/G", out); else flate_puts("/CS/RGB", out); if (ncolors != 2) flate_puts("/I true", out); flate_printf(out, "/W %d/H %d/BPC %d", img->width, img->height, indbits); if (ncolors > 0) { flate_puts(" ID\n", out); flate_write(out, indices, indwidth * img->height, 1); } else if (OutputJPEG) { flate_puts("/F/DCT ID\n", out); jpg_setup(out, img, &cinfo); for (i = img->height, pixel = img->pixels; i > 0; i --, pixel += img->width * img->depth) jpeg_write_scanlines(&cinfo, &pixel, 1); jpeg_finish_compress(&cinfo); jpeg_destroy_compress(&cinfo); } else { flate_puts(" ID\n", out); flate_write(out, img->pixels, img->width * img->height * img->depth, 1); } flate_write(out, (uchar *)"\nEI\nQ\n", 6, 1); } break; case 1 : /* PostScript, Level 1 */ fputs("GS", out); fprintf(out, "[%.1f 0 0 %.1f %.1f %.1f]CM", r->width, r->height, r->x, r->y); if (img->mask) write_imagemask(out, r); fprintf(out, "/picture %d string def\n", img->width * img->depth); if (img->depth == 1) fprintf(out, "%d %d 8 [%d 0 0 %d 0 %d] {currentfile picture readhexstring pop} image\n", img->width, img->height, img->width, -img->height, img->height); else fprintf(out, "%d %d 8 [%d 0 0 %d 0 %d] {currentfile picture readhexstring pop} false 3 colorimage\n", img->width, img->height, img->width, -img->height, img->height); ps_hex(out, img->pixels, img->width * img->height * img->depth); fputs("GR\n", out); break; case 3 : /* PostScript, Level 3 */ // Fallthrough to Level 2 output if compression is disabled and // we aren't doing transparency... if ((Compression && (!OutputJPEG || ncolors > 0)) || (img->mask && img->maskscale == 8)) { fputs("GS", out); fprintf(out, "[%.1f 0 0 %.1f %.1f %.1f]CM", r->width, r->height, r->x, r->y); if (img->mask && img->maskscale != 8) write_imagemask(out, r); if (ncolors > 0) { if (ncolors <= 2) ncolors = 2; /* Adobe doesn't like 1 color images... 
*/ fprintf(out, "[/Indexed/DeviceRGB %d\n<", ncolors - 1); for (i = 0; i < ncolors; i ++) { fprintf(out, "%02X%02X%02X", colors[i] >> 16, (colors[i] >> 8) & 255, colors[i] & 255); if ((i % 13) == 12) putc('\n', out); } fputs(">]setcolorspace\n", out); if (img->mask && img->maskscale == 8) fprintf(out, "<<" "/ImageType 3" "/InterleaveType 1" "/MaskDict<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent 8" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[0 1]" ">>\n" "/DataDict", img->width, img->height, img->width, -img->height, img->height); fprintf(out, "<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent %d" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[0 %d]", img->width, img->height, indbits, img->width, -img->height, img->height, (1 << indbits) - 1); #ifdef HTMLDOC_INTERPOLATION if (ncolors != 2) fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION #ifdef HTMLDOC_ASCII85 fputs("/DataSource currentfile/ASCII85Decode filter", out); #else fputs("/DataSource currentfile/ASCIIHexDecode filter", out); #endif // HTMLDOC_ASCII85 if (Compression) fputs("/FlateDecode filter", out); fputs(">>\n", out); if (img->mask && img->maskscale == 8) fputs(">>\n", out); fputs("image\n", out); flate_open_stream(out); if (img->mask && img->maskscale == 8) { data = (uchar *)malloc((size_t)(img->width * 2)); for (i = 0, maskptr = img->mask, indptr = indices; i < img->height; i ++) { for (j = img->width, dataptr = data; j > 0; j --) { *dataptr++ = *maskptr++; *dataptr++ = *indptr++; } flate_write(out, data, img->width * 2); } free(data); } else flate_write(out, indices, indwidth * img->height); flate_close_stream(out); } else { if (img->depth == 1) fputs("/DeviceGray setcolorspace", out); else fputs("/DeviceRGB setcolorspace", out); if (img->mask && img->maskscale == 8) fprintf(out, "<<" "/ImageType 3" "/InterleaveType 1" "/MaskDict<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent 8" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[0 1]" ">>\n" "/DataDict", img->width, img->height, img->width, -img->height, img->height); fprintf(out, "<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent 8" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[%s]", img->width, img->height, img->width, -img->height, img->height, img->depth == 1 ? 
"0 1" : "0 1 0 1 0 1"); #ifdef HTMLDOC_INTERPOLATION fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION #ifdef HTMLDOC_ASCII85 fputs("/DataSource currentfile/ASCII85Decode filter", out); #else fputs("/DataSource currentfile/ASCIIHexDecode filter", out); #endif // HTMLDOC_ASCII85 if (Compression) fputs("/FlateDecode filter", out); fputs(">>\n", out); if (img->mask && img->maskscale == 8) fputs(">>\n", out); fputs("image\n", out); flate_open_stream(out); if (img->mask && img->maskscale == 8) { data = (uchar *)malloc((size_t)(img->width * (img->depth + 1))); for (i = 0, maskptr = img->mask, pixel = img->pixels; i < img->height; i ++) { if (img->depth == 1) { for (j = img->width, dataptr = data; j > 0; j --) { *dataptr++ = *maskptr++; *dataptr++ = *pixel++; } } else { for (j = img->width, dataptr = data; j > 0; j --) { *dataptr++ = *maskptr++; *dataptr++ = *pixel++; *dataptr++ = *pixel++; *dataptr++ = *pixel++; } } flate_write(out, data, img->width * (img->depth + 1)); } free(data); } else flate_write(out, img->pixels, img->width * img->height * img->depth); flate_close_stream(out); } fputs("GR\n", out); break; } case 2 : /* PostScript, Level 2 */ fputs("GS", out); fprintf(out, "[%.1f 0 0 %.1f %.1f %.1f]CM", r->width, r->height, r->x, r->y); if (img->mask) write_imagemask(out, r); if (ncolors > 0) { fprintf(out, "[/Indexed/DeviceRGB %d\n<", ncolors - 1); for (i = 0; i < ncolors; i ++) { fprintf(out, "%02X%02X%02X", colors[i] >> 16, (colors[i] >> 8) & 255, colors[i] & 255); if ((i % 13) == 12) putc('\n', out); } fputs(">]setcolorspace\n", out); fprintf(out, "<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent %d" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[0 %d]", img->width, img->height, indbits, img->width, -img->height, img->height, (1 << indbits) - 1); #ifdef HTMLDOC_INTERPOLATION if (ncolors != 2) fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION #ifdef HTMLDOC_ASCII85 fputs("/DataSource currentfile/ASCII85Decode filter>>image\n", out); ps_ascii85(out, indices, indwidth * img->height, 1); #else fputs("/DataSource currentfile/ASCIIHexDecode filter>>image\n", out); ps_hex(out, indices, indwidth * img->height); // End of data marker... fputs(">\n", out); #endif /* HTMLDOC_ASCII85 */ } else if (OutputJPEG) { if (img->depth == 1) fputs("/DeviceGray setcolorspace\n", out); else fputs("/DeviceRGB setcolorspace\n", out); fprintf(out, "<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent 8" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[%s]", img->width, img->height, img->width, -img->height, img->height, img->depth == 1 ? "0 1" : "0 1 0 1 0 1"); #ifdef HTMLDOC_INTERPOLATION fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION #ifdef HTMLDOC_ASCII85 fputs("/DataSource currentfile/ASCII85Decode filter/DCTDecode filter" ">>image\n", out); #else fputs("/DataSource currentfile/ASCIIHexDecode filter/DCTDecode filter" ">>image\n", out); #endif // HTMLDOC_ASCII85 jpg_setup(out, img, &cinfo); for (i = img->height, pixel = img->pixels; i > 0; i --, pixel += img->width * img->depth) jpeg_write_scanlines(&cinfo, &pixel, 1); jpeg_finish_compress(&cinfo); jpeg_destroy_compress(&cinfo); #ifdef HTMLDOC_ASCII85 ps_ascii85(out, (uchar *)"", 0, 1); #else // End of data marker... 
fputs(">\n", out); #endif // HTMLDOC_ASCII85 } else { if (img->depth == 1) fputs("/DeviceGray setcolorspace\n", out); else fputs("/DeviceRGB setcolorspace\n", out); fprintf(out, "<<" "/ImageType 1" "/Width %d" "/Height %d" "/BitsPerComponent 8" "/ImageMatrix[%d 0 0 %d 0 %d]" "/Decode[%s]", img->width, img->height, img->width, -img->height, img->height, img->depth == 1 ? "0 1" : "0 1 0 1 0 1"); #ifdef HTMLDOC_INTERPOLATION fputs("/Interpolate true", out); #endif // HTMLDOC_INTERPOLATION #ifdef HTMLDOC_ASCII85 fputs("/DataSource currentfile/ASCII85Decode filter" ">>image\n", out); ps_ascii85(out, img->pixels, img->width * img->height * img->depth, 1); #else fputs("/DataSource currentfile/ASCIIHexDecode filter" ">>image\n", out); ps_hex(out, img->pixels, img->width * img->depth * img->height); // End of data marker... fputs(">\n", out); #endif // HTMLDOC_ASCII85 } fputs("GR\n", out); break; } if (ncolors > 0) free(indices); image_unload(img); } /* * 'write_imagemask()' - Write an imagemask to the output file... */ static void write_imagemask(FILE *out, /* I - Output file */ render_t *r) /* I - Image to write */ { image_t *img; /* Current image */ int x, y; /* Position in mask image */ int startx, count; /* Start and count */ uchar *ptr, /* Pointer into mask image */ byte, /* Current byte */ bit; /* Current bit */ float scalex, scaley; /* 1/(w-1) and 1/(h-1) scaling factors */ int width, height; /* Scaled width and height */ img = r->data.image; width = img->width * img->maskscale; height = img->height * img->maskscale; scalex = 1.0f / width; scaley = 1.0f / height; switch (PSLevel) { case 0 : // PDF break; default : // PostScript fputs("\nnewpath\n", out); break; } for (y = 0; y < height; y ++) { for (x = 0, ptr = img->mask + (height - y - 1) * img->maskwidth, bit = 128, byte = *ptr++, startx = 0, count = 0; x < width; x ++) { if (!(bit & byte)) { if (!count) startx = x; count ++; } else if (count) { switch (PSLevel) { case 0 : // PDF flate_printf(out, "%.6f %.6f %.6f %.6f re\n", (float)startx * scalex, (float)y * scaley, (float)count * scalex, 1.0f * scaley); break; default : // PostScript fprintf(out, "%.6f %.6f %.6f %.6f re\n", (float)startx * scalex, (float)y * scaley, (float)count * scalex, 1.0f * scaley); break; } count = 0; } if (bit > 1) bit >>= 1; else { bit = 128; byte = *ptr++; } } if (count) { switch (PSLevel) { case 0 : // PDF flate_printf(out, "%.6f %.6f %.6f %.6f re\n", (float)startx * scalex, (float)y * scaley, (float)count * scalex, 1.0f * scaley); break; default : // PostScript fprintf(out, "%.6f %.6f %.6f %.6f re\n", (float)startx * scalex, (float)y * scaley, (float)count * scalex, 1.0f * scaley); break; } } } switch (PSLevel) { case 0 : // PDF flate_puts("W n\n", out); break; default : // PostScript fputs("clip\n", out); break; } } /* * 'write_prolog()' - Write the file prolog... 
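\n", out);">
* (Scans the rendered pages for the fonts actually in use, then writes the PostScript or PDF header, including the Xerox job-ticket comments when XRXComments is set.)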
*/ static void write_prolog(FILE *out, /* I - Output file */ int page_count, /* I - Number of pages (0 if not known) */ uchar *author, /* I - Author of document */ uchar *creator, /* I - Application that generated the HTML file */ uchar *copyright, /* I - Copyright (if any) on the document */ uchar *keywords, /* I - Search keywords */ uchar *subject) /* I - Subject */ { FILE *prolog; /* PostScript prolog file */ int i, j, /* Looping vars */ encoding_object; /* Font encoding object */ int page; /* Current page */ render_t *r; /* Current render data */ int fonts_used[TYPE_MAX][STYLE_MAX]; /* Whether or not a font is used */ int font_desc[TYPE_MAX][STYLE_MAX]; /* Font descriptor objects */ char temp[1024]; /* Temporary string */ md5_state_t md5; /* MD5 state */ md5_byte_t digest[16]; /* MD5 digest value */ rc4_context_t rc4; /* RC4 context */ uchar owner_pad[32], /* Padded owner password */ owner_key[32], /* Owner key */ user_pad[32], /* Padded user password */ user_key[32]; /* User key */ uchar perm_bytes[4]; /* Permission bytes */ unsigned perm_value; /* Permission value, unsigned */ static unsigned char pad[32] = { /* Padding for passwords */ 0x28, 0xbf, 0x4e, 0x5e, 0x4e, 0x75, 0x8a, 0x41, 0x64, 0x00, 0x4e, 0x56, 0xff, 0xfa, 0x01, 0x08, 0x2e, 0x2e, 0x00, 0xb6, 0xd0, 0x68, 0x3e, 0x80, 0x2f, 0x0c, 0xa9, 0xfe, 0x64, 0x53, 0x69, 0x7a }; /* * See what fonts are used... */ memset(fonts_used, 0, sizeof(fonts_used)); fonts_used[HeadFootType][HeadFootStyle] = 1; for (page = 0; page < (int)num_pages; page ++) for (r = pages[page].start; r != NULL; r = r->next) if (r->type == RENDER_TEXT) fonts_used[r->data.text.typeface][r->data.text.style] = 1; #ifdef DEBUG puts("The following fonts were used:"); for (i = 0; i < TYPE_MAX; i ++) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j]) printf(" %s\n", _htmlFonts[i][j]); #endif // DEBUG /* * Generate the heading... */ if (PSLevel > 0) { /* * Write PostScript prolog stuff... */ if (XRXComments) { int start, end; // Start and end of document pages... int count; // Number of exception pages in this range... // The following comments are Xerox job ticket information that // is used on the high-end Laser Printing Systems rather than // embedded commands... fputs("%XRXbegin: 001.0300\n", out); fputs("%XRXPDLformat: PS-Adobe\n", out); if (doc_title) fprintf(out, "%%XRXtitle: %s\n", doc_title); if (OutputFiles) { // Output a single chapter... if (chapter < 0) { start = 0; end = chapter_outstarts[1] - 1; } else { start = chapter_outstarts[chapter]; end = chapter_outends[chapter]; } } else { start = 0; end = 0; } if (pages[outpages[start].pages[0]].duplex) { if (pages[outpages[start].pages[0]].landscape) fputs("%XRXrequirements: duplex(tumble)\n", out); else fputs("%XRXrequirements: duplex\n", out); } else fputs("%XRXrequirements: simplex\n", out); fputs("%XRXdisposition: PRINT\n", out); fputs("%XRXsignature: False\n", out); fprintf(out, "%%XRXpaperType-size: %.0f %.0f\n", pages[outpages[start].pages[0]].width * 25.4f / 72.0f, pages[outpages[start].pages[0]].length * 25.4f / 72.0f); if (pages[outpages[start].pages[0]].media_type[0]) fprintf(out, "%%XRXpaperType-preFinish: %s 0 0\n", pages[start].media_type); if (pages[outpages[start].pages[0]].media_color[0]) fprintf(out, "%%XRXdocumentPaperColors: %c%s\n", tolower(pages[start].media_color[0]), pages[start].media_color + 1); if (OutputFiles) { // Handle document settings per-chapter... 
for (i = start + 1; i < end; i += count) { if (pages[outpages[i].pages[0]].width != pages[0].width || pages[outpages[i].pages[0]].length != pages[0].length || strcmp(pages[outpages[i].pages[0]].media_type, pages[0].media_type) != 0 || strcmp(pages[outpages[i].pages[0]].media_color, pages[0].media_color) != 0 || pages[outpages[i].pages[0]].duplex != pages[0].duplex) { for (count = 1; (i + count) <= end; count ++) if (pages[outpages[i].pages[0]].width != pages[outpages[i + count].pages[0]].width || pages[outpages[i].pages[0]].length != pages[outpages[i + count].pages[0]].length || strcmp(pages[outpages[i].pages[0]].media_type, pages[outpages[i + count].pages[0]].media_type) != 0 || strcmp(pages[outpages[i].pages[0]].media_color, pages[outpages[i + count].pages[0]].media_color) != 0 || pages[outpages[i].pages[0]].duplex != pages[outpages[i + count].pages[0]].duplex) break; fprintf(out, "%%XRXpageExceptions: %d %d %.0f %.0f %c%s opaque %s 0 0\n", i + 1, i + count, pages[outpages[i].pages[0]].width * 25.4f / 72.0f, pages[outpages[i].pages[0]].length * 25.4f / 72.0f, tolower(pages[outpages[i].pages[0]].media_color[0]), pages[outpages[i].pages[0]].media_color + 1, pages[outpages[i].pages[0]].media_type[0] ? pages[outpages[i].pages[0]].media_type : "Plain"); if (pages[outpages[i].pages[0]].duplex && pages[outpages[i].pages[0]].landscape) fprintf(out, "%%XRXpageExceptions-plex: %d %d duplex(tumble)\n", i + 1, i + count); else if (pages[outpages[i].pages[0]].duplex) fprintf(out, "%%XRXpageExceptions-plex: %d %d duplex\n", i + 1, i + count); else fprintf(out, "%%XRXpageExceptions-plex: %d %d simplex\n", i + 1, i + count); } else count = 1; } } else { // All pages are in a single file... for (j = (TocLevels == 0); j <= TocDocCount; j ++) { start = chapter_outstarts[j]; end = chapter_outends[j]; for (i = start + 1; i < end; i += count) { if (pages[outpages[i].pages[0]].width != pages[0].width || pages[outpages[i].pages[0]].length != pages[0].length || strcmp(pages[outpages[i].pages[0]].media_type, pages[0].media_type) != 0 || strcmp(pages[outpages[i].pages[0]].media_color, pages[0].media_color) != 0 || pages[outpages[i].pages[0]].duplex != pages[0].duplex) { for (count = 1; (i + count) < end; count ++) if (pages[outpages[i].pages[0]].width != pages[outpages[i + count].pages[0]].width || pages[outpages[i].pages[0]].length != pages[outpages[i + count].pages[0]].length || strcmp(pages[outpages[i].pages[0]].media_type, pages[outpages[i + count].pages[0]].media_type) != 0 || strcmp(pages[outpages[i].pages[0]].media_color, pages[outpages[i + count].pages[0]].media_color) != 0 || pages[outpages[i].pages[0]].duplex != pages[outpages[i + count].pages[0]].duplex) break; fprintf(out, "%%XRXpageExceptions: %d %d %.0f %.0f %c%s opaque %s 0 0\n", i + 1, i + count, pages[outpages[i].pages[0]].width * 25.4f / 72.0f, pages[outpages[i].pages[0]].length * 25.4f / 72.0f, tolower(pages[outpages[i].pages[0]].media_color[0]), pages[outpages[i].pages[0]].media_color + 1, pages[outpages[i].pages[0]].media_type[0] ? 
pages[outpages[i].pages[0]].media_type : "Plain"); if (pages[outpages[i].pages[0]].duplex && pages[outpages[i].pages[0]].landscape) fprintf(out, "%%XRXpageExceptions-plex: %d %d duplex(tumble)\n", i + 1, i + count); else if (pages[outpages[i].pages[0]].duplex) fprintf(out, "%%XRXpageExceptions-plex: %d %d duplex\n", i + 1, i + count); else fprintf(out, "%%XRXpageExceptions-plex: %d %d simplex\n", i + 1, i + count); } else count = 1; } } } fputs("%XRXend\n", out); } fputs("%!PS-Adobe-3.0\n", out); if (Landscape) fprintf(out, "%%%%BoundingBox: 0 0 %d %d\n", PageLength, PageWidth); else fprintf(out, "%%%%BoundingBox: 0 0 %d %d\n", PageWidth, PageLength); fprintf(out,"%%%%LanguageLevel: %d\n", PSLevel); fputs("%%Creator: " HTMLDOC_PRODUCER "\n", out); fprintf(out, "%%%%CreationDate: D:%04d%02d%02d%02d%02d%02d+0000\n", doc_date.tm_year + 1900, doc_date.tm_mon + 1, doc_date.tm_mday, doc_date.tm_hour, doc_date.tm_min, doc_date.tm_sec); if (doc_title != NULL) fprintf(out, "%%%%Title: %s\n", doc_title); if (author != NULL) fprintf(out, "%%%%Author: %s\n", author); if (creator != NULL) fprintf(out, "%%%%Generator: %s\n", creator); if (copyright != NULL) fprintf(out, "%%%%Copyright: %s\n", copyright); if (keywords != NULL) fprintf(out, "%%%%Keywords: %s\n", keywords); if (subject != NULL) fprintf(out, "%%%%Subject: %s\n", keywords); if (page_count > 0) fprintf(out, "%%%%Pages: %d\n", page_count); else fputs("%%Pages: (atend)\n", out); if (!EmbedFonts) { fputs("%%DocumentNeededResources:\n", out); for (i = 0; i < TYPE_MAX; i ++) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j] && _htmlStandardFonts[i]) fprintf(out, "%%%%+ font %s\n", _htmlFonts[i][j]); } fputs("%%DocumentProvidedResources:\n", out); for (i = 0; i < TYPE_MAX; i ++) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j] && (EmbedFonts || !_htmlStandardFonts[i])) fprintf(out, "%%%%+ font %s\n", _htmlFonts[i][j]); fputs("%%DocumentData: Clean7bit\n", out); fputs("%%EndComments\n", out); fputs("%%BeginProlog\n", out); /* * Embed fonts? */ for (i = 0; i < TYPE_MAX; i ++) { if (EmbedFonts || !_htmlStandardFonts[i]) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j]) write_type1(out, (typeface_t)i, (style_t)j); } /* * Procedures used throughout the document... */ const char *version = SVERSION; fprintf(out, "%%%%BeginResource: procset htmldoc-page 1.8 %s\n", version + 4); fputs("/BD{bind def}bind def", out); fputs("/B{dup 0 exch rlineto exch 0 rlineto neg 0 exch rlineto\n" "closepath stroke}BD", out); fputs("/C{setrgbcolor}BD\n", out); fputs("/CM{concat}BD", out); fputs("/DF{findfont dup length dict begin{1 index/FID ne{def}{pop pop}\n" "ifelse}forall/Encoding fontencoding def currentdict end definefont pop}BD\n", out); fputs("/F{dup 0 exch rlineto exch 0 rlineto neg 0 exch rlineto closepath fill}BD\n", out); fputs("/FS{/hdFontSize exch def}BD", out); fputs("/G{setgray}BD\n", out); fputs("/GS{gsave}BD", out); fputs("/GR{grestore}BD", out); fputs("/J{0 exch ashow}BD\n", out); fputs("/L{0 rlineto stroke}BD", out); fputs("/M{moveto}BD", out); fputs("/re{4 2 roll moveto 1 index 0 rlineto 0 exch rlineto neg 0 rlineto closepath}BD\n", out); fputs("/RO{rotate}BD", out); fputs("/S{show}BD", out); fputs("/SC{dup scale}BD\n", out); fputs("/SF{findfont hdFontSize scalefont setfont}BD", out); fputs("/SP{showpage}BD", out); fputs("/T{translate}BD\n", out); fputs("%%EndResource\n", out); /* * Output the font encoding for the current character set... 
For now we * just support 8-bit fonts since true Unicode support needs a very large * number of extra fonts that aren't normally available on a PS printer. */ fputs("/fontencoding[\n", out); for (i = 0, j = 0; i < 256; i ++) { if (_htmlGlyphs[i]) j += strlen(_htmlGlyphs[i]) + 1; else j += 8; if (j > 80) { if (_htmlGlyphs[i]) j = strlen(_htmlGlyphs[i]) + 1; else j = 8; putc('\n', out); } putc('/', out); if (_htmlGlyphs[i]) fputs(_htmlGlyphs[i], out); else fputs(".notdef", out); } fputs("]def\n", out); /* * Fonts... */ for (i = 0; i < TYPE_MAX; i ++) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j]) { if (i < TYPE_SYMBOL) fprintf(out, "/F%x/%s DF\n", i * 4 + j, _htmlFonts[i][j]); else fprintf(out, "/F%x/%s findfont definefont pop\n", i * 4 + j, _htmlFonts[i][j]); } if (PSCommands) { snprintf(temp, sizeof(temp), "%s/data/prolog.ps", _htmlData); if ((prolog = fopen(temp, "rb")) != NULL) { while (fgets(temp, sizeof(temp), prolog) != NULL) fputs(temp, out); fclose(prolog); } else { progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to open data file \"%s\" - %s", temp, strerror(errno)); fprintf(out, "%%%%BeginResource: procset htmldoc-device 1.8 %s\n", version + 4); fputs("languagelevel 1 eq{/setpagedevice{pop}BD}if\n", out); fputs("/SetDuplexMode{<</Duplex 3 index/Tumble 5 index>>setpagedevice " "pop pop}BD\n", out); fputs("/SetMediaColor{pop}BD\n", out); fputs("/SetMediaType{pop}BD\n", out); fputs("/SetMediaPosition{pop}BD\n", out); fputs("/SetPageSize{2 array astore<</PageSize 2 index/ImageableArea " "null>>setpagedevice pop}BD\n", out); fputs("%%EndResource\n", out); } } if (background_image != NULL) ps_write_background(out); fputs("%%EndProlog\n", out); } else { /* * Write PDF prolog stuff... */ fprintf(out, "%%PDF-%.1f\n", 0.1 * PDFVersion); fputs("%\342\343\317\323\n", out); num_objects = 0; /* * Compute the file ID... */ md5_init(&md5); md5_append(&md5, (md5_byte_t *)OutputPath, sizeof(OutputPath)); md5_append(&md5, (md5_byte_t *)&doc_time, sizeof(doc_time)); md5_finish(&md5, file_id); /* * Setup encryption stuff as necessary... */ if (Encryption) { /* * Copy and pad the user password... */ strlcpy((char *)user_pad, UserPassword, sizeof(user_pad)); if ((i = strlen(UserPassword)) < 32) memcpy(user_pad + i, pad, (size_t)(32 - i)); if (OwnerPassword[0]) { /* * Copy and pad the owner password... */ strlcpy((char *)owner_pad, OwnerPassword, sizeof(owner_pad)); if ((i = strlen(OwnerPassword)) < 32) memcpy(owner_pad + i, pad, (size_t)(32 - i)); } else { /* * Generate a pseudo-random owner password... */ srand(time(NULL)); for (i = 0; i < 32; i ++) owner_pad[i] = (uchar)rand(); } /* * What is the key length? * * Acrobat 4.0 and earlier (PDF 1.3 and earlier) allow a maximum of * 40-bits. Acrobat 5.0 and newer support 128-bits. */ if (PDFVersion > 13) encrypt_len = 16; // 128 bits else encrypt_len = 5; // 40 bits /* * Compute the owner key... */ md5_init(&md5); md5_append(&md5, owner_pad, 32); md5_finish(&md5, digest); if (encrypt_len > 5) { // MD5 the result 50 more times... for (i = 0; i < 50; i ++) { md5_init(&md5); md5_append(&md5, digest, 16); md5_finish(&md5, digest); } // Copy the padded user password... memcpy(owner_key, user_pad, 32); // Encrypt the result 20 times... for (i = 0; i < 20; i ++) { // XOR each byte in the key with the loop counter... 
for (j = 0; j < encrypt_len; j ++) encrypt_key[j] = (uchar)(digest[j] ^ i); rc4_init(&rc4, encrypt_key, (size_t)encrypt_len); rc4_encrypt(&rc4, owner_key, owner_key, 32); } } else { rc4_init(&rc4, digest, (size_t)encrypt_len); rc4_encrypt(&rc4, user_pad, owner_key, 32); } /* * Figure out the permissions word; the new N-bit security * handler adds several new permission bits, which we must * simulate... */ perm_value = (unsigned)Permissions; if (encrypt_len > 5) { // N-bit encryption... if (!(perm_value & PDF_PERM_COPY)) perm_value &= (unsigned)~0x00240000; // Mask additional copy perms... } /* * Compute the encryption key... */ md5_init(&md5); md5_append(&md5, user_pad, 32); md5_append(&md5, owner_key, 32); perm_bytes[0] = (uchar)perm_value; perm_bytes[1] = (uchar)(perm_value >> 8); perm_bytes[2] = (uchar)(perm_value >> 16); perm_bytes[3] = (uchar)(perm_value >> 24); md5_append(&md5, perm_bytes, 4); md5_append(&md5, file_id, 16); md5_finish(&md5, digest); if (encrypt_len > 5) { // MD5 the result 50 times.. for (i = 0; i < 50; i ++) { md5_init(&md5); md5_append(&md5, digest, 16); md5_finish(&md5, digest); } } memcpy(encrypt_key, digest, (size_t)encrypt_len); /* * Compute the user key... */ if (encrypt_len > 5) { md5_init(&md5); md5_append(&md5, pad, 32); md5_append(&md5, file_id, 16); md5_finish(&md5, user_key); memset(user_key + 16, 0, 16); // Encrypt the result 20 times... for (i = 0; i < 20; i ++) { // XOR each byte in the key with the loop counter... for (j = 0; j < encrypt_len; j ++) digest[j] = (uchar)(encrypt_key[j] ^ i); rc4_init(&rc4, digest, (size_t)encrypt_len); rc4_encrypt(&rc4, user_key, user_key, 16); } } else { rc4_init(&rc4, encrypt_key, (size_t)encrypt_len); rc4_encrypt(&rc4, pad, user_key, 32); } /* * Write the encryption dictionary... */ encrypt_object = pdf_start_object(out); fputs("/Filter/Standard/O<", out); for (i = 0; i < 32; i ++) fprintf(out, "%02x", owner_key[i]); fputs(">/U<", out); for (i = 0; i < 32; i ++) fprintf(out, "%02x", user_key[i]); fputs(">", out); if (encrypt_len > 5) { // N-bit encryption... fprintf(out, "/P %d/V 2/R 3/Length %d", (int)perm_value, encrypt_len * 8); } else fprintf(out, "/P %d/V 1/R 2", (int)perm_value); pdf_end_object(out); } else encrypt_object = 0; /* * Write info object... */ info_object = pdf_start_object(out); fputs("/Producer", out); write_string(out, (uchar *)HTMLDOC_PRODUCER, 0); fputs("/CreationDate", out); snprintf(temp, sizeof(temp), "D:%04d%02d%02d%02d%02d%02d+0000", doc_date.tm_year + 1900, doc_date.tm_mon + 1, doc_date.tm_mday, doc_date.tm_hour, doc_date.tm_min, doc_date.tm_sec); write_string(out, (uchar *)temp, 0); if (doc_title != NULL) { fputs("/Title", out); write_utf16(out, doc_title); } if (author != NULL || copyright != NULL) { if (author && copyright) snprintf(temp, sizeof(temp), "%s, %s", author, copyright); else if (author) strlcpy(temp, (const char *)author, sizeof(temp)); else strlcpy(temp, (const char *)copyright, sizeof(temp)); fputs("/Author", out); write_utf16(out, (uchar *)temp); } if (creator != NULL) { fputs("/Creator", out); write_utf16(out, creator); } if (keywords != NULL) { fputs("/Keywords", out); write_utf16(out, keywords); } if (subject != NULL) { fputs("/Subject", out); write_utf16(out, subject); } pdf_end_object(out); /* * Write the font encoding for the selected character set. Note that * we *should* be able to use the WinAnsiEncoding value for ISO-8859-1 * to make smaller files, however Acrobat Exchange does not like it * despite the fact that it is defined in the PDF specification... 
*/ encoding_object = pdf_start_object(out); fputs("/Type/Encoding", out); fputs("/Differences[", out); for (i = 0, j = -1; i < 256; i ++) if (_htmlGlyphs[i]) { /* * Output a character index if we had blank ones... */ if (j != (i - 1)) fprintf(out, " %d", i); fprintf(out, "/%s", _htmlGlyphs[i]); j = i; } fputs("]", out); pdf_end_object(out); memset(font_desc, 0, sizeof(font_desc)); /* * Build font descriptors for the EmbedFonts fonts... */ for (i = 0; i < TYPE_MAX; i ++) if (EmbedFonts || !_htmlStandardFonts[i]) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j]) font_desc[i][j] = write_type1(out, (typeface_t )i, (style_t)j); for (i = 0; i < TYPE_MAX; i ++) for (j = 0; j < STYLE_MAX; j ++) if (fonts_used[i][j]) { font_objects[i * STYLE_MAX + j] = pdf_start_object(out); fputs("/Type/Font", out); fputs("/Subtype/Type1", out); fprintf(out, "/BaseFont/%s", _htmlFonts[i][j]); if (font_desc[i][j]) { // Embed Type1 font... fputs("/FirstChar 0", out); fputs("/LastChar 255", out); fprintf(out, "/Widths %d 0 R", font_desc[i][j] + 1); fprintf(out, "/FontDescriptor %d 0 R", font_desc[i][j]); } if (i < TYPE_SYMBOL) /* Use native encoding for symbols */ fprintf(out, "/Encoding %d 0 R", encoding_object); pdf_end_object(out); } } } /* * 'write_string()' - Write a text entity. */ static void write_string(FILE *out, /* I - Output file */ uchar *s, /* I - String */ int compress) /* I - Compress output? */ { int i; /* Looping var */ if (Encryption && !compress && PSLevel == 0) { int len, // Length of string bytes; // Current bytes encrypted uchar news[1024]; // New string /* * Write an encrypted string... */ putc('<', out); encrypt_init(); for (len = strlen((char *)s); len > 0; len -= bytes, s += bytes) { if (len > (int)sizeof(news)) bytes = (int)sizeof(news); else bytes = len; rc4_encrypt(&encrypt_state, s, news, (size_t)bytes); for (i = 0; i < bytes; i ++) fprintf(out, "%02x", news[i]); } putc('>', out); } else { uchar nbsp = 160; // Non-breaking space char if (compress) flate_write(out, (uchar *)"(", 1); else putc('(', out); if (_htmlUTF8) nbsp = _htmlCharacters[160]; while (*s != '\0') { if (*s == nbsp) { /* &nbsp; */ if (compress) flate_write(out, (uchar *)" ", 1); else putc(' ', out); } else if (*s < 32 || *s > 126) { if (compress) flate_printf(out, "\\%o", *s); else fprintf(out, "\\%o", *s); } else if (compress) { if (*s == '(' || *s == ')' || *s == '\\') flate_write(out, (uchar *)"\\", 1); flate_write(out, s, 1); } else { if (*s == '(' || *s == ')' || *s == '\\') putc('\\', out); putc(*s, out); } s ++; } if (compress) flate_write(out, (uchar *)")", 1); else putc(')', out); } } /* * 'write_text()' - Write a text entity. */ static void write_text(FILE *out, /* I - Output file */ render_t *r) /* I - Text entity */ { uchar *ptr; /* Pointer into text */ // Quick optimization - don't output spaces... for (ptr = r->data.text.buffer; *ptr; ptr ++) if (!isspace(*ptr) && *ptr != 0xa0) break; if (!*ptr) return; // Not just whitespace - send it out... 
set_color(out, r->data.text.rgb); set_font(out, r->data.text.typeface, r->data.text.style, r->data.text.size); set_pos(out, r->x, r->y); if (PSLevel > 0) { if (r->data.text.spacing > 0.0f) fprintf(out, " %.3f", r->data.text.spacing); } else if (r->data.text.spacing != render_spacing) flate_printf(out, " %.3f Tc", render_spacing = r->data.text.spacing); write_string(out, r->data.text.buffer, PSLevel == 0); if (PSLevel > 0) { if (r->data.text.spacing > 0.0f) fputs("J\n", out); else fputs("S\n", out); } else flate_puts("Tj\n", out); render_x += r->width; } /* * 'write_trailer()' - Write the file trailer. */ static void write_trailer(FILE *out, /* I - Output file */ int num_file_pages, /* I - Number of pages in file */ uchar *lang) /* I - Language */ { int i, j, k, /* Looping vars */ type, /* Type of number */ offset, /* Offset to xref table in PDF file */ start; /* Start page number */ page_t *page; /* Start page of chapter */ char prefix[64], /* Prefix string */ *prefptr; /* Pointer into prefix string */ static const char *modes[] = /* Page modes */ { "UseNone", "UseOutlines", "FullScreen" }; static const char *layouts[] = /* Page layouts */ { "SinglePage", "OneColumn", "TwoColumnLeft", "TwoColumnRight" }; if (PSLevel > 0) { /* * PostScript... */ fputs("%%Trailer\n", out); if (num_file_pages > 0) fprintf(out, "%%%%Pages: %d\n", num_file_pages); fputs("%%EOF\n", out); } else { /* * PDF... */ root_object = pdf_start_object(out); fputs("/Type/Catalog", out); fprintf(out, "/Pages %d 0 R", pages_object); if (PDFVersion >= 12) { if (names_object) fprintf(out, "/Names %d 0 R", names_object); fprintf(out, "/PageLayout/%s", layouts[PDFPageLayout]); } if (lang) fprintf(out, "/Lang(%s)", (char *)lang); if (outline_object > 0) fprintf(out, "/Outlines %d 0 R", outline_object); switch (PDFFirstPage) { case PDF_PAGE_1 : if (TitlePage) { fprintf(out, "/OpenAction[%d 0 R/XYZ null null 0]", pages_object + 1); break; } break; case PDF_TOC : if (TocLevels > 0) { fprintf(out, "/OpenAction[%d 0 R/XYZ null null 0]", pages_object + 2 * chapter_outstarts[0] + 1); break; } break; case PDF_CHAPTER_1 : fprintf(out, "/OpenAction[%d 0 R/XYZ null null 0]", pages_object + 2 * chapter_outstarts[1] + 1); break; } fprintf(out, "/PageMode/%s", modes[PDFPageMode]); if (PDFVersion > 12 && NumberUp == 1) { // Output the PageLabels tree... 
fputs("/PageLabels<</Nums[", out); for (i = 0; i < chapter_starts[1]; i ++) { fprintf(out, "%d<</P", i); if (i & 1) write_string(out, (uchar *)"eltit", 0); else write_string(out, (uchar *)"title", 0); fputs(">>", out); } if (TocLevels > 0 && OutputType == OUTPUT_BOOK) { type = 'r'; for (j = 0; j < 3; j ++) if ((TocHeader[j] && strstr(TocHeader[j], "$PAGE(1)")) || (TocFooter[j] && strstr(TocFooter[j], "$PAGE(1)"))) type = 'D'; else if ((TocHeader[j] && strstr(TocHeader[j], "$PAGE(I)")) || (TocFooter[j] && strstr(TocFooter[j], "$PAGE(I)"))) type = 'R'; else if ((TocHeader[j] && strstr(TocHeader[j], "$PAGE(a)")) || (TocFooter[j] && strstr(TocFooter[j], "$PAGE(a)"))) type = 'a'; else if ((TocHeader[j] && strstr(TocHeader[j], "$PAGE(A)")) || (TocFooter[j] && strstr(TocFooter[j], "$PAGE(A)"))) type = 'A'; fprintf(out, "%d<</S/%c>>", i, type); i += chapter_ends[0] - chapter_starts[0] + 1; } for (j = 1; j <= TocDocCount; j ++) { page = pages + chapter_starts[j]; start = chapter_starts[j] - chapter_starts[1] + 1; type = 'D'; prefix[0] = '\0'; for (k = 0; k < 3; k ++) { if (page->header[k] && strstr((char *)page->header[k], "PAGE")) strlcpy(prefix, (char *)page->header[k], sizeof(prefix)); else if (page->footer[k] && strstr((char *)page->footer[k], "PAGE")) strlcpy(prefix, (char *)page->footer[k], sizeof(prefix)); if ((page->header[k] && strstr((char *)page->header[k], "PAGE(i)")) || (page->footer[k] && strstr((char *)page->footer[k], "PAGE(i)"))) type = 'r'; else if ((page->header[k] && strstr((char *)page->header[k], "PAGE(I)")) || (page->footer[k] && strstr((char *)page->footer[k], "PAGE(I)"))) type = 'R'; else if ((page->header[k] && strstr((char *)page->header[k], "PAGE(a)")) || (page->footer[k] && strstr((char *)page->footer[k], "PAGE(a)"))) type = 'a'; else if ((page->header[k] && strstr((char *)page->header[k], "PAGE(A)")) || (page->footer[k] && strstr((char *)page->footer[k], "PAGE(A)"))) type = 'A'; if ((page->header[k] && strstr((char *)page->header[k], "$CHAPTERPAGE")) || (page->footer[k] && strstr((char *)page->footer[k], "$CHAPTERPAGE"))) start = 1; } if ((prefptr = strstr(prefix, "$PAGE")) == NULL) prefptr = strstr(prefix, "$CHAPTERPAGE"); fprintf(out, "%d<</S/%c/St %d", i, type, start); if (prefptr) { *prefptr = '\0'; fputs("/P", out); write_string(out, (uchar *)prefix, 0); } fputs(">>", out); i += chapter_ends[j] - chapter_starts[j] + 1; } fputs("]>>", out); } pdf_end_object(out); offset = ftell(out); fputs("xref\n", out); fprintf(out, "0 %d \n", (int)num_objects + 1); fputs("0000000000 65535 f \n", out); for (i = 1; i <= (int)num_objects; i ++) fprintf(out, "%010d 00000 n \n", objects[i]); fputs("trailer\n", out); fputs("<<", out); fprintf(out, "/Size %d", (int)num_objects + 1); fprintf(out, "/Root %d 0 R", root_object); fprintf(out, "/Info %d 0 R", info_object); fputs("/ID[<", out); for (i = 0; i < 16; i ++) fprintf(out, "%02x", file_id[i]); fputs("><", out); for (i = 0; i < 16; i ++) fprintf(out, "%02x", file_id[i]); fputs(">]", out); if (Encryption) fprintf(out, "/Encrypt %d 0 R", encrypt_object); fputs(">>\n", out); fputs("startxref\n", out); fprintf(out, "%d\n", offset); fputs("%%EOF\n", out); } } /* * 'write_type1()' - Write an embedded Type 1 font. 
*/ static int /* O - Object number */ write_type1(FILE *out, /* I - File to write to */ typeface_t typeface, /* I - Typeface */ style_t style) /* I - Style */ { char filename[1024]; /* PFA filename */ FILE *fp; /* PFA file */ int ch; /* Character value */ int width; /* Width value */ char glyph[64], /* Glyph name */ line[1024], /* Line from AFM file */ *lineptr, /* Pointer into line */ *dataptr; /* Pointer for data */ int ascent, /* Ascent above baseline */ cap_height, /* Ascent of CAPITALS */ x_height, /* Ascent of lowercase */ descent, /* Decent below baseline */ bbox[4], /* Bounding box */ italic_angle; /* Angle for italics */ int widths[256]; /* Character widths */ int length1, /* Length1 value for font */ length2, /* Length2 value for font */ length3; /* Length3 value for font */ static int tflags[] = /* PDF typeface flags */ { 33, /* Courier */ 34, /* Times-Roman */ 32, /* Helvetica */ 33, /* Monospace */ 34, /* Serif */ 32, /* Sans */ 4, /* Symbol */ 4 /* Dingbats */ }; static int sflags[] = /* PDF style flags */ { 0, /* Normal */ 0, /* Bold */ 64, /* Italic */ 64 /* Bold-Italic */ }; /* * This function writes a Type1 font, either as an object for PDF * output or as an in-line font in PostScript output. This is useful * because the Type1 fonts that Adobe ships typically do not include * the full set of characters required by some of the ISO character * sets. */ /* * Try to open the PFA file for the Type1 font... */ snprintf(filename, sizeof(filename), "%s/fonts/%s.pfa", _htmlData, _htmlFonts[typeface][style]); if ((fp = fopen(filename, "r")) == NULL) { #ifndef DEBUG progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to open font file %s!", filename); #endif /* !DEBUG */ return (0); } /* * Write the font (object)... */ if (PSLevel) { /* * Embed a Type1 font in the PostScript output... */ fprintf(out, "%%%%BeginResource: font %s\n", _htmlFonts[typeface][style]); line[0] = '\0'; while (fgets(line, sizeof(line), fp) != NULL) fputs(line, out); if (line[strlen(line) - 1] != '\n') fputs("\n", out); fputs("%%EndResource\n", out); fclose(fp); } else { /* * Embed a Type1 font object in the PDF output... 
*/ length1 = 0; length2 = 0; length3 = 0; while (fgets(line, sizeof(line), fp) != NULL) { length1 += strlen(line); if (strstr(line, "currentfile eexec") != NULL) break; } while (fgets(line, sizeof(line), fp) != NULL) { if (!strcmp(line, "00000000000000000000000000000000" "00000000000000000000000000000000\n")) break; length2 += (strlen(line) - 1) / 2; } length3 = strlen(line); while (fgets(line, sizeof(line), fp) != NULL) length3 += strlen(line); rewind(fp); pdf_start_object(out); fprintf(out, "/Length1 %d", length1); fprintf(out, "/Length2 %d", length2); fprintf(out, "/Length3 %d", length3); if (Compression) fputs("/Filter/FlateDecode", out); pdf_start_stream(out); flate_open_stream(out); while (fgets(line, sizeof(line), fp) != NULL) { flate_puts(line, out); if (strstr(line, "currentfile eexec") != NULL) break; } while (fgets(line, sizeof(line), fp) != NULL) { if (!strcmp(line, "00000000000000000000000000000000" "00000000000000000000000000000000\n")) break; for (lineptr = line, dataptr = line; isxdigit(*lineptr); lineptr += 2) { if (isdigit(lineptr[0])) ch = (lineptr[0] - '0') << 4; else ch = (tolower(lineptr[0] & 255) - 'a' + 10) << 4; if (isdigit(lineptr[1])) ch |= lineptr[1] - '0'; else ch |= tolower(lineptr[1] & 255) - 'a' + 10; *dataptr++ = (char)ch; } flate_write(out, (uchar *)line, dataptr - line); } flate_puts(line, out); while (fgets(line, sizeof(line), fp) != NULL) flate_puts(line, out); flate_close_stream(out); pdf_end_object(out); fclose(fp); /* * Try to open the AFM file for the Type1 font... */ snprintf(filename, sizeof(filename), "%s/fonts/%s.afm", _htmlData, _htmlFonts[typeface][style]); if ((fp = fopen(filename, "r")) == NULL) { #ifndef DEBUG progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to open font width file %s!", filename); #endif /* !DEBUG */ return (0); } /* * Set the default values (Courier)... */ for (ch = 0; ch < 256; ch ++) widths[ch] = 600; ascent = 629; cap_height = 562; x_height = 426; descent = -157; bbox[0] = -28; bbox[1] = -250; bbox[2] = 628; bbox[3] = 805; italic_angle = 0; /* * Read the AFM file... */ while (fgets(line, sizeof(line), fp) != NULL) { if (strncmp(line, "ItalicAngle ", 12) == 0) italic_angle = atoi(line + 12); else if (strncmp(line, "FontBBox ", 9) == 0) sscanf(line + 9, "%d%d%d%d", bbox + 0, bbox + 1, bbox + 2, bbox + 3); else if (strncmp(line, "CapHeight ", 10) == 0) cap_height = atoi(line + 10); else if (strncmp(line, "XHeight ", 8) == 0) x_height = atoi(line + 8); else if (strncmp(line, "Ascender ", 9) == 0) ascent = atoi(line + 9); else if (strncmp(line, "Descender ", 10) == 0) descent = atoi(line + 10); else if (strncmp(line, "C ", 2) == 0) { if (typeface < TYPE_SYMBOL) { /* * Handle encoding of Courier, Times, and Helvetica using * assigned charset... */ if (sscanf(line, "%*s%*s%*s%*s%d%*s%*s%63s", &width, glyph) != 2) continue; for (ch = 0; ch < 256; ch ++) if (_htmlGlyphs[ch] && strcmp(_htmlGlyphs[ch], glyph) == 0) break; if (ch < 256) widths[ch] = width; } else { /* * Symbol font uses its own encoding... */ if (sscanf(line, "%*s%d%*s%*s%d", &ch, &width) != 2) continue; if (ch >= 0 && ch < 256) widths[ch] = width; } } } fclose(fp); /* * Write the font descriptor... 
*/ pdf_start_object(out); fputs("/Type/FontDescriptor", out); fprintf(out, "/Ascent %d", ascent); fprintf(out, "/Descent %d", descent); fprintf(out, "/CapHeight %d", cap_height); fprintf(out, "/XHeight %d", x_height); fprintf(out, "/FontBBox[%d %d %d %d]", bbox[0], bbox[1], bbox[2], bbox[3]); fprintf(out, "/ItalicAngle %d", italic_angle); fprintf(out, "/StemV %d", widths['v']); fprintf(out, "/Flags %d", tflags[typeface] | sflags[style]); fprintf(out, "/FontName/%s", _htmlFonts[typeface][style]); fprintf(out, "/FontFile %d 0 R", (int)num_objects - 1); pdf_end_object(out); /* * Write the character widths... */ pdf_start_object(out, 1); fprintf(out, "%d", widths[0]); for (ch = 1; ch < 256; ch ++) fprintf(out, " %d", widths[ch]); pdf_end_object(out); } /* * Return the font descriptor... */ return (num_objects - 1); } /* * 'write_utf16()' - Write a UTF-16 string... */ static void write_utf16(FILE *out, // I - File to write to uchar *s) // I - String to write { uchar *sptr; // Pointer into string /* * We start by checking to see if the string is composed only of * ASCII characters; if so, we can just write a normal string... */ for (sptr = s; *sptr && !(*sptr & 0x80); sptr ++); if (!*sptr) { /* * Write an ASCII string... */ write_string(out, s, 0); } else if (Encryption) { /* * Convert the string to Unicode and encrypt... */ int ch; // Character value uchar unicode[2], // Unicode character enicode[2]; // Encrypted unicode character putc('<', out); encrypt_init(); unicode[0] = 0xfe; // Start with BOM unicode[1] = 0xff; rc4_encrypt(&encrypt_state, unicode, enicode, 2); fprintf(out, "%02x%02x", enicode[0], enicode[1]); for (sptr = s; *sptr; sptr ++) { ch = _htmlUnicode[*sptr]; unicode[0] = (uchar)(ch >> 8); unicode[1] = (uchar)ch; rc4_encrypt(&encrypt_state, unicode, enicode, 2); fprintf(out, "%02x%02x", enicode[0], enicode[1]); } putc('>', out); } else { /* * Convert the string to Unicode... */ fputs("<feff", out); // Start with BOM for (sptr = s; *sptr; sptr ++) fprintf(out, "%04x", _htmlUnicode[*sptr]); putc('>', out); } } /* * 'encrypt_init()' - Initialize the RC4 encryption context for the current * object. */ static void encrypt_init(void) { int i; /* Looping var */ uchar data[21], /* Key data */ *dataptr; /* Pointer to key data */ md5_state_t md5; /* MD5 state */ md5_byte_t digest[16]; /* MD5 digest value */ /* * Compute the key data for the MD5 hash. */ for (i = 0, dataptr = data; i < encrypt_len; i ++) *dataptr++ = encrypt_key[i]; *dataptr++ = (uchar)num_objects; *dataptr++ = (uchar)(num_objects >> 8); *dataptr++ = (uchar)(num_objects >> 16); *dataptr++ = 0; *dataptr++ = 0; /* * Hash it... */ md5_init(&md5); md5_append(&md5, data, encrypt_len + 5); md5_finish(&md5, digest); /* * Initialize the RC4 context using the first N+5 bytes of the digest... */ if (encrypt_len > 11) rc4_init(&encrypt_state, digest, 16); else rc4_init(&encrypt_state, digest, (size_t)(encrypt_len + 5)); } /* * 'flate_open_stream()' - Open a deflated output stream. */ static void flate_open_stream(FILE *out) /* I - Output file */ { if (Encryption && !PSLevel) encrypt_init(); if (!Compression) return; compressor_active = 1; compressor.zalloc = (alloc_func)0; compressor.zfree = (free_func)0; compressor.opaque = (voidpf)0; deflateInit(&compressor, Compression); compressor.next_out = (Bytef *)comp_buffer; compressor.avail_out = sizeof(comp_buffer); } /* * 'flate_close_stream()' - Close a deflated output stream. 
*/ static void flate_close_stream(FILE *out) /* I - Output file */ { int status; /* Deflate status */ if (!Compression) { #ifdef HTMLDOC_ASCII85 if (PSLevel) ps_ascii85(out, (uchar *)"", 0, 1); #endif // HTMLDOC_ASCII85 return; } while ((status = deflate(&compressor, Z_FINISH)) != Z_STREAM_END) { if (status < Z_OK && status != Z_BUF_ERROR) { progress_error(HD_ERROR_OUT_OF_MEMORY, "deflate() failed (%d)", status); return; } if (PSLevel) #ifdef HTMLDOC_ASCII85 ps_ascii85(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #else ps_hex(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #endif // HTMLDOC_ASCII85 else { if (Encryption) rc4_encrypt(&encrypt_state, comp_buffer, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); fwrite(comp_buffer, (size_t)((uchar *)compressor.next_out - (uchar *)comp_buffer), 1, out); } compressor.next_out = (Bytef *)comp_buffer; compressor.avail_out = sizeof(comp_buffer); } if ((uchar *)compressor.next_out > (uchar *)comp_buffer) { if (PSLevel) #ifdef HTMLDOC_ASCII85 ps_ascii85(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #else ps_hex(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #endif // HTMLDOC_ASCII85 else { if (Encryption) rc4_encrypt(&encrypt_state, comp_buffer, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); fwrite(comp_buffer, (size_t)((uchar *)compressor.next_out - (uchar *)comp_buffer), 1, out); } } deflateEnd(&compressor); compressor_active = 0; #ifdef HTMLDOC_ASCII85 if (PSLevel) ps_ascii85(out, (uchar *)"", 0, 1); #else if (PSLevel) { // End of data marker... fputs(">\n", out); } #endif // HTMLDOC_ASCII85 } /* * 'flate_puts()' - Write a character string to a compressed stream. */ static void flate_puts(const char *s, /* I - String to write */ FILE *out) /* I - Output file */ { flate_write(out, (uchar *)s, strlen(s)); } /* * 'flate_printf()' - Write a formatted character string to a compressed stream. */ static void flate_printf(FILE *out, /* I - Output file */ const char *format, /* I - Format string */ ...) /* I - Additional args as necessary */ { int length; /* Length of output string */ char buf[10240]; /* Output buffer */ va_list ap; /* Argument pointer */ va_start(ap, format); length = vsnprintf(buf, sizeof(buf), format, ap); va_end(ap); flate_write(out, (uchar *)buf, length); } /* * 'flate_write()' - Write data to a compressed stream. */ static void flate_write(FILE *out, /* I - Output file */ uchar *buf, /* I - Buffer */ int length, /* I - Number of bytes to write */ int flush) /* I - Flush when writing data? */ { int status; /* Deflate status */ if (compressor_active) { compressor.next_in = buf; compressor.avail_in = (unsigned)length; while (compressor.avail_in > 0) { if (compressor.avail_out < (int)(sizeof(comp_buffer) / 8)) { if (PSLevel) #ifdef HTMLDOC_ASCII85 ps_ascii85(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #else ps_hex(out, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); #endif // HTMLDOC_ASCII85 else { if (Encryption) rc4_encrypt(&encrypt_state, comp_buffer, comp_buffer, (uchar *)compressor.next_out - (uchar *)comp_buffer); fwrite(comp_buffer, (size_t)((uchar *)compressor.next_out - (uchar *)comp_buffer), 1, out); } compressor.next_out = (Bytef *)comp_buffer; compressor.avail_out = sizeof(comp_buffer); } status = deflate(&compressor, flush ? 
Z_FULL_FLUSH : Z_NO_FLUSH); if (status < Z_OK && status != Z_BUF_ERROR) { progress_error(HD_ERROR_OUT_OF_MEMORY, "deflate() failed (%d)", status); return; } flush = 0; } } else if (Encryption && !PSLevel) { int i, // Looping var bytes; // Number of bytes to encrypt/write uchar newbuf[1024]; // New encrypted data buffer for (i = 0; i < length; i += sizeof(newbuf)) { if ((bytes = length - i) > (int)sizeof(newbuf)) bytes = sizeof(newbuf); rc4_encrypt(&encrypt_state, buf + i, newbuf, (size_t)bytes); fwrite(newbuf, (size_t)bytes, 1, out); } } else if (PSLevel) #ifdef HTMLDOC_ASCII85 ps_ascii85(out, buf, length); #else ps_hex(out, buf, length); #endif // HTMLDOC_ASCII85 else fwrite(buf, (size_t)length, 1, out); }
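The flate_open_stream()/flate_write()/flate_close_stream() helpers above drive zlib's deflate() through a fixed comp_buffer and drain it to the output file (hex/ASCII85-encoded for PostScript, optionally RC4-encrypted for PDF) whenever it fills. Below is a minimal, self-contained sketch of that streaming pattern against the same zlib API; the helper name, buffer size and plain fwrite() output are illustrative assumptions, not HTMLDOC code.

/*
 * Sketch of the zlib streaming pattern wrapped by the flate_* helpers:
 * feed input, drain a fixed output buffer whenever it fills, and finish
 * the stream with Z_FINISH.
 */
#include <stdio.h>
#include <string.h>
#include <zlib.h>

int deflate_to_file(FILE *out, const unsigned char *data, size_t len, int level)
{
  z_stream      strm;
  unsigned char buf[8192];                /* fixed output buffer, like comp_buffer */
  int           status;

  memset(&strm, 0, sizeof(strm));         /* zalloc/zfree/opaque = Z_NULL */
  if (deflateInit(&strm, level) != Z_OK)
    return -1;

  strm.next_in   = (Bytef *)data;
  strm.avail_in  = (uInt)len;
  strm.next_out  = buf;
  strm.avail_out = sizeof(buf);

  do
  {
    /* keep compressing; switch to Z_FINISH once all input is consumed */
    status = deflate(&strm, strm.avail_in ? Z_NO_FLUSH : Z_FINISH);
    if (status < Z_OK && status != Z_BUF_ERROR)
    {
      deflateEnd(&strm);
      return -1;
    }

    if (strm.avail_out == 0 || status == Z_STREAM_END)
    {
      /* drain whatever deflate() produced and reset the output window */
      fwrite(buf, 1, sizeof(buf) - strm.avail_out, out);
      strm.next_out  = buf;
      strm.avail_out = sizeof(buf);
    }
  }
  while (status != Z_STREAM_END);

  deflateEnd(&strm);
  return 0;
}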
null
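write_string() above emits unencrypted PDF literal strings by backslash-escaping '(', ')' and '\' and octal-escaping bytes outside 32..126, so arbitrary 8-bit text survives inside ( ... ). A standalone sketch of just that escaping rule follows; the helper name is hypothetical and the code is not taken from HTMLDOC.

/*
 * Sketch of PDF literal-string escaping: delimiters and backslashes are
 * backslash-escaped, non-printable bytes become \ooo octal escapes.
 */
#include <stdio.h>

static void pdf_put_literal_string(FILE *out, const unsigned char *s)
{
  putc('(', out);
  for (; *s; s ++)
  {
    if (*s < 32 || *s > 126)
      fprintf(out, "\\%o", *s);            /* octal escape for non-printables */
    else
    {
      if (*s == '(' || *s == ')' || *s == '\\')
        putc('\\', out);                   /* escape PDF string delimiters */
      putc(*s, out);
    }
  }
  putc(')', out);
}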
244
CWE-787
CVE-2021-29279
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2019-2021 * All rights reserved * * This file is part of GPAC / FLAC reframer filter * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/avparse.h> #include <gpac/constants.h> #include <gpac/filters.h> typedef struct { u64 pos; Double duration; } FLACIdx; typedef struct { u32 block_size; u32 sample_rate; } FLACHeader; typedef struct { //filter args Double index; //only one input pid declared GF_FilterPid *ipid; //only one output pid declared GF_FilterPid *opid; GF_BitStream *bs; u64 file_pos, cts, prev_cts; GF_Fraction64 duration; Double start_range; Bool in_seek; u32 timescale; Bool is_playing; Bool is_file; Bool initial_play_done, file_loaded; Bool initialized; u32 sample_rate, nb_channels, bits_per_sample, block_size; u8 *flac_buffer; u32 flac_buffer_size, flac_buffer_alloc, resume_from; u64 byte_offset; GF_FilterPacket *src_pck; Bool recompute_cts; FLACIdx *indexes; u32 index_alloc_size, index_size; u32 bitrate; } GF_FLACDmxCtx; GF_Err flac_dmx_configure_pid(GF_Filter *filter, GF_FilterPid *pid, Bool is_remove) { const GF_PropertyValue *p; GF_FLACDmxCtx *ctx = gf_filter_get_udta(filter); if (is_remove) { ctx->ipid = NULL; if (ctx->opid) { gf_filter_pid_remove(ctx->opid); ctx->opid = NULL; } return GF_OK; } if (! 
gf_filter_pid_check_caps(pid)) return GF_NOT_SUPPORTED; ctx->ipid = pid; p = gf_filter_pid_get_property(pid, GF_PROP_PID_TIMESCALE); if (p) ctx->timescale = p->value.uint; p = gf_filter_pid_get_property_str(pid, "nocts"); if (p && p->value.boolean) ctx->recompute_cts = GF_TRUE; else ctx->recompute_cts = GF_FALSE; if (ctx->timescale && !ctx->opid) { ctx->opid = gf_filter_pid_new(filter); gf_filter_pid_copy_properties(ctx->opid, ctx->ipid); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_UNFRAMED, NULL); } return GF_OK; } static void flac_dmx_check_dur(GF_Filter *filter, GF_FLACDmxCtx *ctx) { u64 rate; FILE *stream; const GF_PropertyValue *p; if (!ctx->opid || ctx->timescale || ctx->file_loaded) return; if (ctx->index<=0) { ctx->file_loaded = GF_TRUE; return; } p = gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_FILEPATH); if (!p || !p->value.string || !strncmp(p->value.string, "gmem://", 7)) { ctx->is_file = GF_FALSE; ctx->file_loaded = GF_TRUE; return; } ctx->is_file = GF_TRUE; stream = gf_fopen(p->value.string, "rb"); if (!stream) return; gf_fseek(stream, 0, SEEK_END); rate = gf_ftell(stream); gf_fclose(stream); if (ctx->duration.num && !gf_sys_is_test_mode() ) { rate *= 8 * ctx->duration.den; rate /= ctx->duration.num; ctx->bitrate = (u32) rate; } p = gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_FILE_CACHED); if (p && p->value.boolean) ctx->file_loaded = GF_TRUE; gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_CAN_DATAREF, & PROP_BOOL(GF_TRUE ) ); } static void flac_dmx_check_pid(GF_Filter *filter, GF_FLACDmxCtx *ctx, u8 *dsi, u32 dsi_size) { if (!ctx->opid) { ctx->opid = gf_filter_pid_new(filter); flac_dmx_check_dur(filter, ctx); } //copy properties at init or reconfig gf_filter_pid_copy_properties(ctx->opid, ctx->ipid); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_STREAM_TYPE, & PROP_UINT( GF_STREAM_AUDIO)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_UNFRAMED, NULL ); if (ctx->is_file && ctx->index) { gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_PLAYBACK_MODE, & PROP_UINT(GF_PLAYBACK_MODE_FASTFORWARD) ); } if (ctx->duration.num) gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_DURATION, & PROP_FRAC64(ctx->duration)); if (!ctx->timescale) gf_filter_pid_set_name(ctx->opid, "audio"); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_DECODER_CONFIG, & PROP_DATA( dsi, dsi_size ) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_CODECID, & PROP_UINT( GF_CODECID_FLAC ) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_TIMESCALE, & PROP_UINT(ctx->timescale ? ctx->timescale : ctx->sample_rate)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_SAMPLE_RATE, & PROP_UINT(ctx->sample_rate)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_NUM_CHANNELS, & PROP_UINT(ctx->nb_channels) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_SAMPLES_PER_FRAME, & PROP_UINT(ctx->block_size) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_AUDIO_BPS, & PROP_UINT(ctx->bits_per_sample) ); if (ctx->bitrate) { gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_BITRATE, & PROP_UINT(ctx->bitrate)); } } static Bool flac_dmx_process_event(GF_Filter *filter, const GF_FilterEvent *evt) { u32 i; GF_FilterEvent fevt; GF_FLACDmxCtx *ctx = gf_filter_get_udta(filter); if (evt->base.on_pid != ctx->opid) return GF_TRUE; switch (evt->base.type) { case GF_FEVT_PLAY: if (!ctx->is_playing) { ctx->is_playing = GF_TRUE; } if (! 
ctx->is_file) { if (evt->play.start_range || ctx->initial_play_done) { ctx->flac_buffer_size = 0; ctx->resume_from = 0; } ctx->initial_play_done = GF_TRUE; return GF_FALSE; } flac_dmx_check_dur(filter, ctx); ctx->start_range = evt->play.start_range; ctx->in_seek = GF_TRUE; ctx->file_pos = 0; if (ctx->start_range) { for (i=1; i<ctx->index_size; i++) { if (ctx->indexes[i].duration>ctx->start_range) { ctx->cts = (u64) (ctx->indexes[i-1].duration * ctx->sample_rate); ctx->file_pos = ctx->indexes[i-1].pos; break; } } } if (!ctx->initial_play_done) { ctx->initial_play_done = GF_TRUE; //seek will not change the current source state, don't send a seek if (!ctx->file_pos) return GF_TRUE; } ctx->flac_buffer_size = 0; ctx->resume_from = 0; //post a seek GF_FEVT_INIT(fevt, GF_FEVT_SOURCE_SEEK, ctx->ipid); fevt.seek.start_offset = ctx->file_pos; gf_filter_pid_send_event(ctx->ipid, &fevt); //cancel event return GF_TRUE; case GF_FEVT_STOP: ctx->is_playing = GF_FALSE; if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck); ctx->src_pck = NULL; //don't cancel event return GF_FALSE; case GF_FEVT_SET_SPEED: //cancel event return GF_TRUE; default: break; } //by default don't cancel event - to rework once we have downloading in place return GF_FALSE; } static GFINLINE void flac_dmx_update_cts(GF_FLACDmxCtx *ctx, u32 nb_samp) { if (ctx->timescale) { u64 inc = nb_samp; inc *= ctx->timescale; inc /= ctx->sample_rate; ctx->cts += inc; } else { ctx->cts += nb_samp; } } u8 const flac_dmx_crc8_table[256] = { 0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15, 0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D, 0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65, 0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D, 0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5, 0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD, 0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85, 0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD, 0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2, 0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA, 0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2, 0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A, 0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32, 0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A, 0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42, 0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A, 0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C, 0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4, 0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC, 0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4, 0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C, 0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44, 0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C, 0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34, 0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B, 0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63, 0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B, 0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13, 0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB, 0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83, 0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB, 0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3 }; u8 flac_dmx_crc8(u8 *data, u32 len) { u8 crc = 0; while (len--) crc = flac_dmx_crc8_table[crc ^ *data++]; return crc; } static u32 flac_dmx_block_sizes[] = { 0, 192, 576, 1152, 2304, 4608, 0, 0, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768 }; static u32 flac_dmx_samplerates[] = { 0, 88200, 176400, 192000, 8000, 16000, 22050, 24000, 32000, 44100, 48000, 96000 }; static Bool flac_parse_header(GF_FLACDmxCtx *ctx, char *data, u32 size, FLACHeader *hdr) { u32 block_size, sample_rate, res, top, pos, crc, 
crc_hdr; gf_bs_reassign_buffer(ctx->bs, data, size); gf_bs_read_int(ctx->bs, 15); /*block_strategy = */gf_bs_read_int(ctx->bs, 1); block_size = gf_bs_read_int(ctx->bs, 4); sample_rate = gf_bs_read_int(ctx->bs, 4); /*u32 channel_layout = */gf_bs_read_int(ctx->bs, 4); /*u32 bps = */gf_bs_read_int(ctx->bs, 3); gf_bs_read_int(ctx->bs, 1); res = gf_bs_read_u8(ctx->bs); top = (res & 128) >> 1; if ((res & 0xC0) == 0x80 || (res >= 0xFE)) return GF_FALSE; while (res & top) { s32 tmp = gf_bs_read_u8(ctx->bs); tmp -= 128; if(tmp>>6) return GF_FALSE; res = (res<<6) + tmp; top <<= 5; } //res &= (top << 1) - 1; if (block_size==6) block_size = 1 + gf_bs_read_int(ctx->bs, 8); else if (block_size==7) block_size = 1 + gf_bs_read_int(ctx->bs, 16); else { block_size = flac_dmx_block_sizes[block_size]; } #if 0 if (bps==0) bps = ctx->bits_per_sample; else if (bps==1) bps = 8; else if (bps==2) bps = 12; else if (bps==4) bps = 16; else if (bps==5) bps = 20; else if (bps==6) bps = 24; #endif if (sample_rate==0) sample_rate = ctx->sample_rate; else if ((sample_rate&0xC)==0xC) { if (sample_rate==0xC) sample_rate = gf_bs_read_u8(ctx->bs); else if (sample_rate==0xD) sample_rate = gf_bs_read_u16(ctx->bs); else if (sample_rate==0xE) sample_rate = 10*gf_bs_read_u16(ctx->bs); } else { sample_rate = flac_dmx_samplerates[sample_rate]; } pos = (u32) gf_bs_get_position(ctx->bs); crc = gf_bs_read_u8(ctx->bs); crc_hdr = flac_dmx_crc8(data, pos); if (crc != crc_hdr) { return GF_FALSE; } hdr->sample_rate = sample_rate; hdr->block_size = block_size; return GF_TRUE; } GF_Err flac_dmx_process(GF_Filter *filter) { GF_FLACDmxCtx *ctx = gf_filter_get_udta(filter); GF_FilterPacket *pck, *dst_pck; u8 *output; u8 *start; Bool final_flush=GF_FALSE; u32 pck_size, remain, prev_pck_size; u64 cts = GF_FILTER_NO_TS; FLACHeader hdr; //always reparse duration if (!ctx->duration.num) flac_dmx_check_dur(filter, ctx); if (ctx->opid && !ctx->is_playing) return GF_OK; pck = gf_filter_pid_get_packet(ctx->ipid); if (!pck) { if (gf_filter_pid_is_eos(ctx->ipid)) { if (!ctx->flac_buffer_size) { if (ctx->opid) gf_filter_pid_set_eos(ctx->opid); if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck); ctx->src_pck = NULL; return GF_EOS; } final_flush = GF_TRUE; } else { return GF_OK; } } prev_pck_size = ctx->flac_buffer_size; if (pck && !ctx->resume_from) { u8 *data = (u8 *) gf_filter_pck_get_data(pck, &pck_size); if (ctx->byte_offset != GF_FILTER_NO_BO) { u64 byte_offset = gf_filter_pck_get_byte_offset(pck); if (!ctx->flac_buffer_size) { ctx->byte_offset = byte_offset; } else if (ctx->byte_offset + ctx->flac_buffer_size != byte_offset) { ctx->byte_offset = GF_FILTER_NO_BO; if ((byte_offset != GF_FILTER_NO_BO) && (byte_offset>ctx->flac_buffer_size) ) { ctx->byte_offset = byte_offset - ctx->flac_buffer_size; } } } if (ctx->flac_buffer_size + pck_size > ctx->flac_buffer_alloc) { ctx->flac_buffer_alloc = ctx->flac_buffer_size + pck_size; ctx->flac_buffer = gf_realloc(ctx->flac_buffer, ctx->flac_buffer_alloc); } memcpy(ctx->flac_buffer + ctx->flac_buffer_size, data, pck_size); ctx->flac_buffer_size += pck_size; } //input pid sets some timescale - we flushed pending data , update cts if (ctx->timescale && pck) { cts = gf_filter_pck_get_cts(pck); } if (cts == GF_FILTER_NO_TS) { //avoids updating cts prev_pck_size = 0; } remain = ctx->flac_buffer_size; start = ctx->flac_buffer; if (ctx->resume_from) { start += ctx->resume_from - 1; remain -= ctx->resume_from - 1; ctx->resume_from = 0; } while (remain>2) { u32 next_frame=0, nb_samp; u32 cur_size = remain-2; u8 *cur_buf = 
start+2; u8 *hdr_start = NULL; if (final_flush) { next_frame = remain; } else { while (cur_size) { //wait till we have a frame header hdr_start = memchr(cur_buf, 0xFF, cur_size); if (!hdr_start) break; next_frame = (u32) (hdr_start-start); if (next_frame == remain) break; if ((hdr_start[1]&0xFC) == 0xF8) { if (flac_parse_header(ctx, hdr_start, (u32) remain - next_frame, &hdr)) break; } cur_buf = hdr_start+1; cur_size = (u32) (cur_buf - start); assert(cur_size<=remain); cur_size = remain - cur_size; hdr_start = NULL; } if (!hdr_start) break; if (next_frame == remain) break; } if (!ctx->initialized) { u32 size = next_frame; u32 dsi_end = 0; //we have a header gf_bs_reassign_buffer(ctx->bs, ctx->flac_buffer, size); u32 magic = gf_bs_read_u32(ctx->bs); if (magic != GF_4CC('f','L','a','C')) { } while (gf_bs_available(ctx->bs)) { Bool last = gf_bs_read_int(ctx->bs, 1); u32 type = gf_bs_read_int(ctx->bs, 7); u32 len = gf_bs_read_int(ctx->bs, 24); if (type==0) { u16 min_block_size = gf_bs_read_u16(ctx->bs); u16 max_block_size = gf_bs_read_u16(ctx->bs); /*u32 min_frame_size = */gf_bs_read_u24(ctx->bs); /*u32 max_frame_size = */gf_bs_read_u24(ctx->bs); ctx->sample_rate = gf_bs_read_int(ctx->bs, 20); ctx->nb_channels = 1 + gf_bs_read_int(ctx->bs, 3); ctx->bits_per_sample = 1 + gf_bs_read_int(ctx->bs, 5); if (min_block_size==max_block_size) ctx->block_size = min_block_size; else ctx->block_size = 0; ctx->duration.num = gf_bs_read_long_int(ctx->bs, 36); ctx->duration.den = ctx->sample_rate; //ignore the rest gf_bs_skip_bytes(ctx->bs, 16); dsi_end = (u32) gf_bs_get_position(ctx->bs); } else { //ignore the rest for now //TODO: expose metadata, pictures and co gf_bs_skip_bytes(ctx->bs, len); } if (last) break; } flac_dmx_check_pid(filter, ctx, ctx->flac_buffer+4, dsi_end-4); remain -= size; start += size; ctx->initialized = GF_TRUE; if (!ctx->is_playing) break; continue; } //we have a next frame, check we are synchronize if ((start[0] != 0xFF) && ((start[1]&0xFC) != 0xF8)) { GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[FLACDmx] invalid frame, droping %d bytes and resyncing\n", next_frame)); start += next_frame; remain -= next_frame; continue; } flac_parse_header(ctx,start, next_frame, &hdr); if (hdr.sample_rate != ctx->sample_rate) { ctx->sample_rate = hdr.sample_rate; gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_SAMPLE_RATE, & PROP_UINT(ctx->sample_rate)); } nb_samp = hdr.block_size; if (ctx->in_seek) { u64 nb_samples_at_seek = (u64) (ctx->start_range * ctx->sample_rate); if (ctx->cts + nb_samp >= nb_samples_at_seek) { //u32 samples_to_discard = (ctx->cts + nb_samp ) - nb_samples_at_seek; ctx->in_seek = GF_FALSE; } } if (ctx->timescale && !prev_pck_size && (cts != GF_FILTER_NO_TS) ) { ctx->cts = cts; cts = GF_FILTER_NO_TS; } if (!ctx->in_seek) { dst_pck = gf_filter_pck_new_alloc(ctx->opid, next_frame, &output); memcpy(output, start, next_frame); gf_filter_pck_set_cts(dst_pck, ctx->cts); if (!ctx->timescale || (ctx->timescale==ctx->sample_rate) ) gf_filter_pck_set_duration(dst_pck, nb_samp); else { gf_filter_pck_set_duration(dst_pck, (nb_samp * ctx->timescale) / ctx->sample_rate); } gf_filter_pck_set_sap(dst_pck, GF_FILTER_SAP_1); gf_filter_pck_set_framing(dst_pck, GF_TRUE, GF_TRUE); if (ctx->byte_offset != GF_FILTER_NO_BO) { gf_filter_pck_set_byte_offset(dst_pck, ctx->byte_offset); } gf_filter_pck_send(dst_pck); } flac_dmx_update_cts(ctx, nb_samp); assert (start[0] == 0xFF); assert((start[1]&0xFC) == 0xF8); start += next_frame; assert(remain >= next_frame); remain -= next_frame; } if (!pck) { 
ctx->flac_buffer_size = 0; return flac_dmx_process(filter); } else { if (remain < ctx->flac_buffer_size) { memmove(ctx->flac_buffer, start, remain); } ctx->flac_buffer_size = remain; gf_filter_pid_drop_packet(ctx->ipid); } return GF_OK; } static GF_Err flac_dmx_initialize(GF_Filter *filter) { GF_FLACDmxCtx *ctx = gf_filter_get_udta(filter); ctx->bs = gf_bs_new((u8 *)ctx, 1, GF_BITSTREAM_READ); return GF_OK; } static void flac_dmx_finalize(GF_Filter *filter) { GF_FLACDmxCtx *ctx = gf_filter_get_udta(filter); if (ctx->bs) gf_bs_del(ctx->bs); if (ctx->indexes) gf_free(ctx->indexes); if (ctx->flac_buffer) gf_free(ctx->flac_buffer); if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck); } static const char *flac_dmx_probe_data(const u8 *data, u32 size, GF_FilterProbeScore *score) { if ((size>4) && !strncmp(data, "fLaC", 4)) { *score = GF_FPROBE_SUPPORTED; return "audio/flac"; } return NULL; } static const GF_FilterCapability FLACDmxCaps[] = { CAP_UINT(GF_CAPS_INPUT, GF_PROP_PID_STREAM_TYPE, GF_STREAM_FILE), CAP_STRING(GF_CAPS_INPUT, GF_PROP_PID_FILE_EXT, "flac"), CAP_STRING(GF_CAPS_INPUT, GF_PROP_PID_MIME, "audio/flac"), CAP_UINT(GF_CAPS_OUTPUT, GF_PROP_PID_STREAM_TYPE, GF_STREAM_AUDIO), CAP_UINT(GF_CAPS_OUTPUT, GF_PROP_PID_CODECID, GF_CODECID_FLAC), CAP_BOOL(GF_CAPS_OUTPUT_EXCLUDED, GF_PROP_PID_UNFRAMED, GF_TRUE), {0}, CAP_UINT(GF_CAPS_INPUT_OUTPUT,GF_PROP_PID_STREAM_TYPE, GF_STREAM_AUDIO), CAP_BOOL(GF_CAPS_INPUT,GF_PROP_PID_UNFRAMED, GF_TRUE), CAP_UINT(GF_CAPS_INPUT_OUTPUT,GF_PROP_PID_CODECID, GF_CODECID_FLAC), CAP_BOOL(GF_CAPS_OUTPUT_EXCLUDED, GF_PROP_PID_UNFRAMED, GF_TRUE), }; #define OFFS(_n) #_n, offsetof(GF_FLACDmxCtx, _n) static const GF_FilterArgs FLACDmxArgs[] = { { OFFS(index), "indexing window length", GF_PROP_DOUBLE, "1.0", NULL, 0}, {0} }; GF_FilterRegister FLACDmxRegister = { .name = "rfflac", GF_FS_SET_DESCRIPTION("FLAC reframer") GF_FS_SET_HELP("This filter parses FLAC files/data and outputs corresponding audio PID and frames.") .private_size = sizeof(GF_FLACDmxCtx), .args = FLACDmxArgs, .finalize = flac_dmx_finalize, .initialize = flac_dmx_initialize, SETCAPS(FLACDmxCaps), .configure_pid = flac_dmx_configure_pid, .process = flac_dmx_process, .probe_data = flac_dmx_probe_data, .process_event = flac_dmx_process_event }; const GF_FilterRegister *flac_dmx_register(GF_FilterSession *session) { return &FLACDmxRegister; }
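flac_dmx_crc8() above is a table-driven CRC-8 over the FLAC frame header bytes, and flac_parse_header() rejects a sync candidate when this CRC does not match the byte that follows the header. The standalone sketch below shows a generator that is consistent with the table printed above (polynomial 0x07, MSB-first, zero initial value; note entry 1 of that table is 0x07) together with the same table-driven update loop. It is an illustration, not GPAC code.

/*
 * CRC-8 used for the FLAC frame header check:
 * polynomial x^8 + x^2 + x + 1 (0x07), MSB-first, initial value 0.
 */
#include <stdint.h>
#include <stddef.h>

static uint8_t crc8_table[256];

static void crc8_init_table(void)
{
  for (int i = 0; i < 256; i ++)
  {
    uint8_t crc = (uint8_t)i;
    for (int bit = 0; bit < 8; bit ++)
      crc = (uint8_t)((crc & 0x80) ? ((crc << 1) ^ 0x07) : (crc << 1));
    crc8_table[i] = crc;                   /* crc8_table[1] == 0x07 */
  }
}

static uint8_t crc8(const uint8_t *data, size_t len)
{
  uint8_t crc = 0;                         /* same update as flac_dmx_crc8() */
  while (len --)
    crc = crc8_table[crc ^ *data ++];
  return crc;
}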
null
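The fixed version of the reframer that follows declares a Bool in_error flag on the demuxer context which the vulnerable source above does not have; the rest of the patched file is cut off in this excerpt, so where the flag is set and tested is not shown. Purely as a hypothetical illustration of the latch-and-bail pattern such a flag usually supports (not the actual GPAC change):

/*
 * Hypothetical sketch only: a latched error flag lets a filter refuse
 * further work after it has seen a fatal parse error, instead of
 * re-processing broken data on every call.
 */
typedef struct {
  int in_error;            /* set once a fatal parse error is detected */
  /* ... other demuxer state ... */
} DemuxCtx;

static int demux_process(DemuxCtx *ctx, int parse_failed)
{
  if (ctx->in_error)
    return -1;             /* early out: stream already known to be bad */

  if (parse_failed)
  {
    ctx->in_error = 1;     /* latch the error and stop processing */
    return -1;
  }
  return 0;                /* normal processing continues */
}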
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2019-2021 * All rights reserved * * This file is part of GPAC / FLAC reframer filter * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/avparse.h> #include <gpac/constants.h> #include <gpac/filters.h> typedef struct { u64 pos; Double duration; } FLACIdx; typedef struct { u32 block_size; u32 sample_rate; } FLACHeader; typedef struct { //filter args Double index; //only one input pid declared GF_FilterPid *ipid; //only one output pid declared GF_FilterPid *opid; GF_BitStream *bs; u64 file_pos, cts, prev_cts; GF_Fraction64 duration; Double start_range; Bool in_seek; u32 timescale; Bool is_playing; Bool is_file; Bool initial_play_done, file_loaded; Bool in_error; Bool initialized; u32 sample_rate, nb_channels, bits_per_sample, block_size; u8 *flac_buffer; u32 flac_buffer_size, flac_buffer_alloc, resume_from; u64 byte_offset; GF_FilterPacket *src_pck; Bool recompute_cts; FLACIdx *indexes; u32 index_alloc_size, index_size; u32 bitrate; } GF_FLACDmxCtx; GF_Err flac_dmx_configure_pid(GF_Filter *filter, GF_FilterPid *pid, Bool is_remove) { const GF_PropertyValue *p; GF_FLACDmxCtx *ctx = gf_filter_get_udta(filter); if (is_remove) { ctx->ipid = NULL; if (ctx->opid) { gf_filter_pid_remove(ctx->opid); ctx->opid = NULL; } return GF_OK; } if (! 
gf_filter_pid_check_caps(pid)) return GF_NOT_SUPPORTED; ctx->ipid = pid; p = gf_filter_pid_get_property(pid, GF_PROP_PID_TIMESCALE); if (p) ctx->timescale = p->value.uint; p = gf_filter_pid_get_property_str(pid, "nocts"); if (p && p->value.boolean) ctx->recompute_cts = GF_TRUE; else ctx->recompute_cts = GF_FALSE; if (ctx->timescale && !ctx->opid) { ctx->opid = gf_filter_pid_new(filter); gf_filter_pid_copy_properties(ctx->opid, ctx->ipid); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_UNFRAMED, NULL); } return GF_OK; } static void flac_dmx_check_dur(GF_Filter *filter, GF_FLACDmxCtx *ctx) { u64 rate; FILE *stream; const GF_PropertyValue *p; if (!ctx->opid || ctx->timescale || ctx->file_loaded) return; if (ctx->index<=0) { ctx->file_loaded = GF_TRUE; return; } p = gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_FILEPATH); if (!p || !p->value.string || !strncmp(p->value.string, "gmem://", 7)) { ctx->is_file = GF_FALSE; ctx->file_loaded = GF_TRUE; return; } ctx->is_file = GF_TRUE; stream = gf_fopen(p->value.string, "rb"); if (!stream) return; gf_fseek(stream, 0, SEEK_END); rate = gf_ftell(stream); gf_fclose(stream); if (ctx->duration.num && !gf_sys_is_test_mode() ) { rate *= 8 * ctx->duration.den; rate /= ctx->duration.num; ctx->bitrate = (u32) rate; } p = gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_FILE_CACHED); if (p && p->value.boolean) ctx->file_loaded = GF_TRUE; gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_CAN_DATAREF, & PROP_BOOL(GF_TRUE ) ); } static void flac_dmx_check_pid(GF_Filter *filter, GF_FLACDmxCtx *ctx, u8 *dsi, u32 dsi_size) { if (!ctx->opid) { ctx->opid = gf_filter_pid_new(filter); flac_dmx_check_dur(filter, ctx); } //copy properties at init or reconfig gf_filter_pid_copy_properties(ctx->opid, ctx->ipid); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_STREAM_TYPE, & PROP_UINT( GF_STREAM_AUDIO)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_UNFRAMED, NULL ); if (ctx->is_file && ctx->index) { gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_PLAYBACK_MODE, & PROP_UINT(GF_PLAYBACK_MODE_FASTFORWARD) ); } if (ctx->duration.num) gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_DURATION, & PROP_FRAC64(ctx->duration)); if (!ctx->timescale) gf_filter_pid_set_name(ctx->opid, "audio"); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_DECODER_CONFIG, & PROP_DATA( dsi, dsi_size ) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_CODECID, & PROP_UINT( GF_CODECID_FLAC ) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_TIMESCALE, & PROP_UINT(ctx->timescale ? ctx->timescale : ctx->sample_rate)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_SAMPLE_RATE, & PROP_UINT(ctx->sample_rate)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_NUM_CHANNELS, & PROP_UINT(ctx->nb_channels) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_SAMPLES_PER_FRAME, & PROP_UINT(ctx->block_size) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_AUDIO_BPS, & PROP_UINT(ctx->bits_per_sample) ); if (ctx->bitrate) { gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_BITRATE, & PROP_UINT(ctx->bitrate)); } } static Bool flac_dmx_process_event(GF_Filter *filter, const GF_FilterEvent *evt) { u32 i; GF_FilterEvent fevt; GF_FLACDmxCtx *ctx = gf_filter_get_udta(filter); if (evt->base.on_pid != ctx->opid) return GF_TRUE; switch (evt->base.type) { case GF_FEVT_PLAY: if (!ctx->is_playing) { ctx->is_playing = GF_TRUE; } if (! 
ctx->is_file) { if (evt->play.start_range || ctx->initial_play_done) { ctx->flac_buffer_size = 0; ctx->resume_from = 0; } ctx->initial_play_done = GF_TRUE; return GF_FALSE; } flac_dmx_check_dur(filter, ctx); ctx->start_range = evt->play.start_range; ctx->in_seek = GF_TRUE; ctx->file_pos = 0; if (ctx->start_range) { for (i=1; i<ctx->index_size; i++) { if (ctx->indexes[i].duration>ctx->start_range) { ctx->cts = (u64) (ctx->indexes[i-1].duration * ctx->sample_rate); ctx->file_pos = ctx->indexes[i-1].pos; break; } } } if (!ctx->initial_play_done) { ctx->initial_play_done = GF_TRUE; //seek will not change the current source state, don't send a seek if (!ctx->file_pos) return GF_TRUE; } ctx->flac_buffer_size = 0; ctx->resume_from = 0; //post a seek GF_FEVT_INIT(fevt, GF_FEVT_SOURCE_SEEK, ctx->ipid); fevt.seek.start_offset = ctx->file_pos; gf_filter_pid_send_event(ctx->ipid, &fevt); //cancel event return GF_TRUE; case GF_FEVT_STOP: ctx->is_playing = GF_FALSE; if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck); ctx->src_pck = NULL; //don't cancel event return GF_FALSE; case GF_FEVT_SET_SPEED: //cancel event return GF_TRUE; default: break; } //by default don't cancel event - to rework once we have downloading in place return GF_FALSE; } static GFINLINE void flac_dmx_update_cts(GF_FLACDmxCtx *ctx, u32 nb_samp) { if (ctx->timescale) { u64 inc = nb_samp; inc *= ctx->timescale; inc /= ctx->sample_rate; ctx->cts += inc; } else { ctx->cts += nb_samp; } } u8 const flac_dmx_crc8_table[256] = { 0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15, 0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D, 0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65, 0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D, 0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5, 0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD, 0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85, 0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD, 0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2, 0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA, 0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2, 0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A, 0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32, 0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A, 0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42, 0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A, 0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C, 0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4, 0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 0xE2, 0xEB, 0xEC, 0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4, 0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C, 0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44, 0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C, 0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34, 0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B, 0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63, 0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B, 0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13, 0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB, 0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83, 0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB, 0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3 }; u8 flac_dmx_crc8(u8 *data, u32 len) { u8 crc = 0; while (len--) crc = flac_dmx_crc8_table[crc ^ *data++]; return crc; } static u32 flac_dmx_block_sizes[] = { 0, 192, 576, 1152, 2304, 4608, 0, 0, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768 }; static u32 flac_dmx_samplerates[] = { 0, 88200, 176400, 192000, 8000, 16000, 22050, 24000, 32000, 44100, 48000, 96000 }; static Bool flac_parse_header(GF_FLACDmxCtx *ctx, char *data, u32 size, FLACHeader *hdr) { u32 block_size, sample_rate, res, top, pos, crc, 
crc_hdr; gf_bs_reassign_buffer(ctx->bs, data, size); gf_bs_read_int(ctx->bs, 15); /*block_strategy = */gf_bs_read_int(ctx->bs, 1); block_size = gf_bs_read_int(ctx->bs, 4); sample_rate = gf_bs_read_int(ctx->bs, 4); /*u32 channel_layout = */gf_bs_read_int(ctx->bs, 4); /*u32 bps = */gf_bs_read_int(ctx->bs, 3); gf_bs_read_int(ctx->bs, 1); res = gf_bs_read_u8(ctx->bs); top = (res & 128) >> 1; if ((res & 0xC0) == 0x80 || (res >= 0xFE)) return GF_FALSE; while (res & top) { s32 tmp = gf_bs_read_u8(ctx->bs); tmp -= 128; if(tmp>>6) return GF_FALSE; res = (res<<6) + tmp; top <<= 5; } //res &= (top << 1) - 1; if (block_size==6) block_size = 1 + gf_bs_read_int(ctx->bs, 8); else if (block_size==7) block_size = 1 + gf_bs_read_int(ctx->bs, 16); else { block_size = flac_dmx_block_sizes[block_size]; } #if 0 if (bps==0) bps = ctx->bits_per_sample; else if (bps==1) bps = 8; else if (bps==2) bps = 12; else if (bps==4) bps = 16; else if (bps==5) bps = 20; else if (bps==6) bps = 24; #endif if (sample_rate==0) sample_rate = ctx->sample_rate; else if ((sample_rate&0xC)==0xC) { if (sample_rate==0xC) sample_rate = gf_bs_read_u8(ctx->bs); else if (sample_rate==0xD) sample_rate = gf_bs_read_u16(ctx->bs); else if (sample_rate==0xE) sample_rate = 10*gf_bs_read_u16(ctx->bs); } else { sample_rate = flac_dmx_samplerates[sample_rate]; } pos = (u32) gf_bs_get_position(ctx->bs); crc = gf_bs_read_u8(ctx->bs); crc_hdr = flac_dmx_crc8(data, pos); if (crc != crc_hdr) { return GF_FALSE; } hdr->sample_rate = sample_rate; hdr->block_size = block_size; return GF_TRUE; } GF_Err flac_dmx_process(GF_Filter *filter) { GF_FLACDmxCtx *ctx = gf_filter_get_udta(filter); GF_FilterPacket *pck, *dst_pck; u8 *output; u8 *start; Bool final_flush=GF_FALSE; u32 pck_size, remain, prev_pck_size; u64 cts = GF_FILTER_NO_TS; FLACHeader hdr; if (ctx->in_error) return GF_NON_COMPLIANT_BITSTREAM; //always reparse duration if (!ctx->duration.num) flac_dmx_check_dur(filter, ctx); if (ctx->opid && !ctx->is_playing) return GF_OK; pck = gf_filter_pid_get_packet(ctx->ipid); if (!pck) { if (gf_filter_pid_is_eos(ctx->ipid)) { if (!ctx->flac_buffer_size) { if (ctx->opid) gf_filter_pid_set_eos(ctx->opid); if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck); ctx->src_pck = NULL; return GF_EOS; } final_flush = GF_TRUE; } else { return GF_OK; } } prev_pck_size = ctx->flac_buffer_size; if (pck && !ctx->resume_from) { u8 *data = (u8 *) gf_filter_pck_get_data(pck, &pck_size); if (ctx->byte_offset != GF_FILTER_NO_BO) { u64 byte_offset = gf_filter_pck_get_byte_offset(pck); if (!ctx->flac_buffer_size) { ctx->byte_offset = byte_offset; } else if (ctx->byte_offset + ctx->flac_buffer_size != byte_offset) { ctx->byte_offset = GF_FILTER_NO_BO; if ((byte_offset != GF_FILTER_NO_BO) && (byte_offset>ctx->flac_buffer_size) ) { ctx->byte_offset = byte_offset - ctx->flac_buffer_size; } } } if (ctx->flac_buffer_size + pck_size > ctx->flac_buffer_alloc) { ctx->flac_buffer_alloc = ctx->flac_buffer_size + pck_size; ctx->flac_buffer = gf_realloc(ctx->flac_buffer, ctx->flac_buffer_alloc); } memcpy(ctx->flac_buffer + ctx->flac_buffer_size, data, pck_size); ctx->flac_buffer_size += pck_size; } //input pid sets some timescale - we flushed pending data , update cts if (ctx->timescale && pck) { cts = gf_filter_pck_get_cts(pck); } if (cts == GF_FILTER_NO_TS) { //avoids updating cts prev_pck_size = 0; } remain = ctx->flac_buffer_size; start = ctx->flac_buffer; if (ctx->resume_from) { start += ctx->resume_from - 1; remain -= ctx->resume_from - 1; ctx->resume_from = 0; } while (remain>2) { u32 
next_frame=0, nb_samp; u32 cur_size = remain-2; u8 *cur_buf = start+2; u8 *hdr_start = NULL; if (final_flush) { next_frame = remain; } else { while (cur_size) { //wait till we have a frame header hdr_start = memchr(cur_buf, 0xFF, cur_size); if (!hdr_start) break; next_frame = (u32) (hdr_start-start); if (next_frame == remain) break; if ((hdr_start[1]&0xFC) == 0xF8) { if (flac_parse_header(ctx, hdr_start, (u32) remain - next_frame, &hdr)) break; } cur_buf = hdr_start+1; cur_size = (u32) (cur_buf - start); assert(cur_size<=remain); cur_size = remain - cur_size; hdr_start = NULL; } if (!hdr_start) break; if (next_frame == remain) break; } if (!ctx->initialized) { u32 size = next_frame; u32 dsi_end = 0; //we have a header gf_bs_reassign_buffer(ctx->bs, ctx->flac_buffer, size); u32 magic = gf_bs_read_u32(ctx->bs); if (magic != GF_4CC('f','L','a','C')) { GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[FLACDmx] invalid FLAC magic\n")); ctx->in_error = GF_TRUE; ctx->flac_buffer_size = 0; if (pck) gf_filter_pid_drop_packet(ctx->ipid); return GF_NON_COMPLIANT_BITSTREAM; } while (gf_bs_available(ctx->bs)) { Bool last = gf_bs_read_int(ctx->bs, 1); u32 type = gf_bs_read_int(ctx->bs, 7); u32 len = gf_bs_read_int(ctx->bs, 24); if (type==0) { u16 min_block_size = gf_bs_read_u16(ctx->bs); u16 max_block_size = gf_bs_read_u16(ctx->bs); /*u32 min_frame_size = */gf_bs_read_u24(ctx->bs); /*u32 max_frame_size = */gf_bs_read_u24(ctx->bs); ctx->sample_rate = gf_bs_read_int(ctx->bs, 20); ctx->nb_channels = 1 + gf_bs_read_int(ctx->bs, 3); ctx->bits_per_sample = 1 + gf_bs_read_int(ctx->bs, 5); if (min_block_size==max_block_size) ctx->block_size = min_block_size; else ctx->block_size = 0; ctx->duration.num = gf_bs_read_long_int(ctx->bs, 36); ctx->duration.den = ctx->sample_rate; //ignore the rest gf_bs_skip_bytes(ctx->bs, 16); dsi_end = (u32) gf_bs_get_position(ctx->bs); } else { //ignore the rest for now //TODO: expose metadata, pictures and co gf_bs_skip_bytes(ctx->bs, len); } if (last) break; } if (!dsi_end) { GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[FLACDmx] invalid FLAC header\n")); ctx->in_error = GF_TRUE; ctx->flac_buffer_size = 0; if (pck) gf_filter_pid_drop_packet(ctx->ipid); return GF_NON_COMPLIANT_BITSTREAM; } flac_dmx_check_pid(filter, ctx, ctx->flac_buffer+4, dsi_end-4); remain -= size; start += size; ctx->initialized = GF_TRUE; if (!ctx->is_playing) break; continue; } //we have a next frame, check we are synchronize if ((start[0] != 0xFF) && ((start[1]&0xFC) != 0xF8)) { GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[FLACDmx] invalid frame, droping %d bytes and resyncing\n", next_frame)); start += next_frame; remain -= next_frame; continue; } flac_parse_header(ctx,start, next_frame, &hdr); if (hdr.sample_rate != ctx->sample_rate) { ctx->sample_rate = hdr.sample_rate; gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_SAMPLE_RATE, & PROP_UINT(ctx->sample_rate)); } nb_samp = hdr.block_size; if (ctx->in_seek) { u64 nb_samples_at_seek = (u64) (ctx->start_range * ctx->sample_rate); if (ctx->cts + nb_samp >= nb_samples_at_seek) { //u32 samples_to_discard = (ctx->cts + nb_samp ) - nb_samples_at_seek; ctx->in_seek = GF_FALSE; } } if (ctx->timescale && !prev_pck_size && (cts != GF_FILTER_NO_TS) ) { ctx->cts = cts; cts = GF_FILTER_NO_TS; } if (!ctx->in_seek) { dst_pck = gf_filter_pck_new_alloc(ctx->opid, next_frame, &output); memcpy(output, start, next_frame); gf_filter_pck_set_cts(dst_pck, ctx->cts); if (!ctx->timescale || (ctx->timescale==ctx->sample_rate) ) gf_filter_pck_set_duration(dst_pck, nb_samp); else { 
gf_filter_pck_set_duration(dst_pck, (nb_samp * ctx->timescale) / ctx->sample_rate); } gf_filter_pck_set_sap(dst_pck, GF_FILTER_SAP_1); gf_filter_pck_set_framing(dst_pck, GF_TRUE, GF_TRUE); if (ctx->byte_offset != GF_FILTER_NO_BO) { gf_filter_pck_set_byte_offset(dst_pck, ctx->byte_offset); } gf_filter_pck_send(dst_pck); } flac_dmx_update_cts(ctx, nb_samp); assert (start[0] == 0xFF); assert((start[1]&0xFC) == 0xF8); start += next_frame; assert(remain >= next_frame); remain -= next_frame; } if (!pck) { ctx->flac_buffer_size = 0; return flac_dmx_process(filter); } else { if (remain < ctx->flac_buffer_size) { memmove(ctx->flac_buffer, start, remain); } ctx->flac_buffer_size = remain; gf_filter_pid_drop_packet(ctx->ipid); } return GF_OK; } static GF_Err flac_dmx_initialize(GF_Filter *filter) { GF_FLACDmxCtx *ctx = gf_filter_get_udta(filter); ctx->bs = gf_bs_new((u8 *)ctx, 1, GF_BITSTREAM_READ); return GF_OK; } static void flac_dmx_finalize(GF_Filter *filter) { GF_FLACDmxCtx *ctx = gf_filter_get_udta(filter); if (ctx->bs) gf_bs_del(ctx->bs); if (ctx->indexes) gf_free(ctx->indexes); if (ctx->flac_buffer) gf_free(ctx->flac_buffer); if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck); } static const char *flac_dmx_probe_data(const u8 *data, u32 size, GF_FilterProbeScore *score) { if ((size>4) && !strncmp(data, "fLaC", 4)) { *score = GF_FPROBE_SUPPORTED; return "audio/flac"; } return NULL; } static const GF_FilterCapability FLACDmxCaps[] = { CAP_UINT(GF_CAPS_INPUT, GF_PROP_PID_STREAM_TYPE, GF_STREAM_FILE), CAP_STRING(GF_CAPS_INPUT, GF_PROP_PID_FILE_EXT, "flac"), CAP_STRING(GF_CAPS_INPUT, GF_PROP_PID_MIME, "audio/flac"), CAP_UINT(GF_CAPS_OUTPUT, GF_PROP_PID_STREAM_TYPE, GF_STREAM_AUDIO), CAP_UINT(GF_CAPS_OUTPUT, GF_PROP_PID_CODECID, GF_CODECID_FLAC), CAP_BOOL(GF_CAPS_OUTPUT_EXCLUDED, GF_PROP_PID_UNFRAMED, GF_TRUE), {0}, CAP_UINT(GF_CAPS_INPUT_OUTPUT,GF_PROP_PID_STREAM_TYPE, GF_STREAM_AUDIO), CAP_BOOL(GF_CAPS_INPUT,GF_PROP_PID_UNFRAMED, GF_TRUE), CAP_UINT(GF_CAPS_INPUT_OUTPUT,GF_PROP_PID_CODECID, GF_CODECID_FLAC), CAP_BOOL(GF_CAPS_OUTPUT_EXCLUDED, GF_PROP_PID_UNFRAMED, GF_TRUE), }; #define OFFS(_n) #_n, offsetof(GF_FLACDmxCtx, _n) static const GF_FilterArgs FLACDmxArgs[] = { { OFFS(index), "indexing window length", GF_PROP_DOUBLE, "1.0", NULL, 0}, {0} }; GF_FilterRegister FLACDmxRegister = { .name = "rfflac", GF_FS_SET_DESCRIPTION("FLAC reframer") GF_FS_SET_HELP("This filter parses FLAC files/data and outputs corresponding audio PID and frames.") .private_size = sizeof(GF_FLACDmxCtx), .args = FLACDmxArgs, .finalize = flac_dmx_finalize, .initialize = flac_dmx_initialize, SETCAPS(FLACDmxCaps), .configure_pid = flac_dmx_configure_pid, .process = flac_dmx_process, .probe_data = flac_dmx_probe_data, .process_event = flac_dmx_process_event }; const GF_FilterRegister *flac_dmx_register(GF_FilterSession *session) { return &FLACDmxRegister; }
null
245
CWE-787
CVE-2021-29464
// ***************************************************************** -*- C++ -*- /* * Copyright (C) 2004-2021 Exiv2 authors * This program is part of the Exiv2 distribution. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, 5th Floor, Boston, MA 02110-1301 USA. */ // ***************************************************************************** // included header files #include "config.h" #include "jp2image.hpp" #include "tiffimage.hpp" #include "image.hpp" #include "image_int.hpp" #include "basicio.hpp" #include "enforce.hpp" #include "error.hpp" #include "futils.hpp" #include "types.hpp" #include "safe_op.hpp" // + standard includes #include <string> #include <cstring> #include <iostream> #include <cassert> #include <cstdio> // JPEG-2000 box types const uint32_t kJp2BoxTypeJp2Header = 0x6a703268; // 'jp2h' const uint32_t kJp2BoxTypeImageHeader = 0x69686472; // 'ihdr' const uint32_t kJp2BoxTypeColorHeader = 0x636f6c72; // 'colr' const uint32_t kJp2BoxTypeUuid = 0x75756964; // 'uuid' const uint32_t kJp2BoxTypeClose = 0x6a703263; // 'jp2c' // from openjpeg-2.1.2/src/lib/openjp2/jp2.h /*#define JPIP_JPIP 0x6a706970*/ #define JP2_JP 0x6a502020 /**< JPEG 2000 signature box */ #define JP2_FTYP 0x66747970 /**< File type box */ #define JP2_JP2H 0x6a703268 /**< JP2 header box (super-box) */ #define JP2_IHDR 0x69686472 /**< Image header box */ #define JP2_COLR 0x636f6c72 /**< Colour specification box */ #define JP2_JP2C 0x6a703263 /**< Contiguous codestream box */ #define JP2_URL 0x75726c20 /**< Data entry URL box */ #define JP2_PCLR 0x70636c72 /**< Palette box */ #define JP2_CMAP 0x636d6170 /**< Component Mapping box */ #define JP2_CDEF 0x63646566 /**< Channel Definition box */ #define JP2_DTBL 0x6474626c /**< Data Reference box */ #define JP2_BPCC 0x62706363 /**< Bits per component box */ #define JP2_JP2 0x6a703220 /**< File type fields */ /* For the future */ /* #define JP2_RES 0x72657320 */ /**< Resolution box (super-box) */ /* #define JP2_JP2I 0x6a703269 */ /**< Intellectual property box */ /* #define JP2_XML 0x786d6c20 */ /**< XML box */ /* #define JP2_UUID 0x75756994 */ /**< UUID box */ /* #define JP2_UINF 0x75696e66 */ /**< UUID info box (super-box) */ /* #define JP2_ULST 0x756c7374 */ /**< UUID list box */ // JPEG-2000 UUIDs for embedded metadata // // See http://www.jpeg.org/public/wg1n2600.doc for information about embedding IPTC-NAA data in JPEG-2000 files // See http://www.adobe.com/devnet/xmp/pdfs/xmp_specification.pdf for information about embedding XMP data in JPEG-2000 files const unsigned char kJp2UuidExif[] = "JpgTiffExif->JP2"; const unsigned char kJp2UuidIptc[] = "\x33\xc7\xa4\xd2\xb8\x1d\x47\x23\xa0\xba\xf1\xa3\xe0\x97\xad\x38"; const unsigned char kJp2UuidXmp[] = "\xbe\x7a\xcf\xcb\x97\xa9\x42\xe8\x9c\x71\x99\x94\x91\xe3\xaf\xac"; // See section B.1.1 (JPEG 2000 Signature box) of JPEG-2000 specification const unsigned char Jp2Signature[12] = { 0x00, 0x00, 0x00, 0x0c, 0x6a, 
0x50, 0x20, 0x20, 0x0d, 0x0a, 0x87, 0x0a }; const unsigned char Jp2Blank[] = { 0x00,0x00,0x00,0x0c,0x6a,0x50,0x20,0x20,0x0d,0x0a,0x87,0x0a,0x00,0x00,0x00,0x14, 0x66,0x74,0x79,0x70,0x6a,0x70,0x32,0x20,0x00,0x00,0x00,0x00,0x6a,0x70,0x32,0x20, 0x00,0x00,0x00,0x2d,0x6a,0x70,0x32,0x68,0x00,0x00,0x00,0x16,0x69,0x68,0x64,0x72, 0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x07,0x07,0x00,0x00,0x00,0x00, 0x00,0x0f,0x63,0x6f,0x6c,0x72,0x01,0x00,0x00,0x00,0x00,0x00,0x11,0x00,0x00,0x00, 0x00,0x6a,0x70,0x32,0x63,0xff,0x4f,0xff,0x51,0x00,0x29,0x00,0x00,0x00,0x00,0x00, 0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07, 0x01,0x01,0xff,0x64,0x00,0x23,0x00,0x01,0x43,0x72,0x65,0x61,0x74,0x6f,0x72,0x3a, 0x20,0x4a,0x61,0x73,0x50,0x65,0x72,0x20,0x56,0x65,0x72,0x73,0x69,0x6f,0x6e,0x20, 0x31,0x2e,0x39,0x30,0x30,0x2e,0x31,0xff,0x52,0x00,0x0c,0x00,0x00,0x00,0x01,0x00, 0x05,0x04,0x04,0x00,0x01,0xff,0x5c,0x00,0x13,0x40,0x40,0x48,0x48,0x50,0x48,0x48, 0x50,0x48,0x48,0x50,0x48,0x48,0x50,0x48,0x48,0x50,0xff,0x90,0x00,0x0a,0x00,0x00, 0x00,0x00,0x00,0x2d,0x00,0x01,0xff,0x5d,0x00,0x14,0x00,0x40,0x40,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x93,0xcf,0xb4, 0x04,0x00,0x80,0x80,0x80,0x80,0x80,0xff,0xd9 }; //! @cond IGNORE struct Jp2BoxHeader { uint32_t length; uint32_t type; }; struct Jp2ImageHeaderBox { uint32_t imageHeight; uint32_t imageWidth; uint16_t componentCount; uint8_t bitsPerComponent; uint8_t compressionType; uint8_t colorspaceIsUnknown; uint8_t intellectualPropertyFlag; uint16_t compressionTypeProfile; }; struct Jp2UuidBox { uint8_t uuid[16]; }; //! @endcond // ***************************************************************************** // class member definitions namespace Exiv2 { Jp2Image::Jp2Image(BasicIo::UniquePtr io, bool create) : Image(ImageType::jp2, mdExif | mdIptc | mdXmp, std::move(io)) { if (create) { if (io_->open() == 0) { #ifdef EXIV2_DEBUG_MESSAGES std::cerr << "Exiv2::Jp2Image:: Creating JPEG2000 image to memory" << std::endl; #endif IoCloser closer(*io_); if (io_->write(Jp2Blank, sizeof(Jp2Blank)) != sizeof(Jp2Blank)) { #ifdef EXIV2_DEBUG_MESSAGES std::cerr << "Exiv2::Jp2Image:: Failed to create JPEG2000 image on memory" << std::endl; #endif } } } } // Jp2Image::Jp2Image std::string Jp2Image::mimeType() const { return "image/jp2"; } void Jp2Image::setComment(const std::string& /*comment*/) { // Todo: implement me! throw(Error(kerInvalidSettingForImage, "Image comment", "JP2")); } // Jp2Image::setComment static void lf(std::ostream& out,bool& bLF) { if ( bLF ) { out << std::endl; out.flush(); bLF = false ; } } static bool isBigEndian() { union { uint32_t i; char c[4]; } e = { 0x01000000 }; return e.c[0]?true:false; } static std::string toAscii(long n) { const char* p = (const char*) &n; std::string result; bool bBigEndian = isBigEndian(); for ( int i = 0 ; i < 4 ; i++) { result += p[ bBigEndian ? 
i : (3-i) ]; } return result; } static void boxes_check(size_t b,size_t m) { if ( b > m ) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata box maximum exceeded" << std::endl; #endif throw Error(kerCorruptedMetadata); } } void Jp2Image::readMetadata() { #ifdef EXIV2_DEBUG_MESSAGES std::cerr << "Exiv2::Jp2Image::readMetadata: Reading JPEG-2000 file " << io_->path() << std::endl; #endif if (io_->open() != 0) { throw Error(kerDataSourceOpenFailed, io_->path(), strError()); } IoCloser closer(*io_); // Ensure that this is the correct image type if (!isJp2Type(*io_, true)) { if (io_->error() || io_->eof()) throw Error(kerFailedToReadImageData); throw Error(kerNotAnImage, "JPEG-2000"); } long position = 0; Jp2BoxHeader box = {0,0}; Jp2BoxHeader subBox = {0,0}; Jp2ImageHeaderBox ihdr = {0,0,0,0,0,0,0,0}; Jp2UuidBox uuid = {{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}}; size_t boxes = 0 ; size_t boxem = 1000 ; // boxes max while (io_->read((byte*)&box, sizeof(box)) == sizeof(box)) { boxes_check(boxes++,boxem ); position = io_->tell(); box.length = getLong((byte*)&box.length, bigEndian); box.type = getLong((byte*)&box.type, bigEndian); #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata: " << "Position: " << position << " box type: " << toAscii(box.type) << " length: " << box.length << std::endl; #endif if (box.length == 0) return ; if (box.length == 1) { // FIXME. Special case. the real box size is given in another place. } switch(box.type) { case kJp2BoxTypeJp2Header: { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata: JP2Header box found" << std::endl; #endif long restore = io_->tell(); while (io_->read((byte*)&subBox, sizeof(subBox)) == sizeof(subBox) && subBox.length ) { boxes_check(boxes++, boxem) ; subBox.length = getLong((byte*)&subBox.length, bigEndian); subBox.type = getLong((byte*)&subBox.type, bigEndian); if (subBox.length > io_->size() ) { throw Error(kerCorruptedMetadata); } #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata: " << "subBox = " << toAscii(subBox.type) << " length = " << subBox.length << std::endl; #endif if(subBox.type == kJp2BoxTypeColorHeader && subBox.length != 15) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata: " << "Color data found" << std::endl; #endif const long pad = 3 ; // 3 padding bytes 2 0 0 const size_t data_length = Safe::add(subBox.length, static_cast<uint32_t>(8)); // data_length makes no sense if it is larger than the rest of the file if (data_length > io_->size() - io_->tell()) { throw Error(kerCorruptedMetadata); } DataBuf data(static_cast<long>(data_length)); io_->read(data.pData_,data.size_); const long iccLength = getULong(data.pData_+pad, bigEndian); // subtracting pad from data.size_ is safe: // size_ is at least 8 and pad = 3 if (iccLength > data.size_ - pad) { throw Error(kerCorruptedMetadata); } DataBuf icc(iccLength); ::memcpy(icc.pData_,data.pData_+pad,icc.size_); #ifdef EXIV2_DEBUG_MESSAGES const char* iccPath = "/tmp/libexiv2_jp2.icc"; FILE* f = fopen(iccPath,"wb"); if ( f ) { fwrite(icc.pData_,icc.size_,1,f); fclose(f); } std::cout << "Exiv2::Jp2Image::readMetadata: wrote iccProfile " << icc.size_<< " bytes to " << iccPath << std::endl ; #endif setIccProfile(icc); } if( subBox.type == kJp2BoxTypeImageHeader) { io_->read((byte*)&ihdr, sizeof(ihdr)); #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata: Ihdr data found" << std::endl; #endif ihdr.imageHeight = getLong((byte*)&ihdr.imageHeight, bigEndian); ihdr.imageWidth 
= getLong((byte*)&ihdr.imageWidth, bigEndian); ihdr.componentCount = getShort((byte*)&ihdr.componentCount, bigEndian); ihdr.compressionTypeProfile = getShort((byte*)&ihdr.compressionTypeProfile, bigEndian); pixelWidth_ = ihdr.imageWidth; pixelHeight_ = ihdr.imageHeight; } io_->seek(restore,BasicIo::beg); if ( io_->seek(subBox.length, Exiv2::BasicIo::cur) != 0 ) { throw Error(kerCorruptedMetadata); } restore = io_->tell(); } break; } case kJp2BoxTypeUuid: { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata: UUID box found" << std::endl; #endif if (io_->read((byte*)&uuid, sizeof(uuid)) == sizeof(uuid)) { DataBuf rawData; long bufRead; bool bIsExif = memcmp(uuid.uuid, kJp2UuidExif, sizeof(uuid))==0; bool bIsIPTC = memcmp(uuid.uuid, kJp2UuidIptc, sizeof(uuid))==0; bool bIsXMP = memcmp(uuid.uuid, kJp2UuidXmp , sizeof(uuid))==0; if(bIsExif) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata: Exif data found" << std::endl ; #endif rawData.alloc(box.length - (sizeof(box) + sizeof(uuid))); bufRead = io_->read(rawData.pData_, rawData.size_); if (io_->error()) throw Error(kerFailedToReadImageData); if (bufRead != rawData.size_) throw Error(kerInputDataReadFailed); if (rawData.size_ > 8) // "II*\0long" { // Find the position of Exif header in bytes array. long pos = ( (rawData.pData_[0] == rawData.pData_[1]) && (rawData.pData_[0]=='I' || rawData.pData_[0]=='M') ) ? 0 : -1; // #1242 Forgive having Exif\0\0 in rawData.pData_ const byte exifHeader[] = { 0x45, 0x78, 0x69, 0x66, 0x00, 0x00 }; for (long i=0 ; pos < 0 && i < rawData.size_-(long)sizeof(exifHeader) ; i++) { if (memcmp(exifHeader, &rawData.pData_[i], sizeof(exifHeader)) == 0) { pos = i+sizeof(exifHeader); #ifndef SUPPRESS_WARNINGS EXV_WARNING << "Reading non-standard UUID-EXIF_bad box in " << io_->path() << std::endl; #endif } } // If found it, store only these data at from this place. if (pos >= 0 ) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata: Exif header found at position " << pos << std::endl; #endif ByteOrder bo = TiffParser::decode(exifData(), iptcData(), xmpData(), rawData.pData_ + pos, rawData.size_ - pos); setByteOrder(bo); } } else { #ifndef SUPPRESS_WARNINGS EXV_WARNING << "Failed to decode Exif metadata." << std::endl; #endif exifData_.clear(); } } if(bIsIPTC) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata: Iptc data found" << std::endl; #endif rawData.alloc(box.length - (sizeof(box) + sizeof(uuid))); bufRead = io_->read(rawData.pData_, rawData.size_); if (io_->error()) throw Error(kerFailedToReadImageData); if (bufRead != rawData.size_) throw Error(kerInputDataReadFailed); if (IptcParser::decode(iptcData_, rawData.pData_, rawData.size_)) { #ifndef SUPPRESS_WARNINGS EXV_WARNING << "Failed to decode IPTC metadata." 
<< std::endl; #endif iptcData_.clear(); } } if(bIsXMP) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata: Xmp data found" << std::endl; #endif rawData.alloc(box.length - (uint32_t)(sizeof(box) + sizeof(uuid))); bufRead = io_->read(rawData.pData_, rawData.size_); if (io_->error()) throw Error(kerFailedToReadImageData); if (bufRead != rawData.size_) throw Error(kerInputDataReadFailed); xmpPacket_.assign(reinterpret_cast<char *>(rawData.pData_), rawData.size_); std::string::size_type idx = xmpPacket_.find_first_of('<'); if (idx != std::string::npos && idx > 0) { #ifndef SUPPRESS_WARNINGS EXV_WARNING << "Removing " << static_cast<uint32_t>(idx) << " characters from the beginning of the XMP packet" << std::endl; #endif xmpPacket_ = xmpPacket_.substr(idx); } if (xmpPacket_.size() > 0 && XmpParser::decode(xmpData_, xmpPacket_)) { #ifndef SUPPRESS_WARNINGS EXV_WARNING << "Failed to decode XMP metadata." << std::endl; #endif } } } break; } default: { break; } } // Move to the next box. io_->seek(static_cast<long>(position - sizeof(box) + box.length), BasicIo::beg); if (io_->error()) throw Error(kerFailedToReadImageData); } } // Jp2Image::readMetadata void Jp2Image::printStructure(std::ostream& out, PrintStructureOption option, int depth) { if (io_->open() != 0) throw Error(kerDataSourceOpenFailed, io_->path(), strError()); // Ensure that this is the correct image type if (!isJp2Type(*io_, false)) { if (io_->error() || io_->eof()) throw Error(kerFailedToReadImageData); throw Error(kerNotAJpeg); } bool bPrint = option == kpsBasic || option == kpsRecursive; bool bRecursive = option == kpsRecursive; bool bICC = option == kpsIccProfile; bool bXMP = option == kpsXMP; bool bIPTCErase = option == kpsIptcErase; if (bPrint) { out << "STRUCTURE OF JPEG2000 FILE: " << io_->path() << std::endl; out << " address | length | box | data" << std::endl; } if ( bPrint || bXMP || bICC || bIPTCErase ) { long position = 0; Jp2BoxHeader box = {1,1}; Jp2BoxHeader subBox = {1,1}; Jp2UuidBox uuid = {{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}}; bool bLF = false; while (box.length && box.type != kJp2BoxTypeClose && io_->read((byte*)&box, sizeof(box)) == sizeof(box)) { position = io_->tell(); box.length = getLong((byte*)&box.length, bigEndian); box.type = getLong((byte*)&box.type, bigEndian); enforce(box.length <= io_->size()-io_->tell() , Exiv2::kerCorruptedMetadata); if (bPrint) { out << Internal::stringFormat("%8ld | %8ld | ", position - sizeof(box), (size_t)box.length) << toAscii(box.type) << " | "; bLF = true; if (box.type == kJp2BoxTypeClose) lf(out, bLF); } if (box.type == kJp2BoxTypeClose) break; switch (box.type) { case kJp2BoxTypeJp2Header: { lf(out, bLF); while (io_->read((byte*)&subBox, sizeof(subBox)) == sizeof(subBox) && io_->tell() < position + (long)box.length) // don't read beyond the box! 
{ int address = io_->tell() - sizeof(subBox); subBox.length = getLong((byte*)&subBox.length, bigEndian); subBox.type = getLong((byte*)&subBox.type, bigEndian); if (subBox.length < sizeof(box) || subBox.length > io_->size() - io_->tell()) { throw Error(kerCorruptedMetadata); } DataBuf data(subBox.length - sizeof(box)); io_->read(data.pData_, data.size_); if (bPrint) { out << Internal::stringFormat("%8ld | %8ld | sub:", (size_t)address, (size_t)subBox.length) << toAscii(subBox.type) << " | " << Internal::binaryToString(makeSlice(data, 0, std::min(30l, data.size_))); bLF = true; } if (subBox.type == kJp2BoxTypeColorHeader) { long pad = 3; // don't know why there are 3 padding bytes if (bPrint) { out << " | pad:"; for (int i = 0; i < 3; i++) out << " " << (int)data.pData_[i]; } long iccLength = getULong(data.pData_ + pad, bigEndian); if (bPrint) { out << " | iccLength:" << iccLength; } if (bICC) { out.write((const char*)data.pData_ + pad, iccLength); } } lf(out, bLF); } } break; case kJp2BoxTypeUuid: { if (io_->read((byte*)&uuid, sizeof(uuid)) == sizeof(uuid)) { bool bIsExif = memcmp(uuid.uuid, kJp2UuidExif, sizeof(uuid)) == 0; bool bIsIPTC = memcmp(uuid.uuid, kJp2UuidIptc, sizeof(uuid)) == 0; bool bIsXMP = memcmp(uuid.uuid, kJp2UuidXmp, sizeof(uuid)) == 0; bool bUnknown = !(bIsExif || bIsIPTC || bIsXMP); if (bPrint) { if (bIsExif) out << "Exif: "; if (bIsIPTC) out << "IPTC: "; if (bIsXMP) out << "XMP : "; if (bUnknown) out << "????: "; } DataBuf rawData; rawData.alloc(box.length - sizeof(uuid) - sizeof(box)); long bufRead = io_->read(rawData.pData_, rawData.size_); if (io_->error()) throw Error(kerFailedToReadImageData); if (bufRead != rawData.size_) throw Error(kerInputDataReadFailed); if (bPrint) { out << Internal::binaryToString( makeSlice(rawData, 0, rawData.size_>40?40:rawData.size_)); out.flush(); } lf(out, bLF); if (bIsExif && bRecursive && rawData.size_ > 8) { // "II*\0long" if ((rawData.pData_[0] == rawData.pData_[1]) && (rawData.pData_[0] == 'I' || rawData.pData_[0] == 'M')) { BasicIo::UniquePtr p = BasicIo::UniquePtr(new MemIo(rawData.pData_, rawData.size_)); printTiffStructure(*p, out, option, depth); } } if (bIsIPTC && bRecursive) { IptcData::printStructure(out, makeSlice(rawData.pData_, 0, rawData.size_), depth); } if (bIsXMP && bXMP) { out.write((const char*)rawData.pData_, rawData.size_); } } } break; default: break; } // Move to the next box. io_->seek(static_cast<long>(position - sizeof(box) + box.length), BasicIo::beg); if (io_->error()) throw Error(kerFailedToReadImageData); if (bPrint) lf(out, bLF); } } } // JpegBase::printStructure void Jp2Image::writeMetadata() { if (io_->open() != 0) { throw Error(kerDataSourceOpenFailed, io_->path(), strError()); } IoCloser closer(*io_); BasicIo::UniquePtr tempIo(new MemIo); assert (tempIo.get() != 0); doWriteMetadata(*tempIo); // may throw io_->close(); io_->transfer(*tempIo); // may throw } // Jp2Image::writeMetadata #ifdef __clang__ // ignore cast align errors. dataBuf.pData_ is allocated by malloc() and 4 (or 8 byte aligned). #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wcast-align" #endif void Jp2Image::encodeJp2Header(const DataBuf& boxBuf,DataBuf& outBuf) { DataBuf output(boxBuf.size_ + iccProfile_.size_ + 100); // allocate sufficient space int outlen = sizeof(Jp2BoxHeader) ; // now many bytes have we written to output? int inlen = sizeof(Jp2BoxHeader) ; // how many bytes have we read from boxBuf? 
Jp2BoxHeader* pBox = (Jp2BoxHeader*) boxBuf.pData_; int32_t length = getLong((byte*)&pBox->length, bigEndian); int32_t count = sizeof (Jp2BoxHeader); char* p = (char*) boxBuf.pData_; bool bWroteColor = false ; while ( count < length || !bWroteColor ) { Jp2BoxHeader* pSubBox = (Jp2BoxHeader*) (p+count) ; // copy data. pointer could be into a memory mapped file which we will decode! Jp2BoxHeader subBox ; memcpy(&subBox,pSubBox,sizeof(subBox)); Jp2BoxHeader newBox = subBox; if ( count < length ) { subBox.length = getLong((byte*)&subBox.length, bigEndian); subBox.type = getLong((byte*)&subBox.type , bigEndian); #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Jp2Image::encodeJp2Header subbox: "<< toAscii(subBox.type) << " length = " << subBox.length << std::endl; #endif count += subBox.length; newBox.type = subBox.type; } else { subBox.length=0; newBox.type = kJp2BoxTypeColorHeader; count = length; } int32_t newlen = subBox.length; if ( newBox.type == kJp2BoxTypeColorHeader ) { bWroteColor = true ; if ( ! iccProfileDefined() ) { const char* pad = "\x01\x00\x00\x00\x00\x00\x10\x00\x00\x05\x1cuuid"; uint32_t psize = 15; newlen = sizeof(newBox) + psize ; ul2Data((byte*)&newBox.length,psize ,bigEndian); ul2Data((byte*)&newBox.type ,newBox.type,bigEndian); ::memcpy(output.pData_+outlen ,&newBox ,sizeof(newBox)); ::memcpy(output.pData_+outlen+sizeof(newBox) ,pad ,psize ); } else { const char* pad = "\x02\x00\x00"; uint32_t psize = 3; newlen = sizeof(newBox) + psize + iccProfile_.size_; ul2Data((byte*)&newBox.length,newlen,bigEndian); ul2Data((byte*)&newBox.type,newBox.type,bigEndian); ::memcpy(output.pData_+outlen ,&newBox ,sizeof(newBox) ); ::memcpy(output.pData_+outlen+sizeof(newBox) , pad ,psize ); ::memcpy(output.pData_+outlen+sizeof(newBox)+psize,iccProfile_.pData_,iccProfile_.size_); } } else { ::memcpy(output.pData_+outlen,boxBuf.pData_+inlen,subBox.length); } outlen += newlen; inlen += subBox.length; } // allocate the correct number of bytes, copy the data and update the box header outBuf.alloc(outlen); ::memcpy(outBuf.pData_,output.pData_,outlen); pBox = (Jp2BoxHeader*) outBuf.pData_; ul2Data((byte*)&pBox->type,kJp2BoxTypeJp2Header,bigEndian); ul2Data((byte*)&pBox->length,outlen,bigEndian); } // Jp2Image::encodeJp2Header #ifdef __clang__ #pragma clang diagnostic pop #endif void Jp2Image::doWriteMetadata(BasicIo& outIo) { if (!io_->isopen()) throw Error(kerInputDataReadFailed); if (!outIo.isopen()) throw Error(kerImageWriteFailed); #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: Writing JPEG-2000 file " << io_->path() << std::endl; std::cout << "Exiv2::Jp2Image::doWriteMetadata: tmp file created " << outIo.path() << std::endl; #endif // Ensure that this is the correct image type if (!isJp2Type(*io_, true)) { if (io_->error() || io_->eof()) throw Error(kerInputDataReadFailed); throw Error(kerNoImageInInputData); } // Write JPEG2000 Signature. if (outIo.write(Jp2Signature, 12) != 12) throw Error(kerImageWriteFailed); Jp2BoxHeader box = {0,0}; byte boxDataSize[4]; byte boxUUIDtype[4]; DataBuf bheaderBuf(8); // Box header : 4 bytes (data size) + 4 bytes (box type). // FIXME: Andreas, why the loop do not stop when EOF is taken from _io. The loop go out by an exception // generated by a zero size data read. while(io_->tell() < (long) io_->size()) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: Position: " << io_->tell() << " / " << io_->size() << std::endl; #endif // Read chunk header. 
std::memset(bheaderBuf.pData_, 0x00, bheaderBuf.size_); long bufRead = io_->read(bheaderBuf.pData_, bheaderBuf.size_); if (io_->error()) throw Error(kerFailedToReadImageData); if (bufRead != bheaderBuf.size_) throw Error(kerInputDataReadFailed); // Decode box header. box.length = getLong(bheaderBuf.pData_, bigEndian); box.type = getLong(bheaderBuf.pData_ + 4, bigEndian); #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: box type: " << toAscii(box.type) << " length: " << box.length << std::endl; #endif if (box.length == 0) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: Null Box size has been found. " "This is the last box of file." << std::endl; #endif box.length = (uint32_t) (io_->size() - io_->tell() + 8); } if (box.length < 8) { // box is broken, so there is nothing we can do here throw Error(kerCorruptedMetadata); } // Read whole box : Box header + Box data (not fixed size - can be null). DataBuf boxBuf(box.length); // Box header (8 bytes) + box data. memcpy(boxBuf.pData_, bheaderBuf.pData_, 8); // Copy header. bufRead = io_->read(boxBuf.pData_ + 8, box.length - 8); // Extract box data. if (io_->error()) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: Error reading source file" << std::endl; #endif throw Error(kerFailedToReadImageData); } if (bufRead != (long)(box.length - 8)) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: Cannot read source file data" << std::endl; #endif throw Error(kerInputDataReadFailed); } switch(box.type) { case kJp2BoxTypeJp2Header: { DataBuf newBuf; encodeJp2Header(boxBuf,newBuf); #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: Write JP2Header box (length: " << box.length << ")" << std::endl; #endif if (outIo.write(newBuf.pData_, newBuf.size_) != newBuf.size_) throw Error(kerImageWriteFailed); // Write all updated metadata here, just after JP2Header. 
if (exifData_.count() > 0) { // Update Exif data to a new UUID box Blob blob; ExifParser::encode(blob, littleEndian, exifData_); if (blob.size()) { DataBuf rawExif(static_cast<long>(blob.size())); memcpy(rawExif.pData_, &blob[0], blob.size()); DataBuf boxData(8 + 16 + rawExif.size_); ul2Data(boxDataSize, boxData.size_, Exiv2::bigEndian); ul2Data(boxUUIDtype, kJp2BoxTypeUuid, Exiv2::bigEndian); memcpy(boxData.pData_, boxDataSize, 4); memcpy(boxData.pData_ + 4, boxUUIDtype, 4); memcpy(boxData.pData_ + 8, kJp2UuidExif, 16); memcpy(boxData.pData_ + 8 + 16, rawExif.pData_, rawExif.size_); #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: Write box with Exif metadata (length: " << boxData.size_ << std::endl; #endif if (outIo.write(boxData.pData_, boxData.size_) != boxData.size_) throw Error(kerImageWriteFailed); } } if (iptcData_.count() > 0) { // Update Iptc data to a new UUID box DataBuf rawIptc = IptcParser::encode(iptcData_); if (rawIptc.size_ > 0) { DataBuf boxData(8 + 16 + rawIptc.size_); ul2Data(boxDataSize, boxData.size_, Exiv2::bigEndian); ul2Data(boxUUIDtype, kJp2BoxTypeUuid, Exiv2::bigEndian); memcpy(boxData.pData_, boxDataSize, 4); memcpy(boxData.pData_ + 4, boxUUIDtype, 4); memcpy(boxData.pData_ + 8, kJp2UuidIptc, 16); memcpy(boxData.pData_ + 8 + 16, rawIptc.pData_, rawIptc.size_); #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: Write box with Iptc metadata (length: " << boxData.size_ << std::endl; #endif if (outIo.write(boxData.pData_, boxData.size_) != boxData.size_) throw Error(kerImageWriteFailed); } } if (writeXmpFromPacket() == false) { if (XmpParser::encode(xmpPacket_, xmpData_) > 1) { #ifndef SUPPRESS_WARNINGS EXV_ERROR << "Failed to encode XMP metadata." << std::endl; #endif } } if (xmpPacket_.size() > 0) { // Update Xmp data to a new UUID box DataBuf xmp(reinterpret_cast<const byte*>(xmpPacket_.data()), static_cast<long>(xmpPacket_.size())); DataBuf boxData(8 + 16 + xmp.size_); ul2Data(boxDataSize, boxData.size_, Exiv2::bigEndian); ul2Data(boxUUIDtype, kJp2BoxTypeUuid, Exiv2::bigEndian); memcpy(boxData.pData_, boxDataSize, 4); memcpy(boxData.pData_ + 4, boxUUIDtype, 4); memcpy(boxData.pData_ + 8, kJp2UuidXmp, 16); memcpy(boxData.pData_ + 8 + 16, xmp.pData_, xmp.size_); #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: Write box with XMP metadata (length: " << boxData.size_ << ")" << std::endl; #endif if (outIo.write(boxData.pData_, boxData.size_) != boxData.size_) throw Error(kerImageWriteFailed); } break; } case kJp2BoxTypeUuid: { if(memcmp(boxBuf.pData_ + 8, kJp2UuidExif, 16) == 0) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: strip Exif Uuid box" << std::endl; #endif } else if(memcmp(boxBuf.pData_ + 8, kJp2UuidIptc, 16) == 0) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: strip Iptc Uuid box" << std::endl; #endif } else if(memcmp(boxBuf.pData_ + 8, kJp2UuidXmp, 16) == 0) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: strip Xmp Uuid box" << std::endl; #endif } else { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: write Uuid box (length: " << box.length << ")" << std::endl; #endif if (outIo.write(boxBuf.pData_, boxBuf.size_) != boxBuf.size_) throw Error(kerImageWriteFailed); } break; } default: { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: write box (length: " << box.length << ")" << std::endl; #endif if (outIo.write(boxBuf.pData_, 
boxBuf.size_) != boxBuf.size_) throw Error(kerImageWriteFailed); break; } } } #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: EOF" << std::endl; #endif } // Jp2Image::doWriteMetadata // ************************************************************************* // free functions Image::UniquePtr newJp2Instance(BasicIo::UniquePtr io, bool create) { Image::UniquePtr image(new Jp2Image(std::move(io), create)); if (!image->good()) { image.reset(); } return image; } bool isJp2Type(BasicIo& iIo, bool advance) { const int32_t len = 12; byte buf[len]; iIo.read(buf, len); if (iIo.error() || iIo.eof()) { return false; } bool matched = (memcmp(buf, Jp2Signature, len) == 0); if (!advance || !matched) { iIo.seek(-len, BasicIo::cur); } return matched; } } // namespace Exiv2
null
// ***************************************************************** -*- C++ -*- /* * Copyright (C) 2004-2021 Exiv2 authors * This program is part of the Exiv2 distribution. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, 5th Floor, Boston, MA 02110-1301 USA. */ // ***************************************************************************** // included header files #include "config.h" #include "jp2image.hpp" #include "tiffimage.hpp" #include "image.hpp" #include "image_int.hpp" #include "basicio.hpp" #include "enforce.hpp" #include "error.hpp" #include "futils.hpp" #include "types.hpp" #include "safe_op.hpp" // + standard includes #include <string> #include <cstring> #include <iostream> #include <cassert> #include <cstdio> // JPEG-2000 box types const uint32_t kJp2BoxTypeJp2Header = 0x6a703268; // 'jp2h' const uint32_t kJp2BoxTypeImageHeader = 0x69686472; // 'ihdr' const uint32_t kJp2BoxTypeColorHeader = 0x636f6c72; // 'colr' const uint32_t kJp2BoxTypeUuid = 0x75756964; // 'uuid' const uint32_t kJp2BoxTypeClose = 0x6a703263; // 'jp2c' // from openjpeg-2.1.2/src/lib/openjp2/jp2.h /*#define JPIP_JPIP 0x6a706970*/ #define JP2_JP 0x6a502020 /**< JPEG 2000 signature box */ #define JP2_FTYP 0x66747970 /**< File type box */ #define JP2_JP2H 0x6a703268 /**< JP2 header box (super-box) */ #define JP2_IHDR 0x69686472 /**< Image header box */ #define JP2_COLR 0x636f6c72 /**< Colour specification box */ #define JP2_JP2C 0x6a703263 /**< Contiguous codestream box */ #define JP2_URL 0x75726c20 /**< Data entry URL box */ #define JP2_PCLR 0x70636c72 /**< Palette box */ #define JP2_CMAP 0x636d6170 /**< Component Mapping box */ #define JP2_CDEF 0x63646566 /**< Channel Definition box */ #define JP2_DTBL 0x6474626c /**< Data Reference box */ #define JP2_BPCC 0x62706363 /**< Bits per component box */ #define JP2_JP2 0x6a703220 /**< File type fields */ /* For the future */ /* #define JP2_RES 0x72657320 */ /**< Resolution box (super-box) */ /* #define JP2_JP2I 0x6a703269 */ /**< Intellectual property box */ /* #define JP2_XML 0x786d6c20 */ /**< XML box */ /* #define JP2_UUID 0x75756994 */ /**< UUID box */ /* #define JP2_UINF 0x75696e66 */ /**< UUID info box (super-box) */ /* #define JP2_ULST 0x756c7374 */ /**< UUID list box */ // JPEG-2000 UUIDs for embedded metadata // // See http://www.jpeg.org/public/wg1n2600.doc for information about embedding IPTC-NAA data in JPEG-2000 files // See http://www.adobe.com/devnet/xmp/pdfs/xmp_specification.pdf for information about embedding XMP data in JPEG-2000 files const unsigned char kJp2UuidExif[] = "JpgTiffExif->JP2"; const unsigned char kJp2UuidIptc[] = "\x33\xc7\xa4\xd2\xb8\x1d\x47\x23\xa0\xba\xf1\xa3\xe0\x97\xad\x38"; const unsigned char kJp2UuidXmp[] = "\xbe\x7a\xcf\xcb\x97\xa9\x42\xe8\x9c\x71\x99\x94\x91\xe3\xaf\xac"; // See section B.1.1 (JPEG 2000 Signature box) of JPEG-2000 specification const unsigned char Jp2Signature[12] = { 0x00, 0x00, 0x00, 0x0c, 0x6a, 
0x50, 0x20, 0x20, 0x0d, 0x0a, 0x87, 0x0a }; const unsigned char Jp2Blank[] = { 0x00,0x00,0x00,0x0c,0x6a,0x50,0x20,0x20,0x0d,0x0a,0x87,0x0a,0x00,0x00,0x00,0x14, 0x66,0x74,0x79,0x70,0x6a,0x70,0x32,0x20,0x00,0x00,0x00,0x00,0x6a,0x70,0x32,0x20, 0x00,0x00,0x00,0x2d,0x6a,0x70,0x32,0x68,0x00,0x00,0x00,0x16,0x69,0x68,0x64,0x72, 0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x07,0x07,0x00,0x00,0x00,0x00, 0x00,0x0f,0x63,0x6f,0x6c,0x72,0x01,0x00,0x00,0x00,0x00,0x00,0x11,0x00,0x00,0x00, 0x00,0x6a,0x70,0x32,0x63,0xff,0x4f,0xff,0x51,0x00,0x29,0x00,0x00,0x00,0x00,0x00, 0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x01,0x00,0x00,0x00,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x07, 0x01,0x01,0xff,0x64,0x00,0x23,0x00,0x01,0x43,0x72,0x65,0x61,0x74,0x6f,0x72,0x3a, 0x20,0x4a,0x61,0x73,0x50,0x65,0x72,0x20,0x56,0x65,0x72,0x73,0x69,0x6f,0x6e,0x20, 0x31,0x2e,0x39,0x30,0x30,0x2e,0x31,0xff,0x52,0x00,0x0c,0x00,0x00,0x00,0x01,0x00, 0x05,0x04,0x04,0x00,0x01,0xff,0x5c,0x00,0x13,0x40,0x40,0x48,0x48,0x50,0x48,0x48, 0x50,0x48,0x48,0x50,0x48,0x48,0x50,0x48,0x48,0x50,0xff,0x90,0x00,0x0a,0x00,0x00, 0x00,0x00,0x00,0x2d,0x00,0x01,0xff,0x5d,0x00,0x14,0x00,0x40,0x40,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x93,0xcf,0xb4, 0x04,0x00,0x80,0x80,0x80,0x80,0x80,0xff,0xd9 }; //! @cond IGNORE struct Jp2BoxHeader { uint32_t length; uint32_t type; }; struct Jp2ImageHeaderBox { uint32_t imageHeight; uint32_t imageWidth; uint16_t componentCount; uint8_t bitsPerComponent; uint8_t compressionType; uint8_t colorspaceIsUnknown; uint8_t intellectualPropertyFlag; uint16_t compressionTypeProfile; }; struct Jp2UuidBox { uint8_t uuid[16]; }; //! @endcond // ***************************************************************************** // class member definitions namespace Exiv2 { Jp2Image::Jp2Image(BasicIo::UniquePtr io, bool create) : Image(ImageType::jp2, mdExif | mdIptc | mdXmp, std::move(io)) { if (create) { if (io_->open() == 0) { #ifdef EXIV2_DEBUG_MESSAGES std::cerr << "Exiv2::Jp2Image:: Creating JPEG2000 image to memory" << std::endl; #endif IoCloser closer(*io_); if (io_->write(Jp2Blank, sizeof(Jp2Blank)) != sizeof(Jp2Blank)) { #ifdef EXIV2_DEBUG_MESSAGES std::cerr << "Exiv2::Jp2Image:: Failed to create JPEG2000 image on memory" << std::endl; #endif } } } } // Jp2Image::Jp2Image std::string Jp2Image::mimeType() const { return "image/jp2"; } void Jp2Image::setComment(const std::string& /*comment*/) { // Todo: implement me! throw(Error(kerInvalidSettingForImage, "Image comment", "JP2")); } // Jp2Image::setComment static void lf(std::ostream& out,bool& bLF) { if ( bLF ) { out << std::endl; out.flush(); bLF = false ; } } static bool isBigEndian() { union { uint32_t i; char c[4]; } e = { 0x01000000 }; return e.c[0]?true:false; } static std::string toAscii(long n) { const char* p = (const char*) &n; std::string result; bool bBigEndian = isBigEndian(); for ( int i = 0 ; i < 4 ; i++) { result += p[ bBigEndian ? 
i : (3-i) ]; } return result; } static void boxes_check(size_t b,size_t m) { if ( b > m ) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata box maximum exceeded" << std::endl; #endif throw Error(kerCorruptedMetadata); } } void Jp2Image::readMetadata() { #ifdef EXIV2_DEBUG_MESSAGES std::cerr << "Exiv2::Jp2Image::readMetadata: Reading JPEG-2000 file " << io_->path() << std::endl; #endif if (io_->open() != 0) { throw Error(kerDataSourceOpenFailed, io_->path(), strError()); } IoCloser closer(*io_); // Ensure that this is the correct image type if (!isJp2Type(*io_, true)) { if (io_->error() || io_->eof()) throw Error(kerFailedToReadImageData); throw Error(kerNotAnImage, "JPEG-2000"); } long position = 0; Jp2BoxHeader box = {0,0}; Jp2BoxHeader subBox = {0,0}; Jp2ImageHeaderBox ihdr = {0,0,0,0,0,0,0,0}; Jp2UuidBox uuid = {{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}}; size_t boxes = 0 ; size_t boxem = 1000 ; // boxes max while (io_->read((byte*)&box, sizeof(box)) == sizeof(box)) { boxes_check(boxes++,boxem ); position = io_->tell(); box.length = getLong((byte*)&box.length, bigEndian); box.type = getLong((byte*)&box.type, bigEndian); #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata: " << "Position: " << position << " box type: " << toAscii(box.type) << " length: " << box.length << std::endl; #endif if (box.length == 0) return ; if (box.length == 1) { // FIXME. Special case. the real box size is given in another place. } switch(box.type) { case kJp2BoxTypeJp2Header: { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata: JP2Header box found" << std::endl; #endif long restore = io_->tell(); while (io_->read((byte*)&subBox, sizeof(subBox)) == sizeof(subBox) && subBox.length ) { boxes_check(boxes++, boxem) ; subBox.length = getLong((byte*)&subBox.length, bigEndian); subBox.type = getLong((byte*)&subBox.type, bigEndian); if (subBox.length > io_->size() ) { throw Error(kerCorruptedMetadata); } #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata: " << "subBox = " << toAscii(subBox.type) << " length = " << subBox.length << std::endl; #endif if(subBox.type == kJp2BoxTypeColorHeader && subBox.length != 15) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata: " << "Color data found" << std::endl; #endif const long pad = 3 ; // 3 padding bytes 2 0 0 const size_t data_length = Safe::add(subBox.length, static_cast<uint32_t>(8)); // data_length makes no sense if it is larger than the rest of the file if (data_length > io_->size() - io_->tell()) { throw Error(kerCorruptedMetadata); } DataBuf data(static_cast<long>(data_length)); io_->read(data.pData_,data.size_); const long iccLength = getULong(data.pData_+pad, bigEndian); // subtracting pad from data.size_ is safe: // size_ is at least 8 and pad = 3 if (iccLength > data.size_ - pad) { throw Error(kerCorruptedMetadata); } DataBuf icc(iccLength); ::memcpy(icc.pData_,data.pData_+pad,icc.size_); #ifdef EXIV2_DEBUG_MESSAGES const char* iccPath = "/tmp/libexiv2_jp2.icc"; FILE* f = fopen(iccPath,"wb"); if ( f ) { fwrite(icc.pData_,icc.size_,1,f); fclose(f); } std::cout << "Exiv2::Jp2Image::readMetadata: wrote iccProfile " << icc.size_<< " bytes to " << iccPath << std::endl ; #endif setIccProfile(icc); } if( subBox.type == kJp2BoxTypeImageHeader) { io_->read((byte*)&ihdr, sizeof(ihdr)); #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata: Ihdr data found" << std::endl; #endif ihdr.imageHeight = getLong((byte*)&ihdr.imageHeight, bigEndian); ihdr.imageWidth 
= getLong((byte*)&ihdr.imageWidth, bigEndian); ihdr.componentCount = getShort((byte*)&ihdr.componentCount, bigEndian); ihdr.compressionTypeProfile = getShort((byte*)&ihdr.compressionTypeProfile, bigEndian); pixelWidth_ = ihdr.imageWidth; pixelHeight_ = ihdr.imageHeight; } io_->seek(restore,BasicIo::beg); if ( io_->seek(subBox.length, Exiv2::BasicIo::cur) != 0 ) { throw Error(kerCorruptedMetadata); } restore = io_->tell(); } break; } case kJp2BoxTypeUuid: { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata: UUID box found" << std::endl; #endif if (io_->read((byte*)&uuid, sizeof(uuid)) == sizeof(uuid)) { DataBuf rawData; long bufRead; bool bIsExif = memcmp(uuid.uuid, kJp2UuidExif, sizeof(uuid))==0; bool bIsIPTC = memcmp(uuid.uuid, kJp2UuidIptc, sizeof(uuid))==0; bool bIsXMP = memcmp(uuid.uuid, kJp2UuidXmp , sizeof(uuid))==0; if(bIsExif) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata: Exif data found" << std::endl ; #endif rawData.alloc(box.length - (sizeof(box) + sizeof(uuid))); bufRead = io_->read(rawData.pData_, rawData.size_); if (io_->error()) throw Error(kerFailedToReadImageData); if (bufRead != rawData.size_) throw Error(kerInputDataReadFailed); if (rawData.size_ > 8) // "II*\0long" { // Find the position of Exif header in bytes array. long pos = ( (rawData.pData_[0] == rawData.pData_[1]) && (rawData.pData_[0]=='I' || rawData.pData_[0]=='M') ) ? 0 : -1; // #1242 Forgive having Exif\0\0 in rawData.pData_ const byte exifHeader[] = { 0x45, 0x78, 0x69, 0x66, 0x00, 0x00 }; for (long i=0 ; pos < 0 && i < rawData.size_-(long)sizeof(exifHeader) ; i++) { if (memcmp(exifHeader, &rawData.pData_[i], sizeof(exifHeader)) == 0) { pos = i+sizeof(exifHeader); #ifndef SUPPRESS_WARNINGS EXV_WARNING << "Reading non-standard UUID-EXIF_bad box in " << io_->path() << std::endl; #endif } } // If found it, store only these data at from this place. if (pos >= 0 ) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata: Exif header found at position " << pos << std::endl; #endif ByteOrder bo = TiffParser::decode(exifData(), iptcData(), xmpData(), rawData.pData_ + pos, rawData.size_ - pos); setByteOrder(bo); } } else { #ifndef SUPPRESS_WARNINGS EXV_WARNING << "Failed to decode Exif metadata." << std::endl; #endif exifData_.clear(); } } if(bIsIPTC) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata: Iptc data found" << std::endl; #endif rawData.alloc(box.length - (sizeof(box) + sizeof(uuid))); bufRead = io_->read(rawData.pData_, rawData.size_); if (io_->error()) throw Error(kerFailedToReadImageData); if (bufRead != rawData.size_) throw Error(kerInputDataReadFailed); if (IptcParser::decode(iptcData_, rawData.pData_, rawData.size_)) { #ifndef SUPPRESS_WARNINGS EXV_WARNING << "Failed to decode IPTC metadata." 
<< std::endl; #endif iptcData_.clear(); } } if(bIsXMP) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::readMetadata: Xmp data found" << std::endl; #endif rawData.alloc(box.length - (uint32_t)(sizeof(box) + sizeof(uuid))); bufRead = io_->read(rawData.pData_, rawData.size_); if (io_->error()) throw Error(kerFailedToReadImageData); if (bufRead != rawData.size_) throw Error(kerInputDataReadFailed); xmpPacket_.assign(reinterpret_cast<char *>(rawData.pData_), rawData.size_); std::string::size_type idx = xmpPacket_.find_first_of('<'); if (idx != std::string::npos && idx > 0) { #ifndef SUPPRESS_WARNINGS EXV_WARNING << "Removing " << static_cast<uint32_t>(idx) << " characters from the beginning of the XMP packet" << std::endl; #endif xmpPacket_ = xmpPacket_.substr(idx); } if (xmpPacket_.size() > 0 && XmpParser::decode(xmpData_, xmpPacket_)) { #ifndef SUPPRESS_WARNINGS EXV_WARNING << "Failed to decode XMP metadata." << std::endl; #endif } } } break; } default: { break; } } // Move to the next box. io_->seek(static_cast<long>(position - sizeof(box) + box.length), BasicIo::beg); if (io_->error()) throw Error(kerFailedToReadImageData); } } // Jp2Image::readMetadata void Jp2Image::printStructure(std::ostream& out, PrintStructureOption option, int depth) { if (io_->open() != 0) throw Error(kerDataSourceOpenFailed, io_->path(), strError()); // Ensure that this is the correct image type if (!isJp2Type(*io_, false)) { if (io_->error() || io_->eof()) throw Error(kerFailedToReadImageData); throw Error(kerNotAJpeg); } bool bPrint = option == kpsBasic || option == kpsRecursive; bool bRecursive = option == kpsRecursive; bool bICC = option == kpsIccProfile; bool bXMP = option == kpsXMP; bool bIPTCErase = option == kpsIptcErase; if (bPrint) { out << "STRUCTURE OF JPEG2000 FILE: " << io_->path() << std::endl; out << " address | length | box | data" << std::endl; } if ( bPrint || bXMP || bICC || bIPTCErase ) { long position = 0; Jp2BoxHeader box = {1,1}; Jp2BoxHeader subBox = {1,1}; Jp2UuidBox uuid = {{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}}; bool bLF = false; while (box.length && box.type != kJp2BoxTypeClose && io_->read((byte*)&box, sizeof(box)) == sizeof(box)) { position = io_->tell(); box.length = getLong((byte*)&box.length, bigEndian); box.type = getLong((byte*)&box.type, bigEndian); enforce(box.length <= io_->size()-io_->tell() , Exiv2::kerCorruptedMetadata); if (bPrint) { out << Internal::stringFormat("%8ld | %8ld | ", position - sizeof(box), (size_t)box.length) << toAscii(box.type) << " | "; bLF = true; if (box.type == kJp2BoxTypeClose) lf(out, bLF); } if (box.type == kJp2BoxTypeClose) break; switch (box.type) { case kJp2BoxTypeJp2Header: { lf(out, bLF); while (io_->read((byte*)&subBox, sizeof(subBox)) == sizeof(subBox) && io_->tell() < position + (long)box.length) // don't read beyond the box! 
{ int address = io_->tell() - sizeof(subBox); subBox.length = getLong((byte*)&subBox.length, bigEndian); subBox.type = getLong((byte*)&subBox.type, bigEndian); if (subBox.length < sizeof(box) || subBox.length > io_->size() - io_->tell()) { throw Error(kerCorruptedMetadata); } DataBuf data(subBox.length - sizeof(box)); io_->read(data.pData_, data.size_); if (bPrint) { out << Internal::stringFormat("%8ld | %8ld | sub:", (size_t)address, (size_t)subBox.length) << toAscii(subBox.type) << " | " << Internal::binaryToString(makeSlice(data, 0, std::min(30l, data.size_))); bLF = true; } if (subBox.type == kJp2BoxTypeColorHeader) { long pad = 3; // don't know why there are 3 padding bytes if (bPrint) { out << " | pad:"; for (int i = 0; i < 3; i++) out << " " << (int)data.pData_[i]; } long iccLength = getULong(data.pData_ + pad, bigEndian); if (bPrint) { out << " | iccLength:" << iccLength; } if (bICC) { out.write((const char*)data.pData_ + pad, iccLength); } } lf(out, bLF); } } break; case kJp2BoxTypeUuid: { if (io_->read((byte*)&uuid, sizeof(uuid)) == sizeof(uuid)) { bool bIsExif = memcmp(uuid.uuid, kJp2UuidExif, sizeof(uuid)) == 0; bool bIsIPTC = memcmp(uuid.uuid, kJp2UuidIptc, sizeof(uuid)) == 0; bool bIsXMP = memcmp(uuid.uuid, kJp2UuidXmp, sizeof(uuid)) == 0; bool bUnknown = !(bIsExif || bIsIPTC || bIsXMP); if (bPrint) { if (bIsExif) out << "Exif: "; if (bIsIPTC) out << "IPTC: "; if (bIsXMP) out << "XMP : "; if (bUnknown) out << "????: "; } DataBuf rawData; rawData.alloc(box.length - sizeof(uuid) - sizeof(box)); long bufRead = io_->read(rawData.pData_, rawData.size_); if (io_->error()) throw Error(kerFailedToReadImageData); if (bufRead != rawData.size_) throw Error(kerInputDataReadFailed); if (bPrint) { out << Internal::binaryToString( makeSlice(rawData, 0, rawData.size_>40?40:rawData.size_)); out.flush(); } lf(out, bLF); if (bIsExif && bRecursive && rawData.size_ > 8) { // "II*\0long" if ((rawData.pData_[0] == rawData.pData_[1]) && (rawData.pData_[0] == 'I' || rawData.pData_[0] == 'M')) { BasicIo::UniquePtr p = BasicIo::UniquePtr(new MemIo(rawData.pData_, rawData.size_)); printTiffStructure(*p, out, option, depth); } } if (bIsIPTC && bRecursive) { IptcData::printStructure(out, makeSlice(rawData.pData_, 0, rawData.size_), depth); } if (bIsXMP && bXMP) { out.write((const char*)rawData.pData_, rawData.size_); } } } break; default: break; } // Move to the next box. io_->seek(static_cast<long>(position - sizeof(box) + box.length), BasicIo::beg); if (io_->error()) throw Error(kerFailedToReadImageData); if (bPrint) lf(out, bLF); } } } // JpegBase::printStructure void Jp2Image::writeMetadata() { if (io_->open() != 0) { throw Error(kerDataSourceOpenFailed, io_->path(), strError()); } IoCloser closer(*io_); BasicIo::UniquePtr tempIo(new MemIo); assert (tempIo.get() != 0); doWriteMetadata(*tempIo); // may throw io_->close(); io_->transfer(*tempIo); // may throw } // Jp2Image::writeMetadata #ifdef __clang__ // ignore cast align errors. dataBuf.pData_ is allocated by malloc() and 4 (or 8 byte aligned). #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wcast-align" #endif void Jp2Image::encodeJp2Header(const DataBuf& boxBuf,DataBuf& outBuf) { DataBuf output(boxBuf.size_ + iccProfile_.size_ + 100); // allocate sufficient space long outlen = sizeof(Jp2BoxHeader) ; // now many bytes have we written to output? long inlen = sizeof(Jp2BoxHeader) ; // how many bytes have we read from boxBuf? 
Jp2BoxHeader* pBox = (Jp2BoxHeader*) boxBuf.pData_; uint32_t length = getLong((byte*)&pBox->length, bigEndian); uint32_t count = sizeof (Jp2BoxHeader); char* p = (char*) boxBuf.pData_; bool bWroteColor = false ; while ( count < length || !bWroteColor ) { Jp2BoxHeader* pSubBox = (Jp2BoxHeader*) (p+count) ; // copy data. pointer could be into a memory mapped file which we will decode! Jp2BoxHeader subBox ; memcpy(&subBox,pSubBox,sizeof(subBox)); Jp2BoxHeader newBox = subBox; if ( count < length ) { subBox.length = getLong((byte*)&subBox.length, bigEndian); subBox.type = getLong((byte*)&subBox.type , bigEndian); #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Jp2Image::encodeJp2Header subbox: "<< toAscii(subBox.type) << " length = " << subBox.length << std::endl; #endif enforce(subBox.length <= length - count, Exiv2::kerCorruptedMetadata); count += subBox.length; newBox.type = subBox.type; } else { subBox.length=0; newBox.type = kJp2BoxTypeColorHeader; count = length; } uint32_t newlen = subBox.length; if ( newBox.type == kJp2BoxTypeColorHeader ) { bWroteColor = true ; if ( ! iccProfileDefined() ) { const char* pad = "\x01\x00\x00\x00\x00\x00\x10\x00\x00\x05\x1cuuid"; uint32_t psize = 15; newlen = sizeof(newBox) + psize ; enforce(newlen <= output.size_ - outlen, Exiv2::kerCorruptedMetadata); ul2Data((byte*)&newBox.length,psize ,bigEndian); ul2Data((byte*)&newBox.type ,newBox.type,bigEndian); ::memcpy(output.pData_+outlen ,&newBox ,sizeof(newBox)); ::memcpy(output.pData_+outlen+sizeof(newBox) ,pad ,psize ); } else { const char* pad = "\x02\x00\x00"; uint32_t psize = 3; newlen = sizeof(newBox) + psize + iccProfile_.size_; enforce(newlen <= output.size_ - outlen, Exiv2::kerCorruptedMetadata); ul2Data((byte*)&newBox.length,newlen,bigEndian); ul2Data((byte*)&newBox.type,newBox.type,bigEndian); ::memcpy(output.pData_+outlen ,&newBox ,sizeof(newBox) ); ::memcpy(output.pData_+outlen+sizeof(newBox) , pad ,psize ); ::memcpy(output.pData_+outlen+sizeof(newBox)+psize,iccProfile_.pData_,iccProfile_.size_); } } else { enforce(newlen <= output.size_ - outlen, Exiv2::kerCorruptedMetadata); ::memcpy(output.pData_+outlen,boxBuf.pData_+inlen,subBox.length); } outlen += newlen; inlen += subBox.length; } // allocate the correct number of bytes, copy the data and update the box header outBuf.alloc(outlen); ::memcpy(outBuf.pData_,output.pData_,outlen); pBox = (Jp2BoxHeader*) outBuf.pData_; ul2Data((byte*)&pBox->type,kJp2BoxTypeJp2Header,bigEndian); ul2Data((byte*)&pBox->length,outlen,bigEndian); } // Jp2Image::encodeJp2Header #ifdef __clang__ #pragma clang diagnostic pop #endif void Jp2Image::doWriteMetadata(BasicIo& outIo) { if (!io_->isopen()) throw Error(kerInputDataReadFailed); if (!outIo.isopen()) throw Error(kerImageWriteFailed); #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: Writing JPEG-2000 file " << io_->path() << std::endl; std::cout << "Exiv2::Jp2Image::doWriteMetadata: tmp file created " << outIo.path() << std::endl; #endif // Ensure that this is the correct image type if (!isJp2Type(*io_, true)) { if (io_->error() || io_->eof()) throw Error(kerInputDataReadFailed); throw Error(kerNoImageInInputData); } // Write JPEG2000 Signature. if (outIo.write(Jp2Signature, 12) != 12) throw Error(kerImageWriteFailed); Jp2BoxHeader box = {0,0}; byte boxDataSize[4]; byte boxUUIDtype[4]; DataBuf bheaderBuf(8); // Box header : 4 bytes (data size) + 4 bytes (box type). // FIXME: Andreas, why the loop do not stop when EOF is taken from _io. 
The loop go out by an exception // generated by a zero size data read. while(io_->tell() < (long) io_->size()) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: Position: " << io_->tell() << " / " << io_->size() << std::endl; #endif // Read chunk header. std::memset(bheaderBuf.pData_, 0x00, bheaderBuf.size_); long bufRead = io_->read(bheaderBuf.pData_, bheaderBuf.size_); if (io_->error()) throw Error(kerFailedToReadImageData); if (bufRead != bheaderBuf.size_) throw Error(kerInputDataReadFailed); // Decode box header. box.length = getLong(bheaderBuf.pData_, bigEndian); box.type = getLong(bheaderBuf.pData_ + 4, bigEndian); #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: box type: " << toAscii(box.type) << " length: " << box.length << std::endl; #endif if (box.length == 0) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: Null Box size has been found. " "This is the last box of file." << std::endl; #endif box.length = (uint32_t) (io_->size() - io_->tell() + 8); } if (box.length < 8) { // box is broken, so there is nothing we can do here throw Error(kerCorruptedMetadata); } // Read whole box : Box header + Box data (not fixed size - can be null). DataBuf boxBuf(box.length); // Box header (8 bytes) + box data. memcpy(boxBuf.pData_, bheaderBuf.pData_, 8); // Copy header. bufRead = io_->read(boxBuf.pData_ + 8, box.length - 8); // Extract box data. if (io_->error()) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: Error reading source file" << std::endl; #endif throw Error(kerFailedToReadImageData); } if (bufRead != (long)(box.length - 8)) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: Cannot read source file data" << std::endl; #endif throw Error(kerInputDataReadFailed); } switch(box.type) { case kJp2BoxTypeJp2Header: { DataBuf newBuf; encodeJp2Header(boxBuf,newBuf); #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: Write JP2Header box (length: " << box.length << ")" << std::endl; #endif if (outIo.write(newBuf.pData_, newBuf.size_) != newBuf.size_) throw Error(kerImageWriteFailed); // Write all updated metadata here, just after JP2Header. 
if (exifData_.count() > 0) { // Update Exif data to a new UUID box Blob blob; ExifParser::encode(blob, littleEndian, exifData_); if (blob.size()) { DataBuf rawExif(static_cast<long>(blob.size())); memcpy(rawExif.pData_, &blob[0], blob.size()); DataBuf boxData(8 + 16 + rawExif.size_); ul2Data(boxDataSize, boxData.size_, Exiv2::bigEndian); ul2Data(boxUUIDtype, kJp2BoxTypeUuid, Exiv2::bigEndian); memcpy(boxData.pData_, boxDataSize, 4); memcpy(boxData.pData_ + 4, boxUUIDtype, 4); memcpy(boxData.pData_ + 8, kJp2UuidExif, 16); memcpy(boxData.pData_ + 8 + 16, rawExif.pData_, rawExif.size_); #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: Write box with Exif metadata (length: " << boxData.size_ << std::endl; #endif if (outIo.write(boxData.pData_, boxData.size_) != boxData.size_) throw Error(kerImageWriteFailed); } } if (iptcData_.count() > 0) { // Update Iptc data to a new UUID box DataBuf rawIptc = IptcParser::encode(iptcData_); if (rawIptc.size_ > 0) { DataBuf boxData(8 + 16 + rawIptc.size_); ul2Data(boxDataSize, boxData.size_, Exiv2::bigEndian); ul2Data(boxUUIDtype, kJp2BoxTypeUuid, Exiv2::bigEndian); memcpy(boxData.pData_, boxDataSize, 4); memcpy(boxData.pData_ + 4, boxUUIDtype, 4); memcpy(boxData.pData_ + 8, kJp2UuidIptc, 16); memcpy(boxData.pData_ + 8 + 16, rawIptc.pData_, rawIptc.size_); #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: Write box with Iptc metadata (length: " << boxData.size_ << std::endl; #endif if (outIo.write(boxData.pData_, boxData.size_) != boxData.size_) throw Error(kerImageWriteFailed); } } if (writeXmpFromPacket() == false) { if (XmpParser::encode(xmpPacket_, xmpData_) > 1) { #ifndef SUPPRESS_WARNINGS EXV_ERROR << "Failed to encode XMP metadata." << std::endl; #endif } } if (xmpPacket_.size() > 0) { // Update Xmp data to a new UUID box DataBuf xmp(reinterpret_cast<const byte*>(xmpPacket_.data()), static_cast<long>(xmpPacket_.size())); DataBuf boxData(8 + 16 + xmp.size_); ul2Data(boxDataSize, boxData.size_, Exiv2::bigEndian); ul2Data(boxUUIDtype, kJp2BoxTypeUuid, Exiv2::bigEndian); memcpy(boxData.pData_, boxDataSize, 4); memcpy(boxData.pData_ + 4, boxUUIDtype, 4); memcpy(boxData.pData_ + 8, kJp2UuidXmp, 16); memcpy(boxData.pData_ + 8 + 16, xmp.pData_, xmp.size_); #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: Write box with XMP metadata (length: " << boxData.size_ << ")" << std::endl; #endif if (outIo.write(boxData.pData_, boxData.size_) != boxData.size_) throw Error(kerImageWriteFailed); } break; } case kJp2BoxTypeUuid: { if(memcmp(boxBuf.pData_ + 8, kJp2UuidExif, 16) == 0) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: strip Exif Uuid box" << std::endl; #endif } else if(memcmp(boxBuf.pData_ + 8, kJp2UuidIptc, 16) == 0) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: strip Iptc Uuid box" << std::endl; #endif } else if(memcmp(boxBuf.pData_ + 8, kJp2UuidXmp, 16) == 0) { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: strip Xmp Uuid box" << std::endl; #endif } else { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: write Uuid box (length: " << box.length << ")" << std::endl; #endif if (outIo.write(boxBuf.pData_, boxBuf.size_) != boxBuf.size_) throw Error(kerImageWriteFailed); } break; } default: { #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: write box (length: " << box.length << ")" << std::endl; #endif if (outIo.write(boxBuf.pData_, 
boxBuf.size_) != boxBuf.size_) throw Error(kerImageWriteFailed); break; } } } #ifdef EXIV2_DEBUG_MESSAGES std::cout << "Exiv2::Jp2Image::doWriteMetadata: EOF" << std::endl; #endif } // Jp2Image::doWriteMetadata // ************************************************************************* // free functions Image::UniquePtr newJp2Instance(BasicIo::UniquePtr io, bool create) { Image::UniquePtr image(new Jp2Image(std::move(io), create)); if (!image->good()) { image.reset(); } return image; } bool isJp2Type(BasicIo& iIo, bool advance) { const int32_t len = 12; byte buf[len]; iIo.read(buf, len); if (iIo.error() || iIo.eof()) { return false; } bool matched = (memcmp(buf, Jp2Signature, len) == 0); if (!advance || !matched) { iIo.seek(-len, BasicIo::cur); } return matched; } } // namespace Exiv2
null
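Note: the ground-truth Exiv2 Jp2Image code above guards its JP2 box walk with boxes_check()/enforce()-style length checks before any read or allocation. As a minimal, hypothetical sketch of that bounds-check pattern (plain C++, not the Exiv2 enforce API):

#include <cstdint>
#include <stdexcept>

// Reject a box whose declared length cannot fit in the bytes left in the
// stream; using such a length to size a read or an allocation is the
// out-of-bounds write (CWE-787) pattern these entries guard against.
void checkBoxLength(std::uint32_t declaredLength, std::uint64_t bytesRemaining)
{
    if (declaredLength > bytesRemaining)
        throw std::runtime_error("corrupted metadata: box length exceeds remaining data");
}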
246
CWE-787
CVE-2021-29512
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // See docs in ../ops/math_ops.cc. #include "tensorflow/core/platform/errors.h" #define EIGEN_USE_THREADS #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/bincount_op.h" #include "tensorflow/core/kernels/fill_functor.h" #include "tensorflow/core/lib/core/threadpool.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { using thread::ThreadPool; typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; namespace functor { template <typename Tidx, typename T> struct BincountFunctor<CPUDevice, Tidx, T, true> { static Status Compute(OpKernelContext* context, const typename TTypes<Tidx, 1>::ConstTensor& arr, const typename TTypes<T, 1>::ConstTensor& weights, typename TTypes<T, 1>::Tensor& output, const Tidx num_bins) { Tensor all_nonneg_t; TF_RETURN_IF_ERROR(context->allocate_temp( DT_BOOL, TensorShape({}), &all_nonneg_t, AllocatorAttributes())); all_nonneg_t.scalar<bool>().device(context->eigen_cpu_device()) = (arr >= Tidx(0)).all(); if (!all_nonneg_t.scalar<bool>()()) { return errors::InvalidArgument("Input arr must be non-negative!"); } // Allocate partial output bin sums for each worker thread. Worker ids in // ParallelForWithWorkerId range from 0 to NumThreads() inclusive. ThreadPool* thread_pool = context->device()->tensorflow_cpu_worker_threads()->workers; const int64 num_threads = thread_pool->NumThreads() + 1; Tensor partial_bins_t; TF_RETURN_IF_ERROR(context->allocate_temp( DT_BOOL, TensorShape({num_threads, num_bins}), &partial_bins_t)); auto partial_bins = partial_bins_t.matrix<bool>(); partial_bins.setZero(); thread_pool->ParallelForWithWorkerId( arr.size(), 8 /* cost */, [&](int64 start_ind, int64 limit_ind, int64 worker_id) { for (int64 i = start_ind; i < limit_ind; i++) { Tidx value = arr(i); if (value < num_bins) { partial_bins(worker_id, value) = true; } } }); // Sum the partial bins along the 0th axis. Eigen::array<int, 1> reduce_dim({0}); output.device(context->eigen_cpu_device()) = partial_bins.any(reduce_dim).cast<T>(); return Status::OK(); } }; template <typename Tidx, typename T> struct BincountFunctor<CPUDevice, Tidx, T, false> { static Status Compute(OpKernelContext* context, const typename TTypes<Tidx, 1>::ConstTensor& arr, const typename TTypes<T, 1>::ConstTensor& weights, typename TTypes<T, 1>::Tensor& output, const Tidx num_bins) { Tensor all_nonneg_t; TF_RETURN_IF_ERROR(context->allocate_temp( DT_BOOL, TensorShape({}), &all_nonneg_t, AllocatorAttributes())); all_nonneg_t.scalar<bool>().device(context->eigen_cpu_device()) = (arr >= Tidx(0)).all(); if (!all_nonneg_t.scalar<bool>()()) { return errors::InvalidArgument("Input arr must be non-negative!"); } // Allocate partial output bin sums for each worker thread. 
Worker ids in // ParallelForWithWorkerId range from 0 to NumThreads() inclusive. ThreadPool* thread_pool = context->device()->tensorflow_cpu_worker_threads()->workers; const int64 num_threads = thread_pool->NumThreads() + 1; Tensor partial_bins_t; TF_RETURN_IF_ERROR(context->allocate_temp( DataTypeToEnum<T>::value, TensorShape({num_threads, num_bins}), &partial_bins_t)); auto partial_bins = partial_bins_t.matrix<T>(); partial_bins.setZero(); thread_pool->ParallelForWithWorkerId( arr.size(), 8 /* cost */, [&](int64 start_ind, int64 limit_ind, int64 worker_id) { for (int64 i = start_ind; i < limit_ind; i++) { Tidx value = arr(i); if (value < num_bins) { if (weights.size()) { partial_bins(worker_id, value) += weights(i); } else { // Complex numbers don't support "++". partial_bins(worker_id, value) += T(1); } } } }); // Sum the partial bins along the 0th axis. Eigen::array<int, 1> reduce_dim({0}); output.device(context->eigen_cpu_device()) = partial_bins.sum(reduce_dim); return Status::OK(); } }; template <typename Tidx, typename T, bool binary_output> struct BincountReduceFunctor<CPUDevice, Tidx, T, binary_output> { static Status Compute(OpKernelContext* context, const typename TTypes<Tidx, 2>::ConstTensor& in, const typename TTypes<T, 2>::ConstTensor& weights, typename TTypes<T, 2>::Tensor& out, const Tidx num_bins) { const int num_rows = out.dimension(0); const int num_cols = in.dimension(1); ThreadPool* thread_pool = context->device()->tensorflow_cpu_worker_threads()->workers; thread_pool->ParallelForWithWorkerId( num_rows, 8 /* cost */, [&](int64 start_row, int64 end_row, int64 worker_id) { for (int64 i = start_row; i < end_row; ++i) { for (int64 j = 0; j < num_cols; ++j) { Tidx value = in(i, j); if (value < num_bins) { if (binary_output) { out(i, value) = T(1); } else { if (weights.size()) { out(i, value) += weights(i, j); } else { out(i, value) += T(1); } } } } } }); return Status::OK(); } }; } // namespace functor template <typename Device, typename T> class BincountOp : public OpKernel { public: explicit BincountOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} void Compute(OpKernelContext* ctx) override { const Tensor& arr_t = ctx->input(0); const Tensor& size_tensor = ctx->input(1); OP_REQUIRES(ctx, size_tensor.dims() == 0, errors::InvalidArgument("Shape must be rank 0 but is rank ", size_tensor.dims())); int32 size = size_tensor.scalar<int32>()(); OP_REQUIRES( ctx, size >= 0, errors::InvalidArgument("size (", size, ") must be non-negative")); const Tensor& weights_t = ctx->input(2); const auto arr = arr_t.flat<int32>(); const auto weights = weights_t.flat<T>(); Tensor* output_t; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({size}), &output_t)); auto output = output_t->flat<T>(); OP_REQUIRES_OK(ctx, functor::BincountFunctor<Device, int32, T, false>::Compute( ctx, arr, weights, output, size)); } }; #define REGISTER_KERNELS(type) \ REGISTER_KERNEL_BUILDER( \ Name("Bincount").Device(DEVICE_CPU).TypeConstraint<type>("T"), \ BincountOp<CPUDevice, type>) TF_CALL_NUMBER_TYPES(REGISTER_KERNELS); #undef REGISTER_KERNELS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_KERNELS(type) \ REGISTER_KERNEL_BUILDER(Name("Bincount") \ .Device(DEVICE_GPU) \ .HostMemory("size") \ .TypeConstraint<type>("T"), \ BincountOp<GPUDevice, type>) TF_CALL_int32(REGISTER_KERNELS); TF_CALL_float(REGISTER_KERNELS); #undef REGISTER_KERNELS #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename Device, typename Tidx, typename T> class DenseBincountOp : public OpKernel { public: explicit 
DenseBincountOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("binary_output", &binary_output_)); } void Compute(OpKernelContext* ctx) override { const Tensor& data = ctx->input(0); OP_REQUIRES(ctx, data.dims() <= 2, errors::InvalidArgument( "Shape must be at most rank 2 but is rank ", data.dims())); const Tensor& size_t = ctx->input(1); const Tensor& weights = ctx->input(2); Tidx size = size_t.scalar<Tidx>()(); OP_REQUIRES( ctx, size >= 0, errors::InvalidArgument("size (", size, ") must be non-negative")); Tensor* out_t; functor::SetZeroFunctor<Device, T> fill; if (data.dims() == 1) { OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({size}), &out_t)); auto out = out_t->flat<T>(); fill(ctx->eigen_device<Device>(), out); if (binary_output_) { OP_REQUIRES_OK( ctx, functor::BincountFunctor<Device, Tidx, T, true>::Compute( ctx, data.flat<Tidx>(), weights.flat<T>(), out, size)); } else { OP_REQUIRES_OK( ctx, functor::BincountFunctor<Device, Tidx, T, false>::Compute( ctx, data.flat<Tidx>(), weights.flat<T>(), out, size)); } } else if (data.dims() == 2) { const int64 num_rows = data.dim_size(0); auto weight_matrix = (weights.NumElements() == 0) ? weights.shaped<T, 2>(gtl::InlinedVector<int64, 2>(2, 0)) : weights.matrix<T>(); OP_REQUIRES_OK( ctx, ctx->allocate_output(0, TensorShape({num_rows, size}), &out_t)); auto out = out_t->matrix<T>(); fill(ctx->eigen_device<Device>(), out_t->flat<T>()); if (binary_output_) { OP_REQUIRES_OK( ctx, functor::BincountReduceFunctor<Device, Tidx, T, true>::Compute( ctx, data.matrix<Tidx>(), weight_matrix, out, size)); } else { OP_REQUIRES_OK( ctx, functor::BincountReduceFunctor<Device, Tidx, T, false>::Compute( ctx, data.matrix<Tidx>(), weight_matrix, out, size)); } } } private: bool binary_output_; }; #define REGISTER_KERNELS(Tidx, T) \ REGISTER_KERNEL_BUILDER(Name("DenseBincount") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<Tidx>("Tidx"), \ DenseBincountOp<CPUDevice, Tidx, T>); #define REGISTER_CPU_KERNELS(T) \ REGISTER_KERNELS(int32, T); \ REGISTER_KERNELS(int64, T); TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #undef REGISTER_KERNELS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_KERNELS(Tidx, T) \ REGISTER_KERNEL_BUILDER(Name("DenseBincount") \ .Device(DEVICE_GPU) \ .HostMemory("size") \ .TypeConstraint<T>("T") \ .TypeConstraint<Tidx>("Tidx"), \ DenseBincountOp<GPUDevice, Tidx, T>); #define REGISTER_GPU_KERNELS(T) \ REGISTER_KERNELS(int32, T); \ REGISTER_KERNELS(int64, T); TF_CALL_int32(REGISTER_GPU_KERNELS); TF_CALL_float(REGISTER_GPU_KERNELS); #undef REGISTER_GPU_KERNELS #undef REGISTER_KERNELS #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename Device, typename Tidx, typename T> class SparseBincountOp : public OpKernel { public: explicit SparseBincountOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("binary_output", &binary_output_)); } void Compute(OpKernelContext* ctx) override { const Tensor& indices = ctx->input(0); const auto values = ctx->input(1).flat<Tidx>(); const Tensor& dense_shape = ctx->input(2); const Tensor& size_t = ctx->input(3); const auto weights = ctx->input(4).flat<T>(); const int64 weights_size = weights.size(); Tidx size = size_t.scalar<Tidx>()(); OP_REQUIRES( ctx, size >= 0, errors::InvalidArgument("size (", size, ") must be non-negative")); bool is_1d = dense_shape.NumElements() == 1; Tensor* out_t; functor::SetZeroFunctor<Device, T> fill; if (is_1d) { OP_REQUIRES_OK(ctx, 
ctx->allocate_output(0, TensorShape({size}), &out_t)); auto out = out_t->flat<T>(); fill(ctx->eigen_device<Device>(), out); if (binary_output_) { OP_REQUIRES_OK(ctx, functor::BincountFunctor<Device, Tidx, T, true>::Compute( ctx, values, weights, out, size)); } else { OP_REQUIRES_OK( ctx, functor::BincountFunctor<Device, Tidx, T, false>::Compute( ctx, values, weights, out, size)); } } else { const auto shape = dense_shape.flat<int64>(); const int64 num_rows = shape(0); OP_REQUIRES_OK( ctx, ctx->allocate_output(0, TensorShape({num_rows, size}), &out_t)); const auto out = out_t->matrix<T>(); fill(ctx->eigen_device<Device>(), out_t->flat<T>()); const auto indices_mat = indices.matrix<int64>(); for (int64 i = 0; i < indices_mat.dimension(0); ++i) { const int64 batch = indices_mat(i, 0); const Tidx bin = values(i); if (bin < size) { if (binary_output_) { out(batch, bin) = T(1); } else { if (weights_size) { out(batch, bin) += weights(i); } else { out(batch, bin) += T(1); } } } } } } private: bool binary_output_; }; #define REGISTER_KERNELS(Tidx, T) \ REGISTER_KERNEL_BUILDER(Name("SparseBincount") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<Tidx>("Tidx"), \ SparseBincountOp<CPUDevice, Tidx, T>); #define REGISTER_CPU_KERNELS(T) \ REGISTER_KERNELS(int32, T); \ REGISTER_KERNELS(int64, T); TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #undef REGISTER_KERNELS template <typename Device, typename Tidx, typename T> class RaggedBincountOp : public OpKernel { public: explicit RaggedBincountOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("binary_output", &binary_output_)); } void Compute(OpKernelContext* ctx) override { const auto splits = ctx->input(0).flat<int64>(); const auto values = ctx->input(1).flat<Tidx>(); const Tensor& size_t = ctx->input(2); const auto weights = ctx->input(3).flat<T>(); const int64 weights_size = weights.size(); Tidx size = size_t.scalar<Tidx>()(); OP_REQUIRES( ctx, size >= 0, errors::InvalidArgument("size (", size, ") must be non-negative")); int num_rows = splits.size() - 1; int num_values = values.size(); int batch_idx = 0; Tensor* out_t; OP_REQUIRES_OK( ctx, ctx->allocate_output(0, TensorShape({num_rows, size}), &out_t)); functor::SetZeroFunctor<Device, T> fill; fill(ctx->eigen_device<Device>(), out_t->flat<T>()); const auto out = out_t->matrix<T>(); for (int idx = 0; idx < num_values; ++idx) { while (idx >= splits(batch_idx)) { batch_idx++; } Tidx bin = values(idx); OP_REQUIRES(ctx, bin >= 0, errors::InvalidArgument("Input must be non-negative")); if (bin < size) { if (binary_output_) { out(batch_idx - 1, bin) = T(1); } else { T value = (weights_size > 0) ? weights(idx) : T(1); out(batch_idx - 1, bin) += value; } } } } private: bool binary_output_; }; #define REGISTER_KERNELS(Tidx, T) \ REGISTER_KERNEL_BUILDER(Name("RaggedBincount") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<Tidx>("Tidx"), \ RaggedBincountOp<CPUDevice, Tidx, T>); #define REGISTER_CPU_KERNELS(T) \ REGISTER_KERNELS(int32, T); \ REGISTER_KERNELS(int64, T); TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #undef REGISTER_KERNELS } // end namespace tensorflow
null
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // See docs in ../ops/math_ops.cc. #include "tensorflow/core/platform/errors.h" #define EIGEN_USE_THREADS #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/bincount_op.h" #include "tensorflow/core/kernels/fill_functor.h" #include "tensorflow/core/lib/core/threadpool.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { using thread::ThreadPool; typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; namespace functor { template <typename Tidx, typename T> struct BincountFunctor<CPUDevice, Tidx, T, true> { static Status Compute(OpKernelContext* context, const typename TTypes<Tidx, 1>::ConstTensor& arr, const typename TTypes<T, 1>::ConstTensor& weights, typename TTypes<T, 1>::Tensor& output, const Tidx num_bins) { Tensor all_nonneg_t; TF_RETURN_IF_ERROR(context->allocate_temp( DT_BOOL, TensorShape({}), &all_nonneg_t, AllocatorAttributes())); all_nonneg_t.scalar<bool>().device(context->eigen_cpu_device()) = (arr >= Tidx(0)).all(); if (!all_nonneg_t.scalar<bool>()()) { return errors::InvalidArgument("Input arr must be non-negative!"); } // Allocate partial output bin sums for each worker thread. Worker ids in // ParallelForWithWorkerId range from 0 to NumThreads() inclusive. ThreadPool* thread_pool = context->device()->tensorflow_cpu_worker_threads()->workers; const int64 num_threads = thread_pool->NumThreads() + 1; Tensor partial_bins_t; TF_RETURN_IF_ERROR(context->allocate_temp( DT_BOOL, TensorShape({num_threads, num_bins}), &partial_bins_t)); auto partial_bins = partial_bins_t.matrix<bool>(); partial_bins.setZero(); thread_pool->ParallelForWithWorkerId( arr.size(), 8 /* cost */, [&](int64 start_ind, int64 limit_ind, int64 worker_id) { for (int64 i = start_ind; i < limit_ind; i++) { Tidx value = arr(i); if (value < num_bins) { partial_bins(worker_id, value) = true; } } }); // Sum the partial bins along the 0th axis. Eigen::array<int, 1> reduce_dim({0}); output.device(context->eigen_cpu_device()) = partial_bins.any(reduce_dim).cast<T>(); return Status::OK(); } }; template <typename Tidx, typename T> struct BincountFunctor<CPUDevice, Tidx, T, false> { static Status Compute(OpKernelContext* context, const typename TTypes<Tidx, 1>::ConstTensor& arr, const typename TTypes<T, 1>::ConstTensor& weights, typename TTypes<T, 1>::Tensor& output, const Tidx num_bins) { Tensor all_nonneg_t; TF_RETURN_IF_ERROR(context->allocate_temp( DT_BOOL, TensorShape({}), &all_nonneg_t, AllocatorAttributes())); all_nonneg_t.scalar<bool>().device(context->eigen_cpu_device()) = (arr >= Tidx(0)).all(); if (!all_nonneg_t.scalar<bool>()()) { return errors::InvalidArgument("Input arr must be non-negative!"); } // Allocate partial output bin sums for each worker thread. 
Worker ids in // ParallelForWithWorkerId range from 0 to NumThreads() inclusive. ThreadPool* thread_pool = context->device()->tensorflow_cpu_worker_threads()->workers; const int64 num_threads = thread_pool->NumThreads() + 1; Tensor partial_bins_t; TF_RETURN_IF_ERROR(context->allocate_temp( DataTypeToEnum<T>::value, TensorShape({num_threads, num_bins}), &partial_bins_t)); auto partial_bins = partial_bins_t.matrix<T>(); partial_bins.setZero(); thread_pool->ParallelForWithWorkerId( arr.size(), 8 /* cost */, [&](int64 start_ind, int64 limit_ind, int64 worker_id) { for (int64 i = start_ind; i < limit_ind; i++) { Tidx value = arr(i); if (value < num_bins) { if (weights.size()) { partial_bins(worker_id, value) += weights(i); } else { // Complex numbers don't support "++". partial_bins(worker_id, value) += T(1); } } } }); // Sum the partial bins along the 0th axis. Eigen::array<int, 1> reduce_dim({0}); output.device(context->eigen_cpu_device()) = partial_bins.sum(reduce_dim); return Status::OK(); } }; template <typename Tidx, typename T, bool binary_output> struct BincountReduceFunctor<CPUDevice, Tidx, T, binary_output> { static Status Compute(OpKernelContext* context, const typename TTypes<Tidx, 2>::ConstTensor& in, const typename TTypes<T, 2>::ConstTensor& weights, typename TTypes<T, 2>::Tensor& out, const Tidx num_bins) { const int num_rows = out.dimension(0); const int num_cols = in.dimension(1); ThreadPool* thread_pool = context->device()->tensorflow_cpu_worker_threads()->workers; thread_pool->ParallelForWithWorkerId( num_rows, 8 /* cost */, [&](int64 start_row, int64 end_row, int64 worker_id) { for (int64 i = start_row; i < end_row; ++i) { for (int64 j = 0; j < num_cols; ++j) { Tidx value = in(i, j); if (value < num_bins) { if (binary_output) { out(i, value) = T(1); } else { if (weights.size()) { out(i, value) += weights(i, j); } else { out(i, value) += T(1); } } } } } }); return Status::OK(); } }; } // namespace functor template <typename Device, typename T> class BincountOp : public OpKernel { public: explicit BincountOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} void Compute(OpKernelContext* ctx) override { const Tensor& arr_t = ctx->input(0); const Tensor& size_tensor = ctx->input(1); OP_REQUIRES(ctx, size_tensor.dims() == 0, errors::InvalidArgument("Shape must be rank 0 but is rank ", size_tensor.dims())); int32 size = size_tensor.scalar<int32>()(); OP_REQUIRES( ctx, size >= 0, errors::InvalidArgument("size (", size, ") must be non-negative")); const Tensor& weights_t = ctx->input(2); const auto arr = arr_t.flat<int32>(); const auto weights = weights_t.flat<T>(); Tensor* output_t; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({size}), &output_t)); auto output = output_t->flat<T>(); OP_REQUIRES_OK(ctx, functor::BincountFunctor<Device, int32, T, false>::Compute( ctx, arr, weights, output, size)); } }; #define REGISTER_KERNELS(type) \ REGISTER_KERNEL_BUILDER( \ Name("Bincount").Device(DEVICE_CPU).TypeConstraint<type>("T"), \ BincountOp<CPUDevice, type>) TF_CALL_NUMBER_TYPES(REGISTER_KERNELS); #undef REGISTER_KERNELS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_KERNELS(type) \ REGISTER_KERNEL_BUILDER(Name("Bincount") \ .Device(DEVICE_GPU) \ .HostMemory("size") \ .TypeConstraint<type>("T"), \ BincountOp<GPUDevice, type>) TF_CALL_int32(REGISTER_KERNELS); TF_CALL_float(REGISTER_KERNELS); #undef REGISTER_KERNELS #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename Device, typename Tidx, typename T> class DenseBincountOp : public OpKernel { public: explicit 
DenseBincountOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("binary_output", &binary_output_)); } void Compute(OpKernelContext* ctx) override { const Tensor& data = ctx->input(0); OP_REQUIRES(ctx, data.dims() <= 2, errors::InvalidArgument( "Shape must be at most rank 2 but is rank ", data.dims())); const Tensor& size_t = ctx->input(1); const Tensor& weights = ctx->input(2); Tidx size = size_t.scalar<Tidx>()(); OP_REQUIRES( ctx, size >= 0, errors::InvalidArgument("size (", size, ") must be non-negative")); Tensor* out_t; functor::SetZeroFunctor<Device, T> fill; if (data.dims() == 1) { OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({size}), &out_t)); auto out = out_t->flat<T>(); fill(ctx->eigen_device<Device>(), out); if (binary_output_) { OP_REQUIRES_OK( ctx, functor::BincountFunctor<Device, Tidx, T, true>::Compute( ctx, data.flat<Tidx>(), weights.flat<T>(), out, size)); } else { OP_REQUIRES_OK( ctx, functor::BincountFunctor<Device, Tidx, T, false>::Compute( ctx, data.flat<Tidx>(), weights.flat<T>(), out, size)); } } else if (data.dims() == 2) { const int64 num_rows = data.dim_size(0); auto weight_matrix = (weights.NumElements() == 0) ? weights.shaped<T, 2>(gtl::InlinedVector<int64, 2>(2, 0)) : weights.matrix<T>(); OP_REQUIRES_OK( ctx, ctx->allocate_output(0, TensorShape({num_rows, size}), &out_t)); auto out = out_t->matrix<T>(); fill(ctx->eigen_device<Device>(), out_t->flat<T>()); if (binary_output_) { OP_REQUIRES_OK( ctx, functor::BincountReduceFunctor<Device, Tidx, T, true>::Compute( ctx, data.matrix<Tidx>(), weight_matrix, out, size)); } else { OP_REQUIRES_OK( ctx, functor::BincountReduceFunctor<Device, Tidx, T, false>::Compute( ctx, data.matrix<Tidx>(), weight_matrix, out, size)); } } } private: bool binary_output_; }; #define REGISTER_KERNELS(Tidx, T) \ REGISTER_KERNEL_BUILDER(Name("DenseBincount") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<Tidx>("Tidx"), \ DenseBincountOp<CPUDevice, Tidx, T>); #define REGISTER_CPU_KERNELS(T) \ REGISTER_KERNELS(int32, T); \ REGISTER_KERNELS(int64, T); TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #undef REGISTER_KERNELS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_KERNELS(Tidx, T) \ REGISTER_KERNEL_BUILDER(Name("DenseBincount") \ .Device(DEVICE_GPU) \ .HostMemory("size") \ .TypeConstraint<T>("T") \ .TypeConstraint<Tidx>("Tidx"), \ DenseBincountOp<GPUDevice, Tidx, T>); #define REGISTER_GPU_KERNELS(T) \ REGISTER_KERNELS(int32, T); \ REGISTER_KERNELS(int64, T); TF_CALL_int32(REGISTER_GPU_KERNELS); TF_CALL_float(REGISTER_GPU_KERNELS); #undef REGISTER_GPU_KERNELS #undef REGISTER_KERNELS #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename Device, typename Tidx, typename T> class SparseBincountOp : public OpKernel { public: explicit SparseBincountOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("binary_output", &binary_output_)); } void Compute(OpKernelContext* ctx) override { const Tensor& indices = ctx->input(0); const auto values = ctx->input(1).flat<Tidx>(); const Tensor& dense_shape = ctx->input(2); const Tensor& size_t = ctx->input(3); const auto weights = ctx->input(4).flat<T>(); const int64 weights_size = weights.size(); Tidx size = size_t.scalar<Tidx>()(); OP_REQUIRES( ctx, size >= 0, errors::InvalidArgument("size (", size, ") must be non-negative")); bool is_1d = dense_shape.NumElements() == 1; Tensor* out_t; functor::SetZeroFunctor<Device, T> fill; if (is_1d) { OP_REQUIRES_OK(ctx, 
ctx->allocate_output(0, TensorShape({size}), &out_t)); auto out = out_t->flat<T>(); fill(ctx->eigen_device<Device>(), out); if (binary_output_) { OP_REQUIRES_OK(ctx, functor::BincountFunctor<Device, Tidx, T, true>::Compute( ctx, values, weights, out, size)); } else { OP_REQUIRES_OK( ctx, functor::BincountFunctor<Device, Tidx, T, false>::Compute( ctx, values, weights, out, size)); } } else { const auto shape = dense_shape.flat<int64>(); const int64 num_rows = shape(0); OP_REQUIRES_OK( ctx, ctx->allocate_output(0, TensorShape({num_rows, size}), &out_t)); const auto out = out_t->matrix<T>(); fill(ctx->eigen_device<Device>(), out_t->flat<T>()); const auto indices_mat = indices.matrix<int64>(); for (int64 i = 0; i < indices_mat.dimension(0); ++i) { const int64 batch = indices_mat(i, 0); const Tidx bin = values(i); if (bin < size) { if (binary_output_) { out(batch, bin) = T(1); } else { if (weights_size) { out(batch, bin) += weights(i); } else { out(batch, bin) += T(1); } } } } } } private: bool binary_output_; }; #define REGISTER_KERNELS(Tidx, T) \ REGISTER_KERNEL_BUILDER(Name("SparseBincount") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<Tidx>("Tidx"), \ SparseBincountOp<CPUDevice, Tidx, T>); #define REGISTER_CPU_KERNELS(T) \ REGISTER_KERNELS(int32, T); \ REGISTER_KERNELS(int64, T); TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #undef REGISTER_KERNELS template <typename Device, typename Tidx, typename T> class RaggedBincountOp : public OpKernel { public: explicit RaggedBincountOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("binary_output", &binary_output_)); } void Compute(OpKernelContext* ctx) override { const auto splits = ctx->input(0).flat<int64>(); const auto values = ctx->input(1).flat<Tidx>(); const Tensor& size_t = ctx->input(2); const auto weights = ctx->input(3).flat<T>(); const int64 weights_size = weights.size(); Tidx size = size_t.scalar<Tidx>()(); OP_REQUIRES( ctx, size >= 0, errors::InvalidArgument("size (", size, ") must be non-negative")); int num_rows = splits.size() - 1; int num_values = values.size(); int batch_idx = 0; OP_REQUIRES(ctx, splits(0) == 0, errors::InvalidArgument("Splits must start with 0, not with ", splits(0))); OP_REQUIRES(ctx, splits(num_rows) == num_values, errors::InvalidArgument( "Splits must end with the number of values, got ", splits(num_rows), " instead of ", num_values)); Tensor* out_t; OP_REQUIRES_OK( ctx, ctx->allocate_output(0, TensorShape({num_rows, size}), &out_t)); functor::SetZeroFunctor<Device, T> fill; fill(ctx->eigen_device<Device>(), out_t->flat<T>()); const auto out = out_t->matrix<T>(); for (int idx = 0; idx < num_values; ++idx) { while (idx >= splits(batch_idx)) { batch_idx++; } Tidx bin = values(idx); OP_REQUIRES(ctx, bin >= 0, errors::InvalidArgument("Input must be non-negative")); if (bin < size) { if (binary_output_) { out(batch_idx - 1, bin) = T(1); } else { T value = (weights_size > 0) ? weights(idx) : T(1); out(batch_idx - 1, bin) += value; } } } } private: bool binary_output_; }; #define REGISTER_KERNELS(Tidx, T) \ REGISTER_KERNEL_BUILDER(Name("RaggedBincount") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<Tidx>("Tidx"), \ RaggedBincountOp<CPUDevice, Tidx, T>); #define REGISTER_CPU_KERNELS(T) \ REGISTER_KERNELS(int32, T); \ REGISTER_KERNELS(int64, T); TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #undef REGISTER_KERNELS } // end namespace tensorflow
null
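Note: in row 246 (CVE-2021-29512) the ground-truth RaggedBincountOp differs from the vulnerable source only by validating the splits tensor before it is used to select output rows. A minimal standalone sketch of that added check (plain C++ with a hypothetical helper, not the TensorFlow OP_REQUIRES macro):

#include <cstdint>
#include <stdexcept>
#include <vector>

// Mirrors the added ground-truth checks: splits must start at 0 and end at the
// number of values, otherwise batch_idx - 1 can index outside the output tensor.
void validateSplits(const std::vector<std::int64_t>& splits, std::int64_t num_values)
{
    if (splits.empty() || splits.front() != 0)
        throw std::invalid_argument("Splits must start with 0");
    if (splits.back() != num_values)
        throw std::invalid_argument("Splits must end with the number of values");
}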
247
CWE-787
CVE-2021-29514
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // See docs in ../ops/math_ops.cc. #include "tensorflow/core/platform/errors.h" #define EIGEN_USE_THREADS #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/bincount_op.h" #include "tensorflow/core/kernels/fill_functor.h" #include "tensorflow/core/lib/core/threadpool.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { using thread::ThreadPool; typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; namespace functor { template <typename Tidx, typename T> struct BincountFunctor<CPUDevice, Tidx, T, true> { static Status Compute(OpKernelContext* context, const typename TTypes<Tidx, 1>::ConstTensor& arr, const typename TTypes<T, 1>::ConstTensor& weights, typename TTypes<T, 1>::Tensor& output, const Tidx num_bins) { Tensor all_nonneg_t; TF_RETURN_IF_ERROR(context->allocate_temp( DT_BOOL, TensorShape({}), &all_nonneg_t, AllocatorAttributes())); all_nonneg_t.scalar<bool>().device(context->eigen_cpu_device()) = (arr >= Tidx(0)).all(); if (!all_nonneg_t.scalar<bool>()()) { return errors::InvalidArgument("Input arr must be non-negative!"); } // Allocate partial output bin sums for each worker thread. Worker ids in // ParallelForWithWorkerId range from 0 to NumThreads() inclusive. ThreadPool* thread_pool = context->device()->tensorflow_cpu_worker_threads()->workers; const int64 num_threads = thread_pool->NumThreads() + 1; Tensor partial_bins_t; TF_RETURN_IF_ERROR(context->allocate_temp( DT_BOOL, TensorShape({num_threads, num_bins}), &partial_bins_t)); auto partial_bins = partial_bins_t.matrix<bool>(); partial_bins.setZero(); thread_pool->ParallelForWithWorkerId( arr.size(), 8 /* cost */, [&](int64 start_ind, int64 limit_ind, int64 worker_id) { for (int64 i = start_ind; i < limit_ind; i++) { Tidx value = arr(i); if (value < num_bins) { partial_bins(worker_id, value) = true; } } }); // Sum the partial bins along the 0th axis. Eigen::array<int, 1> reduce_dim({0}); output.device(context->eigen_cpu_device()) = partial_bins.any(reduce_dim).cast<T>(); return Status::OK(); } }; template <typename Tidx, typename T> struct BincountFunctor<CPUDevice, Tidx, T, false> { static Status Compute(OpKernelContext* context, const typename TTypes<Tidx, 1>::ConstTensor& arr, const typename TTypes<T, 1>::ConstTensor& weights, typename TTypes<T, 1>::Tensor& output, const Tidx num_bins) { Tensor all_nonneg_t; TF_RETURN_IF_ERROR(context->allocate_temp( DT_BOOL, TensorShape({}), &all_nonneg_t, AllocatorAttributes())); all_nonneg_t.scalar<bool>().device(context->eigen_cpu_device()) = (arr >= Tidx(0)).all(); if (!all_nonneg_t.scalar<bool>()()) { return errors::InvalidArgument("Input arr must be non-negative!"); } // Allocate partial output bin sums for each worker thread. 
Worker ids in // ParallelForWithWorkerId range from 0 to NumThreads() inclusive. ThreadPool* thread_pool = context->device()->tensorflow_cpu_worker_threads()->workers; const int64 num_threads = thread_pool->NumThreads() + 1; Tensor partial_bins_t; TF_RETURN_IF_ERROR(context->allocate_temp( DataTypeToEnum<T>::value, TensorShape({num_threads, num_bins}), &partial_bins_t)); auto partial_bins = partial_bins_t.matrix<T>(); partial_bins.setZero(); thread_pool->ParallelForWithWorkerId( arr.size(), 8 /* cost */, [&](int64 start_ind, int64 limit_ind, int64 worker_id) { for (int64 i = start_ind; i < limit_ind; i++) { Tidx value = arr(i); if (value < num_bins) { if (weights.size()) { partial_bins(worker_id, value) += weights(i); } else { // Complex numbers don't support "++". partial_bins(worker_id, value) += T(1); } } } }); // Sum the partial bins along the 0th axis. Eigen::array<int, 1> reduce_dim({0}); output.device(context->eigen_cpu_device()) = partial_bins.sum(reduce_dim); return Status::OK(); } }; template <typename Tidx, typename T, bool binary_output> struct BincountReduceFunctor<CPUDevice, Tidx, T, binary_output> { static Status Compute(OpKernelContext* context, const typename TTypes<Tidx, 2>::ConstTensor& in, const typename TTypes<T, 2>::ConstTensor& weights, typename TTypes<T, 2>::Tensor& out, const Tidx num_bins) { const int num_rows = out.dimension(0); const int num_cols = in.dimension(1); ThreadPool* thread_pool = context->device()->tensorflow_cpu_worker_threads()->workers; thread_pool->ParallelForWithWorkerId( num_rows, 8 /* cost */, [&](int64 start_row, int64 end_row, int64 worker_id) { for (int64 i = start_row; i < end_row; ++i) { for (int64 j = 0; j < num_cols; ++j) { Tidx value = in(i, j); if (value < num_bins) { if (binary_output) { out(i, value) = T(1); } else { if (weights.size()) { out(i, value) += weights(i, j); } else { out(i, value) += T(1); } } } } } }); return Status::OK(); } }; } // namespace functor template <typename Device, typename T> class BincountOp : public OpKernel { public: explicit BincountOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} void Compute(OpKernelContext* ctx) override { const Tensor& arr_t = ctx->input(0); const Tensor& size_tensor = ctx->input(1); OP_REQUIRES(ctx, size_tensor.dims() == 0, errors::InvalidArgument("Shape must be rank 0 but is rank ", size_tensor.dims())); int32 size = size_tensor.scalar<int32>()(); OP_REQUIRES( ctx, size >= 0, errors::InvalidArgument("size (", size, ") must be non-negative")); const Tensor& weights_t = ctx->input(2); const auto arr = arr_t.flat<int32>(); const auto weights = weights_t.flat<T>(); Tensor* output_t; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({size}), &output_t)); auto output = output_t->flat<T>(); OP_REQUIRES_OK(ctx, functor::BincountFunctor<Device, int32, T, false>::Compute( ctx, arr, weights, output, size)); } }; #define REGISTER_KERNELS(type) \ REGISTER_KERNEL_BUILDER( \ Name("Bincount").Device(DEVICE_CPU).TypeConstraint<type>("T"), \ BincountOp<CPUDevice, type>) TF_CALL_NUMBER_TYPES(REGISTER_KERNELS); #undef REGISTER_KERNELS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_KERNELS(type) \ REGISTER_KERNEL_BUILDER(Name("Bincount") \ .Device(DEVICE_GPU) \ .HostMemory("size") \ .TypeConstraint<type>("T"), \ BincountOp<GPUDevice, type>) TF_CALL_int32(REGISTER_KERNELS); TF_CALL_float(REGISTER_KERNELS); #undef REGISTER_KERNELS #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename Device, typename Tidx, typename T> class DenseBincountOp : public OpKernel { public: explicit 
DenseBincountOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("binary_output", &binary_output_)); } void Compute(OpKernelContext* ctx) override { const Tensor& data = ctx->input(0); OP_REQUIRES(ctx, data.dims() <= 2, errors::InvalidArgument( "Shape must be at most rank 2 but is rank ", data.dims())); const Tensor& size_t = ctx->input(1); const Tensor& weights = ctx->input(2); Tidx size = size_t.scalar<Tidx>()(); OP_REQUIRES( ctx, size >= 0, errors::InvalidArgument("size (", size, ") must be non-negative")); Tensor* out_t; functor::SetZeroFunctor<Device, T> fill; if (data.dims() == 1) { OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({size}), &out_t)); auto out = out_t->flat<T>(); fill(ctx->eigen_device<Device>(), out); if (binary_output_) { OP_REQUIRES_OK( ctx, functor::BincountFunctor<Device, Tidx, T, true>::Compute( ctx, data.flat<Tidx>(), weights.flat<T>(), out, size)); } else { OP_REQUIRES_OK( ctx, functor::BincountFunctor<Device, Tidx, T, false>::Compute( ctx, data.flat<Tidx>(), weights.flat<T>(), out, size)); } } else if (data.dims() == 2) { const int64 num_rows = data.dim_size(0); auto weight_matrix = (weights.NumElements() == 0) ? weights.shaped<T, 2>(gtl::InlinedVector<int64, 2>(2, 0)) : weights.matrix<T>(); OP_REQUIRES_OK( ctx, ctx->allocate_output(0, TensorShape({num_rows, size}), &out_t)); auto out = out_t->matrix<T>(); fill(ctx->eigen_device<Device>(), out_t->flat<T>()); if (binary_output_) { OP_REQUIRES_OK( ctx, functor::BincountReduceFunctor<Device, Tidx, T, true>::Compute( ctx, data.matrix<Tidx>(), weight_matrix, out, size)); } else { OP_REQUIRES_OK( ctx, functor::BincountReduceFunctor<Device, Tidx, T, false>::Compute( ctx, data.matrix<Tidx>(), weight_matrix, out, size)); } } } private: bool binary_output_; }; #define REGISTER_KERNELS(Tidx, T) \ REGISTER_KERNEL_BUILDER(Name("DenseBincount") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<Tidx>("Tidx"), \ DenseBincountOp<CPUDevice, Tidx, T>); #define REGISTER_CPU_KERNELS(T) \ REGISTER_KERNELS(int32, T); \ REGISTER_KERNELS(int64, T); TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #undef REGISTER_KERNELS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_KERNELS(Tidx, T) \ REGISTER_KERNEL_BUILDER(Name("DenseBincount") \ .Device(DEVICE_GPU) \ .HostMemory("size") \ .TypeConstraint<T>("T") \ .TypeConstraint<Tidx>("Tidx"), \ DenseBincountOp<GPUDevice, Tidx, T>); #define REGISTER_GPU_KERNELS(T) \ REGISTER_KERNELS(int32, T); \ REGISTER_KERNELS(int64, T); TF_CALL_int32(REGISTER_GPU_KERNELS); TF_CALL_float(REGISTER_GPU_KERNELS); #undef REGISTER_GPU_KERNELS #undef REGISTER_KERNELS #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename Device, typename Tidx, typename T> class SparseBincountOp : public OpKernel { public: explicit SparseBincountOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("binary_output", &binary_output_)); } void Compute(OpKernelContext* ctx) override { const Tensor& indices = ctx->input(0); const auto values = ctx->input(1).flat<Tidx>(); const Tensor& dense_shape = ctx->input(2); const Tensor& size_t = ctx->input(3); const auto weights = ctx->input(4).flat<T>(); const int64 weights_size = weights.size(); Tidx size = size_t.scalar<Tidx>()(); OP_REQUIRES( ctx, size >= 0, errors::InvalidArgument("size (", size, ") must be non-negative")); bool is_1d = dense_shape.NumElements() == 1; Tensor* out_t; functor::SetZeroFunctor<Device, T> fill; if (is_1d) { OP_REQUIRES_OK(ctx, 
ctx->allocate_output(0, TensorShape({size}), &out_t)); auto out = out_t->flat<T>(); fill(ctx->eigen_device<Device>(), out); if (binary_output_) { OP_REQUIRES_OK(ctx, functor::BincountFunctor<Device, Tidx, T, true>::Compute( ctx, values, weights, out, size)); } else { OP_REQUIRES_OK( ctx, functor::BincountFunctor<Device, Tidx, T, false>::Compute( ctx, values, weights, out, size)); } } else { const auto shape = dense_shape.flat<int64>(); const int64 num_rows = shape(0); OP_REQUIRES_OK( ctx, ctx->allocate_output(0, TensorShape({num_rows, size}), &out_t)); const auto out = out_t->matrix<T>(); fill(ctx->eigen_device<Device>(), out_t->flat<T>()); const auto indices_mat = indices.matrix<int64>(); for (int64 i = 0; i < indices_mat.dimension(0); ++i) { const int64 batch = indices_mat(i, 0); const Tidx bin = values(i); if (bin < size) { if (binary_output_) { out(batch, bin) = T(1); } else { if (weights_size) { out(batch, bin) += weights(i); } else { out(batch, bin) += T(1); } } } } } } private: bool binary_output_; }; #define REGISTER_KERNELS(Tidx, T) \ REGISTER_KERNEL_BUILDER(Name("SparseBincount") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<Tidx>("Tidx"), \ SparseBincountOp<CPUDevice, Tidx, T>); #define REGISTER_CPU_KERNELS(T) \ REGISTER_KERNELS(int32, T); \ REGISTER_KERNELS(int64, T); TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #undef REGISTER_KERNELS template <typename Device, typename Tidx, typename T> class RaggedBincountOp : public OpKernel { public: explicit RaggedBincountOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("binary_output", &binary_output_)); } void Compute(OpKernelContext* ctx) override { const auto splits = ctx->input(0).flat<int64>(); const auto values = ctx->input(1).flat<Tidx>(); const Tensor& size_t = ctx->input(2); const auto weights = ctx->input(3).flat<T>(); const int64 weights_size = weights.size(); Tidx size = size_t.scalar<Tidx>()(); OP_REQUIRES( ctx, size >= 0, errors::InvalidArgument("size (", size, ") must be non-negative")); int num_rows = splits.size() - 1; int num_values = values.size(); int batch_idx = 0; Tensor* out_t; OP_REQUIRES_OK( ctx, ctx->allocate_output(0, TensorShape({num_rows, size}), &out_t)); functor::SetZeroFunctor<Device, T> fill; fill(ctx->eigen_device<Device>(), out_t->flat<T>()); const auto out = out_t->matrix<T>(); for (int idx = 0; idx < num_values; ++idx) { while (idx >= splits(batch_idx)) { batch_idx++; } Tidx bin = values(idx); OP_REQUIRES(ctx, bin >= 0, errors::InvalidArgument("Input must be non-negative")); if (bin < size) { if (binary_output_) { out(batch_idx - 1, bin) = T(1); } else { T value = (weights_size > 0) ? weights(idx) : T(1); out(batch_idx - 1, bin) += value; } } } } private: bool binary_output_; }; #define REGISTER_KERNELS(Tidx, T) \ REGISTER_KERNEL_BUILDER(Name("RaggedBincount") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<Tidx>("Tidx"), \ RaggedBincountOp<CPUDevice, Tidx, T>); #define REGISTER_CPU_KERNELS(T) \ REGISTER_KERNELS(int32, T); \ REGISTER_KERNELS(int64, T); TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #undef REGISTER_KERNELS } // end namespace tensorflow
null
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // See docs in ../ops/math_ops.cc. #include "tensorflow/core/platform/errors.h" #define EIGEN_USE_THREADS #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/bincount_op.h" #include "tensorflow/core/kernels/fill_functor.h" #include "tensorflow/core/lib/core/threadpool.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { using thread::ThreadPool; typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; namespace functor { template <typename Tidx, typename T> struct BincountFunctor<CPUDevice, Tidx, T, true> { static Status Compute(OpKernelContext* context, const typename TTypes<Tidx, 1>::ConstTensor& arr, const typename TTypes<T, 1>::ConstTensor& weights, typename TTypes<T, 1>::Tensor& output, const Tidx num_bins) { Tensor all_nonneg_t; TF_RETURN_IF_ERROR(context->allocate_temp( DT_BOOL, TensorShape({}), &all_nonneg_t, AllocatorAttributes())); all_nonneg_t.scalar<bool>().device(context->eigen_cpu_device()) = (arr >= Tidx(0)).all(); if (!all_nonneg_t.scalar<bool>()()) { return errors::InvalidArgument("Input arr must be non-negative!"); } // Allocate partial output bin sums for each worker thread. Worker ids in // ParallelForWithWorkerId range from 0 to NumThreads() inclusive. ThreadPool* thread_pool = context->device()->tensorflow_cpu_worker_threads()->workers; const int64 num_threads = thread_pool->NumThreads() + 1; Tensor partial_bins_t; TF_RETURN_IF_ERROR(context->allocate_temp( DT_BOOL, TensorShape({num_threads, num_bins}), &partial_bins_t)); auto partial_bins = partial_bins_t.matrix<bool>(); partial_bins.setZero(); thread_pool->ParallelForWithWorkerId( arr.size(), 8 /* cost */, [&](int64 start_ind, int64 limit_ind, int64 worker_id) { for (int64 i = start_ind; i < limit_ind; i++) { Tidx value = arr(i); if (value < num_bins) { partial_bins(worker_id, value) = true; } } }); // Sum the partial bins along the 0th axis. Eigen::array<int, 1> reduce_dim({0}); output.device(context->eigen_cpu_device()) = partial_bins.any(reduce_dim).cast<T>(); return Status::OK(); } }; template <typename Tidx, typename T> struct BincountFunctor<CPUDevice, Tidx, T, false> { static Status Compute(OpKernelContext* context, const typename TTypes<Tidx, 1>::ConstTensor& arr, const typename TTypes<T, 1>::ConstTensor& weights, typename TTypes<T, 1>::Tensor& output, const Tidx num_bins) { Tensor all_nonneg_t; TF_RETURN_IF_ERROR(context->allocate_temp( DT_BOOL, TensorShape({}), &all_nonneg_t, AllocatorAttributes())); all_nonneg_t.scalar<bool>().device(context->eigen_cpu_device()) = (arr >= Tidx(0)).all(); if (!all_nonneg_t.scalar<bool>()()) { return errors::InvalidArgument("Input arr must be non-negative!"); } // Allocate partial output bin sums for each worker thread. 
Worker ids in // ParallelForWithWorkerId range from 0 to NumThreads() inclusive. ThreadPool* thread_pool = context->device()->tensorflow_cpu_worker_threads()->workers; const int64 num_threads = thread_pool->NumThreads() + 1; Tensor partial_bins_t; TF_RETURN_IF_ERROR(context->allocate_temp( DataTypeToEnum<T>::value, TensorShape({num_threads, num_bins}), &partial_bins_t)); auto partial_bins = partial_bins_t.matrix<T>(); partial_bins.setZero(); thread_pool->ParallelForWithWorkerId( arr.size(), 8 /* cost */, [&](int64 start_ind, int64 limit_ind, int64 worker_id) { for (int64 i = start_ind; i < limit_ind; i++) { Tidx value = arr(i); if (value < num_bins) { if (weights.size()) { partial_bins(worker_id, value) += weights(i); } else { // Complex numbers don't support "++". partial_bins(worker_id, value) += T(1); } } } }); // Sum the partial bins along the 0th axis. Eigen::array<int, 1> reduce_dim({0}); output.device(context->eigen_cpu_device()) = partial_bins.sum(reduce_dim); return Status::OK(); } }; template <typename Tidx, typename T, bool binary_output> struct BincountReduceFunctor<CPUDevice, Tidx, T, binary_output> { static Status Compute(OpKernelContext* context, const typename TTypes<Tidx, 2>::ConstTensor& in, const typename TTypes<T, 2>::ConstTensor& weights, typename TTypes<T, 2>::Tensor& out, const Tidx num_bins) { const int num_rows = out.dimension(0); const int num_cols = in.dimension(1); ThreadPool* thread_pool = context->device()->tensorflow_cpu_worker_threads()->workers; thread_pool->ParallelForWithWorkerId( num_rows, 8 /* cost */, [&](int64 start_row, int64 end_row, int64 worker_id) { for (int64 i = start_row; i < end_row; ++i) { for (int64 j = 0; j < num_cols; ++j) { Tidx value = in(i, j); if (value < num_bins) { if (binary_output) { out(i, value) = T(1); } else { if (weights.size()) { out(i, value) += weights(i, j); } else { out(i, value) += T(1); } } } } } }); return Status::OK(); } }; } // namespace functor template <typename Device, typename T> class BincountOp : public OpKernel { public: explicit BincountOp(OpKernelConstruction* ctx) : OpKernel(ctx) {} void Compute(OpKernelContext* ctx) override { const Tensor& arr_t = ctx->input(0); const Tensor& size_tensor = ctx->input(1); OP_REQUIRES(ctx, size_tensor.dims() == 0, errors::InvalidArgument("Shape must be rank 0 but is rank ", size_tensor.dims())); int32 size = size_tensor.scalar<int32>()(); OP_REQUIRES( ctx, size >= 0, errors::InvalidArgument("size (", size, ") must be non-negative")); const Tensor& weights_t = ctx->input(2); const auto arr = arr_t.flat<int32>(); const auto weights = weights_t.flat<T>(); Tensor* output_t; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({size}), &output_t)); auto output = output_t->flat<T>(); OP_REQUIRES_OK(ctx, functor::BincountFunctor<Device, int32, T, false>::Compute( ctx, arr, weights, output, size)); } }; #define REGISTER_KERNELS(type) \ REGISTER_KERNEL_BUILDER( \ Name("Bincount").Device(DEVICE_CPU).TypeConstraint<type>("T"), \ BincountOp<CPUDevice, type>) TF_CALL_NUMBER_TYPES(REGISTER_KERNELS); #undef REGISTER_KERNELS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_KERNELS(type) \ REGISTER_KERNEL_BUILDER(Name("Bincount") \ .Device(DEVICE_GPU) \ .HostMemory("size") \ .TypeConstraint<type>("T"), \ BincountOp<GPUDevice, type>) TF_CALL_int32(REGISTER_KERNELS); TF_CALL_float(REGISTER_KERNELS); #undef REGISTER_KERNELS #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename Device, typename Tidx, typename T> class DenseBincountOp : public OpKernel { public: explicit 
DenseBincountOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("binary_output", &binary_output_)); } void Compute(OpKernelContext* ctx) override { const Tensor& data = ctx->input(0); OP_REQUIRES(ctx, data.dims() <= 2, errors::InvalidArgument( "Shape must be at most rank 2 but is rank ", data.dims())); const Tensor& size_t = ctx->input(1); const Tensor& weights = ctx->input(2); Tidx size = size_t.scalar<Tidx>()(); OP_REQUIRES( ctx, size >= 0, errors::InvalidArgument("size (", size, ") must be non-negative")); Tensor* out_t; functor::SetZeroFunctor<Device, T> fill; if (data.dims() == 1) { OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({size}), &out_t)); auto out = out_t->flat<T>(); fill(ctx->eigen_device<Device>(), out); if (binary_output_) { OP_REQUIRES_OK( ctx, functor::BincountFunctor<Device, Tidx, T, true>::Compute( ctx, data.flat<Tidx>(), weights.flat<T>(), out, size)); } else { OP_REQUIRES_OK( ctx, functor::BincountFunctor<Device, Tidx, T, false>::Compute( ctx, data.flat<Tidx>(), weights.flat<T>(), out, size)); } } else if (data.dims() == 2) { const int64 num_rows = data.dim_size(0); auto weight_matrix = (weights.NumElements() == 0) ? weights.shaped<T, 2>(gtl::InlinedVector<int64, 2>(2, 0)) : weights.matrix<T>(); OP_REQUIRES_OK( ctx, ctx->allocate_output(0, TensorShape({num_rows, size}), &out_t)); auto out = out_t->matrix<T>(); fill(ctx->eigen_device<Device>(), out_t->flat<T>()); if (binary_output_) { OP_REQUIRES_OK( ctx, functor::BincountReduceFunctor<Device, Tidx, T, true>::Compute( ctx, data.matrix<Tidx>(), weight_matrix, out, size)); } else { OP_REQUIRES_OK( ctx, functor::BincountReduceFunctor<Device, Tidx, T, false>::Compute( ctx, data.matrix<Tidx>(), weight_matrix, out, size)); } } } private: bool binary_output_; }; #define REGISTER_KERNELS(Tidx, T) \ REGISTER_KERNEL_BUILDER(Name("DenseBincount") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<Tidx>("Tidx"), \ DenseBincountOp<CPUDevice, Tidx, T>); #define REGISTER_CPU_KERNELS(T) \ REGISTER_KERNELS(int32, T); \ REGISTER_KERNELS(int64, T); TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #undef REGISTER_KERNELS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_KERNELS(Tidx, T) \ REGISTER_KERNEL_BUILDER(Name("DenseBincount") \ .Device(DEVICE_GPU) \ .HostMemory("size") \ .TypeConstraint<T>("T") \ .TypeConstraint<Tidx>("Tidx"), \ DenseBincountOp<GPUDevice, Tidx, T>); #define REGISTER_GPU_KERNELS(T) \ REGISTER_KERNELS(int32, T); \ REGISTER_KERNELS(int64, T); TF_CALL_int32(REGISTER_GPU_KERNELS); TF_CALL_float(REGISTER_GPU_KERNELS); #undef REGISTER_GPU_KERNELS #undef REGISTER_KERNELS #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename Device, typename Tidx, typename T> class SparseBincountOp : public OpKernel { public: explicit SparseBincountOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("binary_output", &binary_output_)); } void Compute(OpKernelContext* ctx) override { const Tensor& indices = ctx->input(0); const auto values = ctx->input(1).flat<Tidx>(); const Tensor& dense_shape = ctx->input(2); const Tensor& size_t = ctx->input(3); const auto weights = ctx->input(4).flat<T>(); const int64 weights_size = weights.size(); Tidx size = size_t.scalar<Tidx>()(); OP_REQUIRES( ctx, size >= 0, errors::InvalidArgument("size (", size, ") must be non-negative")); bool is_1d = dense_shape.NumElements() == 1; Tensor* out_t; functor::SetZeroFunctor<Device, T> fill; if (is_1d) { OP_REQUIRES_OK(ctx, 
ctx->allocate_output(0, TensorShape({size}), &out_t)); auto out = out_t->flat<T>(); fill(ctx->eigen_device<Device>(), out); if (binary_output_) { OP_REQUIRES_OK(ctx, functor::BincountFunctor<Device, Tidx, T, true>::Compute( ctx, values, weights, out, size)); } else { OP_REQUIRES_OK( ctx, functor::BincountFunctor<Device, Tidx, T, false>::Compute( ctx, values, weights, out, size)); } } else { const auto shape = dense_shape.flat<int64>(); const int64 num_rows = shape(0); OP_REQUIRES_OK( ctx, ctx->allocate_output(0, TensorShape({num_rows, size}), &out_t)); const auto out = out_t->matrix<T>(); fill(ctx->eigen_device<Device>(), out_t->flat<T>()); const auto indices_mat = indices.matrix<int64>(); for (int64 i = 0; i < indices_mat.dimension(0); ++i) { const int64 batch = indices_mat(i, 0); const Tidx bin = values(i); if (bin < size) { if (binary_output_) { out(batch, bin) = T(1); } else { if (weights_size) { out(batch, bin) += weights(i); } else { out(batch, bin) += T(1); } } } } } } private: bool binary_output_; }; #define REGISTER_KERNELS(Tidx, T) \ REGISTER_KERNEL_BUILDER(Name("SparseBincount") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<Tidx>("Tidx"), \ SparseBincountOp<CPUDevice, Tidx, T>); #define REGISTER_CPU_KERNELS(T) \ REGISTER_KERNELS(int32, T); \ REGISTER_KERNELS(int64, T); TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #undef REGISTER_KERNELS template <typename Device, typename Tidx, typename T> class RaggedBincountOp : public OpKernel { public: explicit RaggedBincountOp(OpKernelConstruction* ctx) : OpKernel(ctx) { OP_REQUIRES_OK(ctx, ctx->GetAttr("binary_output", &binary_output_)); } void Compute(OpKernelContext* ctx) override { const auto splits = ctx->input(0).flat<int64>(); const auto values = ctx->input(1).flat<Tidx>(); const Tensor& size_t = ctx->input(2); const auto weights = ctx->input(3).flat<T>(); const int64 weights_size = weights.size(); Tidx size = size_t.scalar<Tidx>()(); OP_REQUIRES( ctx, size >= 0, errors::InvalidArgument("size (", size, ") must be non-negative")); int num_rows = splits.size() - 1; int num_values = values.size(); int batch_idx = 0; OP_REQUIRES(ctx, splits(0) == 0, errors::InvalidArgument("Splits must start with 0, not with ", splits(0))); OP_REQUIRES(ctx, splits(num_rows) == num_values, errors::InvalidArgument( "Splits must end with the number of values, got ", splits(num_rows), " instead of ", num_values)); Tensor* out_t; OP_REQUIRES_OK( ctx, ctx->allocate_output(0, TensorShape({num_rows, size}), &out_t)); functor::SetZeroFunctor<Device, T> fill; fill(ctx->eigen_device<Device>(), out_t->flat<T>()); const auto out = out_t->matrix<T>(); for (int idx = 0; idx < num_values; ++idx) { while (idx >= splits(batch_idx)) { batch_idx++; } Tidx bin = values(idx); OP_REQUIRES(ctx, bin >= 0, errors::InvalidArgument("Input must be non-negative")); if (bin < size) { if (binary_output_) { out(batch_idx - 1, bin) = T(1); } else { T value = (weights_size > 0) ? weights(idx) : T(1); out(batch_idx - 1, bin) += value; } } } } private: bool binary_output_; }; #define REGISTER_KERNELS(Tidx, T) \ REGISTER_KERNEL_BUILDER(Name("RaggedBincount") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<Tidx>("Tidx"), \ RaggedBincountOp<CPUDevice, Tidx, T>); #define REGISTER_CPU_KERNELS(T) \ REGISTER_KERNELS(int32, T); \ REGISTER_KERNELS(int64, T); TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #undef REGISTER_KERNELS } // end namespace tensorflow
null
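For reference: compared with the vulnerable source earlier in this record, the fixed ground_truth above adds a guard in RaggedBincountOp that validates the ragged splits before they are used to derive row indices into the [num_rows, size] output. A minimal standalone sketch of that guard, with illustrative names (this is not TensorFlow code, only an assumption-free restatement of the check shown above):

#include <cstdint>
#include <stdexcept>
#include <vector>

// Rejects splits that cannot partition num_values values into rows: a valid
// splits vector must start at 0 and end at num_values. Without this check,
// the "batch_idx - 1" indexing in the bincount loop can write outside the
// allocated output matrix.
void ValidateRaggedSplits(const std::vector<int64_t>& splits, int64_t num_values) {
  if (splits.empty() || splits.front() != 0)
    throw std::invalid_argument("Splits must start with 0");
  if (splits.back() != num_values)
    throw std::invalid_argument("Splits must end with the number of values");
}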
248
CWE-787
CVE-2021-29520
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #define USE_EIGEN_TENSOR #define EIGEN_USE_THREADS #include "tensorflow/core/framework/kernel_shape_util.h" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_slice.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/kernels/conv_2d.h" #include "tensorflow/core/kernels/conv_3d.h" #include "tensorflow/core/kernels/conv_grad_ops.h" #include "tensorflow/core/kernels/conv_grad_shape_utils.h" #include "tensorflow/core/kernels/conv_ops_gpu.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/inlined_vector.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/tensor_format.h" #include "tensorflow/core/util/use_cudnn.h" #include "tensorflow/core/util/work_sharder.h" #if defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL) #include "tensorflow/core/kernels/eigen_contraction_kernel.h" #endif #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #include "tensorflow/core/platform/stream_executor.h" using stream_executor::dnn::DimIndex; #include "tensorflow/core/protobuf/autotuning.pb.h" #include "tensorflow/core/util/proto/proto_utils.h" #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM #if GOOGLE_CUDA #include "third_party/gpus/cudnn/cudnn.h" #include "tensorflow/stream_executor/gpu/gpu_asm_opts.h" #include "tensorflow/stream_executor/gpu/redzone_allocator.h" #include "tensorflow/stream_executor/tf_allocator_adapter.h" #endif // GOOGLE_CUDA namespace { // TODO(ezhulenev): Split this file into conv_grad_filter_ops_3d.cc and // conv_grad_input_ops_3d.cc. // TODO(ezhulenev): Generalize Col2im and Im2col for 2-d and 3-d kernels. // "Depth" is already used for the channel dimension, so for the third spatial // dimension in this file we use "plane", although in NDHWC layout it's // indicated with a "D". // Returns in 'im_data' (assumed to be zero-initialized) image patch in storage // order (planes, height, width, depth), constructed from patches in 'col_data', // which is required to be in storage order (out_planes * out_height * // out_width, filter_planes, filter_height, filter_width, in_depth). // // Based on 2-dimensional implementation written by Yangqing Jia (jiayq). 
template <typename T> void Col2im(const T* col_data, const int depth, const int planes, const int height, const int width, const int filter_p, const int filter_h, const int filter_w, const int pad_pt, const int pad_t, const int pad_l, const int pad_pb, const int pad_b, const int pad_r, const int stride_p, const int stride_h, const int stride_w, T* im_data) { const int planes_col = (planes + pad_pt + pad_pb - filter_p) / stride_p + 1; const int height_col = (height + pad_t + pad_b - filter_h) / stride_h + 1; const int width_col = (width + pad_l + pad_r - filter_w) / stride_w + 1; int p_pad = -pad_pt; for (int p = 0; p < planes_col; ++p) { int h_pad = -pad_t; for (int h = 0; h < height_col; ++h) { int w_pad = -pad_l; for (int w = 0; w < width_col; ++w) { T* im_patch_data = im_data + (p_pad * height * width + h_pad * width + w_pad) * depth; for (int ip = p_pad; ip < p_pad + filter_p; ++ip) { for (int ih = h_pad; ih < h_pad + filter_h; ++ih) { for (int iw = w_pad; iw < w_pad + filter_w; ++iw) { if (ip >= 0 && ip < planes && ih >= 0 && ih < height && iw >= 0 && iw < width) { for (int i = 0; i < depth; ++i) { im_patch_data[i] += col_data[i]; } } im_patch_data += depth; col_data += depth; } // Jump over remaining number of depth. im_patch_data += depth * (width - filter_w); } // Jump over remaining number of (depth * width). im_patch_data += (depth * width) * (height - filter_h); } w_pad += stride_w; } h_pad += stride_h; } p_pad += stride_p; } } // Returns in 'col_data', image patches in storage order (planes, height, width, // depth) extracted from image at 'input_data', which is required to be in // storage order (batch, planes, height, width, depth). // // Based on 2-dimensional implementation written by Yangqing Jia (jiayq). template <typename T> void Im2col(const T* input_data, const int depth, const int planes, const int height, const int width, const int filter_p, const int filter_h, const int filter_w, const int pad_pt, const int pad_t, const int pad_l, const int pad_pb, const int pad_b, const int pad_r, const int stride_p, const int stride_h, const int stride_w, T* col_data) { const int planes_col = (planes + pad_pt + pad_pb - filter_p) / stride_p + 1; const int height_col = (height + pad_t + pad_b - filter_h) / stride_h + 1; const int width_col = (width + pad_l + pad_r - filter_w) / stride_w + 1; int p_pad = -pad_pt; for (int p = 0; p < planes_col; ++p) { int h_pad = -pad_t; for (int h = 0; h < height_col; ++h) { int w_pad = -pad_l; for (int w = 0; w < width_col; ++w) { for (int ip = p_pad; ip < p_pad + filter_p; ++ip) { for (int ih = h_pad; ih < h_pad + filter_h; ++ih) { for (int iw = w_pad; iw < w_pad + filter_w; ++iw) { if (ip >= 0 && ip < planes && ih >= 0 && ih < height && iw >= 0 && iw < width) { memcpy(col_data, input_data + (ip * height * width + ih * width + iw) * depth, sizeof(T) * depth); } else { // This should be simply padded with zero. memset(col_data, 0, sizeof(T) * depth); } col_data += depth; } } } w_pad += stride_w; } h_pad += stride_h; } p_pad += stride_p; } } } // namespace namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; // Backprop for input that offloads computation to // Eigen::CuboidConvolutionBackwardInput. template <typename Device, class T> class Conv3DBackpropInputOp : public OpKernel { public: explicit Conv3DBackpropInputOp(OpKernelConstruction* context) : OpKernel(context), data_format_(FORMAT_NHWC), takes_shape_(type_string().find("V2") != std::string::npos) { // data_format is only available in V2. 
if (takes_shape_) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Conv3DBackpropInputOpV2 only supports NDHWC on the CPU.")); } OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilation_)); OP_REQUIRES(context, dilation_.size() == 5, errors::InvalidArgument("Dilation rates field must " "specify 5 dimensions")); OP_REQUIRES(context, (GetTensorDim(dilation_, data_format_, 'C') == 1 && GetTensorDim(dilation_, data_format_, 'N') == 1), errors::InvalidArgument( "Current implementation does not yet support " "dilation rates in the batch and depth dimensions.")); // TODO(yangzihao): Add CPU version of dilated conv 3D. OP_REQUIRES(context, (GetTensorDim(dilation_, data_format_, '0') == 1 && GetTensorDim(dilation_, data_format_, '1') == 1 && GetTensorDim(dilation_, data_format_, '2') == 1), errors::InvalidArgument( "Current CPU implementation does not yet support " "dilation rates larger than 1.")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window strides field must " "specify 5 dimensions")); OP_REQUIRES( context, (GetTensorDim(stride_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::InvalidArgument("Current implementation does not yet support " "strides in the batch and depth dimensions.")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); } void Compute(OpKernelContext* context) override { const Tensor& filter = context->input(1); const TensorShape& filter_shape = filter.shape(); const Tensor& out_backprop = context->input(2); const TensorShape& out_backprop_shape = out_backprop.shape(); TensorShape input_shape; if (takes_shape_) { const Tensor& input_sizes = context->input(0); // tensor::MakeShape is able to handle both DT_INT32 and DT_INT64 for // input_sizes. OP_REQUIRES_OK(context, tensor::MakeShape(input_sizes, &input_shape)); } else { input_shape = context->input(0).shape(); } ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( "Conv3DBackpropInputOp", /*num_spatial_dims=*/3, input_shape, filter_shape, out_backprop_shape, stride_, padding_, data_format_, &dims)); Tensor* in_backprop; OP_REQUIRES_OK(context, context->allocate_output(0, input_shape, &in_backprop)); functor::CuboidConvolutionBackwardInput<Device, T>()( context->eigen_device<Device>(), in_backprop->tensor<T, 5>(), // input_backward filter.tensor<T, 5>(), // filter out_backprop.tensor<T, 5>(), // output_backward static_cast<int>(dims.spatial_dims[0].stride), // stride_planes static_cast<int>(dims.spatial_dims[1].stride), // stride_rows static_cast<int>(dims.spatial_dims[2].stride)); // stride_cols } private: std::vector<int32> dilation_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; bool takes_shape_; TF_DISALLOW_COPY_AND_ASSIGN(Conv3DBackpropInputOp); }; // Custom backprop for input that explicitly does the work sharding and calls // Eigen only to multiply matrices. template <typename Device, class T> class Conv3DCustomBackpropInputOp : public OpKernel { // Limit the maximum size of allocated temporary buffer to // kMaxTempAllocationOverhead times the size of the input tensors (input, // filter, out_backprop). 
If the size of the temporary buffer exceeds this // limit, fallback on Eigen implementation. static constexpr int kMaxTempAllocationOverhead = 25; public: explicit Conv3DCustomBackpropInputOp(OpKernelConstruction* context) : OpKernel(context), data_format_(FORMAT_NHWC), takes_shape_(type_string().find("V2") != std::string::npos) { // data_format is only available in V2. if (takes_shape_) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Conv3DBackpropInputOpV2 only supports NDHWC on the CPU.")); } OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilation_)); OP_REQUIRES(context, dilation_.size() == 5, errors::InvalidArgument("Dilation rates field must " "specify 5 dimensions")); OP_REQUIRES(context, (GetTensorDim(dilation_, data_format_, 'C') == 1 && GetTensorDim(dilation_, data_format_, 'N') == 1), errors::InvalidArgument( "Current implementation does not yet support " "dilation rates in the batch and depth dimensions.")); // TODO(yangzihao): Add CPU version of dilated conv 3D. OP_REQUIRES(context, (GetTensorDim(dilation_, data_format_, '0') == 1 && GetTensorDim(dilation_, data_format_, '1') == 1 && GetTensorDim(dilation_, data_format_, '2') == 1), errors::InvalidArgument( "Current CPU implementation does not yet support " "dilation rates larger than 1.")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window strides field must " "specify 5 dimensions")); OP_REQUIRES( context, (GetTensorDim(stride_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::InvalidArgument("Current implementation does not yet support " "strides in the batch and depth dimensions.")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); } void Compute(OpKernelContext* context) override { const Tensor& filter = context->input(1); const TensorShape& filter_shape = filter.shape(); const Tensor& out_backprop = context->input(2); const TensorShape& out_backprop_shape = out_backprop.shape(); TensorShape input_shape; if (takes_shape_) { const Tensor& input_sizes = context->input(0); // tensor::MakeShape is able to handle both DT_INT32 and DT_INT64 for // input_sizes. 
OP_REQUIRES_OK(context, tensor::MakeShape(input_sizes, &input_shape)); } else { input_shape = context->input(0).shape(); } ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( "Conv3DBackpropInputOp", /*num_spatial_dims=*/3, input_shape, filter_shape, out_backprop_shape, stride_, padding_, data_format_, &dims)); Tensor* in_backprop; OP_REQUIRES_OK(context, context->allocate_output(0, input_shape, &in_backprop)); int64 top_pad_planes, bottom_pad_planes; int64 top_pad_rows, bottom_pad_rows; int64 left_pad_cols, right_pad_cols; OP_REQUIRES_OK(context, GetWindowedOutputSizeVerbose( dims.spatial_dims[0].input_size, dims.spatial_dims[0].filter_size, dims.spatial_dims[0].stride, padding_, &dims.spatial_dims[0].output_size, &top_pad_planes, &bottom_pad_planes)); OP_REQUIRES_OK(context, GetWindowedOutputSizeVerbose( dims.spatial_dims[1].input_size, dims.spatial_dims[1].filter_size, dims.spatial_dims[1].stride, padding_, &dims.spatial_dims[1].output_size, &top_pad_rows, &bottom_pad_rows)); OP_REQUIRES_OK(context, GetWindowedOutputSizeVerbose( dims.spatial_dims[2].input_size, dims.spatial_dims[2].filter_size, dims.spatial_dims[2].stride, padding_, &dims.spatial_dims[2].output_size, &left_pad_cols, &right_pad_cols)); // TODO(ezhulenev): Extract work size and shard estimation to shared // functions in conv_grad_ops, and update 2d convolution backprop. // The total dimension size of each kernel. const int64 filter_total_size = dims.spatial_dims[0].filter_size * dims.spatial_dims[1].filter_size * dims.spatial_dims[2].filter_size * dims.in_depth; // The output image size is the spatial size of the output. const int64 output_image_size = dims.spatial_dims[0].output_size * dims.spatial_dims[1].output_size * dims.spatial_dims[2].output_size; const auto cache_sizes = Eigen::internal::CacheSizes(); const ptrdiff_t l3_cache_size = cache_sizes.m_l3; // Use L3 cache size as target working set size. const size_t target_working_set_size = l3_cache_size / sizeof(T); // Calculate size of matrices involved in MatMul: C = A x B. const int64 size_A = output_image_size * dims.out_depth; const int64 size_B = filter_total_size * dims.out_depth; const int64 size_C = output_image_size * filter_total_size; const int64 work_unit_size = size_A + size_B + size_C; auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); // Use parallel tensor contractions if there is no batching. // // Compared to Conv2D code, this version is missing work size estimation. In // benchmarks I didn't find a case when it's beneficial to run parallel // contraction compared to sharding and matmuls. const bool use_parallel_contraction = dims.batch_size == 1; const size_t shard_size = use_parallel_contraction ? 1 : (target_working_set_size + work_unit_size - 1) / work_unit_size; // Total number of elements in all the tensors used by this kernel. int64 total_tensor_elements = input_shape.num_elements() + filter_shape.num_elements() + out_backprop_shape.num_elements(); // Shape of the temporary workspace buffer. TensorShape col_buffer_shape = {static_cast<int64>(shard_size), static_cast<int64>(output_image_size), static_cast<int64>(filter_total_size)}; int64 col_buffer_elements = col_buffer_shape.num_elements(); // If the temporary allocation overhead is too large, fallback on Eigen // implementation which requires much less memory. 
int64 col_buffer_overhead = col_buffer_elements / total_tensor_elements; if (col_buffer_overhead > kMaxTempAllocationOverhead) { VLOG(2) << "Fallback on Eigen implementation of Conv3DBackpropInputOp: " "col_buffer_overhead=" << col_buffer_overhead; functor::CuboidConvolutionBackwardInput<Device, T>()( context->eigen_device<Device>(), in_backprop->tensor<T, 5>(), // input_backward filter.tensor<T, 5>(), // filter out_backprop.tensor<T, 5>(), // output_backward static_cast<int>(dims.spatial_dims[0].stride), // stride_planes static_cast<int>(dims.spatial_dims[1].stride), // stride_rows static_cast<int>(dims.spatial_dims[2].stride)); // stride_cols return; } Tensor col_buffer; OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, col_buffer_shape, &col_buffer)); // The input offset corresponding to a single input image. const int64 input_offset = dims.spatial_dims[0].input_size * dims.spatial_dims[1].input_size * dims.spatial_dims[2].input_size * dims.in_depth; // The output offset corresponding to a single output image. const int64 output_offset = dims.spatial_dims[0].output_size * dims.spatial_dims[1].output_size * dims.spatial_dims[2].output_size * dims.out_depth; const T* filter_data = filter.template flat<T>().data(); T* col_buffer_data = col_buffer.template flat<T>().data(); const T* out_backprop_data = out_backprop.template flat<T>().data(); auto in_backprop_flat = in_backprop->template flat<T>(); T* input_backprop_data = in_backprop_flat.data(); in_backprop_flat.device(context->eigen_device<Device>()) = in_backprop_flat.constant(T(0)); if (use_parallel_contraction) { typedef Eigen::TensorMap<Eigen::Tensor<T, 2, Eigen::RowMajor>, Eigen::Unaligned> TensorMap; typedef Eigen::TensorMap<Eigen::Tensor<const T, 2, Eigen::RowMajor>, Eigen::Unaligned> ConstTensorMap; // Initialize contraction dims (we need to transpose 'B' below). Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> contract_dims; contract_dims[0].first = 1; contract_dims[0].second = 1; for (int image_id = 0; image_id < dims.batch_size; ++image_id) { // Compute gradient into col_buffer. TensorMap C(col_buffer_data, output_image_size, filter_total_size); ConstTensorMap A(out_backprop_data + output_offset * image_id, output_image_size, dims.out_depth); ConstTensorMap B(filter_data, filter_total_size, dims.out_depth); C.device(context->eigen_cpu_device()) = A.contract(B, contract_dims); Col2im<T>(col_buffer_data, dims.in_depth, // Input spatial dimensions. dims.spatial_dims[0].input_size, // input planes dims.spatial_dims[1].input_size, // input rows dims.spatial_dims[2].input_size, // input cols // Filter spatial dimensions. dims.spatial_dims[0].filter_size, // filter planes dims.spatial_dims[1].filter_size, // filter rows dims.spatial_dims[2].filter_size, // filter cols // Spatial padding. top_pad_planes, top_pad_rows, left_pad_cols, bottom_pad_planes, bottom_pad_rows, right_pad_cols, // Spatial striding. 
dims.spatial_dims[0].stride, // stride planes dims.spatial_dims[1].stride, // stride rows dims.spatial_dims[2].stride, // stride cols input_backprop_data); input_backprop_data += input_offset; } } else { typedef Eigen::Map< Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> MatrixMap; typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> ConstMatrixMap; for (int image_id = 0; image_id < dims.batch_size; image_id += shard_size) { const int shard_limit = std::min(static_cast<int>(shard_size), static_cast<int>(dims.batch_size) - image_id); auto shard = [&dims, &top_pad_planes, &top_pad_rows, &left_pad_cols, &bottom_pad_planes, &bottom_pad_rows, &right_pad_cols, &output_image_size, &filter_total_size, &input_backprop_data, &col_buffer_data, &out_backprop_data, &filter_data, &input_offset, &output_offset, &size_C](int64 start, int64 limit) { for (int shard_id = start; shard_id < limit; ++shard_id) { T* im2col_buf = col_buffer_data + shard_id * size_C; T* input_data = input_backprop_data + shard_id * input_offset; const T* out_data = out_backprop_data + shard_id * output_offset; // Compute gradient into 'im2col_buf'. MatrixMap C(im2col_buf, output_image_size, filter_total_size); ConstMatrixMap A(out_data, output_image_size, dims.out_depth); ConstMatrixMap B(filter_data, filter_total_size, dims.out_depth); C.noalias() = A * B.transpose(); Col2im<T>(im2col_buf, dims.in_depth, // Input spatial dimensions. dims.spatial_dims[0].input_size, // input planes dims.spatial_dims[1].input_size, // input rows dims.spatial_dims[2].input_size, // input cols // Filter spatial dimensions. dims.spatial_dims[0].filter_size, // filter planes dims.spatial_dims[1].filter_size, // filter rows dims.spatial_dims[2].filter_size, // filter cols // Spatial padding. top_pad_planes, top_pad_rows, left_pad_cols, bottom_pad_planes, bottom_pad_rows, right_pad_cols, // Spatial striding. dims.spatial_dims[0].stride, // stride planes dims.spatial_dims[1].stride, // stride rows dims.spatial_dims[2].stride, // stride cols input_data); } }; Shard(worker_threads.num_threads, worker_threads.workers, shard_limit, work_unit_size, shard); input_backprop_data += input_offset * shard_limit; out_backprop_data += output_offset * shard_limit; } } } private: std::vector<int32> dilation_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; bool takes_shape_; TF_DISALLOW_COPY_AND_ASSIGN(Conv3DCustomBackpropInputOp); }; // Custom backrop input kernel is 30% - 4x faster when compiled with AVX2 than // default Eigen implementation (at the cost of ~2x-8x peak memory usage). 
#define REGISTER_CPU_KERNEL(T) \ REGISTER_KERNEL_BUILDER( \ Name("Conv3DBackpropInput").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ Conv3DCustomBackpropInputOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER( \ Name("Conv3DBackpropInputV2").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ Conv3DCustomBackpropInputOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropInput") \ .Device(DEVICE_CPU) \ .Label("custom") \ .TypeConstraint<T>("T"), \ Conv3DCustomBackpropInputOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropInputV2") \ .Device(DEVICE_CPU) \ .Label("custom") \ .TypeConstraint<T>("T"), \ Conv3DCustomBackpropInputOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropInput") \ .Device(DEVICE_CPU) \ .Label("eigen_tensor") \ .TypeConstraint<T>("T"), \ Conv3DBackpropInputOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropInputV2") \ .Device(DEVICE_CPU) \ .Label("eigen_tensor") \ .TypeConstraint<T>("T"), \ Conv3DBackpropInputOp<CPUDevice, T>); TF_CALL_half(REGISTER_CPU_KERNEL); TF_CALL_float(REGISTER_CPU_KERNEL); TF_CALL_double(REGISTER_CPU_KERNEL); #undef REGISTER_CPU_KERNEL // Backprop for filter that offloads computation to // Eigen::CuboidConvolutionBackwardFilter. template <typename Device, class T> class Conv3DBackpropFilterOp : public OpKernel { public: explicit Conv3DBackpropFilterOp(OpKernelConstruction* context) : OpKernel(context), data_format_(FORMAT_NHWC), takes_shape_(type_string().find("V2") != std::string::npos) { // data_format is only available in V2. if (takes_shape_) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Conv3DBackpropFilterOpV2 only supports NDHWC on the CPU.")); } OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilation_)); OP_REQUIRES(context, dilation_.size() == 5, errors::InvalidArgument("Dilation rates field must " "specify 5 dimensions")); OP_REQUIRES(context, (GetTensorDim(dilation_, data_format_, 'C') == 1 && GetTensorDim(dilation_, data_format_, 'N') == 1), errors::InvalidArgument( "Current implementation does not yet support " "dilation rates in the batch and depth dimensions.")); // TODO(yangzihao): Add CPU version of dilated conv 3D. 
OP_REQUIRES(context, (GetTensorDim(dilation_, data_format_, '0') == 1 && GetTensorDim(dilation_, data_format_, '1') == 1 && GetTensorDim(dilation_, data_format_, '2') == 1), errors::InvalidArgument( "Current CPU implementation does not yet support " "dilation rates larger than 1.")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window strides field must " "specify 5 dimensions")); OP_REQUIRES( context, (GetTensorDim(stride_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::InvalidArgument("Current implementation does not yet support " "strides in the batch and depth dimensions.")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const TensorShape& input_shape = input.shape(); const Tensor& out_backprop = context->input(2); const TensorShape& out_backprop_shape = out_backprop.shape(); TensorShape filter_shape; if (takes_shape_) { const Tensor& filter_sizes = context->input(1); OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape( filter_sizes.vec<int32>(), &filter_shape)); } else { filter_shape = context->input(1).shape(); } ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( "Conv3DBackpropFilterOp", /*num_spatial_dims=*/3, input_shape, filter_shape, out_backprop_shape, stride_, padding_, data_format_, &dims)); Tensor* filter_backprop; OP_REQUIRES_OK(context, context->allocate_output(0, filter_shape, &filter_backprop)); if (input_shape.num_elements() == 0) { filter_backprop->template flat<T>().setZero(); return; } functor::CuboidConvolutionBackwardFilter<Device, T>()( context->eigen_device<Device>(), filter_backprop->tensor<T, 5>(), // filter_backward input.tensor<T, 5>(), // input out_backprop.tensor<T, 5>(), // output_backward static_cast<int>(dims.spatial_dims[0].stride), // stride_planes static_cast<int>(dims.spatial_dims[1].stride), // stride_rows static_cast<int>(dims.spatial_dims[2].stride)); // stride_cols } private: std::vector<int32> dilation_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; bool takes_shape_; TF_DISALLOW_COPY_AND_ASSIGN(Conv3DBackpropFilterOp); }; // Custom backprop for filter that explicitly does the work sharding and calls // Eigen only to multiply matrices. template <typename Device, class T> class Conv3DCustomBackpropFilterOp : public OpKernel { // Limit the maximum size of allocated temporary buffer to // kMaxTempAllocationOverhead times the size of the input tensors (input, // filter, out_backprop). If the size of the temporary buffer exceeds this // limit, fallback on Eigen implementation. static constexpr int kMaxTempAllocationOverhead = 25; public: explicit Conv3DCustomBackpropFilterOp(OpKernelConstruction* context) : OpKernel(context), data_format_(FORMAT_NHWC), takes_shape_(type_string().find("V2") != std::string::npos) { // data_format is only available in V2. 
if (takes_shape_) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Conv3DBackpropFilterOpV2 only supports NDHWC on the CPU.")); } OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilation_)); OP_REQUIRES(context, dilation_.size() == 5, errors::InvalidArgument("Dilation rates field must " "specify 5 dimensions")); OP_REQUIRES(context, (GetTensorDim(dilation_, data_format_, 'C') == 1 && GetTensorDim(dilation_, data_format_, 'N') == 1), errors::InvalidArgument( "Current implementation does not yet support " "dilation rates in the batch and depth dimensions.")); // TODO(yangzihao): Add CPU version of dilated conv 3D. OP_REQUIRES(context, (GetTensorDim(dilation_, data_format_, '0') == 1 && GetTensorDim(dilation_, data_format_, '1') == 1 && GetTensorDim(dilation_, data_format_, '2') == 1), errors::InvalidArgument( "Current CPU implementation does not yet support " "dilation rates larger than 1.")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window strides field must " "specify 5 dimensions")); OP_REQUIRES( context, (GetTensorDim(stride_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::InvalidArgument("Current implementation does not yet support " "strides in the batch and depth dimensions.")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const TensorShape& input_shape = input.shape(); const Tensor& out_backprop = context->input(2); const TensorShape& out_backprop_shape = out_backprop.shape(); TensorShape filter_shape; if (takes_shape_) { const Tensor& filter_sizes = context->input(1); OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape( filter_sizes.vec<int32>(), &filter_shape)); } else { filter_shape = context->input(1).shape(); } ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( "Conv3DBackpropFilterOp", /*num_spatial_dims=*/3, input_shape, filter_shape, out_backprop_shape, stride_, padding_, data_format_, &dims)); Tensor* filter_backprop; OP_REQUIRES_OK(context, context->allocate_output(0, filter_shape, &filter_backprop)); if (input_shape.num_elements() == 0) { filter_backprop->template flat<T>().setZero(); return; } int64 top_pad_planes, bottom_pad_planes; int64 top_pad_rows, bottom_pad_rows; int64 left_pad_cols, right_pad_cols; OP_REQUIRES_OK(context, GetWindowedOutputSizeVerbose( dims.spatial_dims[0].input_size, dims.spatial_dims[0].filter_size, dims.spatial_dims[0].stride, padding_, &dims.spatial_dims[0].output_size, &top_pad_planes, &bottom_pad_planes)); OP_REQUIRES_OK(context, GetWindowedOutputSizeVerbose( dims.spatial_dims[1].input_size, dims.spatial_dims[1].filter_size, dims.spatial_dims[1].stride, padding_, &dims.spatial_dims[1].output_size, &top_pad_rows, &bottom_pad_rows)); OP_REQUIRES_OK(context, GetWindowedOutputSizeVerbose( dims.spatial_dims[2].input_size, dims.spatial_dims[2].filter_size, dims.spatial_dims[2].stride, padding_, &dims.spatial_dims[2].output_size, &left_pad_cols, &right_pad_cols)); // TODO(ezhulenev): Extract work size and shard estimation to shared // functions in conv_grad_ops, and update 2d convolution backprop. 
// The total dimension size of each kernel. const int64 filter_total_size = dims.spatial_dims[0].filter_size * dims.spatial_dims[1].filter_size * dims.spatial_dims[2].filter_size * dims.in_depth; // The output image size is the spatial size of the output. const int64 output_image_size = dims.spatial_dims[0].output_size * dims.spatial_dims[1].output_size * dims.spatial_dims[2].output_size; // Shard 'batch' images (volumes) into 'shard_size' groups of images // (volumes) to be fed into the parallel matmul. Calculate 'shard_size' by // dividing the L3 cache size ('target_working_set_size') by the matmul size // of an individual image ('work_unit_size'). const auto cache_sizes = Eigen::internal::CacheSizes(); const ptrdiff_t l3_cache_size = cache_sizes.m_l3; // TODO(andydavis) // *) Consider reducing 'target_working_set_size' if L3 is shared by // other concurrently running tensorflow ops. const size_t target_working_set_size = l3_cache_size / sizeof(T); const int64 size_A = output_image_size * filter_total_size; const int64 size_B = output_image_size * dims.out_depth; const int64 size_C = filter_total_size * dims.out_depth; const int64 work_unit_size = size_A + size_B + size_C; const size_t shard_size = (target_working_set_size + work_unit_size - 1) / work_unit_size; // Total number of elements in all the tensors used by this kernel. int64 total_tensor_elements = input_shape.num_elements() + filter_shape.num_elements() + out_backprop_shape.num_elements(); // Shape of the temporary workspace buffer. TensorShape col_buffer_shape = {static_cast<int64>(shard_size), static_cast<int64>(output_image_size), static_cast<int64>(filter_total_size)}; int64 col_buffer_elements = col_buffer_shape.num_elements(); // If the temporary allocation overhead is too large, fallback on Eigen // implementation which requires much less memory. int64 col_buffer_overhead = col_buffer_elements / total_tensor_elements; if (col_buffer_overhead > kMaxTempAllocationOverhead) { VLOG(2) << "Fallback on Eigen implementation of Conv3DBackpropFilterOp: " "col_buffer_overhead=" << col_buffer_overhead; functor::CuboidConvolutionBackwardFilter<Device, T>()( context->eigen_device<Device>(), filter_backprop->tensor<T, 5>(), // filter_backward input.tensor<T, 5>(), // input out_backprop.tensor<T, 5>(), // output_backward static_cast<int>(dims.spatial_dims[0].stride), // stride_planes static_cast<int>(dims.spatial_dims[1].stride), // stride_rows static_cast<int>(dims.spatial_dims[2].stride)); // stride_cols return; } Tensor col_buffer; OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, col_buffer_shape, &col_buffer)); // The input offset corresponding to a single input image. const int64 input_offset = dims.spatial_dims[0].input_size * dims.spatial_dims[1].input_size * dims.spatial_dims[2].input_size * dims.in_depth; // The output offset corresponding to a single output image. 
const int64 output_offset = dims.spatial_dims[0].output_size * dims.spatial_dims[1].output_size * dims.spatial_dims[2].output_size * dims.out_depth; const T* input_data = input.template flat<T>().data(); T* col_buffer_data = col_buffer.template flat<T>().data(); const T* out_backprop_data = out_backprop.template flat<T>().data(); T* filter_backprop_data = filter_backprop->template flat<T>().data(); typedef Eigen::TensorMap<Eigen::Tensor<T, 2, Eigen::RowMajor>, Eigen::Unaligned> TensorMap; typedef Eigen::TensorMap<Eigen::Tensor<const T, 2, Eigen::RowMajor>, Eigen::Unaligned> ConstTensorMap; TensorMap C(filter_backprop_data, filter_total_size, dims.out_depth); C.setZero(); // Initialize contraction dims (we need to transpose 'A' below). Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> contract_dims; contract_dims[0].first = 0; contract_dims[0].second = 0; auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); for (int image_id = 0; image_id < dims.batch_size; image_id += shard_size) { const int shard_limit = std::min(static_cast<int>(shard_size), static_cast<int>(dims.batch_size) - image_id); auto shard = [&input_data, &col_buffer_data, &dims, &top_pad_planes, &top_pad_rows, &left_pad_cols, &bottom_pad_planes, &bottom_pad_rows, &right_pad_cols, &input_offset, &size_A](int64 start, int64 limit) { for (int shard_id = start; shard_id < limit; ++shard_id) { const T* input_data_shard = input_data + shard_id * input_offset; T* col_data_shard = col_buffer_data + shard_id * size_A; // When we compute the gradient with respect to the filters, we need // to do im2col to allow gemm-type computation. Im2col<T>(input_data_shard, dims.in_depth, // Input spatial dimensions. dims.spatial_dims[0].input_size, // input planes dims.spatial_dims[1].input_size, // input rows dims.spatial_dims[2].input_size, // input cols // Filter spatial dimensions. dims.spatial_dims[0].filter_size, // filter planes dims.spatial_dims[1].filter_size, // filter rows dims.spatial_dims[2].filter_size, // filter cols // Spatial padding. top_pad_planes, top_pad_rows, left_pad_cols, bottom_pad_planes, bottom_pad_rows, right_pad_cols, // Spatial striding. dims.spatial_dims[0].stride, // stride planes dims.spatial_dims[1].stride, // stride rows dims.spatial_dims[2].stride, // stride cols col_data_shard); } }; Shard(worker_threads.num_threads, worker_threads.workers, shard_limit, size_A, shard); ConstTensorMap A(col_buffer_data, output_image_size * shard_limit, filter_total_size); ConstTensorMap B(out_backprop_data, output_image_size * shard_limit, dims.out_depth); // Gradient with respect to filter. C.device(context->eigen_cpu_device()) += A.contract(B, contract_dims); input_data += input_offset * shard_limit; out_backprop_data += output_offset * shard_limit; } } private: std::vector<int32> dilation_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; bool takes_shape_; TF_DISALLOW_COPY_AND_ASSIGN(Conv3DCustomBackpropFilterOp); }; // Custom backrop input kernel is 30% - 4x faster when compiled with AVX2 than // default Eigen implementation (at the cost of ~2x-8x peak memory usage). 
#define REGISTER_CPU_KERNEL(T) \ REGISTER_KERNEL_BUILDER( \ Name("Conv3DBackpropFilter").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ Conv3DCustomBackpropFilterOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropFilterV2") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T"), \ Conv3DCustomBackpropFilterOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropFilter") \ .Device(DEVICE_CPU) \ .Label("custom") \ .TypeConstraint<T>("T"), \ Conv3DCustomBackpropFilterOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropFilterV2") \ .Device(DEVICE_CPU) \ .Label("custom") \ .TypeConstraint<T>("T"), \ Conv3DCustomBackpropFilterOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropFilter") \ .Device(DEVICE_CPU) \ .Label("eigen_tensor") \ .TypeConstraint<T>("T"), \ Conv3DBackpropFilterOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropFilterV2") \ .Device(DEVICE_CPU) \ .Label("eigen_tensor") \ .TypeConstraint<T>("T"), \ Conv3DBackpropFilterOp<CPUDevice, T>); TF_CALL_float(REGISTER_CPU_KERNEL); TF_CALL_double(REGISTER_CPU_KERNEL); #undef REGISTER_CPU_KERNEL // WARNING: Eigen::half is not trivially copyable and can't be used in // custom backprop filter kernel because of memcpy and memset in Im2col. #define REGISTER_CPU_KERNEL(T) \ REGISTER_KERNEL_BUILDER( \ Name("Conv3DBackpropFilter").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ Conv3DBackpropFilterOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropFilterV2") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T"), \ Conv3DBackpropFilterOp<CPUDevice, T>); TF_CALL_half(REGISTER_CPU_KERNEL); #undef REGISTER_CPU_KERNEL // GPU definitions of both ops. #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM // Forward declarations of the functor specializations for GPU. // This ensures that the custom implementation is used instead of the default // Eigen one (which is used for CPU). namespace functor { #define DECLARE_GPU_SPEC(T) \ template <> \ void TransformFilter<GPUDevice, T, int, 5>::operator()( \ const GPUDevice& d, FilterTensorFormat dst_filter_format, \ typename TTypes<T, 5, int>::ConstTensor in, \ typename TTypes<T, 5, int>::Tensor out); \ template <> \ void ReverseTransformFilter<GPUDevice, T, 5>::operator()( \ const GPUDevice& d, FilterTensorFormat src_filter_format, \ typename TTypes<T, 5>::ConstTensor in, \ typename TTypes<T, 5>::Tensor out); \ template <> \ void PadInput<GPUDevice, T, int, 5>::operator()( \ const GPUDevice& d, typename TTypes<T, 5, int>::ConstTensor in, \ const std::array<int, 3>& padding_left, \ const std::array<int, 3>& padding_right, \ typename TTypes<T, 5, int>::Tensor out, TensorFormat format, \ const T& padding_value); DECLARE_GPU_SPEC(Eigen::half); DECLARE_GPU_SPEC(float); DECLARE_GPU_SPEC(double); #undef DECLARE_GPU_SPEC } // namespace functor // A dummy type to group backward data autotune results together. struct Conv3dBackwardDataAutoTuneGroup { static string name() { return "Conv3dBwdData"; } }; typedef AutoTuneSingleton<Conv3dBackwardDataAutoTuneGroup, ConvParameters, se::dnn::AlgorithmConfig> AutoTuneConv3dBwdData; template <typename T> class Conv3DBackpropInputOp<GPUDevice, T> : public OpKernel { public: explicit Conv3DBackpropInputOp(OpKernelConstruction* context) : OpKernel(context), data_format_(FORMAT_NHWC), takes_shape_(type_string().find("V2") != std::string::npos) { // data_format is only available in V2. 
if (takes_shape_) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); } OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilation_)); OP_REQUIRES(context, dilation_.size() == 5, errors::InvalidArgument("Dilation rates field must " "specify 5 dimensions")); OP_REQUIRES(context, (GetTensorDim(dilation_, data_format_, 'C') == 1 && GetTensorDim(dilation_, data_format_, 'N') == 1), errors::InvalidArgument( "Current implementation does not yet support " "dilation rates in the batch and depth dimensions.")); OP_REQUIRES( context, (GetTensorDim(dilation_, data_format_, '0') > 0 && GetTensorDim(dilation_, data_format_, '1') > 0 && GetTensorDim(dilation_, data_format_, '2') > 0), errors::InvalidArgument("Dilated rates should be larger than 0.")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window strides field must " "specify 5 dimensions")); OP_REQUIRES( context, (GetTensorDim(stride_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::InvalidArgument("Current implementation does not yet support " "strides in the batch and depth dimensions.")); OP_REQUIRES( context, (GetTensorDim(stride_, data_format_, '0') > 0 && GetTensorDim(stride_, data_format_, '1') > 0 && GetTensorDim(stride_, data_format_, '2') > 0), errors::InvalidArgument("Spatial strides should be larger than 0.")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); cudnn_use_autotune_ = CudnnUseAutotune(); } void Compute(OpKernelContext* context) override { const Tensor& filter = context->input(1); const TensorShape& filter_shape = filter.shape(); const Tensor& out_backprop = context->input(2); const TensorShape& out_backprop_shape = out_backprop.shape(); TensorShape input_shape; if (takes_shape_) { const Tensor& input_sizes = context->input(0); OP_REQUIRES_OK(context, tensor::MakeShape(input_sizes, &input_shape)); } else { input_shape = context->input(0).shape(); } ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensionsV2( "Conv3DBackpropInputOp", /*num_spatial_dims=*/3, input_shape, filter_shape, out_backprop_shape, dilation_, stride_, padding_, /*explicit_paddings=*/{}, data_format_, &dims)); Tensor* in_backprop; OP_REQUIRES_OK(context, context->allocate_output(0, input_shape, &in_backprop)); auto* stream = context->op_device_context()->stream(); OP_REQUIRES(context, stream, errors::Internal("No GPU stream available.")); bool is_grouped_convolution = filter_shape.dim_size(3) != dims.in_depth; if (!is_grouped_convolution && dims.filter_size(0) == 1 && dims.filter_size(1) == 1 && dims.filter_size(2) == 1 && dims.dilation(0) == 1 && dims.dilation(1) == 1 && dims.dilation(2) == 1 && dims.stride(0) == 1 && dims.stride(1) == 1 && dims.stride(2) == 1 && data_format_ == FORMAT_NHWC) { const uint64 m = dims.batch_size * dims.input_size(0) * dims.input_size(1) * dims.input_size(2); const uint64 k = dims.out_depth; const uint64 n = dims.in_depth; auto a_ptr = AsDeviceMemory(out_backprop.template flat<T>().data(), out_backprop.template flat<T>().size()); auto b_ptr = AsDeviceMemory(filter.template flat<T>().data(), filter.template flat<T>().size()); auto c_ptr = AsDeviceMemory(in_backprop->template flat<T>().data(), in_backprop->template flat<T>().size()); auto transpose = se::blas::Transpose::kTranspose; auto 
no_transpose = se::blas::Transpose::kNoTranspose; bool blas_launch_status = stream ->ThenBlasGemm(transpose, no_transpose, n, m, k, 1.0f, b_ptr, k, a_ptr, k, 0.0f, &c_ptr, n) .ok(); if (!blas_launch_status) { context->SetStatus(errors::Internal("Blas SGEMM launch failed : m=", m, ", n=", n, ", k=", k)); } return; } else if (!is_grouped_convolution && dims.filter_size(0) == dims.input_size(0) && dims.filter_size(1) == dims.input_size(1) && dims.filter_size(2) == dims.input_size(2) && padding_ == Padding::VALID && data_format_ == FORMAT_NHWC) { const uint64 m = dims.batch_size; const uint64 k = dims.out_depth; const uint64 n = dims.input_size(0) * dims.input_size(1) * dims.input_size(2) * dims.in_depth; auto a_ptr = AsDeviceMemory(out_backprop.template flat<T>().data(), out_backprop.template flat<T>().size()); auto b_ptr = AsDeviceMemory(filter.template flat<T>().data(), filter.template flat<T>().size()); auto c_ptr = AsDeviceMemory(in_backprop->template flat<T>().data(), in_backprop->template flat<T>().size()); auto transpose = se::blas::Transpose::kTranspose; auto no_transpose = se::blas::Transpose::kNoTranspose; bool blas_launch_status = stream ->ThenBlasGemm(transpose, no_transpose, n, m, k, 1.0f, b_ptr, k, a_ptr, k, 0.0f, &c_ptr, n) .ok(); if (!blas_launch_status) { context->SetStatus(errors::Internal("Blas SGEMM launch failed : m=", m, ", n=", n, ", k=", k)); } return; } int padding_planes = dims.SpatialPadding(padding_, 0); int padding_rows = dims.SpatialPadding(padding_, 1); int padding_cols = dims.SpatialPadding(padding_, 2); const bool planes_odd = (padding_planes % 2 != 0); const bool rows_odd = (padding_rows % 2 != 0); const bool cols_odd = (padding_cols % 2 != 0); TensorShape compatible_input_shape; if (rows_odd || cols_odd || planes_odd) { // cuDNN only supports the same amount of padding on both sides. compatible_input_shape = { dims.batch_size, dims.in_depth, dims.input_size(0) + planes_odd, dims.input_size(1) + rows_odd, dims.input_size(2) + cols_odd, }; } else { compatible_input_shape = {dims.batch_size, dims.in_depth, dims.input_size(0), dims.input_size(1), dims.input_size(2)}; } CHECK(padding_rows >= 0 && padding_cols >= 0 && padding_planes >= 0) << "Negative paddings: (" << padding_rows << ", " << padding_cols << ", " << padding_planes << ")"; #if GOOGLE_CUDA const bool compute_in_nhwc = CUDNN_VERSION >= 8000 && DataTypeToEnum<T>::value == DT_HALF; #else // fast NDHWC implementation is a CUDA only feature const bool compute_in_nhwc = false; #endif const TensorFormat compute_data_format = (compute_in_nhwc && data_format_ == FORMAT_NHWC) ? FORMAT_NHWC : FORMAT_NCHW; VLOG(3) << "Compute Conv3DBackpropInput with cuDNN:" << " data_format=" << ToString(data_format_) << " compute_data_format=" << ToString(compute_data_format); constexpr auto kComputeInNHWC = std::make_tuple(se::dnn::DataLayout::kBatchYXDepth, se::dnn::FilterLayout::kOutputYXInput); constexpr auto kComputeInNCHW = std::make_tuple(se::dnn::DataLayout::kBatchDepthYX, se::dnn::FilterLayout::kOutputInputYX); se::dnn::DataLayout compute_data_layout; se::dnn::FilterLayout filter_layout; std::tie(compute_data_layout, filter_layout) = compute_data_format == FORMAT_NHWC ? 
kComputeInNHWC : kComputeInNCHW; se::dnn::BatchDescriptor input_desc(3); input_desc.set_count(dims.batch_size) .set_spatial_dim(DimIndex::X, compatible_input_shape.dim_size(4)) .set_spatial_dim(DimIndex::Y, compatible_input_shape.dim_size(3)) .set_spatial_dim(DimIndex::Z, compatible_input_shape.dim_size(2)) .set_feature_map_count(dims.in_depth) .set_layout(compute_data_layout); se::dnn::BatchDescriptor output_desc(3); output_desc.set_count(dims.batch_size) .set_spatial_dim(DimIndex::X, dims.output_size(2)) .set_spatial_dim(DimIndex::Y, dims.output_size(1)) .set_spatial_dim(DimIndex::Z, dims.output_size(0)) .set_feature_map_count(dims.out_depth) .set_layout(compute_data_layout); se::dnn::FilterDescriptor filter_desc(3); filter_desc.set_spatial_dim(DimIndex::X, dims.filter_size(2)) .set_spatial_dim(DimIndex::Y, dims.filter_size(1)) .set_spatial_dim(DimIndex::Z, dims.filter_size(0)) .set_input_feature_map_count(filter_shape.dim_size(3)) .set_output_feature_map_count(filter_shape.dim_size(4)) .set_layout(filter_layout); se::dnn::ConvolutionDescriptor conv_desc(3); conv_desc.set_dilation_rate(DimIndex::X, dims.dilation(2)) .set_dilation_rate(DimIndex::Y, dims.dilation(1)) .set_dilation_rate(DimIndex::Z, dims.dilation(0)) .set_filter_stride(DimIndex::X, dims.stride(2)) .set_filter_stride(DimIndex::Y, dims.stride(1)) .set_filter_stride(DimIndex::Z, dims.stride(0)) .set_zero_padding(DimIndex::X, padding_cols / 2) .set_zero_padding(DimIndex::Y, padding_rows / 2) .set_zero_padding(DimIndex::Z, padding_planes / 2) .set_group_count(dims.in_depth / filter_shape.dim_size(3)); // Shape: out, in, z, y, x. Tensor transformed_filter; auto dst_format = compute_data_format == FORMAT_NCHW ? FORMAT_OIHW : FORMAT_OHWI; TensorShape dst_shape = dst_format == FORMAT_OIHW ? TensorShape({filter_shape.dim_size(4), filter_shape.dim_size(3), dims.filter_size(0), dims.filter_size(1), dims.filter_size(2)}) : TensorShape({filter_shape.dim_size(4), dims.filter_size(0), dims.filter_size(1), dims.filter_size(2), filter_shape.dim_size(3)}); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, dst_shape, &transformed_filter)); functor::TransformFilter<GPUDevice, T, int, 5>()( context->eigen_device<GPUDevice>(), dst_format, To32Bit(filter.tensor<T, 5>()), To32Bit(transformed_filter.tensor<T, 5>())); // Shape: batch, filters, z, y, x. Tensor transformed_out_backprop; if (data_format_ == FORMAT_NHWC && compute_data_format == FORMAT_NCHW) { TensorShape nchw_shape = {dims.batch_size, dims.out_depth, dims.output_size(0), dims.output_size(1), dims.output_size(2)}; if (dims.out_depth > 1) { OP_REQUIRES_OK(context, context->allocate_temp( DataTypeToEnum<T>::value, nchw_shape, &transformed_out_backprop)); functor::NHWCToNCHW<GPUDevice, T, 5>()( context->eigen_device<GPUDevice>(), out_backprop.tensor<T, 5>(), transformed_out_backprop.tensor<T, 5>()); } else { CHECK(transformed_out_backprop.CopyFrom(out_backprop, nchw_shape)); } } else { transformed_out_backprop = out_backprop; } // Shape: batch, filters, z, y, x. 
Tensor pre_transformed_in_backprop; OP_REQUIRES_OK(context, context->allocate_temp( DataTypeToEnum<T>::value, ShapeFromFormat(compute_data_format, compatible_input_shape.dim_size(0), {{compatible_input_shape.dim_size(2), compatible_input_shape.dim_size(3), compatible_input_shape.dim_size(4)}}, compatible_input_shape.dim_size(1)), &pre_transformed_in_backprop)); auto out_backprop_ptr = AsDeviceMemory(transformed_out_backprop.template flat<T>().data(), transformed_out_backprop.template flat<T>().size()); auto filter_ptr = AsDeviceMemory(transformed_filter.template flat<T>().data(), transformed_filter.template flat<T>().size()); auto in_backprop_ptr = AsDeviceMemory(pre_transformed_in_backprop.template flat<T>().data(), pre_transformed_in_backprop.template flat<T>().size()); static int64 ConvolveBackwardDataScratchSize = GetDnnWorkspaceLimit( "TF_CUDNN_WORKSPACE_LIMIT_IN_MB", 1LL << 32); // 4GB by default const int device_id = stream->parent()->device_ordinal(); // To make sure the Conv3DBackpropInputV2 get the correct dtype, we infer // the dtype from 2nd input, i.e., out_backprop. DataType dtype = context->input(2).dtype(); const ConvParameters conv_parameters = { dims.batch_size, dims.in_depth, {{dims.input_size(0), dims.input_size(1), dims.input_size(2)}}, compute_data_format, dims.out_depth, {{dims.filter_size(0), dims.filter_size(1), dims.filter_size(2)}}, {{dims.dilation(0), dims.dilation(1), dims.dilation(2)}}, {{dims.stride(0), dims.stride(1), dims.stride(2)}}, {{padding_planes, padding_rows, padding_cols}}, dtype, device_id, conv_desc.group_count()}; using se::dnn::AlgorithmConfig; using se::dnn::AlgorithmDesc; using se::dnn::ProfileResult; #if TENSORFLOW_USE_ROCM // cudnn_use_autotune is applicable only the CUDA flow // for ROCm/MIOpen, we need to call GetMIOpenConvolveAlgorithms explicitly // if we do not have a cached algorithm_config for this conv_parameters cudnn_use_autotune_ = true; #endif AlgorithmConfig algorithm_config; if (cudnn_use_autotune_ && !AutoTuneConv3dBwdData::GetInstance()->Find( conv_parameters, &algorithm_config)) { std::vector<std::unique_ptr<se::dnn::ConvolveExecutionPlan>> plans; #if GOOGLE_CUDA std::vector<AlgorithmDesc> algorithms; std::vector<AlgorithmConfig> configs; if (CudnnUseFrontend()) { OP_REQUIRES(context, stream->parent()->GetConvolveExecutionPlans( se::dnn::ConvolutionKind::BACKWARD_DATA, se::dnn::ToDataType<T>::value, stream, input_desc, filter_desc, output_desc, conv_desc, &plans), errors::Unknown( "Failed to get convolution execution plan. This is " "probably because cuDNN failed to initialize, so try " "looking to see if a warning log message was printed " "above.")); for (const auto& plan : plans) { configs.push_back(AlgorithmConfig( AlgorithmDesc{plan->getTag(), plan->get_raw_desc()}, plan->getWorkspaceSize())); } } else { OP_REQUIRES(context, stream->parent()->GetConvolveBackwardDataAlgorithms( conv_parameters.ShouldIncludeWinogradNonfusedAlgo<T>( stream->parent()), &algorithms), errors::Unknown( "Failed to get convolution execution plan. 
This is " "probably because cuDNN failed to initialize, so try " "looking to see if a warning log message was printed " "above.")); for (const auto& algorithm : algorithms) { configs.push_back(AlgorithmConfig(algorithm)); } } se::TfAllocatorAdapter tf_allocator_adapter( context->device()->GetAllocator({}), stream); se::RedzoneAllocator rz_allocator(stream, &tf_allocator_adapter, se::GpuAsmOpts()); se::DeviceMemory<T> in_backprop_ptr_rz( WrapRedzoneBestEffort(&rz_allocator, in_backprop_ptr)); std::vector<tensorflow::AutotuneResult> results; for (auto& profile_config : configs) { // TODO(zhengxq): profile each algorithm multiple times to better // accuracy. DnnScratchAllocator scratch_allocator(ConvolveBackwardDataScratchSize, context); se::RedzoneAllocator rz_scratch_allocator( stream, &tf_allocator_adapter, se::GpuAsmOpts(), /*memory_limit=*/ConvolveBackwardDataScratchSize); se::ScratchAllocator* allocator_used = !RedzoneCheckDisabled() ? static_cast<se::ScratchAllocator*>(&rz_scratch_allocator) : static_cast<se::ScratchAllocator*>(&scratch_allocator); ProfileResult profile_result; Status cudnn_launch_status; if (CudnnUseFrontend()) { cudnn_launch_status = stream->ConvolveBackwardDataWithExecutionPlan( filter_desc, filter_ptr, output_desc, out_backprop_ptr, conv_desc, input_desc, &in_backprop_ptr_rz, allocator_used, profile_config, &profile_result); } else { cudnn_launch_status = stream->ConvolveBackwardDataWithAlgorithm( filter_desc, filter_ptr, output_desc, out_backprop_ptr, conv_desc, input_desc, &in_backprop_ptr_rz, allocator_used, profile_config, &profile_result); } if (cudnn_launch_status.ok() && profile_result.is_valid()) { results.emplace_back(); auto& result = results.back(); if (CudnnUseFrontend()) { result.mutable_cuda_conv_plan()->set_exec_plan_id( profile_config.algorithm()->exec_plan_id()); } else { result.mutable_conv()->set_algorithm( profile_config.algorithm()->algo_id()); result.mutable_conv()->set_tensor_ops_enabled( profile_config.algorithm()->tensor_ops_enabled()); } result.set_scratch_bytes( !RedzoneCheckDisabled() ? rz_scratch_allocator.TotalAllocatedBytesExcludingRedzones() : scratch_allocator.TotalByteSize()); *result.mutable_run_time() = proto_utils::ToDurationProto( absl::Milliseconds(profile_result.elapsed_time_in_ms())); // TODO(george): they don't do results at all?? CheckRedzones(rz_scratch_allocator, &result); CheckRedzones(rz_allocator, &result); } else { // When CuDNN frontend APIs are used, we need to make sure the // profiling results are one-to-one mapping of the "plans". So, we // insert dummy results when the excution fails. 
results.emplace_back(); auto& result = results.back(); result.mutable_failure()->set_kind(AutotuneResult::UNKNOWN); result.mutable_failure()->set_msg( absl::StrCat("Profiling failure on CUDNN engine: ", profile_config.algorithm()->exec_plan_id())); } } #elif TENSORFLOW_USE_ROCM DnnScratchAllocator scratch_allocator(ConvolveBackwardDataScratchSize, context); std::vector<ProfileResult> algorithms; CHECK(stream->parent()->GetMIOpenConvolveAlgorithms( se::dnn::ConvolutionKind::BACKWARD_DATA, se::dnn::ToDataType<T>::value, stream, input_desc, in_backprop_ptr, filter_desc, filter_ptr, output_desc, out_backprop_ptr, conv_desc, &scratch_allocator, &algorithms)); std::vector<tensorflow::AutotuneResult> results; for (auto miopen_algorithm : algorithms) { auto profile_algorithm = miopen_algorithm.algorithm(); ProfileResult profile_result; auto miopen_launch_status = stream->ConvolveBackwardDataWithAlgorithm( filter_desc, filter_ptr, output_desc, out_backprop_ptr, conv_desc, input_desc, &in_backprop_ptr, &scratch_allocator, AlgorithmConfig(profile_algorithm, miopen_algorithm.scratch_size()), &profile_result); if (miopen_launch_status.ok()) { if (profile_result.is_valid()) { results.emplace_back(); auto& result = results.back(); result.mutable_conv()->set_algorithm(profile_algorithm.algo_id()); result.mutable_conv()->set_tensor_ops_enabled( profile_algorithm.tensor_ops_enabled()); result.set_scratch_bytes(scratch_allocator.TotalByteSize()); *result.mutable_run_time() = proto_utils::ToDurationProto( absl::Milliseconds(profile_result.elapsed_time_in_ms())); } } } #endif LogConvAutotuneResults(se::dnn::ConvolutionKind::BACKWARD_DATA, se::dnn::ToDataType<T>::value, in_backprop_ptr, filter_ptr, out_backprop_ptr, input_desc, filter_desc, output_desc, conv_desc, stream->parent(), results); if (CudnnUseFrontend()) { OP_REQUIRES_OK(context, BestCudnnConvAlgorithm(results, &plans, &algorithm_config)); } else { OP_REQUIRES_OK(context, BestCudnnConvAlgorithm(results, nullptr, &algorithm_config)); } AutoTuneConv3dBwdData::GetInstance()->Insert(conv_parameters, algorithm_config); } Status cudnn_launch_status; DnnScratchAllocator scratch_allocator(ConvolveBackwardDataScratchSize, context); if (CudnnUseFrontend()) { if (algorithm_config.algorithm().has_value()) { VLOG(4) << "Conv3DBackpropInput Execution Plan: " << algorithm_config.algorithm()->exec_plan_id(); } else { VLOG(4) << "Convolution AutoTune has been turned off"; } cudnn_launch_status = stream->ConvolveBackwardDataWithExecutionPlan( filter_desc, filter_ptr, output_desc, out_backprop_ptr, conv_desc, input_desc, &in_backprop_ptr, &scratch_allocator, algorithm_config, nullptr); } else { cudnn_launch_status = stream->ConvolveBackwardDataWithAlgorithm( filter_desc, filter_ptr, output_desc, out_backprop_ptr, conv_desc, input_desc, &in_backprop_ptr, &scratch_allocator, algorithm_config, nullptr); } if (!cudnn_launch_status.ok()) { context->SetStatus(cudnn_launch_status); } if (rows_odd || cols_odd || planes_odd) { Tensor in_backprop_remove_padding; OP_REQUIRES_OK( context, context->allocate_temp( DataTypeToEnum<T>::value, ShapeFromFormat(compute_data_format, dims.batch_size, {{dims.input_size(0), dims.input_size(1), dims.input_size(2)}}, dims.in_depth), &in_backprop_remove_padding)); // Remove the padding for odd spatial dimensions. 
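// cuDNN only supports symmetric padding, so when the total padding along a dimension is odd the input shape was grown by one plane/row/column above; the negative right-side padding passed to PadInput below slices that extra element back off the computed gradient.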
functor::PadInput<GPUDevice, T, int, 5>()( context->eigen_device<GPUDevice>(), To32Bit(const_cast<const Tensor&>(pre_transformed_in_backprop) .tensor<T, 5>()), {{0, 0, 0}}, {{-planes_odd, -rows_odd, -cols_odd}}, To32Bit(in_backprop_remove_padding.tensor<T, 5>()), compute_data_format, T{}); pre_transformed_in_backprop = in_backprop_remove_padding; } if (data_format_ == FORMAT_NHWC && compute_data_format == FORMAT_NCHW) { auto toConstTensor = [](const Tensor& x) -> const Tensor { return x; }; functor::NCHWToNHWC<GPUDevice, T, 5>()( context->eigen_device<GPUDevice>(), toConstTensor(pre_transformed_in_backprop).template tensor<T, 5>(), in_backprop->tensor<T, 5>()); } else { *in_backprop = pre_transformed_in_backprop; } } private: std::vector<int32> dilation_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; bool takes_shape_; bool cudnn_use_autotune_; }; // A dummy type to group backward filter autotune results together. struct Conv3dBackwardFilterAutoTuneGroup { static string name() { return "Conv3dBwdFilter"; } }; typedef AutoTuneSingleton<Conv3dBackwardFilterAutoTuneGroup, ConvParameters, se::dnn::AlgorithmConfig> AutoTuneConv3dBwdFilter; template <typename T> class Conv3DBackpropFilterOp<GPUDevice, T> : public OpKernel { public: explicit Conv3DBackpropFilterOp(OpKernelConstruction* context) : OpKernel(context), data_format_(FORMAT_NHWC), takes_shape_(type_string().find("V2") != std::string::npos) { // data_format is only available in V2. if (takes_shape_) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); } OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilation_)); OP_REQUIRES(context, dilation_.size() == 5, errors::InvalidArgument("Dilation rates field must " "specify 5 dimensions")); OP_REQUIRES(context, (GetTensorDim(dilation_, data_format_, 'C') == 1 && GetTensorDim(dilation_, data_format_, 'N') == 1), errors::InvalidArgument( "Current implementation does not yet support " "dilation rates in the batch and depth dimensions.")); OP_REQUIRES( context, (GetTensorDim(dilation_, data_format_, '0') > 0 && GetTensorDim(dilation_, data_format_, '1') > 0 && GetTensorDim(dilation_, data_format_, '2') > 0), errors::InvalidArgument("Dilated rates should be larger than 0.")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window strides field must " "specify 5 dimensions")); OP_REQUIRES( context, (GetTensorDim(stride_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::InvalidArgument("Current implementation does not yet support " "strides in the batch and depth dimensions.")); OP_REQUIRES( context, (GetTensorDim(stride_, data_format_, '0') > 0 && GetTensorDim(stride_, data_format_, '1') > 0 && GetTensorDim(stride_, data_format_, '2') > 0), errors::InvalidArgument("Spatial strides should be larger than 0.")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); cudnn_use_autotune_ = CudnnUseAutotune(); } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const TensorShape& input_shape = input.shape(); const Tensor& out_backprop = context->input(2); const TensorShape& out_backprop_shape = out_backprop.shape(); TensorShape filter_shape; if (takes_shape_) { const Tensor& filter_sizes = context->input(1); OP_REQUIRES_OK(context, 
tensor::MakeShape(filter_sizes, &filter_shape)); } else { filter_shape = context->input(1).shape(); } ConvBackpropDimensions dims; OP_REQUIRES_OK( context, ConvBackpropComputeDimensionsV2( "Conv3DBackpropFilterOp", /*num_spatial_dims=*/3, input_shape, filter_shape, out_backprop_shape, dilation_, stride_, padding_, /*explicit_paddings=*/{}, data_format_, &dims)); Tensor* filter_backprop; OP_REQUIRES_OK(context, context->allocate_output(0, filter_shape, &filter_backprop)); auto* stream = context->op_device_context()->stream(); OP_REQUIRES(context, stream, errors::Internal("No GPU stream available.")); bool is_grouped_convolution = filter_shape.dim_size(3) != dims.in_depth; if (!is_grouped_convolution && dims.filter_size(1) == 1 && dims.filter_size(2) == 1 && dims.filter_size(0) == 1 && dims.dilation(2) == 1 && dims.dilation(1) == 1 && dims.dilation(0) == 1 && dims.stride(2) == 1 && dims.stride(1) == 1 && dims.stride(0) == 1 && data_format_ == FORMAT_NHWC) { const uint64 m = dims.in_depth; const uint64 k = dims.batch_size * dims.input_size(1) * dims.input_size(2) * dims.input_size(0); const uint64 n = dims.out_depth; // The shape of output backprop is // [batch, out_z, out_y, out_x, out_depth] // From cublas's perspective, it is: n x k auto a_ptr = AsDeviceMemory(out_backprop.template flat<T>().data(), out_backprop.template flat<T>().size()); // The shape of input is: // [batch, in_z, in_y, in_x, in_depth], // From cublas's perspective, it is: m x k auto b_ptr = AsDeviceMemory(input.template flat<T>().data(), input.template flat<T>().size()); // The shape of the filter backprop is: // [1, 1, 1, in_depth, out_depth] // From cublas's perspective, it is: n x m auto c_ptr = AsDeviceMemory(filter_backprop->template flat<T>().data(), filter_backprop->template flat<T>().size()); bool blas_launch_status = stream ->ThenBlasGemm(se::blas::Transpose::kNoTranspose, se::blas::Transpose::kTranspose, n, m, k, 1.0f, a_ptr, n, b_ptr, m, 0.0f, &c_ptr, n) .ok(); if (!blas_launch_status) { context->SetStatus(errors::Internal("Blas SGEMM launch failed : m=", m, ", n=", n, ", k=", k)); } return; } else if (!is_grouped_convolution && dims.filter_size(0) == dims.input_size(0) && dims.filter_size(1) == dims.input_size(1) && dims.filter_size(2) == dims.input_size(2) && padding_ == Padding::VALID && data_format_ == FORMAT_NHWC) { const uint64 m = dims.input_size(0) * dims.input_size(1) * dims.input_size(2) * dims.in_depth; const uint64 k = dims.batch_size; const uint64 n = dims.out_depth; auto a_ptr = AsDeviceMemory(input.template flat<T>().data(), input.template flat<T>().size()); auto b_ptr = AsDeviceMemory(out_backprop.template flat<T>().data(), out_backprop.template flat<T>().size()); auto c_ptr = AsDeviceMemory(filter_backprop->template flat<T>().data(), filter_backprop->template flat<T>().size()); bool blas_launch_status = stream ->ThenBlasGemm(se::blas::Transpose::kNoTranspose, se::blas::Transpose::kTranspose, n, m, k, 1.0f, b_ptr, n, a_ptr, m, 0.0f, &c_ptr, n) .ok(); if (!blas_launch_status) { context->SetStatus(errors::Internal("Blas SGEMM launch failed : m=", m, ", n=", n, ", k=", k)); } return; } int padding_planes = dims.SpatialPadding(padding_, 0); int padding_rows = dims.SpatialPadding(padding_, 1); int padding_cols = dims.SpatialPadding(padding_, 2); const bool planes_odd = (padding_planes % 2 != 0); const bool rows_odd = (padding_rows % 2 != 0); const bool cols_odd = (padding_cols % 2 != 0); Tensor compatible_input; if (rows_odd || cols_odd || planes_odd) { OP_REQUIRES_OK(context, 
context->allocate_temp( DataTypeToEnum<T>::value, ShapeFromFormat(data_format_, dims.batch_size, {{dims.input_size(0) + planes_odd, dims.input_size(1) + rows_odd, dims.input_size(2) + cols_odd}}, dims.in_depth), &compatible_input)); functor::PadInput<GPUDevice, T, int, 5>()( context->template eigen_device<GPUDevice>(), To32Bit(input.tensor<T, 5>()), {{0, 0, 0}}, {{planes_odd, rows_odd, cols_odd}}, To32Bit(compatible_input.tensor<T, 5>()), data_format_, T{}); } else { compatible_input = input; } CHECK(padding_rows >= 0 && padding_cols >= 0 && padding_planes >= 0) << "Negative paddings: (" << padding_rows << ", " << padding_cols << ", " << padding_planes << ")"; #if GOOGLE_CUDA const bool compute_in_nhwc = CUDNN_VERSION >= 8000 && DataTypeToEnum<T>::value == DT_HALF; #else // fast NDHWC implementation is a CUDA only feature const bool compute_in_nhwc = false; #endif const TensorFormat compute_data_format = (compute_in_nhwc && data_format_ == FORMAT_NHWC) ? FORMAT_NHWC : FORMAT_NCHW; VLOG(3) << "Compute Conv3DBackpropFilter with cuDNN:" << " data_format=" << ToString(data_format_) << " compute_data_format=" << ToString(compute_data_format); constexpr auto kComputeInNHWC = std::make_tuple(se::dnn::DataLayout::kBatchYXDepth, se::dnn::FilterLayout::kOutputYXInput); constexpr auto kComputeInNCHW = std::make_tuple(se::dnn::DataLayout::kBatchDepthYX, se::dnn::FilterLayout::kOutputInputYX); se::dnn::DataLayout compute_data_layout; se::dnn::FilterLayout filter_layout; std::tie(compute_data_layout, filter_layout) = compute_data_format == FORMAT_NHWC ? kComputeInNHWC : kComputeInNCHW; se::dnn::BatchDescriptor input_desc(3); input_desc.set_count(dims.batch_size) .set_spatial_dim(DimIndex::X, GetTensorDim(compatible_input, data_format_, '2')) .set_spatial_dim(DimIndex::Y, GetTensorDim(compatible_input, data_format_, '1')) .set_spatial_dim(DimIndex::Z, GetTensorDim(compatible_input, data_format_, '0')) .set_feature_map_count(dims.in_depth) .set_layout(compute_data_layout); se::dnn::BatchDescriptor output_desc(3); output_desc.set_count(dims.batch_size) .set_spatial_dim(DimIndex::X, dims.output_size(2)) .set_spatial_dim(DimIndex::Y, dims.output_size(1)) .set_spatial_dim(DimIndex::Z, dims.output_size(0)) .set_feature_map_count(dims.out_depth) .set_layout(compute_data_layout); se::dnn::FilterDescriptor filter_desc(3); filter_desc.set_spatial_dim(DimIndex::X, dims.filter_size(2)) .set_spatial_dim(DimIndex::Y, dims.filter_size(1)) .set_spatial_dim(DimIndex::Z, dims.filter_size(0)) .set_input_feature_map_count(filter_shape.dim_size(3)) .set_output_feature_map_count(filter_shape.dim_size(4)) .set_layout(filter_layout); se::dnn::ConvolutionDescriptor conv_desc(3); conv_desc.set_dilation_rate(DimIndex::X, dims.dilation(2)) .set_dilation_rate(DimIndex::Y, dims.dilation(1)) .set_dilation_rate(DimIndex::Z, dims.dilation(0)) .set_filter_stride(DimIndex::X, dims.stride(2)) .set_filter_stride(DimIndex::Y, dims.stride(1)) .set_filter_stride(DimIndex::Z, dims.stride(0)) .set_zero_padding(DimIndex::X, padding_cols / 2) .set_zero_padding(DimIndex::Y, padding_rows / 2) .set_zero_padding(DimIndex::Z, padding_planes / 2) .set_group_count(dims.in_depth / filter_shape.dim_size(3)); Tensor pre_transformed_filter_backprop; auto dst_format = compute_data_format == FORMAT_NCHW ? FORMAT_OIHW : FORMAT_OHWI; TensorShape dst_shape = dst_format == FORMAT_OIHW ? 
TensorShape({filter_shape.dim_size(4), filter_shape.dim_size(3), dims.filter_size(0), dims.filter_size(1), dims.filter_size(2)}) : TensorShape({filter_shape.dim_size(4), dims.filter_size(0), dims.filter_size(1), dims.filter_size(2), filter_shape.dim_size(3)}); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, dst_shape, &pre_transformed_filter_backprop)); Tensor transformed_out_backprop; if (data_format_ == FORMAT_NHWC && compute_data_format == FORMAT_NCHW) { VLOG(4) << "Convert the `out_backprop` tensor from NDHWC to NCDHW."; TensorShape nchw_shape = {dims.batch_size, dims.out_depth, dims.output_size(0), dims.output_size(1), dims.output_size(2)}; OP_REQUIRES_OK( context, context->allocate_temp(DataTypeToEnum<T>::value, nchw_shape, &transformed_out_backprop)); if (dims.out_depth > 1) { functor::NHWCToNCHW<GPUDevice, T, 5>()( context->eigen_device<GPUDevice>(), out_backprop.tensor<T, 5>(), transformed_out_backprop.tensor<T, 5>()); } else { CHECK(transformed_out_backprop.CopyFrom(out_backprop, nchw_shape)); } } else { transformed_out_backprop = out_backprop; } Tensor transformed_input; if (data_format_ == FORMAT_NHWC && compute_data_format == FORMAT_NCHW) { VLOG(4) << "Convert the `input` tensor from NDHWC to NCDHW."; TensorShape nchw_shape = { dims.batch_size, dims.in_depth, compatible_input.dim_size(1), compatible_input.dim_size(2), compatible_input.dim_size(3)}; if (dims.in_depth > 1) { OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, nchw_shape, &transformed_input)); functor::NHWCToNCHW<GPUDevice, T, 5>()( context->eigen_device<GPUDevice>(), const_cast<const Tensor&>(compatible_input).tensor<T, 5>(), transformed_input.tensor<T, 5>()); } else { CHECK(transformed_input.CopyFrom(compatible_input, nchw_shape)); } } else { transformed_input = compatible_input; } auto out_backprop_ptr = AsDeviceMemory(transformed_out_backprop.template flat<T>().data(), transformed_out_backprop.template flat<T>().size()); auto filter_backprop_ptr = AsDeviceMemory( pre_transformed_filter_backprop.template flat<T>().data(), pre_transformed_filter_backprop.template flat<T>().size()); auto input_ptr = AsDeviceMemory(transformed_input.template flat<T>().data(), transformed_input.template flat<T>().size()); static int64 ConvolveBackwardFilterScratchSize = GetDnnWorkspaceLimit( "TF_CUDNN_WORKSPACE_LIMIT_IN_MB", 1LL << 32); // 4GB by default const int device_id = stream->parent()->device_ordinal(); DataType dtype = input.dtype(); const ConvParameters conv_parameters = { dims.batch_size, dims.in_depth, {{dims.input_size(0), dims.input_size(1), dims.input_size(2)}}, compute_data_format, dims.out_depth, {{dims.filter_size(0), dims.filter_size(1), dims.filter_size(2)}}, {{dims.dilation(0), dims.dilation(1), dims.dilation(2)}}, {{dims.stride(0), dims.stride(1), dims.stride(2)}}, {{padding_planes, padding_rows, padding_cols}}, dtype, device_id, conv_desc.group_count()}; using se::dnn::AlgorithmConfig; using se::dnn::AlgorithmDesc; using se::dnn::ProfileResult; #if TENSORFLOW_USE_ROCM // cudnn_use_autotune is applicable only the CUDA flow // for ROCm/MIOpen, we need to call GetMIOpenConvolveAlgorithms explicitly // if we do not have a cached algorithm_config for this conv_parameters cudnn_use_autotune_ = true; #endif AlgorithmConfig algorithm_config; if (cudnn_use_autotune_ && !AutoTuneConv3dBwdFilter::GetInstance()->Find( conv_parameters, &algorithm_config)) { std::vector<std::unique_ptr<se::dnn::ConvolveExecutionPlan>> plans; #if GOOGLE_CUDA std::vector<AlgorithmDesc> 
algorithms; std::vector<AlgorithmConfig> configs; if (CudnnUseFrontend()) { OP_REQUIRES(context, stream->parent()->GetConvolveExecutionPlans( se::dnn::ConvolutionKind::BACKWARD_FILTER, se::dnn::ToDataType<T>::value, stream, input_desc, filter_desc, output_desc, conv_desc, &plans), errors::Unknown( "Failed to get convolution execution plan. This is " "probably because cuDNN failed to initialize, so try " "looking to see if a warning log message was printed " "above.")); for (const auto& plan : plans) { configs.push_back(AlgorithmConfig( AlgorithmDesc{plan->getTag(), plan->get_raw_desc()}, plan->getWorkspaceSize())); } } else { OP_REQUIRES(context, stream->parent()->GetConvolveBackwardFilterAlgorithms( conv_parameters.ShouldIncludeWinogradNonfusedAlgo<T>( stream->parent()), &algorithms), errors::Unknown( "Failed to get convolution execution plan. This is " "probably because cuDNN failed to initialize, so try " "looking to see if a warning log message was printed " "above.")); for (const auto& algorithm : algorithms) { configs.push_back(AlgorithmConfig(algorithm)); } } std::vector<tensorflow::AutotuneResult> results; for (auto& profile_config : configs) { // TODO(zhengxq): profile each algorithm multiple times to better // accuracy. DnnScratchAllocator scratch_allocator(ConvolveBackwardFilterScratchSize, context); ProfileResult profile_result; Status cudnn_launch_status; if (CudnnUseFrontend()) { cudnn_launch_status = stream->ConvolveBackwardFilterWithExecutionPlan( input_desc, input_ptr, output_desc, out_backprop_ptr, conv_desc, filter_desc, &filter_backprop_ptr, &scratch_allocator, profile_config, &profile_result); } else { cudnn_launch_status = stream->ConvolveBackwardFilterWithAlgorithm( input_desc, input_ptr, output_desc, out_backprop_ptr, conv_desc, filter_desc, &filter_backprop_ptr, &scratch_allocator, profile_config, &profile_result); } if (cudnn_launch_status.ok() && profile_result.is_valid()) { results.emplace_back(); auto& result = results.back(); if (CudnnUseFrontend()) { result.mutable_cuda_conv_plan()->set_exec_plan_id( profile_config.algorithm()->exec_plan_id()); } else { result.mutable_conv()->set_algorithm( profile_config.algorithm()->algo_id()); result.mutable_conv()->set_tensor_ops_enabled( profile_config.algorithm()->tensor_ops_enabled()); } result.set_scratch_bytes(scratch_allocator.TotalByteSize()); *result.mutable_run_time() = proto_utils::ToDurationProto( absl::Milliseconds(profile_result.elapsed_time_in_ms())); } else if (CudnnUseFrontend()) { // When CuDNN frontend APIs are used, we need to make sure the // profiling results are one-to-one mapping of the "plans". So, we // insert dummy results when the excution fails. 
results.emplace_back(); auto& result = results.back(); result.mutable_failure()->set_kind(AutotuneResult::UNKNOWN); result.mutable_failure()->set_msg( absl::StrCat("Profiling failure on CUDNN engine: ", profile_config.algorithm()->exec_plan_id())); } } #elif TENSORFLOW_USE_ROCM DnnScratchAllocator scratch_allocator(ConvolveBackwardFilterScratchSize, context); std::vector<ProfileResult> algorithms; CHECK(stream->parent()->GetMIOpenConvolveAlgorithms( se::dnn::ConvolutionKind::BACKWARD_FILTER, se::dnn::ToDataType<T>::value, stream, input_desc, input_ptr, filter_desc, filter_backprop_ptr, output_desc, out_backprop_ptr, conv_desc, &scratch_allocator, &algorithms)); std::vector<tensorflow::AutotuneResult> results; for (auto miopen_algorithm : algorithms) { auto profile_algorithm = miopen_algorithm.algorithm(); ProfileResult profile_result; auto cudnn_launch_status = stream->ConvolveBackwardFilterWithAlgorithm( input_desc, input_ptr, output_desc, out_backprop_ptr, conv_desc, filter_desc, &filter_backprop_ptr, &scratch_allocator, AlgorithmConfig(profile_algorithm, miopen_algorithm.scratch_size()), &profile_result); if (cudnn_launch_status.ok()) { if (profile_result.is_valid()) { results.emplace_back(); auto& result = results.back(); result.mutable_conv()->set_algorithm(profile_algorithm.algo_id()); result.mutable_conv()->set_tensor_ops_enabled( profile_algorithm.tensor_ops_enabled()); result.set_scratch_bytes(scratch_allocator.TotalByteSize()); *result.mutable_run_time() = proto_utils::ToDurationProto( absl::Milliseconds(profile_result.elapsed_time_in_ms())); } } } #endif LogConvAutotuneResults(se::dnn::ConvolutionKind::BACKWARD_FILTER, se::dnn::ToDataType<T>::value, input_ptr, filter_backprop_ptr, out_backprop_ptr, input_desc, filter_desc, output_desc, conv_desc, stream->parent(), results); if (CudnnUseFrontend()) { OP_REQUIRES_OK(context, BestCudnnConvAlgorithm(results, &plans, &algorithm_config)); } else { Status s = BestCudnnConvAlgorithm(results, nullptr, &algorithm_config); #if GOOGLE_CUDA if (s.code() == error::NOT_FOUND) { size_t version = cudnnGetVersion(); // For cuDNN 8.0.3 and 8.0.4, no cudnnConvolutionBwdFilterAlgo_t will // work in certain cases. In such cases we improve the error message. // This is fixed in cuDNN 8.0.5. For more context, see: // https://github.com/tensorflow/tensorflow/issues/46589 if (version == 8003 || version == 8004) { std::string version_str = (version == 8003 ? "8.0.3" : "8.0.4"); s = errors::NotFound( "No algorithm worked! Please try upgrading to cuDNN 8.0.5. 
You " "are using cuDNN ", version_str, ", which has a bug causing this error."); } } #endif OP_REQUIRES_OK(context, s); } AutoTuneConv3dBwdFilter::GetInstance()->Insert(conv_parameters, algorithm_config); } Status cudnn_launch_status; DnnScratchAllocator scratch_allocator(ConvolveBackwardFilterScratchSize, context); if (CudnnUseFrontend()) { if (algorithm_config.algorithm().has_value()) { VLOG(4) << "Conv3DBackpropFilter Execution Plan: " << algorithm_config.algorithm()->exec_plan_id(); } else { VLOG(4) << "Convolution AutoTune has been turned off"; } cudnn_launch_status = stream->ConvolveBackwardFilterWithExecutionPlan( input_desc, input_ptr, output_desc, out_backprop_ptr, conv_desc, filter_desc, &filter_backprop_ptr, &scratch_allocator, algorithm_config, nullptr); } else { cudnn_launch_status = stream->ConvolveBackwardFilterWithAlgorithm( input_desc, input_ptr, output_desc, out_backprop_ptr, conv_desc, filter_desc, &filter_backprop_ptr, &scratch_allocator, algorithm_config, nullptr); } if (!cudnn_launch_status.ok()) { context->SetStatus(cudnn_launch_status); } auto toConstTensor = [](const Tensor& x) -> const Tensor { return x; }; functor::ReverseTransformFilter<GPUDevice, T, 5>()( context->eigen_device<GPUDevice>(), /*src_filter_format=*/dst_format, toConstTensor(pre_transformed_filter_backprop).template tensor<T, 5>(), filter_backprop->tensor<T, 5>()); } private: std::vector<int32> dilation_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; bool takes_shape_; bool cudnn_use_autotune_; }; #define REGISTER_GPU_KERNEL(T) \ REGISTER_KERNEL_BUILDER( \ Name("Conv3DBackpropInput").Device(DEVICE_GPU).TypeConstraint<T>("T"), \ Conv3DBackpropInputOp<GPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropInputV2") \ .Device(DEVICE_GPU) \ .TypeConstraint<T>("T") \ .HostMemory("input_sizes"), \ Conv3DBackpropInputOp<GPUDevice, T>); \ REGISTER_KERNEL_BUILDER( \ Name("Conv3DBackpropFilter").Device(DEVICE_GPU).TypeConstraint<T>("T"), \ Conv3DBackpropFilterOp<GPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropFilterV2") \ .Device(DEVICE_GPU) \ .TypeConstraint<T>("T") \ .HostMemory("filter_sizes"), \ Conv3DBackpropFilterOp<GPUDevice, T>); TF_CALL_half(REGISTER_GPU_KERNEL); TF_CALL_float(REGISTER_GPU_KERNEL); TF_CALL_double(REGISTER_GPU_KERNEL); #undef REGISTER_GPU_KERNEL #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM } // namespace tensorflow
null
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #define USE_EIGEN_TENSOR #define EIGEN_USE_THREADS #include "tensorflow/core/framework/kernel_shape_util.h" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_slice.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/kernels/conv_2d.h" #include "tensorflow/core/kernels/conv_3d.h" #include "tensorflow/core/kernels/conv_grad_ops.h" #include "tensorflow/core/kernels/conv_grad_shape_utils.h" #include "tensorflow/core/kernels/conv_ops_gpu.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/inlined_vector.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/tensor_format.h" #include "tensorflow/core/util/use_cudnn.h" #include "tensorflow/core/util/work_sharder.h" #if defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL) #include "tensorflow/core/kernels/eigen_contraction_kernel.h" #endif #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #include "tensorflow/core/platform/stream_executor.h" using stream_executor::dnn::DimIndex; #include "tensorflow/core/protobuf/autotuning.pb.h" #include "tensorflow/core/util/proto/proto_utils.h" #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM #if GOOGLE_CUDA #include "third_party/gpus/cudnn/cudnn.h" #include "tensorflow/stream_executor/gpu/gpu_asm_opts.h" #include "tensorflow/stream_executor/gpu/redzone_allocator.h" #include "tensorflow/stream_executor/tf_allocator_adapter.h" #endif // GOOGLE_CUDA namespace { // TODO(ezhulenev): Split this file into conv_grad_filter_ops_3d.cc and // conv_grad_input_ops_3d.cc. // TODO(ezhulenev): Generalize Col2im and Im2col for 2-d and 3-d kernels. // "Depth" is already used for the channel dimension, so for the third spatial // dimension in this file we use "plane", although in NDHWC layout it's // indicated with a "D". // Returns in 'im_data' (assumed to be zero-initialized) image patch in storage // order (planes, height, width, depth), constructed from patches in 'col_data', // which is required to be in storage order (out_planes * out_height * // out_width, filter_planes, filter_height, filter_width, in_depth). // // Based on 2-dimensional implementation written by Yangqing Jia (jiayq). 
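// Note that Col2im accumulates (+=) each patch's contribution into im_data, which is why the caller must pass a zero-initialized buffer, whereas Im2col further below simply copies (or zero-fills) each patch.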
template <typename T> void Col2im(const T* col_data, const int depth, const int planes, const int height, const int width, const int filter_p, const int filter_h, const int filter_w, const int pad_pt, const int pad_t, const int pad_l, const int pad_pb, const int pad_b, const int pad_r, const int stride_p, const int stride_h, const int stride_w, T* im_data) { const int planes_col = (planes + pad_pt + pad_pb - filter_p) / stride_p + 1; const int height_col = (height + pad_t + pad_b - filter_h) / stride_h + 1; const int width_col = (width + pad_l + pad_r - filter_w) / stride_w + 1; int p_pad = -pad_pt; for (int p = 0; p < planes_col; ++p) { int h_pad = -pad_t; for (int h = 0; h < height_col; ++h) { int w_pad = -pad_l; for (int w = 0; w < width_col; ++w) { T* im_patch_data = im_data + (p_pad * height * width + h_pad * width + w_pad) * depth; for (int ip = p_pad; ip < p_pad + filter_p; ++ip) { for (int ih = h_pad; ih < h_pad + filter_h; ++ih) { for (int iw = w_pad; iw < w_pad + filter_w; ++iw) { if (ip >= 0 && ip < planes && ih >= 0 && ih < height && iw >= 0 && iw < width) { for (int i = 0; i < depth; ++i) { im_patch_data[i] += col_data[i]; } } im_patch_data += depth; col_data += depth; } // Jump over remaining number of depth. im_patch_data += depth * (width - filter_w); } // Jump over remaining number of (depth * width). im_patch_data += (depth * width) * (height - filter_h); } w_pad += stride_w; } h_pad += stride_h; } p_pad += stride_p; } } // Returns in 'col_data', image patches in storage order (planes, height, width, // depth) extracted from image at 'input_data', which is required to be in // storage order (batch, planes, height, width, depth). // // Based on 2-dimensional implementation written by Yangqing Jia (jiayq). template <typename T> void Im2col(const T* input_data, const int depth, const int planes, const int height, const int width, const int filter_p, const int filter_h, const int filter_w, const int pad_pt, const int pad_t, const int pad_l, const int pad_pb, const int pad_b, const int pad_r, const int stride_p, const int stride_h, const int stride_w, T* col_data) { const int planes_col = (planes + pad_pt + pad_pb - filter_p) / stride_p + 1; const int height_col = (height + pad_t + pad_b - filter_h) / stride_h + 1; const int width_col = (width + pad_l + pad_r - filter_w) / stride_w + 1; int p_pad = -pad_pt; for (int p = 0; p < planes_col; ++p) { int h_pad = -pad_t; for (int h = 0; h < height_col; ++h) { int w_pad = -pad_l; for (int w = 0; w < width_col; ++w) { for (int ip = p_pad; ip < p_pad + filter_p; ++ip) { for (int ih = h_pad; ih < h_pad + filter_h; ++ih) { for (int iw = w_pad; iw < w_pad + filter_w; ++iw) { if (ip >= 0 && ip < planes && ih >= 0 && ih < height && iw >= 0 && iw < width) { memcpy(col_data, input_data + (ip * height * width + ih * width + iw) * depth, sizeof(T) * depth); } else { // This should be simply padded with zero. memset(col_data, 0, sizeof(T) * depth); } col_data += depth; } } } w_pad += stride_w; } h_pad += stride_h; } p_pad += stride_p; } } } // namespace namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; // Backprop for input that offloads computation to // Eigen::CuboidConvolutionBackwardInput. template <typename Device, class T> class Conv3DBackpropInputOp : public OpKernel { public: explicit Conv3DBackpropInputOp(OpKernelConstruction* context) : OpKernel(context), data_format_(FORMAT_NHWC), takes_shape_(type_string().find("V2") != std::string::npos) { // data_format is only available in V2. 
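// The CPU kernels only support the NDHWC layout; for the V2 op the data_format attribute is parsed below and then required to be NDHWC.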
if (takes_shape_) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Conv3DBackpropInputOpV2 only supports NDHWC on the CPU.")); } OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilation_)); OP_REQUIRES(context, dilation_.size() == 5, errors::InvalidArgument("Dilation rates field must " "specify 5 dimensions")); OP_REQUIRES(context, (GetTensorDim(dilation_, data_format_, 'C') == 1 && GetTensorDim(dilation_, data_format_, 'N') == 1), errors::InvalidArgument( "Current implementation does not yet support " "dilation rates in the batch and depth dimensions.")); // TODO(yangzihao): Add CPU version of dilated conv 3D. OP_REQUIRES(context, (GetTensorDim(dilation_, data_format_, '0') == 1 && GetTensorDim(dilation_, data_format_, '1') == 1 && GetTensorDim(dilation_, data_format_, '2') == 1), errors::InvalidArgument( "Current CPU implementation does not yet support " "dilation rates larger than 1.")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window strides field must " "specify 5 dimensions")); OP_REQUIRES( context, (GetTensorDim(stride_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::InvalidArgument("Current implementation does not yet support " "strides in the batch and depth dimensions.")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); } void Compute(OpKernelContext* context) override { const Tensor& filter = context->input(1); const TensorShape& filter_shape = filter.shape(); const Tensor& out_backprop = context->input(2); const TensorShape& out_backprop_shape = out_backprop.shape(); TensorShape input_shape; if (takes_shape_) { const Tensor& input_sizes = context->input(0); // tensor::MakeShape is able to handle both DT_INT32 and DT_INT64 for // input_sizes. OP_REQUIRES_OK(context, tensor::MakeShape(input_sizes, &input_shape)); } else { input_shape = context->input(0).shape(); } OP_REQUIRES( context, input_shape.dim_size(4) == filter_shape.dim_size(3), errors::InvalidArgument("input and filter_sizes must have the same " "number of channels. Got ", input_shape.dim_size(4), " for input and ", filter_shape.dim_size(3), " for filter_sizes")); OP_REQUIRES( context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), errors::InvalidArgument("out_backprop and filter_sizes must have the " "same number of channels. 
Got ", out_backprop_shape.dim_size(4), " for out_backprop and ", filter_shape.dim_size(4), " for filter_sizes")); ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( "Conv3DBackpropInputOp", /*num_spatial_dims=*/3, input_shape, filter_shape, out_backprop_shape, stride_, padding_, data_format_, &dims)); Tensor* in_backprop; OP_REQUIRES_OK(context, context->allocate_output(0, input_shape, &in_backprop)); functor::CuboidConvolutionBackwardInput<Device, T>()( context->eigen_device<Device>(), in_backprop->tensor<T, 5>(), // input_backward filter.tensor<T, 5>(), // filter out_backprop.tensor<T, 5>(), // output_backward static_cast<int>(dims.spatial_dims[0].stride), // stride_planes static_cast<int>(dims.spatial_dims[1].stride), // stride_rows static_cast<int>(dims.spatial_dims[2].stride)); // stride_cols } private: std::vector<int32> dilation_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; bool takes_shape_; TF_DISALLOW_COPY_AND_ASSIGN(Conv3DBackpropInputOp); }; // Custom backprop for input that explicitly does the work sharding and calls // Eigen only to multiply matrices. template <typename Device, class T> class Conv3DCustomBackpropInputOp : public OpKernel { // Limit the maximum size of allocated temporary buffer to // kMaxTempAllocationOverhead times the size of the input tensors (input, // filter, out_backprop). If the size of the temporary buffer exceeds this // limit, fallback on Eigen implementation. static constexpr int kMaxTempAllocationOverhead = 25; public: explicit Conv3DCustomBackpropInputOp(OpKernelConstruction* context) : OpKernel(context), data_format_(FORMAT_NHWC), takes_shape_(type_string().find("V2") != std::string::npos) { // data_format is only available in V2. if (takes_shape_) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Conv3DBackpropInputOpV2 only supports NDHWC on the CPU.")); } OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilation_)); OP_REQUIRES(context, dilation_.size() == 5, errors::InvalidArgument("Dilation rates field must " "specify 5 dimensions")); OP_REQUIRES(context, (GetTensorDim(dilation_, data_format_, 'C') == 1 && GetTensorDim(dilation_, data_format_, 'N') == 1), errors::InvalidArgument( "Current implementation does not yet support " "dilation rates in the batch and depth dimensions.")); // TODO(yangzihao): Add CPU version of dilated conv 3D. 
OP_REQUIRES(context, (GetTensorDim(dilation_, data_format_, '0') == 1 && GetTensorDim(dilation_, data_format_, '1') == 1 && GetTensorDim(dilation_, data_format_, '2') == 1), errors::InvalidArgument( "Current CPU implementation does not yet support " "dilation rates larger than 1.")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window strides field must " "specify 5 dimensions")); OP_REQUIRES( context, (GetTensorDim(stride_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::InvalidArgument("Current implementation does not yet support " "strides in the batch and depth dimensions.")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); } void Compute(OpKernelContext* context) override { const Tensor& filter = context->input(1); const TensorShape& filter_shape = filter.shape(); const Tensor& out_backprop = context->input(2); const TensorShape& out_backprop_shape = out_backprop.shape(); TensorShape input_shape; if (takes_shape_) { const Tensor& input_sizes = context->input(0); // tensor::MakeShape is able to handle both DT_INT32 and DT_INT64 for // input_sizes. OP_REQUIRES_OK(context, tensor::MakeShape(input_sizes, &input_shape)); } else { input_shape = context->input(0).shape(); } OP_REQUIRES( context, input_shape.dim_size(4) == filter_shape.dim_size(3), errors::InvalidArgument("input and filter_sizes must have the same " "number of channels. Got ", input_shape.dim_size(4), " for input and ", filter_shape.dim_size(3), " for filter_sizes")); OP_REQUIRES( context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), errors::InvalidArgument("out_backprop and filter_sizes must have the " "same number of channels. Got ", out_backprop_shape.dim_size(4), " for out_backprop and ", filter_shape.dim_size(4), " for filter_sizes")); ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( "Conv3DBackpropInputOp", /*num_spatial_dims=*/3, input_shape, filter_shape, out_backprop_shape, stride_, padding_, data_format_, &dims)); Tensor* in_backprop; OP_REQUIRES_OK(context, context->allocate_output(0, input_shape, &in_backprop)); int64 top_pad_planes, bottom_pad_planes; int64 top_pad_rows, bottom_pad_rows; int64 left_pad_cols, right_pad_cols; OP_REQUIRES_OK(context, GetWindowedOutputSizeVerbose( dims.spatial_dims[0].input_size, dims.spatial_dims[0].filter_size, dims.spatial_dims[0].stride, padding_, &dims.spatial_dims[0].output_size, &top_pad_planes, &bottom_pad_planes)); OP_REQUIRES_OK(context, GetWindowedOutputSizeVerbose( dims.spatial_dims[1].input_size, dims.spatial_dims[1].filter_size, dims.spatial_dims[1].stride, padding_, &dims.spatial_dims[1].output_size, &top_pad_rows, &bottom_pad_rows)); OP_REQUIRES_OK(context, GetWindowedOutputSizeVerbose( dims.spatial_dims[2].input_size, dims.spatial_dims[2].filter_size, dims.spatial_dims[2].stride, padding_, &dims.spatial_dims[2].output_size, &left_pad_cols, &right_pad_cols)); // TODO(ezhulenev): Extract work size and shard estimation to shared // functions in conv_grad_ops, and update 2d convolution backprop. // The total dimension size of each kernel. const int64 filter_total_size = dims.spatial_dims[0].filter_size * dims.spatial_dims[1].filter_size * dims.spatial_dims[2].filter_size * dims.in_depth; // The output image size is the spatial size of the output. 
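// The per-image GEMM working set (size_A + size_B + size_C computed below) is compared against the L3 cache size to decide how many images of the batch each shard processes at a time.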
const int64 output_image_size = dims.spatial_dims[0].output_size * dims.spatial_dims[1].output_size * dims.spatial_dims[2].output_size; const auto cache_sizes = Eigen::internal::CacheSizes(); const ptrdiff_t l3_cache_size = cache_sizes.m_l3; // Use L3 cache size as target working set size. const size_t target_working_set_size = l3_cache_size / sizeof(T); // Calculate size of matrices involved in MatMul: C = A x B. const int64 size_A = output_image_size * dims.out_depth; const int64 size_B = filter_total_size * dims.out_depth; const int64 size_C = output_image_size * filter_total_size; const int64 work_unit_size = size_A + size_B + size_C; auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); // Use parallel tensor contractions if there is no batching. // // Compared to Conv2D code, this version is missing work size estimation. In // benchmarks I didn't find a case when it's beneficial to run parallel // contraction compared to sharding and matmuls. const bool use_parallel_contraction = dims.batch_size == 1; const size_t shard_size = use_parallel_contraction ? 1 : (target_working_set_size + work_unit_size - 1) / work_unit_size; // Total number of elements in all the tensors used by this kernel. int64 total_tensor_elements = input_shape.num_elements() + filter_shape.num_elements() + out_backprop_shape.num_elements(); // Shape of the temporary workspace buffer. TensorShape col_buffer_shape = {static_cast<int64>(shard_size), static_cast<int64>(output_image_size), static_cast<int64>(filter_total_size)}; int64 col_buffer_elements = col_buffer_shape.num_elements(); // If the temporary allocation overhead is too large, fallback on Eigen // implementation which requires much less memory. int64 col_buffer_overhead = col_buffer_elements / total_tensor_elements; if (col_buffer_overhead > kMaxTempAllocationOverhead) { VLOG(2) << "Fallback on Eigen implementation of Conv3DBackpropInputOp: " "col_buffer_overhead=" << col_buffer_overhead; functor::CuboidConvolutionBackwardInput<Device, T>()( context->eigen_device<Device>(), in_backprop->tensor<T, 5>(), // input_backward filter.tensor<T, 5>(), // filter out_backprop.tensor<T, 5>(), // output_backward static_cast<int>(dims.spatial_dims[0].stride), // stride_planes static_cast<int>(dims.spatial_dims[1].stride), // stride_rows static_cast<int>(dims.spatial_dims[2].stride)); // stride_cols return; } Tensor col_buffer; OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, col_buffer_shape, &col_buffer)); // The input offset corresponding to a single input image. const int64 input_offset = dims.spatial_dims[0].input_size * dims.spatial_dims[1].input_size * dims.spatial_dims[2].input_size * dims.in_depth; // The output offset corresponding to a single output image. 
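// Each image's gradient is then computed as an independent GEMM, col_buffer = out_backprop_slice * filter^T, after which Col2im scatters the resulting patch matrix back into the input gradient.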
const int64 output_offset = dims.spatial_dims[0].output_size * dims.spatial_dims[1].output_size * dims.spatial_dims[2].output_size * dims.out_depth; const T* filter_data = filter.template flat<T>().data(); T* col_buffer_data = col_buffer.template flat<T>().data(); const T* out_backprop_data = out_backprop.template flat<T>().data(); auto in_backprop_flat = in_backprop->template flat<T>(); T* input_backprop_data = in_backprop_flat.data(); in_backprop_flat.device(context->eigen_device<Device>()) = in_backprop_flat.constant(T(0)); if (use_parallel_contraction) { typedef Eigen::TensorMap<Eigen::Tensor<T, 2, Eigen::RowMajor>, Eigen::Unaligned> TensorMap; typedef Eigen::TensorMap<Eigen::Tensor<const T, 2, Eigen::RowMajor>, Eigen::Unaligned> ConstTensorMap; // Initialize contraction dims (we need to transpose 'B' below). Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> contract_dims; contract_dims[0].first = 1; contract_dims[0].second = 1; for (int image_id = 0; image_id < dims.batch_size; ++image_id) { // Compute gradient into col_buffer. TensorMap C(col_buffer_data, output_image_size, filter_total_size); ConstTensorMap A(out_backprop_data + output_offset * image_id, output_image_size, dims.out_depth); ConstTensorMap B(filter_data, filter_total_size, dims.out_depth); C.device(context->eigen_cpu_device()) = A.contract(B, contract_dims); Col2im<T>(col_buffer_data, dims.in_depth, // Input spatial dimensions. dims.spatial_dims[0].input_size, // input planes dims.spatial_dims[1].input_size, // input rows dims.spatial_dims[2].input_size, // input cols // Filter spatial dimensions. dims.spatial_dims[0].filter_size, // filter planes dims.spatial_dims[1].filter_size, // filter rows dims.spatial_dims[2].filter_size, // filter cols // Spatial padding. top_pad_planes, top_pad_rows, left_pad_cols, bottom_pad_planes, bottom_pad_rows, right_pad_cols, // Spatial striding. dims.spatial_dims[0].stride, // stride planes dims.spatial_dims[1].stride, // stride rows dims.spatial_dims[2].stride, // stride cols input_backprop_data); input_backprop_data += input_offset; } } else { typedef Eigen::Map< Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> MatrixMap; typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> ConstMatrixMap; for (int image_id = 0; image_id < dims.batch_size; image_id += shard_size) { const int shard_limit = std::min(static_cast<int>(shard_size), static_cast<int>(dims.batch_size) - image_id); auto shard = [&dims, &top_pad_planes, &top_pad_rows, &left_pad_cols, &bottom_pad_planes, &bottom_pad_rows, &right_pad_cols, &output_image_size, &filter_total_size, &input_backprop_data, &col_buffer_data, &out_backprop_data, &filter_data, &input_offset, &output_offset, &size_C](int64 start, int64 limit) { for (int shard_id = start; shard_id < limit; ++shard_id) { T* im2col_buf = col_buffer_data + shard_id * size_C; T* input_data = input_backprop_data + shard_id * input_offset; const T* out_data = out_backprop_data + shard_id * output_offset; // Compute gradient into 'im2col_buf'. MatrixMap C(im2col_buf, output_image_size, filter_total_size); ConstMatrixMap A(out_data, output_image_size, dims.out_depth); ConstMatrixMap B(filter_data, filter_total_size, dims.out_depth); C.noalias() = A * B.transpose(); Col2im<T>(im2col_buf, dims.in_depth, // Input spatial dimensions. dims.spatial_dims[0].input_size, // input planes dims.spatial_dims[1].input_size, // input rows dims.spatial_dims[2].input_size, // input cols // Filter spatial dimensions. 
dims.spatial_dims[0].filter_size, // filter planes dims.spatial_dims[1].filter_size, // filter rows dims.spatial_dims[2].filter_size, // filter cols // Spatial padding. top_pad_planes, top_pad_rows, left_pad_cols, bottom_pad_planes, bottom_pad_rows, right_pad_cols, // Spatial striding. dims.spatial_dims[0].stride, // stride planes dims.spatial_dims[1].stride, // stride rows dims.spatial_dims[2].stride, // stride cols input_data); } }; Shard(worker_threads.num_threads, worker_threads.workers, shard_limit, work_unit_size, shard); input_backprop_data += input_offset * shard_limit; out_backprop_data += output_offset * shard_limit; } } } private: std::vector<int32> dilation_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; bool takes_shape_; TF_DISALLOW_COPY_AND_ASSIGN(Conv3DCustomBackpropInputOp); }; // Custom backrop input kernel is 30% - 4x faster when compiled with AVX2 than // default Eigen implementation (at the cost of ~2x-8x peak memory usage). #define REGISTER_CPU_KERNEL(T) \ REGISTER_KERNEL_BUILDER( \ Name("Conv3DBackpropInput").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ Conv3DCustomBackpropInputOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER( \ Name("Conv3DBackpropInputV2").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ Conv3DCustomBackpropInputOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropInput") \ .Device(DEVICE_CPU) \ .Label("custom") \ .TypeConstraint<T>("T"), \ Conv3DCustomBackpropInputOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropInputV2") \ .Device(DEVICE_CPU) \ .Label("custom") \ .TypeConstraint<T>("T"), \ Conv3DCustomBackpropInputOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropInput") \ .Device(DEVICE_CPU) \ .Label("eigen_tensor") \ .TypeConstraint<T>("T"), \ Conv3DBackpropInputOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropInputV2") \ .Device(DEVICE_CPU) \ .Label("eigen_tensor") \ .TypeConstraint<T>("T"), \ Conv3DBackpropInputOp<CPUDevice, T>); TF_CALL_half(REGISTER_CPU_KERNEL); TF_CALL_float(REGISTER_CPU_KERNEL); TF_CALL_double(REGISTER_CPU_KERNEL); #undef REGISTER_CPU_KERNEL // Backprop for filter that offloads computation to // Eigen::CuboidConvolutionBackwardFilter. template <typename Device, class T> class Conv3DBackpropFilterOp : public OpKernel { public: explicit Conv3DBackpropFilterOp(OpKernelConstruction* context) : OpKernel(context), data_format_(FORMAT_NHWC), takes_shape_(type_string().find("V2") != std::string::npos) { // data_format is only available in V2. if (takes_shape_) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Conv3DBackpropFilterOpV2 only supports NDHWC on the CPU.")); } OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilation_)); OP_REQUIRES(context, dilation_.size() == 5, errors::InvalidArgument("Dilation rates field must " "specify 5 dimensions")); OP_REQUIRES(context, (GetTensorDim(dilation_, data_format_, 'C') == 1 && GetTensorDim(dilation_, data_format_, 'N') == 1), errors::InvalidArgument( "Current implementation does not yet support " "dilation rates in the batch and depth dimensions.")); // TODO(yangzihao): Add CPU version of dilated conv 3D. 
OP_REQUIRES(context, (GetTensorDim(dilation_, data_format_, '0') == 1 && GetTensorDim(dilation_, data_format_, '1') == 1 && GetTensorDim(dilation_, data_format_, '2') == 1), errors::InvalidArgument( "Current CPU implementation does not yet support " "dilation rates larger than 1.")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window strides field must " "specify 5 dimensions")); OP_REQUIRES( context, (GetTensorDim(stride_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::InvalidArgument("Current implementation does not yet support " "strides in the batch and depth dimensions.")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const TensorShape& input_shape = input.shape(); const Tensor& out_backprop = context->input(2); const TensorShape& out_backprop_shape = out_backprop.shape(); TensorShape filter_shape; if (takes_shape_) { const Tensor& filter_sizes = context->input(1); OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape( filter_sizes.vec<int32>(), &filter_shape)); } else { filter_shape = context->input(1).shape(); } OP_REQUIRES( context, input_shape.dim_size(4) == filter_shape.dim_size(3), errors::InvalidArgument("input and filter_sizes must have the same " "number of channels. Got ", input_shape.dim_size(4), " for input and ", filter_shape.dim_size(3), " for filter_sizes")); OP_REQUIRES( context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), errors::InvalidArgument("out_backprop and filter_sizes must have the " "same number of channels. Got ", out_backprop_shape.dim_size(4), " for out_backprop and ", filter_shape.dim_size(4), " for filter_sizes")); ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( "Conv3DBackpropFilterOp", /*num_spatial_dims=*/3, input_shape, filter_shape, out_backprop_shape, stride_, padding_, data_format_, &dims)); Tensor* filter_backprop; OP_REQUIRES_OK(context, context->allocate_output(0, filter_shape, &filter_backprop)); if (input_shape.num_elements() == 0) { filter_backprop->template flat<T>().setZero(); return; } functor::CuboidConvolutionBackwardFilter<Device, T>()( context->eigen_device<Device>(), filter_backprop->tensor<T, 5>(), // filter_backward input.tensor<T, 5>(), // input out_backprop.tensor<T, 5>(), // output_backward static_cast<int>(dims.spatial_dims[0].stride), // stride_planes static_cast<int>(dims.spatial_dims[1].stride), // stride_rows static_cast<int>(dims.spatial_dims[2].stride)); // stride_cols } private: std::vector<int32> dilation_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; bool takes_shape_; TF_DISALLOW_COPY_AND_ASSIGN(Conv3DBackpropFilterOp); }; // Custom backprop for filter that explicitly does the work sharding and calls // Eigen only to multiply matrices. template <typename Device, class T> class Conv3DCustomBackpropFilterOp : public OpKernel { // Limit the maximum size of allocated temporary buffer to // kMaxTempAllocationOverhead times the size of the input tensors (input, // filter, out_backprop). If the size of the temporary buffer exceeds this // limit, fallback on Eigen implementation. 
static constexpr int kMaxTempAllocationOverhead = 25; public: explicit Conv3DCustomBackpropFilterOp(OpKernelConstruction* context) : OpKernel(context), data_format_(FORMAT_NHWC), takes_shape_(type_string().find("V2") != std::string::npos) { // data_format is only available in V2. if (takes_shape_) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Conv3DBackpropFilterOpV2 only supports NDHWC on the CPU.")); } OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilation_)); OP_REQUIRES(context, dilation_.size() == 5, errors::InvalidArgument("Dilation rates field must " "specify 5 dimensions")); OP_REQUIRES(context, (GetTensorDim(dilation_, data_format_, 'C') == 1 && GetTensorDim(dilation_, data_format_, 'N') == 1), errors::InvalidArgument( "Current implementation does not yet support " "dilation rates in the batch and depth dimensions.")); // TODO(yangzihao): Add CPU version of dilated conv 3D. OP_REQUIRES(context, (GetTensorDim(dilation_, data_format_, '0') == 1 && GetTensorDim(dilation_, data_format_, '1') == 1 && GetTensorDim(dilation_, data_format_, '2') == 1), errors::InvalidArgument( "Current CPU implementation does not yet support " "dilation rates larger than 1.")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window strides field must " "specify 5 dimensions")); OP_REQUIRES( context, (GetTensorDim(stride_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::InvalidArgument("Current implementation does not yet support " "strides in the batch and depth dimensions.")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const TensorShape& input_shape = input.shape(); const Tensor& out_backprop = context->input(2); const TensorShape& out_backprop_shape = out_backprop.shape(); TensorShape filter_shape; if (takes_shape_) { const Tensor& filter_sizes = context->input(1); OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape( filter_sizes.vec<int32>(), &filter_shape)); } else { filter_shape = context->input(1).shape(); } OP_REQUIRES( context, input_shape.dim_size(4) == filter_shape.dim_size(3), errors::InvalidArgument("input and filter_sizes must have the same " "number of channels. Got ", input_shape.dim_size(4), " for input and ", filter_shape.dim_size(3), " for filter_sizes")); OP_REQUIRES( context, out_backprop_shape.dim_size(4) == filter_shape.dim_size(4), errors::InvalidArgument("out_backprop and filter_sizes must have the " "same number of channels. 
Got ", out_backprop_shape.dim_size(4), " for out_backprop and ", filter_shape.dim_size(4), " for filter_sizes")); ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensions( "Conv3DBackpropFilterOp", /*num_spatial_dims=*/3, input_shape, filter_shape, out_backprop_shape, stride_, padding_, data_format_, &dims)); Tensor* filter_backprop; OP_REQUIRES_OK(context, context->allocate_output(0, filter_shape, &filter_backprop)); if (input_shape.num_elements() == 0) { filter_backprop->template flat<T>().setZero(); return; } int64 top_pad_planes, bottom_pad_planes; int64 top_pad_rows, bottom_pad_rows; int64 left_pad_cols, right_pad_cols; OP_REQUIRES_OK(context, GetWindowedOutputSizeVerbose( dims.spatial_dims[0].input_size, dims.spatial_dims[0].filter_size, dims.spatial_dims[0].stride, padding_, &dims.spatial_dims[0].output_size, &top_pad_planes, &bottom_pad_planes)); OP_REQUIRES_OK(context, GetWindowedOutputSizeVerbose( dims.spatial_dims[1].input_size, dims.spatial_dims[1].filter_size, dims.spatial_dims[1].stride, padding_, &dims.spatial_dims[1].output_size, &top_pad_rows, &bottom_pad_rows)); OP_REQUIRES_OK(context, GetWindowedOutputSizeVerbose( dims.spatial_dims[2].input_size, dims.spatial_dims[2].filter_size, dims.spatial_dims[2].stride, padding_, &dims.spatial_dims[2].output_size, &left_pad_cols, &right_pad_cols)); // TODO(ezhulenev): Extract work size and shard estimation to shared // functions in conv_grad_ops, and update 2d convolution backprop. // The total dimension size of each kernel. const int64 filter_total_size = dims.spatial_dims[0].filter_size * dims.spatial_dims[1].filter_size * dims.spatial_dims[2].filter_size * dims.in_depth; // The output image size is the spatial size of the output. const int64 output_image_size = dims.spatial_dims[0].output_size * dims.spatial_dims[1].output_size * dims.spatial_dims[2].output_size; // Shard 'batch' images (volumes) into 'shard_size' groups of images // (volumes) to be fed into the parallel matmul. Calculate 'shard_size' by // dividing the L3 cache size ('target_working_set_size') by the matmul size // of an individual image ('work_unit_size'). const auto cache_sizes = Eigen::internal::CacheSizes(); const ptrdiff_t l3_cache_size = cache_sizes.m_l3; // TODO(andydavis) // *) Consider reducing 'target_working_set_size' if L3 is shared by // other concurrently running tensorflow ops. const size_t target_working_set_size = l3_cache_size / sizeof(T); const int64 size_A = output_image_size * filter_total_size; const int64 size_B = output_image_size * dims.out_depth; const int64 size_C = filter_total_size * dims.out_depth; const int64 work_unit_size = size_A + size_B + size_C; const size_t shard_size = (target_working_set_size + work_unit_size - 1) / work_unit_size; // Total number of elements in all the tensors used by this kernel. int64 total_tensor_elements = input_shape.num_elements() + filter_shape.num_elements() + out_backprop_shape.num_elements(); // Shape of the temporary workspace buffer. TensorShape col_buffer_shape = {static_cast<int64>(shard_size), static_cast<int64>(output_image_size), static_cast<int64>(filter_total_size)}; int64 col_buffer_elements = col_buffer_shape.num_elements(); // If the temporary allocation overhead is too large, fallback on Eigen // implementation which requires much less memory. 
int64 col_buffer_overhead = col_buffer_elements / total_tensor_elements; if (col_buffer_overhead > kMaxTempAllocationOverhead) { VLOG(2) << "Fallback on Eigen implementation of Conv3DBackpropFilterOp: " "col_buffer_overhead=" << col_buffer_overhead; functor::CuboidConvolutionBackwardFilter<Device, T>()( context->eigen_device<Device>(), filter_backprop->tensor<T, 5>(), // filter_backward input.tensor<T, 5>(), // input out_backprop.tensor<T, 5>(), // output_backward static_cast<int>(dims.spatial_dims[0].stride), // stride_planes static_cast<int>(dims.spatial_dims[1].stride), // stride_rows static_cast<int>(dims.spatial_dims[2].stride)); // stride_cols return; } Tensor col_buffer; OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, col_buffer_shape, &col_buffer)); // The input offset corresponding to a single input image. const int64 input_offset = dims.spatial_dims[0].input_size * dims.spatial_dims[1].input_size * dims.spatial_dims[2].input_size * dims.in_depth; // The output offset corresponding to a single output image. const int64 output_offset = dims.spatial_dims[0].output_size * dims.spatial_dims[1].output_size * dims.spatial_dims[2].output_size * dims.out_depth; const T* input_data = input.template flat<T>().data(); T* col_buffer_data = col_buffer.template flat<T>().data(); const T* out_backprop_data = out_backprop.template flat<T>().data(); T* filter_backprop_data = filter_backprop->template flat<T>().data(); typedef Eigen::TensorMap<Eigen::Tensor<T, 2, Eigen::RowMajor>, Eigen::Unaligned> TensorMap; typedef Eigen::TensorMap<Eigen::Tensor<const T, 2, Eigen::RowMajor>, Eigen::Unaligned> ConstTensorMap; TensorMap C(filter_backprop_data, filter_total_size, dims.out_depth); C.setZero(); // Initialize contraction dims (we need to transpose 'A' below). Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> contract_dims; contract_dims[0].first = 0; contract_dims[0].second = 0; auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); for (int image_id = 0; image_id < dims.batch_size; image_id += shard_size) { const int shard_limit = std::min(static_cast<int>(shard_size), static_cast<int>(dims.batch_size) - image_id); auto shard = [&input_data, &col_buffer_data, &dims, &top_pad_planes, &top_pad_rows, &left_pad_cols, &bottom_pad_planes, &bottom_pad_rows, &right_pad_cols, &input_offset, &size_A](int64 start, int64 limit) { for (int shard_id = start; shard_id < limit; ++shard_id) { const T* input_data_shard = input_data + shard_id * input_offset; T* col_data_shard = col_buffer_data + shard_id * size_A; // When we compute the gradient with respect to the filters, we need // to do im2col to allow gemm-type computation. Im2col<T>(input_data_shard, dims.in_depth, // Input spatial dimensions. dims.spatial_dims[0].input_size, // input planes dims.spatial_dims[1].input_size, // input rows dims.spatial_dims[2].input_size, // input cols // Filter spatial dimensions. dims.spatial_dims[0].filter_size, // filter planes dims.spatial_dims[1].filter_size, // filter rows dims.spatial_dims[2].filter_size, // filter cols // Spatial padding. top_pad_planes, top_pad_rows, left_pad_cols, bottom_pad_planes, bottom_pad_rows, right_pad_cols, // Spatial striding. 
dims.spatial_dims[0].stride, // stride planes dims.spatial_dims[1].stride, // stride rows dims.spatial_dims[2].stride, // stride cols col_data_shard); } }; Shard(worker_threads.num_threads, worker_threads.workers, shard_limit, size_A, shard); ConstTensorMap A(col_buffer_data, output_image_size * shard_limit, filter_total_size); ConstTensorMap B(out_backprop_data, output_image_size * shard_limit, dims.out_depth); // Gradient with respect to filter. C.device(context->eigen_cpu_device()) += A.contract(B, contract_dims); input_data += input_offset * shard_limit; out_backprop_data += output_offset * shard_limit; } } private: std::vector<int32> dilation_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; bool takes_shape_; TF_DISALLOW_COPY_AND_ASSIGN(Conv3DCustomBackpropFilterOp); }; // Custom backrop input kernel is 30% - 4x faster when compiled with AVX2 than // default Eigen implementation (at the cost of ~2x-8x peak memory usage). #define REGISTER_CPU_KERNEL(T) \ REGISTER_KERNEL_BUILDER( \ Name("Conv3DBackpropFilter").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ Conv3DCustomBackpropFilterOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropFilterV2") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T"), \ Conv3DCustomBackpropFilterOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropFilter") \ .Device(DEVICE_CPU) \ .Label("custom") \ .TypeConstraint<T>("T"), \ Conv3DCustomBackpropFilterOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropFilterV2") \ .Device(DEVICE_CPU) \ .Label("custom") \ .TypeConstraint<T>("T"), \ Conv3DCustomBackpropFilterOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropFilter") \ .Device(DEVICE_CPU) \ .Label("eigen_tensor") \ .TypeConstraint<T>("T"), \ Conv3DBackpropFilterOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropFilterV2") \ .Device(DEVICE_CPU) \ .Label("eigen_tensor") \ .TypeConstraint<T>("T"), \ Conv3DBackpropFilterOp<CPUDevice, T>); TF_CALL_float(REGISTER_CPU_KERNEL); TF_CALL_double(REGISTER_CPU_KERNEL); #undef REGISTER_CPU_KERNEL // WARNING: Eigen::half is not trivially copyable and can't be used in // custom backprop filter kernel because of memcpy and memset in Im2col. #define REGISTER_CPU_KERNEL(T) \ REGISTER_KERNEL_BUILDER( \ Name("Conv3DBackpropFilter").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ Conv3DBackpropFilterOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropFilterV2") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T"), \ Conv3DBackpropFilterOp<CPUDevice, T>); TF_CALL_half(REGISTER_CPU_KERNEL); #undef REGISTER_CPU_KERNEL // GPU definitions of both ops. #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM // Forward declarations of the functor specializations for GPU. // This ensures that the custom implementation is used instead of the default // Eigen one (which is used for CPU). 
namespace functor { #define DECLARE_GPU_SPEC(T) \ template <> \ void TransformFilter<GPUDevice, T, int, 5>::operator()( \ const GPUDevice& d, FilterTensorFormat dst_filter_format, \ typename TTypes<T, 5, int>::ConstTensor in, \ typename TTypes<T, 5, int>::Tensor out); \ template <> \ void ReverseTransformFilter<GPUDevice, T, 5>::operator()( \ const GPUDevice& d, FilterTensorFormat src_filter_format, \ typename TTypes<T, 5>::ConstTensor in, \ typename TTypes<T, 5>::Tensor out); \ template <> \ void PadInput<GPUDevice, T, int, 5>::operator()( \ const GPUDevice& d, typename TTypes<T, 5, int>::ConstTensor in, \ const std::array<int, 3>& padding_left, \ const std::array<int, 3>& padding_right, \ typename TTypes<T, 5, int>::Tensor out, TensorFormat format, \ const T& padding_value); DECLARE_GPU_SPEC(Eigen::half); DECLARE_GPU_SPEC(float); DECLARE_GPU_SPEC(double); #undef DECLARE_GPU_SPEC } // namespace functor // A dummy type to group backward data autotune results together. struct Conv3dBackwardDataAutoTuneGroup { static string name() { return "Conv3dBwdData"; } }; typedef AutoTuneSingleton<Conv3dBackwardDataAutoTuneGroup, ConvParameters, se::dnn::AlgorithmConfig> AutoTuneConv3dBwdData; template <typename T> class Conv3DBackpropInputOp<GPUDevice, T> : public OpKernel { public: explicit Conv3DBackpropInputOp(OpKernelConstruction* context) : OpKernel(context), data_format_(FORMAT_NHWC), takes_shape_(type_string().find("V2") != std::string::npos) { // data_format is only available in V2. if (takes_shape_) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); } OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilation_)); OP_REQUIRES(context, dilation_.size() == 5, errors::InvalidArgument("Dilation rates field must " "specify 5 dimensions")); OP_REQUIRES(context, (GetTensorDim(dilation_, data_format_, 'C') == 1 && GetTensorDim(dilation_, data_format_, 'N') == 1), errors::InvalidArgument( "Current implementation does not yet support " "dilation rates in the batch and depth dimensions.")); OP_REQUIRES( context, (GetTensorDim(dilation_, data_format_, '0') > 0 && GetTensorDim(dilation_, data_format_, '1') > 0 && GetTensorDim(dilation_, data_format_, '2') > 0), errors::InvalidArgument("Dilated rates should be larger than 0.")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window strides field must " "specify 5 dimensions")); OP_REQUIRES( context, (GetTensorDim(stride_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::InvalidArgument("Current implementation does not yet support " "strides in the batch and depth dimensions.")); OP_REQUIRES( context, (GetTensorDim(stride_, data_format_, '0') > 0 && GetTensorDim(stride_, data_format_, '1') > 0 && GetTensorDim(stride_, data_format_, '2') > 0), errors::InvalidArgument("Spatial strides should be larger than 0.")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); cudnn_use_autotune_ = CudnnUseAutotune(); } void Compute(OpKernelContext* context) override { const Tensor& filter = context->input(1); const TensorShape& filter_shape = filter.shape(); const Tensor& out_backprop = context->input(2); const TensorShape& out_backprop_shape = out_backprop.shape(); TensorShape input_shape; if (takes_shape_) { const Tensor& input_sizes = context->input(0); 
OP_REQUIRES_OK(context, tensor::MakeShape(input_sizes, &input_shape)); } else { input_shape = context->input(0).shape(); } ConvBackpropDimensions dims; OP_REQUIRES_OK(context, ConvBackpropComputeDimensionsV2( "Conv3DBackpropInputOp", /*num_spatial_dims=*/3, input_shape, filter_shape, out_backprop_shape, dilation_, stride_, padding_, /*explicit_paddings=*/{}, data_format_, &dims)); Tensor* in_backprop; OP_REQUIRES_OK(context, context->allocate_output(0, input_shape, &in_backprop)); auto* stream = context->op_device_context()->stream(); OP_REQUIRES(context, stream, errors::Internal("No GPU stream available.")); bool is_grouped_convolution = filter_shape.dim_size(3) != dims.in_depth; if (!is_grouped_convolution && dims.filter_size(0) == 1 && dims.filter_size(1) == 1 && dims.filter_size(2) == 1 && dims.dilation(0) == 1 && dims.dilation(1) == 1 && dims.dilation(2) == 1 && dims.stride(0) == 1 && dims.stride(1) == 1 && dims.stride(2) == 1 && data_format_ == FORMAT_NHWC) { const uint64 m = dims.batch_size * dims.input_size(0) * dims.input_size(1) * dims.input_size(2); const uint64 k = dims.out_depth; const uint64 n = dims.in_depth; auto a_ptr = AsDeviceMemory(out_backprop.template flat<T>().data(), out_backprop.template flat<T>().size()); auto b_ptr = AsDeviceMemory(filter.template flat<T>().data(), filter.template flat<T>().size()); auto c_ptr = AsDeviceMemory(in_backprop->template flat<T>().data(), in_backprop->template flat<T>().size()); auto transpose = se::blas::Transpose::kTranspose; auto no_transpose = se::blas::Transpose::kNoTranspose; bool blas_launch_status = stream ->ThenBlasGemm(transpose, no_transpose, n, m, k, 1.0f, b_ptr, k, a_ptr, k, 0.0f, &c_ptr, n) .ok(); if (!blas_launch_status) { context->SetStatus(errors::Internal("Blas SGEMM launch failed : m=", m, ", n=", n, ", k=", k)); } return; } else if (!is_grouped_convolution && dims.filter_size(0) == dims.input_size(0) && dims.filter_size(1) == dims.input_size(1) && dims.filter_size(2) == dims.input_size(2) && padding_ == Padding::VALID && data_format_ == FORMAT_NHWC) { const uint64 m = dims.batch_size; const uint64 k = dims.out_depth; const uint64 n = dims.input_size(0) * dims.input_size(1) * dims.input_size(2) * dims.in_depth; auto a_ptr = AsDeviceMemory(out_backprop.template flat<T>().data(), out_backprop.template flat<T>().size()); auto b_ptr = AsDeviceMemory(filter.template flat<T>().data(), filter.template flat<T>().size()); auto c_ptr = AsDeviceMemory(in_backprop->template flat<T>().data(), in_backprop->template flat<T>().size()); auto transpose = se::blas::Transpose::kTranspose; auto no_transpose = se::blas::Transpose::kNoTranspose; bool blas_launch_status = stream ->ThenBlasGemm(transpose, no_transpose, n, m, k, 1.0f, b_ptr, k, a_ptr, k, 0.0f, &c_ptr, n) .ok(); if (!blas_launch_status) { context->SetStatus(errors::Internal("Blas SGEMM launch failed : m=", m, ", n=", n, ", k=", k)); } return; } int padding_planes = dims.SpatialPadding(padding_, 0); int padding_rows = dims.SpatialPadding(padding_, 1); int padding_cols = dims.SpatialPadding(padding_, 2); const bool planes_odd = (padding_planes % 2 != 0); const bool rows_odd = (padding_rows % 2 != 0); const bool cols_odd = (padding_cols % 2 != 0); TensorShape compatible_input_shape; if (rows_odd || cols_odd || planes_odd) { // cuDNN only supports the same amount of padding on both sides. 
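// When the total padding in a spatial dimension is odd, grow that input
// dimension by one element here so cuDNN sees symmetric padding; the extra
// plane/row/column of the computed gradient is stripped again further down
// by the PadInput call with negative padding amounts, before the layout
// transform back to the user-facing format.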
compatible_input_shape = { dims.batch_size, dims.in_depth, dims.input_size(0) + planes_odd, dims.input_size(1) + rows_odd, dims.input_size(2) + cols_odd, }; } else { compatible_input_shape = {dims.batch_size, dims.in_depth, dims.input_size(0), dims.input_size(1), dims.input_size(2)}; } CHECK(padding_rows >= 0 && padding_cols >= 0 && padding_planes >= 0) << "Negative paddings: (" << padding_rows << ", " << padding_cols << ", " << padding_planes << ")"; #if GOOGLE_CUDA const bool compute_in_nhwc = CUDNN_VERSION >= 8000 && DataTypeToEnum<T>::value == DT_HALF; #else // fast NDHWC implementation is a CUDA only feature const bool compute_in_nhwc = false; #endif const TensorFormat compute_data_format = (compute_in_nhwc && data_format_ == FORMAT_NHWC) ? FORMAT_NHWC : FORMAT_NCHW; VLOG(3) << "Compute Conv3DBackpropInput with cuDNN:" << " data_format=" << ToString(data_format_) << " compute_data_format=" << ToString(compute_data_format); constexpr auto kComputeInNHWC = std::make_tuple(se::dnn::DataLayout::kBatchYXDepth, se::dnn::FilterLayout::kOutputYXInput); constexpr auto kComputeInNCHW = std::make_tuple(se::dnn::DataLayout::kBatchDepthYX, se::dnn::FilterLayout::kOutputInputYX); se::dnn::DataLayout compute_data_layout; se::dnn::FilterLayout filter_layout; std::tie(compute_data_layout, filter_layout) = compute_data_format == FORMAT_NHWC ? kComputeInNHWC : kComputeInNCHW; se::dnn::BatchDescriptor input_desc(3); input_desc.set_count(dims.batch_size) .set_spatial_dim(DimIndex::X, compatible_input_shape.dim_size(4)) .set_spatial_dim(DimIndex::Y, compatible_input_shape.dim_size(3)) .set_spatial_dim(DimIndex::Z, compatible_input_shape.dim_size(2)) .set_feature_map_count(dims.in_depth) .set_layout(compute_data_layout); se::dnn::BatchDescriptor output_desc(3); output_desc.set_count(dims.batch_size) .set_spatial_dim(DimIndex::X, dims.output_size(2)) .set_spatial_dim(DimIndex::Y, dims.output_size(1)) .set_spatial_dim(DimIndex::Z, dims.output_size(0)) .set_feature_map_count(dims.out_depth) .set_layout(compute_data_layout); se::dnn::FilterDescriptor filter_desc(3); filter_desc.set_spatial_dim(DimIndex::X, dims.filter_size(2)) .set_spatial_dim(DimIndex::Y, dims.filter_size(1)) .set_spatial_dim(DimIndex::Z, dims.filter_size(0)) .set_input_feature_map_count(filter_shape.dim_size(3)) .set_output_feature_map_count(filter_shape.dim_size(4)) .set_layout(filter_layout); se::dnn::ConvolutionDescriptor conv_desc(3); conv_desc.set_dilation_rate(DimIndex::X, dims.dilation(2)) .set_dilation_rate(DimIndex::Y, dims.dilation(1)) .set_dilation_rate(DimIndex::Z, dims.dilation(0)) .set_filter_stride(DimIndex::X, dims.stride(2)) .set_filter_stride(DimIndex::Y, dims.stride(1)) .set_filter_stride(DimIndex::Z, dims.stride(0)) .set_zero_padding(DimIndex::X, padding_cols / 2) .set_zero_padding(DimIndex::Y, padding_rows / 2) .set_zero_padding(DimIndex::Z, padding_planes / 2) .set_group_count(dims.in_depth / filter_shape.dim_size(3)); // Shape: out, in, z, y, x. Tensor transformed_filter; auto dst_format = compute_data_format == FORMAT_NCHW ? FORMAT_OIHW : FORMAT_OHWI; TensorShape dst_shape = dst_format == FORMAT_OIHW ? 
TensorShape({filter_shape.dim_size(4), filter_shape.dim_size(3), dims.filter_size(0), dims.filter_size(1), dims.filter_size(2)}) : TensorShape({filter_shape.dim_size(4), dims.filter_size(0), dims.filter_size(1), dims.filter_size(2), filter_shape.dim_size(3)}); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, dst_shape, &transformed_filter)); functor::TransformFilter<GPUDevice, T, int, 5>()( context->eigen_device<GPUDevice>(), dst_format, To32Bit(filter.tensor<T, 5>()), To32Bit(transformed_filter.tensor<T, 5>())); // Shape: batch, filters, z, y, x. Tensor transformed_out_backprop; if (data_format_ == FORMAT_NHWC && compute_data_format == FORMAT_NCHW) { TensorShape nchw_shape = {dims.batch_size, dims.out_depth, dims.output_size(0), dims.output_size(1), dims.output_size(2)}; if (dims.out_depth > 1) { OP_REQUIRES_OK(context, context->allocate_temp( DataTypeToEnum<T>::value, nchw_shape, &transformed_out_backprop)); functor::NHWCToNCHW<GPUDevice, T, 5>()( context->eigen_device<GPUDevice>(), out_backprop.tensor<T, 5>(), transformed_out_backprop.tensor<T, 5>()); } else { CHECK(transformed_out_backprop.CopyFrom(out_backprop, nchw_shape)); } } else { transformed_out_backprop = out_backprop; } // Shape: batch, filters, z, y, x. Tensor pre_transformed_in_backprop; OP_REQUIRES_OK(context, context->allocate_temp( DataTypeToEnum<T>::value, ShapeFromFormat(compute_data_format, compatible_input_shape.dim_size(0), {{compatible_input_shape.dim_size(2), compatible_input_shape.dim_size(3), compatible_input_shape.dim_size(4)}}, compatible_input_shape.dim_size(1)), &pre_transformed_in_backprop)); auto out_backprop_ptr = AsDeviceMemory(transformed_out_backprop.template flat<T>().data(), transformed_out_backprop.template flat<T>().size()); auto filter_ptr = AsDeviceMemory(transformed_filter.template flat<T>().data(), transformed_filter.template flat<T>().size()); auto in_backprop_ptr = AsDeviceMemory(pre_transformed_in_backprop.template flat<T>().data(), pre_transformed_in_backprop.template flat<T>().size()); static int64 ConvolveBackwardDataScratchSize = GetDnnWorkspaceLimit( "TF_CUDNN_WORKSPACE_LIMIT_IN_MB", 1LL << 32); // 4GB by default const int device_id = stream->parent()->device_ordinal(); // To make sure the Conv3DBackpropInputV2 get the correct dtype, we infer // the dtype from 2nd input, i.e., out_backprop. 
DataType dtype = context->input(2).dtype(); const ConvParameters conv_parameters = { dims.batch_size, dims.in_depth, {{dims.input_size(0), dims.input_size(1), dims.input_size(2)}}, compute_data_format, dims.out_depth, {{dims.filter_size(0), dims.filter_size(1), dims.filter_size(2)}}, {{dims.dilation(0), dims.dilation(1), dims.dilation(2)}}, {{dims.stride(0), dims.stride(1), dims.stride(2)}}, {{padding_planes, padding_rows, padding_cols}}, dtype, device_id, conv_desc.group_count()}; using se::dnn::AlgorithmConfig; using se::dnn::AlgorithmDesc; using se::dnn::ProfileResult; #if TENSORFLOW_USE_ROCM // cudnn_use_autotune is applicable only the CUDA flow // for ROCm/MIOpen, we need to call GetMIOpenConvolveAlgorithms explicitly // if we do not have a cached algorithm_config for this conv_parameters cudnn_use_autotune_ = true; #endif AlgorithmConfig algorithm_config; if (cudnn_use_autotune_ && !AutoTuneConv3dBwdData::GetInstance()->Find( conv_parameters, &algorithm_config)) { std::vector<std::unique_ptr<se::dnn::ConvolveExecutionPlan>> plans; #if GOOGLE_CUDA std::vector<AlgorithmDesc> algorithms; std::vector<AlgorithmConfig> configs; if (CudnnUseFrontend()) { OP_REQUIRES(context, stream->parent()->GetConvolveExecutionPlans( se::dnn::ConvolutionKind::BACKWARD_DATA, se::dnn::ToDataType<T>::value, stream, input_desc, filter_desc, output_desc, conv_desc, &plans), errors::Unknown( "Failed to get convolution execution plan. This is " "probably because cuDNN failed to initialize, so try " "looking to see if a warning log message was printed " "above.")); for (const auto& plan : plans) { configs.push_back(AlgorithmConfig( AlgorithmDesc{plan->getTag(), plan->get_raw_desc()}, plan->getWorkspaceSize())); } } else { OP_REQUIRES(context, stream->parent()->GetConvolveBackwardDataAlgorithms( conv_parameters.ShouldIncludeWinogradNonfusedAlgo<T>( stream->parent()), &algorithms), errors::Unknown( "Failed to get convolution execution plan. This is " "probably because cuDNN failed to initialize, so try " "looking to see if a warning log message was printed " "above.")); for (const auto& algorithm : algorithms) { configs.push_back(AlgorithmConfig(algorithm)); } } se::TfAllocatorAdapter tf_allocator_adapter( context->device()->GetAllocator({}), stream); se::RedzoneAllocator rz_allocator(stream, &tf_allocator_adapter, se::GpuAsmOpts()); se::DeviceMemory<T> in_backprop_ptr_rz( WrapRedzoneBestEffort(&rz_allocator, in_backprop_ptr)); std::vector<tensorflow::AutotuneResult> results; for (auto& profile_config : configs) { // TODO(zhengxq): profile each algorithm multiple times to better // accuracy. DnnScratchAllocator scratch_allocator(ConvolveBackwardDataScratchSize, context); se::RedzoneAllocator rz_scratch_allocator( stream, &tf_allocator_adapter, se::GpuAsmOpts(), /*memory_limit=*/ConvolveBackwardDataScratchSize); se::ScratchAllocator* allocator_used = !RedzoneCheckDisabled() ? 
static_cast<se::ScratchAllocator*>(&rz_scratch_allocator) : static_cast<se::ScratchAllocator*>(&scratch_allocator); ProfileResult profile_result; Status cudnn_launch_status; if (CudnnUseFrontend()) { cudnn_launch_status = stream->ConvolveBackwardDataWithExecutionPlan( filter_desc, filter_ptr, output_desc, out_backprop_ptr, conv_desc, input_desc, &in_backprop_ptr_rz, allocator_used, profile_config, &profile_result); } else { cudnn_launch_status = stream->ConvolveBackwardDataWithAlgorithm( filter_desc, filter_ptr, output_desc, out_backprop_ptr, conv_desc, input_desc, &in_backprop_ptr_rz, allocator_used, profile_config, &profile_result); } if (cudnn_launch_status.ok() && profile_result.is_valid()) { results.emplace_back(); auto& result = results.back(); if (CudnnUseFrontend()) { result.mutable_cuda_conv_plan()->set_exec_plan_id( profile_config.algorithm()->exec_plan_id()); } else { result.mutable_conv()->set_algorithm( profile_config.algorithm()->algo_id()); result.mutable_conv()->set_tensor_ops_enabled( profile_config.algorithm()->tensor_ops_enabled()); } result.set_scratch_bytes( !RedzoneCheckDisabled() ? rz_scratch_allocator.TotalAllocatedBytesExcludingRedzones() : scratch_allocator.TotalByteSize()); *result.mutable_run_time() = proto_utils::ToDurationProto( absl::Milliseconds(profile_result.elapsed_time_in_ms())); // TODO(george): they don't do results at all?? CheckRedzones(rz_scratch_allocator, &result); CheckRedzones(rz_allocator, &result); } else { // When CuDNN frontend APIs are used, we need to make sure the // profiling results are one-to-one mapping of the "plans". So, we // insert dummy results when the excution fails. results.emplace_back(); auto& result = results.back(); result.mutable_failure()->set_kind(AutotuneResult::UNKNOWN); result.mutable_failure()->set_msg( absl::StrCat("Profiling failure on CUDNN engine: ", profile_config.algorithm()->exec_plan_id())); } } #elif TENSORFLOW_USE_ROCM DnnScratchAllocator scratch_allocator(ConvolveBackwardDataScratchSize, context); std::vector<ProfileResult> algorithms; CHECK(stream->parent()->GetMIOpenConvolveAlgorithms( se::dnn::ConvolutionKind::BACKWARD_DATA, se::dnn::ToDataType<T>::value, stream, input_desc, in_backprop_ptr, filter_desc, filter_ptr, output_desc, out_backprop_ptr, conv_desc, &scratch_allocator, &algorithms)); std::vector<tensorflow::AutotuneResult> results; for (auto miopen_algorithm : algorithms) { auto profile_algorithm = miopen_algorithm.algorithm(); ProfileResult profile_result; auto miopen_launch_status = stream->ConvolveBackwardDataWithAlgorithm( filter_desc, filter_ptr, output_desc, out_backprop_ptr, conv_desc, input_desc, &in_backprop_ptr, &scratch_allocator, AlgorithmConfig(profile_algorithm, miopen_algorithm.scratch_size()), &profile_result); if (miopen_launch_status.ok()) { if (profile_result.is_valid()) { results.emplace_back(); auto& result = results.back(); result.mutable_conv()->set_algorithm(profile_algorithm.algo_id()); result.mutable_conv()->set_tensor_ops_enabled( profile_algorithm.tensor_ops_enabled()); result.set_scratch_bytes(scratch_allocator.TotalByteSize()); *result.mutable_run_time() = proto_utils::ToDurationProto( absl::Milliseconds(profile_result.elapsed_time_in_ms())); } } } #endif LogConvAutotuneResults(se::dnn::ConvolutionKind::BACKWARD_DATA, se::dnn::ToDataType<T>::value, in_backprop_ptr, filter_ptr, out_backprop_ptr, input_desc, filter_desc, output_desc, conv_desc, stream->parent(), results); if (CudnnUseFrontend()) { OP_REQUIRES_OK(context, BestCudnnConvAlgorithm(results, &plans, 
&algorithm_config)); } else { OP_REQUIRES_OK(context, BestCudnnConvAlgorithm(results, nullptr, &algorithm_config)); } AutoTuneConv3dBwdData::GetInstance()->Insert(conv_parameters, algorithm_config); } Status cudnn_launch_status; DnnScratchAllocator scratch_allocator(ConvolveBackwardDataScratchSize, context); if (CudnnUseFrontend()) { if (algorithm_config.algorithm().has_value()) { VLOG(4) << "Conv3DBackpropInput Execution Plan: " << algorithm_config.algorithm()->exec_plan_id(); } else { VLOG(4) << "Convolution AutoTune has been turned off"; } cudnn_launch_status = stream->ConvolveBackwardDataWithExecutionPlan( filter_desc, filter_ptr, output_desc, out_backprop_ptr, conv_desc, input_desc, &in_backprop_ptr, &scratch_allocator, algorithm_config, nullptr); } else { cudnn_launch_status = stream->ConvolveBackwardDataWithAlgorithm( filter_desc, filter_ptr, output_desc, out_backprop_ptr, conv_desc, input_desc, &in_backprop_ptr, &scratch_allocator, algorithm_config, nullptr); } if (!cudnn_launch_status.ok()) { context->SetStatus(cudnn_launch_status); } if (rows_odd || cols_odd || planes_odd) { Tensor in_backprop_remove_padding; OP_REQUIRES_OK( context, context->allocate_temp( DataTypeToEnum<T>::value, ShapeFromFormat(compute_data_format, dims.batch_size, {{dims.input_size(0), dims.input_size(1), dims.input_size(2)}}, dims.in_depth), &in_backprop_remove_padding)); // Remove the padding for odd spatial dimensions. functor::PadInput<GPUDevice, T, int, 5>()( context->eigen_device<GPUDevice>(), To32Bit(const_cast<const Tensor&>(pre_transformed_in_backprop) .tensor<T, 5>()), {{0, 0, 0}}, {{-planes_odd, -rows_odd, -cols_odd}}, To32Bit(in_backprop_remove_padding.tensor<T, 5>()), compute_data_format, T{}); pre_transformed_in_backprop = in_backprop_remove_padding; } if (data_format_ == FORMAT_NHWC && compute_data_format == FORMAT_NCHW) { auto toConstTensor = [](const Tensor& x) -> const Tensor { return x; }; functor::NCHWToNHWC<GPUDevice, T, 5>()( context->eigen_device<GPUDevice>(), toConstTensor(pre_transformed_in_backprop).template tensor<T, 5>(), in_backprop->tensor<T, 5>()); } else { *in_backprop = pre_transformed_in_backprop; } } private: std::vector<int32> dilation_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; bool takes_shape_; bool cudnn_use_autotune_; }; // A dummy type to group backward filter autotune results together. struct Conv3dBackwardFilterAutoTuneGroup { static string name() { return "Conv3dBwdFilter"; } }; typedef AutoTuneSingleton<Conv3dBackwardFilterAutoTuneGroup, ConvParameters, se::dnn::AlgorithmConfig> AutoTuneConv3dBwdFilter; template <typename T> class Conv3DBackpropFilterOp<GPUDevice, T> : public OpKernel { public: explicit Conv3DBackpropFilterOp(OpKernelConstruction* context) : OpKernel(context), data_format_(FORMAT_NHWC), takes_shape_(type_string().find("V2") != std::string::npos) { // data_format is only available in V2. 
if (takes_shape_) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); } OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilation_)); OP_REQUIRES(context, dilation_.size() == 5, errors::InvalidArgument("Dilation rates field must " "specify 5 dimensions")); OP_REQUIRES(context, (GetTensorDim(dilation_, data_format_, 'C') == 1 && GetTensorDim(dilation_, data_format_, 'N') == 1), errors::InvalidArgument( "Current implementation does not yet support " "dilation rates in the batch and depth dimensions.")); OP_REQUIRES( context, (GetTensorDim(dilation_, data_format_, '0') > 0 && GetTensorDim(dilation_, data_format_, '1') > 0 && GetTensorDim(dilation_, data_format_, '2') > 0), errors::InvalidArgument("Dilated rates should be larger than 0.")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window strides field must " "specify 5 dimensions")); OP_REQUIRES( context, (GetTensorDim(stride_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::InvalidArgument("Current implementation does not yet support " "strides in the batch and depth dimensions.")); OP_REQUIRES( context, (GetTensorDim(stride_, data_format_, '0') > 0 && GetTensorDim(stride_, data_format_, '1') > 0 && GetTensorDim(stride_, data_format_, '2') > 0), errors::InvalidArgument("Spatial strides should be larger than 0.")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); cudnn_use_autotune_ = CudnnUseAutotune(); } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const TensorShape& input_shape = input.shape(); const Tensor& out_backprop = context->input(2); const TensorShape& out_backprop_shape = out_backprop.shape(); TensorShape filter_shape; if (takes_shape_) { const Tensor& filter_sizes = context->input(1); OP_REQUIRES_OK(context, tensor::MakeShape(filter_sizes, &filter_shape)); } else { filter_shape = context->input(1).shape(); } ConvBackpropDimensions dims; OP_REQUIRES_OK( context, ConvBackpropComputeDimensionsV2( "Conv3DBackpropFilterOp", /*num_spatial_dims=*/3, input_shape, filter_shape, out_backprop_shape, dilation_, stride_, padding_, /*explicit_paddings=*/{}, data_format_, &dims)); Tensor* filter_backprop; OP_REQUIRES_OK(context, context->allocate_output(0, filter_shape, &filter_backprop)); auto* stream = context->op_device_context()->stream(); OP_REQUIRES(context, stream, errors::Internal("No GPU stream available.")); bool is_grouped_convolution = filter_shape.dim_size(3) != dims.in_depth; if (!is_grouped_convolution && dims.filter_size(1) == 1 && dims.filter_size(2) == 1 && dims.filter_size(0) == 1 && dims.dilation(2) == 1 && dims.dilation(1) == 1 && dims.dilation(0) == 1 && dims.stride(2) == 1 && dims.stride(1) == 1 && dims.stride(0) == 1 && data_format_ == FORMAT_NHWC) { const uint64 m = dims.in_depth; const uint64 k = dims.batch_size * dims.input_size(1) * dims.input_size(2) * dims.input_size(0); const uint64 n = dims.out_depth; // The shape of output backprop is // [batch, out_z, out_y, out_x, out_depth] // From cublas's perspective, it is: n x k auto a_ptr = AsDeviceMemory(out_backprop.template flat<T>().data(), out_backprop.template flat<T>().size()); // The shape of input is: // [batch, in_z, in_y, in_x, in_depth], // From cublas's perspective, it is: m x k auto b_ptr = 
AsDeviceMemory(input.template flat<T>().data(), input.template flat<T>().size()); // The shape of the filter backprop is: // [1, 1, 1, in_depth, out_depth] // From cublas's perspective, it is: n x m auto c_ptr = AsDeviceMemory(filter_backprop->template flat<T>().data(), filter_backprop->template flat<T>().size()); bool blas_launch_status = stream ->ThenBlasGemm(se::blas::Transpose::kNoTranspose, se::blas::Transpose::kTranspose, n, m, k, 1.0f, a_ptr, n, b_ptr, m, 0.0f, &c_ptr, n) .ok(); if (!blas_launch_status) { context->SetStatus(errors::Internal("Blas SGEMM launch failed : m=", m, ", n=", n, ", k=", k)); } return; } else if (!is_grouped_convolution && dims.filter_size(0) == dims.input_size(0) && dims.filter_size(1) == dims.input_size(1) && dims.filter_size(2) == dims.input_size(2) && padding_ == Padding::VALID && data_format_ == FORMAT_NHWC) { const uint64 m = dims.input_size(0) * dims.input_size(1) * dims.input_size(2) * dims.in_depth; const uint64 k = dims.batch_size; const uint64 n = dims.out_depth; auto a_ptr = AsDeviceMemory(input.template flat<T>().data(), input.template flat<T>().size()); auto b_ptr = AsDeviceMemory(out_backprop.template flat<T>().data(), out_backprop.template flat<T>().size()); auto c_ptr = AsDeviceMemory(filter_backprop->template flat<T>().data(), filter_backprop->template flat<T>().size()); bool blas_launch_status = stream ->ThenBlasGemm(se::blas::Transpose::kNoTranspose, se::blas::Transpose::kTranspose, n, m, k, 1.0f, b_ptr, n, a_ptr, m, 0.0f, &c_ptr, n) .ok(); if (!blas_launch_status) { context->SetStatus(errors::Internal("Blas SGEMM launch failed : m=", m, ", n=", n, ", k=", k)); } return; } int padding_planes = dims.SpatialPadding(padding_, 0); int padding_rows = dims.SpatialPadding(padding_, 1); int padding_cols = dims.SpatialPadding(padding_, 2); const bool planes_odd = (padding_planes % 2 != 0); const bool rows_odd = (padding_rows % 2 != 0); const bool cols_odd = (padding_cols % 2 != 0); Tensor compatible_input; if (rows_odd || cols_odd || planes_odd) { OP_REQUIRES_OK(context, context->allocate_temp( DataTypeToEnum<T>::value, ShapeFromFormat(data_format_, dims.batch_size, {{dims.input_size(0) + planes_odd, dims.input_size(1) + rows_odd, dims.input_size(2) + cols_odd}}, dims.in_depth), &compatible_input)); functor::PadInput<GPUDevice, T, int, 5>()( context->template eigen_device<GPUDevice>(), To32Bit(input.tensor<T, 5>()), {{0, 0, 0}}, {{planes_odd, rows_odd, cols_odd}}, To32Bit(compatible_input.tensor<T, 5>()), data_format_, T{}); } else { compatible_input = input; } CHECK(padding_rows >= 0 && padding_cols >= 0 && padding_planes >= 0) << "Negative paddings: (" << padding_rows << ", " << padding_cols << ", " << padding_planes << ")"; #if GOOGLE_CUDA const bool compute_in_nhwc = CUDNN_VERSION >= 8000 && DataTypeToEnum<T>::value == DT_HALF; #else // fast NDHWC implementation is a CUDA only feature const bool compute_in_nhwc = false; #endif const TensorFormat compute_data_format = (compute_in_nhwc && data_format_ == FORMAT_NHWC) ? 
FORMAT_NHWC : FORMAT_NCHW; VLOG(3) << "Compute Conv3DBackpropFilter with cuDNN:" << " data_format=" << ToString(data_format_) << " compute_data_format=" << ToString(compute_data_format); constexpr auto kComputeInNHWC = std::make_tuple(se::dnn::DataLayout::kBatchYXDepth, se::dnn::FilterLayout::kOutputYXInput); constexpr auto kComputeInNCHW = std::make_tuple(se::dnn::DataLayout::kBatchDepthYX, se::dnn::FilterLayout::kOutputInputYX); se::dnn::DataLayout compute_data_layout; se::dnn::FilterLayout filter_layout; std::tie(compute_data_layout, filter_layout) = compute_data_format == FORMAT_NHWC ? kComputeInNHWC : kComputeInNCHW; se::dnn::BatchDescriptor input_desc(3); input_desc.set_count(dims.batch_size) .set_spatial_dim(DimIndex::X, GetTensorDim(compatible_input, data_format_, '2')) .set_spatial_dim(DimIndex::Y, GetTensorDim(compatible_input, data_format_, '1')) .set_spatial_dim(DimIndex::Z, GetTensorDim(compatible_input, data_format_, '0')) .set_feature_map_count(dims.in_depth) .set_layout(compute_data_layout); se::dnn::BatchDescriptor output_desc(3); output_desc.set_count(dims.batch_size) .set_spatial_dim(DimIndex::X, dims.output_size(2)) .set_spatial_dim(DimIndex::Y, dims.output_size(1)) .set_spatial_dim(DimIndex::Z, dims.output_size(0)) .set_feature_map_count(dims.out_depth) .set_layout(compute_data_layout); se::dnn::FilterDescriptor filter_desc(3); filter_desc.set_spatial_dim(DimIndex::X, dims.filter_size(2)) .set_spatial_dim(DimIndex::Y, dims.filter_size(1)) .set_spatial_dim(DimIndex::Z, dims.filter_size(0)) .set_input_feature_map_count(filter_shape.dim_size(3)) .set_output_feature_map_count(filter_shape.dim_size(4)) .set_layout(filter_layout); se::dnn::ConvolutionDescriptor conv_desc(3); conv_desc.set_dilation_rate(DimIndex::X, dims.dilation(2)) .set_dilation_rate(DimIndex::Y, dims.dilation(1)) .set_dilation_rate(DimIndex::Z, dims.dilation(0)) .set_filter_stride(DimIndex::X, dims.stride(2)) .set_filter_stride(DimIndex::Y, dims.stride(1)) .set_filter_stride(DimIndex::Z, dims.stride(0)) .set_zero_padding(DimIndex::X, padding_cols / 2) .set_zero_padding(DimIndex::Y, padding_rows / 2) .set_zero_padding(DimIndex::Z, padding_planes / 2) .set_group_count(dims.in_depth / filter_shape.dim_size(3)); Tensor pre_transformed_filter_backprop; auto dst_format = compute_data_format == FORMAT_NCHW ? FORMAT_OIHW : FORMAT_OHWI; TensorShape dst_shape = dst_format == FORMAT_OIHW ? 
TensorShape({filter_shape.dim_size(4), filter_shape.dim_size(3), dims.filter_size(0), dims.filter_size(1), dims.filter_size(2)}) : TensorShape({filter_shape.dim_size(4), dims.filter_size(0), dims.filter_size(1), dims.filter_size(2), filter_shape.dim_size(3)}); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, dst_shape, &pre_transformed_filter_backprop)); Tensor transformed_out_backprop; if (data_format_ == FORMAT_NHWC && compute_data_format == FORMAT_NCHW) { VLOG(4) << "Convert the `out_backprop` tensor from NDHWC to NCDHW."; TensorShape nchw_shape = {dims.batch_size, dims.out_depth, dims.output_size(0), dims.output_size(1), dims.output_size(2)}; OP_REQUIRES_OK( context, context->allocate_temp(DataTypeToEnum<T>::value, nchw_shape, &transformed_out_backprop)); if (dims.out_depth > 1) { functor::NHWCToNCHW<GPUDevice, T, 5>()( context->eigen_device<GPUDevice>(), out_backprop.tensor<T, 5>(), transformed_out_backprop.tensor<T, 5>()); } else { CHECK(transformed_out_backprop.CopyFrom(out_backprop, nchw_shape)); } } else { transformed_out_backprop = out_backprop; } Tensor transformed_input; if (data_format_ == FORMAT_NHWC && compute_data_format == FORMAT_NCHW) { VLOG(4) << "Convert the `input` tensor from NDHWC to NCDHW."; TensorShape nchw_shape = { dims.batch_size, dims.in_depth, compatible_input.dim_size(1), compatible_input.dim_size(2), compatible_input.dim_size(3)}; if (dims.in_depth > 1) { OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::value, nchw_shape, &transformed_input)); functor::NHWCToNCHW<GPUDevice, T, 5>()( context->eigen_device<GPUDevice>(), const_cast<const Tensor&>(compatible_input).tensor<T, 5>(), transformed_input.tensor<T, 5>()); } else { CHECK(transformed_input.CopyFrom(compatible_input, nchw_shape)); } } else { transformed_input = compatible_input; } auto out_backprop_ptr = AsDeviceMemory(transformed_out_backprop.template flat<T>().data(), transformed_out_backprop.template flat<T>().size()); auto filter_backprop_ptr = AsDeviceMemory( pre_transformed_filter_backprop.template flat<T>().data(), pre_transformed_filter_backprop.template flat<T>().size()); auto input_ptr = AsDeviceMemory(transformed_input.template flat<T>().data(), transformed_input.template flat<T>().size()); static int64 ConvolveBackwardFilterScratchSize = GetDnnWorkspaceLimit( "TF_CUDNN_WORKSPACE_LIMIT_IN_MB", 1LL << 32); // 4GB by default const int device_id = stream->parent()->device_ordinal(); DataType dtype = input.dtype(); const ConvParameters conv_parameters = { dims.batch_size, dims.in_depth, {{dims.input_size(0), dims.input_size(1), dims.input_size(2)}}, compute_data_format, dims.out_depth, {{dims.filter_size(0), dims.filter_size(1), dims.filter_size(2)}}, {{dims.dilation(0), dims.dilation(1), dims.dilation(2)}}, {{dims.stride(0), dims.stride(1), dims.stride(2)}}, {{padding_planes, padding_rows, padding_cols}}, dtype, device_id, conv_desc.group_count()}; using se::dnn::AlgorithmConfig; using se::dnn::AlgorithmDesc; using se::dnn::ProfileResult; #if TENSORFLOW_USE_ROCM // cudnn_use_autotune is applicable only the CUDA flow // for ROCm/MIOpen, we need to call GetMIOpenConvolveAlgorithms explicitly // if we do not have a cached algorithm_config for this conv_parameters cudnn_use_autotune_ = true; #endif AlgorithmConfig algorithm_config; if (cudnn_use_autotune_ && !AutoTuneConv3dBwdFilter::GetInstance()->Find( conv_parameters, &algorithm_config)) { std::vector<std::unique_ptr<se::dnn::ConvolveExecutionPlan>> plans; #if GOOGLE_CUDA std::vector<AlgorithmDesc> 
algorithms; std::vector<AlgorithmConfig> configs; if (CudnnUseFrontend()) { OP_REQUIRES(context, stream->parent()->GetConvolveExecutionPlans( se::dnn::ConvolutionKind::BACKWARD_FILTER, se::dnn::ToDataType<T>::value, stream, input_desc, filter_desc, output_desc, conv_desc, &plans), errors::Unknown( "Failed to get convolution execution plan. This is " "probably because cuDNN failed to initialize, so try " "looking to see if a warning log message was printed " "above.")); for (const auto& plan : plans) { configs.push_back(AlgorithmConfig( AlgorithmDesc{plan->getTag(), plan->get_raw_desc()}, plan->getWorkspaceSize())); } } else { OP_REQUIRES(context, stream->parent()->GetConvolveBackwardFilterAlgorithms( conv_parameters.ShouldIncludeWinogradNonfusedAlgo<T>( stream->parent()), &algorithms), errors::Unknown( "Failed to get convolution execution plan. This is " "probably because cuDNN failed to initialize, so try " "looking to see if a warning log message was printed " "above.")); for (const auto& algorithm : algorithms) { configs.push_back(AlgorithmConfig(algorithm)); } } std::vector<tensorflow::AutotuneResult> results; for (auto& profile_config : configs) { // TODO(zhengxq): profile each algorithm multiple times to better // accuracy. DnnScratchAllocator scratch_allocator(ConvolveBackwardFilterScratchSize, context); ProfileResult profile_result; Status cudnn_launch_status; if (CudnnUseFrontend()) { cudnn_launch_status = stream->ConvolveBackwardFilterWithExecutionPlan( input_desc, input_ptr, output_desc, out_backprop_ptr, conv_desc, filter_desc, &filter_backprop_ptr, &scratch_allocator, profile_config, &profile_result); } else { cudnn_launch_status = stream->ConvolveBackwardFilterWithAlgorithm( input_desc, input_ptr, output_desc, out_backprop_ptr, conv_desc, filter_desc, &filter_backprop_ptr, &scratch_allocator, profile_config, &profile_result); } if (cudnn_launch_status.ok() && profile_result.is_valid()) { results.emplace_back(); auto& result = results.back(); if (CudnnUseFrontend()) { result.mutable_cuda_conv_plan()->set_exec_plan_id( profile_config.algorithm()->exec_plan_id()); } else { result.mutable_conv()->set_algorithm( profile_config.algorithm()->algo_id()); result.mutable_conv()->set_tensor_ops_enabled( profile_config.algorithm()->tensor_ops_enabled()); } result.set_scratch_bytes(scratch_allocator.TotalByteSize()); *result.mutable_run_time() = proto_utils::ToDurationProto( absl::Milliseconds(profile_result.elapsed_time_in_ms())); } else if (CudnnUseFrontend()) { // When CuDNN frontend APIs are used, we need to make sure the // profiling results are one-to-one mapping of the "plans". So, we // insert dummy results when the excution fails. 
results.emplace_back(); auto& result = results.back(); result.mutable_failure()->set_kind(AutotuneResult::UNKNOWN); result.mutable_failure()->set_msg( absl::StrCat("Profiling failure on CUDNN engine: ", profile_config.algorithm()->exec_plan_id())); } } #elif TENSORFLOW_USE_ROCM DnnScratchAllocator scratch_allocator(ConvolveBackwardFilterScratchSize, context); std::vector<ProfileResult> algorithms; CHECK(stream->parent()->GetMIOpenConvolveAlgorithms( se::dnn::ConvolutionKind::BACKWARD_FILTER, se::dnn::ToDataType<T>::value, stream, input_desc, input_ptr, filter_desc, filter_backprop_ptr, output_desc, out_backprop_ptr, conv_desc, &scratch_allocator, &algorithms)); std::vector<tensorflow::AutotuneResult> results; for (auto miopen_algorithm : algorithms) { auto profile_algorithm = miopen_algorithm.algorithm(); ProfileResult profile_result; auto cudnn_launch_status = stream->ConvolveBackwardFilterWithAlgorithm( input_desc, input_ptr, output_desc, out_backprop_ptr, conv_desc, filter_desc, &filter_backprop_ptr, &scratch_allocator, AlgorithmConfig(profile_algorithm, miopen_algorithm.scratch_size()), &profile_result); if (cudnn_launch_status.ok()) { if (profile_result.is_valid()) { results.emplace_back(); auto& result = results.back(); result.mutable_conv()->set_algorithm(profile_algorithm.algo_id()); result.mutable_conv()->set_tensor_ops_enabled( profile_algorithm.tensor_ops_enabled()); result.set_scratch_bytes(scratch_allocator.TotalByteSize()); *result.mutable_run_time() = proto_utils::ToDurationProto( absl::Milliseconds(profile_result.elapsed_time_in_ms())); } } } #endif LogConvAutotuneResults(se::dnn::ConvolutionKind::BACKWARD_FILTER, se::dnn::ToDataType<T>::value, input_ptr, filter_backprop_ptr, out_backprop_ptr, input_desc, filter_desc, output_desc, conv_desc, stream->parent(), results); if (CudnnUseFrontend()) { OP_REQUIRES_OK(context, BestCudnnConvAlgorithm(results, &plans, &algorithm_config)); } else { Status s = BestCudnnConvAlgorithm(results, nullptr, &algorithm_config); #if GOOGLE_CUDA if (s.code() == error::NOT_FOUND) { size_t version = cudnnGetVersion(); // For cuDNN 8.0.3 and 8.0.4, no cudnnConvolutionBwdFilterAlgo_t will // work in certain cases. In such cases we improve the error message. // This is fixed in cuDNN 8.0.5. For more context, see: // https://github.com/tensorflow/tensorflow/issues/46589 if (version == 8003 || version == 8004) { std::string version_str = (version == 8003 ? "8.0.3" : "8.0.4"); s = errors::NotFound( "No algorithm worked! Please try upgrading to cuDNN 8.0.5. 
You " "are using cuDNN ", version_str, ", which has a bug causing this error."); } } #endif OP_REQUIRES_OK(context, s); } AutoTuneConv3dBwdFilter::GetInstance()->Insert(conv_parameters, algorithm_config); } Status cudnn_launch_status; DnnScratchAllocator scratch_allocator(ConvolveBackwardFilterScratchSize, context); if (CudnnUseFrontend()) { if (algorithm_config.algorithm().has_value()) { VLOG(4) << "Conv3DBackpropFilter Execution Plan: " << algorithm_config.algorithm()->exec_plan_id(); } else { VLOG(4) << "Convolution AutoTune has been turned off"; } cudnn_launch_status = stream->ConvolveBackwardFilterWithExecutionPlan( input_desc, input_ptr, output_desc, out_backprop_ptr, conv_desc, filter_desc, &filter_backprop_ptr, &scratch_allocator, algorithm_config, nullptr); } else { cudnn_launch_status = stream->ConvolveBackwardFilterWithAlgorithm( input_desc, input_ptr, output_desc, out_backprop_ptr, conv_desc, filter_desc, &filter_backprop_ptr, &scratch_allocator, algorithm_config, nullptr); } if (!cudnn_launch_status.ok()) { context->SetStatus(cudnn_launch_status); } auto toConstTensor = [](const Tensor& x) -> const Tensor { return x; }; functor::ReverseTransformFilter<GPUDevice, T, 5>()( context->eigen_device<GPUDevice>(), /*src_filter_format=*/dst_format, toConstTensor(pre_transformed_filter_backprop).template tensor<T, 5>(), filter_backprop->tensor<T, 5>()); } private: std::vector<int32> dilation_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; bool takes_shape_; bool cudnn_use_autotune_; }; #define REGISTER_GPU_KERNEL(T) \ REGISTER_KERNEL_BUILDER( \ Name("Conv3DBackpropInput").Device(DEVICE_GPU).TypeConstraint<T>("T"), \ Conv3DBackpropInputOp<GPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropInputV2") \ .Device(DEVICE_GPU) \ .TypeConstraint<T>("T") \ .HostMemory("input_sizes"), \ Conv3DBackpropInputOp<GPUDevice, T>); \ REGISTER_KERNEL_BUILDER( \ Name("Conv3DBackpropFilter").Device(DEVICE_GPU).TypeConstraint<T>("T"), \ Conv3DBackpropFilterOp<GPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv3DBackpropFilterV2") \ .Device(DEVICE_GPU) \ .TypeConstraint<T>("T") \ .HostMemory("filter_sizes"), \ Conv3DBackpropFilterOp<GPUDevice, T>); TF_CALL_half(REGISTER_GPU_KERNEL); TF_CALL_float(REGISTER_GPU_KERNEL); TF_CALL_double(REGISTER_GPU_KERNEL); #undef REGISTER_GPU_KERNEL #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM } // namespace tensorflow
null
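The Conv3DBackpropFilter record above runs a profile-then-select autotuning loop: every candidate cuDNN algorithm (or frontend execution plan) is executed once against a scratch allocator, failed runs are still recorded so the results stay one-to-one with the plan list, and BestCudnnConvAlgorithm picks the fastest valid result, which is then cached per convolution signature. Below is a minimal, library-free sketch of that pattern; the Candidate and ProfileOutcome types are hypothetical stand-ins for TensorFlow's AlgorithmConfig and AutotuneResult and are not part of the original code.

#include <functional>
#include <optional>
#include <string>
#include <vector>

struct Candidate {
  std::string tag;                   // analogous to an algorithm id / execution-plan tag
  std::function<bool(double*)> run;  // runs once, reports elapsed ms, returns false on failure
};

struct ProfileOutcome {
  std::string tag;
  bool ok = false;
  double elapsed_ms = 0.0;
};

// Profile every candidate exactly once, keeping a one-to-one mapping between
// candidates and outcomes (failed runs are kept as "not ok" entries, mirroring the
// dummy AutotuneResult inserted when a cuDNN frontend plan fails to execute).
std::vector<ProfileOutcome> ProfileAll(const std::vector<Candidate>& candidates) {
  std::vector<ProfileOutcome> outcomes;
  outcomes.reserve(candidates.size());
  for (const Candidate& c : candidates) {
    ProfileOutcome o;
    o.tag = c.tag;
    o.ok = c.run(&o.elapsed_ms);
    outcomes.push_back(o);
  }
  return outcomes;
}

// Pick the fastest successful outcome, playing the role of BestCudnnConvAlgorithm.
std::optional<ProfileOutcome> BestOutcome(const std::vector<ProfileOutcome>& outcomes) {
  std::optional<ProfileOutcome> best;
  for (const ProfileOutcome& o : outcomes) {
    if (!o.ok) continue;
    if (!best || o.elapsed_ms < best->elapsed_ms) best = o;
  }
  return best;  // empty when no candidate worked (the NOT_FOUND path above)
}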
249
CWE-787
CVE-2021-29535
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // Implements a quantized eight-bit version of the matmul operation. #define EIGEN_USE_THREADS #if defined(__ARM_NEON__) || defined(__ARM_NEON) #define USE_NEON #include <arm_neon.h> #endif #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/kernels/meta_support.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/util/bcast.h" namespace tensorflow { namespace { template <class T, class Toutput> void ScalarMultiply(OpKernelContext* context, const T* full_input, int32 full_input_offset, int64 num_elements, T scalar_input, int32 scalar_input_offset, Toutput* output) { const int32 scalar_minus_offset = static_cast<int32>(scalar_input) - scalar_input_offset; for (int i = 0; i < num_elements; ++i) { output[i] = (static_cast<int32>(full_input[i]) - full_input_offset) * scalar_minus_offset; } } #ifdef USE_NEON template <> void ScalarMultiply<quint8, qint32>(OpKernelContext* context, const quint8* full_input, int32 full_input_offset, int64 num_elements, quint8 scalar_input, int32 scalar_input_offset, qint32* output) { const int16 scalar_minus_offset = static_cast<int16>(scalar_input) - scalar_input_offset; const int16x4_t scalar_minus_offset_16x4 = vmov_n_s16(scalar_minus_offset); const uint8x8_t full_input_offset_8x8 = vmov_n_u8(full_input_offset); // Go through the results in 16-element chunks for NEON acceleration. int i; for (i = 0; i < (num_elements - 15); i += 16) { // Load the tensor inputs. const uint8* full_input_ptr = &(full_input->value) + i; const uint8x16_t full_input_8x16 = vld1q_u8(full_input_ptr); // Break into two sets of vectors so we can do further calculations // easily. const uint8x8_t full_input_high_8x8 = vget_high_u8(full_input_8x16); const uint8x8_t full_input_low_8x8 = vget_low_u8(full_input_8x16); // Subtract off the offset value to get 16-bit results. const int16x8_t full_input_minus_offset_high_16x8 = vreinterpretq_s16_u16( vsubl_u8(full_input_high_8x8, full_input_offset_8x8)); const int16x8_t full_input_minus_offset_low_16x8 = vreinterpretq_s16_u16( vsubl_u8(full_input_low_8x8, full_input_offset_8x8)); // We have to work with 4-wide vectors, so extract them. const int16x4_t x_high_high_16x4 = vget_high_s16(full_input_minus_offset_high_16x8); const int16x4_t x_high_low_16x4 = vget_low_s16(full_input_minus_offset_high_16x8); const int16x4_t x_low_high_16x4 = vget_high_s16(full_input_minus_offset_low_16x8); const int16x4_t x_low_low_16x4 = vget_low_s16(full_input_minus_offset_low_16x8); // Perform the multiplication. 
const int32x4_t z_high_high_32x4 = vmull_s16(x_high_high_16x4, scalar_minus_offset_16x4); const int32x4_t z_high_low_32x4 = vmull_s16(x_high_low_16x4, scalar_minus_offset_16x4); const int32x4_t z_low_high_32x4 = vmull_s16(x_low_high_16x4, scalar_minus_offset_16x4); const int32x4_t z_low_low_32x4 = vmull_s16(x_low_low_16x4, scalar_minus_offset_16x4); // Write out the results. int32* output_ptr = &(output->value) + i; vst1q_s32(output_ptr + 0, z_low_low_32x4); vst1q_s32(output_ptr + 4, z_low_high_32x4); vst1q_s32(output_ptr + 8, z_high_low_32x4); vst1q_s32(output_ptr + 12, z_high_high_32x4); } // Finish up any remaining elements that weren't a multiple of 16. for (; i < num_elements; ++i) { output[i] = (static_cast<int32>(full_input[i]) - full_input_offset) * scalar_minus_offset; } } #endif // USE_NEON template <class T, class Toutput> void VectorMultiply(OpKernelContext* context, const T* x_data, int32 offset_x, const T* y_data, int32 offset_y, int64 num_elements, Toutput* output) { for (int i = 0; i < num_elements; ++i) { output[i] = (static_cast<int32>(x_data[i]) - offset_x) * (static_cast<int32>(y_data[i]) - offset_y); } } #ifdef USE_NEON template <> void VectorMultiply<quint8, qint32>(OpKernelContext* context, const quint8* x_data, int32 offset_x, const quint8* y_data, int32 offset_y, int64 num_elements, qint32* output) { const uint8x8_t offset_x_8x8 = vmov_n_u8(offset_x); const uint8x8_t offset_y_8x8 = vmov_n_u8(offset_y); int i; // Go through the results in 16-element chunks for NEON acceleration. for (i = 0; i < (num_elements - 15); i += 16) { // Load the vector inputs. const uint8* x_data_ptr = &(x_data->value) + i; const uint8x16_t x_8x16 = vld1q_u8(x_data_ptr); const uint8* y_data_ptr = &(y_data->value) + i; const uint8x16_t y_8x16 = vld1q_u8(y_data_ptr); // Break into two sets of vectors so we can do further calculations easily. const uint8x8_t x_high_8x8 = vget_high_u8(x_8x16); const uint8x8_t x_low_8x8 = vget_low_u8(x_8x16); const uint8x8_t y_high_8x8 = vget_high_u8(y_8x16); const uint8x8_t y_low_8x8 = vget_low_u8(y_8x16); // Subtract off the offset values to get 16-bit results. const int16x8_t x_minus_offset_high_16x8 = vreinterpretq_s16_u16(vsubl_u8(x_high_8x8, offset_x_8x8)); const int16x8_t x_minus_offset_low_16x8 = vreinterpretq_s16_u16(vsubl_u8(x_low_8x8, offset_x_8x8)); const int16x8_t y_minus_offset_high_16x8 = vreinterpretq_s16_u16(vsubl_u8(y_high_8x8, offset_y_8x8)); const int16x8_t y_minus_offset_low_16x8 = vreinterpretq_s16_u16(vsubl_u8(y_low_8x8, offset_y_8x8)); // We have to work with 4-wide vectors, so extract them. const int16x4_t x_high_high_16x4 = vget_high_s16(x_minus_offset_high_16x8); const int16x4_t x_high_low_16x4 = vget_low_s16(x_minus_offset_high_16x8); const int16x4_t x_low_high_16x4 = vget_high_s16(x_minus_offset_low_16x8); const int16x4_t x_low_low_16x4 = vget_low_s16(x_minus_offset_low_16x8); const int16x4_t y_high_high_16x4 = vget_high_s16(y_minus_offset_high_16x8); const int16x4_t y_high_low_16x4 = vget_low_s16(y_minus_offset_high_16x8); const int16x4_t y_low_high_16x4 = vget_high_s16(y_minus_offset_low_16x8); const int16x4_t y_low_low_16x4 = vget_low_s16(y_minus_offset_low_16x8); // Perform the multiplication. const int32x4_t z_high_high_32x4 = vmull_s16(x_high_high_16x4, y_high_high_16x4); const int32x4_t z_high_low_32x4 = vmull_s16(x_high_low_16x4, y_high_low_16x4); const int32x4_t z_low_high_32x4 = vmull_s16(x_low_high_16x4, y_low_high_16x4); const int32x4_t z_low_low_32x4 = vmull_s16(x_low_low_16x4, y_low_low_16x4); // Write out the results. 
int32* output_ptr = &(output->value) + i; vst1q_s32(output_ptr + 0, z_low_low_32x4); vst1q_s32(output_ptr + 4, z_low_high_32x4); vst1q_s32(output_ptr + 8, z_high_low_32x4); vst1q_s32(output_ptr + 12, z_high_high_32x4); } for (; i < num_elements; ++i) { output[i] = (static_cast<int32>(x_data[i]) - offset_x) * (static_cast<int32>(y_data[i]) - offset_y); } } #endif // USE_NEON template <class T, class Toutput> void VectorTensorMultiply(const T* vector_data, int32 vector_offset, int64 vector_num_elements, const T* tensor_data, int32 tensor_offset, int64 tensor_num_elements, Toutput* output) { for (int i = 0; i < tensor_num_elements; ++i) { const int64 vector_i = i % vector_num_elements; output[i] = (static_cast<int32>(vector_data[vector_i]) - vector_offset) * (static_cast<int32>(tensor_data[i]) - tensor_offset); } } #ifdef USE_NEON template <> void VectorTensorMultiply<quint8, qint32>( const quint8* vector_data, int32 vector_offset, int64 vector_num_elements, const quint8* tensor_data, int32 tensor_offset, int64 tensor_num_elements, qint32* output) { const uint8x8_t offset_x_8x8 = vmov_n_u8(vector_offset); const uint8x8_t offset_y_8x8 = vmov_n_u8(tensor_offset); CHECK_EQ(0, tensor_num_elements % vector_num_elements); for (int base_i = 0; base_i < tensor_num_elements; base_i += vector_num_elements) { int i = base_i; const int end_i = base_i + vector_num_elements; // Go through the results in 16-element chunks for NEON acceleration. int vector_i; for (vector_i = 0; vector_i < (vector_num_elements - 15); vector_i += 16, i += 16) { // Load the vector inputs. const uint8* x_data_ptr = &(vector_data->value) + vector_i; const uint8x16_t x_8x16 = vld1q_u8(x_data_ptr); const uint8* y_data_ptr = &(tensor_data->value) + i; const uint8x16_t y_8x16 = vld1q_u8(y_data_ptr); // Break into two sets of vectors so we can do further calculations // easily. const uint8x8_t x_high_8x8 = vget_high_u8(x_8x16); const uint8x8_t x_low_8x8 = vget_low_u8(x_8x16); const uint8x8_t y_high_8x8 = vget_high_u8(y_8x16); const uint8x8_t y_low_8x8 = vget_low_u8(y_8x16); // Subtract off the offset values to get 16-bit results. const int16x8_t x_minus_offset_high_16x8 = vreinterpretq_s16_u16(vsubl_u8(x_high_8x8, offset_x_8x8)); const int16x8_t x_minus_offset_low_16x8 = vreinterpretq_s16_u16(vsubl_u8(x_low_8x8, offset_x_8x8)); const int16x8_t y_minus_offset_high_16x8 = vreinterpretq_s16_u16(vsubl_u8(y_high_8x8, offset_y_8x8)); const int16x8_t y_minus_offset_low_16x8 = vreinterpretq_s16_u16(vsubl_u8(y_low_8x8, offset_y_8x8)); // We have to work with 4-wide vectors, so extract them. const int16x4_t x_high_high_16x4 = vget_high_s16(x_minus_offset_high_16x8); const int16x4_t x_high_low_16x4 = vget_low_s16(x_minus_offset_high_16x8); const int16x4_t x_low_high_16x4 = vget_high_s16(x_minus_offset_low_16x8); const int16x4_t x_low_low_16x4 = vget_low_s16(x_minus_offset_low_16x8); const int16x4_t y_high_high_16x4 = vget_high_s16(y_minus_offset_high_16x8); const int16x4_t y_high_low_16x4 = vget_low_s16(y_minus_offset_high_16x8); const int16x4_t y_low_high_16x4 = vget_high_s16(y_minus_offset_low_16x8); const int16x4_t y_low_low_16x4 = vget_low_s16(y_minus_offset_low_16x8); // Perform the multiplication. const int32x4_t z_high_high_32x4 = vmull_s16(x_high_high_16x4, y_high_high_16x4); const int32x4_t z_high_low_32x4 = vmull_s16(x_high_low_16x4, y_high_low_16x4); const int32x4_t z_low_high_32x4 = vmull_s16(x_low_high_16x4, y_low_high_16x4); const int32x4_t z_low_low_32x4 = vmull_s16(x_low_low_16x4, y_low_low_16x4); // Write out the results. 
int32* output_ptr = &(output->value) + i; vst1q_s32(output_ptr + 0, z_low_low_32x4); vst1q_s32(output_ptr + 4, z_low_high_32x4); vst1q_s32(output_ptr + 8, z_high_low_32x4); vst1q_s32(output_ptr + 12, z_high_high_32x4); } for (; i < end_i; ++i, ++vector_i) { output[i] = (static_cast<int32>(vector_data[vector_i]) - vector_offset) * (static_cast<int32>(tensor_data[i]) - tensor_offset); } } } #endif // USE_NEON } // namespace template <class T, class Toutput> class QuantizedMulOp : public OpKernel { public: explicit QuantizedMulOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor& x = context->input(0); const Tensor& y = context->input(1); const float min_x = context->input(2).flat<float>()(0); const float max_x = context->input(3).flat<float>()(0); const float min_y = context->input(4).flat<float>()(0); const float max_y = context->input(5).flat<float>()(0); BCast bcast(BCast::FromShape(x.shape()), BCast::FromShape(y.shape())); if (!bcast.IsValid()) { context->SetStatus(errors::InvalidArgument( "Incompatible shapes: ", x.shape().DebugString(), " vs. ", y.shape().DebugString())); return; } Tensor* z; OP_REQUIRES_OK(context, context->allocate_output( 0, BCast::ToShape(bcast.output_shape()), &z)); // Make sure that we have valid quantization ranges for the input buffers. // If the difference between the min and max is negative or zero, it makes // it hard to do meaningful intermediate operations on the values. OP_REQUIRES(context, (max_x > min_x), errors::InvalidArgument("max_x must be larger than min_a.")); OP_REQUIRES(context, (max_y > min_y), errors::InvalidArgument("max_x must be larger than min_b.")); const int32 offset_x = FloatToQuantizedUnclamped<T>(0.0f, min_x, max_x); const int32 offset_y = FloatToQuantizedUnclamped<T>(0.0f, min_y, max_y); const T* x_data = x.flat<T>().data(); const T* y_data = y.flat<T>().data(); Toutput* z_data = z->flat<Toutput>().data(); const int ndims = bcast.x_reshape().size(); if (ndims <= 1) { if (x.NumElements() == 1) { ScalarMultiply<T, Toutput>(context, y_data, offset_y, y.NumElements(), x_data[0], offset_x, z_data); } else if (y.NumElements() == 1) { ScalarMultiply<T, Toutput>(context, x_data, offset_x, x.NumElements(), y_data[0], offset_y, z_data); } else { VectorMultiply<T, Toutput>(context, x_data, offset_x, y_data, offset_y, x.NumElements(), z_data); } } else if (ndims == 2) { const T* vector_data; int64 vector_num_elements; int32 vector_offset; const T* tensor_data; int64 tensor_num_elements; int32 tensor_offset; if (x.NumElements() < y.NumElements()) { vector_data = x_data; vector_num_elements = x.NumElements(); vector_offset = offset_x; tensor_data = y_data; tensor_num_elements = y.NumElements(); tensor_offset = offset_y; } else { vector_data = y_data; vector_num_elements = y.NumElements(); vector_offset = offset_y; tensor_data = x_data; tensor_num_elements = x.NumElements(); tensor_offset = offset_x; } if (vector_num_elements == 0) { context->SetStatus( errors::InvalidArgument("vector must have at least 1 element")); return; } VectorTensorMultiply<T, Toutput>( vector_data, vector_offset, vector_num_elements, tensor_data, tensor_offset, tensor_num_elements, z_data); } else { LOG(INFO) << "ndims=" << ndims; LOG(INFO) << "bcast.x_reshape()=" << TensorShape(bcast.x_reshape()).DebugString(); LOG(INFO) << "bcast.y_reshape()=" << TensorShape(bcast.y_reshape()).DebugString(); LOG(INFO) << "bcast.x_bcast()=" << TensorShape(bcast.x_bcast()).DebugString(); LOG(INFO) << "bcast.y_bcast()=" << 
TensorShape(bcast.y_bcast()).DebugString(); context->SetStatus(errors::Unimplemented( "Broadcast between ", context->input(0).shape().DebugString(), " and ", context->input(1).shape().DebugString(), " is not supported yet.")); return; } float min_z_value; float max_z_value; QuantizationRangeForMultiplication<T, T, Toutput>( min_x, max_x, min_y, max_y, &min_z_value, &max_z_value); Tensor* z_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &z_min)); z_min->flat<float>()(0) = min_z_value; Tensor* z_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &z_max)); z_max->flat<float>()(0) = max_z_value; } }; REGISTER_KERNEL_BUILDER(Name("QuantizedMul") .Device(DEVICE_CPU) .TypeConstraint<quint8>("T1") .TypeConstraint<quint8>("T2") .TypeConstraint<qint32>("Toutput"), QuantizedMulOp<quint8, qint32>); } // namespace tensorflow
null
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // Implements a quantized eight-bit version of the matmul operation. #define EIGEN_USE_THREADS #if defined(__ARM_NEON__) || defined(__ARM_NEON) #define USE_NEON #include <arm_neon.h> #endif #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/kernels/meta_support.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/util/bcast.h" namespace tensorflow { namespace { template <class T, class Toutput> void ScalarMultiply(OpKernelContext* context, const T* full_input, int32 full_input_offset, int64 num_elements, T scalar_input, int32 scalar_input_offset, Toutput* output) { const int32 scalar_minus_offset = static_cast<int32>(scalar_input) - scalar_input_offset; for (int i = 0; i < num_elements; ++i) { output[i] = (static_cast<int32>(full_input[i]) - full_input_offset) * scalar_minus_offset; } } #ifdef USE_NEON template <> void ScalarMultiply<quint8, qint32>(OpKernelContext* context, const quint8* full_input, int32 full_input_offset, int64 num_elements, quint8 scalar_input, int32 scalar_input_offset, qint32* output) { const int16 scalar_minus_offset = static_cast<int16>(scalar_input) - scalar_input_offset; const int16x4_t scalar_minus_offset_16x4 = vmov_n_s16(scalar_minus_offset); const uint8x8_t full_input_offset_8x8 = vmov_n_u8(full_input_offset); // Go through the results in 16-element chunks for NEON acceleration. int i; for (i = 0; i < (num_elements - 15); i += 16) { // Load the tensor inputs. const uint8* full_input_ptr = &(full_input->value) + i; const uint8x16_t full_input_8x16 = vld1q_u8(full_input_ptr); // Break into two sets of vectors so we can do further calculations // easily. const uint8x8_t full_input_high_8x8 = vget_high_u8(full_input_8x16); const uint8x8_t full_input_low_8x8 = vget_low_u8(full_input_8x16); // Subtract off the offset value to get 16-bit results. const int16x8_t full_input_minus_offset_high_16x8 = vreinterpretq_s16_u16( vsubl_u8(full_input_high_8x8, full_input_offset_8x8)); const int16x8_t full_input_minus_offset_low_16x8 = vreinterpretq_s16_u16( vsubl_u8(full_input_low_8x8, full_input_offset_8x8)); // We have to work with 4-wide vectors, so extract them. const int16x4_t x_high_high_16x4 = vget_high_s16(full_input_minus_offset_high_16x8); const int16x4_t x_high_low_16x4 = vget_low_s16(full_input_minus_offset_high_16x8); const int16x4_t x_low_high_16x4 = vget_high_s16(full_input_minus_offset_low_16x8); const int16x4_t x_low_low_16x4 = vget_low_s16(full_input_minus_offset_low_16x8); // Perform the multiplication. 
const int32x4_t z_high_high_32x4 = vmull_s16(x_high_high_16x4, scalar_minus_offset_16x4); const int32x4_t z_high_low_32x4 = vmull_s16(x_high_low_16x4, scalar_minus_offset_16x4); const int32x4_t z_low_high_32x4 = vmull_s16(x_low_high_16x4, scalar_minus_offset_16x4); const int32x4_t z_low_low_32x4 = vmull_s16(x_low_low_16x4, scalar_minus_offset_16x4); // Write out the results. int32* output_ptr = &(output->value) + i; vst1q_s32(output_ptr + 0, z_low_low_32x4); vst1q_s32(output_ptr + 4, z_low_high_32x4); vst1q_s32(output_ptr + 8, z_high_low_32x4); vst1q_s32(output_ptr + 12, z_high_high_32x4); } // Finish up any remaining elements that weren't a multiple of 16. for (; i < num_elements; ++i) { output[i] = (static_cast<int32>(full_input[i]) - full_input_offset) * scalar_minus_offset; } } #endif // USE_NEON template <class T, class Toutput> void VectorMultiply(OpKernelContext* context, const T* x_data, int32 offset_x, const T* y_data, int32 offset_y, int64 num_elements, Toutput* output) { for (int i = 0; i < num_elements; ++i) { output[i] = (static_cast<int32>(x_data[i]) - offset_x) * (static_cast<int32>(y_data[i]) - offset_y); } } #ifdef USE_NEON template <> void VectorMultiply<quint8, qint32>(OpKernelContext* context, const quint8* x_data, int32 offset_x, const quint8* y_data, int32 offset_y, int64 num_elements, qint32* output) { const uint8x8_t offset_x_8x8 = vmov_n_u8(offset_x); const uint8x8_t offset_y_8x8 = vmov_n_u8(offset_y); int i; // Go through the results in 16-element chunks for NEON acceleration. for (i = 0; i < (num_elements - 15); i += 16) { // Load the vector inputs. const uint8* x_data_ptr = &(x_data->value) + i; const uint8x16_t x_8x16 = vld1q_u8(x_data_ptr); const uint8* y_data_ptr = &(y_data->value) + i; const uint8x16_t y_8x16 = vld1q_u8(y_data_ptr); // Break into two sets of vectors so we can do further calculations easily. const uint8x8_t x_high_8x8 = vget_high_u8(x_8x16); const uint8x8_t x_low_8x8 = vget_low_u8(x_8x16); const uint8x8_t y_high_8x8 = vget_high_u8(y_8x16); const uint8x8_t y_low_8x8 = vget_low_u8(y_8x16); // Subtract off the offset values to get 16-bit results. const int16x8_t x_minus_offset_high_16x8 = vreinterpretq_s16_u16(vsubl_u8(x_high_8x8, offset_x_8x8)); const int16x8_t x_minus_offset_low_16x8 = vreinterpretq_s16_u16(vsubl_u8(x_low_8x8, offset_x_8x8)); const int16x8_t y_minus_offset_high_16x8 = vreinterpretq_s16_u16(vsubl_u8(y_high_8x8, offset_y_8x8)); const int16x8_t y_minus_offset_low_16x8 = vreinterpretq_s16_u16(vsubl_u8(y_low_8x8, offset_y_8x8)); // We have to work with 4-wide vectors, so extract them. const int16x4_t x_high_high_16x4 = vget_high_s16(x_minus_offset_high_16x8); const int16x4_t x_high_low_16x4 = vget_low_s16(x_minus_offset_high_16x8); const int16x4_t x_low_high_16x4 = vget_high_s16(x_minus_offset_low_16x8); const int16x4_t x_low_low_16x4 = vget_low_s16(x_minus_offset_low_16x8); const int16x4_t y_high_high_16x4 = vget_high_s16(y_minus_offset_high_16x8); const int16x4_t y_high_low_16x4 = vget_low_s16(y_minus_offset_high_16x8); const int16x4_t y_low_high_16x4 = vget_high_s16(y_minus_offset_low_16x8); const int16x4_t y_low_low_16x4 = vget_low_s16(y_minus_offset_low_16x8); // Perform the multiplication. const int32x4_t z_high_high_32x4 = vmull_s16(x_high_high_16x4, y_high_high_16x4); const int32x4_t z_high_low_32x4 = vmull_s16(x_high_low_16x4, y_high_low_16x4); const int32x4_t z_low_high_32x4 = vmull_s16(x_low_high_16x4, y_low_high_16x4); const int32x4_t z_low_low_32x4 = vmull_s16(x_low_low_16x4, y_low_low_16x4); // Write out the results. 
int32* output_ptr = &(output->value) + i; vst1q_s32(output_ptr + 0, z_low_low_32x4); vst1q_s32(output_ptr + 4, z_low_high_32x4); vst1q_s32(output_ptr + 8, z_high_low_32x4); vst1q_s32(output_ptr + 12, z_high_high_32x4); } for (; i < num_elements; ++i) { output[i] = (static_cast<int32>(x_data[i]) - offset_x) * (static_cast<int32>(y_data[i]) - offset_y); } } #endif // USE_NEON template <class T, class Toutput> void VectorTensorMultiply(const T* vector_data, int32 vector_offset, int64 vector_num_elements, const T* tensor_data, int32 tensor_offset, int64 tensor_num_elements, Toutput* output) { for (int i = 0; i < tensor_num_elements; ++i) { const int64 vector_i = i % vector_num_elements; output[i] = (static_cast<int32>(vector_data[vector_i]) - vector_offset) * (static_cast<int32>(tensor_data[i]) - tensor_offset); } } #ifdef USE_NEON template <> void VectorTensorMultiply<quint8, qint32>( const quint8* vector_data, int32 vector_offset, int64 vector_num_elements, const quint8* tensor_data, int32 tensor_offset, int64 tensor_num_elements, qint32* output) { const uint8x8_t offset_x_8x8 = vmov_n_u8(vector_offset); const uint8x8_t offset_y_8x8 = vmov_n_u8(tensor_offset); CHECK_EQ(0, tensor_num_elements % vector_num_elements); for (int base_i = 0; base_i < tensor_num_elements; base_i += vector_num_elements) { int i = base_i; const int end_i = base_i + vector_num_elements; // Go through the results in 16-element chunks for NEON acceleration. int vector_i; for (vector_i = 0; vector_i < (vector_num_elements - 15); vector_i += 16, i += 16) { // Load the vector inputs. const uint8* x_data_ptr = &(vector_data->value) + vector_i; const uint8x16_t x_8x16 = vld1q_u8(x_data_ptr); const uint8* y_data_ptr = &(tensor_data->value) + i; const uint8x16_t y_8x16 = vld1q_u8(y_data_ptr); // Break into two sets of vectors so we can do further calculations // easily. const uint8x8_t x_high_8x8 = vget_high_u8(x_8x16); const uint8x8_t x_low_8x8 = vget_low_u8(x_8x16); const uint8x8_t y_high_8x8 = vget_high_u8(y_8x16); const uint8x8_t y_low_8x8 = vget_low_u8(y_8x16); // Subtract off the offset values to get 16-bit results. const int16x8_t x_minus_offset_high_16x8 = vreinterpretq_s16_u16(vsubl_u8(x_high_8x8, offset_x_8x8)); const int16x8_t x_minus_offset_low_16x8 = vreinterpretq_s16_u16(vsubl_u8(x_low_8x8, offset_x_8x8)); const int16x8_t y_minus_offset_high_16x8 = vreinterpretq_s16_u16(vsubl_u8(y_high_8x8, offset_y_8x8)); const int16x8_t y_minus_offset_low_16x8 = vreinterpretq_s16_u16(vsubl_u8(y_low_8x8, offset_y_8x8)); // We have to work with 4-wide vectors, so extract them. const int16x4_t x_high_high_16x4 = vget_high_s16(x_minus_offset_high_16x8); const int16x4_t x_high_low_16x4 = vget_low_s16(x_minus_offset_high_16x8); const int16x4_t x_low_high_16x4 = vget_high_s16(x_minus_offset_low_16x8); const int16x4_t x_low_low_16x4 = vget_low_s16(x_minus_offset_low_16x8); const int16x4_t y_high_high_16x4 = vget_high_s16(y_minus_offset_high_16x8); const int16x4_t y_high_low_16x4 = vget_low_s16(y_minus_offset_high_16x8); const int16x4_t y_low_high_16x4 = vget_high_s16(y_minus_offset_low_16x8); const int16x4_t y_low_low_16x4 = vget_low_s16(y_minus_offset_low_16x8); // Perform the multiplication. const int32x4_t z_high_high_32x4 = vmull_s16(x_high_high_16x4, y_high_high_16x4); const int32x4_t z_high_low_32x4 = vmull_s16(x_high_low_16x4, y_high_low_16x4); const int32x4_t z_low_high_32x4 = vmull_s16(x_low_high_16x4, y_low_high_16x4); const int32x4_t z_low_low_32x4 = vmull_s16(x_low_low_16x4, y_low_low_16x4); // Write out the results. 
int32* output_ptr = &(output->value) + i; vst1q_s32(output_ptr + 0, z_low_low_32x4); vst1q_s32(output_ptr + 4, z_low_high_32x4); vst1q_s32(output_ptr + 8, z_high_low_32x4); vst1q_s32(output_ptr + 12, z_high_high_32x4); } for (; i < end_i; ++i, ++vector_i) { output[i] = (static_cast<int32>(vector_data[vector_i]) - vector_offset) * (static_cast<int32>(tensor_data[i]) - tensor_offset); } } } #endif // USE_NEON } // namespace template <class T, class Toutput> class QuantizedMulOp : public OpKernel { public: explicit QuantizedMulOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor& x = context->input(0); const Tensor& y = context->input(1); auto& min_x_tensor = context->input(2); OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_x_tensor.shape()), errors::InvalidArgument("min_x must be a scalar")); const float min_x = min_x_tensor.flat<float>()(0); auto& max_x_tensor = context->input(3); OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_x_tensor.shape()), errors::InvalidArgument("max_x must be a scalar")); const float max_x = max_x_tensor.flat<float>()(0); auto& min_y_tensor = context->input(4); OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_y_tensor.shape()), errors::InvalidArgument("min_y must be a scalar")); const float min_y = min_y_tensor.flat<float>()(0); auto& max_y_tensor = context->input(5); OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_y_tensor.shape()), errors::InvalidArgument("max_y must be a scalar")); const float max_y = max_y_tensor.flat<float>()(0); BCast bcast(BCast::FromShape(x.shape()), BCast::FromShape(y.shape())); if (!bcast.IsValid()) { context->SetStatus(errors::InvalidArgument( "Incompatible shapes: ", x.shape().DebugString(), " vs. ", y.shape().DebugString())); return; } Tensor* z; OP_REQUIRES_OK(context, context->allocate_output( 0, BCast::ToShape(bcast.output_shape()), &z)); // Make sure that we have valid quantization ranges for the input buffers. // If the difference between the min and max is negative or zero, it makes // it hard to do meaningful intermediate operations on the values. 
OP_REQUIRES(context, (max_x > min_x), errors::InvalidArgument("max_x must be larger than min_a.")); OP_REQUIRES(context, (max_y > min_y), errors::InvalidArgument("max_x must be larger than min_b.")); const int32 offset_x = FloatToQuantizedUnclamped<T>(0.0f, min_x, max_x); const int32 offset_y = FloatToQuantizedUnclamped<T>(0.0f, min_y, max_y); const T* x_data = x.flat<T>().data(); const T* y_data = y.flat<T>().data(); Toutput* z_data = z->flat<Toutput>().data(); const int ndims = bcast.x_reshape().size(); if (ndims <= 1) { if (x.NumElements() == 1) { ScalarMultiply<T, Toutput>(context, y_data, offset_y, y.NumElements(), x_data[0], offset_x, z_data); } else if (y.NumElements() == 1) { ScalarMultiply<T, Toutput>(context, x_data, offset_x, x.NumElements(), y_data[0], offset_y, z_data); } else { VectorMultiply<T, Toutput>(context, x_data, offset_x, y_data, offset_y, x.NumElements(), z_data); } } else if (ndims == 2) { const T* vector_data; int64 vector_num_elements; int32 vector_offset; const T* tensor_data; int64 tensor_num_elements; int32 tensor_offset; if (x.NumElements() < y.NumElements()) { vector_data = x_data; vector_num_elements = x.NumElements(); vector_offset = offset_x; tensor_data = y_data; tensor_num_elements = y.NumElements(); tensor_offset = offset_y; } else { vector_data = y_data; vector_num_elements = y.NumElements(); vector_offset = offset_y; tensor_data = x_data; tensor_num_elements = x.NumElements(); tensor_offset = offset_x; } if (vector_num_elements == 0) { context->SetStatus( errors::InvalidArgument("vector must have at least 1 element")); return; } VectorTensorMultiply<T, Toutput>( vector_data, vector_offset, vector_num_elements, tensor_data, tensor_offset, tensor_num_elements, z_data); } else { LOG(INFO) << "ndims=" << ndims; LOG(INFO) << "bcast.x_reshape()=" << TensorShape(bcast.x_reshape()).DebugString(); LOG(INFO) << "bcast.y_reshape()=" << TensorShape(bcast.y_reshape()).DebugString(); LOG(INFO) << "bcast.x_bcast()=" << TensorShape(bcast.x_bcast()).DebugString(); LOG(INFO) << "bcast.y_bcast()=" << TensorShape(bcast.y_bcast()).DebugString(); context->SetStatus(errors::Unimplemented( "Broadcast between ", context->input(0).shape().DebugString(), " and ", context->input(1).shape().DebugString(), " is not supported yet.")); return; } float min_z_value; float max_z_value; QuantizationRangeForMultiplication<T, T, Toutput>( min_x, max_x, min_y, max_y, &min_z_value, &max_z_value); Tensor* z_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &z_min)); z_min->flat<float>()(0) = min_z_value; Tensor* z_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &z_max)); z_max->flat<float>()(0) = max_z_value; } }; REGISTER_KERNEL_BUILDER(Name("QuantizedMul") .Device(DEVICE_CPU) .TypeConstraint<quint8>("T1") .TypeConstraint<quint8>("T2") .TypeConstraint<qint32>("Toutput"), QuantizedMulOp<quint8, qint32>); } // namespace tensorflow
null
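Record 249 (CVE-2021-29535): the substantive difference between the source and the ground truth above is that the patched Compute() validates the four quantization-range inputs (min_x, max_x, min_y, max_y) with TensorShapeUtils::IsScalar before reading element 0 of each, whereas the vulnerable version indexes flat<float>()(0) unconditionally. A standalone sketch of that validate-before-index pattern follows; ReadScalarBound is a hypothetical helper standing in for the TensorFlow tensor API.

#include <stdexcept>
#include <string>
#include <vector>

// Read a quantization bound that is required to be a scalar: check the element
// count before touching element 0, mirroring the OP_REQUIRES(..., IsScalar(...))
// guards added in the ground-truth kernel.
float ReadScalarBound(const std::vector<float>& values, const char* name) {
  if (values.size() != 1) {
    throw std::invalid_argument(std::string(name) + " must be a scalar");
  }
  return values[0];  // safe: exactly one element is guaranteed
}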
250
CWE-787
CVE-2021-29536
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <vector> #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/reshape_op.h" namespace tensorflow { class QuantizedReshapeOp : public ReshapeOp { public: explicit QuantizedReshapeOp(OpKernelConstruction* c) : ReshapeOp(c) {} void Compute(OpKernelContext* ctx) override { // This call processes inputs 1 and 2 to write output 0. ReshapeOp::Compute(ctx); const float input_min_float = ctx->input(2).flat<float>()(0); const float input_max_float = ctx->input(3).flat<float>()(0); Tensor* output_min = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(1, TensorShape({}), &output_min)); output_min->flat<float>()(0) = input_min_float; Tensor* output_max = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(2, TensorShape({}), &output_max)); output_max->flat<float>()(0) = input_max_float; } }; #define REGISTER_CPU_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("QuantizedReshape") \ .Device(DEVICE_CPU) \ .HostMemory("shape") \ .TypeConstraint<type>("T"), \ QuantizedReshapeOp) REGISTER_CPU_KERNEL(::tensorflow::quint8); REGISTER_CPU_KERNEL(::tensorflow::qint32); #undef REGISTER_CPU_KERNEL } // namespace tensorflow
null
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <vector> #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/reshape_op.h" namespace tensorflow { class QuantizedReshapeOp : public ReshapeOp { public: explicit QuantizedReshapeOp(OpKernelConstruction* c) : ReshapeOp(c) {} void Compute(OpKernelContext* ctx) override { // This call processes inputs 1 and 2 to write output 0. ReshapeOp::Compute(ctx); if (!ctx->status().ok()) { return; } const auto& input_min_float_tensor = ctx->input(2); const auto& input_min_float_shape = input_min_float_tensor.shape(); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(input_min_float_shape) || (TensorShapeUtils::IsVector(input_min_float_shape) && (input_min_float_shape.dim_size(0) == 1)), errors::InvalidArgument( "input_min must be a scalar or a vector of 1 element")); const float input_min_float = input_min_float_tensor.flat<float>()(0); const auto& input_max_float_tensor = ctx->input(3); const auto& input_max_float_shape = input_max_float_tensor.shape(); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(input_max_float_shape) || (TensorShapeUtils::IsVector(input_max_float_shape) && (input_max_float_shape.dim_size(0) == 1)), errors::InvalidArgument( "input_max must be a scalar or a vector of 1 element")); const float input_max_float = input_max_float_tensor.flat<float>()(0); Tensor* output_min = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(1, TensorShape({}), &output_min)); output_min->flat<float>()(0) = input_min_float; Tensor* output_max = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(2, TensorShape({}), &output_max)); output_max->flat<float>()(0) = input_max_float; } }; #define REGISTER_CPU_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("QuantizedReshape") \ .Device(DEVICE_CPU) \ .HostMemory("shape") \ .TypeConstraint<type>("T"), \ QuantizedReshapeOp) REGISTER_CPU_KERNEL(::tensorflow::quint8); REGISTER_CPU_KERNEL(::tensorflow::qint32); #undef REGISTER_CPU_KERNEL } // namespace tensorflow
null
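Record 250 (CVE-2021-29536): the patched QuantizedReshapeOp::Compute above adds two guards the vulnerable version lacks. It returns early when ReshapeOp::Compute has already set a non-OK status, and it requires input_min / input_max (inputs 2 and 3) to be scalars or 1-element vectors before reading element 0. A compact standalone sketch of that guard ordering, using assumed Status / ReadRange names rather than the TensorFlow types:

#include <optional>
#include <string>
#include <vector>

struct Status {
  bool ok = true;
  std::string msg;
};

// Only read the range value once (a) the wrapped computation succeeded and
// (b) the input is known to hold exactly one element.
std::optional<float> ReadRange(const Status& reshape_status,
                               const std::vector<float>& range_input) {
  if (!reshape_status.ok) return std::nullopt;      // ReshapeOp::Compute failed: bail out
  if (range_input.size() != 1) return std::nullopt;  // not a scalar / 1-element vector
  return range_input[0];
}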
251
CWE-787
CVE-2021-29537
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // Implements a quantized version of the resize bilinear op. #define EIGEN_USE_THREADS #if defined(__ARM_NEON__) || defined(__ARM_NEON) #define USE_NEON #define QUANTIZED_RESIZE_BILINEAR_USE_NEON #include <arm_neon.h> #endif #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/util/image_resizer_state.h" namespace tensorflow { static constexpr bool USE_REFERENCE = false; namespace { // Compute the interpolation indices only once. template <typename T_SCALE> struct InterpolationCache { std::vector<int64> lower; // Lower source index used in the interpolation std::vector<int64> upper; // Upper source index used in the interpolation // 1-D linear interpolation scale (see: // https://en.wikipedia.org/wiki/Bilinear_interpolation) std::vector<float> lerp; std::vector<T_SCALE> ilerp; }; template <typename T_SCALE, typename Scaler> inline void ComputeInterpolationWeights( const int64 out_size, const int64 in_size, const float scale, const int resolution, InterpolationCache<T_SCALE>* interpolation) { const Scaler scaler; interpolation->lower.resize(out_size + 1); interpolation->upper.resize(out_size + 1); interpolation->lerp.resize(out_size + 1); interpolation->ilerp.resize(out_size + 1); interpolation->lower[out_size] = 0; interpolation->upper[out_size] = 0; for (int64 i = out_size - 1; i >= 0; --i) { const float in = scaler(i, scale); const float in_f = std::floor(in); interpolation->lower[i] = std::max(static_cast<int64>(in_f), static_cast<int64>(0)); interpolation->upper[i] = std::min(static_cast<int64>(std::ceil(in)), in_size - 1); interpolation->lower[i] = std::min(interpolation->lower[i], interpolation->upper[i]); interpolation->lerp[i] = in - in_f; interpolation->ilerp[i] = static_cast<T_SCALE>((in - in_f) * (1 << resolution)); } } template <typename T_SCALE> inline InterpolationCache<T_SCALE> BuildLerpCache( const int64 out_size, const int64 in_size, const float scale, const int index_step, const int resolution, const bool half_pixel_centers) { InterpolationCache<T_SCALE> cache; // Compute the cached interpolation weights on the x and y dimensions. if (half_pixel_centers) { ComputeInterpolationWeights<T_SCALE, HalfPixelScaler>( out_size, in_size, scale, resolution, &cache); } else { ComputeInterpolationWeights<T_SCALE, LegacyScaler>(out_size, in_size, scale, resolution, &cache); } CHECK(index_step > 0); if (index_step > 1) { for (int i = 0; i < cache.lower.size(); ++i) { cache.lower[i] *= index_step; cache.upper[i] *= index_step; } } return cache; } /** * Computes the bilinear interpolation from the appropriate 4 float points * and the linear interpolation weights. 
*/ template <typename T> inline T ComputeLerpReference(const T in_top_left, const T in_top_right, const T in_bottom_left, const T in_bottom_right, const float x_lerp, const float y_lerp, const float min, const float max) { const float top_left = QuantizedToFloat<T>(in_top_left, min, max); const float top_right = QuantizedToFloat<T>(in_top_right, min, max); const float bottom_left = QuantizedToFloat<T>(in_bottom_left, min, max); const float bottom_right = QuantizedToFloat<T>(in_bottom_right, min, max); const float top = top_left + (top_right - top_left) * x_lerp; const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp; const float out = top + (bottom - top) * y_lerp; return FloatToQuantized<T>(out, min, max); } template <typename T, typename T_SCALE, typename T_CALC> inline T_CALC MulOffset(T a, T b, T_SCALE c) { return (static_cast<T_CALC>(a) - static_cast<T_CALC>(b)) * static_cast<T_CALC>(c); } template <int RESOLUTION, typename T, typename T_SCALE, typename T_CALC> inline T ComputeLerp(const T top_left, const T top_right, const T bottom_left, const T bottom_right, const T_SCALE x_lerp, const T_SCALE y_lerp) { constexpr T_CALC RESOLUTION_MULT = (1 << RESOLUTION); const T_CALC top = static_cast<T_CALC>(top_left) * RESOLUTION_MULT + MulOffset<T, T_SCALE, T_CALC>(top_right, top_left, x_lerp); const T_CALC bottom = static_cast<T_CALC>(bottom_left) * RESOLUTION_MULT + MulOffset<T, T_SCALE, T_CALC>(bottom_right, bottom_left, x_lerp); const T_CALC out = top + (bottom - top) / RESOLUTION_MULT * y_lerp; return static_cast<T>( static_cast<int32>((out + RESOLUTION_MULT / 2) / RESOLUTION_MULT)); } #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON inline uint8x8_t ToUint8x8(const quint8* v0, const quint8* v1, const quint8* v2, const quint8* v3, const quint8* v4, const quint8* v5, const quint8* v6, const quint8* v7) { static const uint8x8_t ZERO_8x8 = vmov_n_u8(0); uint8x8_t ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v0), ZERO_8x8, 0); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v1), ret, 1); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v2), ret, 2); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v3), ret, 3); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v4), ret, 4); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v5), ret, 5); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v6), ret, 6); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v7), ret, 7); return ret; } inline int16x8_t ToInt16x8(const int16* v0, const int16* v1, const int16* v2, const int16* v3, const int16* v4, const int16* v5, const int16* v6, const int16* v7) { static const int16x8_t ZERO_16x8 = vmovq_n_s16(0); int16x8_t ret = vld1q_lane_s16(v0, ZERO_16x8, 0); ret = vld1q_lane_s16(v1, ret, 1); ret = vld1q_lane_s16(v2, ret, 2); ret = vld1q_lane_s16(v3, ret, 3); ret = vld1q_lane_s16(v4, ret, 4); ret = vld1q_lane_s16(v5, ret, 5); ret = vld1q_lane_s16(v6, ret, 6); ret = vld1q_lane_s16(v7, ret, 7); return ret; } inline int32x2_t ToInt32x2(const qint32* v0, const qint32* v1) { static const int32x2_t ZERO_32x2 = vmov_n_s32(0); const int32x2_t ret0 = vld1_lane_s32(reinterpret_cast<const int32*>(v0), ZERO_32x2, 0); const int32x2_t ret1 = vld1_lane_s32(reinterpret_cast<const int32*>(v1), ret0, 1); return ret1; } template <int RESOLUTION, bool X_LERP_SAME> inline int32x2_t ComputeLerpx2( const qint32* top_left0, const qint32* top_right0, const qint32* bottom_left0, const qint32* bottom_right0, const qint32* top_left1, const qint32* top_right1, const qint32* bottom_left1, const qint32* bottom_right1, const 
int32* x_lerp, const int32x2_t y_lerpsx) { const int32x2_t x_lerpsx = X_LERP_SAME ? vld1_dup_s32(reinterpret_cast<const int32*>(x_lerp)) : vld1_s32(reinterpret_cast<const int32*>(x_lerp)); const int32x2_t top_leftsx = ToInt32x2(top_left0, top_left1); const int32x2_t top_rightsx = ToInt32x2(top_right0, top_right1); const int32x2_t bottom_leftsx = ToInt32x2(bottom_left0, bottom_left1); const int32x2_t bottom_rightsx = ToInt32x2(bottom_right0, bottom_right1); const int32x2_t retval = ComputeLerp32x2<RESOLUTION>(top_leftsx, top_rightsx, bottom_leftsx, bottom_rightsx, x_lerpsx, y_lerpsx); return retval; } template <int RESOLUTION> inline uint8x8_t ComputeLerpx8( const quint8* tl0, const quint8* tr0, const quint8* bl0, const quint8* br0, const int16* xlp0, const quint8* tl1, const quint8* tr1, const quint8* bl1, const quint8* br1, const int16* xlp1, const quint8* tl2, const quint8* tr2, const quint8* bl2, const quint8* br2, const int16* xlp2, const quint8* tl3, const quint8* tr3, const quint8* bl3, const quint8* br3, const int16* xlp3, const quint8* tl4, const quint8* tr4, const quint8* bl4, const quint8* br4, const int16* xlp4, const quint8* tl5, const quint8* tr5, const quint8* bl5, const quint8* br5, const int16* xlp5, const quint8* tl6, const quint8* tr6, const quint8* bl6, const quint8* br6, const int16* xlp6, const quint8* tl7, const quint8* tr7, const quint8* bl7, const quint8* br7, const int16* xlp7, const int16x8_t ys_lerpsx) { const uint8x8_t tl8x8 = ToUint8x8(tl0, tl1, tl2, tl3, tl4, tl5, tl6, tl7); const uint8x8_t tr8x8 = ToUint8x8(tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7); const uint8x8_t bl8x8 = ToUint8x8(bl0, bl1, bl2, bl3, bl4, bl5, bl6, bl7); const uint8x8_t br8x8 = ToUint8x8(br0, br1, br2, br3, br4, br5, br6, br7); const int16x8_t xs_lerpsx = ToInt16x8(xlp0, xlp1, xlp2, xlp3, xlp4, xlp5, xlp6, xlp7); return ComputeLerp8x8<RESOLUTION>(tl8x8, tr8x8, bl8x8, br8x8, xs_lerpsx, ys_lerpsx); } // Expand address at compile time to improve performance template <int RESOLUTION, int ID0, int CH0, int ID1, int CH1, int ID2, int CH2, int ID3, int CH3, int ID4, int CH4, int ID5, int CH5, int ID6, int CH6, int ID7, int CH7> inline uint8x8_t ComputeLerpx8Tmpl(const quint8* const yl, const quint8* yu, const int64* xl, const int64* xu, const int16* xlp, const int16x8_t ys_lerpsx) { return ComputeLerpx8<RESOLUTION>( yl + xl[ID0] + CH0, yl + xu[ID0] + CH0, yu + xl[ID0] + CH0, yu + xu[ID0] + CH0, xlp + ID0, yl + xl[ID1] + CH1, yl + xu[ID1] + CH1, yu + xl[ID1] + CH1, yu + xu[ID1] + CH1, xlp + ID1, yl + xl[ID2] + CH2, yl + xu[ID2] + CH2, yu + xl[ID2] + CH2, yu + xu[ID2] + CH2, xlp + ID2, yl + xl[ID3] + CH3, yl + xu[ID3] + CH3, yu + xl[ID3] + CH3, yu + xu[ID3] + CH3, xlp + ID3, yl + xl[ID4] + CH4, yl + xu[ID4] + CH4, yu + xl[ID4] + CH4, yu + xu[ID4] + CH4, xlp + ID4, yl + xl[ID5] + CH5, yl + xu[ID5] + CH5, yu + xl[ID5] + CH5, yu + xu[ID5] + CH5, xlp + ID5, yl + xl[ID6] + CH6, yl + xu[ID6] + CH6, yu + xl[ID6] + CH6, yu + xu[ID6] + CH6, xlp + ID6, yl + xl[ID7] + CH7, yl + xu[ID7] + CH7, yu + xl[ID7] + CH7, yu + xu[ID7] + CH7, xlp + ID7, ys_lerpsx); } #endif template <int RESOLUTION, typename T, typename T_SCALE, typename T_CALC> inline void OutputLerpForChannels(const InterpolationCache<T_SCALE>& xs, const int64 x, const T_SCALE ys_ilerp, const int channels, const float min, const float max, const T* ys_input_lower_ptr, const T* ys_input_upper_ptr, T* output_y_ptr) { const int64 xs_lower = xs.lower[x]; const int64 xs_upper = xs.upper[x]; const T_SCALE xs_ilerp = xs.ilerp[x]; for (int c = 0; c < channels; 
++c) { const T top_left = ys_input_lower_ptr[xs_lower + c]; const T top_right = ys_input_lower_ptr[xs_upper + c]; const T bottom_left = ys_input_upper_ptr[xs_lower + c]; const T bottom_right = ys_input_upper_ptr[xs_upper + c]; const T val = ComputeLerp<RESOLUTION, T, T_SCALE, T_CALC>( top_left, top_right, bottom_left, bottom_right, xs_ilerp, ys_ilerp); output_y_ptr[x * channels + c] = val; } } template <int RES> inline void OutputLerp8x8x1(const InterpolationCache<int16>& xs, const int64 x_start, const int16 ys_ilerp, const float min, const float max, const quint8* const ys_input_lower_ptr, const quint8* const ys_input_upper_ptr, quint8* output_y_ptr) { #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON const int16x8_t y_lerpsx = vmovq_n_s16(ys_ilerp); const uint8x8_t x0x7 = ComputeLerpx8Tmpl<RES, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0>( ys_input_lower_ptr, ys_input_upper_ptr, &xs.lower[x_start], &xs.upper[x_start], &xs.ilerp[x_start], y_lerpsx); vst1_u8(reinterpret_cast<uint8_t*>(output_y_ptr + x_start), x0x7); #else for (int x = x_start; x < x_start + 8; ++x) { OutputLerpForChannels<RES, quint8, int16, int16>( xs, x, ys_ilerp, 1, min, max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } #endif } template <int RES> inline void OutputLerp8x8x3(const InterpolationCache<int16>& xs, const int64 x_start, const int16 ys_ilerp, const float min, const float max, const quint8* const ys_input_lower_ptr, const quint8* const ys_input_upper_ptr, quint8* output_y_ptr) { #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON const int16x8_t y_lerpsx = vmovq_n_s16(ys_ilerp); const uint8x8_t x0c0x2c1 = ComputeLerpx8Tmpl<RES, 0, 0, 0, 1, 0, 2, 1, 0, 1, 1, 1, 2, 2, 0, 2, 1>( ys_input_lower_ptr, ys_input_upper_ptr, &xs.lower[x_start], &xs.upper[x_start], &xs.ilerp[x_start], y_lerpsx); vst1_u8(reinterpret_cast<uint8_t*>(output_y_ptr + x_start * 3), x0c0x2c1); const uint8x8_t x2c2x5c0 = ComputeLerpx8Tmpl<RES, 2, 2, 3, 0, 3, 1, 3, 2, 4, 0, 4, 1, 4, 2, 5, 0>( ys_input_lower_ptr, ys_input_upper_ptr, &xs.lower[x_start], &xs.upper[x_start], &xs.ilerp[x_start], y_lerpsx); vst1_u8(reinterpret_cast<uint8_t*>(output_y_ptr + x_start * 3 + 8), x2c2x5c0); const uint8x8_t x5c1x7c2 = ComputeLerpx8Tmpl<RES, 5, 1, 5, 2, 6, 0, 6, 1, 6, 2, 7, 0, 7, 1, 7, 2>( ys_input_lower_ptr, ys_input_upper_ptr, &xs.lower[x_start], &xs.upper[x_start], &xs.ilerp[x_start], y_lerpsx); vst1_u8(reinterpret_cast<uint8_t*>(output_y_ptr + x_start * 3 + 16), x5c1x7c2); #else for (int x = x_start; x < x_start + 8; ++x) { OutputLerpForChannels<RES, quint8, int16, int16>( xs, x, ys_ilerp, 3, min, max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } #endif } template <int RESOLUTION> inline void OutputLerp32x4x1(const InterpolationCache<int32>& xs, const int64 x_start, const int32 ys_ilerp, const float min, const float max, const qint32* const ys_input_lower_ptr, const qint32* const ys_input_upper_ptr, qint32* output_y_ptr) { #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON const int64 xs_lower0 = xs.lower[x_start]; const int64 xs_upper0 = xs.upper[x_start]; const int32* const xs_ilerp0 = &xs.ilerp[x_start]; const int64 xs_lower1 = xs.lower[x_start + 1]; const int64 xs_upper1 = xs.upper[x_start + 1]; const int64 xs_lower2 = xs.lower[x_start + 2]; const int64 xs_upper2 = xs.upper[x_start + 2]; const int32* const xs_ilerp2 = &xs.ilerp[x_start + 2]; const int64 xs_lower3 = xs.lower[x_start + 3]; const int64 xs_upper3 = xs.upper[x_start + 3]; const int32x2_t y_lerpsx = vmov_n_s32(ys_ilerp); const int32x2_t x0x1 = ComputeLerpx2<RESOLUTION, false>( 
ys_input_lower_ptr + xs_lower0, ys_input_lower_ptr + xs_upper0, ys_input_upper_ptr + xs_lower0, ys_input_upper_ptr + xs_upper0, ys_input_lower_ptr + xs_lower1, ys_input_lower_ptr + xs_upper1, ys_input_upper_ptr + xs_lower1, ys_input_upper_ptr + xs_upper1, xs_ilerp0, y_lerpsx); const int32x2_t x1x2 = ComputeLerpx2<RESOLUTION, false>( ys_input_lower_ptr + xs_lower2, ys_input_lower_ptr + xs_upper2, ys_input_upper_ptr + xs_lower2, ys_input_upper_ptr + xs_upper2, ys_input_lower_ptr + xs_lower3, ys_input_lower_ptr + xs_upper3, ys_input_upper_ptr + xs_lower3, ys_input_upper_ptr + xs_upper3, xs_ilerp2, y_lerpsx); const int32x4_t x0x1x2x3 = vcombine_s32(x0x1, x1x2); vst1q_s32(reinterpret_cast<int32*>(output_y_ptr + x_start), x0x1x2x3); #else for (int x = x_start; x < x_start + 4; ++x) { OutputLerpForChannels<RESOLUTION, qint32, int32, int64>( xs, x, ys_ilerp, 1, min, max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } #endif } template <int RESOLUTION> inline void OutputLerp32x4x3(const InterpolationCache<int32>& xs, const int64 x_start, const int32 ys_ilerp, const float min, const float max, const qint32* const ys_input_lower_ptr, const qint32* const ys_input_upper_ptr, qint32* output_y_ptr) { #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON const int64 xs_lower0 = xs.lower[x_start]; const int64 xs_upper0 = xs.upper[x_start]; const int32* const xs_ilerp0 = &xs.ilerp[x_start]; const int64 xs_lower1 = xs.lower[x_start + 1]; const int64 xs_upper1 = xs.upper[x_start + 1]; const int32* const xs_ilerp1 = &xs.ilerp[x_start + 1]; const int64 xs_lower2 = xs.lower[x_start + 2]; const int64 xs_upper2 = xs.upper[x_start + 2]; const int32* const xs_ilerp2 = &xs.ilerp[x_start + 2]; const int64 xs_lower3 = xs.lower[x_start + 3]; const int64 xs_upper3 = xs.upper[x_start + 3]; const int32* const xs_ilerp3 = &xs.ilerp[x_start + 3]; const int32x2_t y_lerpsx = vmov_n_s32(ys_ilerp); const int32x2_t x0c0x0c1 = ComputeLerpx2<RESOLUTION, true>( ys_input_lower_ptr + xs_lower0, ys_input_lower_ptr + xs_upper0, ys_input_upper_ptr + xs_lower0, ys_input_upper_ptr + xs_upper0, ys_input_lower_ptr + xs_lower0 + 1, ys_input_lower_ptr + xs_upper0 + 1, ys_input_upper_ptr + xs_lower0 + 1, ys_input_upper_ptr + xs_upper0 + 1, xs_ilerp0, y_lerpsx); const int32x2_t x0c2x1c0 = ComputeLerpx2<RESOLUTION, false>( ys_input_lower_ptr + xs_lower0 + 2, ys_input_lower_ptr + xs_upper0 + 2, ys_input_upper_ptr + xs_lower0 + 2, ys_input_upper_ptr + xs_upper0 + 2, ys_input_lower_ptr + xs_lower1, ys_input_lower_ptr + xs_upper1, ys_input_upper_ptr + xs_lower1, ys_input_upper_ptr + xs_upper1, xs_ilerp0, y_lerpsx); const int32x2_t x1c1x1c2 = ComputeLerpx2<RESOLUTION, true>( ys_input_lower_ptr + xs_lower1 + 1, ys_input_lower_ptr + xs_upper1 + 1, ys_input_upper_ptr + xs_lower1 + 1, ys_input_upper_ptr + xs_upper1 + 1, ys_input_lower_ptr + xs_lower1 + 2, ys_input_lower_ptr + xs_upper1 + 2, ys_input_upper_ptr + xs_lower1 + 2, ys_input_upper_ptr + xs_upper1 + 2, xs_ilerp1, y_lerpsx); const int32x2_t x2c0x2c1 = ComputeLerpx2<RESOLUTION, true>( ys_input_lower_ptr + xs_lower2, ys_input_lower_ptr + xs_upper2, ys_input_upper_ptr + xs_lower2, ys_input_upper_ptr + xs_upper2, ys_input_lower_ptr + xs_lower2 + 1, ys_input_lower_ptr + xs_upper2 + 1, ys_input_upper_ptr + xs_lower2 + 1, ys_input_upper_ptr + xs_upper2 + 1, xs_ilerp2, y_lerpsx); const int32x2_t x2c2x3c0 = ComputeLerpx2<RESOLUTION, false>( ys_input_lower_ptr + xs_lower2 + 2, ys_input_lower_ptr + xs_upper2 + 2, ys_input_upper_ptr + xs_lower2 + 2, ys_input_upper_ptr + xs_upper2 + 2, ys_input_lower_ptr + 
xs_lower3, ys_input_lower_ptr + xs_upper3, ys_input_upper_ptr + xs_lower3, ys_input_upper_ptr + xs_upper3, xs_ilerp2, y_lerpsx); const int32x2_t x3c1x3c2 = ComputeLerpx2<RESOLUTION, true>( ys_input_lower_ptr + xs_lower3 + 1, ys_input_lower_ptr + xs_upper3 + 1, ys_input_upper_ptr + xs_lower3 + 1, ys_input_upper_ptr + xs_upper3 + 1, ys_input_lower_ptr + xs_lower3 + 2, ys_input_lower_ptr + xs_upper3 + 2, ys_input_upper_ptr + xs_lower3 + 2, ys_input_upper_ptr + xs_upper3 + 2, xs_ilerp3, y_lerpsx); const int32x4_t x0c0x0c1x0c2x1c0 = vcombine_s32(x0c0x0c1, x0c2x1c0); const int32x4_t x1c1x1c2x2c0x2c1 = vcombine_s32(x1c1x1c2, x2c0x2c1); const int32x4_t x2c2x3c0x3c1x3c2 = vcombine_s32(x2c2x3c0, x3c1x3c2); vst1q_s32(reinterpret_cast<int32*>(output_y_ptr + x_start * 3), x0c0x0c1x0c2x1c0); vst1q_s32(reinterpret_cast<int32*>(output_y_ptr + x_start * 3 + 4), x1c1x1c2x2c0x2c1); vst1q_s32(reinterpret_cast<int32*>(output_y_ptr + x_start * 3 + 8), x2c2x3c0x3c1x3c2); #else for (int x = x_start; x < x_start + 4; ++x) { OutputLerpForChannels<RESOLUTION, qint32, int32, int64>( xs, x, ys_ilerp, 3, min, max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } #endif } template <typename T> void ResizeImageReference(typename TTypes<T, 4>::ConstTensor images, const int batch_size, const int64 in_height, const int64 in_width, const int64 out_height, const int64 out_width, const int channels, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<T, 4>::Tensor* output) { CHECK_NOTNULL(output); const InterpolationCache<float> xs = BuildLerpCache<float>( out_width, in_width, width_scale, channels, 0, half_pixel_centers); const InterpolationCache<float> ys = BuildLerpCache<float>( out_height, in_height, height_scale, 1, 0, half_pixel_centers); const int64 in_row_size = in_width * channels; const int64 in_batch_num_values = in_height * in_row_size; const int64 out_row_size = out_width * channels; const T* input_b_ptr = images.data(); T* output_y_ptr = output->data(); for (int b = 0; b < batch_size; ++b) { for (int64 y = 0; y < out_height; ++y) { const T* ys_input_lower_ptr = input_b_ptr + ys.lower[y] * in_row_size; const T* ys_input_upper_ptr = input_b_ptr + ys.upper[y] * in_row_size; const float ys_lerp = ys.lerp[y]; for (int64 x = 0; x < out_width; ++x) { const int64 xs_lower = xs.lower[x]; const int64 xs_upper = xs.upper[x]; const float xs_lerp = xs.lerp[x]; for (int c = 0; c < channels; ++c) { const T top_left = ys_input_lower_ptr[xs_lower + c]; const T top_right = ys_input_lower_ptr[xs_upper + c]; const T bottom_left = ys_input_upper_ptr[xs_lower + c]; const T bottom_right = ys_input_upper_ptr[xs_upper + c]; const T val = ComputeLerpReference<T>( top_left, top_right, bottom_left, bottom_right, xs_lerp, ys_lerp, in_min, in_max); output_y_ptr[x * channels + c] = val; } } output_y_ptr += out_row_size; } input_b_ptr += in_batch_num_values; } } template <typename T> void ResizeImage(typename TTypes<T, 4>::ConstTensor images, const int batch_size, const int64 in_height, const int64 in_width, const int64 out_height, const int64 out_width, const int channels, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<T, 4>::Tensor* output) { ResizeImageReference<T>(images, batch_size, in_height, in_width, out_height, out_width, channels, height_scale, width_scale, in_min, in_max, half_pixel_centers, output); } template <> void ResizeImage<qint32>(typename 
TTypes<qint32, 4>::ConstTensor images, const int batch_size, const int64 in_height, const int64 in_width, const int64 out_height, const int64 out_width, const int channels, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<qint32, 4>::Tensor* output) { // 30 is maximum resolution for signed int. constexpr int RESOLUTION = 30; constexpr int SIMD_STEP = 4; CHECK_NOTNULL(output); const InterpolationCache<int32> xs = BuildLerpCache<int32>(out_width, in_width, width_scale, channels, RESOLUTION, half_pixel_centers); const InterpolationCache<int32> ys = BuildLerpCache<int32>( out_height, in_height, height_scale, 1, RESOLUTION, half_pixel_centers); const int64 in_row_size = in_width * channels; const int64 in_batch_num_values = in_height * in_row_size; const int64 out_row_size = out_width * channels; const qint32* input_b_ptr = images.data(); qint32* output_y_ptr = output->data(); for (int b = 0; b < batch_size; ++b) { for (int64 y = 0; y < out_height; ++y) { const qint32* ys_input_lower_ptr = input_b_ptr + ys.lower[y] * in_row_size; const qint32* ys_input_upper_ptr = input_b_ptr + ys.upper[y] * in_row_size; const int32 ys_ilerp = ys.ilerp[y]; // Optimized for channels == 1 or channels == 3 as this // is typical channels. int64 x = 0; if (channels == 1) { for (; x < out_width - SIMD_STEP + 1; x += SIMD_STEP) { OutputLerp32x4x1<RESOLUTION>(xs, x, ys_ilerp, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } } else if (channels == 3) { for (; x < out_width - SIMD_STEP + 1; x += SIMD_STEP) { OutputLerp32x4x3<RESOLUTION>(xs, x, ys_ilerp, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } } for (; x < out_width; ++x) { OutputLerpForChannels<RESOLUTION, qint32, int32, int64>( xs, x, ys_ilerp, channels, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } output_y_ptr += out_row_size; } input_b_ptr += in_batch_num_values; } } template <> void ResizeImage<quint8>(typename TTypes<quint8, 4>::ConstTensor images, const int batch_size, const int64 in_height, const int64 in_width, const int64 out_height, const int64 out_width, const int channels, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<quint8, 4>::Tensor* output) { // 7 is maximum resolution for unsigned byte. constexpr int RESOLUTION = 7; constexpr int SIMD_STEP = 8; CHECK_NOTNULL(output); const InterpolationCache<int16> xs = BuildLerpCache<int16>(out_width, in_width, width_scale, channels, RESOLUTION, half_pixel_centers); const InterpolationCache<int16> ys = BuildLerpCache<int16>( out_height, in_height, height_scale, 1, RESOLUTION, half_pixel_centers); const int64 in_row_size = in_width * channels; const int64 in_batch_num_values = in_height * in_row_size; const int64 out_row_size = out_width * channels; const quint8* input_b_ptr = images.data(); quint8* output_y_ptr = output->data(); for (int b = 0; b < batch_size; ++b) { for (int64 y = 0; y < out_height; ++y) { const quint8* ys_input_lower_ptr = input_b_ptr + ys.lower[y] * in_row_size; const quint8* ys_input_upper_ptr = input_b_ptr + ys.upper[y] * in_row_size; const int32 ys_ilerp = ys.ilerp[y]; // Optimized for channels == 1 or channels == 3 as this // is typical channels. // TODO(satok): Support more generic NEON optimized implementation // for different channels. 
int64 x = 0; if (channels == 1) { for (; x < out_width - SIMD_STEP + 1; x += SIMD_STEP) { OutputLerp8x8x1<RESOLUTION>(xs, x, ys_ilerp, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } } else if (channels == 3) { for (; x < out_width - SIMD_STEP + 1; x += SIMD_STEP) { OutputLerp8x8x3<RESOLUTION>(xs, x, ys_ilerp, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } } for (; x < out_width; ++x) { OutputLerpForChannels<RESOLUTION, quint8, int16, int16>( xs, x, ys_ilerp, channels, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } output_y_ptr += out_row_size; } input_b_ptr += in_batch_num_values; } } template <typename T> void ResizeBilinear(const typename TTypes<T, 4>::ConstTensor& images, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<T, 4>::Tensor* output) { CHECK_NOTNULL(output); const int batch_size = images.dimension(0); const int64 in_height = images.dimension(1); const int64 in_width = images.dimension(2); const int channels = images.dimension(3); const int64 out_height = output->dimension(1); const int64 out_width = output->dimension(2); // Handle no-op resizes efficiently. if (out_height == in_height && out_width == in_width) { *output = images.template cast<T>(); return; } if (USE_REFERENCE) { ResizeImageReference<T>(images, batch_size, in_height, in_width, out_height, out_width, channels, height_scale, width_scale, in_min, in_max, half_pixel_centers, output); } else { ResizeImage<T>(images, batch_size, in_height, in_width, out_height, out_width, channels, height_scale, width_scale, in_min, in_max, half_pixel_centers, output); } } } // namespace template <class T> class QuantizedResizeBilinearOp : public OpKernel { public: explicit QuantizedResizeBilinearOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_)); OP_REQUIRES_OK( context, context->GetAttr("half_pixel_centers", &half_pixel_centers_)); } void Compute(OpKernelContext* context) override { const float in_min = context->input(2).flat<float>()(0); const float in_max = context->input(3).flat<float>()(0); ImageResizerState st(align_corners_, false); st.ValidateAndCreateOutput(context); if (!context->status().ok()) return; // Return if the output is empty. if (st.output->NumElements() == 0) return; typename TTypes<T, 4>::ConstTensor image_data( context->input(0).tensor<T, 4>()); typename TTypes<T, 4>::Tensor output_data(st.output->tensor<T, 4>()); ResizeBilinear<T>(image_data, st.height_scale, st.width_scale, in_min, in_max, half_pixel_centers_, &output_data); Tensor* out_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &out_min)); out_min->flat<float>()(0) = in_min; Tensor* out_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &out_max)); out_max->flat<float>()(0) = in_max; } private: bool align_corners_; bool half_pixel_centers_; TF_DISALLOW_COPY_AND_ASSIGN(QuantizedResizeBilinearOp<T>); }; #define REGISTER_CPU_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("QuantizedResizeBilinear") \ .Device(DEVICE_CPU) \ .HostMemory("size") \ .TypeConstraint<type>("T"), \ QuantizedResizeBilinearOp<type>) REGISTER_CPU_KERNEL(::tensorflow::quint8); REGISTER_CPU_KERNEL(::tensorflow::qint32); REGISTER_CPU_KERNEL(float); } // namespace tensorflow
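A minimal standalone sketch (not part of the dataset cell above) of the fixed-point bilinear interpolation that ComputeLerp<RESOLUTION> in this kernel implements: the lerp weights are pre-scaled by 2^RESOLUTION (7 for 8-bit data, so that 255 * 2^7 still fits in int16), the interpolation is carried out in integer arithmetic, and the result is rounded back down. The pixel values and weights used below are hypothetical, and the quantized min/max mapping is assumed to be identity for the illustration.

// lerp_sketch.cc -- illustrative only; mirrors ComputeLerpReference/ComputeLerp
// from the kernel above for a single 8-bit pixel.
#include <cstdint>
#include <cstdio>

// Float reference, as in ComputeLerpReference (without the quantize/dequantize).
float LerpReference(float tl, float tr, float bl, float br, float x_lerp,
                    float y_lerp) {
  const float top = tl + (tr - tl) * x_lerp;
  const float bottom = bl + (br - bl) * x_lerp;
  return top + (bottom - top) * y_lerp;
}

// Fixed-point version, mirroring ComputeLerp<RESOLUTION, quint8, int16, int16>.
template <int RESOLUTION>
uint8_t LerpFixedPoint(uint8_t tl, uint8_t tr, uint8_t bl, uint8_t br,
                       int16_t x_ilerp, int16_t y_ilerp) {
  constexpr int16_t kMult = 1 << RESOLUTION;
  const int16_t top =
      static_cast<int16_t>(tl) * kMult +
      (static_cast<int16_t>(tr) - static_cast<int16_t>(tl)) * x_ilerp;
  const int16_t bottom =
      static_cast<int16_t>(bl) * kMult +
      (static_cast<int16_t>(bl) - static_cast<int16_t>(bl) + static_cast<int16_t>(br) - static_cast<int16_t>(bl)) * x_ilerp;
  const int16_t out = top + (bottom - top) / kMult * y_ilerp;
  return static_cast<uint8_t>((out + kMult / 2) / kMult);  // round to nearest
}

int main() {
  const uint8_t tl = 10, tr = 50, bl = 90, br = 130;  // hypothetical 2x2 patch
  const float x_lerp = 0.25f, y_lerp = 0.75f;         // interpolation weights
  const int16_t x_ilerp = static_cast<int16_t>(x_lerp * (1 << 7));
  const int16_t y_ilerp = static_cast<int16_t>(y_lerp * (1 << 7));
  std::printf("float reference: %f\n",
              LerpReference(tl, tr, bl, br, x_lerp, y_lerp));        // 80.0
  std::printf("fixed point    : %d\n",
              LerpFixedPoint<7>(tl, tr, bl, br, x_ilerp, y_ilerp));  // 80
  return 0;
}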
null
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // Implements a quantized version of the resize bilinear op. #define EIGEN_USE_THREADS #if defined(__ARM_NEON__) || defined(__ARM_NEON) #define USE_NEON #define QUANTIZED_RESIZE_BILINEAR_USE_NEON #include <arm_neon.h> #endif #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/quantization_utils.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/util/image_resizer_state.h" namespace tensorflow { static constexpr bool USE_REFERENCE = false; namespace { // Compute the interpolation indices only once. template <typename T_SCALE> struct InterpolationCache { std::vector<int64> lower; // Lower source index used in the interpolation std::vector<int64> upper; // Upper source index used in the interpolation // 1-D linear interpolation scale (see: // https://en.wikipedia.org/wiki/Bilinear_interpolation) std::vector<float> lerp; std::vector<T_SCALE> ilerp; }; template <typename T_SCALE, typename Scaler> inline void ComputeInterpolationWeights( const int64 out_size, const int64 in_size, const float scale, const int resolution, InterpolationCache<T_SCALE>* interpolation) { const Scaler scaler; interpolation->lower.resize(out_size + 1); interpolation->upper.resize(out_size + 1); interpolation->lerp.resize(out_size + 1); interpolation->ilerp.resize(out_size + 1); interpolation->lower[out_size] = 0; interpolation->upper[out_size] = 0; for (int64 i = out_size - 1; i >= 0; --i) { const float in = scaler(i, scale); const float in_f = std::floor(in); interpolation->lower[i] = std::max(static_cast<int64>(in_f), static_cast<int64>(0)); interpolation->upper[i] = std::min(static_cast<int64>(std::ceil(in)), in_size - 1); interpolation->lower[i] = std::min(interpolation->lower[i], interpolation->upper[i]); interpolation->lerp[i] = in - in_f; interpolation->ilerp[i] = static_cast<T_SCALE>((in - in_f) * (1 << resolution)); } } template <typename T_SCALE> inline InterpolationCache<T_SCALE> BuildLerpCache( const int64 out_size, const int64 in_size, const float scale, const int index_step, const int resolution, const bool half_pixel_centers) { InterpolationCache<T_SCALE> cache; // Compute the cached interpolation weights on the x and y dimensions. if (half_pixel_centers) { ComputeInterpolationWeights<T_SCALE, HalfPixelScaler>( out_size, in_size, scale, resolution, &cache); } else { ComputeInterpolationWeights<T_SCALE, LegacyScaler>(out_size, in_size, scale, resolution, &cache); } CHECK(index_step > 0); if (index_step > 1) { for (int i = 0; i < cache.lower.size(); ++i) { cache.lower[i] *= index_step; cache.upper[i] *= index_step; } } return cache; } /** * Computes the bilinear interpolation from the appropriate 4 float points * and the linear interpolation weights. 
*/ template <typename T> inline T ComputeLerpReference(const T in_top_left, const T in_top_right, const T in_bottom_left, const T in_bottom_right, const float x_lerp, const float y_lerp, const float min, const float max) { const float top_left = QuantizedToFloat<T>(in_top_left, min, max); const float top_right = QuantizedToFloat<T>(in_top_right, min, max); const float bottom_left = QuantizedToFloat<T>(in_bottom_left, min, max); const float bottom_right = QuantizedToFloat<T>(in_bottom_right, min, max); const float top = top_left + (top_right - top_left) * x_lerp; const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp; const float out = top + (bottom - top) * y_lerp; return FloatToQuantized<T>(out, min, max); } template <typename T, typename T_SCALE, typename T_CALC> inline T_CALC MulOffset(T a, T b, T_SCALE c) { return (static_cast<T_CALC>(a) - static_cast<T_CALC>(b)) * static_cast<T_CALC>(c); } template <int RESOLUTION, typename T, typename T_SCALE, typename T_CALC> inline T ComputeLerp(const T top_left, const T top_right, const T bottom_left, const T bottom_right, const T_SCALE x_lerp, const T_SCALE y_lerp) { constexpr T_CALC RESOLUTION_MULT = (1 << RESOLUTION); const T_CALC top = static_cast<T_CALC>(top_left) * RESOLUTION_MULT + MulOffset<T, T_SCALE, T_CALC>(top_right, top_left, x_lerp); const T_CALC bottom = static_cast<T_CALC>(bottom_left) * RESOLUTION_MULT + MulOffset<T, T_SCALE, T_CALC>(bottom_right, bottom_left, x_lerp); const T_CALC out = top + (bottom - top) / RESOLUTION_MULT * y_lerp; return static_cast<T>( static_cast<int32>((out + RESOLUTION_MULT / 2) / RESOLUTION_MULT)); } #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON inline uint8x8_t ToUint8x8(const quint8* v0, const quint8* v1, const quint8* v2, const quint8* v3, const quint8* v4, const quint8* v5, const quint8* v6, const quint8* v7) { static const uint8x8_t ZERO_8x8 = vmov_n_u8(0); uint8x8_t ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v0), ZERO_8x8, 0); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v1), ret, 1); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v2), ret, 2); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v3), ret, 3); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v4), ret, 4); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v5), ret, 5); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v6), ret, 6); ret = vld1_lane_u8(reinterpret_cast<const uint8*>(v7), ret, 7); return ret; } inline int16x8_t ToInt16x8(const int16* v0, const int16* v1, const int16* v2, const int16* v3, const int16* v4, const int16* v5, const int16* v6, const int16* v7) { static const int16x8_t ZERO_16x8 = vmovq_n_s16(0); int16x8_t ret = vld1q_lane_s16(v0, ZERO_16x8, 0); ret = vld1q_lane_s16(v1, ret, 1); ret = vld1q_lane_s16(v2, ret, 2); ret = vld1q_lane_s16(v3, ret, 3); ret = vld1q_lane_s16(v4, ret, 4); ret = vld1q_lane_s16(v5, ret, 5); ret = vld1q_lane_s16(v6, ret, 6); ret = vld1q_lane_s16(v7, ret, 7); return ret; } inline int32x2_t ToInt32x2(const qint32* v0, const qint32* v1) { static const int32x2_t ZERO_32x2 = vmov_n_s32(0); const int32x2_t ret0 = vld1_lane_s32(reinterpret_cast<const int32*>(v0), ZERO_32x2, 0); const int32x2_t ret1 = vld1_lane_s32(reinterpret_cast<const int32*>(v1), ret0, 1); return ret1; } template <int RESOLUTION, bool X_LERP_SAME> inline int32x2_t ComputeLerpx2( const qint32* top_left0, const qint32* top_right0, const qint32* bottom_left0, const qint32* bottom_right0, const qint32* top_left1, const qint32* top_right1, const qint32* bottom_left1, const qint32* bottom_right1, const 
int32* x_lerp, const int32x2_t y_lerpsx) { const int32x2_t x_lerpsx = X_LERP_SAME ? vld1_dup_s32(reinterpret_cast<const int32*>(x_lerp)) : vld1_s32(reinterpret_cast<const int32*>(x_lerp)); const int32x2_t top_leftsx = ToInt32x2(top_left0, top_left1); const int32x2_t top_rightsx = ToInt32x2(top_right0, top_right1); const int32x2_t bottom_leftsx = ToInt32x2(bottom_left0, bottom_left1); const int32x2_t bottom_rightsx = ToInt32x2(bottom_right0, bottom_right1); const int32x2_t retval = ComputeLerp32x2<RESOLUTION>(top_leftsx, top_rightsx, bottom_leftsx, bottom_rightsx, x_lerpsx, y_lerpsx); return retval; } template <int RESOLUTION> inline uint8x8_t ComputeLerpx8( const quint8* tl0, const quint8* tr0, const quint8* bl0, const quint8* br0, const int16* xlp0, const quint8* tl1, const quint8* tr1, const quint8* bl1, const quint8* br1, const int16* xlp1, const quint8* tl2, const quint8* tr2, const quint8* bl2, const quint8* br2, const int16* xlp2, const quint8* tl3, const quint8* tr3, const quint8* bl3, const quint8* br3, const int16* xlp3, const quint8* tl4, const quint8* tr4, const quint8* bl4, const quint8* br4, const int16* xlp4, const quint8* tl5, const quint8* tr5, const quint8* bl5, const quint8* br5, const int16* xlp5, const quint8* tl6, const quint8* tr6, const quint8* bl6, const quint8* br6, const int16* xlp6, const quint8* tl7, const quint8* tr7, const quint8* bl7, const quint8* br7, const int16* xlp7, const int16x8_t ys_lerpsx) { const uint8x8_t tl8x8 = ToUint8x8(tl0, tl1, tl2, tl3, tl4, tl5, tl6, tl7); const uint8x8_t tr8x8 = ToUint8x8(tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7); const uint8x8_t bl8x8 = ToUint8x8(bl0, bl1, bl2, bl3, bl4, bl5, bl6, bl7); const uint8x8_t br8x8 = ToUint8x8(br0, br1, br2, br3, br4, br5, br6, br7); const int16x8_t xs_lerpsx = ToInt16x8(xlp0, xlp1, xlp2, xlp3, xlp4, xlp5, xlp6, xlp7); return ComputeLerp8x8<RESOLUTION>(tl8x8, tr8x8, bl8x8, br8x8, xs_lerpsx, ys_lerpsx); } // Expand address at compile time to improve performance template <int RESOLUTION, int ID0, int CH0, int ID1, int CH1, int ID2, int CH2, int ID3, int CH3, int ID4, int CH4, int ID5, int CH5, int ID6, int CH6, int ID7, int CH7> inline uint8x8_t ComputeLerpx8Tmpl(const quint8* const yl, const quint8* yu, const int64* xl, const int64* xu, const int16* xlp, const int16x8_t ys_lerpsx) { return ComputeLerpx8<RESOLUTION>( yl + xl[ID0] + CH0, yl + xu[ID0] + CH0, yu + xl[ID0] + CH0, yu + xu[ID0] + CH0, xlp + ID0, yl + xl[ID1] + CH1, yl + xu[ID1] + CH1, yu + xl[ID1] + CH1, yu + xu[ID1] + CH1, xlp + ID1, yl + xl[ID2] + CH2, yl + xu[ID2] + CH2, yu + xl[ID2] + CH2, yu + xu[ID2] + CH2, xlp + ID2, yl + xl[ID3] + CH3, yl + xu[ID3] + CH3, yu + xl[ID3] + CH3, yu + xu[ID3] + CH3, xlp + ID3, yl + xl[ID4] + CH4, yl + xu[ID4] + CH4, yu + xl[ID4] + CH4, yu + xu[ID4] + CH4, xlp + ID4, yl + xl[ID5] + CH5, yl + xu[ID5] + CH5, yu + xl[ID5] + CH5, yu + xu[ID5] + CH5, xlp + ID5, yl + xl[ID6] + CH6, yl + xu[ID6] + CH6, yu + xl[ID6] + CH6, yu + xu[ID6] + CH6, xlp + ID6, yl + xl[ID7] + CH7, yl + xu[ID7] + CH7, yu + xl[ID7] + CH7, yu + xu[ID7] + CH7, xlp + ID7, ys_lerpsx); } #endif template <int RESOLUTION, typename T, typename T_SCALE, typename T_CALC> inline void OutputLerpForChannels(const InterpolationCache<T_SCALE>& xs, const int64 x, const T_SCALE ys_ilerp, const int channels, const float min, const float max, const T* ys_input_lower_ptr, const T* ys_input_upper_ptr, T* output_y_ptr) { const int64 xs_lower = xs.lower[x]; const int64 xs_upper = xs.upper[x]; const T_SCALE xs_ilerp = xs.ilerp[x]; for (int c = 0; c < channels; 
++c) { const T top_left = ys_input_lower_ptr[xs_lower + c]; const T top_right = ys_input_lower_ptr[xs_upper + c]; const T bottom_left = ys_input_upper_ptr[xs_lower + c]; const T bottom_right = ys_input_upper_ptr[xs_upper + c]; const T val = ComputeLerp<RESOLUTION, T, T_SCALE, T_CALC>( top_left, top_right, bottom_left, bottom_right, xs_ilerp, ys_ilerp); output_y_ptr[x * channels + c] = val; } } template <int RES> inline void OutputLerp8x8x1(const InterpolationCache<int16>& xs, const int64 x_start, const int16 ys_ilerp, const float min, const float max, const quint8* const ys_input_lower_ptr, const quint8* const ys_input_upper_ptr, quint8* output_y_ptr) { #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON const int16x8_t y_lerpsx = vmovq_n_s16(ys_ilerp); const uint8x8_t x0x7 = ComputeLerpx8Tmpl<RES, 0, 0, 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0>( ys_input_lower_ptr, ys_input_upper_ptr, &xs.lower[x_start], &xs.upper[x_start], &xs.ilerp[x_start], y_lerpsx); vst1_u8(reinterpret_cast<uint8_t*>(output_y_ptr + x_start), x0x7); #else for (int x = x_start; x < x_start + 8; ++x) { OutputLerpForChannels<RES, quint8, int16, int16>( xs, x, ys_ilerp, 1, min, max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } #endif } template <int RES> inline void OutputLerp8x8x3(const InterpolationCache<int16>& xs, const int64 x_start, const int16 ys_ilerp, const float min, const float max, const quint8* const ys_input_lower_ptr, const quint8* const ys_input_upper_ptr, quint8* output_y_ptr) { #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON const int16x8_t y_lerpsx = vmovq_n_s16(ys_ilerp); const uint8x8_t x0c0x2c1 = ComputeLerpx8Tmpl<RES, 0, 0, 0, 1, 0, 2, 1, 0, 1, 1, 1, 2, 2, 0, 2, 1>( ys_input_lower_ptr, ys_input_upper_ptr, &xs.lower[x_start], &xs.upper[x_start], &xs.ilerp[x_start], y_lerpsx); vst1_u8(reinterpret_cast<uint8_t*>(output_y_ptr + x_start * 3), x0c0x2c1); const uint8x8_t x2c2x5c0 = ComputeLerpx8Tmpl<RES, 2, 2, 3, 0, 3, 1, 3, 2, 4, 0, 4, 1, 4, 2, 5, 0>( ys_input_lower_ptr, ys_input_upper_ptr, &xs.lower[x_start], &xs.upper[x_start], &xs.ilerp[x_start], y_lerpsx); vst1_u8(reinterpret_cast<uint8_t*>(output_y_ptr + x_start * 3 + 8), x2c2x5c0); const uint8x8_t x5c1x7c2 = ComputeLerpx8Tmpl<RES, 5, 1, 5, 2, 6, 0, 6, 1, 6, 2, 7, 0, 7, 1, 7, 2>( ys_input_lower_ptr, ys_input_upper_ptr, &xs.lower[x_start], &xs.upper[x_start], &xs.ilerp[x_start], y_lerpsx); vst1_u8(reinterpret_cast<uint8_t*>(output_y_ptr + x_start * 3 + 16), x5c1x7c2); #else for (int x = x_start; x < x_start + 8; ++x) { OutputLerpForChannels<RES, quint8, int16, int16>( xs, x, ys_ilerp, 3, min, max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } #endif } template <int RESOLUTION> inline void OutputLerp32x4x1(const InterpolationCache<int32>& xs, const int64 x_start, const int32 ys_ilerp, const float min, const float max, const qint32* const ys_input_lower_ptr, const qint32* const ys_input_upper_ptr, qint32* output_y_ptr) { #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON const int64 xs_lower0 = xs.lower[x_start]; const int64 xs_upper0 = xs.upper[x_start]; const int32* const xs_ilerp0 = &xs.ilerp[x_start]; const int64 xs_lower1 = xs.lower[x_start + 1]; const int64 xs_upper1 = xs.upper[x_start + 1]; const int64 xs_lower2 = xs.lower[x_start + 2]; const int64 xs_upper2 = xs.upper[x_start + 2]; const int32* const xs_ilerp2 = &xs.ilerp[x_start + 2]; const int64 xs_lower3 = xs.lower[x_start + 3]; const int64 xs_upper3 = xs.upper[x_start + 3]; const int32x2_t y_lerpsx = vmov_n_s32(ys_ilerp); const int32x2_t x0x1 = ComputeLerpx2<RESOLUTION, false>( 
ys_input_lower_ptr + xs_lower0, ys_input_lower_ptr + xs_upper0, ys_input_upper_ptr + xs_lower0, ys_input_upper_ptr + xs_upper0, ys_input_lower_ptr + xs_lower1, ys_input_lower_ptr + xs_upper1, ys_input_upper_ptr + xs_lower1, ys_input_upper_ptr + xs_upper1, xs_ilerp0, y_lerpsx); const int32x2_t x1x2 = ComputeLerpx2<RESOLUTION, false>( ys_input_lower_ptr + xs_lower2, ys_input_lower_ptr + xs_upper2, ys_input_upper_ptr + xs_lower2, ys_input_upper_ptr + xs_upper2, ys_input_lower_ptr + xs_lower3, ys_input_lower_ptr + xs_upper3, ys_input_upper_ptr + xs_lower3, ys_input_upper_ptr + xs_upper3, xs_ilerp2, y_lerpsx); const int32x4_t x0x1x2x3 = vcombine_s32(x0x1, x1x2); vst1q_s32(reinterpret_cast<int32*>(output_y_ptr + x_start), x0x1x2x3); #else for (int x = x_start; x < x_start + 4; ++x) { OutputLerpForChannels<RESOLUTION, qint32, int32, int64>( xs, x, ys_ilerp, 1, min, max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } #endif } template <int RESOLUTION> inline void OutputLerp32x4x3(const InterpolationCache<int32>& xs, const int64 x_start, const int32 ys_ilerp, const float min, const float max, const qint32* const ys_input_lower_ptr, const qint32* const ys_input_upper_ptr, qint32* output_y_ptr) { #ifdef QUANTIZED_RESIZE_BILINEAR_USE_NEON const int64 xs_lower0 = xs.lower[x_start]; const int64 xs_upper0 = xs.upper[x_start]; const int32* const xs_ilerp0 = &xs.ilerp[x_start]; const int64 xs_lower1 = xs.lower[x_start + 1]; const int64 xs_upper1 = xs.upper[x_start + 1]; const int32* const xs_ilerp1 = &xs.ilerp[x_start + 1]; const int64 xs_lower2 = xs.lower[x_start + 2]; const int64 xs_upper2 = xs.upper[x_start + 2]; const int32* const xs_ilerp2 = &xs.ilerp[x_start + 2]; const int64 xs_lower3 = xs.lower[x_start + 3]; const int64 xs_upper3 = xs.upper[x_start + 3]; const int32* const xs_ilerp3 = &xs.ilerp[x_start + 3]; const int32x2_t y_lerpsx = vmov_n_s32(ys_ilerp); const int32x2_t x0c0x0c1 = ComputeLerpx2<RESOLUTION, true>( ys_input_lower_ptr + xs_lower0, ys_input_lower_ptr + xs_upper0, ys_input_upper_ptr + xs_lower0, ys_input_upper_ptr + xs_upper0, ys_input_lower_ptr + xs_lower0 + 1, ys_input_lower_ptr + xs_upper0 + 1, ys_input_upper_ptr + xs_lower0 + 1, ys_input_upper_ptr + xs_upper0 + 1, xs_ilerp0, y_lerpsx); const int32x2_t x0c2x1c0 = ComputeLerpx2<RESOLUTION, false>( ys_input_lower_ptr + xs_lower0 + 2, ys_input_lower_ptr + xs_upper0 + 2, ys_input_upper_ptr + xs_lower0 + 2, ys_input_upper_ptr + xs_upper0 + 2, ys_input_lower_ptr + xs_lower1, ys_input_lower_ptr + xs_upper1, ys_input_upper_ptr + xs_lower1, ys_input_upper_ptr + xs_upper1, xs_ilerp0, y_lerpsx); const int32x2_t x1c1x1c2 = ComputeLerpx2<RESOLUTION, true>( ys_input_lower_ptr + xs_lower1 + 1, ys_input_lower_ptr + xs_upper1 + 1, ys_input_upper_ptr + xs_lower1 + 1, ys_input_upper_ptr + xs_upper1 + 1, ys_input_lower_ptr + xs_lower1 + 2, ys_input_lower_ptr + xs_upper1 + 2, ys_input_upper_ptr + xs_lower1 + 2, ys_input_upper_ptr + xs_upper1 + 2, xs_ilerp1, y_lerpsx); const int32x2_t x2c0x2c1 = ComputeLerpx2<RESOLUTION, true>( ys_input_lower_ptr + xs_lower2, ys_input_lower_ptr + xs_upper2, ys_input_upper_ptr + xs_lower2, ys_input_upper_ptr + xs_upper2, ys_input_lower_ptr + xs_lower2 + 1, ys_input_lower_ptr + xs_upper2 + 1, ys_input_upper_ptr + xs_lower2 + 1, ys_input_upper_ptr + xs_upper2 + 1, xs_ilerp2, y_lerpsx); const int32x2_t x2c2x3c0 = ComputeLerpx2<RESOLUTION, false>( ys_input_lower_ptr + xs_lower2 + 2, ys_input_lower_ptr + xs_upper2 + 2, ys_input_upper_ptr + xs_lower2 + 2, ys_input_upper_ptr + xs_upper2 + 2, ys_input_lower_ptr + 
xs_lower3, ys_input_lower_ptr + xs_upper3, ys_input_upper_ptr + xs_lower3, ys_input_upper_ptr + xs_upper3, xs_ilerp2, y_lerpsx); const int32x2_t x3c1x3c2 = ComputeLerpx2<RESOLUTION, true>( ys_input_lower_ptr + xs_lower3 + 1, ys_input_lower_ptr + xs_upper3 + 1, ys_input_upper_ptr + xs_lower3 + 1, ys_input_upper_ptr + xs_upper3 + 1, ys_input_lower_ptr + xs_lower3 + 2, ys_input_lower_ptr + xs_upper3 + 2, ys_input_upper_ptr + xs_lower3 + 2, ys_input_upper_ptr + xs_upper3 + 2, xs_ilerp3, y_lerpsx); const int32x4_t x0c0x0c1x0c2x1c0 = vcombine_s32(x0c0x0c1, x0c2x1c0); const int32x4_t x1c1x1c2x2c0x2c1 = vcombine_s32(x1c1x1c2, x2c0x2c1); const int32x4_t x2c2x3c0x3c1x3c2 = vcombine_s32(x2c2x3c0, x3c1x3c2); vst1q_s32(reinterpret_cast<int32*>(output_y_ptr + x_start * 3), x0c0x0c1x0c2x1c0); vst1q_s32(reinterpret_cast<int32*>(output_y_ptr + x_start * 3 + 4), x1c1x1c2x2c0x2c1); vst1q_s32(reinterpret_cast<int32*>(output_y_ptr + x_start * 3 + 8), x2c2x3c0x3c1x3c2); #else for (int x = x_start; x < x_start + 4; ++x) { OutputLerpForChannels<RESOLUTION, qint32, int32, int64>( xs, x, ys_ilerp, 3, min, max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } #endif } template <typename T> void ResizeImageReference(typename TTypes<T, 4>::ConstTensor images, const int batch_size, const int64 in_height, const int64 in_width, const int64 out_height, const int64 out_width, const int channels, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<T, 4>::Tensor* output) { CHECK_NOTNULL(output); const InterpolationCache<float> xs = BuildLerpCache<float>( out_width, in_width, width_scale, channels, 0, half_pixel_centers); const InterpolationCache<float> ys = BuildLerpCache<float>( out_height, in_height, height_scale, 1, 0, half_pixel_centers); const int64 in_row_size = in_width * channels; const int64 in_batch_num_values = in_height * in_row_size; const int64 out_row_size = out_width * channels; const T* input_b_ptr = images.data(); T* output_y_ptr = output->data(); for (int b = 0; b < batch_size; ++b) { for (int64 y = 0; y < out_height; ++y) { const T* ys_input_lower_ptr = input_b_ptr + ys.lower[y] * in_row_size; const T* ys_input_upper_ptr = input_b_ptr + ys.upper[y] * in_row_size; const float ys_lerp = ys.lerp[y]; for (int64 x = 0; x < out_width; ++x) { const int64 xs_lower = xs.lower[x]; const int64 xs_upper = xs.upper[x]; const float xs_lerp = xs.lerp[x]; for (int c = 0; c < channels; ++c) { const T top_left = ys_input_lower_ptr[xs_lower + c]; const T top_right = ys_input_lower_ptr[xs_upper + c]; const T bottom_left = ys_input_upper_ptr[xs_lower + c]; const T bottom_right = ys_input_upper_ptr[xs_upper + c]; const T val = ComputeLerpReference<T>( top_left, top_right, bottom_left, bottom_right, xs_lerp, ys_lerp, in_min, in_max); output_y_ptr[x * channels + c] = val; } } output_y_ptr += out_row_size; } input_b_ptr += in_batch_num_values; } } template <typename T> void ResizeImage(typename TTypes<T, 4>::ConstTensor images, const int batch_size, const int64 in_height, const int64 in_width, const int64 out_height, const int64 out_width, const int channels, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<T, 4>::Tensor* output) { ResizeImageReference<T>(images, batch_size, in_height, in_width, out_height, out_width, channels, height_scale, width_scale, in_min, in_max, half_pixel_centers, output); } template <> void ResizeImage<qint32>(typename 
TTypes<qint32, 4>::ConstTensor images, const int batch_size, const int64 in_height, const int64 in_width, const int64 out_height, const int64 out_width, const int channels, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<qint32, 4>::Tensor* output) { // 30 is maximum resolution for signed int. constexpr int RESOLUTION = 30; constexpr int SIMD_STEP = 4; CHECK_NOTNULL(output); const InterpolationCache<int32> xs = BuildLerpCache<int32>(out_width, in_width, width_scale, channels, RESOLUTION, half_pixel_centers); const InterpolationCache<int32> ys = BuildLerpCache<int32>( out_height, in_height, height_scale, 1, RESOLUTION, half_pixel_centers); const int64 in_row_size = in_width * channels; const int64 in_batch_num_values = in_height * in_row_size; const int64 out_row_size = out_width * channels; const qint32* input_b_ptr = images.data(); qint32* output_y_ptr = output->data(); for (int b = 0; b < batch_size; ++b) { for (int64 y = 0; y < out_height; ++y) { const qint32* ys_input_lower_ptr = input_b_ptr + ys.lower[y] * in_row_size; const qint32* ys_input_upper_ptr = input_b_ptr + ys.upper[y] * in_row_size; const int32 ys_ilerp = ys.ilerp[y]; // Optimized for channels == 1 or channels == 3 as this // is typical channels. int64 x = 0; if (channels == 1) { for (; x < out_width - SIMD_STEP + 1; x += SIMD_STEP) { OutputLerp32x4x1<RESOLUTION>(xs, x, ys_ilerp, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } } else if (channels == 3) { for (; x < out_width - SIMD_STEP + 1; x += SIMD_STEP) { OutputLerp32x4x3<RESOLUTION>(xs, x, ys_ilerp, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } } for (; x < out_width; ++x) { OutputLerpForChannels<RESOLUTION, qint32, int32, int64>( xs, x, ys_ilerp, channels, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } output_y_ptr += out_row_size; } input_b_ptr += in_batch_num_values; } } template <> void ResizeImage<quint8>(typename TTypes<quint8, 4>::ConstTensor images, const int batch_size, const int64 in_height, const int64 in_width, const int64 out_height, const int64 out_width, const int channels, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<quint8, 4>::Tensor* output) { // 7 is maximum resolution for unsigned byte. constexpr int RESOLUTION = 7; constexpr int SIMD_STEP = 8; CHECK_NOTNULL(output); const InterpolationCache<int16> xs = BuildLerpCache<int16>(out_width, in_width, width_scale, channels, RESOLUTION, half_pixel_centers); const InterpolationCache<int16> ys = BuildLerpCache<int16>( out_height, in_height, height_scale, 1, RESOLUTION, half_pixel_centers); const int64 in_row_size = in_width * channels; const int64 in_batch_num_values = in_height * in_row_size; const int64 out_row_size = out_width * channels; const quint8* input_b_ptr = images.data(); quint8* output_y_ptr = output->data(); for (int b = 0; b < batch_size; ++b) { for (int64 y = 0; y < out_height; ++y) { const quint8* ys_input_lower_ptr = input_b_ptr + ys.lower[y] * in_row_size; const quint8* ys_input_upper_ptr = input_b_ptr + ys.upper[y] * in_row_size; const int32 ys_ilerp = ys.ilerp[y]; // Optimized for channels == 1 or channels == 3 as this // is typical channels. // TODO(satok): Support more generic NEON optimized implementation // for different channels. 
int64 x = 0; if (channels == 1) { for (; x < out_width - SIMD_STEP + 1; x += SIMD_STEP) { OutputLerp8x8x1<RESOLUTION>(xs, x, ys_ilerp, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } } else if (channels == 3) { for (; x < out_width - SIMD_STEP + 1; x += SIMD_STEP) { OutputLerp8x8x3<RESOLUTION>(xs, x, ys_ilerp, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } } for (; x < out_width; ++x) { OutputLerpForChannels<RESOLUTION, quint8, int16, int16>( xs, x, ys_ilerp, channels, in_min, in_max, ys_input_lower_ptr, ys_input_upper_ptr, output_y_ptr); } output_y_ptr += out_row_size; } input_b_ptr += in_batch_num_values; } } template <typename T> void ResizeBilinear(const typename TTypes<T, 4>::ConstTensor& images, const float height_scale, const float width_scale, const float in_min, const float in_max, const bool half_pixel_centers, typename TTypes<T, 4>::Tensor* output) { CHECK_NOTNULL(output); const int batch_size = images.dimension(0); const int64 in_height = images.dimension(1); const int64 in_width = images.dimension(2); const int channels = images.dimension(3); const int64 out_height = output->dimension(1); const int64 out_width = output->dimension(2); // Handle no-op resizes efficiently. if (out_height == in_height && out_width == in_width) { *output = images.template cast<T>(); return; } if (USE_REFERENCE) { ResizeImageReference<T>(images, batch_size, in_height, in_width, out_height, out_width, channels, height_scale, width_scale, in_min, in_max, half_pixel_centers, output); } else { ResizeImage<T>(images, batch_size, in_height, in_width, out_height, out_width, channels, height_scale, width_scale, in_min, in_max, half_pixel_centers, output); } } } // namespace template <class T> class QuantizedResizeBilinearOp : public OpKernel { public: explicit QuantizedResizeBilinearOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_)); OP_REQUIRES_OK( context, context->GetAttr("half_pixel_centers", &half_pixel_centers_)); } void Compute(OpKernelContext* context) override { const auto& in_min_tensor = context->input(2); OP_REQUIRES(context, TensorShapeUtils::IsScalar(in_min_tensor.shape()), errors::InvalidArgument("min must be a scalar")); const float in_min = in_min_tensor.flat<float>()(0); const auto& in_max_tensor = context->input(3); OP_REQUIRES(context, TensorShapeUtils::IsScalar(in_max_tensor.shape()), errors::InvalidArgument("max must be a scalar")); const float in_max = in_max_tensor.flat<float>()(0); ImageResizerState st(align_corners_, false); st.ValidateAndCreateOutput(context); if (!context->status().ok()) return; // Return if the output is empty. 
if (st.output->NumElements() == 0) return; typename TTypes<T, 4>::ConstTensor image_data( context->input(0).tensor<T, 4>()); typename TTypes<T, 4>::Tensor output_data(st.output->tensor<T, 4>()); ResizeBilinear<T>(image_data, st.height_scale, st.width_scale, in_min, in_max, half_pixel_centers_, &output_data); Tensor* out_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &out_min)); out_min->flat<float>()(0) = in_min; Tensor* out_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &out_max)); out_max->flat<float>()(0) = in_max; } private: bool align_corners_; bool half_pixel_centers_; TF_DISALLOW_COPY_AND_ASSIGN(QuantizedResizeBilinearOp<T>); }; #define REGISTER_CPU_KERNEL(type) \ REGISTER_KERNEL_BUILDER(Name("QuantizedResizeBilinear") \ .Device(DEVICE_CPU) \ .HostMemory("size") \ .TypeConstraint<type>("T"), \ QuantizedResizeBilinearOp<type>) REGISTER_CPU_KERNEL(::tensorflow::quint8); REGISTER_CPU_KERNEL(::tensorflow::qint32); REGISTER_CPU_KERNEL(float); } // namespace tensorflow
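Relative to the first copy of this kernel earlier in the row, the copy above additionally checks that the min/max tensors (inputs 2 and 3) are scalars before reading element 0 in Compute(). A minimal sketch of that defensive pattern, factored into a helper; the helper name is hypothetical and not a TensorFlow API, while OP-context calls (input, SetStatus, TensorShapeUtils::IsScalar, errors::InvalidArgument) are the ones already used in the file above.

// scalar_input_guard_sketch.cc -- illustrative only.
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/errors.h"

namespace tensorflow {

// Returns the scalar float value of kernel input `index`; if the input has any
// other shape, records an InvalidArgument status on the context and returns
// `fallback` so the caller can bail out after checking context->status().
inline float GetScalarInputOrError(OpKernelContext* context, int index,
                                   float fallback = 0.0f) {
  const Tensor& t = context->input(index);
  if (!TensorShapeUtils::IsScalar(t.shape())) {
    context->SetStatus(errors::InvalidArgument(
        "input ", index, " must be a scalar, got shape ",
        t.shape().DebugString()));
    return fallback;
  }
  return t.flat<float>()(0);
}

}  // namespace tensorflow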
null
252
CWE-787
CVE-2021-29540
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // See docs in ../ops/nn_ops.cc. #define USE_EIGEN_TENSOR #define EIGEN_USE_THREADS #include <algorithm> #include <vector> #include "tensorflow/core/framework/kernel_shape_util.h" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_slice.h" #include "tensorflow/core/kernels/conv_2d.h" #include "tensorflow/core/kernels/conv_grad_ops.h" #include "tensorflow/core/kernels/conv_grad_shape_utils.h" #include "tensorflow/core/kernels/fill_functor.h" #ifdef TENSORFLOW_USE_LIBXSMM_CONVOLUTIONS #include "tensorflow/core/kernels/xsmm_conv2d.h" #endif #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/array_slice.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/tensor_format.h" #include "tensorflow/core/util/use_cudnn.h" #include "tensorflow/core/util/work_sharder.h" #if defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL) #include "tensorflow/core/kernels/eigen_contraction_kernel.h" #endif #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #include "tensorflow/core/kernels/conv_ops_gpu.h" #include "tensorflow/core/platform/stream_executor.h" #include "tensorflow/core/protobuf/autotuning.pb.h" #include "tensorflow/core/util/proto/proto_utils.h" #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM #if GOOGLE_CUDA #include "tensorflow/stream_executor/gpu/gpu_asm_opts.h" #include "tensorflow/stream_executor/gpu/redzone_allocator.h" #include "tensorflow/stream_executor/tf_allocator_adapter.h" #endif // GOOGLE_CUDA namespace { // Returns in 'col_data', image patches in storage order (height, width, depth) // extracted from image at 'input_data', which is required to be in storage // order (batch, height, width, depth). // Implementation written by Yangqing Jia (jiayq). template <typename T> void Im2col(const T* input_data, const int depth, const int height, const int width, const int filter_h, const int filter_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, T* col_data) { int height_col = (height + pad_t + pad_b - filter_h) / stride_h + 1; int width_col = (width + pad_l + pad_r - filter_w) / stride_w + 1; int h_pad = -pad_t; for (int h = 0; h < height_col; ++h) { int w_pad = -pad_l; for (int w = 0; w < width_col; ++w) { for (int ih = h_pad; ih < h_pad + filter_h; ++ih) { for (int iw = w_pad; iw < w_pad + filter_w; ++iw) { if (ih >= 0 && ih < height && iw >= 0 && iw < width) { memcpy(col_data, input_data + (ih * width + iw) * depth, sizeof(T) * depth); } else { // This should be simply padded with zero. 
memset(col_data, 0, sizeof(T) * depth); } col_data += depth; } } w_pad += stride_w; } h_pad += stride_h; } } } // namespace namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; template <typename T> struct LaunchConv2DBackpropFilterOp<CPUDevice, T> { void operator()(OpKernelContext* ctx, bool use_cudnn, bool cudnn_use_autotune, const Tensor& out_backprop, const Tensor& input, int row_dilation, int col_dilation, int row_stride, int col_stride, const Padding& padding, const std::vector<int64>& explicit_paddings, Tensor* filter_backprop, TensorFormat data_format) { std::vector<int32> dilations(4, 1); dilations[GetTensorDimIndex(data_format, 'H')] = row_dilation; dilations[GetTensorDimIndex(data_format, 'W')] = col_dilation; std::vector<int32> strides(4, 1); strides[GetTensorDimIndex(data_format, 'H')] = row_stride; strides[GetTensorDimIndex(data_format, 'W')] = col_stride; TensorShape filter_shape = filter_backprop->shape(); ConvBackpropDimensions dims; OP_REQUIRES_OK( ctx, ConvBackpropComputeDimensionsV2( "Conv2DBackpropFilter", /*num_spatial_dims=*/2, input.shape(), filter_shape, out_backprop.shape(), dilations, strides, padding, explicit_paddings, data_format, &dims)); int64 padding_top = -1, padding_bottom = -1; int64 padding_left = -1, padding_right = -1; if (padding == EXPLICIT) { GetExplicitPaddingForDim(explicit_paddings, data_format, 'H', &padding_top, &padding_bottom); GetExplicitPaddingForDim(explicit_paddings, data_format, 'W', &padding_left, &padding_right); } int64 expected_out_rows, expected_out_cols; // The function is guaranteed to succeed because we checked the output and // padding was valid earlier. TF_CHECK_OK(GetWindowedOutputSizeVerboseV2( dims.spatial_dims[0].input_size, dims.spatial_dims[0].filter_size, row_dilation, row_stride, padding, &expected_out_rows, &padding_top, &padding_bottom)); DCHECK_EQ(dims.spatial_dims[0].output_size, expected_out_rows); TF_CHECK_OK(GetWindowedOutputSizeVerboseV2( dims.spatial_dims[1].input_size, dims.spatial_dims[1].filter_size, col_dilation, col_stride, padding, &expected_out_cols, &padding_left, &padding_right)); DCHECK_EQ(dims.spatial_dims[1].output_size, expected_out_cols); const CPUDevice& d = ctx->eigen_device<CPUDevice>(); // WARNING: Need to swap row/col, padding_top/padding_left, and // padding_bottom/padding_right when calling Eigen. Eigen expects tensors // in NWHC format, but Tensorflow uses NHWC. auto filter_backprop_t = filter_backprop->tensor<T, 4>(); auto input_t = input.tensor<T, 4>(); auto out_backprop_t = out_backprop.tensor<T, 4>(); if (padding != EXPLICIT) { // If padding was not explicitly defined, Eigen spatial convolution // backward filter will infer correct forward paddings from input tensors. filter_backprop_t.device(d) = Eigen::SpatialConvolutionBackwardKernel( input_t, out_backprop_t, filter_backprop_t.dimension(1), filter_backprop_t.dimension(0), col_stride, row_stride, col_dilation, row_dilation); } else { // Otherwise we have to explicitly pad the input, before passing it to // spatial convolution backward filter. Eigen::array<std::pair<int, int>, 4> paddings; paddings[0] = {0, 0}; paddings[1] = {padding_top, padding_bottom}; paddings[2] = {padding_left, padding_right}; paddings[3] = {0, 0}; auto padded_t = input_t.pad(paddings, T(0)); // TODO(ezhulenev): Pass explicit paddings to Eigen spatial backward // convolution and do not rely on tensor padding expression. 
filter_backprop_t.device(d) = Eigen::SpatialConvolutionBackwardKernel( padded_t, out_backprop_t, filter_backprop_t.dimension(1), filter_backprop_t.dimension(0), col_stride, row_stride, col_dilation, row_dilation); } } }; #ifdef TENSORFLOW_USE_LIBXSMM_CONVOLUTIONS template <typename Device, class T> struct LaunchXsmmBackwardFilter { bool operator()(OpKernelContext* context, const Device& d, typename TTypes<T, 4>::ConstTensor input_backward, typename TTypes<T, 4>::Tensor kernel, typename TTypes<T, 4>::ConstTensor output_backward, int input_rows, int input_cols, int row_stride, int col_stride, int pad_h, int pad_w, TensorFormat data_format) const { return false; } }; template <> struct LaunchXsmmBackwardFilter<CPUDevice, float> { bool operator()(OpKernelContext* context, const CPUDevice& d, typename TTypes<float, 4>::ConstTensor input, typename TTypes<float, 4>::Tensor filter, typename TTypes<float, 4>::ConstTensor output, int input_rows, int input_cols, int row_stride, int col_stride, int pad_h, int pad_w, TensorFormat data_format) const { auto batch = input.dimension(0); auto in_depth = input.dimension(3); auto out_depth = output.dimension(3); auto filter_rows = filter.dimension(0); auto filter_cols = filter.dimension(1); auto num_threads = context->device()->tensorflow_cpu_worker_threads()->num_threads; // See libxsmm_dnn.h for this struct definition. libxsmm_dnn_conv_desc desc; desc.N = batch; desc.C = in_depth; desc.H = input_rows; desc.W = input_cols; desc.K = out_depth; desc.R = filter_rows; desc.S = filter_cols; desc.u = row_stride; desc.v = col_stride; desc.pad_h = pad_h; desc.pad_w = pad_w; desc.pad_h_in = 0; // pad_rows; // ignored by libxsmm for now. desc.pad_w_in = 0; // pad_cols; // ignored by libxsmm for now. desc.pad_h_out = 0; desc.pad_w_out = 0; desc.threads = num_threads; desc.algo = LIBXSMM_DNN_CONV_ALGO_DIRECT; desc.buffer_format = LIBXSMM_DNN_TENSOR_FORMAT_NHWC; desc.filter_format = LIBXSMM_DNN_TENSOR_FORMAT_RSCK; desc.fuse_ops = LIBXSMM_DNN_CONV_FUSE_NONE; desc.options = LIBXSMM_DNN_CONV_OPTION_NONE; desc.datatype_out = LIBXSMM_DNN_DATATYPE_F32; desc.datatype_in = LIBXSMM_DNN_DATATYPE_F32; if (!CanUseXsmmConv2D(desc, data_format)) { return false; } auto input_ptr = input.data(); auto filter_ptr = filter.data(); auto output_ptr = output.data(); bool success = functor::XsmmBkwFilterConv2D<CPUDevice, float>()( context, desc, input_ptr, filter_ptr, output_ptr); return success; } }; #endif template <typename Device, class T> class Conv2DBackpropFilterOp : public OpKernel { public: explicit Conv2DBackpropFilterOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES_OK(context, context->GetAttr("strides", &strides_)); int stride_n = GetTensorDim(strides_, data_format_, 'N'); int stride_c = GetTensorDim(strides_, data_format_, 'C'); int stride_h = GetTensorDim(strides_, data_format_, 'H'); int stride_w = GetTensorDim(strides_, data_format_, 'W'); OP_REQUIRES( context, (stride_n == 1 && stride_c == 1), errors::InvalidArgument("Current implementation does not yet support " "strides in the batch and depth dimensions.")); OP_REQUIRES(context, stride_h > 0 && stride_w > 0, errors::InvalidArgument( "Row and column strides should be larger than 0.")); OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilations_)); OP_REQUIRES(context, dilations_.size() == 4, 
errors::InvalidArgument("Sliding window dilations field must " "specify 4 dimensions")); int dilation_n = GetTensorDim(dilations_, data_format_, 'N'); int dilation_c = GetTensorDim(dilations_, data_format_, 'C'); int dilation_h = GetTensorDim(dilations_, data_format_, 'H'); int dilation_w = GetTensorDim(dilations_, data_format_, 'W'); OP_REQUIRES(context, dilation_n == 1 && dilation_c == 1, errors::InvalidArgument( "Current implementation does not yet support " "dilations in the batch and depth dimensions.")); OP_REQUIRES( context, dilation_h > 0 && dilation_w > 0, errors::InvalidArgument("Dilated rates should be larger than 0.")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES_OK(context, context->GetAttr("explicit_paddings", &explicit_paddings_)); OP_REQUIRES_OK(context, CheckValidPadding(padding_, explicit_paddings_, /*num_dims=*/4, data_format_)); OP_REQUIRES_OK(context, context->GetAttr("use_cudnn_on_gpu", &use_cudnn_)); cudnn_use_autotune_ = CudnnUseAutotune(); if (std::is_same<Device, CPUDevice>::value) { OP_REQUIRES(context, data_format_ == FORMAT_NHWC, errors::InvalidArgument("Conv2DBackpropFilterOp [CPU] " "only supports NHWC data format.")); // TODO(yangzihao): Add a CPU implementation for dilated convolution. OP_REQUIRES( context, (dilation_h == 1 && dilation_w == 1), errors::InvalidArgument("Conv2DBackpropFilterOp [CPU] not yet " "support dilation rates larger than 1.")); } } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const Tensor& filter_sizes = context->input(1); const Tensor& out_backprop = context->input(2); OP_REQUIRES( context, TensorShapeUtils::IsVector(filter_sizes.shape()), errors::InvalidArgument( "Conv2DBackpropFilter: filter_sizes input must be 1-dim, not ", filter_sizes.dims())); TensorShape filter_shape; OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape( filter_sizes.vec<int32>(), &filter_shape)); Tensor* filter_backprop = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, filter_shape, &filter_backprop)); // If there is nothing to compute, return. if (filter_shape.num_elements() == 0) { return; } // If input is empty, set gradients to zero. if (input.shape().num_elements() == 0) { functor::SetZeroFunctor<Device, T> f; f(context->eigen_device<Device>(), filter_backprop->flat<T>()); return; } // For now we take the stride from the second and third dimensions only (we // do not support striding on the batch or depth dimension). 
const int stride_rows = GetTensorDim(strides_, data_format_, 'H'); const int stride_cols = GetTensorDim(strides_, data_format_, 'W'); const int dilation_rows = GetTensorDim(dilations_, data_format_, 'H'); const int dilation_cols = GetTensorDim(dilations_, data_format_, 'W'); VLOG(2) << "Conv2DBackpropFilter:" << " input: " << input.shape().DebugString() << " filter:" << filter_shape.DebugString() << " out_backprop: " << out_backprop.shape().DebugString() << " strides: [" << stride_rows << ", " << stride_cols << "]" << " dilations: [" << dilation_rows << ", " << dilation_cols << "]"; launcher_(context, use_cudnn_, cudnn_use_autotune_, out_backprop, input, dilation_rows, dilation_cols, stride_rows, stride_cols, padding_, explicit_paddings_, filter_backprop, data_format_); } private: std::vector<int32> dilations_; std::vector<int32> strides_; Padding padding_; std::vector<int64> explicit_paddings_; bool use_cudnn_; TensorFormat data_format_; LaunchConv2DBackpropFilterOp<Device, T> launcher_; bool cudnn_use_autotune_; TF_DISALLOW_COPY_AND_ASSIGN(Conv2DBackpropFilterOp); }; // Based on implementation written by Yangqing Jia (jiayq). template <typename Device, class T> class Conv2DCustomBackpropFilterOp : public OpKernel { public: explicit Conv2DCustomBackpropFilterOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES(context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Conv2DCustomBackpropFilterOp only supports NHWC.")); OP_REQUIRES_OK(context, context->GetAttr("strides", &strides_)); OP_REQUIRES(context, strides_.size() == 4, errors::InvalidArgument("Sliding window strides field must " "specify 4 dimensions")); OP_REQUIRES( context, (strides_[0] == 1 && strides_[3] == 1), errors::InvalidArgument("Current implementation does not yet support " "strides in the batch and depth dimensions.")); OP_REQUIRES(context, strides_[1] > 0 && strides_[2] > 0, errors::InvalidArgument( "Row and column strides should be larger than 0.")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES_OK(context, context->GetAttr("explicit_paddings", &explicit_paddings_)); OP_REQUIRES_OK(context, CheckValidPadding(padding_, explicit_paddings_, /*num_dims=*/4, data_format_)); OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilations_)); OP_REQUIRES(context, dilations_.size() == 4, errors::InvalidArgument("Sliding window dilations field must " "specify 4 dimensions")); OP_REQUIRES(context, (dilations_[0] == 1 && dilations_[3] == 1), errors::InvalidArgument( "Current implementation does not yet support " "dilations in the batch and depth dimensions.")); if (std::is_same<Device, CPUDevice>::value || std::is_same<Device, GPUDevice>::value) { // TODO(yangzihao): Add a CPU implementation for dilated convolution. 
OP_REQUIRES(context, (dilations_[1] == 1 && dilations_[2] == 1), errors::InvalidArgument( "Current libxsmm and customized CPU implementations do " "not yet support dilation rates larger than 1.")); dilations_ = {1, 1, 1, 1}; } } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const Tensor& filter_sizes = context->input(1); const Tensor& out_backprop = context->input(2); OP_REQUIRES( context, TensorShapeUtils::IsVector(filter_sizes.shape()), errors::InvalidArgument( "Conv2DCustomBackpropFilter: filter_sizes input must be 1-dim, " "not ", filter_sizes.dims())); TensorShape filter_shape; OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape( filter_sizes.vec<int32>(), &filter_shape)); ConvBackpropDimensions dims; OP_REQUIRES_OK( context, ConvBackpropComputeDimensionsV2( "Conv2DCustomBackpropFilter", /*num_spatial_dims=*/2, input.shape(), filter_shape, out_backprop.shape(), dilations_, strides_, padding_, explicit_paddings_, data_format_, &dims)); Tensor* filter_backprop; OP_REQUIRES_OK(context, context->allocate_output(0, filter_shape, &filter_backprop)); // If there is nothing to compute, return. if (filter_shape.num_elements() == 0) { return; } int64 pad_top, pad_bottom; int64 pad_left, pad_right; if (padding_ == Padding::EXPLICIT) { pad_top = explicit_paddings_[2]; pad_bottom = explicit_paddings_[3]; pad_left = explicit_paddings_[4]; pad_right = explicit_paddings_[5]; } OP_REQUIRES_OK( context, GetWindowedOutputSizeVerbose( dims.spatial_dims[0].input_size, dims.spatial_dims[0].filter_size, dims.spatial_dims[0].stride, padding_, &dims.spatial_dims[0].output_size, &pad_top, &pad_bottom)); OP_REQUIRES_OK( context, GetWindowedOutputSizeVerbose( dims.spatial_dims[1].input_size, dims.spatial_dims[1].filter_size, dims.spatial_dims[1].stride, padding_, &dims.spatial_dims[1].output_size, &pad_left, &pad_right)); #if defined TENSORFLOW_USE_LIBXSMM_CONVOLUTIONS && \ defined TENSORFLOW_USE_LIBXSMM_BACKWARD_CONVOLUTIONS if (pad_left == pad_right && pad_top == pad_bottom) { if (LaunchXsmmBackwardFilter<Device, T>()( context, context->eigen_device<Device>(), input.tensor<T, 4>(), filter_backprop->tensor<T, 4>(), out_backprop.tensor<T, 4>(), dims.spatial_dims[0].input_size, dims.spatial_dims[1].input_size, static_cast<int>(dims.spatial_dims[0].stride), static_cast<int>(dims.spatial_dims[1].stride), static_cast<int>(pad_top), static_cast<int>(pad_left), data_format_)) { return; } } #endif // The total dimension size of each kernel. const int filter_total_size = dims.spatial_dims[0].filter_size * dims.spatial_dims[1].filter_size * dims.in_depth; // The output image size is the spatial size of the output. const int output_image_size = dims.spatial_dims[0].output_size * dims.spatial_dims[1].output_size; // Shard 'batch' images into 'shard_size' groups of images to be fed // into the parallel matmul. Calculate 'shard_size' by dividing the L3 cache // size ('target_working_set_size') by the matmul size of an individual // image ('work_unit_size'). // TODO(andydavis) // *) Get L3 cache size from device at runtime (30MB is from ivybridge). // *) Consider reducing 'target_working_set_size' if L3 is shared by // other concurrently running tensorflow ops. 
const size_t target_working_set_size = (30LL << 20) / sizeof(T); const size_t size_A = output_image_size * filter_total_size; const size_t size_B = output_image_size * dims.out_depth; const size_t size_C = filter_total_size * dims.out_depth; const size_t work_unit_size = size_A + size_B + size_C; const size_t shard_size = (target_working_set_size + work_unit_size - 1) / work_unit_size; Tensor col_buffer; OP_REQUIRES_OK(context, context->allocate_temp( DataTypeToEnum<T>::value, TensorShape({static_cast<int64>(shard_size), static_cast<int64>(output_image_size), static_cast<int64>(filter_total_size)}), &col_buffer)); // The input offset corresponding to a single input image. const int input_offset = dims.spatial_dims[0].input_size * dims.spatial_dims[1].input_size * dims.in_depth; // The output offset corresponding to a single output image. const int output_offset = dims.spatial_dims[0].output_size * dims.spatial_dims[1].output_size * dims.out_depth; const T* input_data = input.template flat<T>().data(); T* col_buffer_data = col_buffer.template flat<T>().data(); const T* out_backprop_data = out_backprop.template flat<T>().data(); T* filter_backprop_data = filter_backprop->template flat<T>().data(); typedef Eigen::TensorMap<Eigen::Tensor<T, 2, Eigen::RowMajor>, Eigen::Unaligned> TensorMap; typedef Eigen::TensorMap<Eigen::Tensor<const T, 2, Eigen::RowMajor>, Eigen::Unaligned> ConstTensorMap; TensorMap C(filter_backprop_data, filter_total_size, dims.out_depth); C.setZero(); // Initialize contraction dims (we need to transpose 'A' below). Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> contract_dims; contract_dims[0].first = 0; contract_dims[0].second = 0; auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); for (int image_id = 0; image_id < dims.batch_size; image_id += shard_size) { const int shard_limit = std::min(static_cast<int>(shard_size), static_cast<int>(dims.batch_size) - image_id); auto shard = [&input_data, &col_buffer_data, &dims, &pad_top, &pad_left, &pad_bottom, &pad_right, &input_offset, &size_A](int64 start, int64 limit) { for (int shard_id = start; shard_id < limit; ++shard_id) { const T* input_data_shard = input_data + shard_id * input_offset; T* col_data_shard = col_buffer_data + shard_id * size_A; // When we compute the gradient with respect to the filters, we need // to do im2col to allow gemm-type computation. Im2col<T>( input_data_shard, dims.in_depth, dims.spatial_dims[0].input_size, dims.spatial_dims[1].input_size, dims.spatial_dims[0].filter_size, dims.spatial_dims[1].filter_size, pad_top, pad_left, pad_bottom, pad_right, dims.spatial_dims[0].stride, dims.spatial_dims[1].stride, col_data_shard); } }; Shard(worker_threads.num_threads, worker_threads.workers, shard_limit, size_A, shard); ConstTensorMap A(col_buffer_data, output_image_size * shard_limit, filter_total_size); ConstTensorMap B(out_backprop_data, output_image_size * shard_limit, dims.out_depth); // Gradient with respect to filter. 
C.device(context->eigen_cpu_device()) += A.contract(B, contract_dims); input_data += input_offset * shard_limit; out_backprop_data += output_offset * shard_limit; } } private: std::vector<int32> dilations_; std::vector<int32> strides_; Padding padding_; std::vector<int64> explicit_paddings_; TensorFormat data_format_; TF_DISALLOW_COPY_AND_ASSIGN(Conv2DCustomBackpropFilterOp); }; #define REGISTER_CPU_KERNELS(T) \ REGISTER_KERNEL_BUILDER( \ Name("Conv2DBackpropFilter").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ Conv2DCustomBackpropFilterOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv2DBackpropFilter") \ .Device(DEVICE_CPU) \ .Label("custom") \ .TypeConstraint<T>("T") \ .AttrConstraint("data_format", "NHWC"), \ Conv2DCustomBackpropFilterOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv2DBackpropFilter") \ .Device(DEVICE_CPU) \ .Label("eigen_tensor") \ .TypeConstraint<T>("T") \ .AttrConstraint("data_format", "NHWC"), \ Conv2DBackpropFilterOp<CPUDevice, T>); TF_CALL_half(REGISTER_CPU_KERNELS); TF_CALL_float(REGISTER_CPU_KERNELS); TF_CALL_double(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS // To be used inside depthwise_conv_grad_op.cc. template struct LaunchConv2DBackpropFilterOp<CPUDevice, Eigen::half>; template struct LaunchConv2DBackpropFilterOp<CPUDevice, float>; template struct LaunchConv2DBackpropFilterOp<CPUDevice, double>; // GPU definitions. #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM // The slow version (but compiles for GPU) // A dummy type to group forward backward filter autotune results together. struct ConvBackwardFilterAutoTuneGroup { static string name() { return "ConvBwdFilter"; } }; typedef AutoTuneSingleton<ConvBackwardFilterAutoTuneGroup, ConvParameters, se::dnn::AlgorithmConfig> AutoTuneConvBwdFilter; template <typename T> void LaunchConv2DBackpropFilterOp<Eigen::GpuDevice, T>::operator()( OpKernelContext* ctx, bool use_cudnn, bool cudnn_use_autotune, const Tensor& out_backprop, const Tensor& input, int row_dilation, int col_dilation, int row_stride, int col_stride, const Padding& padding, const std::vector<int64>& explicit_paddings, Tensor* filter_backprop, TensorFormat data_format) { using se::dnn::AlgorithmConfig; using se::dnn::AlgorithmDesc; using se::dnn::ProfileResult; std::vector<int32> dilations(4, 1); dilations[GetTensorDimIndex(data_format, 'H')] = row_dilation; dilations[GetTensorDimIndex(data_format, 'W')] = col_dilation; std::vector<int32> strides(4, 1); strides[GetTensorDimIndex(data_format, 'H')] = row_stride; strides[GetTensorDimIndex(data_format, 'W')] = col_stride; TensorShape filter_shape = filter_backprop->shape(); ConvBackpropDimensions dims; OP_REQUIRES_OK( ctx, ConvBackpropComputeDimensionsV2( "Conv2DBackpropFilter", /*num_spatial_dims=*/2, input.shape(), filter_shape, out_backprop.shape(), dilations, strides, padding, explicit_paddings, data_format, &dims)); int64 padding_top = -1, padding_bottom = -1; int64 padding_left = -1, padding_right = -1; if (padding == EXPLICIT) { GetExplicitPaddingForDim(explicit_paddings, data_format, 'H', &padding_top, &padding_bottom); GetExplicitPaddingForDim(explicit_paddings, data_format, 'W', &padding_left, &padding_right); } int64 expected_out_rows, expected_out_cols; // The function is guaranteed to succeed because we checked the output and // padding was valid earlier. 
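  // For reference, the windowed output size re-derived below follows the usual
  // TensorFlow convention: with effective_filter = (filter - 1) * dilation + 1,
  // VALID padding gives out = ceil((in - effective_filter + 1) / stride) and
  // SAME padding gives out = ceil(in / stride).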
TF_CHECK_OK(GetWindowedOutputSizeVerboseV2( dims.spatial_dims[0].input_size, dims.spatial_dims[0].filter_size, row_dilation, row_stride, padding, &expected_out_rows, &padding_top, &padding_bottom)); DCHECK_EQ(dims.spatial_dims[0].output_size, expected_out_rows); TF_CHECK_OK(GetWindowedOutputSizeVerboseV2( dims.spatial_dims[1].input_size, dims.spatial_dims[1].filter_size, col_dilation, col_stride, padding, &expected_out_cols, &padding_left, &padding_right)); DCHECK_EQ(dims.spatial_dims[1].output_size, expected_out_cols); auto* stream = ctx->op_device_context()->stream(); OP_REQUIRES(ctx, stream, errors::Internal("No GPU stream available.")); if (!use_cudnn) { ctx->SetStatus(errors::Unimplemented( "Conv2DBackprop for GPU is not currently supported " "without cudnn")); return; } // If the filter in-depth (filter_shape.dim_size(2)) is 1 and smaller than the // input depth, it's a depthwise convolution. More generally, if the filter // in-depth divides but is smaller than the input depth, it is a grouped // convolution. bool is_grouped_convolution = filter_shape.dim_size(2) != dims.in_depth; bool cudnn_disable_conv_1x1_optimization_ = CudnnDisableConv1x1Optimization(); if (!cudnn_disable_conv_1x1_optimization_ && dims.spatial_dims[0].filter_size == 1 && dims.spatial_dims[1].filter_size == 1 && !is_grouped_convolution && dims.spatial_dims[0].stride == 1 && dims.spatial_dims[1].stride == 1 && data_format == FORMAT_NHWC && (padding == VALID || padding == SAME)) { const uint64 m = dims.in_depth; const uint64 k = dims.batch_size * dims.spatial_dims[0].input_size * dims.spatial_dims[1].input_size; const uint64 n = dims.out_depth; // The shape of output backprop is // [batch, out_rows, out_cols, out_depth] // From cublas's perspective, it is: n x k auto a_ptr = AsDeviceMemory(out_backprop.template flat<T>().data(), out_backprop.template flat<T>().size()); // The shape of input is // [batch, in_rows, in_cols, in_depth], // From cublas's perspective, it is: m x k auto b_ptr = AsDeviceMemory(input.template flat<T>().data(), input.template flat<T>().size()); // the shape of the filter backprop from the conv_2d should be // [1, 1, in_depth, out_depth] // From cublas's perspective, it is: n x m auto c_ptr = AsDeviceMemory(filter_backprop->template flat<T>().data(), filter_backprop->template flat<T>().size()); bool blas_launch_status = stream ->ThenBlasGemm(se::blas::Transpose::kNoTranspose, se::blas::Transpose::kTranspose, n, m, k, 1.0f, a_ptr, n, b_ptr, m, 0.0f, &c_ptr, n) .ok(); if (!blas_launch_status) { ctx->SetStatus(errors::Internal("Blas SGEMM launch failed : m=", m, ", n=", n, ", k=", k)); } return; } else if (dims.spatial_dims[0].filter_size == dims.spatial_dims[0].input_size && dims.spatial_dims[1].filter_size == dims.spatial_dims[1].input_size && !is_grouped_convolution && padding == VALID && data_format == FORMAT_NHWC) { // The input data and filter have the same height/width, and we are not // using grouped convolution, so call cublas directly. 
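    // In this branch each input image collapses to a single output pixel, so
    // the filter gradient reduces to one GEMM over the batch dimension:
    // filter_backprop[m, n] = sum over the batch (k) of input[k, m] *
    // out_backprop[k, n], with m = in_rows * in_cols * in_depth and
    // n = out_depth.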
const uint64 m = dims.spatial_dims[0].input_size * dims.spatial_dims[1].input_size * dims.in_depth; const uint64 k = dims.batch_size; const uint64 n = dims.out_depth; auto a_ptr = AsDeviceMemory(input.template flat<T>().data(), input.template flat<T>().size()); auto b_ptr = AsDeviceMemory(out_backprop.template flat<T>().data(), out_backprop.template flat<T>().size()); auto c_ptr = AsDeviceMemory(filter_backprop->template flat<T>().data(), filter_backprop->template flat<T>().size()); bool blas_launch_status = stream ->ThenBlasGemm(se::blas::Transpose::kNoTranspose, se::blas::Transpose::kTranspose, n, m, k, 1.0f, b_ptr, n, a_ptr, m, 0.0f, &c_ptr, n) .ok(); if (!blas_launch_status) { ctx->SetStatus(errors::Internal("Blas SGEMM launch failed : m=", m, ", n=", n, ", k=", k)); } return; } const int64 common_padding_rows = std::min(padding_top, padding_bottom); const int64 common_padding_cols = std::min(padding_left, padding_right); Tensor compatible_input; if (padding_top != padding_bottom || padding_left != padding_right) { // Pad the input in the same way we did during the forward pass, so that // cuDNN or MIOpen receives the same input during the backward pass function // as it did during the forward pass function. const int64 padding_rows_diff = std::abs(padding_bottom - padding_top); const int64 padding_cols_diff = std::abs(padding_right - padding_left); const int64 new_in_rows = dims.spatial_dims[0].input_size + padding_rows_diff; const int64 new_in_cols = dims.spatial_dims[1].input_size + padding_cols_diff; const int64 input_pad_top = padding_top - common_padding_rows; const int64 input_pad_bottom = padding_bottom - common_padding_rows; const int64 input_pad_left = padding_left - common_padding_cols; const int64 input_pad_right = padding_right - common_padding_cols; OP_REQUIRES_OK( ctx, ctx->allocate_temp( DataTypeToEnum<T>::value, ShapeFromFormat(data_format, dims.batch_size, new_in_rows, new_in_cols, dims.in_depth), &compatible_input)); functor::PadInput<GPUDevice, T, int, 4>()( ctx->template eigen_device<GPUDevice>(), To32Bit(input.tensor<T, 4>()), {{static_cast<int>(input_pad_top), static_cast<int>(input_pad_left)}}, {{static_cast<int>(input_pad_bottom), static_cast<int>(input_pad_right)}}, To32Bit(compatible_input.tensor<T, 4>()), data_format, T{}); } else { compatible_input = input; } CHECK(common_padding_rows >= 0 && common_padding_cols >= 0) // Crash OK << "Negative row or col paddings: (" << common_padding_rows << ", " << common_padding_cols << ")"; // The Tensor Core in NVIDIA Volta+ GPUs supports efficient convolution with // fp16 in NHWC data layout. In all other configurations it's more efficient // to run computation in NCHW data format. const bool compute_in_nhwc = DataTypeToEnum<T>::value == DT_HALF && IsVoltaOrLater(*stream->parent()); // We only do one directional conversion: NHWC->NCHW. We never convert in the // other direction. Grappler layout optimizer selects the preferred layout and // adds necessary annotations to the graph. const TensorFormat compute_data_format = (compute_in_nhwc && data_format == FORMAT_NHWC) ? 
FORMAT_NHWC : FORMAT_NCHW; VLOG(3) << "Compute Conv2DBackpropFilter with cuDNN:" << " data_format=" << ToString(data_format) << " compute_data_format=" << ToString(compute_data_format); constexpr auto kComputeInNHWC = std::make_tuple(se::dnn::DataLayout::kBatchYXDepth, se::dnn::FilterLayout::kOutputYXInput); constexpr auto kComputeInNCHW = std::make_tuple(se::dnn::DataLayout::kBatchDepthYX, se::dnn::FilterLayout::kOutputInputYX); se::dnn::DataLayout compute_data_layout; se::dnn::FilterLayout filter_layout; std::tie(compute_data_layout, filter_layout) = compute_data_format == FORMAT_NHWC ? kComputeInNHWC : kComputeInNCHW; se::dnn::BatchDescriptor input_desc; input_desc.set_count(dims.batch_size) .set_height(GetTensorDim(compatible_input, data_format, 'H')) .set_width(GetTensorDim(compatible_input, data_format, 'W')) .set_feature_map_count(dims.in_depth) .set_layout(compute_data_layout); se::dnn::BatchDescriptor output_desc; output_desc.set_count(dims.batch_size) .set_height(dims.spatial_dims[0].output_size) .set_width(dims.spatial_dims[1].output_size) .set_feature_map_count(dims.out_depth) .set_layout(compute_data_layout); se::dnn::FilterDescriptor filter_desc; filter_desc.set_input_filter_height(dims.spatial_dims[0].filter_size) .set_input_filter_width(dims.spatial_dims[1].filter_size) .set_input_feature_map_count(filter_shape.dim_size(2)) .set_output_feature_map_count(filter_shape.dim_size(3)) .set_layout(filter_layout); se::dnn::ConvolutionDescriptor conv_desc; conv_desc.set_vertical_dilation_rate(dims.spatial_dims[0].dilation) .set_horizontal_dilation_rate(dims.spatial_dims[1].dilation) .set_vertical_filter_stride(dims.spatial_dims[0].stride) .set_horizontal_filter_stride(dims.spatial_dims[1].stride) .set_zero_padding_height(common_padding_rows) .set_zero_padding_width(common_padding_cols) .set_group_count(dims.in_depth / filter_shape.dim_size(2)); // Tensorflow filter format: HWIO // cuDNN filter formats: (data format) -> (filter format) // (1) NCHW -> OIHW // (2) NHWC -> OHWI // // We compute filter backprop into temporary tensor, and then convert it to // the HWIO data format at the end. Tensor pre_transformed_filter_backprop; OP_REQUIRES_OK( ctx, ctx->allocate_temp( DataTypeToEnum<T>::value, TensorShape({filter_shape.dim_size(3), filter_shape.dim_size(2), filter_shape.dim_size(0), filter_shape.dim_size(1)}), &pre_transformed_filter_backprop)); Tensor transformed_out_backprop; if (data_format == FORMAT_NHWC && compute_data_format == FORMAT_NCHW) { VLOG(4) << "Convert the `out_backprop` tensor from NHWC to NCHW."; TensorShape compute_shape = ShapeFromFormat( compute_data_format, dims.batch_size, dims.spatial_dims[0].output_size, dims.spatial_dims[1].output_size, dims.out_depth); if (dims.out_depth > 1) { OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, compute_shape, &transformed_out_backprop)); functor::NHWCToNCHW<GPUDevice, T, 4>()( ctx->eigen_device<GPUDevice>(), out_backprop.tensor<T, 4>(), transformed_out_backprop.tensor<T, 4>()); } else { // If depth <= 1, just reshape. 
CHECK(transformed_out_backprop.CopyFrom(out_backprop, compute_shape)); } } else { transformed_out_backprop = out_backprop; } Tensor transformed_input; if (data_format == FORMAT_NHWC && compute_data_format == FORMAT_NCHW) { VLOG(4) << "Convert the `input` tensor from NHWC to NCHW."; TensorShape compute_shape = ShapeFromFormat( compute_data_format, GetTensorDim(compatible_input, data_format, 'N'), GetTensorDim(compatible_input, data_format, 'H'), GetTensorDim(compatible_input, data_format, 'W'), GetTensorDim(compatible_input, data_format, 'C')); if (compute_shape.dim_size(1) > 1) { OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, compute_shape, &transformed_input)); functor::NHWCToNCHW<GPUDevice, T, 4>()( ctx->eigen_device<GPUDevice>(), const_cast<const Tensor&>(compatible_input).tensor<T, 4>(), transformed_input.tensor<T, 4>()); } else { // If depth <= 1, just reshape. CHECK(transformed_input.CopyFrom(compatible_input, compute_shape)); } } else { transformed_input = compatible_input; } se::DeviceMemory<T> out_backprop_ptr = AsDeviceMemory(transformed_out_backprop.template flat<T>().data(), transformed_out_backprop.template flat<T>().size()); se::DeviceMemory<T> filter_backprop_ptr = AsDeviceMemory(pre_transformed_filter_backprop.template flat<T>().data(), pre_transformed_filter_backprop.template flat<T>().size()); auto input_ptr = AsDeviceMemory(transformed_input.template flat<T>().data(), transformed_input.template flat<T>().size()); static int64 ConvolveBackwardFilterScratchSize = GetDnnWorkspaceLimit( "TF_CUDNN_WORKSPACE_LIMIT_IN_MB", 1LL << 32 // 4GB by default ); int device_id = stream->parent()->device_ordinal(); DataType dtype = input.dtype(); ConvParameters conv_parameters = { dims.batch_size, // batch dims.in_depth, // in_depths {{input_desc.height(), // in_rows input_desc.width()}}, // in_cols compute_data_format, // compute_data_format dims.out_depth, // out_depths {{dims.spatial_dims[0].filter_size, // filter_rows dims.spatial_dims[1].filter_size, // filter_cols filter_shape.dim_size(2)}}, // filter_depth {{dims.spatial_dims[0].dilation, // dilation_rows dims.spatial_dims[1].dilation}}, // dilation_cols {{dims.spatial_dims[0].stride, // stride_rows dims.spatial_dims[1].stride}}, // stride_cols {{common_padding_rows, // padding_rows common_padding_cols}}, // padding_cols dtype, // tensor datatype device_id, // device_id conv_desc.group_count() // group_count }; #if TENSORFLOW_USE_ROCM // cudnn_use_autotune is applicable only the CUDA flow // for ROCm/MIOpen, we need to call GetMIOpenConvolveAlgorithms explicitly // if we do not have a cached algorithm_config for this conv_parameters cudnn_use_autotune = true; #endif AlgorithmConfig algorithm_config; if (cudnn_use_autotune && !AutoTuneConvBwdFilter::GetInstance()->Find( conv_parameters, &algorithm_config)) { std::vector<std::unique_ptr<se::dnn::ConvolveExecutionPlan>> plans; #if GOOGLE_CUDA std::vector<AlgorithmDesc> algorithms; std::vector<AlgorithmConfig> configs; if (CudnnUseFrontend()) { OP_REQUIRES( ctx, stream->parent()->GetConvolveExecutionPlans( se::dnn::ConvolutionKind::BACKWARD_FILTER, se::dnn::ToDataType<T>::value, stream, input_desc, filter_desc, output_desc, conv_desc, &plans), errors::Unknown("Failed to get convolution execution plan. 
This is " "probably because cuDNN failed to initialize, so try " "looking to see if a warning log message was printed " "above.")); for (const auto& plan : plans) { configs.push_back( AlgorithmConfig(AlgorithmDesc{plan->getTag(), plan->get_raw_desc()}, plan->getWorkspaceSize())); } } else { OP_REQUIRES( ctx, stream->parent()->GetConvolveBackwardFilterAlgorithms( conv_parameters.ShouldIncludeWinogradNonfusedAlgo<T>( stream->parent()), &algorithms), errors::Unknown("Failed to get convolution execution plan. This is " "probably because cuDNN failed to initialize, so try " "looking to see if a warning log message was printed " "above.")); for (const auto& algorithm : algorithms) { configs.push_back(AlgorithmConfig(algorithm)); } } se::TfAllocatorAdapter tf_allocator_adapter(ctx->device()->GetAllocator({}), stream); se::RedzoneAllocator rz_allocator(stream, &tf_allocator_adapter, se::GpuAsmOpts()); se::DeviceMemory<T> filter_backprop_ptr_rz( WrapRedzoneBestEffort(&rz_allocator, filter_backprop_ptr)); std::vector<tensorflow::AutotuneResult> results; for (auto& profile_config : configs) { // TODO(zhengxq): profile each algorithm multiple times to better // accuracy. DnnScratchAllocator scratch_allocator(ConvolveBackwardFilterScratchSize, ctx); se::RedzoneAllocator rz_scratch_allocator( stream, &tf_allocator_adapter, se::GpuAsmOpts(), /*memory_limit=*/ConvolveBackwardFilterScratchSize); se::ScratchAllocator* allocator_used = !RedzoneCheckDisabled() ? static_cast<se::ScratchAllocator*>(&rz_scratch_allocator) : static_cast<se::ScratchAllocator*>(&scratch_allocator); ProfileResult profile_result; Status cudnn_launch_status; if (CudnnUseFrontend()) { cudnn_launch_status = stream->ConvolveBackwardFilterWithExecutionPlan( input_desc, input_ptr, output_desc, out_backprop_ptr, conv_desc, filter_desc, &filter_backprop_ptr_rz, allocator_used, profile_config, &profile_result); } else { cudnn_launch_status = stream->ConvolveBackwardFilterWithAlgorithm( input_desc, input_ptr, output_desc, out_backprop_ptr, conv_desc, filter_desc, &filter_backprop_ptr_rz, allocator_used, profile_config, &profile_result); } if (cudnn_launch_status.ok() && profile_result.is_valid()) { results.emplace_back(); auto& result = results.back(); if (CudnnUseFrontend()) { result.mutable_cuda_conv_plan()->set_exec_plan_id( profile_config.algorithm()->exec_plan_id()); } else { result.mutable_conv()->set_algorithm( profile_config.algorithm()->algo_id()); result.mutable_conv()->set_tensor_ops_enabled( profile_config.algorithm()->tensor_ops_enabled()); } result.set_scratch_bytes( !RedzoneCheckDisabled() ? rz_scratch_allocator.TotalAllocatedBytesExcludingRedzones() : scratch_allocator.TotalByteSize()); *result.mutable_run_time() = proto_utils::ToDurationProto( absl::Milliseconds(profile_result.elapsed_time_in_ms())); CheckRedzones(rz_scratch_allocator, &result); CheckRedzones(rz_allocator, &result); } else if (CudnnUseFrontend()) { // When CuDNN frontend APIs are used, we need to make sure the profiling // results are one-to-one mapping of the "plans". So, we insert dummy // results when the excution fails. 
results.emplace_back(); auto& result = results.back(); result.mutable_failure()->set_kind(AutotuneResult::UNKNOWN); result.mutable_failure()->set_msg( absl::StrCat("Profiling failure on CUDNN engine: ", profile_config.algorithm()->exec_plan_id())); } } #elif TENSORFLOW_USE_ROCM DnnScratchAllocator scratch_allocator(ConvolveBackwardFilterScratchSize, ctx); std::vector<ProfileResult> algorithms; OP_REQUIRES( ctx, stream->parent()->GetMIOpenConvolveAlgorithms( se::dnn::ConvolutionKind::BACKWARD_FILTER, se::dnn::ToDataType<T>::value, stream, input_desc, input_ptr, filter_desc, filter_backprop_ptr, output_desc, out_backprop_ptr, conv_desc, &scratch_allocator, &algorithms), errors::Unknown( "Failed to get convolution algorithm. This is probably " "because MIOpen failed to initialize, so try looking to " "see if a warning log message was printed above.")); std::vector<tensorflow::AutotuneResult> results; if (algorithms.size() == 1) { auto profile_result = algorithms[0]; results.emplace_back(); auto& result = results.back(); result.mutable_conv()->set_algorithm( profile_result.algorithm().algo_id()); result.mutable_conv()->set_tensor_ops_enabled( profile_result.algorithm().tensor_ops_enabled()); result.set_scratch_bytes(profile_result.scratch_size()); *result.mutable_run_time() = proto_utils::ToDurationProto( absl::Milliseconds(profile_result.elapsed_time_in_ms())); } else { for (auto miopen_algorithm : algorithms) { auto profile_algorithm = miopen_algorithm.algorithm(); ProfileResult profile_result; auto miopen_launch_status = stream->ConvolveBackwardFilterWithAlgorithm( input_desc, input_ptr, output_desc, out_backprop_ptr, conv_desc, filter_desc, &filter_backprop_ptr, &scratch_allocator, AlgorithmConfig(profile_algorithm, miopen_algorithm.scratch_size()), &profile_result); if (miopen_launch_status.ok() && profile_result.is_valid()) { results.emplace_back(); auto& result = results.back(); result.mutable_conv()->set_algorithm(profile_algorithm.algo_id()); result.mutable_conv()->set_tensor_ops_enabled( profile_algorithm.tensor_ops_enabled()); result.set_scratch_bytes(scratch_allocator.TotalByteSize()); *result.mutable_run_time() = proto_utils::ToDurationProto( absl::Milliseconds(profile_result.elapsed_time_in_ms())); } } } #endif LogConvAutotuneResults(se::dnn::ConvolutionKind::BACKWARD_FILTER, se::dnn::ToDataType<T>::value, input_ptr, filter_backprop_ptr, out_backprop_ptr, input_desc, filter_desc, output_desc, conv_desc, stream->parent(), results); if (CudnnUseFrontend()) { OP_REQUIRES_OK( ctx, BestCudnnConvAlgorithm(results, &plans, &algorithm_config)); } else { OP_REQUIRES_OK( ctx, BestCudnnConvAlgorithm(results, nullptr, &algorithm_config)); } AutoTuneConvBwdFilter::GetInstance()->Insert(conv_parameters, algorithm_config); } Status cudnn_launch_status; DnnScratchAllocator scratch_allocator(ConvolveBackwardFilterScratchSize, ctx); if (CudnnUseFrontend()) { if (algorithm_config.algorithm().has_value()) { VLOG(4) << "Conv2DBackpropFilter Execution Plan: " << algorithm_config.algorithm()->exec_plan_id(); } else { VLOG(4) << "Convolution AutoTune has been turned off"; } cudnn_launch_status = stream->ConvolveBackwardFilterWithExecutionPlan( input_desc, input_ptr, output_desc, out_backprop_ptr, conv_desc, filter_desc, &filter_backprop_ptr, &scratch_allocator, algorithm_config, nullptr); } else { cudnn_launch_status = stream->ConvolveBackwardFilterWithAlgorithm( input_desc, input_ptr, output_desc, out_backprop_ptr, conv_desc, filter_desc, &filter_backprop_ptr, &scratch_allocator, algorithm_config, 
nullptr); } if (!cudnn_launch_status.ok()) { ctx->SetStatus(cudnn_launch_status); return; } FilterTensorFormat src_filter_format = compute_data_format == FORMAT_NCHW ? FORMAT_OIHW : FORMAT_OHWI; auto toConstTensor = [](const Tensor& x) -> const Tensor { return x; }; functor::ReverseTransformFilter<GPUDevice, T, 4>()( ctx->eigen_device<GPUDevice>(), src_filter_format, toConstTensor(pre_transformed_filter_backprop).template tensor<T, 4>(), filter_backprop->tensor<T, 4>()); } // Forward declarations of the functor specializations for GPU. namespace functor { #define DECLARE_GPU_SPEC(T) \ template <> \ void TransformFilter<GPUDevice, T, int, 4>::operator()( \ const GPUDevice& d, FilterTensorFormat dst_filter_format, \ typename TTypes<T, 4, int>::ConstTensor in, \ typename TTypes<T, 4, int>::Tensor out); \ extern template struct TransformFilter<GPUDevice, T, int, 4>; \ template <> \ void PadInput<GPUDevice, T, int, 4>::operator()( \ const GPUDevice& d, typename TTypes<T, 4, int>::ConstTensor in, \ const std::array<int, 2>& padding_left, \ const std::array<int, 2>& padding_right, \ typename TTypes<T, 4, int>::Tensor out, TensorFormat data_format, \ const T& padding_value); \ extern template struct PadInput<GPUDevice, T, int, 4>; DECLARE_GPU_SPEC(float); DECLARE_GPU_SPEC(Eigen::half); DECLARE_GPU_SPEC(double); #undef DECLARE_GPU_SPEC } // namespace functor REGISTER_KERNEL_BUILDER(Name("Conv2DBackpropFilter") .Device(DEVICE_GPU) .TypeConstraint<double>("T") .HostMemory("filter_sizes"), Conv2DBackpropFilterOp<GPUDevice, double>); REGISTER_KERNEL_BUILDER(Name("Conv2DBackpropFilter") .Device(DEVICE_GPU) .TypeConstraint<float>("T") .HostMemory("filter_sizes"), Conv2DBackpropFilterOp<GPUDevice, float>); REGISTER_KERNEL_BUILDER(Name("Conv2DBackpropFilter") .Device(DEVICE_GPU) .TypeConstraint<Eigen::half>("T") .HostMemory("filter_sizes"), Conv2DBackpropFilterOp<GPUDevice, Eigen::half>); // To be used inside depthwise_conv_grad_op.cc. // TODO(reedwm): Move this and the definition to depthwise_conv_grad_op.cc. template struct LaunchConv2DBackpropFilterOp<GPUDevice, float>; template struct LaunchConv2DBackpropFilterOp<GPUDevice, Eigen::half>; template struct LaunchConv2DBackpropFilterOp<GPUDevice, double>; #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM } // namespace tensorflow
null
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // See docs in ../ops/nn_ops.cc. #define USE_EIGEN_TENSOR #define EIGEN_USE_THREADS #include <algorithm> #include <vector> #include "tensorflow/core/framework/kernel_shape_util.h" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_slice.h" #include "tensorflow/core/kernels/conv_2d.h" #include "tensorflow/core/kernels/conv_grad_ops.h" #include "tensorflow/core/kernels/conv_grad_shape_utils.h" #include "tensorflow/core/kernels/fill_functor.h" #ifdef TENSORFLOW_USE_LIBXSMM_CONVOLUTIONS #include "tensorflow/core/kernels/xsmm_conv2d.h" #endif #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/array_slice.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/tensor_format.h" #include "tensorflow/core/util/use_cudnn.h" #include "tensorflow/core/util/work_sharder.h" #if defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL) #include "tensorflow/core/kernels/eigen_contraction_kernel.h" #endif #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #include "tensorflow/core/kernels/conv_ops_gpu.h" #include "tensorflow/core/platform/stream_executor.h" #include "tensorflow/core/protobuf/autotuning.pb.h" #include "tensorflow/core/util/proto/proto_utils.h" #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM #if GOOGLE_CUDA #include "tensorflow/stream_executor/gpu/gpu_asm_opts.h" #include "tensorflow/stream_executor/gpu/redzone_allocator.h" #include "tensorflow/stream_executor/tf_allocator_adapter.h" #endif // GOOGLE_CUDA namespace { // Returns in 'col_data', image patches in storage order (height, width, depth) // extracted from image at 'input_data', which is required to be in storage // order (batch, height, width, depth). // Implementation written by Yangqing Jia (jiayq). template <typename T> void Im2col(const T* input_data, const int depth, const int height, const int width, const int filter_h, const int filter_w, const int pad_t, const int pad_l, const int pad_b, const int pad_r, const int stride_h, const int stride_w, T* col_data) { int height_col = (height + pad_t + pad_b - filter_h) / stride_h + 1; int width_col = (width + pad_l + pad_r - filter_w) / stride_w + 1; int h_pad = -pad_t; for (int h = 0; h < height_col; ++h) { int w_pad = -pad_l; for (int w = 0; w < width_col; ++w) { for (int ih = h_pad; ih < h_pad + filter_h; ++ih) { for (int iw = w_pad; iw < w_pad + filter_w; ++iw) { if (ih >= 0 && ih < height && iw >= 0 && iw < width) { memcpy(col_data, input_data + (ih * width + iw) * depth, sizeof(T) * depth); } else { // This should be simply padded with zero. 
memset(col_data, 0, sizeof(T) * depth); } col_data += depth; } } w_pad += stride_w; } h_pad += stride_h; } } } // namespace namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; template <typename T> struct LaunchConv2DBackpropFilterOp<CPUDevice, T> { void operator()(OpKernelContext* ctx, bool use_cudnn, bool cudnn_use_autotune, const Tensor& out_backprop, const Tensor& input, int row_dilation, int col_dilation, int row_stride, int col_stride, const Padding& padding, const std::vector<int64>& explicit_paddings, Tensor* filter_backprop, TensorFormat data_format) { std::vector<int32> dilations(4, 1); dilations[GetTensorDimIndex(data_format, 'H')] = row_dilation; dilations[GetTensorDimIndex(data_format, 'W')] = col_dilation; std::vector<int32> strides(4, 1); strides[GetTensorDimIndex(data_format, 'H')] = row_stride; strides[GetTensorDimIndex(data_format, 'W')] = col_stride; TensorShape filter_shape = filter_backprop->shape(); ConvBackpropDimensions dims; OP_REQUIRES_OK( ctx, ConvBackpropComputeDimensionsV2( "Conv2DBackpropFilter", /*num_spatial_dims=*/2, input.shape(), filter_shape, out_backprop.shape(), dilations, strides, padding, explicit_paddings, data_format, &dims)); int64 padding_top = -1, padding_bottom = -1; int64 padding_left = -1, padding_right = -1; if (padding == EXPLICIT) { GetExplicitPaddingForDim(explicit_paddings, data_format, 'H', &padding_top, &padding_bottom); GetExplicitPaddingForDim(explicit_paddings, data_format, 'W', &padding_left, &padding_right); } int64 expected_out_rows, expected_out_cols; // The function is guaranteed to succeed because we checked the output and // padding was valid earlier. TF_CHECK_OK(GetWindowedOutputSizeVerboseV2( dims.spatial_dims[0].input_size, dims.spatial_dims[0].filter_size, row_dilation, row_stride, padding, &expected_out_rows, &padding_top, &padding_bottom)); DCHECK_EQ(dims.spatial_dims[0].output_size, expected_out_rows); TF_CHECK_OK(GetWindowedOutputSizeVerboseV2( dims.spatial_dims[1].input_size, dims.spatial_dims[1].filter_size, col_dilation, col_stride, padding, &expected_out_cols, &padding_left, &padding_right)); DCHECK_EQ(dims.spatial_dims[1].output_size, expected_out_cols); const CPUDevice& d = ctx->eigen_device<CPUDevice>(); // WARNING: Need to swap row/col, padding_top/padding_left, and // padding_bottom/padding_right when calling Eigen. Eigen expects tensors // in NWHC format, but Tensorflow uses NHWC. auto filter_backprop_t = filter_backprop->tensor<T, 4>(); auto input_t = input.tensor<T, 4>(); auto out_backprop_t = out_backprop.tensor<T, 4>(); if (padding != EXPLICIT) { // If padding was not explicitly defined, Eigen spatial convolution // backward filter will infer correct forward paddings from input tensors. filter_backprop_t.device(d) = Eigen::SpatialConvolutionBackwardKernel( input_t, out_backprop_t, filter_backprop_t.dimension(1), filter_backprop_t.dimension(0), col_stride, row_stride, col_dilation, row_dilation); } else { // Otherwise we have to explicitly pad the input, before passing it to // spatial convolution backward filter. Eigen::array<std::pair<int, int>, 4> paddings; paddings[0] = {0, 0}; paddings[1] = {padding_top, padding_bottom}; paddings[2] = {padding_left, padding_right}; paddings[3] = {0, 0}; auto padded_t = input_t.pad(paddings, T(0)); // TODO(ezhulenev): Pass explicit paddings to Eigen spatial backward // convolution and do not rely on tensor padding expression. 
filter_backprop_t.device(d) = Eigen::SpatialConvolutionBackwardKernel( padded_t, out_backprop_t, filter_backprop_t.dimension(1), filter_backprop_t.dimension(0), col_stride, row_stride, col_dilation, row_dilation); } } }; #ifdef TENSORFLOW_USE_LIBXSMM_CONVOLUTIONS template <typename Device, class T> struct LaunchXsmmBackwardFilter { bool operator()(OpKernelContext* context, const Device& d, typename TTypes<T, 4>::ConstTensor input_backward, typename TTypes<T, 4>::Tensor kernel, typename TTypes<T, 4>::ConstTensor output_backward, int input_rows, int input_cols, int row_stride, int col_stride, int pad_h, int pad_w, TensorFormat data_format) const { return false; } }; template <> struct LaunchXsmmBackwardFilter<CPUDevice, float> { bool operator()(OpKernelContext* context, const CPUDevice& d, typename TTypes<float, 4>::ConstTensor input, typename TTypes<float, 4>::Tensor filter, typename TTypes<float, 4>::ConstTensor output, int input_rows, int input_cols, int row_stride, int col_stride, int pad_h, int pad_w, TensorFormat data_format) const { auto batch = input.dimension(0); auto in_depth = input.dimension(3); auto out_depth = output.dimension(3); auto filter_rows = filter.dimension(0); auto filter_cols = filter.dimension(1); auto num_threads = context->device()->tensorflow_cpu_worker_threads()->num_threads; // See libxsmm_dnn.h for this struct definition. libxsmm_dnn_conv_desc desc; desc.N = batch; desc.C = in_depth; desc.H = input_rows; desc.W = input_cols; desc.K = out_depth; desc.R = filter_rows; desc.S = filter_cols; desc.u = row_stride; desc.v = col_stride; desc.pad_h = pad_h; desc.pad_w = pad_w; desc.pad_h_in = 0; // pad_rows; // ignored by libxsmm for now. desc.pad_w_in = 0; // pad_cols; // ignored by libxsmm for now. desc.pad_h_out = 0; desc.pad_w_out = 0; desc.threads = num_threads; desc.algo = LIBXSMM_DNN_CONV_ALGO_DIRECT; desc.buffer_format = LIBXSMM_DNN_TENSOR_FORMAT_NHWC; desc.filter_format = LIBXSMM_DNN_TENSOR_FORMAT_RSCK; desc.fuse_ops = LIBXSMM_DNN_CONV_FUSE_NONE; desc.options = LIBXSMM_DNN_CONV_OPTION_NONE; desc.datatype_out = LIBXSMM_DNN_DATATYPE_F32; desc.datatype_in = LIBXSMM_DNN_DATATYPE_F32; if (!CanUseXsmmConv2D(desc, data_format)) { return false; } auto input_ptr = input.data(); auto filter_ptr = filter.data(); auto output_ptr = output.data(); bool success = functor::XsmmBkwFilterConv2D<CPUDevice, float>()( context, desc, input_ptr, filter_ptr, output_ptr); return success; } }; #endif template <typename Device, class T> class Conv2DBackpropFilterOp : public OpKernel { public: explicit Conv2DBackpropFilterOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES_OK(context, context->GetAttr("strides", &strides_)); int stride_n = GetTensorDim(strides_, data_format_, 'N'); int stride_c = GetTensorDim(strides_, data_format_, 'C'); int stride_h = GetTensorDim(strides_, data_format_, 'H'); int stride_w = GetTensorDim(strides_, data_format_, 'W'); OP_REQUIRES( context, (stride_n == 1 && stride_c == 1), errors::InvalidArgument("Current implementation does not yet support " "strides in the batch and depth dimensions.")); OP_REQUIRES(context, stride_h > 0 && stride_w > 0, errors::InvalidArgument( "Row and column strides should be larger than 0.")); OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilations_)); OP_REQUIRES(context, dilations_.size() == 4, 
errors::InvalidArgument("Sliding window dilations field must " "specify 4 dimensions")); int dilation_n = GetTensorDim(dilations_, data_format_, 'N'); int dilation_c = GetTensorDim(dilations_, data_format_, 'C'); int dilation_h = GetTensorDim(dilations_, data_format_, 'H'); int dilation_w = GetTensorDim(dilations_, data_format_, 'W'); OP_REQUIRES(context, dilation_n == 1 && dilation_c == 1, errors::InvalidArgument( "Current implementation does not yet support " "dilations in the batch and depth dimensions.")); OP_REQUIRES( context, dilation_h > 0 && dilation_w > 0, errors::InvalidArgument("Dilated rates should be larger than 0.")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES_OK(context, context->GetAttr("explicit_paddings", &explicit_paddings_)); OP_REQUIRES_OK(context, CheckValidPadding(padding_, explicit_paddings_, /*num_dims=*/4, data_format_)); OP_REQUIRES_OK(context, context->GetAttr("use_cudnn_on_gpu", &use_cudnn_)); cudnn_use_autotune_ = CudnnUseAutotune(); if (std::is_same<Device, CPUDevice>::value) { OP_REQUIRES(context, data_format_ == FORMAT_NHWC, errors::InvalidArgument("Conv2DBackpropFilterOp [CPU] " "only supports NHWC data format.")); // TODO(yangzihao): Add a CPU implementation for dilated convolution. OP_REQUIRES( context, (dilation_h == 1 && dilation_w == 1), errors::InvalidArgument("Conv2DBackpropFilterOp [CPU] not yet " "support dilation rates larger than 1.")); } } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const Tensor& filter_sizes = context->input(1); const Tensor& out_backprop = context->input(2); OP_REQUIRES( context, TensorShapeUtils::IsVector(filter_sizes.shape()), errors::InvalidArgument( "Conv2DBackpropFilter: filter_sizes input must be 1-dim, not ", filter_sizes.dims())); TensorShape filter_shape; OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape( filter_sizes.vec<int32>(), &filter_shape)); Tensor* filter_backprop = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, filter_shape, &filter_backprop)); // If there is nothing to compute, return. if (filter_shape.num_elements() == 0) { return; } // If input is empty, set gradients to zero. if (input.shape().num_elements() == 0) { functor::SetZeroFunctor<Device, T> f; f(context->eigen_device<Device>(), filter_backprop->flat<T>()); return; } // For now we take the stride from the second and third dimensions only (we // do not support striding on the batch or depth dimension). 
const int stride_rows = GetTensorDim(strides_, data_format_, 'H'); const int stride_cols = GetTensorDim(strides_, data_format_, 'W'); const int dilation_rows = GetTensorDim(dilations_, data_format_, 'H'); const int dilation_cols = GetTensorDim(dilations_, data_format_, 'W'); VLOG(2) << "Conv2DBackpropFilter:" << " input: " << input.shape().DebugString() << " filter:" << filter_shape.DebugString() << " out_backprop: " << out_backprop.shape().DebugString() << " strides: [" << stride_rows << ", " << stride_cols << "]" << " dilations: [" << dilation_rows << ", " << dilation_cols << "]"; launcher_(context, use_cudnn_, cudnn_use_autotune_, out_backprop, input, dilation_rows, dilation_cols, stride_rows, stride_cols, padding_, explicit_paddings_, filter_backprop, data_format_); } private: std::vector<int32> dilations_; std::vector<int32> strides_; Padding padding_; std::vector<int64> explicit_paddings_; bool use_cudnn_; TensorFormat data_format_; LaunchConv2DBackpropFilterOp<Device, T> launcher_; bool cudnn_use_autotune_; TF_DISALLOW_COPY_AND_ASSIGN(Conv2DBackpropFilterOp); }; // Based on implementation written by Yangqing Jia (jiayq). template <typename Device, class T> class Conv2DCustomBackpropFilterOp : public OpKernel { public: explicit Conv2DCustomBackpropFilterOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES(context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Conv2DCustomBackpropFilterOp only supports NHWC.")); OP_REQUIRES_OK(context, context->GetAttr("strides", &strides_)); OP_REQUIRES(context, strides_.size() == 4, errors::InvalidArgument("Sliding window strides field must " "specify 4 dimensions")); OP_REQUIRES( context, (strides_[0] == 1 && strides_[3] == 1), errors::InvalidArgument("Current implementation does not yet support " "strides in the batch and depth dimensions.")); OP_REQUIRES(context, strides_[1] > 0 && strides_[2] > 0, errors::InvalidArgument( "Row and column strides should be larger than 0.")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES_OK(context, context->GetAttr("explicit_paddings", &explicit_paddings_)); OP_REQUIRES_OK(context, CheckValidPadding(padding_, explicit_paddings_, /*num_dims=*/4, data_format_)); OP_REQUIRES_OK(context, context->GetAttr("dilations", &dilations_)); OP_REQUIRES(context, dilations_.size() == 4, errors::InvalidArgument("Sliding window dilations field must " "specify 4 dimensions")); OP_REQUIRES(context, (dilations_[0] == 1 && dilations_[3] == 1), errors::InvalidArgument( "Current implementation does not yet support " "dilations in the batch and depth dimensions.")); if (std::is_same<Device, CPUDevice>::value || std::is_same<Device, GPUDevice>::value) { // TODO(yangzihao): Add a CPU implementation for dilated convolution. 
OP_REQUIRES(context, (dilations_[1] == 1 && dilations_[2] == 1), errors::InvalidArgument( "Current libxsmm and customized CPU implementations do " "not yet support dilation rates larger than 1.")); dilations_ = {1, 1, 1, 1}; } } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const Tensor& filter_sizes = context->input(1); const Tensor& out_backprop = context->input(2); OP_REQUIRES( context, TensorShapeUtils::IsVector(filter_sizes.shape()), errors::InvalidArgument( "Conv2DCustomBackpropFilter: filter_sizes input must be 1-dim, " "not ", filter_sizes.dims())); TensorShape filter_shape; OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape( filter_sizes.vec<int32>(), &filter_shape)); ConvBackpropDimensions dims; OP_REQUIRES_OK( context, ConvBackpropComputeDimensionsV2( "Conv2DCustomBackpropFilter", /*num_spatial_dims=*/2, input.shape(), filter_shape, out_backprop.shape(), dilations_, strides_, padding_, explicit_paddings_, data_format_, &dims)); Tensor* filter_backprop; OP_REQUIRES_OK(context, context->allocate_output(0, filter_shape, &filter_backprop)); // If there is nothing to compute, return. if (filter_shape.num_elements() == 0) { return; } int64 pad_top, pad_bottom; int64 pad_left, pad_right; if (padding_ == Padding::EXPLICIT) { pad_top = explicit_paddings_[2]; pad_bottom = explicit_paddings_[3]; pad_left = explicit_paddings_[4]; pad_right = explicit_paddings_[5]; } OP_REQUIRES_OK( context, GetWindowedOutputSizeVerbose( dims.spatial_dims[0].input_size, dims.spatial_dims[0].filter_size, dims.spatial_dims[0].stride, padding_, &dims.spatial_dims[0].output_size, &pad_top, &pad_bottom)); OP_REQUIRES_OK( context, GetWindowedOutputSizeVerbose( dims.spatial_dims[1].input_size, dims.spatial_dims[1].filter_size, dims.spatial_dims[1].stride, padding_, &dims.spatial_dims[1].output_size, &pad_left, &pad_right)); #if defined TENSORFLOW_USE_LIBXSMM_CONVOLUTIONS && \ defined TENSORFLOW_USE_LIBXSMM_BACKWARD_CONVOLUTIONS if (pad_left == pad_right && pad_top == pad_bottom) { if (LaunchXsmmBackwardFilter<Device, T>()( context, context->eigen_device<Device>(), input.tensor<T, 4>(), filter_backprop->tensor<T, 4>(), out_backprop.tensor<T, 4>(), dims.spatial_dims[0].input_size, dims.spatial_dims[1].input_size, static_cast<int>(dims.spatial_dims[0].stride), static_cast<int>(dims.spatial_dims[1].stride), static_cast<int>(pad_top), static_cast<int>(pad_left), data_format_)) { return; } } #endif // The total dimension size of each kernel. const int filter_total_size = dims.spatial_dims[0].filter_size * dims.spatial_dims[1].filter_size * dims.in_depth; OP_REQUIRES( context, filter_total_size * dims.out_depth == filter_backprop->NumElements(), errors::InvalidArgument( "filter_size does not have enough elements, requested ", filter_total_size * dims.out_depth, ", got ", filter_backprop->NumElements())); // The output image size is the spatial size of the output. const int output_image_size = dims.spatial_dims[0].output_size * dims.spatial_dims[1].output_size; // Shard 'batch' images into 'shard_size' groups of images to be fed // into the parallel matmul. Calculate 'shard_size' by dividing the L3 cache // size ('target_working_set_size') by the matmul size of an individual // image ('work_unit_size'). // TODO(andydavis) // *) Get L3 cache size from device at runtime (30MB is from ivybridge). // *) Consider reducing 'target_working_set_size' if L3 is shared by // other concurrently running tensorflow ops. 
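    // A hypothetical sizing example for the heuristic below: a 3x3 filter with
    // in_depth = 64, out_depth = 128 and a 28x28 output yields
    // filter_total_size = 576, output_image_size = 784 and work_unit_size =
    // 625,664 elements, so a 30MB float budget (7,864,320 elements) gives
    // shard_size = 13. The code below also rejects work_unit_size == 0 before
    // dividing by it.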
const size_t target_working_set_size = (30LL << 20) / sizeof(T); const size_t size_A = output_image_size * filter_total_size; const size_t size_B = output_image_size * dims.out_depth; const size_t size_C = filter_total_size * dims.out_depth; const size_t work_unit_size = size_A + size_B + size_C; OP_REQUIRES( context, work_unit_size != 0, errors::InvalidArgument( "Work size for convolution would be 0, which is not acceptable")); const size_t shard_size = (target_working_set_size + work_unit_size - 1) / work_unit_size; Tensor col_buffer; OP_REQUIRES_OK(context, context->allocate_temp( DataTypeToEnum<T>::value, TensorShape({static_cast<int64>(shard_size), static_cast<int64>(output_image_size), static_cast<int64>(filter_total_size)}), &col_buffer)); // The input offset corresponding to a single input image. const int input_offset = dims.spatial_dims[0].input_size * dims.spatial_dims[1].input_size * dims.in_depth; // The output offset corresponding to a single output image. const int output_offset = dims.spatial_dims[0].output_size * dims.spatial_dims[1].output_size * dims.out_depth; const T* input_data = input.template flat<T>().data(); T* col_buffer_data = col_buffer.template flat<T>().data(); const T* out_backprop_data = out_backprop.template flat<T>().data(); T* filter_backprop_data = filter_backprop->template flat<T>().data(); typedef Eigen::TensorMap<Eigen::Tensor<T, 2, Eigen::RowMajor>, Eigen::Unaligned> TensorMap; typedef Eigen::TensorMap<Eigen::Tensor<const T, 2, Eigen::RowMajor>, Eigen::Unaligned> ConstTensorMap; TensorMap C(filter_backprop_data, filter_total_size, dims.out_depth); C.setZero(); // Initialize contraction dims (we need to transpose 'A' below). Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> contract_dims; contract_dims[0].first = 0; contract_dims[0].second = 0; auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); for (int image_id = 0; image_id < dims.batch_size; image_id += shard_size) { const int shard_limit = std::min(static_cast<int>(shard_size), static_cast<int>(dims.batch_size) - image_id); auto shard = [&input_data, &col_buffer_data, &dims, &pad_top, &pad_left, &pad_bottom, &pad_right, &input_offset, &size_A](int64 start, int64 limit) { for (int shard_id = start; shard_id < limit; ++shard_id) { const T* input_data_shard = input_data + shard_id * input_offset; T* col_data_shard = col_buffer_data + shard_id * size_A; // When we compute the gradient with respect to the filters, we need // to do im2col to allow gemm-type computation. Im2col<T>( input_data_shard, dims.in_depth, dims.spatial_dims[0].input_size, dims.spatial_dims[1].input_size, dims.spatial_dims[0].filter_size, dims.spatial_dims[1].filter_size, pad_top, pad_left, pad_bottom, pad_right, dims.spatial_dims[0].stride, dims.spatial_dims[1].stride, col_data_shard); } }; Shard(worker_threads.num_threads, worker_threads.workers, shard_limit, size_A, shard); ConstTensorMap A(col_buffer_data, output_image_size * shard_limit, filter_total_size); ConstTensorMap B(out_backprop_data, output_image_size * shard_limit, dims.out_depth); // Gradient with respect to filter. 
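    // A (im2col patches) and B (the out_backprop slice) share their first
    // dimension of output_image_size * shard_limit rows; contract_dims pairs
    // dimension 0 with dimension 0, i.e. C += A^T * B, accumulating the
    // [filter_total_size, out_depth] filter gradient across the shard.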
C.device(context->eigen_cpu_device()) += A.contract(B, contract_dims); input_data += input_offset * shard_limit; out_backprop_data += output_offset * shard_limit; } } private: std::vector<int32> dilations_; std::vector<int32> strides_; Padding padding_; std::vector<int64> explicit_paddings_; TensorFormat data_format_; TF_DISALLOW_COPY_AND_ASSIGN(Conv2DCustomBackpropFilterOp); }; #define REGISTER_CPU_KERNELS(T) \ REGISTER_KERNEL_BUILDER( \ Name("Conv2DBackpropFilter").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ Conv2DCustomBackpropFilterOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv2DBackpropFilter") \ .Device(DEVICE_CPU) \ .Label("custom") \ .TypeConstraint<T>("T") \ .AttrConstraint("data_format", "NHWC"), \ Conv2DCustomBackpropFilterOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("Conv2DBackpropFilter") \ .Device(DEVICE_CPU) \ .Label("eigen_tensor") \ .TypeConstraint<T>("T") \ .AttrConstraint("data_format", "NHWC"), \ Conv2DBackpropFilterOp<CPUDevice, T>); TF_CALL_half(REGISTER_CPU_KERNELS); TF_CALL_float(REGISTER_CPU_KERNELS); TF_CALL_double(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS // To be used inside depthwise_conv_grad_op.cc. template struct LaunchConv2DBackpropFilterOp<CPUDevice, Eigen::half>; template struct LaunchConv2DBackpropFilterOp<CPUDevice, float>; template struct LaunchConv2DBackpropFilterOp<CPUDevice, double>; // GPU definitions. #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM // The slow version (but compiles for GPU) // A dummy type to group forward backward filter autotune results together. struct ConvBackwardFilterAutoTuneGroup { static string name() { return "ConvBwdFilter"; } }; typedef AutoTuneSingleton<ConvBackwardFilterAutoTuneGroup, ConvParameters, se::dnn::AlgorithmConfig> AutoTuneConvBwdFilter; template <typename T> void LaunchConv2DBackpropFilterOp<Eigen::GpuDevice, T>::operator()( OpKernelContext* ctx, bool use_cudnn, bool cudnn_use_autotune, const Tensor& out_backprop, const Tensor& input, int row_dilation, int col_dilation, int row_stride, int col_stride, const Padding& padding, const std::vector<int64>& explicit_paddings, Tensor* filter_backprop, TensorFormat data_format) { using se::dnn::AlgorithmConfig; using se::dnn::AlgorithmDesc; using se::dnn::ProfileResult; std::vector<int32> dilations(4, 1); dilations[GetTensorDimIndex(data_format, 'H')] = row_dilation; dilations[GetTensorDimIndex(data_format, 'W')] = col_dilation; std::vector<int32> strides(4, 1); strides[GetTensorDimIndex(data_format, 'H')] = row_stride; strides[GetTensorDimIndex(data_format, 'W')] = col_stride; TensorShape filter_shape = filter_backprop->shape(); ConvBackpropDimensions dims; OP_REQUIRES_OK( ctx, ConvBackpropComputeDimensionsV2( "Conv2DBackpropFilter", /*num_spatial_dims=*/2, input.shape(), filter_shape, out_backprop.shape(), dilations, strides, padding, explicit_paddings, data_format, &dims)); int64 padding_top = -1, padding_bottom = -1; int64 padding_left = -1, padding_right = -1; if (padding == EXPLICIT) { GetExplicitPaddingForDim(explicit_paddings, data_format, 'H', &padding_top, &padding_bottom); GetExplicitPaddingForDim(explicit_paddings, data_format, 'W', &padding_left, &padding_right); } int64 expected_out_rows, expected_out_cols; // The function is guaranteed to succeed because we checked the output and // padding was valid earlier. 
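  // The recomputation below uses the standard output-size relation: with
  // effective_filter = (filter - 1) * dilation + 1, out = ceil((in -
  // effective_filter + 1) / stride) for VALID and out = ceil(in / stride) for
  // SAME; matching dims.spatial_dims[i].output_size is therefore only a DCHECK.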
TF_CHECK_OK(GetWindowedOutputSizeVerboseV2( dims.spatial_dims[0].input_size, dims.spatial_dims[0].filter_size, row_dilation, row_stride, padding, &expected_out_rows, &padding_top, &padding_bottom)); DCHECK_EQ(dims.spatial_dims[0].output_size, expected_out_rows); TF_CHECK_OK(GetWindowedOutputSizeVerboseV2( dims.spatial_dims[1].input_size, dims.spatial_dims[1].filter_size, col_dilation, col_stride, padding, &expected_out_cols, &padding_left, &padding_right)); DCHECK_EQ(dims.spatial_dims[1].output_size, expected_out_cols); auto* stream = ctx->op_device_context()->stream(); OP_REQUIRES(ctx, stream, errors::Internal("No GPU stream available.")); if (!use_cudnn) { ctx->SetStatus(errors::Unimplemented( "Conv2DBackprop for GPU is not currently supported " "without cudnn")); return; } // If the filter in-depth (filter_shape.dim_size(2)) is 1 and smaller than the // input depth, it's a depthwise convolution. More generally, if the filter // in-depth divides but is smaller than the input depth, it is a grouped // convolution. bool is_grouped_convolution = filter_shape.dim_size(2) != dims.in_depth; bool cudnn_disable_conv_1x1_optimization_ = CudnnDisableConv1x1Optimization(); if (!cudnn_disable_conv_1x1_optimization_ && dims.spatial_dims[0].filter_size == 1 && dims.spatial_dims[1].filter_size == 1 && !is_grouped_convolution && dims.spatial_dims[0].stride == 1 && dims.spatial_dims[1].stride == 1 && data_format == FORMAT_NHWC && (padding == VALID || padding == SAME)) { const uint64 m = dims.in_depth; const uint64 k = dims.batch_size * dims.spatial_dims[0].input_size * dims.spatial_dims[1].input_size; const uint64 n = dims.out_depth; // The shape of output backprop is // [batch, out_rows, out_cols, out_depth] // From cublas's perspective, it is: n x k auto a_ptr = AsDeviceMemory(out_backprop.template flat<T>().data(), out_backprop.template flat<T>().size()); // The shape of input is // [batch, in_rows, in_cols, in_depth], // From cublas's perspective, it is: m x k auto b_ptr = AsDeviceMemory(input.template flat<T>().data(), input.template flat<T>().size()); // the shape of the filter backprop from the conv_2d should be // [1, 1, in_depth, out_depth] // From cublas's perspective, it is: n x m auto c_ptr = AsDeviceMemory(filter_backprop->template flat<T>().data(), filter_backprop->template flat<T>().size()); bool blas_launch_status = stream ->ThenBlasGemm(se::blas::Transpose::kNoTranspose, se::blas::Transpose::kTranspose, n, m, k, 1.0f, a_ptr, n, b_ptr, m, 0.0f, &c_ptr, n) .ok(); if (!blas_launch_status) { ctx->SetStatus(errors::Internal("Blas SGEMM launch failed : m=", m, ", n=", n, ", k=", k)); } return; } else if (dims.spatial_dims[0].filter_size == dims.spatial_dims[0].input_size && dims.spatial_dims[1].filter_size == dims.spatial_dims[1].input_size && !is_grouped_convolution && padding == VALID && data_format == FORMAT_NHWC) { // The input data and filter have the same height/width, and we are not // using grouped convolution, so call cublas directly. 
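    // Here the filter covers the whole input, so each image produces a 1x1
    // output and the gradient is a single batch contraction:
    // filter_backprop = input^T * out_backprop with the batch dimension (k)
    // contracted, giving an (in_rows * in_cols * in_depth) x out_depth result.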
const uint64 m = dims.spatial_dims[0].input_size * dims.spatial_dims[1].input_size * dims.in_depth; const uint64 k = dims.batch_size; const uint64 n = dims.out_depth; auto a_ptr = AsDeviceMemory(input.template flat<T>().data(), input.template flat<T>().size()); auto b_ptr = AsDeviceMemory(out_backprop.template flat<T>().data(), out_backprop.template flat<T>().size()); auto c_ptr = AsDeviceMemory(filter_backprop->template flat<T>().data(), filter_backprop->template flat<T>().size()); bool blas_launch_status = stream ->ThenBlasGemm(se::blas::Transpose::kNoTranspose, se::blas::Transpose::kTranspose, n, m, k, 1.0f, b_ptr, n, a_ptr, m, 0.0f, &c_ptr, n) .ok(); if (!blas_launch_status) { ctx->SetStatus(errors::Internal("Blas SGEMM launch failed : m=", m, ", n=", n, ", k=", k)); } return; } const int64 common_padding_rows = std::min(padding_top, padding_bottom); const int64 common_padding_cols = std::min(padding_left, padding_right); Tensor compatible_input; if (padding_top != padding_bottom || padding_left != padding_right) { // Pad the input in the same way we did during the forward pass, so that // cuDNN or MIOpen receives the same input during the backward pass function // as it did during the forward pass function. const int64 padding_rows_diff = std::abs(padding_bottom - padding_top); const int64 padding_cols_diff = std::abs(padding_right - padding_left); const int64 new_in_rows = dims.spatial_dims[0].input_size + padding_rows_diff; const int64 new_in_cols = dims.spatial_dims[1].input_size + padding_cols_diff; const int64 input_pad_top = padding_top - common_padding_rows; const int64 input_pad_bottom = padding_bottom - common_padding_rows; const int64 input_pad_left = padding_left - common_padding_cols; const int64 input_pad_right = padding_right - common_padding_cols; OP_REQUIRES_OK( ctx, ctx->allocate_temp( DataTypeToEnum<T>::value, ShapeFromFormat(data_format, dims.batch_size, new_in_rows, new_in_cols, dims.in_depth), &compatible_input)); functor::PadInput<GPUDevice, T, int, 4>()( ctx->template eigen_device<GPUDevice>(), To32Bit(input.tensor<T, 4>()), {{static_cast<int>(input_pad_top), static_cast<int>(input_pad_left)}}, {{static_cast<int>(input_pad_bottom), static_cast<int>(input_pad_right)}}, To32Bit(compatible_input.tensor<T, 4>()), data_format, T{}); } else { compatible_input = input; } CHECK(common_padding_rows >= 0 && common_padding_cols >= 0) // Crash OK << "Negative row or col paddings: (" << common_padding_rows << ", " << common_padding_cols << ")"; // The Tensor Core in NVIDIA Volta+ GPUs supports efficient convolution with // fp16 in NHWC data layout. In all other configurations it's more efficient // to run computation in NCHW data format. const bool compute_in_nhwc = DataTypeToEnum<T>::value == DT_HALF && IsVoltaOrLater(*stream->parent()); // We only do one directional conversion: NHWC->NCHW. We never convert in the // other direction. Grappler layout optimizer selects the preferred layout and // adds necessary annotations to the graph. const TensorFormat compute_data_format = (compute_in_nhwc && data_format == FORMAT_NHWC) ? 
FORMAT_NHWC : FORMAT_NCHW; VLOG(3) << "Compute Conv2DBackpropFilter with cuDNN:" << " data_format=" << ToString(data_format) << " compute_data_format=" << ToString(compute_data_format); constexpr auto kComputeInNHWC = std::make_tuple(se::dnn::DataLayout::kBatchYXDepth, se::dnn::FilterLayout::kOutputYXInput); constexpr auto kComputeInNCHW = std::make_tuple(se::dnn::DataLayout::kBatchDepthYX, se::dnn::FilterLayout::kOutputInputYX); se::dnn::DataLayout compute_data_layout; se::dnn::FilterLayout filter_layout; std::tie(compute_data_layout, filter_layout) = compute_data_format == FORMAT_NHWC ? kComputeInNHWC : kComputeInNCHW; se::dnn::BatchDescriptor input_desc; input_desc.set_count(dims.batch_size) .set_height(GetTensorDim(compatible_input, data_format, 'H')) .set_width(GetTensorDim(compatible_input, data_format, 'W')) .set_feature_map_count(dims.in_depth) .set_layout(compute_data_layout); se::dnn::BatchDescriptor output_desc; output_desc.set_count(dims.batch_size) .set_height(dims.spatial_dims[0].output_size) .set_width(dims.spatial_dims[1].output_size) .set_feature_map_count(dims.out_depth) .set_layout(compute_data_layout); se::dnn::FilterDescriptor filter_desc; filter_desc.set_input_filter_height(dims.spatial_dims[0].filter_size) .set_input_filter_width(dims.spatial_dims[1].filter_size) .set_input_feature_map_count(filter_shape.dim_size(2)) .set_output_feature_map_count(filter_shape.dim_size(3)) .set_layout(filter_layout); se::dnn::ConvolutionDescriptor conv_desc; conv_desc.set_vertical_dilation_rate(dims.spatial_dims[0].dilation) .set_horizontal_dilation_rate(dims.spatial_dims[1].dilation) .set_vertical_filter_stride(dims.spatial_dims[0].stride) .set_horizontal_filter_stride(dims.spatial_dims[1].stride) .set_zero_padding_height(common_padding_rows) .set_zero_padding_width(common_padding_cols) .set_group_count(dims.in_depth / filter_shape.dim_size(2)); // Tensorflow filter format: HWIO // cuDNN filter formats: (data format) -> (filter format) // (1) NCHW -> OIHW // (2) NHWC -> OHWI // // We compute filter backprop into temporary tensor, and then convert it to // the HWIO data format at the end. Tensor pre_transformed_filter_backprop; OP_REQUIRES_OK( ctx, ctx->allocate_temp( DataTypeToEnum<T>::value, TensorShape({filter_shape.dim_size(3), filter_shape.dim_size(2), filter_shape.dim_size(0), filter_shape.dim_size(1)}), &pre_transformed_filter_backprop)); Tensor transformed_out_backprop; if (data_format == FORMAT_NHWC && compute_data_format == FORMAT_NCHW) { VLOG(4) << "Convert the `out_backprop` tensor from NHWC to NCHW."; TensorShape compute_shape = ShapeFromFormat( compute_data_format, dims.batch_size, dims.spatial_dims[0].output_size, dims.spatial_dims[1].output_size, dims.out_depth); if (dims.out_depth > 1) { OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, compute_shape, &transformed_out_backprop)); functor::NHWCToNCHW<GPUDevice, T, 4>()( ctx->eigen_device<GPUDevice>(), out_backprop.tensor<T, 4>(), transformed_out_backprop.tensor<T, 4>()); } else { // If depth <= 1, just reshape. 
CHECK(transformed_out_backprop.CopyFrom(out_backprop, compute_shape)); } } else { transformed_out_backprop = out_backprop; } Tensor transformed_input; if (data_format == FORMAT_NHWC && compute_data_format == FORMAT_NCHW) { VLOG(4) << "Convert the `input` tensor from NHWC to NCHW."; TensorShape compute_shape = ShapeFromFormat( compute_data_format, GetTensorDim(compatible_input, data_format, 'N'), GetTensorDim(compatible_input, data_format, 'H'), GetTensorDim(compatible_input, data_format, 'W'), GetTensorDim(compatible_input, data_format, 'C')); if (compute_shape.dim_size(1) > 1) { OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value, compute_shape, &transformed_input)); functor::NHWCToNCHW<GPUDevice, T, 4>()( ctx->eigen_device<GPUDevice>(), const_cast<const Tensor&>(compatible_input).tensor<T, 4>(), transformed_input.tensor<T, 4>()); } else { // If depth <= 1, just reshape. CHECK(transformed_input.CopyFrom(compatible_input, compute_shape)); } } else { transformed_input = compatible_input; } se::DeviceMemory<T> out_backprop_ptr = AsDeviceMemory(transformed_out_backprop.template flat<T>().data(), transformed_out_backprop.template flat<T>().size()); se::DeviceMemory<T> filter_backprop_ptr = AsDeviceMemory(pre_transformed_filter_backprop.template flat<T>().data(), pre_transformed_filter_backprop.template flat<T>().size()); auto input_ptr = AsDeviceMemory(transformed_input.template flat<T>().data(), transformed_input.template flat<T>().size()); static int64 ConvolveBackwardFilterScratchSize = GetDnnWorkspaceLimit( "TF_CUDNN_WORKSPACE_LIMIT_IN_MB", 1LL << 32 // 4GB by default ); int device_id = stream->parent()->device_ordinal(); DataType dtype = input.dtype(); ConvParameters conv_parameters = { dims.batch_size, // batch dims.in_depth, // in_depths {{input_desc.height(), // in_rows input_desc.width()}}, // in_cols compute_data_format, // compute_data_format dims.out_depth, // out_depths {{dims.spatial_dims[0].filter_size, // filter_rows dims.spatial_dims[1].filter_size, // filter_cols filter_shape.dim_size(2)}}, // filter_depth {{dims.spatial_dims[0].dilation, // dilation_rows dims.spatial_dims[1].dilation}}, // dilation_cols {{dims.spatial_dims[0].stride, // stride_rows dims.spatial_dims[1].stride}}, // stride_cols {{common_padding_rows, // padding_rows common_padding_cols}}, // padding_cols dtype, // tensor datatype device_id, // device_id conv_desc.group_count() // group_count }; #if TENSORFLOW_USE_ROCM // cudnn_use_autotune is applicable only the CUDA flow // for ROCm/MIOpen, we need to call GetMIOpenConvolveAlgorithms explicitly // if we do not have a cached algorithm_config for this conv_parameters cudnn_use_autotune = true; #endif AlgorithmConfig algorithm_config; if (cudnn_use_autotune && !AutoTuneConvBwdFilter::GetInstance()->Find( conv_parameters, &algorithm_config)) { std::vector<std::unique_ptr<se::dnn::ConvolveExecutionPlan>> plans; #if GOOGLE_CUDA std::vector<AlgorithmDesc> algorithms; std::vector<AlgorithmConfig> configs; if (CudnnUseFrontend()) { OP_REQUIRES( ctx, stream->parent()->GetConvolveExecutionPlans( se::dnn::ConvolutionKind::BACKWARD_FILTER, se::dnn::ToDataType<T>::value, stream, input_desc, filter_desc, output_desc, conv_desc, &plans), errors::Unknown("Failed to get convolution execution plan. 
This is " "probably because cuDNN failed to initialize, so try " "looking to see if a warning log message was printed " "above.")); for (const auto& plan : plans) { configs.push_back( AlgorithmConfig(AlgorithmDesc{plan->getTag(), plan->get_raw_desc()}, plan->getWorkspaceSize())); } } else { OP_REQUIRES( ctx, stream->parent()->GetConvolveBackwardFilterAlgorithms( conv_parameters.ShouldIncludeWinogradNonfusedAlgo<T>( stream->parent()), &algorithms), errors::Unknown("Failed to get convolution execution plan. This is " "probably because cuDNN failed to initialize, so try " "looking to see if a warning log message was printed " "above.")); for (const auto& algorithm : algorithms) { configs.push_back(AlgorithmConfig(algorithm)); } } se::TfAllocatorAdapter tf_allocator_adapter(ctx->device()->GetAllocator({}), stream); se::RedzoneAllocator rz_allocator(stream, &tf_allocator_adapter, se::GpuAsmOpts()); se::DeviceMemory<T> filter_backprop_ptr_rz( WrapRedzoneBestEffort(&rz_allocator, filter_backprop_ptr)); std::vector<tensorflow::AutotuneResult> results; for (auto& profile_config : configs) { // TODO(zhengxq): profile each algorithm multiple times to better // accuracy. DnnScratchAllocator scratch_allocator(ConvolveBackwardFilterScratchSize, ctx); se::RedzoneAllocator rz_scratch_allocator( stream, &tf_allocator_adapter, se::GpuAsmOpts(), /*memory_limit=*/ConvolveBackwardFilterScratchSize); se::ScratchAllocator* allocator_used = !RedzoneCheckDisabled() ? static_cast<se::ScratchAllocator*>(&rz_scratch_allocator) : static_cast<se::ScratchAllocator*>(&scratch_allocator); ProfileResult profile_result; Status cudnn_launch_status; if (CudnnUseFrontend()) { cudnn_launch_status = stream->ConvolveBackwardFilterWithExecutionPlan( input_desc, input_ptr, output_desc, out_backprop_ptr, conv_desc, filter_desc, &filter_backprop_ptr_rz, allocator_used, profile_config, &profile_result); } else { cudnn_launch_status = stream->ConvolveBackwardFilterWithAlgorithm( input_desc, input_ptr, output_desc, out_backprop_ptr, conv_desc, filter_desc, &filter_backprop_ptr_rz, allocator_used, profile_config, &profile_result); } if (cudnn_launch_status.ok() && profile_result.is_valid()) { results.emplace_back(); auto& result = results.back(); if (CudnnUseFrontend()) { result.mutable_cuda_conv_plan()->set_exec_plan_id( profile_config.algorithm()->exec_plan_id()); } else { result.mutable_conv()->set_algorithm( profile_config.algorithm()->algo_id()); result.mutable_conv()->set_tensor_ops_enabled( profile_config.algorithm()->tensor_ops_enabled()); } result.set_scratch_bytes( !RedzoneCheckDisabled() ? rz_scratch_allocator.TotalAllocatedBytesExcludingRedzones() : scratch_allocator.TotalByteSize()); *result.mutable_run_time() = proto_utils::ToDurationProto( absl::Milliseconds(profile_result.elapsed_time_in_ms())); CheckRedzones(rz_scratch_allocator, &result); CheckRedzones(rz_allocator, &result); } else if (CudnnUseFrontend()) { // When CuDNN frontend APIs are used, we need to make sure the profiling // results are one-to-one mapping of the "plans". So, we insert dummy // results when the excution fails. 
results.emplace_back(); auto& result = results.back(); result.mutable_failure()->set_kind(AutotuneResult::UNKNOWN); result.mutable_failure()->set_msg( absl::StrCat("Profiling failure on CUDNN engine: ", profile_config.algorithm()->exec_plan_id())); } } #elif TENSORFLOW_USE_ROCM DnnScratchAllocator scratch_allocator(ConvolveBackwardFilterScratchSize, ctx); std::vector<ProfileResult> algorithms; OP_REQUIRES( ctx, stream->parent()->GetMIOpenConvolveAlgorithms( se::dnn::ConvolutionKind::BACKWARD_FILTER, se::dnn::ToDataType<T>::value, stream, input_desc, input_ptr, filter_desc, filter_backprop_ptr, output_desc, out_backprop_ptr, conv_desc, &scratch_allocator, &algorithms), errors::Unknown( "Failed to get convolution algorithm. This is probably " "because MIOpen failed to initialize, so try looking to " "see if a warning log message was printed above.")); std::vector<tensorflow::AutotuneResult> results; if (algorithms.size() == 1) { auto profile_result = algorithms[0]; results.emplace_back(); auto& result = results.back(); result.mutable_conv()->set_algorithm( profile_result.algorithm().algo_id()); result.mutable_conv()->set_tensor_ops_enabled( profile_result.algorithm().tensor_ops_enabled()); result.set_scratch_bytes(profile_result.scratch_size()); *result.mutable_run_time() = proto_utils::ToDurationProto( absl::Milliseconds(profile_result.elapsed_time_in_ms())); } else { for (auto miopen_algorithm : algorithms) { auto profile_algorithm = miopen_algorithm.algorithm(); ProfileResult profile_result; auto miopen_launch_status = stream->ConvolveBackwardFilterWithAlgorithm( input_desc, input_ptr, output_desc, out_backprop_ptr, conv_desc, filter_desc, &filter_backprop_ptr, &scratch_allocator, AlgorithmConfig(profile_algorithm, miopen_algorithm.scratch_size()), &profile_result); if (miopen_launch_status.ok() && profile_result.is_valid()) { results.emplace_back(); auto& result = results.back(); result.mutable_conv()->set_algorithm(profile_algorithm.algo_id()); result.mutable_conv()->set_tensor_ops_enabled( profile_algorithm.tensor_ops_enabled()); result.set_scratch_bytes(scratch_allocator.TotalByteSize()); *result.mutable_run_time() = proto_utils::ToDurationProto( absl::Milliseconds(profile_result.elapsed_time_in_ms())); } } } #endif LogConvAutotuneResults(se::dnn::ConvolutionKind::BACKWARD_FILTER, se::dnn::ToDataType<T>::value, input_ptr, filter_backprop_ptr, out_backprop_ptr, input_desc, filter_desc, output_desc, conv_desc, stream->parent(), results); if (CudnnUseFrontend()) { OP_REQUIRES_OK( ctx, BestCudnnConvAlgorithm(results, &plans, &algorithm_config)); } else { OP_REQUIRES_OK( ctx, BestCudnnConvAlgorithm(results, nullptr, &algorithm_config)); } AutoTuneConvBwdFilter::GetInstance()->Insert(conv_parameters, algorithm_config); } Status cudnn_launch_status; DnnScratchAllocator scratch_allocator(ConvolveBackwardFilterScratchSize, ctx); if (CudnnUseFrontend()) { if (algorithm_config.algorithm().has_value()) { VLOG(4) << "Conv2DBackpropFilter Execution Plan: " << algorithm_config.algorithm()->exec_plan_id(); } else { VLOG(4) << "Convolution AutoTune has been turned off"; } cudnn_launch_status = stream->ConvolveBackwardFilterWithExecutionPlan( input_desc, input_ptr, output_desc, out_backprop_ptr, conv_desc, filter_desc, &filter_backprop_ptr, &scratch_allocator, algorithm_config, nullptr); } else { cudnn_launch_status = stream->ConvolveBackwardFilterWithAlgorithm( input_desc, input_ptr, output_desc, out_backprop_ptr, conv_desc, filter_desc, &filter_backprop_ptr, &scratch_allocator, algorithm_config, 
nullptr); } if (!cudnn_launch_status.ok()) { ctx->SetStatus(cudnn_launch_status); return; } FilterTensorFormat src_filter_format = compute_data_format == FORMAT_NCHW ? FORMAT_OIHW : FORMAT_OHWI; auto toConstTensor = [](const Tensor& x) -> const Tensor { return x; }; functor::ReverseTransformFilter<GPUDevice, T, 4>()( ctx->eigen_device<GPUDevice>(), src_filter_format, toConstTensor(pre_transformed_filter_backprop).template tensor<T, 4>(), filter_backprop->tensor<T, 4>()); } // Forward declarations of the functor specializations for GPU. namespace functor { #define DECLARE_GPU_SPEC(T) \ template <> \ void TransformFilter<GPUDevice, T, int, 4>::operator()( \ const GPUDevice& d, FilterTensorFormat dst_filter_format, \ typename TTypes<T, 4, int>::ConstTensor in, \ typename TTypes<T, 4, int>::Tensor out); \ extern template struct TransformFilter<GPUDevice, T, int, 4>; \ template <> \ void PadInput<GPUDevice, T, int, 4>::operator()( \ const GPUDevice& d, typename TTypes<T, 4, int>::ConstTensor in, \ const std::array<int, 2>& padding_left, \ const std::array<int, 2>& padding_right, \ typename TTypes<T, 4, int>::Tensor out, TensorFormat data_format, \ const T& padding_value); \ extern template struct PadInput<GPUDevice, T, int, 4>; DECLARE_GPU_SPEC(float); DECLARE_GPU_SPEC(Eigen::half); DECLARE_GPU_SPEC(double); #undef DECLARE_GPU_SPEC } // namespace functor REGISTER_KERNEL_BUILDER(Name("Conv2DBackpropFilter") .Device(DEVICE_GPU) .TypeConstraint<double>("T") .HostMemory("filter_sizes"), Conv2DBackpropFilterOp<GPUDevice, double>); REGISTER_KERNEL_BUILDER(Name("Conv2DBackpropFilter") .Device(DEVICE_GPU) .TypeConstraint<float>("T") .HostMemory("filter_sizes"), Conv2DBackpropFilterOp<GPUDevice, float>); REGISTER_KERNEL_BUILDER(Name("Conv2DBackpropFilter") .Device(DEVICE_GPU) .TypeConstraint<Eigen::half>("T") .HostMemory("filter_sizes"), Conv2DBackpropFilterOp<GPUDevice, Eigen::half>); // To be used inside depthwise_conv_grad_op.cc. // TODO(reedwm): Move this and the definition to depthwise_conv_grad_op.cc. template struct LaunchConv2DBackpropFilterOp<GPUDevice, float>; template struct LaunchConv2DBackpropFilterOp<GPUDevice, Eigen::half>; template struct LaunchConv2DBackpropFilterOp<GPUDevice, double>; #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM } // namespace tensorflow
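A minimal sketch of the GEMM fast path near the top of this kernel (an editor's illustration, not part of the TensorFlow file): the ThenBlasGemm call with m = in_rows * in_cols * in_depth, k = batch_size and n = out_depth corresponds to the case where the spatial output is 1x1, so the filter gradient reduces to a plain matrix product. Viewing the input as a [batch, m] matrix and out_backprop as a [batch, n] matrix, filter_backprop is the [m, n] product input^T * out_backprop. All names below are local to this sketch.

#include <cstdint>
#include <vector>

// Naive reference for the BLAS fast path: filter_backprop = input^T * out_backprop.
void NaiveFilterBackpropGemm(const std::vector<float>& input,        // [batch, m], row-major
                             const std::vector<float>& out_backprop, // [batch, n], row-major
                             std::vector<float>* filter_backprop,    // [m, n], row-major
                             int64_t batch, int64_t m, int64_t n) {
  filter_backprop->assign(m * n, 0.0f);
  for (int64_t b = 0; b < batch; ++b) {
    for (int64_t i = 0; i < m; ++i) {
      for (int64_t o = 0; o < n; ++o) {
        // Accumulate the outer products over the batch dimension.
        (*filter_backprop)[i * n + o] += input[b * m + i] * out_backprop[b * n + o];
      }
    }
  }
}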
null
253
CWE-787
CVE-2021-29542
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <vector> #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/status_test_util.h" namespace tensorflow { namespace text { using tensorflow::FakeInput; using tensorflow::NodeDefBuilder; using tensorflow::Status; using tensorflow::TensorShape; class NgramKernelTest : public tensorflow::OpsTestBase { public: void MakeOp(string separator, std::vector<int> ngram_width, string left_pad, string right_pad, int pad_width, bool preserve) { TF_ASSERT_OK(NodeDefBuilder("tested_op", "StringNGrams") .Attr("separator", separator) .Attr("ngram_widths", ngram_width) .Attr("left_pad", left_pad) .Attr("right_pad", right_pad) .Attr("pad_width", pad_width) .Attr("preserve_short_sequences", preserve) .Input(FakeInput()) .Input(FakeInput()) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } void assert_string_equal(const std::vector<tstring> &expected, const Tensor &value) { Tensor expected_tensor(allocator(), DT_STRING, TensorShape({static_cast<int64>(expected.size())})); test::FillValues<tstring>(&expected_tensor, expected); test::ExpectTensorEqual<tstring>(expected_tensor, value); } void assert_int64_equal(const std::vector<int64> &expected, const Tensor &value) { Tensor expected_tensor(allocator(), DT_INT64, TensorShape({static_cast<int64>(expected.size())})); test::FillValues<int64>(&expected_tensor, expected); test::ExpectTensorEqual<int64>(expected_tensor, value); } }; TEST_F(NgramKernelTest, TestPaddedTrigrams) { MakeOp("|", {3}, "LP", "RP", -1, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( // {"LP|LP|a", "LP|a|b", "a|b|c", "b|c|d", "c|d|RP", "d|RP|RP", // 0 "LP|LP|e", "LP|e|f", "e|f|RP", "f|RP|RP"}); // 1 std::vector<int64> expected_splits({0, 6, 10}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestPaddedBigramsAndTrigrams) { MakeOp("|", {2, 3}, "LP", "RP", -1, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( {"LP|a", "a|b", "b|c", 
"c|d", "d|RP", "LP|LP|a", "LP|a|b", "a|b|c", "b|c|d", "c|d|RP", "d|RP|RP", // 0 "LP|e", "e|f", "f|RP", "LP|LP|e", "LP|e|f", "e|f|RP", "f|RP|RP"}); // 1 std::vector<int64> expected_splits({0, 11, 18}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestPaddedBigrams) { MakeOp("|", {2}, "LP", "RP", -1, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( // {"LP|a", "a|b", "b|c", "c|d", "d|RP", // 0 "LP|e", "e|f", "f|RP"}); // 1 std::vector<int64> expected_splits({0, 5, 8}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestPaddingIsAtMostNGramSizeMinus1) { MakeOp("|", {2}, "LP", "RP", 4, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( // {"LP|a", "a|b", "b|c", "c|d", "d|RP", // 0 "LP|e", "e|f", "f|RP"}); // 1 std::vector<int64> expected_splits({0, 5, 8}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestPaddedUnigramAndBigrams) { MakeOp("|", {1, 2}, "LP", "RP", -1, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( // {"a", "b", "c", "d", "LP|a", "a|b", "b|c", "c|d", "d|RP", // 0 "e", "f", "LP|e", "e|f", "f|RP"}); // 1 std::vector<int64> expected_splits({0, 9, 14}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestOverlappingPaddedNGrams) { // This test validates that n-grams with both left and right padding in a // single ngram token are created correctly. MakeOp("|", {3}, "LP", "RP", -1, false); // Batch items are: // 0: "a" // 1: "b", "c", "d" // 2: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({4}), {0, 1, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( // {"LP|LP|a", "LP|a|RP", "a|RP|RP", // ngrams for elem. 0 "LP|LP|b", "LP|b|c", "b|c|d", "c|d|RP", "d|RP|RP", // ngrams for elem. 1 "LP|LP|e", "LP|e|f", "e|f|RP", "f|RP|RP"}); // ngrams for elem. 
2 std::vector<int64> expected_splits({0, 3, 8, 12}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestOverlappingPaddedMultiCharNGrams) { MakeOp("|", {3}, "LP", "RP", -1, false); // Batch items are: // 0: "a" // 1: "b", "c", "d" // 2: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"aa", "bb", "cc", "dd", "ee", "ff"}); AddInputFromArray<int64>(TensorShape({4}), {0, 1, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( // {"LP|LP|aa", "LP|aa|RP", "aa|RP|RP", // "LP|LP|bb", "LP|bb|cc", "bb|cc|dd", "cc|dd|RP", "dd|RP|RP", // "LP|LP|ee", "LP|ee|ff", "ee|ff|RP", "ff|RP|RP"}); // std::vector<int64> expected_splits({0, 3, 8, 12}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestMultiOverlappingPaddedNGrams) { // This test validates that n-grams with more than 1 padding value on each // side are created correctly. MakeOp("|", {5}, "LP", "RP", -1, false); // Batch items are: // 0: "a" AddInputFromArray<tstring>(TensorShape({1}), {"a"}); AddInputFromArray<int64>(TensorShape({2}), {0, 1}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"LP|LP|LP|LP|a", "LP|LP|LP|a|RP", "LP|LP|a|RP|RP", "LP|a|RP|RP|RP", "a|RP|RP|RP|RP"}); std::vector<int64> expected_splits({0, 5}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedTrigrams) { MakeOp("|", {3}, "", "", 0, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"a|b|c", "b|c|d"}); std::vector<int64> expected_splits({0, 2, 2}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedTrigramsWithEmptySequence) { MakeOp("|", {3}, "", "", 0, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({4}), {0, 4, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"a|b|c", "b|c|d"}); std::vector<int64> expected_splits({0, 2, 2, 2}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedTrigramsWithPreserveShort) { MakeOp("|", {3}, "", "", 0, true); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"a|b|c", "b|c|d", "e|f"}); std::vector<int64> expected_splits({0, 2, 3}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedTrigramsWithPreserveShortAndEmptySequence) { MakeOp("|", {3}, "", "", 0, true); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({4}), {0, 4, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"a|b|c", "b|c|d", "e|f"}); std::vector<int64> expected_splits({0, 2, 2, 
3}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedTrigramsAndQuadgramsWithPreserveShort) { MakeOp("|", {4, 3}, "", "", 0, true); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"a|b|c|d", "a|b|c", "b|c|d", "e|f"}); std::vector<int64> expected_splits({0, 3, 4}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedBigramsAndTrigrams) { MakeOp("|", {2, 3}, "", "", 0, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( {"a|b", "b|c", "c|d", "a|b|c", "b|c|d", "e|f"}); std::vector<int64> expected_splits({0, 5, 6}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedBigramsAndTrigramsWithPreserveShort) { MakeOp("|", {2, 3}, "", "", 0, true); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); // Note that in this case, because the bigram 'e|f' was already generated, // the op will not generate a special preserve_short bigram. std::vector<tstring> expected_values( {"a|b", "b|c", "c|d", "a|b|c", "b|c|d", "e|f"}); std::vector<int64> expected_splits({0, 5, 6}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedTrigramsAndBigramsWithPreserveShort) { MakeOp("|", {3, 2}, "", "", 0, true); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); // Note that in this case, because the bigram 'e|f' was already generated, // the op will not generate a special preserve_short bigram. 
std::vector<tstring> expected_values( {"a|b|c", "b|c|d", "a|b", "b|c", "c|d", "e|f"}); std::vector<int64> expected_splits({0, 5, 6}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedBigrams) { MakeOp("|", {2}, "", "", 0, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"a|b", "b|c", "c|d", "e|f"}); std::vector<int64> expected_splits({0, 3, 4}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestOverlappingUnpaddedNGrams) { MakeOp("|", {3}, "", "", 0, false); // Batch items are: // 0: "a" // 1: "b", "c", "d" // 2: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({4}), {0, 1, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"b|c|d"}); std::vector<int64> expected_splits({0, 0, 1, 1}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestOverlappingUnpaddedNGramsNoOutput) { MakeOp("|", {5}, "", "", 0, false); // Batch items are: // 0: "a" // 1: "b", "c", "d" // 2: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({4}), {0, 1, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({}); std::vector<int64> expected_splits({0, 0, 0, 0}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestSinglyPaddedTrigrams) { MakeOp("|", {3}, "LP", "RP", 1, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"LP|a|b", "a|b|c", "b|c|d", "c|d|RP", // "LP|e|f", "e|f|RP"}); std::vector<int64> expected_splits({0, 4, 6}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestSinglyPaddedBigrams) { MakeOp("|", {2}, "LP", "RP", 1, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"LP|a", "a|b", "b|c", "c|d", "d|RP", // "LP|e", "e|f", "f|RP"}); std::vector<int64> expected_splits({0, 5, 8}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestSinglyPaddedBigramsAnd5grams) { MakeOp("|", {2, 5}, "LP", "RP", 1, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( // {"LP|a", "a|b", "b|c", "c|d", "d|RP", "LP|a|b|c|d", "a|b|c|d|RP", // "LP|e", "e|f", "f|RP"}); std::vector<int64> expected_splits({0, 7, 10}); assert_string_equal(expected_values, *GetOutput(0)); 
assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestSinglyPadded5gramsWithPreserveShort) { MakeOp("|", {5}, "LP", "RP", 1, true); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( // {"LP|a|b|c|d", "a|b|c|d|RP", // "LP|e|f|RP"}); std::vector<int64> expected_splits({0, 2, 3}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestOverlappingSinglyPaddedNGrams) { MakeOp("|", {3}, "LP", "RP", 1, false); // Batch items are: // 0: "a" // 1: "b", "c", "d" // 2: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({4}), {0, 1, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( {"LP|a|RP", // ngrams for elem. 0 "LP|b|c", "b|c|d", "c|d|RP", // ngrams for elem. 1 "LP|e|f", "e|f|RP"}); // ngrams for elem. 2 std::vector<int64> expected_splits({0, 1, 4, 6}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestOverlappingSinglyPaddedNGramsNoOutput) { MakeOp("|", {5}, "LP", "RP", 1, false); // Batch items are: // 0: "a" // 1: "b", "c", "d" // 2: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({4}), {0, 1, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"LP|b|c|d|RP"}); std::vector<int64> expected_splits({0, 0, 1, 1}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestSinglyPaddedUnigrams) { MakeOp("|", {1}, "LP", "RP", 1, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"a", "b", "c", "d", "e", "f"}); std::vector<int64> expected_splits({0, 4, 6}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestEmptyInput) { MakeOp("|", {1}, "LP", "RP", 3, false); AddInputFromArray<tstring>(TensorShape({0}), {}); AddInputFromArray<int64>(TensorShape({0}), {}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({}); std::vector<int64> expected_splits({}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, ShapeFn) { ShapeInferenceTestOp op("StringNGrams"); INFER_OK(op, "?;?", "[?];[?]"); INFER_OK(op, "[1];?", "[?];[?]"); INFER_OK(op, "[1];[2]", "[?];in1"); INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];?"); INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[]"); } } // namespace text } // namespace tensorflow
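To make the expected_values in the tests above concrete, here is a small editor's sketch (not the StringNGrams kernel itself) of how the padded n-grams for one batch item can be reproduced in the cases without preserve_short_sequences: pad the sequence with up to width - 1 pad tokens on each side (or pad_width tokens when pad_width >= 0), then join every window of width tokens with the separator. All names are local to this sketch.

#include <algorithm>
#include <string>
#include <vector>

std::vector<std::string> PaddedNGrams(const std::vector<std::string>& tokens,
                                      int width, const std::string& separator,
                                      const std::string& left_pad,
                                      const std::string& right_pad, int pad_width) {
  // pad_width == -1 means "maximum useful padding", i.e. width - 1 pads per side.
  const int pad = pad_width < 0 ? width - 1 : std::min(pad_width, width - 1);
  std::vector<std::string> padded(pad, left_pad);
  padded.insert(padded.end(), tokens.begin(), tokens.end());
  padded.insert(padded.end(), pad, right_pad);
  std::vector<std::string> ngrams;
  for (int start = 0; start + width <= static_cast<int>(padded.size()); ++start) {
    std::string ngram = padded[start];
    for (int i = 1; i < width; ++i) ngram += separator + padded[start + i];
    ngrams.push_back(ngram);
  }
  return ngrams;
}

// For example, PaddedNGrams({"a", "b", "c", "d"}, 3, "|", "LP", "RP", -1) yields the six
// trigrams expected for batch item 0 in TestPaddedTrigrams: "LP|LP|a" ... "d|RP|RP".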
null
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <vector> #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/status_test_util.h" namespace tensorflow { namespace text { using tensorflow::FakeInput; using tensorflow::NodeDefBuilder; using tensorflow::Status; using tensorflow::TensorShape; class NgramKernelTest : public tensorflow::OpsTestBase { public: void MakeOp(string separator, std::vector<int> ngram_width, string left_pad, string right_pad, int pad_width, bool preserve) { TF_ASSERT_OK(NodeDefBuilder("tested_op", "StringNGrams") .Attr("separator", separator) .Attr("ngram_widths", ngram_width) .Attr("left_pad", left_pad) .Attr("right_pad", right_pad) .Attr("pad_width", pad_width) .Attr("preserve_short_sequences", preserve) .Input(FakeInput()) .Input(FakeInput()) .Finalize(node_def())); TF_ASSERT_OK(InitOp()); } void assert_string_equal(const std::vector<tstring> &expected, const Tensor &value) { Tensor expected_tensor(allocator(), DT_STRING, TensorShape({static_cast<int64>(expected.size())})); test::FillValues<tstring>(&expected_tensor, expected); test::ExpectTensorEqual<tstring>(expected_tensor, value); } void assert_int64_equal(const std::vector<int64> &expected, const Tensor &value) { Tensor expected_tensor(allocator(), DT_INT64, TensorShape({static_cast<int64>(expected.size())})); test::FillValues<int64>(&expected_tensor, expected); test::ExpectTensorEqual<int64>(expected_tensor, value); } }; TEST_F(NgramKernelTest, TestPaddedTrigrams) { MakeOp("|", {3}, "LP", "RP", -1, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( // {"LP|LP|a", "LP|a|b", "a|b|c", "b|c|d", "c|d|RP", "d|RP|RP", // 0 "LP|LP|e", "LP|e|f", "e|f|RP", "f|RP|RP"}); // 1 std::vector<int64> expected_splits({0, 6, 10}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestPaddedBigramsAndTrigrams) { MakeOp("|", {2, 3}, "LP", "RP", -1, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( {"LP|a", "a|b", "b|c", 
"c|d", "d|RP", "LP|LP|a", "LP|a|b", "a|b|c", "b|c|d", "c|d|RP", "d|RP|RP", // 0 "LP|e", "e|f", "f|RP", "LP|LP|e", "LP|e|f", "e|f|RP", "f|RP|RP"}); // 1 std::vector<int64> expected_splits({0, 11, 18}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestPaddedBigrams) { MakeOp("|", {2}, "LP", "RP", -1, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( // {"LP|a", "a|b", "b|c", "c|d", "d|RP", // 0 "LP|e", "e|f", "f|RP"}); // 1 std::vector<int64> expected_splits({0, 5, 8}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestPaddingIsAtMostNGramSizeMinus1) { MakeOp("|", {2}, "LP", "RP", 4, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( // {"LP|a", "a|b", "b|c", "c|d", "d|RP", // 0 "LP|e", "e|f", "f|RP"}); // 1 std::vector<int64> expected_splits({0, 5, 8}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestPaddedUnigramAndBigrams) { MakeOp("|", {1, 2}, "LP", "RP", -1, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( // {"a", "b", "c", "d", "LP|a", "a|b", "b|c", "c|d", "d|RP", // 0 "e", "f", "LP|e", "e|f", "f|RP"}); // 1 std::vector<int64> expected_splits({0, 9, 14}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestOverlappingPaddedNGrams) { // This test validates that n-grams with both left and right padding in a // single ngram token are created correctly. MakeOp("|", {3}, "LP", "RP", -1, false); // Batch items are: // 0: "a" // 1: "b", "c", "d" // 2: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({4}), {0, 1, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( // {"LP|LP|a", "LP|a|RP", "a|RP|RP", // ngrams for elem. 0 "LP|LP|b", "LP|b|c", "b|c|d", "c|d|RP", "d|RP|RP", // ngrams for elem. 1 "LP|LP|e", "LP|e|f", "e|f|RP", "f|RP|RP"}); // ngrams for elem. 
2 std::vector<int64> expected_splits({0, 3, 8, 12}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestOverlappingPaddedMultiCharNGrams) { MakeOp("|", {3}, "LP", "RP", -1, false); // Batch items are: // 0: "a" // 1: "b", "c", "d" // 2: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"aa", "bb", "cc", "dd", "ee", "ff"}); AddInputFromArray<int64>(TensorShape({4}), {0, 1, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( // {"LP|LP|aa", "LP|aa|RP", "aa|RP|RP", // "LP|LP|bb", "LP|bb|cc", "bb|cc|dd", "cc|dd|RP", "dd|RP|RP", // "LP|LP|ee", "LP|ee|ff", "ee|ff|RP", "ff|RP|RP"}); // std::vector<int64> expected_splits({0, 3, 8, 12}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestMultiOverlappingPaddedNGrams) { // This test validates that n-grams with more than 1 padding value on each // side are created correctly. MakeOp("|", {5}, "LP", "RP", -1, false); // Batch items are: // 0: "a" AddInputFromArray<tstring>(TensorShape({1}), {"a"}); AddInputFromArray<int64>(TensorShape({2}), {0, 1}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"LP|LP|LP|LP|a", "LP|LP|LP|a|RP", "LP|LP|a|RP|RP", "LP|a|RP|RP|RP", "a|RP|RP|RP|RP"}); std::vector<int64> expected_splits({0, 5}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedTrigrams) { MakeOp("|", {3}, "", "", 0, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"a|b|c", "b|c|d"}); std::vector<int64> expected_splits({0, 2, 2}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedTrigramsWithEmptySequence) { MakeOp("|", {3}, "", "", 0, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({4}), {0, 4, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"a|b|c", "b|c|d"}); std::vector<int64> expected_splits({0, 2, 2, 2}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedTrigramsWithPreserveShort) { MakeOp("|", {3}, "", "", 0, true); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"a|b|c", "b|c|d", "e|f"}); std::vector<int64> expected_splits({0, 2, 3}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedTrigramsWithPreserveShortAndEmptySequence) { MakeOp("|", {3}, "", "", 0, true); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({4}), {0, 4, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"a|b|c", "b|c|d", "e|f"}); std::vector<int64> expected_splits({0, 2, 2, 
3}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedTrigramsAndQuadgramsWithPreserveShort) { MakeOp("|", {4, 3}, "", "", 0, true); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"a|b|c|d", "a|b|c", "b|c|d", "e|f"}); std::vector<int64> expected_splits({0, 3, 4}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedBigramsAndTrigrams) { MakeOp("|", {2, 3}, "", "", 0, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( {"a|b", "b|c", "c|d", "a|b|c", "b|c|d", "e|f"}); std::vector<int64> expected_splits({0, 5, 6}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedBigramsAndTrigramsWithPreserveShort) { MakeOp("|", {2, 3}, "", "", 0, true); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); // Note that in this case, because the bigram 'e|f' was already generated, // the op will not generate a special preserve_short bigram. std::vector<tstring> expected_values( {"a|b", "b|c", "c|d", "a|b|c", "b|c|d", "e|f"}); std::vector<int64> expected_splits({0, 5, 6}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedTrigramsAndBigramsWithPreserveShort) { MakeOp("|", {3, 2}, "", "", 0, true); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); // Note that in this case, because the bigram 'e|f' was already generated, // the op will not generate a special preserve_short bigram. 
std::vector<tstring> expected_values( {"a|b|c", "b|c|d", "a|b", "b|c", "c|d", "e|f"}); std::vector<int64> expected_splits({0, 5, 6}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestUnpaddedBigrams) { MakeOp("|", {2}, "", "", 0, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"a|b", "b|c", "c|d", "e|f"}); std::vector<int64> expected_splits({0, 3, 4}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestOverlappingUnpaddedNGrams) { MakeOp("|", {3}, "", "", 0, false); // Batch items are: // 0: "a" // 1: "b", "c", "d" // 2: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({4}), {0, 1, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"b|c|d"}); std::vector<int64> expected_splits({0, 0, 1, 1}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestOverlappingUnpaddedNGramsNoOutput) { MakeOp("|", {5}, "", "", 0, false); // Batch items are: // 0: "a" // 1: "b", "c", "d" // 2: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({4}), {0, 1, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({}); std::vector<int64> expected_splits({0, 0, 0, 0}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestSinglyPaddedTrigrams) { MakeOp("|", {3}, "LP", "RP", 1, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"LP|a|b", "a|b|c", "b|c|d", "c|d|RP", // "LP|e|f", "e|f|RP"}); std::vector<int64> expected_splits({0, 4, 6}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestSinglyPaddedBigrams) { MakeOp("|", {2}, "LP", "RP", 1, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"LP|a", "a|b", "b|c", "c|d", "d|RP", // "LP|e", "e|f", "f|RP"}); std::vector<int64> expected_splits({0, 5, 8}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestSinglyPaddedBigramsAnd5grams) { MakeOp("|", {2, 5}, "LP", "RP", 1, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( // {"LP|a", "a|b", "b|c", "c|d", "d|RP", "LP|a|b|c|d", "a|b|c|d|RP", // "LP|e", "e|f", "f|RP"}); std::vector<int64> expected_splits({0, 7, 10}); assert_string_equal(expected_values, *GetOutput(0)); 
assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestSinglyPadded5gramsWithPreserveShort) { MakeOp("|", {5}, "LP", "RP", 1, true); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( // {"LP|a|b|c|d", "a|b|c|d|RP", // "LP|e|f|RP"}); std::vector<int64> expected_splits({0, 2, 3}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestOverlappingSinglyPaddedNGrams) { MakeOp("|", {3}, "LP", "RP", 1, false); // Batch items are: // 0: "a" // 1: "b", "c", "d" // 2: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({4}), {0, 1, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( {"LP|a|RP", // ngrams for elem. 0 "LP|b|c", "b|c|d", "c|d|RP", // ngrams for elem. 1 "LP|e|f", "e|f|RP"}); // ngrams for elem. 2 std::vector<int64> expected_splits({0, 1, 4, 6}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestOverlappingSinglyPaddedNGramsNoOutput) { MakeOp("|", {5}, "LP", "RP", 1, false); // Batch items are: // 0: "a" // 1: "b", "c", "d" // 2: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({4}), {0, 1, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"LP|b|c|d|RP"}); std::vector<int64> expected_splits({0, 0, 1, 1}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestSinglyPaddedUnigrams) { MakeOp("|", {1}, "LP", "RP", 1, false); // Batch items are: // 0: "a", "b", "c", "d" // 1: "e", "f" AddInputFromArray<tstring>(TensorShape({6}), {"a", "b", "c", "d", "e", "f"}); AddInputFromArray<int64>(TensorShape({3}), {0, 4, 6}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({"a", "b", "c", "d", "e", "f"}); std::vector<int64> expected_splits({0, 4, 6}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestEmptyInput) { MakeOp("|", {1}, "LP", "RP", 3, false); AddInputFromArray<tstring>(TensorShape({0}), {}); AddInputFromArray<int64>(TensorShape({0}), {}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values({}); std::vector<int64> expected_splits({}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestNoTokens) { MakeOp("|", {3}, "L", "R", -1, false); // Batch items are: // 0: // 1: "a" AddInputFromArray<tstring>(TensorShape({1}), {"a"}); AddInputFromArray<int64>(TensorShape({3}), {0, 0, 1}); TF_ASSERT_OK(RunOpKernel()); std::vector<tstring> expected_values( {"L|L|R", "L|R|R", // no input in first split "L|L|a", "L|a|R", "a|R|R"}); // second split std::vector<int64> expected_splits({0, 2, 5}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, TestNoTokensNoPad) { MakeOp("|", {3}, "", "", 0, false); // Batch items are: // 0: // 1: "a" AddInputFromArray<tstring>(TensorShape({1}), {"a"}); AddInputFromArray<int64>(TensorShape({3}), {0, 0, 1}); TF_ASSERT_OK(RunOpKernel()); 
std::vector<tstring> expected_values({}); std::vector<int64> expected_splits({0, 0, 0}); assert_string_equal(expected_values, *GetOutput(0)); assert_int64_equal(expected_splits, *GetOutput(1)); } TEST_F(NgramKernelTest, ShapeFn) { ShapeInferenceTestOp op("StringNGrams"); INFER_OK(op, "?;?", "[?];[?]"); INFER_OK(op, "[1];?", "[?];[?]"); INFER_OK(op, "[1];[2]", "[?];in1"); INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];?"); INFER_ERROR("Shape must be rank 1 but is rank 0", op, "?;[]"); } } // namespace text } // namespace tensorflow
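Relative to the test file above, this patched version appears to differ only by the added TestNoTokens and TestNoTokensNoPad cases, which cover batch items containing zero tokens. With the PaddedNGrams sketch shown earlier (an editor's illustration, not part of this file), PaddedNGrams({}, 3, "|", "L", "R", -1) yields {"L|L|R", "L|R|R"} and PaddedNGrams({}, 3, "|", "", "", 0) yields {}, matching the expected_values of the two new tests.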
null
254
CWE-787
CVE-2021-29558
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_CORE_UTIL_SPARSE_SPARSE_TENSOR_H_ #define TENSORFLOW_CORE_UTIL_SPARSE_SPARSE_TENSOR_H_ #include <limits> #include <numeric> #include <vector> #include "absl/base/macros.h" #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/util/sparse/dim_comparator.h" #include "tensorflow/core/util/sparse/group_iterator.h" namespace tensorflow { namespace sparse { class SparseTensor { public: typedef typename gtl::ArraySlice<int64> VarDimArray; typedef typename gtl::InlinedVector<int64, 8> ShapeArray; static Status Create(Tensor ix, Tensor vals, const VarDimArray shape, const VarDimArray order, SparseTensor* result); static Status Create(Tensor ix, Tensor vals, const TensorShape& shape, SparseTensor* result); static Status Create(Tensor ix, Tensor vals, const VarDimArray shape, SparseTensor* result); static Status Create(Tensor ix, Tensor vals, const TensorShape& shape, const VarDimArray order, SparseTensor* result); SparseTensor() : dims_(0) {} ABSL_DEPRECATED("Use Create() functions instead of constructors directly.") SparseTensor(Tensor ix, Tensor vals, const TensorShape& shape) : SparseTensor(std::move(ix), std::move(vals), TensorShapeToVector(shape), UndefinedOrder(TensorShapeToVector(shape))) {} ABSL_DEPRECATED("Use Create() functions instead of constructors directly.") SparseTensor(Tensor ix, Tensor vals, const VarDimArray shape) : SparseTensor(std::move(ix), std::move(vals), shape, UndefinedOrder(shape)) {} ABSL_DEPRECATED("use Create() functions instead of constructors directly.") SparseTensor(Tensor ix, Tensor vals, const TensorShape& shape, const VarDimArray order) : SparseTensor(std::move(ix), std::move(vals), TensorShapeToVector(shape), order) {} ABSL_DEPRECATED("Use Create() functions instead of constructors directly.") SparseTensor(Tensor ix, Tensor vals, const VarDimArray shape, const VarDimArray order); SparseTensor(const SparseTensor& other) : SparseTensor(other.ix_, other.vals_, other.shape_, other.order_) {} SparseTensor(SparseTensor&& other) : SparseTensor(std::move(other.ix_), std::move(other.vals_), std::move(other.shape_), std::move(other.order_)) {} SparseTensor& operator=(const SparseTensor& other) { ix_ = other.ix_; vals_ = other.vals_; shape_ = other.shape_; order_ = other.order_; dims_ = other.dims_; return *this; } SparseTensor& operator=(SparseTensor&& other) { ix_ = std::move(other.ix_); vals_ = 
std::move(other.vals_); shape_ = std::move(other.shape_); order_ = std::move(other.order_); dims_ = std::move(other.dims_); return *this; } std::size_t num_entries() const { return ix_.dim_size(0); } int dims() const { return shape_.size(); } const Tensor& indices() const { return ix_; } const Tensor& values() const { return vals_; } DataType dtype() const { return vals_.dtype(); } Status IndicesValid() const; VarDimArray shape() const { return shape_; } VarDimArray order() const { return order_; } // Resorts the indices and values according to the dimensions in order. template <typename T> void Reorder(const VarDimArray& order); // Returns a group iterable that can be used for clumping indices // and values according to the group indices of interest. // // Precondition: order()[0..group_ix.size()] == group_ix. // // See the README.md in this directory for more usage information. GroupIterable group(const VarDimArray& group_ix) const { DCHECK_LE(group_ix.size(), dims_); for (std::size_t di = 0; di < group_ix.size(); ++di) { DCHECK_GE(group_ix[di], 0) << "Group dimension out of range"; DCHECK_LT(group_ix[di], dims_) << "Group dimension out of range"; DCHECK_EQ(group_ix[di], order_[di]) << "Group dimension does not match sorted order"; } return GroupIterable(ix_, vals_, dims_, group_ix); } // Stores the sparse indices into the dense tensor out. // Preconditions: // out->shape().dims() == shape().dims() // out->shape().dim_size(d) >= shape(d) for all d // // Returns true on success. False on failure (mismatched dimensions // or out-of-bounds indices). // // If initialize==True, ToDense first overwrites all coefficients in out to 0. // template <typename T> bool ToDense(Tensor* out, bool initialize = true); // Concat() will concatenate all the tensors according to their first order // dimension. All tensors must have identical shape except for // the first order dimension. All tensors orders' first dimension // must match. // // If all of the tensors have identical ordering, then the output // will have this ordering. Otherwise the output is set as not // having any order and a Reorder<T>() should be called on it before // performing any subsequent operations. template <typename T> static SparseTensor Concat(const gtl::ArraySlice<SparseTensor>& tensors); // Split() will split the input SparseTensor into a list of num_split // SparseTensor given a splitting dimension. If the input dimension range // isn't an integer multiple of split_dim, we add one extra dimension for // each slice. template <typename T> static Status Split(const SparseTensor& tensor, const int split_dim, const int num_split, std::vector<SparseTensor>* result); // Slice() will slice the input SparseTensor into a SparseTensor based on // specified start and size. Both start and size are 1-D array with each // element of the array representing one dimension. The start is the start // index at each dimension and the size is the size at each dimension. template <typename T> static SparseTensor Slice(const SparseTensor& tensor, const gtl::ArraySlice<int64>& start, const gtl::ArraySlice<int64>& size); // Picks out the dimensions according to `dim_indices`. 
std::vector<int64> PickDims(gtl::ArraySlice<int64> dim_indices) const { std::vector<int64> res(dim_indices.size()); for (size_t i = 0; i < dim_indices.size(); ++i) { res[i] = shape_[dim_indices[i]]; } return res; } private: static inline ShapeArray UndefinedOrder(const VarDimArray shape) { return ShapeArray(shape.size(), -1); } static inline ShapeArray TensorShapeToVector(const TensorShape& shape) { ShapeArray vec(shape.dims()); for (int i = 0; i < shape.dims(); ++i) vec[i] = shape.dim_size(i); return vec; } // Optimized implementation of `IndicesValid` for 1-D sparse tensors. // REQUIRES: `shape_.size() == 1`. bool IndicesValidVectorFastPath() const; // Optimized implementation of `IndicesValid` for 2-D sparse tensors whose // indices fit within the range of an `int32`. // REQUIRES: `shape_.size() == 2`. bool IndicesValidMatrix32BitFastPath() const; template <bool standard_order> Status IndicesValidHelper() const; // Helper for ToDense<T>() template <typename T> bool ValidateAndInitializeToDense(Tensor* out, bool initialize); // Helper for Split() that returns the slice index. static inline int GetSliceIndex(const int dim, const int split_size, const int residual) { DCHECK_GT(split_size, 0); DCHECK_GE(dim, 0); if (residual == 0) return dim / split_size; const int offset = residual * (split_size + 1); if (dim < offset) { return dim / (split_size + 1); } else { return residual + ((dim - offset) / split_size); } } // Helper for Split() that returns the dimension in the slice. static inline int GetDimensionInSlice(const int dim, const int split_size, const int residual) { DCHECK_GT(split_size, 0); DCHECK_GE(dim, 0); if (residual == 0) return dim % split_size; const int offset = residual * (split_size + 1); if (dim < offset) { return dim % (split_size + 1); } else { return (dim - offset) % split_size; } } // Helper for Split() that returns the shape given a slice index. static inline int GetSliceShape(const int slice_index, const int split_size, const int residual) { DCHECK_GT(split_size, 0); DCHECK_GE(slice_index, 0); if (residual == 0) return split_size; if (slice_index < residual) { return split_size + 1; } else { return split_size; } } Tensor ix_; Tensor vals_; ShapeArray shape_; ShapeArray order_; int dims_; }; // This operation updates the indices and values Tensor rows, so it is // an in-place algorithm. It requires O(N log N) time and O(N) // temporary space. template <typename T> inline void SparseTensor::Reorder(const VarDimArray& order) { DCHECK_EQ(DataTypeToEnum<T>::v(), dtype()) << "Reorder requested with the wrong datatype"; DCHECK_EQ(order.size(), dims_) << "Order length must be SparseTensor rank"; auto ix_t = ix_.matrix<int64>(); auto vals_t = vals_.vec<T>(); std::vector<int64> reorder(num_entries()); std::iota(reorder.begin(), reorder.end(), 0); // Sort to get order of indices switch (order.size()) { #define CASE_SORT(ORDER_SIZE) \ case ORDER_SIZE: { \ FixedDimComparator<ORDER_SIZE> sorter(ix_t, order, shape()); \ std::sort(reorder.begin(), reorder.end(), sorter); \ break; \ } CASE_SORT(0); CASE_SORT(1); CASE_SORT(2); CASE_SORT(3); CASE_SORT(4); CASE_SORT(5); #undef CASE_SORT default: { DimComparator sorter(ix_t, order, shape()); std::sort(reorder.begin(), reorder.end(), sorter); } } // We have a forward reordering, but what we'll need is a // permutation (the inverse). This can be calculated with O(1) // additional // and O(n) time (INVPERM) but we just do the simple thing here. 
std::vector<size_t> permutation(reorder.size()); for (std::size_t n = 0; n < reorder.size(); ++n) { permutation[reorder[n]] = n; } // Update indices & values by converting the permutations to // a product of transpositions. Iterate over the cycles in the // permutation, and convert each of those into a product of // transpositions (swaps): // https://en.wikipedia.org/wiki/Cyclic_permutation // This is N swaps, 2*N comparisons. for (std::size_t n = 0; n + 1 < permutation.size(); ++n) { while (n != permutation[n]) { std::size_t r = permutation[n]; std::swap_ranges(&(ix_t(n, 0)), &(ix_t(n + 1, 0)), &(ix_t(r, 0))); std::swap(vals_t(n), vals_t(r)); std::swap(permutation[n], permutation[r]); } } order_ = ShapeArray(order.begin(), order.end()); } template <typename T> inline bool SparseTensor::ValidateAndInitializeToDense(Tensor* out, bool initialize) { DCHECK_EQ(DataTypeToEnum<T>::v(), dtype()) << "ToDense requested with the wrong datatype"; DCHECK_EQ(out->shape().dims(), dims_) << "Incompatible dimensions between SparseTensor and output"; DCHECK_EQ(out->dtype(), DataTypeToEnum<T>::v()) << "Output must be type: " << DataTypeToEnum<T>::v() << " but got: " << out->dtype(); // Make sure the dense output is the same rank and has room // to hold the SparseTensor. const auto& out_shape = out->shape(); if (shape_.size() != out_shape.dims()) return false; for (int d = 0; d < shape_.size(); ++d) { if (shape_[d] > out_shape.dim_size(d)) return false; } if (initialize) { auto out_t = out->flat<T>(); out_t.setConstant(T()); } return true; } template <typename T> inline bool SparseTensor::ToDense(Tensor* out, bool initialize) { if (!ValidateAndInitializeToDense<T>(out, initialize)) return false; auto out_t = out->flat<T>(); auto vals_t = vals_.vec<T>(); auto ix_t = ix_.matrix<int64>(); const int64* const ix_ptr = ix_t.data(); if (dims_ == 1) { // Fast path for sparse vectors. const int64 out_length = out->shape().dim_size(0); for (int n = 0; n < vals_t.dimension(0); ++n) { const int64 index = internal::SubtleMustCopy(ix_ptr[n]); if (!FastBoundsCheck(index, out_length)) return false; out_t(index) = vals_t(n); } return true; } else if (dims_ == 2) { // Fast path for sparse matrices. const auto& out_shape = out->shape(); const int64 out_rows = out_shape.dim_size(0); const int64 out_cols = out_shape.dim_size(1); for (int n = 0; n < vals_t.dimension(0); ++n) { const int64 row_index = internal::SubtleMustCopy(ix_ptr[n * 2]); const int64 col_index = internal::SubtleMustCopy(ix_ptr[n * 2 + 1]); if (!(FastBoundsCheck(row_index, out_rows) && FastBoundsCheck(col_index, out_cols))) { return false; } out_t(row_index * out_cols + col_index) = vals_t(n); } return true; } else { // General path for N-dimensional sparse tensors. 
gtl::InlinedVector<int64, 4> strides(dims_); const auto& out_shape = out->shape().dim_sizes(); if (dims_ > 0) { strides[dims_ - 1] = 1; } for (int d = dims_ - 2; d >= 0; --d) { strides[d] = strides[d + 1] * out_shape[d + 1]; } for (int n = 0; n < vals_t.dimension(0); ++n) { bool invalid_dims = false; int64 ix = 0; for (int d = 0; d < dims_; ++d) { const int64 ix_n_d = internal::SubtleMustCopy(ix_ptr[n * dims_ + d]); if (!FastBoundsCheck(ix_n_d, out_shape[d])) { invalid_dims = true; } ix += strides[d] * ix_n_d; } if (invalid_dims) return false; out_t(ix) = vals_t(n); } return true; } } template <typename T> inline SparseTensor SparseTensor::Concat( const gtl::ArraySlice<SparseTensor>& tensors) { DCHECK_GE(tensors.size(), size_t{1}) << "Cannot concat 0 SparseTensors"; const int dims = tensors[0].dims_; DCHECK_GE(dims, 1) << "Cannot concat 0-dimensional SparseTensors"; auto order_0 = tensors[0].order(); const int primary_dim = order_0[0]; ShapeArray final_order(order_0.begin(), order_0.end()); ShapeArray final_shape(tensors[0].shape().begin(), tensors[0].shape().end()); final_shape[primary_dim] = 0; // We'll build this up as we go along. int num_entries = 0; bool fully_ordered = true; for (const SparseTensor& st : tensors) { DCHECK_EQ(st.dims_, dims) << "All SparseTensors must have the same rank."; DCHECK_EQ(DataTypeToEnum<T>::v(), st.dtype()) << "Concat requested with the wrong data type"; DCHECK_GE(st.order()[0], 0) << "SparseTensor must be ordered"; DCHECK_EQ(st.order()[0], primary_dim) << "All SparseTensors' order[0] must match. This is the concat dim."; if (st.order() != final_order) fully_ordered = false; const VarDimArray& st_shape = st.shape(); for (int d = 0; d < dims - 1; ++d) { const int cdim = (d < primary_dim) ? d : d + 1; DCHECK_EQ(final_shape[cdim], st_shape[cdim]) << "All SparseTensors' shapes must match except on the concat dim. " << "Concat dim: " << primary_dim << ", mismatched shape at dim: " << cdim << ". Expecting shape like: [" << str_util::Join(final_shape, ",") << "] but saw shape: [" << str_util::Join(st_shape, ",") << "]"; } // Update dimension of final shape final_shape[primary_dim] = (final_shape[primary_dim] + st_shape[primary_dim]); num_entries += st.num_entries(); // Update number of entries } // If nonconsistent ordering among inputs, set final order to -1s. if (!fully_ordered) { final_order = UndefinedOrder(final_shape); } Tensor output_ix(DT_INT64, TensorShape({num_entries, dims})); Tensor output_vals(DataTypeToEnum<T>::v(), TensorShape({num_entries})); TTypes<int64>::Matrix ix_t = output_ix.matrix<int64>(); typename TTypes<T>::Vec vals_t = output_vals.vec<T>(); Eigen::DenseIndex offset = 0; int64 shape_offset = 0; for (const SparseTensor& st : tensors) { const int st_num_entries = st.num_entries(); // Fill in indices & values. if (st_num_entries > 0) { std::copy_n(&st.vals_.vec<T>()(0), st_num_entries, &vals_t(offset)); const auto* st_ix = &st.ix_.matrix<int64>()(0, 0); auto* ix_out = &ix_t(offset, 0); for (std::size_t i = 0; i < st_num_entries * dims; ++i) { *ix_out++ = *st_ix++ + ((i % dims == primary_dim) ? 
shape_offset : 0); } } offset += st_num_entries; shape_offset += st.shape()[primary_dim]; } return SparseTensor(output_ix, output_vals, final_shape, final_order); } template <typename T> inline Status SparseTensor::Split(const SparseTensor& input_tensor, const int split_dim, const int num_split, std::vector<SparseTensor>* result) { std::vector<Tensor> output_indices; std::vector<Tensor> output_values; std::vector<TensorShape> output_shapes; output_indices.reserve(num_split); output_values.reserve(num_split); output_shapes.reserve(num_split); std::vector<typename TTypes<int64>::Matrix> output_indices_t; std::vector<typename TTypes<T>::Vec> output_values_t; output_indices_t.reserve(num_split); output_values_t.reserve(num_split); auto input_values_t = input_tensor.values().vec<T>(); auto input_indices_t = input_tensor.indices().matrix<int64>(); std::vector<int> num_values(num_split, 0); const int num_dim = input_tensor.shape().size(); const int split_dim_size = input_tensor.shape()[split_dim]; const int split_size = split_dim_size / num_split; if (!(num_split > 0 && num_split <= split_dim_size)) { return errors::InvalidArgument("num_split must be in the interval (0, ", split_dim_size, "]"); } if (!(split_dim >= 0 && split_dim < num_dim)) { return errors::InvalidArgument("num_dim must be in the interval [0, ", num_dim, ")"); } const int residual = split_dim_size % num_split; for (int i = 0; i < input_tensor.indices().dim_size(0); ++i) { const int dim = input_tensor.indices().matrix<int64>()(i, split_dim); int slice_index = GetSliceIndex(dim, split_size, residual); num_values[slice_index]++; } for (int i = 0; i < num_split; ++i) { // TODO(ataei): Pass an allocator to avoid allocating large memory buffer. output_indices.emplace_back(DT_INT64, TensorShape({num_values[i], num_dim})); output_values.emplace_back(DataTypeToEnum<T>::v(), TensorShape({num_values[i]})); output_shapes.emplace_back(input_tensor.shape()); output_indices_t.emplace_back(output_indices[i].matrix<int64>()); output_values_t.emplace_back(output_values[i].vec<T>()); const int size = GetSliceShape(i, split_size, residual); output_shapes[i].set_dim(split_dim, size); } std::vector<int> values_inserted_in_slice(num_split, 0); for (int i = 0; i < input_tensor.indices().dim_size(0); ++i) { const int dim = input_indices_t(i, split_dim); const int slice_index = GetSliceIndex(dim, split_size, residual); const int slice_dim = values_inserted_in_slice[slice_index]++; output_values_t[slice_index](slice_dim) = input_values_t(i); for (int j = 0; j < num_dim; ++j) { const int64 original_dim = input_indices_t(i, j); output_indices_t[slice_index](slice_dim, j) = (j == split_dim) ? GetDimensionInSlice(original_dim, split_size, residual) : original_dim; } } result->clear(); result->reserve(num_split); for (int i = 0; i < num_split; ++i) { SparseTensor tensor; Status create_status = Create(output_indices[i], output_values[i], output_shapes[i], &tensor); if (!create_status.ok()) { return create_status; } result->push_back(std::move(tensor)); } return Status::OK(); } template <typename T> inline SparseTensor SparseTensor::Slice(const SparseTensor& input_tensor, const gtl::ArraySlice<int64>& start, const gtl::ArraySlice<int64>& size) { TensorShape output_shape(input_tensor.shape()); const int dims = input_tensor.dims(); for (int dim = 0; dim < dims; dim++) { // Determine the size of the result; if the selected slice goes beyond the // input boundary, the result will correspond to the size of the overlap // between the input and the selected slice. 
const int64 input_size = output_shape.dim_size(dim); const int64 start_index = start[dim]; const int64 slice_size = size[dim]; if (start_index + slice_size < input_size) { // The entire selection is within input boundaries. output_shape.set_dim(dim, slice_size); } else if (start_index < input_size) { // The selection starts within input boundaries, but goes beyond them. output_shape.set_dim(dim, input_size - start_index); } else { // The selection is entirely out of input boundaries. output_shape.set_dim(dim, 0); } } auto input_indices_t = input_tensor.indices().matrix<int64>(); auto input_values_t = input_tensor.values().vec<T>(); // Find the number of indices that fall inside start and size. int count = 0; for (int i = 0; i < input_tensor.indices().dim_size(0); i++) { // The following will check to see if an input is within the // range specified by start and size. // The for loop below iterates through all dimensions. In case // the index falls outside of the start and size at any dimension, // it will be considered as a "no hit" (hit = false). In this // case, it will not be counted as the index that fall inside // the range specified by start and size. bool hit = true; for (int dim = 0; dim < dims; dim++) { if (!(start[dim] <= input_indices_t(i, dim) && input_indices_t(i, dim) < start[dim] + size[dim])) { hit = false; break; } } if (!hit) { continue; } count++; } Tensor output_values(DataTypeToEnum<T>::v(), TensorShape({count})); Tensor output_indices(DT_INT64, TensorShape({count, dims})); auto output_values_t = output_values.vec<T>(); auto output_indices_t = output_indices.matrix<int64>(); // Obtain the output indices that fall inside start and size. int index = 0; for (int i = 0; i < input_tensor.indices().dim_size(0) && index < count; i++) { // The logic here is similar as the above except that the above // only count the number of indices while here we actually generate // the output. bool hit = true; for (int dim = 0; dim < dims; dim++) { if (!(start[dim] <= input_indices_t(i, dim) && input_indices_t(i, dim) < start[dim] + size[dim])) { hit = false; break; } } if (!hit) { continue; } output_values_t(index) = input_values_t(i); for (int dim = 0; dim < dims; dim++) { output_indices_t(index, dim) = input_indices_t(i, dim) - start[dim]; } index++; } return SparseTensor(output_indices, output_values, output_shape); } } // namespace sparse } // namespace tensorflow #endif // TENSORFLOW_CORE_UTIL_SPARSE_SPARSE_TENSOR_H_
null
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_CORE_UTIL_SPARSE_SPARSE_TENSOR_H_ #define TENSORFLOW_CORE_UTIL_SPARSE_SPARSE_TENSOR_H_ #include <limits> #include <numeric> #include <vector> #include "absl/base/macros.h" #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/util/sparse/dim_comparator.h" #include "tensorflow/core/util/sparse/group_iterator.h" namespace tensorflow { namespace sparse { class SparseTensor { public: typedef typename gtl::ArraySlice<int64> VarDimArray; typedef typename gtl::InlinedVector<int64, 8> ShapeArray; static Status Create(Tensor ix, Tensor vals, const VarDimArray shape, const VarDimArray order, SparseTensor* result); static Status Create(Tensor ix, Tensor vals, const TensorShape& shape, SparseTensor* result); static Status Create(Tensor ix, Tensor vals, const VarDimArray shape, SparseTensor* result); static Status Create(Tensor ix, Tensor vals, const TensorShape& shape, const VarDimArray order, SparseTensor* result); SparseTensor() : dims_(0) {} ABSL_DEPRECATED("Use Create() functions instead of constructors directly.") SparseTensor(Tensor ix, Tensor vals, const TensorShape& shape) : SparseTensor(std::move(ix), std::move(vals), TensorShapeToVector(shape), UndefinedOrder(TensorShapeToVector(shape))) {} ABSL_DEPRECATED("Use Create() functions instead of constructors directly.") SparseTensor(Tensor ix, Tensor vals, const VarDimArray shape) : SparseTensor(std::move(ix), std::move(vals), shape, UndefinedOrder(shape)) {} ABSL_DEPRECATED("use Create() functions instead of constructors directly.") SparseTensor(Tensor ix, Tensor vals, const TensorShape& shape, const VarDimArray order) : SparseTensor(std::move(ix), std::move(vals), TensorShapeToVector(shape), order) {} ABSL_DEPRECATED("Use Create() functions instead of constructors directly.") SparseTensor(Tensor ix, Tensor vals, const VarDimArray shape, const VarDimArray order); SparseTensor(const SparseTensor& other) : SparseTensor(other.ix_, other.vals_, other.shape_, other.order_) {} SparseTensor(SparseTensor&& other) : SparseTensor(std::move(other.ix_), std::move(other.vals_), std::move(other.shape_), std::move(other.order_)) {} SparseTensor& operator=(const SparseTensor& other) { ix_ = other.ix_; vals_ = other.vals_; shape_ = other.shape_; order_ = other.order_; dims_ = other.dims_; return *this; } SparseTensor& operator=(SparseTensor&& other) { ix_ = std::move(other.ix_); vals_ = 
std::move(other.vals_); shape_ = std::move(other.shape_); order_ = std::move(other.order_); dims_ = std::move(other.dims_); return *this; } std::size_t num_entries() const { return ix_.dim_size(0); } int dims() const { return shape_.size(); } const Tensor& indices() const { return ix_; } const Tensor& values() const { return vals_; } DataType dtype() const { return vals_.dtype(); } Status IndicesValid() const; VarDimArray shape() const { return shape_; } VarDimArray order() const { return order_; } // Resorts the indices and values according to the dimensions in order. template <typename T> void Reorder(const VarDimArray& order); // Returns a group iterable that can be used for clumping indices // and values according to the group indices of interest. // // Precondition: order()[0..group_ix.size()] == group_ix. // // See the README.md in this directory for more usage information. GroupIterable group(const VarDimArray& group_ix) const { DCHECK_LE(group_ix.size(), dims_); for (std::size_t di = 0; di < group_ix.size(); ++di) { DCHECK_GE(group_ix[di], 0) << "Group dimension out of range"; DCHECK_LT(group_ix[di], dims_) << "Group dimension out of range"; DCHECK_EQ(group_ix[di], order_[di]) << "Group dimension does not match sorted order"; } return GroupIterable(ix_, vals_, dims_, group_ix); } // Stores the sparse indices into the dense tensor out. // Preconditions: // out->shape().dims() == shape().dims() // out->shape().dim_size(d) >= shape(d) for all d // // Returns true on success. False on failure (mismatched dimensions // or out-of-bounds indices). // // If initialize==True, ToDense first overwrites all coefficients in out to 0. // template <typename T> bool ToDense(Tensor* out, bool initialize = true); // Concat() will concatenate all the tensors according to their first order // dimension. All tensors must have identical shape except for // the first order dimension. All tensors orders' first dimension // must match. // // If all of the tensors have identical ordering, then the output // will have this ordering. Otherwise the output is set as not // having any order and a Reorder<T>() should be called on it before // performing any subsequent operations. template <typename T> static SparseTensor Concat(const gtl::ArraySlice<SparseTensor>& tensors); // Split() will split the input SparseTensor into a list of num_split // SparseTensor given a splitting dimension. If the input dimension range // isn't an integer multiple of split_dim, we add one extra dimension for // each slice. template <typename T> static Status Split(const SparseTensor& tensor, const int split_dim, const int num_split, std::vector<SparseTensor>* result); // Slice() will slice the input SparseTensor into a SparseTensor based on // specified start and size. Both start and size are 1-D array with each // element of the array representing one dimension. The start is the start // index at each dimension and the size is the size at each dimension. template <typename T> static SparseTensor Slice(const SparseTensor& tensor, const gtl::ArraySlice<int64>& start, const gtl::ArraySlice<int64>& size); // Picks out the dimensions according to `dim_indices`. 
std::vector<int64> PickDims(gtl::ArraySlice<int64> dim_indices) const { std::vector<int64> res(dim_indices.size()); for (size_t i = 0; i < dim_indices.size(); ++i) { res[i] = shape_[dim_indices[i]]; } return res; } private: static inline ShapeArray UndefinedOrder(const VarDimArray shape) { return ShapeArray(shape.size(), -1); } static inline ShapeArray TensorShapeToVector(const TensorShape& shape) { ShapeArray vec(shape.dims()); for (int i = 0; i < shape.dims(); ++i) vec[i] = shape.dim_size(i); return vec; } // Optimized implementation of `IndicesValid` for 1-D sparse tensors. // REQUIRES: `shape_.size() == 1`. bool IndicesValidVectorFastPath() const; // Optimized implementation of `IndicesValid` for 2-D sparse tensors whose // indices fit within the range of an `int32`. // REQUIRES: `shape_.size() == 2`. bool IndicesValidMatrix32BitFastPath() const; template <bool standard_order> Status IndicesValidHelper() const; // Helper for ToDense<T>() template <typename T> bool ValidateAndInitializeToDense(Tensor* out, bool initialize); // Helper for Split() that returns the slice index. static inline int GetSliceIndex(const int dim, const int split_size, const int residual) { DCHECK_GT(split_size, 0); DCHECK_GE(dim, 0); if (residual == 0) return dim / split_size; const int offset = residual * (split_size + 1); if (dim < offset) { return dim / (split_size + 1); } else { return residual + ((dim - offset) / split_size); } } // Helper for Split() that returns the dimension in the slice. static inline int GetDimensionInSlice(const int dim, const int split_size, const int residual) { DCHECK_GT(split_size, 0); DCHECK_GE(dim, 0); if (residual == 0) return dim % split_size; const int offset = residual * (split_size + 1); if (dim < offset) { return dim % (split_size + 1); } else { return (dim - offset) % split_size; } } // Helper for Split() that returns the shape given a slice index. static inline int GetSliceShape(const int slice_index, const int split_size, const int residual) { DCHECK_GT(split_size, 0); DCHECK_GE(slice_index, 0); if (residual == 0) return split_size; if (slice_index < residual) { return split_size + 1; } else { return split_size; } } Tensor ix_; Tensor vals_; ShapeArray shape_; ShapeArray order_; int dims_; }; // This operation updates the indices and values Tensor rows, so it is // an in-place algorithm. It requires O(N log N) time and O(N) // temporary space. template <typename T> inline void SparseTensor::Reorder(const VarDimArray& order) { DCHECK_EQ(DataTypeToEnum<T>::v(), dtype()) << "Reorder requested with the wrong datatype"; DCHECK_EQ(order.size(), dims_) << "Order length must be SparseTensor rank"; auto ix_t = ix_.matrix<int64>(); auto vals_t = vals_.vec<T>(); std::vector<int64> reorder(num_entries()); std::iota(reorder.begin(), reorder.end(), 0); // Sort to get order of indices switch (order.size()) { #define CASE_SORT(ORDER_SIZE) \ case ORDER_SIZE: { \ FixedDimComparator<ORDER_SIZE> sorter(ix_t, order, shape()); \ std::sort(reorder.begin(), reorder.end(), sorter); \ break; \ } CASE_SORT(0); CASE_SORT(1); CASE_SORT(2); CASE_SORT(3); CASE_SORT(4); CASE_SORT(5); #undef CASE_SORT default: { DimComparator sorter(ix_t, order, shape()); std::sort(reorder.begin(), reorder.end(), sorter); } } // We have a forward reordering, but what we'll need is a // permutation (the inverse). This can be calculated with O(1) // additional // and O(n) time (INVPERM) but we just do the simple thing here. 
std::vector<size_t> permutation(reorder.size()); for (std::size_t n = 0; n < reorder.size(); ++n) { permutation[reorder[n]] = n; } // Update indices & values by converting the permutations to // a product of transpositions. Iterate over the cycles in the // permutation, and convert each of those into a product of // transpositions (swaps): // https://en.wikipedia.org/wiki/Cyclic_permutation // This is N swaps, 2*N comparisons. for (std::size_t n = 0; n + 1 < permutation.size(); ++n) { while (n != permutation[n]) { std::size_t r = permutation[n]; std::swap_ranges(&(ix_t(n, 0)), &(ix_t(n + 1, 0)), &(ix_t(r, 0))); std::swap(vals_t(n), vals_t(r)); std::swap(permutation[n], permutation[r]); } } order_ = ShapeArray(order.begin(), order.end()); } template <typename T> inline bool SparseTensor::ValidateAndInitializeToDense(Tensor* out, bool initialize) { DCHECK_EQ(DataTypeToEnum<T>::v(), dtype()) << "ToDense requested with the wrong datatype"; DCHECK_EQ(out->shape().dims(), dims_) << "Incompatible dimensions between SparseTensor and output"; DCHECK_EQ(out->dtype(), DataTypeToEnum<T>::v()) << "Output must be type: " << DataTypeToEnum<T>::v() << " but got: " << out->dtype(); // Make sure the dense output is the same rank and has room // to hold the SparseTensor. const auto& out_shape = out->shape(); if (shape_.size() != out_shape.dims()) return false; for (int d = 0; d < shape_.size(); ++d) { if (shape_[d] > out_shape.dim_size(d)) return false; } if (initialize) { auto out_t = out->flat<T>(); out_t.setConstant(T()); } return true; } template <typename T> inline bool SparseTensor::ToDense(Tensor* out, bool initialize) { if (!ValidateAndInitializeToDense<T>(out, initialize)) return false; auto out_t = out->flat<T>(); auto vals_t = vals_.vec<T>(); auto ix_t = ix_.matrix<int64>(); const int64* const ix_ptr = ix_t.data(); if (dims_ == 1) { // Fast path for sparse vectors. const int64 out_length = out->shape().dim_size(0); for (int n = 0; n < vals_t.dimension(0); ++n) { const int64 index = internal::SubtleMustCopy(ix_ptr[n]); if (!FastBoundsCheck(index, out_length)) return false; out_t(index) = vals_t(n); } return true; } else if (dims_ == 2) { // Fast path for sparse matrices. const auto& out_shape = out->shape(); const int64 out_rows = out_shape.dim_size(0); const int64 out_cols = out_shape.dim_size(1); for (int n = 0; n < vals_t.dimension(0); ++n) { const int64 row_index = internal::SubtleMustCopy(ix_ptr[n * 2]); const int64 col_index = internal::SubtleMustCopy(ix_ptr[n * 2 + 1]); if (!(FastBoundsCheck(row_index, out_rows) && FastBoundsCheck(col_index, out_cols))) { return false; } out_t(row_index * out_cols + col_index) = vals_t(n); } return true; } else { // General path for N-dimensional sparse tensors. 
gtl::InlinedVector<int64, 4> strides(dims_); const auto& out_shape = out->shape().dim_sizes(); if (dims_ > 0) { strides[dims_ - 1] = 1; } for (int d = dims_ - 2; d >= 0; --d) { strides[d] = strides[d + 1] * out_shape[d + 1]; } for (int n = 0; n < vals_t.dimension(0); ++n) { bool invalid_dims = false; int64 ix = 0; for (int d = 0; d < dims_; ++d) { const int64 ix_n_d = internal::SubtleMustCopy(ix_ptr[n * dims_ + d]); if (!FastBoundsCheck(ix_n_d, out_shape[d])) { invalid_dims = true; } ix += strides[d] * ix_n_d; } if (invalid_dims) return false; out_t(ix) = vals_t(n); } return true; } } template <typename T> inline SparseTensor SparseTensor::Concat( const gtl::ArraySlice<SparseTensor>& tensors) { DCHECK_GE(tensors.size(), size_t{1}) << "Cannot concat 0 SparseTensors"; const int dims = tensors[0].dims_; DCHECK_GE(dims, 1) << "Cannot concat 0-dimensional SparseTensors"; auto order_0 = tensors[0].order(); const int primary_dim = order_0[0]; ShapeArray final_order(order_0.begin(), order_0.end()); ShapeArray final_shape(tensors[0].shape().begin(), tensors[0].shape().end()); final_shape[primary_dim] = 0; // We'll build this up as we go along. int num_entries = 0; bool fully_ordered = true; for (const SparseTensor& st : tensors) { DCHECK_EQ(st.dims_, dims) << "All SparseTensors must have the same rank."; DCHECK_EQ(DataTypeToEnum<T>::v(), st.dtype()) << "Concat requested with the wrong data type"; DCHECK_GE(st.order()[0], 0) << "SparseTensor must be ordered"; DCHECK_EQ(st.order()[0], primary_dim) << "All SparseTensors' order[0] must match. This is the concat dim."; if (st.order() != final_order) fully_ordered = false; const VarDimArray& st_shape = st.shape(); for (int d = 0; d < dims - 1; ++d) { const int cdim = (d < primary_dim) ? d : d + 1; DCHECK_EQ(final_shape[cdim], st_shape[cdim]) << "All SparseTensors' shapes must match except on the concat dim. " << "Concat dim: " << primary_dim << ", mismatched shape at dim: " << cdim << ". Expecting shape like: [" << str_util::Join(final_shape, ",") << "] but saw shape: [" << str_util::Join(st_shape, ",") << "]"; } // Update dimension of final shape final_shape[primary_dim] = (final_shape[primary_dim] + st_shape[primary_dim]); num_entries += st.num_entries(); // Update number of entries } // If nonconsistent ordering among inputs, set final order to -1s. if (!fully_ordered) { final_order = UndefinedOrder(final_shape); } Tensor output_ix(DT_INT64, TensorShape({num_entries, dims})); Tensor output_vals(DataTypeToEnum<T>::v(), TensorShape({num_entries})); TTypes<int64>::Matrix ix_t = output_ix.matrix<int64>(); typename TTypes<T>::Vec vals_t = output_vals.vec<T>(); Eigen::DenseIndex offset = 0; int64 shape_offset = 0; for (const SparseTensor& st : tensors) { const int st_num_entries = st.num_entries(); // Fill in indices & values. if (st_num_entries > 0) { std::copy_n(&st.vals_.vec<T>()(0), st_num_entries, &vals_t(offset)); const auto* st_ix = &st.ix_.matrix<int64>()(0, 0); auto* ix_out = &ix_t(offset, 0); for (std::size_t i = 0; i < st_num_entries * dims; ++i) { *ix_out++ = *st_ix++ + ((i % dims == primary_dim) ? 
shape_offset : 0); } } offset += st_num_entries; shape_offset += st.shape()[primary_dim]; } return SparseTensor(output_ix, output_vals, final_shape, final_order); } template <typename T> inline Status SparseTensor::Split(const SparseTensor& input_tensor, const int split_dim, const int num_split, std::vector<SparseTensor>* result) { std::vector<Tensor> output_indices; std::vector<Tensor> output_values; std::vector<TensorShape> output_shapes; output_indices.reserve(num_split); output_values.reserve(num_split); output_shapes.reserve(num_split); std::vector<typename TTypes<int64>::Matrix> output_indices_t; std::vector<typename TTypes<T>::Vec> output_values_t; output_indices_t.reserve(num_split); output_values_t.reserve(num_split); auto input_values_t = input_tensor.values().vec<T>(); auto input_indices_t = input_tensor.indices().matrix<int64>(); std::vector<int> num_values(num_split, 0); const int num_dim = input_tensor.shape().size(); const int split_dim_size = input_tensor.shape()[split_dim]; const int split_size = split_dim_size / num_split; if (!(num_split > 0 && num_split <= split_dim_size)) { return errors::InvalidArgument("num_split must be in the interval (0, ", split_dim_size, "]"); } if (!(split_dim >= 0 && split_dim < num_dim)) { return errors::InvalidArgument("num_dim must be in the interval [0, ", num_dim, ")"); } const int residual = split_dim_size % num_split; for (int i = 0; i < input_tensor.indices().dim_size(0); ++i) { const int dim = input_tensor.indices().matrix<int64>()(i, split_dim); int slice_index = GetSliceIndex(dim, split_size, residual); if (slice_index >= num_values.size()) { return errors::InvalidArgument("Slice index ", slice_index, " is larger than num_split."); } num_values[slice_index]++; } for (int i = 0; i < num_split; ++i) { // TODO(ataei): Pass an allocator to avoid allocating large memory buffer. output_indices.emplace_back(DT_INT64, TensorShape({num_values[i], num_dim})); output_values.emplace_back(DataTypeToEnum<T>::v(), TensorShape({num_values[i]})); output_shapes.emplace_back(input_tensor.shape()); output_indices_t.emplace_back(output_indices[i].matrix<int64>()); output_values_t.emplace_back(output_values[i].vec<T>()); const int size = GetSliceShape(i, split_size, residual); output_shapes[i].set_dim(split_dim, size); } std::vector<int> values_inserted_in_slice(num_split, 0); for (int i = 0; i < input_tensor.indices().dim_size(0); ++i) { const int dim = input_indices_t(i, split_dim); const int slice_index = GetSliceIndex(dim, split_size, residual); const int slice_dim = values_inserted_in_slice[slice_index]++; output_values_t[slice_index](slice_dim) = input_values_t(i); for (int j = 0; j < num_dim; ++j) { const int64 original_dim = input_indices_t(i, j); output_indices_t[slice_index](slice_dim, j) = (j == split_dim) ? 
GetDimensionInSlice(original_dim, split_size, residual) : original_dim; } } result->clear(); result->reserve(num_split); for (int i = 0; i < num_split; ++i) { SparseTensor tensor; Status create_status = Create(output_indices[i], output_values[i], output_shapes[i], &tensor); if (!create_status.ok()) { return create_status; } result->push_back(std::move(tensor)); } return Status::OK(); } template <typename T> inline SparseTensor SparseTensor::Slice(const SparseTensor& input_tensor, const gtl::ArraySlice<int64>& start, const gtl::ArraySlice<int64>& size) { TensorShape output_shape(input_tensor.shape()); const int dims = input_tensor.dims(); for (int dim = 0; dim < dims; dim++) { // Determine the size of the result; if the selected slice goes beyond the // input boundary, the result will correspond to the size of the overlap // between the input and the selected slice. const int64 input_size = output_shape.dim_size(dim); const int64 start_index = start[dim]; const int64 slice_size = size[dim]; if (start_index + slice_size < input_size) { // The entire selection is within input boundaries. output_shape.set_dim(dim, slice_size); } else if (start_index < input_size) { // The selection starts within input boundaries, but goes beyond them. output_shape.set_dim(dim, input_size - start_index); } else { // The selection is entirely out of input boundaries. output_shape.set_dim(dim, 0); } } auto input_indices_t = input_tensor.indices().matrix<int64>(); auto input_values_t = input_tensor.values().vec<T>(); // Find the number of indices that fall inside start and size. int count = 0; for (int i = 0; i < input_tensor.indices().dim_size(0); i++) { // The following will check to see if an input is within the // range specified by start and size. // The for loop below iterates through all dimensions. In case // the index falls outside of the start and size at any dimension, // it will be considered as a "no hit" (hit = false). In this // case, it will not be counted as the index that fall inside // the range specified by start and size. bool hit = true; for (int dim = 0; dim < dims; dim++) { if (!(start[dim] <= input_indices_t(i, dim) && input_indices_t(i, dim) < start[dim] + size[dim])) { hit = false; break; } } if (!hit) { continue; } count++; } Tensor output_values(DataTypeToEnum<T>::v(), TensorShape({count})); Tensor output_indices(DT_INT64, TensorShape({count, dims})); auto output_values_t = output_values.vec<T>(); auto output_indices_t = output_indices.matrix<int64>(); // Obtain the output indices that fall inside start and size. int index = 0; for (int i = 0; i < input_tensor.indices().dim_size(0) && index < count; i++) { // The logic here is similar as the above except that the above // only count the number of indices while here we actually generate // the output. bool hit = true; for (int dim = 0; dim < dims; dim++) { if (!(start[dim] <= input_indices_t(i, dim) && input_indices_t(i, dim) < start[dim] + size[dim])) { hit = false; break; } } if (!hit) { continue; } output_values_t(index) = input_values_t(i); for (int dim = 0; dim < dims; dim++) { output_indices_t(index, dim) = input_indices_t(i, dim) - start[dim]; } index++; } return SparseTensor(output_indices, output_values, output_shape); } } // namespace sparse } // namespace tensorflow #endif // TENSORFLOW_CORE_UTIL_SPARSE_SPARSE_TENSOR_H_
null
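The patched copy of sparse_tensor.h in the row above differs from the vulnerable copy only inside SparseTensor::Split<T>(): before num_values[slice_index] is incremented, the fixed version rejects any slice_index at or beyond num_values.size() with an InvalidArgument status, so an out-of-range entry in the input indices tensor can no longer bump a counter past the end of the per-slice vector. Below is a minimal, self-contained sketch of that guard, reusing the GetSliceIndex() arithmetic from the header; the sizes in main() are made-up example values, not data taken from the row.

// Sketch of the bounds guard that distinguishes the two copies above.
// GetSliceIndex() mirrors the helper in sparse_tensor.h: with `residual`
// leftover rows, the first `residual` slices hold (split_size + 1) entries.
#include <cstdio>
#include <vector>

static int GetSliceIndex(int dim, int split_size, int residual) {
  if (residual == 0) return dim / split_size;
  const int offset = residual * (split_size + 1);
  return dim < offset ? dim / (split_size + 1)
                      : residual + (dim - offset) / split_size;
}

int main() {
  const int num_split = 3, split_dim_size = 7;        // hypothetical sizes
  const int split_size = split_dim_size / num_split;  // 2
  const int residual = split_dim_size % num_split;    // 1
  std::vector<int> num_values(num_split, 0);

  // An index that lies outside the declared dense shape (e.g. 50 when the
  // split dimension is only 7 wide) maps to a slice_index past the end of
  // num_values; the patched kernel rejects it instead of writing there.
  for (int dim : {0, 3, 6, 50}) {
    const int slice_index = GetSliceIndex(dim, split_size, residual);
    if (slice_index >= static_cast<int>(num_values.size())) {
      std::printf("rejecting index %d (slice %d)\n", dim, slice_index);
      continue;  // the real kernel returns errors::InvalidArgument here
    }
    num_values[slice_index]++;
  }
}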
255
CWE-787
CVE-2021-29560
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #define EIGEN_USE_THREADS #include <stddef.h> #include <algorithm> #include <string> #include <vector> #include "tensorflow/core/framework/kernel_def_builder.h" #include "tensorflow/core/framework/numeric_types.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/broadcast_to_op.h" #include "tensorflow/core/kernels/list_kernels.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/bfloat16.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/util/bcast.h" #include "tensorflow/core/util/ragged_to_dense_util.h" namespace tensorflow { namespace { typedef Eigen::ThreadPoolDevice CPUDevice; using ::std::vector; const int kShapeInputIndex = 0; const int kValueInputIndex = 1; const int kDefaultValueInputIndex = 2; const int kFirstPartitionInputIndex = 3; template <typename INDEX_TYPE> class RaggedTensorToTensorBaseOp : public OpKernel { public: typedef typename ::tensorflow::TTypes<const INDEX_TYPE>::Flat RowPartitionTensor; explicit RaggedTensorToTensorBaseOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, GetRowPartitionTypes<OpKernelConstruction>( context, &row_partition_types_)); ragged_rank_ = GetRaggedRank(row_partition_types_); } // Returns the relationship between dimension and dimension + 1. RowPartitionType GetRowPartitionTypeByDimension(int dimension) { if (row_partition_types_[0] == RowPartitionType::FIRST_DIM_SIZE) { return row_partition_types_[dimension + 1]; } else { return row_partition_types_[dimension]; } } // Returns the relationship between dimension and dimension + 1. 
RowPartitionTensor GetRowPartitionTensor(OpKernelContext* c, int dimension) { if (row_partition_types_[0] == RowPartitionType::FIRST_DIM_SIZE) { return c->input(dimension + 1 + kFirstPartitionInputIndex) .flat<INDEX_TYPE>(); } else { return c->input(dimension + kFirstPartitionInputIndex).flat<INDEX_TYPE>(); } } Status GetMaxWidth(OpKernelContext* c, int dimension, INDEX_TYPE* result) { const RowPartitionTensor row_partition_tensor = GetRowPartitionTensor(c, dimension - 1); switch (GetRowPartitionTypeByDimension(dimension - 1)) { case RowPartitionType::VALUE_ROWIDS: *result = GetMaxWidthValueRowID(row_partition_tensor); return Status::OK(); case RowPartitionType::ROW_SPLITS: *result = GetMaxWidthRowSplit(row_partition_tensor); return Status::OK(); default: return errors::InvalidArgument( "Cannot handle partition type ", RowPartitionTypeToString( GetRowPartitionTypeByDimension(dimension - 1))); } } static INDEX_TYPE GetMaxWidthRowSplit(const RowPartitionTensor& row_split) { const INDEX_TYPE tensor_length = row_split.size(); if (tensor_length == 0 || tensor_length == 1) { return 0; } INDEX_TYPE max_width = 0; for (INDEX_TYPE i = 0; i < tensor_length - 1; ++i) { const INDEX_TYPE current_width = row_split(i + 1) - row_split(i); if (current_width > max_width) { max_width = current_width; } } return max_width; } static INDEX_TYPE GetMaxWidthValueRowID( const RowPartitionTensor& value_rowids) { const INDEX_TYPE index_length = value_rowids.size(); if (index_length == 0) { return 0; } INDEX_TYPE first_equal_index = 0; INDEX_TYPE first_equal_index_value = value_rowids(0); INDEX_TYPE max_width = 0; for (INDEX_TYPE i = 1; i < index_length; ++i) { const INDEX_TYPE value = value_rowids(i); if (value != first_equal_index_value) { first_equal_index_value = value; max_width = std::max(i - first_equal_index, max_width); first_equal_index = i; } } return std::max(index_length - first_equal_index, max_width); } Status CalculateOutputSize(INDEX_TYPE first_dim, OpKernelContext* c, vector<INDEX_TYPE>* result) { TensorShapeProto value_shape_proto; c->input(kValueInputIndex).shape().AsProto(&value_shape_proto); TensorShapeProto default_value_shape_proto; c->input(kDefaultValueInputIndex) .shape() .AsProto(&default_value_shape_proto); TensorShapeProto output_shape_proto; TF_RETURN_IF_ERROR(ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto)); TensorShapeProto shape_proto; { PartialTensorShape partial_tensor_shape; TF_RETURN_IF_ERROR(TensorShapeFromTensor(c->input(kShapeInputIndex), &partial_tensor_shape)); partial_tensor_shape.AsProto(&shape_proto); } TF_RETURN_IF_ERROR(CombineRaggedTensorToTensorShapes( ragged_rank_, shape_proto, value_shape_proto, &output_shape_proto)); result->reserve(output_shape_proto.dim_size()); for (const TensorShapeProto::Dim& dim : output_shape_proto.dim()) { // Note that this may be -1 (if dimension size is unknown). result->push_back(dim.size()); } if ((*result)[0] < 0) { (*result)[0] = first_dim; } for (int i = 1; i <= ragged_rank_; ++i) { if ((*result)[i] < 0) { TF_RETURN_IF_ERROR(GetMaxWidth(c, i, &(*result)[i])); } } return Status::OK(); } /** * The output_index represents the index in the output tensor * where the first element of a particular dimension would be written. * If it is -1, it indicates that the index is out of scope. 
* Example, given first_dimension = 10, first_dimension_output = 6, * and output_index_multiplier = 100: * result = [0 100 200 300 400 500 -1 -1 -1 -1] * If first_dimension_output = 11 instead, then: * result = [0 100 200 300 400 500 600 700 800 900] */ void CalculateFirstParentOutputIndex(INDEX_TYPE first_dimension, INDEX_TYPE output_index_multiplier, INDEX_TYPE first_dimension_output, vector<INDEX_TYPE>* result) { const INDEX_TYPE min_dimension = std::min(first_dimension, first_dimension_output); result->reserve(first_dimension); int current_output_index = 0; for (INDEX_TYPE i = 0; i < min_dimension; ++i, current_output_index += output_index_multiplier) { result->push_back(current_output_index); } for (INDEX_TYPE i = min_dimension; i < first_dimension; ++i) { result->push_back(-1); } DCHECK_EQ(result->size(), first_dimension); } void CalculateOutputIndexRowSplit( OpKernelContext* context, const RowPartitionTensor& row_split, const vector<INDEX_TYPE>& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector<INDEX_TYPE>* result) { INDEX_TYPE row_split_size = row_split.size(); if (row_split_size > 0) { result->reserve(row_split(row_split_size - 1)); } for (INDEX_TYPE i = 0; i < row_split_size - 1; ++i) { INDEX_TYPE row_length = row_split(i + 1) - row_split(i); INDEX_TYPE real_length = std::min(output_size, row_length); INDEX_TYPE parent_output_index_current = parent_output_index[i]; if (parent_output_index_current == -1) { real_length = 0; } for (INDEX_TYPE j = 0; j < real_length; ++j) { result->push_back(parent_output_index_current); parent_output_index_current += output_index_multiplier; } for (INDEX_TYPE j = 0; j < row_length - real_length; ++j) { result->push_back(-1); } } if (row_split_size > 0) { OP_REQUIRES(context, result->size() == row_split(row_split_size - 1), errors::InvalidArgument("Invalid row split size.")); } } // Calculate the output index of the first element of a list. // The parent_output_index is the same computation for the previous list. // -1 indicates an element or list that is out of range. // The output_index_multiplier is the number of output indices one moves // forward for each column. // E.g., given: // value_rowids:[0 1 2 2 2 3 5 5 6] // parent_output_index:[1000 1100 2000 2100 -1 3000 4000] // output_index_multiplier: 10 // output_size: 2 // You get: // result = [1000 1100 2000 2010 -1 2100 -1 -1 3000] // result[0] = parent_output_index[value_rowids[0]] // result[1] = parent_output_index[value_rowids[1]] // result[2] = parent_output_index[value_rowids[2]] // result[3] = parent_output_index[value_rowids[2] + 10] // result[4] = -1 because it is the third element the size is 2. 
// result[5] = parent_output_index[value_rowids[3]] // result[6] = -1 because parent_output_index[value_rowids[6]] == -1 // result[7] = -1 because parent_output_index[value_rowids[6]] == -1 // result[8] = parent_output_index[value_rowids[7]] void CalculateOutputIndexValueRowID( OpKernelContext* context, const RowPartitionTensor& value_rowids, const vector<INDEX_TYPE>& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector<INDEX_TYPE>* result) { const INDEX_TYPE index_size = value_rowids.size(); result->reserve(index_size); if (index_size == 0) { return; } INDEX_TYPE current_output_column = 0; INDEX_TYPE current_value_rowid = value_rowids(0); DCHECK_LT(current_value_rowid, parent_output_index.size()); INDEX_TYPE current_output_index = parent_output_index[current_value_rowid]; result->push_back(current_output_index); for (INDEX_TYPE i = 1; i < index_size; ++i) { INDEX_TYPE next_value_rowid = value_rowids(i); if (next_value_rowid == current_value_rowid) { if (current_output_index >= 0) { ++current_output_column; if (current_output_column < output_size) { current_output_index += output_index_multiplier; } else { current_output_index = -1; } } } else { current_output_column = 0; current_value_rowid = next_value_rowid; DCHECK_LT(next_value_rowid, parent_output_index.size()); current_output_index = parent_output_index[next_value_rowid]; } result->push_back(current_output_index); } OP_REQUIRES(context, result->size() == value_rowids.size(), errors::InvalidArgument("Invalid row ids.")); } Status CalculateOutputIndex(OpKernelContext* context, int dimension, const vector<INDEX_TYPE>& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector<INDEX_TYPE>* result) { const RowPartitionTensor row_partition_tensor = GetRowPartitionTensor(context, dimension); auto partition_type = GetRowPartitionTypeByDimension(dimension); switch (partition_type) { case RowPartitionType::VALUE_ROWIDS: CalculateOutputIndexValueRowID( context, row_partition_tensor, parent_output_index, output_index_multiplier, output_size, result); return tensorflow::Status::OK(); case RowPartitionType::ROW_SPLITS: CalculateOutputIndexRowSplit( context, row_partition_tensor, parent_output_index, output_index_multiplier, output_size, result); return tensorflow::Status::OK(); default: return errors::InvalidArgument( "Unsupported partition type:", RowPartitionTypeToString(partition_type)); } } Status GetFirstDimensionSize(OpKernelContext* context, INDEX_TYPE* result) { const Tensor first_partition_tensor = context->input(kFirstPartitionInputIndex); const RowPartitionType first_partition_type = row_partition_types_[0]; switch (first_partition_type) { case RowPartitionType::FIRST_DIM_SIZE: *result = first_partition_tensor.scalar<INDEX_TYPE>()(); return Status::OK(); case RowPartitionType::VALUE_ROWIDS: return errors::InvalidArgument( "Cannot handle VALUE_ROWIDS in first dimension."); case RowPartitionType::ROW_SPLITS: *result = first_partition_tensor.shape().dim_size(0) - 1; return Status::OK(); default: return errors::InvalidArgument( "Cannot handle type ", RowPartitionTypeToString(first_partition_type)); } } void Compute(OpKernelContext* context) override { INDEX_TYPE first_dimension; const Tensor first_partition_tensor = context->input(kFirstPartitionInputIndex); OP_REQUIRES(context, first_partition_tensor.NumElements() > 0, errors::InvalidArgument("Invalid first partition input. 
Tensor " "requires at least one element.")); OP_REQUIRES_OK(context, GetFirstDimensionSize(context, &first_dimension)); vector<INDEX_TYPE> output_size; OP_REQUIRES_OK(context, CalculateOutputSize(first_dimension, context, &output_size)); vector<INDEX_TYPE> multiplier; multiplier.resize(ragged_rank_ + 1); multiplier[multiplier.size() - 1] = 1; for (int i = multiplier.size() - 2; i >= 0; --i) { multiplier[i] = multiplier[i + 1] * output_size[i + 1]; } // Full size of the tensor. TensorShape output_shape; OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(output_size, &output_shape)); Tensor* output_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output_tensor)); const INDEX_TYPE full_size = multiplier[0] * output_size[0]; if (full_size > 0) { vector<INDEX_TYPE> output_index, new_output_index; int nvals = context->input(kValueInputIndex).shape().dim_size(0); output_index.reserve(nvals); new_output_index.reserve(nvals); CalculateFirstParentOutputIndex(first_dimension, multiplier[0], output_size[0], &output_index); for (int i = 1; i <= ragged_rank_; ++i) { OP_REQUIRES_OK(context, CalculateOutputIndex( context, i - 1, output_index, multiplier[i], output_size[i], &new_output_index)); output_index.swap(new_output_index); new_output_index.clear(); } SetOutput(context, ragged_rank_, output_index, output_tensor); } } virtual void SetOutput(OpKernelContext* context, int ragged_rank, const vector<INDEX_TYPE>& output_index, Tensor* output_tensor) = 0; private: vector<RowPartitionType> row_partition_types_; int ragged_rank_; }; template <typename VALUE_TYPE, typename INDEX_TYPE> void slow_copy_array(VALUE_TYPE* dst, const VALUE_TYPE* src, INDEX_TYPE size) { for (INDEX_TYPE index = 0; index < size; ++index) { dst[index] = src[index]; } } template <typename VALUE_TYPE, typename INDEX_TYPE> void copy_array(VALUE_TYPE* dst, const VALUE_TYPE* src, INDEX_TYPE size) { memcpy(dst, src, size * sizeof(VALUE_TYPE)); } template <> void copy_array<tstring, int64>(tstring* dst, const tstring* src, int64 size) { slow_copy_array(dst, src, size); } template <> void copy_array<tstring, int32>(tstring* dst, const tstring* src, int32 size) { slow_copy_array(dst, src, size); } // If we don't specialize for Eigen::half, we get: // undefined behavior, destination object type 'Eigen::half' // is not TriviallyCopyable template <> void copy_array<Eigen::half, int64>(Eigen::half* dst, const Eigen::half* src, int64 size) { slow_copy_array(dst, src, size); } template <> void copy_array<Eigen::half, int32>(Eigen::half* dst, const Eigen::half* src, int32 size) { slow_copy_array(dst, src, size); } template <typename VALUE_TYPE, typename INDEX_TYPE> class RaggedTensorToTensorOp : public RaggedTensorToTensorBaseOp<INDEX_TYPE> { public: explicit RaggedTensorToTensorOp(OpKernelConstruction* context) : RaggedTensorToTensorBaseOp<INDEX_TYPE>(context) {} void SetOutput(OpKernelContext* context, int ragged_rank, const vector<INDEX_TYPE>& output_index, Tensor* output_tensor) override { // Note: it's ok to use OP_REQUIRES_OK (rather than TF_RETURN_IF_ERROR) // in this function, but only because it's the last thing we do before // returning from Compute(). 
if (output_tensor->NumElements() == 0) return; const auto& values_tensor = context->input(kValueInputIndex); const VALUE_TYPE* values_base = values_tensor.flat<VALUE_TYPE>().data(); const auto& default_value_tensor = context->input(kDefaultValueInputIndex); VALUE_TYPE* output_base = output_tensor->flat<VALUE_TYPE>().data(); TensorShape element_shape = output_tensor->shape(); element_shape.RemoveDimRange(0, ragged_rank + 1); int value_element_size = element_shape.num_elements(); size_t output_index_size = output_index.size(); // Broadcast the default value to value_element_size. (We can skip this // if default_value_tensor.NumElements() == 1, since we use std::fill // when that's true.) const VALUE_TYPE* default_value = default_value_tensor.flat<VALUE_TYPE>().data(); Tensor bcast_default; // Temporary tensor for result of broadcast if (default_value_tensor.NumElements() != value_element_size && default_value_tensor.NumElements() != 1) { const auto& src_shape = default_value_tensor.shape(); BCast bcast(BCast::FromShape(src_shape), BCast::FromShape(element_shape), /*fewer_dims_optimization=*/true); // Note: bcast should always be valid, since we rejected any incompatible // shapes when we called ValidateDefaultValueShape(). OP_REQUIRES(context, bcast.IsValid(), errors::InvalidArgument("Error broadcasting default_value")); OP_REQUIRES_OK(context, context->allocate_temp(default_value_tensor.dtype(), element_shape, &bcast_default)); const CPUDevice& device = context->eigen_device<CPUDevice>(); functor::BroadcastTo<CPUDevice, VALUE_TYPE>()( device, context, bcast_default, element_shape, default_value_tensor, src_shape, bcast); default_value = bcast_default.flat<VALUE_TYPE>().data(); } // Loop through the output_index vector, finding contiguous regions that // should be copied. Once we find the end of a contiguous region, copy it // and add any necessary padding (with default_value). INDEX_TYPE src_start = 0; // Start of contiguous region (in values) INDEX_TYPE dst_start = 0; // Destination for contiguous region (in output) INDEX_TYPE dst_end = 0; // Destination for contiguous region (in output) for (int src_i = 0; src_i <= output_index_size; ++src_i) { // dst_i is the destination where the value at src_i should be copied. INDEX_TYPE dst_i = src_i < output_index_size ? output_index[src_i] : -1; // If we're still in a contiguous region, then update dst_end go to the // next src_i. if (dst_i == dst_end) { ++dst_end; continue; } // We found the end of contiguous region. This can be because we found // a gap (dst_i > dst_end), or a source value that shouldn't be copied // because it's out-of-bounds (dst_i == -1), or the end of the tensor // (dst_i = -1). if (dst_start < dst_end) { // Copy the contiguous region. const VALUE_TYPE* src = values_base + src_start * value_element_size; VALUE_TYPE* dst = output_base + dst_start * value_element_size; INDEX_TYPE nvals = (dst_end - dst_start) * value_element_size; copy_array<VALUE_TYPE, INDEX_TYPE>(dst, src, nvals); } // Add any necessary padding (w/ default_value). if (src_i >= output_index_size) { // We reached the end of values: pad to the end of output. 
size_t output_size = output_tensor->NumElements(); dst_i = output_size / value_element_size; } if (dst_i > dst_end) { if (default_value_tensor.NumElements() == 1) { std::fill(output_base + dst_end * value_element_size, output_base + dst_i * value_element_size, *default_value); dst_end = dst_i; } else { while (dst_i > dst_end) { VALUE_TYPE* dst = output_base + dst_end * value_element_size; copy_array<VALUE_TYPE, INDEX_TYPE>(dst, default_value, value_element_size); ++dst_end; } } } // Update indices. if (dst_i < 0) { // src_i should be skipped -- leave it out of the contiguous region. src_start = src_i + 1; dst_start = dst_end; } else { // src_i should be copied -- include it in the contiguous region. src_start = src_i; dst_start = dst_end; dst_end = dst_start + 1; } } } }; #define REGISTER_CPU_KERNEL_INDEX_TYPE(value_type, index_type) \ REGISTER_KERNEL_BUILDER(Name("RaggedTensorToTensor") \ .Device(DEVICE_CPU) \ .TypeConstraint<value_type>("T") \ .TypeConstraint<index_type>("Tindex"), \ RaggedTensorToTensorOp<value_type, index_type>); #define REGISTER_CPU_KERNEL(value_type) \ REGISTER_CPU_KERNEL_INDEX_TYPE(value_type, tensorflow::int64); \ REGISTER_CPU_KERNEL_INDEX_TYPE(value_type, tensorflow::int32); TF_CALL_POD_TYPES(REGISTER_CPU_KERNEL); TF_CALL_string(REGISTER_CPU_KERNEL); TF_CALL_QUANTIZED_TYPES(REGISTER_CPU_KERNEL); TF_CALL_quint16(REGISTER_CPU_KERNEL); TF_CALL_qint16(REGISTER_CPU_KERNEL); #undef REGISTER_CPU_KERNEL } // namespace } // namespace tensorflow
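This row's metadata (CWE-787, CVE-2021-29560) tags the kernel above as an out-of-bounds write. In this vulnerable copy, row ids coming from the user-supplied partition tensors are only checked with DCHECK_LT (a no-op in release builds) before they index parent_output_index, and the resulting output_index values drive raw copy_array() writes into the dense output buffer. As a rough illustration of the defensive pattern only, a hedged sketch and not the upstream patch, a release-mode bounds check on such a lookup could look like the following; LookupParentIndex is a hypothetical helper name and the data in main() is invented.

#include <cstdio>
#include <vector>

// Sketch: consult parent_output_index[row_id] only after a runtime bounds
// check, instead of relying on a debug-only DCHECK. Returns false to stand in
// for the InvalidArgument path a real kernel would take.
bool LookupParentIndex(const std::vector<long long>& parent_output_index,
                       long long row_id, long long* out) {
  if (row_id < 0 ||
      row_id >= static_cast<long long>(parent_output_index.size())) {
    std::fprintf(stderr, "row id %lld out of range [0, %zu)\n", row_id,
                 parent_output_index.size());
    return false;
  }
  *out = parent_output_index[row_id];
  return true;
}

int main() {
  std::vector<long long> parent_output_index = {0, 100, 200};  // example data
  long long idx = 0;
  // A well-formed row id succeeds; an oversized one (as a malformed
  // value_rowids tensor could supply) is rejected instead of reaching
  // memory outside the table.
  std::printf("ok=%d idx=%lld\n",
              LookupParentIndex(parent_output_index, 1, &idx), idx);
  std::printf("ok=%d\n", LookupParentIndex(parent_output_index, 7, &idx));
}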
null
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #define EIGEN_USE_THREADS #include <stddef.h> #include <algorithm> #include <string> #include <vector> #include "tensorflow/core/framework/kernel_def_builder.h" #include "tensorflow/core/framework/numeric_types.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/framework/tensor_types.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/broadcast_to_op.h" #include "tensorflow/core/kernels/list_kernels.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/bfloat16.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/util/bcast.h" #include "tensorflow/core/util/ragged_to_dense_util.h" namespace tensorflow { namespace { typedef Eigen::ThreadPoolDevice CPUDevice; using ::std::vector; const int kShapeInputIndex = 0; const int kValueInputIndex = 1; const int kDefaultValueInputIndex = 2; const int kFirstPartitionInputIndex = 3; template <typename INDEX_TYPE> class RaggedTensorToTensorBaseOp : public OpKernel { public: typedef typename ::tensorflow::TTypes<const INDEX_TYPE>::Flat RowPartitionTensor; explicit RaggedTensorToTensorBaseOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, GetRowPartitionTypes<OpKernelConstruction>( context, &row_partition_types_)); ragged_rank_ = GetRaggedRank(row_partition_types_); } // Returns the relationship between dimension and dimension + 1. RowPartitionType GetRowPartitionTypeByDimension(int dimension) { if (row_partition_types_[0] == RowPartitionType::FIRST_DIM_SIZE) { return row_partition_types_[dimension + 1]; } else { return row_partition_types_[dimension]; } } // Returns the relationship between dimension and dimension + 1. 
RowPartitionTensor GetRowPartitionTensor(OpKernelContext* c, int dimension) { if (row_partition_types_[0] == RowPartitionType::FIRST_DIM_SIZE) { return c->input(dimension + 1 + kFirstPartitionInputIndex) .flat<INDEX_TYPE>(); } else { return c->input(dimension + kFirstPartitionInputIndex).flat<INDEX_TYPE>(); } } Status GetMaxWidth(OpKernelContext* c, int dimension, INDEX_TYPE* result) { const RowPartitionTensor row_partition_tensor = GetRowPartitionTensor(c, dimension - 1); switch (GetRowPartitionTypeByDimension(dimension - 1)) { case RowPartitionType::VALUE_ROWIDS: *result = GetMaxWidthValueRowID(row_partition_tensor); return Status::OK(); case RowPartitionType::ROW_SPLITS: *result = GetMaxWidthRowSplit(row_partition_tensor); return Status::OK(); default: return errors::InvalidArgument( "Cannot handle partition type ", RowPartitionTypeToString( GetRowPartitionTypeByDimension(dimension - 1))); } } static INDEX_TYPE GetMaxWidthRowSplit(const RowPartitionTensor& row_split) { const INDEX_TYPE tensor_length = row_split.size(); if (tensor_length == 0 || tensor_length == 1) { return 0; } INDEX_TYPE max_width = 0; for (INDEX_TYPE i = 0; i < tensor_length - 1; ++i) { const INDEX_TYPE current_width = row_split(i + 1) - row_split(i); if (current_width > max_width) { max_width = current_width; } } return max_width; } static INDEX_TYPE GetMaxWidthValueRowID( const RowPartitionTensor& value_rowids) { const INDEX_TYPE index_length = value_rowids.size(); if (index_length == 0) { return 0; } INDEX_TYPE first_equal_index = 0; INDEX_TYPE first_equal_index_value = value_rowids(0); INDEX_TYPE max_width = 0; for (INDEX_TYPE i = 1; i < index_length; ++i) { const INDEX_TYPE value = value_rowids(i); if (value != first_equal_index_value) { first_equal_index_value = value; max_width = std::max(i - first_equal_index, max_width); first_equal_index = i; } } return std::max(index_length - first_equal_index, max_width); } Status CalculateOutputSize(INDEX_TYPE first_dim, OpKernelContext* c, vector<INDEX_TYPE>* result) { TensorShapeProto value_shape_proto; c->input(kValueInputIndex).shape().AsProto(&value_shape_proto); TensorShapeProto default_value_shape_proto; c->input(kDefaultValueInputIndex) .shape() .AsProto(&default_value_shape_proto); TensorShapeProto output_shape_proto; TF_RETURN_IF_ERROR(ValidateDefaultValueShape(default_value_shape_proto, value_shape_proto)); TensorShapeProto shape_proto; { PartialTensorShape partial_tensor_shape; TF_RETURN_IF_ERROR(TensorShapeFromTensor(c->input(kShapeInputIndex), &partial_tensor_shape)); partial_tensor_shape.AsProto(&shape_proto); } TF_RETURN_IF_ERROR(CombineRaggedTensorToTensorShapes( ragged_rank_, shape_proto, value_shape_proto, &output_shape_proto)); result->reserve(output_shape_proto.dim_size()); for (const TensorShapeProto::Dim& dim : output_shape_proto.dim()) { // Note that this may be -1 (if dimension size is unknown). result->push_back(dim.size()); } if ((*result)[0] < 0) { (*result)[0] = first_dim; } for (int i = 1; i <= ragged_rank_; ++i) { if ((*result)[i] < 0) { TF_RETURN_IF_ERROR(GetMaxWidth(c, i, &(*result)[i])); } } return Status::OK(); } /** * The output_index represents the index in the output tensor * where the first element of a particular dimension would be written. * If it is -1, it indicates that the index is out of scope. 
* Example, given first_dimension = 10, first_dimension_output = 6, * and output_index_multiplier = 100: * result = [0 100 200 300 400 500 -1 -1 -1 -1] * If first_dimension_output = 11 instead, then: * result = [0 100 200 300 400 500 600 700 800 900] */ void CalculateFirstParentOutputIndex(INDEX_TYPE first_dimension, INDEX_TYPE output_index_multiplier, INDEX_TYPE first_dimension_output, vector<INDEX_TYPE>* result) { const INDEX_TYPE min_dimension = std::min(first_dimension, first_dimension_output); result->reserve(first_dimension); int current_output_index = 0; for (INDEX_TYPE i = 0; i < min_dimension; ++i, current_output_index += output_index_multiplier) { result->push_back(current_output_index); } for (INDEX_TYPE i = min_dimension; i < first_dimension; ++i) { result->push_back(-1); } DCHECK_EQ(result->size(), first_dimension); } void CalculateOutputIndexRowSplit( OpKernelContext* context, const RowPartitionTensor& row_split, const vector<INDEX_TYPE>& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector<INDEX_TYPE>* result) { INDEX_TYPE row_split_size = row_split.size(); if (row_split_size > 0) { result->reserve(row_split(row_split_size - 1)); } for (INDEX_TYPE i = 0; i < row_split_size - 1; ++i) { INDEX_TYPE row_length = row_split(i + 1) - row_split(i); INDEX_TYPE real_length = std::min(output_size, row_length); INDEX_TYPE parent_output_index_current = parent_output_index[i]; if (parent_output_index_current == -1) { real_length = 0; } for (INDEX_TYPE j = 0; j < real_length; ++j) { result->push_back(parent_output_index_current); parent_output_index_current += output_index_multiplier; } for (INDEX_TYPE j = 0; j < row_length - real_length; ++j) { result->push_back(-1); } } if (row_split_size > 0) { OP_REQUIRES(context, result->size() == row_split(row_split_size - 1), errors::InvalidArgument("Invalid row split size.")); } } // Calculate the output index of the first element of a list. // The parent_output_index is the same computation for the previous list. // -1 indicates an element or list that is out of range. // The output_index_multiplier is the number of output indices one moves // forward for each column. // E.g., given: // value_rowids:[0 1 2 2 2 3 5 5 6] // parent_output_index:[1000 1100 2000 2100 -1 3000 4000] // output_index_multiplier: 10 // output_size: 2 // You get: // result = [1000 1100 2000 2010 -1 2100 -1 -1 3000] // result[0] = parent_output_index[value_rowids[0]] // result[1] = parent_output_index[value_rowids[1]] // result[2] = parent_output_index[value_rowids[2]] // result[3] = parent_output_index[value_rowids[2] + 10] // result[4] = -1 because it is the third element the size is 2. 
// result[5] = parent_output_index[value_rowids[3]] // result[6] = -1 because parent_output_index[value_rowids[6]] == -1 // result[7] = -1 because parent_output_index[value_rowids[6]] == -1 // result[8] = parent_output_index[value_rowids[7]] void CalculateOutputIndexValueRowID( OpKernelContext* context, const RowPartitionTensor& value_rowids, const vector<INDEX_TYPE>& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector<INDEX_TYPE>* result) { const INDEX_TYPE index_size = value_rowids.size(); result->reserve(index_size); if (index_size == 0) { return; } INDEX_TYPE current_output_column = 0; INDEX_TYPE current_value_rowid = value_rowids(0); DCHECK_LT(current_value_rowid, parent_output_index.size()); INDEX_TYPE current_output_index = parent_output_index[current_value_rowid]; result->push_back(current_output_index); for (INDEX_TYPE i = 1; i < index_size; ++i) { INDEX_TYPE next_value_rowid = value_rowids(i); if (next_value_rowid == current_value_rowid) { if (current_output_index >= 0) { ++current_output_column; if (current_output_column < output_size) { current_output_index += output_index_multiplier; } else { current_output_index = -1; } } } else { current_output_column = 0; current_value_rowid = next_value_rowid; DCHECK_LT(next_value_rowid, parent_output_index.size()); current_output_index = parent_output_index[next_value_rowid]; } result->push_back(current_output_index); } OP_REQUIRES(context, result->size() == value_rowids.size(), errors::InvalidArgument("Invalid row ids.")); } Status CalculateOutputIndex(OpKernelContext* context, int dimension, const vector<INDEX_TYPE>& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector<INDEX_TYPE>* result) { const RowPartitionTensor row_partition_tensor = GetRowPartitionTensor(context, dimension); auto partition_type = GetRowPartitionTypeByDimension(dimension); switch (partition_type) { case RowPartitionType::VALUE_ROWIDS: CalculateOutputIndexValueRowID( context, row_partition_tensor, parent_output_index, output_index_multiplier, output_size, result); return tensorflow::Status::OK(); case RowPartitionType::ROW_SPLITS: if (row_partition_tensor.size() - 1 > parent_output_index.size()) { return errors::InvalidArgument( "Row partition size is greater than output size: ", row_partition_tensor.size() - 1, " > ", parent_output_index.size()); } CalculateOutputIndexRowSplit( context, row_partition_tensor, parent_output_index, output_index_multiplier, output_size, result); return tensorflow::Status::OK(); default: return errors::InvalidArgument( "Unsupported partition type:", RowPartitionTypeToString(partition_type)); } } Status GetFirstDimensionSize(OpKernelContext* context, INDEX_TYPE* result) { const Tensor first_partition_tensor = context->input(kFirstPartitionInputIndex); const RowPartitionType first_partition_type = row_partition_types_[0]; switch (first_partition_type) { case RowPartitionType::FIRST_DIM_SIZE: *result = first_partition_tensor.scalar<INDEX_TYPE>()(); return Status::OK(); case RowPartitionType::VALUE_ROWIDS: return errors::InvalidArgument( "Cannot handle VALUE_ROWIDS in first dimension."); case RowPartitionType::ROW_SPLITS: *result = first_partition_tensor.shape().dim_size(0) - 1; return Status::OK(); default: return errors::InvalidArgument( "Cannot handle type ", RowPartitionTypeToString(first_partition_type)); } } void Compute(OpKernelContext* context) override { INDEX_TYPE first_dimension; const Tensor first_partition_tensor = context->input(kFirstPartitionInputIndex); 
OP_REQUIRES(context, first_partition_tensor.NumElements() > 0, errors::InvalidArgument("Invalid first partition input. Tensor " "requires at least one element.")); OP_REQUIRES_OK(context, GetFirstDimensionSize(context, &first_dimension)); vector<INDEX_TYPE> output_size; OP_REQUIRES_OK(context, CalculateOutputSize(first_dimension, context, &output_size)); vector<INDEX_TYPE> multiplier; multiplier.resize(ragged_rank_ + 1); multiplier[multiplier.size() - 1] = 1; for (int i = multiplier.size() - 2; i >= 0; --i) { multiplier[i] = multiplier[i + 1] * output_size[i + 1]; } // Full size of the tensor. TensorShape output_shape; OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(output_size, &output_shape)); Tensor* output_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output_tensor)); const INDEX_TYPE full_size = multiplier[0] * output_size[0]; if (full_size > 0) { vector<INDEX_TYPE> output_index, new_output_index; int nvals = context->input(kValueInputIndex).shape().dim_size(0); output_index.reserve(nvals); new_output_index.reserve(nvals); CalculateFirstParentOutputIndex(first_dimension, multiplier[0], output_size[0], &output_index); for (int i = 1; i <= ragged_rank_; ++i) { OP_REQUIRES_OK(context, CalculateOutputIndex( context, i - 1, output_index, multiplier[i], output_size[i], &new_output_index)); output_index.swap(new_output_index); new_output_index.clear(); } SetOutput(context, ragged_rank_, output_index, output_tensor); } } virtual void SetOutput(OpKernelContext* context, int ragged_rank, const vector<INDEX_TYPE>& output_index, Tensor* output_tensor) = 0; private: vector<RowPartitionType> row_partition_types_; int ragged_rank_; }; template <typename VALUE_TYPE, typename INDEX_TYPE> void slow_copy_array(VALUE_TYPE* dst, const VALUE_TYPE* src, INDEX_TYPE size) { for (INDEX_TYPE index = 0; index < size; ++index) { dst[index] = src[index]; } } template <typename VALUE_TYPE, typename INDEX_TYPE> void copy_array(VALUE_TYPE* dst, const VALUE_TYPE* src, INDEX_TYPE size) { memcpy(dst, src, size * sizeof(VALUE_TYPE)); } template <> void copy_array<tstring, int64>(tstring* dst, const tstring* src, int64 size) { slow_copy_array(dst, src, size); } template <> void copy_array<tstring, int32>(tstring* dst, const tstring* src, int32 size) { slow_copy_array(dst, src, size); } // If we don't specialize for Eigen::half, we get: // undefined behavior, destination object type 'Eigen::half' // is not TriviallyCopyable template <> void copy_array<Eigen::half, int64>(Eigen::half* dst, const Eigen::half* src, int64 size) { slow_copy_array(dst, src, size); } template <> void copy_array<Eigen::half, int32>(Eigen::half* dst, const Eigen::half* src, int32 size) { slow_copy_array(dst, src, size); } template <typename VALUE_TYPE, typename INDEX_TYPE> class RaggedTensorToTensorOp : public RaggedTensorToTensorBaseOp<INDEX_TYPE> { public: explicit RaggedTensorToTensorOp(OpKernelConstruction* context) : RaggedTensorToTensorBaseOp<INDEX_TYPE>(context) {} void SetOutput(OpKernelContext* context, int ragged_rank, const vector<INDEX_TYPE>& output_index, Tensor* output_tensor) override { // Note: it's ok to use OP_REQUIRES_OK (rather than TF_RETURN_IF_ERROR) // in this function, but only because it's the last thing we do before // returning from Compute(). 
if (output_tensor->NumElements() == 0) return; const auto& values_tensor = context->input(kValueInputIndex); const VALUE_TYPE* values_base = values_tensor.flat<VALUE_TYPE>().data(); const auto& default_value_tensor = context->input(kDefaultValueInputIndex); VALUE_TYPE* output_base = output_tensor->flat<VALUE_TYPE>().data(); TensorShape element_shape = output_tensor->shape(); element_shape.RemoveDimRange(0, ragged_rank + 1); int value_element_size = element_shape.num_elements(); size_t output_index_size = output_index.size(); // Broadcast the default value to value_element_size. (We can skip this // if default_value_tensor.NumElements() == 1, since we use std::fill // when that's true.) const VALUE_TYPE* default_value = default_value_tensor.flat<VALUE_TYPE>().data(); Tensor bcast_default; // Temporary tensor for result of broadcast if (default_value_tensor.NumElements() != value_element_size && default_value_tensor.NumElements() != 1) { const auto& src_shape = default_value_tensor.shape(); BCast bcast(BCast::FromShape(src_shape), BCast::FromShape(element_shape), /*fewer_dims_optimization=*/true); // Note: bcast should always be valid, since we rejected any incompatible // shapes when we called ValidateDefaultValueShape(). OP_REQUIRES(context, bcast.IsValid(), errors::InvalidArgument("Error broadcasting default_value")); OP_REQUIRES_OK(context, context->allocate_temp(default_value_tensor.dtype(), element_shape, &bcast_default)); const CPUDevice& device = context->eigen_device<CPUDevice>(); functor::BroadcastTo<CPUDevice, VALUE_TYPE>()( device, context, bcast_default, element_shape, default_value_tensor, src_shape, bcast); default_value = bcast_default.flat<VALUE_TYPE>().data(); } // Loop through the output_index vector, finding contiguous regions that // should be copied. Once we find the end of a contiguous region, copy it // and add any necessary padding (with default_value). INDEX_TYPE src_start = 0; // Start of contiguous region (in values) INDEX_TYPE dst_start = 0; // Destination for contiguous region (in output) INDEX_TYPE dst_end = 0; // Destination for contiguous region (in output) for (int src_i = 0; src_i <= output_index_size; ++src_i) { // dst_i is the destination where the value at src_i should be copied. INDEX_TYPE dst_i = src_i < output_index_size ? output_index[src_i] : -1; // If we're still in a contiguous region, then update dst_end go to the // next src_i. if (dst_i == dst_end) { ++dst_end; continue; } // We found the end of contiguous region. This can be because we found // a gap (dst_i > dst_end), or a source value that shouldn't be copied // because it's out-of-bounds (dst_i == -1), or the end of the tensor // (dst_i = -1). if (dst_start < dst_end) { // Copy the contiguous region. const VALUE_TYPE* src = values_base + src_start * value_element_size; VALUE_TYPE* dst = output_base + dst_start * value_element_size; INDEX_TYPE nvals = (dst_end - dst_start) * value_element_size; copy_array<VALUE_TYPE, INDEX_TYPE>(dst, src, nvals); } // Add any necessary padding (w/ default_value). if (src_i >= output_index_size) { // We reached the end of values: pad to the end of output. 
size_t output_size = output_tensor->NumElements(); dst_i = output_size / value_element_size; } if (dst_i > dst_end) { if (default_value_tensor.NumElements() == 1) { std::fill(output_base + dst_end * value_element_size, output_base + dst_i * value_element_size, *default_value); dst_end = dst_i; } else { while (dst_i > dst_end) { VALUE_TYPE* dst = output_base + dst_end * value_element_size; copy_array<VALUE_TYPE, INDEX_TYPE>(dst, default_value, value_element_size); ++dst_end; } } } // Update indices. if (dst_i < 0) { // src_i should be skipped -- leave it out of the contiguous region. src_start = src_i + 1; dst_start = dst_end; } else { // src_i should be copied -- include it in the contiguous region. src_start = src_i; dst_start = dst_end; dst_end = dst_start + 1; } } } }; #define REGISTER_CPU_KERNEL_INDEX_TYPE(value_type, index_type) \ REGISTER_KERNEL_BUILDER(Name("RaggedTensorToTensor") \ .Device(DEVICE_CPU) \ .TypeConstraint<value_type>("T") \ .TypeConstraint<index_type>("Tindex"), \ RaggedTensorToTensorOp<value_type, index_type>); #define REGISTER_CPU_KERNEL(value_type) \ REGISTER_CPU_KERNEL_INDEX_TYPE(value_type, tensorflow::int64); \ REGISTER_CPU_KERNEL_INDEX_TYPE(value_type, tensorflow::int32); TF_CALL_POD_TYPES(REGISTER_CPU_KERNEL); TF_CALL_string(REGISTER_CPU_KERNEL); TF_CALL_QUANTIZED_TYPES(REGISTER_CPU_KERNEL); TF_CALL_quint16(REGISTER_CPU_KERNEL); TF_CALL_qint16(REGISTER_CPU_KERNEL); #undef REGISTER_CPU_KERNEL } // namespace } // namespace tensorflow
null
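The RaggedTensorToTensor kernel listed above guards GetFirstDimensionSize() in Compute(): it requires the first partition tensor to contain at least one element before its scalar value is read. The short sketch below is not TensorFlow code; it replays the same validate-before-read pattern on a plain container, with hypothetical names, to make the effect of that guard concrete.

#include <iostream>
#include <optional>
#include <vector>

// Minimal sketch (assumed names, not the TensorFlow API): refuse to derive a
// dimension size from an empty partition tensor instead of unconditionally
// dereferencing its first element, mirroring the OP_REQUIRES guard above.
std::optional<long> first_dimension_size(const std::vector<long>& first_partition) {
  if (first_partition.empty()) {
    return std::nullopt;  // stands in for the InvalidArgument status in the kernel
  }
  return first_partition.front();  // FIRST_DIM_SIZE case: read the scalar value
}

int main() {
  std::vector<long> ok{8};
  std::vector<long> empty;
  std::cout << first_dimension_size(ok).value_or(-1) << "\n";     // prints 8
  std::cout << first_dimension_size(empty).value_or(-1) << "\n";  // prints -1, no bad read
}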
256
CWE-787
CVE-2021-29566
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // See docs in ../ops/nn_ops.cc. #define EIGEN_USE_THREADS #include "tensorflow/core/kernels/dilation_ops.h" #include <cfloat> #include <vector> #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/framework/kernel_shape_util.h" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_slice.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/array_slice.h" #include "tensorflow/core/util/padding.h" namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; void ParseAttributes(OpKernelConstruction* context, std::vector<int32>* strides, std::vector<int32>* rates, Padding* padding) { OP_REQUIRES_OK(context, context->GetAttr("strides", strides)); OP_REQUIRES(context, strides->size() == 4, errors::InvalidArgument("Sliding window stride field must " "specify 4 dimensions")); OP_REQUIRES(context, (*strides)[0] == 1 && (*strides)[3] == 1, errors::Unimplemented( "Stride is only supported across spatial dimensions.")); OP_REQUIRES_OK(context, context->GetAttr("rates", rates)); OP_REQUIRES(context, rates->size() == 4, errors::InvalidArgument("Input stride (atrous rate) field " "must specify 4 dimensions")); OP_REQUIRES(context, (*rates)[0] == 1 && (*rates)[3] == 1, errors::Unimplemented( "Rate is only supported across spatial dimensions.")); OP_REQUIRES_OK(context, context->GetAttr("padding", padding)); } void ParseSizes(OpKernelContext* context, const std::vector<int32>& strides, const std::vector<int32>& rates, const Padding& padding, int* stride_rows, int* stride_cols, int* rate_rows, int* rate_cols, int64* pad_top, int64* pad_left, int64* out_rows, int64* out_cols) { // Input tensor is of the following dimensions: // [ batch, input_rows, input_cols, depth ] const Tensor& input = context->input(0); OP_REQUIRES(context, input.dims() == 4, errors::InvalidArgument("input must be 4-dimensional", input.shape().DebugString())); const int input_rows = input.dim_size(1); const int input_cols = input.dim_size(2); const int depth = input.dim_size(3); // For now we take the stride and rate from the second and third dimensions // only (we do not support striding on the batch or depth dimension). 
*stride_rows = strides[1]; *stride_cols = strides[2]; *rate_rows = rates[1]; *rate_cols = rates[2]; // Input filter is of the following dimensions: // [ filter_rows, filter_cols, depth ] const Tensor& filter = context->input(1); OP_REQUIRES(context, filter.dims() == 3, errors::InvalidArgument("filter must be 3-dimensional: ", filter.shape().DebugString())); const int filter_rows = filter.dim_size(0); const int filter_cols = filter.dim_size(1); OP_REQUIRES(context, depth == filter.dim_size(2), errors::InvalidArgument( "input and filter must have the same depth: ", depth, " vs ", filter.dim_size(2))); // Effective filter size, after introducing rate - 1 zeros between each // non-zero filter element. const int filter_rows_eff = filter_rows + (filter_rows - 1) * (*rate_rows - 1); const int filter_cols_eff = filter_cols + (filter_cols - 1) * (*rate_cols - 1); OP_REQUIRES_OK( context, GetWindowedOutputSize(input_rows, filter_rows_eff, *stride_rows, padding, out_rows, pad_top)); OP_REQUIRES_OK( context, GetWindowedOutputSize(input_cols, filter_cols_eff, *stride_cols, padding, out_cols, pad_left)); } template <typename Device, typename T> class DilationOp : public OpKernel { public: explicit DilationOp(OpKernelConstruction* context) : OpKernel(context) { ParseAttributes(context, &strides_, &rates_, &padding_); } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const Tensor& filter = context->input(1); // Determine relevant sizes from input and filters. int stride_rows = 0, stride_cols = 0; int rate_rows = 0, rate_cols = 0; int64 pad_top = 0, pad_left = 0; int64 out_rows = 0, out_cols = 0; ParseSizes(context, strides_, rates_, padding_, &stride_rows, &stride_cols, &rate_rows, &rate_cols, &pad_top, &pad_left, &out_rows, &out_cols); // Output tensor is of the following dimensions: // [ batch, out_rows, out_cols, depth ] const int batch = input.dim_size(0); const int depth = input.dim_size(3); const std::vector<int64> out_sizes = {batch, out_rows, out_cols, depth}; TensorShape out_shape(out_sizes); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output)); // If there is nothing to compute, return. if (out_shape.num_elements() == 0) { return; } functor::Dilation<Device, T>()( context->eigen_device<Device>(), input.tensor<T, 4>(), filter.tensor<T, 3>(), stride_rows, stride_cols, rate_rows, rate_cols, pad_top, pad_left, output->tensor<T, 4>()); } std::vector<int32> strides_; std::vector<int32> rates_; Padding padding_; }; // Partial specialization of Dilation functor for a CPUDevice. namespace functor { template <typename T> struct Dilation<CPUDevice, T> { void operator()(const CPUDevice& d, typename TTypes<T, 4>::ConstTensor input, typename TTypes<T, 3>::ConstTensor filter, int stride_rows, int stride_cols, int rate_rows, int rate_cols, int pad_top, int pad_left, typename TTypes<T, 4>::Tensor output) { const int batch = input.dimension(0); const int input_rows = input.dimension(1); const int input_cols = input.dimension(2); const int depth = input.dimension(3); const int filter_rows = filter.dimension(0); const int filter_cols = filter.dimension(1); const int output_rows = output.dimension(1); const int output_cols = output.dimension(2); // This is a reference implementation, likely to be slow. // TODO(gpapan): Write multi-threaded implementation. 
for (int b = 0; b < batch; ++b) { for (int h_out = 0; h_out < output_rows; ++h_out) { int h_beg = h_out * stride_rows - pad_top; for (int w_out = 0; w_out < output_cols; ++w_out) { int w_beg = w_out * stride_cols - pad_left; for (int d = 0; d < depth; ++d) { T cur_val = Eigen::NumTraits<T>::lowest(); for (int h = 0; h < filter_rows; ++h) { const int h_in = h_beg + h * rate_rows; if (h_in >= 0 && h_in < input_rows) { for (int w = 0; w < filter_cols; ++w) { const int w_in = w_beg + w * rate_cols; if (w_in >= 0 && w_in < input_cols) { const T val = input(b, h_in, w_in, d) + filter(h, w, d); if (val > cur_val) { cur_val = val; } } } } } output(b, h_out, w_out, d) = cur_val; } } } } } }; } // namespace functor template <typename Device, typename T> class DilationBackpropInputOp : public OpKernel { public: explicit DilationBackpropInputOp(OpKernelConstruction* context) : OpKernel(context) { ParseAttributes(context, &strides_, &rates_, &padding_); } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const Tensor& filter = context->input(1); const Tensor& out_backprop = context->input(2); // Determine relevant sizes from input and filters. int stride_rows = 0, stride_cols = 0; int rate_rows = 0, rate_cols = 0; int64 pad_top = 0, pad_left = 0; int64 out_rows = 0, out_cols = 0; ParseSizes(context, strides_, rates_, padding_, &stride_rows, &stride_cols, &rate_rows, &rate_cols, &pad_top, &pad_left, &out_rows, &out_cols); // Verify that the incoming gradient tensor has the expected size // [ batch, out_rows, out_cols, depth ] const int batch = input.dim_size(0); const int depth = input.dim_size(3); OP_REQUIRES(context, batch == out_backprop.dim_size(0) && out_rows == out_backprop.dim_size(1) && out_cols == out_backprop.dim_size(2) && depth == out_backprop.dim_size(3), errors::InvalidArgument("out_backprop has incompatible size.")); // The computed in_backprop has the same dimensions as the input: // [ batch, input_rows, input_cols, depth ] Tensor* in_backprop = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, input.shape(), &in_backprop)); // If there is nothing to compute, return. if (input.shape().num_elements() == 0) { return; } functor::DilationBackpropInput<Device, T>()( context->eigen_device<Device>(), input.tensor<T, 4>(), filter.tensor<T, 3>(), out_backprop.tensor<T, 4>(), stride_rows, stride_cols, rate_rows, rate_cols, pad_top, pad_left, in_backprop->tensor<T, 4>()); } std::vector<int32> strides_; std::vector<int32> rates_; Padding padding_; }; // Partial specialization of DilationBackpropInput functor for a CPUDevice. namespace functor { template <typename T> struct DilationBackpropInput<CPUDevice, T> { void operator()(const CPUDevice& d, typename TTypes<T, 4>::ConstTensor input, typename TTypes<T, 3>::ConstTensor filter, typename TTypes<T, 4>::ConstTensor out_backprop, int stride_rows, int stride_cols, int rate_rows, int rate_cols, int pad_top, int pad_left, typename TTypes<T, 4>::Tensor in_backprop) { const int batch = input.dimension(0); const int input_rows = input.dimension(1); const int input_cols = input.dimension(2); const int depth = input.dimension(3); const int filter_rows = filter.dimension(0); const int filter_cols = filter.dimension(1); const int output_rows = out_backprop.dimension(1); const int output_cols = out_backprop.dimension(2); // Initialize gradient with all zeros. in_backprop.setZero(); // This is a reference implementation, likely to be slow. // TODO(gpapan): Write multi-threaded implementation. 
// In the case of multiple argmax branches, we only back-propagate along the // last branch, i.e., the one with largest value of `h * filter_cols + w`, // similarly to the max-pooling backward routines. for (int b = 0; b < batch; ++b) { for (int h_out = 0; h_out < output_rows; ++h_out) { int h_beg = h_out * stride_rows - pad_top; for (int w_out = 0; w_out < output_cols; ++w_out) { int w_beg = w_out * stride_cols - pad_left; for (int d = 0; d < depth; ++d) { T cur_val = Eigen::NumTraits<T>::lowest(); int h_in_max = (h_beg < 0) ? 0 : h_beg; int w_in_max = (w_beg < 0) ? 0 : w_beg; for (int h = 0; h < filter_rows; ++h) { const int h_in = h_beg + h * rate_rows; if (h_in >= 0 && h_in < input_rows) { for (int w = 0; w < filter_cols; ++w) { const int w_in = w_beg + w * rate_cols; if (w_in >= 0 && w_in < input_cols) { const T val = input(b, h_in, w_in, d) + filter(h, w, d); if (val > cur_val) { cur_val = val; h_in_max = h_in; w_in_max = w_in; } } } } } in_backprop(b, h_in_max, w_in_max, d) += out_backprop(b, h_out, w_out, d); } } } } } }; } // namespace functor template <typename Device, typename T> class DilationBackpropFilterOp : public OpKernel { public: explicit DilationBackpropFilterOp(OpKernelConstruction* context) : OpKernel(context) { ParseAttributes(context, &strides_, &rates_, &padding_); } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const Tensor& filter = context->input(1); const Tensor& out_backprop = context->input(2); // Determine relevant sizes from input and filters. int stride_rows = 0, stride_cols = 0; int rate_rows = 0, rate_cols = 0; int64 pad_top = 0, pad_left = 0; int64 out_rows = 0, out_cols = 0; ParseSizes(context, strides_, rates_, padding_, &stride_rows, &stride_cols, &rate_rows, &rate_cols, &pad_top, &pad_left, &out_rows, &out_cols); // Verify that the incoming gradient tensor has the expected size // [ batch, out_rows, out_cols, depth ] const int batch = input.dim_size(0); const int depth = input.dim_size(3); OP_REQUIRES(context, batch == out_backprop.dim_size(0) && out_rows == out_backprop.dim_size(1) && out_cols == out_backprop.dim_size(2) && depth == out_backprop.dim_size(3), errors::InvalidArgument("out_backprop has incompatible size.")); // The computed filter_backprop has the same dimensions as the filter: // [ batch, input_rows, input_cols, depth ] Tensor* filter_backprop = nullptr; OP_REQUIRES_OK( context, context->allocate_output(0, filter.shape(), &filter_backprop)); // If there is nothing to compute, return. if (filter.shape().num_elements() == 0) { return; } functor::DilationBackpropFilter<Device, T>()( context->eigen_device<Device>(), input.tensor<T, 4>(), filter.tensor<T, 3>(), out_backprop.tensor<T, 4>(), stride_rows, stride_cols, rate_rows, rate_cols, pad_top, pad_left, filter_backprop->tensor<T, 3>()); } std::vector<int32> strides_; std::vector<int32> rates_; Padding padding_; }; // Partial specialization of DilationBackpropFilter functor for a CPUDevice. 
namespace functor { template <typename T> struct DilationBackpropFilter<CPUDevice, T> { void operator()(const CPUDevice& d, typename TTypes<T, 4>::ConstTensor input, typename TTypes<T, 3>::ConstTensor filter, typename TTypes<T, 4>::ConstTensor out_backprop, int stride_rows, int stride_cols, int rate_rows, int rate_cols, int pad_top, int pad_left, typename TTypes<T, 3>::Tensor filter_backprop) { const int batch = input.dimension(0); const int input_rows = input.dimension(1); const int input_cols = input.dimension(2); const int depth = input.dimension(3); const int filter_rows = filter.dimension(0); const int filter_cols = filter.dimension(1); const int output_rows = out_backprop.dimension(1); const int output_cols = out_backprop.dimension(2); // Initialize gradient with all zeros. filter_backprop.setZero(); // This is a reference implementation, likely to be slow. // TODO(gpapan): Write multi-threaded implementation. // In the case of multiple argmax branches, we only back-propagate along the // last branch, i.e., the one with largest value of `h * filter_cols + w`, // similarly to the max-pooling backward routines. for (int b = 0; b < batch; ++b) { for (int h_out = 0; h_out < output_rows; ++h_out) { int h_beg = h_out * stride_rows - pad_top; for (int w_out = 0; w_out < output_cols; ++w_out) { int w_beg = w_out * stride_cols - pad_left; for (int d = 0; d < depth; ++d) { T cur_val = Eigen::NumTraits<T>::lowest(); int h_max = 0; int w_max = 0; for (int h = 0; h < filter_rows; ++h) { const int h_in = h_beg + h * rate_rows; if (h_in >= 0 && h_in < input_rows) { for (int w = 0; w < filter_cols; ++w) { const int w_in = w_beg + w * rate_cols; if (w_in >= 0 && w_in < input_cols) { const T val = input(b, h_in, w_in, d) + filter(h, w, d); if (val > cur_val) { cur_val = val; h_max = h; w_max = w; } } } } } filter_backprop(h_max, w_max, d) += out_backprop(b, h_out, w_out, d); } } } } } }; } // namespace functor #define REGISTER(T) \ REGISTER_KERNEL_BUILDER( \ Name("Dilation2D").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ DilationOp<CPUDevice, T>); \ \ REGISTER_KERNEL_BUILDER(Name("Dilation2DBackpropInput") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T"), \ DilationBackpropInputOp<CPUDevice, T>); \ \ REGISTER_KERNEL_BUILDER(Name("Dilation2DBackpropFilter") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T"), \ DilationBackpropFilterOp<CPUDevice, T>); TF_CALL_REAL_NUMBER_TYPES(REGISTER); #undef REGISTER #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER(T) \ REGISTER_KERNEL_BUILDER( \ Name("Dilation2D").Device(DEVICE_GPU).TypeConstraint<T>("T"), \ DilationOp<GPUDevice, T>); \ \ REGISTER_KERNEL_BUILDER(Name("Dilation2DBackpropInput") \ .Device(DEVICE_GPU) \ .TypeConstraint<T>("T"), \ DilationBackpropInputOp<GPUDevice, T>); \ \ REGISTER_KERNEL_BUILDER(Name("Dilation2DBackpropFilter") \ .Device(DEVICE_GPU) \ .TypeConstraint<T>("T"), \ DilationBackpropFilterOp<GPUDevice, T>); TF_CALL_GPU_NUMBER_TYPES(REGISTER); #undef REGISTER #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM } // namespace tensorflow
null
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // See docs in ../ops/nn_ops.cc. #define EIGEN_USE_THREADS #include "tensorflow/core/kernels/dilation_ops.h" #include <cfloat> #include <vector> #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/framework/kernel_shape_util.h" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_slice.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/array_slice.h" #include "tensorflow/core/util/padding.h" namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; void ParseAttributes(OpKernelConstruction* context, std::vector<int32>* strides, std::vector<int32>* rates, Padding* padding) { OP_REQUIRES_OK(context, context->GetAttr("strides", strides)); OP_REQUIRES(context, strides->size() == 4, errors::InvalidArgument("Sliding window stride field must " "specify 4 dimensions")); OP_REQUIRES(context, (*strides)[0] == 1 && (*strides)[3] == 1, errors::Unimplemented( "Stride is only supported across spatial dimensions.")); OP_REQUIRES_OK(context, context->GetAttr("rates", rates)); OP_REQUIRES(context, rates->size() == 4, errors::InvalidArgument("Input stride (atrous rate) field " "must specify 4 dimensions")); OP_REQUIRES(context, (*rates)[0] == 1 && (*rates)[3] == 1, errors::Unimplemented( "Rate is only supported across spatial dimensions.")); OP_REQUIRES_OK(context, context->GetAttr("padding", padding)); } void ParseSizes(OpKernelContext* context, const std::vector<int32>& strides, const std::vector<int32>& rates, const Padding& padding, int* stride_rows, int* stride_cols, int* rate_rows, int* rate_cols, int64* pad_top, int64* pad_left, int64* out_rows, int64* out_cols) { // Input tensor is of the following dimensions: // [ batch, input_rows, input_cols, depth ] const Tensor& input = context->input(0); OP_REQUIRES(context, input.dims() == 4, errors::InvalidArgument("input must be 4-dimensional", input.shape().DebugString())); const int input_rows = input.dim_size(1); const int input_cols = input.dim_size(2); const int depth = input.dim_size(3); // For now we take the stride and rate from the second and third dimensions // only (we do not support striding on the batch or depth dimension). 
*stride_rows = strides[1]; *stride_cols = strides[2]; *rate_rows = rates[1]; *rate_cols = rates[2]; // Input filter is of the following dimensions: // [ filter_rows, filter_cols, depth ] const Tensor& filter = context->input(1); OP_REQUIRES(context, filter.dims() == 3, errors::InvalidArgument("filter must be 3-dimensional: ", filter.shape().DebugString())); const int filter_rows = filter.dim_size(0); const int filter_cols = filter.dim_size(1); OP_REQUIRES(context, depth == filter.dim_size(2), errors::InvalidArgument( "input and filter must have the same depth: ", depth, " vs ", filter.dim_size(2))); // Effective filter size, after introducing rate - 1 zeros between each // non-zero filter element. const int filter_rows_eff = filter_rows + (filter_rows - 1) * (*rate_rows - 1); const int filter_cols_eff = filter_cols + (filter_cols - 1) * (*rate_cols - 1); OP_REQUIRES_OK( context, GetWindowedOutputSize(input_rows, filter_rows_eff, *stride_rows, padding, out_rows, pad_top)); OP_REQUIRES_OK( context, GetWindowedOutputSize(input_cols, filter_cols_eff, *stride_cols, padding, out_cols, pad_left)); } template <typename Device, typename T> class DilationOp : public OpKernel { public: explicit DilationOp(OpKernelConstruction* context) : OpKernel(context) { ParseAttributes(context, &strides_, &rates_, &padding_); } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const Tensor& filter = context->input(1); // Determine relevant sizes from input and filters. int stride_rows = 0, stride_cols = 0; int rate_rows = 0, rate_cols = 0; int64 pad_top = 0, pad_left = 0; int64 out_rows = 0, out_cols = 0; ParseSizes(context, strides_, rates_, padding_, &stride_rows, &stride_cols, &rate_rows, &rate_cols, &pad_top, &pad_left, &out_rows, &out_cols); if (!context->status().ok()) return; // Output tensor is of the following dimensions: // [ batch, out_rows, out_cols, depth ] const int batch = input.dim_size(0); const int depth = input.dim_size(3); const std::vector<int64> out_sizes = {batch, out_rows, out_cols, depth}; TensorShape out_shape(out_sizes); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output)); // If there is nothing to compute, return. if (out_shape.num_elements() == 0) { return; } functor::Dilation<Device, T>()( context->eigen_device<Device>(), input.tensor<T, 4>(), filter.tensor<T, 3>(), stride_rows, stride_cols, rate_rows, rate_cols, pad_top, pad_left, output->tensor<T, 4>()); } std::vector<int32> strides_; std::vector<int32> rates_; Padding padding_; }; // Partial specialization of Dilation functor for a CPUDevice. namespace functor { template <typename T> struct Dilation<CPUDevice, T> { void operator()(const CPUDevice& d, typename TTypes<T, 4>::ConstTensor input, typename TTypes<T, 3>::ConstTensor filter, int stride_rows, int stride_cols, int rate_rows, int rate_cols, int pad_top, int pad_left, typename TTypes<T, 4>::Tensor output) { const int batch = input.dimension(0); const int input_rows = input.dimension(1); const int input_cols = input.dimension(2); const int depth = input.dimension(3); const int filter_rows = filter.dimension(0); const int filter_cols = filter.dimension(1); const int output_rows = output.dimension(1); const int output_cols = output.dimension(2); // This is a reference implementation, likely to be slow. // TODO(gpapan): Write multi-threaded implementation. 
for (int b = 0; b < batch; ++b) { for (int h_out = 0; h_out < output_rows; ++h_out) { int h_beg = h_out * stride_rows - pad_top; for (int w_out = 0; w_out < output_cols; ++w_out) { int w_beg = w_out * stride_cols - pad_left; for (int d = 0; d < depth; ++d) { T cur_val = Eigen::NumTraits<T>::lowest(); for (int h = 0; h < filter_rows; ++h) { const int h_in = h_beg + h * rate_rows; if (h_in >= 0 && h_in < input_rows) { for (int w = 0; w < filter_cols; ++w) { const int w_in = w_beg + w * rate_cols; if (w_in >= 0 && w_in < input_cols) { const T val = input(b, h_in, w_in, d) + filter(h, w, d); if (val > cur_val) { cur_val = val; } } } } } output(b, h_out, w_out, d) = cur_val; } } } } } }; } // namespace functor template <typename Device, typename T> class DilationBackpropInputOp : public OpKernel { public: explicit DilationBackpropInputOp(OpKernelConstruction* context) : OpKernel(context) { ParseAttributes(context, &strides_, &rates_, &padding_); } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const Tensor& filter = context->input(1); const Tensor& out_backprop = context->input(2); // Determine relevant sizes from input and filters. int stride_rows = 0, stride_cols = 0; int rate_rows = 0, rate_cols = 0; int64 pad_top = 0, pad_left = 0; int64 out_rows = 0, out_cols = 0; ParseSizes(context, strides_, rates_, padding_, &stride_rows, &stride_cols, &rate_rows, &rate_cols, &pad_top, &pad_left, &out_rows, &out_cols); if (!context->status().ok()) return; // Verify that the incoming gradient tensor has the expected size // [ batch, out_rows, out_cols, depth ] const int batch = input.dim_size(0); const int depth = input.dim_size(3); OP_REQUIRES(context, batch == out_backprop.dim_size(0) && out_rows == out_backprop.dim_size(1) && out_cols == out_backprop.dim_size(2) && depth == out_backprop.dim_size(3), errors::InvalidArgument("out_backprop has incompatible size.")); // The computed in_backprop has the same dimensions as the input: // [ batch, input_rows, input_cols, depth ] Tensor* in_backprop = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, input.shape(), &in_backprop)); // If there is nothing to compute, return. if (input.shape().num_elements() == 0) { return; } functor::DilationBackpropInput<Device, T>()( context->eigen_device<Device>(), input.tensor<T, 4>(), filter.tensor<T, 3>(), out_backprop.tensor<T, 4>(), stride_rows, stride_cols, rate_rows, rate_cols, pad_top, pad_left, in_backprop->tensor<T, 4>()); } std::vector<int32> strides_; std::vector<int32> rates_; Padding padding_; }; // Partial specialization of DilationBackpropInput functor for a CPUDevice. namespace functor { template <typename T> struct DilationBackpropInput<CPUDevice, T> { void operator()(const CPUDevice& d, typename TTypes<T, 4>::ConstTensor input, typename TTypes<T, 3>::ConstTensor filter, typename TTypes<T, 4>::ConstTensor out_backprop, int stride_rows, int stride_cols, int rate_rows, int rate_cols, int pad_top, int pad_left, typename TTypes<T, 4>::Tensor in_backprop) { const int batch = input.dimension(0); const int input_rows = input.dimension(1); const int input_cols = input.dimension(2); const int depth = input.dimension(3); const int filter_rows = filter.dimension(0); const int filter_cols = filter.dimension(1); const int output_rows = out_backprop.dimension(1); const int output_cols = out_backprop.dimension(2); // Initialize gradient with all zeros. in_backprop.setZero(); // This is a reference implementation, likely to be slow. 
// TODO(gpapan): Write multi-threaded implementation. // In the case of multiple argmax branches, we only back-propagate along the // last branch, i.e., the one with largest value of `h * filter_cols + w`, // similarly to the max-pooling backward routines. for (int b = 0; b < batch; ++b) { for (int h_out = 0; h_out < output_rows; ++h_out) { int h_beg = h_out * stride_rows - pad_top; for (int w_out = 0; w_out < output_cols; ++w_out) { int w_beg = w_out * stride_cols - pad_left; for (int d = 0; d < depth; ++d) { T cur_val = Eigen::NumTraits<T>::lowest(); int h_in_max = (h_beg < 0) ? 0 : h_beg; int w_in_max = (w_beg < 0) ? 0 : w_beg; for (int h = 0; h < filter_rows; ++h) { const int h_in = h_beg + h * rate_rows; if (h_in >= 0 && h_in < input_rows) { for (int w = 0; w < filter_cols; ++w) { const int w_in = w_beg + w * rate_cols; if (w_in >= 0 && w_in < input_cols) { const T val = input(b, h_in, w_in, d) + filter(h, w, d); if (val > cur_val) { cur_val = val; h_in_max = h_in; w_in_max = w_in; } } } } } if (h_in_max < input_rows && w_in_max < input_cols) { in_backprop(b, h_in_max, w_in_max, d) += out_backprop(b, h_out, w_out, d); } } } } } } }; } // namespace functor template <typename Device, typename T> class DilationBackpropFilterOp : public OpKernel { public: explicit DilationBackpropFilterOp(OpKernelConstruction* context) : OpKernel(context) { ParseAttributes(context, &strides_, &rates_, &padding_); } void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const Tensor& filter = context->input(1); const Tensor& out_backprop = context->input(2); // Determine relevant sizes from input and filters. int stride_rows = 0, stride_cols = 0; int rate_rows = 0, rate_cols = 0; int64 pad_top = 0, pad_left = 0; int64 out_rows = 0, out_cols = 0; ParseSizes(context, strides_, rates_, padding_, &stride_rows, &stride_cols, &rate_rows, &rate_cols, &pad_top, &pad_left, &out_rows, &out_cols); if (!context->status().ok()) return; // Verify that the incoming gradient tensor has the expected size // [ batch, out_rows, out_cols, depth ] const int batch = input.dim_size(0); const int depth = input.dim_size(3); OP_REQUIRES(context, batch == out_backprop.dim_size(0) && out_rows == out_backprop.dim_size(1) && out_cols == out_backprop.dim_size(2) && depth == out_backprop.dim_size(3), errors::InvalidArgument("out_backprop has incompatible size.")); // The computed filter_backprop has the same dimensions as the filter: // [ batch, input_rows, input_cols, depth ] Tensor* filter_backprop = nullptr; OP_REQUIRES_OK( context, context->allocate_output(0, filter.shape(), &filter_backprop)); // If there is nothing to compute, return. if (filter.shape().num_elements() == 0) { return; } functor::DilationBackpropFilter<Device, T>()( context->eigen_device<Device>(), input.tensor<T, 4>(), filter.tensor<T, 3>(), out_backprop.tensor<T, 4>(), stride_rows, stride_cols, rate_rows, rate_cols, pad_top, pad_left, filter_backprop->tensor<T, 3>()); } std::vector<int32> strides_; std::vector<int32> rates_; Padding padding_; }; // Partial specialization of DilationBackpropFilter functor for a CPUDevice. 
namespace functor { template <typename T> struct DilationBackpropFilter<CPUDevice, T> { void operator()(const CPUDevice& d, typename TTypes<T, 4>::ConstTensor input, typename TTypes<T, 3>::ConstTensor filter, typename TTypes<T, 4>::ConstTensor out_backprop, int stride_rows, int stride_cols, int rate_rows, int rate_cols, int pad_top, int pad_left, typename TTypes<T, 3>::Tensor filter_backprop) { const int batch = input.dimension(0); const int input_rows = input.dimension(1); const int input_cols = input.dimension(2); const int depth = input.dimension(3); const int filter_rows = filter.dimension(0); const int filter_cols = filter.dimension(1); const int output_rows = out_backprop.dimension(1); const int output_cols = out_backprop.dimension(2); // Initialize gradient with all zeros. filter_backprop.setZero(); // This is a reference implementation, likely to be slow. // TODO(gpapan): Write multi-threaded implementation. // In the case of multiple argmax branches, we only back-propagate along the // last branch, i.e., the one with largest value of `h * filter_cols + w`, // similarly to the max-pooling backward routines. for (int b = 0; b < batch; ++b) { for (int h_out = 0; h_out < output_rows; ++h_out) { int h_beg = h_out * stride_rows - pad_top; for (int w_out = 0; w_out < output_cols; ++w_out) { int w_beg = w_out * stride_cols - pad_left; for (int d = 0; d < depth; ++d) { T cur_val = Eigen::NumTraits<T>::lowest(); int h_max = 0; int w_max = 0; for (int h = 0; h < filter_rows; ++h) { const int h_in = h_beg + h * rate_rows; if (h_in >= 0 && h_in < input_rows) { for (int w = 0; w < filter_cols; ++w) { const int w_in = w_beg + w * rate_cols; if (w_in >= 0 && w_in < input_cols) { const T val = input(b, h_in, w_in, d) + filter(h, w, d); if (val > cur_val) { cur_val = val; h_max = h; w_max = w; } } } } } if (h_max < filter_rows && w_max < filter_cols) { filter_backprop(h_max, w_max, d) += out_backprop(b, h_out, w_out, d); } } } } } } }; } // namespace functor #define REGISTER(T) \ REGISTER_KERNEL_BUILDER( \ Name("Dilation2D").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ DilationOp<CPUDevice, T>); \ \ REGISTER_KERNEL_BUILDER(Name("Dilation2DBackpropInput") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T"), \ DilationBackpropInputOp<CPUDevice, T>); \ \ REGISTER_KERNEL_BUILDER(Name("Dilation2DBackpropFilter") \ .Device(DEVICE_CPU) \ .TypeConstraint<T>("T"), \ DilationBackpropFilterOp<CPUDevice, T>); TF_CALL_REAL_NUMBER_TYPES(REGISTER); #undef REGISTER #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER(T) \ REGISTER_KERNEL_BUILDER( \ Name("Dilation2D").Device(DEVICE_GPU).TypeConstraint<T>("T"), \ DilationOp<GPUDevice, T>); \ \ REGISTER_KERNEL_BUILDER(Name("Dilation2DBackpropInput") \ .Device(DEVICE_GPU) \ .TypeConstraint<T>("T"), \ DilationBackpropInputOp<GPUDevice, T>); \ \ REGISTER_KERNEL_BUILDER(Name("Dilation2DBackpropFilter") \ .Device(DEVICE_GPU) \ .TypeConstraint<T>("T"), \ DilationBackpropFilterOp<GPUDevice, T>); TF_CALL_GPU_NUMBER_TYPES(REGISTER); #undef REGISTER #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM } // namespace tensorflow
null
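Record 256 pairs two versions of dilation_ops.cc under CWE-787 / CVE-2021-29566. The second listing adds an early return, if (!context->status().ok()) return;, after each ParseSizes() call so Compute() stops when the shape checks have already failed, and it only accumulates into in_backprop / filter_backprop when the argmax coordinates lie inside the destination buffer. The standalone sketch below is not TensorFlow code; it uses made-up names and sizes to illustrate the same guard-before-write pattern that those added bounds checks implement.

#include <iostream>
#include <vector>

// Minimal sketch (hypothetical names): an accumulation into grad[row][col]
// only happens when (row, col), the coordinate picked by an argmax-style
// search, is provably inside the buffer. Out-of-range coordinates are
// dropped instead of writing past the allocation (the CWE-787 case).
void accumulate_guarded(std::vector<std::vector<float>>& grad,
                        long row, long col, float update) {
  const long rows = static_cast<long>(grad.size());
  const long cols = rows > 0 ? static_cast<long>(grad[0].size()) : 0;
  if (row >= 0 && row < rows && col >= 0 && col < cols) {
    grad[row][col] += update;  // in bounds: safe to write
  }
}

int main() {
  std::vector<std::vector<float>> grad(3, std::vector<float>(4, 0.0f));
  accumulate_guarded(grad, 1, 2, 5.0f);  // lands inside the 3x4 buffer
  accumulate_guarded(grad, 7, 9, 5.0f);  // would be an out-of-bounds write without the guard
  std::cout << grad[1][2] << "\n";       // prints 5
}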
257
CWE-787
CVE-2021-29571
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // See ../ops/image_ops.cc for details. #define EIGEN_USE_THREADS #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/platform/logging.h" namespace tensorflow { namespace { std::vector<std::vector<float>> DefaultColorTable(int depth) { std::vector<std::vector<float>> color_table; color_table.emplace_back(std::vector<float>({1, 1, 0, 1})); // 0: yellow color_table.emplace_back(std::vector<float>({0, 0, 1, 1})); // 1: blue color_table.emplace_back(std::vector<float>({1, 0, 0, 1})); // 2: red color_table.emplace_back(std::vector<float>({0, 1, 0, 1})); // 3: lime color_table.emplace_back(std::vector<float>({0.5, 0, 0.5, 1})); // 4: purple color_table.emplace_back(std::vector<float>({0.5, 0.5, 0, 1})); // 5: olive color_table.emplace_back(std::vector<float>({0.5, 0, 0, 1})); // 6: maroon color_table.emplace_back(std::vector<float>({0, 0, 0.5, 1})); // 7: navy blue color_table.emplace_back(std::vector<float>({0, 1, 1, 1})); // 8: aqua color_table.emplace_back(std::vector<float>({1, 0, 1, 1})); // 9: fuchsia if (depth == 1) { for (int64 i = 0; i < color_table.size(); i++) { color_table[i][0] = 1; } } return color_table; } } // namespace template <class T> class DrawBoundingBoxesOp : public OpKernel { public: explicit DrawBoundingBoxesOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor& images = context->input(0); const Tensor& boxes = context->input(1); const int64 depth = images.dim_size(3); OP_REQUIRES(context, images.dims() == 4, errors::InvalidArgument("The rank of the images should be 4")); OP_REQUIRES( context, boxes.dims() == 3, errors::InvalidArgument("The rank of the boxes tensor should be 3")); OP_REQUIRES(context, images.dim_size(0) == boxes.dim_size(0), errors::InvalidArgument("The batch sizes should be the same")); OP_REQUIRES( context, depth == 4 || depth == 1 || depth == 3, errors::InvalidArgument("Channel depth should be either 1 (GRY), " "3 (RGB), or 4 (RGBA)")); const int64 batch_size = images.dim_size(0); const int64 height = images.dim_size(1); const int64 width = images.dim_size(2); std::vector<std::vector<float>> color_table; if (context->num_inputs() == 3) { const Tensor& colors_tensor = context->input(2); OP_REQUIRES(context, colors_tensor.shape().dims() == 2, errors::InvalidArgument("colors must be a 2-D matrix", colors_tensor.shape().DebugString())); OP_REQUIRES(context, colors_tensor.shape().dim_size(1) >= depth, errors::InvalidArgument("colors must have equal or more ", "channels than the image provided: ", colors_tensor.shape().DebugString())); if (colors_tensor.NumElements() != 0) { 
color_table.clear(); auto colors = colors_tensor.matrix<float>(); for (int64 i = 0; i < colors.dimension(0); i++) { std::vector<float> color_value(4); for (int64 j = 0; j < 4; j++) { color_value[j] = colors(i, j); } color_table.emplace_back(color_value); } } } if (color_table.empty()) { color_table = DefaultColorTable(depth); } Tensor* output; OP_REQUIRES_OK( context, context->allocate_output( 0, TensorShape({batch_size, height, width, depth}), &output)); output->tensor<T, 4>() = images.tensor<T, 4>(); auto canvas = output->tensor<T, 4>(); for (int64 b = 0; b < batch_size; ++b) { const int64 num_boxes = boxes.dim_size(1); const auto tboxes = boxes.tensor<T, 3>(); for (int64 bb = 0; bb < num_boxes; ++bb) { int64 color_index = bb % color_table.size(); const int64 min_box_row = static_cast<float>(tboxes(b, bb, 0)) * (height - 1); const int64 min_box_row_clamp = std::max<int64>(min_box_row, int64{0}); const int64 max_box_row = static_cast<float>(tboxes(b, bb, 2)) * (height - 1); const int64 max_box_row_clamp = std::min<int64>(max_box_row, height - 1); const int64 min_box_col = static_cast<float>(tboxes(b, bb, 1)) * (width - 1); const int64 min_box_col_clamp = std::max<int64>(min_box_col, int64{0}); const int64 max_box_col = static_cast<float>(tboxes(b, bb, 3)) * (width - 1); const int64 max_box_col_clamp = std::min<int64>(max_box_col, width - 1); if (min_box_row > max_box_row || min_box_col > max_box_col) { LOG(WARNING) << "Bounding box (" << min_box_row << "," << min_box_col << "," << max_box_row << "," << max_box_col << ") is inverted and will not be drawn."; continue; } if (min_box_row >= height || max_box_row < 0 || min_box_col >= width || max_box_col < 0) { LOG(WARNING) << "Bounding box (" << min_box_row << "," << min_box_col << "," << max_box_row << "," << max_box_col << ") is completely outside the image" << " and will not be drawn."; continue; } // At this point, {min,max}_box_{row,col}_clamp are inside the // image. OP_REQUIRES( context, min_box_row_clamp >= 0, errors::InvalidArgument("Min box row clamp is less than 0.")); OP_REQUIRES( context, max_box_row_clamp >= 0, errors::InvalidArgument("Max box row clamp is less than 0.")); OP_REQUIRES(context, min_box_row_clamp <= height, errors::InvalidArgument( "Min box row clamp is greater than height.")); OP_REQUIRES(context, max_box_row_clamp <= height, errors::InvalidArgument( "Max box row clamp is greater than height.")); OP_REQUIRES( context, min_box_col_clamp >= 0, errors::InvalidArgument("Min box col clamp is less than 0.")); OP_REQUIRES( context, max_box_col_clamp >= 0, errors::InvalidArgument("Max box col clamp is less than 0.")); OP_REQUIRES(context, min_box_col_clamp <= width, errors::InvalidArgument( "Min box col clamp is greater than width.")); OP_REQUIRES(context, max_box_col_clamp <= width, errors::InvalidArgument( "Max box col clamp is greater than width.")); // At this point, the min_box_row and min_box_col are either // in the image or above/left of it, and max_box_row and // max_box_col are either in the image or below/right or it. OP_REQUIRES( context, min_box_row <= height, errors::InvalidArgument("Min box row is greater than height.")); OP_REQUIRES(context, max_box_row >= 0, errors::InvalidArgument("Max box row is less than 0.")); OP_REQUIRES( context, min_box_col <= width, errors::InvalidArgument("Min box col is greater than width.")); OP_REQUIRES(context, max_box_col >= 0, errors::InvalidArgument("Max box col is less than 0.")); // Draw top line. 
if (min_box_row >= 0) { for (int64 j = min_box_col_clamp; j <= max_box_col_clamp; ++j) for (int64 c = 0; c < depth; c++) { canvas(b, min_box_row, j, c) = static_cast<T>(color_table[color_index][c]); } } // Draw bottom line. if (max_box_row < height) { for (int64 j = min_box_col_clamp; j <= max_box_col_clamp; ++j) for (int64 c = 0; c < depth; c++) { canvas(b, max_box_row, j, c) = static_cast<T>(color_table[color_index][c]); } } // Draw left line. if (min_box_col >= 0) { for (int64 i = min_box_row_clamp; i <= max_box_row_clamp; ++i) for (int64 c = 0; c < depth; c++) { canvas(b, i, min_box_col, c) = static_cast<T>(color_table[color_index][c]); } } // Draw right line. if (max_box_col < width) { for (int64 i = min_box_row_clamp; i <= max_box_row_clamp; ++i) for (int64 c = 0; c < depth; c++) { canvas(b, i, max_box_col, c) = static_cast<T>(color_table[color_index][c]); } } } } } }; #define REGISTER_CPU_KERNEL(T) \ REGISTER_KERNEL_BUILDER( \ Name("DrawBoundingBoxes").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ DrawBoundingBoxesOp<T>); \ REGISTER_KERNEL_BUILDER( \ Name("DrawBoundingBoxesV2").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ DrawBoundingBoxesOp<T>); TF_CALL_half(REGISTER_CPU_KERNEL); TF_CALL_float(REGISTER_CPU_KERNEL); } // namespace tensorflow
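Editor's note on the record above: in this (source) revision of DrawBoundingBoxesOp, Compute() scales the normalized box corners tboxes(b, bb, 0..3) into pixel rows/columns and clamps them to the image, but nothing checks that the boxes tensor actually has 4 values in its last dimension before those four reads happen. The sketch below is a minimal, standalone restatement of that scale-and-clamp arithmetic (plain C++17, no TensorFlow; the struct and variable names are illustrative, not part of the kernel):

```cpp
// Standalone sketch of the coordinate handling in DrawBoundingBoxesOp::Compute:
// normalized [y_min, x_min, y_max, x_max] are scaled to pixel indices and then
// clamped to the image, which is why the op needs each box to supply exactly
// four coordinates before indexing tboxes(b, bb, 0..3).
#include <algorithm>
#include <cstdint>
#include <iostream>

struct Box { float y_min, x_min, y_max, x_max; };  // boxes[..., 0..3]

int main() {
  const int64_t height = 64, width = 64;
  const Box b{0.1f, 0.2f, 1.5f, 0.9f};  // values > 1 are tolerated, then clamped

  const int64_t min_row = static_cast<int64_t>(b.y_min * (height - 1));
  const int64_t max_row = static_cast<int64_t>(b.y_max * (height - 1));
  const int64_t min_col = static_cast<int64_t>(b.x_min * (width - 1));
  const int64_t max_col = static_cast<int64_t>(b.x_max * (width - 1));

  // Clamp the same way the kernel does before drawing the four box edges.
  const int64_t min_row_clamp = std::max<int64_t>(min_row, 0);
  const int64_t max_row_clamp = std::min<int64_t>(max_row, height - 1);
  const int64_t min_col_clamp = std::max<int64_t>(min_col, 0);
  const int64_t max_col_clamp = std::min<int64_t>(max_col, width - 1);

  std::cout << "rows [" << min_row_clamp << ", " << max_row_clamp << "], cols ["
            << min_col_clamp << ", " << max_col_clamp << "]\n";
}
```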
null
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // See ../ops/image_ops.cc for details. #define EIGEN_USE_THREADS #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/platform/logging.h" namespace tensorflow { namespace { std::vector<std::vector<float>> DefaultColorTable(int depth) { std::vector<std::vector<float>> color_table; color_table.emplace_back(std::vector<float>({1, 1, 0, 1})); // 0: yellow color_table.emplace_back(std::vector<float>({0, 0, 1, 1})); // 1: blue color_table.emplace_back(std::vector<float>({1, 0, 0, 1})); // 2: red color_table.emplace_back(std::vector<float>({0, 1, 0, 1})); // 3: lime color_table.emplace_back(std::vector<float>({0.5, 0, 0.5, 1})); // 4: purple color_table.emplace_back(std::vector<float>({0.5, 0.5, 0, 1})); // 5: olive color_table.emplace_back(std::vector<float>({0.5, 0, 0, 1})); // 6: maroon color_table.emplace_back(std::vector<float>({0, 0, 0.5, 1})); // 7: navy blue color_table.emplace_back(std::vector<float>({0, 1, 1, 1})); // 8: aqua color_table.emplace_back(std::vector<float>({1, 0, 1, 1})); // 9: fuchsia if (depth == 1) { for (int64 i = 0; i < color_table.size(); i++) { color_table[i][0] = 1; } } return color_table; } } // namespace template <class T> class DrawBoundingBoxesOp : public OpKernel { public: explicit DrawBoundingBoxesOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor& images = context->input(0); const Tensor& boxes = context->input(1); const int64 depth = images.dim_size(3); OP_REQUIRES(context, images.dims() == 4, errors::InvalidArgument("The rank of the images should be 4")); OP_REQUIRES( context, boxes.dims() == 3, errors::InvalidArgument("The rank of the boxes tensor should be 3")); OP_REQUIRES(context, images.dim_size(0) == boxes.dim_size(0), errors::InvalidArgument("The batch sizes should be the same")); OP_REQUIRES( context, depth == 4 || depth == 1 || depth == 3, errors::InvalidArgument("Channel depth should be either 1 (GRY), " "3 (RGB), or 4 (RGBA)")); OP_REQUIRES( context, boxes.dim_size(2) == 4, errors::InvalidArgument( "The size of the third dimension of the box must be 4. 
Received: ", boxes.dim_size(2))); const int64 batch_size = images.dim_size(0); const int64 height = images.dim_size(1); const int64 width = images.dim_size(2); std::vector<std::vector<float>> color_table; if (context->num_inputs() == 3) { const Tensor& colors_tensor = context->input(2); OP_REQUIRES(context, colors_tensor.shape().dims() == 2, errors::InvalidArgument("colors must be a 2-D matrix", colors_tensor.shape().DebugString())); OP_REQUIRES(context, colors_tensor.shape().dim_size(1) >= depth, errors::InvalidArgument("colors must have equal or more ", "channels than the image provided: ", colors_tensor.shape().DebugString())); if (colors_tensor.NumElements() != 0) { color_table.clear(); auto colors = colors_tensor.matrix<float>(); for (int64 i = 0; i < colors.dimension(0); i++) { std::vector<float> color_value(4); for (int64 j = 0; j < 4; j++) { color_value[j] = colors(i, j); } color_table.emplace_back(color_value); } } } if (color_table.empty()) { color_table = DefaultColorTable(depth); } Tensor* output; OP_REQUIRES_OK( context, context->allocate_output( 0, TensorShape({batch_size, height, width, depth}), &output)); output->tensor<T, 4>() = images.tensor<T, 4>(); auto canvas = output->tensor<T, 4>(); for (int64 b = 0; b < batch_size; ++b) { const int64 num_boxes = boxes.dim_size(1); const auto tboxes = boxes.tensor<T, 3>(); for (int64 bb = 0; bb < num_boxes; ++bb) { int64 color_index = bb % color_table.size(); const int64 min_box_row = static_cast<float>(tboxes(b, bb, 0)) * (height - 1); const int64 min_box_row_clamp = std::max<int64>(min_box_row, int64{0}); const int64 max_box_row = static_cast<float>(tboxes(b, bb, 2)) * (height - 1); const int64 max_box_row_clamp = std::min<int64>(max_box_row, height - 1); const int64 min_box_col = static_cast<float>(tboxes(b, bb, 1)) * (width - 1); const int64 min_box_col_clamp = std::max<int64>(min_box_col, int64{0}); const int64 max_box_col = static_cast<float>(tboxes(b, bb, 3)) * (width - 1); const int64 max_box_col_clamp = std::min<int64>(max_box_col, width - 1); if (min_box_row > max_box_row || min_box_col > max_box_col) { LOG(WARNING) << "Bounding box (" << min_box_row << "," << min_box_col << "," << max_box_row << "," << max_box_col << ") is inverted and will not be drawn."; continue; } if (min_box_row >= height || max_box_row < 0 || min_box_col >= width || max_box_col < 0) { LOG(WARNING) << "Bounding box (" << min_box_row << "," << min_box_col << "," << max_box_row << "," << max_box_col << ") is completely outside the image" << " and will not be drawn."; continue; } // At this point, {min,max}_box_{row,col}_clamp are inside the // image. 
OP_REQUIRES( context, min_box_row_clamp >= 0, errors::InvalidArgument("Min box row clamp is less than 0.")); OP_REQUIRES( context, max_box_row_clamp >= 0, errors::InvalidArgument("Max box row clamp is less than 0.")); OP_REQUIRES(context, min_box_row_clamp <= height, errors::InvalidArgument( "Min box row clamp is greater than height.")); OP_REQUIRES(context, max_box_row_clamp <= height, errors::InvalidArgument( "Max box row clamp is greater than height.")); OP_REQUIRES( context, min_box_col_clamp >= 0, errors::InvalidArgument("Min box col clamp is less than 0.")); OP_REQUIRES( context, max_box_col_clamp >= 0, errors::InvalidArgument("Max box col clamp is less than 0.")); OP_REQUIRES(context, min_box_col_clamp <= width, errors::InvalidArgument( "Min box col clamp is greater than width.")); OP_REQUIRES(context, max_box_col_clamp <= width, errors::InvalidArgument( "Max box col clamp is greater than width.")); // At this point, the min_box_row and min_box_col are either // in the image or above/left of it, and max_box_row and // max_box_col are either in the image or below/right or it. OP_REQUIRES( context, min_box_row <= height, errors::InvalidArgument("Min box row is greater than height.")); OP_REQUIRES(context, max_box_row >= 0, errors::InvalidArgument("Max box row is less than 0.")); OP_REQUIRES( context, min_box_col <= width, errors::InvalidArgument("Min box col is greater than width.")); OP_REQUIRES(context, max_box_col >= 0, errors::InvalidArgument("Max box col is less than 0.")); // Draw top line. if (min_box_row >= 0) { for (int64 j = min_box_col_clamp; j <= max_box_col_clamp; ++j) for (int64 c = 0; c < depth; c++) { canvas(b, min_box_row, j, c) = static_cast<T>(color_table[color_index][c]); } } // Draw bottom line. if (max_box_row < height) { for (int64 j = min_box_col_clamp; j <= max_box_col_clamp; ++j) for (int64 c = 0; c < depth; c++) { canvas(b, max_box_row, j, c) = static_cast<T>(color_table[color_index][c]); } } // Draw left line. if (min_box_col >= 0) { for (int64 i = min_box_row_clamp; i <= max_box_row_clamp; ++i) for (int64 c = 0; c < depth; c++) { canvas(b, i, min_box_col, c) = static_cast<T>(color_table[color_index][c]); } } // Draw right line. if (max_box_col < width) { for (int64 i = min_box_row_clamp; i <= max_box_row_clamp; ++i) for (int64 c = 0; c < depth; c++) { canvas(b, i, max_box_col, c) = static_cast<T>(color_table[color_index][c]); } } } } } }; #define REGISTER_CPU_KERNEL(T) \ REGISTER_KERNEL_BUILDER( \ Name("DrawBoundingBoxes").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ DrawBoundingBoxesOp<T>); \ REGISTER_KERNEL_BUILDER( \ Name("DrawBoundingBoxesV2").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ DrawBoundingBoxesOp<T>); TF_CALL_half(REGISTER_CPU_KERNEL); TF_CALL_float(REGISTER_CPU_KERNEL); } // namespace tensorflow
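The ground-truth revision above differs from the source revision chiefly in one added shape check on the boxes input, placed before the drawing loops. The fragment below isolates that guard using the same OP_REQUIRES / errors::InvalidArgument helpers used throughout the kernel; it is an excerpt meant to sit near the top of Compute(), not a standalone program:

```cpp
// Guard present in the ground-truth version: every box must carry exactly
// [y_min, x_min, y_max, x_max]; otherwise tboxes(b, bb, 0..3) in the drawing
// loops would read past the end of the boxes tensor (the CWE-787 pattern
// this record is tagged with).
OP_REQUIRES(context, boxes.dim_size(2) == 4,
            errors::InvalidArgument(
                "The size of the third dimension of the box must be 4. "
                "Received: ", boxes.dim_size(2)));
```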
null
258
CWE-787
CVE-2021-29576
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #define EIGEN_USE_THREADS #include "tensorflow/core/kernels/pooling_ops_3d.h" #include <array> #include "third_party/eigen3/Eigen/Core" #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/kernel_shape_util.h" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_slice.h" #include "tensorflow/core/kernels/eigen_pooling.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/tensor_format.h" #include "tensorflow/core/util/work_sharder.h" #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #include "tensorflow/core/kernels/cudnn_pooling_gpu.h" #include "tensorflow/core/kernels/pooling_ops_3d_gpu.h" #endif namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; Pool3dParameters::Pool3dParameters(OpKernelContext* context, const std::vector<int32>& ksize, const std::vector<int32>& stride, Padding padding, TensorFormat data_format, const TensorShape& tensor_in_shape) { // For maxpooling, tensor_in should have 4 dimensions. OP_REQUIRES(context, tensor_in_shape.dims() == 5, errors::InvalidArgument("tensor_in must be 4-dimensional")); this->data_format = data_format; depth = GetTensorDim(tensor_in_shape, data_format, 'C'); tensor_in_planes = GetTensorDim(tensor_in_shape, data_format, '0'); tensor_in_rows = GetTensorDim(tensor_in_shape, data_format, '1'); tensor_in_cols = GetTensorDim(tensor_in_shape, data_format, '2'); tensor_in_batch = GetTensorDim(tensor_in_shape, data_format, 'N'); window_planes = GetTensorDim(ksize, data_format, '0'); window_rows = GetTensorDim(ksize, data_format, '1'); window_cols = GetTensorDim(ksize, data_format, '2'); depth_window = GetTensorDim(ksize, data_format, 'C'); plane_stride = GetTensorDim(stride, data_format, '0'); row_stride = GetTensorDim(stride, data_format, '1'); col_stride = GetTensorDim(stride, data_format, '2'); depth_stride = GetTensorDim(stride, data_format, 'C'); // We only support 3D pooling across plane/width/height. Depthwise // pooling is not supported. 
OP_REQUIRES( context, depth_window == 1 && depth_stride == 1, errors::Unimplemented( "Pooling3d only supports pooling across plane/width/height.")); OP_REQUIRES_OK(context, GetWindowedOutputSize(tensor_in_planes, window_planes, plane_stride, padding, &out_plane, &pad_planes)); OP_REQUIRES_OK(context, GetWindowedOutputSize(tensor_in_rows, window_rows, row_stride, padding, &out_height, &pad_rows)); OP_REQUIRES_OK(context, GetWindowedOutputSize(tensor_in_cols, window_cols, col_stride, padding, &out_width, &pad_cols)); } TensorShape Pool3dParameters::forward_output_shape() { return ShapeFromFormat(data_format, tensor_in_batch, {{out_plane, out_height, out_width}}, depth); } template <typename T> struct LaunchPoolingOp<CPUDevice, T, AVG> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& padding, TensorFormat data_format, Padding padding_type, Tensor* output) { output->tensor<T, 5>().device(context->eigen_device<CPUDevice>()) = Eigen::CuboidAvgPooling(tensor_in.tensor<T, 5>(), window[0], window[1], window[2], stride[0], stride[1], stride[2], BrainPadding2EigenPadding(padding_type)); } }; template <typename T> struct LaunchPoolingOp<CPUDevice, T, MAX> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& padding, TensorFormat data_format, Padding padding_type, Tensor* output) { output->tensor<T, 5>().device(context->eigen_device<CPUDevice>()) = Eigen::CuboidMaxPooling(tensor_in.tensor<T, 5>(), window[0], window[1], window[2], stride[0], stride[1], stride[2], BrainPadding2EigenPadding(padding_type)); } }; template <typename Device, typename T, PoolingType Type> class Pooling3DOp : public UnaryOp<T> { public: explicit Pooling3DOp(OpKernelConstruction* context) : UnaryOp<T>(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); if (context->device_type() == DEVICE_CPU) { OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument("Default Pooling3DOp only supports NDHWC ", "on device type ", DeviceTypeString(context->device_type()))); } OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 5, errors::InvalidArgument("Sliding window ksize field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window stride field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'N') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'C') == 1), errors::Unimplemented( "Pooling is not yet supported on the depth dimension.")); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); OP_REQUIRES(context, tensor_in.dims() == 5, errors::InvalidArgument("tensor_in must be 5-dimensional")); const int64 depth = GetTensorDim(tensor_in, data_format_, 'C'); const int64 in_batch = GetTensorDim(tensor_in, data_format_, 'N'); 
// Dimension order for these arrays is: x, y, z. std::array<int64, 3> input_size{ {GetTensorDim(tensor_in, data_format_, '2'), GetTensorDim(tensor_in, data_format_, '1'), GetTensorDim(tensor_in, data_format_, '0')}}; std::array<int64, 3> window{{GetTensorDim(ksize_, data_format_, '2'), GetTensorDim(ksize_, data_format_, '1'), GetTensorDim(ksize_, data_format_, '0')}}; std::array<int64, 3> stride{{GetTensorDim(stride_, data_format_, '2'), GetTensorDim(stride_, data_format_, '1'), GetTensorDim(stride_, data_format_, '0')}}; std::array<int64, 3> padding, out; OP_REQUIRES_OK(context, Get3dOutputSize(input_size, window, stride, padding_, &out, &padding)); TensorShape out_shape = ShapeFromFormat(data_format_, in_batch, {{out[2], out[1], out[0]}}, depth); Tensor* output; OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output)); if (out_shape.num_elements() == 0) return; LaunchPoolingOp<Device, T, Type>::launch(context, tensor_in, window, stride, padding, data_format_, padding_, output); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; template <typename T> struct LaunchMaxPooling3dGradOp<CPUDevice, T> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const Tensor& tensor_out, const Tensor& out_backprop, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& out, const std::array<int64, 3>& padding, TensorFormat data_format, Tensor* output) { output->flat<T>().setZero(); for (int64 p = 0; p < out_backprop.dim_size(3); ++p) { // Calculate broadcast size for planes/rows/cols. For SAME padding, // current index could be in the padding area, and // p * stride_planes + window_planes // could be beyond the input tensor's boundary. In such cases, change // the starting index and reduce the broadcast size. // // The same procedure is repeated for every spatial dimension in the // nested loops below. int pindex, psize; std::array<int64, 3> input_size{{tensor_in.dim_size(3), tensor_in.dim_size(2), tensor_in.dim_size(1)}}; OP_REQUIRES_OK(context, GetBroadcastSize(p, input_size[0], window[0], stride[0], padding[0], &pindex, &psize)); for (int64 r = 0; r < out_backprop.dim_size(2); ++r) { int rindex, rsize; OP_REQUIRES_OK(context, GetBroadcastSize(r, input_size[1], window[1], stride[1], padding[1], &rindex, &rsize)); for (int64 c = 0; c < out_backprop.dim_size(1); ++c) { int cindex, csize; OP_REQUIRES_OK( context, GetBroadcastSize(c, input_size[2], window[2], stride[2], padding[2], &cindex, &csize)); TensorSlice src{{0, -1}, {c, 1}, {r, 1}, {p, 1}, {0, -1}}; TensorSlice dst{{0, -1}, {cindex, csize}, {rindex, rsize}, {pindex, psize}, {0, -1}}; Eigen::DSizes<Eigen::DenseIndex, 5> src_indices; Eigen::DSizes<Eigen::DenseIndex, 5> src_sizes; Eigen::DSizes<Eigen::DenseIndex, 5> dst_indices; Eigen::DSizes<Eigen::DenseIndex, 5> dst_sizes; src.FillIndicesAndSizes<5>(out_backprop.shape(), &src_indices, &src_sizes); dst.FillIndicesAndSizes<5>(tensor_in.shape(), &dst_indices, &dst_sizes); #if !defined(EIGEN_HAS_INDEX_LIST) Eigen::array<int, 5> bcast = {1, csize, rsize, psize, 1}; #else Eigen::IndexList<Eigen::type2index<1>, int, int, int, Eigen::type2index<1>> bcast; bcast.set(1, csize); bcast.set(2, rsize); bcast.set(3, psize); #endif // Slice from tensor_in. Eigen::Tensor<T, 5, Eigen::RowMajor> tensor_in_slice(dst_sizes); tensor_in_slice.device(context->eigen_cpu_device()) = tensor_in.tensor<T, 5>().slice(dst_indices, dst_sizes); // Slice from tensor_out. 
Eigen::Tensor<T, 5, Eigen::RowMajor> tensor_out_slice(src_sizes); tensor_out_slice.device(context->eigen_cpu_device()) = tensor_out.tensor<T, 5>().slice(src_indices, src_sizes); // Backprop slice. Eigen::Tensor<T, 5, Eigen::RowMajor> out_backprop_slice(src_sizes); out_backprop_slice.device(context->eigen_cpu_device()) = out_backprop.tensor<T, 5>().slice(src_indices, src_sizes); // The true backprop slice: if an element is the max, choose // the backprop slice; otherwise set to 0. Eigen::Tensor<T, 5, Eigen::RowMajor> select_slice(dst_sizes); Eigen::Tensor<T, 5, Eigen::RowMajor> mat0(dst_sizes); mat0.setZero(); select_slice = ((tensor_in_slice - tensor_out_slice.broadcast(bcast)).abs() < tensor_in_slice.constant(1e-5)) .select(out_backprop_slice.broadcast(bcast), mat0); output->tensor<T, 5>() .slice(dst_indices, dst_sizes) .device(context->eigen_cpu_device()) += select_slice; } } } } }; template <class Device, class T> class MaxPooling3dGradOp : public OpKernel { public: explicit MaxPooling3dGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); if (context->device_type() == DEVICE_CPU) { OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Default MaxPooling3dGradOp only supports NDHWC ", "on device type ", DeviceTypeString(context->device_type()))); } OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 5, errors::InvalidArgument("Sliding window ksize field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window stride field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'N') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'C') == 1), errors::Unimplemented( "Pooling is not yet supported on the depth dimension.")); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); const Tensor& tensor_out = context->input(1); const Tensor& out_backprop = context->input(2); OP_REQUIRES(context, tensor_in.dims() == 5, errors::InvalidArgument("tensor_in must be 5-dimensional")); OP_REQUIRES(context, tensor_out.dims() == 5, errors::InvalidArgument("tensor_out must be 5-dimensional")); OP_REQUIRES(context, out_backprop.dims() == 5, errors::InvalidArgument("out_backprop must be 5-dimensional")); const TensorShape& output_shape = tensor_in.shape(); Tensor* input_backprop; OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &input_backprop)); std::array<int64, 3> input_size{ {GetTensorDim(output_shape, data_format_, '2'), GetTensorDim(output_shape, data_format_, '1'), GetTensorDim(output_shape, data_format_, '0')}}; std::array<int64, 3> window{{GetTensorDim(ksize_, data_format_, '2'), GetTensorDim(ksize_, data_format_, '1'), GetTensorDim(ksize_, data_format_, '0')}}; std::array<int64, 3> stride{{GetTensorDim(stride_, data_format_, '2'), GetTensorDim(stride_, data_format_, '1'), GetTensorDim(stride_, data_format_, '0')}}; std::array<int64, 3> out, padding; 
OP_REQUIRES_OK(context, Get3dOutputSize(input_size, window, stride, padding_, &out, &padding)); LaunchMaxPooling3dGradOp<Device, T>::launch( context, tensor_in, tensor_out, out_backprop, window, stride, out, padding, data_format_, input_backprop); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; template <typename T> struct LaunchAvgPooling3dGradOp<CPUDevice, T> { static void launch(OpKernelContext* context, const TensorShape& tensor_in_shape, const Tensor& out_backprop, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& output_shape, const std::array<int64, 3>& padding, TensorFormat data_format, Tensor* output) { output->flat<T>().setZero(); std::array<int64, 3> input_size = {{tensor_in_shape.dim_size(3), tensor_in_shape.dim_size(2), tensor_in_shape.dim_size(1)}}; for (int64 p = 0; p < out_backprop.dim_size(3); ++p) { // Calculate broadcast size for planes/rows/cols. For SAME padding, // current index could be in the padding area, and // p * stride_planes + window_planes // could be beyond the input tensor's boundary. In such cases, change // the starting index and reduce the broadcast size. // // The same procedure is repeated for every spatial dimension in the // nested loops below. int pindex, psize; OP_REQUIRES_OK(context, GetBroadcastSize(p, input_size[0], window[0], stride[0], padding[0], &pindex, &psize)); for (int64 r = 0; r < out_backprop.dim_size(2); ++r) { int rindex, rsize; OP_REQUIRES_OK(context, GetBroadcastSize(r, input_size[1], window[1], stride[1], padding[1], &rindex, &rsize)); for (int64 c = 0; c < out_backprop.dim_size(1); ++c) { int cindex, csize; OP_REQUIRES_OK( context, GetBroadcastSize(c, input_size[2], window[2], stride[2], padding[2], &cindex, &csize)); TensorSlice src{{0, -1}, {c, 1}, {r, 1}, {p, 1}, {0, -1}}; TensorSlice dst{{0, -1}, {cindex, csize}, {rindex, rsize}, {pindex, psize}, {0, -1}}; Eigen::DSizes<Eigen::DenseIndex, 5> src_indices; Eigen::DSizes<Eigen::DenseIndex, 5> src_sizes; Eigen::DSizes<Eigen::DenseIndex, 5> dst_indices; Eigen::DSizes<Eigen::DenseIndex, 5> dst_sizes; src.FillIndicesAndSizes<5>(out_backprop.shape(), &src_indices, &src_sizes); dst.FillIndicesAndSizes<5>(tensor_in_shape, &dst_indices, &dst_sizes); #if !defined(EIGEN_HAS_INDEX_LIST) Eigen::array<int, 5> bcast = {1, csize, rsize, psize, 1}; #else Eigen::IndexList<Eigen::type2index<1>, int, int, int, Eigen::type2index<1>> bcast; bcast.set(1, csize); bcast.set(2, rsize); bcast.set(3, psize); #endif Eigen::Tensor<T, 5, Eigen::RowMajor> slices(src_sizes); slices.device(context->eigen_cpu_device()) = out_backprop.tensor<T, 5>().slice(src_indices, src_sizes); // Divide by the size of the actual patch (psize * rsize * csize). 
float divide_size = rsize * csize * psize * 1.0f; slices *= slices.constant(1.0f / divide_size); output->tensor<T, 5>() .slice(dst_indices, dst_sizes) .device(context->eigen_cpu_device()) += slices.broadcast(bcast); } } } } }; template <class Device, class T> class AvgPooling3dGradOp : public OpKernel { public: explicit AvgPooling3dGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); if (context->device_type() == DEVICE_CPU) { OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Default AvgPooling3dGradOp only supports NDHWC ", "on device type ", DeviceTypeString(context->device_type()))); } OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 5, errors::InvalidArgument("Sliding window ksize field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window stride field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'N') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'C') == 1), errors::Unimplemented( "Pooling is not yet supported on the depth dimension.")); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in_shape = context->input(0); const Tensor& out_backprop = context->input(1); OP_REQUIRES( context, tensor_in_shape.dims() == 1 && tensor_in_shape.NumElements() == 5, errors::InvalidArgument("tensor_in must be 1-dimensional and 5 " "elements")); OP_REQUIRES(context, out_backprop.dims() == 5, errors::InvalidArgument("out_backprop must be 5-dimensional")); TensorShape output_shape; auto shape_vec = tensor_in_shape.vec<int32>(); for (int64 i = 0; i < tensor_in_shape.NumElements(); ++i) { output_shape.AddDim(shape_vec(i)); } Tensor* output; OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output)); // Dimension order for these arrays is x, y, z. 
std::array<int64, 3> input_size{ {GetTensorDim(output_shape, data_format_, '2'), GetTensorDim(output_shape, data_format_, '1'), GetTensorDim(output_shape, data_format_, '0')}}; std::array<int64, 3> window{{GetTensorDim(ksize_, data_format_, '2'), GetTensorDim(ksize_, data_format_, '1'), GetTensorDim(ksize_, data_format_, '0')}}; std::array<int64, 3> stride{{GetTensorDim(stride_, data_format_, '2'), GetTensorDim(stride_, data_format_, '1'), GetTensorDim(stride_, data_format_, '0')}}; std::array<int64, 3> padding, out; OP_REQUIRES_OK(context, Get3dOutputSize(input_size, window, stride, padding_, &out, &padding)); LaunchAvgPooling3dGradOp<Device, T>::launch( context, output_shape, out_backprop, window, stride, out, padding, data_format_, output); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; template <typename T> struct LaunchMaxPooling3dGradGradOp<CPUDevice, T> { static void launch(OpKernelContext* context, const Pool3dParameters& params, const Tensor& tensor_in, const Tensor& tensor_out, const Tensor& tensor_top_diff, Tensor* tensor_bottom_diff) { OP_REQUIRES( context, params.data_format == FORMAT_NHWC, errors::InvalidArgument("Default MaxPooling3dGradGradOp only supports", "NDHWC on CPU device type")); typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> EigenMatrixMap; ConstEigenMatrixMap in_mat(tensor_in.flat<T>().data(), params.depth, params.tensor_in_planes * params.tensor_in_cols * params.tensor_in_rows * params.tensor_in_batch); ConstEigenMatrixMap out_mat(tensor_out.flat<T>().data(), params.depth, params.out_plane * params.out_width * params.out_height * params.tensor_in_batch); ConstEigenMatrixMap top_diff_mat( tensor_top_diff.flat<T>().data(), params.depth, params.tensor_in_planes * params.tensor_in_cols * params.tensor_in_rows * params.tensor_in_batch); EigenMatrixMap bottom_diff_mat( tensor_bottom_diff->flat<T>().data(), params.depth, params.out_plane * params.out_width * params.out_height * params.tensor_in_batch); const DeviceBase::CpuWorkerThreads& worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); auto shard = [&params, &in_mat, &out_mat, &top_diff_mat, &bottom_diff_mat]( int64 start, int64 limit) { const int32 depth = params.depth; const int32 in_planes = params.tensor_in_planes; const int32 in_rows = params.tensor_in_rows; const int32 in_cols = params.tensor_in_cols; const int32 pad_planes = params.pad_planes; const int32 pad_rows = params.pad_rows; const int32 pad_cols = params.pad_cols; const int32 window_planes = params.window_planes; const int32 window_rows = params.window_rows; const int32 window_cols = params.window_cols; const int32 plane_stride = params.plane_stride; const int32 row_stride = params.row_stride; const int32 col_stride = params.col_stride; const int32 out_plane = params.out_plane; const int32 out_height = params.out_height; const int32 out_width = params.out_width; { // Initializes the output grad backprop tensor with 0. 
const int32 output_image_size = out_plane * out_height * out_width * params.depth; EigenMatrixMap bottom_diff_shard( bottom_diff_mat.data() + start * output_image_size, 1, (limit - start) * output_image_size); bottom_diff_shard.setZero(); } for (int b = start; b < limit; ++b) { for (int pp = 0; pp < out_plane; ++pp) { for (int ph = 0; ph < out_height; ++ph) { for (int pw = 0; pw < out_width; ++pw) { // (p_start, p_end) * (h_start, h_end) * (w_start, w_end) is the // range that the input vector projects to. int p_start = pp * plane_stride - pad_planes; const int p_end = std::min(p_start + window_planes, in_planes); int h_start = ph * row_stride - pad_rows; const int h_end = std::min(h_start + window_rows, in_rows); int w_start = pw * col_stride - pad_cols; const int w_end = std::min(w_start + window_cols, in_cols); p_start = std::max(p_start, 0); h_start = std::max(h_start, 0); w_start = std::max(w_start, 0); const int out_index = ((b * out_plane + pp) * out_height + ph) * out_width + pw; // Find value corresponding to the input maximum in top_diff. for (int d = 0; d < depth; ++d) { const T& output_ref = out_mat.coeffRef(d, out_index); bool should_stop = false; for (int p = p_start; p < p_end && !should_stop; ++p) { for (int h = h_start; h < h_end && !should_stop; ++h) { for (int w = w_start; w < w_end && !should_stop; ++w) { const int in_index = ((b * in_planes + p) * in_rows + h) * in_cols + w; const T& input_ref = in_mat.coeffRef(d, in_index); if (output_ref == input_ref) { T& bottom_diff_ref = bottom_diff_mat.coeffRef(d, out_index); bottom_diff_ref = top_diff_mat.coeffRef(d, in_index); should_stop = true; } } } } } } } } } }; const int64 shard_cost = params.out_plane * params.out_height * params.out_width * params.depth * params.window_planes * params.window_rows * params.window_cols; Shard(worker_threads.num_threads, worker_threads.workers, params.tensor_in_batch, shard_cost, shard); } }; template <class Device, class T> class MaxPooling3dGradGradOp : public OpKernel { public: explicit MaxPooling3dGradGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 5, errors::InvalidArgument("Sliding window ksize field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window strides field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, ksize_[0] == 1 && stride_[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); const int32 ksize_c = GetTensorDim(ksize_, data_format_, 'C'); const int32 stride_c = GetTensorDim(stride_, data_format_, 'C'); OP_REQUIRES(context, ksize_c == 1 && stride_c == 1, errors::Unimplemented("MaxPooling3dGradGrad is not yet " "supported on the depth dimension.")); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); const Tensor& tensor_out = context->input(1); const Tensor& out_grad_backprop = context->input(2); // For maxpooling3d, tensor_in should have 5 dimensions. 
OP_REQUIRES(context, tensor_in.dims() == 5, errors::InvalidArgument("tensor_in must be 5-dimensional")); OP_REQUIRES(context, tensor_out.dims() == 5, errors::InvalidArgument("tensor_out must be 5-dimensional")); // For maxpooling3d, out_grad_backprop should have 5 dimensions. OP_REQUIRES( context, out_grad_backprop.dims() == 5, errors::InvalidArgument("out_grad_backprop must be 5-dimensional")); Pool3dParameters params{context, ksize_, stride_, padding_, data_format_, tensor_in.shape()}; Tensor* output = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {2}, 0, tensor_out.shape(), &output)); // Given access patterns in LaunchMaxPooling3dGradGradOp, these tensors must // have elements. OP_REQUIRES(context, tensor_in.NumElements() > 0, errors::InvalidArgument("received empty tensor tensor_in: ", tensor_in.DebugString())); OP_REQUIRES(context, tensor_out.NumElements() > 0, errors::InvalidArgument("received empty tensor tensor_out: ", tensor_out.DebugString())); OP_REQUIRES( context, out_grad_backprop.NumElements() > 0, errors::InvalidArgument("received empty tensor out_grad_backprop: ", out_grad_backprop.DebugString())); LaunchMaxPooling3dGradGradOp<Device, T>::launch( context, params, tensor_in, tensor_out, out_grad_backprop, output); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; #define REGISTER_KERNELS(D, T) \ REGISTER_KERNEL_BUILDER( \ Name("MaxPool3D").Device(DEVICE_##D).TypeConstraint<T>("T"), \ Pooling3DOp<D##Device, T, MAX>); \ REGISTER_KERNEL_BUILDER(Name("MaxPool3DGrad") \ .Device(DEVICE_##D) \ .TypeConstraint<T>("T") \ .TypeConstraint<T>("TInput"), \ MaxPooling3dGradOp<D##Device, T>); \ REGISTER_KERNEL_BUILDER( \ Name("MaxPool3DGradGrad").Device(DEVICE_##D).TypeConstraint<T>("T"), \ MaxPooling3dGradGradOp<D##Device, T>); \ REGISTER_KERNEL_BUILDER( \ Name("AvgPool3D").Device(DEVICE_##D).TypeConstraint<T>("T"), \ Pooling3DOp<D##Device, T, AVG>); \ REGISTER_KERNEL_BUILDER(Name("AvgPool3DGrad") \ .Device(DEVICE_##D) \ .TypeConstraint<T>("T") \ .HostMemory("orig_input_shape"), \ AvgPooling3dGradOp<D##Device, T>); #define REGISTER_CPU_KERNELS(T) REGISTER_KERNELS(CPU, T) TF_CALL_float(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename T> struct LaunchPoolingOp<GPUDevice, T, AVG> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& padding, TensorFormat data_format, Padding padding_type, Tensor* output) { DnnPooling3dOp<T>::Compute(context, se::dnn::PoolingMode::kAverage, window, stride, padding, data_format, tensor_in, output); } }; template <typename T> struct LaunchPoolingOp<GPUDevice, T, MAX> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& padding, TensorFormat data_format, Padding padding_type, Tensor* output) { DnnPooling3dOp<T>::Compute(context, se::dnn::PoolingMode::kMaximum, window, stride, padding, data_format, tensor_in, output); } }; template <typename T> struct LaunchMaxPooling3dGradOp<GPUDevice, T> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const Tensor& tensor_out, const Tensor& out_backprop, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& out, const std::array<int64, 3>& padding, TensorFormat data_format, 
Tensor* input_backprop) { const TensorShape output_shape = tensor_in.shape(); DnnPooling3dGradOp<T>::Compute(context, se::dnn::PoolingMode::kMaximum, window, stride, padding, out, data_format, out_backprop, output_shape, &tensor_in, &tensor_out, input_backprop); } }; template <typename T> struct LaunchAvgPooling3dGradOp<GPUDevice, T> { static void launch(OpKernelContext* context, const TensorShape& tensor_in_shape, const Tensor& out_backprop, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& out, const std::array<int64, 3>& padding, TensorFormat data_format, Tensor* output) { DnnPooling3dGradOp<T>::Compute( context, se::dnn::PoolingMode::kAverage, window, stride, padding, out, data_format, out_backprop, tensor_in_shape, nullptr, nullptr, output); } }; template <typename T> struct LaunchMaxPooling3dGradGradOp<GPUDevice, T> { static void launch(OpKernelContext* context, const Pool3dParameters& params, const Tensor& tensor_in, const Tensor& tensor_out, const Tensor& tensor_top_diff, Tensor* tensor_bottom_diff) { bool status = functor::MaxPool3dGradBackward<T>()( params.data_format, tensor_in.flat<T>().data(), tensor_out.flat<T>().data(), params.tensor_in_batch, params.out_plane, params.out_height, params.out_width, params.depth, params.tensor_in_planes, params.tensor_in_rows, params.tensor_in_cols, params.window_planes, params.window_rows, params.window_cols, params.plane_stride, params.row_stride, params.col_stride, params.pad_planes, params.pad_rows, params.pad_cols, tensor_top_diff.flat<T>().data(), tensor_bottom_diff->flat<T>().data(), context->eigen_gpu_device()); if (!status) { context->SetStatus( errors::Internal("Failed launching MaxPool3dGradBackward")); } } }; #define REGISTER_GPU_KERNELS(T) REGISTER_KERNELS(GPU, T) TF_CALL_float(REGISTER_GPU_KERNELS) TF_CALL_half(REGISTER_GPU_KERNELS) #undef REGISTER_GPU_KERNELS #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM #undef REGISTER_KERNELS } // namespace tensorflow
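Editor's note on the MaxPooling3dGradGrad record (CWE-787, CVE-2021-29576): in the source revision, the shard lambda in LaunchMaxPooling3dGradGradOp indexes out_mat and bottom_diff_mat using extents derived from Pool3dParameters (params.out_plane/out_height/out_width), while the actual buffers come from the caller-supplied tensor_out and the output allocated with tensor_out.shape(); if those shapes disagree with what the parameters imply, the write through bottom_diff_ref lands outside the allocation. The fragment below is an illustrative hardening sketch for MaxPooling3dGradGradOp::Compute(), reusing forward_output_shape() defined earlier in the file; it is an assumption about the kind of check the fixed (ground-truth) revision adds, not necessarily the verbatim upstream patch, which is truncated in this record:

```cpp
// Illustrative guards (assumed placement: after Pool3dParameters params{...}
// in MaxPooling3dGradGradOp::Compute). The shard lambda computes out_index
// and in_index from params.*, so the gradient tensors must match the shapes
// those parameters imply before any element is touched.
OP_REQUIRES(context, tensor_out.shape() == params.forward_output_shape(),
            errors::InvalidArgument(
                "Expected orig_output shape ",
                params.forward_output_shape().DebugString(), ", got ",
                tensor_out.shape().DebugString()));
OP_REQUIRES(context, out_grad_backprop.shape() == tensor_in.shape(),
            errors::InvalidArgument(
                "Expected grad shape ", tensor_in.shape().DebugString(),
                ", got ", out_grad_backprop.shape().DebugString()));
```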
null
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #define EIGEN_USE_THREADS #include "tensorflow/core/kernels/pooling_ops_3d.h" #include <array> #include "third_party/eigen3/Eigen/Core" #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/kernel_shape_util.h" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_slice.h" #include "tensorflow/core/kernels/eigen_pooling.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/tensor_format.h" #include "tensorflow/core/util/work_sharder.h" #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #include "tensorflow/core/kernels/cudnn_pooling_gpu.h" #include "tensorflow/core/kernels/pooling_ops_3d_gpu.h" #endif namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; Pool3dParameters::Pool3dParameters(OpKernelContext* context, const std::vector<int32>& ksize, const std::vector<int32>& stride, Padding padding, TensorFormat data_format, const TensorShape& tensor_in_shape) { // For maxpooling, tensor_in should have 4 dimensions. OP_REQUIRES(context, tensor_in_shape.dims() == 5, errors::InvalidArgument("tensor_in must be 4-dimensional")); this->data_format = data_format; depth = GetTensorDim(tensor_in_shape, data_format, 'C'); tensor_in_planes = GetTensorDim(tensor_in_shape, data_format, '0'); tensor_in_rows = GetTensorDim(tensor_in_shape, data_format, '1'); tensor_in_cols = GetTensorDim(tensor_in_shape, data_format, '2'); tensor_in_batch = GetTensorDim(tensor_in_shape, data_format, 'N'); window_planes = GetTensorDim(ksize, data_format, '0'); window_rows = GetTensorDim(ksize, data_format, '1'); window_cols = GetTensorDim(ksize, data_format, '2'); depth_window = GetTensorDim(ksize, data_format, 'C'); plane_stride = GetTensorDim(stride, data_format, '0'); row_stride = GetTensorDim(stride, data_format, '1'); col_stride = GetTensorDim(stride, data_format, '2'); depth_stride = GetTensorDim(stride, data_format, 'C'); // We only support 3D pooling across plane/width/height. Depthwise // pooling is not supported. 
OP_REQUIRES( context, depth_window == 1 && depth_stride == 1, errors::Unimplemented( "Pooling3d only supports pooling across plane/width/height.")); OP_REQUIRES_OK(context, GetWindowedOutputSize(tensor_in_planes, window_planes, plane_stride, padding, &out_plane, &pad_planes)); OP_REQUIRES_OK(context, GetWindowedOutputSize(tensor_in_rows, window_rows, row_stride, padding, &out_height, &pad_rows)); OP_REQUIRES_OK(context, GetWindowedOutputSize(tensor_in_cols, window_cols, col_stride, padding, &out_width, &pad_cols)); } TensorShape Pool3dParameters::forward_output_shape() { return ShapeFromFormat(data_format, tensor_in_batch, {{out_plane, out_height, out_width}}, depth); } template <typename T> struct LaunchPoolingOp<CPUDevice, T, AVG> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& padding, TensorFormat data_format, Padding padding_type, Tensor* output) { output->tensor<T, 5>().device(context->eigen_device<CPUDevice>()) = Eigen::CuboidAvgPooling(tensor_in.tensor<T, 5>(), window[0], window[1], window[2], stride[0], stride[1], stride[2], BrainPadding2EigenPadding(padding_type)); } }; template <typename T> struct LaunchPoolingOp<CPUDevice, T, MAX> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& padding, TensorFormat data_format, Padding padding_type, Tensor* output) { output->tensor<T, 5>().device(context->eigen_device<CPUDevice>()) = Eigen::CuboidMaxPooling(tensor_in.tensor<T, 5>(), window[0], window[1], window[2], stride[0], stride[1], stride[2], BrainPadding2EigenPadding(padding_type)); } }; template <typename Device, typename T, PoolingType Type> class Pooling3DOp : public UnaryOp<T> { public: explicit Pooling3DOp(OpKernelConstruction* context) : UnaryOp<T>(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); if (context->device_type() == DEVICE_CPU) { OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument("Default Pooling3DOp only supports NDHWC ", "on device type ", DeviceTypeString(context->device_type()))); } OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 5, errors::InvalidArgument("Sliding window ksize field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window stride field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'N') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'C') == 1), errors::Unimplemented( "Pooling is not yet supported on the depth dimension.")); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); OP_REQUIRES(context, tensor_in.dims() == 5, errors::InvalidArgument("tensor_in must be 5-dimensional")); const int64 depth = GetTensorDim(tensor_in, data_format_, 'C'); const int64 in_batch = GetTensorDim(tensor_in, data_format_, 'N'); 
// Dimension order for these arrays is: x, y, z. std::array<int64, 3> input_size{ {GetTensorDim(tensor_in, data_format_, '2'), GetTensorDim(tensor_in, data_format_, '1'), GetTensorDim(tensor_in, data_format_, '0')}}; std::array<int64, 3> window{{GetTensorDim(ksize_, data_format_, '2'), GetTensorDim(ksize_, data_format_, '1'), GetTensorDim(ksize_, data_format_, '0')}}; std::array<int64, 3> stride{{GetTensorDim(stride_, data_format_, '2'), GetTensorDim(stride_, data_format_, '1'), GetTensorDim(stride_, data_format_, '0')}}; std::array<int64, 3> padding, out; OP_REQUIRES_OK(context, Get3dOutputSize(input_size, window, stride, padding_, &out, &padding)); TensorShape out_shape = ShapeFromFormat(data_format_, in_batch, {{out[2], out[1], out[0]}}, depth); Tensor* output; OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output)); if (out_shape.num_elements() == 0) return; LaunchPoolingOp<Device, T, Type>::launch(context, tensor_in, window, stride, padding, data_format_, padding_, output); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; template <typename T> struct LaunchMaxPooling3dGradOp<CPUDevice, T> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const Tensor& tensor_out, const Tensor& out_backprop, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& out, const std::array<int64, 3>& padding, TensorFormat data_format, Tensor* output) { output->flat<T>().setZero(); for (int64 p = 0; p < out_backprop.dim_size(3); ++p) { // Calculate broadcast size for planes/rows/cols. For SAME padding, // current index could be in the padding area, and // p * stride_planes + window_planes // could be beyond the input tensor's boundary. In such cases, change // the starting index and reduce the broadcast size. // // The same procedure is repeated for every spatial dimension in the // nested loops below. int pindex, psize; std::array<int64, 3> input_size{{tensor_in.dim_size(3), tensor_in.dim_size(2), tensor_in.dim_size(1)}}; OP_REQUIRES_OK(context, GetBroadcastSize(p, input_size[0], window[0], stride[0], padding[0], &pindex, &psize)); for (int64 r = 0; r < out_backprop.dim_size(2); ++r) { int rindex, rsize; OP_REQUIRES_OK(context, GetBroadcastSize(r, input_size[1], window[1], stride[1], padding[1], &rindex, &rsize)); for (int64 c = 0; c < out_backprop.dim_size(1); ++c) { int cindex, csize; OP_REQUIRES_OK( context, GetBroadcastSize(c, input_size[2], window[2], stride[2], padding[2], &cindex, &csize)); TensorSlice src{{0, -1}, {c, 1}, {r, 1}, {p, 1}, {0, -1}}; TensorSlice dst{{0, -1}, {cindex, csize}, {rindex, rsize}, {pindex, psize}, {0, -1}}; Eigen::DSizes<Eigen::DenseIndex, 5> src_indices; Eigen::DSizes<Eigen::DenseIndex, 5> src_sizes; Eigen::DSizes<Eigen::DenseIndex, 5> dst_indices; Eigen::DSizes<Eigen::DenseIndex, 5> dst_sizes; src.FillIndicesAndSizes<5>(out_backprop.shape(), &src_indices, &src_sizes); dst.FillIndicesAndSizes<5>(tensor_in.shape(), &dst_indices, &dst_sizes); #if !defined(EIGEN_HAS_INDEX_LIST) Eigen::array<int, 5> bcast = {1, csize, rsize, psize, 1}; #else Eigen::IndexList<Eigen::type2index<1>, int, int, int, Eigen::type2index<1>> bcast; bcast.set(1, csize); bcast.set(2, rsize); bcast.set(3, psize); #endif // Slice from tensor_in. Eigen::Tensor<T, 5, Eigen::RowMajor> tensor_in_slice(dst_sizes); tensor_in_slice.device(context->eigen_cpu_device()) = tensor_in.tensor<T, 5>().slice(dst_indices, dst_sizes); // Slice from tensor_out. 
Eigen::Tensor<T, 5, Eigen::RowMajor> tensor_out_slice(src_sizes); tensor_out_slice.device(context->eigen_cpu_device()) = tensor_out.tensor<T, 5>().slice(src_indices, src_sizes); // Backprop slice. Eigen::Tensor<T, 5, Eigen::RowMajor> out_backprop_slice(src_sizes); out_backprop_slice.device(context->eigen_cpu_device()) = out_backprop.tensor<T, 5>().slice(src_indices, src_sizes); // The true backprop slice: if an element is the max, choose // the backprop slice; otherwise set to 0. Eigen::Tensor<T, 5, Eigen::RowMajor> select_slice(dst_sizes); Eigen::Tensor<T, 5, Eigen::RowMajor> mat0(dst_sizes); mat0.setZero(); select_slice = ((tensor_in_slice - tensor_out_slice.broadcast(bcast)).abs() < tensor_in_slice.constant(1e-5)) .select(out_backprop_slice.broadcast(bcast), mat0); output->tensor<T, 5>() .slice(dst_indices, dst_sizes) .device(context->eigen_cpu_device()) += select_slice; } } } } }; template <class Device, class T> class MaxPooling3dGradOp : public OpKernel { public: explicit MaxPooling3dGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); if (context->device_type() == DEVICE_CPU) { OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Default MaxPooling3dGradOp only supports NDHWC ", "on device type ", DeviceTypeString(context->device_type()))); } OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 5, errors::InvalidArgument("Sliding window ksize field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window stride field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'N') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'C') == 1), errors::Unimplemented( "Pooling is not yet supported on the depth dimension.")); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); const Tensor& tensor_out = context->input(1); const Tensor& out_backprop = context->input(2); OP_REQUIRES(context, tensor_in.dims() == 5, errors::InvalidArgument("tensor_in must be 5-dimensional")); OP_REQUIRES(context, tensor_out.dims() == 5, errors::InvalidArgument("tensor_out must be 5-dimensional")); OP_REQUIRES(context, out_backprop.dims() == 5, errors::InvalidArgument("out_backprop must be 5-dimensional")); const TensorShape& output_shape = tensor_in.shape(); Tensor* input_backprop; OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &input_backprop)); std::array<int64, 3> input_size{ {GetTensorDim(output_shape, data_format_, '2'), GetTensorDim(output_shape, data_format_, '1'), GetTensorDim(output_shape, data_format_, '0')}}; std::array<int64, 3> window{{GetTensorDim(ksize_, data_format_, '2'), GetTensorDim(ksize_, data_format_, '1'), GetTensorDim(ksize_, data_format_, '0')}}; std::array<int64, 3> stride{{GetTensorDim(stride_, data_format_, '2'), GetTensorDim(stride_, data_format_, '1'), GetTensorDim(stride_, data_format_, '0')}}; std::array<int64, 3> out, padding; 
OP_REQUIRES_OK(context, Get3dOutputSize(input_size, window, stride, padding_, &out, &padding)); LaunchMaxPooling3dGradOp<Device, T>::launch( context, tensor_in, tensor_out, out_backprop, window, stride, out, padding, data_format_, input_backprop); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; template <typename T> struct LaunchAvgPooling3dGradOp<CPUDevice, T> { static void launch(OpKernelContext* context, const TensorShape& tensor_in_shape, const Tensor& out_backprop, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& output_shape, const std::array<int64, 3>& padding, TensorFormat data_format, Tensor* output) { output->flat<T>().setZero(); std::array<int64, 3> input_size = {{tensor_in_shape.dim_size(3), tensor_in_shape.dim_size(2), tensor_in_shape.dim_size(1)}}; for (int64 p = 0; p < out_backprop.dim_size(3); ++p) { // Calculate broadcast size for planes/rows/cols. For SAME padding, // current index could be in the padding area, and // p * stride_planes + window_planes // could be beyond the input tensor's boundary. In such cases, change // the starting index and reduce the broadcast size. // // The same procedure is repeated for every spatial dimension in the // nested loops below. int pindex, psize; OP_REQUIRES_OK(context, GetBroadcastSize(p, input_size[0], window[0], stride[0], padding[0], &pindex, &psize)); for (int64 r = 0; r < out_backprop.dim_size(2); ++r) { int rindex, rsize; OP_REQUIRES_OK(context, GetBroadcastSize(r, input_size[1], window[1], stride[1], padding[1], &rindex, &rsize)); for (int64 c = 0; c < out_backprop.dim_size(1); ++c) { int cindex, csize; OP_REQUIRES_OK( context, GetBroadcastSize(c, input_size[2], window[2], stride[2], padding[2], &cindex, &csize)); TensorSlice src{{0, -1}, {c, 1}, {r, 1}, {p, 1}, {0, -1}}; TensorSlice dst{{0, -1}, {cindex, csize}, {rindex, rsize}, {pindex, psize}, {0, -1}}; Eigen::DSizes<Eigen::DenseIndex, 5> src_indices; Eigen::DSizes<Eigen::DenseIndex, 5> src_sizes; Eigen::DSizes<Eigen::DenseIndex, 5> dst_indices; Eigen::DSizes<Eigen::DenseIndex, 5> dst_sizes; src.FillIndicesAndSizes<5>(out_backprop.shape(), &src_indices, &src_sizes); dst.FillIndicesAndSizes<5>(tensor_in_shape, &dst_indices, &dst_sizes); #if !defined(EIGEN_HAS_INDEX_LIST) Eigen::array<int, 5> bcast = {1, csize, rsize, psize, 1}; #else Eigen::IndexList<Eigen::type2index<1>, int, int, int, Eigen::type2index<1>> bcast; bcast.set(1, csize); bcast.set(2, rsize); bcast.set(3, psize); #endif Eigen::Tensor<T, 5, Eigen::RowMajor> slices(src_sizes); slices.device(context->eigen_cpu_device()) = out_backprop.tensor<T, 5>().slice(src_indices, src_sizes); // Divide by the size of the actual patch (psize * rsize * csize). 
float divide_size = rsize * csize * psize * 1.0f; slices *= slices.constant(1.0f / divide_size); output->tensor<T, 5>() .slice(dst_indices, dst_sizes) .device(context->eigen_cpu_device()) += slices.broadcast(bcast); } } } } }; template <class Device, class T> class AvgPooling3dGradOp : public OpKernel { public: explicit AvgPooling3dGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); if (context->device_type() == DEVICE_CPU) { OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Default AvgPooling3dGradOp only supports NDHWC ", "on device type ", DeviceTypeString(context->device_type()))); } OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 5, errors::InvalidArgument("Sliding window ksize field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window stride field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'N') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'C') == 1), errors::Unimplemented( "Pooling is not yet supported on the depth dimension.")); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in_shape = context->input(0); const Tensor& out_backprop = context->input(1); OP_REQUIRES( context, tensor_in_shape.dims() == 1 && tensor_in_shape.NumElements() == 5, errors::InvalidArgument("tensor_in must be 1-dimensional and 5 " "elements")); OP_REQUIRES(context, out_backprop.dims() == 5, errors::InvalidArgument("out_backprop must be 5-dimensional")); TensorShape output_shape; auto shape_vec = tensor_in_shape.vec<int32>(); for (int64 i = 0; i < tensor_in_shape.NumElements(); ++i) { output_shape.AddDim(shape_vec(i)); } Tensor* output; OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output)); // Dimension order for these arrays is x, y, z. 
std::array<int64, 3> input_size{ {GetTensorDim(output_shape, data_format_, '2'), GetTensorDim(output_shape, data_format_, '1'), GetTensorDim(output_shape, data_format_, '0')}}; std::array<int64, 3> window{{GetTensorDim(ksize_, data_format_, '2'), GetTensorDim(ksize_, data_format_, '1'), GetTensorDim(ksize_, data_format_, '0')}}; std::array<int64, 3> stride{{GetTensorDim(stride_, data_format_, '2'), GetTensorDim(stride_, data_format_, '1'), GetTensorDim(stride_, data_format_, '0')}}; std::array<int64, 3> padding, out; OP_REQUIRES_OK(context, Get3dOutputSize(input_size, window, stride, padding_, &out, &padding)); LaunchAvgPooling3dGradOp<Device, T>::launch( context, output_shape, out_backprop, window, stride, out, padding, data_format_, output); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; template <typename T> struct LaunchMaxPooling3dGradGradOp<CPUDevice, T> { static void launch(OpKernelContext* context, const Pool3dParameters& params, const Tensor& tensor_in, const Tensor& tensor_out, const Tensor& tensor_top_diff, Tensor* tensor_bottom_diff) { OP_REQUIRES( context, params.data_format == FORMAT_NHWC, errors::InvalidArgument("Default MaxPooling3dGradGradOp only supports", "NDHWC on CPU device type")); typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> EigenMatrixMap; ConstEigenMatrixMap in_mat(tensor_in.flat<T>().data(), params.depth, params.tensor_in_planes * params.tensor_in_cols * params.tensor_in_rows * params.tensor_in_batch); ConstEigenMatrixMap out_mat(tensor_out.flat<T>().data(), params.depth, params.out_plane * params.out_width * params.out_height * params.tensor_in_batch); ConstEigenMatrixMap top_diff_mat( tensor_top_diff.flat<T>().data(), params.depth, params.tensor_in_planes * params.tensor_in_cols * params.tensor_in_rows * params.tensor_in_batch); EigenMatrixMap bottom_diff_mat( tensor_bottom_diff->flat<T>().data(), params.depth, params.out_plane * params.out_width * params.out_height * params.tensor_in_batch); const DeviceBase::CpuWorkerThreads& worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); auto shard = [&params, &in_mat, &out_mat, &top_diff_mat, &bottom_diff_mat]( int64 start, int64 limit) { const int32 depth = params.depth; const int32 in_planes = params.tensor_in_planes; const int32 in_rows = params.tensor_in_rows; const int32 in_cols = params.tensor_in_cols; const int32 pad_planes = params.pad_planes; const int32 pad_rows = params.pad_rows; const int32 pad_cols = params.pad_cols; const int32 window_planes = params.window_planes; const int32 window_rows = params.window_rows; const int32 window_cols = params.window_cols; const int32 plane_stride = params.plane_stride; const int32 row_stride = params.row_stride; const int32 col_stride = params.col_stride; const int32 out_plane = params.out_plane; const int32 out_height = params.out_height; const int32 out_width = params.out_width; { // Initializes the output grad backprop tensor with 0. 
const int32 output_image_size = out_plane * out_height * out_width * params.depth; EigenMatrixMap bottom_diff_shard( bottom_diff_mat.data() + start * output_image_size, 1, (limit - start) * output_image_size); bottom_diff_shard.setZero(); } for (int b = start; b < limit; ++b) { for (int pp = 0; pp < out_plane; ++pp) { for (int ph = 0; ph < out_height; ++ph) { for (int pw = 0; pw < out_width; ++pw) { // (p_start, p_end) * (h_start, h_end) * (w_start, w_end) is the // range that the input vector projects to. int p_start = pp * plane_stride - pad_planes; const int p_end = std::min(p_start + window_planes, in_planes); int h_start = ph * row_stride - pad_rows; const int h_end = std::min(h_start + window_rows, in_rows); int w_start = pw * col_stride - pad_cols; const int w_end = std::min(w_start + window_cols, in_cols); p_start = std::max(p_start, 0); h_start = std::max(h_start, 0); w_start = std::max(w_start, 0); const int out_index = ((b * out_plane + pp) * out_height + ph) * out_width + pw; // Find value corresponding to the input maximum in top_diff. for (int d = 0; d < depth; ++d) { const T& output_ref = out_mat.coeffRef(d, out_index); bool should_stop = false; for (int p = p_start; p < p_end && !should_stop; ++p) { for (int h = h_start; h < h_end && !should_stop; ++h) { for (int w = w_start; w < w_end && !should_stop; ++w) { const int in_index = ((b * in_planes + p) * in_rows + h) * in_cols + w; const T& input_ref = in_mat.coeffRef(d, in_index); if (output_ref == input_ref) { T& bottom_diff_ref = bottom_diff_mat.coeffRef(d, out_index); bottom_diff_ref = top_diff_mat.coeffRef(d, in_index); should_stop = true; } } } } } } } } } }; const int64 shard_cost = params.out_plane * params.out_height * params.out_width * params.depth * params.window_planes * params.window_rows * params.window_cols; Shard(worker_threads.num_threads, worker_threads.workers, params.tensor_in_batch, shard_cost, shard); } }; template <class Device, class T> class MaxPooling3dGradGradOp : public OpKernel { public: explicit MaxPooling3dGradGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 5, errors::InvalidArgument("Sliding window ksize field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window strides field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, ksize_[0] == 1 && stride_[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); const int32 ksize_c = GetTensorDim(ksize_, data_format_, 'C'); const int32 stride_c = GetTensorDim(stride_, data_format_, 'C'); OP_REQUIRES(context, ksize_c == 1 && stride_c == 1, errors::Unimplemented("MaxPooling3dGradGrad is not yet " "supported on the depth dimension.")); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); const Tensor& tensor_out = context->input(1); const Tensor& out_grad_backprop = context->input(2); // For maxpooling3d, tensor_in should have 5 dimensions. 
OP_REQUIRES(context, tensor_in.dims() == 5, errors::InvalidArgument("tensor_in must be 5-dimensional")); OP_REQUIRES(context, tensor_out.dims() == 5, errors::InvalidArgument("tensor_out must be 5-dimensional")); // For maxpooling3d, out_grad_backprop should have 5 dimensions. OP_REQUIRES( context, out_grad_backprop.dims() == 5, errors::InvalidArgument("out_grad_backprop must be 5-dimensional")); Pool3dParameters params{context, ksize_, stride_, padding_, data_format_, tensor_in.shape()}; if (!context->status().ok()) return; // params is invalid Tensor* output = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {2}, 0, tensor_out.shape(), &output)); // Given access patterns in LaunchMaxPooling3dGradGradOp, these tensors must // have elements. OP_REQUIRES(context, tensor_in.NumElements() > 0, errors::InvalidArgument("received empty tensor tensor_in: ", tensor_in.DebugString())); OP_REQUIRES(context, tensor_out.NumElements() > 0, errors::InvalidArgument("received empty tensor tensor_out: ", tensor_out.DebugString())); OP_REQUIRES( context, out_grad_backprop.NumElements() > 0, errors::InvalidArgument("received empty tensor out_grad_backprop: ", out_grad_backprop.DebugString())); OP_REQUIRES(context, tensor_in.NumElements() == out_grad_backprop.NumElements(), errors::InvalidArgument("tensor_in and out_grad_backprop must " "have same number of elements, got <", tensor_in.DebugString(), "> and <", out_grad_backprop.DebugString(), ">")); OP_REQUIRES( context, tensor_out.NumElements() == output->NumElements(), errors::InvalidArgument( "tensor_out and output must have same number of elements, got <", tensor_out.DebugString(), "> and <", output->DebugString(), ">")); LaunchMaxPooling3dGradGradOp<Device, T>::launch( context, params, tensor_in, tensor_out, out_grad_backprop, output); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; #define REGISTER_KERNELS(D, T) \ REGISTER_KERNEL_BUILDER( \ Name("MaxPool3D").Device(DEVICE_##D).TypeConstraint<T>("T"), \ Pooling3DOp<D##Device, T, MAX>); \ REGISTER_KERNEL_BUILDER(Name("MaxPool3DGrad") \ .Device(DEVICE_##D) \ .TypeConstraint<T>("T") \ .TypeConstraint<T>("TInput"), \ MaxPooling3dGradOp<D##Device, T>); \ REGISTER_KERNEL_BUILDER( \ Name("MaxPool3DGradGrad").Device(DEVICE_##D).TypeConstraint<T>("T"), \ MaxPooling3dGradGradOp<D##Device, T>); \ REGISTER_KERNEL_BUILDER( \ Name("AvgPool3D").Device(DEVICE_##D).TypeConstraint<T>("T"), \ Pooling3DOp<D##Device, T, AVG>); \ REGISTER_KERNEL_BUILDER(Name("AvgPool3DGrad") \ .Device(DEVICE_##D) \ .TypeConstraint<T>("T") \ .HostMemory("orig_input_shape"), \ AvgPooling3dGradOp<D##Device, T>); #define REGISTER_CPU_KERNELS(T) REGISTER_KERNELS(CPU, T) TF_CALL_float(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename T> struct LaunchPoolingOp<GPUDevice, T, AVG> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& padding, TensorFormat data_format, Padding padding_type, Tensor* output) { DnnPooling3dOp<T>::Compute(context, se::dnn::PoolingMode::kAverage, window, stride, padding, data_format, tensor_in, output); } }; template <typename T> struct LaunchPoolingOp<GPUDevice, T, MAX> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& padding, 
TensorFormat data_format, Padding padding_type, Tensor* output) { DnnPooling3dOp<T>::Compute(context, se::dnn::PoolingMode::kMaximum, window, stride, padding, data_format, tensor_in, output); } }; template <typename T> struct LaunchMaxPooling3dGradOp<GPUDevice, T> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const Tensor& tensor_out, const Tensor& out_backprop, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& out, const std::array<int64, 3>& padding, TensorFormat data_format, Tensor* input_backprop) { const TensorShape output_shape = tensor_in.shape(); DnnPooling3dGradOp<T>::Compute(context, se::dnn::PoolingMode::kMaximum, window, stride, padding, out, data_format, out_backprop, output_shape, &tensor_in, &tensor_out, input_backprop); } }; template <typename T> struct LaunchAvgPooling3dGradOp<GPUDevice, T> { static void launch(OpKernelContext* context, const TensorShape& tensor_in_shape, const Tensor& out_backprop, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& out, const std::array<int64, 3>& padding, TensorFormat data_format, Tensor* output) { DnnPooling3dGradOp<T>::Compute( context, se::dnn::PoolingMode::kAverage, window, stride, padding, out, data_format, out_backprop, tensor_in_shape, nullptr, nullptr, output); } }; template <typename T> struct LaunchMaxPooling3dGradGradOp<GPUDevice, T> { static void launch(OpKernelContext* context, const Pool3dParameters& params, const Tensor& tensor_in, const Tensor& tensor_out, const Tensor& tensor_top_diff, Tensor* tensor_bottom_diff) { bool status = functor::MaxPool3dGradBackward<T>()( params.data_format, tensor_in.flat<T>().data(), tensor_out.flat<T>().data(), params.tensor_in_batch, params.out_plane, params.out_height, params.out_width, params.depth, params.tensor_in_planes, params.tensor_in_rows, params.tensor_in_cols, params.window_planes, params.window_rows, params.window_cols, params.plane_stride, params.row_stride, params.col_stride, params.pad_planes, params.pad_rows, params.pad_cols, tensor_top_diff.flat<T>().data(), tensor_bottom_diff->flat<T>().data(), context->eigen_gpu_device()); if (!status) { context->SetStatus( errors::Internal("Failed launching MaxPool3dGradBackward")); } } }; #define REGISTER_GPU_KERNELS(T) REGISTER_KERNELS(GPU, T) TF_CALL_float(REGISTER_GPU_KERNELS) TF_CALL_half(REGISTER_GPU_KERNELS) #undef REGISTER_GPU_KERNELS #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM #undef REGISTER_KERNELS } // namespace tensorflow
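The kernel classes in the file above are exposed as TensorFlow ops through the REGISTER_KERNELS(D, T) macro near the end of the file. As a reading aid only (this expansion is not part of the original cell), the first clause of REGISTER_CPU_KERNELS(float), driven by TF_CALL_float, resolves roughly to:

// Illustrative expansion of the first clause of REGISTER_KERNELS(CPU, float);
// DEVICE_##D becomes DEVICE_CPU and D##Device becomes CPUDevice.
REGISTER_KERNEL_BUILDER(
    Name("MaxPool3D").Device(DEVICE_CPU).TypeConstraint<float>("T"),
    Pooling3DOp<CPUDevice, float, MAX>);

The remaining clauses register MaxPool3DGrad, MaxPool3DGradGrad, AvgPool3D and AvgPool3DGrad the same way, and REGISTER_GPU_KERNELS repeats the pattern for the GPU device when GOOGLE_CUDA or TENSORFLOW_USE_ROCM is defined.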
null
259
CWE-787
CVE-2021-29577
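This row (index 259) pairs CWE-787, an out-of-bounds write tracked as CVE-2021-29577, with TensorFlow's 3-D pooling kernels (pooling_ops_3d). The source cell below is the pre-patch file, source_diag is null, and the ground_truth cell is the patched file, which adds shape-consistency checks to LaunchAvgPooling3dGradOp<CPUDevice, T>, the CPU kernel behind AvgPool3DGrad.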
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #define EIGEN_USE_THREADS #include "tensorflow/core/kernels/pooling_ops_3d.h" #include <array> #include "third_party/eigen3/Eigen/Core" #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/kernel_shape_util.h" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_slice.h" #include "tensorflow/core/kernels/eigen_pooling.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/tensor_format.h" #include "tensorflow/core/util/work_sharder.h" #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #include "tensorflow/core/kernels/cudnn_pooling_gpu.h" #include "tensorflow/core/kernels/pooling_ops_3d_gpu.h" #endif namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; Pool3dParameters::Pool3dParameters(OpKernelContext* context, const std::vector<int32>& ksize, const std::vector<int32>& stride, Padding padding, TensorFormat data_format, const TensorShape& tensor_in_shape) { // For maxpooling, tensor_in should have 4 dimensions. OP_REQUIRES(context, tensor_in_shape.dims() == 5, errors::InvalidArgument("tensor_in must be 4-dimensional")); this->data_format = data_format; depth = GetTensorDim(tensor_in_shape, data_format, 'C'); tensor_in_planes = GetTensorDim(tensor_in_shape, data_format, '0'); tensor_in_rows = GetTensorDim(tensor_in_shape, data_format, '1'); tensor_in_cols = GetTensorDim(tensor_in_shape, data_format, '2'); tensor_in_batch = GetTensorDim(tensor_in_shape, data_format, 'N'); window_planes = GetTensorDim(ksize, data_format, '0'); window_rows = GetTensorDim(ksize, data_format, '1'); window_cols = GetTensorDim(ksize, data_format, '2'); depth_window = GetTensorDim(ksize, data_format, 'C'); plane_stride = GetTensorDim(stride, data_format, '0'); row_stride = GetTensorDim(stride, data_format, '1'); col_stride = GetTensorDim(stride, data_format, '2'); depth_stride = GetTensorDim(stride, data_format, 'C'); // We only support 3D pooling across plane/width/height. Depthwise // pooling is not supported. 
OP_REQUIRES( context, depth_window == 1 && depth_stride == 1, errors::Unimplemented( "Pooling3d only supports pooling across plane/width/height.")); OP_REQUIRES_OK(context, GetWindowedOutputSize(tensor_in_planes, window_planes, plane_stride, padding, &out_plane, &pad_planes)); OP_REQUIRES_OK(context, GetWindowedOutputSize(tensor_in_rows, window_rows, row_stride, padding, &out_height, &pad_rows)); OP_REQUIRES_OK(context, GetWindowedOutputSize(tensor_in_cols, window_cols, col_stride, padding, &out_width, &pad_cols)); } TensorShape Pool3dParameters::forward_output_shape() { return ShapeFromFormat(data_format, tensor_in_batch, {{out_plane, out_height, out_width}}, depth); } template <typename T> struct LaunchPoolingOp<CPUDevice, T, AVG> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& padding, TensorFormat data_format, Padding padding_type, Tensor* output) { output->tensor<T, 5>().device(context->eigen_device<CPUDevice>()) = Eigen::CuboidAvgPooling(tensor_in.tensor<T, 5>(), window[0], window[1], window[2], stride[0], stride[1], stride[2], BrainPadding2EigenPadding(padding_type)); } }; template <typename T> struct LaunchPoolingOp<CPUDevice, T, MAX> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& padding, TensorFormat data_format, Padding padding_type, Tensor* output) { output->tensor<T, 5>().device(context->eigen_device<CPUDevice>()) = Eigen::CuboidMaxPooling(tensor_in.tensor<T, 5>(), window[0], window[1], window[2], stride[0], stride[1], stride[2], BrainPadding2EigenPadding(padding_type)); } }; template <typename Device, typename T, PoolingType Type> class Pooling3DOp : public UnaryOp<T> { public: explicit Pooling3DOp(OpKernelConstruction* context) : UnaryOp<T>(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); if (context->device_type() == DEVICE_CPU) { OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument("Default Pooling3DOp only supports NDHWC ", "on device type ", DeviceTypeString(context->device_type()))); } OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 5, errors::InvalidArgument("Sliding window ksize field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window stride field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'N') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'C') == 1), errors::Unimplemented( "Pooling is not yet supported on the depth dimension.")); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); OP_REQUIRES(context, tensor_in.dims() == 5, errors::InvalidArgument("tensor_in must be 5-dimensional")); const int64 depth = GetTensorDim(tensor_in, data_format_, 'C'); const int64 in_batch = GetTensorDim(tensor_in, data_format_, 'N'); 
// Dimension order for these arrays is: x, y, z. std::array<int64, 3> input_size{ {GetTensorDim(tensor_in, data_format_, '2'), GetTensorDim(tensor_in, data_format_, '1'), GetTensorDim(tensor_in, data_format_, '0')}}; std::array<int64, 3> window{{GetTensorDim(ksize_, data_format_, '2'), GetTensorDim(ksize_, data_format_, '1'), GetTensorDim(ksize_, data_format_, '0')}}; std::array<int64, 3> stride{{GetTensorDim(stride_, data_format_, '2'), GetTensorDim(stride_, data_format_, '1'), GetTensorDim(stride_, data_format_, '0')}}; std::array<int64, 3> padding, out; OP_REQUIRES_OK(context, Get3dOutputSize(input_size, window, stride, padding_, &out, &padding)); TensorShape out_shape = ShapeFromFormat(data_format_, in_batch, {{out[2], out[1], out[0]}}, depth); Tensor* output; OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output)); if (out_shape.num_elements() == 0) return; LaunchPoolingOp<Device, T, Type>::launch(context, tensor_in, window, stride, padding, data_format_, padding_, output); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; template <typename T> struct LaunchMaxPooling3dGradOp<CPUDevice, T> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const Tensor& tensor_out, const Tensor& out_backprop, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& out, const std::array<int64, 3>& padding, TensorFormat data_format, Tensor* output) { output->flat<T>().setZero(); for (int64 p = 0; p < out_backprop.dim_size(3); ++p) { // Calculate broadcast size for planes/rows/cols. For SAME padding, // current index could be in the padding area, and // p * stride_planes + window_planes // could be beyond the input tensor's boundary. In such cases, change // the starting index and reduce the broadcast size. // // The same procedure is repeated for every spatial dimension in the // nested loops below. int pindex, psize; std::array<int64, 3> input_size{{tensor_in.dim_size(3), tensor_in.dim_size(2), tensor_in.dim_size(1)}}; OP_REQUIRES_OK(context, GetBroadcastSize(p, input_size[0], window[0], stride[0], padding[0], &pindex, &psize)); for (int64 r = 0; r < out_backprop.dim_size(2); ++r) { int rindex, rsize; OP_REQUIRES_OK(context, GetBroadcastSize(r, input_size[1], window[1], stride[1], padding[1], &rindex, &rsize)); for (int64 c = 0; c < out_backprop.dim_size(1); ++c) { int cindex, csize; OP_REQUIRES_OK( context, GetBroadcastSize(c, input_size[2], window[2], stride[2], padding[2], &cindex, &csize)); TensorSlice src{{0, -1}, {c, 1}, {r, 1}, {p, 1}, {0, -1}}; TensorSlice dst{{0, -1}, {cindex, csize}, {rindex, rsize}, {pindex, psize}, {0, -1}}; Eigen::DSizes<Eigen::DenseIndex, 5> src_indices; Eigen::DSizes<Eigen::DenseIndex, 5> src_sizes; Eigen::DSizes<Eigen::DenseIndex, 5> dst_indices; Eigen::DSizes<Eigen::DenseIndex, 5> dst_sizes; src.FillIndicesAndSizes<5>(out_backprop.shape(), &src_indices, &src_sizes); dst.FillIndicesAndSizes<5>(tensor_in.shape(), &dst_indices, &dst_sizes); #if !defined(EIGEN_HAS_INDEX_LIST) Eigen::array<int, 5> bcast = {1, csize, rsize, psize, 1}; #else Eigen::IndexList<Eigen::type2index<1>, int, int, int, Eigen::type2index<1>> bcast; bcast.set(1, csize); bcast.set(2, rsize); bcast.set(3, psize); #endif // Slice from tensor_in. Eigen::Tensor<T, 5, Eigen::RowMajor> tensor_in_slice(dst_sizes); tensor_in_slice.device(context->eigen_cpu_device()) = tensor_in.tensor<T, 5>().slice(dst_indices, dst_sizes); // Slice from tensor_out. 
Eigen::Tensor<T, 5, Eigen::RowMajor> tensor_out_slice(src_sizes); tensor_out_slice.device(context->eigen_cpu_device()) = tensor_out.tensor<T, 5>().slice(src_indices, src_sizes); // Backprop slice. Eigen::Tensor<T, 5, Eigen::RowMajor> out_backprop_slice(src_sizes); out_backprop_slice.device(context->eigen_cpu_device()) = out_backprop.tensor<T, 5>().slice(src_indices, src_sizes); // The true backprop slice: if an element is the max, choose // the backprop slice; otherwise set to 0. Eigen::Tensor<T, 5, Eigen::RowMajor> select_slice(dst_sizes); Eigen::Tensor<T, 5, Eigen::RowMajor> mat0(dst_sizes); mat0.setZero(); select_slice = ((tensor_in_slice - tensor_out_slice.broadcast(bcast)).abs() < tensor_in_slice.constant(1e-5)) .select(out_backprop_slice.broadcast(bcast), mat0); output->tensor<T, 5>() .slice(dst_indices, dst_sizes) .device(context->eigen_cpu_device()) += select_slice; } } } } }; template <class Device, class T> class MaxPooling3dGradOp : public OpKernel { public: explicit MaxPooling3dGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); if (context->device_type() == DEVICE_CPU) { OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Default MaxPooling3dGradOp only supports NDHWC ", "on device type ", DeviceTypeString(context->device_type()))); } OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 5, errors::InvalidArgument("Sliding window ksize field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window stride field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'N') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'C') == 1), errors::Unimplemented( "Pooling is not yet supported on the depth dimension.")); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); const Tensor& tensor_out = context->input(1); const Tensor& out_backprop = context->input(2); OP_REQUIRES(context, tensor_in.dims() == 5, errors::InvalidArgument("tensor_in must be 5-dimensional")); OP_REQUIRES(context, tensor_out.dims() == 5, errors::InvalidArgument("tensor_out must be 5-dimensional")); OP_REQUIRES(context, out_backprop.dims() == 5, errors::InvalidArgument("out_backprop must be 5-dimensional")); const TensorShape& output_shape = tensor_in.shape(); Tensor* input_backprop; OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &input_backprop)); std::array<int64, 3> input_size{ {GetTensorDim(output_shape, data_format_, '2'), GetTensorDim(output_shape, data_format_, '1'), GetTensorDim(output_shape, data_format_, '0')}}; std::array<int64, 3> window{{GetTensorDim(ksize_, data_format_, '2'), GetTensorDim(ksize_, data_format_, '1'), GetTensorDim(ksize_, data_format_, '0')}}; std::array<int64, 3> stride{{GetTensorDim(stride_, data_format_, '2'), GetTensorDim(stride_, data_format_, '1'), GetTensorDim(stride_, data_format_, '0')}}; std::array<int64, 3> out, padding; 
OP_REQUIRES_OK(context, Get3dOutputSize(input_size, window, stride, padding_, &out, &padding)); LaunchMaxPooling3dGradOp<Device, T>::launch( context, tensor_in, tensor_out, out_backprop, window, stride, out, padding, data_format_, input_backprop); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; template <typename T> struct LaunchAvgPooling3dGradOp<CPUDevice, T> { static void launch(OpKernelContext* context, const TensorShape& tensor_in_shape, const Tensor& out_backprop, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& output_shape, const std::array<int64, 3>& padding, TensorFormat data_format, Tensor* output) { output->flat<T>().setZero(); std::array<int64, 3> input_size = {{tensor_in_shape.dim_size(3), tensor_in_shape.dim_size(2), tensor_in_shape.dim_size(1)}}; for (int64 p = 0; p < out_backprop.dim_size(3); ++p) { // Calculate broadcast size for planes/rows/cols. For SAME padding, // current index could be in the padding area, and // p * stride_planes + window_planes // could be beyond the input tensor's boundary. In such cases, change // the starting index and reduce the broadcast size. // // The same procedure is repeated for every spatial dimension in the // nested loops below. int pindex, psize; OP_REQUIRES_OK(context, GetBroadcastSize(p, input_size[0], window[0], stride[0], padding[0], &pindex, &psize)); for (int64 r = 0; r < out_backprop.dim_size(2); ++r) { int rindex, rsize; OP_REQUIRES_OK(context, GetBroadcastSize(r, input_size[1], window[1], stride[1], padding[1], &rindex, &rsize)); for (int64 c = 0; c < out_backprop.dim_size(1); ++c) { int cindex, csize; OP_REQUIRES_OK( context, GetBroadcastSize(c, input_size[2], window[2], stride[2], padding[2], &cindex, &csize)); TensorSlice src{{0, -1}, {c, 1}, {r, 1}, {p, 1}, {0, -1}}; TensorSlice dst{{0, -1}, {cindex, csize}, {rindex, rsize}, {pindex, psize}, {0, -1}}; Eigen::DSizes<Eigen::DenseIndex, 5> src_indices; Eigen::DSizes<Eigen::DenseIndex, 5> src_sizes; Eigen::DSizes<Eigen::DenseIndex, 5> dst_indices; Eigen::DSizes<Eigen::DenseIndex, 5> dst_sizes; src.FillIndicesAndSizes<5>(out_backprop.shape(), &src_indices, &src_sizes); dst.FillIndicesAndSizes<5>(tensor_in_shape, &dst_indices, &dst_sizes); #if !defined(EIGEN_HAS_INDEX_LIST) Eigen::array<int, 5> bcast = {1, csize, rsize, psize, 1}; #else Eigen::IndexList<Eigen::type2index<1>, int, int, int, Eigen::type2index<1>> bcast; bcast.set(1, csize); bcast.set(2, rsize); bcast.set(3, psize); #endif Eigen::Tensor<T, 5, Eigen::RowMajor> slices(src_sizes); slices.device(context->eigen_cpu_device()) = out_backprop.tensor<T, 5>().slice(src_indices, src_sizes); // Divide by the size of the actual patch (psize * rsize * csize). 
float divide_size = rsize * csize * psize * 1.0f; slices *= slices.constant(1.0f / divide_size); output->tensor<T, 5>() .slice(dst_indices, dst_sizes) .device(context->eigen_cpu_device()) += slices.broadcast(bcast); } } } } }; template <class Device, class T> class AvgPooling3dGradOp : public OpKernel { public: explicit AvgPooling3dGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); if (context->device_type() == DEVICE_CPU) { OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Default AvgPooling3dGradOp only supports NDHWC ", "on device type ", DeviceTypeString(context->device_type()))); } OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 5, errors::InvalidArgument("Sliding window ksize field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window stride field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'N') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'C') == 1), errors::Unimplemented( "Pooling is not yet supported on the depth dimension.")); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in_shape = context->input(0); const Tensor& out_backprop = context->input(1); OP_REQUIRES( context, tensor_in_shape.dims() == 1 && tensor_in_shape.NumElements() == 5, errors::InvalidArgument("tensor_in must be 1-dimensional and 5 " "elements")); OP_REQUIRES(context, out_backprop.dims() == 5, errors::InvalidArgument("out_backprop must be 5-dimensional")); TensorShape output_shape; auto shape_vec = tensor_in_shape.vec<int32>(); for (int64 i = 0; i < tensor_in_shape.NumElements(); ++i) { output_shape.AddDim(shape_vec(i)); } Tensor* output; OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output)); // Dimension order for these arrays is x, y, z. 
std::array<int64, 3> input_size{ {GetTensorDim(output_shape, data_format_, '2'), GetTensorDim(output_shape, data_format_, '1'), GetTensorDim(output_shape, data_format_, '0')}}; std::array<int64, 3> window{{GetTensorDim(ksize_, data_format_, '2'), GetTensorDim(ksize_, data_format_, '1'), GetTensorDim(ksize_, data_format_, '0')}}; std::array<int64, 3> stride{{GetTensorDim(stride_, data_format_, '2'), GetTensorDim(stride_, data_format_, '1'), GetTensorDim(stride_, data_format_, '0')}}; std::array<int64, 3> padding, out; OP_REQUIRES_OK(context, Get3dOutputSize(input_size, window, stride, padding_, &out, &padding)); LaunchAvgPooling3dGradOp<Device, T>::launch( context, output_shape, out_backprop, window, stride, out, padding, data_format_, output); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; template <typename T> struct LaunchMaxPooling3dGradGradOp<CPUDevice, T> { static void launch(OpKernelContext* context, const Pool3dParameters& params, const Tensor& tensor_in, const Tensor& tensor_out, const Tensor& tensor_top_diff, Tensor* tensor_bottom_diff) { OP_REQUIRES( context, params.data_format == FORMAT_NHWC, errors::InvalidArgument("Default MaxPooling3dGradGradOp only supports", "NDHWC on CPU device type")); typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> EigenMatrixMap; ConstEigenMatrixMap in_mat(tensor_in.flat<T>().data(), params.depth, params.tensor_in_planes * params.tensor_in_cols * params.tensor_in_rows * params.tensor_in_batch); ConstEigenMatrixMap out_mat(tensor_out.flat<T>().data(), params.depth, params.out_plane * params.out_width * params.out_height * params.tensor_in_batch); ConstEigenMatrixMap top_diff_mat( tensor_top_diff.flat<T>().data(), params.depth, params.tensor_in_planes * params.tensor_in_cols * params.tensor_in_rows * params.tensor_in_batch); EigenMatrixMap bottom_diff_mat( tensor_bottom_diff->flat<T>().data(), params.depth, params.out_plane * params.out_width * params.out_height * params.tensor_in_batch); const DeviceBase::CpuWorkerThreads& worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); auto shard = [&params, &in_mat, &out_mat, &top_diff_mat, &bottom_diff_mat]( int64 start, int64 limit) { const int32 depth = params.depth; const int32 in_planes = params.tensor_in_planes; const int32 in_rows = params.tensor_in_rows; const int32 in_cols = params.tensor_in_cols; const int32 pad_planes = params.pad_planes; const int32 pad_rows = params.pad_rows; const int32 pad_cols = params.pad_cols; const int32 window_planes = params.window_planes; const int32 window_rows = params.window_rows; const int32 window_cols = params.window_cols; const int32 plane_stride = params.plane_stride; const int32 row_stride = params.row_stride; const int32 col_stride = params.col_stride; const int32 out_plane = params.out_plane; const int32 out_height = params.out_height; const int32 out_width = params.out_width; { // Initializes the output grad backprop tensor with 0. 
const int32 output_image_size = out_plane * out_height * out_width * params.depth; EigenMatrixMap bottom_diff_shard( bottom_diff_mat.data() + start * output_image_size, 1, (limit - start) * output_image_size); bottom_diff_shard.setZero(); } for (int b = start; b < limit; ++b) { for (int pp = 0; pp < out_plane; ++pp) { for (int ph = 0; ph < out_height; ++ph) { for (int pw = 0; pw < out_width; ++pw) { // (p_start, p_end) * (h_start, h_end) * (w_start, w_end) is the // range that the input vector projects to. int p_start = pp * plane_stride - pad_planes; const int p_end = std::min(p_start + window_planes, in_planes); int h_start = ph * row_stride - pad_rows; const int h_end = std::min(h_start + window_rows, in_rows); int w_start = pw * col_stride - pad_cols; const int w_end = std::min(w_start + window_cols, in_cols); p_start = std::max(p_start, 0); h_start = std::max(h_start, 0); w_start = std::max(w_start, 0); const int out_index = ((b * out_plane + pp) * out_height + ph) * out_width + pw; // Find value corresponding to the input maximum in top_diff. for (int d = 0; d < depth; ++d) { const T& output_ref = out_mat.coeffRef(d, out_index); bool should_stop = false; for (int p = p_start; p < p_end && !should_stop; ++p) { for (int h = h_start; h < h_end && !should_stop; ++h) { for (int w = w_start; w < w_end && !should_stop; ++w) { const int in_index = ((b * in_planes + p) * in_rows + h) * in_cols + w; const T& input_ref = in_mat.coeffRef(d, in_index); if (output_ref == input_ref) { T& bottom_diff_ref = bottom_diff_mat.coeffRef(d, out_index); bottom_diff_ref = top_diff_mat.coeffRef(d, in_index); should_stop = true; } } } } } } } } } }; const int64 shard_cost = params.out_plane * params.out_height * params.out_width * params.depth * params.window_planes * params.window_rows * params.window_cols; Shard(worker_threads.num_threads, worker_threads.workers, params.tensor_in_batch, shard_cost, shard); } }; template <class Device, class T> class MaxPooling3dGradGradOp : public OpKernel { public: explicit MaxPooling3dGradGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 5, errors::InvalidArgument("Sliding window ksize field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window strides field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, ksize_[0] == 1 && stride_[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); const int32 ksize_c = GetTensorDim(ksize_, data_format_, 'C'); const int32 stride_c = GetTensorDim(stride_, data_format_, 'C'); OP_REQUIRES(context, ksize_c == 1 && stride_c == 1, errors::Unimplemented("MaxPooling3dGradGrad is not yet " "supported on the depth dimension.")); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); const Tensor& tensor_out = context->input(1); const Tensor& out_grad_backprop = context->input(2); // For maxpooling3d, tensor_in should have 5 dimensions. 
OP_REQUIRES(context, tensor_in.dims() == 5, errors::InvalidArgument("tensor_in must be 5-dimensional")); OP_REQUIRES(context, tensor_out.dims() == 5, errors::InvalidArgument("tensor_out must be 5-dimensional")); // For maxpooling3d, out_grad_backprop should have 5 dimensions. OP_REQUIRES( context, out_grad_backprop.dims() == 5, errors::InvalidArgument("out_grad_backprop must be 5-dimensional")); Pool3dParameters params{context, ksize_, stride_, padding_, data_format_, tensor_in.shape()}; if (!context->status().ok()) return; // params is invalid Tensor* output = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {2}, 0, tensor_out.shape(), &output)); // Given access patterns in LaunchMaxPooling3dGradGradOp, these tensors must // have elements. OP_REQUIRES(context, tensor_in.NumElements() > 0, errors::InvalidArgument("received empty tensor tensor_in: ", tensor_in.DebugString())); OP_REQUIRES(context, tensor_out.NumElements() > 0, errors::InvalidArgument("received empty tensor tensor_out: ", tensor_out.DebugString())); OP_REQUIRES( context, out_grad_backprop.NumElements() > 0, errors::InvalidArgument("received empty tensor out_grad_backprop: ", out_grad_backprop.DebugString())); OP_REQUIRES(context, tensor_in.NumElements() == out_grad_backprop.NumElements(), errors::InvalidArgument("tensor_in and out_grad_backprop must " "have same number of elements, got <", tensor_in.DebugString(), "> and <", out_grad_backprop.DebugString(), ">")); OP_REQUIRES( context, tensor_out.NumElements() == output->NumElements(), errors::InvalidArgument( "tensor_out and output must have same number of elements, got <", tensor_out.DebugString(), "> and <", output->DebugString(), ">")); LaunchMaxPooling3dGradGradOp<Device, T>::launch( context, params, tensor_in, tensor_out, out_grad_backprop, output); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; #define REGISTER_KERNELS(D, T) \ REGISTER_KERNEL_BUILDER( \ Name("MaxPool3D").Device(DEVICE_##D).TypeConstraint<T>("T"), \ Pooling3DOp<D##Device, T, MAX>); \ REGISTER_KERNEL_BUILDER(Name("MaxPool3DGrad") \ .Device(DEVICE_##D) \ .TypeConstraint<T>("T") \ .TypeConstraint<T>("TInput"), \ MaxPooling3dGradOp<D##Device, T>); \ REGISTER_KERNEL_BUILDER( \ Name("MaxPool3DGradGrad").Device(DEVICE_##D).TypeConstraint<T>("T"), \ MaxPooling3dGradGradOp<D##Device, T>); \ REGISTER_KERNEL_BUILDER( \ Name("AvgPool3D").Device(DEVICE_##D).TypeConstraint<T>("T"), \ Pooling3DOp<D##Device, T, AVG>); \ REGISTER_KERNEL_BUILDER(Name("AvgPool3DGrad") \ .Device(DEVICE_##D) \ .TypeConstraint<T>("T") \ .HostMemory("orig_input_shape"), \ AvgPooling3dGradOp<D##Device, T>); #define REGISTER_CPU_KERNELS(T) REGISTER_KERNELS(CPU, T) TF_CALL_float(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename T> struct LaunchPoolingOp<GPUDevice, T, AVG> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& padding, TensorFormat data_format, Padding padding_type, Tensor* output) { DnnPooling3dOp<T>::Compute(context, se::dnn::PoolingMode::kAverage, window, stride, padding, data_format, tensor_in, output); } }; template <typename T> struct LaunchPoolingOp<GPUDevice, T, MAX> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& padding, 
TensorFormat data_format, Padding padding_type, Tensor* output) { DnnPooling3dOp<T>::Compute(context, se::dnn::PoolingMode::kMaximum, window, stride, padding, data_format, tensor_in, output); } }; template <typename T> struct LaunchMaxPooling3dGradOp<GPUDevice, T> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const Tensor& tensor_out, const Tensor& out_backprop, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& out, const std::array<int64, 3>& padding, TensorFormat data_format, Tensor* input_backprop) { const TensorShape output_shape = tensor_in.shape(); DnnPooling3dGradOp<T>::Compute(context, se::dnn::PoolingMode::kMaximum, window, stride, padding, out, data_format, out_backprop, output_shape, &tensor_in, &tensor_out, input_backprop); } }; template <typename T> struct LaunchAvgPooling3dGradOp<GPUDevice, T> { static void launch(OpKernelContext* context, const TensorShape& tensor_in_shape, const Tensor& out_backprop, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& out, const std::array<int64, 3>& padding, TensorFormat data_format, Tensor* output) { DnnPooling3dGradOp<T>::Compute( context, se::dnn::PoolingMode::kAverage, window, stride, padding, out, data_format, out_backprop, tensor_in_shape, nullptr, nullptr, output); } }; template <typename T> struct LaunchMaxPooling3dGradGradOp<GPUDevice, T> { static void launch(OpKernelContext* context, const Pool3dParameters& params, const Tensor& tensor_in, const Tensor& tensor_out, const Tensor& tensor_top_diff, Tensor* tensor_bottom_diff) { bool status = functor::MaxPool3dGradBackward<T>()( params.data_format, tensor_in.flat<T>().data(), tensor_out.flat<T>().data(), params.tensor_in_batch, params.out_plane, params.out_height, params.out_width, params.depth, params.tensor_in_planes, params.tensor_in_rows, params.tensor_in_cols, params.window_planes, params.window_rows, params.window_cols, params.plane_stride, params.row_stride, params.col_stride, params.pad_planes, params.pad_rows, params.pad_cols, tensor_top_diff.flat<T>().data(), tensor_bottom_diff->flat<T>().data(), context->eigen_gpu_device()); if (!status) { context->SetStatus( errors::Internal("Failed launching MaxPool3dGradBackward")); } } }; #define REGISTER_GPU_KERNELS(T) REGISTER_KERNELS(GPU, T) TF_CALL_float(REGISTER_GPU_KERNELS) TF_CALL_half(REGISTER_GPU_KERNELS) #undef REGISTER_GPU_KERNELS #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM #undef REGISTER_KERNELS } // namespace tensorflow
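In the pre-patch source above, LaunchAvgPooling3dGradOp<CPUDevice, T> derives input_size from tensor_in_shape (the op's orig_input_shape input) while its loop bounds and src slices come from out_backprop, and nothing verifies that the batch and channel extents of the two agree. A forged orig_input_shape therefore lets the final slice-accumulate into output write past the buffer that was allocated from that shape, which is the CWE-787 write behind CVE-2021-29577. The patched ground_truth in this row validates both dimensions up front; a minimal sketch of that guard, restated from the ground_truth cell rather than written fresh:

// Added near the top of LaunchAvgPooling3dGradOp<CPUDevice, T>::launch():
// reject gradients whose batch (dim 0) or channel (dim 4) extent does not
// match the caller-supplied original input shape before any slicing happens.
OP_REQUIRES(
    context, tensor_in_shape.dim_size(0) == out_backprop.dim_size(0),
    errors::InvalidArgument("Expected first dimension of tensor_in_shape and "
                            "out_backprop to match, got ",
                            tensor_in_shape.dim_size(0), " and ",
                            out_backprop.dim_size(0)));
OP_REQUIRES(
    context, tensor_in_shape.dim_size(4) == out_backprop.dim_size(4),
    errors::InvalidArgument("Expected last dimension of tensor_in_shape and "
                            "out_backprop to match, got ",
                            tensor_in_shape.dim_size(4), " and ",
                            out_backprop.dim_size(4)));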
null
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #define EIGEN_USE_THREADS #include "tensorflow/core/kernels/pooling_ops_3d.h" #include <array> #include "third_party/eigen3/Eigen/Core" #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/kernel_shape_util.h" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_slice.h" #include "tensorflow/core/kernels/eigen_pooling.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/tensor_format.h" #include "tensorflow/core/util/work_sharder.h" #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #include "tensorflow/core/kernels/cudnn_pooling_gpu.h" #include "tensorflow/core/kernels/pooling_ops_3d_gpu.h" #endif namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; Pool3dParameters::Pool3dParameters(OpKernelContext* context, const std::vector<int32>& ksize, const std::vector<int32>& stride, Padding padding, TensorFormat data_format, const TensorShape& tensor_in_shape) { // For maxpooling, tensor_in should have 4 dimensions. OP_REQUIRES(context, tensor_in_shape.dims() == 5, errors::InvalidArgument("tensor_in must be 4-dimensional")); this->data_format = data_format; depth = GetTensorDim(tensor_in_shape, data_format, 'C'); tensor_in_planes = GetTensorDim(tensor_in_shape, data_format, '0'); tensor_in_rows = GetTensorDim(tensor_in_shape, data_format, '1'); tensor_in_cols = GetTensorDim(tensor_in_shape, data_format, '2'); tensor_in_batch = GetTensorDim(tensor_in_shape, data_format, 'N'); window_planes = GetTensorDim(ksize, data_format, '0'); window_rows = GetTensorDim(ksize, data_format, '1'); window_cols = GetTensorDim(ksize, data_format, '2'); depth_window = GetTensorDim(ksize, data_format, 'C'); plane_stride = GetTensorDim(stride, data_format, '0'); row_stride = GetTensorDim(stride, data_format, '1'); col_stride = GetTensorDim(stride, data_format, '2'); depth_stride = GetTensorDim(stride, data_format, 'C'); // We only support 3D pooling across plane/width/height. Depthwise // pooling is not supported. 
OP_REQUIRES( context, depth_window == 1 && depth_stride == 1, errors::Unimplemented( "Pooling3d only supports pooling across plane/width/height.")); OP_REQUIRES_OK(context, GetWindowedOutputSize(tensor_in_planes, window_planes, plane_stride, padding, &out_plane, &pad_planes)); OP_REQUIRES_OK(context, GetWindowedOutputSize(tensor_in_rows, window_rows, row_stride, padding, &out_height, &pad_rows)); OP_REQUIRES_OK(context, GetWindowedOutputSize(tensor_in_cols, window_cols, col_stride, padding, &out_width, &pad_cols)); } TensorShape Pool3dParameters::forward_output_shape() { return ShapeFromFormat(data_format, tensor_in_batch, {{out_plane, out_height, out_width}}, depth); } template <typename T> struct LaunchPoolingOp<CPUDevice, T, AVG> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& padding, TensorFormat data_format, Padding padding_type, Tensor* output) { output->tensor<T, 5>().device(context->eigen_device<CPUDevice>()) = Eigen::CuboidAvgPooling(tensor_in.tensor<T, 5>(), window[0], window[1], window[2], stride[0], stride[1], stride[2], BrainPadding2EigenPadding(padding_type)); } }; template <typename T> struct LaunchPoolingOp<CPUDevice, T, MAX> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& padding, TensorFormat data_format, Padding padding_type, Tensor* output) { output->tensor<T, 5>().device(context->eigen_device<CPUDevice>()) = Eigen::CuboidMaxPooling(tensor_in.tensor<T, 5>(), window[0], window[1], window[2], stride[0], stride[1], stride[2], BrainPadding2EigenPadding(padding_type)); } }; template <typename Device, typename T, PoolingType Type> class Pooling3DOp : public UnaryOp<T> { public: explicit Pooling3DOp(OpKernelConstruction* context) : UnaryOp<T>(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); if (context->device_type() == DEVICE_CPU) { OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument("Default Pooling3DOp only supports NDHWC ", "on device type ", DeviceTypeString(context->device_type()))); } OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 5, errors::InvalidArgument("Sliding window ksize field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window stride field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'N') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'C') == 1), errors::Unimplemented( "Pooling is not yet supported on the depth dimension.")); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); OP_REQUIRES(context, tensor_in.dims() == 5, errors::InvalidArgument("tensor_in must be 5-dimensional")); const int64 depth = GetTensorDim(tensor_in, data_format_, 'C'); const int64 in_batch = GetTensorDim(tensor_in, data_format_, 'N'); 
// Dimension order for these arrays is: x, y, z. std::array<int64, 3> input_size{ {GetTensorDim(tensor_in, data_format_, '2'), GetTensorDim(tensor_in, data_format_, '1'), GetTensorDim(tensor_in, data_format_, '0')}}; std::array<int64, 3> window{{GetTensorDim(ksize_, data_format_, '2'), GetTensorDim(ksize_, data_format_, '1'), GetTensorDim(ksize_, data_format_, '0')}}; std::array<int64, 3> stride{{GetTensorDim(stride_, data_format_, '2'), GetTensorDim(stride_, data_format_, '1'), GetTensorDim(stride_, data_format_, '0')}}; std::array<int64, 3> padding, out; OP_REQUIRES_OK(context, Get3dOutputSize(input_size, window, stride, padding_, &out, &padding)); TensorShape out_shape = ShapeFromFormat(data_format_, in_batch, {{out[2], out[1], out[0]}}, depth); Tensor* output; OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output)); if (out_shape.num_elements() == 0) return; LaunchPoolingOp<Device, T, Type>::launch(context, tensor_in, window, stride, padding, data_format_, padding_, output); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; template <typename T> struct LaunchMaxPooling3dGradOp<CPUDevice, T> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const Tensor& tensor_out, const Tensor& out_backprop, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& out, const std::array<int64, 3>& padding, TensorFormat data_format, Tensor* output) { output->flat<T>().setZero(); for (int64 p = 0; p < out_backprop.dim_size(3); ++p) { // Calculate broadcast size for planes/rows/cols. For SAME padding, // current index could be in the padding area, and // p * stride_planes + window_planes // could be beyond the input tensor's boundary. In such cases, change // the starting index and reduce the broadcast size. // // The same procedure is repeated for every spatial dimension in the // nested loops below. int pindex, psize; std::array<int64, 3> input_size{{tensor_in.dim_size(3), tensor_in.dim_size(2), tensor_in.dim_size(1)}}; OP_REQUIRES_OK(context, GetBroadcastSize(p, input_size[0], window[0], stride[0], padding[0], &pindex, &psize)); for (int64 r = 0; r < out_backprop.dim_size(2); ++r) { int rindex, rsize; OP_REQUIRES_OK(context, GetBroadcastSize(r, input_size[1], window[1], stride[1], padding[1], &rindex, &rsize)); for (int64 c = 0; c < out_backprop.dim_size(1); ++c) { int cindex, csize; OP_REQUIRES_OK( context, GetBroadcastSize(c, input_size[2], window[2], stride[2], padding[2], &cindex, &csize)); TensorSlice src{{0, -1}, {c, 1}, {r, 1}, {p, 1}, {0, -1}}; TensorSlice dst{{0, -1}, {cindex, csize}, {rindex, rsize}, {pindex, psize}, {0, -1}}; Eigen::DSizes<Eigen::DenseIndex, 5> src_indices; Eigen::DSizes<Eigen::DenseIndex, 5> src_sizes; Eigen::DSizes<Eigen::DenseIndex, 5> dst_indices; Eigen::DSizes<Eigen::DenseIndex, 5> dst_sizes; src.FillIndicesAndSizes<5>(out_backprop.shape(), &src_indices, &src_sizes); dst.FillIndicesAndSizes<5>(tensor_in.shape(), &dst_indices, &dst_sizes); #if !defined(EIGEN_HAS_INDEX_LIST) Eigen::array<int, 5> bcast = {1, csize, rsize, psize, 1}; #else Eigen::IndexList<Eigen::type2index<1>, int, int, int, Eigen::type2index<1>> bcast; bcast.set(1, csize); bcast.set(2, rsize); bcast.set(3, psize); #endif // Slice from tensor_in. Eigen::Tensor<T, 5, Eigen::RowMajor> tensor_in_slice(dst_sizes); tensor_in_slice.device(context->eigen_cpu_device()) = tensor_in.tensor<T, 5>().slice(dst_indices, dst_sizes); // Slice from tensor_out. 
Eigen::Tensor<T, 5, Eigen::RowMajor> tensor_out_slice(src_sizes); tensor_out_slice.device(context->eigen_cpu_device()) = tensor_out.tensor<T, 5>().slice(src_indices, src_sizes); // Backprop slice. Eigen::Tensor<T, 5, Eigen::RowMajor> out_backprop_slice(src_sizes); out_backprop_slice.device(context->eigen_cpu_device()) = out_backprop.tensor<T, 5>().slice(src_indices, src_sizes); // The true backprop slice: if an element is the max, choose // the backprop slice; otherwise set to 0. Eigen::Tensor<T, 5, Eigen::RowMajor> select_slice(dst_sizes); Eigen::Tensor<T, 5, Eigen::RowMajor> mat0(dst_sizes); mat0.setZero(); select_slice = ((tensor_in_slice - tensor_out_slice.broadcast(bcast)).abs() < tensor_in_slice.constant(1e-5)) .select(out_backprop_slice.broadcast(bcast), mat0); output->tensor<T, 5>() .slice(dst_indices, dst_sizes) .device(context->eigen_cpu_device()) += select_slice; } } } } }; template <class Device, class T> class MaxPooling3dGradOp : public OpKernel { public: explicit MaxPooling3dGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); if (context->device_type() == DEVICE_CPU) { OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Default MaxPooling3dGradOp only supports NDHWC ", "on device type ", DeviceTypeString(context->device_type()))); } OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 5, errors::InvalidArgument("Sliding window ksize field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window stride field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'N') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'C') == 1), errors::Unimplemented( "Pooling is not yet supported on the depth dimension.")); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); const Tensor& tensor_out = context->input(1); const Tensor& out_backprop = context->input(2); OP_REQUIRES(context, tensor_in.dims() == 5, errors::InvalidArgument("tensor_in must be 5-dimensional")); OP_REQUIRES(context, tensor_out.dims() == 5, errors::InvalidArgument("tensor_out must be 5-dimensional")); OP_REQUIRES(context, out_backprop.dims() == 5, errors::InvalidArgument("out_backprop must be 5-dimensional")); const TensorShape& output_shape = tensor_in.shape(); Tensor* input_backprop; OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &input_backprop)); std::array<int64, 3> input_size{ {GetTensorDim(output_shape, data_format_, '2'), GetTensorDim(output_shape, data_format_, '1'), GetTensorDim(output_shape, data_format_, '0')}}; std::array<int64, 3> window{{GetTensorDim(ksize_, data_format_, '2'), GetTensorDim(ksize_, data_format_, '1'), GetTensorDim(ksize_, data_format_, '0')}}; std::array<int64, 3> stride{{GetTensorDim(stride_, data_format_, '2'), GetTensorDim(stride_, data_format_, '1'), GetTensorDim(stride_, data_format_, '0')}}; std::array<int64, 3> out, padding; 
OP_REQUIRES_OK(context, Get3dOutputSize(input_size, window, stride, padding_, &out, &padding)); LaunchMaxPooling3dGradOp<Device, T>::launch( context, tensor_in, tensor_out, out_backprop, window, stride, out, padding, data_format_, input_backprop); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; template <typename T> struct LaunchAvgPooling3dGradOp<CPUDevice, T> { static void launch(OpKernelContext* context, const TensorShape& tensor_in_shape, const Tensor& out_backprop, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& output_shape, const std::array<int64, 3>& padding, TensorFormat data_format, Tensor* output) { OP_REQUIRES( context, tensor_in_shape.dim_size(0) == out_backprop.dim_size(0), errors::InvalidArgument( "Expected first dimension of tensor_in_shape and " "out_backprop to match, got ", tensor_in_shape.dim_size(0), " and ", out_backprop.dim_size(0))); OP_REQUIRES( context, tensor_in_shape.dim_size(4) == out_backprop.dim_size(4), errors::InvalidArgument( "Expected last dimension of tensor_in_shape and " "out_backprop to match, got ", tensor_in_shape.dim_size(4), " and ", out_backprop.dim_size(4))); output->flat<T>().setZero(); std::array<int64, 3> input_size = {{tensor_in_shape.dim_size(3), tensor_in_shape.dim_size(2), tensor_in_shape.dim_size(1)}}; for (int64 p = 0; p < out_backprop.dim_size(3); ++p) { // Calculate broadcast size for planes/rows/cols. For SAME padding, // current index could be in the padding area, and // p * stride_planes + window_planes // could be beyond the input tensor's boundary. In such cases, change // the starting index and reduce the broadcast size. // // The same procedure is repeated for every spatial dimension in the // nested loops below. int pindex, psize; OP_REQUIRES_OK(context, GetBroadcastSize(p, input_size[0], window[0], stride[0], padding[0], &pindex, &psize)); for (int64 r = 0; r < out_backprop.dim_size(2); ++r) { int rindex, rsize; OP_REQUIRES_OK(context, GetBroadcastSize(r, input_size[1], window[1], stride[1], padding[1], &rindex, &rsize)); for (int64 c = 0; c < out_backprop.dim_size(1); ++c) { int cindex, csize; OP_REQUIRES_OK( context, GetBroadcastSize(c, input_size[2], window[2], stride[2], padding[2], &cindex, &csize)); TensorSlice src{{0, -1}, {c, 1}, {r, 1}, {p, 1}, {0, -1}}; TensorSlice dst{{0, -1}, {cindex, csize}, {rindex, rsize}, {pindex, psize}, {0, -1}}; Eigen::DSizes<Eigen::DenseIndex, 5> src_indices; Eigen::DSizes<Eigen::DenseIndex, 5> src_sizes; Eigen::DSizes<Eigen::DenseIndex, 5> dst_indices; Eigen::DSizes<Eigen::DenseIndex, 5> dst_sizes; src.FillIndicesAndSizes<5>(out_backprop.shape(), &src_indices, &src_sizes); dst.FillIndicesAndSizes<5>(tensor_in_shape, &dst_indices, &dst_sizes); #if !defined(EIGEN_HAS_INDEX_LIST) Eigen::array<int, 5> bcast = {1, csize, rsize, psize, 1}; #else Eigen::IndexList<Eigen::type2index<1>, int, int, int, Eigen::type2index<1>> bcast; bcast.set(1, csize); bcast.set(2, rsize); bcast.set(3, psize); #endif Eigen::Tensor<T, 5, Eigen::RowMajor> slices(src_sizes); slices.device(context->eigen_cpu_device()) = out_backprop.tensor<T, 5>().slice(src_indices, src_sizes); // Divide by the size of the actual patch (psize * rsize * csize). 
float divide_size = rsize * csize * psize * 1.0f; slices *= slices.constant(1.0f / divide_size); output->tensor<T, 5>() .slice(dst_indices, dst_sizes) .device(context->eigen_cpu_device()) += slices.broadcast(bcast); } } } } }; template <class Device, class T> class AvgPooling3dGradOp : public OpKernel { public: explicit AvgPooling3dGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); if (context->device_type() == DEVICE_CPU) { OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Default AvgPooling3dGradOp only supports NDHWC ", "on device type ", DeviceTypeString(context->device_type()))); } OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 5, errors::InvalidArgument("Sliding window ksize field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window stride field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'N') == 1 && GetTensorDim(stride_, data_format_, 'N') == 1), errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES(context, (GetTensorDim(ksize_, data_format_, 'C') == 1 && GetTensorDim(stride_, data_format_, 'C') == 1), errors::Unimplemented( "Pooling is not yet supported on the depth dimension.")); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in_shape = context->input(0); const Tensor& out_backprop = context->input(1); OP_REQUIRES( context, tensor_in_shape.dims() == 1 && tensor_in_shape.NumElements() == 5, errors::InvalidArgument("tensor_in must be 1-dimensional and 5 " "elements")); OP_REQUIRES(context, out_backprop.dims() == 5, errors::InvalidArgument("out_backprop must be 5-dimensional")); TensorShape output_shape; auto shape_vec = tensor_in_shape.vec<int32>(); for (int64 i = 0; i < tensor_in_shape.NumElements(); ++i) { output_shape.AddDim(shape_vec(i)); } Tensor* output; OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output)); // Dimension order for these arrays is x, y, z. 
std::array<int64, 3> input_size{ {GetTensorDim(output_shape, data_format_, '2'), GetTensorDim(output_shape, data_format_, '1'), GetTensorDim(output_shape, data_format_, '0')}}; std::array<int64, 3> window{{GetTensorDim(ksize_, data_format_, '2'), GetTensorDim(ksize_, data_format_, '1'), GetTensorDim(ksize_, data_format_, '0')}}; std::array<int64, 3> stride{{GetTensorDim(stride_, data_format_, '2'), GetTensorDim(stride_, data_format_, '1'), GetTensorDim(stride_, data_format_, '0')}}; std::array<int64, 3> padding, out; OP_REQUIRES_OK(context, Get3dOutputSize(input_size, window, stride, padding_, &out, &padding)); LaunchAvgPooling3dGradOp<Device, T>::launch( context, output_shape, out_backprop, window, stride, out, padding, data_format_, output); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; template <typename T> struct LaunchMaxPooling3dGradGradOp<CPUDevice, T> { static void launch(OpKernelContext* context, const Pool3dParameters& params, const Tensor& tensor_in, const Tensor& tensor_out, const Tensor& tensor_top_diff, Tensor* tensor_bottom_diff) { OP_REQUIRES( context, params.data_format == FORMAT_NHWC, errors::InvalidArgument("Default MaxPooling3dGradGradOp only supports", "NDHWC on CPU device type")); typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> EigenMatrixMap; ConstEigenMatrixMap in_mat(tensor_in.flat<T>().data(), params.depth, params.tensor_in_planes * params.tensor_in_cols * params.tensor_in_rows * params.tensor_in_batch); ConstEigenMatrixMap out_mat(tensor_out.flat<T>().data(), params.depth, params.out_plane * params.out_width * params.out_height * params.tensor_in_batch); ConstEigenMatrixMap top_diff_mat( tensor_top_diff.flat<T>().data(), params.depth, params.tensor_in_planes * params.tensor_in_cols * params.tensor_in_rows * params.tensor_in_batch); EigenMatrixMap bottom_diff_mat( tensor_bottom_diff->flat<T>().data(), params.depth, params.out_plane * params.out_width * params.out_height * params.tensor_in_batch); const DeviceBase::CpuWorkerThreads& worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); auto shard = [&params, &in_mat, &out_mat, &top_diff_mat, &bottom_diff_mat]( int64 start, int64 limit) { const int32 depth = params.depth; const int32 in_planes = params.tensor_in_planes; const int32 in_rows = params.tensor_in_rows; const int32 in_cols = params.tensor_in_cols; const int32 pad_planes = params.pad_planes; const int32 pad_rows = params.pad_rows; const int32 pad_cols = params.pad_cols; const int32 window_planes = params.window_planes; const int32 window_rows = params.window_rows; const int32 window_cols = params.window_cols; const int32 plane_stride = params.plane_stride; const int32 row_stride = params.row_stride; const int32 col_stride = params.col_stride; const int32 out_plane = params.out_plane; const int32 out_height = params.out_height; const int32 out_width = params.out_width; { // Initializes the output grad backprop tensor with 0. 
const int32 output_image_size = out_plane * out_height * out_width * params.depth; EigenMatrixMap bottom_diff_shard( bottom_diff_mat.data() + start * output_image_size, 1, (limit - start) * output_image_size); bottom_diff_shard.setZero(); } for (int b = start; b < limit; ++b) { for (int pp = 0; pp < out_plane; ++pp) { for (int ph = 0; ph < out_height; ++ph) { for (int pw = 0; pw < out_width; ++pw) { // (p_start, p_end) * (h_start, h_end) * (w_start, w_end) is the // range that the input vector projects to. int p_start = pp * plane_stride - pad_planes; const int p_end = std::min(p_start + window_planes, in_planes); int h_start = ph * row_stride - pad_rows; const int h_end = std::min(h_start + window_rows, in_rows); int w_start = pw * col_stride - pad_cols; const int w_end = std::min(w_start + window_cols, in_cols); p_start = std::max(p_start, 0); h_start = std::max(h_start, 0); w_start = std::max(w_start, 0); const int out_index = ((b * out_plane + pp) * out_height + ph) * out_width + pw; // Find value corresponding to the input maximum in top_diff. for (int d = 0; d < depth; ++d) { const T& output_ref = out_mat.coeffRef(d, out_index); bool should_stop = false; for (int p = p_start; p < p_end && !should_stop; ++p) { for (int h = h_start; h < h_end && !should_stop; ++h) { for (int w = w_start; w < w_end && !should_stop; ++w) { const int in_index = ((b * in_planes + p) * in_rows + h) * in_cols + w; const T& input_ref = in_mat.coeffRef(d, in_index); if (output_ref == input_ref) { T& bottom_diff_ref = bottom_diff_mat.coeffRef(d, out_index); bottom_diff_ref = top_diff_mat.coeffRef(d, in_index); should_stop = true; } } } } } } } } } }; const int64 shard_cost = params.out_plane * params.out_height * params.out_width * params.depth * params.window_planes * params.window_rows * params.window_cols; Shard(worker_threads.num_threads, worker_threads.workers, params.tensor_in_batch, shard_cost, shard); } }; template <class Device, class T> class MaxPooling3dGradGradOp : public OpKernel { public: explicit MaxPooling3dGradGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 5, errors::InvalidArgument("Sliding window ksize field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 5, errors::InvalidArgument("Sliding window strides field must " "specify 5 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, ksize_[0] == 1 && stride_[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); const int32 ksize_c = GetTensorDim(ksize_, data_format_, 'C'); const int32 stride_c = GetTensorDim(stride_, data_format_, 'C'); OP_REQUIRES(context, ksize_c == 1 && stride_c == 1, errors::Unimplemented("MaxPooling3dGradGrad is not yet " "supported on the depth dimension.")); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); const Tensor& tensor_out = context->input(1); const Tensor& out_grad_backprop = context->input(2); // For maxpooling3d, tensor_in should have 5 dimensions. 
OP_REQUIRES(context, tensor_in.dims() == 5, errors::InvalidArgument("tensor_in must be 5-dimensional")); OP_REQUIRES(context, tensor_out.dims() == 5, errors::InvalidArgument("tensor_out must be 5-dimensional")); // For maxpooling3d, out_grad_backprop should have 5 dimensions. OP_REQUIRES( context, out_grad_backprop.dims() == 5, errors::InvalidArgument("out_grad_backprop must be 5-dimensional")); Pool3dParameters params{context, ksize_, stride_, padding_, data_format_, tensor_in.shape()}; if (!context->status().ok()) return; // params is invalid Tensor* output = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {2}, 0, tensor_out.shape(), &output)); // Given access patterns in LaunchMaxPooling3dGradGradOp, these tensors must // have elements. OP_REQUIRES(context, tensor_in.NumElements() > 0, errors::InvalidArgument("received empty tensor tensor_in: ", tensor_in.DebugString())); OP_REQUIRES(context, tensor_out.NumElements() > 0, errors::InvalidArgument("received empty tensor tensor_out: ", tensor_out.DebugString())); OP_REQUIRES( context, out_grad_backprop.NumElements() > 0, errors::InvalidArgument("received empty tensor out_grad_backprop: ", out_grad_backprop.DebugString())); OP_REQUIRES(context, tensor_in.NumElements() == out_grad_backprop.NumElements(), errors::InvalidArgument("tensor_in and out_grad_backprop must " "have same number of elements, got <", tensor_in.DebugString(), "> and <", out_grad_backprop.DebugString(), ">")); OP_REQUIRES( context, tensor_out.NumElements() == output->NumElements(), errors::InvalidArgument( "tensor_out and output must have same number of elements, got <", tensor_out.DebugString(), "> and <", output->DebugString(), ">")); LaunchMaxPooling3dGradGradOp<Device, T>::launch( context, params, tensor_in, tensor_out, out_grad_backprop, output); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; #define REGISTER_KERNELS(D, T) \ REGISTER_KERNEL_BUILDER( \ Name("MaxPool3D").Device(DEVICE_##D).TypeConstraint<T>("T"), \ Pooling3DOp<D##Device, T, MAX>); \ REGISTER_KERNEL_BUILDER(Name("MaxPool3DGrad") \ .Device(DEVICE_##D) \ .TypeConstraint<T>("T") \ .TypeConstraint<T>("TInput"), \ MaxPooling3dGradOp<D##Device, T>); \ REGISTER_KERNEL_BUILDER( \ Name("MaxPool3DGradGrad").Device(DEVICE_##D).TypeConstraint<T>("T"), \ MaxPooling3dGradGradOp<D##Device, T>); \ REGISTER_KERNEL_BUILDER( \ Name("AvgPool3D").Device(DEVICE_##D).TypeConstraint<T>("T"), \ Pooling3DOp<D##Device, T, AVG>); \ REGISTER_KERNEL_BUILDER(Name("AvgPool3DGrad") \ .Device(DEVICE_##D) \ .TypeConstraint<T>("T") \ .HostMemory("orig_input_shape"), \ AvgPooling3dGradOp<D##Device, T>); #define REGISTER_CPU_KERNELS(T) REGISTER_KERNELS(CPU, T) TF_CALL_float(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename T> struct LaunchPoolingOp<GPUDevice, T, AVG> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& padding, TensorFormat data_format, Padding padding_type, Tensor* output) { DnnPooling3dOp<T>::Compute(context, se::dnn::PoolingMode::kAverage, window, stride, padding, data_format, tensor_in, output); } }; template <typename T> struct LaunchPoolingOp<GPUDevice, T, MAX> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& padding, 
TensorFormat data_format, Padding padding_type, Tensor* output) { DnnPooling3dOp<T>::Compute(context, se::dnn::PoolingMode::kMaximum, window, stride, padding, data_format, tensor_in, output); } }; template <typename T> struct LaunchMaxPooling3dGradOp<GPUDevice, T> { static void launch(OpKernelContext* context, const Tensor& tensor_in, const Tensor& tensor_out, const Tensor& out_backprop, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& out, const std::array<int64, 3>& padding, TensorFormat data_format, Tensor* input_backprop) { const TensorShape output_shape = tensor_in.shape(); DnnPooling3dGradOp<T>::Compute(context, se::dnn::PoolingMode::kMaximum, window, stride, padding, out, data_format, out_backprop, output_shape, &tensor_in, &tensor_out, input_backprop); } }; template <typename T> struct LaunchAvgPooling3dGradOp<GPUDevice, T> { static void launch(OpKernelContext* context, const TensorShape& tensor_in_shape, const Tensor& out_backprop, const std::array<int64, 3>& window, const std::array<int64, 3>& stride, const std::array<int64, 3>& out, const std::array<int64, 3>& padding, TensorFormat data_format, Tensor* output) { DnnPooling3dGradOp<T>::Compute( context, se::dnn::PoolingMode::kAverage, window, stride, padding, out, data_format, out_backprop, tensor_in_shape, nullptr, nullptr, output); } }; template <typename T> struct LaunchMaxPooling3dGradGradOp<GPUDevice, T> { static void launch(OpKernelContext* context, const Pool3dParameters& params, const Tensor& tensor_in, const Tensor& tensor_out, const Tensor& tensor_top_diff, Tensor* tensor_bottom_diff) { bool status = functor::MaxPool3dGradBackward<T>()( params.data_format, tensor_in.flat<T>().data(), tensor_out.flat<T>().data(), params.tensor_in_batch, params.out_plane, params.out_height, params.out_width, params.depth, params.tensor_in_planes, params.tensor_in_rows, params.tensor_in_cols, params.window_planes, params.window_rows, params.window_cols, params.plane_stride, params.row_stride, params.col_stride, params.pad_planes, params.pad_rows, params.pad_cols, tensor_top_diff.flat<T>().data(), tensor_bottom_diff->flat<T>().data(), context->eigen_gpu_device()); if (!status) { context->SetStatus( errors::Internal("Failed launching MaxPool3dGradBackward")); } } }; #define REGISTER_GPU_KERNELS(T) REGISTER_KERNELS(GPU, T) TF_CALL_float(REGISTER_GPU_KERNELS) TF_CALL_half(REGISTER_GPU_KERNELS) #undef REGISTER_GPU_KERNELS #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM #undef REGISTER_KERNELS } // namespace tensorflow
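The CPU gradient kernels above depend on GetBroadcastSize to clamp each output element's receptive field so that, under SAME padding, the slice indices never step outside the input tensor. Below is a minimal standalone sketch of that clamping arithmetic; the function name and signature are illustrative only, not the actual TensorFlow helper.

#include <algorithm>
#include <cstdint>

// Illustrative sketch (not the TensorFlow GetBroadcastSize helper): given an
// output index, window, stride and left padding, compute the clamped start
// index and size of the input span that the output element maps onto, so that
// reads stay inside [0, input_size).
static bool ClampedBroadcastWindow(int64_t index, int64_t input_size,
                                   int64_t window, int64_t stride,
                                   int64_t pad, int64_t* start,
                                   int64_t* size) {
  int64_t begin = index * stride - pad;           // may be negative in the pad area
  int64_t end = std::min(begin + window, input_size);
  begin = std::max<int64_t>(begin, 0);
  if (begin >= end) return false;                 // window fell entirely in padding
  *start = begin;
  *size = end - begin;
  return true;
}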
null
260
CWE-787
CVE-2021-29578
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #define EIGEN_USE_THREADS #include <algorithm> #include <cmath> #include <random> #include <vector> #include "tensorflow/core/kernels/fractional_pool_common.h" #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/lib/random/random.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/util/guarded_philox_random.h" namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; template <typename T> class FractionalAvgPoolOp : public OpKernel { public: explicit FractionalAvgPoolOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("pooling_ratio", &pooling_ratio_)); OP_REQUIRES_OK(context, context->GetAttr("pseudo_random", &pseudo_random_)); OP_REQUIRES_OK(context, context->GetAttr("overlapping", &overlapping_)); OP_REQUIRES(context, pooling_ratio_.size() == 4, errors::InvalidArgument( "pooling_ratio field must specify 4 dimensions")); OP_REQUIRES( context, pooling_ratio_[0] == 1 || pooling_ratio_[3] == 1, errors::Unimplemented("Fractional average pooling is not yet " "supported on the batch nor channel dimension.")); OP_REQUIRES_OK(context, context->GetAttr("deterministic", &deterministic_)); OP_REQUIRES_OK(context, context->GetAttr("seed", &seed_)); OP_REQUIRES_OK(context, context->GetAttr("seed2", &seed2_)); if (deterministic_) { // If both seeds are not set when deterministic_ is true, force set seeds. if ((seed_ == 0) && (seed2_ == 0)) { seed_ = random::New64(); seed2_ = random::New64(); } } else { OP_REQUIRES( context, (seed_ == 0) && (seed2_ == 0), errors::InvalidArgument( "Both seed and seed2 should be 0 if deterministic is false.")); } } void Compute(OpKernelContext* context) override { typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> EigenMatrixMap; constexpr int tensor_in_and_out_dims = 4; const Tensor& tensor_in = context->input(0); OP_REQUIRES(context, tensor_in.dims() == tensor_in_and_out_dims, errors::InvalidArgument("tensor_in must be 4-dimensional")); std::vector<int> input_size(tensor_in_and_out_dims); std::vector<int> output_size(tensor_in_and_out_dims); for (int i = 0; i < tensor_in_and_out_dims; ++i) { input_size[i] = tensor_in.dim_size(i); OP_REQUIRES( context, pooling_ratio_[i] <= input_size[i], errors::InvalidArgument( "Pooling ratio cannot be bigger than input tensor dim size.")); } // Output size. for (int i = 0; i < tensor_in_and_out_dims; ++i) { output_size[i] = static_cast<int>(std::floor(input_size[i] / pooling_ratio_[i])); DCHECK_GT(output_size[i], 0); } // Generate pooling sequence. 
std::vector<int64> row_cum_seq; std::vector<int64> col_cum_seq; GuardedPhiloxRandom generator; generator.Init(seed_, seed2_); row_cum_seq = GeneratePoolingSequence(input_size[1], output_size[1], &generator, pseudo_random_); col_cum_seq = GeneratePoolingSequence(input_size[2], output_size[2], &generator, pseudo_random_); // Prepare output. Tensor* output_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output( 0, TensorShape({output_size[0], output_size[1], output_size[2], output_size[3]}), &output_tensor)); Tensor* output_row_seq_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output( 1, TensorShape({static_cast<int64>(row_cum_seq.size())}), &output_row_seq_tensor)); Tensor* output_col_seq_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output( 2, TensorShape({static_cast<int64>(col_cum_seq.size())}), &output_col_seq_tensor)); ConstEigenMatrixMap in_mat(tensor_in.flat<T>().data(), input_size[3], input_size[2] * input_size[1] * input_size[0]); EigenMatrixMap out_mat(output_tensor->flat<T>().data(), output_size[3], output_size[2] * output_size[1] * output_size[0]); // out_count corresponds to number of elements in each pooling cell. Eigen::Matrix<T, Eigen::Dynamic, 1> out_count(out_mat.cols()); // Initializes the output tensor and out_count with 0. out_mat.setZero(); out_count.setZero(); auto output_row_seq_flat = output_row_seq_tensor->flat<int64>(); auto output_col_seq_flat = output_col_seq_tensor->flat<int64>(); // Set output tensors. for (int i = 0; i < row_cum_seq.size(); ++i) { output_row_seq_flat(i) = row_cum_seq[i]; } for (int i = 0; i < col_cum_seq.size(); ++i) { output_col_seq_flat(i) = col_cum_seq[i]; } // For both input and output, // 0: batch // 1: row / row // 2: col / col // 3: depth / channel const int64 row_max = input_size[1] - 1; const int64 col_max = input_size[2] - 1; for (int64 b = 0; b < input_size[0]; ++b) { // row sequence. for (int64 hs = 0; hs < row_cum_seq.size() - 1; ++hs) { // row start and end. const int64 row_start = row_cum_seq[hs]; int64 row_end = overlapping_ ? row_cum_seq[hs + 1] : row_cum_seq[hs + 1] - 1; row_end = std::min(row_end, row_max); // col sequence. for (int64 ws = 0; ws < col_cum_seq.size() - 1; ++ws) { const int64 out_offset = (b * output_size[1] + hs) * output_size[2] + ws; // col start and end. const int64 col_start = col_cum_seq[ws]; int64 col_end = overlapping_ ? 
col_cum_seq[ws + 1] : col_cum_seq[ws + 1] - 1; col_end = std::min(col_end, col_max); for (int64 h = row_start; h <= row_end; ++h) { for (int64 w = col_start; w <= col_end; ++w) { const int64 in_offset = (b * input_size[1] + h) * input_size[2] + w; out_mat.col(out_offset) += in_mat.col(in_offset); out_count(out_offset)++; } } } } } DCHECK_GT(out_count.minCoeff(), 0); out_mat.array().rowwise() /= out_count.transpose().array(); } private: bool deterministic_; int64 seed_; int64 seed2_; std::vector<float> pooling_ratio_; bool pseudo_random_; bool overlapping_; }; #define REGISTER_FRACTIONALAVGPOOL(type) \ REGISTER_KERNEL_BUILDER( \ Name("FractionalAvgPool").Device(DEVICE_CPU).TypeConstraint<type>("T"), \ FractionalAvgPoolOp<type>) REGISTER_FRACTIONALAVGPOOL(int32); REGISTER_FRACTIONALAVGPOOL(int64); REGISTER_FRACTIONALAVGPOOL(float); REGISTER_FRACTIONALAVGPOOL(double); #undef REGISTER_FRACTIONALAVGPOOL template <class T> class FractionalAvgPoolGradOp : public OpKernel { public: explicit FractionalAvgPoolGradOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("overlapping", &overlapping_)); } void Compute(OpKernelContext* context) override { // Here's the basic idea: // Batch and depth dimension are independent from row and col dimension. And // because FractionalAvgPool currently only support pooling along row and // col, we can basically think of this 4D tensor backpropagation as // operation of a series of 2D planes. // // For each element of a 'slice' (2D plane) of output_backprop, we need to // figure out its contributors when doing FractionalAvgPool operation. This // can be done based on row_pooling_sequence, col_pooling_seq and // overlapping. // Once we figure out the original contributors, we just need to evenly // divide the value of this element among these contributors. // // Internally, we divide the out_backprop tensor and store it in a temporary // tensor of double type. And cast it to the corresponding type. typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>> EigenDoubleMatrixMap; // Grab the inputs. const Tensor& orig_input_tensor_shape = context->input(0); OP_REQUIRES(context, orig_input_tensor_shape.dims() == 1 && orig_input_tensor_shape.NumElements() == 4, errors::InvalidArgument("original input tensor shape must be" "1-dimensional and 4 elements")); const Tensor& out_backprop = context->input(1); const Tensor& row_seq_tensor = context->input(2); const Tensor& col_seq_tensor = context->input(3); const int64 out_batch = out_backprop.dim_size(0); const int64 out_rows = out_backprop.dim_size(1); const int64 out_cols = out_backprop.dim_size(2); const int64 out_depth = out_backprop.dim_size(3); auto row_seq_tensor_flat = row_seq_tensor.flat<int64>(); auto col_seq_tensor_flat = col_seq_tensor.flat<int64>(); auto orig_input_tensor_shape_flat = orig_input_tensor_shape.flat<int64>(); const int64 in_batch = orig_input_tensor_shape_flat(0); const int64 in_rows = orig_input_tensor_shape_flat(1); const int64 in_cols = orig_input_tensor_shape_flat(2); const int64 in_depth = orig_input_tensor_shape_flat(3); constexpr int tensor_in_and_out_dims = 4; // Transform orig_input_tensor_shape into TensorShape TensorShape in_shape; for (auto i = 0; i < tensor_in_and_out_dims; ++i) { in_shape.AddDim(orig_input_tensor_shape_flat(i)); } // Create intermediate in_backprop. 
Tensor in_backprop_tensor_temp; OP_REQUIRES_OK(context, context->forward_input_or_allocate_temp( {0}, DataTypeToEnum<double>::v(), in_shape, &in_backprop_tensor_temp)); in_backprop_tensor_temp.flat<double>().setZero(); // Transform 4D tensor to 2D matrix. EigenDoubleMatrixMap in_backprop_tensor_temp_mat( in_backprop_tensor_temp.flat<double>().data(), in_depth, in_cols * in_rows * in_batch); ConstEigenMatrixMap out_backprop_mat(out_backprop.flat<T>().data(), out_depth, out_cols * out_rows * out_batch); // Loop through each element of out_backprop and evenly distribute the // element to the corresponding pooling cell. const int64 in_max_row_index = in_rows - 1; const int64 in_max_col_index = in_cols - 1; for (int64 b = 0; b < out_batch; ++b) { for (int64 r = 0; r < out_rows; ++r) { const int64 in_row_start = row_seq_tensor_flat(r); int64 in_row_end = overlapping_ ? row_seq_tensor_flat(r + 1) : row_seq_tensor_flat(r + 1) - 1; in_row_end = std::min(in_row_end, in_max_row_index); for (int64 c = 0; c < out_cols; ++c) { const int64 in_col_start = col_seq_tensor_flat(c); int64 in_col_end = overlapping_ ? col_seq_tensor_flat(c + 1) : col_seq_tensor_flat(c + 1) - 1; in_col_end = std::min(in_col_end, in_max_col_index); const int64 num_elements_in_pooling_cell = (in_row_end - in_row_start + 1) * (in_col_end - in_col_start + 1); const int64 out_index = (b * out_rows + r) * out_cols + c; // Now we can evenly distribute out_backprop(b, h, w, *) to // in_backprop(b, hs:he, ws:we, *). for (int64 in_r = in_row_start; in_r <= in_row_end; ++in_r) { for (int64 in_c = in_col_start; in_c <= in_col_end; ++in_c) { const int64 in_index = (b * in_rows + in_r) * in_cols + in_c; // Walk through each channel (depth). for (int64 d = 0; d < out_depth; ++d) { const double out_backprop_element = static_cast<double>( out_backprop_mat.coeffRef(d, out_index)); double& in_backprop_ref = in_backprop_tensor_temp_mat.coeffRef(d, in_index); in_backprop_ref += out_backprop_element / num_elements_in_pooling_cell; } } } } } } // Depending on the type, cast double to type T. Tensor* in_backprop_tensor = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, in_shape, &in_backprop_tensor)); auto in_backprop_tensor_flat = in_backprop_tensor->flat<T>(); auto in_backprop_tensor_temp_flat = in_backprop_tensor_temp.flat<double>(); for (int64 i = 0; i < in_backprop_tensor_flat.size(); ++i) { in_backprop_tensor_flat(i) = static_cast<T>(in_backprop_tensor_temp_flat(i)); } } private: bool overlapping_; }; #define REGISTER_FRACTIONALAVGPOOLGRAD(type) \ REGISTER_KERNEL_BUILDER(Name("FractionalAvgPoolGrad") \ .Device(DEVICE_CPU) \ .TypeConstraint<type>("T"), \ FractionalAvgPoolGradOp<type>) REGISTER_FRACTIONALAVGPOOLGRAD(int32); REGISTER_FRACTIONALAVGPOOLGRAD(int64); REGISTER_FRACTIONALAVGPOOLGRAD(float); REGISTER_FRACTIONALAVGPOOLGRAD(double); #undef REGISTER_FRACTIONALAVGPOOLGRAD } // namespace tensorflow
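FractionalAvgPoolGradOp above spreads every out_backprop element evenly over its pooling cell, dividing by the cell's element count, and it reads row_seq_tensor_flat(r + 1) and col_seq_tensor_flat(c + 1) for every output row and column. A compact sketch of that redistribution step follows; the function and parameter names are illustrative, not the kernel's actual helpers.

#include <cstdint>
#include <vector>

// Illustrative sketch of the even redistribution performed by the gradient:
// every input position inside the pooling cell [row_start..row_end] x
// [col_start..col_end] receives an equal share of the output gradient.
static void DistributeCellGradient(double grad, int64_t row_start,
                                   int64_t row_end, int64_t col_start,
                                   int64_t col_end, int64_t in_cols,
                                   std::vector<double>* in_backprop) {
  const int64_t cell_elems =
      (row_end - row_start + 1) * (col_end - col_start + 1);
  const double share = grad / static_cast<double>(cell_elems);
  for (int64_t r = row_start; r <= row_end; ++r) {
    for (int64_t c = col_start; c <= col_end; ++c) {
      (*in_backprop)[r * in_cols + c] += share;
    }
  }
}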
null
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #define EIGEN_USE_THREADS #include <algorithm> #include <cmath> #include <random> #include <vector> #include "tensorflow/core/kernels/fractional_pool_common.h" #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/lib/random/random.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/util/guarded_philox_random.h" namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; template <typename T> class FractionalAvgPoolOp : public OpKernel { public: explicit FractionalAvgPoolOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("pooling_ratio", &pooling_ratio_)); OP_REQUIRES_OK(context, context->GetAttr("pseudo_random", &pseudo_random_)); OP_REQUIRES_OK(context, context->GetAttr("overlapping", &overlapping_)); OP_REQUIRES(context, pooling_ratio_.size() == 4, errors::InvalidArgument( "pooling_ratio field must specify 4 dimensions")); OP_REQUIRES( context, pooling_ratio_[0] == 1 || pooling_ratio_[3] == 1, errors::Unimplemented("Fractional average pooling is not yet " "supported on the batch nor channel dimension.")); OP_REQUIRES_OK(context, context->GetAttr("deterministic", &deterministic_)); OP_REQUIRES_OK(context, context->GetAttr("seed", &seed_)); OP_REQUIRES_OK(context, context->GetAttr("seed2", &seed2_)); if (deterministic_) { // If both seeds are not set when deterministic_ is true, force set seeds. if ((seed_ == 0) && (seed2_ == 0)) { seed_ = random::New64(); seed2_ = random::New64(); } } else { OP_REQUIRES( context, (seed_ == 0) && (seed2_ == 0), errors::InvalidArgument( "Both seed and seed2 should be 0 if deterministic is false.")); } } void Compute(OpKernelContext* context) override { typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> EigenMatrixMap; constexpr int tensor_in_and_out_dims = 4; const Tensor& tensor_in = context->input(0); OP_REQUIRES(context, tensor_in.dims() == tensor_in_and_out_dims, errors::InvalidArgument("tensor_in must be 4-dimensional")); std::vector<int> input_size(tensor_in_and_out_dims); std::vector<int> output_size(tensor_in_and_out_dims); for (int i = 0; i < tensor_in_and_out_dims; ++i) { input_size[i] = tensor_in.dim_size(i); OP_REQUIRES( context, pooling_ratio_[i] <= input_size[i], errors::InvalidArgument( "Pooling ratio cannot be bigger than input tensor dim size.")); } // Output size. for (int i = 0; i < tensor_in_and_out_dims; ++i) { output_size[i] = static_cast<int>(std::floor(input_size[i] / pooling_ratio_[i])); DCHECK_GT(output_size[i], 0); } // Generate pooling sequence. 
std::vector<int64> row_cum_seq; std::vector<int64> col_cum_seq; GuardedPhiloxRandom generator; generator.Init(seed_, seed2_); row_cum_seq = GeneratePoolingSequence(input_size[1], output_size[1], &generator, pseudo_random_); col_cum_seq = GeneratePoolingSequence(input_size[2], output_size[2], &generator, pseudo_random_); // Prepare output. Tensor* output_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output( 0, TensorShape({output_size[0], output_size[1], output_size[2], output_size[3]}), &output_tensor)); Tensor* output_row_seq_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output( 1, TensorShape({static_cast<int64>(row_cum_seq.size())}), &output_row_seq_tensor)); Tensor* output_col_seq_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output( 2, TensorShape({static_cast<int64>(col_cum_seq.size())}), &output_col_seq_tensor)); ConstEigenMatrixMap in_mat(tensor_in.flat<T>().data(), input_size[3], input_size[2] * input_size[1] * input_size[0]); EigenMatrixMap out_mat(output_tensor->flat<T>().data(), output_size[3], output_size[2] * output_size[1] * output_size[0]); // out_count corresponds to number of elements in each pooling cell. Eigen::Matrix<T, Eigen::Dynamic, 1> out_count(out_mat.cols()); // Initializes the output tensor and out_count with 0. out_mat.setZero(); out_count.setZero(); auto output_row_seq_flat = output_row_seq_tensor->flat<int64>(); auto output_col_seq_flat = output_col_seq_tensor->flat<int64>(); // Set output tensors. for (int i = 0; i < row_cum_seq.size(); ++i) { output_row_seq_flat(i) = row_cum_seq[i]; } for (int i = 0; i < col_cum_seq.size(); ++i) { output_col_seq_flat(i) = col_cum_seq[i]; } // For both input and output, // 0: batch // 1: row / row // 2: col / col // 3: depth / channel const int64 row_max = input_size[1] - 1; const int64 col_max = input_size[2] - 1; for (int64 b = 0; b < input_size[0]; ++b) { // row sequence. for (int64 hs = 0; hs < row_cum_seq.size() - 1; ++hs) { // row start and end. const int64 row_start = row_cum_seq[hs]; int64 row_end = overlapping_ ? row_cum_seq[hs + 1] : row_cum_seq[hs + 1] - 1; row_end = std::min(row_end, row_max); // col sequence. for (int64 ws = 0; ws < col_cum_seq.size() - 1; ++ws) { const int64 out_offset = (b * output_size[1] + hs) * output_size[2] + ws; // col start and end. const int64 col_start = col_cum_seq[ws]; int64 col_end = overlapping_ ? 
col_cum_seq[ws + 1] : col_cum_seq[ws + 1] - 1; col_end = std::min(col_end, col_max); for (int64 h = row_start; h <= row_end; ++h) { for (int64 w = col_start; w <= col_end; ++w) { const int64 in_offset = (b * input_size[1] + h) * input_size[2] + w; out_mat.col(out_offset) += in_mat.col(in_offset); out_count(out_offset)++; } } } } } DCHECK_GT(out_count.minCoeff(), 0); out_mat.array().rowwise() /= out_count.transpose().array(); } private: bool deterministic_; int64 seed_; int64 seed2_; std::vector<float> pooling_ratio_; bool pseudo_random_; bool overlapping_; }; #define REGISTER_FRACTIONALAVGPOOL(type) \ REGISTER_KERNEL_BUILDER( \ Name("FractionalAvgPool").Device(DEVICE_CPU).TypeConstraint<type>("T"), \ FractionalAvgPoolOp<type>) REGISTER_FRACTIONALAVGPOOL(int32); REGISTER_FRACTIONALAVGPOOL(int64); REGISTER_FRACTIONALAVGPOOL(float); REGISTER_FRACTIONALAVGPOOL(double); #undef REGISTER_FRACTIONALAVGPOOL template <class T> class FractionalAvgPoolGradOp : public OpKernel { public: explicit FractionalAvgPoolGradOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("overlapping", &overlapping_)); } void Compute(OpKernelContext* context) override { // Here's the basic idea: // Batch and depth dimension are independent from row and col dimension. And // because FractionalAvgPool currently only support pooling along row and // col, we can basically think of this 4D tensor backpropagation as // operation of a series of 2D planes. // // For each element of a 'slice' (2D plane) of output_backprop, we need to // figure out its contributors when doing FractionalAvgPool operation. This // can be done based on row_pooling_sequence, col_pooling_seq and // overlapping. // Once we figure out the original contributors, we just need to evenly // divide the value of this element among these contributors. // // Internally, we divide the out_backprop tensor and store it in a temporary // tensor of double type. And cast it to the corresponding type. typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>> EigenDoubleMatrixMap; // Grab the inputs. 
const Tensor& orig_input_tensor_shape = context->input(0); OP_REQUIRES(context, orig_input_tensor_shape.dims() == 1 && orig_input_tensor_shape.NumElements() == 4, errors::InvalidArgument("original input tensor shape must be" "1-dimensional and 4 elements")); const Tensor& out_backprop = context->input(1); const Tensor& row_seq_tensor = context->input(2); const Tensor& col_seq_tensor = context->input(3); const int64 out_batch = out_backprop.dim_size(0); const int64 out_rows = out_backprop.dim_size(1); const int64 out_cols = out_backprop.dim_size(2); const int64 out_depth = out_backprop.dim_size(3); OP_REQUIRES(context, row_seq_tensor.NumElements() > out_rows, errors::InvalidArgument("Given out_backprop shape ", out_backprop.shape().DebugString(), ", row_seq_tensor must have at least ", out_rows + 1, " elements, but got ", row_seq_tensor.NumElements())); OP_REQUIRES(context, col_seq_tensor.NumElements() > out_cols, errors::InvalidArgument("Given out_backprop shape ", out_backprop.shape().DebugString(), ", col_seq_tensor must have at least ", out_cols + 1, " elements, but got ", col_seq_tensor.NumElements())); auto row_seq_tensor_flat = row_seq_tensor.flat<int64>(); auto col_seq_tensor_flat = col_seq_tensor.flat<int64>(); auto orig_input_tensor_shape_flat = orig_input_tensor_shape.flat<int64>(); const int64 in_batch = orig_input_tensor_shape_flat(0); const int64 in_rows = orig_input_tensor_shape_flat(1); const int64 in_cols = orig_input_tensor_shape_flat(2); const int64 in_depth = orig_input_tensor_shape_flat(3); constexpr int tensor_in_and_out_dims = 4; // Transform orig_input_tensor_shape into TensorShape TensorShape in_shape; for (auto i = 0; i < tensor_in_and_out_dims; ++i) { in_shape.AddDim(orig_input_tensor_shape_flat(i)); } // Create intermediate in_backprop. Tensor in_backprop_tensor_temp; OP_REQUIRES_OK(context, context->forward_input_or_allocate_temp( {0}, DataTypeToEnum<double>::v(), in_shape, &in_backprop_tensor_temp)); in_backprop_tensor_temp.flat<double>().setZero(); // Transform 4D tensor to 2D matrix. EigenDoubleMatrixMap in_backprop_tensor_temp_mat( in_backprop_tensor_temp.flat<double>().data(), in_depth, in_cols * in_rows * in_batch); ConstEigenMatrixMap out_backprop_mat(out_backprop.flat<T>().data(), out_depth, out_cols * out_rows * out_batch); // Loop through each element of out_backprop and evenly distribute the // element to the corresponding pooling cell. const int64 in_max_row_index = in_rows - 1; const int64 in_max_col_index = in_cols - 1; for (int64 b = 0; b < out_batch; ++b) { for (int64 r = 0; r < out_rows; ++r) { const int64 in_row_start = row_seq_tensor_flat(r); int64 in_row_end = overlapping_ ? row_seq_tensor_flat(r + 1) : row_seq_tensor_flat(r + 1) - 1; in_row_end = std::min(in_row_end, in_max_row_index); for (int64 c = 0; c < out_cols; ++c) { const int64 in_col_start = col_seq_tensor_flat(c); int64 in_col_end = overlapping_ ? col_seq_tensor_flat(c + 1) : col_seq_tensor_flat(c + 1) - 1; in_col_end = std::min(in_col_end, in_max_col_index); const int64 num_elements_in_pooling_cell = (in_row_end - in_row_start + 1) * (in_col_end - in_col_start + 1); const int64 out_index = (b * out_rows + r) * out_cols + c; // Now we can evenly distribute out_backprop(b, h, w, *) to // in_backprop(b, hs:he, ws:we, *). for (int64 in_r = in_row_start; in_r <= in_row_end; ++in_r) { for (int64 in_c = in_col_start; in_c <= in_col_end; ++in_c) { const int64 in_index = (b * in_rows + in_r) * in_cols + in_c; // Walk through each channel (depth). 
for (int64 d = 0; d < out_depth; ++d) { const double out_backprop_element = static_cast<double>( out_backprop_mat.coeffRef(d, out_index)); double& in_backprop_ref = in_backprop_tensor_temp_mat.coeffRef(d, in_index); in_backprop_ref += out_backprop_element / num_elements_in_pooling_cell; } } } } } } // Depending on the type, cast double to type T. Tensor* in_backprop_tensor = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, in_shape, &in_backprop_tensor)); auto in_backprop_tensor_flat = in_backprop_tensor->flat<T>(); auto in_backprop_tensor_temp_flat = in_backprop_tensor_temp.flat<double>(); for (int64 i = 0; i < in_backprop_tensor_flat.size(); ++i) { in_backprop_tensor_flat(i) = static_cast<T>(in_backprop_tensor_temp_flat(i)); } } private: bool overlapping_; }; #define REGISTER_FRACTIONALAVGPOOLGRAD(type) \ REGISTER_KERNEL_BUILDER(Name("FractionalAvgPoolGrad") \ .Device(DEVICE_CPU) \ .TypeConstraint<type>("T"), \ FractionalAvgPoolGradOp<type>) REGISTER_FRACTIONALAVGPOOLGRAD(int32); REGISTER_FRACTIONALAVGPOOLGRAD(int64); REGISTER_FRACTIONALAVGPOOLGRAD(float); REGISTER_FRACTIONALAVGPOOLGRAD(double); #undef REGISTER_FRACTIONALAVGPOOLGRAD } // namespace tensorflow
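The patched kernel above differs from the earlier version only in the added OP_REQUIRES checks that row_seq_tensor and col_seq_tensor hold at least out_rows + 1 and out_cols + 1 elements before seq(i + 1) is dereferenced. A minimal sketch of that precondition applied to a plain row sequence follows (names are illustrative, not TensorFlow API):

#include <algorithm>
#include <cstdint>
#include <vector>

// Illustrative sketch: mirrors the check added by the patch. row_seq[r + 1]
// is read for every output row, so the sequence must have at least
// out_rows + 1 entries; returns -1 when the precondition fails, otherwise the
// total number of input rows covered by the clamped cells.
static int64_t CountTouchedRows(const std::vector<int64_t>& row_seq,
                                int64_t out_rows, int64_t in_max_row,
                                bool overlapping) {
  if (static_cast<int64_t>(row_seq.size()) <= out_rows) return -1;
  int64_t touched = 0;
  for (int64_t r = 0; r < out_rows; ++r) {
    const int64_t row_start = row_seq[r];
    int64_t row_end = overlapping ? row_seq[r + 1] : row_seq[r + 1] - 1;
    row_end = std::min(row_end, in_max_row);
    if (row_end >= row_start) touched += row_end - row_start + 1;
  }
  return touched;
}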
null
261
CWE-787
CVE-2021-29579
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // See docs in ../ops/nn_ops.cc. #define EIGEN_USE_THREADS #include "tensorflow/core/kernels/maxpooling_op.h" #include <type_traits> #include <vector> #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_slice.h" #include "tensorflow/core/kernels/conv_2d.h" #include "tensorflow/core/kernels/eigen_pooling.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/kernels/pooling_ops_common.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/array_slice.h" #include "tensorflow/core/util/env_var.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/tensor_format.h" #include "tensorflow/core/util/use_cudnn.h" #if GOOGLE_CUDA #include "third_party/gpus/cudnn/cudnn.h" #endif // GOOGLE_CUDA #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #include "tensorflow/core/kernels/maxpooling_op_gpu.h" #include "tensorflow/core/kernels/pooling_ops_common_gpu.h" #include "tensorflow/core/platform/stream_executor.h" #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; const int kInvalidMaxPoolingIndex = -1; template <typename Device, typename T, typename Targmax> static void SpatialMaxPoolWithArgMaxHelper( OpKernelContext* context, Tensor* output, Tensor* output_arg_max, Tensor* input_backprop, const Tensor& tensor_in, const Tensor& out_backprop, const PoolParameters& params, const bool include_batch_in_index) { if (input_backprop != nullptr) { OP_REQUIRES( context, include_batch_in_index, errors::Internal( "SpatialMaxPoolWithArgMaxHelper requires include_batch_in_index " "to be True when input_backprop != nullptr")); OP_REQUIRES( context, (std::is_same<Targmax, int64>::value), errors::Internal("SpatialMaxPoolWithArgMaxHelper requires Targmax " "to be int64 when input_backprop != nullptr")); } typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> EigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<Targmax, Eigen::Dynamic, Eigen::Dynamic>> EigenIndexMatrixMap; ConstEigenMatrixMap in_mat( tensor_in.flat<T>().data(), params.depth, params.tensor_in_cols * params.tensor_in_rows * params.tensor_in_batch); EigenMatrixMap out_mat( output->flat<T>().data(), params.depth, params.out_width * params.out_height * params.tensor_in_batch); EigenIndexMatrixMap out_arg_max_mat( output_arg_max->flat<Targmax>().data(), 
params.depth, params.out_width * params.out_height * params.tensor_in_batch); const DeviceBase::CpuWorkerThreads& worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); // The following code basically does the following: // 1. Flattens the input and output tensors into two dimensional arrays. // tensor_in_as_matrix: // depth by (tensor_in_cols * tensor_in_rows * tensor_in_batch) // output_as_matrix: // depth by (out_width * out_height * tensor_in_batch) // // 2. Walks through the set of columns in the flattened tensor_in_as_matrix, // and updates the corresponding column(s) in output_as_matrix with the // max value. auto shard = [&params, &in_mat, &out_mat, &out_arg_max_mat, &input_backprop, &output_arg_max, &out_backprop, include_batch_in_index](int64 start, int64 limit) { const int32 depth = params.depth; const int32 in_rows = params.tensor_in_rows; const int32 in_cols = params.tensor_in_cols; const int32 pad_top = params.pad_top; const int32 pad_left = params.pad_left; const int32 window_rows = params.window_rows; const int32 window_cols = params.window_cols; const int32 row_stride = params.row_stride; const int32 col_stride = params.col_stride; const int32 out_height = params.out_height; const int32 out_width = params.out_width; { // Initializes the output tensor with MIN<T>. const int32 output_image_size = out_height * out_width * depth; EigenMatrixMap out_shard(out_mat.data() + start * output_image_size, 1, (limit - start) * output_image_size); out_shard.setConstant(Eigen::NumTraits<T>::lowest()); EigenIndexMatrixMap out_arg_max_shard( out_arg_max_mat.data() + start * output_image_size, 1, (limit - start) * output_image_size); out_arg_max_shard.setConstant(kInvalidMaxPoolingIndex); } for (int64 b = start; b < limit; ++b) { for (int h = 0; h < in_rows; ++h) { for (int w = 0; w < in_cols; ++w) { // (h_start, h_end) * (w_start, w_end) is the range that the input // vector projects to. const int hpad = h + pad_top; const int wpad = w + pad_left; const int h_start = (hpad < window_rows) ? 0 : (hpad - window_rows) / row_stride + 1; const int h_end = std::min(hpad / row_stride + 1, out_height); const int w_start = (wpad < window_cols) ? 0 : (wpad - window_cols) / col_stride + 1; const int w_end = std::min(wpad / col_stride + 1, out_width); // compute elementwise max const int64 in_index = (b * in_rows + h) * in_cols + w; for (int ph = h_start; ph < h_end; ++ph) { const int64 out_index_base = (b * out_height + ph) * out_width; for (int pw = w_start; pw < w_end; ++pw) { const int64 out_index = out_index_base + pw; /// NOTES(zhengxq): not using the eigen matrix operation for /// now. for (int d = 0; d < depth; ++d) { const T& input_ref = in_mat.coeffRef(d, in_index); T& output_ref = out_mat.coeffRef(d, out_index); Targmax& out_arg_max_ref = out_arg_max_mat.coeffRef(d, out_index); if (output_ref < input_ref || out_arg_max_ref == kInvalidMaxPoolingIndex) { output_ref = input_ref; if (include_batch_in_index) { out_arg_max_ref = in_index * depth + d; } else { out_arg_max_ref = (h * in_cols + w) * depth + d; } } } } } } } } if (input_backprop != nullptr) { auto input_backprop_flat = input_backprop->flat<T>(); auto out_arg_max_flat = output_arg_max->flat<int64>(); auto out_backprop_flat = out_backprop.flat<T>(); // Initialize output to 0. 
const int64 in_size = in_rows * in_cols * depth; const int64 in_start = start * in_size; const int64 in_end = limit * in_size; EigenMatrixMap in_shard(input_backprop_flat.data() + in_start, 1, in_end - in_start); in_shard.setConstant(T(0)); // Backpropagate. const int out_size = out_height * out_width * depth; const int out_start = start * out_size; const int out_end = limit * out_size; for (int index = out_start; index < out_end; ++index) { int input_backprop_index = out_arg_max_flat(index); // Although this check is in the inner loop, it is worth its value // so we don't end up with memory corruptions. Our benchmark shows that // the performance impact is quite small // CHECK(input_backprop_index >= in_start && input_backprop_index < // in_end) FastBoundsCheck(input_backprop_index - in_start, in_end - in_start); input_backprop_flat(input_backprop_index) += out_backprop_flat(index); } } }; const int64 shard_cost = params.tensor_in_rows * params.tensor_in_cols * params.depth * params.window_rows * params.window_cols; Shard(worker_threads.num_threads, worker_threads.workers, params.tensor_in_batch, shard_cost, shard); } // The operation to compute MaxPool gradients. // It takes three inputs: // - The original input tensor // - The original output tensor // - Backprop tensor for output // It produces one output: backprop tensor for input. template <class Device, class T> class MaxPoolingGradOp : public OpKernel { public: explicit MaxPoolingGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument("Default MaxPoolingGradOp only supports NHWC ", "on device type ", DeviceTypeString(context->device_type()))); if (context->num_inputs() == 3) { OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window strides field must " "specify 4 dimensions")); OP_REQUIRES(context, ksize_[0] == 1 && stride_[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES( context, ksize_[3] == 1 && stride_[3] == 1, errors::Unimplemented( "MaxPoolingGrad is not yet supported on the depth dimension.")); } OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); if (padding_ == Padding::EXPLICIT) { OP_REQUIRES_OK( context, context->GetAttr("explicit_paddings", &explicit_paddings_)); OP_REQUIRES_OK(context, CheckValidPadding(padding_, explicit_paddings_, /*num_dims=*/4, data_format_)); } } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); const Tensor& tensor_out = context->input(1); const Tensor& out_backprop = context->input(2); // For maxpooling, tensor_in should have 4 dimensions. OP_REQUIRES(context, tensor_in.dims() == 4, errors::InvalidArgument("tensor_in must be 4-dimensional")); OP_REQUIRES(context, tensor_out.dims() == 4, errors::InvalidArgument("tensor_out must be 4-dimensional")); // For maxpooling, out_backprop should have 4 dimensions. 
OP_REQUIRES(context, out_backprop.dims() == 4, errors::InvalidArgument("out_backprop must be 4-dimensional")); const TensorShape& output_shape = tensor_in.shape(); Tensor tensor_out_dup; OP_REQUIRES_OK(context, context->forward_input_or_allocate_temp( {1}, DataTypeToEnum<T>::v(), tensor_out.shape(), &tensor_out_dup)); Tensor tensor_out_arg_max; OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<int64>::v(), tensor_out.shape(), &tensor_out_arg_max)); std::vector<int32> ksize = ksize_; std::vector<int32> stride = stride_; if (context->num_inputs() == 5) { const Tensor& tensor_ksize = context->input(3); auto value_ksize = tensor_ksize.flat<int32>(); ksize.resize(tensor_ksize.shape().num_elements()); std::copy_n(&value_ksize(0), ksize.size(), ksize.begin()); const Tensor& tensor_stride = context->input(4); auto value_stride = tensor_stride.flat<int32>(); stride.resize(tensor_stride.shape().num_elements()); std::copy_n(&value_stride(0), stride.size(), stride.begin()); } OP_REQUIRES(context, ksize.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES(context, stride.size() == 4, errors::InvalidArgument("Sliding window strides field must " "specify 4 dimensions")); OP_REQUIRES(context, ksize[0] == 1 && stride[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES( context, ksize[3] == 1 && stride[3] == 1, errors::Unimplemented( "MaxPoolingGrad is not yet supported on the depth dimension.")); PoolParameters params{context, ksize, stride, padding_, explicit_paddings_, FORMAT_NHWC, tensor_in.shape()}; if (!context->status().ok()) { return; } Tensor* output = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, output_shape, &output)); SpatialMaxPoolWithArgMaxHelper<CPUDevice, T, int64>( context, &tensor_out_dup, &tensor_out_arg_max, output, tensor_in, out_backprop, params, true); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; std::vector<int64> explicit_paddings_; TensorFormat data_format_; }; #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <class T> class MaxPoolingGradOp<Eigen::GpuDevice, T> : public OpKernel { public: typedef Eigen::GpuDevice Device; explicit MaxPoolingGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); if (context->num_inputs() == 3) { OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window strides field must " "specify 4 dimensions")); const int32 ksize_n = GetTensorDim(ksize_, data_format_, 'N'); const int32 stride_n = GetTensorDim(stride_, data_format_, 'N'); OP_REQUIRES(context, ksize_n == 1 && stride_n == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); } OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); if (padding_ == Padding::EXPLICIT) { OP_REQUIRES_OK( context, context->GetAttr("explicit_paddings", &explicit_paddings_)); OP_REQUIRES_OK(context, CheckValidPadding(padding_, explicit_paddings_, /*num_dims=*/4, data_format_)); } 
TF_CHECK_OK(ReadBoolFromEnvVar("TF_ENABLE_MAXPOOL_NANPROP", false, &propagate_nans_)); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); const Tensor& tensor_out = context->input(1); const Tensor& out_backprop = context->input(2); // For maxpooling, tensor_in should have 4 dimensions. OP_REQUIRES(context, tensor_in.dims() == 4, errors::InvalidArgument("tensor_in must be 4-dimensional 4")); OP_REQUIRES(context, tensor_out.dims() == 4, errors::InvalidArgument("tensor_out must be 4-dimensional")); // For maxpooling, out_backprop should have 4 dimensions. OP_REQUIRES(context, out_backprop.dims() == 4, errors::InvalidArgument("out_backprop must be 4-dimensional")); TensorShape output_shape = tensor_in.shape(); std::vector<int32> ksize = ksize_; std::vector<int32> stride = stride_; if (context->num_inputs() == 5) { const Tensor& tensor_ksize = context->input(3); auto value_ksize = tensor_ksize.flat<int32>(); ksize.resize(tensor_ksize.shape().num_elements()); std::copy_n(&value_ksize(0), ksize.size(), ksize.begin()); const Tensor& tensor_stride = context->input(4); auto value_stride = tensor_stride.flat<int32>(); stride.resize(tensor_stride.shape().num_elements()); std::copy_n(&value_stride(0), stride.size(), stride.begin()); } OP_REQUIRES(context, ksize.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES(context, stride.size() == 4, errors::InvalidArgument("Sliding window strides field must " "specify 4 dimensions")); const int32 ksize_n = GetTensorDim(ksize, data_format_, 'N'); const int32 stride_n = GetTensorDim(stride, data_format_, 'N'); OP_REQUIRES(context, ksize_n == 1 && stride_n == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); int64 pad_top, pad_bottom, pad_left, pad_right; if (padding_ == Padding::EXPLICIT) { GetExplicitPaddingForDim(explicit_paddings_, data_format_, 'H', /*pad_top=*/&pad_top, /*pad_bottom=*/&pad_bottom); GetExplicitPaddingForDim(explicit_paddings_, data_format_, 'W', /*pad_left=*/&pad_left, /*pad_right=*/&pad_right); } DnnPoolingGradOp<T>::Compute(context, se::dnn::PoolingMode::kMaximum, ksize, stride, padding_, explicit_paddings_, data_format_, &tensor_in, &tensor_out, out_backprop, output_shape, propagate_nans_); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; std::vector<int64> explicit_paddings_; TensorFormat data_format_; bool propagate_nans_; }; #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM // The operation to compute gradient of MaxPool gradients. // It takes three inputs: // - The original input tensor // - The original output tensor // - Backprop tensor for output gradients // It produces one output: backprop tensor for output gradient. 
template <class Device, class T> class MaxPoolingGradGradOp : public OpKernel { public: explicit MaxPoolingGradGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Default MaxPoolingGradGradOp only supports NHWC ", "on device type ", DeviceTypeString(context->device_type()))); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); if (context->num_inputs() == 3) { OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window strides field must " "specify 4 dimensions")); OP_REQUIRES(context, ksize_[0] == 1 && stride_[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES(context, ksize_[3] == 1 && stride_[3] == 1, errors::Unimplemented("MaxPoolingGradGrad is not yet " "supported on the depth dimension.")); } } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); const Tensor& tensor_out = context->input(1); const Tensor& out_grad_backprop = context->input(2); // For maxpooling, tensor_in should have 4 dimensions. OP_REQUIRES(context, tensor_in.dims() == 4, errors::InvalidArgument("tensor_in must be 4-dimensional")); OP_REQUIRES(context, tensor_out.dims() == 4, errors::InvalidArgument("tensor_out must be 4-dimensional")); // For maxpooling, out_grad_backprop should have 4 dimensions. 
OP_REQUIRES( context, out_grad_backprop.dims() == 4, errors::InvalidArgument("out_grad_backprop must be 4-dimensional")); std::vector<int32> ksize = ksize_; std::vector<int32> stride = stride_; if (context->num_inputs() == 5) { const Tensor& tensor_ksize = context->input(3); auto value_ksize = tensor_ksize.flat<int32>(); ksize.resize(tensor_ksize.shape().num_elements()); std::copy_n(&value_ksize(0), ksize.size(), ksize.begin()); const Tensor& tensor_stride = context->input(4); auto value_stride = tensor_stride.flat<int32>(); stride.resize(tensor_stride.shape().num_elements()); std::copy_n(&value_stride(0), stride.size(), stride.begin()); } OP_REQUIRES(context, ksize.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES(context, stride.size() == 4, errors::InvalidArgument("Sliding window strides field must " "specify 4 dimensions")); OP_REQUIRES(context, ksize[0] == 1 && stride[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES( context, ksize[3] == 1 && stride[3] == 1, errors::Unimplemented( "MaxPoolingGrad is not yet supported on the depth dimension.")); PoolParameters params{context, ksize, stride, padding_, /*explicit_paddings=*/{}, FORMAT_NHWC, tensor_in.shape()}; Tensor* output = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {2}, 0, tensor_out.shape(), &output)); SpatialMaxPoolGradGrad(context, output, tensor_in, tensor_out, out_grad_backprop, params, padding_); } private: void SpatialMaxPoolGradGrad(OpKernelContext* context, Tensor* bottom_diff, const Tensor& tensor_in, const Tensor& tensor_out, const Tensor& top_diff, const PoolParameters& params, const Padding& padding) { typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> EigenMatrixMap; ConstEigenMatrixMap in_mat( tensor_in.flat<T>().data(), params.depth, params.tensor_in_cols * params.tensor_in_rows * params.tensor_in_batch); ConstEigenMatrixMap out_mat( tensor_out.flat<T>().data(), params.depth, params.out_width * params.out_height * params.tensor_in_batch); ConstEigenMatrixMap top_diff_mat( top_diff.flat<T>().data(), params.depth, params.tensor_in_cols * params.tensor_in_rows * params.tensor_in_batch); EigenMatrixMap bottom_diff_mat( bottom_diff->flat<T>().data(), params.depth, params.out_width * params.out_height * params.tensor_in_batch); const DeviceBase::CpuWorkerThreads& worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); // The following code basically does the following: // 1. Flattens the input, output, top_diff and bottom_diff tensors into // two dimensional arrays. // tensor_in_as_matrix: // depth by (tensor_in_cols * tensor_in_rows * tensor_in_batch) // tensor_out_as_matrix: // depth by (out_width * out_height * tensor_in_batch) // top_diff_as_matrix: // depth by (tensor_in_cols * tensor_in_rows * tensor_in_batch) // bottom_diff_as_matrix: // depth by (out_width * out_height * tensor_in_batch) // // 2. Walks through the set of columns in the flattened // tensor_in_as_matrix, tensor_out_as_matrix, top_diff_as_matrix // and updates the column(s) corresponding to the maximum values in // tensor_out_as_matrix with the corresponding values in // top_diff_as_matrix. 
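    // Illustrative example of the routing described above: take one depth
    // slice with
    //   input = [1, 3, 2, 5], window = 2, stride = 2  =>  output = [3, 5].
    // For each output cell the loop below finds the first input position in
    // its window whose value equals the output maximum (index 1 for the
    // first cell, index 3 for the second) and copies the corresponding
    // top_diff entry, so bottom_diff = [top_diff[1], top_diff[3]].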
auto shard = [&params, &in_mat, &out_mat, &top_diff_mat, &bottom_diff_mat]( int64 start, int64 limit) { const int32 depth = params.depth; const int32 in_rows = params.tensor_in_rows; const int32 in_cols = params.tensor_in_cols; const int32 pad_top = params.pad_top; const int32 pad_left = params.pad_left; const int32 window_rows = params.window_rows; const int32 window_cols = params.window_cols; const int32 row_stride = params.row_stride; const int32 col_stride = params.col_stride; const int32 out_height = params.out_height; const int32 out_width = params.out_width; { // Initializes the output grad backprop tensor with 0. const int32 output_image_size = out_height * out_width * params.depth; EigenMatrixMap bottom_diff_shard( bottom_diff_mat.data() + start * output_image_size, 1, (limit - start) * output_image_size); bottom_diff_shard.setZero(); } for (int b = start; b < limit; ++b) { for (int ph = 0; ph < out_height; ++ph) { for (int pw = 0; pw < out_width; ++pw) { // (h_start, h_end) * (w_start, w_end) is the range that the input // vector projects to. int h_start = ph * row_stride - pad_top; const int h_end = std::min(h_start + window_rows, in_rows); int w_start = pw * col_stride - pad_left; const int w_end = std::min(w_start + window_cols, in_cols); h_start = std::max(h_start, 0); w_start = std::max(w_start, 0); const int out_index = (b * out_height + ph) * out_width + pw; // Find value corresponding to the input maximum in top_diff. for (int d = 0; d < depth; ++d) { const T& output_ref = out_mat.coeffRef(d, out_index); bool should_stop = false; for (int h = h_start; h < h_end && !should_stop; ++h) { for (int w = w_start; w < w_end && !should_stop; ++w) { const int in_index = (b * in_rows + h) * in_cols + w; const T& input_ref = in_mat.coeffRef(d, in_index); if (output_ref == input_ref) { T& bottom_diff_ref = bottom_diff_mat.coeffRef(d, out_index); bottom_diff_ref = top_diff_mat.coeffRef(d, in_index); should_stop = true; } } } } } } } }; const int64 shard_cost = params.out_width * params.out_height * params.depth * params.window_rows * params.window_cols; Shard(worker_threads.num_threads, worker_threads.workers, params.tensor_in_batch, shard_cost, shard); } std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <class T> class MaxPoolingGradGradOp<Eigen::GpuDevice, T> : public OpKernel { public: typedef Eigen::GpuDevice Device; explicit MaxPoolingGradGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); if (context->num_inputs() == 3) { OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window strides field must " "specify 4 dimensions")); const int32 ksize_n = GetTensorDim(ksize_, data_format_, 'N'); const int32 stride_n = GetTensorDim(stride_, data_format_, 'N'); OP_REQUIRES(context, ksize_n == 1 && stride_n == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); } OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); } void Compute(OpKernelContext* context) override { 
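    // GPU path: unlike the CPU kernel above, no argmax tensor is materialized
    // here; MaxPoolGradBackwardNoMask (invoked below) recomputes the max
    // locations from tensor_in / tensor_out and routes out_grad_backprop
    // accordingly.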
const Tensor& tensor_in = context->input(0); const Tensor& tensor_out = context->input(1); const Tensor& out_grad_backprop = context->input(2); // For maxpooling, tensor_in should have 4 dimensions. OP_REQUIRES(context, tensor_in.dims() == 4, errors::InvalidArgument("tensor_in must be 4-dimensional 4")); OP_REQUIRES(context, tensor_out.dims() == 4, errors::InvalidArgument("tensor_out must be 4-dimensional")); // For maxpooling, out_grad_backprop should have 4 dimensions. OP_REQUIRES( context, out_grad_backprop.dims() == 4, errors::InvalidArgument("out_grad_backprop must be 4-dimensional")); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, tensor_out.shape(), &output)); std::vector<int32> ksize = ksize_; std::vector<int32> stride = stride_; if (context->num_inputs() == 5) { const Tensor& tensor_ksize = context->input(3); auto value_ksize = tensor_ksize.flat<int32>(); ksize.resize(tensor_ksize.shape().num_elements()); std::copy_n(&value_ksize(0), ksize.size(), ksize.begin()); const Tensor& tensor_stride = context->input(4); auto value_stride = tensor_stride.flat<int32>(); stride.resize(tensor_stride.shape().num_elements()); std::copy_n(&value_stride(0), stride.size(), stride.begin()); } OP_REQUIRES(context, ksize.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES(context, stride.size() == 4, errors::InvalidArgument("Sliding window strides field must " "specify 4 dimensions")); const int32 ksize_n = GetTensorDim(ksize, data_format_, 'N'); const int32 stride_n = GetTensorDim(stride, data_format_, 'N'); OP_REQUIRES(context, ksize_n == 1 && stride_n == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); PoolParameters params{context, ksize, stride, padding_, /*explicit_paddings=*/{}, data_format_, tensor_in.shape()}; functor::MaxPoolGradBackwardNoMask<T>()( data_format_, tensor_in.flat<T>().data(), tensor_out.flat<T>().data(), params.tensor_in_batch, params.out_height, params.out_width, params.depth, params.tensor_in_rows, params.tensor_in_cols, params.window_rows, params.window_cols, params.row_stride, params.col_stride, params.pad_top, params.pad_left, out_grad_backprop.flat<T>().data(), output->flat<T>().data(), context->eigen_device<Eigen::GpuDevice>()); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; bool use_dnn_; }; #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename Device, typename T> struct LaunchMaxPoolingNoMask; template <typename Device, typename T> class MaxPoolingNoMaskOp : public OpKernel { public: explicit MaxPoolingNoMaskOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Default MaxPoolingNoMaskOp only supports NHWC on device type ", DeviceTypeString(context->device_type()))); OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window stride field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", 
&padding_)); OP_REQUIRES(context, ksize_[0] == 1 && stride_[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES( context, padding_ != EXPLICIT, errors::Unimplemented( "Explicit padding is not supported for MaxPoolingNoMaskOp.")); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); PoolParameters params{context, ksize_, stride_, padding_, /*explicit_paddings=*/{}, data_format_, tensor_in.shape()}; if (!context->status().ok()) { return; } TensorShape out_shape({params.tensor_in_batch, params.out_height, params.out_width, params.depth}); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output)); LaunchMaxPoolingNoMask<Device, T>::launch(context, params, tensor_in, output); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; template <typename Device, typename T> class MaxPoolingNoMaskV2Op : public OpKernel { public: explicit MaxPoolingNoMaskV2Op(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Default MaxPoolingNoMaskOp only supports NHWC on device type ", DeviceTypeString(context->device_type()))); if (context->num_inputs() == 1) { OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window stride field must " "specify 4 dimensions")); OP_REQUIRES(context, ksize_[0] == 1 && stride_[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); } OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); std::vector<int32> ksize = ksize_; std::vector<int32> stride = stride_; if (context->num_inputs() != 1) { const Tensor& tensor_ksize = context->input(1); auto value_ksize = tensor_ksize.flat<int32>(); ksize.resize(tensor_ksize.shape().num_elements()); std::copy_n(&value_ksize(0), ksize.size(), ksize.begin()); const Tensor& tensor_stride = context->input(2); auto value_stride = tensor_stride.flat<int32>(); stride.resize(tensor_stride.shape().num_elements()); std::copy_n(&value_stride(0), stride.size(), stride.begin()); } OP_REQUIRES(context, ksize.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES(context, stride.size() == 4, errors::InvalidArgument("Sliding window stride field must " "specify 4 dimensions")); OP_REQUIRES(context, ksize[0] == 1 && stride[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); PoolParameters params{context, ksize, stride, padding_, /*explicit_paddings=*/{}, data_format_, tensor_in.shape()}; if (!context->status().ok()) { return; } TensorShape out_shape({params.tensor_in_batch, params.out_height, params.out_width, params.depth}); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output)); LaunchMaxPoolingNoMask<Device, T>::launch(context, params, tensor_in, 
output); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; template <typename Device, typename T, typename Targmax> struct LaunchMaxPoolingWithArgmax; template <typename T, typename Targmax> struct LaunchMaxPoolingWithArgmax<CPUDevice, T, Targmax> { static void launch(OpKernelContext* context, const PoolParameters& params, const Tensor& input, Tensor* output, Tensor* argmax, bool propagate_nans, bool include_batch_in_index) { Tensor unused; SpatialMaxPoolWithArgMaxHelper<CPUDevice, T, Targmax>( context, output, argmax, /*input_backprop=*/nullptr, input, unused, params, include_batch_in_index); } }; template <typename Device, typename T, typename Targmax> class MaxPoolingWithArgmaxOp : public OpKernel { public: explicit MaxPoolingWithArgmaxOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window stride field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, ksize_[0] == 1 && stride_[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES_OK(context, context->GetAttr("include_batch_in_index", &include_batch_in_index_)); TF_CHECK_OK(ReadBoolFromEnvVar("TF_ENABLE_MAXPOOL_NANPROP", false, &propagate_nans_)); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); PoolParameters params{context, ksize_, stride_, padding_, /*explicit_paddings=*/{}, FORMAT_NHWC, tensor_in.shape()}; if (!context->status().ok()) { return; } TensorShape out_shape({params.tensor_in_batch, params.out_height, params.out_width, params.depth}); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output)); Tensor* argmax = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, out_shape, &argmax)); LaunchMaxPoolingWithArgmax<Device, T, Targmax>::launch( context, params, tensor_in, output, argmax, propagate_nans_, include_batch_in_index_); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; bool propagate_nans_; bool include_batch_in_index_; }; template <typename Device, typename T> struct LaunchMaxPoolingGradWithArgmax; template <typename T> struct LaunchMaxPoolingGradWithArgmax<CPUDevice, T> { typedef Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> EigenMatrixMap; static void launch(OpKernelContext* context, const PoolParameters& params, const Tensor& grad_in, const Tensor& argmax, Tensor* grad_out, const bool include_batch_in_index) { const DeviceBase::CpuWorkerThreads& worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); auto shard = [&grad_in, &argmax, &grad_out, include_batch_in_index]( int64 start, int64 limit) { const int64 batch_size = GetTensorDim(grad_out->shape(), FORMAT_NHWC, 'N'); const int64 output_size_per_batch = grad_out->NumElements() / batch_size; const int64 input_size_per_batch = grad_in.NumElements() / batch_size; { auto grad_out_flat = grad_out->flat<T>(); auto argmax_flat = argmax.flat<int64>(); auto grad_in_flat = grad_in.flat<T>(); const int64 output_start = start * output_size_per_batch; const int64 output_end = limit * output_size_per_batch; 
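        // Scatter sketch for the loop below: argmax holds flat NHWC offsets.
        // When include_batch_in_index is false these are per-image offsets,
        // so for an input element in batch b the gradient is accumulated at
        // argmax(index) + b * output_size_per_batch in grad_out.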
EigenMatrixMap inputShard(grad_out_flat.data() + output_start, 1, output_end - output_start); inputShard.setConstant(T(0)); const int input_start = start * input_size_per_batch; const int input_end = limit * input_size_per_batch; for (int64 index = input_start; index < input_end; index++) { if (index >= argmax.NumElements()) { break; } int64 grad_out_index = argmax_flat(index); if (!include_batch_in_index) { const int64 cur_batch = index / input_size_per_batch; grad_out_index += cur_batch * output_size_per_batch; } CHECK(grad_out_index >= output_start && grad_out_index < output_end) << "Invalid output gradient index: " << grad_out_index << ", " << output_start << ", " << output_end; grad_out_flat(grad_out_index) += grad_in_flat(index); } } }; const int64 batch_size = GetTensorDim(grad_out->shape(), FORMAT_NHWC, 'N'); const int64 shard_cost = grad_out->NumElements() / batch_size; Shard(worker_threads.num_threads, worker_threads.workers, batch_size, shard_cost, shard); } }; // TODO(b/175733711): Support int32 argmax type in MaxPoolGradWithArgmax op. template <typename Device, typename T> class MaxPoolingGradWithArgmaxOp : public OpKernel { public: explicit MaxPoolingGradWithArgmaxOp(OpKernelConstruction* context) : OpKernel(context) { string data_format_str; auto status = context->GetAttr("data_format", &data_format_str); if (status.ok()) { OP_REQUIRES(context, FormatFromString(data_format_str, &data_format_), errors::InvalidArgument("Invalid data format")); } OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window stride field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, ksize_[0] == 1 && stride_[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES_OK(context, context->GetAttr("include_batch_in_index", &include_batch_in_index_)); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); const Tensor& grad_in = context->input(1); const Tensor& argmax = context->input(2); PoolParameters params{context, ksize_, stride_, padding_, /*explicit_paddings=*/{}, FORMAT_NHWC, tensor_in.shape()}; if (!context->status().ok()) { return; } TensorShape out_shape({params.tensor_in_batch, params.tensor_in_rows, params.tensor_in_cols, params.depth}); Tensor* grad_out = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, out_shape, &grad_out)); if (out_shape.num_elements() == 0) return; // nothing to be done LaunchMaxPoolingGradWithArgmax<Device, T>::launch( context, params, grad_in, argmax, grad_out, include_batch_in_index_); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; bool include_batch_in_index_; }; template <typename Device, typename T> struct LaunchMaxPoolingGradGradWithArgmax; template <typename Device, typename T> class MaxPoolingGradGradWithArgmaxOp : public OpKernel { public: explicit MaxPoolingGradGradWithArgmaxOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, 
context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window stride field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, ksize_[0] == 1 && stride_[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES_OK(context, context->GetAttr("include_batch_in_index", &include_batch_in_index_)); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); const Tensor& grad_in = context->input(1); const Tensor& argmax = context->input(2); PoolParameters params{context, ksize_, stride_, padding_, /*explicit_paddings=*/{}, FORMAT_NHWC, tensor_in.shape()}; if (!context->status().ok()) { return; } TensorShape out_shape({params.tensor_in_batch, params.out_height, params.out_width, params.depth}); Tensor* grad_out = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, out_shape, &grad_out)); LaunchMaxPoolingGradGradWithArgmax<Device, T>::launch( context, params, grad_in, argmax, grad_out, include_batch_in_index_); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; bool include_batch_in_index_; }; #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename T> class MaxPoolingNoMaskOp<GPUDevice, T> : public OpKernel { public: typedef GPUDevice Device; explicit MaxPoolingNoMaskOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window stride field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES_OK(context, context->GetAttr("explicit_paddings", &explicit_paddings_)); const int32 ksize_n = GetTensorDim(ksize_, data_format_, 'N'); const int32 stride_n = GetTensorDim(stride_, data_format_, 'N'); OP_REQUIRES(context, ksize_n == 1 && stride_n == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); TF_CHECK_OK(ReadBoolFromEnvVar("TF_ENABLE_MAXPOOL_NANPROP", false, &propagate_nans_)); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); PoolParameters params{ context, ksize_, stride_, padding_, explicit_paddings_, data_format_, tensor_in.shape()}; if (!context->status().ok()) { return; } TensorShape out_shape = ShapeFromFormat(data_format_, params.tensor_in_batch, params.out_height, params.out_width, params.depth); // Assuming qint8 <--> NCHW_VECT_C (int8x4) here. constexpr bool is_int8x4 = std::is_same<T, qint8>::value; OP_REQUIRES(context, (is_int8x4 == (data_format_ == FORMAT_NCHW_VECT_C)), errors::InvalidArgument( "qint8 should be used with data_format NCHW_VECT_C.")); #if CUDNN_VERSION >= 7300 DnnPoolingOp<T>::Compute(context, se::dnn::PoolingMode::kMaximum, ksize_, stride_, padding_, explicit_paddings_, data_format_, tensor_in, out_shape, propagate_nans_); #else // These is_int8x4 checks avoid linker errors for missing qint8 kernels. 
if (!is_int8x4 && data_format_ == FORMAT_NCHW) { DnnPoolingOp<T>::Compute(context, se::dnn::PoolingMode::kMaximum, ksize_, stride_, padding_, explicit_paddings_, data_format_, tensor_in, out_shape, propagate_nans_); } else { #if !defined(TENSORFLOW_USE_ROCM) OP_REQUIRES(context, padding_ != EXPLICIT, errors::Unimplemented("Explicit padding is not supported ", "when CUDNN is not enabled.")); #endif Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output)); if (is_int8x4) { LaunchMaxPoolingNoMask_NCHW_VECT_C<Device>::launch(context, params, tensor_in, output); } else if (data_format_ == FORMAT_NHWC) { LaunchMaxPoolingNoMask<Device, T>::launch(context, params, tensor_in, output, propagate_nans_); } else { LOG(FATAL) << "MaxPool currently only supports the following (layout, " "type) combinations: (NHWC, non-qint8), " "(NCHW, non-qint8) or (NCHW_VECT_C, qint8). The " "requested combination (" << ToString(data_format_) << ", " << DataTypeString(DataTypeToEnum<T>::v()) << ") is not supported."; } } #endif } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; std::vector<int64> explicit_paddings_; TensorFormat data_format_; bool propagate_nans_; }; template <typename T> class MaxPoolingNoMaskV2Op<GPUDevice, T> : public OpKernel { public: typedef GPUDevice Device; explicit MaxPoolingNoMaskV2Op(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); if (context->num_inputs() == 1) { OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window stride field must " "specify 4 dimensions")); const int32 ksize_n = GetTensorDim(ksize_, data_format_, 'N'); const int32 stride_n = GetTensorDim(stride_, data_format_, 'N'); OP_REQUIRES(context, ksize_n == 1 && stride_n == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); } OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); TF_CHECK_OK(ReadBoolFromEnvVar("TF_ENABLE_MAXPOOL_NANPROP", false, &propagate_nans_)); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); std::vector<int32> ksize = ksize_; std::vector<int32> stride = stride_; if (context->num_inputs() != 1) { const Tensor& tensor_ksize = context->input(1); auto value_ksize = tensor_ksize.flat<int32>(); ksize.resize(tensor_ksize.shape().num_elements()); std::copy_n(&value_ksize(0), ksize.size(), ksize.begin()); const Tensor& tensor_stride = context->input(2); auto value_stride = tensor_stride.flat<int32>(); stride.resize(tensor_stride.shape().num_elements()); std::copy_n(&value_stride(0), stride.size(), stride.begin()); } OP_REQUIRES(context, ksize.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES(context, stride.size() == 4, errors::InvalidArgument("Sliding window stride field must " "specify 4 dimensions")); const int32 ksize_n = GetTensorDim(ksize, data_format_, 'N'); const int32 stride_n = GetTensorDim(stride, data_format_, 'N'); OP_REQUIRES(context, ksize_n == 1 && stride_n == 1, errors::Unimplemented( "Pooling is 
not yet supported on the batch dimension.")); PoolParameters params{context, ksize, stride, padding_, /*explicit_paddings=*/{}, data_format_, tensor_in.shape()}; if (!context->status().ok()) { return; } TensorShape out_shape = ShapeFromFormat(data_format_, params.tensor_in_batch, params.out_height, params.out_width, params.depth); if (data_format_ == FORMAT_NCHW) { DnnPoolingOp<T>::Compute(context, se::dnn::PoolingMode::kMaximum, ksize, stride, padding_, explicit_paddings_, data_format_, tensor_in, out_shape, propagate_nans_); } else { CHECK(data_format_ == FORMAT_NHWC) << "MaxPool only supports NCHW or NHWC format"; Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output)); LaunchMaxPoolingNoMask<Device, T>::launch(context, params, tensor_in, output, propagate_nans_); } } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; std::vector<int64> explicit_paddings_; TensorFormat data_format_; bool propagate_nans_; }; template <typename T> struct LaunchMaxPoolingNoMask<Eigen::GpuDevice, T> { static void launch(OpKernelContext* context, const PoolParameters& params, const Tensor& input, Tensor* output, bool propagate_nans) { bool status = functor::MaxPoolForwardWithOptionalArgmax<T>()( input.flat<T>().data(), params.tensor_in_batch, params.tensor_in_rows, params.tensor_in_cols, params.depth, params.out_height, params.out_width, params.window_rows, params.window_cols, params.row_stride, params.col_stride, params.pad_top, params.pad_left, output->flat<T>().data(), nullptr, context->eigen_gpu_device(), propagate_nans, false); if (!status) { context->SetStatus( errors::Internal("Failed launching MaxPoolForwardNoMask")); } } }; template <typename T> struct LaunchMaxPoolingWithArgmax<Eigen::GpuDevice, T, int64> { static void launch(OpKernelContext* context, const PoolParameters& params, const Tensor& input, Tensor* output, Tensor* argmax, bool propagate_nans, bool include_batch_in_index) { bool status = functor::MaxPoolForwardWithOptionalArgmax<T>()( input.flat<T>().data(), params.tensor_in_batch, params.tensor_in_rows, params.tensor_in_cols, params.depth, params.out_height, params.out_width, params.window_rows, params.window_cols, params.row_stride, params.col_stride, params.pad_top, params.pad_left, output->flat<T>().data(), reinterpret_cast<int64*>(argmax->flat<int64>().data()), context->eigen_gpu_device(), propagate_nans, include_batch_in_index); if (!status) { context->SetStatus( errors::Internal("Failed launching MaxPoolForwardWithArgmax")); } } }; template <typename T> struct LaunchMaxPoolingGradWithArgmax<Eigen::GpuDevice, T> { static void launch(OpKernelContext* context, const PoolParameters& params, const Tensor& grad_in, const Tensor& argmax, Tensor* grad_out, const bool include_batch_in_index) { const int input_size = params.tensor_in_batch * params.tensor_in_rows * params.tensor_in_cols * params.depth; const int output_size = params.tensor_in_batch * params.out_height * params.out_width * params.depth; const int top_offset = params.out_height * params.out_width * params.depth; const int bottom_offset = params.tensor_in_rows * params.tensor_in_cols * params.depth; bool status = functor::MaxPoolBackwardWithArgmax<T>()( output_size, input_size, grad_in.flat<T>().data(), reinterpret_cast<const int64*>(argmax.flat<int64>().data()), top_offset, bottom_offset, grad_out->flat<T>().data(), context->eigen_gpu_device(), include_batch_in_index); if (!status) { context->SetStatus( errors::Internal("Failed launching 
MaxPoolBackwardWithArgmax")); } } }; template <typename T> struct LaunchMaxPoolingGradGradWithArgmax<Eigen::GpuDevice, T> { static void launch(OpKernelContext* context, const PoolParameters& params, const Tensor& grad_in, const Tensor& argmax, Tensor* grad_out, const bool include_batch_in_index) { const int input_size = params.tensor_in_batch * params.tensor_in_rows * params.tensor_in_cols * params.depth; const int output_size = params.tensor_in_batch * params.out_height * params.out_width * params.depth; const int top_offset = params.tensor_in_rows * params.tensor_in_cols * params.depth; const int bottom_offset = params.out_width * params.out_height * params.depth; bool status = functor::MaxPoolGradBackwardWithArgmax<T>()( output_size, input_size, grad_in.flat<T>().data(), reinterpret_cast<const int64*>(argmax.flat<int64>().data()), top_offset, bottom_offset, grad_out->flat<T>().data(), context->eigen_gpu_device(), include_batch_in_index); if (!status) { context->SetStatus( errors::Internal("Failed launching MaxPoolGradBackwardWithArgmax")); } } }; #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_MAX_POOL_KERNELS(D, T) \ REGISTER_KERNEL_BUILDER( \ Name("MaxPoolGrad").Device(DEVICE_##D).TypeConstraint<T>("T"), \ MaxPoolingGradOp<D##Device, T>); \ REGISTER_KERNEL_BUILDER( \ Name("MaxPoolGradGrad").Device(DEVICE_##D).TypeConstraint<T>("T"), \ MaxPoolingGradGradOp<D##Device, T>); \ REGISTER_KERNEL_BUILDER(Name("MaxPoolGradV2") \ .Device(DEVICE_##D) \ .HostMemory("ksize") \ .HostMemory("strides") \ .TypeConstraint<T>("T"), \ MaxPoolingGradOp<D##Device, T>); \ REGISTER_KERNEL_BUILDER(Name("MaxPoolGradGradV2") \ .Device(DEVICE_##D) \ .HostMemory("ksize") \ .HostMemory("strides") \ .TypeConstraint<T>("T"), \ MaxPoolingGradGradOp<D##Device, T>) \ REGISTER_KERNEL_BUILDER(Name("MaxPoolWithArgmax") \ .Device(DEVICE_##D) \ .TypeConstraint<int64>("Targmax") \ .TypeConstraint<T>("T"), \ MaxPoolingWithArgmaxOp<D##Device, T, int64>); \ REGISTER_KERNEL_BUILDER(Name("MaxPoolGradWithArgmax") \ .Device(DEVICE_##D) \ .TypeConstraint<T>("T") \ .TypeConstraint<int64>("Targmax"), \ MaxPoolingGradWithArgmaxOp<D##Device, T>); // Below kernels implemented only for CPU device. #define REGISTER_CPU_ONLY_POOL_KERNELS(T) \ REGISTER_KERNEL_BUILDER( \ Name("MaxPool").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ MaxPoolingOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER( \ Name("MaxPoolV2").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ MaxPoolingV2Op<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("MaxPoolWithArgmax") \ .Device(DEVICE_CPU) \ .TypeConstraint<int32>("Targmax") \ .TypeConstraint<T>("T"), \ MaxPoolingWithArgmaxOp<CPUDevice, T, int32>); TF_CALL_REAL_NUMBER_TYPES(REGISTER_CPU_ONLY_POOL_KERNELS); #undef REGISTER_CPU_ONLY_POOL_KERNELS #define REGISTER_CPU_MAX_POOL_KERNELS(T) REGISTER_MAX_POOL_KERNELS(CPU, T); TF_CALL_REAL_NUMBER_TYPES(REGISTER_CPU_MAX_POOL_KERNELS); #undef REGISTER_CPU_KERNELS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM // Forward declarations for the functor specializations for GPU. 
namespace functor { #define DECLARE_GPU_SPEC(T) \ template <> \ void SpatialMaxPooling<Eigen::GpuDevice, T>::operator()( \ const Eigen::GpuDevice& d, typename TTypes<T, 4>::Tensor output, \ typename TTypes<T, 4>::ConstTensor input, int window_rows, \ int window_cols, int row_stride, int col_stride, \ const Eigen::PaddingType& padding); \ extern template struct SpatialMaxPooling<Eigen::GpuDevice, T>; TF_CALL_GPU_NUMBER_TYPES(DECLARE_GPU_SPEC); #undef DECLARE_GPU_SPEC } // namespace functor #define REGISTER_GPU_MAX_POOL_KERNELS(T) REGISTER_MAX_POOL_KERNELS(GPU, T) TF_CALL_GPU_NUMBER_TYPES(REGISTER_GPU_MAX_POOL_KERNELS); #undef REGISTER_GPU_MAX_POOL_KERNELS // Below kernels currently implemented only for GPU device. // Note(jiayq): Currently, the Caffe custom implementation is faster than the // default Eigen implementation so we are using the custom kernel as the // default. However, you can explicitly invoke the eigen version using // kernel_label_map. #define REGISTER_GPU_ONLY_POOL_KERNELS(T) \ REGISTER_KERNEL_BUILDER(Name("MaxPool") \ .Device(DEVICE_GPU) \ .TypeConstraint<T>("T") \ .Label("eigen_tensor"), \ MaxPoolingOp<GPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("MaxPoolV2") \ .Device(DEVICE_GPU) \ .HostMemory("ksize") \ .HostMemory("strides") \ .TypeConstraint<T>("T") \ .Label("eigen_tensor"), \ MaxPoolingV2Op<GPUDevice, T>); \ REGISTER_KERNEL_BUILDER( \ Name("MaxPool").Device(DEVICE_GPU).TypeConstraint<T>("T"), \ MaxPoolingNoMaskOp<GPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("MaxPoolV2") \ .Device(DEVICE_GPU) \ .HostMemory("ksize") \ .HostMemory("strides") \ .TypeConstraint<T>("T"), \ MaxPoolingNoMaskV2Op<GPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("MaxPoolGradGradWithArgmax") \ .Device(DEVICE_GPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<int64>("Targmax"), \ MaxPoolingGradGradWithArgmaxOp<GPUDevice, T>); TF_CALL_GPU_NUMBER_TYPES(REGISTER_GPU_ONLY_POOL_KERNELS); // TODO(b/65847473): Re-enable once the underlying build error is fixed. #if !defined(PLATFORM_WINDOWS) REGISTER_KERNEL_BUILDER( Name("MaxPool").Device(DEVICE_GPU).TypeConstraint<qint8>("T"), MaxPoolingNoMaskOp<GPUDevice, qint8>); REGISTER_KERNEL_BUILDER(Name("MaxPoolV2") .Device(DEVICE_GPU) .HostMemory("ksize") .HostMemory("strides") .TypeConstraint<qint8>("T"), MaxPoolingV2Op<GPUDevice, qint8>); REGISTER_KERNEL_BUILDER(Name("MaxPoolV2") .Device(DEVICE_GPU) .HostMemory("ksize") .HostMemory("strides") .TypeConstraint<qint8>("T") .Label("eigen_tensor"), MaxPoolingV2Op<GPUDevice, qint8>); #endif // !defined(PLATFORM_WINDOWS) #undef REGISTER_GPU_ONLY_POOL_KERNELS #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM #undef REGISTER_MAX_POOL_KERNELS } // namespace tensorflow
null
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // See docs in ../ops/nn_ops.cc. #define EIGEN_USE_THREADS #include "tensorflow/core/kernels/maxpooling_op.h" #include <type_traits> #include <vector> #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_slice.h" #include "tensorflow/core/kernels/conv_2d.h" #include "tensorflow/core/kernels/eigen_pooling.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/kernels/pooling_ops_common.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/array_slice.h" #include "tensorflow/core/util/env_var.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/tensor_format.h" #include "tensorflow/core/util/use_cudnn.h" #if GOOGLE_CUDA #include "third_party/gpus/cudnn/cudnn.h" #endif // GOOGLE_CUDA #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #include "tensorflow/core/kernels/maxpooling_op_gpu.h" #include "tensorflow/core/kernels/pooling_ops_common_gpu.h" #include "tensorflow/core/platform/stream_executor.h" #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; typedef Eigen::GpuDevice GPUDevice; const int kInvalidMaxPoolingIndex = -1; template <typename Device, typename T, typename Targmax> static void SpatialMaxPoolWithArgMaxHelper( OpKernelContext* context, Tensor* output, Tensor* output_arg_max, Tensor* input_backprop, const Tensor& tensor_in, const Tensor& out_backprop, const PoolParameters& params, const bool include_batch_in_index) { if (input_backprop != nullptr) { OP_REQUIRES( context, include_batch_in_index, errors::Internal( "SpatialMaxPoolWithArgMaxHelper requires include_batch_in_index " "to be True when input_backprop != nullptr")); OP_REQUIRES( context, (std::is_same<Targmax, int64>::value), errors::Internal("SpatialMaxPoolWithArgMaxHelper requires Targmax " "to be int64 when input_backprop != nullptr")); } typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> EigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<Targmax, Eigen::Dynamic, Eigen::Dynamic>> EigenIndexMatrixMap; ConstEigenMatrixMap in_mat( tensor_in.flat<T>().data(), params.depth, params.tensor_in_cols * params.tensor_in_rows * params.tensor_in_batch); EigenMatrixMap out_mat( output->flat<T>().data(), params.depth, params.out_width * params.out_height * params.tensor_in_batch); EigenIndexMatrixMap out_arg_max_mat( output_arg_max->flat<Targmax>().data(), 
params.depth, params.out_width * params.out_height * params.tensor_in_batch); const DeviceBase::CpuWorkerThreads& worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); // The following code basically does the following: // 1. Flattens the input and output tensors into two dimensional arrays. // tensor_in_as_matrix: // depth by (tensor_in_cols * tensor_in_rows * tensor_in_batch) // output_as_matrix: // depth by (out_width * out_height * tensor_in_batch) // // 2. Walks through the set of columns in the flattened tensor_in_as_matrix, // and updates the corresponding column(s) in output_as_matrix with the // max value. auto shard = [&params, &in_mat, &out_mat, &out_arg_max_mat, &input_backprop, &output_arg_max, &out_backprop, include_batch_in_index](int64 start, int64 limit) { const int32 depth = params.depth; const int32 in_rows = params.tensor_in_rows; const int32 in_cols = params.tensor_in_cols; const int32 pad_top = params.pad_top; const int32 pad_left = params.pad_left; const int32 window_rows = params.window_rows; const int32 window_cols = params.window_cols; const int32 row_stride = params.row_stride; const int32 col_stride = params.col_stride; const int32 out_height = params.out_height; const int32 out_width = params.out_width; { // Initializes the output tensor with MIN<T>. const int32 output_image_size = out_height * out_width * depth; EigenMatrixMap out_shard(out_mat.data() + start * output_image_size, 1, (limit - start) * output_image_size); out_shard.setConstant(Eigen::NumTraits<T>::lowest()); EigenIndexMatrixMap out_arg_max_shard( out_arg_max_mat.data() + start * output_image_size, 1, (limit - start) * output_image_size); out_arg_max_shard.setConstant(kInvalidMaxPoolingIndex); } for (int64 b = start; b < limit; ++b) { for (int h = 0; h < in_rows; ++h) { for (int w = 0; w < in_cols; ++w) { // (h_start, h_end) * (w_start, w_end) is the range that the input // vector projects to. const int hpad = h + pad_top; const int wpad = w + pad_left; const int h_start = (hpad < window_rows) ? 0 : (hpad - window_rows) / row_stride + 1; const int h_end = std::min(hpad / row_stride + 1, out_height); const int w_start = (wpad < window_cols) ? 0 : (wpad - window_cols) / col_stride + 1; const int w_end = std::min(wpad / col_stride + 1, out_width); // compute elementwise max const int64 in_index = (b * in_rows + h) * in_cols + w; for (int ph = h_start; ph < h_end; ++ph) { const int64 out_index_base = (b * out_height + ph) * out_width; for (int pw = w_start; pw < w_end; ++pw) { const int64 out_index = out_index_base + pw; /// NOTES(zhengxq): not using the eigen matrix operation for /// now. for (int d = 0; d < depth; ++d) { const T& input_ref = in_mat.coeffRef(d, in_index); T& output_ref = out_mat.coeffRef(d, out_index); Targmax& out_arg_max_ref = out_arg_max_mat.coeffRef(d, out_index); if (output_ref < input_ref || out_arg_max_ref == kInvalidMaxPoolingIndex) { output_ref = input_ref; if (include_batch_in_index) { out_arg_max_ref = in_index * depth + d; } else { out_arg_max_ref = (h * in_cols + w) * depth + d; } } } } } } } } if (input_backprop != nullptr) { auto input_backprop_flat = input_backprop->flat<T>(); auto out_arg_max_flat = output_arg_max->flat<int64>(); auto out_backprop_flat = out_backprop.flat<T>(); // Initialize output to 0. 
const int64 in_size = in_rows * in_cols * depth; const int64 in_start = start * in_size; const int64 in_end = limit * in_size; EigenMatrixMap in_shard(input_backprop_flat.data() + in_start, 1, in_end - in_start); in_shard.setConstant(T(0)); // Backpropagate. const int out_size = out_height * out_width * depth; const int out_start = start * out_size; const int out_end = limit * out_size; for (int index = out_start; index < out_end; ++index) { int input_backprop_index = out_arg_max_flat(index); // Although this check is in the inner loop, it is worth its value // so we don't end up with memory corruptions. Our benchmark shows that // the performance impact is quite small // CHECK(input_backprop_index >= in_start && input_backprop_index < // in_end) FastBoundsCheck(input_backprop_index - in_start, in_end - in_start); if (index < out_backprop.NumElements()) { input_backprop_flat(input_backprop_index) += out_backprop_flat(index); } } } }; const int64 shard_cost = params.tensor_in_rows * params.tensor_in_cols * params.depth * params.window_rows * params.window_cols; Shard(worker_threads.num_threads, worker_threads.workers, params.tensor_in_batch, shard_cost, shard); } // The operation to compute MaxPool gradients. // It takes three inputs: // - The original input tensor // - The original output tensor // - Backprop tensor for output // It produces one output: backprop tensor for input. template <class Device, class T> class MaxPoolingGradOp : public OpKernel { public: explicit MaxPoolingGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument("Default MaxPoolingGradOp only supports NHWC ", "on device type ", DeviceTypeString(context->device_type()))); if (context->num_inputs() == 3) { OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window strides field must " "specify 4 dimensions")); OP_REQUIRES(context, ksize_[0] == 1 && stride_[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES( context, ksize_[3] == 1 && stride_[3] == 1, errors::Unimplemented( "MaxPoolingGrad is not yet supported on the depth dimension.")); } OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); if (padding_ == Padding::EXPLICIT) { OP_REQUIRES_OK( context, context->GetAttr("explicit_paddings", &explicit_paddings_)); OP_REQUIRES_OK(context, CheckValidPadding(padding_, explicit_paddings_, /*num_dims=*/4, data_format_)); } } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); const Tensor& tensor_out = context->input(1); const Tensor& out_backprop = context->input(2); // For maxpooling, tensor_in should have 4 dimensions. OP_REQUIRES(context, tensor_in.dims() == 4, errors::InvalidArgument("tensor_in must be 4-dimensional")); OP_REQUIRES(context, tensor_out.dims() == 4, errors::InvalidArgument("tensor_out must be 4-dimensional")); // For maxpooling, out_backprop should have 4 dimensions. 
OP_REQUIRES(context, out_backprop.dims() == 4, errors::InvalidArgument("out_backprop must be 4-dimensional")); const TensorShape& output_shape = tensor_in.shape(); Tensor tensor_out_dup; OP_REQUIRES_OK(context, context->forward_input_or_allocate_temp( {1}, DataTypeToEnum<T>::v(), tensor_out.shape(), &tensor_out_dup)); Tensor tensor_out_arg_max; OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<int64>::v(), tensor_out.shape(), &tensor_out_arg_max)); std::vector<int32> ksize = ksize_; std::vector<int32> stride = stride_; if (context->num_inputs() == 5) { const Tensor& tensor_ksize = context->input(3); auto value_ksize = tensor_ksize.flat<int32>(); ksize.resize(tensor_ksize.shape().num_elements()); std::copy_n(&value_ksize(0), ksize.size(), ksize.begin()); const Tensor& tensor_stride = context->input(4); auto value_stride = tensor_stride.flat<int32>(); stride.resize(tensor_stride.shape().num_elements()); std::copy_n(&value_stride(0), stride.size(), stride.begin()); } OP_REQUIRES(context, ksize.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES(context, stride.size() == 4, errors::InvalidArgument("Sliding window strides field must " "specify 4 dimensions")); OP_REQUIRES(context, ksize[0] == 1 && stride[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES( context, ksize[3] == 1 && stride[3] == 1, errors::Unimplemented( "MaxPoolingGrad is not yet supported on the depth dimension.")); PoolParameters params{context, ksize, stride, padding_, explicit_paddings_, FORMAT_NHWC, tensor_in.shape()}; if (!context->status().ok()) { return; } Tensor* output = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, output_shape, &output)); SpatialMaxPoolWithArgMaxHelper<CPUDevice, T, int64>( context, &tensor_out_dup, &tensor_out_arg_max, output, tensor_in, out_backprop, params, true); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; std::vector<int64> explicit_paddings_; TensorFormat data_format_; }; #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <class T> class MaxPoolingGradOp<Eigen::GpuDevice, T> : public OpKernel { public: typedef Eigen::GpuDevice Device; explicit MaxPoolingGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); if (context->num_inputs() == 3) { OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window strides field must " "specify 4 dimensions")); const int32 ksize_n = GetTensorDim(ksize_, data_format_, 'N'); const int32 stride_n = GetTensorDim(stride_, data_format_, 'N'); OP_REQUIRES(context, ksize_n == 1 && stride_n == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); } OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); if (padding_ == Padding::EXPLICIT) { OP_REQUIRES_OK( context, context->GetAttr("explicit_paddings", &explicit_paddings_)); OP_REQUIRES_OK(context, CheckValidPadding(padding_, explicit_paddings_, /*num_dims=*/4, data_format_)); } 
TF_CHECK_OK(ReadBoolFromEnvVar("TF_ENABLE_MAXPOOL_NANPROP", false, &propagate_nans_)); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); const Tensor& tensor_out = context->input(1); const Tensor& out_backprop = context->input(2); // For maxpooling, tensor_in should have 4 dimensions. OP_REQUIRES(context, tensor_in.dims() == 4, errors::InvalidArgument("tensor_in must be 4-dimensional 4")); OP_REQUIRES(context, tensor_out.dims() == 4, errors::InvalidArgument("tensor_out must be 4-dimensional")); // For maxpooling, out_backprop should have 4 dimensions. OP_REQUIRES(context, out_backprop.dims() == 4, errors::InvalidArgument("out_backprop must be 4-dimensional")); TensorShape output_shape = tensor_in.shape(); std::vector<int32> ksize = ksize_; std::vector<int32> stride = stride_; if (context->num_inputs() == 5) { const Tensor& tensor_ksize = context->input(3); auto value_ksize = tensor_ksize.flat<int32>(); ksize.resize(tensor_ksize.shape().num_elements()); std::copy_n(&value_ksize(0), ksize.size(), ksize.begin()); const Tensor& tensor_stride = context->input(4); auto value_stride = tensor_stride.flat<int32>(); stride.resize(tensor_stride.shape().num_elements()); std::copy_n(&value_stride(0), stride.size(), stride.begin()); } OP_REQUIRES(context, ksize.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES(context, stride.size() == 4, errors::InvalidArgument("Sliding window strides field must " "specify 4 dimensions")); const int32 ksize_n = GetTensorDim(ksize, data_format_, 'N'); const int32 stride_n = GetTensorDim(stride, data_format_, 'N'); OP_REQUIRES(context, ksize_n == 1 && stride_n == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); int64 pad_top, pad_bottom, pad_left, pad_right; if (padding_ == Padding::EXPLICIT) { GetExplicitPaddingForDim(explicit_paddings_, data_format_, 'H', /*pad_top=*/&pad_top, /*pad_bottom=*/&pad_bottom); GetExplicitPaddingForDim(explicit_paddings_, data_format_, 'W', /*pad_left=*/&pad_left, /*pad_right=*/&pad_right); } DnnPoolingGradOp<T>::Compute(context, se::dnn::PoolingMode::kMaximum, ksize, stride, padding_, explicit_paddings_, data_format_, &tensor_in, &tensor_out, out_backprop, output_shape, propagate_nans_); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; std::vector<int64> explicit_paddings_; TensorFormat data_format_; bool propagate_nans_; }; #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM // The operation to compute gradient of MaxPool gradients. // It takes three inputs: // - The original input tensor // - The original output tensor // - Backprop tensor for output gradients // It produces one output: backprop tensor for output gradient. 
template <class Device, class T> class MaxPoolingGradGradOp : public OpKernel { public: explicit MaxPoolingGradGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Default MaxPoolingGradGradOp only supports NHWC ", "on device type ", DeviceTypeString(context->device_type()))); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); if (context->num_inputs() == 3) { OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window strides field must " "specify 4 dimensions")); OP_REQUIRES(context, ksize_[0] == 1 && stride_[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES(context, ksize_[3] == 1 && stride_[3] == 1, errors::Unimplemented("MaxPoolingGradGrad is not yet " "supported on the depth dimension.")); } } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); const Tensor& tensor_out = context->input(1); const Tensor& out_grad_backprop = context->input(2); // For maxpooling, tensor_in should have 4 dimensions. OP_REQUIRES(context, tensor_in.dims() == 4, errors::InvalidArgument("tensor_in must be 4-dimensional")); OP_REQUIRES(context, tensor_out.dims() == 4, errors::InvalidArgument("tensor_out must be 4-dimensional")); // For maxpooling, out_grad_backprop should have 4 dimensions. 
OP_REQUIRES( context, out_grad_backprop.dims() == 4, errors::InvalidArgument("out_grad_backprop must be 4-dimensional")); std::vector<int32> ksize = ksize_; std::vector<int32> stride = stride_; if (context->num_inputs() == 5) { const Tensor& tensor_ksize = context->input(3); auto value_ksize = tensor_ksize.flat<int32>(); ksize.resize(tensor_ksize.shape().num_elements()); std::copy_n(&value_ksize(0), ksize.size(), ksize.begin()); const Tensor& tensor_stride = context->input(4); auto value_stride = tensor_stride.flat<int32>(); stride.resize(tensor_stride.shape().num_elements()); std::copy_n(&value_stride(0), stride.size(), stride.begin()); } OP_REQUIRES(context, ksize.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES(context, stride.size() == 4, errors::InvalidArgument("Sliding window strides field must " "specify 4 dimensions")); OP_REQUIRES(context, ksize[0] == 1 && stride[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES( context, ksize[3] == 1 && stride[3] == 1, errors::Unimplemented( "MaxPoolingGrad is not yet supported on the depth dimension.")); PoolParameters params{context, ksize, stride, padding_, /*explicit_paddings=*/{}, FORMAT_NHWC, tensor_in.shape()}; Tensor* output = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {2}, 0, tensor_out.shape(), &output)); SpatialMaxPoolGradGrad(context, output, tensor_in, tensor_out, out_grad_backprop, params, padding_); } private: void SpatialMaxPoolGradGrad(OpKernelContext* context, Tensor* bottom_diff, const Tensor& tensor_in, const Tensor& tensor_out, const Tensor& top_diff, const PoolParameters& params, const Padding& padding) { typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> EigenMatrixMap; ConstEigenMatrixMap in_mat( tensor_in.flat<T>().data(), params.depth, params.tensor_in_cols * params.tensor_in_rows * params.tensor_in_batch); ConstEigenMatrixMap out_mat( tensor_out.flat<T>().data(), params.depth, params.out_width * params.out_height * params.tensor_in_batch); ConstEigenMatrixMap top_diff_mat( top_diff.flat<T>().data(), params.depth, params.tensor_in_cols * params.tensor_in_rows * params.tensor_in_batch); EigenMatrixMap bottom_diff_mat( bottom_diff->flat<T>().data(), params.depth, params.out_width * params.out_height * params.tensor_in_batch); const DeviceBase::CpuWorkerThreads& worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); // The following code basically does the following: // 1. Flattens the input, output, top_diff and bottom_diff tensors into // two dimensional arrays. // tensor_in_as_matrix: // depth by (tensor_in_cols * tensor_in_rows * tensor_in_batch) // tensor_out_as_matrix: // depth by (out_width * out_height * tensor_in_batch) // top_diff_as_matrix: // depth by (tensor_in_cols * tensor_in_rows * tensor_in_batch) // bottom_diff_as_matrix: // depth by (out_width * out_height * tensor_in_batch) // // 2. Walks through the set of columns in the flattened // tensor_in_as_matrix, tensor_out_as_matrix, top_diff_as_matrix // and updates the column(s) corresponding to the maximum values in // tensor_out_as_matrix with the corresponding values in // top_diff_as_matrix. 
auto shard = [&params, &in_mat, &out_mat, &top_diff_mat, &bottom_diff_mat]( int64 start, int64 limit) { const int32 depth = params.depth; const int32 in_rows = params.tensor_in_rows; const int32 in_cols = params.tensor_in_cols; const int32 pad_top = params.pad_top; const int32 pad_left = params.pad_left; const int32 window_rows = params.window_rows; const int32 window_cols = params.window_cols; const int32 row_stride = params.row_stride; const int32 col_stride = params.col_stride; const int32 out_height = params.out_height; const int32 out_width = params.out_width; { // Initializes the output grad backprop tensor with 0. const int32 output_image_size = out_height * out_width * params.depth; EigenMatrixMap bottom_diff_shard( bottom_diff_mat.data() + start * output_image_size, 1, (limit - start) * output_image_size); bottom_diff_shard.setZero(); } for (int b = start; b < limit; ++b) { for (int ph = 0; ph < out_height; ++ph) { for (int pw = 0; pw < out_width; ++pw) { // (h_start, h_end) * (w_start, w_end) is the range that the input // vector projects to. int h_start = ph * row_stride - pad_top; const int h_end = std::min(h_start + window_rows, in_rows); int w_start = pw * col_stride - pad_left; const int w_end = std::min(w_start + window_cols, in_cols); h_start = std::max(h_start, 0); w_start = std::max(w_start, 0); const int out_index = (b * out_height + ph) * out_width + pw; // Find value corresponding to the input maximum in top_diff. for (int d = 0; d < depth; ++d) { const T& output_ref = out_mat.coeffRef(d, out_index); bool should_stop = false; for (int h = h_start; h < h_end && !should_stop; ++h) { for (int w = w_start; w < w_end && !should_stop; ++w) { const int in_index = (b * in_rows + h) * in_cols + w; const T& input_ref = in_mat.coeffRef(d, in_index); if (output_ref == input_ref) { T& bottom_diff_ref = bottom_diff_mat.coeffRef(d, out_index); bottom_diff_ref = top_diff_mat.coeffRef(d, in_index); should_stop = true; } } } } } } } }; const int64 shard_cost = params.out_width * params.out_height * params.depth * params.window_rows * params.window_cols; Shard(worker_threads.num_threads, worker_threads.workers, params.tensor_in_batch, shard_cost, shard); } std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <class T> class MaxPoolingGradGradOp<Eigen::GpuDevice, T> : public OpKernel { public: typedef Eigen::GpuDevice Device; explicit MaxPoolingGradGradOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); if (context->num_inputs() == 3) { OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window strides field must " "specify 4 dimensions")); const int32 ksize_n = GetTensorDim(ksize_, data_format_, 'N'); const int32 stride_n = GetTensorDim(stride_, data_format_, 'N'); OP_REQUIRES(context, ksize_n == 1 && stride_n == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); } OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); } void Compute(OpKernelContext* context) override { 
const Tensor& tensor_in = context->input(0); const Tensor& tensor_out = context->input(1); const Tensor& out_grad_backprop = context->input(2); // For maxpooling, tensor_in should have 4 dimensions. OP_REQUIRES(context, tensor_in.dims() == 4, errors::InvalidArgument("tensor_in must be 4-dimensional 4")); OP_REQUIRES(context, tensor_out.dims() == 4, errors::InvalidArgument("tensor_out must be 4-dimensional")); // For maxpooling, out_grad_backprop should have 4 dimensions. OP_REQUIRES( context, out_grad_backprop.dims() == 4, errors::InvalidArgument("out_grad_backprop must be 4-dimensional")); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, tensor_out.shape(), &output)); std::vector<int32> ksize = ksize_; std::vector<int32> stride = stride_; if (context->num_inputs() == 5) { const Tensor& tensor_ksize = context->input(3); auto value_ksize = tensor_ksize.flat<int32>(); ksize.resize(tensor_ksize.shape().num_elements()); std::copy_n(&value_ksize(0), ksize.size(), ksize.begin()); const Tensor& tensor_stride = context->input(4); auto value_stride = tensor_stride.flat<int32>(); stride.resize(tensor_stride.shape().num_elements()); std::copy_n(&value_stride(0), stride.size(), stride.begin()); } OP_REQUIRES(context, ksize.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES(context, stride.size() == 4, errors::InvalidArgument("Sliding window strides field must " "specify 4 dimensions")); const int32 ksize_n = GetTensorDim(ksize, data_format_, 'N'); const int32 stride_n = GetTensorDim(stride, data_format_, 'N'); OP_REQUIRES(context, ksize_n == 1 && stride_n == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); PoolParameters params{context, ksize, stride, padding_, /*explicit_paddings=*/{}, data_format_, tensor_in.shape()}; functor::MaxPoolGradBackwardNoMask<T>()( data_format_, tensor_in.flat<T>().data(), tensor_out.flat<T>().data(), params.tensor_in_batch, params.out_height, params.out_width, params.depth, params.tensor_in_rows, params.tensor_in_cols, params.window_rows, params.window_cols, params.row_stride, params.col_stride, params.pad_top, params.pad_left, out_grad_backprop.flat<T>().data(), output->flat<T>().data(), context->eigen_device<Eigen::GpuDevice>()); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; bool use_dnn_; }; #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename Device, typename T> struct LaunchMaxPoolingNoMask; template <typename Device, typename T> class MaxPoolingNoMaskOp : public OpKernel { public: explicit MaxPoolingNoMaskOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Default MaxPoolingNoMaskOp only supports NHWC on device type ", DeviceTypeString(context->device_type()))); OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window stride field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", 
&padding_)); OP_REQUIRES(context, ksize_[0] == 1 && stride_[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES( context, padding_ != EXPLICIT, errors::Unimplemented( "Explicit padding is not supported for MaxPoolingNoMaskOp.")); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); PoolParameters params{context, ksize_, stride_, padding_, /*explicit_paddings=*/{}, data_format_, tensor_in.shape()}; if (!context->status().ok()) { return; } TensorShape out_shape({params.tensor_in_batch, params.out_height, params.out_width, params.depth}); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output)); LaunchMaxPoolingNoMask<Device, T>::launch(context, params, tensor_in, output); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; template <typename Device, typename T> class MaxPoolingNoMaskV2Op : public OpKernel { public: explicit MaxPoolingNoMaskV2Op(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES( context, data_format_ == FORMAT_NHWC, errors::InvalidArgument( "Default MaxPoolingNoMaskOp only supports NHWC on device type ", DeviceTypeString(context->device_type()))); if (context->num_inputs() == 1) { OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window stride field must " "specify 4 dimensions")); OP_REQUIRES(context, ksize_[0] == 1 && stride_[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); } OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); std::vector<int32> ksize = ksize_; std::vector<int32> stride = stride_; if (context->num_inputs() != 1) { const Tensor& tensor_ksize = context->input(1); auto value_ksize = tensor_ksize.flat<int32>(); ksize.resize(tensor_ksize.shape().num_elements()); std::copy_n(&value_ksize(0), ksize.size(), ksize.begin()); const Tensor& tensor_stride = context->input(2); auto value_stride = tensor_stride.flat<int32>(); stride.resize(tensor_stride.shape().num_elements()); std::copy_n(&value_stride(0), stride.size(), stride.begin()); } OP_REQUIRES(context, ksize.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES(context, stride.size() == 4, errors::InvalidArgument("Sliding window stride field must " "specify 4 dimensions")); OP_REQUIRES(context, ksize[0] == 1 && stride[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); PoolParameters params{context, ksize, stride, padding_, /*explicit_paddings=*/{}, data_format_, tensor_in.shape()}; if (!context->status().ok()) { return; } TensorShape out_shape({params.tensor_in_batch, params.out_height, params.out_width, params.depth}); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output)); LaunchMaxPoolingNoMask<Device, T>::launch(context, params, tensor_in, 
output); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; }; template <typename Device, typename T, typename Targmax> struct LaunchMaxPoolingWithArgmax; template <typename T, typename Targmax> struct LaunchMaxPoolingWithArgmax<CPUDevice, T, Targmax> { static void launch(OpKernelContext* context, const PoolParameters& params, const Tensor& input, Tensor* output, Tensor* argmax, bool propagate_nans, bool include_batch_in_index) { Tensor unused; SpatialMaxPoolWithArgMaxHelper<CPUDevice, T, Targmax>( context, output, argmax, /*input_backprop=*/nullptr, input, unused, params, include_batch_in_index); } }; template <typename Device, typename T, typename Targmax> class MaxPoolingWithArgmaxOp : public OpKernel { public: explicit MaxPoolingWithArgmaxOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window stride field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, ksize_[0] == 1 && stride_[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES_OK(context, context->GetAttr("include_batch_in_index", &include_batch_in_index_)); TF_CHECK_OK(ReadBoolFromEnvVar("TF_ENABLE_MAXPOOL_NANPROP", false, &propagate_nans_)); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); PoolParameters params{context, ksize_, stride_, padding_, /*explicit_paddings=*/{}, FORMAT_NHWC, tensor_in.shape()}; if (!context->status().ok()) { return; } TensorShape out_shape({params.tensor_in_batch, params.out_height, params.out_width, params.depth}); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output)); Tensor* argmax = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, out_shape, &argmax)); LaunchMaxPoolingWithArgmax<Device, T, Targmax>::launch( context, params, tensor_in, output, argmax, propagate_nans_, include_batch_in_index_); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; bool propagate_nans_; bool include_batch_in_index_; }; template <typename Device, typename T> struct LaunchMaxPoolingGradWithArgmax; template <typename T> struct LaunchMaxPoolingGradWithArgmax<CPUDevice, T> { typedef Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> EigenMatrixMap; static void launch(OpKernelContext* context, const PoolParameters& params, const Tensor& grad_in, const Tensor& argmax, Tensor* grad_out, const bool include_batch_in_index) { const DeviceBase::CpuWorkerThreads& worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); auto shard = [&grad_in, &argmax, &grad_out, include_batch_in_index]( int64 start, int64 limit) { const int64 batch_size = GetTensorDim(grad_out->shape(), FORMAT_NHWC, 'N'); const int64 output_size_per_batch = grad_out->NumElements() / batch_size; const int64 input_size_per_batch = grad_in.NumElements() / batch_size; { auto grad_out_flat = grad_out->flat<T>(); auto argmax_flat = argmax.flat<int64>(); auto grad_in_flat = grad_in.flat<T>(); const int64 output_start = start * output_size_per_batch; const int64 output_end = limit * output_size_per_batch; 
EigenMatrixMap inputShard(grad_out_flat.data() + output_start, 1, output_end - output_start); inputShard.setConstant(T(0)); const int input_start = start * input_size_per_batch; const int input_end = limit * input_size_per_batch; for (int64 index = input_start; index < input_end; index++) { if (index >= argmax.NumElements()) { break; } int64 grad_out_index = argmax_flat(index); if (!include_batch_in_index) { const int64 cur_batch = index / input_size_per_batch; grad_out_index += cur_batch * output_size_per_batch; } CHECK(grad_out_index >= output_start && grad_out_index < output_end) << "Invalid output gradient index: " << grad_out_index << ", " << output_start << ", " << output_end; grad_out_flat(grad_out_index) += grad_in_flat(index); } } }; const int64 batch_size = GetTensorDim(grad_out->shape(), FORMAT_NHWC, 'N'); const int64 shard_cost = grad_out->NumElements() / batch_size; Shard(worker_threads.num_threads, worker_threads.workers, batch_size, shard_cost, shard); } }; // TODO(b/175733711): Support int32 argmax type in MaxPoolGradWithArgmax op. template <typename Device, typename T> class MaxPoolingGradWithArgmaxOp : public OpKernel { public: explicit MaxPoolingGradWithArgmaxOp(OpKernelConstruction* context) : OpKernel(context) { string data_format_str; auto status = context->GetAttr("data_format", &data_format_str); if (status.ok()) { OP_REQUIRES(context, FormatFromString(data_format_str, &data_format_), errors::InvalidArgument("Invalid data format")); } OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window stride field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, ksize_[0] == 1 && stride_[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES_OK(context, context->GetAttr("include_batch_in_index", &include_batch_in_index_)); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); const Tensor& grad_in = context->input(1); const Tensor& argmax = context->input(2); PoolParameters params{context, ksize_, stride_, padding_, /*explicit_paddings=*/{}, FORMAT_NHWC, tensor_in.shape()}; if (!context->status().ok()) { return; } TensorShape out_shape({params.tensor_in_batch, params.tensor_in_rows, params.tensor_in_cols, params.depth}); Tensor* grad_out = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, out_shape, &grad_out)); if (out_shape.num_elements() == 0) return; // nothing to be done LaunchMaxPoolingGradWithArgmax<Device, T>::launch( context, params, grad_in, argmax, grad_out, include_batch_in_index_); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; TensorFormat data_format_; bool include_batch_in_index_; }; template <typename Device, typename T> struct LaunchMaxPoolingGradGradWithArgmax; template <typename Device, typename T> class MaxPoolingGradGradWithArgmaxOp : public OpKernel { public: explicit MaxPoolingGradGradWithArgmaxOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, 
context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window stride field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES(context, ksize_[0] == 1 && stride_[0] == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); OP_REQUIRES_OK(context, context->GetAttr("include_batch_in_index", &include_batch_in_index_)); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); const Tensor& grad_in = context->input(1); const Tensor& argmax = context->input(2); PoolParameters params{context, ksize_, stride_, padding_, /*explicit_paddings=*/{}, FORMAT_NHWC, tensor_in.shape()}; if (!context->status().ok()) { return; } TensorShape out_shape({params.tensor_in_batch, params.out_height, params.out_width, params.depth}); Tensor* grad_out = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, out_shape, &grad_out)); LaunchMaxPoolingGradGradWithArgmax<Device, T>::launch( context, params, grad_in, argmax, grad_out, include_batch_in_index_); } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; bool include_batch_in_index_; }; #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM template <typename T> class MaxPoolingNoMaskOp<GPUDevice, T> : public OpKernel { public: typedef GPUDevice Device; explicit MaxPoolingNoMaskOp(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window stride field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); OP_REQUIRES_OK(context, context->GetAttr("explicit_paddings", &explicit_paddings_)); const int32 ksize_n = GetTensorDim(ksize_, data_format_, 'N'); const int32 stride_n = GetTensorDim(stride_, data_format_, 'N'); OP_REQUIRES(context, ksize_n == 1 && stride_n == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); TF_CHECK_OK(ReadBoolFromEnvVar("TF_ENABLE_MAXPOOL_NANPROP", false, &propagate_nans_)); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); PoolParameters params{ context, ksize_, stride_, padding_, explicit_paddings_, data_format_, tensor_in.shape()}; if (!context->status().ok()) { return; } TensorShape out_shape = ShapeFromFormat(data_format_, params.tensor_in_batch, params.out_height, params.out_width, params.depth); // Assuming qint8 <--> NCHW_VECT_C (int8x4) here. constexpr bool is_int8x4 = std::is_same<T, qint8>::value; OP_REQUIRES(context, (is_int8x4 == (data_format_ == FORMAT_NCHW_VECT_C)), errors::InvalidArgument( "qint8 should be used with data_format NCHW_VECT_C.")); #if CUDNN_VERSION >= 7300 DnnPoolingOp<T>::Compute(context, se::dnn::PoolingMode::kMaximum, ksize_, stride_, padding_, explicit_paddings_, data_format_, tensor_in, out_shape, propagate_nans_); #else // These is_int8x4 checks avoid linker errors for missing qint8 kernels. 
if (!is_int8x4 && data_format_ == FORMAT_NCHW) { DnnPoolingOp<T>::Compute(context, se::dnn::PoolingMode::kMaximum, ksize_, stride_, padding_, explicit_paddings_, data_format_, tensor_in, out_shape, propagate_nans_); } else { #if !defined(TENSORFLOW_USE_ROCM) OP_REQUIRES(context, padding_ != EXPLICIT, errors::Unimplemented("Explicit padding is not supported ", "when CUDNN is not enabled.")); #endif Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output)); if (is_int8x4) { LaunchMaxPoolingNoMask_NCHW_VECT_C<Device>::launch(context, params, tensor_in, output); } else if (data_format_ == FORMAT_NHWC) { LaunchMaxPoolingNoMask<Device, T>::launch(context, params, tensor_in, output, propagate_nans_); } else { LOG(FATAL) << "MaxPool currently only supports the following (layout, " "type) combinations: (NHWC, non-qint8), " "(NCHW, non-qint8) or (NCHW_VECT_C, qint8). The " "requested combination (" << ToString(data_format_) << ", " << DataTypeString(DataTypeToEnum<T>::v()) << ") is not supported."; } } #endif } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; std::vector<int64> explicit_paddings_; TensorFormat data_format_; bool propagate_nans_; }; template <typename T> class MaxPoolingNoMaskV2Op<GPUDevice, T> : public OpKernel { public: typedef GPUDevice Device; explicit MaxPoolingNoMaskV2Op(OpKernelConstruction* context) : OpKernel(context) { string data_format; OP_REQUIRES_OK(context, context->GetAttr("data_format", &data_format)); OP_REQUIRES(context, FormatFromString(data_format, &data_format_), errors::InvalidArgument("Invalid data format")); if (context->num_inputs() == 1) { OP_REQUIRES_OK(context, context->GetAttr("ksize", &ksize_)); OP_REQUIRES(context, ksize_.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES_OK(context, context->GetAttr("strides", &stride_)); OP_REQUIRES(context, stride_.size() == 4, errors::InvalidArgument("Sliding window stride field must " "specify 4 dimensions")); const int32 ksize_n = GetTensorDim(ksize_, data_format_, 'N'); const int32 stride_n = GetTensorDim(stride_, data_format_, 'N'); OP_REQUIRES(context, ksize_n == 1 && stride_n == 1, errors::Unimplemented( "Pooling is not yet supported on the batch dimension.")); } OP_REQUIRES_OK(context, context->GetAttr("padding", &padding_)); TF_CHECK_OK(ReadBoolFromEnvVar("TF_ENABLE_MAXPOOL_NANPROP", false, &propagate_nans_)); } void Compute(OpKernelContext* context) override { const Tensor& tensor_in = context->input(0); std::vector<int32> ksize = ksize_; std::vector<int32> stride = stride_; if (context->num_inputs() != 1) { const Tensor& tensor_ksize = context->input(1); auto value_ksize = tensor_ksize.flat<int32>(); ksize.resize(tensor_ksize.shape().num_elements()); std::copy_n(&value_ksize(0), ksize.size(), ksize.begin()); const Tensor& tensor_stride = context->input(2); auto value_stride = tensor_stride.flat<int32>(); stride.resize(tensor_stride.shape().num_elements()); std::copy_n(&value_stride(0), stride.size(), stride.begin()); } OP_REQUIRES(context, ksize.size() == 4, errors::InvalidArgument("Sliding window ksize field must " "specify 4 dimensions")); OP_REQUIRES(context, stride.size() == 4, errors::InvalidArgument("Sliding window stride field must " "specify 4 dimensions")); const int32 ksize_n = GetTensorDim(ksize, data_format_, 'N'); const int32 stride_n = GetTensorDim(stride, data_format_, 'N'); OP_REQUIRES(context, ksize_n == 1 && stride_n == 1, errors::Unimplemented( "Pooling is 
not yet supported on the batch dimension.")); PoolParameters params{context, ksize, stride, padding_, /*explicit_paddings=*/{}, data_format_, tensor_in.shape()}; if (!context->status().ok()) { return; } TensorShape out_shape = ShapeFromFormat(data_format_, params.tensor_in_batch, params.out_height, params.out_width, params.depth); if (data_format_ == FORMAT_NCHW) { DnnPoolingOp<T>::Compute(context, se::dnn::PoolingMode::kMaximum, ksize, stride, padding_, explicit_paddings_, data_format_, tensor_in, out_shape, propagate_nans_); } else { CHECK(data_format_ == FORMAT_NHWC) << "MaxPool only supports NCHW or NHWC format"; Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, out_shape, &output)); LaunchMaxPoolingNoMask<Device, T>::launch(context, params, tensor_in, output, propagate_nans_); } } private: std::vector<int32> ksize_; std::vector<int32> stride_; Padding padding_; std::vector<int64> explicit_paddings_; TensorFormat data_format_; bool propagate_nans_; }; template <typename T> struct LaunchMaxPoolingNoMask<Eigen::GpuDevice, T> { static void launch(OpKernelContext* context, const PoolParameters& params, const Tensor& input, Tensor* output, bool propagate_nans) { bool status = functor::MaxPoolForwardWithOptionalArgmax<T>()( input.flat<T>().data(), params.tensor_in_batch, params.tensor_in_rows, params.tensor_in_cols, params.depth, params.out_height, params.out_width, params.window_rows, params.window_cols, params.row_stride, params.col_stride, params.pad_top, params.pad_left, output->flat<T>().data(), nullptr, context->eigen_gpu_device(), propagate_nans, false); if (!status) { context->SetStatus( errors::Internal("Failed launching MaxPoolForwardNoMask")); } } }; template <typename T> struct LaunchMaxPoolingWithArgmax<Eigen::GpuDevice, T, int64> { static void launch(OpKernelContext* context, const PoolParameters& params, const Tensor& input, Tensor* output, Tensor* argmax, bool propagate_nans, bool include_batch_in_index) { bool status = functor::MaxPoolForwardWithOptionalArgmax<T>()( input.flat<T>().data(), params.tensor_in_batch, params.tensor_in_rows, params.tensor_in_cols, params.depth, params.out_height, params.out_width, params.window_rows, params.window_cols, params.row_stride, params.col_stride, params.pad_top, params.pad_left, output->flat<T>().data(), reinterpret_cast<int64*>(argmax->flat<int64>().data()), context->eigen_gpu_device(), propagate_nans, include_batch_in_index); if (!status) { context->SetStatus( errors::Internal("Failed launching MaxPoolForwardWithArgmax")); } } }; template <typename T> struct LaunchMaxPoolingGradWithArgmax<Eigen::GpuDevice, T> { static void launch(OpKernelContext* context, const PoolParameters& params, const Tensor& grad_in, const Tensor& argmax, Tensor* grad_out, const bool include_batch_in_index) { const int input_size = params.tensor_in_batch * params.tensor_in_rows * params.tensor_in_cols * params.depth; const int output_size = params.tensor_in_batch * params.out_height * params.out_width * params.depth; const int top_offset = params.out_height * params.out_width * params.depth; const int bottom_offset = params.tensor_in_rows * params.tensor_in_cols * params.depth; bool status = functor::MaxPoolBackwardWithArgmax<T>()( output_size, input_size, grad_in.flat<T>().data(), reinterpret_cast<const int64*>(argmax.flat<int64>().data()), top_offset, bottom_offset, grad_out->flat<T>().data(), context->eigen_gpu_device(), include_batch_in_index); if (!status) { context->SetStatus( errors::Internal("Failed launching 
MaxPoolBackwardWithArgmax")); } } }; template <typename T> struct LaunchMaxPoolingGradGradWithArgmax<Eigen::GpuDevice, T> { static void launch(OpKernelContext* context, const PoolParameters& params, const Tensor& grad_in, const Tensor& argmax, Tensor* grad_out, const bool include_batch_in_index) { const int input_size = params.tensor_in_batch * params.tensor_in_rows * params.tensor_in_cols * params.depth; const int output_size = params.tensor_in_batch * params.out_height * params.out_width * params.depth; const int top_offset = params.tensor_in_rows * params.tensor_in_cols * params.depth; const int bottom_offset = params.out_width * params.out_height * params.depth; bool status = functor::MaxPoolGradBackwardWithArgmax<T>()( output_size, input_size, grad_in.flat<T>().data(), reinterpret_cast<const int64*>(argmax.flat<int64>().data()), top_offset, bottom_offset, grad_out->flat<T>().data(), context->eigen_gpu_device(), include_batch_in_index); if (!status) { context->SetStatus( errors::Internal("Failed launching MaxPoolGradBackwardWithArgmax")); } } }; #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM #define REGISTER_MAX_POOL_KERNELS(D, T) \ REGISTER_KERNEL_BUILDER( \ Name("MaxPoolGrad").Device(DEVICE_##D).TypeConstraint<T>("T"), \ MaxPoolingGradOp<D##Device, T>); \ REGISTER_KERNEL_BUILDER( \ Name("MaxPoolGradGrad").Device(DEVICE_##D).TypeConstraint<T>("T"), \ MaxPoolingGradGradOp<D##Device, T>); \ REGISTER_KERNEL_BUILDER(Name("MaxPoolGradV2") \ .Device(DEVICE_##D) \ .HostMemory("ksize") \ .HostMemory("strides") \ .TypeConstraint<T>("T"), \ MaxPoolingGradOp<D##Device, T>); \ REGISTER_KERNEL_BUILDER(Name("MaxPoolGradGradV2") \ .Device(DEVICE_##D) \ .HostMemory("ksize") \ .HostMemory("strides") \ .TypeConstraint<T>("T"), \ MaxPoolingGradGradOp<D##Device, T>) \ REGISTER_KERNEL_BUILDER(Name("MaxPoolWithArgmax") \ .Device(DEVICE_##D) \ .TypeConstraint<int64>("Targmax") \ .TypeConstraint<T>("T"), \ MaxPoolingWithArgmaxOp<D##Device, T, int64>); \ REGISTER_KERNEL_BUILDER(Name("MaxPoolGradWithArgmax") \ .Device(DEVICE_##D) \ .TypeConstraint<T>("T") \ .TypeConstraint<int64>("Targmax"), \ MaxPoolingGradWithArgmaxOp<D##Device, T>); // Below kernels implemented only for CPU device. #define REGISTER_CPU_ONLY_POOL_KERNELS(T) \ REGISTER_KERNEL_BUILDER( \ Name("MaxPool").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ MaxPoolingOp<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER( \ Name("MaxPoolV2").Device(DEVICE_CPU).TypeConstraint<T>("T"), \ MaxPoolingV2Op<CPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("MaxPoolWithArgmax") \ .Device(DEVICE_CPU) \ .TypeConstraint<int32>("Targmax") \ .TypeConstraint<T>("T"), \ MaxPoolingWithArgmaxOp<CPUDevice, T, int32>); TF_CALL_REAL_NUMBER_TYPES(REGISTER_CPU_ONLY_POOL_KERNELS); #undef REGISTER_CPU_ONLY_POOL_KERNELS #define REGISTER_CPU_MAX_POOL_KERNELS(T) REGISTER_MAX_POOL_KERNELS(CPU, T); TF_CALL_REAL_NUMBER_TYPES(REGISTER_CPU_MAX_POOL_KERNELS); #undef REGISTER_CPU_KERNELS #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM // Forward declarations for the functor specializations for GPU. 
namespace functor { #define DECLARE_GPU_SPEC(T) \ template <> \ void SpatialMaxPooling<Eigen::GpuDevice, T>::operator()( \ const Eigen::GpuDevice& d, typename TTypes<T, 4>::Tensor output, \ typename TTypes<T, 4>::ConstTensor input, int window_rows, \ int window_cols, int row_stride, int col_stride, \ const Eigen::PaddingType& padding); \ extern template struct SpatialMaxPooling<Eigen::GpuDevice, T>; TF_CALL_GPU_NUMBER_TYPES(DECLARE_GPU_SPEC); #undef DECLARE_GPU_SPEC } // namespace functor #define REGISTER_GPU_MAX_POOL_KERNELS(T) REGISTER_MAX_POOL_KERNELS(GPU, T) TF_CALL_GPU_NUMBER_TYPES(REGISTER_GPU_MAX_POOL_KERNELS); #undef REGISTER_GPU_MAX_POOL_KERNELS // Below kernels currently implemented only for GPU device. // Note(jiayq): Currently, the Caffe custom implementation is faster than the // default Eigen implementation so we are using the custom kernel as the // default. However, you can explicitly invoke the eigen version using // kernel_label_map. #define REGISTER_GPU_ONLY_POOL_KERNELS(T) \ REGISTER_KERNEL_BUILDER(Name("MaxPool") \ .Device(DEVICE_GPU) \ .TypeConstraint<T>("T") \ .Label("eigen_tensor"), \ MaxPoolingOp<GPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("MaxPoolV2") \ .Device(DEVICE_GPU) \ .HostMemory("ksize") \ .HostMemory("strides") \ .TypeConstraint<T>("T") \ .Label("eigen_tensor"), \ MaxPoolingV2Op<GPUDevice, T>); \ REGISTER_KERNEL_BUILDER( \ Name("MaxPool").Device(DEVICE_GPU).TypeConstraint<T>("T"), \ MaxPoolingNoMaskOp<GPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("MaxPoolV2") \ .Device(DEVICE_GPU) \ .HostMemory("ksize") \ .HostMemory("strides") \ .TypeConstraint<T>("T"), \ MaxPoolingNoMaskV2Op<GPUDevice, T>); \ REGISTER_KERNEL_BUILDER(Name("MaxPoolGradGradWithArgmax") \ .Device(DEVICE_GPU) \ .TypeConstraint<T>("T") \ .TypeConstraint<int64>("Targmax"), \ MaxPoolingGradGradWithArgmaxOp<GPUDevice, T>); TF_CALL_GPU_NUMBER_TYPES(REGISTER_GPU_ONLY_POOL_KERNELS); // TODO(b/65847473): Re-enable once the underlying build error is fixed. #if !defined(PLATFORM_WINDOWS) REGISTER_KERNEL_BUILDER( Name("MaxPool").Device(DEVICE_GPU).TypeConstraint<qint8>("T"), MaxPoolingNoMaskOp<GPUDevice, qint8>); REGISTER_KERNEL_BUILDER(Name("MaxPoolV2") .Device(DEVICE_GPU) .HostMemory("ksize") .HostMemory("strides") .TypeConstraint<qint8>("T"), MaxPoolingV2Op<GPUDevice, qint8>); REGISTER_KERNEL_BUILDER(Name("MaxPoolV2") .Device(DEVICE_GPU) .HostMemory("ksize") .HostMemory("strides") .TypeConstraint<qint8>("T") .Label("eigen_tensor"), MaxPoolingV2Op<GPUDevice, qint8>); #endif // !defined(PLATFORM_WINDOWS) #undef REGISTER_GPU_ONLY_POOL_KERNELS #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM #undef REGISTER_MAX_POOL_KERNELS } // namespace tensorflow
null
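A note on the CPU LaunchMaxPoolingGradWithArgmax shard in the kernel above: every write into grad_out is driven by an index read from the argmax tensor, and the shard only performs the scatter-add after confirming the destination lies inside the expected output range (the CHECK on grad_out_index), which is the out-of-bounds-write (CWE-787) hardening pattern this file relies on. A minimal standalone sketch of that guard, with hypothetical names (ScatterAddChecked is illustrative only, not a TensorFlow function), assuming flat float buffers:

#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical, simplified scatter-add: every index taken from an "argmax"
// buffer is validated against the destination extent before the write,
// mirroring the range check the CPU MaxPoolGradWithArgmax shard applies.
bool ScatterAddChecked(const std::vector<int64_t>& argmax,
                       const std::vector<float>& grad_in,
                       std::vector<float>& grad_out) {
  if (argmax.size() != grad_in.size()) return false;
  for (size_t i = 0; i < argmax.size(); ++i) {
    const int64_t dst = argmax[i];
    // Reject out-of-range destinations instead of writing past the buffer
    // (the CWE-787 failure mode these kernels defend against).
    if (dst < 0 || dst >= static_cast<int64_t>(grad_out.size())) return false;
    grad_out[dst] += grad_in[i];
  }
  return true;
}

int main() {
  std::vector<float> grad_out(4, 0.0f);
  // The last index (7) is out of range for a 4-element output and is rejected.
  bool ok = ScatterAddChecked({0, 3, 7}, {1.f, 2.f, 3.f}, grad_out);
  std::cout << (ok ? "ok" : "rejected out-of-range index") << "\n";
}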
262
CWE-787
CVE-2021-29603
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/lite/kernels/internal/reference/arg_min_max.h" #include <stdint.h> #include <functional> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace builtin { namespace arg_min_max { constexpr int kInputTensor = 0; constexpr int kAxis = 1; constexpr int kOutputTensor = 0; TfLiteStatus ResizeOutput(TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* axis, TfLiteTensor* output) { int axis_value; // Retrive all 8 bytes when axis type is kTfLiteInt64 to avoid data loss. if (axis->type == kTfLiteInt64) { axis_value = static_cast<int>(*GetTensorData<int64_t>(axis)); } else { axis_value = *GetTensorData<int>(axis); } if (axis_value < 0) { axis_value += NumDimensions(input); } // Copy the input dimensions to output except the axis dimension. TfLiteIntArray* output_dims = TfLiteIntArrayCreate(NumDimensions(input) - 1); int j = 0; for (int i = 0; i < NumDimensions(input); ++i) { if (i != axis_value) { output_dims->data[j] = SizeOfDimension(input, i); ++j; } } return context->ResizeTensor(context, output, output_dims); } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); const TfLiteTensor* axis; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kAxis, &axis)); // Make sure the axis is only 1 dimension. TF_LITE_ENSURE_EQ(context, NumElements(axis), 1); // Make sure the axis is only either int32 or int64. TF_LITE_ENSURE(context, axis->type == kTfLiteInt32 || axis->type == kTfLiteInt64); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); auto* params = reinterpret_cast<TfLiteArgMaxParams*>(node->builtin_data); switch (params->output_type) { case kTfLiteInt32: output->type = kTfLiteInt32; break; case kTfLiteInt64: output->type = kTfLiteInt64; break; default: context->ReportError(context, "Unknown index output data type: %d", params->output_type); return kTfLiteError; } // Check conditions for different types. 
switch (input->type) { case kTfLiteFloat32: case kTfLiteUInt8: case kTfLiteInt8: case kTfLiteInt32: break; default: context->ReportError( context, "Unknown input type: %d, only float32 and int types are supported", input->type); return kTfLiteError; } TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (IsConstantTensor(axis)) { TF_LITE_ENSURE_STATUS(ResizeOutput(context, input, axis, output)); } else { SetTensorToDynamic(output); } return kTfLiteOk; } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node, bool is_arg_max) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); const TfLiteTensor* axis; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kAxis, &axis)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); if (IsDynamicTensor(output)) { TF_LITE_ENSURE_STATUS(ResizeOutput(context, input, axis, output)); } #define TF_LITE_ARG_MIN_MAX(data_type, axis_type, output_type) \ optimized_ops::ArgMinMax( \ GetTensorShape(input), GetTensorData<data_type>(input), \ GetTensorData<axis_type>(axis), GetTensorShape(output), \ GetTensorData<output_type>(output), is_arg_max) if (axis->type == kTfLiteInt32) { switch (output->type) { case kTfLiteInt32: { switch (input->type) { case kTfLiteFloat32: TF_LITE_ARG_MIN_MAX(float, int32_t, int32_t); break; case kTfLiteUInt8: TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int32_t); break; case kTfLiteInt8: TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int32_t); break; case kTfLiteInt32: TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int32_t); break; default: context->ReportError(context, "Only float32, uint8, int8 and int32 are " "supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } break; case kTfLiteInt64: { switch (input->type) { case kTfLiteFloat32: TF_LITE_ARG_MIN_MAX(float, int32_t, int64_t); break; case kTfLiteUInt8: TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int64_t); break; case kTfLiteInt8: TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int64_t); break; case kTfLiteInt32: TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int64_t); break; default: context->ReportError(context, "Only float32, uint8, int8 and int32 are " "supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } break; default: context->ReportError( context, "Only int32 and int64 are supported currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } else { switch (output->type) { case kTfLiteInt32: { switch (input->type) { case kTfLiteFloat32: TF_LITE_ARG_MIN_MAX(float, int64_t, int32_t); break; case kTfLiteUInt8: TF_LITE_ARG_MIN_MAX(uint8_t, int64_t, int32_t); break; case kTfLiteInt8: TF_LITE_ARG_MIN_MAX(int8_t, int64_t, int32_t); break; case kTfLiteInt32: TF_LITE_ARG_MIN_MAX(int32_t, int64_t, int32_t); break; default: context->ReportError(context, "Only float32, uint8, int8 and int32 are " "supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } break; case kTfLiteInt64: { switch (input->type) { case kTfLiteFloat32: TF_LITE_ARG_MIN_MAX(float, int64_t, int64_t); break; case kTfLiteUInt8: TF_LITE_ARG_MIN_MAX(uint8_t, int64_t, int64_t); break; case kTfLiteInt8: TF_LITE_ARG_MIN_MAX(int8_t, int64_t, int64_t); break; case kTfLiteInt32: TF_LITE_ARG_MIN_MAX(int32_t, int64_t, int64_t); break; default: context->ReportError(context, "Only float32, uint8, int8 and int32 are " "supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } break; default: context->ReportError( 
context, "Only int32 and int64 are supported currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } #undef TF_LITE_ARG_MIN_MAX return kTfLiteOk; } TfLiteStatus ArgMinEval(TfLiteContext* context, TfLiteNode* node) { return Eval(context, node, false); } TfLiteStatus ArgMaxEval(TfLiteContext* context, TfLiteNode* node) { return Eval(context, node, true); } } // namespace arg_min_max TfLiteRegistration* Register_ARG_MAX() { static TfLiteRegistration r = {nullptr, nullptr, arg_min_max::Prepare, arg_min_max::ArgMaxEval}; return &r; } TfLiteRegistration* Register_ARG_MIN() { static TfLiteRegistration r = {nullptr, nullptr, arg_min_max::Prepare, arg_min_max::ArgMinEval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
null
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/lite/kernels/internal/reference/arg_min_max.h" #include <stdint.h> #include <functional> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace builtin { namespace arg_min_max { constexpr int kInputTensor = 0; constexpr int kAxis = 1; constexpr int kOutputTensor = 0; TfLiteStatus ResizeOutput(TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* axis, TfLiteTensor* output) { int axis_value; // Retrive all 8 bytes when axis type is kTfLiteInt64 to avoid data loss. if (axis->type == kTfLiteInt64) { axis_value = static_cast<int>(*GetTensorData<int64_t>(axis)); } else { axis_value = *GetTensorData<int>(axis); } if (axis_value < 0) { axis_value += NumDimensions(input); } TF_LITE_ENSURE(context, axis_value >= 0); TF_LITE_ENSURE(context, axis_value < NumDimensions(input)); // Copy the input dimensions to output except the axis dimension. TfLiteIntArray* output_dims = TfLiteIntArrayCreate(NumDimensions(input) - 1); int j = 0; for (int i = 0; i < NumDimensions(input); ++i) { if (i != axis_value) { output_dims->data[j] = SizeOfDimension(input, i); ++j; } } return context->ResizeTensor(context, output, output_dims); } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); const TfLiteTensor* axis; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kAxis, &axis)); // Make sure the axis is only 1 dimension. TF_LITE_ENSURE_EQ(context, NumElements(axis), 1); // Make sure the axis is only either int32 or int64. TF_LITE_ENSURE(context, axis->type == kTfLiteInt32 || axis->type == kTfLiteInt64); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); auto* params = reinterpret_cast<TfLiteArgMaxParams*>(node->builtin_data); switch (params->output_type) { case kTfLiteInt32: output->type = kTfLiteInt32; break; case kTfLiteInt64: output->type = kTfLiteInt64; break; default: context->ReportError(context, "Unknown index output data type: %d", params->output_type); return kTfLiteError; } // Check conditions for different types. 
switch (input->type) { case kTfLiteFloat32: case kTfLiteUInt8: case kTfLiteInt8: case kTfLiteInt32: break; default: context->ReportError( context, "Unknown input type: %d, only float32 and int types are supported", input->type); return kTfLiteError; } TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (IsConstantTensor(axis)) { TF_LITE_ENSURE_STATUS(ResizeOutput(context, input, axis, output)); } else { SetTensorToDynamic(output); } return kTfLiteOk; } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node, bool is_arg_max) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); const TfLiteTensor* axis; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kAxis, &axis)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); if (IsDynamicTensor(output)) { TF_LITE_ENSURE_STATUS(ResizeOutput(context, input, axis, output)); } #define TF_LITE_ARG_MIN_MAX(data_type, axis_type, output_type) \ optimized_ops::ArgMinMax( \ GetTensorShape(input), GetTensorData<data_type>(input), \ GetTensorData<axis_type>(axis), GetTensorShape(output), \ GetTensorData<output_type>(output), is_arg_max) if (axis->type == kTfLiteInt32) { switch (output->type) { case kTfLiteInt32: { switch (input->type) { case kTfLiteFloat32: TF_LITE_ARG_MIN_MAX(float, int32_t, int32_t); break; case kTfLiteUInt8: TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int32_t); break; case kTfLiteInt8: TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int32_t); break; case kTfLiteInt32: TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int32_t); break; default: context->ReportError(context, "Only float32, uint8, int8 and int32 are " "supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } break; case kTfLiteInt64: { switch (input->type) { case kTfLiteFloat32: TF_LITE_ARG_MIN_MAX(float, int32_t, int64_t); break; case kTfLiteUInt8: TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int64_t); break; case kTfLiteInt8: TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int64_t); break; case kTfLiteInt32: TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int64_t); break; default: context->ReportError(context, "Only float32, uint8, int8 and int32 are " "supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } break; default: context->ReportError( context, "Only int32 and int64 are supported currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } else { switch (output->type) { case kTfLiteInt32: { switch (input->type) { case kTfLiteFloat32: TF_LITE_ARG_MIN_MAX(float, int64_t, int32_t); break; case kTfLiteUInt8: TF_LITE_ARG_MIN_MAX(uint8_t, int64_t, int32_t); break; case kTfLiteInt8: TF_LITE_ARG_MIN_MAX(int8_t, int64_t, int32_t); break; case kTfLiteInt32: TF_LITE_ARG_MIN_MAX(int32_t, int64_t, int32_t); break; default: context->ReportError(context, "Only float32, uint8, int8 and int32 are " "supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } break; case kTfLiteInt64: { switch (input->type) { case kTfLiteFloat32: TF_LITE_ARG_MIN_MAX(float, int64_t, int64_t); break; case kTfLiteUInt8: TF_LITE_ARG_MIN_MAX(uint8_t, int64_t, int64_t); break; case kTfLiteInt8: TF_LITE_ARG_MIN_MAX(int8_t, int64_t, int64_t); break; case kTfLiteInt32: TF_LITE_ARG_MIN_MAX(int32_t, int64_t, int64_t); break; default: context->ReportError(context, "Only float32, uint8, int8 and int32 are " "supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } break; default: context->ReportError( 
context, "Only int32 and int64 are supported currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } #undef TF_LITE_ARG_MIN_MAX return kTfLiteOk; } TfLiteStatus ArgMinEval(TfLiteContext* context, TfLiteNode* node) { return Eval(context, node, false); } TfLiteStatus ArgMaxEval(TfLiteContext* context, TfLiteNode* node) { return Eval(context, node, true); } } // namespace arg_min_max TfLiteRegistration* Register_ARG_MAX() { static TfLiteRegistration r = {nullptr, nullptr, arg_min_max::Prepare, arg_min_max::ArgMaxEval}; return &r; } TfLiteRegistration* Register_ARG_MIN() { static TfLiteRegistration r = {nullptr, nullptr, arg_min_max::Prepare, arg_min_max::ArgMinEval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
null
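The visible difference between the vulnerable arg_min_max source and the patched ground truth above is the axis bounds check added in ResizeOutput: the fix inserts TF_LITE_ENSURE(context, axis_value >= 0) and TF_LITE_ENSURE(context, axis_value < NumDimensions(input)) after the negative-axis normalization, so a bogus axis can no longer leave the copy loop writing past the end of the TfLiteIntArray it allocated with NumDimensions(input) - 1 slots. A hedged standalone sketch of the same guard (ReducedShape is a hypothetical helper, not part of the TFLite API):

#include <cstdio>
#include <vector>

// Hypothetical stand-in for the patched ResizeOutput logic: normalize a
// possibly-negative axis, then reject it unless it indexes a real dimension,
// mirroring the TF_LITE_ENSURE checks added in the fixed kernel.
bool ReducedShape(const std::vector<int>& in_dims, int axis,
                  std::vector<int>* out_dims) {
  if (axis < 0) axis += static_cast<int>(in_dims.size());
  if (axis < 0 || axis >= static_cast<int>(in_dims.size())) return false;
  out_dims->clear();
  for (int i = 0; i < static_cast<int>(in_dims.size()); ++i) {
    if (i != axis) out_dims->push_back(in_dims[i]);  // drop the reduced axis
  }
  return true;
}

int main() {
  std::vector<int> out;
  // axis == -5 normalizes to -2 for a rank-3 input and is rejected instead of
  // driving an out-of-bounds write into the output dimensions array.
  std::printf("%s\n", ReducedShape({2, 3, 4}, -5, &out) ? "ok" : "rejected");
}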
263
CWE-787
CVE-2021-29609
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" namespace tensorflow { template <typename T, typename Treal> class SparseAddOp : public OpKernel { public: explicit SparseAddOp(OpKernelConstruction *ctx) : OpKernel(ctx) {} void Compute(OpKernelContext *ctx) override { // (0) validations const Tensor *a_indices, *b_indices, *a_values_t, *b_values_t, *a_shape, *b_shape, *thresh_t; OP_REQUIRES_OK(ctx, ctx->input("a_indices", &a_indices)); OP_REQUIRES_OK(ctx, ctx->input("b_indices", &b_indices)); OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(a_indices->shape()) && TensorShapeUtils::IsMatrix(b_indices->shape()), errors::InvalidArgument( "Input indices should be matrices but received shapes: ", a_indices->shape().DebugString(), " and ", b_indices->shape().DebugString())); const int64 a_nnz = a_indices->dim_size(0); const int64 b_nnz = b_indices->dim_size(0); OP_REQUIRES_OK(ctx, ctx->input("a_values", &a_values_t)); OP_REQUIRES_OK(ctx, ctx->input("b_values", &b_values_t)); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(a_values_t->shape()) && TensorShapeUtils::IsVector(b_values_t->shape()), errors::InvalidArgument( "Input values should be vectors but received shapes: ", a_values_t->shape().DebugString(), " and ", b_values_t->shape().DebugString())); auto a_values = ctx->input(1).vec<T>(); auto b_values = ctx->input(4).vec<T>(); OP_REQUIRES( ctx, a_values.size() == a_nnz && b_values.size() == b_nnz, errors::InvalidArgument("Expected ", a_nnz, " and ", b_nnz, " non-empty input values, got ", a_values.size(), " and ", b_values.size())); OP_REQUIRES_OK(ctx, ctx->input("a_shape", &a_shape)); OP_REQUIRES_OK(ctx, ctx->input("b_shape", &b_shape)); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(a_shape->shape()) && TensorShapeUtils::IsVector(b_shape->shape()), errors::InvalidArgument( "Input shapes should be a vector but received shapes ", a_shape->shape().DebugString(), " and ", b_shape->shape().DebugString())); OP_REQUIRES( ctx, a_shape->IsSameSize(*b_shape), errors::InvalidArgument( "Operands do not have the same ranks; got shapes: ", a_shape->SummarizeValue(10), " and ", b_shape->SummarizeValue(10))); const auto a_shape_flat = a_shape->flat<int64>(); const auto b_shape_flat = b_shape->flat<int64>(); for (int i = 0; i < a_shape->NumElements(); ++i) { OP_REQUIRES(ctx, a_shape_flat(i) == b_shape_flat(i), errors::InvalidArgument( "Operands' shapes do not match: got ", a_shape_flat(i), " and ", b_shape_flat(i), " for dimension ", i)); } OP_REQUIRES_OK(ctx, ctx->input("thresh", &thresh_t)); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(thresh_t->shape()), errors::InvalidArgument( "The magnitude threshold must be a scalar: 
got shape ", thresh_t->shape().DebugString())); // std::abs() so that it works for complex{64,128} values as well const Treal thresh = thresh_t->scalar<Treal>()(); // (1) do a pass over inputs, and append values and indices to vectors auto a_indices_mat = a_indices->matrix<int64>(); auto b_indices_mat = b_indices->matrix<int64>(); std::vector<std::pair<bool, int64>> entries_to_copy; // from_a?, idx entries_to_copy.reserve(a_nnz + b_nnz); std::vector<T> out_values; const int num_dims = a_shape->dim_size(0); // The input and output sparse tensors are assumed to be ordered along // increasing dimension number. int64 i = 0, j = 0; T s; while (i < a_nnz && j < b_nnz) { switch (sparse::DimComparator::cmp(a_indices_mat, b_indices_mat, i, j, num_dims)) { case -1: entries_to_copy.emplace_back(true, i); out_values.push_back(a_values(i)); ++i; break; case 0: s = a_values(i) + b_values(j); if (thresh <= std::abs(s)) { entries_to_copy.emplace_back(true, i); out_values.push_back(s); } ++i; ++j; break; case 1: entries_to_copy.emplace_back(false, j); out_values.push_back(b_values(j)); ++j; break; } } #define HANDLE_LEFTOVERS(A_OR_B, IDX, IS_A) \ while (IDX < A_OR_B##_nnz) { \ entries_to_copy.emplace_back(IS_A, IDX); \ out_values.push_back(A_OR_B##_values(IDX)); \ ++IDX; \ } // at most one of these calls appends new values HANDLE_LEFTOVERS(a, i, true); HANDLE_LEFTOVERS(b, j, false); #undef HANDLE_LEFTOVERS // (2) allocate and fill output tensors const int64 sum_nnz = out_values.size(); Tensor *out_indices_t, *out_values_t; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({sum_nnz, num_dims}), &out_indices_t)); OP_REQUIRES_OK( ctx, ctx->allocate_output(1, TensorShape({sum_nnz}), &out_values_t)); auto out_indices_mat = out_indices_t->matrix<int64>(); auto out_values_flat = out_values_t->vec<T>(); for (i = 0; i < sum_nnz; ++i) { const bool from_a = entries_to_copy[i].first; const int64 idx = entries_to_copy[i].second; out_indices_mat.chip<0>(i) = from_a ? a_indices_mat.chip<0>(idx) : b_indices_mat.chip<0>(idx); } if (sum_nnz > 0) { std::copy_n(out_values.begin(), sum_nnz, &out_values_flat(0)); } ctx->set_output(2, *a_shape); } }; #define REGISTER_KERNELS(type, thresh_type) \ REGISTER_KERNEL_BUILDER( \ Name("SparseAdd").Device(DEVICE_CPU).TypeConstraint<type>("T"), \ SparseAddOp<type, thresh_type>) // The list below is equivalent to TF_CALL_REAL_NUMBER_TYPES, minus uint8. This // is because std::abs() on uint8 does not compile. REGISTER_KERNELS(float, float); REGISTER_KERNELS(double, double); REGISTER_KERNELS(int64, int64); REGISTER_KERNELS(int32, int32); REGISTER_KERNELS(int16, int16); REGISTER_KERNELS(int8, int8); REGISTER_KERNELS(complex64, float); REGISTER_KERNELS(complex128, double); #undef REGISTER_KERNELS } // namespace tensorflow
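The kernel above walks both operands in index order, copies entries that appear in only one input, and keeps a summed entry only when its magnitude reaches `thresh`. A minimal Python sketch of that behaviour through the public API, assuming a TF 2.x build where `tf.sparse.add` exposes the magnitude threshold as its `threshold` argument:

```python
import tensorflow as tf

# Two ordered sparse operands over a length-4 vector; index 0 overlaps.
a = tf.sparse.SparseTensor(indices=[[0], [2]], values=[1.0, 3.0], dense_shape=[4])
b = tf.sparse.SparseTensor(indices=[[0], [3]], values=[-1.0, 5.0], dense_shape=[4])

# Disjoint entries are copied through unconditionally; the overlapping entry is
# summed (1.0 + -1.0 = 0.0) and dropped because |0.0| < threshold.
c = tf.sparse.add(a, b, threshold=0.5)
print(c.indices.numpy())  # [[2] [3]]
print(c.values.numpy())   # [3. 5.]
```

Only the overlapping entry is subject to the threshold; disjoint entries pass through unchanged, matching the `case 0` branch of the merge loop in the kernel.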
null
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/op_requires.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" namespace tensorflow { template <typename T, typename Treal> class SparseAddOp : public OpKernel { public: explicit SparseAddOp(OpKernelConstruction *ctx) : OpKernel(ctx) {} void Compute(OpKernelContext *ctx) override { // (0) validations const Tensor *a_indices, *b_indices, *a_values_t, *b_values_t, *a_shape, *b_shape, *thresh_t; OP_REQUIRES_OK(ctx, ctx->input("a_indices", &a_indices)); OP_REQUIRES_OK(ctx, ctx->input("b_indices", &b_indices)); OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(a_indices->shape()) && TensorShapeUtils::IsMatrix(b_indices->shape()), errors::InvalidArgument( "Input indices should be matrices but received shapes: ", a_indices->shape().DebugString(), " and ", b_indices->shape().DebugString())); const int64 a_nnz = a_indices->dim_size(0); const int64 b_nnz = b_indices->dim_size(0); OP_REQUIRES_OK(ctx, ctx->input("a_values", &a_values_t)); OP_REQUIRES_OK(ctx, ctx->input("b_values", &b_values_t)); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(a_values_t->shape()) && TensorShapeUtils::IsVector(b_values_t->shape()), errors::InvalidArgument( "Input values should be vectors but received shapes: ", a_values_t->shape().DebugString(), " and ", b_values_t->shape().DebugString())); auto a_values = ctx->input(1).vec<T>(); auto b_values = ctx->input(4).vec<T>(); OP_REQUIRES( ctx, a_values.size() == a_nnz && b_values.size() == b_nnz, errors::InvalidArgument("Expected ", a_nnz, " and ", b_nnz, " non-empty input values, got ", a_values.size(), " and ", b_values.size())); OP_REQUIRES_OK(ctx, ctx->input("a_shape", &a_shape)); OP_REQUIRES_OK(ctx, ctx->input("b_shape", &b_shape)); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(a_shape->shape()) && TensorShapeUtils::IsVector(b_shape->shape()), errors::InvalidArgument( "Input shapes should be a vector but received shapes ", a_shape->shape().DebugString(), " and ", b_shape->shape().DebugString())); OP_REQUIRES( ctx, a_shape->IsSameSize(*b_shape), errors::InvalidArgument( "Operands do not have the same ranks; got shapes: ", a_shape->SummarizeValue(10), " and ", b_shape->SummarizeValue(10))); const auto a_shape_flat = a_shape->flat<int64>(); const auto b_shape_flat = b_shape->flat<int64>(); for (int i = 0; i < a_shape->NumElements(); ++i) { OP_REQUIRES(ctx, a_shape_flat(i) == b_shape_flat(i), errors::InvalidArgument( "Operands' shapes do not match: got ", a_shape_flat(i), " and ", b_shape_flat(i), " for dimension ", i)); } OP_REQUIRES_OK(ctx, ctx->input("thresh", &thresh_t)); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(thresh_t->shape()), 
errors::InvalidArgument( "The magnitude threshold must be a scalar: got shape ", thresh_t->shape().DebugString())); // std::abs() so that it works for complex{64,128} values as well const Treal thresh = thresh_t->scalar<Treal>()(); // (1) do a pass over inputs, and append values and indices to vectors auto a_indices_mat = a_indices->matrix<int64>(); auto b_indices_mat = b_indices->matrix<int64>(); std::vector<std::pair<bool, int64>> entries_to_copy; // from_a?, idx entries_to_copy.reserve(a_nnz + b_nnz); std::vector<T> out_values; const int num_dims = a_shape->dim_size(0); OP_REQUIRES(ctx, num_dims > 0, errors::InvalidArgument("Invalid input_a shape. Received: ", a_shape->DebugString())); // The input and output sparse tensors are assumed to be ordered along // increasing dimension number. int64 i = 0, j = 0; T s; while (i < a_nnz && j < b_nnz) { switch (sparse::DimComparator::cmp(a_indices_mat, b_indices_mat, i, j, num_dims)) { case -1: entries_to_copy.emplace_back(true, i); out_values.push_back(a_values(i)); ++i; break; case 0: s = a_values(i) + b_values(j); if (thresh <= std::abs(s)) { entries_to_copy.emplace_back(true, i); out_values.push_back(s); } ++i; ++j; break; case 1: entries_to_copy.emplace_back(false, j); out_values.push_back(b_values(j)); ++j; break; } } #define HANDLE_LEFTOVERS(A_OR_B, IDX, IS_A) \ while (IDX < A_OR_B##_nnz) { \ entries_to_copy.emplace_back(IS_A, IDX); \ out_values.push_back(A_OR_B##_values(IDX)); \ ++IDX; \ } // at most one of these calls appends new values HANDLE_LEFTOVERS(a, i, true); HANDLE_LEFTOVERS(b, j, false); #undef HANDLE_LEFTOVERS // (2) allocate and fill output tensors const int64 sum_nnz = out_values.size(); Tensor *out_indices_t, *out_values_t; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({sum_nnz, num_dims}), &out_indices_t)); OP_REQUIRES_OK( ctx, ctx->allocate_output(1, TensorShape({sum_nnz}), &out_values_t)); auto out_indices_mat = out_indices_t->matrix<int64>(); auto out_values_flat = out_values_t->vec<T>(); for (i = 0; i < sum_nnz; ++i) { const bool from_a = entries_to_copy[i].first; const int64 idx = entries_to_copy[i].second; out_indices_mat.chip<0>(i) = from_a ? a_indices_mat.chip<0>(idx) : b_indices_mat.chip<0>(idx); } if (sum_nnz > 0) { std::copy_n(out_values.begin(), sum_nnz, &out_values_flat(0)); } ctx->set_output(2, *a_shape); } }; #define REGISTER_KERNELS(type, thresh_type) \ REGISTER_KERNEL_BUILDER( \ Name("SparseAdd").Device(DEVICE_CPU).TypeConstraint<type>("T"), \ SparseAddOp<type, thresh_type>) // The list below is equivalent to TF_CALL_REAL_NUMBER_TYPES, minus uint8. This // is because std::abs() on uint8 does not compile. REGISTER_KERNELS(float, float); REGISTER_KERNELS(double, double); REGISTER_KERNELS(int64, int64); REGISTER_KERNELS(int32, int32); REGISTER_KERNELS(int16, int16); REGISTER_KERNELS(int8, int8); REGISTER_KERNELS(complex64, float); REGISTER_KERNELS(complex128, double); #undef REGISTER_KERNELS } // namespace tensorflow
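The patched kernel above differs from the earlier one only by the added `OP_REQUIRES(ctx, num_dims > 0, ...)` check on the length of the shape vector. A hedged sketch of how that rejection surfaces through `tf.raw_ops.SparseAdd` (the argument names come from the op's input list above; the exact error text on a given build is an assumption):

```python
import tensorflow as tf

empty_indices = tf.zeros([0, 0], dtype=tf.int64)   # 0 x 0 index matrix
empty_values = tf.zeros([0], dtype=tf.float32)     # no values
empty_shape = tf.constant([], dtype=tf.int64)      # zero-length shape vector -> num_dims == 0

try:
    tf.raw_ops.SparseAdd(
        a_indices=empty_indices, a_values=empty_values, a_shape=empty_shape,
        b_indices=empty_indices, b_values=empty_values, b_shape=empty_shape,
        thresh=tf.constant(0.0))
except tf.errors.InvalidArgumentError as e:
    # With the num_dims > 0 check in place this input is rejected before any
    # output is allocated; older kernels accepted a zero-length shape vector.
    print(e)
```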
null
264
CWE-787
CVE-2021-29614
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Parsing Ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_parsing_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import parsing_config # go/tf-wildcard-import # pylint: disable=wildcard-import,undefined-variable from tensorflow.python.ops.gen_parsing_ops import * # pylint: enable=wildcard-import,undefined-variable from tensorflow.python.util import deprecation from tensorflow.python.util import dispatch from tensorflow.python.util.tf_export import tf_export ops.NotDifferentiable("DecodeRaw") ops.NotDifferentiable("DecodePaddedRaw") ops.NotDifferentiable("ParseTensor") ops.NotDifferentiable("SerializeTensor") ops.NotDifferentiable("StringToNumber") VarLenFeature = parsing_config.VarLenFeature RaggedFeature = parsing_config.RaggedFeature SparseFeature = parsing_config.SparseFeature FixedLenFeature = parsing_config.FixedLenFeature FixedLenSequenceFeature = parsing_config.FixedLenSequenceFeature # pylint: disable=protected-access _ParseOpParams = parsing_config._ParseOpParams _construct_tensors_for_composite_features = ( parsing_config._construct_tensors_for_composite_features) # pylint: enable=protected-access # TODO(b/122887740) Switch files that use this private symbol to use new name. _construct_sparse_tensors_for_sparse_features = \ _construct_tensors_for_composite_features def _prepend_none_dimension(features): """Returns a copy of features with adjusted FixedLenSequenceFeature shapes.""" if features: modified_features = dict(features) # Create a copy to modify for key, feature in features.items(): if isinstance(feature, FixedLenSequenceFeature): if not feature.allow_missing: raise ValueError("Unsupported: FixedLenSequenceFeature requires " "allow_missing to be True.") modified_features[key] = FixedLenSequenceFeature( [None] + list(feature.shape), feature.dtype, feature.allow_missing, feature.default_value) return modified_features else: return features @tf_export("io.parse_example", v1=[]) @dispatch.add_dispatch_support def parse_example_v2(serialized, features, example_names=None, name=None): # pylint: disable=line-too-long """Parses `Example` protos into a `dict` of tensors. Parses a number of serialized [`Example`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto) protos given in `serialized`. We refer to `serialized` as a batch with `batch_size` many entries of individual `Example` protos. `example_names` may contain descriptive names for the corresponding serialized protos. These may be useful for debugging purposes, but they have no effect on the output. 
If not `None`, `example_names` must be the same length as `serialized`. This op parses serialized examples into a dictionary mapping keys to `Tensor` `SparseTensor`, and `RaggedTensor` objects. `features` is a dict from keys to `VarLenFeature`, `SparseFeature`, `RaggedFeature`, and `FixedLenFeature` objects. Each `VarLenFeature` and `SparseFeature` is mapped to a `SparseTensor`; each `FixedLenFeature` is mapped to a `Tensor`; and each `RaggedFeature` is mapped to a `RaggedTensor`. Each `VarLenFeature` maps to a `SparseTensor` of the specified type representing a ragged matrix. Its indices are `[batch, index]` where `batch` identifies the example in `serialized`, and `index` is the value's index in the list of values associated with that feature and example. Each `SparseFeature` maps to a `SparseTensor` of the specified type representing a Tensor of `dense_shape` `[batch_size] + SparseFeature.size`. Its `values` come from the feature in the examples with key `value_key`. A `values[i]` comes from a position `k` in the feature of an example at batch entry `batch`. This positional information is recorded in `indices[i]` as `[batch, index_0, index_1, ...]` where `index_j` is the `k-th` value of the feature in the example at with key `SparseFeature.index_key[j]`. In other words, we split the indices (except the first index indicating the batch entry) of a `SparseTensor` by dimension into different features of the `Example`. Due to its complexity a `VarLenFeature` should be preferred over a `SparseFeature` whenever possible. Each `FixedLenFeature` `df` maps to a `Tensor` of the specified type (or `tf.float32` if not specified) and shape `(serialized.size(),) + df.shape`. `FixedLenFeature` entries with a `default_value` are optional. With no default value, we will fail if that `Feature` is missing from any example in `serialized`. Each `FixedLenSequenceFeature` `df` maps to a `Tensor` of the specified type (or `tf.float32` if not specified) and shape `(serialized.size(), None) + df.shape`. All examples in `serialized` will be padded with `default_value` along the second dimension. Each `RaggedFeature` maps to a `RaggedTensor` of the specified type. It is formed by stacking the `RaggedTensor` for each example, where the `RaggedTensor` for each individual example is constructed using the tensors specified by `RaggedTensor.values_key` and `RaggedTensor.partition`. See the `tf.io.RaggedFeature` documentation for details and examples. 
Examples: For example, if one expects a `tf.float32` `VarLenFeature` `ft` and three serialized `Example`s are provided: ``` serialized = [ features { feature { key: "ft" value { float_list { value: [1.0, 2.0] } } } }, features { feature []}, features { feature { key: "ft" value { float_list { value: [3.0] } } } ] ``` then the output will look like: ```python {"ft": SparseTensor(indices=[[0, 0], [0, 1], [2, 0]], values=[1.0, 2.0, 3.0], dense_shape=(3, 2)) } ``` If instead a `FixedLenSequenceFeature` with `default_value = -1.0` and `shape=[]` is used then the output will look like: ```python {"ft": [[1.0, 2.0], [3.0, -1.0]]} ``` Given two `Example` input protos in `serialized`: ``` [ features { feature { key: "kw" value { bytes_list { value: [ "knit", "big" ] } } } feature { key: "gps" value { float_list { value: [] } } } }, features { feature { key: "kw" value { bytes_list { value: [ "emmy" ] } } } feature { key: "dank" value { int64_list { value: [ 42 ] } } } feature { key: "gps" value { } } } ] ``` And arguments ``` example_names: ["input0", "input1"], features: { "kw": VarLenFeature(tf.string), "dank": VarLenFeature(tf.int64), "gps": VarLenFeature(tf.float32), } ``` Then the output is a dictionary: ```python { "kw": SparseTensor( indices=[[0, 0], [0, 1], [1, 0]], values=["knit", "big", "emmy"] dense_shape=[2, 2]), "dank": SparseTensor( indices=[[1, 0]], values=[42], dense_shape=[2, 1]), "gps": SparseTensor( indices=[], values=[], dense_shape=[2, 0]), } ``` For dense results in two serialized `Example`s: ``` [ features { feature { key: "age" value { int64_list { value: [ 0 ] } } } feature { key: "gender" value { bytes_list { value: [ "f" ] } } } }, features { feature { key: "age" value { int64_list { value: [] } } } feature { key: "gender" value { bytes_list { value: [ "f" ] } } } } ] ``` We can use arguments: ``` example_names: ["input0", "input1"], features: { "age": FixedLenFeature([], dtype=tf.int64, default_value=-1), "gender": FixedLenFeature([], dtype=tf.string), } ``` And the expected output is: ```python { "age": [[0], [-1]], "gender": [["f"], ["f"]], } ``` An alternative to `VarLenFeature` to obtain a `SparseTensor` is `SparseFeature`. For example, given two `Example` input protos in `serialized`: ``` [ features { feature { key: "val" value { float_list { value: [ 0.5, -1.0 ] } } } feature { key: "ix" value { int64_list { value: [ 3, 20 ] } } } }, features { feature { key: "val" value { float_list { value: [ 0.0 ] } } } feature { key: "ix" value { int64_list { value: [ 42 ] } } } } ] ``` And arguments ``` example_names: ["input0", "input1"], features: { "sparse": SparseFeature( index_key="ix", value_key="val", dtype=tf.float32, size=100), } ``` Then the output is a dictionary: ```python { "sparse": SparseTensor( indices=[[0, 3], [0, 20], [1, 42]], values=[0.5, -1.0, 0.0] dense_shape=[2, 100]), } ``` See the `tf.io.RaggedFeature` documentation for examples showing how `RaggedFeature` can be used to obtain `RaggedTensor`s. Args: serialized: A vector (1-D Tensor) of strings, a batch of binary serialized `Example` protos. features: A `dict` mapping feature keys to `FixedLenFeature`, `VarLenFeature`, `SparseFeature`, and `RaggedFeature` values. example_names: A vector (1-D Tensor) of strings (optional), the names of the serialized protos in the batch. name: A name for this operation (optional). Returns: A `dict` mapping feature keys to `Tensor`, `SparseTensor`, and `RaggedTensor` values. Raises: ValueError: if any feature is invalid. 
""" if not features: raise ValueError("Missing: features was %s." % features) features = _prepend_none_dimension(features) params = _ParseOpParams.from_features(features, [ VarLenFeature, SparseFeature, FixedLenFeature, FixedLenSequenceFeature, RaggedFeature ]) outputs = _parse_example_raw(serialized, example_names, params, name=name) return _construct_tensors_for_composite_features(features, outputs) @tf_export(v1=["io.parse_example", "parse_example"]) @dispatch.add_dispatch_support def parse_example(serialized, features, name=None, example_names=None): return parse_example_v2(serialized, features, example_names, name) parse_example.__doc__ = parse_example_v2.__doc__ def _parse_example_raw(serialized, names, params, name): """Parses `Example` protos. Args: serialized: A vector (1-D Tensor) of strings, a batch of binary serialized `Example` protos. names: A vector (1-D Tensor) of strings (optional), the names of the serialized protos. params: A `ParseOpParams` containing the parameters for the parse op. name: A name for this operation (optional). Returns: A `dict` mapping keys to `Tensor`s and `SparseTensor`s and `RaggedTensor`s. """ if params.num_features == 0: raise ValueError("Must provide at least one feature key") with ops.name_scope(name, "ParseExample", [serialized, names]): names = [] if names is None else names serialized = ops.convert_to_tensor(serialized, name="serialized") if params.ragged_keys and serialized.shape.ndims is None: raise ValueError("serialized must have statically-known rank to " "parse ragged features.") outputs = gen_parsing_ops.parse_example_v2( serialized=serialized, names=names, sparse_keys=params.sparse_keys, dense_keys=params.dense_keys, ragged_keys=params.ragged_keys, dense_defaults=params.dense_defaults_vec, num_sparse=len(params.sparse_keys), sparse_types=params.sparse_types, ragged_value_types=params.ragged_value_types, ragged_split_types=params.ragged_split_types, dense_shapes=params.dense_shapes_as_proto, name=name) (sparse_indices, sparse_values, sparse_shapes, dense_values, ragged_values, ragged_row_splits) = outputs # pylint: disable=protected-access ragged_tensors = parsing_config._build_ragged_tensors( serialized.shape, ragged_values, ragged_row_splits) sparse_tensors = [ sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape) in zip(sparse_indices, sparse_values, sparse_shapes)] return dict( zip(params.sparse_keys + params.dense_keys + params.ragged_keys, sparse_tensors + dense_values + ragged_tensors)) @tf_export(v1=["io.parse_single_example", "parse_single_example"]) @dispatch.add_dispatch_support def parse_single_example(serialized, features, name=None, example_names=None): """Parses a single `Example` proto. Similar to `parse_example`, except: For dense tensors, the returned `Tensor` is identical to the output of `parse_example`, except there is no batch dimension, the output shape is the same as the shape given in `dense_shape`. For `SparseTensor`s, the first (batch) column of the indices matrix is removed (the indices matrix is a column vector), the values vector is unchanged, and the first (`batch_size`) entry of the shape vector is removed (it is now a single element vector). One might see performance advantages by batching `Example` protos with `parse_example` instead of using this function directly. Args: serialized: A scalar string Tensor, a single serialized Example. features: A `dict` mapping feature keys to `FixedLenFeature` or `VarLenFeature` values. name: A name for this operation (optional). 
example_names: (Optional) A scalar string Tensor, the associated name. Returns: A `dict` mapping feature keys to `Tensor` and `SparseTensor` values. Raises: ValueError: if any feature is invalid. """ return parse_single_example_v2(serialized, features, example_names, name) @tf_export("io.parse_single_example", v1=[]) @dispatch.add_dispatch_support def parse_single_example_v2( serialized, features, example_names=None, name=None ): """Parses a single `Example` proto. Similar to `parse_example`, except: For dense tensors, the returned `Tensor` is identical to the output of `parse_example`, except there is no batch dimension, the output shape is the same as the shape given in `dense_shape`. For `SparseTensor`s, the first (batch) column of the indices matrix is removed (the indices matrix is a column vector), the values vector is unchanged, and the first (`batch_size`) entry of the shape vector is removed (it is now a single element vector). One might see performance advantages by batching `Example` protos with `parse_example` instead of using this function directly. Args: serialized: A scalar string Tensor, a single serialized Example. features: A `dict` mapping feature keys to `FixedLenFeature` or `VarLenFeature` values. example_names: (Optional) A scalar string Tensor, the associated name. name: A name for this operation (optional). Returns: A `dict` mapping feature keys to `Tensor` and `SparseTensor` values. Raises: ValueError: if any feature is invalid. """ if not features: raise ValueError("Missing features.") with ops.name_scope(name, "ParseSingleExample", [serialized, example_names]): serialized = ops.convert_to_tensor(serialized, name="serialized") serialized = _assert_scalar(serialized, "serialized") return parse_example_v2(serialized, features, example_names, name) @tf_export("io.parse_sequence_example") @dispatch.add_dispatch_support def parse_sequence_example(serialized, context_features=None, sequence_features=None, example_names=None, name=None): # pylint: disable=line-too-long """Parses a batch of `SequenceExample` protos. Parses a vector of serialized [`SequenceExample`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto) protos given in `serialized`. This op parses serialized sequence examples into a tuple of dictionaries, each mapping keys to `Tensor` and `SparseTensor` objects. The first dictionary contains mappings for keys appearing in `context_features`, and the second dictionary contains mappings for keys appearing in `sequence_features`. At least one of `context_features` and `sequence_features` must be provided and non-empty. The `context_features` keys are associated with a `SequenceExample` as a whole, independent of time / frame. In contrast, the `sequence_features` keys provide a way to access variable-length data within the `FeatureList` section of the `SequenceExample` proto. While the shapes of `context_features` values are fixed with respect to frame, the frame dimension (the first dimension) of `sequence_features` values may vary between `SequenceExample` protos, and even between `feature_list` keys within the same `SequenceExample`. `context_features` contains `VarLenFeature`, `RaggedFeature`, and `FixedLenFeature` objects. Each `VarLenFeature` is mapped to a `SparseTensor`; each `RaggedFeature` is mapped to a `RaggedTensor`; and each `FixedLenFeature` is mapped to a `Tensor`, of the specified type, shape, and default value. `sequence_features` contains `VarLenFeature`, `RaggedFeature`, and `FixedLenSequenceFeature` objects. 
Each `VarLenFeature` is mapped to a `SparseTensor`; each `RaggedFeature` is mapped to a `RaggedTensor; and each `FixedLenSequenceFeature` is mapped to a `Tensor`, each of the specified type. The shape will be `(B,T,) + df.dense_shape` for `FixedLenSequenceFeature` `df`, where `B` is the batch size, and `T` is the length of the associated `FeatureList` in the `SequenceExample`. For instance, `FixedLenSequenceFeature([])` yields a scalar 2-D `Tensor` of static shape `[None, None]` and dynamic shape `[B, T]`, while `FixedLenSequenceFeature([k])` (for `int k >= 1`) yields a 3-D matrix `Tensor` of static shape `[None, None, k]` and dynamic shape `[B, T, k]`. Like the input, the resulting output tensors have a batch dimension. This means that the original per-example shapes of `VarLenFeature`s and `FixedLenSequenceFeature`s can be lost. To handle that situation, this op also provides dicts of shape tensors as part of the output. There is one dict for the context features, and one for the feature_list features. Context features of type `FixedLenFeature`s will not be present, since their shapes are already known by the caller. In situations where the input 'FixedLenFeature`s are of different lengths across examples, the shorter examples will be padded with default datatype values: 0 for numeric types, and the empty string for string types. Each `SparseTensor` corresponding to `sequence_features` represents a ragged vector. Its indices are `[time, index]`, where `time` is the `FeatureList` entry and `index` is the value's index in the list of values associated with that time. `FixedLenFeature` entries with a `default_value` and `FixedLenSequenceFeature` entries with `allow_missing=True` are optional; otherwise, we will fail if that `Feature` or `FeatureList` is missing from any example in `serialized`. `example_name` may contain a descriptive name for the corresponding serialized proto. This may be useful for debugging purposes, but it has no effect on the output. If not `None`, `example_name` must be a scalar. Args: serialized: A vector (1-D Tensor) of type string containing binary serialized `SequenceExample` protos. context_features: A `dict` mapping feature keys to `FixedLenFeature` or `VarLenFeature` or `RaggedFeature` values. These features are associated with a `SequenceExample` as a whole. sequence_features: A `dict` mapping feature keys to `FixedLenSequenceFeature` or `VarLenFeature` or `RaggedFeature` values. These features are associated with data within the `FeatureList` section of the `SequenceExample` proto. example_names: A vector (1-D Tensor) of strings (optional), the name of the serialized protos. name: A name for this operation (optional). Returns: A tuple of three `dict`s, each mapping keys to `Tensor`s, `SparseTensor`s, and `RaggedTensor`. The first dict contains the context key/values, the second dict contains the feature_list key/values, and the final dict contains the lengths of any dense feature_list features. Raises: ValueError: if any feature is invalid. 
""" if not (context_features or sequence_features): raise ValueError("Missing features.") context_params = _ParseOpParams.from_features( context_features, [VarLenFeature, FixedLenFeature, RaggedFeature]) feature_list_params = _ParseOpParams.from_features( sequence_features, [VarLenFeature, FixedLenSequenceFeature, RaggedFeature]) with ops.name_scope(name, "ParseSequenceExample", [serialized, example_names]): outputs = _parse_sequence_example_raw(serialized, example_names, context_params, feature_list_params, name) context_output, feature_list_output, feature_list_lengths = outputs if context_params.ragged_keys: context_output = _construct_tensors_for_composite_features( context_features, context_output) if feature_list_params.ragged_keys: feature_list_output = _construct_tensors_for_composite_features( sequence_features, feature_list_output) return context_output, feature_list_output, feature_list_lengths def _parse_sequence_example_raw(serialized, debug_name, context, feature_list, name=None): """Parses a vector of `SequenceExample` protos. Args: serialized: A vector (1-D Tensor) of type string, containing binary serialized `SequenceExample` protos. debug_name: A vector (1-D Tensor) of strings (optional), the names of the serialized protos. context: A `ParseOpParams` containing the parameters for the parse op for the context features. feature_list: A `ParseOpParams` containing the parameters for the parse op for the feature_list features. name: A name for this operation (optional). Returns: A tuple of three `dict`s, each mapping keys to `Tensor`s, `SparseTensor`s, and `RaggedTensor`s. The first dict contains the context key/values, the second dict contains the feature_list key/values, and the final dict contains the lengths of any dense feature_list features. Raises: TypeError: if feature_list.dense_defaults is not either None or a dict. 
""" if context.num_features + feature_list.num_features == 0: raise ValueError("Must provide at least one feature key") with ops.name_scope(name, "ParseSequenceExample", [serialized]): debug_name = [] if debug_name is None else debug_name # Internal feature_list_dense_missing_assumed_empty = [] for k, v in feature_list.dense_defaults.items(): if v is not None: raise ValueError("Value feature_list.dense_defaults[%s] must be None" % k) feature_list_dense_missing_assumed_empty.append(k) has_ragged = context.ragged_keys or feature_list.ragged_keys serialized = ops.convert_to_tensor(serialized, name="serialized") if has_ragged and serialized.shape.ndims is None: raise ValueError("serialized must have statically-known rank to " "parse ragged features.") feature_list_dense_missing_assumed_empty_vector = [ key in feature_list_dense_missing_assumed_empty for key in feature_list.dense_keys ] outputs = gen_parsing_ops.parse_sequence_example_v2( # Inputs serialized=serialized, debug_name=debug_name, context_sparse_keys=context.sparse_keys, context_dense_keys=context.dense_keys, context_ragged_keys=context.ragged_keys, feature_list_sparse_keys=feature_list.sparse_keys, feature_list_dense_keys=feature_list.dense_keys, feature_list_ragged_keys=feature_list.ragged_keys, feature_list_dense_missing_assumed_empty=( feature_list_dense_missing_assumed_empty_vector), context_dense_defaults=context.dense_defaults_vec, # Attrs Ncontext_sparse=len(context.sparse_keys), Nfeature_list_sparse=len(feature_list.sparse_keys), Nfeature_list_dense=len(feature_list.dense_keys), context_sparse_types=context.sparse_types, context_ragged_value_types=context.ragged_value_types, context_ragged_split_types=context.ragged_split_types, feature_list_dense_types=feature_list.dense_types, feature_list_sparse_types=feature_list.sparse_types, feature_list_ragged_value_types=feature_list.ragged_value_types, feature_list_ragged_split_types=feature_list.ragged_split_types, context_dense_shapes=context.dense_shapes_as_proto, feature_list_dense_shapes=feature_list.dense_shapes, name=name) (context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, context_ragged_values, context_ragged_row_splits, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values, feature_list_dense_lengths, feature_list_ragged_values, feature_list_ragged_outer_splits, feature_list_ragged_inner_splits) = outputs # pylint: disable=protected-access context_ragged_tensors = parsing_config._build_ragged_tensors( serialized.shape, context_ragged_values, context_ragged_row_splits) feature_list_ragged_tensors = parsing_config._build_ragged_tensors( serialized.shape, feature_list_ragged_values, feature_list_ragged_outer_splits, feature_list_ragged_inner_splits) # pylint: disable=g-complex-comprehension context_sparse_tensors = [ sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape) in zip(context_sparse_indices, context_sparse_values, context_sparse_shapes) ] feature_list_sparse_tensors = [ sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape ) in zip(feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes) ] # pylint: enable=g-complex-comprehension context_output = dict( zip( context.sparse_keys + context.dense_keys + context.ragged_keys, context_sparse_tensors + context_dense_values + context_ragged_tensors)) feature_list_output = dict( zip( feature_list.sparse_keys + feature_list.dense_keys + feature_list.ragged_keys, 
feature_list_sparse_tensors + feature_list_dense_values + feature_list_ragged_tensors)) feature_list_lengths = dict( zip(feature_list.dense_keys, feature_list_dense_lengths)) return (context_output, feature_list_output, feature_list_lengths) @tf_export("io.parse_single_sequence_example", v1=["io.parse_single_sequence_example", "parse_single_sequence_example"]) @dispatch.add_dispatch_support def parse_single_sequence_example( serialized, context_features=None, sequence_features=None, example_name=None, name=None): # pylint: disable=line-too-long """Parses a single `SequenceExample` proto. Parses a single serialized [`SequenceExample`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto) proto given in `serialized`. This op parses a serialized sequence example into a tuple of dictionaries, each mapping keys to `Tensor` and `SparseTensor` objects. The first dictionary contains mappings for keys appearing in `context_features`, and the second dictionary contains mappings for keys appearing in `sequence_features`. At least one of `context_features` and `sequence_features` must be provided and non-empty. The `context_features` keys are associated with a `SequenceExample` as a whole, independent of time / frame. In contrast, the `sequence_features` keys provide a way to access variable-length data within the `FeatureList` section of the `SequenceExample` proto. While the shapes of `context_features` values are fixed with respect to frame, the frame dimension (the first dimension) of `sequence_features` values may vary between `SequenceExample` protos, and even between `feature_list` keys within the same `SequenceExample`. `context_features` contains `VarLenFeature`, `RaggedFeature`, and `FixedLenFeature` objects. Each `VarLenFeature` is mapped to a `SparseTensor`; each `RaggedFeature` is mapped to a `RaggedTensor`; and each `FixedLenFeature` is mapped to a `Tensor`, of the specified type, shape, and default value. `sequence_features` contains `VarLenFeature`, `RaggedFeature`, and `FixedLenSequenceFeature` objects. Each `VarLenFeature` is mapped to a `SparseTensor`; each `RaggedFeature` is mapped to a `RaggedTensor`; and each `FixedLenSequenceFeature` is mapped to a `Tensor`, each of the specified type. The shape will be `(T,) + df.dense_shape` for `FixedLenSequenceFeature` `df`, where `T` is the length of the associated `FeatureList` in the `SequenceExample`. For instance, `FixedLenSequenceFeature([])` yields a scalar 1-D `Tensor` of static shape `[None]` and dynamic shape `[T]`, while `FixedLenSequenceFeature([k])` (for `int k >= 1`) yields a 2-D matrix `Tensor` of static shape `[None, k]` and dynamic shape `[T, k]`. Each `SparseTensor` corresponding to `sequence_features` represents a ragged vector. Its indices are `[time, index]`, where `time` is the `FeatureList` entry and `index` is the value's index in the list of values associated with that time. `FixedLenFeature` entries with a `default_value` and `FixedLenSequenceFeature` entries with `allow_missing=True` are optional; otherwise, we will fail if that `Feature` or `FeatureList` is missing from any example in `serialized`. `example_name` may contain a descriptive name for the corresponding serialized proto. This may be useful for debugging purposes, but it has no effect on the output. If not `None`, `example_name` must be a scalar. Note that the batch version of this function, `tf.parse_sequence_example`, is written for better memory efficiency and will be faster on large `SequenceExample`s. 
Args: serialized: A scalar (0-D Tensor) of type string, a single binary serialized `SequenceExample` proto. context_features: A `dict` mapping feature keys to `FixedLenFeature` or `VarLenFeature` or `RaggedFeature` values. These features are associated with a `SequenceExample` as a whole. sequence_features: A `dict` mapping feature keys to `FixedLenSequenceFeature` or `VarLenFeature` or `RaggedFeature` values. These features are associated with data within the `FeatureList` section of the `SequenceExample` proto. example_name: A scalar (0-D Tensor) of strings (optional), the name of the serialized proto. name: A name for this operation (optional). Returns: A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s and `RaggedTensor`s. * The first dict contains the context key/values. * The second dict contains the feature_list key/values. Raises: ValueError: if any feature is invalid. """ # pylint: enable=line-too-long if not (context_features or sequence_features): raise ValueError("Missing features.") context_params = _ParseOpParams.from_features( context_features, [VarLenFeature, FixedLenFeature, RaggedFeature]) feature_list_params = _ParseOpParams.from_features( sequence_features, [VarLenFeature, FixedLenSequenceFeature, RaggedFeature]) with ops.name_scope(name, "ParseSingleSequenceExample", [serialized, example_name]): context_output, feature_list_output = ( _parse_single_sequence_example_raw(serialized, context_params, feature_list_params, example_name, name)) if context_params.ragged_keys: context_output = _construct_tensors_for_composite_features( context_features, context_output) if feature_list_params.ragged_keys: feature_list_output = _construct_tensors_for_composite_features( sequence_features, feature_list_output) return context_output, feature_list_output def _parse_single_sequence_example_raw(serialized, context, feature_list, debug_name, name=None): """Parses a single `SequenceExample` proto. Args: serialized: A scalar (0-D Tensor) of type string, a single binary serialized `SequenceExample` proto. context: A `ParseOpParams` containing the parameters for the parse op for the context features. feature_list: A `ParseOpParams` containing the parameters for the parse op for the feature_list features. debug_name: A scalar (0-D Tensor) of strings (optional), the name of the serialized proto. name: A name for this operation (optional). Returns: A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s. The first dict contains the context key/values. The second dict contains the feature_list key/values. Raises: TypeError: if feature_list.dense_defaults is not either None or a dict. """ with ops.name_scope(name, "ParseSingleExample", [serialized, debug_name]): serialized = ops.convert_to_tensor(serialized, name="serialized") serialized = _assert_scalar(serialized, "serialized") return _parse_sequence_example_raw(serialized, debug_name, context, feature_list, name)[:2] @tf_export("io.decode_raw", v1=[]) @dispatch.add_dispatch_support def decode_raw(input_bytes, out_type, little_endian=True, fixed_length=None, name=None): r"""Convert raw bytes from input tensor into numeric tensors. The input tensor is interpreted as a sequence of bytes. These bytes are then decoded as numbers in the format specified by `out_type`. 
>>> tf.io.decode_raw(tf.constant("1"), tf.uint8) <tf.Tensor: shape=(1,), dtype=uint8, numpy=array([49], dtype=uint8)> >>> tf.io.decode_raw(tf.constant("1,2"), tf.uint8) <tf.Tensor: shape=(3,), dtype=uint8, numpy=array([49, 44, 50], dtype=uint8)> Note that the rank of the output tensor is always one more than the input one: >>> tf.io.decode_raw(tf.constant(["1","2"]), tf.uint8).shape TensorShape([2, 1]) >>> tf.io.decode_raw(tf.constant([["1"],["2"]]), tf.uint8).shape TensorShape([2, 1, 1]) This is because each byte in the input is converted to a new value on the output (if output type is `uint8` or `int8`, otherwise chunks of inputs get coverted to a new value): >>> tf.io.decode_raw(tf.constant("123"), tf.uint8) <tf.Tensor: shape=(3,), dtype=uint8, numpy=array([49, 50, 51], dtype=uint8)> >>> tf.io.decode_raw(tf.constant("1234"), tf.uint8) <tf.Tensor: shape=(4,), dtype=uint8, numpy=array([49, 50, 51, 52], ... >>> # chuncked output >>> tf.io.decode_raw(tf.constant("12"), tf.uint16) <tf.Tensor: shape=(1,), dtype=uint16, numpy=array([12849], dtype=uint16)> >>> tf.io.decode_raw(tf.constant("1234"), tf.uint16) <tf.Tensor: shape=(2,), dtype=uint16, numpy=array([12849, 13363], ... >>> # int64 output >>> tf.io.decode_raw(tf.constant("12345678"), tf.int64) <tf.Tensor: ... numpy=array([4050765991979987505])> >>> tf.io.decode_raw(tf.constant("1234567887654321"), tf.int64) <tf.Tensor: ... numpy=array([4050765991979987505, 3544952156018063160])> The operation allows specifying endianness via the `little_endian` parameter. >>> tf.io.decode_raw(tf.constant("\x0a\x0b"), tf.int16) <tf.Tensor: shape=(1,), dtype=int16, numpy=array([2826], dtype=int16)> >>> hex(2826) '0xb0a' >>> tf.io.decode_raw(tf.constant("\x0a\x0b"), tf.int16, little_endian=False) <tf.Tensor: shape=(1,), dtype=int16, numpy=array([2571], dtype=int16)> >>> hex(2571) '0xa0b' If the elements of `input_bytes` are of different length, you must specify `fixed_length`: >>> tf.io.decode_raw(tf.constant([["1"],["23"]]), tf.uint8, fixed_length=4) <tf.Tensor: shape=(2, 1, 4), dtype=uint8, numpy= array([[[49, 0, 0, 0]], [[50, 51, 0, 0]]], dtype=uint8)> If the `fixed_length` value is larger that the length of the `out_type` dtype, multiple values are generated: >>> tf.io.decode_raw(tf.constant(["1212"]), tf.uint16, fixed_length=4) <tf.Tensor: shape=(1, 2), dtype=uint16, numpy=array([[12849, 12849]], ... Note: There is currently a bug in `fixed_length` that can result in data loss: >>> # truncated to length of type as it matches fixed_length >>> tf.io.decode_raw(tf.constant(["1212"]), tf.uint16, fixed_length=2) <tf.Tensor: shape=(1, 1), dtype=uint16, numpy=array([[12849]], dtype=uint16)> >>> # ignores the second component >>> tf.io.decode_raw(tf.constant(["12","34"]), tf.uint16, fixed_length=2) <tf.Tensor: shape=(2, 1), dtype=uint16, numpy= array([[12849], [ 0]], dtype=uint16)> >>> tf.io.decode_raw(tf.constant(["12","34"]), tf.uint16, fixed_length=4) <tf.Tensor: shape=(2, 2), dtype=uint16, numpy= array([[12849, 0], [ 0, 0]], dtype=uint16)> This will be fixed on a future release of TensorFlow. Args: input_bytes: Each element of the input Tensor is converted to an array of bytes. Currently, this must be a tensor of strings (bytes), although semantically the operation should support any input. out_type: `DType` of the output. Acceptable types are `half`, `float`, `double`, `int32`, `uint16`, `uint8`, `int16`, `int8`, `int64`. little_endian: Whether the `input_bytes` data is in little-endian format. Data will be converted into host byte order if necessary. 
fixed_length: If set, the first `fixed_length` bytes of each element will be converted. Data will be zero-padded or truncated to the specified length. `fixed_length` must be a multiple of the size of `out_type`. `fixed_length` must be specified if the elements of `input_bytes` are of variable length. name: A name for the operation (optional). Returns: A `Tensor` object storing the decoded bytes. """ if fixed_length is not None: return gen_parsing_ops.decode_padded_raw( input_bytes, fixed_length=fixed_length, out_type=out_type, little_endian=little_endian, name=name) else: return gen_parsing_ops.decode_raw( input_bytes, out_type, little_endian=little_endian, name=name) @tf_export(v1=["decode_raw", "io.decode_raw"]) @dispatch.add_dispatch_support @deprecation.deprecated_args(None, "bytes is deprecated, use input_bytes instead", "bytes") def decode_raw_v1( input_bytes=None, out_type=None, little_endian=True, name=None, bytes=None # pylint: disable=redefined-builtin ): """Convert raw byte strings into tensors. Args: input_bytes: Each element of the input Tensor is converted to an array of bytes. out_type: `DType` of the output. Acceptable types are `half`, `float`, `double`, `int32`, `uint16`, `uint8`, `int16`, `int8`, `int64`. little_endian: Whether the `input_bytes` data is in little-endian format. Data will be converted into host byte order if necessary. name: A name for the operation (optional). bytes: Deprecated parameter. Use `input_bytes` instead. Returns: A `Tensor` object storing the decoded bytes. """ input_bytes = deprecation.deprecated_argument_lookup("input_bytes", input_bytes, "bytes", bytes) # out_type is a required positional argument in the original API, and had to # be changed to a keyword argument in order to facilitate the transition from # the reserved named `bytes` to `input_bytes`. Ensure it's still set. if out_type is None: raise ValueError( "decode_raw_v1() missing 1 positional argument: 'out_type'") return gen_parsing_ops.decode_raw( input_bytes, out_type, little_endian=little_endian, name=name) # Swap `name` and `na_value` for backward compatibility. @tf_export(v1=["io.decode_csv", "decode_csv"]) @dispatch.add_dispatch_support @deprecation.deprecated_endpoints("decode_csv") def decode_csv(records, record_defaults, field_delim=",", use_quote_delim=True, name=None, na_value="", select_cols=None): """Convert CSV records to tensors. Each column maps to one tensor. RFC 4180 format is expected for the CSV records. (https://tools.ietf.org/html/rfc4180) Note that we allow leading and trailing spaces with int or float field. Args: records: A `Tensor` of type `string`. Each string is a record/row in the csv and all records should have the same format. record_defaults: A list of `Tensor` objects with specific types. Acceptable types are `float32`, `float64`, `int32`, `int64`, `string`. One tensor per column of the input record, with either a scalar default value for that column or an empty vector if the column is required. field_delim: An optional `string`. Defaults to `","`. char delimiter to separate fields in a record. use_quote_delim: An optional `bool`. Defaults to `True`. If false, treats double quotation marks as regular characters inside of the string fields (ignoring RFC 4180, Section 2, Bullet 5). name: A name for the operation (optional). na_value: Additional string to recognize as NA/NaN. select_cols: Optional sorted list of column indices to select. If specified, only this subset of columns will be parsed and returned. Returns: A list of `Tensor` objects. 
Has the same type as `record_defaults`. Each tensor will have the same shape as records. Raises: ValueError: If any of the arguments is malformed. """ return decode_csv_v2( records, record_defaults, field_delim, use_quote_delim, na_value, select_cols, name ) @tf_export("io.decode_csv", v1=[]) @dispatch.add_dispatch_support def decode_csv_v2(records, record_defaults, field_delim=",", use_quote_delim=True, na_value="", select_cols=None, name=None): """Convert CSV records to tensors. Each column maps to one tensor. RFC 4180 format is expected for the CSV records. (https://tools.ietf.org/html/rfc4180) Note that we allow leading and trailing spaces with int or float field. Args: records: A `Tensor` of type `string`. Each string is a record/row in the csv and all records should have the same format. record_defaults: A list of `Tensor` objects with specific types. Acceptable types are `float32`, `float64`, `int32`, `int64`, `string`. One tensor per column of the input record, with either a scalar default value for that column or an empty vector if the column is required. field_delim: An optional `string`. Defaults to `","`. char delimiter to separate fields in a record. use_quote_delim: An optional `bool`. Defaults to `True`. If false, treats double quotation marks as regular characters inside of the string fields (ignoring RFC 4180, Section 2, Bullet 5). na_value: Additional string to recognize as NA/NaN. select_cols: Optional sorted list of column indices to select. If specified, only this subset of columns will be parsed and returned. name: A name for the operation (optional). Returns: A list of `Tensor` objects. Has the same type as `record_defaults`. Each tensor will have the same shape as records. Raises: ValueError: If any of the arguments is malformed. """ if select_cols is not None and any(select_cols[i] >= select_cols[i + 1] for i in range(len(select_cols) - 1)): raise ValueError("select_cols is not strictly increasing.") if select_cols is not None and select_cols[0] < 0: raise ValueError("select_cols contains negative values.") if select_cols is not None and len(select_cols) != len(record_defaults): raise ValueError("Length of select_cols and record_defaults do not match.") return gen_parsing_ops.decode_csv( records=records, record_defaults=record_defaults, field_delim=field_delim, use_quote_delim=use_quote_delim, na_value=na_value, name=name, select_cols=select_cols, ) def _assert_scalar(value, name): """Asserts that `value` is scalar, and returns `value`.""" value_rank = value.shape.rank if value_rank is None: check = control_flow_ops.Assert( math_ops.equal(array_ops.rank(value), 0), ["Input %s must be a scalar" % name], name="%sIsScalar" % name.capitalize()) result = control_flow_ops.with_dependencies([check], value, name="%sDependencies" % name) result.set_shape([]) return result elif value_rank == 0: return value else: raise ValueError("Input %s must be a scalar" % name) @tf_export("io.decode_json_example", v1=["decode_json_example", "io.decode_json_example"]) def decode_json_example(json_examples, name=None): r"""Convert JSON-encoded Example records to binary protocol buffer strings. Note: This is **not** a general purpose JSON parsing op. 
This op converts JSON-serialized `tf.train.Example` (maybe created with `json_format.MessageToJson`, following the [standard JSON mapping]( https://developers.google.com/protocol-buffers/docs/proto3#json)) to a binary-serialized `tf.train.Example` (equivalent to `Example.SerializeToString()`) suitable for conversion to tensors with `tf.io.parse_example`. Here is a `tf.train.Example` proto: >>> example = tf.train.Example( ... features=tf.train.Features( ... feature={ ... "a": tf.train.Feature( ... int64_list=tf.train.Int64List( ... value=[1, 1, 3]))})) Here it is converted to JSON: >>> from google.protobuf import json_format >>> example_json = json_format.MessageToJson(example) >>> print(example_json) { "features": { "feature": { "a": { "int64List": { "value": [ "1", "1", "3" ] } } } } } This op converts the above json string to a binary proto: >>> example_binary = tf.io.decode_json_example(example_json) >>> example_binary.numpy() b'\n\x0f\n\r\n\x01a\x12\x08\x1a\x06\x08\x01\x08\x01\x08\x03' The OP works on string tensors of andy shape: >>> tf.io.decode_json_example([ ... [example_json, example_json], ... [example_json, example_json]]).shape.as_list() [2, 2] This resulting binary-string is equivalent to `Example.SerializeToString()`, and can be converted to Tensors using `tf.io.parse_example` and related functions: >>> tf.io.parse_example( ... serialized=[example_binary.numpy(), ... example.SerializeToString()], ... features = {'a': tf.io.FixedLenFeature(shape=[3], dtype=tf.int64)}) {'a': <tf.Tensor: shape=(2, 3), dtype=int64, numpy= array([[1, 1, 3], [1, 1, 3]])>} Args: json_examples: A string tensor containing json-serialized `tf.Example` protos. name: A name for the op. Returns: A string Tensor containing the binary-serialized `tf.Example` protos. Raises: `tf.errors.InvalidArgumentError`: If the JSON could not be converted to a `tf.Example` """ return gen_parsing_ops.decode_json_example(json_examples, name=name)
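The `parse_example` docstring above describes its inputs using proto text format. Here is a small runnable sketch (not part of the original file) that builds the same kind of `Example` protos in Python, serializes them, and parses them back with a `VarLenFeature`, reproducing the sparse output layout the docstring describes:

```python
import tensorflow as tf

def make_example(values):
    # Serialize a tf.train.Example with one variable-length float feature "ft".
    return tf.train.Example(features=tf.train.Features(feature={
        "ft": tf.train.Feature(float_list=tf.train.FloatList(value=values)),
    })).SerializeToString()

serialized = [make_example([1.0, 2.0]), make_example([3.0])]

# A VarLenFeature maps to a SparseTensor whose first index column identifies
# the batch entry, as described in the docstring above.
parsed = tf.io.parse_example(serialized,
                             features={"ft": tf.io.VarLenFeature(tf.float32)})
print(parsed["ft"].indices.numpy())      # [[0 0] [0 1] [1 0]]
print(parsed["ft"].values.numpy())       # [1. 2. 3.]
print(parsed["ft"].dense_shape.numpy())  # [2 2]
```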
null
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Parsing Ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_parsing_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import parsing_config # go/tf-wildcard-import # pylint: disable=wildcard-import,undefined-variable from tensorflow.python.ops.gen_parsing_ops import * # pylint: enable=wildcard-import,undefined-variable from tensorflow.python.util import deprecation from tensorflow.python.util import dispatch from tensorflow.python.util.tf_export import tf_export ops.NotDifferentiable("DecodeRaw") ops.NotDifferentiable("DecodePaddedRaw") ops.NotDifferentiable("ParseTensor") ops.NotDifferentiable("SerializeTensor") ops.NotDifferentiable("StringToNumber") VarLenFeature = parsing_config.VarLenFeature RaggedFeature = parsing_config.RaggedFeature SparseFeature = parsing_config.SparseFeature FixedLenFeature = parsing_config.FixedLenFeature FixedLenSequenceFeature = parsing_config.FixedLenSequenceFeature # pylint: disable=protected-access _ParseOpParams = parsing_config._ParseOpParams _construct_tensors_for_composite_features = ( parsing_config._construct_tensors_for_composite_features) # pylint: enable=protected-access # TODO(b/122887740) Switch files that use this private symbol to use new name. _construct_sparse_tensors_for_sparse_features = \ _construct_tensors_for_composite_features def _prepend_none_dimension(features): """Returns a copy of features with adjusted FixedLenSequenceFeature shapes.""" if features: modified_features = dict(features) # Create a copy to modify for key, feature in features.items(): if isinstance(feature, FixedLenSequenceFeature): if not feature.allow_missing: raise ValueError("Unsupported: FixedLenSequenceFeature requires " "allow_missing to be True.") modified_features[key] = FixedLenSequenceFeature( [None] + list(feature.shape), feature.dtype, feature.allow_missing, feature.default_value) return modified_features else: return features @tf_export("io.parse_example", v1=[]) @dispatch.add_dispatch_support def parse_example_v2(serialized, features, example_names=None, name=None): # pylint: disable=line-too-long """Parses `Example` protos into a `dict` of tensors. Parses a number of serialized [`Example`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto) protos given in `serialized`. We refer to `serialized` as a batch with `batch_size` many entries of individual `Example` protos. `example_names` may contain descriptive names for the corresponding serialized protos. These may be useful for debugging purposes, but they have no effect on the output. 
If not `None`, `example_names` must be the same length as `serialized`. This op parses serialized examples into a dictionary mapping keys to `Tensor` `SparseTensor`, and `RaggedTensor` objects. `features` is a dict from keys to `VarLenFeature`, `SparseFeature`, `RaggedFeature`, and `FixedLenFeature` objects. Each `VarLenFeature` and `SparseFeature` is mapped to a `SparseTensor`; each `FixedLenFeature` is mapped to a `Tensor`; and each `RaggedFeature` is mapped to a `RaggedTensor`. Each `VarLenFeature` maps to a `SparseTensor` of the specified type representing a ragged matrix. Its indices are `[batch, index]` where `batch` identifies the example in `serialized`, and `index` is the value's index in the list of values associated with that feature and example. Each `SparseFeature` maps to a `SparseTensor` of the specified type representing a Tensor of `dense_shape` `[batch_size] + SparseFeature.size`. Its `values` come from the feature in the examples with key `value_key`. A `values[i]` comes from a position `k` in the feature of an example at batch entry `batch`. This positional information is recorded in `indices[i]` as `[batch, index_0, index_1, ...]` where `index_j` is the `k-th` value of the feature in the example at with key `SparseFeature.index_key[j]`. In other words, we split the indices (except the first index indicating the batch entry) of a `SparseTensor` by dimension into different features of the `Example`. Due to its complexity a `VarLenFeature` should be preferred over a `SparseFeature` whenever possible. Each `FixedLenFeature` `df` maps to a `Tensor` of the specified type (or `tf.float32` if not specified) and shape `(serialized.size(),) + df.shape`. `FixedLenFeature` entries with a `default_value` are optional. With no default value, we will fail if that `Feature` is missing from any example in `serialized`. Each `FixedLenSequenceFeature` `df` maps to a `Tensor` of the specified type (or `tf.float32` if not specified) and shape `(serialized.size(), None) + df.shape`. All examples in `serialized` will be padded with `default_value` along the second dimension. Each `RaggedFeature` maps to a `RaggedTensor` of the specified type. It is formed by stacking the `RaggedTensor` for each example, where the `RaggedTensor` for each individual example is constructed using the tensors specified by `RaggedTensor.values_key` and `RaggedTensor.partition`. See the `tf.io.RaggedFeature` documentation for details and examples. 
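  As a quick, runnable sketch of the `FixedLenFeature` mapping described above
  (the feature key `"x"` and its values are made up for illustration; assumes
  eager execution):

  >>> example = tf.train.Example(
  ...     features=tf.train.Features(
  ...         feature={"x": tf.train.Feature(
  ...             float_list=tf.train.FloatList(value=[1.0, 2.0]))}))
  >>> parsed = tf.io.parse_example(
  ...     serialized=[example.SerializeToString()],
  ...     features={"x": tf.io.FixedLenFeature([2], dtype=tf.float32)})
  >>> parsed["x"].numpy()
  array([[1., 2.]], dtype=float32)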
Examples: For example, if one expects a `tf.float32` `VarLenFeature` `ft` and three serialized `Example`s are provided: ``` serialized = [ features { feature { key: "ft" value { float_list { value: [1.0, 2.0] } } } }, features { feature []}, features { feature { key: "ft" value { float_list { value: [3.0] } } } ] ``` then the output will look like: ```python {"ft": SparseTensor(indices=[[0, 0], [0, 1], [2, 0]], values=[1.0, 2.0, 3.0], dense_shape=(3, 2)) } ``` If instead a `FixedLenSequenceFeature` with `default_value = -1.0` and `shape=[]` is used then the output will look like: ```python {"ft": [[1.0, 2.0], [3.0, -1.0]]} ``` Given two `Example` input protos in `serialized`: ``` [ features { feature { key: "kw" value { bytes_list { value: [ "knit", "big" ] } } } feature { key: "gps" value { float_list { value: [] } } } }, features { feature { key: "kw" value { bytes_list { value: [ "emmy" ] } } } feature { key: "dank" value { int64_list { value: [ 42 ] } } } feature { key: "gps" value { } } } ] ``` And arguments ``` example_names: ["input0", "input1"], features: { "kw": VarLenFeature(tf.string), "dank": VarLenFeature(tf.int64), "gps": VarLenFeature(tf.float32), } ``` Then the output is a dictionary: ```python { "kw": SparseTensor( indices=[[0, 0], [0, 1], [1, 0]], values=["knit", "big", "emmy"] dense_shape=[2, 2]), "dank": SparseTensor( indices=[[1, 0]], values=[42], dense_shape=[2, 1]), "gps": SparseTensor( indices=[], values=[], dense_shape=[2, 0]), } ``` For dense results in two serialized `Example`s: ``` [ features { feature { key: "age" value { int64_list { value: [ 0 ] } } } feature { key: "gender" value { bytes_list { value: [ "f" ] } } } }, features { feature { key: "age" value { int64_list { value: [] } } } feature { key: "gender" value { bytes_list { value: [ "f" ] } } } } ] ``` We can use arguments: ``` example_names: ["input0", "input1"], features: { "age": FixedLenFeature([], dtype=tf.int64, default_value=-1), "gender": FixedLenFeature([], dtype=tf.string), } ``` And the expected output is: ```python { "age": [[0], [-1]], "gender": [["f"], ["f"]], } ``` An alternative to `VarLenFeature` to obtain a `SparseTensor` is `SparseFeature`. For example, given two `Example` input protos in `serialized`: ``` [ features { feature { key: "val" value { float_list { value: [ 0.5, -1.0 ] } } } feature { key: "ix" value { int64_list { value: [ 3, 20 ] } } } }, features { feature { key: "val" value { float_list { value: [ 0.0 ] } } } feature { key: "ix" value { int64_list { value: [ 42 ] } } } } ] ``` And arguments ``` example_names: ["input0", "input1"], features: { "sparse": SparseFeature( index_key="ix", value_key="val", dtype=tf.float32, size=100), } ``` Then the output is a dictionary: ```python { "sparse": SparseTensor( indices=[[0, 3], [0, 20], [1, 42]], values=[0.5, -1.0, 0.0] dense_shape=[2, 100]), } ``` See the `tf.io.RaggedFeature` documentation for examples showing how `RaggedFeature` can be used to obtain `RaggedTensor`s. Args: serialized: A vector (1-D Tensor) of strings, a batch of binary serialized `Example` protos. features: A `dict` mapping feature keys to `FixedLenFeature`, `VarLenFeature`, `SparseFeature`, and `RaggedFeature` values. example_names: A vector (1-D Tensor) of strings (optional), the names of the serialized protos in the batch. name: A name for this operation (optional). Returns: A `dict` mapping feature keys to `Tensor`, `SparseTensor`, and `RaggedTensor` values. Raises: ValueError: if any feature is invalid. 
""" if not features: raise ValueError("Missing: features was %s." % features) features = _prepend_none_dimension(features) params = _ParseOpParams.from_features(features, [ VarLenFeature, SparseFeature, FixedLenFeature, FixedLenSequenceFeature, RaggedFeature ]) outputs = _parse_example_raw(serialized, example_names, params, name=name) return _construct_tensors_for_composite_features(features, outputs) @tf_export(v1=["io.parse_example", "parse_example"]) @dispatch.add_dispatch_support def parse_example(serialized, features, name=None, example_names=None): return parse_example_v2(serialized, features, example_names, name) parse_example.__doc__ = parse_example_v2.__doc__ def _parse_example_raw(serialized, names, params, name): """Parses `Example` protos. Args: serialized: A vector (1-D Tensor) of strings, a batch of binary serialized `Example` protos. names: A vector (1-D Tensor) of strings (optional), the names of the serialized protos. params: A `ParseOpParams` containing the parameters for the parse op. name: A name for this operation (optional). Returns: A `dict` mapping keys to `Tensor`s and `SparseTensor`s and `RaggedTensor`s. """ if params.num_features == 0: raise ValueError("Must provide at least one feature key") with ops.name_scope(name, "ParseExample", [serialized, names]): names = [] if names is None else names serialized = ops.convert_to_tensor(serialized, name="serialized") if params.ragged_keys and serialized.shape.ndims is None: raise ValueError("serialized must have statically-known rank to " "parse ragged features.") outputs = gen_parsing_ops.parse_example_v2( serialized=serialized, names=names, sparse_keys=params.sparse_keys, dense_keys=params.dense_keys, ragged_keys=params.ragged_keys, dense_defaults=params.dense_defaults_vec, num_sparse=len(params.sparse_keys), sparse_types=params.sparse_types, ragged_value_types=params.ragged_value_types, ragged_split_types=params.ragged_split_types, dense_shapes=params.dense_shapes_as_proto, name=name) (sparse_indices, sparse_values, sparse_shapes, dense_values, ragged_values, ragged_row_splits) = outputs # pylint: disable=protected-access ragged_tensors = parsing_config._build_ragged_tensors( serialized.shape, ragged_values, ragged_row_splits) sparse_tensors = [ sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape) in zip(sparse_indices, sparse_values, sparse_shapes)] return dict( zip(params.sparse_keys + params.dense_keys + params.ragged_keys, sparse_tensors + dense_values + ragged_tensors)) @tf_export(v1=["io.parse_single_example", "parse_single_example"]) @dispatch.add_dispatch_support def parse_single_example(serialized, features, name=None, example_names=None): """Parses a single `Example` proto. Similar to `parse_example`, except: For dense tensors, the returned `Tensor` is identical to the output of `parse_example`, except there is no batch dimension, the output shape is the same as the shape given in `dense_shape`. For `SparseTensor`s, the first (batch) column of the indices matrix is removed (the indices matrix is a column vector), the values vector is unchanged, and the first (`batch_size`) entry of the shape vector is removed (it is now a single element vector). One might see performance advantages by batching `Example` protos with `parse_example` instead of using this function directly. Args: serialized: A scalar string Tensor, a single serialized Example. features: A `dict` mapping feature keys to `FixedLenFeature` or `VarLenFeature` values. name: A name for this operation (optional). 
example_names: (Optional) A scalar string Tensor, the associated name. Returns: A `dict` mapping feature keys to `Tensor` and `SparseTensor` values. Raises: ValueError: if any feature is invalid. """ return parse_single_example_v2(serialized, features, example_names, name) @tf_export("io.parse_single_example", v1=[]) @dispatch.add_dispatch_support def parse_single_example_v2( serialized, features, example_names=None, name=None ): """Parses a single `Example` proto. Similar to `parse_example`, except: For dense tensors, the returned `Tensor` is identical to the output of `parse_example`, except there is no batch dimension, the output shape is the same as the shape given in `dense_shape`. For `SparseTensor`s, the first (batch) column of the indices matrix is removed (the indices matrix is a column vector), the values vector is unchanged, and the first (`batch_size`) entry of the shape vector is removed (it is now a single element vector). One might see performance advantages by batching `Example` protos with `parse_example` instead of using this function directly. Args: serialized: A scalar string Tensor, a single serialized Example. features: A `dict` mapping feature keys to `FixedLenFeature` or `VarLenFeature` values. example_names: (Optional) A scalar string Tensor, the associated name. name: A name for this operation (optional). Returns: A `dict` mapping feature keys to `Tensor` and `SparseTensor` values. Raises: ValueError: if any feature is invalid. """ if not features: raise ValueError("Missing features.") with ops.name_scope(name, "ParseSingleExample", [serialized, example_names]): serialized = ops.convert_to_tensor(serialized, name="serialized") serialized = _assert_scalar(serialized, "serialized") return parse_example_v2(serialized, features, example_names, name) @tf_export("io.parse_sequence_example") @dispatch.add_dispatch_support def parse_sequence_example(serialized, context_features=None, sequence_features=None, example_names=None, name=None): # pylint: disable=line-too-long """Parses a batch of `SequenceExample` protos. Parses a vector of serialized [`SequenceExample`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto) protos given in `serialized`. This op parses serialized sequence examples into a tuple of dictionaries, each mapping keys to `Tensor` and `SparseTensor` objects. The first dictionary contains mappings for keys appearing in `context_features`, and the second dictionary contains mappings for keys appearing in `sequence_features`. At least one of `context_features` and `sequence_features` must be provided and non-empty. The `context_features` keys are associated with a `SequenceExample` as a whole, independent of time / frame. In contrast, the `sequence_features` keys provide a way to access variable-length data within the `FeatureList` section of the `SequenceExample` proto. While the shapes of `context_features` values are fixed with respect to frame, the frame dimension (the first dimension) of `sequence_features` values may vary between `SequenceExample` protos, and even between `feature_list` keys within the same `SequenceExample`. `context_features` contains `VarLenFeature`, `RaggedFeature`, and `FixedLenFeature` objects. Each `VarLenFeature` is mapped to a `SparseTensor`; each `RaggedFeature` is mapped to a `RaggedTensor`; and each `FixedLenFeature` is mapped to a `Tensor`, of the specified type, shape, and default value. `sequence_features` contains `VarLenFeature`, `RaggedFeature`, and `FixedLenSequenceFeature` objects. 
Each `VarLenFeature` is mapped to a `SparseTensor`; each `RaggedFeature` is mapped to a `RaggedTensor; and each `FixedLenSequenceFeature` is mapped to a `Tensor`, each of the specified type. The shape will be `(B,T,) + df.dense_shape` for `FixedLenSequenceFeature` `df`, where `B` is the batch size, and `T` is the length of the associated `FeatureList` in the `SequenceExample`. For instance, `FixedLenSequenceFeature([])` yields a scalar 2-D `Tensor` of static shape `[None, None]` and dynamic shape `[B, T]`, while `FixedLenSequenceFeature([k])` (for `int k >= 1`) yields a 3-D matrix `Tensor` of static shape `[None, None, k]` and dynamic shape `[B, T, k]`. Like the input, the resulting output tensors have a batch dimension. This means that the original per-example shapes of `VarLenFeature`s and `FixedLenSequenceFeature`s can be lost. To handle that situation, this op also provides dicts of shape tensors as part of the output. There is one dict for the context features, and one for the feature_list features. Context features of type `FixedLenFeature`s will not be present, since their shapes are already known by the caller. In situations where the input 'FixedLenFeature`s are of different lengths across examples, the shorter examples will be padded with default datatype values: 0 for numeric types, and the empty string for string types. Each `SparseTensor` corresponding to `sequence_features` represents a ragged vector. Its indices are `[time, index]`, where `time` is the `FeatureList` entry and `index` is the value's index in the list of values associated with that time. `FixedLenFeature` entries with a `default_value` and `FixedLenSequenceFeature` entries with `allow_missing=True` are optional; otherwise, we will fail if that `Feature` or `FeatureList` is missing from any example in `serialized`. `example_name` may contain a descriptive name for the corresponding serialized proto. This may be useful for debugging purposes, but it has no effect on the output. If not `None`, `example_name` must be a scalar. Args: serialized: A vector (1-D Tensor) of type string containing binary serialized `SequenceExample` protos. context_features: A `dict` mapping feature keys to `FixedLenFeature` or `VarLenFeature` or `RaggedFeature` values. These features are associated with a `SequenceExample` as a whole. sequence_features: A `dict` mapping feature keys to `FixedLenSequenceFeature` or `VarLenFeature` or `RaggedFeature` values. These features are associated with data within the `FeatureList` section of the `SequenceExample` proto. example_names: A vector (1-D Tensor) of strings (optional), the name of the serialized protos. name: A name for this operation (optional). Returns: A tuple of three `dict`s, each mapping keys to `Tensor`s, `SparseTensor`s, and `RaggedTensor`. The first dict contains the context key/values, the second dict contains the feature_list key/values, and the final dict contains the lengths of any dense feature_list features. Raises: ValueError: if any feature is invalid. 
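  A minimal runnable sketch of the batched form (the feature key `"tokens"`
  and its values are made up for illustration; assumes eager execution):

  >>> seq = tf.train.SequenceExample(
  ...     feature_lists=tf.train.FeatureLists(feature_list={
  ...         "tokens": tf.train.FeatureList(feature=[
  ...             tf.train.Feature(int64_list=tf.train.Int64List(value=[3])),
  ...             tf.train.Feature(int64_list=tf.train.Int64List(value=[4]))])}))
  >>> context, sequences, lengths = tf.io.parse_sequence_example(
  ...     serialized=[seq.SerializeToString(), seq.SerializeToString()],
  ...     sequence_features={
  ...         "tokens": tf.io.FixedLenSequenceFeature([], dtype=tf.int64)})
  >>> sequences["tokens"].numpy()
  array([[3, 4],
         [3, 4]])
  >>> lengths["tokens"].numpy()
  array([2, 2])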
""" if not (context_features or sequence_features): raise ValueError("Missing features.") context_params = _ParseOpParams.from_features( context_features, [VarLenFeature, FixedLenFeature, RaggedFeature]) feature_list_params = _ParseOpParams.from_features( sequence_features, [VarLenFeature, FixedLenSequenceFeature, RaggedFeature]) with ops.name_scope(name, "ParseSequenceExample", [serialized, example_names]): outputs = _parse_sequence_example_raw(serialized, example_names, context_params, feature_list_params, name) context_output, feature_list_output, feature_list_lengths = outputs if context_params.ragged_keys: context_output = _construct_tensors_for_composite_features( context_features, context_output) if feature_list_params.ragged_keys: feature_list_output = _construct_tensors_for_composite_features( sequence_features, feature_list_output) return context_output, feature_list_output, feature_list_lengths def _parse_sequence_example_raw(serialized, debug_name, context, feature_list, name=None): """Parses a vector of `SequenceExample` protos. Args: serialized: A vector (1-D Tensor) of type string, containing binary serialized `SequenceExample` protos. debug_name: A vector (1-D Tensor) of strings (optional), the names of the serialized protos. context: A `ParseOpParams` containing the parameters for the parse op for the context features. feature_list: A `ParseOpParams` containing the parameters for the parse op for the feature_list features. name: A name for this operation (optional). Returns: A tuple of three `dict`s, each mapping keys to `Tensor`s, `SparseTensor`s, and `RaggedTensor`s. The first dict contains the context key/values, the second dict contains the feature_list key/values, and the final dict contains the lengths of any dense feature_list features. Raises: TypeError: if feature_list.dense_defaults is not either None or a dict. 
""" if context.num_features + feature_list.num_features == 0: raise ValueError("Must provide at least one feature key") with ops.name_scope(name, "ParseSequenceExample", [serialized]): debug_name = [] if debug_name is None else debug_name # Internal feature_list_dense_missing_assumed_empty = [] for k, v in feature_list.dense_defaults.items(): if v is not None: raise ValueError("Value feature_list.dense_defaults[%s] must be None" % k) feature_list_dense_missing_assumed_empty.append(k) has_ragged = context.ragged_keys or feature_list.ragged_keys serialized = ops.convert_to_tensor(serialized, name="serialized") if has_ragged and serialized.shape.ndims is None: raise ValueError("serialized must have statically-known rank to " "parse ragged features.") feature_list_dense_missing_assumed_empty_vector = [ key in feature_list_dense_missing_assumed_empty for key in feature_list.dense_keys ] outputs = gen_parsing_ops.parse_sequence_example_v2( # Inputs serialized=serialized, debug_name=debug_name, context_sparse_keys=context.sparse_keys, context_dense_keys=context.dense_keys, context_ragged_keys=context.ragged_keys, feature_list_sparse_keys=feature_list.sparse_keys, feature_list_dense_keys=feature_list.dense_keys, feature_list_ragged_keys=feature_list.ragged_keys, feature_list_dense_missing_assumed_empty=( feature_list_dense_missing_assumed_empty_vector), context_dense_defaults=context.dense_defaults_vec, # Attrs Ncontext_sparse=len(context.sparse_keys), Nfeature_list_sparse=len(feature_list.sparse_keys), Nfeature_list_dense=len(feature_list.dense_keys), context_sparse_types=context.sparse_types, context_ragged_value_types=context.ragged_value_types, context_ragged_split_types=context.ragged_split_types, feature_list_dense_types=feature_list.dense_types, feature_list_sparse_types=feature_list.sparse_types, feature_list_ragged_value_types=feature_list.ragged_value_types, feature_list_ragged_split_types=feature_list.ragged_split_types, context_dense_shapes=context.dense_shapes_as_proto, feature_list_dense_shapes=feature_list.dense_shapes, name=name) (context_sparse_indices, context_sparse_values, context_sparse_shapes, context_dense_values, context_ragged_values, context_ragged_row_splits, feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes, feature_list_dense_values, feature_list_dense_lengths, feature_list_ragged_values, feature_list_ragged_outer_splits, feature_list_ragged_inner_splits) = outputs # pylint: disable=protected-access context_ragged_tensors = parsing_config._build_ragged_tensors( serialized.shape, context_ragged_values, context_ragged_row_splits) feature_list_ragged_tensors = parsing_config._build_ragged_tensors( serialized.shape, feature_list_ragged_values, feature_list_ragged_outer_splits, feature_list_ragged_inner_splits) # pylint: disable=g-complex-comprehension context_sparse_tensors = [ sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape) in zip(context_sparse_indices, context_sparse_values, context_sparse_shapes) ] feature_list_sparse_tensors = [ sparse_tensor.SparseTensor(ix, val, shape) for (ix, val, shape ) in zip(feature_list_sparse_indices, feature_list_sparse_values, feature_list_sparse_shapes) ] # pylint: enable=g-complex-comprehension context_output = dict( zip( context.sparse_keys + context.dense_keys + context.ragged_keys, context_sparse_tensors + context_dense_values + context_ragged_tensors)) feature_list_output = dict( zip( feature_list.sparse_keys + feature_list.dense_keys + feature_list.ragged_keys, 
feature_list_sparse_tensors + feature_list_dense_values + feature_list_ragged_tensors)) feature_list_lengths = dict( zip(feature_list.dense_keys, feature_list_dense_lengths)) return (context_output, feature_list_output, feature_list_lengths) @tf_export("io.parse_single_sequence_example", v1=["io.parse_single_sequence_example", "parse_single_sequence_example"]) @dispatch.add_dispatch_support def parse_single_sequence_example( serialized, context_features=None, sequence_features=None, example_name=None, name=None): # pylint: disable=line-too-long """Parses a single `SequenceExample` proto. Parses a single serialized [`SequenceExample`](https://www.tensorflow.org/code/tensorflow/core/example/example.proto) proto given in `serialized`. This op parses a serialized sequence example into a tuple of dictionaries, each mapping keys to `Tensor` and `SparseTensor` objects. The first dictionary contains mappings for keys appearing in `context_features`, and the second dictionary contains mappings for keys appearing in `sequence_features`. At least one of `context_features` and `sequence_features` must be provided and non-empty. The `context_features` keys are associated with a `SequenceExample` as a whole, independent of time / frame. In contrast, the `sequence_features` keys provide a way to access variable-length data within the `FeatureList` section of the `SequenceExample` proto. While the shapes of `context_features` values are fixed with respect to frame, the frame dimension (the first dimension) of `sequence_features` values may vary between `SequenceExample` protos, and even between `feature_list` keys within the same `SequenceExample`. `context_features` contains `VarLenFeature`, `RaggedFeature`, and `FixedLenFeature` objects. Each `VarLenFeature` is mapped to a `SparseTensor`; each `RaggedFeature` is mapped to a `RaggedTensor`; and each `FixedLenFeature` is mapped to a `Tensor`, of the specified type, shape, and default value. `sequence_features` contains `VarLenFeature`, `RaggedFeature`, and `FixedLenSequenceFeature` objects. Each `VarLenFeature` is mapped to a `SparseTensor`; each `RaggedFeature` is mapped to a `RaggedTensor`; and each `FixedLenSequenceFeature` is mapped to a `Tensor`, each of the specified type. The shape will be `(T,) + df.dense_shape` for `FixedLenSequenceFeature` `df`, where `T` is the length of the associated `FeatureList` in the `SequenceExample`. For instance, `FixedLenSequenceFeature([])` yields a scalar 1-D `Tensor` of static shape `[None]` and dynamic shape `[T]`, while `FixedLenSequenceFeature([k])` (for `int k >= 1`) yields a 2-D matrix `Tensor` of static shape `[None, k]` and dynamic shape `[T, k]`. Each `SparseTensor` corresponding to `sequence_features` represents a ragged vector. Its indices are `[time, index]`, where `time` is the `FeatureList` entry and `index` is the value's index in the list of values associated with that time. `FixedLenFeature` entries with a `default_value` and `FixedLenSequenceFeature` entries with `allow_missing=True` are optional; otherwise, we will fail if that `Feature` or `FeatureList` is missing from any example in `serialized`. `example_name` may contain a descriptive name for the corresponding serialized proto. This may be useful for debugging purposes, but it has no effect on the output. If not `None`, `example_name` must be a scalar. Note that the batch version of this function, `tf.parse_sequence_example`, is written for better memory efficiency and will be faster on large `SequenceExample`s. 
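  For instance, a minimal runnable sketch (the keys `"id"` and `"tokens"` and
  their values are made up for illustration; assumes eager execution):

  >>> seq = tf.train.SequenceExample(
  ...     context=tf.train.Features(feature={
  ...         "id": tf.train.Feature(int64_list=tf.train.Int64List(value=[7]))}),
  ...     feature_lists=tf.train.FeatureLists(feature_list={
  ...         "tokens": tf.train.FeatureList(feature=[
  ...             tf.train.Feature(int64_list=tf.train.Int64List(value=[3])),
  ...             tf.train.Feature(int64_list=tf.train.Int64List(value=[4]))])}))
  >>> context, sequences = tf.io.parse_single_sequence_example(
  ...     serialized=seq.SerializeToString(),
  ...     context_features={"id": tf.io.FixedLenFeature([], dtype=tf.int64)},
  ...     sequence_features={
  ...         "tokens": tf.io.FixedLenSequenceFeature([], dtype=tf.int64)})
  >>> int(context["id"].numpy())
  7
  >>> sequences["tokens"].numpy()
  array([3, 4])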
Args: serialized: A scalar (0-D Tensor) of type string, a single binary serialized `SequenceExample` proto. context_features: A `dict` mapping feature keys to `FixedLenFeature` or `VarLenFeature` or `RaggedFeature` values. These features are associated with a `SequenceExample` as a whole. sequence_features: A `dict` mapping feature keys to `FixedLenSequenceFeature` or `VarLenFeature` or `RaggedFeature` values. These features are associated with data within the `FeatureList` section of the `SequenceExample` proto. example_name: A scalar (0-D Tensor) of strings (optional), the name of the serialized proto. name: A name for this operation (optional). Returns: A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s and `RaggedTensor`s. * The first dict contains the context key/values. * The second dict contains the feature_list key/values. Raises: ValueError: if any feature is invalid. """ # pylint: enable=line-too-long if not (context_features or sequence_features): raise ValueError("Missing features.") context_params = _ParseOpParams.from_features( context_features, [VarLenFeature, FixedLenFeature, RaggedFeature]) feature_list_params = _ParseOpParams.from_features( sequence_features, [VarLenFeature, FixedLenSequenceFeature, RaggedFeature]) with ops.name_scope(name, "ParseSingleSequenceExample", [serialized, example_name]): context_output, feature_list_output = ( _parse_single_sequence_example_raw(serialized, context_params, feature_list_params, example_name, name)) if context_params.ragged_keys: context_output = _construct_tensors_for_composite_features( context_features, context_output) if feature_list_params.ragged_keys: feature_list_output = _construct_tensors_for_composite_features( sequence_features, feature_list_output) return context_output, feature_list_output def _parse_single_sequence_example_raw(serialized, context, feature_list, debug_name, name=None): """Parses a single `SequenceExample` proto. Args: serialized: A scalar (0-D Tensor) of type string, a single binary serialized `SequenceExample` proto. context: A `ParseOpParams` containing the parameters for the parse op for the context features. feature_list: A `ParseOpParams` containing the parameters for the parse op for the feature_list features. debug_name: A scalar (0-D Tensor) of strings (optional), the name of the serialized proto. name: A name for this operation (optional). Returns: A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s. The first dict contains the context key/values. The second dict contains the feature_list key/values. Raises: TypeError: if feature_list.dense_defaults is not either None or a dict. """ with ops.name_scope(name, "ParseSingleExample", [serialized, debug_name]): serialized = ops.convert_to_tensor(serialized, name="serialized") serialized = _assert_scalar(serialized, "serialized") return _parse_sequence_example_raw(serialized, debug_name, context, feature_list, name)[:2] @tf_export("io.decode_raw", v1=[]) @dispatch.add_dispatch_support def decode_raw(input_bytes, out_type, little_endian=True, fixed_length=None, name=None): r"""Convert raw bytes from input tensor into numeric tensors. Every component of the input tensor is interpreted as a sequence of bytes. These bytes are then decoded as numbers in the format specified by `out_type`. 
>>> tf.io.decode_raw(tf.constant("1"), tf.uint8)
  <tf.Tensor: shape=(1,), dtype=uint8, numpy=array([49], dtype=uint8)>
  >>> tf.io.decode_raw(tf.constant("1,2"), tf.uint8)
  <tf.Tensor: shape=(3,), dtype=uint8, numpy=array([49, 44, 50], dtype=uint8)>

  Note that the rank of the output tensor is always one more than the input one:

  >>> tf.io.decode_raw(tf.constant(["1","2"]), tf.uint8).shape
  TensorShape([2, 1])
  >>> tf.io.decode_raw(tf.constant([["1"],["2"]]), tf.uint8).shape
  TensorShape([2, 1, 1])

  This is because each byte in the input is converted to a new value on the
  output (if output type is `uint8` or `int8`, otherwise chunks of inputs get
  converted to a new value):

  >>> tf.io.decode_raw(tf.constant("123"), tf.uint8)
  <tf.Tensor: shape=(3,), dtype=uint8, numpy=array([49, 50, 51], dtype=uint8)>
  >>> tf.io.decode_raw(tf.constant("1234"), tf.uint8)
  <tf.Tensor: shape=(4,), dtype=uint8, numpy=array([49, 50, 51, 52], ...

  >>> # chunked output
  >>> tf.io.decode_raw(tf.constant("12"), tf.uint16)
  <tf.Tensor: shape=(1,), dtype=uint16, numpy=array([12849], dtype=uint16)>
  >>> tf.io.decode_raw(tf.constant("1234"), tf.uint16)
  <tf.Tensor: shape=(2,), dtype=uint16, numpy=array([12849, 13363], ...

  >>> # int64 output
  >>> tf.io.decode_raw(tf.constant("12345678"), tf.int64)
  <tf.Tensor: ... numpy=array([4050765991979987505])>
  >>> tf.io.decode_raw(tf.constant("1234567887654321"), tf.int64)
  <tf.Tensor: ... numpy=array([4050765991979987505, 3544952156018063160])>

  The operation allows specifying endianness via the `little_endian` parameter.

  >>> tf.io.decode_raw(tf.constant("\x0a\x0b"), tf.int16)
  <tf.Tensor: shape=(1,), dtype=int16, numpy=array([2826], dtype=int16)>
  >>> hex(2826)
  '0xb0a'
  >>> tf.io.decode_raw(tf.constant("\x0a\x0b"), tf.int16, little_endian=False)
  <tf.Tensor: shape=(1,), dtype=int16, numpy=array([2571], dtype=int16)>
  >>> hex(2571)
  '0xa0b'

  If the elements of `input_bytes` are of different length, you must specify
  `fixed_length`:

  >>> tf.io.decode_raw(tf.constant([["1"],["23"]]), tf.uint8, fixed_length=4)
  <tf.Tensor: shape=(2, 1, 4), dtype=uint8, numpy=
  array([[[49, 0, 0, 0]],
         [[50, 51, 0, 0]]], dtype=uint8)>

  If the `fixed_length` value is larger than the length of the `out_type` dtype,
  multiple values are generated:

  >>> tf.io.decode_raw(tf.constant(["1212"]), tf.uint16, fixed_length=4)
  <tf.Tensor: shape=(1, 2), dtype=uint16, numpy=array([[12849, 12849]], ...
If the input value is larger than `fixed_length`, it is truncated: >>> x=''.join([chr(1), chr(2), chr(3), chr(4)]) >>> tf.io.decode_raw(x, tf.uint16, fixed_length=2) <tf.Tensor: shape=(1,), dtype=uint16, numpy=array([513], dtype=uint16)> >>> hex(513) '0x201' If `little_endian` and `fixed_length` are specified, truncation to the fixed length occurs before endianness conversion: >>> x=''.join([chr(1), chr(2), chr(3), chr(4)]) >>> tf.io.decode_raw(x, tf.uint16, fixed_length=2, little_endian=False) <tf.Tensor: shape=(1,), dtype=uint16, numpy=array([258], dtype=uint16)> >>> hex(258) '0x102' If input values all have the same length, then specifying `fixed_length` equal to the size of the strings should not change output: >>> x = ["12345678", "87654321"] >>> tf.io.decode_raw(x, tf.int16) <tf.Tensor: shape=(2, 4), dtype=int16, numpy= array([[12849, 13363, 13877, 14391], [14136, 13622, 13108, 12594]], dtype=int16)> >>> tf.io.decode_raw(x, tf.int16, fixed_length=len(x[0])) <tf.Tensor: shape=(2, 4), dtype=int16, numpy= array([[12849, 13363, 13877, 14391], [14136, 13622, 13108, 12594]], dtype=int16)> Args: input_bytes: Each element of the input Tensor is converted to an array of bytes. Currently, this must be a tensor of strings (bytes), although semantically the operation should support any input. out_type: `DType` of the output. Acceptable types are `half`, `float`, `double`, `int32`, `uint16`, `uint8`, `int16`, `int8`, `int64`. little_endian: Whether the `input_bytes` data is in little-endian format. Data will be converted into host byte order if necessary. fixed_length: If set, the first `fixed_length` bytes of each element will be converted. Data will be zero-padded or truncated to the specified length. `fixed_length` must be a multiple of the size of `out_type`. `fixed_length` must be specified if the elements of `input_bytes` are of variable length. name: A name for the operation (optional). Returns: A `Tensor` object storing the decoded bytes. """ if fixed_length is not None: return gen_parsing_ops.decode_padded_raw( input_bytes, fixed_length=fixed_length, out_type=out_type, little_endian=little_endian, name=name) else: return gen_parsing_ops.decode_raw( input_bytes, out_type, little_endian=little_endian, name=name) @tf_export(v1=["decode_raw", "io.decode_raw"]) @dispatch.add_dispatch_support @deprecation.deprecated_args(None, "bytes is deprecated, use input_bytes instead", "bytes") def decode_raw_v1( input_bytes=None, out_type=None, little_endian=True, name=None, bytes=None # pylint: disable=redefined-builtin ): """Convert raw byte strings into tensors. Args: input_bytes: Each element of the input Tensor is converted to an array of bytes. out_type: `DType` of the output. Acceptable types are `half`, `float`, `double`, `int32`, `uint16`, `uint8`, `int16`, `int8`, `int64`. little_endian: Whether the `input_bytes` data is in little-endian format. Data will be converted into host byte order if necessary. name: A name for the operation (optional). bytes: Deprecated parameter. Use `input_bytes` instead. Returns: A `Tensor` object storing the decoded bytes. """ input_bytes = deprecation.deprecated_argument_lookup("input_bytes", input_bytes, "bytes", bytes) # out_type is a required positional argument in the original API, and had to # be changed to a keyword argument in order to facilitate the transition from # the reserved named `bytes` to `input_bytes`. Ensure it's still set. 
if out_type is None: raise ValueError( "decode_raw_v1() missing 1 positional argument: 'out_type'") return gen_parsing_ops.decode_raw( input_bytes, out_type, little_endian=little_endian, name=name) # Swap `name` and `na_value` for backward compatibility. @tf_export(v1=["io.decode_csv", "decode_csv"]) @dispatch.add_dispatch_support @deprecation.deprecated_endpoints("decode_csv") def decode_csv(records, record_defaults, field_delim=",", use_quote_delim=True, name=None, na_value="", select_cols=None): """Convert CSV records to tensors. Each column maps to one tensor. RFC 4180 format is expected for the CSV records. (https://tools.ietf.org/html/rfc4180) Note that we allow leading and trailing spaces with int or float field. Args: records: A `Tensor` of type `string`. Each string is a record/row in the csv and all records should have the same format. record_defaults: A list of `Tensor` objects with specific types. Acceptable types are `float32`, `float64`, `int32`, `int64`, `string`. One tensor per column of the input record, with either a scalar default value for that column or an empty vector if the column is required. field_delim: An optional `string`. Defaults to `","`. char delimiter to separate fields in a record. use_quote_delim: An optional `bool`. Defaults to `True`. If false, treats double quotation marks as regular characters inside of the string fields (ignoring RFC 4180, Section 2, Bullet 5). name: A name for the operation (optional). na_value: Additional string to recognize as NA/NaN. select_cols: Optional sorted list of column indices to select. If specified, only this subset of columns will be parsed and returned. Returns: A list of `Tensor` objects. Has the same type as `record_defaults`. Each tensor will have the same shape as records. Raises: ValueError: If any of the arguments is malformed. """ return decode_csv_v2( records, record_defaults, field_delim, use_quote_delim, na_value, select_cols, name ) @tf_export("io.decode_csv", v1=[]) @dispatch.add_dispatch_support def decode_csv_v2(records, record_defaults, field_delim=",", use_quote_delim=True, na_value="", select_cols=None, name=None): """Convert CSV records to tensors. Each column maps to one tensor. RFC 4180 format is expected for the CSV records. (https://tools.ietf.org/html/rfc4180) Note that we allow leading and trailing spaces with int or float field. Args: records: A `Tensor` of type `string`. Each string is a record/row in the csv and all records should have the same format. record_defaults: A list of `Tensor` objects with specific types. Acceptable types are `float32`, `float64`, `int32`, `int64`, `string`. One tensor per column of the input record, with either a scalar default value for that column or an empty vector if the column is required. field_delim: An optional `string`. Defaults to `","`. char delimiter to separate fields in a record. use_quote_delim: An optional `bool`. Defaults to `True`. If false, treats double quotation marks as regular characters inside of the string fields (ignoring RFC 4180, Section 2, Bullet 5). na_value: Additional string to recognize as NA/NaN. select_cols: Optional sorted list of column indices to select. If specified, only this subset of columns will be parsed and returned. name: A name for the operation (optional). Returns: A list of `Tensor` objects. Has the same type as `record_defaults`. Each tensor will have the same shape as records. Raises: ValueError: If any of the arguments is malformed. 
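  For example, a minimal sketch with made-up records and defaults (assumes
  eager execution; exact tensor reprs may vary slightly between versions):

  >>> cols = tf.io.decode_csv(
  ...     records=["1,2.5,hello", "3,,world"],
  ...     record_defaults=[[0], [0.0], [""]])
  >>> cols[0].numpy()
  array([1, 3], dtype=int32)
  >>> cols[1].numpy()
  array([2.5, 0. ], dtype=float32)
  >>> cols[2].numpy()
  array([b'hello', b'world'], dtype=object)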
""" if select_cols is not None and any(select_cols[i] >= select_cols[i + 1] for i in range(len(select_cols) - 1)): raise ValueError("select_cols is not strictly increasing.") if select_cols is not None and select_cols[0] < 0: raise ValueError("select_cols contains negative values.") if select_cols is not None and len(select_cols) != len(record_defaults): raise ValueError("Length of select_cols and record_defaults do not match.") return gen_parsing_ops.decode_csv( records=records, record_defaults=record_defaults, field_delim=field_delim, use_quote_delim=use_quote_delim, na_value=na_value, name=name, select_cols=select_cols, ) def _assert_scalar(value, name): """Asserts that `value` is scalar, and returns `value`.""" value_rank = value.shape.rank if value_rank is None: check = control_flow_ops.Assert( math_ops.equal(array_ops.rank(value), 0), ["Input %s must be a scalar" % name], name="%sIsScalar" % name.capitalize()) result = control_flow_ops.with_dependencies([check], value, name="%sDependencies" % name) result.set_shape([]) return result elif value_rank == 0: return value else: raise ValueError("Input %s must be a scalar" % name) @tf_export("io.decode_json_example", v1=["decode_json_example", "io.decode_json_example"]) def decode_json_example(json_examples, name=None): r"""Convert JSON-encoded Example records to binary protocol buffer strings. Note: This is **not** a general purpose JSON parsing op. This op converts JSON-serialized `tf.train.Example` (maybe created with `json_format.MessageToJson`, following the [standard JSON mapping]( https://developers.google.com/protocol-buffers/docs/proto3#json)) to a binary-serialized `tf.train.Example` (equivalent to `Example.SerializeToString()`) suitable for conversion to tensors with `tf.io.parse_example`. Here is a `tf.train.Example` proto: >>> example = tf.train.Example( ... features=tf.train.Features( ... feature={ ... "a": tf.train.Feature( ... int64_list=tf.train.Int64List( ... value=[1, 1, 3]))})) Here it is converted to JSON: >>> from google.protobuf import json_format >>> example_json = json_format.MessageToJson(example) >>> print(example_json) { "features": { "feature": { "a": { "int64List": { "value": [ "1", "1", "3" ] } } } } } This op converts the above json string to a binary proto: >>> example_binary = tf.io.decode_json_example(example_json) >>> example_binary.numpy() b'\n\x0f\n\r\n\x01a\x12\x08\x1a\x06\x08\x01\x08\x01\x08\x03' The OP works on string tensors of andy shape: >>> tf.io.decode_json_example([ ... [example_json, example_json], ... [example_json, example_json]]).shape.as_list() [2, 2] This resulting binary-string is equivalent to `Example.SerializeToString()`, and can be converted to Tensors using `tf.io.parse_example` and related functions: >>> tf.io.parse_example( ... serialized=[example_binary.numpy(), ... example.SerializeToString()], ... features = {'a': tf.io.FixedLenFeature(shape=[3], dtype=tf.int64)}) {'a': <tf.Tensor: shape=(2, 3), dtype=int64, numpy= array([[1, 1, 3], [1, 1, 3]])>} Args: json_examples: A string tensor containing json-serialized `tf.Example` protos. name: A name for the op. Returns: A string Tensor containing the binary-serialized `tf.Example` protos. Raises: `tf.errors.InvalidArgumentError`: If the JSON could not be converted to a `tf.Example` """ return gen_parsing_ops.decode_json_example(json_examples, name=name)
null
265
CWE-787
CVE-2021-30019
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2021 * All rights reserved * * This file is part of GPAC / AAC ADTS reframer filter * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/avparse.h> #include <gpac/constants.h> #include <gpac/filters.h> #ifndef GPAC_DISABLE_AV_PARSERS enum { AAC_SIGNAL_NONE=0, AAC_SIGNAL_IMPLICIT, AAC_SIGNAL_EXPLICIT }; typedef struct { Bool is_mp2, no_crc; u32 profile, sr_idx, nb_ch, frame_size, hdr_size; } ADTSHeader; typedef struct { u64 pos; Double duration; } ADTSIdx; typedef struct { //filter args u32 frame_size; Double index; u32 sbr; u32 ps; // Bool mpeg4; Bool ovsbr; Bool expart; s32 aacchcfg; //only one input pid declared GF_FilterPid *ipid; //output pid for audio GF_FilterPid *opid; //video pid for cover art GF_FilterPid *vpid; GF_BitStream *bs; u64 file_pos, cts; u32 sr_idx, nb_ch, is_mp2, profile; GF_Fraction64 duration; Double start_range; Bool in_seek; u32 timescale; ADTSHeader hdr; u32 dts_inc; Bool is_playing; Bool is_file, file_loaded; Bool initial_play_done; GF_FilterPacket *src_pck; ADTSIdx *indexes; u32 index_alloc_size, index_size; u8 *adts_buffer; u32 adts_buffer_size, adts_buffer_alloc, resume_from; u64 byte_offset; u32 tag_size; u8 *id3_buffer; u32 id3_buffer_size, id3_buffer_alloc; u32 nb_frames; GF_M4ADecSpecInfo acfg; u32 bitrate; } GF_ADTSDmxCtx; static Bool adts_dmx_sync_frame_bs(GF_BitStream *bs, ADTSHeader *hdr) { u32 val; u64 pos; while (gf_bs_available(bs)>7) { val = gf_bs_read_u8(bs); if (val!=0xFF) continue; val = gf_bs_read_int(bs, 4); if (val != 0x0F) { gf_bs_read_int(bs, 4); continue; } hdr->is_mp2 = (Bool)gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 2); hdr->no_crc = (Bool)gf_bs_read_int(bs, 1); pos = gf_bs_get_position(bs) - 2; hdr->profile = 1 + gf_bs_read_int(bs, 2); hdr->sr_idx = gf_bs_read_int(bs, 4); gf_bs_read_int(bs, 1); hdr->nb_ch = gf_bs_read_int(bs, 3); //value 1->6 match channel number, value 7 is 7.1 if (hdr->nb_ch==7) hdr->nb_ch = 8; gf_bs_read_int(bs, 4); hdr->frame_size = gf_bs_read_int(bs, 13); gf_bs_read_int(bs, 11); gf_bs_read_int(bs, 2); hdr->hdr_size = 7; if (!hdr->no_crc) { gf_bs_read_u16(bs); hdr->hdr_size = 9; } if (!GF_M4ASampleRates[hdr->sr_idx] || (hdr->frame_size < hdr->hdr_size)) { gf_bs_seek(bs, pos+1); continue; } hdr->frame_size -= hdr->hdr_size; if (gf_bs_available(bs) == hdr->frame_size) { return GF_TRUE; } if (gf_bs_available(bs) < hdr->frame_size) { break; } gf_bs_skip_bytes(bs, hdr->frame_size); val = gf_bs_read_u8(bs); if (val!=0xFF) { gf_bs_seek(bs, pos+1); continue; } val = gf_bs_read_int(bs, 4); if (val!=0x0F) { gf_bs_read_int(bs, 4); gf_bs_seek(bs, pos+1); continue; } gf_bs_seek(bs, pos+hdr->hdr_size); return GF_TRUE; } return GF_FALSE; } void id3dmx_flush(GF_Filter *filter, u8 *id3_buf, u32 id3_buf_size, GF_FilterPid *audio_pid, GF_FilterPid **video_pid_p); GF_Err 
adts_dmx_configure_pid(GF_Filter *filter, GF_FilterPid *pid, Bool is_remove) { const GF_PropertyValue *p; GF_ADTSDmxCtx *ctx = gf_filter_get_udta(filter); if (is_remove) { ctx->ipid = NULL; if (ctx->opid) { gf_filter_pid_remove(ctx->opid); ctx->opid = NULL; } return GF_OK; } if (! gf_filter_pid_check_caps(pid)) return GF_NOT_SUPPORTED; ctx->ipid = pid; p = gf_filter_pid_get_property(pid, GF_PROP_PID_TIMESCALE); if (p) ctx->timescale = p->value.uint; if (ctx->timescale && !ctx->opid) { ctx->opid = gf_filter_pid_new(filter); gf_filter_pid_copy_properties(ctx->opid, ctx->ipid); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_UNFRAMED, NULL); //we don't update copy props on output for now - if we decide we need it, we will need to also force resengin the decoder config } return GF_OK; } static void adts_dmx_check_dur(GF_Filter *filter, GF_ADTSDmxCtx *ctx) { FILE *stream; GF_BitStream *bs; ADTSHeader hdr; u64 duration, cur_dur, rate; s32 sr_idx = -1; const GF_PropertyValue *p; if (!ctx->opid || ctx->timescale || ctx->file_loaded) return; if (ctx->index<=0) { ctx->file_loaded = GF_TRUE; return; } p = gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_FILEPATH); if (!p || !p->value.string || !strncmp(p->value.string, "gmem://", 7)) { ctx->is_file = GF_FALSE; ctx->file_loaded = GF_TRUE; return; } ctx->is_file = GF_TRUE; stream = gf_fopen(p->value.string, "rb"); if (!stream) return; ctx->index_size = 0; bs = gf_bs_from_file(stream, GF_BITSTREAM_READ); duration = 0; cur_dur = 0; while (adts_dmx_sync_frame_bs(bs, &hdr)) { if ((sr_idx>=0) && (sr_idx != hdr.sr_idx)) { duration *= GF_M4ASampleRates[hdr.sr_idx]; duration /= GF_M4ASampleRates[sr_idx]; cur_dur *= GF_M4ASampleRates[hdr.sr_idx]; cur_dur /= GF_M4ASampleRates[sr_idx]; } sr_idx = hdr.sr_idx; duration += ctx->frame_size; cur_dur += ctx->frame_size; if (cur_dur > ctx->index * GF_M4ASampleRates[sr_idx]) { if (!ctx->index_alloc_size) ctx->index_alloc_size = 10; else if (ctx->index_alloc_size == ctx->index_size) ctx->index_alloc_size *= 2; ctx->indexes = gf_realloc(ctx->indexes, sizeof(ADTSIdx)*ctx->index_alloc_size); ctx->indexes[ctx->index_size].pos = gf_bs_get_position(bs) - hdr.hdr_size; ctx->indexes[ctx->index_size].duration = (Double) duration; ctx->indexes[ctx->index_size].duration /= GF_M4ASampleRates[sr_idx]; ctx->index_size ++; cur_dur = 0; } gf_bs_skip_bytes(bs, hdr.frame_size); } rate = gf_bs_get_position(bs); gf_bs_del(bs); gf_fclose(stream); if (sr_idx>=0) { if (!ctx->duration.num || (ctx->duration.num * GF_M4ASampleRates[sr_idx] != duration * ctx->duration.den)) { ctx->duration.num = (s32) duration; ctx->duration.den = GF_M4ASampleRates[sr_idx]; gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_DURATION, & PROP_FRAC64(ctx->duration)); if (duration && !gf_sys_is_test_mode() ) { rate *= 8 * ctx->duration.den; rate /= ctx->duration.num; ctx->bitrate = (u32) rate; } } } p = gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_FILE_CACHED); if (p && p->value.boolean) ctx->file_loaded = GF_TRUE; gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_CAN_DATAREF, & PROP_BOOL(GF_TRUE ) ); } static void adts_dmx_check_pid(GF_Filter *filter, GF_ADTSDmxCtx *ctx) { GF_BitStream *dsi; Bool use_implicit=GF_FALSE; u8 *dsi_b; u32 i, sbr_sr_idx, dsi_s, sr, sbr_sr, codecid, timescale=0; if (!ctx->opid) { ctx->opid = gf_filter_pid_new(filter); gf_filter_pid_copy_properties(ctx->opid, ctx->ipid); adts_dmx_check_dur(filter, ctx); } if ((ctx->sr_idx == ctx->hdr.sr_idx) && (ctx->nb_ch == ctx->hdr.nb_ch) && (ctx->is_mp2 == ctx->hdr.is_mp2) && (ctx->profile == 
ctx->hdr.profile) ) return; //copy properties at init or reconfig gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_STREAM_TYPE, & PROP_UINT( GF_STREAM_AUDIO)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_CODECID, & PROP_UINT( GF_CODECID_AAC_MPEG4)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_SAMPLES_PER_FRAME, & PROP_UINT(ctx->frame_size) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_UNFRAMED, & PROP_BOOL(GF_FALSE) ); if (ctx->is_file && ctx->index) { gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_PLAYBACK_MODE, & PROP_UINT(GF_PLAYBACK_MODE_FASTFORWARD) ); } if (ctx->duration.num) gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_DURATION, & PROP_FRAC64(ctx->duration)); ctx->is_mp2 = ctx->hdr.is_mp2; ctx->nb_ch = ctx->hdr.nb_ch; ctx->profile = ctx->hdr.profile; sr = GF_M4ASampleRates[ctx->hdr.sr_idx]; if (!ctx->timescale) { //we change sample rate, change cts if (ctx->cts && (ctx->sr_idx != ctx->hdr.sr_idx)) { ctx->cts *= sr; ctx->cts /= GF_M4ASampleRates[ctx->sr_idx]; } } ctx->sr_idx = ctx->hdr.sr_idx; /*keep MPEG-2 AAC codecid even for HE-SBR (that's correct according to latest MPEG-4 audio spec)*/ codecid = ctx->hdr.is_mp2 ? ctx->hdr.profile+GF_CODECID_AAC_MPEG2_MP-1 : GF_CODECID_AAC_MPEG4; gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_CODECID, & PROP_UINT(codecid) ); //force explicit SBR if explicit PS if (ctx->ps==AAC_SIGNAL_EXPLICIT) { ctx->sbr = AAC_SIGNAL_EXPLICIT; } /*no provision for explicit indication of MPEG-2 AAC through MPEG-4 PLs, so force implicit*/ if (ctx->hdr.is_mp2) { if (ctx->sbr == AAC_SIGNAL_EXPLICIT) ctx->sbr = AAC_SIGNAL_IMPLICIT; if (ctx->ps == AAC_SIGNAL_EXPLICIT) ctx->ps = AAC_SIGNAL_IMPLICIT; } dsi = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); ctx->dts_inc = ctx->frame_size; if (!ctx->ovsbr) { sbr_sr = 0; sbr_sr_idx = 0; for (i=0; i<16; i++) { if (GF_M4ASampleRates[i] == (u32) 2*sr) { sbr_sr_idx = i; sbr_sr = 2*sr; break; } } } else { sbr_sr = sr; sbr_sr_idx = ctx->hdr.sr_idx; } ctx->acfg.base_object_type = ctx->hdr.profile; ctx->acfg.base_sr = sr; ctx->acfg.base_sr_index = ctx->hdr.sr_idx; ctx->acfg.nb_chan = ctx->hdr.nb_ch; ctx->acfg.sbr_object_type = 0; /*explicit PS signal (non backward-compatible), only for stereo ADTS*/ if (ctx->acfg.nb_chan<=2) { if (ctx->ps==AAC_SIGNAL_EXPLICIT) { ctx->acfg.base_object_type = 29; ctx->acfg.sbr_object_type = ctx->hdr.profile; ctx->acfg.sbr_sr = sr; ctx->acfg.sbr_sr_index = ctx->acfg.base_sr_index; } else if (ctx->ps==AAC_SIGNAL_IMPLICIT) { use_implicit = GF_TRUE; } } if (ctx->sbr==AAC_SIGNAL_EXPLICIT) { //don't overwrite obj type if explicit PS is used if (ctx->acfg.base_object_type != 29) ctx->acfg.base_object_type = 5; ctx->acfg.sbr_object_type = ctx->hdr.profile; ctx->acfg.sbr_sr = sbr_sr; ctx->acfg.sbr_sr_index = sbr_sr_idx; } else if (ctx->sbr==AAC_SIGNAL_IMPLICIT) { sbr_sr = 0; use_implicit = GF_TRUE; } else { sbr_sr = 0; } ctx->acfg.audioPL = gf_m4a_get_profile(&ctx->acfg); /*for better interop, always store using full SR when using explict signaling*/ if (sbr_sr) { ctx->dts_inc *= 2; sr = sbr_sr; } gf_m4a_write_config_bs(dsi, &ctx->acfg); gf_bs_align(dsi); //implicit signaling, not written by gf_m4a_write_config_bs if (use_implicit) { gf_bs_write_int(dsi, 0x2b7, 11); /*sync extension type*/ gf_bs_write_int(dsi, 5, 5); /*audio objectType*/ /*implicit AAC SBR signal*/ if (ctx->sbr==AAC_SIGNAL_IMPLICIT) { gf_bs_write_int(dsi, 1, 1); /*SBR present flag*/ gf_bs_write_int(dsi, sbr_sr_idx, 4); } else { gf_bs_write_int(dsi, 0, 1); /*SBR present flag*/ } if (ctx->ps==AAC_SIGNAL_IMPLICIT) { 
gf_bs_write_int(dsi, 0x548, 11); /*sync extension type*/ gf_bs_write_int(dsi, 1, 1); /* PS present flag */ } gf_bs_align(dsi); } gf_bs_get_content(dsi, &dsi_b, &dsi_s); gf_bs_del(dsi); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_DECODER_CONFIG, & PROP_DATA_NO_COPY(dsi_b, dsi_s) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_PROFILE_LEVEL, & PROP_UINT (ctx->acfg.audioPL) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_SAMPLE_RATE, & PROP_UINT(sr)); timescale = sr; if (ctx->ovsbr) timescale = 2*sr; gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_TIMESCALE, & PROP_UINT(ctx->timescale ? ctx->timescale : timescale)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_NUM_CHANNELS, & PROP_UINT(ctx->nb_ch) ); if (ctx->bitrate) { gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_BITRATE, & PROP_UINT(ctx->bitrate)); } if (ctx->id3_buffer_size) { id3dmx_flush(filter, ctx->id3_buffer, ctx->id3_buffer_size, ctx->opid, ctx->expart ? &ctx->vpid : NULL); ctx->id3_buffer_size = 0; } } static Bool adts_dmx_process_event(GF_Filter *filter, const GF_FilterEvent *evt) { u32 i; GF_FilterEvent fevt; GF_ADTSDmxCtx *ctx = gf_filter_get_udta(filter); switch (evt->base.type) { case GF_FEVT_PLAY: if (!ctx->is_playing) { ctx->is_playing = GF_TRUE; ctx->cts = 0; } ctx->nb_frames = 0; ctx->id3_buffer_size = 0; if (! ctx->is_file) { if (evt->play.start_range || ctx->initial_play_done) { ctx->adts_buffer_size = 0; ctx->resume_from = 0; } ctx->initial_play_done = GF_TRUE; return GF_FALSE; } ctx->start_range = evt->play.start_range; ctx->in_seek = GF_TRUE; ctx->file_pos = 0; if (ctx->start_range) { for (i=1; i<ctx->index_size; i++) { if (ctx->indexes[i].duration>ctx->start_range) { ctx->cts = (u64) (ctx->indexes[i-1].duration * GF_M4ASampleRates[ctx->sr_idx]); ctx->file_pos = ctx->indexes[i-1].pos; break; } } } if (!ctx->initial_play_done) { ctx->initial_play_done = GF_TRUE; //seek will not change the current source state, don't send a seek if (!ctx->file_pos) return GF_TRUE; } ctx->resume_from = 0; ctx->adts_buffer_size = 0; //post a seek GF_FEVT_INIT(fevt, GF_FEVT_SOURCE_SEEK, ctx->ipid); fevt.seek.start_offset = ctx->file_pos; gf_filter_pid_send_event(ctx->ipid, &fevt); //cancel event return GF_TRUE; case GF_FEVT_STOP: //don't cancel event ctx->is_playing = GF_FALSE; return GF_FALSE; case GF_FEVT_SET_SPEED: //cancel event return GF_TRUE; default: break; } //by default don't cancel event - to rework once we have downloading in place return GF_FALSE; } static GFINLINE void adts_dmx_update_cts(GF_ADTSDmxCtx *ctx) { assert(ctx->dts_inc); if (ctx->timescale) { u64 inc = ctx->dts_inc; inc *= ctx->timescale; inc /= GF_M4ASampleRates[ctx->sr_idx]; ctx->cts += inc; } else { ctx->cts += ctx->dts_inc; } } GF_Err adts_dmx_process(GF_Filter *filter) { GF_ADTSDmxCtx *ctx = gf_filter_get_udta(filter); GF_FilterPacket *pck, *dst_pck; u8 *data, *output; u8 *start; u32 pck_size, remain, prev_pck_size; u64 cts = GF_FILTER_NO_TS; //always reparse duration if (!ctx->duration.num) adts_dmx_check_dur(filter, ctx); if (ctx->opid && !ctx->is_playing) return GF_OK; pck = gf_filter_pid_get_packet(ctx->ipid); if (!pck) { if (gf_filter_pid_is_eos(ctx->ipid)) { if (!ctx->adts_buffer_size) { if (ctx->opid) gf_filter_pid_set_eos(ctx->opid); if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck); ctx->src_pck = NULL; return GF_EOS; } } else { return GF_OK; } } prev_pck_size = ctx->adts_buffer_size; if (pck && !ctx->resume_from) { data = (char *) gf_filter_pck_get_data(pck, &pck_size); if (!pck_size) { 
gf_filter_pid_drop_packet(ctx->ipid); return GF_OK; } if (ctx->byte_offset != GF_FILTER_NO_BO) { u64 byte_offset = gf_filter_pck_get_byte_offset(pck); if (!ctx->adts_buffer_size) { ctx->byte_offset = byte_offset; } else if (ctx->byte_offset + ctx->adts_buffer_size != byte_offset) { ctx->byte_offset = GF_FILTER_NO_BO; if ((byte_offset != GF_FILTER_NO_BO) && (byte_offset>ctx->adts_buffer_size) ) { ctx->byte_offset = byte_offset - ctx->adts_buffer_size; } } } if (ctx->adts_buffer_size + pck_size > ctx->adts_buffer_alloc) { ctx->adts_buffer_alloc = ctx->adts_buffer_size + pck_size; ctx->adts_buffer = gf_realloc(ctx->adts_buffer, ctx->adts_buffer_alloc); } memcpy(ctx->adts_buffer + ctx->adts_buffer_size, data, pck_size); ctx->adts_buffer_size += pck_size; } //input pid sets some timescale - we flushed pending data , update cts if (ctx->timescale && pck) { cts = gf_filter_pck_get_cts(pck); } if (cts == GF_FILTER_NO_TS) { //avoids updating cts prev_pck_size = 0; } remain = ctx->adts_buffer_size; start = ctx->adts_buffer; if (ctx->resume_from) { start += ctx->resume_from - 1; remain -= ctx->resume_from - 1; ctx->resume_from = 0; } while (remain) { u8 *sync; u32 sync_pos, size, offset, bytes_to_drop=0, nb_blocks_per_frame; if (!ctx->tag_size && (remain>3)) { /* Did we read an ID3v2 ? */ if (start[0] == 'I' && start[1] == 'D' && start[2] == '3') { if (remain<10) return GF_OK; ctx->tag_size = ((start[9] & 0x7f) + ((start[8] & 0x7f) << 7) + ((start[7] & 0x7f) << 14) + ((start[6] & 0x7f) << 21)); bytes_to_drop = 10; if (ctx->id3_buffer_alloc < ctx->tag_size+10) { ctx->id3_buffer = gf_realloc(ctx->id3_buffer, ctx->tag_size+10); ctx->id3_buffer_alloc = ctx->tag_size+10; } memcpy(ctx->id3_buffer, start, 10); ctx->id3_buffer_size = 10; goto drop_byte; } } if (ctx->tag_size) { if (ctx->tag_size>remain) { bytes_to_drop = remain; ctx->tag_size-=remain; } else { bytes_to_drop = ctx->tag_size; ctx->tag_size = 0; } memcpy(ctx->id3_buffer + ctx->id3_buffer_size, start, bytes_to_drop); ctx->id3_buffer_size += bytes_to_drop; if (!ctx->tag_size && ctx->opid) { id3dmx_flush(filter, ctx->id3_buffer, ctx->id3_buffer_size, ctx->opid, ctx->expart ? &ctx->vpid : NULL); ctx->id3_buffer_size = 0; } goto drop_byte; } sync = memchr(start, 0xFF, remain); sync_pos = (u32) (sync ? sync - start : remain); //couldn't find sync byte in this packet if (remain - sync_pos < 7) { break; } //not sync ! if ((sync[1] & 0xF0) != 0xF0) { GF_LOG(ctx->nb_frames ? 
GF_LOG_WARNING : GF_LOG_DEBUG, GF_LOG_PARSER, ("[ADTSDmx] invalid ADTS sync bytes, resyncing\n")); ctx->nb_frames = 0; goto drop_byte; } if (!ctx->bs) { ctx->bs = gf_bs_new(sync + 1, remain - sync_pos - 1, GF_BITSTREAM_READ); } else { gf_bs_reassign_buffer(ctx->bs, sync + 1, remain - sync_pos - 1); } //ok parse header gf_bs_read_int(ctx->bs, 4); ctx->hdr.is_mp2 = (Bool)gf_bs_read_int(ctx->bs, 1); //if (ctx->mpeg4) //we deprecate old MPEG-2 signaling for AAC in ISOBMFF, as it is not well supported anyway and we don't write adif_header as //supposed to be for these types ctx->hdr.is_mp2 = 0; gf_bs_read_int(ctx->bs, 2); ctx->hdr.no_crc = (Bool)gf_bs_read_int(ctx->bs, 1); ctx->hdr.profile = 1 + gf_bs_read_int(ctx->bs, 2); ctx->hdr.sr_idx = gf_bs_read_int(ctx->bs, 4); gf_bs_read_int(ctx->bs, 1); ctx->hdr.nb_ch = gf_bs_read_int(ctx->bs, 3); gf_bs_read_int(ctx->bs, 4); ctx->hdr.frame_size = gf_bs_read_int(ctx->bs, 13); gf_bs_read_int(ctx->bs, 11); nb_blocks_per_frame = gf_bs_read_int(ctx->bs, 2); ctx->hdr.hdr_size = 7; if (!ctx->hdr.no_crc) { u32 skip; if (!nb_blocks_per_frame) { skip = 2; } else { skip = 2 + 2*nb_blocks_per_frame; //and we have 2 bytes per raw_data_block } ctx->hdr.hdr_size += skip; gf_bs_skip_bytes(ctx->bs, skip); } if (!ctx->hdr.frame_size || !GF_M4ASampleRates[ctx->hdr.sr_idx]) { GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("[ADTSDmx] Invalid ADTS frame header, resyncing\n")); ctx->nb_frames = 0; goto drop_byte; } if ((nb_blocks_per_frame>2) || (nb_blocks_per_frame && ctx->hdr.nb_ch)) { GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[ADTSDmx] Unsupported multi-block ADTS frame header - patch welcome\n")); ctx->nb_frames = 0; goto drop_byte; } else if (!nb_blocks_per_frame) { if (ctx->aacchcfg<0) ctx->hdr.nb_ch = -ctx->aacchcfg; else if (!ctx->hdr.nb_ch) ctx->hdr.nb_ch = ctx->aacchcfg; if (!ctx->hdr.nb_ch) { GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[ADTSDmx] Missing channel configuration in ADTS frame header, defaulting to stereo - use `--aacchcfg` to force config\n")); ctx->hdr.nb_ch = ctx->aacchcfg = 2; } } if (nb_blocks_per_frame==2) { u32 pos = (u32) gf_bs_get_position(ctx->bs); gf_m4a_parse_program_config_element(ctx->bs, &ctx->acfg); if (!ctx->hdr.no_crc) gf_bs_skip_bytes(ctx->bs, 2); //per block CRC ctx->hdr.hdr_size += (u32) gf_bs_get_position(ctx->bs) - pos; } //value 1->6 match channel number, value 7 is 7.1 if (ctx->hdr.nb_ch==7) ctx->hdr.nb_ch = 8; //ready to send packet if (ctx->hdr.frame_size + 1 < remain) { u32 next_frame = ctx->hdr.frame_size; //make sure we are sync! if ((sync[next_frame] !=0xFF) || ((sync[next_frame+1] & 0xF0) !=0xF0) ) { GF_LOG(ctx->nb_frames ? 
GF_LOG_WARNING : GF_LOG_DEBUG, GF_LOG_PARSER, ("[ADTSDmx] invalid next ADTS frame sync, resyncing\n")); ctx->nb_frames = 0; goto drop_byte; } } //otherwise wait for next frame, unless if end of stream else if (pck) { if (ctx->timescale && !prev_pck_size && (cts != GF_FILTER_NO_TS) ) { ctx->cts = cts; } break; } adts_dmx_check_pid(filter, ctx); if (!ctx->is_playing) { ctx->resume_from = 1 + ctx->adts_buffer_size - remain; return GF_OK; } ctx->nb_frames++; size = ctx->hdr.frame_size - ctx->hdr.hdr_size; offset = ctx->hdr.hdr_size; //per raw-block CRC if ((nb_blocks_per_frame==2) && !ctx->hdr.no_crc) size -= 2; if (ctx->in_seek) { u64 nb_samples_at_seek = (u64) (ctx->start_range * GF_M4ASampleRates[ctx->sr_idx]); if (ctx->cts + ctx->dts_inc >= nb_samples_at_seek) { //u32 samples_to_discard = (ctx->cts + ctx->dts_inc) - nb_samples_at_seek; ctx->in_seek = GF_FALSE; } } bytes_to_drop = ctx->hdr.frame_size; if (ctx->timescale && !prev_pck_size && (cts != GF_FILTER_NO_TS) ) { ctx->cts = cts; cts = GF_FILTER_NO_TS; } if (!ctx->in_seek) { dst_pck = gf_filter_pck_new_alloc(ctx->opid, size, &output); if (ctx->src_pck) gf_filter_pck_merge_properties(ctx->src_pck, dst_pck); memcpy(output, sync + offset, size); gf_filter_pck_set_dts(dst_pck, ctx->cts); gf_filter_pck_set_cts(dst_pck, ctx->cts); gf_filter_pck_set_duration(dst_pck, ctx->dts_inc); gf_filter_pck_set_framing(dst_pck, GF_TRUE, GF_TRUE); gf_filter_pck_set_sap(dst_pck, GF_FILTER_SAP_1); if (ctx->byte_offset != GF_FILTER_NO_BO) { gf_filter_pck_set_byte_offset(dst_pck, ctx->byte_offset + ctx->hdr.hdr_size); } gf_filter_pck_send(dst_pck); } adts_dmx_update_cts(ctx); //truncated last frame if (bytes_to_drop>remain) { GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[ADTSDmx] truncated ADTS frame!\n")); bytes_to_drop=remain; } drop_byte: if (!bytes_to_drop) { bytes_to_drop = 1; } start += bytes_to_drop; remain -= bytes_to_drop; if (prev_pck_size) { if (prev_pck_size > bytes_to_drop) prev_pck_size -= bytes_to_drop; else { prev_pck_size=0; if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck); ctx->src_pck = pck; if (pck) gf_filter_pck_ref_props(&ctx->src_pck); } } if (ctx->byte_offset != GF_FILTER_NO_BO) ctx->byte_offset += bytes_to_drop; } if (!pck) { ctx->adts_buffer_size = 0; return adts_dmx_process(filter); } else { if (remain) { memmove(ctx->adts_buffer, start, remain); } ctx->adts_buffer_size = remain; gf_filter_pid_drop_packet(ctx->ipid); } return GF_OK; } static void adts_dmx_finalize(GF_Filter *filter) { GF_ADTSDmxCtx *ctx = gf_filter_get_udta(filter); if (ctx->bs) gf_bs_del(ctx->bs); if (ctx->indexes) gf_free(ctx->indexes); if (ctx->adts_buffer) gf_free(ctx->adts_buffer); if (ctx->id3_buffer) gf_free(ctx->id3_buffer); if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck); } static const char *adts_dmx_probe_data(const u8 *data, u32 size, GF_FilterProbeScore *score) { u32 nb_frames=0, next_pos=0, max_consecutive_frames=0; ADTSHeader prev_hdr; GF_BitStream *bs; Bool has_id3=GF_FALSE; Bool has_broken_data=GF_FALSE; /*check for id3*/ if (size>= 10) { if (data[0] == 'I' && data[1] == 'D' && data[2] == '3') { u32 tag_size = ((data[9] & 0x7f) + ((data[8] & 0x7f) << 7) + ((data[7] & 0x7f) << 14) + ((data[6] & 0x7f) << 21)); if (tag_size+10 > size) { GF_LOG(GF_LOG_WARNING, GF_LOG_MEDIA, ("ID3 tag detected size %d but probe data only %d bytes, will rely on file extension (try increasing probe size using --block_size)\n", tag_size+10, size)); *score = GF_FPROBE_EXT_MATCH; return "aac|adts"; } data += tag_size+10; size -= tag_size+10; has_id3 = GF_TRUE; } } bs = 
gf_bs_new(data, size, GF_BITSTREAM_READ); memset(&prev_hdr, 0, sizeof(ADTSHeader)); while (gf_bs_available(bs)) { ADTSHeader hdr; u32 pos; hdr.frame_size = 0; if (!adts_dmx_sync_frame_bs(bs, &hdr)) { if (hdr.frame_size) { //nb_frames++; max_consecutive_frames++; } break; } if ((hdr.hdr_size!=7) && (hdr.hdr_size!=9)) continue; // if (!hdr.nb_ch) continue; pos = (u32) gf_bs_get_position(bs); if (!nb_frames) { nb_frames = 1; } else if ((next_pos + hdr.hdr_size == pos) && (hdr.sr_idx==prev_hdr.sr_idx) && (hdr.nb_ch==prev_hdr.nb_ch) ) { nb_frames++; if (max_consecutive_frames<nb_frames) max_consecutive_frames = nb_frames; if (max_consecutive_frames>5) break; } else { nb_frames=1; has_broken_data=GF_TRUE; } prev_hdr = hdr; gf_bs_skip_bytes(bs, hdr.frame_size); next_pos = (u32) gf_bs_get_position(bs); } gf_bs_del(bs); if (max_consecutive_frames>=4) { *score = has_broken_data ? GF_FPROBE_MAYBE_SUPPORTED : GF_FPROBE_SUPPORTED; return "audio/aac"; } if (has_id3 && max_consecutive_frames) { *score = GF_FPROBE_MAYBE_SUPPORTED; return "audio/aac"; } return NULL; } static const GF_FilterCapability ADTSDmxCaps[] = { CAP_UINT(GF_CAPS_INPUT, GF_PROP_PID_STREAM_TYPE, GF_STREAM_FILE), CAP_STRING(GF_CAPS_INPUT, GF_PROP_PID_FILE_EXT, "aac|adts"), CAP_STRING(GF_CAPS_INPUT, GF_PROP_PID_MIME, "audio/x-m4a|audio/aac|audio/aacp|audio/x-aac"), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_STREAM_TYPE, GF_STREAM_AUDIO), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_AAC_MPEG4), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_AAC_MPEG2_MP), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_AAC_MPEG2_LCP), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_AAC_MPEG2_SSRP), //we explitely set this one to prevent adts->latm reframer connection CAP_BOOL(GF_CAPS_OUTPUT_STATIC_EXCLUDED, GF_PROP_PID_UNFRAMED, GF_TRUE), {0}, CAP_UINT(GF_CAPS_INPUT,GF_PROP_PID_STREAM_TYPE, GF_STREAM_AUDIO), CAP_UINT(GF_CAPS_INPUT,GF_PROP_PID_CODECID, GF_CODECID_AAC_MPEG4), CAP_BOOL(GF_CAPS_INPUT,GF_PROP_PID_UNFRAMED, GF_TRUE), }; #define OFFS(_n) #_n, offsetof(GF_ADTSDmxCtx, _n) static const GF_FilterArgs ADTSDmxArgs[] = { { OFFS(frame_size), "size of AAC frame in audio samples", GF_PROP_UINT, "1024", NULL, GF_FS_ARG_HINT_EXPERT}, { OFFS(index), "indexing window length", GF_PROP_DOUBLE, "1.0", NULL, 0}, // { OFFS(mpeg4), "force signaling as MPEG-4 AAC", GF_PROP_BOOL, "false", NULL, GF_FS_ARG_HINT_ADVANCED}, { OFFS(ovsbr), "force oversampling SBR (does not multiply timescales by 2)", GF_PROP_BOOL, "false", NULL, GF_FS_ARG_HINT_ADVANCED}, { OFFS(sbr), "set SBR signaling\n"\ "- no: no SBR signaling at all\n"\ "- imp: backward-compatible SBR signaling (audio signaled as AAC-LC)\n"\ "- exp: explicit SBR signaling (audio signaled as AAC-SBR)"\ , GF_PROP_UINT, "no", "no|imp|exp", GF_FS_ARG_HINT_ADVANCED}, { OFFS(ps), "set PS signaling\n"\ "- no: no PS signaling at all\n"\ "- imp: backward-compatible PS signaling (audio signaled as AAC-LC)\n"\ "- exp: explicit PS signaling (audio signaled as AAC-PS)"\ , GF_PROP_UINT, "no", "no|imp|exp", GF_FS_ARG_HINT_ADVANCED}, { OFFS(expart), "expose pictures as a dedicated video pid", GF_PROP_BOOL, "false", NULL, 0}, { OFFS(aacchcfg), "set AAC channel configuration to this value if missing from ADTS header, use negative value to always override", GF_PROP_SINT, "0", NULL, GF_FS_ARG_HINT_EXPERT}, {0} }; GF_FilterRegister ADTSDmxRegister = { .name = "rfadts", GF_FS_SET_DESCRIPTION("ADTS reframer") GF_FS_SET_HELP("This filter parses AAC files/data and outputs 
corresponding audio PID and frames.") .private_size = sizeof(GF_ADTSDmxCtx), .args = ADTSDmxArgs, .finalize = adts_dmx_finalize, SETCAPS(ADTSDmxCaps), .configure_pid = adts_dmx_configure_pid, .process = adts_dmx_process, .probe_data = adts_dmx_probe_data, .process_event = adts_dmx_process_event }; const GF_FilterRegister *adts_dmx_register(GF_FilterSession *session) { return &ADTSDmxRegister; } #else const GF_FilterRegister *adts_dmx_register(GF_FilterSession *session) { return NULL; } #endif // GPAC_DISABLE_AV_PARSERS
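Note on the source payload above: in adts_dmx_process() the raw AAC payload size is computed as size = ctx->hdr.frame_size - ctx->hdr.hdr_size and then used both to allocate the output packet and as the memcpy() length, without first checking that frame_size is at least hdr_size. Both fields are unsigned, so a malformed ADTS header with frame_size smaller than hdr_size wraps size around to a value close to 2^32. The fixed (ground_truth) copy of the file that follows adds, just before adts_dmx_check_pid() is reached, a guard that rejects such frames. A minimal sketch of that guard, shown here in isolation (same logic as in the fixed code):

	/* Reject frames whose declared frame_size is smaller than the header,
	 * which would otherwise make size = frame_size - hdr_size wrap around. */
	if (ctx->hdr.frame_size < ctx->hdr.hdr_size) {
		GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[ADTSDmx] Corrupted ADTS frame header, resyncing\n"));
		ctx->nb_frames = 0;
		goto drop_byte;
	}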
null
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2021 * All rights reserved * * This file is part of GPAC / AAC ADTS reframer filter * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/avparse.h> #include <gpac/constants.h> #include <gpac/filters.h> #ifndef GPAC_DISABLE_AV_PARSERS enum { AAC_SIGNAL_NONE=0, AAC_SIGNAL_IMPLICIT, AAC_SIGNAL_EXPLICIT }; typedef struct { Bool is_mp2, no_crc; u32 profile, sr_idx, nb_ch, frame_size, hdr_size; } ADTSHeader; typedef struct { u64 pos; Double duration; } ADTSIdx; typedef struct { //filter args u32 frame_size; Double index; u32 sbr; u32 ps; // Bool mpeg4; Bool ovsbr; Bool expart; s32 aacchcfg; //only one input pid declared GF_FilterPid *ipid; //output pid for audio GF_FilterPid *opid; //video pid for cover art GF_FilterPid *vpid; GF_BitStream *bs; u64 file_pos, cts; u32 sr_idx, nb_ch, is_mp2, profile; GF_Fraction64 duration; Double start_range; Bool in_seek; u32 timescale; ADTSHeader hdr; u32 dts_inc; Bool is_playing; Bool is_file, file_loaded; Bool initial_play_done; GF_FilterPacket *src_pck; ADTSIdx *indexes; u32 index_alloc_size, index_size; u8 *adts_buffer; u32 adts_buffer_size, adts_buffer_alloc, resume_from; u64 byte_offset; u32 tag_size; u8 *id3_buffer; u32 id3_buffer_size, id3_buffer_alloc; u32 nb_frames; GF_M4ADecSpecInfo acfg; u32 bitrate; } GF_ADTSDmxCtx; static Bool adts_dmx_sync_frame_bs(GF_BitStream *bs, ADTSHeader *hdr) { u32 val; u64 pos; while (gf_bs_available(bs)>7) { val = gf_bs_read_u8(bs); if (val!=0xFF) continue; val = gf_bs_read_int(bs, 4); if (val != 0x0F) { gf_bs_read_int(bs, 4); continue; } hdr->is_mp2 = (Bool)gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 2); hdr->no_crc = (Bool)gf_bs_read_int(bs, 1); pos = gf_bs_get_position(bs) - 2; hdr->profile = 1 + gf_bs_read_int(bs, 2); hdr->sr_idx = gf_bs_read_int(bs, 4); gf_bs_read_int(bs, 1); hdr->nb_ch = gf_bs_read_int(bs, 3); //value 1->6 match channel number, value 7 is 7.1 if (hdr->nb_ch==7) hdr->nb_ch = 8; gf_bs_read_int(bs, 4); hdr->frame_size = gf_bs_read_int(bs, 13); gf_bs_read_int(bs, 11); gf_bs_read_int(bs, 2); hdr->hdr_size = 7; if (!hdr->no_crc) { gf_bs_read_u16(bs); hdr->hdr_size = 9; } if (!GF_M4ASampleRates[hdr->sr_idx] || (hdr->frame_size < hdr->hdr_size)) { gf_bs_seek(bs, pos+1); continue; } hdr->frame_size -= hdr->hdr_size; if (gf_bs_available(bs) == hdr->frame_size) { return GF_TRUE; } if (gf_bs_available(bs) < hdr->frame_size) { break; } gf_bs_skip_bytes(bs, hdr->frame_size); val = gf_bs_read_u8(bs); if (val!=0xFF) { gf_bs_seek(bs, pos+1); continue; } val = gf_bs_read_int(bs, 4); if (val!=0x0F) { gf_bs_read_int(bs, 4); gf_bs_seek(bs, pos+1); continue; } gf_bs_seek(bs, pos+hdr->hdr_size); return GF_TRUE; } return GF_FALSE; } void id3dmx_flush(GF_Filter *filter, u8 *id3_buf, u32 id3_buf_size, GF_FilterPid *audio_pid, GF_FilterPid **video_pid_p); GF_Err 
adts_dmx_configure_pid(GF_Filter *filter, GF_FilterPid *pid, Bool is_remove) { const GF_PropertyValue *p; GF_ADTSDmxCtx *ctx = gf_filter_get_udta(filter); if (is_remove) { ctx->ipid = NULL; if (ctx->opid) { gf_filter_pid_remove(ctx->opid); ctx->opid = NULL; } return GF_OK; } if (! gf_filter_pid_check_caps(pid)) return GF_NOT_SUPPORTED; ctx->ipid = pid; p = gf_filter_pid_get_property(pid, GF_PROP_PID_TIMESCALE); if (p) ctx->timescale = p->value.uint; if (ctx->timescale && !ctx->opid) { ctx->opid = gf_filter_pid_new(filter); gf_filter_pid_copy_properties(ctx->opid, ctx->ipid); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_UNFRAMED, NULL); //we don't update copy props on output for now - if we decide we need it, we will need to also force resengin the decoder config } return GF_OK; } static void adts_dmx_check_dur(GF_Filter *filter, GF_ADTSDmxCtx *ctx) { FILE *stream; GF_BitStream *bs; ADTSHeader hdr; u64 duration, cur_dur, rate; s32 sr_idx = -1; const GF_PropertyValue *p; if (!ctx->opid || ctx->timescale || ctx->file_loaded) return; if (ctx->index<=0) { ctx->file_loaded = GF_TRUE; return; } p = gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_FILEPATH); if (!p || !p->value.string || !strncmp(p->value.string, "gmem://", 7)) { ctx->is_file = GF_FALSE; ctx->file_loaded = GF_TRUE; return; } ctx->is_file = GF_TRUE; stream = gf_fopen(p->value.string, "rb"); if (!stream) return; ctx->index_size = 0; bs = gf_bs_from_file(stream, GF_BITSTREAM_READ); duration = 0; cur_dur = 0; while (adts_dmx_sync_frame_bs(bs, &hdr)) { if ((sr_idx>=0) && (sr_idx != hdr.sr_idx)) { duration *= GF_M4ASampleRates[hdr.sr_idx]; duration /= GF_M4ASampleRates[sr_idx]; cur_dur *= GF_M4ASampleRates[hdr.sr_idx]; cur_dur /= GF_M4ASampleRates[sr_idx]; } sr_idx = hdr.sr_idx; duration += ctx->frame_size; cur_dur += ctx->frame_size; if (cur_dur > ctx->index * GF_M4ASampleRates[sr_idx]) { if (!ctx->index_alloc_size) ctx->index_alloc_size = 10; else if (ctx->index_alloc_size == ctx->index_size) ctx->index_alloc_size *= 2; ctx->indexes = gf_realloc(ctx->indexes, sizeof(ADTSIdx)*ctx->index_alloc_size); ctx->indexes[ctx->index_size].pos = gf_bs_get_position(bs) - hdr.hdr_size; ctx->indexes[ctx->index_size].duration = (Double) duration; ctx->indexes[ctx->index_size].duration /= GF_M4ASampleRates[sr_idx]; ctx->index_size ++; cur_dur = 0; } gf_bs_skip_bytes(bs, hdr.frame_size); } rate = gf_bs_get_position(bs); gf_bs_del(bs); gf_fclose(stream); if (sr_idx>=0) { if (!ctx->duration.num || (ctx->duration.num * GF_M4ASampleRates[sr_idx] != duration * ctx->duration.den)) { ctx->duration.num = (s32) duration; ctx->duration.den = GF_M4ASampleRates[sr_idx]; gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_DURATION, & PROP_FRAC64(ctx->duration)); if (duration && !gf_sys_is_test_mode() ) { rate *= 8 * ctx->duration.den; rate /= ctx->duration.num; ctx->bitrate = (u32) rate; } } } p = gf_filter_pid_get_property(ctx->ipid, GF_PROP_PID_FILE_CACHED); if (p && p->value.boolean) ctx->file_loaded = GF_TRUE; gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_CAN_DATAREF, & PROP_BOOL(GF_TRUE ) ); } static void adts_dmx_check_pid(GF_Filter *filter, GF_ADTSDmxCtx *ctx) { GF_BitStream *dsi; Bool use_implicit=GF_FALSE; u8 *dsi_b; u32 i, sbr_sr_idx, dsi_s, sr, sbr_sr, codecid, timescale=0; if (!ctx->opid) { ctx->opid = gf_filter_pid_new(filter); gf_filter_pid_copy_properties(ctx->opid, ctx->ipid); adts_dmx_check_dur(filter, ctx); } if ((ctx->sr_idx == ctx->hdr.sr_idx) && (ctx->nb_ch == ctx->hdr.nb_ch) && (ctx->is_mp2 == ctx->hdr.is_mp2) && (ctx->profile == 
ctx->hdr.profile) ) return; //copy properties at init or reconfig gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_STREAM_TYPE, & PROP_UINT( GF_STREAM_AUDIO)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_CODECID, & PROP_UINT( GF_CODECID_AAC_MPEG4)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_SAMPLES_PER_FRAME, & PROP_UINT(ctx->frame_size) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_UNFRAMED, & PROP_BOOL(GF_FALSE) ); if (ctx->is_file && ctx->index) { gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_PLAYBACK_MODE, & PROP_UINT(GF_PLAYBACK_MODE_FASTFORWARD) ); } if (ctx->duration.num) gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_DURATION, & PROP_FRAC64(ctx->duration)); ctx->is_mp2 = ctx->hdr.is_mp2; ctx->nb_ch = ctx->hdr.nb_ch; ctx->profile = ctx->hdr.profile; sr = GF_M4ASampleRates[ctx->hdr.sr_idx]; if (!ctx->timescale) { //we change sample rate, change cts if (ctx->cts && (ctx->sr_idx != ctx->hdr.sr_idx)) { ctx->cts *= sr; ctx->cts /= GF_M4ASampleRates[ctx->sr_idx]; } } ctx->sr_idx = ctx->hdr.sr_idx; /*keep MPEG-2 AAC codecid even for HE-SBR (that's correct according to latest MPEG-4 audio spec)*/ codecid = ctx->hdr.is_mp2 ? ctx->hdr.profile+GF_CODECID_AAC_MPEG2_MP-1 : GF_CODECID_AAC_MPEG4; gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_CODECID, & PROP_UINT(codecid) ); //force explicit SBR if explicit PS if (ctx->ps==AAC_SIGNAL_EXPLICIT) { ctx->sbr = AAC_SIGNAL_EXPLICIT; } /*no provision for explicit indication of MPEG-2 AAC through MPEG-4 PLs, so force implicit*/ if (ctx->hdr.is_mp2) { if (ctx->sbr == AAC_SIGNAL_EXPLICIT) ctx->sbr = AAC_SIGNAL_IMPLICIT; if (ctx->ps == AAC_SIGNAL_EXPLICIT) ctx->ps = AAC_SIGNAL_IMPLICIT; } dsi = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); ctx->dts_inc = ctx->frame_size; if (!ctx->ovsbr) { sbr_sr = 0; sbr_sr_idx = 0; for (i=0; i<16; i++) { if (GF_M4ASampleRates[i] == (u32) 2*sr) { sbr_sr_idx = i; sbr_sr = 2*sr; break; } } } else { sbr_sr = sr; sbr_sr_idx = ctx->hdr.sr_idx; } ctx->acfg.base_object_type = ctx->hdr.profile; ctx->acfg.base_sr = sr; ctx->acfg.base_sr_index = ctx->hdr.sr_idx; ctx->acfg.nb_chan = ctx->hdr.nb_ch; ctx->acfg.sbr_object_type = 0; /*explicit PS signal (non backward-compatible), only for stereo ADTS*/ if (ctx->acfg.nb_chan<=2) { if (ctx->ps==AAC_SIGNAL_EXPLICIT) { ctx->acfg.base_object_type = 29; ctx->acfg.sbr_object_type = ctx->hdr.profile; ctx->acfg.sbr_sr = sr; ctx->acfg.sbr_sr_index = ctx->acfg.base_sr_index; } else if (ctx->ps==AAC_SIGNAL_IMPLICIT) { use_implicit = GF_TRUE; } } if (ctx->sbr==AAC_SIGNAL_EXPLICIT) { //don't overwrite obj type if explicit PS is used if (ctx->acfg.base_object_type != 29) ctx->acfg.base_object_type = 5; ctx->acfg.sbr_object_type = ctx->hdr.profile; ctx->acfg.sbr_sr = sbr_sr; ctx->acfg.sbr_sr_index = sbr_sr_idx; } else if (ctx->sbr==AAC_SIGNAL_IMPLICIT) { sbr_sr = 0; use_implicit = GF_TRUE; } else { sbr_sr = 0; } ctx->acfg.audioPL = gf_m4a_get_profile(&ctx->acfg); /*for better interop, always store using full SR when using explict signaling*/ if (sbr_sr) { ctx->dts_inc *= 2; sr = sbr_sr; } gf_m4a_write_config_bs(dsi, &ctx->acfg); gf_bs_align(dsi); //implicit signaling, not written by gf_m4a_write_config_bs if (use_implicit) { gf_bs_write_int(dsi, 0x2b7, 11); /*sync extension type*/ gf_bs_write_int(dsi, 5, 5); /*audio objectType*/ /*implicit AAC SBR signal*/ if (ctx->sbr==AAC_SIGNAL_IMPLICIT) { gf_bs_write_int(dsi, 1, 1); /*SBR present flag*/ gf_bs_write_int(dsi, sbr_sr_idx, 4); } else { gf_bs_write_int(dsi, 0, 1); /*SBR present flag*/ } if (ctx->ps==AAC_SIGNAL_IMPLICIT) { 
gf_bs_write_int(dsi, 0x548, 11); /*sync extension type*/ gf_bs_write_int(dsi, 1, 1); /* PS present flag */ } gf_bs_align(dsi); } gf_bs_get_content(dsi, &dsi_b, &dsi_s); gf_bs_del(dsi); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_DECODER_CONFIG, & PROP_DATA_NO_COPY(dsi_b, dsi_s) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_PROFILE_LEVEL, & PROP_UINT (ctx->acfg.audioPL) ); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_SAMPLE_RATE, & PROP_UINT(sr)); timescale = sr; if (ctx->ovsbr) timescale = 2*sr; gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_TIMESCALE, & PROP_UINT(ctx->timescale ? ctx->timescale : timescale)); gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_NUM_CHANNELS, & PROP_UINT(ctx->nb_ch) ); if (ctx->bitrate) { gf_filter_pid_set_property(ctx->opid, GF_PROP_PID_BITRATE, & PROP_UINT(ctx->bitrate)); } if (ctx->id3_buffer_size) { id3dmx_flush(filter, ctx->id3_buffer, ctx->id3_buffer_size, ctx->opid, ctx->expart ? &ctx->vpid : NULL); ctx->id3_buffer_size = 0; } } static Bool adts_dmx_process_event(GF_Filter *filter, const GF_FilterEvent *evt) { u32 i; GF_FilterEvent fevt; GF_ADTSDmxCtx *ctx = gf_filter_get_udta(filter); switch (evt->base.type) { case GF_FEVT_PLAY: if (!ctx->is_playing) { ctx->is_playing = GF_TRUE; ctx->cts = 0; } ctx->nb_frames = 0; ctx->id3_buffer_size = 0; if (! ctx->is_file) { if (evt->play.start_range || ctx->initial_play_done) { ctx->adts_buffer_size = 0; ctx->resume_from = 0; } ctx->initial_play_done = GF_TRUE; return GF_FALSE; } ctx->start_range = evt->play.start_range; ctx->in_seek = GF_TRUE; ctx->file_pos = 0; if (ctx->start_range) { for (i=1; i<ctx->index_size; i++) { if (ctx->indexes[i].duration>ctx->start_range) { ctx->cts = (u64) (ctx->indexes[i-1].duration * GF_M4ASampleRates[ctx->sr_idx]); ctx->file_pos = ctx->indexes[i-1].pos; break; } } } if (!ctx->initial_play_done) { ctx->initial_play_done = GF_TRUE; //seek will not change the current source state, don't send a seek if (!ctx->file_pos) return GF_TRUE; } ctx->resume_from = 0; ctx->adts_buffer_size = 0; //post a seek GF_FEVT_INIT(fevt, GF_FEVT_SOURCE_SEEK, ctx->ipid); fevt.seek.start_offset = ctx->file_pos; gf_filter_pid_send_event(ctx->ipid, &fevt); //cancel event return GF_TRUE; case GF_FEVT_STOP: //don't cancel event ctx->is_playing = GF_FALSE; return GF_FALSE; case GF_FEVT_SET_SPEED: //cancel event return GF_TRUE; default: break; } //by default don't cancel event - to rework once we have downloading in place return GF_FALSE; } static GFINLINE void adts_dmx_update_cts(GF_ADTSDmxCtx *ctx) { assert(ctx->dts_inc); if (ctx->timescale) { u64 inc = ctx->dts_inc; inc *= ctx->timescale; inc /= GF_M4ASampleRates[ctx->sr_idx]; ctx->cts += inc; } else { ctx->cts += ctx->dts_inc; } } GF_Err adts_dmx_process(GF_Filter *filter) { GF_ADTSDmxCtx *ctx = gf_filter_get_udta(filter); GF_FilterPacket *pck, *dst_pck; u8 *data, *output; u8 *start; u32 pck_size, remain, prev_pck_size; u64 cts = GF_FILTER_NO_TS; //always reparse duration if (!ctx->duration.num) adts_dmx_check_dur(filter, ctx); if (ctx->opid && !ctx->is_playing) return GF_OK; pck = gf_filter_pid_get_packet(ctx->ipid); if (!pck) { if (gf_filter_pid_is_eos(ctx->ipid)) { if (!ctx->adts_buffer_size) { if (ctx->opid) gf_filter_pid_set_eos(ctx->opid); if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck); ctx->src_pck = NULL; return GF_EOS; } } else { return GF_OK; } } prev_pck_size = ctx->adts_buffer_size; if (pck && !ctx->resume_from) { data = (char *) gf_filter_pck_get_data(pck, &pck_size); if (!pck_size) { 
gf_filter_pid_drop_packet(ctx->ipid); return GF_OK; } if (ctx->byte_offset != GF_FILTER_NO_BO) { u64 byte_offset = gf_filter_pck_get_byte_offset(pck); if (!ctx->adts_buffer_size) { ctx->byte_offset = byte_offset; } else if (ctx->byte_offset + ctx->adts_buffer_size != byte_offset) { ctx->byte_offset = GF_FILTER_NO_BO; if ((byte_offset != GF_FILTER_NO_BO) && (byte_offset>ctx->adts_buffer_size) ) { ctx->byte_offset = byte_offset - ctx->adts_buffer_size; } } } if (ctx->adts_buffer_size + pck_size > ctx->adts_buffer_alloc) { ctx->adts_buffer_alloc = ctx->adts_buffer_size + pck_size; ctx->adts_buffer = gf_realloc(ctx->adts_buffer, ctx->adts_buffer_alloc); } memcpy(ctx->adts_buffer + ctx->adts_buffer_size, data, pck_size); ctx->adts_buffer_size += pck_size; } //input pid sets some timescale - we flushed pending data , update cts if (ctx->timescale && pck) { cts = gf_filter_pck_get_cts(pck); } if (cts == GF_FILTER_NO_TS) { //avoids updating cts prev_pck_size = 0; } remain = ctx->adts_buffer_size; start = ctx->adts_buffer; if (ctx->resume_from) { start += ctx->resume_from - 1; remain -= ctx->resume_from - 1; ctx->resume_from = 0; } while (remain) { u8 *sync; u32 sync_pos, size, offset, bytes_to_drop=0, nb_blocks_per_frame; if (!ctx->tag_size && (remain>3)) { /* Did we read an ID3v2 ? */ if (start[0] == 'I' && start[1] == 'D' && start[2] == '3') { if (remain<10) return GF_OK; ctx->tag_size = ((start[9] & 0x7f) + ((start[8] & 0x7f) << 7) + ((start[7] & 0x7f) << 14) + ((start[6] & 0x7f) << 21)); bytes_to_drop = 10; if (ctx->id3_buffer_alloc < ctx->tag_size+10) { ctx->id3_buffer = gf_realloc(ctx->id3_buffer, ctx->tag_size+10); ctx->id3_buffer_alloc = ctx->tag_size+10; } memcpy(ctx->id3_buffer, start, 10); ctx->id3_buffer_size = 10; goto drop_byte; } } if (ctx->tag_size) { if (ctx->tag_size>remain) { bytes_to_drop = remain; ctx->tag_size-=remain; } else { bytes_to_drop = ctx->tag_size; ctx->tag_size = 0; } memcpy(ctx->id3_buffer + ctx->id3_buffer_size, start, bytes_to_drop); ctx->id3_buffer_size += bytes_to_drop; if (!ctx->tag_size && ctx->opid) { id3dmx_flush(filter, ctx->id3_buffer, ctx->id3_buffer_size, ctx->opid, ctx->expart ? &ctx->vpid : NULL); ctx->id3_buffer_size = 0; } goto drop_byte; } sync = memchr(start, 0xFF, remain); sync_pos = (u32) (sync ? sync - start : remain); //couldn't find sync byte in this packet if (remain - sync_pos < 7) { break; } //not sync ! if ((sync[1] & 0xF0) != 0xF0) { GF_LOG(ctx->nb_frames ? 
GF_LOG_WARNING : GF_LOG_DEBUG, GF_LOG_PARSER, ("[ADTSDmx] invalid ADTS sync bytes, resyncing\n")); ctx->nb_frames = 0; goto drop_byte; } if (!ctx->bs) { ctx->bs = gf_bs_new(sync + 1, remain - sync_pos - 1, GF_BITSTREAM_READ); } else { gf_bs_reassign_buffer(ctx->bs, sync + 1, remain - sync_pos - 1); } //ok parse header gf_bs_read_int(ctx->bs, 4); ctx->hdr.is_mp2 = (Bool)gf_bs_read_int(ctx->bs, 1); //if (ctx->mpeg4) //we deprecate old MPEG-2 signaling for AAC in ISOBMFF, as it is not well supported anyway and we don't write adif_header as //supposed to be for these types ctx->hdr.is_mp2 = 0; gf_bs_read_int(ctx->bs, 2); ctx->hdr.no_crc = (Bool)gf_bs_read_int(ctx->bs, 1); ctx->hdr.profile = 1 + gf_bs_read_int(ctx->bs, 2); ctx->hdr.sr_idx = gf_bs_read_int(ctx->bs, 4); gf_bs_read_int(ctx->bs, 1); ctx->hdr.nb_ch = gf_bs_read_int(ctx->bs, 3); gf_bs_read_int(ctx->bs, 4); ctx->hdr.frame_size = gf_bs_read_int(ctx->bs, 13); gf_bs_read_int(ctx->bs, 11); nb_blocks_per_frame = gf_bs_read_int(ctx->bs, 2); ctx->hdr.hdr_size = 7; if (!ctx->hdr.no_crc) { u32 skip; if (!nb_blocks_per_frame) { skip = 2; } else { skip = 2 + 2*nb_blocks_per_frame; //and we have 2 bytes per raw_data_block } ctx->hdr.hdr_size += skip; gf_bs_skip_bytes(ctx->bs, skip); } if (!ctx->hdr.frame_size || !GF_M4ASampleRates[ctx->hdr.sr_idx]) { GF_LOG(GF_LOG_DEBUG, GF_LOG_PARSER, ("[ADTSDmx] Invalid ADTS frame header, resyncing\n")); ctx->nb_frames = 0; goto drop_byte; } if ((nb_blocks_per_frame>2) || (nb_blocks_per_frame && ctx->hdr.nb_ch)) { GF_LOG(GF_LOG_ERROR, GF_LOG_PARSER, ("[ADTSDmx] Unsupported multi-block ADTS frame header - patch welcome\n")); ctx->nb_frames = 0; goto drop_byte; } else if (!nb_blocks_per_frame) { if (ctx->aacchcfg<0) ctx->hdr.nb_ch = -ctx->aacchcfg; else if (!ctx->hdr.nb_ch) ctx->hdr.nb_ch = ctx->aacchcfg; if (!ctx->hdr.nb_ch) { GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[ADTSDmx] Missing channel configuration in ADTS frame header, defaulting to stereo - use `--aacchcfg` to force config\n")); ctx->hdr.nb_ch = ctx->aacchcfg = 2; } } if (nb_blocks_per_frame==2) { u32 pos = (u32) gf_bs_get_position(ctx->bs); gf_m4a_parse_program_config_element(ctx->bs, &ctx->acfg); if (!ctx->hdr.no_crc) gf_bs_skip_bytes(ctx->bs, 2); //per block CRC ctx->hdr.hdr_size += (u32) gf_bs_get_position(ctx->bs) - pos; } //value 1->6 match channel number, value 7 is 7.1 if (ctx->hdr.nb_ch==7) ctx->hdr.nb_ch = 8; //ready to send packet if (ctx->hdr.frame_size + 1 < remain) { u32 next_frame = ctx->hdr.frame_size; //make sure we are sync! if ((sync[next_frame] !=0xFF) || ((sync[next_frame+1] & 0xF0) !=0xF0) ) { GF_LOG(ctx->nb_frames ? 
GF_LOG_WARNING : GF_LOG_DEBUG, GF_LOG_PARSER, ("[ADTSDmx] invalid next ADTS frame sync, resyncing\n")); ctx->nb_frames = 0; goto drop_byte; } } //otherwise wait for next frame, unless if end of stream else if (pck) { if (ctx->timescale && !prev_pck_size && (cts != GF_FILTER_NO_TS) ) { ctx->cts = cts; } break; } if (ctx->hdr.frame_size < ctx->hdr.hdr_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[ADTSDmx] Corrupted ADTS frame header, resyncing\n")); ctx->nb_frames = 0; goto drop_byte; } adts_dmx_check_pid(filter, ctx); if (!ctx->is_playing) { ctx->resume_from = 1 + ctx->adts_buffer_size - remain; return GF_OK; } ctx->nb_frames++; size = ctx->hdr.frame_size - ctx->hdr.hdr_size; offset = ctx->hdr.hdr_size; //per raw-block CRC if ((nb_blocks_per_frame==2) && !ctx->hdr.no_crc) size -= 2; if (ctx->in_seek) { u64 nb_samples_at_seek = (u64) (ctx->start_range * GF_M4ASampleRates[ctx->sr_idx]); if (ctx->cts + ctx->dts_inc >= nb_samples_at_seek) { //u32 samples_to_discard = (ctx->cts + ctx->dts_inc) - nb_samples_at_seek; ctx->in_seek = GF_FALSE; } } bytes_to_drop = ctx->hdr.frame_size; if (ctx->timescale && !prev_pck_size && (cts != GF_FILTER_NO_TS) ) { ctx->cts = cts; cts = GF_FILTER_NO_TS; } if (!ctx->in_seek) { dst_pck = gf_filter_pck_new_alloc(ctx->opid, size, &output); if (ctx->src_pck) gf_filter_pck_merge_properties(ctx->src_pck, dst_pck); memcpy(output, sync + offset, size); gf_filter_pck_set_dts(dst_pck, ctx->cts); gf_filter_pck_set_cts(dst_pck, ctx->cts); gf_filter_pck_set_duration(dst_pck, ctx->dts_inc); gf_filter_pck_set_framing(dst_pck, GF_TRUE, GF_TRUE); gf_filter_pck_set_sap(dst_pck, GF_FILTER_SAP_1); if (ctx->byte_offset != GF_FILTER_NO_BO) { gf_filter_pck_set_byte_offset(dst_pck, ctx->byte_offset + ctx->hdr.hdr_size); } gf_filter_pck_send(dst_pck); } adts_dmx_update_cts(ctx); //truncated last frame if (bytes_to_drop>remain) { GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[ADTSDmx] truncated ADTS frame!\n")); bytes_to_drop=remain; } drop_byte: if (!bytes_to_drop) { bytes_to_drop = 1; } start += bytes_to_drop; remain -= bytes_to_drop; if (prev_pck_size) { if (prev_pck_size > bytes_to_drop) prev_pck_size -= bytes_to_drop; else { prev_pck_size=0; if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck); ctx->src_pck = pck; if (pck) gf_filter_pck_ref_props(&ctx->src_pck); } } if (ctx->byte_offset != GF_FILTER_NO_BO) ctx->byte_offset += bytes_to_drop; } if (!pck) { ctx->adts_buffer_size = 0; return adts_dmx_process(filter); } else { if (remain) { memmove(ctx->adts_buffer, start, remain); } ctx->adts_buffer_size = remain; gf_filter_pid_drop_packet(ctx->ipid); } return GF_OK; } static void adts_dmx_finalize(GF_Filter *filter) { GF_ADTSDmxCtx *ctx = gf_filter_get_udta(filter); if (ctx->bs) gf_bs_del(ctx->bs); if (ctx->indexes) gf_free(ctx->indexes); if (ctx->adts_buffer) gf_free(ctx->adts_buffer); if (ctx->id3_buffer) gf_free(ctx->id3_buffer); if (ctx->src_pck) gf_filter_pck_unref(ctx->src_pck); } static const char *adts_dmx_probe_data(const u8 *data, u32 size, GF_FilterProbeScore *score) { u32 nb_frames=0, next_pos=0, max_consecutive_frames=0; ADTSHeader prev_hdr; GF_BitStream *bs; Bool has_id3=GF_FALSE; Bool has_broken_data=GF_FALSE; /*check for id3*/ if (size>= 10) { if (data[0] == 'I' && data[1] == 'D' && data[2] == '3') { u32 tag_size = ((data[9] & 0x7f) + ((data[8] & 0x7f) << 7) + ((data[7] & 0x7f) << 14) + ((data[6] & 0x7f) << 21)); if (tag_size+10 > size) { GF_LOG(GF_LOG_WARNING, GF_LOG_MEDIA, ("ID3 tag detected size %d but probe data only %d bytes, will rely on file extension (try increasing 
probe size using --block_size)\n", tag_size+10, size)); *score = GF_FPROBE_EXT_MATCH; return "aac|adts"; } data += tag_size+10; size -= tag_size+10; has_id3 = GF_TRUE; } } bs = gf_bs_new(data, size, GF_BITSTREAM_READ); memset(&prev_hdr, 0, sizeof(ADTSHeader)); while (gf_bs_available(bs)) { ADTSHeader hdr; u32 pos; hdr.frame_size = 0; if (!adts_dmx_sync_frame_bs(bs, &hdr)) { if (hdr.frame_size) { //nb_frames++; max_consecutive_frames++; } break; } if ((hdr.hdr_size!=7) && (hdr.hdr_size!=9)) continue; // if (!hdr.nb_ch) continue; pos = (u32) gf_bs_get_position(bs); if (!nb_frames) { nb_frames = 1; } else if ((next_pos + hdr.hdr_size == pos) && (hdr.sr_idx==prev_hdr.sr_idx) && (hdr.nb_ch==prev_hdr.nb_ch) ) { nb_frames++; if (max_consecutive_frames<nb_frames) max_consecutive_frames = nb_frames; if (max_consecutive_frames>5) break; } else { nb_frames=1; has_broken_data=GF_TRUE; } prev_hdr = hdr; gf_bs_skip_bytes(bs, hdr.frame_size); next_pos = (u32) gf_bs_get_position(bs); } gf_bs_del(bs); if (max_consecutive_frames>=4) { *score = has_broken_data ? GF_FPROBE_MAYBE_SUPPORTED : GF_FPROBE_SUPPORTED; return "audio/aac"; } if (has_id3 && max_consecutive_frames) { *score = GF_FPROBE_MAYBE_SUPPORTED; return "audio/aac"; } return NULL; } static const GF_FilterCapability ADTSDmxCaps[] = { CAP_UINT(GF_CAPS_INPUT, GF_PROP_PID_STREAM_TYPE, GF_STREAM_FILE), CAP_STRING(GF_CAPS_INPUT, GF_PROP_PID_FILE_EXT, "aac|adts"), CAP_STRING(GF_CAPS_INPUT, GF_PROP_PID_MIME, "audio/x-m4a|audio/aac|audio/aacp|audio/x-aac"), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_STREAM_TYPE, GF_STREAM_AUDIO), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_AAC_MPEG4), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_AAC_MPEG2_MP), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_AAC_MPEG2_LCP), CAP_UINT(GF_CAPS_OUTPUT_STATIC, GF_PROP_PID_CODECID, GF_CODECID_AAC_MPEG2_SSRP), //we explitely set this one to prevent adts->latm reframer connection CAP_BOOL(GF_CAPS_OUTPUT_STATIC_EXCLUDED, GF_PROP_PID_UNFRAMED, GF_TRUE), {0}, CAP_UINT(GF_CAPS_INPUT,GF_PROP_PID_STREAM_TYPE, GF_STREAM_AUDIO), CAP_UINT(GF_CAPS_INPUT,GF_PROP_PID_CODECID, GF_CODECID_AAC_MPEG4), CAP_BOOL(GF_CAPS_INPUT,GF_PROP_PID_UNFRAMED, GF_TRUE), }; #define OFFS(_n) #_n, offsetof(GF_ADTSDmxCtx, _n) static const GF_FilterArgs ADTSDmxArgs[] = { { OFFS(frame_size), "size of AAC frame in audio samples", GF_PROP_UINT, "1024", NULL, GF_FS_ARG_HINT_EXPERT}, { OFFS(index), "indexing window length", GF_PROP_DOUBLE, "1.0", NULL, 0}, // { OFFS(mpeg4), "force signaling as MPEG-4 AAC", GF_PROP_BOOL, "false", NULL, GF_FS_ARG_HINT_ADVANCED}, { OFFS(ovsbr), "force oversampling SBR (does not multiply timescales by 2)", GF_PROP_BOOL, "false", NULL, GF_FS_ARG_HINT_ADVANCED}, { OFFS(sbr), "set SBR signaling\n"\ "- no: no SBR signaling at all\n"\ "- imp: backward-compatible SBR signaling (audio signaled as AAC-LC)\n"\ "- exp: explicit SBR signaling (audio signaled as AAC-SBR)"\ , GF_PROP_UINT, "no", "no|imp|exp", GF_FS_ARG_HINT_ADVANCED}, { OFFS(ps), "set PS signaling\n"\ "- no: no PS signaling at all\n"\ "- imp: backward-compatible PS signaling (audio signaled as AAC-LC)\n"\ "- exp: explicit PS signaling (audio signaled as AAC-PS)"\ , GF_PROP_UINT, "no", "no|imp|exp", GF_FS_ARG_HINT_ADVANCED}, { OFFS(expart), "expose pictures as a dedicated video pid", GF_PROP_BOOL, "false", NULL, 0}, { OFFS(aacchcfg), "set AAC channel configuration to this value if missing from ADTS header, use negative value to always override", GF_PROP_SINT, "0", NULL, 
GF_FS_ARG_HINT_EXPERT}, {0} }; GF_FilterRegister ADTSDmxRegister = { .name = "rfadts", GF_FS_SET_DESCRIPTION("ADTS reframer") GF_FS_SET_HELP("This filter parses AAC files/data and outputs corresponding audio PID and frames.") .private_size = sizeof(GF_ADTSDmxCtx), .args = ADTSDmxArgs, .finalize = adts_dmx_finalize, SETCAPS(ADTSDmxCaps), .configure_pid = adts_dmx_configure_pid, .process = adts_dmx_process, .probe_data = adts_dmx_probe_data, .process_event = adts_dmx_process_event }; const GF_FilterRegister *adts_dmx_register(GF_FilterSession *session) { return &ADTSDmxRegister; } #else const GF_FilterRegister *adts_dmx_register(GF_FilterSession *session) { return NULL; } #endif // GPAC_DISABLE_AV_PARSERS
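Both adts_dmx_process() and adts_dmx_probe_data() in the file above decode the ID3v2 tag length as a 28-bit syncsafe integer, taking seven significant bits from each of the four size bytes. A self-contained sketch of that computation, using a hypothetical helper name (the shipped code inlines the same expression):

	#include <stdint.h>

	/* Decode the 4 syncsafe size bytes at data[6..9] of an ID3v2 header.
	 * Each byte contributes 7 bits; the high bit of every byte is zero. */
	static uint32_t id3v2_syncsafe_size(const uint8_t *data)
	{
		return  (data[9] & 0x7f)
		      + ((data[8] & 0x7f) << 7)
		      + ((data[7] & 0x7f) << 14)
		      + ((data[6] & 0x7f) << 21);
	}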
null
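For illustration of why the guard in the ground_truth matters: frame_size is a 13-bit field and hdr_size is 7 or 9 (or more with per-block CRCs), but both are held in u32, so frame_size - hdr_size underflows silently. A tiny standalone demonstration of that wraparound with hypothetical values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t frame_size = 5;   /* malformed: smaller than the header */
		uint32_t hdr_size   = 7;   /* ADTS header size with CRC */
		uint32_t size = frame_size - hdr_size; /* wraps to 4294967294 */
		printf("size = %u\n", size);
		return 0;
	}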
266
CWE-787
CVE-2021-30020
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre, Romain Bouqueau, Cyril Concolato * Copyright (c) Telecom ParisTech 2000-2021 * All rights reserved * * This file is part of GPAC / Media Tools sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/internal/media_dev.h> #include <gpac/constants.h> #include <gpac/mpeg4_odf.h> #include <gpac/maths.h> #include <gpac/avparse.h> #ifndef GPAC_DISABLE_OGG #include <gpac/internal/ogg.h> #endif //uncomment/define globally to remove all bitstream parsing logging from code (this will break inspect mode ananlyze=bs) //#define GPAC_DISABLE_AVPARSE_LOGS #ifndef GPAC_DISABLE_AVPARSE_LOGS void gf_bs_log_idx(GF_BitStream *bs, u32 nBits, const char *fname, s64 val, s32 idx1, s32 idx2, s32 idx3); #define gf_bs_log(_bs, _nBits, _fname, _val) gf_bs_log_idx(_bs, _nBits, _fname, _val, -1, -1, -1) u32 gf_bs_read_int_log_idx3(GF_BitStream *bs, u32 nBits, const char *fname, s32 idx1, s32 idx2, s32 idx3) { u32 val = gf_bs_read_int(bs, nBits); gf_bs_log_idx(bs, nBits, fname, val, idx1, idx2, idx3); return val; } #define gf_bs_read_int_log(_bs, _nBits, _fname) gf_bs_read_int_log_idx3(_bs, _nBits, _fname, -1, -1, -1) #define gf_bs_read_int_log_idx(_bs, _nBits, _fname, _idx) gf_bs_read_int_log_idx3(_bs, _nBits, _fname, _idx, -1, -1) #define gf_bs_read_int_log_idx2(_bs, _nBits, _fname, _idx1, _idx2) gf_bs_read_int_log_idx3(_bs, _nBits, _fname, (s32) _idx1, (s32) _idx2, -1) #else #define gf_bs_log(_bs, _nBits, _fname, _val) #define gf_bs_log_idx(_bs, _nBits, _fname, _val, _idx1, _idx2, _idx3) #define gf_bs_read_int_log(_bs, _nbb, _f) gf_bs_read_int(_bs, _nbb) #define gf_bs_read_int_log_idx(_bs, _nbb, _f, _idx) gf_bs_read_int(_bs, _nbb) #define gf_bs_read_int_log_idx2(_bs, _nbb, _f, _idx1, _idx2) gf_bs_read_int(_bs, _nbb) #define gf_bs_read_int_log_idx3(_bs, _nbb, _f, _idx1, _idx2, _idx3) gf_bs_read_int(_bs, _nbb) #endif static const struct { u32 w, h; } std_par[] = { { 4, 3}, {3, 2}, {16, 9}, {5, 3}, {5, 4}, {8, 5}, {2, 1}, {1, 1}, {0, 0}, }; GF_EXPORT void gf_media_reduce_aspect_ratio(u32 *width, u32 *height) { u32 i = 0; u32 w = *width; u32 h = *height; while (std_par[i].w) { if (std_par[i].w * h == std_par[i].h * w) { *width = std_par[i].w; *height = std_par[i].h; return; } i++; } //not standard one, reduce by power of 2 i = 2; while (1) { if (w <= i) return; if (h <= i) return; if (w % i) return; if (h % i) return; *width = w / i; *height = h / i; i *= 2; } } GF_EXPORT void gf_media_get_reduced_frame_rate(u32 *timescale, u32 *sample_dur) { u32 res; if (!*sample_dur) return; res = *timescale / *sample_dur; if (res * (*sample_dur) == *timescale) { *timescale = res; *sample_dur = 1; } else if ((double)(*timescale * 1001 - (res + 1) * *sample_dur * 1000) / ((res + 1) * *sample_dur * 1000) < 0.001) { *timescale = (res + 1) * 1000; *sample_dur = 1001; } } struct __m4v_profile { u32 value; const char 
*name; } M4VProfiles[] = { {0x00, "Reserved (0x00) Profile"}, {0x01, "Simple Profile @ Level 1"}, {0x02, "Simple Profile @ Level 2"}, {0x03, "Simple Profile @ Level 3"}, {0x08, "Simple Profile @ Level 0"}, {0x10, "Simple Scalable Profile @ Level 0"}, {0x11, "Simple Scalable Profile @ Level 1"}, {0x12, "Simple Scalable Profile @ Level 2"}, {0x21, "Core Profile @ Level 1"}, {0x22, "Core Profile @ Level 2"}, {0x32, "Main Profile @ Level 2"}, {0x33, "Main Profile @ Level 3"}, {0x34, "Main Profile @ Level 4"}, {0x42, "N-bit Profile @ Level 2"}, {0x51, "Scalable Texture Profile @ Level 1"}, {0x61, "Simple Face Animation Profile @ Level 1"}, {0x62, "Simple Face Animation Profile @ Level 2"}, {0x63, "Simple FBA Profile @ Level 1"}, {0x64, "Simple FBA Profile @ Level 2"}, {0x71, "Basic Animated Texture Profile @ Level 1"}, {0x72, "Basic Animated Texture Profile @ Level 2"}, {0x7F, "AVC/H264 Profile"}, {0x81, "Hybrid Profile @ Level 1"}, {0x82, "Hybrid Profile @ Level 2"}, {0x91, "Advanced Real Time Simple Profile @ Level 1"}, {0x92, "Advanced Real Time Simple Profile @ Level 2"}, {0x93, "Advanced Real Time Simple Profile @ Level 3"}, {0x94, "Advanced Real Time Simple Profile @ Level 4"}, {0xA1, "Core Scalable Profile @ Level1"}, {0xA2, "Core Scalable Profile @ Level2"}, {0xA3, "Core Scalable Profile @ Level3"}, {0xB1, "Advanced Coding Efficiency Profile @ Level 1"}, {0xB2, "Advanced Coding Efficiency Profile @ Level 2"}, {0xB3, "Advanced Coding Efficiency Profile @ Level 3"}, {0xB4, "Advanced Coding Efficiency Profile @ Level 4"}, {0xC1, "Advanced Core Profile @ Level 1"}, {0xC2, "Advanced Core Profile @ Level 2"}, {0xD1, "Advanced Scalable Texture @ Level1"}, {0xD2, "Advanced Scalable Texture @ Level2"}, {0xE1, "Simple Studio Profile @ Level 1"}, {0xE2, "Simple Studio Profile @ Level 2"}, {0xE3, "Simple Studio Profile @ Level 3"}, {0xE4, "Simple Studio Profile @ Level 4"}, {0xE5, "Core Studio Profile @ Level 1"}, {0xE6, "Core Studio Profile @ Level 2"}, {0xE7, "Core Studio Profile @ Level 3"}, {0xE8, "Core Studio Profile @ Level 4"}, {0xF0, "Advanced Simple Profile @ Level 0"}, {0xF1, "Advanced Simple Profile @ Level 1"}, {0xF2, "Advanced Simple Profile @ Level 2"}, {0xF3, "Advanced Simple Profile @ Level 3"}, {0xF4, "Advanced Simple Profile @ Level 4"}, {0xF5, "Advanced Simple Profile @ Level 5"}, {0xF7, "Advanced Simple Profile @ Level 3b"}, {0xF8, "Fine Granularity Scalable Profile @ Level 0"}, {0xF9, "Fine Granularity Scalable Profile @ Level 1"}, {0xFA, "Fine Granularity Scalable Profile @ Level 2"}, {0xFB, "Fine Granularity Scalable Profile @ Level 3"}, {0xFC, "Fine Granularity Scalable Profile @ Level 4"}, {0xFD, "Fine Granularity Scalable Profile @ Level 5"}, {0xFE, "Not part of MPEG-4 Visual profiles"}, {0xFF, "No visual capability required"} }; GF_EXPORT const char *gf_m4v_get_profile_name(u8 video_pl) { u32 i, count = GF_ARRAY_LENGTH(M4VProfiles); for (i=0; i<count; i++) { if ((u32)video_pl == M4VProfiles[i].value) return M4VProfiles[i].name; } return "ISO Reserved Profile"; } #ifndef GPAC_DISABLE_AV_PARSERS #define MPEG12_START_CODE_PREFIX 0x000001 #define MPEG12_PICTURE_START_CODE 0x00000100 #define MPEG12_SLICE_MIN_START 0x00000101 #define MPEG12_SLICE_MAX_START 0x000001af #define MPEG12_USER_DATA_START_CODE 0x000001b2 #define MPEG12_SEQUENCE_START_CODE 0x000001b3 #define MPEG12_SEQUENCE_ERR_START_CODE 0x000001b4 #define MPEG12_EXT_START_CODE 0x000001b5 #define MPEG12_SEQUENCE_END_START_CODE 0x000001b7 #define MPEG12_GOP_START_CODE 0x000001b8 s32 gf_mv12_next_start_code(unsigned char 
*pbuffer, u32 buflen, u32 *optr, u32 *scode) { u32 value; u32 offset; if (buflen < 4) return -1; for (offset = 0; offset < buflen - 3; offset++, pbuffer++) { #ifdef GPAC_BIG_ENDIAN value = *(u32 *)pbuffer >> 8; #else value = (pbuffer[0] << 16) | (pbuffer[1] << 8) | (pbuffer[2] << 0); #endif if (value == MPEG12_START_CODE_PREFIX) { *optr = offset; *scode = (value << 8) | pbuffer[3]; return 0; } } return -1; } s32 gf_mv12_next_slice_start(unsigned char *pbuffer, u32 startoffset, u32 buflen, u32 *slice_offset) { u32 slicestart, code; while (gf_mv12_next_start_code(pbuffer + startoffset, buflen - startoffset, &slicestart, &code) >= 0) { if ((code >= MPEG12_SLICE_MIN_START) && (code <= MPEG12_SLICE_MAX_START)) { *slice_offset = slicestart + startoffset; return 0; } startoffset += slicestart + 4; } return -1; } /* MPEG-4 video (14496-2) */ struct __tag_m4v_parser { GF_BitStream *bs; Bool mpeg12, step_mode; u32 current_object_type; u32 force_next_obj_type; u64 current_object_start; u32 tc_dec, prev_tc_dec, tc_disp, prev_tc_disp; }; GF_EXPORT GF_M4VParser *gf_m4v_parser_new(u8 *data, u64 data_size, Bool mpeg12video) { GF_M4VParser *tmp; if (!data || !data_size) return NULL; GF_SAFEALLOC(tmp, GF_M4VParser); if (!tmp) return NULL; tmp->bs = gf_bs_new(data, data_size, GF_BITSTREAM_READ); tmp->mpeg12 = mpeg12video; return tmp; } GF_M4VParser *gf_m4v_parser_bs_new(GF_BitStream *bs, Bool mpeg12video) { GF_M4VParser *tmp; GF_SAFEALLOC(tmp, GF_M4VParser); if (!tmp) return NULL; tmp->bs = bs; tmp->mpeg12 = mpeg12video; return tmp; } GF_EXPORT void gf_m4v_parser_del(GF_M4VParser *m4v) { gf_bs_del(m4v->bs); gf_free(m4v); } GF_EXPORT void gf_m4v_parser_del_no_bs(GF_M4VParser *m4v) { gf_free(m4v); } GF_EXPORT void gf_m4v_parser_set_inspect(GF_M4VParser *m4v) { if (m4v) m4v->step_mode = 1; } GF_EXPORT u32 gf_m4v_parser_get_obj_type(GF_M4VParser *m4v) { if (m4v) return m4v->current_object_type; return 0; } #define M4V_CACHE_SIZE 4096 s32 M4V_LoadObject(GF_M4VParser *m4v) { u32 v, bpos, found; char m4v_cache[M4V_CACHE_SIZE]; u64 end, cache_start, load_size; if (!m4v) return 0; if (m4v->force_next_obj_type) { m4v->current_object_type = m4v->force_next_obj_type - 1; m4v->force_next_obj_type = 0; return (s32)m4v->current_object_type; } bpos = 0; found = 0; load_size = 0; end = 0; cache_start = 0; v = 0xffffffff; while (!end) { /*refill cache*/ if (bpos == (u32)load_size) { if (!gf_bs_available(m4v->bs)) break; load_size = gf_bs_available(m4v->bs); if (load_size > M4V_CACHE_SIZE) load_size = M4V_CACHE_SIZE; bpos = 0; cache_start = gf_bs_get_position(m4v->bs); gf_bs_read_data(m4v->bs, m4v_cache, (u32)load_size); } v = ((v << 8) & 0xFFFFFF00) | ((u8)m4v_cache[bpos]); bpos++; if ((v & 0xFFFFFF00) == 0x00000100) { end = cache_start + bpos - 4; found = 1; break; } } if (!found) return -1; m4v->current_object_start = end; gf_bs_seek(m4v->bs, end + 3); m4v->current_object_type = gf_bs_read_u8(m4v->bs); return (s32)m4v->current_object_type; } GF_EXPORT void gf_m4v_rewrite_pl(u8 **o_data, u32 *o_dataLen, u8 PL) { u32 pos = 0; unsigned char *data = (unsigned char *)*o_data; u32 dataLen = *o_dataLen; while (pos + 4 < dataLen) { if (!data[pos] && !data[pos + 1] && (data[pos + 2] == 0x01) && (data[pos + 3] == M4V_VOS_START_CODE)) { data[pos + 4] = PL; return; } pos++; } /*emulate VOS at beggining*/ (*o_data) = (char *)gf_malloc(sizeof(char)*(dataLen + 5)); (*o_data)[0] = 0; (*o_data)[1] = 0; (*o_data)[2] = 1; (*o_data)[3] = (char)M4V_VOS_START_CODE; (*o_data)[4] = PL; memcpy((*o_data + 5), data, sizeof(char)*dataLen); 
gf_free(data); (*o_dataLen) = dataLen + 5; } static GF_Err M4V_Reset(GF_M4VParser *m4v, u64 start) { gf_bs_seek(m4v->bs, start); assert(start < (u64)1<<31); m4v->current_object_start = (u32)start; m4v->current_object_type = 0; return GF_OK; } void gf_m4v_parser_reset(GF_M4VParser *m4v, u8 sc_type) { m4v->current_object_start = 0; m4v->current_object_type = 0; m4v->force_next_obj_type = sc_type; } static GF_Err gf_m4v_parse_config_mpeg12(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { unsigned char p[4]; u32 ext_type; s32 o_type; u8 go, par; if (!m4v || !dsi) return GF_BAD_PARAM; memset(dsi, 0, sizeof(GF_M4VDecSpecInfo)); dsi->VideoPL = 0; go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { case M2V_SEQ_START_CODE: dsi->RAP_stream = 1; gf_bs_read_data(m4v->bs, (char *)p, 4); dsi->width = (p[0] << 4) | ((p[1] >> 4) & 0xf); dsi->height = ((p[1] & 0xf) << 8) | p[2]; dsi->VideoPL = GF_CODECID_MPEG1; par = (p[3] >> 4) & 0xf; switch (par) { case 2: dsi->par_num = dsi->height / 3; dsi->par_den = dsi->width / 4; break; case 3: dsi->par_num = dsi->height / 9; dsi->par_den = dsi->width / 16; break; case 4: dsi->par_num = dsi->height / 2; dsi->par_den = dsi->width / 21; break; default: dsi->par_den = dsi->par_num = 0; break; } switch (p[3] & 0xf) { case 0: break; case 1: dsi->fps = 24000.0 / 1001.0; break; case 2: dsi->fps = 24.0; break; case 3: dsi->fps = 25.0; break; case 4: dsi->fps = 30000.0 / 1001.0; break; case 5: dsi->fps = 30.0; break; case 6: dsi->fps = 50.0; break; case 7: dsi->fps = ((60.0*1000.0) / 1001.0); break; case 8: dsi->fps = 60.0; break; case 9: dsi->fps = 1; break; case 10: dsi->fps = 5; break; case 11: dsi->fps = 10; break; case 12: dsi->fps = 12; break; case 13: dsi->fps = 15; break; } break; case M2V_EXT_START_CODE: gf_bs_read_data(m4v->bs, (char *)p, 4); ext_type = ((p[0] >> 4) & 0xf); if (ext_type == 1) { dsi->VideoPL = 0x65; dsi->height = ((p[1] & 0x1) << 13) | ((p[2] & 0x80) << 5) | (dsi->height & 0x0fff); dsi->width = (((p[2] >> 5) & 0x3) << 12) | (dsi->width & 0x0fff); } break; case M2V_PIC_START_CODE: if (dsi->width) go = 0; break; default: break; /*EOS*/ case -1: go = 0; m4v->current_object_start = gf_bs_get_position(m4v->bs); break; } } M4V_Reset(m4v, 0); return GF_OK; } static const struct { u32 w, h; } m4v_sar[6] = { { 0, 0 }, { 1, 1 }, { 12, 11 }, { 10, 11 }, { 16, 11 }, { 40, 33 } }; static u8 m4v_get_sar_idx(u32 w, u32 h) { u32 i; for (i = 0; i < 6; i++) { if ((m4v_sar[i].w == w) && (m4v_sar[i].h == h)) return i; } return 0xF; } static void gf_m4v_parse_vol(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { u8 verid, par; s32 clock_rate; u8 vpl = dsi->VideoPL; memset(dsi, 0, sizeof(GF_M4VDecSpecInfo)); dsi->VideoPL = vpl; verid = 0; dsi->RAP_stream = gf_bs_read_int(m4v->bs, 1); dsi->objectType = gf_bs_read_int(m4v->bs, 8); if (gf_bs_read_int(m4v->bs, 1)) { verid = gf_bs_read_int(m4v->bs, 4); gf_bs_read_int(m4v->bs, 3); } par = gf_bs_read_int(m4v->bs, 4); if (par == 0xF) { dsi->par_num = gf_bs_read_int(m4v->bs, 8); dsi->par_den = gf_bs_read_int(m4v->bs, 8); } else if (par<6) { dsi->par_num = m4v_sar[par].w; dsi->par_den = m4v_sar[par].h; } if (gf_bs_read_int(m4v->bs, 1)) { gf_bs_read_int(m4v->bs, 3); if (gf_bs_read_int(m4v->bs, 1)) gf_bs_read_int(m4v->bs, 79); } dsi->has_shape = gf_bs_read_int(m4v->bs, 2); if (dsi->has_shape && (verid!=1) ) gf_bs_read_int(m4v->bs, 4); gf_bs_read_int(m4v->bs, 1); /*clock rate*/ dsi->clock_rate = gf_bs_read_int(m4v->bs, 16); /*marker*/ gf_bs_read_int(m4v->bs, 1); clock_rate = dsi->clock_rate-1; if (clock_rate >= 65536) 
clock_rate = 65535; if (clock_rate > 0) { for (dsi->NumBitsTimeIncrement = 1; dsi->NumBitsTimeIncrement < 16; dsi->NumBitsTimeIncrement++) { if (clock_rate == 1) break; clock_rate = (clock_rate >> 1); } } else { /*fix from vivien for divX*/ dsi->NumBitsTimeIncrement = 1; } /*fixed FPS stream*/ dsi->time_increment = 0; if (gf_bs_read_int(m4v->bs, 1)) { dsi->time_increment = gf_bs_read_int(m4v->bs, dsi->NumBitsTimeIncrement); } if (!dsi->has_shape) { gf_bs_read_int(m4v->bs, 1); dsi->width = gf_bs_read_int(m4v->bs, 13); gf_bs_read_int(m4v->bs, 1); dsi->height = gf_bs_read_int(m4v->bs, 13); } else { dsi->width = dsi->height = 0; } gf_bs_align(m4v->bs); } static GF_Err gf_m4v_parse_config_mpeg4(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { s32 o_type; u8 go; if (!m4v || !dsi) return GF_BAD_PARAM; memset(dsi, 0, sizeof(GF_M4VDecSpecInfo)); go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { /*vosh*/ case M4V_VOS_START_CODE: dsi->VideoPL = (u8)gf_bs_read_u8(m4v->bs); break; case M4V_VOL_START_CODE: gf_m4v_parse_vol(m4v, dsi); /*shape will be done later*/ gf_bs_align(m4v->bs); break; case M4V_VOP_START_CODE: case M4V_GOV_START_CODE: go = 0; break; /*EOS*/ case -1: m4v->current_object_start = gf_bs_get_position(m4v->bs); return GF_EOS; /*don't interest us*/ case M4V_UDTA_START_CODE: default: break; } } return GF_OK; } GF_EXPORT GF_Err gf_m4v_parse_config(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { if (m4v->mpeg12) { return gf_m4v_parse_config_mpeg12(m4v, dsi); } else { return gf_m4v_parse_config_mpeg4(m4v, dsi); } } static GF_Err gf_m4v_parse_frame_mpeg12(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi, u8 *frame_type, u32 *time_inc, u64 *size, u64 *start, Bool *is_coded) { u8 go, hasVOP, firstObj, val; s32 o_type; if (!m4v || !size || !start || !frame_type) return GF_BAD_PARAM; *size = 0; firstObj = 1; hasVOP = 0; *is_coded = GF_FALSE; m4v->current_object_type = (u32)-1; *frame_type = 0; if (!m4v->step_mode) M4V_Reset(m4v, m4v->current_object_start); go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { case M2V_PIC_START_CODE: /*done*/ if (hasVOP) { go = 0; break; } if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } hasVOP = 1; *is_coded = 1; /*val = */gf_bs_read_u8(m4v->bs); val = gf_bs_read_u8(m4v->bs); *frame_type = ((val >> 3) & 0x7) - 1; break; case M2V_GOP_START_CODE: if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } if (hasVOP) go = 0; break; case M2V_SEQ_START_CODE: if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } if (hasVOP) { go = 0; break; } /**/ break; default: break; case -1: *size = gf_bs_get_position(m4v->bs) - *start; return GF_EOS; } if (m4v->step_mode) return GF_OK; } *size = m4v->current_object_start - *start; return GF_OK; } static GF_Err gf_m4v_parse_frame_mpeg4(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi, u8 *frame_type, u32 *time_inc, u64 *size, u64 *start, Bool *is_coded) { u8 go, hasVOP, firstObj, secs; s32 o_type; u32 vop_inc = 0; if (!m4v || !size || !start || !frame_type) return GF_BAD_PARAM; *size = 0; firstObj = 1; hasVOP = 0; *is_coded = 0; m4v->current_object_type = (u32)-1; *frame_type = 0; *start = 0; if (!m4v->step_mode) M4V_Reset(m4v, m4v->current_object_start); go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { case M4V_VOP_START_CODE: /*done*/ if (hasVOP) { go = 0; break; } if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } hasVOP = 1; /*coding type*/ *frame_type = gf_bs_read_int(m4v->bs, 2); /*modulo time base*/ secs = 0; while 
(gf_bs_read_int(m4v->bs, 1) != 0) secs++; /*no support for B frames in parsing*/ secs += (dsi->enh_layer || *frame_type!=2) ? m4v->tc_dec : m4v->tc_disp; /*marker*/ gf_bs_read_int(m4v->bs, 1); /*vop_time_inc*/ if (dsi->NumBitsTimeIncrement) vop_inc = gf_bs_read_int(m4v->bs, dsi->NumBitsTimeIncrement); m4v->prev_tc_dec = m4v->tc_dec; m4v->prev_tc_disp = m4v->tc_disp; if (dsi->enh_layer || *frame_type!=2) { m4v->tc_disp = m4v->tc_dec; m4v->tc_dec = secs; } *time_inc = secs * dsi->clock_rate + vop_inc; /*marker*/ gf_bs_read_int(m4v->bs, 1); /*coded*/ *is_coded = gf_bs_read_int(m4v->bs, 1); gf_bs_align(m4v->bs); break; case M4V_GOV_START_CODE: if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } if (hasVOP) go = 0; break; case M4V_VOL_START_CODE: if (m4v->step_mode) gf_m4v_parse_vol(m4v, dsi); case M4V_VOS_START_CODE: if (hasVOP) { go = 0; } else if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } break; case M4V_VO_START_CODE: default: break; case -1: *size = gf_bs_get_position(m4v->bs) - *start; return GF_EOS; } if (m4v->step_mode) return GF_OK; } assert(m4v->current_object_start >= *start); *size = m4v->current_object_start - *start; return GF_OK; } GF_EXPORT GF_Err gf_m4v_parse_frame(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi, u8 *frame_type, u32 *time_inc, u64 *size, u64 *start, Bool *is_coded) { if (m4v->mpeg12) { return gf_m4v_parse_frame_mpeg12(m4v, dsi, frame_type, time_inc, size, start, is_coded); } else { return gf_m4v_parse_frame_mpeg4(m4v, dsi, frame_type, time_inc, size, start, is_coded); } } GF_Err gf_m4v_rewrite_par(u8 **o_data, u32 *o_dataLen, s32 par_n, s32 par_d) { u64 start, end, size; GF_BitStream *mod; GF_M4VParser *m4v; Bool go = 1; m4v = gf_m4v_parser_new(*o_data, *o_dataLen, 0); mod = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); start = 0; while (go) { u32 type = M4V_LoadObject(m4v); end = gf_bs_get_position(m4v->bs) - 4; size = end - start; /*store previous object*/ if (size) { assert (size < (u64)1<<31); gf_bs_write_data(mod, *o_data + start, (u32)size); start = end; } switch (type) { case M4V_VOL_START_CODE: gf_bs_write_int(mod, 0, 8); gf_bs_write_int(mod, 0, 8); gf_bs_write_int(mod, 1, 8); gf_bs_write_int(mod, M4V_VOL_START_CODE, 8); gf_bs_write_int(mod, gf_bs_read_int(m4v->bs, 1), 1); gf_bs_write_int(mod, gf_bs_read_int(m4v->bs, 8), 8); start = gf_bs_read_int(m4v->bs, 1); gf_bs_write_int(mod, (u32)start, 1); if (start) { gf_bs_write_int(mod, gf_bs_read_int(m4v->bs, 7), 7); } start = gf_bs_read_int(m4v->bs, 4); if (start == 0xF) { gf_bs_read_int(m4v->bs, 8); gf_bs_read_int(m4v->bs, 8); } if ((par_n >= 0) && (par_d >= 0)) { u8 par = m4v_get_sar_idx(par_n, par_d); gf_bs_write_int(mod, par, 4); if (par == 0xF) { gf_bs_write_int(mod, par_n, 8); gf_bs_write_int(mod, par_d, 8); } } else { gf_bs_write_int(mod, 0x0, 4); } case -1: go = 0; break; default: break; } } while (gf_bs_bits_available(m4v->bs)) { u32 b = gf_bs_read_int(m4v->bs, 1); gf_bs_write_int(mod, b, 1); } gf_m4v_parser_del(m4v); gf_free(*o_data); gf_bs_get_content(mod, o_data, o_dataLen); gf_bs_del(mod); return GF_OK; } GF_EXPORT u64 gf_m4v_get_object_start(GF_M4VParser *m4v) { return m4v->current_object_start; } #if 0 //unused Bool gf_m4v_is_valid_object_type(GF_M4VParser *m4v) { return ((s32)m4v->current_object_type == -1) ? 
0 : 1; } #endif GF_EXPORT GF_Err gf_m4v_get_config(u8 *rawdsi, u32 rawdsi_size, GF_M4VDecSpecInfo *dsi) { GF_Err e; GF_M4VParser *vparse; if (!rawdsi || !rawdsi_size) return GF_NON_COMPLIANT_BITSTREAM; vparse = gf_m4v_parser_new(rawdsi, rawdsi_size, 0); e = gf_m4v_parse_config(vparse, dsi); dsi->next_object_start = (u32)vparse->current_object_start; gf_m4v_parser_del(vparse); return e < 0 ? e : GF_OK; } GF_EXPORT GF_Err gf_mpegv12_get_config(u8 *rawdsi, u32 rawdsi_size, GF_M4VDecSpecInfo *dsi) { GF_Err e; GF_M4VParser *vparse; if (!rawdsi || !rawdsi_size) return GF_NON_COMPLIANT_BITSTREAM; vparse = gf_m4v_parser_new(rawdsi, rawdsi_size, GF_TRUE); e = gf_m4v_parse_config(vparse, dsi); dsi->next_object_start = (u32)vparse->current_object_start; gf_m4v_parser_del(vparse); return e; } #endif /* AAC parser */ struct __m4a_oti { u32 type; const char *name; } M4AObjectTypes[] = { {0, "MPEG-4 Audio Reserved"}, {1, "MPEG-4 Audio AAC Main"}, {2, "MPEG-4 Audio AAC LC"}, {3, "MPEG-4 Audio AAC SSR"}, {4, "MPEG-4 Audio AAC LTP"}, {5, "MPEG-4 Audio SBR"}, {6, "MPEG-4 Audio AAC Scalable"}, {7, "MPEG-4 Audio TwinVQ"}, {8, "MPEG-4 Audio CELP"}, {9, "MPEG-4 Audio HVXC"}, {10, "MPEG-4 Audio Reserved"}, {11, "MPEG-4 Audio Reserved"}, {12, "MPEG-4 Audio TTSI"}, {13, "MPEG-4 Audio Main synthetic"}, {14, "MPEG-4 Audio Wavetable synthesis"}, {15, "MPEG-4 Audio General MIDI"}, {16, "MPEG-4 Audio Algorithmic Synthesis and Audio FX"}, {17, "MPEG-4 Audio ER AAC LC"}, {18, "MPEG-4 Audio Reserved"}, {19, "MPEG-4 Audio ER AAC LTP"}, {20, "MPEG-4 Audio ER AAC scalable"}, {21, "MPEG-4 Audio ER TwinVQ"}, {22, "MPEG-4 Audio ER BSAC"}, {23, "MPEG-4 Audio ER AAC LD"}, {24, "MPEG-4 Audio ER CELP"}, {25, "MPEG-4 Audio ER HVXC"}, {26, "MPEG-4 Audio ER HILN"}, {27, "MPEG-4 Audio ER Parametric"}, {28, "MPEG-4 Audio SSC"}, {29, "MPEG-4 Audio ParametricStereo"}, {30, "MPEG-4 Audio Reserved"}, {31, "MPEG-4 Audio Reserved"}, {32, "MPEG-1 Audio Layer-1"}, {33, "MPEG-1 Audio Layer-2"}, {34, "MPEG-1 Audio Layer-3"}, {35, "MPEG-4 Audio DST"}, {36, "MPEG-4 Audio ALS"}, {37, "MPEG-4 Audio SLS"}, {42, "MPEG Audio xHE-AAC"}, }; GF_EXPORT const char *gf_m4a_object_type_name(u32 objectType) { u32 i, count = GF_ARRAY_LENGTH(M4AObjectTypes); for (i=0; i<count; i++) { if (objectType==M4AObjectTypes[i].type) return M4AObjectTypes[i].name; } return "MPEG-4 Audio Unknown"; } struct __m4a_profile { u32 value; const char *name; } M4AProfiles[] = { {0x00, "ISO Reserved (0x00)"}, {0x01, "Main Audio Profile @ Level 1"}, {0x02, "Main Audio Profile @ Level 2"}, {0x03, "Main Audio Profile @ Level 3"}, {0x04, "Main Audio Profile @ Level 4"}, {0x05, "Scalable Audio Profile @ Level 1"}, {0x06, "Scalable Audio Profile @ Level 2"}, {0x07, "Scalable Audio Profile @ Level 3"}, {0x08, "Scalable Audio Profile @ Level 4"}, {0x09, "Speech Audio Profile @ Level 1"}, {0x0A, "Speech Audio Profile @ Level 2"}, {0x0B, "Synthetic Audio Profile @ Level 1"}, {0x0C, "Synthetic Audio Profile @ Level 2"}, {0x0D, "Synthetic Audio Profile @ Level 3"}, {0x0E, "High Quality Audio Profile @ Level 1"}, {0x0F, "High Quality Audio Profile @ Level 2"}, {0x10, "High Quality Audio Profile @ Level 3"}, {0x11, "High Quality Audio Profile @ Level 4"}, {0x12, "High Quality Audio Profile @ Level 5"}, {0x13, "High Quality Audio Profile @ Level 6"}, {0x14, "High Quality Audio Profile @ Level 7"}, {0x15, "High Quality Audio Profile @ Level 8"}, {0x16, "Low Delay Audio Profile @ Level 1"}, {0x17, "Low Delay Audio Profile @ Level 2"}, {0x18, "Low Delay Audio Profile @ Level 3"}, {0x19, "Low Delay 
Audio Profile @ Level 4"}, {0x1A, "Low Delay Audio Profile @ Level 5"}, {0x1B, "Low Delay Audio Profile @ Level 6"}, {0x1C, "Low Delay Audio Profile @ Level 7"}, {0x1D, "Low Delay Audio Profile @ Level 8"}, {0x1E, "Natural Audio Profile @ Level 1"}, {0x1F, "Natural Audio Profile @ Level 2"}, {0x20, "Natural Audio Profile @ Level 3"}, {0x21, "Natural Audio Profile @ Level 4"}, {0x22, "Mobile Audio Internetworking Profile @ Level 1"}, {0x23, "Mobile Audio Internetworking Profile @ Level 2"}, {0x24, "Mobile Audio Internetworking Profile @ Level 3"}, {0x25, "Mobile Audio Internetworking Profile @ Level 4"}, {0x26, "Mobile Audio Internetworking Profile @ Level 5"}, {0x27, "Mobile Audio Internetworking Profile @ Level 6"}, {0x28, "AAC Profile @ Level 1"}, {0x29, "AAC Profile @ Level 2"}, {0x2A, "AAC Profile @ Level 4"}, {0x2B, "AAC Profile @ Level 5"}, {0x2C, "High Efficiency AAC Profile @ Level 2"}, {0x2D, "High Efficiency AAC Profile @ Level 3"}, {0x2E, "High Efficiency AAC Profile @ Level 4"}, {0x2F, "High Efficiency AAC Profile @ Level 5"}, {0x30, "High Efficiency AAC v2 Profile @ Level 2"}, {0x31, "High Efficiency AAC v2 Profile @ Level 3"}, {0x32, "High Efficiency AAC v2 Profile @ Level 4"}, {0x33, "High Efficiency AAC v2 Profile @ Level 5"}, {0x34, "Low Delay AAC Profile"}, {0x35, "Baseline MPEG Surround Profile @ Level 1"}, {0x36, "Baseline MPEG Surround Profile @ Level 2"}, {0x37, "Baseline MPEG Surround Profile @ Level 3"}, {0x38, "Baseline MPEG Surround Profile @ Level 4"}, {0x39, "Baseline MPEG Surround Profile @ Level 5"}, {0x3A, "Baseline MPEG Surround Profile @ Level 6"}, {0x3B, "High Definition AAC Profile @ Level 1"}, {0x3C, "ALS Simple Profile @ Level 1"}, {0x50, "AAC Profile @ Level 6"}, {0x51, "AAC Profile @ Level 7"}, {0x52, "High Efficiency AAC Profile @ Level 6"}, {0x53, "High Efficiency AAC Profile @ Level 7"}, {0x54, "High Efficiency AAC v2 Profile @ Level 6"}, {0x55, "High Efficiency AAC v2 Profile @ Level 7"}, {0x56, "Extended High Efficiency AAC Profile @ Level 6"}, {0x57, "Extended High Efficiency AAC Profile @ Level 7"}, {0xFE, "Not part of MPEG-4 audio profiles"}, {0xFF, "No audio capability required"} }; GF_EXPORT const char *gf_m4a_get_profile_name(u8 audio_pl) { u32 i, count = GF_ARRAY_LENGTH(M4AProfiles); for (i=0; i<count; i++) { if ((u32) audio_pl==M4AProfiles[i].value) return M4AProfiles[i].name; } return "ISO Reserved / User Private"; } #ifndef GPAC_DISABLE_AV_PARSERS GF_EXPORT u32 gf_m4a_get_profile(GF_M4ADecSpecInfo *cfg) { switch (cfg->base_object_type) { case 2: /*AAC LC*/ if (cfg->nb_chan <= 2) return (cfg->base_sr <= 24000) ? 0x28 : 0x29; /*LC@L1 or LC@L2*/ if (cfg->nb_chan <= 5) return (cfg->base_sr <= 48000) ? 0x2A : 0x2B; /*LC@L4 or LC@L5*/ return (cfg->base_sr <= 48000) ? 0x50 : 0x51; /*LC@L4 or LC@L5*/ case 5: /*HE-AAC - SBR*/ if (cfg->nb_chan <= 2) return (cfg->base_sr <= 24000) ? 0x2C : 0x2D; /*HE@L2 or HE@L3*/ if (cfg->nb_chan <= 5) return (cfg->base_sr <= 48000) ? 0x2E : 0x2F; /*HE@L4 or HE@L5*/ return (cfg->base_sr <= 48000) ? 0x52 : 0x53; /*HE@L6 or HE@L7*/ case 29: /*HE-AACv2 - SBR+PS*/ if (cfg->nb_chan <= 2) return (cfg->base_sr <= 24000) ? 0x30 : 0x31; /*HE-AACv2@L2 or HE-AACv2@L3*/ if (cfg->nb_chan <= 5) return (cfg->base_sr <= 48000) ? 0x32 : 0x33; /*HE-AACv2@L4 or HE-AACv2@L5*/ return (cfg->base_sr <= 48000) ? 0x54 : 0x55; /*HE-AACv2@L6 or HE-AACv2@L7*/ /*default to HQ*/ default: if (cfg->nb_chan <= 2) return (cfg->base_sr < 24000) ? 
0x0E : 0x0F; /*HQ@L1 or HQ@L2*/ return 0x10; /*HQ@L3*/ } } GF_EXPORT GF_Err gf_m4a_parse_program_config_element(GF_BitStream *bs, GF_M4ADecSpecInfo *cfg) { u32 i; cfg->program_config_element_present = 1; cfg->cpe_channels = 0; cfg->element_instance_tag = gf_bs_read_int_log(bs, 4, "element_instance_tag"); cfg->object_type = gf_bs_read_int_log(bs, 2, "object_type"); cfg->sampling_frequency_index = gf_bs_read_int_log(bs, 4, "sampling_frequency_index"); cfg->num_front_channel_elements = gf_bs_read_int_log(bs, 4, "num_front_channel_elements"); cfg->num_side_channel_elements = gf_bs_read_int_log(bs, 4, "num_side_channel_elements"); cfg->num_back_channel_elements = gf_bs_read_int_log(bs, 4, "num_back_channel_elements"); cfg->num_lfe_channel_elements = gf_bs_read_int_log(bs, 2, "num_lfe_channel_elements"); cfg->num_assoc_data_elements = gf_bs_read_int_log(bs, 3, "num_assoc_data_elements"); cfg->num_valid_cc_elements = gf_bs_read_int_log(bs, 4, "num_valid_cc_elements"); cfg->mono_mixdown_present = (Bool)gf_bs_read_int_log(bs, 1, "mono_mixdown_present"); if (cfg->mono_mixdown_present) { cfg->mono_mixdown_element_number = gf_bs_read_int_log(bs, 4, "mono_mixdown_element_number"); } cfg->stereo_mixdown_present = gf_bs_read_int_log(bs, 1, "stereo_mixdown_present"); if (cfg->stereo_mixdown_present) { cfg->stereo_mixdown_element_number = gf_bs_read_int_log(bs, 4, "stereo_mixdown_element_number"); } cfg->matrix_mixdown_idx_present = gf_bs_read_int_log(bs, 1, "matrix_mixdown_idx_present"); if (cfg->matrix_mixdown_idx_present) { cfg->matrix_mixdown_idx = gf_bs_read_int_log(bs, 2, "matrix_mixdown_idx"); cfg->pseudo_surround_enable = gf_bs_read_int_log(bs, 1, "pseudo_surround_enable"); } for (i = 0; i < cfg->num_front_channel_elements; i++) { cfg->front_element_is_cpe[i] = gf_bs_read_int_log_idx(bs, 1, "front_element_is_cpe", i); cfg->front_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "front_element_tag_select", i); if (cfg->front_element_is_cpe[i]) cfg->cpe_channels++; } for (i = 0; i < cfg->num_side_channel_elements; i++) { cfg->side_element_is_cpe[i] = gf_bs_read_int_log_idx(bs, 1, "side_element_is_cpe", i); cfg->side_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "side_element_tag_select", i); if (cfg->side_element_is_cpe[i]) cfg->cpe_channels++; } for (i = 0; i < cfg->num_back_channel_elements; i++) { cfg->back_element_is_cpe[i] = gf_bs_read_int_log_idx(bs, 1, "back_element_is_cpe", i); cfg->back_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "back_element_tag_select", i); if (cfg->back_element_is_cpe[i]) cfg->cpe_channels++; } for (i = 0; i < cfg->num_lfe_channel_elements; i++) { cfg->lfe_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "lfe_element_tag_select", i); } for (i = 0; i < cfg->num_assoc_data_elements; i++) { cfg->assoc_data_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "assoc_data_element_tag_select", i); } for (i = 0; i < cfg->num_valid_cc_elements; i++) { cfg->cc_element_is_ind_sw[i] = gf_bs_read_int_log_idx(bs, 1, "cc_element_is_ind_sw", i); cfg->valid_cc_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "valid_cc_element_tag_select", i); } gf_bs_align(bs); cfg->comment_field_bytes = gf_bs_read_int_log(bs, 8, "comment_field_bytes"); gf_bs_read_data(bs, (char *)cfg->comments, cfg->comment_field_bytes); cfg->nb_chan = cfg->num_front_channel_elements + cfg->num_back_channel_elements + cfg->num_side_channel_elements + cfg->num_lfe_channel_elements; cfg->nb_chan += cfg->cpe_channels; return GF_OK; } GF_EXPORT GF_Err gf_m4a_parse_config(GF_BitStream *bs, 
GF_M4ADecSpecInfo *cfg, Bool size_known) { u32 audio_obj_type; memset(cfg, 0, sizeof(GF_M4ADecSpecInfo)); cfg->base_object_type = gf_bs_read_int_log(bs, 5, "base_object_type"); /*extended object type*/ if (cfg->base_object_type == 31) { cfg->base_object_type = 32 + gf_bs_read_int_log(bs, 6, "extended_base_object_type"); } cfg->base_sr_index = gf_bs_read_int_log(bs, 4, "base_samplerate_index"); if (cfg->base_sr_index == 0x0F) { cfg->base_sr = gf_bs_read_int_log(bs, 24, "base_samplerate"); } else { cfg->base_sr = GF_M4ASampleRates[cfg->base_sr_index]; } cfg->chan_cfg = gf_bs_read_int_log(bs, 4, "channel_configuration"); if (cfg->chan_cfg) { cfg->nb_chan = GF_M4ANumChannels[cfg->chan_cfg - 1]; } audio_obj_type = cfg->base_object_type; if (cfg->base_object_type == 5 || cfg->base_object_type == 29) { if (cfg->base_object_type == 29) { cfg->has_ps = 1; cfg->nb_chan = 1; } cfg->has_sbr = GF_TRUE; cfg->sbr_sr_index = gf_bs_read_int_log(bs, 4, "sbr_samplerate_index"); if (cfg->sbr_sr_index == 0x0F) { cfg->sbr_sr = gf_bs_read_int_log(bs, 24, "sbr_samplerate"); } else { cfg->sbr_sr = GF_M4ASampleRates[cfg->sbr_sr_index]; } cfg->sbr_object_type = gf_bs_read_int_log(bs, 5, "sbr_object_type"); if (cfg->sbr_object_type==31) cfg->sbr_object_type = 32 + gf_bs_read_int_log(bs, 6, "audioObjectTypeExt"); audio_obj_type = cfg->sbr_object_type; if (cfg->sbr_object_type==22) { /*ext_chan_cfg = */gf_bs_read_int_log(bs, 4, "channel_configuration"); } } /*object cfg*/ switch (audio_obj_type) { case 1: case 2: case 3: case 4: case 6: case 7: case 17: case 19: case 20: case 21: case 22: case 23: case 42: { Bool ext_flag; gf_bs_read_int_log(bs, 1, "frame_length_flag"); if (gf_bs_read_int_log(bs, 1, "depends_on_core_coder")) gf_bs_read_int_log(bs, 14, "delay"); ext_flag = gf_bs_read_int_log(bs, 1, "extension_flag"); if (!cfg->chan_cfg) { gf_m4a_parse_program_config_element(bs, cfg); } if ((cfg->base_object_type == 6) || (cfg->base_object_type == 20)) { gf_bs_read_int_log(bs, 3, "layerN"); } if (ext_flag) { if (cfg->base_object_type == 22) { gf_bs_read_int_log(bs, 5, "numOfSubFrame"); gf_bs_read_int_log(bs, 11, "layer_length"); } if ((cfg->base_object_type == 17) || (cfg->base_object_type == 19) || (cfg->base_object_type == 20) || (cfg->base_object_type == 23) ) { gf_bs_read_int_log(bs, 1, "aacSectionDataResilienceFlag"); gf_bs_read_int_log(bs, 1, "aacScalefactorDataResilienceFlag"); gf_bs_read_int_log(bs, 1, "aacSpectralDataResilienceFlag"); } gf_bs_read_int_log(bs, 1, "extensionFlag3"); } } break; } /*ER cfg*/ switch (audio_obj_type) { case 17: case 19: case 20: case 21: case 22: case 23: case 24: case 25: case 26: case 27: { u32 epConfig = gf_bs_read_int_log(bs, 2, "epConfig"); if ((epConfig == 2) || (epConfig == 3)) { } if (epConfig == 3) { gf_bs_read_int_log(bs, 1, "directMapping"); } } break; } if (size_known && (cfg->base_object_type != 5) && (cfg->base_object_type != 29)) { while (gf_bs_available(bs) >= 2) { u32 sync = gf_bs_peek_bits(bs, 11, 0); if (sync == 0x2b7) { gf_bs_read_int_log(bs, 11, "syncExtensionType"); cfg->sbr_object_type = gf_bs_read_int_log(bs, 5, "extensionAudioObjectType "); cfg->has_sbr = gf_bs_read_int_log(bs, 1, "sbrPresentFlag"); if (cfg->has_sbr) { cfg->sbr_sr_index = gf_bs_read_int_log(bs, 4, "extensionSamplingFrequencyIndex"); if (cfg->sbr_sr_index == 0x0F) { cfg->sbr_sr = gf_bs_read_int_log(bs, 24, "extensionSamplingFrequency"); } else { cfg->sbr_sr = GF_M4ASampleRates[cfg->sbr_sr_index]; } } } else if (sync == 0x548) { gf_bs_read_int_log(bs, 11, "syncExtensionType"); cfg->has_ps = 
gf_bs_read_int_log(bs, 1, "hasParametricStereo"); if (cfg->has_ps) cfg->nb_chan = 1; } else { break; } } } cfg->audioPL = gf_m4a_get_profile(cfg); return GF_OK; } GF_EXPORT GF_Err gf_m4a_get_config(u8 *dsi, u32 dsi_size, GF_M4ADecSpecInfo *cfg) { GF_BitStream *bs; if (!dsi || !dsi_size || (dsi_size < 2)) return GF_NON_COMPLIANT_BITSTREAM; bs = gf_bs_new(dsi, dsi_size, GF_BITSTREAM_READ); gf_m4a_parse_config(bs, cfg, GF_TRUE); gf_bs_del(bs); return GF_OK; } u32 gf_latm_get_value(GF_BitStream *bs) { u32 i, tmp, value = 0; u32 bytesForValue = gf_bs_read_int(bs, 2); for (i = 0; i <= bytesForValue; i++) { value <<= 8; tmp = gf_bs_read_int(bs, 8); value += tmp; } return value; } GF_EXPORT u32 gf_m4a_get_channel_cfg(u32 nb_chan) { u32 i, count = sizeof(GF_M4ANumChannels) / sizeof(u32); for (i = 0; i < count; i++) { if (GF_M4ANumChannels[i] == nb_chan) return i + 1; } return 0; } GF_EXPORT GF_Err gf_m4a_write_program_config_element_bs(GF_BitStream *bs, GF_M4ADecSpecInfo *cfg) { u32 i; gf_bs_write_int(bs, cfg->element_instance_tag, 4); gf_bs_write_int(bs, cfg->object_type, 2); gf_bs_write_int(bs, cfg->sampling_frequency_index, 4); gf_bs_write_int(bs, cfg->num_front_channel_elements, 4); gf_bs_write_int(bs, cfg->num_side_channel_elements, 4); gf_bs_write_int(bs, cfg->num_back_channel_elements, 4); gf_bs_write_int(bs, cfg->num_lfe_channel_elements, 2); gf_bs_write_int(bs, cfg->num_assoc_data_elements, 3); gf_bs_write_int(bs, cfg->num_valid_cc_elements, 4); gf_bs_write_int(bs, cfg->mono_mixdown_present, 1); if (cfg->mono_mixdown_present) { gf_bs_write_int(bs, cfg->mono_mixdown_element_number, 4); } gf_bs_write_int(bs, cfg->stereo_mixdown_present, 1); if (cfg->stereo_mixdown_present) { gf_bs_write_int(bs, cfg->stereo_mixdown_element_number, 4); } gf_bs_write_int(bs, cfg->matrix_mixdown_idx_present, 1); if (cfg->matrix_mixdown_idx_present) { gf_bs_write_int(bs, cfg->matrix_mixdown_idx, 2); gf_bs_write_int(bs, cfg->pseudo_surround_enable, 1); } for (i = 0; i < cfg->num_front_channel_elements; i++) { gf_bs_write_int(bs, cfg->front_element_is_cpe[i], 1); gf_bs_write_int(bs, cfg->front_element_tag_select[i], 4); } for (i = 0; i < cfg->num_side_channel_elements; i++) { gf_bs_write_int(bs, cfg->side_element_is_cpe[i], 1); gf_bs_write_int(bs, cfg->side_element_tag_select[i], 4); } for (i = 0; i < cfg->num_back_channel_elements; i++) { gf_bs_write_int(bs, cfg->back_element_is_cpe[i], 1); gf_bs_write_int(bs, cfg->back_element_tag_select[i], 4); } for (i = 0; i < cfg->num_lfe_channel_elements; i++) { gf_bs_write_int(bs, cfg->lfe_element_tag_select[i], 4); } for (i = 0; i < cfg->num_assoc_data_elements; i++) { gf_bs_write_int(bs, cfg->assoc_data_element_tag_select[i], 4); } for (i = 0; i < cfg->num_valid_cc_elements; i++) { gf_bs_write_int(bs, cfg->cc_element_is_ind_sw[i], 1); gf_bs_write_int(bs, cfg->valid_cc_element_tag_select[i], 4); } gf_bs_align(bs); gf_bs_write_int(bs, cfg->comment_field_bytes, 8); gf_bs_write_data(bs, (char *)cfg->comments, cfg->comment_field_bytes); return GF_OK; } GF_EXPORT GF_Err gf_m4a_write_config_bs(GF_BitStream *bs, GF_M4ADecSpecInfo *cfg) { if (!cfg->base_sr_index) { if (!cfg->base_sr) return GF_BAD_PARAM; while (GF_M4ASampleRates[cfg->base_sr_index]) { if (GF_M4ASampleRates[cfg->base_sr_index] == cfg->base_sr) break; cfg->base_sr_index++; } } if (cfg->sbr_sr && !cfg->sbr_sr_index) { while (GF_M4ASampleRates[cfg->sbr_sr_index]) { if (GF_M4ASampleRates[cfg->sbr_sr_index] == cfg->sbr_sr) break; cfg->sbr_sr_index++; } } /*extended object type*/ if (cfg->base_object_type >= 32) { 
gf_bs_write_int(bs, 31, 5); gf_bs_write_int(bs, cfg->base_object_type - 32, 6); } else { gf_bs_write_int(bs, cfg->base_object_type, 5); } gf_bs_write_int(bs, cfg->base_sr_index, 4); if (cfg->base_sr_index == 0x0F) { gf_bs_write_int(bs, cfg->base_sr, 24); } if (cfg->program_config_element_present) { gf_bs_write_int(bs, 0, 4); } else { cfg->chan_cfg = gf_m4a_get_channel_cfg(cfg->nb_chan); if (!cfg->chan_cfg) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AAC] Cannot write decoder config, ProgramConfigElement is missing and channel configuration is not a predefined one !\n")); return GF_BAD_PARAM; } gf_bs_write_int(bs, cfg->chan_cfg, 4); } if (cfg->base_object_type == 5 || cfg->base_object_type == 29) { if (cfg->base_object_type == 29) { cfg->has_ps = 1; cfg->nb_chan = 1; } cfg->has_sbr = 1; gf_bs_write_int(bs, cfg->sbr_sr_index, 4); if (cfg->sbr_sr_index == 0x0F) { gf_bs_write_int(bs, cfg->sbr_sr, 24); } gf_bs_write_int(bs, cfg->sbr_object_type, 5); } /*object cfg*/ switch (cfg->base_object_type) { case 1: case 2: case 3: case 4: case 6: case 7: case 17: case 19: case 20: case 21: case 22: case 23: case 42: { /*frame length flag*/ gf_bs_write_int(bs, 0, 1); /*depends on core coder*/ gf_bs_write_int(bs, 0, 1); /*ext flag*/ gf_bs_write_int(bs, 0, 1); if (cfg->program_config_element_present) { gf_m4a_write_program_config_element_bs(bs, cfg); } if ((cfg->base_object_type == 6) || (cfg->base_object_type == 20)) { gf_bs_write_int(bs, 0, 3); } } break; } /*ER cfg - not supported*/ /*implicit sbr/ps signaling not written here, cf reframe_adts*/ return GF_OK; } GF_EXPORT GF_Err gf_m4a_write_config(GF_M4ADecSpecInfo *cfg, u8 **dsi, u32 *dsi_size) { GF_BitStream *bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_m4a_write_config_bs(bs, cfg); gf_bs_get_content(bs, dsi, dsi_size); gf_bs_del(bs); return GF_OK; } /*AV1 parsing*/ static u32 av1_read_ns(GF_BitStream *bs, u32 n, const char *fname) { u32 v, res; Bool extra_bit; int w = (u32)(log(n) / log(2)) + 1; u32 m = (1 << w) - n; assert(w < 32); v = gf_bs_read_int(bs, w - 1); if (v < m) { if (fname) { gf_bs_log(bs, w-1, fname, v); } return v; } extra_bit = gf_bs_read_int(bs, 1); res = (v << 1) - m + extra_bit; if (fname) { gf_bs_log(bs, w, fname, res); } return res; } static void av1_color_config(GF_BitStream *bs, AV1State *state) { state->config->high_bitdepth = gf_bs_read_int_log(bs, 1, "high_bitdepth"); state->bit_depth = 8; if (state->config->seq_profile == 2 && state->config->high_bitdepth) { state->config->twelve_bit = gf_bs_read_int_log(bs, 1, "twelve_bit"); state->bit_depth = state->config->twelve_bit ? 12 : 10; } else if (state->config->seq_profile <= 2) { state->bit_depth = state->config->high_bitdepth ? 10 : 8; } state->config->monochrome = GF_FALSE; if (state->config->seq_profile == 1) { state->config->monochrome = GF_FALSE; } else { state->config->monochrome = gf_bs_read_int_log(bs, 1, "monochrome"); } /*NumPlanes = mono_chrome ? 
1 : 3;*/ state->color_description_present_flag = gf_bs_read_int_log(bs, 1, "color_description_present_flag"); if (state->color_description_present_flag) { state->color_primaries = gf_bs_read_int_log(bs, 8, "color_primaries"); state->transfer_characteristics = gf_bs_read_int_log(bs, 8, "transfer_characteristics"); state->matrix_coefficients = gf_bs_read_int_log(bs, 8, "matrix_coefficients"); } else { state->color_primaries = 2/*CP_UNSPECIFIED*/; state->transfer_characteristics = 2/*TC_UNSPECIFIED*/; state->matrix_coefficients = 2/*MC_UNSPECIFIED*/; } if (state->config->monochrome) { state->color_range = gf_bs_read_int_log(bs, 1, "color_range"); state->config->chroma_subsampling_x = GF_TRUE; state->config->chroma_subsampling_y = GF_TRUE; state->config->chroma_sample_position = 0/*CSP_UNKNOWN*/; state->separate_uv_delta_q = 0; return; } else if (state->color_primaries == 0/*CP_BT_709*/ && state->transfer_characteristics == 13/*TC_SRGB*/ && state->matrix_coefficients == 0/*MC_IDENTITY*/) { state->color_range = GF_TRUE; state->config->chroma_subsampling_x = GF_FALSE; state->config->chroma_subsampling_y = GF_FALSE; } else { state->config->chroma_subsampling_x = GF_FALSE; state->config->chroma_subsampling_y = GF_FALSE; state->color_range = gf_bs_read_int_log(bs, 1, "color_range"); if (state->config->seq_profile == 0) { state->config->chroma_subsampling_x = GF_TRUE; state->config->chroma_subsampling_y = GF_TRUE; } else if (state->config->seq_profile == 1) { state->config->chroma_subsampling_x = GF_FALSE; state->config->chroma_subsampling_y = GF_FALSE; } else { if (state->bit_depth == 12) { state->config->chroma_subsampling_x = gf_bs_read_int_log(bs, 1, "chroma_subsampling_x"); if (state->config->chroma_subsampling_x) state->config->chroma_subsampling_y = gf_bs_read_int_log(bs, 1, "chroma_subsampling_y"); else state->config->chroma_subsampling_y = GF_FALSE; } else { state->config->chroma_subsampling_x = GF_TRUE; state->config->chroma_subsampling_y = GF_FALSE; } } if (state->config->chroma_subsampling_x && state->config->chroma_subsampling_y) { state->config->chroma_sample_position = gf_bs_read_int_log(bs, 2, "chroma_sample_position"); } } state->separate_uv_delta_q = gf_bs_read_int_log(bs, 1, "separate_uv_delta_q"); } static u32 av1_uvlc(GF_BitStream *bs, const char *fname) { u32 res; u8 leadingZeros = 0; while (1) { Bool done = gf_bs_read_int(bs, 1); if (done) break; leadingZeros++; } if (leadingZeros >= 32) { return 0xFFFFFFFF; } res = gf_bs_read_int(bs, leadingZeros) + (1 << leadingZeros) - 1; gf_bs_log(bs, 2*leadingZeros, fname, res); return res; } static void timing_info(GF_BitStream *bs, AV1State *state) { u32 time_scale = 0; u32 num_units_in_display_tick = gf_bs_read_int_log(bs, 32, "num_units_in_display_tick"); if (num_units_in_display_tick == 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] num_units_in_display_tick must be greater than 0.\n")); } time_scale = gf_bs_read_int_log(bs, 32, "time_scale"); if (time_scale == 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] time_scale must be greater than 0.\n")); } state->equal_picture_interval = gf_bs_read_int_log(bs, 1, "equal_picture_interval"); if (state->equal_picture_interval) { u32 num_ticks_per_picture_minus_1 = av1_uvlc(bs, "num_ticks_per_picture_minus_1"); state->tb_num = time_scale; state->tb_den = (num_ticks_per_picture_minus_1 + 1)*num_units_in_display_tick; } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] VFR not supported.\n")); //TODO: upload num_units_in_display_tick (eq. 
to the POC in H264), compute delta between frames, set it as dts_inc in gf_import_aom_av1() } } static void decoder_model_info(AV1State *state, GF_BitStream *bs) { state->buffer_delay_length = 1 + gf_bs_read_int_log(bs, 5, "buffer_delay_length_minus1"); gf_bs_read_int_log(bs, 32, "num_units_in_decoding_tick"); state->buffer_removal_time_length = gf_bs_read_int_log(bs, 5, "buffer_removal_time_length"); state->frame_presentation_time_length = 1 + gf_bs_read_int_log(bs, 5, "frame_presentation_time_length_minus1"); } static void operating_parameters_info(GF_BitStream *bs, const u8 idx, const u8 buffer_delay_length_minus_1) { const u8 n = buffer_delay_length_minus_1 + 1; gf_bs_read_int_log(bs, n, "decoder_buffer_delay"); gf_bs_read_int_log(bs, n, "encoder_buffer_delay"); gf_bs_read_int_log(bs, 1, "low_delay_mode_flag"); } static void av1_parse_sequence_header_obu(GF_BitStream *bs, AV1State *state) { u8 buffer_delay_length_minus_1 = 0; state->frame_state.seen_seq_header = GF_TRUE; state->config->seq_profile = gf_bs_read_int_log(bs, 3, "seq_profile"); state->still_picture = gf_bs_read_int_log(bs, 1, "still_picture"); state->reduced_still_picture_header = gf_bs_read_int_log(bs, 1, "reduced_still_picture_header"); if (state->reduced_still_picture_header) { //timing_info_present_flag = GF_FALSE; //initial_display_delay_present_flag = GF_FALSE; state->operating_points_count = 1; state->config->seq_level_idx_0 = gf_bs_read_int_log(bs, 5, "seq_level_idx_0"); } else { u8 i = 0; Bool initial_display_delay_present_flag; Bool timing_info_present_flag = gf_bs_read_int_log(bs, 1, "timing_info_present_flag"); if (timing_info_present_flag) { timing_info(bs, state); state->decoder_model_info_present_flag = gf_bs_read_int_log(bs, 1, "decoder_model_info_present_flag"); if (state->decoder_model_info_present_flag) { decoder_model_info(state, bs); } } else { state->decoder_model_info_present_flag = GF_FALSE; } initial_display_delay_present_flag = gf_bs_read_int_log(bs, 1, "initial_display_delay_present_flag"); state->operating_points_count = 1 + gf_bs_read_int_log(bs, 5, "operating_points_count_minus1"); for (i = 0; i < state->operating_points_count; i++) { u8 seq_level_idx_i, seq_tier = 0; state->operating_point_idc[i] = gf_bs_read_int_log_idx(bs, 12, "operating_point_idc", i); seq_level_idx_i = gf_bs_read_int_log_idx(bs, 5, "seq_level_idx", i); if (i == 0) state->config->seq_level_idx_0 = seq_level_idx_i; if (seq_level_idx_i > 7) { seq_tier = gf_bs_read_int_log_idx(bs, 1, "seq_tier", i); } if (i == 0) state->config->seq_tier_0 = seq_tier; if (state->decoder_model_info_present_flag) { state->decoder_model_present_for_this_op[i] = gf_bs_read_int_log_idx(bs, 1, "decoder_model_present_for_this_op", i); if (state->decoder_model_present_for_this_op[i]) { operating_parameters_info(bs, i, buffer_delay_length_minus_1); } } else { state->decoder_model_present_for_this_op[i] = 0; } if (initial_display_delay_present_flag) { if (gf_bs_read_int_log_idx(bs, 1, "initial_display_delay_present_for_this_op", i) ) { gf_bs_read_int_log_idx(bs, 4, "initial_display_delay_minus1", i); } } } } //operatingPoint = av1_choose_operating_point(bs); state->OperatingPointIdc = 0;//TODO: operating_point_idc[operatingPoint]; state->frame_width_bits_minus_1 = gf_bs_read_int_log(bs, 4, "frame_width_bits_minus1"); state->frame_height_bits_minus_1 = gf_bs_read_int_log(bs, 4, "frame_height_bits_minus1"); state->width = gf_bs_read_int_log(bs, state->frame_width_bits_minus_1 + 1, "width_minus1") + 1; state->height = gf_bs_read_int_log(bs, 
state->frame_height_bits_minus_1 + 1, "height_minus1") + 1; state->sequence_width = state->width; state->sequence_height = state->height; state->frame_id_numbers_present_flag = GF_FALSE; if (!state->reduced_still_picture_header) { state->frame_id_numbers_present_flag = gf_bs_read_int_log(bs, 1, "frame_id_numbers_present_flag"); } if (state->frame_id_numbers_present_flag) { state->delta_frame_id_length_minus_2 = gf_bs_read_int_log(bs, 4, "delta_frame_id_length_minus2"); state->additional_frame_id_length_minus_1 = gf_bs_read_int_log(bs, 3, "additional_frame_id_length_minus1"); } state->use_128x128_superblock = gf_bs_read_int_log(bs, 1, "use_128x128_superblock"); gf_bs_read_int_log(bs, 1, "enable_filter_intra"); gf_bs_read_int_log(bs, 1, "enable_intra_edge_filter"); if (state->reduced_still_picture_header) { /*enable_interintra_compound = 0; enable_masked_compound = 0; enable_dual_filter = 0; enable_jnt_comp = 0; enable_ref_frame_mvs = 0;*/ state->enable_warped_motion = 0; state->enable_order_hint = GF_FALSE; state->OrderHintBits = 0; state->seq_force_integer_mv = 2/*SELECT_INTEGER_MV*/; state->seq_force_screen_content_tools = 2/*SELECT_SCREEN_CONTENT_TOOLS*/; } else { Bool seq_choose_screen_content_tools; gf_bs_read_int_log(bs, 1, "enable_interintra_compound"); gf_bs_read_int_log(bs, 1, "enable_masked_compound"); state->enable_warped_motion = gf_bs_read_int_log(bs, 1, "enable_warped_motion"); gf_bs_read_int_log(bs, 1, "enable_dual_filter"); state->enable_order_hint = gf_bs_read_int_log(bs, 1, "enable_order_hint"); if (state->enable_order_hint) { gf_bs_read_int_log(bs, 1, "enable_jnt_comp"); state->enable_ref_frame_mvs = gf_bs_read_int_log(bs, 1, "enable_ref_frame_mvs"); } else { /*enable_jnt_comp = 0*/; /*enable_ref_frame_mvs = 0*/; } seq_choose_screen_content_tools = gf_bs_read_int_log(bs, 1, "seq_choose_screen_content_tools"); state->seq_force_screen_content_tools = 0; if (seq_choose_screen_content_tools) { state->seq_force_screen_content_tools = 2/*SELECT_SCREEN_CONTENT_TOOLS*/; } else { state->seq_force_screen_content_tools = gf_bs_read_int_log(bs, 1, "seq_force_screen_content_tools"); } state->seq_force_integer_mv = 0; if (state->seq_force_screen_content_tools > 0) { const Bool seq_choose_integer_mv = gf_bs_read_int_log(bs, 1, "seq_choose_integer_mv"); if (seq_choose_integer_mv) { state->seq_force_integer_mv = 2/*SELECT_INTEGER_MV*/; } else { state->seq_force_integer_mv = gf_bs_read_int_log(bs, 1, "seq_force_integer_mv"); } } else { state->seq_force_integer_mv = 2/*SELECT_INTEGER_MV*/; } if (state->enable_order_hint) { u8 order_hint_bits_minus_1 = gf_bs_read_int_log(bs, 3, "order_hint_bits_minus1"); state->OrderHintBits = order_hint_bits_minus_1 + 1; } else { state->OrderHintBits = 0; } } state->enable_superres = gf_bs_read_int_log(bs, 1, "enable_superres"); state->enable_cdef = gf_bs_read_int_log(bs, 1, "enable_cdef"); state->enable_restoration = gf_bs_read_int_log(bs, 1, "enable_restoration"); av1_color_config(bs, state); state->film_grain_params_present = gf_bs_read_int_log(bs, 1, "film_grain_params_present"); } #define IVF_FILE_HEADER_SIZE 32 Bool gf_media_probe_ivf(GF_BitStream *bs) { u32 dw = 0; if (gf_bs_available(bs) < IVF_FILE_HEADER_SIZE) return GF_FALSE; dw = gf_bs_peek_bits(bs, 32, 0); if (dw != GF_4CC('D', 'K', 'I', 'F')) { return GF_FALSE; } return GF_TRUE; } GF_Err gf_media_parse_ivf_file_header(GF_BitStream *bs, u32 *width, u32 *height, u32 *codec_fourcc, u32 *timebase_num, u32 *timebase_den, u32 *num_frames) { u32 dw = 0; if (!width || !height || !codec_fourcc || 
!timebase_den || !timebase_num || !num_frames) { assert(0); return GF_BAD_PARAM; } if (gf_bs_available(bs) < IVF_FILE_HEADER_SIZE) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Not enough bytes available ("LLU").\n", gf_bs_available(bs))); return GF_NON_COMPLIANT_BITSTREAM; } dw = gf_bs_read_u32(bs); if (dw != GF_4CC('D', 'K', 'I', 'F')) { GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[IVF] Invalid signature\n")); return GF_NON_COMPLIANT_BITSTREAM; } dw = gf_bs_read_u16_le(bs); if (dw != 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Wrong IVF version. 0 expected, got %u\n", dw)); return GF_NON_COMPLIANT_BITSTREAM; } dw = gf_bs_read_u16_le(bs); //length of header in bytes if (dw != IVF_FILE_HEADER_SIZE) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Wrong IVF header length. Expected 32 bytes, got %u\n", dw)); return GF_NON_COMPLIANT_BITSTREAM; } *codec_fourcc = gf_bs_read_u32(bs); *width = gf_bs_read_u16_le(bs); *height = gf_bs_read_u16_le(bs); *timebase_num = gf_bs_read_u32_le(bs); *timebase_den = gf_bs_read_u32_le(bs); *num_frames = gf_bs_read_u32_le(bs); gf_bs_read_u32_le(bs); //skip unused return GF_OK; } GF_Err gf_media_parse_ivf_frame_header(GF_BitStream *bs, u64 *frame_size, u64 *pts) { if (!frame_size) return GF_BAD_PARAM; *frame_size = gf_bs_read_u32_le(bs); if (*frame_size > 256 * 1024 * 1024) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Wrong frame size %u\n", *frame_size)); *frame_size = 0; return GF_NON_COMPLIANT_BITSTREAM; } *pts = gf_bs_read_u64_le(bs); return GF_OK; } GF_Err gf_media_vp9_parse_superframe(GF_BitStream *bs, u64 ivf_frame_size, u32 *num_frames_in_superframe, u32 frame_sizes[VP9_MAX_FRAMES_IN_SUPERFRAME], u32 *superframe_index_size) { u32 byte, bytes_per_framesize; u64 pos = gf_bs_get_position(bs), i = 0; GF_Err e; assert(bs && num_frames_in_superframe); /*initialize like there is no superframe*/ memset(frame_sizes, 0, VP9_MAX_FRAMES_IN_SUPERFRAME * sizeof(frame_sizes[0])); *num_frames_in_superframe = 1; frame_sizes[0] = (u32)ivf_frame_size; *superframe_index_size = 0; e = gf_bs_seek(bs, pos + ivf_frame_size - 1); if (e) return e; byte = gf_bs_read_u8(bs); if ((byte & 0xe0) != 0xc0) goto exit; /*no superframe*/ bytes_per_framesize = 1 + ((byte & 0x18) >> 3); *num_frames_in_superframe = (u32)(1 + (byte & 0x7)); /*superframe_index()*/ *superframe_index_size = 2 + bytes_per_framesize * *num_frames_in_superframe; gf_bs_seek(bs, pos + ivf_frame_size - *superframe_index_size); byte = gf_bs_read_u8(bs); if ((byte & 0xe0) != 0xc0) goto exit; /*no superframe*/ frame_sizes[0] = 0; for (i = 0; i < *num_frames_in_superframe; ++i) { gf_bs_read_data(bs, (char*)(frame_sizes + i), bytes_per_framesize); } exit: gf_bs_seek(bs, pos); return e; } static Bool vp9_frame_sync_code(GF_BitStream *bs) { u8 val = gf_bs_read_int_log(bs, 8, "syncbyte1"); if (val != 0x49) return GF_FALSE; val = gf_bs_read_int_log(bs, 8, "syncbyte2"); if (val != 0x83) return GF_FALSE; val = gf_bs_read_int_log(bs, 8, "syncbyte3"); if (val != 0x42) return GF_FALSE; return GF_TRUE; } typedef enum { CS_UNKNOWN = 0, CS_BT_601 = 1, CS_BT_709 = 2, CS_SMPTE_170 = 3, CS_SMPTE_240 = 4, CS_BT_2020 = 5, CS_RESERVED = 6, CS_RGB = 7, } VP9_color_space; static const int VP9_CS_to_23001_8_colour_primaries[] = { -1/*undefined*/, 5, 1, 6, 7, 9, -1/*reserved*/, 1 }; static const int VP9_CS_to_23001_8_transfer_characteristics[] = { -1/*undefined*/, 5, 1, 6, 7, 9, -1/*reserved*/, 13 }; static const int VP9_CS_to_23001_8_matrix_coefficients[] = { -1/*undefined*/, 6, 1, -1, -1, 9, -1/*reserved*/, 0 }; static GF_Err 
vp9_color_config(GF_BitStream *bs, GF_VPConfig *vp9_cfg) { VP9_color_space color_space; if (vp9_cfg->profile >= 2) { Bool ten_or_twelve_bit = gf_bs_read_int_log(bs, 1, "ten_or_twelve_bit"); vp9_cfg->bit_depth = ten_or_twelve_bit ? 12 : 10; } else { vp9_cfg->bit_depth = 8; } color_space = gf_bs_read_int_log(bs, 3, "color_space"); vp9_cfg->colour_primaries = VP9_CS_to_23001_8_colour_primaries[color_space]; vp9_cfg->transfer_characteristics = VP9_CS_to_23001_8_transfer_characteristics[color_space]; vp9_cfg->matrix_coefficients = VP9_CS_to_23001_8_matrix_coefficients[color_space]; if (color_space != CS_RGB) { vp9_cfg->video_fullRange_flag = gf_bs_read_int_log(bs, 1, "video_fullRange_flag"); if (vp9_cfg->profile == 1 || vp9_cfg->profile == 3) { u8 subsampling_x, subsampling_y, subsampling_xy_to_chroma_subsampling[2][2] = { {3, 0}, {2, 0} }; subsampling_x = gf_bs_read_int_log(bs, 1, "subsampling_x"); subsampling_y = gf_bs_read_int_log(bs, 1, "subsampling_y"); vp9_cfg->chroma_subsampling = subsampling_xy_to_chroma_subsampling[subsampling_x][subsampling_y]; Bool reserved_zero = gf_bs_read_int_log(bs, 1, "reserved_zero"); if (reserved_zero) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VP9] color config reserved zero (1) is not zero.\n")); return GF_NON_COMPLIANT_BITSTREAM; } } else { vp9_cfg->chroma_subsampling = 0; } } else { vp9_cfg->video_fullRange_flag = GF_TRUE; if (vp9_cfg->profile == 1 || vp9_cfg->profile == 3) { vp9_cfg->chroma_subsampling = 3; Bool reserved_zero = gf_bs_read_int_log(bs, 1, "reserved_zero"); if (reserved_zero) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VP9] color config reserved zero (2) is not zero.\n")); return GF_NON_COMPLIANT_BITSTREAM; } } } return GF_OK; } static void vp9_compute_image_size(int FrameWidth, int FrameHeight, int *Sb64Cols, int *Sb64Rows) { int MiCols = (FrameWidth + 7) >> 3; int MiRows = (FrameHeight + 7) >> 3; *Sb64Cols = (MiCols + 7) >> 3; *Sb64Rows = (MiRows + 7) >> 3; } static void vp9_frame_size(GF_BitStream *bs, int *FrameWidth, int *FrameHeight, int *Sb64Cols, int *Sb64Rows) { int frame_width_minus_1 = gf_bs_read_int_log(bs, 16, "frame_width_minus_1"); int frame_height_minus_1 = gf_bs_read_int_log(bs, 16, "frame_height_minus_1"); if (frame_width_minus_1 + 1 != *FrameWidth || frame_height_minus_1 + 1 != *FrameHeight) { if (*FrameWidth || *FrameHeight) GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[VP9] inconsistent frame dimensions: previous was %dx%d, new one is %dx%d.\n", *FrameWidth, *FrameHeight, frame_width_minus_1 + 1, frame_height_minus_1 + 1)); } *FrameWidth = frame_width_minus_1 + 1; *FrameHeight = frame_height_minus_1 + 1; vp9_compute_image_size(*FrameWidth, *FrameHeight, Sb64Cols, Sb64Rows); } static void vp9_render_size(GF_BitStream *bs, int FrameWidth, int FrameHeight, int *renderWidth, int *renderHeight) { Bool render_and_frame_size_different = gf_bs_read_int_log(bs, 1, "render_and_frame_size_different"); if (render_and_frame_size_different == 1) { int render_width_minus_1 = gf_bs_read_int_log(bs, 16, "render_width_minus_1"); int render_height_minus_1 = gf_bs_read_int_log(bs, 16, "render_height_minus_1"); *renderWidth = render_width_minus_1 + 1; *renderHeight = render_height_minus_1 + 1; } else { *renderWidth = FrameWidth; *renderHeight = FrameHeight; } } static s64 vp9_s(GF_BitStream *bs, int n, const char *fname, u32 idx) { s64 value = gf_bs_read_int(bs, n); Bool sign = gf_bs_read_int(bs, 1); if (sign) value = -value; gf_bs_log_idx(bs, n+1, fname, value, idx, -1, -1); return value; } static void vp9_loop_filter_params(GF_BitStream *bs) { 
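/* descriptive note (editor-added): VP9 uncompressed-header loop filter syntax parsed below: 6-bit filter level, 3-bit sharpness, then optional signed per-reference-frame and per-mode deltas (read via vp9_s) when delta updates are enabled */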
/*loop_filter_level = */gf_bs_read_int_log(bs, 6, "loop_filter_level"); /*loop_filter_sharpness = */gf_bs_read_int_log(bs, 3, "loop_filter_sharpness"); Bool loop_filter_delta_enabled = gf_bs_read_int_log(bs, 1, "loop_filter_delta_enabled"); if (loop_filter_delta_enabled == 1) { Bool loop_filter_delta_update = gf_bs_read_int_log(bs, 1, "loop_filter_delta_update"); if (loop_filter_delta_update == GF_TRUE) { int i; for (i = 0; i < 4; i++) { Bool update_ref_delta = gf_bs_read_int_log_idx(bs, 1, "update_ref_delta", i); if (update_ref_delta == GF_TRUE) vp9_s(bs, 6, "loop_filter_ref_deltas", i); } for (i = 0; i < 2; i++) { Bool update_mode_delta = gf_bs_read_int_log_idx(bs, 1, "update_mode_delta", i); if (update_mode_delta == GF_TRUE) vp9_s(bs, 6, "loop_filter_mode_deltas", i); } } } } static void vp9_quantization_params(GF_BitStream *bs) { /*base_q_idx = */gf_bs_read_int_log(bs, 8, "base_q_idx"); } #define VP9_MAX_SEGMENTS 8 #define VP9_SEG_LVL_MAX 4 static const int segmentation_feature_bits[VP9_SEG_LVL_MAX] = { 8, 6, 2, 0 }; static const int segmentation_feature_signed[VP9_SEG_LVL_MAX] = { 1, 1, 0, 0 }; #define VP9_MIN_TILE_WIDTH_B64 4 #define VP9_MAX_TILE_WIDTH_B64 64 static void vp9_segmentation_params(GF_BitStream *bs) { Bool segmentation_enabled = gf_bs_read_int_log(bs, 1, "segmentation_enabled"); if (segmentation_enabled == 1) { int i; Bool segmentation_update_map = gf_bs_read_int_log(bs, 1, "segmentation_update_map"); if (segmentation_update_map) { for (i = 0; i < 7; i++) /*segmentation_tree_probs[i] = read_prob()*/ /*segmentation_temporal_update = */gf_bs_read_int_log(bs, 1, "segmentation_temporal_update"); /*for (i = 0; i < 3; i++) segmentation_pred_prob[i] = segmentation_temporal_update ? read_prob() : 255*/ } Bool segmentation_update_data = gf_bs_read_int_log(bs, 1, "segmentation_update_data"); if (segmentation_update_data == 1) { /*segmentation_abs_or_delta_update =*/ gf_bs_read_int_log(bs, 1, "segmentation_abs_or_delta_update"); for (i = 0; i < VP9_MAX_SEGMENTS; i++) { int j; for (j = 0; j < VP9_SEG_LVL_MAX; j++) { /*feature_value = 0*/ Bool feature_enabled = gf_bs_read_int_log(bs, 1, "feature_enabled"); /*FeatureEnabled[i][j] = feature_enabled*/ if (feature_enabled) { int bits_to_read = segmentation_feature_bits[j]; /*feature_value =*/ gf_bs_read_int_log(bs, bits_to_read, "feature_value"); if (segmentation_feature_signed[j] == 1) { /*Bool feature_sign = */gf_bs_read_int_log(bs, 1, "feature_sign"); /*if (feature_sign == 1) feature_value *= -1*/ } } /*FeatureData[i][j] = feature_value*/ } } } } } static int calc_min_log2_tile_cols(int Sb64Cols) { int minLog2 = 0; while ((VP9_MAX_TILE_WIDTH_B64 << minLog2) < Sb64Cols) minLog2++; return minLog2; } static int calc_max_log2_tile_cols(int Sb64Cols) { int maxLog2 = 1; while ((Sb64Cols >> maxLog2) >= VP9_MIN_TILE_WIDTH_B64) maxLog2++; return maxLog2 - 1; } static void vp9_tile_info(GF_BitStream *bs, int Sb64Cols) { Bool tile_rows_log2; int minLog2TileCols = calc_min_log2_tile_cols(Sb64Cols); int maxLog2TileCols = calc_max_log2_tile_cols(Sb64Cols); int tile_cols_log2 = minLog2TileCols; while (tile_cols_log2 < maxLog2TileCols) { Bool increment_tile_cols_log2 = gf_bs_read_int_log(bs, 1, "increment_tile_cols_log2"); if (increment_tile_cols_log2) tile_cols_log2++; else break; } tile_rows_log2 = gf_bs_read_int_log(bs, 1, "tile_rows_log2"); if (tile_rows_log2) { /*Bool increment_tile_rows_log2 = */gf_bs_read_int_log(bs, 1, "increment_tile_rows_log2"); //tile_rows_log2 += increment_tile_rows_log2; } } static void 
vp9_frame_size_with_refs(GF_BitStream *bs, u8 refresh_frame_flags, u8 * ref_frame_idx, int * RefFrameWidth, int *RefFrameHeight, int *FrameWidth, int *FrameHeight, int *RenderWidth, int *RenderHeight, int *Sb64Cols, int *Sb64Rows) { Bool found_ref; int i; for (i = 0; i < 3; i++) { found_ref = gf_bs_read_int_log(bs, 1, "found_ref"); if (found_ref) { *FrameWidth = RefFrameWidth [ref_frame_idx[i]]; *FrameHeight = RefFrameHeight[ref_frame_idx[i]]; break; } } if (found_ref == 0) { vp9_frame_size(bs, FrameWidth, FrameHeight, Sb64Cols, Sb64Rows); } else { vp9_compute_image_size(*FrameWidth, *FrameHeight, Sb64Cols, Sb64Rows); } vp9_render_size(bs, *FrameWidth, *FrameHeight, RenderWidth, RenderHeight); } static void vp9_read_interpolation_filter(GF_BitStream *bs) { Bool is_filter_switchable = gf_bs_read_int_log(bs, 1, "is_filter_switchable"); if (!is_filter_switchable) { /*raw_interpolation_filter = */gf_bs_read_int_log(bs, 2, "raw_interpolation_filter"); } } #define VP9_KEY_FRAME 0 GF_Err gf_media_vp9_parse_sample(GF_BitStream *bs, GF_VPConfig *vp9_cfg, Bool *key_frame, u32 *FrameWidth, u32 *FrameHeight, u32 *renderWidth, u32 *renderHeight) { Bool FrameIsIntra = GF_FALSE, profile_low_bit, profile_high_bit, show_existing_frame = GF_FALSE, frame_type = GF_FALSE, show_frame = GF_FALSE, error_resilient_mode = GF_FALSE; /*u8 frame_context_idx = 0, reset_frame_context = 0, frame_marker = 0*/; int Sb64Cols = 0, Sb64Rows = 0, i; u8 refresh_frame_flags = 0; assert(bs && key_frame); /*uncompressed header*/ /*frame_marker = */gf_bs_read_int_log(bs, 2, "frame_marker"); profile_low_bit = gf_bs_read_int_log(bs, 1, "profile_low_bit"); profile_high_bit = gf_bs_read_int_log(bs, 1, "profile_high_bit"); vp9_cfg->profile = (profile_high_bit << 1) + profile_low_bit; if (vp9_cfg->profile == 3) { Bool reserved_zero = gf_bs_read_int_log(bs, 1, "reserved_zero"); if (reserved_zero) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VP9] uncompressed header reserved zero is not zero.\n")); return GF_NON_COMPLIANT_BITSTREAM; } } show_existing_frame = gf_bs_read_int_log(bs, 1, "show_existing_frame"); if (show_existing_frame == GF_TRUE) { /*frame_to_show_map_idx = */gf_bs_read_int_log(bs, 3, "frame_to_show_map_idx"); return GF_OK; } frame_type = gf_bs_read_int_log(bs, 1, "frame_type"); show_frame = gf_bs_read_int_log(bs, 1, "show_frame"); error_resilient_mode = gf_bs_read_int_log(bs, 1, "error_resilient_mode"); if (frame_type == VP9_KEY_FRAME) { if (!vp9_frame_sync_code(bs)) return GF_NON_COMPLIANT_BITSTREAM; if (vp9_color_config(bs, vp9_cfg) != GF_OK) return GF_NON_COMPLIANT_BITSTREAM; vp9_frame_size(bs, FrameWidth, FrameHeight, &Sb64Cols, &Sb64Rows); vp9_render_size(bs, *FrameWidth, *FrameHeight, renderWidth, renderHeight); refresh_frame_flags = 0xFF; *key_frame = GF_TRUE; FrameIsIntra = GF_TRUE; } else { Bool intra_only = GF_FALSE; *key_frame = GF_FALSE; if (show_frame == GF_FALSE) { intra_only = gf_bs_read_int_log(bs, 1, "intra_only"); } FrameIsIntra = intra_only; if (error_resilient_mode == GF_FALSE) { /*reset_frame_context = */gf_bs_read_int_log(bs, 2, "reset_frame_context"); } if (intra_only == GF_TRUE) { if (!vp9_frame_sync_code(bs)) return GF_NON_COMPLIANT_BITSTREAM; if (vp9_cfg->profile > 0) { if (vp9_color_config(bs, vp9_cfg) != GF_OK) return GF_NON_COMPLIANT_BITSTREAM; } else { u8 color_space = CS_BT_601; vp9_cfg->colour_primaries = VP9_CS_to_23001_8_colour_primaries[color_space]; vp9_cfg->transfer_characteristics = VP9_CS_to_23001_8_transfer_characteristics[color_space]; vp9_cfg->matrix_coefficients = 
VP9_CS_to_23001_8_matrix_coefficients[color_space]; vp9_cfg->chroma_subsampling = 0; vp9_cfg->bit_depth = 8; } refresh_frame_flags = gf_bs_read_int_log(bs, 8, "refresh_frame_flags"); vp9_frame_size(bs, FrameWidth, FrameHeight, &Sb64Cols, &Sb64Rows); vp9_render_size(bs, *FrameWidth, *FrameHeight, renderWidth, renderHeight); } else { refresh_frame_flags = gf_bs_read_int_log(bs, 8, "refresh_frame_flags"); u8 ref_frame_idx[3]; for (i = 0; i < 3; i++) { ref_frame_idx[i] = gf_bs_read_int_log_idx(bs, 3, "ref_frame_idx", i); /*ref_frame_sign_bias[LAST_FRAME + i] = */gf_bs_read_int_log_idx(bs, 1, "ref_frame_sign_bias", i); } vp9_frame_size_with_refs(bs, refresh_frame_flags, ref_frame_idx, vp9_cfg->RefFrameWidth, vp9_cfg->RefFrameHeight, FrameWidth, FrameHeight, renderWidth, renderHeight, &Sb64Cols, &Sb64Rows); /*allow_high_precision_mv = */gf_bs_read_int_log(bs, 1, "allow_high_precision_mv"); vp9_read_interpolation_filter(bs); } } if (error_resilient_mode == 0) { /*refresh_frame_context = */gf_bs_read_int_log(bs, 1, "refresh_frame_context"); /*frame_parallel_decoding_mode = */gf_bs_read_int_log(bs, 1, "frame_parallel_decoding_mode"); } /*frame_context_idx = */gf_bs_read_int_log(bs, 2, "frame_context_idx"); if (FrameIsIntra || error_resilient_mode) { /*setup_past_independence + save_probs ...*/ //frame_context_idx = 0; } vp9_loop_filter_params(bs); vp9_quantization_params(bs); vp9_segmentation_params(bs); vp9_tile_info(bs, Sb64Cols); /*header_size_in_bytes = */gf_bs_read_int_log(bs, 16, "header_size_in_bytes"); /*Reference frame update process (8.10 - partial)*/ for (i = 0; i < VP9_NUM_REF_FRAMES; i++) { if ((refresh_frame_flags >> i) & 1) { vp9_cfg->RefFrameWidth[i] = *FrameWidth; vp9_cfg->RefFrameHeight[i] = *FrameHeight; } } return GF_OK; } GF_Err gf_av1_parse_obu_header(GF_BitStream *bs, ObuType *obu_type, Bool *obu_extension_flag, Bool *obu_has_size_field, u8 *temporal_id, u8 *spatial_id) { Bool forbidden = gf_bs_read_int(bs, 1); if (forbidden) { return GF_NON_COMPLIANT_BITSTREAM; } *obu_type = gf_bs_read_int(bs, 4); *obu_extension_flag = gf_bs_read_int(bs, 1); *obu_has_size_field = gf_bs_read_int(bs, 1); if (gf_bs_read_int(bs, 1) /*obu_reserved_1bit*/) { return GF_NON_COMPLIANT_BITSTREAM; } if (*obu_extension_flag) { *temporal_id = gf_bs_read_int(bs, 3); *spatial_id = gf_bs_read_int(bs, 2); /*extension_header_reserved_3bits = */gf_bs_read_int(bs, 3); } return GF_OK; } #endif // GPAC_DISABLE_AV_PARSERS GF_EXPORT const char *gf_av1_get_obu_name(ObuType obu_type) { switch (obu_type) { case OBU_SEQUENCE_HEADER: return "seq_header"; case OBU_TEMPORAL_DELIMITER: return "delimiter"; case OBU_FRAME_HEADER: return "frame_header"; case OBU_TILE_GROUP: return "tile_group"; case OBU_METADATA: return "metadata"; case OBU_FRAME: return "frame"; case OBU_REDUNDANT_FRAME_HEADER: return "redundant_frame_header"; case OBU_TILE_LIST: return "tile_list"; case OBU_PADDING: return "padding"; case OBU_RESERVED_0: case OBU_RESERVED_9: case OBU_RESERVED_10: case OBU_RESERVED_11: case OBU_RESERVED_12: case OBU_RESERVED_13: case OBU_RESERVED_14: return "reserved"; default: return "unknown"; } } Bool av1_is_obu_header(ObuType obu_type) { switch (obu_type) { case OBU_SEQUENCE_HEADER: case OBU_METADATA: // TODO add check based on the metadata type return GF_TRUE; default: return GF_FALSE; } } #ifndef GPAC_DISABLE_AV_PARSERS static Bool av1_is_obu_frame(AV1State *state, ObuType obu_type) { switch (obu_type) { case OBU_PADDING: case OBU_REDUNDANT_FRAME_HEADER: return GF_FALSE; case OBU_TEMPORAL_DELIMITER: return 
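/* descriptive note (editor-added): temporal delimiter OBUs are only copied into the reconstructed frame when the caller requested to keep them */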
state->keep_temporal_delim ? GF_TRUE : GF_FALSE; default: return GF_TRUE; } } u64 gf_av1_leb128_read(GF_BitStream *bs, u8 *opt_Leb128Bytes) { u64 value = 0; u8 Leb128Bytes = 0, i = 0; for (i = 0; i < 8; i++) { u8 leb128_byte = gf_bs_read_u8(bs); value |= ( ((u64) (leb128_byte & 0x7f)) << (i * 7)); Leb128Bytes += 1; if (!(leb128_byte & 0x80)) { break; } } if (opt_Leb128Bytes) { *opt_Leb128Bytes = Leb128Bytes; } return value; } u32 gf_av1_leb128_size(u64 value) { u32 gf_av1_leb128_size = 0; do { ++gf_av1_leb128_size; } while ((value >>= 7) != 0); return gf_av1_leb128_size; } u64 gf_av1_leb128_write(GF_BitStream *bs, u64 value) { u32 i, leb_size = gf_av1_leb128_size(value); for (i = 0; i < leb_size; ++i) { u8 byte = value & 0x7f; value >>= 7; if (value != 0) byte |= 0x80; //more bytes follow gf_bs_write_u8(bs, byte); } return leb_size; } #define OBU_BLOCK_SIZE 4096 static void av1_add_obu_internal(GF_BitStream *bs, u64 pos, u64 obu_length, ObuType obu_type, GF_List **obu_list, AV1State *state) { char block[OBU_BLOCK_SIZE]; Bool has_size_field = 0, obu_extension_flag = 0; u8 temporal_id, spatial_id; GF_AV1_OBUArrayEntry *a = NULL; if (state && state->mem_mode) { if (!state->bs) state->bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); else gf_bs_reassign_buffer(state->bs, state->frame_obus, state->frame_obus_alloc); } else { GF_SAFEALLOC(a, GF_AV1_OBUArrayEntry); if (!a) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[AV1] Failed to allocate OBU\n")); return; } } gf_bs_seek(bs, pos); gf_av1_parse_obu_header(bs, &obu_type, &obu_extension_flag, &has_size_field, &temporal_id, &spatial_id); gf_bs_seek(bs, pos); if (has_size_field) { if (a) { a->obu = gf_malloc((size_t)obu_length); gf_bs_read_data(bs, a->obu, (u32)obu_length); a->obu_length = obu_length; } else { u32 remain = (u32)obu_length; while (remain) { u32 block_size = OBU_BLOCK_SIZE; if (block_size > remain) block_size = remain; gf_bs_read_data(bs, block, block_size); gf_bs_write_data(state->bs, block, block_size); remain -= block_size; } return; } } else { u8 i, hdr_size = obu_extension_flag ? 
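/* descriptive note (editor-added): the OBU header is 1 byte, or 2 bytes when the extension byte (temporal_id/spatial_id) is present, matching gf_av1_parse_obu_header() */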
2 : 1; const u32 leb_size = (u32)gf_av1_leb128_size(obu_length); const u64 obu_size = obu_length - hdr_size; if (a) { a->obu = gf_malloc((size_t)obu_length + leb_size); a->obu_length = obu_length + leb_size; for (i = 0; i < hdr_size; ++i) { a->obu[i] = gf_bs_read_u8(bs); /*add size field flag*/ if (i == 0) a->obu[0] |= 0x02; } { u32 out_size = 0; u8 *output = NULL; GF_BitStream *bsLeb128 = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); /*write size field*/ gf_av1_leb128_write(bsLeb128, obu_size); assert(gf_bs_get_position(bsLeb128) == leb_size); gf_bs_get_content(bsLeb128, &output, &out_size); gf_bs_del(bsLeb128); memcpy(a->obu + hdr_size, output, out_size); gf_free(output); } gf_bs_read_data(bs, a->obu + hdr_size + leb_size, (u32)(obu_size)); assert(gf_bs_get_position(bs) == pos + obu_length); } else { u32 remain; for (i = 0; i < hdr_size; ++i) { u8 hdr_b = gf_bs_read_u8(bs); if (i == 0) hdr_b |= 0x02; /*add size field flag*/ gf_bs_write_u8(state->bs, hdr_b); } /*add size field */ gf_av1_leb128_write(state->bs, obu_size); remain = (u32)obu_length - hdr_size; while (remain) { u32 block_size = OBU_BLOCK_SIZE; if (block_size > remain) block_size = remain; gf_bs_read_data(bs, block, block_size); gf_bs_write_data(state->bs, block, block_size); remain -= block_size; } assert(gf_bs_get_position(bs) == pos + obu_length); return; } } if (!obu_list) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[AV1] internal error, no OBU list cannot add\n")); gf_free(a->obu); gf_free(a); return; } a->obu_type = obu_type; if (! *obu_list) *obu_list = gf_list_new(); gf_list_add(*obu_list, a); } static void av1_populate_state_from_obu(GF_BitStream *bs, u64 pos, u64 obu_length, ObuType obu_type, AV1State *state) { if (av1_is_obu_header(obu_type)) { av1_add_obu_internal(bs, pos, obu_length, obu_type, &state->frame_state.header_obus, NULL); } if (!state->skip_frames && av1_is_obu_frame(state, obu_type)) { if (!state->mem_mode) { av1_add_obu_internal(bs, pos, obu_length, obu_type, &state->frame_state.frame_obus, NULL); } else { av1_add_obu_internal(bs, pos, obu_length, obu_type, NULL, state); } } } GF_Err aom_av1_parse_temporal_unit_from_section5(GF_BitStream *bs, AV1State *state) { if (!state) return GF_BAD_PARAM; state->obu_type = -1; while (state->obu_type != OBU_TEMPORAL_DELIMITER) { GF_Err e; if (!gf_bs_available(bs)) return state->unframed ? 
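/* descriptive note (editor-added): in unframed (streaming) mode, running out of bytes before the next temporal delimiter signals that more input is needed rather than end of unit */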
GF_BUFFER_TOO_SMALL : GF_OK; u64 pos = gf_bs_get_position(bs), obu_length = 0; e = gf_av1_parse_obu(bs, &state->obu_type, &obu_length, NULL, state); if (e) return e; if (obu_length != gf_bs_get_position(bs) - pos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] OBU (Section 5) frame size "LLU" different from consumed bytes "LLU".\n", obu_length, gf_bs_get_position(bs) - pos)); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Section5 OBU detected (size "LLU")\n", obu_length)); av1_populate_state_from_obu(bs, pos, obu_length, state->obu_type, state); } return GF_OK; } Bool gf_media_aom_probe_annexb(GF_BitStream *bs) { Bool res = GF_TRUE; u64 init_pos = gf_bs_get_position(bs); u64 sz = gf_av1_leb128_read(bs, NULL); if (!sz) res = GF_FALSE; while (sz > 0) { u8 Leb128Bytes = 0; u64 frame_unit_size = gf_av1_leb128_read(bs, &Leb128Bytes); if (!frame_unit_size) { res = GF_FALSE; break; } if (sz < Leb128Bytes + frame_unit_size) { res = GF_FALSE; break; } sz -= Leb128Bytes + frame_unit_size; while (frame_unit_size > 0) { ObuType obu_type; u64 pos, obu_length = gf_av1_leb128_read(bs, &Leb128Bytes); if (frame_unit_size < Leb128Bytes + obu_length) { res = GF_FALSE; break; } pos = gf_bs_get_position(bs); frame_unit_size -= Leb128Bytes; u8 tid, sid; Bool extflag, has_size; GF_Err e = gf_av1_parse_obu_header(bs, &obu_type, &extflag, &has_size, &tid, &sid); if (e) { res = GF_FALSE; break; } if (has_size) { obu_length = (u32)gf_av1_leb128_read(bs, NULL); } else { if (obu_length >= 1 + extflag) { obu_length = obu_length - 1 - extflag; } else { res = GF_FALSE; break; } } u32 hdr_size = (u32)(gf_bs_get_position(bs) - pos); obu_length += hdr_size; if (frame_unit_size < obu_length) { res = GF_FALSE; break; } frame_unit_size -= obu_length; gf_bs_skip_bytes(bs, obu_length - hdr_size); } if (!res) break; } gf_bs_seek(bs, init_pos); return res; } GF_Err aom_av1_parse_temporal_unit_from_annexb(GF_BitStream *bs, AV1State *state) { GF_Err e; u64 tupos; u64 tusize, sz; if (!bs || !state) return GF_BAD_PARAM; state->bs_overread = GF_FALSE; tusize = sz = gf_av1_leb128_read(bs, NULL); tupos = gf_bs_get_position(bs); if (!sz) { GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[AV1] temporal unit size is 0, likely not annex B\n")); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Annex B temporal unit detected (size "LLU") ***** \n", sz)); while (sz > 0) { u8 Leb128Bytes = 0; u64 frame_unit_size = gf_av1_leb128_read(bs, &Leb128Bytes); if (state->bs_overread) { return GF_BUFFER_TOO_SMALL; } if (sz < Leb128Bytes + frame_unit_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Annex B sz("LLU") < Leb128Bytes("LLU") + frame_unit_size("LLU")\n", sz, Leb128Bytes, frame_unit_size)); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Annex B frame unit detected (size "LLU")\n", frame_unit_size)); sz -= Leb128Bytes + frame_unit_size; while (frame_unit_size > 0) { u64 pos, obu_length = gf_av1_leb128_read(bs, &Leb128Bytes); if (state->bs_overread) { return GF_BUFFER_TOO_SMALL; } if (frame_unit_size < Leb128Bytes + obu_length) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Annex B frame_unit_size("LLU") < Leb128Bytes("LLU") + obu_length("LLU")\n", frame_unit_size, Leb128Bytes, obu_length)); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Annex B OBU detected (size "LLU")\n", obu_length)); pos = gf_bs_get_position(bs); frame_unit_size -= Leb128Bytes; e = gf_av1_parse_obu(bs, &state->obu_type, &obu_length, 
NULL, state); if (e) return e; if (obu_length != gf_bs_get_position(bs) - pos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] Annex B frame size "LLU" different from consumed bytes "LLU".\n", obu_length, gf_bs_get_position(bs) - pos)); return GF_NON_COMPLIANT_BITSTREAM; } av1_populate_state_from_obu(bs, pos, obu_length, state->obu_type, state); if (frame_unit_size < obu_length) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Annex B frame_unit_size("LLU") < OBU size ("LLU")\n", frame_unit_size, obu_length)); return GF_NON_COMPLIANT_BITSTREAM; } frame_unit_size -= obu_length; } } assert(sz == 0); if (tusize != gf_bs_get_position(bs) - tupos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] Annex B TU size "LLU" different from consumed bytes "LLU".\n", tusize, gf_bs_get_position(bs) - tupos)); return GF_NON_COMPLIANT_BITSTREAM; } return GF_OK; } GF_Err aom_av1_parse_temporal_unit_from_ivf(GF_BitStream *bs, AV1State *state) { u64 frame_size, pts_ignored; GF_Err e; if (gf_bs_available(bs)<12) return GF_EOS; e = gf_media_parse_ivf_frame_header(bs, &frame_size, &pts_ignored); if (e) return e; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] IVF frame detected (size "LLU")\n", frame_size)); if (gf_bs_available(bs) < frame_size) return GF_EOS; while (frame_size > 0) { u64 obu_size = 0, pos = gf_bs_get_position(bs); e = gf_av1_parse_obu(bs, &state->obu_type, &obu_size, NULL, state); if (e != GF_OK) return e; if (obu_size != gf_bs_get_position(bs) - pos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] IVF frame size "LLU" different from consumed bytes "LLU".\n", obu_size, gf_bs_get_position(bs) - pos)); return GF_NON_COMPLIANT_BITSTREAM; } av1_populate_state_from_obu(bs, pos, obu_size, state->obu_type, state); frame_size -= obu_size; } return GF_OK; } #define AV1_NUM_REF_FRAMES 8 #define AV1_ALL_FRAMES ((1 << AV1_NUM_REF_FRAMES) - 1) #define AV1_SUPERRES_DENOM_MIN 9 #define AV1_SUPERRES_DENOM_BITS 3 #define AV1_SUPERRES_NUM 8 #define AV1_REFS_PER_FRAME 7 #define AV1_PRIMARY_REF_NONE 7 #define MAX_TILE_WIDTH 4096 #define MAX_TILE_AREA (4096 * 2304) static u32 aom_av1_tile_log2(u32 blkSize, u32 target) { u32 k; for (k = 0; (blkSize << k) < target; k++) { } return k; } static u64 aom_av1_le(GF_BitStream *bs, u32 n, const char *name) { u32 i = 0; u64 t = 0; for (i = 0; i < n; i++) { u8 byte = gf_bs_read_int(bs, 8); t += (byte << (i * 8)); } gf_bs_log(bs, n*8, name, t); return t; } static void av1_parse_tile_info(GF_BitStream *bs, AV1State *state) { u32 i; u32 MiCols = 2 * ((state->width + 7) >> 3); u32 MiRows = 2 * ((state->height + 7) >> 3); u32 sbCols = state->use_128x128_superblock ? ((MiCols + 31) >> 5) : ((MiCols + 15) >> 4); u32 sbRows = state->use_128x128_superblock ? ((MiRows + 31) >> 5) : ((MiRows + 15) >> 4); u32 sbShift = state->use_128x128_superblock ? 
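/* descriptive note (editor-added): a superblock spans 32 (1<<5) 4x4 mode-info units per dimension when 128x128 superblocks are used, 16 (1<<4) otherwise */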
5 : 4; u32 sbSize = sbShift + 2; u32 maxTileWidthSb = MAX_TILE_WIDTH >> sbSize; u32 maxTileAreaSb = MAX_TILE_AREA >> (2 * sbSize); u32 minLog2tileCols = aom_av1_tile_log2(maxTileWidthSb, sbCols); u32 maxLog2tileCols = aom_av1_tile_log2(1, MIN(sbCols, AV1_MAX_TILE_COLS)); u32 maxLog2tileRows = aom_av1_tile_log2(1, MIN(sbRows, AV1_MAX_TILE_ROWS)); u32 minLog2Tiles = MAX(minLog2tileCols, aom_av1_tile_log2(maxTileAreaSb, sbRows * sbCols)); Bool uniform_tile_spacing_flag = gf_bs_read_int_log(bs, 1, "uniform_tile_spacing_flag"); if (uniform_tile_spacing_flag) { u32 startSb, tileWidthSb, tileHeightSb, minLog2tileRows; state->tileColsLog2 = minLog2tileCols; while (state->tileColsLog2 < maxLog2tileCols) { Bool increment_tile_cols_log2 = gf_bs_read_int_log(bs, 1, "increment_tile_cols_log2"); if (increment_tile_cols_log2 == 1) state->tileColsLog2++; else break; } tileWidthSb = (sbCols + (1 << state->tileColsLog2) - 1) >> state->tileColsLog2; i = 0; for (startSb = 0; startSb < sbCols; startSb += tileWidthSb) { i += 1; } state->tileCols = i; minLog2tileRows = MAX((int)(minLog2Tiles - state->tileColsLog2), 0); state->tileRowsLog2 = minLog2tileRows; while (state->tileRowsLog2 < maxLog2tileRows) { Bool increment_tile_rows_log2 = gf_bs_read_int_log(bs, 1, "increment_tile_rows_log2"); if (increment_tile_rows_log2 == 1) state->tileRowsLog2++; else break; } tileHeightSb = (sbRows + (1 << state->tileRowsLog2) - 1) >> state->tileRowsLog2; i = 0; for (startSb = 0; startSb < sbRows; startSb += tileHeightSb) { i += 1; } state->tileRows = i; } else { u32 startSb, maxTileHeightSb, widestTileSb; widestTileSb = 0; startSb = 0; for (i = 0; startSb < sbCols; i++) { u32 maxWidth = MIN((int)(sbCols - startSb), maxTileWidthSb); u32 width_in_sbs_minus_1 = av1_read_ns(bs, maxWidth, "width_in_sbs_minus_1"); u32 sizeSb = width_in_sbs_minus_1 + 1; widestTileSb = MAX(sizeSb, widestTileSb); startSb += sizeSb; } if (!widestTileSb) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] widest tile is 0, broken bitstream\n")); return; } state->tileCols = i; state->tileColsLog2 = aom_av1_tile_log2(1, state->tileCols); if (minLog2Tiles > 0) maxTileAreaSb = (sbRows * sbCols) >> (minLog2Tiles + 1); else maxTileAreaSb = sbRows * sbCols; maxTileHeightSb = MAX(maxTileAreaSb / widestTileSb, 1); startSb = 0; for (i = 0; startSb < sbRows; i++) { u32 maxHeight = MIN((int)(sbRows - startSb), maxTileHeightSb); u32 height_in_sbs_minus_1 = av1_read_ns(bs, maxHeight, "height_in_sbs_minus_1"); u32 sizeSb = height_in_sbs_minus_1 + 1; startSb += sizeSb; } state->tileRows = i; state->tileRowsLog2 = aom_av1_tile_log2(1, state->tileRows); } if (state->tileColsLog2 > 0 || state->tileRowsLog2 > 0) { gf_bs_read_int_log(bs, state->tileRowsLog2 + state->tileColsLog2, "context_update_tile_id"); state->tile_size_bytes = gf_bs_read_int_log(bs, 2, "tile_size_bytes_minus1") + 1; } } static void superres_params(GF_BitStream *bs, AV1State *state) { u32 SuperresDenom; Bool use_superres; if (state->enable_superres) { use_superres = gf_bs_read_int_log(bs, 1, "use_superres"); } else { use_superres = GF_FALSE; } if (use_superres) { u8 coded_denom = gf_bs_read_int_log(bs, AV1_SUPERRES_DENOM_BITS, "coded_denom"); SuperresDenom = coded_denom + AV1_SUPERRES_DENOM_MIN; } else { SuperresDenom = AV1_SUPERRES_NUM; } state->UpscaledWidth = state->width; state->width = (state->UpscaledWidth * AV1_SUPERRES_NUM + (SuperresDenom / 2)) / SuperresDenom; } static void av1_frame_size(GF_BitStream *bs, AV1State *state, Bool frame_size_override_flag) { if (frame_size_override_flag) { u32 
frame_width_minus_1, frame_height_minus_1; u8 n = state->frame_width_bits_minus_1 + 1; frame_width_minus_1 = gf_bs_read_int_log(bs, n, "frame_width_minus_1"); n = state->frame_height_bits_minus_1 + 1; frame_height_minus_1 = gf_bs_read_int_log(bs, n, "frame_height_minus_1"); state->width = frame_width_minus_1 + 1; state->height = frame_height_minus_1 + 1; } else { state->width = state->sequence_width; state->height = state->sequence_height; } superres_params(bs, state); //compute_image_size(); //no bits } static void av1_render_size(GF_BitStream *bs) { Bool render_and_frame_size_different = gf_bs_read_int_log(bs, 1, "render_and_frame_size_different_flag"); if (render_and_frame_size_different == GF_TRUE) { gf_bs_read_int_log(bs, 16, "render_width_minus_1"); gf_bs_read_int_log(bs, 16, "render_height_minus_1"); //RenderWidth = render_width_minus_1 + 1; //RenderHeight = render_height_minus_1 + 1; } else { //RenderWidth = UpscaledWidth; //RenderHeight = FrameHeight; } } static void read_interpolation_filter(GF_BitStream *bs) { Bool is_filter_switchable = gf_bs_read_int_log(bs, 1, "is_filter_switchable"); if (!is_filter_switchable) { /*interpolation_filter =*/ gf_bs_read_int_log(bs, 2, "interpolation_filter"); } } static void frame_size_with_refs(GF_BitStream *bs, AV1State *state, Bool frame_size_override_flag) { Bool found_ref = GF_FALSE; u32 i = 0; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { found_ref = gf_bs_read_int_log_idx(bs, 1, "found_ref", i); if (found_ref == 1) { #if 0 UpscaledWidth = RefUpscaledWidth[ref_frame_idx[i]]; FrameWidth = UpscaledWidth; FrameHeight = RefFrameHeight[ref_frame_idx[i]]; RenderWidth = RefRenderWidth[ref_frame_idx[i]]; RenderHeight = RefRenderHeight[ref_frame_idx[i]]; #endif break; } } if (found_ref == 0) { av1_frame_size(bs, state, frame_size_override_flag); av1_render_size(bs); } else { superres_params(bs, state); //compute_image_size(); } } static s32 av1_delta_q(GF_BitStream *bs, const char *name_flag, const char *name) { Bool delta_coded = gf_bs_read_int_log(bs, 1, name_flag); s32 delta_q = 0; if (delta_coded) { u32 signMask = 1 << (7 - 1); delta_q = gf_bs_read_int_log(bs, 7, name); if (delta_q & signMask) delta_q = delta_q - 2 * signMask; } return delta_q; } static u8 Segmentation_Feature_Bits[] = { 8,6,6,6,6,3,0,0 }; static u8 Segmentation_Feature_Signed[] = { 1, 1, 1, 1, 1, 0, 0, 0 }; static u8 av1_get_qindex(Bool ignoreDeltaQ, u32 segmentId, u32 base_q_idx, u32 delta_q_present, u32 CurrentQIndex, Bool segmentation_enabled, u8 *features_SEG_LVL_ALT_Q_enabled, s32 *features_SEG_LVL_ALT_Q) { //If seg_feature_active_idx( segmentId, SEG_LVL_ALT_Q ) is equal to 1 the following ordered steps apply: if (segmentation_enabled && features_SEG_LVL_ALT_Q_enabled[segmentId]) { //Set the variable data equal to FeatureData[ segmentId ][ SEG_LVL_ALT_Q ]. s32 data = features_SEG_LVL_ALT_Q[segmentId]; s32 qindex = base_q_idx + data; //If ignoreDeltaQ is equal to 0 and delta_q_present is equal to 1, set qindex equal to CurrentQIndex + data. if ((ignoreDeltaQ == 0) && (delta_q_present == 1)) qindex = CurrentQIndex + data; //Return Clip3( 0, 255, qindex ). if (qindex < 0) return 0; else if (qindex > 255) return 255; else return (u8)qindex; } //Otherwise, if ignoreDeltaQ is equal to 0 and delta_q_present is equal to 1, return CurrentQIndex. 
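	//In short, the resolution order implemented here is: segment SEG_LVL_ALT_Q feature (base or CurrentQIndex plus FeatureData, clipped to [0,255]) first,
	//then the running CurrentQIndex when delta_q is present and not ignored, and base_q_idx otherwise.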
if ((ignoreDeltaQ == 0) && (delta_q_present == 1)) return CurrentQIndex; //otherwise return base_q_idx; } enum { AV1_RESTORE_NONE = 0, AV1_RESTORE_SWITCHABLE, AV1_RESTORE_WIENER, AV1_RESTORE_SGRPROJ }; #define AV1_GMC_IDENTITY 0 #define AV1_GMC_TRANSLATION 1 #define AV1_GMC_ROTZOOM 2 #define AV1_GMC_AFFINE 3 #define AV1_LAST_FRAME 1 #define AV1_LAST2_FRAME 2 #define AV1_LAST3_FRAME 3 #define AV1_GOLDEN_FRAME 4 #define AV1_BWDREF_FRAME 5 #define AV1_ALTREF2_FRAME 6 #define AV1_ALTREF_FRAME 7 #define GM_ABS_ALPHA_BITS 12 #define GM_ALPHA_PREC_BITS 15 #define GM_ABS_TRANS_ONLY_BITS 9 #define GM_TRANS_ONLY_PREC_BITS 3 #define GM_ABS_TRANS_BITS 12 #define GM_TRANS_PREC_BITS 6 #define WARPEDMODEL_PREC_BITS 16 static u32 av1_decode_subexp(GF_BitStream *bs, s32 numSyms) { s32 i = 0; s32 mk = 0; s32 k = 3; while (1) { s32 b2 = i ? k + i - 1 : k; s32 a = 1 << b2; if (numSyms <= mk + 3 * a) { s32 subexp_final_bits = av1_read_ns(bs, numSyms - mk, NULL); return subexp_final_bits + mk; } else { s32 subexp_more_bits = gf_bs_read_int(bs, 1); if (subexp_more_bits) { i++; mk += a; } else { s32 subexp_bits = gf_bs_read_int(bs, b2); return subexp_bits + mk; } } } } static GFINLINE s32 inverse_recenter(s32 r, u32 v) { if ((s64)v > (s64)(2 * r)) return v; else if (v & 1) return r - ((v + 1) >> 1); else return r + (v >> 1); } static s32 av1_decode_unsigned_subexp_with_ref(GF_BitStream *bs, s32 mx, s32 r) { u32 v = av1_decode_subexp(bs, mx); if ((r < 0) && (-(-r << 1) <= mx)) { return inverse_recenter(r, v); } else if ((r << 1) <= mx) { return inverse_recenter(r, v); } else { return mx - 1 - inverse_recenter(mx - 1 - r, v); } } static s16 av1_decode_signed_subexp_with_ref(GF_BitStream *bs, s32 low, s32 high, s32 r) { s16 x = av1_decode_unsigned_subexp_with_ref(bs, high - low, r - low); return x + low; } static void av1_read_global_param(AV1State *state, GF_BitStream *bs, u8 type, u8 ref, u8 idx) { u8 absBits = GM_ABS_ALPHA_BITS; u8 precBits = GM_ALPHA_PREC_BITS; if (idx < 2) { if (type == AV1_GMC_TRANSLATION) { absBits = GM_ABS_TRANS_ONLY_BITS - (!state->frame_state.allow_high_precision_mv ? 1 : 0); precBits = GM_TRANS_ONLY_PREC_BITS - (!state->frame_state.allow_high_precision_mv ? 1 : 0); } else { absBits = GM_ABS_TRANS_BITS; precBits = GM_TRANS_PREC_BITS; } } s32 precDiff = WARPEDMODEL_PREC_BITS - precBits; s32 round = (idx % 3) == 2 ? (1 << WARPEDMODEL_PREC_BITS) : 0; s32 sub = (idx % 3) == 2 ? (1 << precBits) : 0; s32 mx = (1 << absBits); s32 r = (state->PrevGmParams.coefs[ref][idx] >> precDiff) - sub; s32 val = av1_decode_signed_subexp_with_ref(bs, -mx, mx + 1, r); if (val < 0) { val = -val; state->GmParams.coefs[ref][idx] = (-(val << precDiff) + round); } else { state->GmParams.coefs[ref][idx] = (val << precDiff) + round; } } static s32 av1_get_relative_dist(s32 a, s32 b, AV1State *state) { if (!state->enable_order_hint) return 0; s32 diff = a - b; s32 m = 1 << (state->OrderHintBits - 1); diff = (diff & (m - 1)) - (diff & m); return diff; } static void av1_setup_past_independence(AV1State *state) { u32 ref, i; for (ref = AV1_LAST_FRAME; ref <= AV1_ALTREF_FRAME; ref++) { for (i = 0; i <= 5; i++) { state->PrevGmParams.coefs[ref][i] = ((i % 3 == 2) ? 
1 << WARPEDMODEL_PREC_BITS : 0); } } } static void av1_load_previous(AV1State *state, u8 primary_ref_frame, s8 *ref_frame_idx) { s8 prevFrame = ref_frame_idx[primary_ref_frame]; if (prevFrame < 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] load_previous: prevFrame reference index %d is invalid\n", prevFrame)); } else { state->PrevGmParams = state->SavedGmParams[prevFrame]; // load_loop_filter_params( prevFrame ) // load_segmentation_params( prevFrame ) } } static void av1_decode_frame_wrapup(AV1State *state) { u32 i; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { if ((state->frame_state.refresh_frame_flags >> i) & 1) { state->RefOrderHint[i] = state->frame_state.order_hint; state->SavedGmParams[i] = state->GmParams; state->RefFrameType[i] = state->frame_state.frame_type; } } state->frame_state.seen_frame_header = GF_FALSE; //Otherwise (show_existing_frame is equal to 1), if frame_type is equal to KEY_FRAME, the reference frame loading process as specified in section 7.21 is invoked if ((state->frame_state.show_existing_frame) && (state->frame_state.frame_type == AV1_KEY_FRAME)) { state->frame_state.order_hint = state->RefOrderHint[state->frame_state.frame_to_show_map_idx]; //OrderHints[ j + LAST_FRAME ] is set equal to SavedOrderHints[state->frame_to_show_map_idx ][ j + LAST_FRAME ] for j = 0..REFS_PER_FRAME-1. //gm_params[ ref ][ j ] is set equal to SavedGmParams[ frame_to_show_map_idx ][ ref ][ j ] for ref = LAST_FRAME..ALTREF_FRAME, for j = 0..5. state->GmParams = state->SavedGmParams[state->frame_state.frame_to_show_map_idx]; } } static s32 find_latest_forward(u32 curFrameHint, u8 *shiftedOrderHints, u8 *usedFrame) { u32 i; s32 ref = -1; s32 latestOrderHint = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (!usedFrame[i] && ((u32)hint < curFrameHint) && (ref < 0 || hint >= latestOrderHint)) { ref = i; latestOrderHint = hint; } } return ref; } //see 7.8 of AV1 spec static void av1_set_frame_refs(AV1State *state, u8 last_frame_idx, u8 gold_frame_idx, s8 *ref_frame_idx) { u32 i; u8 usedFrame[AV1_NUM_REF_FRAMES]; u8 shiftedOrderHints[AV1_NUM_REF_FRAMES]; for (i = 0; i < AV1_REFS_PER_FRAME; i++) ref_frame_idx[i] = -1; ref_frame_idx[AV1_LAST_FRAME - AV1_LAST_FRAME] = last_frame_idx; ref_frame_idx[AV1_GOLDEN_FRAME - AV1_LAST_FRAME] = gold_frame_idx; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { usedFrame[i] = 0; } usedFrame[last_frame_idx] = 1; usedFrame[gold_frame_idx] = 1; u32 curFrameHint = 1 << (state->OrderHintBits - 1); for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { shiftedOrderHints[i] = curFrameHint + av1_get_relative_dist(state->RefOrderHint[i], state->frame_state.order_hint, state); } u8 lastOrderHint = shiftedOrderHints[last_frame_idx]; u8 goldOrderHint = shiftedOrderHints[gold_frame_idx]; //It is a requirement of bitstream conformance that lastOrderHint is strictly less than curFrameHint. if (lastOrderHint >= curFrameHint) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] non conformant bitstream detected while setting up frame refs: lastOrderHint(%d) shall be stricly less than curFrameHint(%d)\n", lastOrderHint, curFrameHint)); } //It is a requirement of bitstream conformance that goldOrderHint is strictly less than curFrameHint. 
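	//Note: both conformance checks below compare entries of shiftedOrderHints[], i.e. order hints recentered around
	//curFrameHint = 1 << (OrderHintBits - 1) via av1_get_relative_dist(), so "earlier than the current frame" stays a
	//plain '<' comparison even when the order_hint counter wraps.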
	if (goldOrderHint >= curFrameHint) {
		GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] non conformant bitstream detected while setting up frame refs: goldOrderHint(%d) shall be strictly less than curFrameHint(%d)\n", goldOrderHint, curFrameHint));
	}

	//find_latest_backward() {
	s32 ref = -1;
	s32 latestOrderHint = 0;
	for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
		s32 hint = shiftedOrderHints[i];
		if (!usedFrame[i] && ((u32)hint >= curFrameHint) && (ref < 0 || hint >= latestOrderHint)) {
			ref = i;
			latestOrderHint = hint;
		}
	}
	if (ref >= 0) {
		ref_frame_idx[AV1_ALTREF_FRAME - AV1_LAST_FRAME] = ref;
		usedFrame[ref] = 1;
	}
	//find_earliest_backward() for BWDREF_FRAME
	ref = -1;
	s32 earliestOrderHint = 0;
	for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
		s32 hint = shiftedOrderHints[i];
		if (!usedFrame[i] && ((u32)hint >= curFrameHint) && (ref < 0 || hint < earliestOrderHint)) {
			ref = i;
			earliestOrderHint = hint;
		}
	}
	if (ref >= 0) {
		ref_frame_idx[AV1_BWDREF_FRAME - AV1_LAST_FRAME] = ref;
		usedFrame[ref] = 1;
	}
	//find_earliest_backward() for ALTREF2_FRAME
	ref = -1;
	earliestOrderHint = 0;
	for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
		s32 hint = shiftedOrderHints[i];
		if (!usedFrame[i] && ((u32)hint >= curFrameHint) && (ref < 0 || hint < earliestOrderHint)) {
			ref = i;
			earliestOrderHint = hint;
		}
	}
	if (ref >= 0) {
		ref_frame_idx[AV1_ALTREF2_FRAME - AV1_LAST_FRAME] = ref;
		usedFrame[ref] = 1;
	}

	//The remaining references are set to be forward references in anti-chronological order as follows:
	const u8 Ref_Frame_List[AV1_REFS_PER_FRAME - 2] = {
		AV1_LAST2_FRAME, AV1_LAST3_FRAME, AV1_BWDREF_FRAME, AV1_ALTREF2_FRAME, AV1_ALTREF_FRAME
	};
	for (i = 0; i < AV1_REFS_PER_FRAME - 2; i++) {
		u8 refFrame = Ref_Frame_List[i];
		if (ref_frame_idx[refFrame - AV1_LAST_FRAME] < 0) {
			s32 last_ref = find_latest_forward(curFrameHint, shiftedOrderHints, usedFrame);
			if (last_ref >= 0) {
				ref_frame_idx[refFrame - AV1_LAST_FRAME] = last_ref;
				usedFrame[last_ref] = 1;
			}
		}
	}
	//Finally, any remaining references are set to the reference frame with smallest output order as follows:
	ref = -1;
	for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
		s32 hint = shiftedOrderHints[i];
		if (ref < 0 || hint < earliestOrderHint) {
			ref = i;
			earliestOrderHint = hint;
		}
	}
	for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
		if (ref_frame_idx[i] < 0) {
			ref_frame_idx[i] = ref;
		}
	}
}

static void av1_parse_uncompressed_header(GF_BitStream *bs, AV1State *state)
{
	Bool error_resilient_mode = GF_FALSE, allow_screen_content_tools = GF_FALSE, force_integer_mv = GF_FALSE;
	Bool /*use_ref_frame_mvs = GF_FALSE,*/ FrameIsIntra = GF_FALSE, frame_size_override_flag = GF_FALSE;
	Bool disable_cdf_update = GF_FALSE;
	u8 showable_frame;
	u8 primary_ref_frame;
	u16 idLen = 0;
	u32 idx;
	s8 ref_frame_idx[AV1_REFS_PER_FRAME];
	AV1StateFrame *frame_state = &state->frame_state;

	if (state->frame_id_numbers_present_flag) {
		idLen = (state->additional_frame_id_length_minus_1 + state->delta_frame_id_length_minus_2 + 3);
	}
	frame_state->refresh_frame_flags = 0;
	showable_frame = 0;
	if (state->reduced_still_picture_header) {
		frame_state->key_frame = GF_TRUE;
		FrameIsIntra = GF_TRUE;
		frame_state->frame_type = AV1_KEY_FRAME;
		frame_state->show_frame = GF_TRUE;
		frame_state->show_existing_frame = 0;
	}
	else {
		frame_state->show_existing_frame = gf_bs_read_int_log(bs, 1, "show_existing_frame");
		if (frame_state->show_existing_frame == GF_TRUE) {
			frame_state->frame_to_show_map_idx = gf_bs_read_int_log(bs, 3, "frame_to_show_map_idx");
			frame_state->frame_type = state->RefFrameType[frame_state->frame_to_show_map_idx];
			if
(state->decoder_model_info_present_flag && !state->equal_picture_interval) { gf_bs_read_int_log(bs, state->frame_presentation_time_length, "frame_presentation_time"); } frame_state->refresh_frame_flags = 0; if (state->frame_id_numbers_present_flag) { gf_bs_read_int_log(bs, idLen, "display_frame_id"); } if (frame_state->frame_type == AV1_KEY_FRAME) { frame_state->refresh_frame_flags = AV1_ALL_FRAMES; } /* if (film_grain_params_present) { load_grain_params(frame_to_show_map_idx) }*/ return; } frame_state->frame_type = gf_bs_read_int_log(bs, 2, "frame_type"); FrameIsIntra = (frame_state->frame_type == AV1_INTRA_ONLY_FRAME || frame_state->frame_type == AV1_KEY_FRAME); frame_state->show_frame = gf_bs_read_int_log(bs, 1, "show_frame"); if (frame_state->is_first_frame) { frame_state->key_frame = frame_state->seen_seq_header && frame_state->show_frame && frame_state->frame_type == AV1_KEY_FRAME && frame_state->seen_frame_header; } if (frame_state->show_frame && state->decoder_model_info_present_flag && !state->equal_picture_interval) { gf_bs_read_int_log(bs, state->frame_presentation_time_length, "frame_presentation_time"); } if (frame_state->show_frame) { showable_frame = frame_state->frame_type != AV1_KEY_FRAME; } else { showable_frame = gf_bs_read_int_log(bs, 1, "showable_frame"); } if (frame_state->frame_type == AV1_SWITCH_FRAME || (frame_state->frame_type == AV1_KEY_FRAME && frame_state->show_frame)) error_resilient_mode = GF_TRUE; else error_resilient_mode = gf_bs_read_int_log(bs, 1, "error_resilient_mode"); } if ((frame_state->frame_type == AV1_KEY_FRAME) && frame_state->show_frame) { u32 i; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { state->RefValid[i] = 0; state->RefOrderHint[i] = 0; } for (i = 0; i < AV1_REFS_PER_FRAME; i++) { state->OrderHints[AV1_LAST_FRAME + i] = 0; } } disable_cdf_update = gf_bs_read_int_log(bs, 1, "disable_cdf_update"); if (state->seq_force_screen_content_tools == 2/*SELECT_SCREEN_CONTENT_TOOLS*/) { allow_screen_content_tools = gf_bs_read_int_log(bs, 1, "allow_screen_content_tools"); } else { allow_screen_content_tools = state->seq_force_screen_content_tools; } if (allow_screen_content_tools) { if (state->seq_force_integer_mv == 2/*SELECT_INTEGER_MV*/) { force_integer_mv = gf_bs_read_int_log(bs, 1, "force_integer_mv"); } else { force_integer_mv = state->seq_force_integer_mv; } } else { force_integer_mv = 0; } if (FrameIsIntra) { force_integer_mv = 1; } if (state->frame_id_numbers_present_flag) { gf_bs_read_int_log(bs, idLen, "current_frame_id"); } if (frame_state->frame_type == AV1_SWITCH_FRAME) frame_size_override_flag = GF_TRUE; else if (state->reduced_still_picture_header) frame_size_override_flag = GF_FALSE; else frame_size_override_flag = gf_bs_read_int_log(bs, 1, "frame_size_override_flag"); frame_state->order_hint = gf_bs_read_int(bs, state->OrderHintBits); if (FrameIsIntra || error_resilient_mode) { primary_ref_frame = AV1_PRIMARY_REF_NONE; } else { primary_ref_frame = gf_bs_read_int_log(bs, 3, "primary_ref_frame"); } if (state->decoder_model_info_present_flag) { u8 buffer_removal_time_present_flag = gf_bs_read_int_log(bs, 1, "buffer_removal_time_present_flag"); if (buffer_removal_time_present_flag) { u32 opNum; for (opNum = 0; opNum < state->operating_points_count; opNum++) { if (state->decoder_model_present_for_this_op[opNum]) { u8 opPtIdc = state->operating_point_idc[opNum]; u8 inTemporalLayer = (opPtIdc >> state->temporal_id) & 1; u8 inSpatialLayer = (opPtIdc >> (state->spatial_id + 8)) & 1; if (opPtIdc == 0 || (inTemporalLayer && inSpatialLayer)) { 
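						/* operating_point_idc is a layer mask: bit (temporal_id) selects the temporal layer and
						   bit (spatial_id + 8) the spatial layer, 0 meaning "applies to all layers"; buffer_removal_time
						   is therefore only coded for operating points that contain this OBU's layers. */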
gf_bs_read_int_log_idx(bs, state->buffer_removal_time_length, "buffer_removal_time", opNum); } } } } } if (frame_state->frame_type == AV1_SWITCH_FRAME || (frame_state->frame_type == AV1_KEY_FRAME && frame_state->show_frame)) { frame_state->refresh_frame_flags = AV1_ALL_FRAMES; } else { frame_state->refresh_frame_flags = gf_bs_read_int_log(bs, 8, "refresh_frame_flags"); } if (!FrameIsIntra || frame_state->refresh_frame_flags != AV1_ALL_FRAMES) { if (error_resilient_mode && state->enable_order_hint) { u32 i = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { u8 ref_order_hint = gf_bs_read_int_log_idx(bs, state->OrderHintBits, "ref_order_hint", i); if (ref_order_hint != state->RefOrderHint[i]) { state->RefValid[i] = 0; } state->RefOrderHint[i] = ref_order_hint; } } } u8 allow_intrabc = 0; if (frame_state->frame_type == AV1_KEY_FRAME) { av1_frame_size(bs, state, frame_size_override_flag); av1_render_size(bs); if (allow_screen_content_tools && state->UpscaledWidth == state->width) { allow_intrabc = gf_bs_read_int_log(bs, 1, "allow_intrabc"); } } else { if (frame_state->frame_type == AV1_INTRA_ONLY_FRAME) { av1_frame_size(bs, state, frame_size_override_flag); av1_render_size(bs); if (allow_screen_content_tools && state->UpscaledWidth == state->width) { allow_intrabc = gf_bs_read_int_log(bs, 1, "allow_intrabc"); } } else { u32 i = 0; Bool frame_refs_short_signaling = GF_FALSE; if (state->enable_order_hint) { frame_refs_short_signaling = gf_bs_read_int_log(bs, 1, "frame_refs_short_signaling"); if (frame_refs_short_signaling) { u8 last_frame_idx = gf_bs_read_int_log(bs, 3, "last_frame_idx"); u8 gold_frame_idx = gf_bs_read_int_log(bs, 3, "gold_frame_idx"); av1_set_frame_refs(state, last_frame_idx, gold_frame_idx, ref_frame_idx); } } for (i = 0; i < AV1_REFS_PER_FRAME; i++) { if (!frame_refs_short_signaling) ref_frame_idx[i] = gf_bs_read_int_log_idx(bs, 3, "ref_frame_idx", i); if (state->frame_id_numbers_present_flag) { u32 n = state->delta_frame_id_length_minus_2 + 2; /*delta_frame_id_minus_1 =*/ gf_bs_read_int_log_idx(bs, n, "delta_frame_id_minus1", i); //DeltaFrameId = delta_frame_id_minus_1 + 1; //expectedFrameId[i] = ((current_frame_id + (1 << idLen) - DeltaFrameId) % (1 << idLen)); } } if (frame_size_override_flag && !error_resilient_mode) { frame_size_with_refs(bs, state, frame_size_override_flag); } else { av1_frame_size(bs, state, frame_size_override_flag); av1_render_size(bs); } frame_state->allow_high_precision_mv = 0; if (!force_integer_mv) { frame_state->allow_high_precision_mv = gf_bs_read_int_log(bs, 1, "allow_high_precision_mv"); } read_interpolation_filter(bs); gf_bs_read_int_log(bs, 1, "is_motion_mode_switchable"); if (!(error_resilient_mode || !state->enable_ref_frame_mvs)) { gf_bs_read_int_log(bs, 1, "use_ref_frame_mvs"); } } } if (!FrameIsIntra) { u32 i; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { u8 refFrame = AV1_LAST_FRAME + i; u8 ridx = ref_frame_idx[i]; if (ridx >= 0) { u8 hint = state->RefOrderHint[ridx]; state->OrderHints[refFrame] = hint; /* if ( !enable_order_hint ) { RefFrameSignBias[ refFrame ] = 0; } else { RefFrameSignBias[ refFrame ] = get_relative_dist( hint, OrderHint) > 0; } */ } } } if (!(state->reduced_still_picture_header || disable_cdf_update)) gf_bs_read_int_log(bs, 1, "disable_frame_end_update_cdf"); if (primary_ref_frame == AV1_PRIMARY_REF_NONE) { //init_non_coeff_cdfs(); av1_setup_past_independence(state); } else { //load_cdfs(ref_frame_idx[primary_ref_frame]); av1_load_previous(state, primary_ref_frame, ref_frame_idx); } av1_parse_tile_info(bs, state); 
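	/* The next block mirrors quantization_params() from the spec: base_q_idx on 8 bits, then optional DC/AC delta
	   quantizers per plane, each coded by av1_delta_q() above as a presence flag plus a 7-bit value interpreted as
	   signed (the sign bit is folded back via the 1<<6 mask). */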
//quantization_params( ): u8 base_q_idx = gf_bs_read_int(bs, 8); s32 DeltaQUDc = 0; s32 DeltaQUAc = 0; s32 DeltaQVDc = 0; s32 DeltaQVAc = 0; s32 DeltaQYDc = av1_delta_q(bs, "DeltaQYDc_coded", "DeltaQYDc"); if (!state->config->monochrome) { u8 diff_uv_delta = 0; if (state->separate_uv_delta_q) diff_uv_delta = gf_bs_read_int(bs, 1); DeltaQUDc = av1_delta_q(bs, "DeltaQUDc_coded", "DeltaQUDc"); DeltaQUAc = av1_delta_q(bs, "DeltaQUAc_coded", "DeltaQUAc"); if (diff_uv_delta) { DeltaQVDc = av1_delta_q(bs, "DeltaQVDc_coded", "DeltaQVDc"); DeltaQVAc = av1_delta_q(bs, "DeltaQVAc_coded", "DeltaQVAc"); } } if (gf_bs_read_int_log(bs, 1, "using_qmatrix")) { gf_bs_read_int_log(bs, 4, "qm_y"); gf_bs_read_int_log(bs, 4, "qm_u"); if (!state->separate_uv_delta_q) { gf_bs_read_int_log(bs, 4, "qm_v"); } } u8 seg_features_SEG_LVL_ALT_Q_enabled[8] = { 0,0,0,0,0,0,0,0 }; s32 seg_features_SEG_LVL_ALT_Q[8] = { 0,0,0,0,0,0,0,0 }; //segmentation_params( ): u8 segmentation_enabled = gf_bs_read_int_log(bs, 1, "segmentation_enabled"); if (segmentation_enabled) { /*u8 segmentation_temporal_update = 0;*/ u8 segmentation_update_data = 1; if (primary_ref_frame != AV1_PRIMARY_REF_NONE) { u8 segmentation_update_map = gf_bs_read_int_log(bs, 1, "segmentation_update_map"); if (segmentation_update_map == 1) gf_bs_read_int_log(bs, 1, "segmentation_temporal_update"); segmentation_update_data = gf_bs_read_int_log(bs, 1, "segmentation_update_data"); } if (segmentation_update_data == 1) { u32 i, j; for (i = 0; i < 8/*=MAX_SEGMENTS*/; i++) { for (j = 0; j < 8 /*=SEG_LVL_MAX*/; j++) { if (/*feature_enabled = */gf_bs_read_int_log_idx2(bs, 1, "feature_enabled", i, j) == 1) { s32 val; u32 bitsToRead = Segmentation_Feature_Bits[j]; //this is SEG_LVL_ALT_Q if (!j) seg_features_SEG_LVL_ALT_Q_enabled[i] = 1; if (Segmentation_Feature_Signed[j] == 1) { val = gf_bs_read_int_log_idx2(bs, 1 + bitsToRead, "signed_feature_value", i, j); } else { val = gf_bs_read_int_log_idx2(bs, bitsToRead, "feature_value", i, j); } if (!j) seg_features_SEG_LVL_ALT_Q[i] = val; } } } //ignore all init steps } } //delta_q_params(): /*u8 delta_q_res = 0;*/ u8 delta_q_present = 0; if (base_q_idx > 0) { delta_q_present = gf_bs_read_int_log(bs, 1, "delta_q_present"); } if (delta_q_present) { gf_bs_read_int_log(bs, 2, "delta_q_res"); } //delta_lf_params(): u8 delta_lf_present = 0; /*u8 delta_lf_res = 0; u8 delta_lf_multi = 0;*/ if (delta_q_present) { if (!allow_intrabc) { delta_lf_present = gf_bs_read_int_log(bs, 1, "delta_lf_present"); } if (delta_lf_present) { gf_bs_read_int_log(bs, 2, "delta_lf_res"); gf_bs_read_int_log(bs, 1, "delta_lf_multi"); } } //init lossless stuff! 
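	/* CodedLossless holds only if every segment resolves to qindex 0 with all DC/AC deltas equal to 0;
	   AllLossless additionally requires that superres is not scaling the frame (width == UpscaledWidth).
	   Both flags gate the loop filter, CDEF and loop restoration syntax parsed below. */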
u8 CodedLossless = 1; for (idx = 0; idx < 8; idx++) { u8 qindex = av1_get_qindex(GF_TRUE, idx, base_q_idx, delta_q_present, 0/*CurrentQIndex always ignored at this level of parsin*/, segmentation_enabled, seg_features_SEG_LVL_ALT_Q_enabled, seg_features_SEG_LVL_ALT_Q); Bool LosslessArray = (qindex == 0) && (DeltaQYDc == 0) && (DeltaQUAc == 0) && (DeltaQUDc == 0) && (DeltaQVAc == 0) && (DeltaQVDc == 0); if (!LosslessArray) CodedLossless = 0; } Bool AllLossless = CodedLossless && (state->width == state->UpscaledWidth); //loop_filter_params(): if (!CodedLossless && !allow_intrabc) { u8 loop_filter_level_0 = gf_bs_read_int_log(bs, 6, "loop_filter_level_0"); u8 loop_filter_level_1 = gf_bs_read_int_log(bs, 6, "loop_filter_level_1"); if (!state->config->monochrome) { if (loop_filter_level_0 || loop_filter_level_1) { gf_bs_read_int_log(bs, 6, "loop_filter_level_2"); gf_bs_read_int_log(bs, 6, "loop_filter_level_3"); } } gf_bs_read_int_log(bs, 3, "loop_filter_sharpness"); u8 loop_filter_delta_enabled = gf_bs_read_int_log(bs, 1, "loop_filter_delta_enabled"); if (loop_filter_delta_enabled == 1) { u8 loop_filter_delta_update = gf_bs_read_int_log(bs, 1, "loop_filter_delta_update"); if (loop_filter_delta_update) { u32 i; for (i = 0; i < 8/*TOTAL_REFS_PER_FRAME*/; i++) { u8 update_ref_delta = gf_bs_read_int_log_idx(bs, 1, "update_ref_delta", i); if (update_ref_delta == 1) { gf_bs_read_int_log_idx(bs, 1 + 6, "loop_filter_ref_deltas", i); } } for (i = 0; i < 2; i++) { u8 update_mode_delta = gf_bs_read_int_log_idx(bs, 1, "update_mode_delta", i); if (update_mode_delta) { gf_bs_read_int_log_idx(bs, 1 + 6, "loop_filter_mode_deltas", i); } } } } } //cdef_params( ): if (!CodedLossless && !allow_intrabc && state->enable_cdef) { gf_bs_read_int_log(bs, 2, "cdef_damping_minus_3"); u8 cdef_bits = gf_bs_read_int_log(bs, 2, "cdef_bits"); u32 i, num_cd = 1 << cdef_bits; for (i = 0; i < num_cd; i++) { gf_bs_read_int_log_idx(bs, 4, "cdef_y_pri_strength", i); gf_bs_read_int_log_idx(bs, 2, "cdef_y_sec_strength", i); if (!state->config->monochrome) { gf_bs_read_int_log_idx(bs, 4, "cdef_uv_pri_strength", i); gf_bs_read_int_log_idx(bs, 2, "cdef_uv_sec_strength", i); } } } //lr_params( ) : if (!AllLossless && !allow_intrabc && state->enable_restoration) { u32 i, nb_planes = state->config->monochrome ? 
1 : 3; u8 UsesLr = 0; u8 usesChromaLr = 0; for (i = 0; i < nb_planes; i++) { u8 lr_type = gf_bs_read_int_log_idx(bs, 2, "lr_type", i); //FrameRestorationType[i] = Remap_Lr_Type[lr_type] if (lr_type != AV1_RESTORE_NONE) { UsesLr = 1; if (i > 0) { usesChromaLr = 1; } } } if (UsesLr) { if (state->use_128x128_superblock) { gf_bs_read_int_log(bs, 1, "lr_unit_shift_minus_1"); } else { u8 lr_unit_shift = gf_bs_read_int_log(bs, 1, "lr_unit_shift"); if (lr_unit_shift) { gf_bs_read_int_log(bs, 1, "lr_unit_extra_shift"); //lr_unit_shift += lr_unit_extra_shift; } } if (state->config->chroma_subsampling_x && state->config->chroma_subsampling_y && usesChromaLr) { gf_bs_read_int_log(bs, 1, "lr_uv_shift"); } } } //read_tx_mode(): if (CodedLossless == 1) { } else { gf_bs_read_int_log(bs, 1, "tx_mode_select"); } //frame_reference_mode( ): u8 reference_select = 0; if (FrameIsIntra) { } else { reference_select = gf_bs_read_int_log(bs, 1, "reference_select"); } //skip_mode_params( ): u8 skipModeAllowed = 0; if (FrameIsIntra || !reference_select || !state->enable_order_hint) { } else { u32 i; s32 forwardIdx = -1; s32 backwardIdx = -1; s32 forwardHint = 0; s32 backwardHint = 0; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { u8 refHint = state->RefOrderHint[ref_frame_idx[i]]; if (av1_get_relative_dist(refHint, frame_state->order_hint, state) < 0) { if (forwardIdx < 0 || av1_get_relative_dist(refHint, forwardHint, state) > 0) { forwardIdx = i; forwardHint = refHint; } } else if (av1_get_relative_dist(refHint, frame_state->order_hint, state) > 0) { if (backwardIdx < 0 || av1_get_relative_dist(refHint, backwardHint, state) < 0) { backwardIdx = i; backwardHint = refHint; } } } if (forwardIdx < 0) { skipModeAllowed = 0; } else if (backwardIdx >= 0) { skipModeAllowed = 1; //SkipModeFrame[0] = AV1_LAST_FRAME + MIN(forwardIdx, backwardIdx); //SkipModeFrame[1] = AV1_LAST_FRAME + MAX(forwardIdx, backwardIdx); } else { s32 secondForwardIdx = -1; s32 secondForwardHint = 0; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { u8 refHint = state->RefOrderHint[ref_frame_idx[i]]; if (av1_get_relative_dist(refHint, forwardHint, state) < 0) { if (secondForwardIdx < 0 || av1_get_relative_dist(refHint, secondForwardHint, state) > 0) { secondForwardIdx = i; secondForwardHint = refHint; } } } if (secondForwardIdx < 0) { skipModeAllowed = 0; } else { skipModeAllowed = 1; //SkipModeFrame[ 0 ] = LAST_FRAME + Min(forwardIdx, secondForwardIdx) //SkipModeFrame[ 1 ] = LAST_FRAME + Max(forwardIdx, secondForwardIdx) } } } if (skipModeAllowed) { gf_bs_read_int_log(bs, 1, "skip_mode_present"); } if (FrameIsIntra || error_resilient_mode || !state->enable_warped_motion) { } else { gf_bs_read_int_log(bs, 1, "allow_warped_motion"); } gf_bs_read_int_log(bs, 1, "reduced_tx"); //global_motion_params( ) u32 ref; for (ref = AV1_LAST_FRAME; ref <= AV1_ALTREF_FRAME; ref++) { u32 i; for (i = 0; i < 6; i++) { state->GmParams.coefs[ref][i] = ((i % 3 == 2) ? 1 << WARPEDMODEL_PREC_BITS : 0); } } if (!FrameIsIntra) { u32 refs; for (refs = AV1_LAST_FRAME; refs <= AV1_ALTREF_FRAME; refs++) { u8 type = AV1_GMC_IDENTITY; Bool is_global = gf_bs_read_int_log_idx(bs, 1, "is_global", refs); if (is_global) { Bool is_rot_zoom = gf_bs_read_int_log_idx(bs, 1, "is_rot_zoom", refs); if (is_rot_zoom) { type = AV1_GMC_ROTZOOM; } else { Bool is_trans = gf_bs_read_int_log_idx(bs, 1, "is_translation", refs); type = is_trans ? 
AV1_GMC_TRANSLATION : AV1_GMC_AFFINE; } } if (type >= AV1_GMC_ROTZOOM) { av1_read_global_param(state, bs, type, refs, 2); av1_read_global_param(state, bs, type, refs, 3); if (type == AV1_GMC_AFFINE) { av1_read_global_param(state, bs, type, refs, 4); av1_read_global_param(state, bs, type, refs, 5); } else { state->GmParams.coefs[refs][4] = -state->GmParams.coefs[refs][3]; state->GmParams.coefs[refs][5] = state->GmParams.coefs[refs][2]; } } if (type >= AV1_GMC_TRANSLATION) { av1_read_global_param(state, bs, type, refs, 0); av1_read_global_param(state, bs, type, refs, 1); } } } //film_grain_params() if (!state->film_grain_params_present || (!state->frame_state.show_frame && !showable_frame)) { } else { u8 apply_grain = gf_bs_read_int_log(bs, 1, "apply_grain"); if (apply_grain) { gf_bs_read_int_log(bs, 16, "grain_seed"); u8 update_grain = 1; if (state->frame_state.frame_type == AV1_INTER_FRAME) { update_grain = gf_bs_read_int_log(bs, 1, "update_grain"); } if (!update_grain) { gf_bs_read_int_log(bs, 3, "film_grain_params_ref_idx"); } else { u32 i, num_y_points = gf_bs_read_int_log(bs, 4, "num_y_points"); for (i = 0; i < num_y_points; i++) { gf_bs_read_int_log_idx(bs, 8, "point_y_value", i); gf_bs_read_int_log_idx(bs, 8, "point_y_scaling", i); } u8 chroma_scaling_from_luma = 0; if (!state->config->monochrome) chroma_scaling_from_luma = gf_bs_read_int_log(bs, 1, "chroma_scaling_from_luma"); u8 num_cb_points = 0; u8 num_cr_points = 0; if (state->config->monochrome || chroma_scaling_from_luma || ((state->config->chroma_subsampling_x == 1) && (state->config->chroma_subsampling_y == 1) && (num_y_points == 0)) ) { } else { num_cb_points = gf_bs_read_int_log(bs, 4, "num_cb_points"); for (i = 0; i < num_cb_points; i++) { gf_bs_read_int_log_idx(bs, 8, "point_cb_value", i); gf_bs_read_int_log_idx(bs, 8, "point_cb_scaling", i); } num_cr_points = gf_bs_read_int_log(bs, 4, "num_cr_points"); for (i = 0; i < num_cr_points; i++) { gf_bs_read_int_log_idx(bs, 8, "point_cr_value", i); gf_bs_read_int_log_idx(bs, 8, "point_cr_scaling", i); } } gf_bs_read_int_log(bs, 2, "grain_scaling_minus_8"); u8 ar_coeff_lag = gf_bs_read_int_log(bs, 2, "ar_coeff_lag"); u16 numPosLuma = 2 * ar_coeff_lag * (ar_coeff_lag + 1); u16 numPosChroma = numPosLuma; if (num_y_points) { numPosChroma = numPosLuma + 1; for (i = 0; i < numPosLuma; i++) { gf_bs_read_int_log_idx(bs, 8, "ar_coeffs_y_plus_128", i); } } if (chroma_scaling_from_luma || num_cb_points) { for (i = 0; i < numPosChroma; i++) { gf_bs_read_int_log_idx(bs, 8, "ar_coeffs_cb_plus_128", i); } } if (chroma_scaling_from_luma || num_cr_points) { for (i = 0; i < numPosChroma; i++) { gf_bs_read_int_log_idx(bs, 8, "ar_coeffs_cr_plus_128", i); } } gf_bs_read_int_log(bs, 2, "ar_coeff_shift_minus_6"); gf_bs_read_int_log(bs, 2, "grain_scale_shift"); if (num_cb_points) { gf_bs_read_int_log(bs, 8, "cb_mult"); gf_bs_read_int_log(bs, 8, "cb_luma_mult"); gf_bs_read_int_log(bs, 9, "cb_offset"); } if (num_cr_points) { gf_bs_read_int_log(bs, 8, "cr_mult"); gf_bs_read_int_log(bs, 8, "cr_luma_mult"); gf_bs_read_int_log(bs, 9, "cr_offset"); } gf_bs_read_int_log(bs, 1, "overlap_flag"); gf_bs_read_int_log(bs, 1, "clip_to_restricted_range"); } } } //end of uncompressed header !! 
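	/* Most of the syntax above (loop filter, CDEF, loop restoration, film grain) is read only to keep the bit
	   position in sync and is logged then discarded; sizing and ordering fields (width/height, order hints,
	   global motion params, refresh_frame_flags) are the ones kept in AV1State for later use. */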
}

GF_EXPORT
void gf_av1_init_state(AV1State *state)
{
	if (!state) return;
	memset(state, 0, sizeof(AV1State));
	state->color_primaries = 2;
	state->transfer_characteristics = 2;
	state->matrix_coefficients = 2;
}

GF_EXPORT
void gf_av1_reset_state(AV1State *state, Bool is_destroy)
{
	GF_List *l1, *l2;

	if (state->frame_state.header_obus) {
		while (gf_list_count(state->frame_state.header_obus)) {
			GF_AV1_OBUArrayEntry *a = (GF_AV1_OBUArrayEntry*)gf_list_pop_back(state->frame_state.header_obus);
			if (a->obu) gf_free(a->obu);
			gf_free(a);
		}
	}

	if (state->frame_state.frame_obus) {
		while (gf_list_count(state->frame_state.frame_obus)) {
			GF_AV1_OBUArrayEntry *a = (GF_AV1_OBUArrayEntry*)gf_list_pop_back(state->frame_state.frame_obus);
			if (a->obu) gf_free(a->obu);
			gf_free(a);
		}
	}
	l1 = state->frame_state.frame_obus;
	l2 = state->frame_state.header_obus;
	memset(&state->frame_state, 0, sizeof(AV1StateFrame));
	state->frame_state.is_first_frame = GF_TRUE;

	if (is_destroy) {
		gf_list_del(l1);
		gf_list_del(l2);
		if (state->bs) {
			if (gf_bs_get_position(state->bs)) {
				u32 size;
				gf_bs_get_content_no_truncate(state->bs, &state->frame_obus, &size, &state->frame_obus_alloc);
			}
			gf_bs_del(state->bs);
		}
		state->bs = NULL;
	}
	else {
		state->frame_state.frame_obus = l1;
		state->frame_state.header_obus = l2;
		if (state->bs)
			gf_bs_seek(state->bs, 0);
	}
}

static GF_Err av1_parse_tile_group(GF_BitStream *bs, AV1State *state, u64 obu_start, u64 obu_size)
{
	u32 TileNum, tg_start = 0, tg_end = 0;
	u32 numTiles = state->tileCols * state->tileRows;
	Bool tile_start_and_end_present_flag = GF_FALSE;
	GF_Err e = GF_OK;
	if (numTiles > 1)
		tile_start_and_end_present_flag = gf_bs_read_int(bs, 1);

	if (numTiles == 1 || !tile_start_and_end_present_flag) {
		tg_start = 0;
		tg_end = numTiles - 1;
		/*state->frame_state.tg[0].start_idx = 0;
		state->frame_state.tg[0].end_idx = numTiles - 1;*/
	}
	else {
		u32 tileBits = state->tileColsLog2 + state->tileRowsLog2;
		/*state->frame_state.tg[state->frame_state.tg_idx].start_idx*/ tg_start = gf_bs_read_int(bs, tileBits);
		/*state->frame_state.tg[state->frame_state.tg_idx].end_idx*/ tg_end = gf_bs_read_int(bs, tileBits);
	}
	/*state->frame_state.tg_idx++;*/

	gf_bs_align(bs);

	if (tg_end >= GF_ARRAY_LENGTH(state->frame_state.tiles))
		return GF_NON_COMPLIANT_BITSTREAM;

	state->frame_state.nb_tiles_in_obu = 0;
	for (TileNum = tg_start; TileNum <= tg_end; TileNum++) {
		u32 tile_start_offset, tile_size;
		/*u32 tileRow = TileNum / state->tileCols;
		u32 tileCol = TileNum % state->tileCols;*/
		Bool lastTile = TileNum == tg_end;
		u64 pos = gf_bs_get_position(bs);
		if (lastTile) {
			tile_start_offset = (u32)(pos - obu_start);
			tile_size = (u32)(obu_size - (pos - obu_start));
		}
		else {
			u64 tile_size_minus_1 = aom_av1_le(bs, state->tile_size_bytes, "tile_size_minus_1");
			pos = gf_bs_get_position(bs);
			tile_start_offset = (u32)(pos - obu_start);
			tile_size = (u32)(tile_size_minus_1 + 1/* + state->tile_size_bytes*/);
		}

		if (tile_start_offset + tile_size > obu_size) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Error parsing tile group, tile %d start %d + size %d exceeds OBU length "LLU"\n", TileNum, tile_start_offset, tile_size, obu_size));
			e = GF_NON_COMPLIANT_BITSTREAM;
			break;
		}

		state->frame_state.tiles[state->frame_state.nb_tiles_in_obu].obu_start_offset = tile_start_offset;
		state->frame_state.tiles[state->frame_state.nb_tiles_in_obu].size = tile_size;
		gf_bs_skip_bytes(bs, tile_size);
		state->frame_state.nb_tiles_in_obu++;
	}
	if (tg_end == numTiles - 1) {
		av1_decode_frame_wrapup(state);
	}
	return e;
}

static void av1_parse_frame_header(GF_BitStream *bs, AV1State
*state) { AV1StateFrame *frame_state = &state->frame_state; if (frame_state->seen_frame_header == GF_FALSE) { u64 pos = gf_bs_get_position(bs); state->frame_state.show_existing_frame = GF_FALSE; frame_state->seen_frame_header = GF_TRUE; av1_parse_uncompressed_header(bs, state); state->frame_state.is_first_frame = GF_FALSE; state->frame_state.uncompressed_header_bytes = (u32) (gf_bs_get_position(bs) - pos); if (state->frame_state.show_existing_frame) { av1_decode_frame_wrapup(state); frame_state->seen_frame_header = GF_FALSE; } else { //TileNum = 0; frame_state->seen_frame_header = GF_TRUE; } } } static GF_Err av1_parse_frame(GF_BitStream *bs, AV1State *state, u64 obu_start, u64 obu_size) { av1_parse_frame_header(bs, state); //byte alignment gf_bs_align(bs); return av1_parse_tile_group(bs, state, obu_start, obu_size); } static void on_aom_av1_eos(void *_state) { AV1State *state = (AV1State *)_state; state->bs_overread = GF_TRUE; } GF_EXPORT GF_Err gf_av1_parse_obu(GF_BitStream *bs, ObuType *obu_type, u64 *obu_size, u32 *obu_hdr_size, AV1State *state) { GF_Err e = GF_OK; u32 hdr_size; u64 pos = gf_bs_get_position(bs); if (!bs || !obu_type || !state) return GF_BAD_PARAM; state->bs_overread = GF_FALSE; gf_bs_set_eos_callback(bs, on_aom_av1_eos, state); state->obu_extension_flag = state->obu_has_size_field = 0; state->temporal_id = state->spatial_id = 0; state->frame_state.uncompressed_header_bytes = 0; e = gf_av1_parse_obu_header(bs, obu_type, &state->obu_extension_flag, &state->obu_has_size_field, &state->temporal_id, &state->spatial_id); if (e) return e; if (state->obu_has_size_field) { *obu_size = (u32)gf_av1_leb128_read(bs, NULL); } else { if (*obu_size >= 1 + state->obu_extension_flag) { *obu_size = *obu_size - 1 - state->obu_extension_flag; } else { GF_LOG(state->config ? GF_LOG_WARNING : GF_LOG_DEBUG, GF_LOG_CODING, ("[AV1] computed OBU size "LLD" (input value = "LLU"). 
Skipping.\n", *obu_size - 1 - state->obu_extension_flag, *obu_size)); return GF_NON_COMPLIANT_BITSTREAM; } } hdr_size = (u32)(gf_bs_get_position(bs) - pos); if ((gf_bs_available(bs) < *obu_size) || state->bs_overread) { gf_bs_seek(bs, pos); return GF_BUFFER_TOO_SMALL; } *obu_size += hdr_size; if (obu_hdr_size) *obu_hdr_size = hdr_size; if (*obu_type != OBU_SEQUENCE_HEADER && *obu_type != OBU_TEMPORAL_DELIMITER && state->OperatingPointIdc != 0 && state->obu_extension_flag == 1) { u32 inTemporalLayer = (state->OperatingPointIdc >> state->temporal_id) & 1; u32 inSpatialLayer = (state->OperatingPointIdc >> (state->spatial_id + 8)) & 1; if (!inTemporalLayer || !inSpatialLayer) { *obu_type = -1; gf_bs_seek(bs, pos + *obu_size); return GF_OK; } } e = GF_OK; switch (*obu_type) { case OBU_SEQUENCE_HEADER: av1_parse_sequence_header_obu(bs, state); if (gf_bs_get_position(bs) > pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Sequence header parsing consumed too many bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } gf_bs_seek(bs, pos + *obu_size); break; case OBU_METADATA: #if 0 //TODO + sample groups const ObuMetadataType metadata_type = (u32)read_leb128(bs, NULL); we should check for 16 bits limit(AV1MetadataSampleGroupEntry) for ISOBMFF bindings, see https ://github.com/AOMediaCodec/av1-isobmff/pull/86#issuecomment-416659538 if (metadata_type == OBU_METADATA_TYPE_ITUT_T35) { } else if (metadata_type == OBU_METADATA_TYPE_HDR_CLL) { } else if (metadata_type == OBU_METADATA_TYPE_HDR_MDCV) { } else if (metadata_type == OBU_METADATA_TYPE_SCALABILITY) { } else if (metadata_type == METADATA_TYPE_TIMECODE) { } #endif GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[AV1] parsing for metadata is not implemented. Forwarding.\n")); if (gf_bs_get_position(bs) > pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Metadata parsing consumed too many bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } gf_bs_seek(bs, pos + *obu_size); break; case OBU_FRAME_HEADER: case OBU_REDUNDANT_FRAME_HEADER: if (state->config) { av1_parse_frame_header(bs, state); if (gf_bs_get_position(bs) > pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Frame header parsing consumed too many bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } } gf_bs_seek(bs, pos + *obu_size); break; case OBU_FRAME: e = av1_parse_frame(bs, state, pos, *obu_size); if (gf_bs_get_position(bs) != pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Frame parsing did not consume the right number of bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } gf_bs_seek(bs, pos + *obu_size); break; case OBU_TILE_GROUP: if (state->config) { e = av1_parse_tile_group(bs, state, pos, *obu_size); if (gf_bs_get_position(bs) != pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Tile group parsing did not consume the right number of bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } } gf_bs_seek(bs, pos + *obu_size); break; case OBU_TEMPORAL_DELIMITER: state->frame_state.seen_frame_header = GF_FALSE; case OBU_PADDING: gf_bs_seek(bs, pos + *obu_size); break; default: GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] unknown OBU type %u (size "LLU"). 
Skipping.\n", *obu_type, *obu_size)); gf_bs_seek(bs, pos + *obu_size); break; } return e; } GF_EXPORT GF_Err gf_media_prores_parse_bs(GF_BitStream *bs, GF_ProResFrameInfo *prores_frame) { u32 i, j; u64 start, pos; memset(prores_frame, 0, sizeof(GF_ProResFrameInfo)); start = gf_bs_get_position(bs); if (gf_bs_available(bs) < 10) return GF_BUFFER_TOO_SMALL; prores_frame->frame_size = gf_bs_read_u32(bs); prores_frame->frame_identifier = gf_bs_read_u32(bs); if (prores_frame->frame_identifier != GF_4CC('i','c','p','f')) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[ProRes] Invalid frame identifier, expected \"icpf\" got \"%s\"\n", gf_4cc_to_str(prores_frame->frame_identifier) )); gf_bs_seek(bs, start); return GF_NON_COMPLIANT_BITSTREAM; } /*parse frame header*/ pos = gf_bs_get_position(bs); prores_frame->frame_hdr_size = gf_bs_read_u16(bs); if (gf_bs_available(bs) + 2 < prores_frame->frame_hdr_size) { gf_bs_seek(bs, start); return GF_BUFFER_TOO_SMALL; } gf_bs_read_u8(bs); prores_frame->version = gf_bs_read_u8(bs); prores_frame->encoder_id = gf_bs_read_u32(bs); prores_frame->width = gf_bs_read_u16(bs); prores_frame->height = gf_bs_read_u16(bs); prores_frame->chroma_format = gf_bs_read_int(bs, 2); gf_bs_read_int(bs, 2); prores_frame->interlaced_mode = gf_bs_read_int(bs, 2); gf_bs_read_int(bs, 2); prores_frame->aspect_ratio_information = gf_bs_read_int(bs, 4); prores_frame->framerate_code = gf_bs_read_int(bs, 4); prores_frame->color_primaries = gf_bs_read_u8(bs); prores_frame->transfer_characteristics = gf_bs_read_u8(bs); prores_frame->matrix_coefficients = gf_bs_read_u8(bs); gf_bs_read_int(bs, 4); prores_frame->alpha_channel_type = gf_bs_read_int(bs, 4); gf_bs_read_int(bs, 14); prores_frame->load_luma_quant_matrix = gf_bs_read_int(bs, 1); prores_frame->load_chroma_quant_matrix = gf_bs_read_int(bs, 1); if (prores_frame->load_luma_quant_matrix) { for (i=0; i<8; i++) { for (j=0; j<8; j++) { prores_frame->luma_quant_matrix[i][j] = gf_bs_read_u8(bs); } } } if (prores_frame->load_chroma_quant_matrix) { for (i=0; i<8; i++) { for (j=0; j<8; j++) { prores_frame->chroma_quant_matrix[i][j] = gf_bs_read_u8(bs); } } } pos = gf_bs_get_position(bs) - pos; if (pos != prores_frame->frame_hdr_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[ProRes] Invalid frame header size, expected %d got %d\n", prores_frame->frame_hdr_size, (u32) pos)); gf_bs_seek(bs, start); return GF_NON_COMPLIANT_BITSTREAM; } prores_frame->nb_pic = ((prores_frame->interlaced_mode==1) || (prores_frame->interlaced_mode==2)) ? 
2 : 1; gf_bs_seek(bs, start); return GF_OK; } #endif /*GPAC_DISABLE_AV_PARSERS*/ GF_EXPORT u8 gf_mp3_version(u32 hdr) { return ((hdr >> 19) & 0x3); } GF_EXPORT const char *gf_mp3_version_name(u32 hdr) { u32 v = gf_mp3_version(hdr); switch (v) { case 0: return "MPEG-2.5"; case 1: return "Reserved"; case 2: return "MPEG-2"; case 3: return "MPEG-1"; default: return "Unknown"; } } #ifndef GPAC_DISABLE_AV_PARSERS GF_EXPORT u8 gf_mp3_layer(u32 hdr) { return 4 - (((hdr >> 17) & 0x3)); } GF_EXPORT u8 gf_mp3_num_channels(u32 hdr) { if (((hdr >> 6) & 0x3) == 3) return 1; return 2; } GF_EXPORT u16 gf_mp3_sampling_rate(u32 hdr) { u16 res; /* extract the necessary fields from the MP3 header */ u8 version = gf_mp3_version(hdr); u8 sampleRateIndex = (hdr >> 10) & 0x3; switch (sampleRateIndex) { case 0: res = 44100; break; case 1: res = 48000; break; case 2: res = 32000; break; default: GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[MPEG-1/2 Audio] Samplerate index not valid\n")); return 0; } /*reserved or MPEG-1*/ if (version & 1) return res; /*MPEG-2*/ res /= 2; /*MPEG-2.5*/ if (version == 0) res /= 2; return res; } GF_EXPORT u16 gf_mp3_window_size(u32 hdr) { u8 version = gf_mp3_version(hdr); u8 layer = gf_mp3_layer(hdr); if (layer == 3) { if (version == 3) return 1152; return 576; } if (layer == 2) return 1152; return 384; } GF_EXPORT u8 gf_mp3_object_type_indication(u32 hdr) { switch (gf_mp3_version(hdr)) { case 3: return GF_CODECID_MPEG_AUDIO; case 2: case 0: return GF_CODECID_MPEG2_PART3; default: return 0x00; } } /*aligned bitrate parsing with libMAD*/ static u32 const bitrate_table[5][15] = { /* MPEG-1 */ { 0, 32000, 64000, 96000, 128000, 160000, 192000, 224000, /* Layer I */ 256000, 288000, 320000, 352000, 384000, 416000, 448000 }, { 0, 32000, 48000, 56000, 64000, 80000, 96000, 112000, /* Layer II */ 128000, 160000, 192000, 224000, 256000, 320000, 384000 }, { 0, 32000, 40000, 48000, 56000, 64000, 80000, 96000, /* Layer III */ 112000, 128000, 160000, 192000, 224000, 256000, 320000 }, /* MPEG-2 LSF */ { 0, 32000, 48000, 56000, 64000, 80000, 96000, 112000, /* Layer I */ 128000, 144000, 160000, 176000, 192000, 224000, 256000 }, { 0, 8000, 16000, 24000, 32000, 40000, 48000, 56000, /* Layers */ 64000, 80000, 96000, 112000, 128000, 144000, 160000 } /* II & III */ }; u32 gf_mp3_bit_rate(u32 hdr) { u8 version = gf_mp3_version(hdr); u8 layer = gf_mp3_layer(hdr); u8 bitRateIndex = (hdr >> 12) & 0xF; u32 lidx; /*MPEG-1*/ if (version & 1) { if (!layer) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[MPEG-1/2 Audio] layer index not valid\n")); return 0; } lidx = layer - 1; } /*MPEG-2/2.5*/ else { lidx = 3 + (layer >> 1); } if (lidx>4) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[MPEG-1/2 Audio] layer index not valid\n")); return 0; } return bitrate_table[lidx][bitRateIndex]; } GF_EXPORT u16 gf_mp3_frame_size(u32 hdr) { u8 version = gf_mp3_version(hdr); u8 layer = gf_mp3_layer(hdr); u32 pad = ((hdr >> 9) & 0x1) ? 
1 : 0; u32 bitrate = gf_mp3_bit_rate(hdr); u32 samplerate = gf_mp3_sampling_rate(hdr); u32 frameSize = 0; if (!samplerate || !bitrate) return 0; if (layer == 1) { frameSize = ((12 * bitrate / samplerate) + pad) * 4; } else { u32 slots_per_frame = 144; if ((layer == 3) && !(version & 1)) slots_per_frame = 72; frameSize = (slots_per_frame * bitrate / samplerate) + pad; } return (u16)frameSize; } GF_EXPORT u32 gf_mp3_get_next_header(FILE* in) { u8 b, state = 0; u32 dropped = 0; unsigned char bytes[4]; bytes[0] = bytes[1] = bytes[2] = bytes[3] = 0; while (1) { if (gf_fread(&b, 1, in) == 0) return 0; if (state == 3) { bytes[state] = b; return GF_4CC((u32)bytes[0], bytes[1], bytes[2], bytes[3]); } if (state == 2) { if (((b & 0xF0) == 0) || ((b & 0xF0) == 0xF0) || ((b & 0x0C) == 0x0C)) { if (bytes[1] == 0xFF) state = 1; else state = 0; } else { bytes[state] = b; state = 3; } } if (state == 1) { if (((b & 0xE0) == 0xE0) && ((b & 0x18) != 0x08) && ((b & 0x06) != 0)) { bytes[state] = b; state = 2; } else { state = 0; } } if (state == 0) { if (b == 0xFF) { bytes[state] = b; state = 1; } else { if ((dropped == 0) && ((b & 0xE0) == 0xE0) && ((b & 0x18) != 0x08) && ((b & 0x06) != 0)) { bytes[0] = (u8)0xFF; bytes[1] = b; state = 2; } else { dropped++; } } } } return 0; } GF_EXPORT u32 gf_mp3_get_next_header_mem(const u8 *buffer, u32 size, u32 *pos) { u32 cur; u8 b, state = 0; u32 dropped = 0; unsigned char bytes[4]; bytes[0] = bytes[1] = bytes[2] = bytes[3] = 0; cur = 0; *pos = 0; while (cur < size) { b = (u8)buffer[cur]; cur++; if (state == 3) { u32 val; bytes[state] = b; val = GF_4CC((u32)bytes[0], bytes[1], bytes[2], bytes[3]); if (gf_mp3_frame_size(val)) { *pos = dropped; return val; } state = 0; dropped = cur; } if (state == 2) { if (((b & 0xF0) == 0) || ((b & 0xF0) == 0xF0) || ((b & 0x0C) == 0x0C)) { if (bytes[1] == 0xFF) { state = 1; dropped += 1; } else { state = 0; dropped = cur; } } else { bytes[state] = b; state = 3; } } if (state == 1) { if (((b & 0xE0) == 0xE0) && ((b & 0x18) != 0x08) && ((b & 0x06) != 0)) { bytes[state] = b; state = 2; } else { state = 0; dropped = cur; } } if (state == 0) { if (b == 0xFF) { bytes[state] = b; state = 1; } else { dropped++; } } } return 0; } #endif /*GPAC_DISABLE_AV_PARSERS*/ GF_EXPORT Bool gf_avc_is_rext_profile(u8 profile_idc) { switch (profile_idc) { case 100: case 110: case 122: case 244: case 44: case 83: case 86: case 118: case 128: case 138: case 139: case 134: case 135: return GF_TRUE; default: return GF_FALSE; } } GF_EXPORT const char *gf_avc_get_profile_name(u8 video_prof) { switch (video_prof) { case 0x42: return "Baseline"; case 0x4D: return "Main"; case 0x53: return "Scalable Baseline"; case 0x56: return "Scalable High"; case 0x58: return "Extended"; case 0x64: return "High"; case 0x6E: return "High 10"; case 0x7A: return "High 4:2:2"; case 0x90: case 0xF4: return "High 4:4:4"; default: return "Unknown"; } } GF_EXPORT const char *gf_hevc_get_profile_name(u8 video_prof) { switch (video_prof) { case 0x01: return "Main"; case 0x02: return "Main 10"; case 0x03: return "Main Still Picture"; default: return "Unknown"; } } GF_EXPORT const char *gf_avc_hevc_get_chroma_format_name(u8 chroma_format) { switch (chroma_format) { case 1: return "YUV 4:2:0"; case 2: return "YUV 4:2:2"; case 3: return "YUV 4:4:4"; default: return "Unknown"; } } #ifndef GPAC_DISABLE_AV_PARSERS u32 gf_bs_read_ue_log_idx3(GF_BitStream *bs, const char *fname, s32 idx1, s32 idx2, s32 idx3) { u32 val=0, code; s32 nb_lead = -1; u32 bits = 0; for (code=0; !code; nb_lead++) { if 
(nb_lead>=32) { //gf_bs_read_int keeps returning 0 on EOS, so if no more bits available, rbsp was truncated otherwise code is broken in rbsp) //we only test once nb_lead>=32 to avoid testing at each bit read if (!gf_bs_available(bs)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Core] exp-golomb read failed, not enough bits in bitstream !\n")); } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Core] corrupted exp-golomb code, %d leading zeros, max 31 allowed !\n", nb_lead)); } return 0; } code = gf_bs_read_int(bs, 1); bits++; } if (nb_lead) { val = gf_bs_read_int(bs, nb_lead); val += (1 << nb_lead) - 1; bits += nb_lead; } if (fname) { gf_bs_log_idx(bs, bits, fname, val, idx1, idx2, idx3); } return val; } #define gf_bs_read_ue_log_idx2(_bs, _fname, _idx1, _idx2) gf_bs_read_ue_log_idx3(_bs, _fname, (s32) _idx1, (s32) _idx2, -1) #define gf_bs_read_ue_log_idx(_bs, _fname, _idx) gf_bs_read_ue_log_idx3(_bs, _fname, (s32) _idx, -1, -1) #define gf_bs_read_ue_log(_bs, _fname) gf_bs_read_ue_log_idx3(_bs, _fname, -1, -1, -1) u32 gf_bs_read_ue(GF_BitStream *bs) { return gf_bs_read_ue_log(bs, NULL); } s32 gf_bs_read_se(GF_BitStream *bs) { u32 v = gf_bs_read_ue(bs); if ((v & 0x1) == 0) return (s32)(0 - (v >> 1)); return (v + 1) >> 1; } s32 gf_bs_read_se_log_idx2(GF_BitStream *bs, const char *fname, s32 idx1, s32 idx2) { s32 res = gf_bs_read_se(bs); if (fname) gf_bs_log_idx(bs, -1, fname, res, idx1, idx2, -1); return res; } #define gf_bs_read_se_log_idx(_bs, _fname, _idx) gf_bs_read_se_log_idx2(_bs, _fname, (s32) _idx, -1) #define gf_bs_read_se_log(_bs, _fname) gf_bs_read_se_log_idx2(_bs, _fname, -1, -1) void gf_bs_write_ue(GF_BitStream *bs, u32 num) { s32 length = 1; s32 temp = ++num; while (temp != 1) { temp >>= 1; length += 2; } gf_bs_write_int(bs, 0, length >> 1); gf_bs_write_int(bs, num, (length + 1) >> 1); } void gf_bs_write_se(GF_BitStream *bs, s32 num) { u32 v; if (num <= 0) v = (-1 * num) << 1; else v = (num << 1) - 1; gf_bs_write_ue(bs, v); } u32 gf_media_nalu_is_start_code(GF_BitStream *bs) { u8 s1, s2, s3, s4; Bool is_sc = 0; u64 pos = gf_bs_get_position(bs); s1 = gf_bs_read_int(bs, 8); s2 = gf_bs_read_int(bs, 8); if (!s1 && !s2) { s3 = gf_bs_read_int(bs, 8); if (s3 == 0x01) is_sc = 3; else if (!s3) { s4 = gf_bs_read_int(bs, 8); if (s4 == 0x01) is_sc = 4; } } gf_bs_seek(bs, pos + is_sc); return is_sc; } /*read that amount of data at each IO access rather than fetching byte by byte...*/ #define AVC_CACHE_SIZE 4096 static u32 gf_media_nalu_locate_start_code_bs(GF_BitStream *bs, Bool locate_trailing) { u32 v, bpos, nb_cons_zeros = 0; char avc_cache[AVC_CACHE_SIZE]; u64 end, cache_start, load_size; u64 start = gf_bs_get_position(bs); if (start < 3) return 0; load_size = 0; bpos = 0; cache_start = 0; end = 0; v = 0xffffffff; while (!end) { /*refill cache*/ if (bpos == (u32)load_size) { if (!gf_bs_available(bs)) break; load_size = gf_bs_available(bs); if (load_size > AVC_CACHE_SIZE) load_size = AVC_CACHE_SIZE; bpos = 0; cache_start = gf_bs_get_position(bs); gf_bs_read_data(bs, avc_cache, (u32)load_size); } v = ( (v<<8) & 0xFFFFFF00) | ((u32) avc_cache[bpos]); bpos++; if (locate_trailing) { if ((v & 0x000000FF) == 0) nb_cons_zeros++; else nb_cons_zeros = 0; } if (v == 0x00000001) end = cache_start + bpos - 4; else if ((v & 0x00FFFFFF) == 0x00000001) end = cache_start + bpos - 3; } gf_bs_seek(bs, start); if (!end) end = gf_bs_get_size(bs); if (locate_trailing) { if (nb_cons_zeros >= 3) return (u32)(end - start - nb_cons_zeros); } return (u32)(end - start); } GF_EXPORT u32 
gf_media_nalu_next_start_code_bs(GF_BitStream *bs) { return gf_media_nalu_locate_start_code_bs(bs, 0); } GF_EXPORT u32 gf_media_nalu_next_start_code(const u8 *data, u32 data_len, u32 *sc_size) { u32 avail = data_len; const u8 *cur = data; while (cur) { u32 v, bpos; u8 *next_zero = memchr(cur, 0, avail); if (!next_zero) return data_len; v = 0xffffff00; bpos = (u32)(next_zero - data) + 1; while (1) { u8 cval; if (bpos == (u32)data_len) return data_len; cval = data[bpos]; v = ((v << 8) & 0xFFFFFF00) | ((u32)cval); bpos++; if (v == 0x00000001) { *sc_size = 4; return bpos - 4; } else if ((v & 0x00FFFFFF) == 0x00000001) { *sc_size = 3; return bpos - 3; } if (cval) break; } if (bpos >= data_len) break; cur = data + bpos; avail = data_len - bpos; } return data_len; } Bool gf_media_avc_slice_is_intra(AVCState *avc) { switch (avc->s_info.slice_type) { case GF_AVC_TYPE_I: case GF_AVC_TYPE2_I: case GF_AVC_TYPE_SI: case GF_AVC_TYPE2_SI: return 1; default: return 0; } } #if 0 //unused Bool gf_media_avc_slice_is_IDR(AVCState *avc) { if (avc->sei.recovery_point.valid) { avc->sei.recovery_point.valid = 0; return 1; } if (avc->s_info.nal_unit_type != GF_AVC_NALU_IDR_SLICE) return 0; return gf_media_avc_slice_is_intra(avc); } #endif static const struct { u32 w, h; } avc_hevc_sar[] = { { 0, 0 }, { 1, 1 }, { 12, 11 }, { 10, 11 }, { 16, 11 }, { 40, 33 }, { 24, 11 }, { 20, 11 }, { 32, 11 }, { 80, 33 }, { 18, 11 }, { 15, 11 }, { 64, 33 }, { 160,99 }, { 4, 3 }, { 3, 2 }, { 2, 1 } }; /*ISO 14496-10 (N11084) E.1.2*/ static void avc_parse_hrd_parameters(GF_BitStream *bs, AVC_HRD *hrd) { int i, cpb_cnt_minus1; cpb_cnt_minus1 = gf_bs_read_ue_log(bs, "cpb_cnt_minus1"); if (cpb_cnt_minus1 > 31) GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] invalid cpb_cnt_minus1 value: %d (expected in [0;31])\n", cpb_cnt_minus1)); gf_bs_read_int_log(bs, 4, "bit_rate_scale"); gf_bs_read_int_log(bs, 4, "cpb_size_scale"); /*for( SchedSelIdx = 0; SchedSelIdx <= cpb_cnt_minus1; SchedSelIdx++ ) {*/ for (i = 0; i <= cpb_cnt_minus1; i++) { gf_bs_read_ue_log_idx(bs, "bit_rate_value_minus1", i); gf_bs_read_ue_log_idx(bs, "cpb_size_value_minus1", i); gf_bs_read_int_log_idx(bs, 1, "cbr_flag", i); } gf_bs_read_int_log(bs, 5, "initial_cpb_removal_delay_length_minus1"); hrd->cpb_removal_delay_length_minus1 = gf_bs_read_int_log(bs, 5, "cpb_removal_delay_length_minus1"); hrd->dpb_output_delay_length_minus1 = gf_bs_read_int_log(bs, 5, "dpb_output_delay_length_minus1"); hrd->time_offset_length = gf_bs_read_int_log(bs, 5, "time_offset_length"); return; } /*returns the nal_size without emulation prevention bytes*/ u32 gf_media_nalu_emulation_bytes_add_count(u8 *buffer, u32 nal_size) { u32 i = 0, emulation_bytes_count = 0; u8 num_zero = 0; while (i < nal_size) { /*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003 other than the following sequences shall not occur at any byte-aligned position: \96 0x00000300 \96 0x00000301 \96 0x00000302 \96 0x00000303" */ if (num_zero == 2 && (u8)buffer[i] < 0x04) { /*emulation code found*/ num_zero = 0; emulation_bytes_count++; if (!buffer[i]) num_zero = 1; } else { if (!buffer[i]) num_zero++; else num_zero = 0; } i++; } return emulation_bytes_count; } u32 gf_media_nalu_add_emulation_bytes(const u8 *buffer_src, u8 *buffer_dst, u32 nal_size) { u32 i = 0, emulation_bytes_count = 0; u8 num_zero = 0; while (i < nal_size) { /*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003 other than the following sequences shall not occur at any byte-aligned position: 
0x00000300 0x00000301 0x00000302 0x00000303" */ if (num_zero == 2 && (u8)buffer_src[i] < 0x04) { /*add emulation code*/ num_zero = 0; buffer_dst[i + emulation_bytes_count] = 0x03; emulation_bytes_count++; if (!buffer_src[i]) num_zero = 1; } else { if (!buffer_src[i]) num_zero++; else num_zero = 0; } buffer_dst[i + emulation_bytes_count] = buffer_src[i]; i++; } return nal_size + emulation_bytes_count; } /*returns the nal_size without emulation prevention bytes*/ u32 gf_media_nalu_emulation_bytes_remove_count(const u8 *buffer, u32 nal_size) { u32 i = 0, emulation_bytes_count = 0; u8 num_zero = 0; if (!buffer || !nal_size) return 0; while (i < nal_size) { /*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003 other than the following sequences shall not occur at any byte-aligned position: \96 0x00000300 \96 0x00000301 \96 0x00000302 \96 0x00000303" */ if (num_zero == 2 && buffer[i] == 0x03 && i + 1 < nal_size /*next byte is readable*/ && (u8)buffer[i + 1] < 0x04) { /*emulation code found*/ num_zero = 0; emulation_bytes_count++; i++; } if (!buffer[i]) num_zero++; else num_zero = 0; i++; } return emulation_bytes_count; } /*nal_size is updated to allow better error detection*/ GF_EXPORT u32 gf_media_nalu_remove_emulation_bytes(const u8 *buffer_src, u8 *buffer_dst, u32 nal_size) { u32 i = 0, emulation_bytes_count = 0; u8 num_zero = 0; while (i < nal_size) { /*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003 other than the following sequences shall not occur at any byte-aligned position: 0x00000300 0x00000301 0x00000302 0x00000303" */ if (num_zero == 2 && buffer_src[i] == 0x03 && i + 1 < nal_size /*next byte is readable*/ && (u8)buffer_src[i + 1] < 0x04) { /*emulation code found*/ num_zero = 0; emulation_bytes_count++; i++; } buffer_dst[i - emulation_bytes_count] = buffer_src[i]; if (!buffer_src[i]) num_zero++; else num_zero = 0; i++; } return nal_size - emulation_bytes_count; } static s32 gf_avc_read_sps_bs_internal(GF_BitStream *bs, AVCState *avc, u32 subseq_sps, u32 *vui_flag_pos, u32 nal_hdr) { AVC_SPS *sps; s32 mb_width, mb_height, sps_id = -1; u32 profile_idc, level_idc, pcomp, i, chroma_format_idc, cl = 0, cr = 0, ct = 0, cb = 0, luma_bd, chroma_bd; u8 separate_colour_plane_flag = 0; if (!vui_flag_pos) { gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); } if (!bs) { return -1; } if (!nal_hdr) { gf_bs_read_int_log(bs, 1, "forbidden_zero_bit"); gf_bs_read_int_log(bs, 2, "nal_ref_idc"); gf_bs_read_int_log(bs, 5, "nal_unit_type"); } profile_idc = gf_bs_read_int_log(bs, 8, "profile_idc"); pcomp = gf_bs_read_int_log(bs, 8, "profile_compatibility"); /*sanity checks*/ if (pcomp & 0x3) return -1; level_idc = gf_bs_read_int_log(bs, 8, "level_idc"); /*SubsetSps is used to be sure that AVC SPS are not going to be scratched by subset SPS. According to the SVC standard, subset SPS can have the same sps_id than its base layer, but it does not refer to the same SPS. */ sps_id = gf_bs_read_ue_log(bs, "sps_id") + GF_SVC_SSPS_ID_SHIFT * subseq_sps; if (sps_id >= 32) { return -1; } if (sps_id < 0) { return -1; } luma_bd = chroma_bd = 0; sps = &avc->sps[sps_id]; chroma_format_idc = sps->ChromaArrayType = 1; sps->state |= subseq_sps ? 
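/* subset SPS (SVC/MVC extensions): stored at sps_id + GF_SVC_SSPS_ID_SHIFT above so they never overwrite the base-layer SPS carrying the same id */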
AVC_SUBSPS_PARSED : AVC_SPS_PARSED; /*High Profile and SVC*/ switch (profile_idc) { case 100: case 110: case 122: case 244: case 44: /*sanity checks: note1 from 7.4.2.1.1 of iso/iec 14496-10-N11084*/ if (pcomp & 0xE0) return -1; case 83: case 86: case 118: case 128: chroma_format_idc = gf_bs_read_ue_log(bs, "chroma_format_idc"); sps->ChromaArrayType = chroma_format_idc; if (chroma_format_idc == 3) { separate_colour_plane_flag = gf_bs_read_int_log(bs, 1, "separate_colour_plane_flag"); /* Depending on the value of separate_colour_plane_flag, the value of the variable ChromaArrayType is assigned as follows. \96 If separate_colour_plane_flag is equal to 0, ChromaArrayType is set equal to chroma_format_idc. \96 Otherwise (separate_colour_plane_flag is equal to 1), ChromaArrayType is set equal to 0. */ if (separate_colour_plane_flag) sps->ChromaArrayType = 0; } luma_bd = gf_bs_read_ue_log(bs, "luma_bit_depth"); chroma_bd = gf_bs_read_ue_log(bs, "chroma_bit_depth"); /*qpprime_y_zero_transform_bypass_flag = */ gf_bs_read_int_log(bs, 1, "qpprime_y_zero_transform_bypass_flag"); /*seq_scaling_matrix_present_flag*/ if (gf_bs_read_int_log(bs, 1, "seq_scaling_matrix_present_flag")) { u32 k; for (k = 0; k < 8; k++) { if (gf_bs_read_int_log_idx(bs, 1, "seq_scaling_list_present_flag", k)) { u32 z, last = 8, next = 8; u32 sl = k < 6 ? 16 : 64; for (z = 0; z < sl; z++) { if (next) { s32 delta = gf_bs_read_se(bs); next = (last + delta + 256) % 256; } last = next ? next : last; } } } } break; } sps->profile_idc = profile_idc; sps->level_idc = level_idc; sps->prof_compat = pcomp; sps->log2_max_frame_num = gf_bs_read_ue_log(bs, "log2_max_frame_num") + 4; sps->poc_type = gf_bs_read_ue_log(bs, "poc_type"); sps->chroma_format = chroma_format_idc; sps->luma_bit_depth_m8 = luma_bd; sps->chroma_bit_depth_m8 = chroma_bd; if (sps->poc_type == 0) { sps->log2_max_poc_lsb = gf_bs_read_ue_log(bs, "log2_max_poc_lsb") + 4; } else if (sps->poc_type == 1) { sps->delta_pic_order_always_zero_flag = gf_bs_read_int_log(bs, 1, "delta_pic_order_always_zero_flag"); sps->offset_for_non_ref_pic = gf_bs_read_se_log(bs, "offset_for_non_ref_pic"); sps->offset_for_top_to_bottom_field = gf_bs_read_se_log(bs, "offset_for_top_to_bottom_field"); sps->poc_cycle_length = gf_bs_read_ue_log(bs, "poc_cycle_length"); if (sps->poc_cycle_length > GF_ARRAY_LENGTH(sps->offset_for_ref_frame)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[avc-h264] offset_for_ref_frame overflow from poc_cycle_length\n")); return -1; } for (i = 0; i < sps->poc_cycle_length; i++) sps->offset_for_ref_frame[i] = gf_bs_read_se_log_idx(bs, "offset_for_ref_frame", i); } if (sps->poc_type > 2) { return -1; } sps->max_num_ref_frames = gf_bs_read_ue_log(bs, "max_num_ref_frames"); sps->gaps_in_frame_num_value_allowed_flag = gf_bs_read_int_log(bs, 1, "gaps_in_frame_num_value_allowed_flag"); mb_width = gf_bs_read_ue_log(bs, "pic_width_in_mbs_minus1") + 1; mb_height = gf_bs_read_ue_log(bs, "pic_height_in_map_units_minus1") + 1; sps->frame_mbs_only_flag = gf_bs_read_int_log(bs, 1, "frame_mbs_only_flag"); sps->width = mb_width * 16; sps->height = (2 - sps->frame_mbs_only_flag) * mb_height * 16; if (!sps->frame_mbs_only_flag) sps->mb_adaptive_frame_field_flag = gf_bs_read_int_log(bs, 1, "mb_adaptive_frame_field_flag"); gf_bs_read_int_log(bs, 1, "direct_8x8_inference_flag"); if (gf_bs_read_int_log(bs, 1, "frame_cropping_flag")) { int CropUnitX, CropUnitY, SubWidthC = -1, SubHeightC = -1; if (chroma_format_idc == 1) { SubWidthC = 2; SubHeightC = 2; } else if (chroma_format_idc == 2) { SubWidthC = 
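/* 4:2:2: chroma subsampled horizontally only */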
2; SubHeightC = 1; } else if ((chroma_format_idc == 3) && (separate_colour_plane_flag == 0)) { SubWidthC = 1; SubHeightC = 1; } if (sps->ChromaArrayType == 0) { assert(SubWidthC == -1); CropUnitX = 1; CropUnitY = 2 - sps->frame_mbs_only_flag; } else { CropUnitX = SubWidthC; CropUnitY = SubHeightC * (2 - sps->frame_mbs_only_flag); } cl = gf_bs_read_ue_log(bs, "frame_crop_left_offset"); cr = gf_bs_read_ue_log(bs, "frame_crop_right_offset"); ct = gf_bs_read_ue_log(bs, "frame_crop_top_offset"); cb = gf_bs_read_ue_log(bs, "frame_crop_bottom_offset"); sps->width -= CropUnitX * (cl + cr); sps->height -= CropUnitY * (ct + cb); cl *= CropUnitX; cr *= CropUnitX; ct *= CropUnitY; cb *= CropUnitY; } sps->crop.left = cl; sps->crop.right = cr; sps->crop.top = ct; sps->crop.bottom = cb; if (vui_flag_pos) { *vui_flag_pos = (u32)gf_bs_get_bit_offset(bs); } /*vui_parameters_present_flag*/ sps->vui_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_parameters_present_flag"); if (sps->vui_parameters_present_flag) { sps->vui.aspect_ratio_info_present_flag = gf_bs_read_int_log(bs, 1, "aspect_ratio_info_present_flag"); if (sps->vui.aspect_ratio_info_present_flag) { s32 aspect_ratio_idc = gf_bs_read_int_log(bs, 8, "aspect_ratio_idc"); if (aspect_ratio_idc == 255) { sps->vui.par_num = gf_bs_read_int_log(bs, 16, "aspect_ratio_num"); sps->vui.par_den = gf_bs_read_int_log(bs, 16, "aspect_ratio_den"); } else if (aspect_ratio_idc < GF_ARRAY_LENGTH(avc_hevc_sar) ) { sps->vui.par_num = avc_hevc_sar[aspect_ratio_idc].w; sps->vui.par_den = avc_hevc_sar[aspect_ratio_idc].h; } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] Unknown aspect_ratio_idc: your video may have a wrong aspect ratio. Contact the GPAC team!\n")); } } sps->vui.overscan_info_present_flag = gf_bs_read_int_log(bs, 1, "overscan_info_present_flag"); if (sps->vui.overscan_info_present_flag) gf_bs_read_int_log(bs, 1, "overscan_appropriate_flag"); /* default values */ sps->vui.video_format = 5; sps->vui.colour_primaries = 2; sps->vui.transfer_characteristics = 2; sps->vui.matrix_coefficients = 2; /* now read values if possible */ sps->vui.video_signal_type_present_flag = gf_bs_read_int_log(bs, 1, "video_signal_type_present_flag"); if (sps->vui.video_signal_type_present_flag) { sps->vui.video_format = gf_bs_read_int_log(bs, 3, "video_format"); sps->vui.video_full_range_flag = gf_bs_read_int_log(bs, 1, "video_full_range_flag"); sps->vui.colour_description_present_flag = gf_bs_read_int_log(bs, 1, "colour_description_present_flag"); if (sps->vui.colour_description_present_flag) { sps->vui.colour_primaries = gf_bs_read_int_log(bs, 8, "colour_primaries"); sps->vui.transfer_characteristics = gf_bs_read_int_log(bs, 8, "transfer_characteristics"); sps->vui.matrix_coefficients = gf_bs_read_int_log(bs, 8, "matrix_coefficients"); } } if (gf_bs_read_int_log(bs, 1, "chroma_location_info_present_flag")) { gf_bs_read_ue_log(bs, "chroma_sample_location_type_top_field"); gf_bs_read_ue_log(bs, "chroma_sample_location_type_bottom_field"); } sps->vui.timing_info_present_flag = gf_bs_read_int_log(bs, 1, "timing_info_present_flag"); if (sps->vui.timing_info_present_flag) { sps->vui.num_units_in_tick = gf_bs_read_int_log(bs, 32, "num_units_in_tick"); sps->vui.time_scale = gf_bs_read_int_log(bs, 32, "time_scale"); sps->vui.fixed_frame_rate_flag = gf_bs_read_int_log(bs, 1, "fixed_frame_rate_flag"); } sps->vui.nal_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "nal_hrd_parameters_present_flag"); if (sps->vui.nal_hrd_parameters_present_flag) 
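/* NAL and VCL HRD parameters use the same hrd_parameters() syntax (E.1.2); both are parsed into sps->vui.hrd */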
avc_parse_hrd_parameters(bs, &sps->vui.hrd); sps->vui.vcl_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vcl_hrd_parameters_present_flag"); if (sps->vui.vcl_hrd_parameters_present_flag) avc_parse_hrd_parameters(bs, &sps->vui.hrd); if (sps->vui.nal_hrd_parameters_present_flag || sps->vui.vcl_hrd_parameters_present_flag) sps->vui.low_delay_hrd_flag = gf_bs_read_int_log(bs, 1, "low_delay_hrd_flag"); sps->vui.pic_struct_present_flag = gf_bs_read_int_log(bs, 1, "pic_struct_present_flag"); } /*end of seq_parameter_set_data*/ if (subseq_sps) { if ((profile_idc == 83) || (profile_idc == 86)) { u8 extended_spatial_scalability_idc; /*parsing seq_parameter_set_svc_extension*/ gf_bs_read_int_log(bs, 1, "inter_layer_deblocking_filter_control_present_flag"); extended_spatial_scalability_idc = gf_bs_read_int_log(bs, 2, "extended_spatial_scalability_idc"); if (sps->ChromaArrayType == 1 || sps->ChromaArrayType == 2) { gf_bs_read_int_log(bs, 1, "chroma_phase_x_plus1_flag"); } if (sps->ChromaArrayType == 1) { gf_bs_read_int_log(bs, 2, "chroma_phase_y_plus1"); } if (extended_spatial_scalability_idc == 1) { if (sps->ChromaArrayType > 0) { gf_bs_read_int_log(bs, 1, "seq_ref_layer_chroma_phase_x_plus1_flag"); gf_bs_read_int_log(bs, 2, "seq_ref_layer_chroma_phase_y_plus1"); } gf_bs_read_se_log(bs, "seq_scaled_ref_layer_left_offset"); gf_bs_read_se_log(bs, "seq_scaled_ref_layer_top_offset"); gf_bs_read_se_log(bs, "seq_scaled_ref_layer_right_offset"); gf_bs_read_se_log(bs, "seq_scaled_ref_layer_bottom_offset"); } if (gf_bs_read_int_log(bs, 1, "seq_tcoeff_level_prediction_flag")) { gf_bs_read_int_log(bs, 1, "adaptive_tcoeff_level_prediction_flag"); } gf_bs_read_int_log(bs, 1, "slice_header_restriction_flag"); if (gf_bs_read_int_log(bs, 1, "svc_vui_parameters_present")) { u32 vui_ext_num_entries_minus1 = gf_bs_read_ue_log(bs, "vui_ext_num_entries_minus1"); for (i = 0; i <= vui_ext_num_entries_minus1; i++) { u8 vui_ext_nal_hrd_parameters_present_flag, vui_ext_vcl_hrd_parameters_present_flag, vui_ext_timing_info_present_flag; gf_bs_read_int_log(bs, 3, "vui_ext_dependency_id"); gf_bs_read_int_log(bs, 4, "vui_ext_quality_id"); gf_bs_read_int_log(bs, 3, "vui_ext_temporal_id"); vui_ext_timing_info_present_flag = gf_bs_read_int_log(bs, 1, "vui_ext_timing_info_present_flag"); if (vui_ext_timing_info_present_flag) { gf_bs_read_int_log(bs, 32, "vui_ext_num_units_in_tick"); gf_bs_read_int_log(bs, 32, "vui_ext_time_scale"); gf_bs_read_int_log(bs, 1, "vui_ext_fixed_frame_rate_flag"); } vui_ext_nal_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_ext_nal_hrd_parameters_present_flag"); if (vui_ext_nal_hrd_parameters_present_flag) { //hrd_parameters( ) } vui_ext_vcl_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_ext_vcl_hrd_parameters_present_flag"); if (vui_ext_vcl_hrd_parameters_present_flag) { //hrd_parameters( ) } if (vui_ext_nal_hrd_parameters_present_flag || vui_ext_vcl_hrd_parameters_present_flag) { gf_bs_read_int_log(bs, 1, "vui_ext_low_delay_hrd_flag"); } gf_bs_read_int_log(bs, 1, "vui_ext_pic_struct_present_flag"); } } } else if ((profile_idc == 118) || (profile_idc == 128)) { GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[avc-h264] MVC parsing not implemented - skipping parsing end of Subset SPS\n")); return sps_id; } if (gf_bs_read_int_log(bs, 1, "additional_extension2")) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] skipping parsing end of Subset SPS (additional_extension2)\n")); return sps_id; } } return sps_id; } GF_EXPORT s32 gf_avc_read_sps_bs(GF_BitStream *bs, AVCState *avc, u32 
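/* subseq_sps: pass 1 when the payload is a subset SPS (SVC/MVC), 0 for a regular SPS */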
subseq_sps, u32 *vui_flag_pos) { return gf_avc_read_sps_bs_internal(bs, avc, subseq_sps, vui_flag_pos, 0); } GF_EXPORT s32 gf_avc_read_sps(const u8 *sps_data, u32 sps_size, AVCState *avc, u32 subseq_sps, u32 *vui_flag_pos) { s32 sps_id = -1; GF_BitStream *bs; char *sps_data_without_emulation_bytes = NULL; u32 sps_data_without_emulation_bytes_size = 0; if (vui_flag_pos) { /*SPS still contains emulation bytes*/ sps_data_without_emulation_bytes = gf_malloc(sps_size * sizeof(char)); sps_data_without_emulation_bytes_size = gf_media_nalu_remove_emulation_bytes(sps_data, sps_data_without_emulation_bytes, sps_size); bs = gf_bs_new(sps_data_without_emulation_bytes, sps_data_without_emulation_bytes_size, GF_BITSTREAM_READ); *vui_flag_pos = 0; } else { bs = gf_bs_new(sps_data, sps_size, GF_BITSTREAM_READ); } if (!bs) { sps_id = -1; goto exit; } sps_id = gf_avc_read_sps_bs(bs, avc, subseq_sps, vui_flag_pos); exit: gf_bs_del(bs); if (sps_data_without_emulation_bytes) gf_free(sps_data_without_emulation_bytes); return sps_id; } static s32 gf_avc_read_pps_bs_internal(GF_BitStream *bs, AVCState *avc, u32 nal_hdr) { s32 pps_id; AVC_PPS *pps; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!nal_hdr) { gf_bs_read_int_log(bs, 1, "forbidden_zero_bit"); gf_bs_read_int_log(bs, 2, "nal_ref_idc"); gf_bs_read_int_log(bs, 5, "nal_unit_type"); } pps_id = gf_bs_read_ue_log(bs, "pps_id"); if (pps_id >= 255) { return -1; } pps = &avc->pps[pps_id]; pps->id = pps_id; if (!pps->status) pps->status = 1; pps->sps_id = gf_bs_read_ue_log(bs, "sps_id"); if (pps->sps_id >= 32) { pps->sps_id = 0; return -1; } /*sps_id may be refer to regular SPS or subseq sps, depending on the coded slice referring to the pps*/ if (!avc->sps[pps->sps_id].state && !avc->sps[pps->sps_id + GF_SVC_SSPS_ID_SHIFT].state) { return -1; } avc->pps_active_idx = pps->id; /*set active sps*/ avc->sps_active_idx = pps->sps_id; /*set active sps*/ pps->entropy_coding_mode_flag = gf_bs_read_int_log(bs, 1, "entropy_coding_mode_flag"); pps->pic_order_present = gf_bs_read_int_log(bs, 1, "pic_order_present"); pps->slice_group_count = gf_bs_read_ue_log(bs, "slice_group_count_minus1") + 1; if (pps->slice_group_count > 1) { u32 iGroup; pps->mb_slice_group_map_type = gf_bs_read_ue_log(bs, "mb_slice_group_map_type"); if (pps->mb_slice_group_map_type == 0) { for (iGroup = 0; iGroup <= pps->slice_group_count - 1; iGroup++) gf_bs_read_ue_log_idx(bs, "run_length_minus1", iGroup); } else if (pps->mb_slice_group_map_type == 2) { for (iGroup = 0; iGroup < pps->slice_group_count - 1; iGroup++) { gf_bs_read_ue_log_idx(bs, "top_left", iGroup); gf_bs_read_ue_log_idx(bs, "bottom_right", iGroup); } } else if (pps->mb_slice_group_map_type == 3 || pps->mb_slice_group_map_type == 4 || pps->mb_slice_group_map_type == 5) { gf_bs_read_int_log(bs, 1, "slice_group_change_direction_flag"); gf_bs_read_ue_log(bs, "slice_group_change_rate_minus1"); } else if (pps->mb_slice_group_map_type == 6) { u32 i; pps->pic_size_in_map_units_minus1 = gf_bs_read_ue_log(bs, "pic_size_in_map_units_minus1"); for (i = 0; i <= pps->pic_size_in_map_units_minus1; i++) { gf_bs_read_int_log_idx(bs, (u32)ceil(log(pps->slice_group_count) / log(2)), "slice_group_id", i); } } } pps->num_ref_idx_l0_default_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l0_default_active_minus1"); pps->num_ref_idx_l1_default_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l1_default_active_minus1"); /* if ((pps->ref_count[0] > 32) || (pps->ref_count[1] > 32)) goto exit; */ pps->weighted_pred_flag = gf_bs_read_int_log(bs, 1, 
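/* weighted_pred_flag enables explicit weighted prediction for P/SP slices; the 2-bit weighted_bipred_idc read next controls B-slice weighting (0: default, 1: explicit, 2: implicit) */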
"weighted_pred_flag"); gf_bs_read_int_log(bs, 2, "weighted_bipred_idc"); gf_bs_read_se_log(bs, "init_qp_minus26"); gf_bs_read_se_log(bs, "init_qs_minus26"); gf_bs_read_se_log(bs, "chroma_qp_index_offset"); pps->deblocking_filter_control_present_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_control_present_flag"); gf_bs_read_int_log(bs, 1, "constrained_intra_pred"); pps->redundant_pic_cnt_present = gf_bs_read_int_log(bs, 1, "redundant_pic_cnt_present"); return pps_id; } GF_EXPORT s32 gf_avc_read_pps_bs(GF_BitStream *bs, AVCState *avc) { return gf_avc_read_pps_bs_internal(bs, avc, 0); } GF_EXPORT s32 gf_avc_read_pps(const u8 *pps_data, u32 pps_size, AVCState *avc) { GF_BitStream *bs; s32 pps_id; /*PPS still contains emulation bytes*/ bs = gf_bs_new(pps_data, pps_size, GF_BITSTREAM_READ); if (!bs) { return -1; } gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); pps_id = gf_avc_read_pps_bs(bs, avc); gf_bs_del(bs); return pps_id; } #if 0 //unused s32 gf_avc_read_sps_ext(const char *spse_data, u32 spse_size) { GF_BitStream *bs; s32 sps_id; bs = gf_bs_new(spse_data, spse_size, GF_BITSTREAM_READ); sps_id = gf_avc_read_sps_ext_bs(bs); gf_bs_del(bs); return sps_id; } #endif static s32 SVC_ReadNal_header_extension(GF_BitStream *bs, SVC_NALUHeader *NalHeader) { gf_bs_read_int_log(bs, 1, "reserved_one_bit"); NalHeader->idr_pic_flag = gf_bs_read_int_log(bs, 1, "idr_flag"); NalHeader->priority_id = gf_bs_read_int_log(bs, 6, "priority_id"); gf_bs_read_int_log(bs, 1, "no_inter_layer_pred_flag"); NalHeader->dependency_id = gf_bs_read_int_log(bs, 3, "DependencyId"); NalHeader->quality_id = gf_bs_read_int_log(bs, 4, "quality_id"); NalHeader->temporal_id = gf_bs_read_int_log(bs, 3, "temporal_id"); gf_bs_read_int_log(bs, 1, "use_ref_base_pic_flag"); gf_bs_read_int_log(bs, 1, "discardable_flag"); gf_bs_read_int_log(bs, 1, "output_flag"); gf_bs_read_int_log(bs, 2, "reserved_three_2bits"); return 1; } static void ref_pic_list_modification(GF_BitStream *bs, u32 slice_type) { if (slice_type % 5 != 2 && slice_type % 5 != 4) { if (gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l0")) { u32 idx=0, modification_of_pic_nums_idc; do { modification_of_pic_nums_idc = gf_bs_read_ue_log_idx(bs, "modification_of_pic_nums_idc", idx); if (modification_of_pic_nums_idc == 0 || modification_of_pic_nums_idc == 1) { gf_bs_read_ue_log_idx(bs, "abs_diff_pic_num_minus1", idx); } else if (modification_of_pic_nums_idc == 2) { gf_bs_read_ue_log_idx(bs, "long_term_pic_num", idx); } idx++; } while ((modification_of_pic_nums_idc != 3) && gf_bs_available(bs)); } } if (slice_type % 5 == 1) { if (gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l1")) { u32 idx=0, modification_of_pic_nums_idc; do { modification_of_pic_nums_idc = gf_bs_read_ue_log_idx(bs, "modification_of_pic_nums_idc", idx); if (modification_of_pic_nums_idc == 0 || modification_of_pic_nums_idc == 1) { gf_bs_read_ue_log_idx(bs, "abs_diff_pic_num_minus1", idx); } else if (modification_of_pic_nums_idc == 2) { gf_bs_read_ue_log_idx(bs, "long_term_pic_num", idx); } idx++; } while ((modification_of_pic_nums_idc != 3) && gf_bs_available(bs)); } } } static void pred_weight_table(GF_BitStream *bs, u32 slice_type, u32 ChromaArrayType, u32 num_ref_idx_l0_active_minus1, u32 num_ref_idx_l1_active_minus1) { u32 i, j; gf_bs_read_ue_log(bs, "luma_log2_weight_denom"); if (ChromaArrayType != 0) { gf_bs_read_ue_log(bs, "chroma_log2_weight_denom"); } for (i = 0; i <= num_ref_idx_l0_active_minus1; i++) { if (gf_bs_read_int_log_idx(bs, 1, "luma_weight_l0_flag", i)) { 
gf_bs_read_se_log_idx(bs, "luma_weight_l0", i); gf_bs_read_se_log_idx(bs, "luma_offset_l0", i); } if (ChromaArrayType != 0) { if (gf_bs_read_int_log_idx(bs, 1, "chroma_weight_l0_flag", i)) for (j = 0; j < 2; j++) { gf_bs_read_se_log_idx2(bs, "chroma_weight_l0", i, j); gf_bs_read_se_log_idx2(bs, "chroma_offset_l0", i, j); } } } if (slice_type % 5 == 1) { for (i = 0; i <= num_ref_idx_l1_active_minus1; i++) { if (gf_bs_read_int_log_idx(bs, 1, "luma_weight_l1_flag", i)) { gf_bs_read_se_log_idx(bs, "luma_weight_l1", i); gf_bs_read_se_log_idx(bs, "luma_offset_l1", i); } if (ChromaArrayType != 0) { if (gf_bs_read_int_log_idx(bs, 1, "chroma_weight_l1_flag", i)) { for (j = 0; j < 2; j++) { gf_bs_read_se_log_idx2(bs, "chroma_weight_l1", i, j); gf_bs_read_se_log_idx2(bs, "chroma_offset_l1", i, j); } } } } } } static void dec_ref_pic_marking(GF_BitStream *bs, Bool IdrPicFlag) { if (IdrPicFlag) { gf_bs_read_int_log(bs, 1, "no_output_of_prior_pics_flag"); gf_bs_read_int_log(bs, 1, "long_term_reference_flag"); } else { if (gf_bs_read_int_log(bs, 1, "adaptive_ref_pic_marking_mode_flag")) { u32 idx=0, memory_management_control_operation; do { memory_management_control_operation = gf_bs_read_ue_log_idx(bs, "memory_management_control_operation", idx); if (memory_management_control_operation == 1 || memory_management_control_operation == 3) gf_bs_read_ue_log_idx(bs, "difference_of_pic_nums_minus1", idx); if (memory_management_control_operation == 2) gf_bs_read_ue_log_idx(bs, "long_term_pic_num", idx); if (memory_management_control_operation == 3 || memory_management_control_operation == 6) gf_bs_read_ue_log_idx(bs, "long_term_frame_idx", idx); if (memory_management_control_operation == 4) gf_bs_read_ue_log_idx(bs, "max_long_term_frame_idx_plus1", idx); idx++; } while (memory_management_control_operation != 0); } } } static s32 avc_parse_slice(GF_BitStream *bs, AVCState *avc, Bool svc_idr_flag, AVCSliceInfo *si) { s32 pps_id, num_ref_idx_l0_active_minus1 = 0, num_ref_idx_l1_active_minus1 = 0; /*s->current_picture.reference= h->nal_ref_idc != 0;*/ gf_bs_read_ue_log(bs, "first_mb_in_slice"); si->slice_type = gf_bs_read_ue_log(bs, "slice_type"); if (si->slice_type > 9) return -1; pps_id = gf_bs_read_ue_log(bs, "pps_id"); if (pps_id > 255) return -1; si->pps = &avc->pps[pps_id]; if (!si->pps->slice_group_count) return -2; si->sps = &avc->sps[si->pps->sps_id]; if (!si->sps->log2_max_frame_num) return -2; avc->sps_active_idx = si->pps->sps_id; avc->pps_active_idx = pps_id; si->frame_num = gf_bs_read_int_log(bs, si->sps->log2_max_frame_num, "frame_num"); si->field_pic_flag = 0; si->bottom_field_flag = 0; if (!si->sps->frame_mbs_only_flag) { si->field_pic_flag = gf_bs_read_int_log(bs, 1, "field_pic_flag"); if (si->field_pic_flag) si->bottom_field_flag = gf_bs_read_int_log(bs, 1, "bottom_field_flag"); } if ((si->nal_unit_type == GF_AVC_NALU_IDR_SLICE) || svc_idr_flag) si->idr_pic_id = gf_bs_read_ue_log(bs, "idr_pic_id"); if (si->sps->poc_type == 0) { si->poc_lsb = gf_bs_read_int_log(bs, si->sps->log2_max_poc_lsb, "poc_lsb"); if (si->pps->pic_order_present && !si->field_pic_flag) { si->delta_poc_bottom = gf_bs_read_se_log(bs, "poc_lsb"); } } else if ((si->sps->poc_type == 1) && !si->sps->delta_pic_order_always_zero_flag) { si->delta_poc[0] = gf_bs_read_se_log(bs, "delta_poc0"); if ((si->pps->pic_order_present == 1) && !si->field_pic_flag) si->delta_poc[1] = gf_bs_read_se_log(bs, "delta_poc1"); } if (si->pps->redundant_pic_cnt_present) { si->redundant_pic_cnt = gf_bs_read_ue_log(bs, "redundant_pic_cnt"); } if 
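/* slice_type values 5..9 mirror 0..4 but signal that all slices of the picture share the same type, hence the modulo-5 tests */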
(si->slice_type % 5 == GF_AVC_TYPE_B) { gf_bs_read_int_log(bs, 1, "direct_spatial_mv_pred_flag"); } num_ref_idx_l0_active_minus1 = si->pps->num_ref_idx_l0_default_active_minus1; num_ref_idx_l1_active_minus1 = si->pps->num_ref_idx_l1_default_active_minus1; if (si->slice_type % 5 == GF_AVC_TYPE_P || si->slice_type % 5 == GF_AVC_TYPE_SP || si->slice_type % 5 == GF_AVC_TYPE_B) { Bool num_ref_idx_active_override_flag = gf_bs_read_int_log(bs, 1, "num_ref_idx_active_override_flag"); if (num_ref_idx_active_override_flag) { num_ref_idx_l0_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l0_active_minus1"); if (si->slice_type % 5 == GF_AVC_TYPE_B) { num_ref_idx_l1_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l1_active_minus1"); } } } if (si->nal_unit_type == 20 || si->nal_unit_type == 21) { //ref_pic_list_mvc_modification(); /* specified in Annex H */ GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[avc-h264] unimplemented ref_pic_list_mvc_modification() in slide header\n")); assert(0); return -1; } else { ref_pic_list_modification(bs, si->slice_type); } if ((si->pps->weighted_pred_flag && (si->slice_type % 5 == GF_AVC_TYPE_P || si->slice_type % 5 == GF_AVC_TYPE_SP)) || (si->pps->weighted_bipred_idc == 1 && si->slice_type % 5 == GF_AVC_TYPE_B)) { pred_weight_table(bs, si->slice_type, si->sps->ChromaArrayType, num_ref_idx_l0_active_minus1, num_ref_idx_l1_active_minus1); } if (si->nal_ref_idc != 0) { dec_ref_pic_marking(bs, (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE)); } if (si->pps->entropy_coding_mode_flag && si->slice_type % 5 != GF_AVC_TYPE_I && si->slice_type % 5 != GF_AVC_TYPE_SI) { gf_bs_read_ue_log(bs, "cabac_init_idc"); } /*slice_qp_delta = */gf_bs_read_se(bs); if (si->slice_type % 5 == GF_AVC_TYPE_SP || si->slice_type % 5 == GF_AVC_TYPE_SI) { if (si->slice_type % 5 == GF_AVC_TYPE_SP) { gf_bs_read_int_log(bs, 1, "sp_for_switch_flag"); } gf_bs_read_se_log(bs, "slice_qs_delta"); } if (si->pps->deblocking_filter_control_present_flag) { if (gf_bs_read_ue_log(bs, "disable_deblocking_filter_idc") != 1) { gf_bs_read_se_log(bs, "slice_alpha_c0_offset_div2"); gf_bs_read_se_log(bs, "slice_beta_offset_div2"); } } if (si->pps->slice_group_count > 1 && si->pps->mb_slice_group_map_type >= 3 && si->pps->mb_slice_group_map_type <= 5) { gf_bs_read_int_log(bs, (u32)ceil(log1p((si->pps->pic_size_in_map_units_minus1 + 1) / (si->pps->slice_group_change_rate_minus1 + 1) ) / log(2)), "slice_group_change_cycle"); } return 0; } static s32 svc_parse_slice(GF_BitStream *bs, AVCState *avc, AVCSliceInfo *si) { s32 pps_id; /*s->current_picture.reference= h->nal_ref_idc != 0;*/ gf_bs_read_ue_log(bs, "first_mb_in_slice"); si->slice_type = gf_bs_read_ue_log(bs, "slice_type"); if (si->slice_type > 9) return -1; pps_id = gf_bs_read_ue_log(bs, "pps_id"); if (pps_id > 255) return -1; si->pps = &avc->pps[pps_id]; si->pps->id = pps_id; if (!si->pps->slice_group_count) return -2; si->sps = &avc->sps[si->pps->sps_id + GF_SVC_SSPS_ID_SHIFT]; if (!si->sps->log2_max_frame_num) return -2; si->frame_num = gf_bs_read_int_log(bs, si->sps->log2_max_frame_num, "frame_num"); si->field_pic_flag = 0; if (si->sps->frame_mbs_only_flag) { /*s->picture_structure= PICT_FRAME;*/ } else { si->field_pic_flag = gf_bs_read_int_log(bs, 1, "field_pic_flag"); if (si->field_pic_flag) si->bottom_field_flag = gf_bs_read_int_log(bs, 1, "bottom_field_flag"); } if (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE || si->NalHeader.idr_pic_flag) si->idr_pic_id = gf_bs_read_ue_log(bs, "idr_pic_id"); if (si->sps->poc_type == 0) { si->poc_lsb = gf_bs_read_int_log(bs, 
si->sps->log2_max_poc_lsb, "poc_lsb"); if (si->pps->pic_order_present && !si->field_pic_flag) { si->delta_poc_bottom = gf_bs_read_se_log(bs, "delta_poc_bottom"); } } else if ((si->sps->poc_type == 1) && !si->sps->delta_pic_order_always_zero_flag) { si->delta_poc[0] = gf_bs_read_se_log(bs, "delta_poc0"); if ((si->pps->pic_order_present == 1) && !si->field_pic_flag) si->delta_poc[1] = gf_bs_read_se_log(bs, "delta_poc1"); } if (si->pps->redundant_pic_cnt_present) { si->redundant_pic_cnt = gf_bs_read_ue_log(bs, "redundant_pic_cnt"); } return 0; } static s32 avc_parse_recovery_point_sei(GF_BitStream *bs, AVCState *avc) { AVCSeiRecoveryPoint *rp = &avc->sei.recovery_point; rp->frame_cnt = gf_bs_read_ue_log(bs, "frame_cnt"); rp->exact_match_flag = gf_bs_read_int_log(bs, 1, "exact_match_flag"); rp->broken_link_flag = gf_bs_read_int_log(bs, 1, "broken_link_flag"); rp->changing_slice_group_idc = gf_bs_read_int_log(bs, 2, "changing_slice_group_idc"); rp->valid = 1; return 0; } /*for interpretation see ISO 14496-10 N.11084, table D-1*/ static s32 avc_parse_pic_timing_sei(GF_BitStream *bs, AVCState *avc) { int sps_id = avc->sps_active_idx; const char NumClockTS[] = { 1, 1, 1, 2, 2, 3, 3, 2, 3 }; AVCSeiPicTiming *pt = &avc->sei.pic_timing; if (sps_id < 0) { /*sps_active_idx equals -1 when no sps has been detected. In this case SEI should not be decoded.*/ assert(0); return 1; } if (avc->sps[sps_id].vui.nal_hrd_parameters_present_flag || avc->sps[sps_id].vui.vcl_hrd_parameters_present_flag) { /*CpbDpbDelaysPresentFlag, see 14496-10(2003) E.11*/ gf_bs_read_int_log(bs, 1 + avc->sps[sps_id].vui.hrd.cpb_removal_delay_length_minus1, "cpb_removal_delay_minus1"); gf_bs_read_int_log(bs, 1 + avc->sps[sps_id].vui.hrd.dpb_output_delay_length_minus1, "dpb_output_delay_minus1"); } /*ISO 14496-10 (2003), D.8.2: we need to get pic_struct in order to know if we display top field first or bottom field first*/ if (avc->sps[sps_id].vui.pic_struct_present_flag) { int i; pt->pic_struct = gf_bs_read_int_log(bs, 4, "pic_struct"); if (pt->pic_struct > 8) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[avc-h264] invalid pic_struct value %d\n", pt->pic_struct)); return 1; } for (i = 0; i < NumClockTS[pt->pic_struct]; i++) { if (gf_bs_read_int_log_idx(bs, 1, "clock_timestamp_flag", i)) { Bool full_timestamp_flag; gf_bs_read_int_log_idx(bs, 2, "ct_type", i); gf_bs_read_int_log_idx(bs, 1, "nuit_field_based_flag", i); gf_bs_read_int_log_idx(bs, 5, "counting_type", i); full_timestamp_flag = gf_bs_read_int_log_idx(bs, 1, "full_timestamp_flag", i); gf_bs_read_int_log_idx(bs, 1, "discontinuity_flag", i); gf_bs_read_int_log_idx(bs, 1, "cnt_dropped_flag", i); gf_bs_read_int_log_idx(bs, 8, "n_frames", i); if (full_timestamp_flag) { gf_bs_read_int_log_idx(bs, 6, "seconds_value", i); gf_bs_read_int_log_idx(bs, 6, "minutes_value", i); gf_bs_read_int_log_idx(bs, 5, "hours_value", i); } else { if (gf_bs_read_int_log_idx(bs, 1, "seconds_flag", i)) { gf_bs_read_int_log_idx(bs, 6, "seconds_value", i); if (gf_bs_read_int_log_idx(bs, 1, "minutes_flag", i)) { gf_bs_read_int_log_idx(bs, 6, "minutes_value", i); if (gf_bs_read_int_log_idx(bs, 1, "hours_flag", i)) { gf_bs_read_int_log_idx(bs, 5, "hours_value", i); } } } if (avc->sps[sps_id].vui.hrd.time_offset_length > 0) gf_bs_read_int_log_idx(bs, avc->sps[sps_id].vui.hrd.time_offset_length, "time_offset", i); } } } } return 0; } #if !defined(GPAC_DISABLE_HEVC) static void avc_parse_itu_t_t35_sei(GF_BitStream* bs, AVCSeiItuTT35DolbyVision *dovi) { u8 itu_t_t35_country_code = gf_bs_read_u8(bs); u16 
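/* ITU-T T.35 registered SEI: country code 0xB5 (US), provider code 0x31, user identifier 'GA94' and data_type_code 0x08 or 0x09 are matched below to flag a Dolby Vision RPU */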
terminal_provider_code = gf_bs_read_u16(bs); u32 user_id = gf_bs_read_u32(bs); u8 data_type_code = gf_bs_read_u8(bs); if (itu_t_t35_country_code == 0xB5 && terminal_provider_code == 0x31 && user_id == 0x47413934 && (data_type_code == 0x8 || data_type_code == 0x9)) { dovi->rpu_flag = GF_TRUE; } } #endif static void avc_compute_poc(AVCSliceInfo *si) { enum { AVC_PIC_FRAME, AVC_PIC_FIELD_TOP, AVC_PIC_FIELD_BOTTOM, } pic_type; s32 field_poc[2] = { 0,0 }; s32 max_frame_num; if (!si->sps) return; max_frame_num = 1 << (si->sps->log2_max_frame_num); /* picture type */ if (si->sps->frame_mbs_only_flag || !si->field_pic_flag) pic_type = AVC_PIC_FRAME; else if (si->bottom_field_flag) pic_type = AVC_PIC_FIELD_BOTTOM; else pic_type = AVC_PIC_FIELD_TOP; /* frame_num_offset */ if (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE) { si->poc_lsb_prev = 0; si->poc_msb_prev = 0; si->frame_num_offset = 0; } else { if (si->frame_num < si->frame_num_prev) si->frame_num_offset = si->frame_num_offset_prev + max_frame_num; else si->frame_num_offset = si->frame_num_offset_prev; } /*ISO 14496-10 N.11084 8.2.1.1*/ if (si->sps->poc_type == 0) { const u32 max_poc_lsb = 1 << (si->sps->log2_max_poc_lsb); /*ISO 14496-10 N.11084 eq (8-3)*/ if ((si->poc_lsb < si->poc_lsb_prev) && (si->poc_lsb_prev - si->poc_lsb >= max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev + max_poc_lsb; else if ((si->poc_lsb > si->poc_lsb_prev) && (si->poc_lsb - si->poc_lsb_prev > max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev - max_poc_lsb; else si->poc_msb = si->poc_msb_prev; /*ISO 14496-10 N.11084 eq (8-4)*/ if (pic_type != AVC_PIC_FIELD_BOTTOM) field_poc[0] = si->poc_msb + si->poc_lsb; /*ISO 14496-10 N.11084 eq (8-5)*/ if (pic_type != AVC_PIC_FIELD_TOP) { if (!si->field_pic_flag) field_poc[1] = field_poc[0] + si->delta_poc_bottom; else field_poc[1] = si->poc_msb + si->poc_lsb; } } /*ISO 14496-10 N.11084 8.2.1.2*/ else if (si->sps->poc_type == 1) { u32 i; s32 abs_frame_num, expected_delta_per_poc_cycle, expected_poc; if (si->sps->poc_cycle_length) abs_frame_num = si->frame_num_offset + si->frame_num; else abs_frame_num = 0; if (!si->nal_ref_idc && (abs_frame_num > 0)) abs_frame_num--; expected_delta_per_poc_cycle = 0; for (i = 0; i < si->sps->poc_cycle_length; i++) expected_delta_per_poc_cycle += si->sps->offset_for_ref_frame[i]; if (abs_frame_num > 0) { const u32 poc_cycle_cnt = (abs_frame_num - 1) / si->sps->poc_cycle_length; const u32 frame_num_in_poc_cycle = (abs_frame_num - 1) % si->sps->poc_cycle_length; expected_poc = poc_cycle_cnt * expected_delta_per_poc_cycle; for (i = 0; i <= frame_num_in_poc_cycle; i++) expected_poc += si->sps->offset_for_ref_frame[i]; } else { expected_poc = 0; } if (!si->nal_ref_idc) expected_poc += si->sps->offset_for_non_ref_pic; field_poc[0] = expected_poc + si->delta_poc[0]; field_poc[1] = field_poc[0] + si->sps->offset_for_top_to_bottom_field; if (pic_type == AVC_PIC_FRAME) field_poc[1] += si->delta_poc[1]; } /*ISO 14496-10 N.11084 8.2.1.3*/ else if (si->sps->poc_type == 2) { int poc; if (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE) { poc = 0; } else { const int abs_frame_num = si->frame_num_offset + si->frame_num; poc = 2 * abs_frame_num; if (!si->nal_ref_idc) poc -= 1; } field_poc[0] = poc; field_poc[1] = poc; } /*ISO 14496-10 N.11084 eq (8-1)*/ if (pic_type == AVC_PIC_FRAME) si->poc = MIN(field_poc[0], field_poc[1]); else if (pic_type == AVC_PIC_FIELD_TOP) si->poc = field_poc[0]; else si->poc = field_poc[1]; } GF_EXPORT s32 gf_avc_parse_nalu(GF_BitStream *bs, AVCState *avc) { u8 idr_flag; s32 slice, ret; u32 
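/* returns 1 when this NALU starts a new access unit (AU delimiter, end of sequence/stream, or a slice whose frame_num / POC / IDR info differs from the previous slice), 0 otherwise, negative on parse error */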
nal_hdr; AVCSliceInfo n_state; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); nal_hdr = gf_bs_read_u8(bs); slice = 0; memcpy(&n_state, &avc->s_info, sizeof(AVCSliceInfo)); avc->last_nal_type_parsed = n_state.nal_unit_type = nal_hdr & 0x1F; n_state.nal_ref_idc = (nal_hdr >> 5) & 0x3; idr_flag = 0; switch (n_state.nal_unit_type) { case GF_AVC_NALU_ACCESS_UNIT: case GF_AVC_NALU_END_OF_SEQ: case GF_AVC_NALU_END_OF_STREAM: ret = 1; break; case GF_AVC_NALU_SVC_SLICE: SVC_ReadNal_header_extension(bs, &n_state.NalHeader); // slice buffer - read the info and compare. /*ret = */svc_parse_slice(bs, avc, &n_state); if (avc->s_info.nal_ref_idc) { n_state.poc_lsb_prev = avc->s_info.poc_lsb; n_state.poc_msb_prev = avc->s_info.poc_msb; } avc_compute_poc(&n_state); if (avc->s_info.poc != n_state.poc) { memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return 1; } memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return 0; case GF_AVC_NALU_SVC_PREFIX_NALU: SVC_ReadNal_header_extension(bs, &n_state.NalHeader); return 0; case GF_AVC_NALU_IDR_SLICE: case GF_AVC_NALU_NON_IDR_SLICE: case GF_AVC_NALU_DP_A_SLICE: case GF_AVC_NALU_DP_B_SLICE: case GF_AVC_NALU_DP_C_SLICE: slice = 1; /* slice buffer - read the info and compare.*/ ret = avc_parse_slice(bs, avc, idr_flag, &n_state); if (ret < 0) return ret; ret = 0; if ( ((avc->s_info.nal_unit_type > GF_AVC_NALU_IDR_SLICE) || (avc->s_info.nal_unit_type < GF_AVC_NALU_NON_IDR_SLICE)) && (avc->s_info.nal_unit_type != GF_AVC_NALU_SVC_SLICE) ) { break; } if (avc->s_info.frame_num != n_state.frame_num) { ret = 1; break; } if (avc->s_info.field_pic_flag != n_state.field_pic_flag) { ret = 1; break; } if ((avc->s_info.nal_ref_idc != n_state.nal_ref_idc) && (!avc->s_info.nal_ref_idc || !n_state.nal_ref_idc)) { ret = 1; break; } assert(avc->s_info.sps); if (avc->s_info.sps->poc_type == n_state.sps->poc_type) { if (!avc->s_info.sps->poc_type) { if (!n_state.bottom_field_flag && (avc->s_info.poc_lsb != n_state.poc_lsb)) { ret = 1; break; } if (avc->s_info.delta_poc_bottom != n_state.delta_poc_bottom) { ret = 1; break; } } else if (avc->s_info.sps->poc_type == 1) { if (avc->s_info.delta_poc[0] != n_state.delta_poc[0]) { ret = 1; break; } if (avc->s_info.delta_poc[1] != n_state.delta_poc[1]) { ret = 1; break; } } } if (n_state.nal_unit_type == GF_AVC_NALU_IDR_SLICE) { if (avc->s_info.nal_unit_type != GF_AVC_NALU_IDR_SLICE) { /*IdrPicFlag differs in value*/ ret = 1; break; } else if (avc->s_info.idr_pic_id != n_state.idr_pic_id) { /*both IDR and idr_pic_id differs*/ ret = 1; break; } } break; case GF_AVC_NALU_SEQ_PARAM: avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 0, NULL, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_PIC_PARAM: avc->last_ps_idx = gf_avc_read_pps_bs_internal(bs, avc, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SVC_SUBSEQ_PARAM: avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 1, NULL, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SEQ_PARAM_EXT: avc->last_ps_idx = (s32) gf_bs_read_ue(bs); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SEI: case GF_AVC_NALU_FILLER_DATA: return 0; default: if (avc->s_info.nal_unit_type <= GF_AVC_NALU_IDR_SLICE) ret = 1; //To detect change of AU when multiple sps and pps in stream else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEI && avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE) ret = 1; else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEQ_PARAM && avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE) ret = 1; else ret 
= 0; break; } /* save _prev values */ if (ret && avc->s_info.sps) { n_state.frame_num_offset_prev = avc->s_info.frame_num_offset; if ((avc->s_info.sps->poc_type != 2) || (avc->s_info.nal_ref_idc != 0)) n_state.frame_num_prev = avc->s_info.frame_num; if (avc->s_info.nal_ref_idc) { n_state.poc_lsb_prev = avc->s_info.poc_lsb; n_state.poc_msb_prev = avc->s_info.poc_msb; } } if (slice) avc_compute_poc(&n_state); memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return ret; } u32 gf_media_avc_reformat_sei(u8 *buffer, u32 nal_size, Bool isobmf_rewrite, AVCState *avc) { u32 ptype, psize, hdr, var; u32 start; GF_BitStream *bs; GF_BitStream *bs_dest = NULL; u8 nhdr; Bool sei_removed = GF_FALSE; char store; hdr = buffer[0]; if ((hdr & 0x1F) != GF_AVC_NALU_SEI) return 0; if (isobmf_rewrite) bs_dest = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); bs = gf_bs_new(buffer, nal_size, GF_BITSTREAM_READ); gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); nhdr = gf_bs_read_int(bs, 8); if (bs_dest) gf_bs_write_int(bs_dest, nhdr, 8); /*parse SEI*/ while (gf_bs_available(bs)) { Bool do_copy; ptype = 0; while (1) { u8 v = gf_bs_read_int(bs, 8); ptype += v; if (v != 0xFF) break; } psize = 0; while (1) { u8 v = gf_bs_read_int(bs, 8); psize += v; if (v != 0xFF) break; } start = (u32)gf_bs_get_position(bs); do_copy = 1; if (start + psize >= nal_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] SEI user message type %d size error (%d but %d remain), keeping full SEI untouched\n", ptype, psize, nal_size - start)); if (bs_dest) gf_bs_del(bs_dest); return nal_size; } switch (ptype) { /*remove SEI messages forbidden in MP4*/ case 3: /*filler data*/ case 10: /*sub_seq info*/ case 11: /*sub_seq_layer char*/ case 12: /*sub_seq char*/ do_copy = 0; sei_removed = GF_TRUE; break; case 5: /*user unregistered */ store = buffer[start + psize]; buffer[start + psize] = 0; GF_LOG(GF_LOG_DEBUG, GF_LOG_CODING, ("[avc-h264] SEI user message %s\n", buffer + start + 16)); buffer[start + psize] = store; break; case 6: /*recovery point*/ avc_parse_recovery_point_sei(bs, avc); break; case 1: /*pic_timing*/ avc_parse_pic_timing_sei(bs, avc); break; case 0: /*buffering period*/ case 2: /*pan scan rect*/ case 4: /*user registered ITU t35*/ case 7: /*def_rec_pic_marking_repetition*/ case 8: /*spare_pic*/ case 9: /*scene info*/ case 13: /*full frame freeze*/ case 14: /*full frame freeze release*/ case 15: /*full frame snapshot*/ case 16: /*progressive refinement segment start*/ case 17: /*progressive refinement segment end*/ case 18: /*motion constrained slice group*/ default: /*add all unknown SEIs*/ break; } if (do_copy && bs_dest) { var = ptype; while (var >= 255) { gf_bs_write_int(bs_dest, 0xFF, 8); var -= 255; } gf_bs_write_int(bs_dest, var, 8); var = psize; while (var >= 255) { gf_bs_write_int(bs_dest, 0xFF, 8); var -= 255; } gf_bs_write_int(bs_dest, var, 8); gf_bs_seek(bs, start); //bs_read_data does not skip EPB, read byte per byte var = psize; while (var) { gf_bs_write_u8(bs_dest, gf_bs_read_u8(bs)); var--; } } else { gf_bs_seek(bs, start); //bs_skip_bytes does not skip EPB, skip byte per byte while (psize) { gf_bs_read_u8(bs); psize--; } } if (gf_bs_available(bs) <= 2) { var = gf_bs_read_int(bs, 8); if (var != 0x80) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] SEI user message has less than 2 bytes remaining but no end of sei found\n")); } if (bs_dest) gf_bs_write_int(bs_dest, 0x80, 8); break; } } gf_bs_del(bs); //we cannot compare final size and original size since original may have EPB and final does not yet have them if 
(bs_dest && sei_removed) { u8 *dst_no_epb = NULL; u32 dst_no_epb_size = 0; gf_bs_get_content(bs_dest, &dst_no_epb, &dst_no_epb_size); nal_size = gf_media_nalu_add_emulation_bytes(buffer, dst_no_epb, dst_no_epb_size); } if (bs_dest) gf_bs_del(bs_dest); return nal_size; } static u8 avc_hevc_get_sar_idx(u32 w, u32 h) { u32 i, count = GF_ARRAY_LENGTH(avc_hevc_sar); for (i = 0; i < count; i++) { if ((avc_hevc_sar[i].w == w) && (avc_hevc_sar[i].h == h)) return i; } return 0xFF; } static void avc_hevc_rewrite_vui(GF_VUIInfo *vui_info, GF_BitStream *orig, GF_BitStream *mod) { /* VUI present flag*/ Bool vui_present_flag = gf_bs_read_int(orig, 1); /*setup default values*/ Bool aspect_ratio_info_present_flag = 0; s32 aspect_ratio_idc = -1; u32 ar_n=0, ar_d=0; Bool overscan_info_present_flag = 0; u32 overscan_info=0; u32 video_signal_type_present_flag=0; u32 video_format = 5; u32 video_full_range_flag = 0; u32 colour_description_present_flag = 0; u32 colour_primaries = 2; u32 transfer_characteristics = 2; u32 matrix_coefficients = 2; //if VUI is present, read all SAR and overscan values if (vui_present_flag) { /* VUI found in input bitstream */ aspect_ratio_info_present_flag = gf_bs_read_int(orig, 1); if (aspect_ratio_info_present_flag) { aspect_ratio_idc = gf_bs_read_int(orig, 8); /*aspect_ratio_idc*/ if (aspect_ratio_idc == 255) { ar_n = gf_bs_read_int(orig, 16); /*sar_width*/ ar_d = gf_bs_read_int(orig, 16); /*sar_height*/ } } /*overscan_info_present_flag */ overscan_info_present_flag = gf_bs_read_int(orig, 1); if(overscan_info_present_flag) { overscan_info = gf_bs_read_int(orig, 1); } /* read all video signal related flags first */ video_signal_type_present_flag = gf_bs_read_int(orig, 1); if(video_signal_type_present_flag) { video_format = gf_bs_read_int(orig, 3); video_full_range_flag = gf_bs_read_int(orig, 1); colour_description_present_flag = gf_bs_read_int(orig, 1); if(colour_description_present_flag) { colour_primaries = gf_bs_read_int(orig, 8); transfer_characteristics = gf_bs_read_int(orig, 8); matrix_coefficients = gf_bs_read_int(orig, 8); } } } //recompute values //no change if ((vui_info->ar_num<0) || (vui_info->ar_den<0)) { } //remove par else if ((vui_info->ar_num==0) || (vui_info->ar_den==0)) { aspect_ratio_info_present_flag = 0; } //set par else { aspect_ratio_info_present_flag = 1; ar_n = vui_info->ar_num; ar_d = vui_info->ar_den; aspect_ratio_idc = avc_hevc_get_sar_idx((u32) ar_n, (u32) ar_d); } if (vui_info->remove_video_info) { video_signal_type_present_flag = 0; } /* correct the values of each flags */ else if ((vui_info->fullrange==0) && (vui_info->video_format==5) && (vui_info->color_prim==2) && (vui_info->color_tfc==2) && (vui_info->color_matrix==2)) { video_signal_type_present_flag = 0; /* all default, nothing to write*/ } else { video_signal_type_present_flag = 1; video_format = (vui_info->video_format < 0) ? video_format : vui_info->video_format; video_full_range_flag = (vui_info->fullrange < 0) ? video_full_range_flag : vui_info->fullrange; if ((vui_info->color_prim==2) && (vui_info->color_tfc==2) && (vui_info->color_matrix==2)) { colour_description_present_flag = 0; } else { colour_description_present_flag = 1; colour_primaries = (vui_info->color_prim < 0) ? colour_primaries : vui_info->color_prim; transfer_characteristics = (vui_info->color_tfc < 0) ? transfer_characteristics : vui_info->color_tfc; matrix_coefficients = (vui_info->color_matrix < 0) ? 
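/* a negative value in GF_VUIInfo means "leave the original value untouched" */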
matrix_coefficients : vui_info->color_matrix; } if ((colour_primaries==2) && (transfer_characteristics==2) && (matrix_coefficients==2)) { colour_description_present_flag = 0; if ((video_format==5) && (video_full_range_flag==0)) video_signal_type_present_flag = 0; } } //always rewrite VUI gf_bs_write_int(mod, 1, 1); gf_bs_write_int(mod, aspect_ratio_info_present_flag, 1); if (aspect_ratio_info_present_flag) { gf_bs_write_int(mod, aspect_ratio_idc, 8); if (aspect_ratio_idc == 255) { gf_bs_write_int(mod, ar_n, 16); gf_bs_write_int(mod, ar_d, 16); } if (vui_info->update) { vui_info->ar_num = ar_n; vui_info->ar_den = ar_d; } } gf_bs_write_int(mod, overscan_info_present_flag, 1); if (overscan_info_present_flag) { gf_bs_write_int(mod, overscan_info, 1); } gf_bs_write_int(mod, video_signal_type_present_flag, 1); if (video_signal_type_present_flag) { gf_bs_write_int(mod, video_format, 3); gf_bs_write_int(mod, video_full_range_flag, 1); gf_bs_write_int(mod, colour_description_present_flag, 1); if (colour_description_present_flag) { gf_bs_write_int(mod, colour_primaries, 8); gf_bs_write_int(mod, transfer_characteristics, 8); gf_bs_write_int(mod, matrix_coefficients, 8); } if (vui_info->update) { vui_info->video_format = video_format; vui_info->fullrange = video_full_range_flag; if (colour_description_present_flag) { vui_info->color_prim = colour_primaries; vui_info->color_tfc = transfer_characteristics; vui_info->color_matrix = matrix_coefficients; } } } /*no VUI in input bitstream but we just inserted one, set all remaining vui flags to 0*/ if (!vui_present_flag) { gf_bs_write_int(mod, 0, 1); /*chroma_location_info_present_flag */ gf_bs_write_int(mod, 0, 1); /*timing_info_present_flag*/ gf_bs_write_int(mod, 0, 1); /*nal_hrd_parameters_present*/ gf_bs_write_int(mod, 0, 1); /*vcl_hrd_parameters_present*/ gf_bs_write_int(mod, 0, 1); /*pic_struct_present*/ gf_bs_write_int(mod, 0, 1); /*bitstream_restriction*/ } /*otherwise we copy over th bits from the input bitrate*/ } GF_Err gf_avc_change_vui(GF_AVCConfig *avcc, GF_VUIInfo *vui_info) { GF_BitStream *orig, *mod; AVCState avc; u32 i, bit_offset, flag; s32 idx; GF_AVCConfigSlot *slc; orig = NULL; memset(&avc, 0, sizeof(AVCState)); avc.sps_active_idx = -1; i=0; while ((slc = (GF_AVCConfigSlot *)gf_list_enum(avcc->sequenceParameterSets, &i))) { u8 *no_emulation_buf = NULL; u32 no_emulation_buf_size = 0, emulation_bytes = 0; idx = gf_avc_read_sps(slc->data, slc->size, &avc, 0, &bit_offset); if (idx<0) { if ( orig ) gf_bs_del(orig); continue; } /*SPS still contains emulation bytes*/ no_emulation_buf = gf_malloc((slc->size - 1) * sizeof(char)); no_emulation_buf_size = gf_media_nalu_remove_emulation_bytes(slc->data + 1, no_emulation_buf, slc->size - 1); orig = gf_bs_new(no_emulation_buf, no_emulation_buf_size, GF_BITSTREAM_READ); gf_bs_read_data(orig, no_emulation_buf, no_emulation_buf_size); gf_bs_seek(orig, 0); mod = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); /*copy over till vui flag*/ assert(bit_offset >= 8); while (bit_offset - 8/*bit_offset doesn't take care of the first byte (NALU type)*/) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); bit_offset--; } avc_hevc_rewrite_vui(vui_info, orig, mod); /*finally copy over remaining*/ while (gf_bs_bits_available(orig)) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); } gf_bs_del(orig); orig = NULL; gf_free(no_emulation_buf); /*set anti-emulation*/ gf_bs_get_content(mod, &no_emulation_buf, &flag); emulation_bytes = gf_media_nalu_emulation_bytes_add_count(no_emulation_buf, flag); if 
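/* re-insert emulation prevention bytes and grow the slot if the rewritten SPS (payload + emulation bytes + 1 NAL header byte) no longer fits */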
(flag+emulation_bytes+1>slc->size) slc->data = (char*)gf_realloc(slc->data, flag+emulation_bytes+1); slc->size = gf_media_nalu_add_emulation_bytes(no_emulation_buf, slc->data + 1, flag) + 1; gf_bs_del(mod); gf_free(no_emulation_buf); } return GF_OK; } GF_EXPORT GF_Err gf_media_avc_change_par(GF_AVCConfig *avcc, s32 ar_n, s32 ar_d) { GF_VUIInfo vuii; memset(&vuii, 0, sizeof(GF_VUIInfo)); vuii.ar_num = ar_n; vuii.ar_den = ar_d; vuii.fullrange = -1; vuii.video_format = -1; vuii.color_prim = -1; vuii.color_tfc = -1; vuii.color_matrix = -1; return gf_avc_change_vui(avcc, &vuii); } GF_EXPORT GF_Err gf_media_avc_change_color(GF_AVCConfig *avcc, s32 fullrange, s32 vidformat, s32 colorprim, s32 transfer, s32 colmatrix) { GF_VUIInfo vuii; memset(&vuii, 0, sizeof(GF_VUIInfo)); vuii.ar_num = -1; vuii.ar_den = -1; vuii.fullrange = fullrange; vuii.video_format = vidformat; vuii.color_prim = colorprim; vuii.color_tfc = transfer; vuii.color_matrix = colmatrix; return gf_avc_change_vui(avcc, &vuii); } GF_EXPORT GF_Err gf_avc_get_sps_info(u8 *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d) { AVCState avc; s32 idx; memset(&avc, 0, sizeof(AVCState)); avc.sps_active_idx = -1; idx = gf_avc_read_sps(sps_data, sps_size, &avc, 0, NULL); if (idx < 0) { return GF_NON_COMPLIANT_BITSTREAM; } if (sps_id) *sps_id = idx; if (width) *width = avc.sps[idx].width; if (height) *height = avc.sps[idx].height; if (par_n) *par_n = avc.sps[idx].vui.par_num ? avc.sps[idx].vui.par_num : (u32)-1; if (par_d) *par_d = avc.sps[idx].vui.par_den ? avc.sps[idx].vui.par_den : (u32)-1; return GF_OK; } GF_EXPORT GF_Err gf_avc_get_pps_info(u8 *pps_data, u32 pps_size, u32 *pps_id, u32 *sps_id) { GF_BitStream *bs; GF_Err e = GF_OK; bs = gf_bs_new(pps_data, pps_size, GF_BITSTREAM_READ); if (!bs) { e = GF_NON_COMPLIANT_BITSTREAM; goto exit; } gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); /*nal hdr*/ gf_bs_read_int(bs, 8); *pps_id = gf_bs_read_ue(bs); *sps_id = gf_bs_read_ue(bs); exit: gf_bs_del(bs); return e; } #ifndef GPAC_DISABLE_HEVC /********** HEVC parsing **********/ Bool gf_hevc_slice_is_intra(HEVCState *hevc) { switch (hevc->s_info.nal_unit_type) { case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: case GF_HEVC_NALU_SLICE_CRA: return GF_TRUE; default: return GF_FALSE; } } Bool gf_hevc_slice_is_IDR(HEVCState *hevc) { if (hevc->sei.recovery_point.valid) { hevc->sei.recovery_point.valid = 0; return GF_TRUE; } switch (hevc->s_info.nal_unit_type) { case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: return GF_TRUE; default: return GF_FALSE; } } static Bool hevc_parse_short_term_ref_pic_set(GF_BitStream *bs, HEVC_SPS *sps, u32 idx_rps) { u32 i; Bool inter_ref_pic_set_prediction_flag = 0; if (idx_rps != 0) inter_ref_pic_set_prediction_flag = gf_bs_read_int_log_idx(bs, 1, "inter_ref_pic_set_prediction_flag", idx_rps); if (inter_ref_pic_set_prediction_flag) { HEVC_ReferencePictureSets *ref_ps, *rps; u32 delta_idx_minus1 = 0; u32 ref_idx; u32 delta_rps_sign; u32 abs_delta_rps_minus1, nb_ref_pics; s32 deltaRPS; u32 k = 0, k0 = 0, k1 = 0; if (idx_rps == sps->num_short_term_ref_pic_sets) delta_idx_minus1 = gf_bs_read_ue_log_idx(bs, "delta_idx_minus1", idx_rps); assert(delta_idx_minus1 <= idx_rps - 1); ref_idx = idx_rps - 1 - delta_idx_minus1; delta_rps_sign = gf_bs_read_int_log_idx(bs, 1, "delta_rps_sign", idx_rps); abs_delta_rps_minus1 = gf_bs_read_ue_log_idx(bs, 
"abs_delta_rps_minus1", idx_rps); deltaRPS = (1 - (delta_rps_sign << 1)) * (abs_delta_rps_minus1 + 1); rps = &sps->rps[idx_rps]; ref_ps = &sps->rps[ref_idx]; nb_ref_pics = ref_ps->num_negative_pics + ref_ps->num_positive_pics; for (i = 0; i <= nb_ref_pics; i++) { s32 ref_idc; s32 used_by_curr_pic_flag = gf_bs_read_int_log_idx2(bs, 1, "used_by_curr_pic_flag", idx_rps, i); ref_idc = used_by_curr_pic_flag ? 1 : 0; if (!used_by_curr_pic_flag) { used_by_curr_pic_flag = gf_bs_read_int_log_idx2(bs, 1, "used_by_curr_pic_flag", idx_rps, i); ref_idc = used_by_curr_pic_flag << 1; } if ((ref_idc == 1) || (ref_idc == 2)) { s32 deltaPOC = deltaRPS; if (i < nb_ref_pics) deltaPOC += ref_ps->delta_poc[i]; rps->delta_poc[k] = deltaPOC; if (deltaPOC < 0) k0++; else k1++; k++; } } rps->num_negative_pics = k0; rps->num_positive_pics = k1; } else { s32 prev = 0, poc; sps->rps[idx_rps].num_negative_pics = gf_bs_read_ue_log_idx(bs, "num_negative_pics", idx_rps); sps->rps[idx_rps].num_positive_pics = gf_bs_read_ue_log_idx(bs, "num_positive_pics", idx_rps); if (sps->rps[idx_rps].num_negative_pics > 16) return GF_FALSE; if (sps->rps[idx_rps].num_positive_pics > 16) return GF_FALSE; for (i = 0; i < sps->rps[idx_rps].num_negative_pics; i++) { u32 delta_poc_s0_minus1 = gf_bs_read_ue_log_idx2(bs, "delta_poc_s0_minus1", idx_rps, i); poc = prev - delta_poc_s0_minus1 - 1; prev = poc; sps->rps[idx_rps].delta_poc[i] = poc; gf_bs_read_int_log_idx2(bs, 1, "delta_poc_s0_minus1", idx_rps, i); } for (i = 0; i < sps->rps[idx_rps].num_positive_pics; i++) { u32 delta_poc_s1_minus1 = gf_bs_read_ue_log_idx2(bs, "delta_poc_s1_minus1" , idx_rps, i); poc = prev + delta_poc_s1_minus1 + 1; prev = poc; sps->rps[idx_rps].delta_poc[i] = poc; gf_bs_read_int_log_idx2(bs, 1, "used_by_curr_pic_s1_flag", idx_rps, i); } } return GF_TRUE; } void hevc_pred_weight_table(GF_BitStream *bs, HEVCState *hevc, HEVCSliceInfo *si, HEVC_PPS *pps, HEVC_SPS *sps, u32 num_ref_idx_l0_active, u32 num_ref_idx_l1_active) { u32 i, num_ref_idx; Bool first_pass = GF_TRUE; u8 luma_weights[20], chroma_weights[20]; u32 ChromaArrayType = sps->separate_colour_plane_flag ? 
0 : sps->chroma_format_idc; num_ref_idx = num_ref_idx_l0_active; gf_bs_read_ue_log(bs, "luma_log2_weight_denom"); if (ChromaArrayType != 0) gf_bs_read_se_log(bs, "delta_chroma_log2_weight_denom"); parse_weights: for (i = 0; i < num_ref_idx; i++) { luma_weights[i] = gf_bs_read_int_log_idx(bs, 1, "luma_weights", i); //infered to be 0 if not present chroma_weights[i] = 0; } if (ChromaArrayType != 0) { for (i = 0; i < num_ref_idx; i++) { chroma_weights[i] = gf_bs_read_int_log_idx(bs, 1, "chroma_weights", i); } } for (i = 0; i < num_ref_idx; i++) { if (luma_weights[i]) { gf_bs_read_se_log_idx(bs, "delta_luma_weight_l0", i); gf_bs_read_se_log_idx(bs, "luma_offset_l0", i); } if (chroma_weights[i]) { gf_bs_read_se_log_idx(bs, "delta_chroma_weight_l0_0", i); gf_bs_read_se_log_idx(bs, "delta_chroma_offset_l0_0", i); gf_bs_read_se_log_idx(bs, "delta_chroma_weight_l0_1", i); gf_bs_read_se_log_idx(bs, "delta_chroma_offset_l0_1", i); } } if (si->slice_type == GF_HEVC_SLICE_TYPE_B) { if (!first_pass) return; first_pass = GF_FALSE; num_ref_idx = num_ref_idx_l1_active; goto parse_weights; } } static Bool ref_pic_lists_modification(GF_BitStream *bs, u32 slice_type, u32 num_ref_idx_l0_active, u32 num_ref_idx_l1_active) { //u32 i; Bool ref_pic_list_modification_flag_l0 = gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l0"); if (ref_pic_list_modification_flag_l0) { /*for (i=0; i<num_ref_idx_l0_active; i++) { list_entry_l0[i] = *//*gf_bs_read_int(bs, (u32)ceil(log(getNumPicTotalCurr())/log(2))); }*/ return GF_FALSE; } if (slice_type == GF_HEVC_SLICE_TYPE_B) { Bool ref_pic_list_modification_flag_l1 = gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l1"); if (ref_pic_list_modification_flag_l1) { /*for (i=0; i<num_ref_idx_l1_active; i++) { list_entry_l1[i] = *//*gf_bs_read_int(bs, (u32)ceil(log(getNumPicTotalCurr()) / log(2))); }*/ return GF_FALSE; } } return GF_TRUE; } static s32 hevc_parse_slice_segment(GF_BitStream *bs, HEVCState *hevc, HEVCSliceInfo *si) { u32 i, j; u32 num_ref_idx_l0_active = 0, num_ref_idx_l1_active = 0; HEVC_PPS *pps; HEVC_SPS *sps; s32 pps_id; Bool RapPicFlag = GF_FALSE; Bool IDRPicFlag = GF_FALSE; si->first_slice_segment_in_pic_flag = gf_bs_read_int_log(bs, 1, "first_slice_segment_in_pic_flag"); switch (si->nal_unit_type) { case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: IDRPicFlag = GF_TRUE; RapPicFlag = GF_TRUE; break; case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: case GF_HEVC_NALU_SLICE_CRA: RapPicFlag = GF_TRUE; break; } if (RapPicFlag) { gf_bs_read_int_log(bs, 1, "no_output_of_prior_pics_flag"); } pps_id = gf_bs_read_ue_log(bs, "pps_id"); if (pps_id >= 64) return -1; pps = &hevc->pps[pps_id]; sps = &hevc->sps[pps->sps_id]; si->sps = sps; si->pps = pps; if (!si->first_slice_segment_in_pic_flag && pps->dependent_slice_segments_enabled_flag) { si->dependent_slice_segment_flag = gf_bs_read_int_log(bs, 1, "dependent_slice_segment_flag"); } else { si->dependent_slice_segment_flag = GF_FALSE; } if (!si->first_slice_segment_in_pic_flag) { si->slice_segment_address = gf_bs_read_int_log(bs, sps->bitsSliceSegmentAddress, "slice_segment_address"); } else { si->slice_segment_address = 0; } if (!si->dependent_slice_segment_flag) { Bool deblocking_filter_override_flag = 0; Bool slice_temporal_mvp_enabled_flag = 0; Bool slice_sao_luma_flag = 0; Bool slice_sao_chroma_flag = 0; Bool slice_deblocking_filter_disabled_flag = 0; //"slice_reserved_undetermined_flag[]" gf_bs_read_int_log(bs, 
pps->num_extra_slice_header_bits, "slice_reserved_undetermined_flag"); si->slice_type = gf_bs_read_ue_log(bs, "slice_type"); if (pps->output_flag_present_flag) gf_bs_read_int_log(bs, 1, "pic_output_flag"); if (sps->separate_colour_plane_flag == 1) gf_bs_read_int_log(bs, 2, "colour_plane_id"); if (IDRPicFlag) { si->poc_lsb = 0; //if not asked to parse full header, abort since we know the poc if (!hevc->full_slice_header_parse) return 0; } else { si->poc_lsb = gf_bs_read_int_log(bs, sps->log2_max_pic_order_cnt_lsb, "poc_lsb"); //if not asked to parse full header, abort once we have the poc if (!hevc->full_slice_header_parse) return 0; if (gf_bs_read_int_log(bs, 1, "short_term_ref_pic_set_sps_flag") == 0) { Bool ret = hevc_parse_short_term_ref_pic_set(bs, sps, sps->num_short_term_ref_pic_sets); if (!ret) return -1; } else if (sps->num_short_term_ref_pic_sets > 1) { u32 numbits = 0; while ((u32)(1 << numbits) < sps->num_short_term_ref_pic_sets) numbits++; if (numbits > 0) gf_bs_read_int_log(bs, numbits, "short_term_ref_pic_set_idx"); /*else short_term_ref_pic_set_idx = 0;*/ } if (sps->long_term_ref_pics_present_flag) { u8 DeltaPocMsbCycleLt[32]; u32 num_long_term_sps = 0; u32 num_long_term_pics = 0; memset(DeltaPocMsbCycleLt, 0, sizeof(u8) * 32); if (sps->num_long_term_ref_pic_sps > 0) { num_long_term_sps = gf_bs_read_ue_log(bs, "num_long_term_sps"); } num_long_term_pics = gf_bs_read_ue_log(bs, "num_long_term_pics"); for (i = 0; i < num_long_term_sps + num_long_term_pics; i++) { if (i < num_long_term_sps) { if (sps->num_long_term_ref_pic_sps > 1) gf_bs_read_int_log_idx(bs, gf_get_bit_size(sps->num_long_term_ref_pic_sps), "lt_idx_sps", i); } else { gf_bs_read_int_log_idx(bs, sps->log2_max_pic_order_cnt_lsb, "PocLsbLt", i); gf_bs_read_int_log_idx(bs, 1, "UsedByCurrPicLt", i); } if (gf_bs_read_int_log_idx(bs, 1, "delta_poc_msb_present_flag", i)) { if (i == 0 || i == num_long_term_sps) DeltaPocMsbCycleLt[i] = gf_bs_read_ue_log_idx(bs, "DeltaPocMsbCycleLt", i); else DeltaPocMsbCycleLt[i] = gf_bs_read_ue_log_idx(bs, "DeltaPocMsbCycleLt", i) + DeltaPocMsbCycleLt[i - 1]; } } } if (sps->temporal_mvp_enable_flag) slice_temporal_mvp_enabled_flag = gf_bs_read_int_log(bs, 1, "slice_temporal_mvp_enabled_flag"); } if (sps->sample_adaptive_offset_enabled_flag) { u32 ChromaArrayType = sps->separate_colour_plane_flag ? 
0 : sps->chroma_format_idc; slice_sao_luma_flag = gf_bs_read_int_log(bs, 1, "slice_sao_luma_flag"); if (ChromaArrayType != 0) slice_sao_chroma_flag = gf_bs_read_int_log(bs, 1, "slice_sao_chroma_flag"); } if (si->slice_type == GF_HEVC_SLICE_TYPE_P || si->slice_type == GF_HEVC_SLICE_TYPE_B) { //u32 NumPocTotalCurr; num_ref_idx_l0_active = pps->num_ref_idx_l0_default_active; num_ref_idx_l1_active = 0; if (si->slice_type == GF_HEVC_SLICE_TYPE_B) num_ref_idx_l1_active = pps->num_ref_idx_l1_default_active; if (gf_bs_read_int_log(bs, 1, "num_ref_idx_active_override_flag")) { num_ref_idx_l0_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l0_active"); if (si->slice_type == GF_HEVC_SLICE_TYPE_B) num_ref_idx_l1_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l1_active"); } if (pps->lists_modification_present_flag /*TODO: && NumPicTotalCurr > 1*/) { if (!ref_pic_lists_modification(bs, si->slice_type, num_ref_idx_l0_active, num_ref_idx_l1_active)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[hevc] ref_pic_lists_modification( ) not implemented\n")); return -1; } } if (si->slice_type == GF_HEVC_SLICE_TYPE_B) gf_bs_read_int_log(bs, 1, "mvd_l1_zero_flag"); if (pps->cabac_init_present_flag) gf_bs_read_int_log(bs, 1, "cabac_init_flag"); if (slice_temporal_mvp_enabled_flag) { // When collocated_from_l0_flag is not present, it is inferred to be equal to 1. Bool collocated_from_l0_flag = 1; if (si->slice_type == GF_HEVC_SLICE_TYPE_B) collocated_from_l0_flag = gf_bs_read_int_log(bs, 1, "collocated_from_l0_flag"); if ((collocated_from_l0_flag && (num_ref_idx_l0_active > 1)) || (!collocated_from_l0_flag && (num_ref_idx_l1_active > 1)) ) { gf_bs_read_ue_log(bs, "collocated_ref_idx"); } } if ((pps->weighted_pred_flag && si->slice_type == GF_HEVC_SLICE_TYPE_P) || (pps->weighted_bipred_flag && si->slice_type == GF_HEVC_SLICE_TYPE_B) ) { hevc_pred_weight_table(bs, hevc, si, pps, sps, num_ref_idx_l0_active, num_ref_idx_l1_active); } gf_bs_read_ue_log(bs, "five_minus_max_num_merge_cand"); } si->slice_qp_delta_start_bits = (s32) (gf_bs_get_position(bs) - 1) * 8 + gf_bs_get_bit_position(bs); si->slice_qp_delta = gf_bs_read_se_log(bs, "slice_qp_delta"); if (pps->slice_chroma_qp_offsets_present_flag) { gf_bs_read_se_log(bs, "slice_cb_qp_offset"); gf_bs_read_se_log(bs, "slice_cr_qp_offset"); } if (pps->deblocking_filter_override_enabled_flag) { deblocking_filter_override_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_override_flag"); } if (deblocking_filter_override_flag) { slice_deblocking_filter_disabled_flag = gf_bs_read_int_log(bs, 1, "slice_deblocking_filter_disabled_flag"); if (!slice_deblocking_filter_disabled_flag) { gf_bs_read_se_log(bs, "slice_beta_offset_div2"); gf_bs_read_se_log(bs, "slice_tc_offset_div2"); } } if (pps->loop_filter_across_slices_enabled_flag && (slice_sao_luma_flag || slice_sao_chroma_flag || !slice_deblocking_filter_disabled_flag) ) { gf_bs_read_int_log(bs, 1, "slice_loop_filter_across_slices_enabled_flag"); } } //dependent slice segment else { //if not asked to parse full header, abort if (!hevc->full_slice_header_parse) return 0; } si->entry_point_start_bits = ((u32)gf_bs_get_position(bs) - 1) * 8 + gf_bs_get_bit_position(bs); if (pps->tiles_enabled_flag || pps->entropy_coding_sync_enabled_flag) { u32 num_entry_point_offsets = gf_bs_read_ue_log(bs, "num_entry_point_offsets"); if (num_entry_point_offsets > 0) { u32 offset = gf_bs_read_ue_log(bs, "offset") + 1; u32 segments = offset >> 4; s32 remain = (offset & 15); for (i = 0; i < num_entry_point_offsets; i++) { //u32 res = 0; for (j = 0; j < 
segments; j++) { //res <<= 16; /*res +=*/ gf_bs_read_int(bs, 16); } if (remain) { //res <<= remain; /* res += */ gf_bs_read_int(bs, remain); } // entry_point_offset = val + 1; // +1; // +1 to get the size } } } if (pps->slice_segment_header_extension_present_flag) { u32 size_ext = gf_bs_read_ue_log(bs, "size_ext"); while (size_ext) { gf_bs_read_int(bs, 8); size_ext--; } } si->header_size_bits = (gf_bs_get_position(bs) - 1) * 8 + gf_bs_get_bit_position(bs); // av_parser.c modified on 16 jan. 2019 if (gf_bs_read_int_log(bs, 1, "byte_align") == 0) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("Error parsing slice header: byte_align not found at end of header !\n")); } gf_bs_align(bs); si->payload_start_offset = (s32)gf_bs_get_position(bs); return 0; } static void gf_hevc_vvc_parse_sei(char *buffer, u32 nal_size, HEVCState *hevc, VVCState *vvc) { u32 ptype, psize, hdr; u64 start; GF_BitStream *bs; hdr = buffer[0]; if (((hdr & 0x7e) >> 1) != GF_HEVC_NALU_SEI_PREFIX) return; bs = gf_bs_new(buffer, nal_size, GF_BITSTREAM_READ); gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); gf_bs_read_int(bs, 16); /*parse SEI*/ while (gf_bs_available(bs)) { u32 consumed; ptype = 0; while (gf_bs_peek_bits(bs, 8, 0)==0xFF) { gf_bs_read_int(bs, 8); ptype += 255; } ptype += gf_bs_read_int(bs, 8); psize = 0; while (gf_bs_peek_bits(bs, 8, 0)==0xFF) { gf_bs_read_int(bs, 8); psize += 255; } psize += gf_bs_read_int(bs, 8); start = gf_bs_get_position(bs); if (start+psize >= nal_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[%s] SEI user message type %d size error (%d but %d remain), skipping SEI message\n", hevc ? "HEVC" : "VVC", ptype, psize, nal_size-start)); break; } switch (ptype) { case 4: /*user registered ITU-T T35*/ if (hevc) { avc_parse_itu_t_t35_sei(bs, &hevc->sei.dovi); } break; default: break; } gf_bs_align(bs); consumed = (u32) (gf_bs_get_position(bs) - start); psize-=consumed; gf_bs_skip_bytes(bs, psize); if (gf_bs_available(bs) <= 2) break; } gf_bs_del(bs); } void gf_hevc_parse_sei(char *buffer, u32 nal_size, HEVCState *hevc) { gf_hevc_vvc_parse_sei(buffer, nal_size, hevc, NULL); } static void hevc_compute_poc(HEVCSliceInfo *si) { u32 max_poc_lsb = 1 << (si->sps->log2_max_pic_order_cnt_lsb); /*POC reset for IDR frames, NOT for CRA*/ switch (si->nal_unit_type) { case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: si->poc_lsb_prev = 0; si->poc_msb_prev = 0; break; } if ((si->poc_lsb < si->poc_lsb_prev) && (si->poc_lsb_prev - si->poc_lsb >= max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev + max_poc_lsb; else if ((si->poc_lsb > si->poc_lsb_prev) && (si->poc_lsb - si->poc_lsb_prev > max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev - max_poc_lsb; else si->poc_msb = si->poc_msb_prev; switch (si->nal_unit_type) { case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: si->poc_msb = 0; break; } si->poc = si->poc_msb + si->poc_lsb; } static Bool hevc_parse_nal_header(GF_BitStream *bs, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { u32 val; val = gf_bs_read_int_log(bs, 1, "forbidden_zero"); if (val) return GF_FALSE; val = gf_bs_read_int_log(bs, 6, "nuh_type"); if (nal_unit_type) *nal_unit_type = val; val = gf_bs_read_int_log(bs, 6, "layerID"); if (layer_id) *layer_id = val; val = gf_bs_read_int_log(bs, 3, "temporalID"); if (!val) return GF_FALSE; val -= 1; if (temporal_id) *temporal_id = val; return GF_TRUE; } void hevc_profile_tier_level(GF_BitStream *bs, Bool ProfilePresentFlag, u8 MaxNumSubLayersMinus1, HEVC_ProfileTierLevel *ptl, u32 idx) { u32 i; 
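	/* Note on profile_tier_level() parsing below: when ProfilePresentFlag is set, the
	   general PTL is read first (2-bit profile_space, 1-bit tier_flag, 5-bit profile_idc,
	   32 compatibility flags, the four progressive/interlaced/non-packed/frame-only flags
	   and 44 reserved bits), followed by the 8-bit level_idc. Per-sub-layer presence flags
	   come next, padded with 2-bit reserved fields up to 8 sub-layers, and finally the
	   sub-layer PTLs themselves. The idx argument is only used to tag log output. */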
if (ProfilePresentFlag) { ptl->profile_space = gf_bs_read_int_log_idx(bs, 2, "profile_space", idx); ptl->tier_flag = gf_bs_read_int_log_idx(bs, 1, "tier_flag", idx); ptl->profile_idc = gf_bs_read_int_log_idx(bs, 5, "profile_idc", idx); ptl->profile_compatibility_flag = gf_bs_read_int_log_idx(bs, 32, "profile_compatibility_flag", idx); ptl->general_progressive_source_flag = gf_bs_read_int_log_idx(bs, 1, "general_progressive_source_flag", idx); ptl->general_interlaced_source_flag = gf_bs_read_int_log_idx(bs, 1, "general_interlaced_source_flag", idx); ptl->general_non_packed_constraint_flag = gf_bs_read_int_log_idx(bs, 1, "general_non_packed_constraint_flag", idx); ptl->general_frame_only_constraint_flag = gf_bs_read_int_log_idx(bs, 1, "general_frame_only_constraint_flag", idx); ptl->general_reserved_44bits = gf_bs_read_long_int(bs, 44); } ptl->level_idc = gf_bs_read_int_log(bs, 8, "level_idc"); for (i = 0; i < MaxNumSubLayersMinus1; i++) { ptl->sub_ptl[i].profile_present_flag = gf_bs_read_int_log_idx2(bs, 1, "profile_present_flag", idx, i); ptl->sub_ptl[i].level_present_flag = gf_bs_read_int_log_idx2(bs, 1, "level_present_flag", idx, i); } if (MaxNumSubLayersMinus1 > 0) { for (i = MaxNumSubLayersMinus1; i < 8; i++) { /*reserved_zero_2bits*/gf_bs_read_int(bs, 2); } } for (i = 0; i < MaxNumSubLayersMinus1; i++) { if (ptl->sub_ptl[i].profile_present_flag) { ptl->sub_ptl[i].profile_space = gf_bs_read_int_log_idx2(bs, 2, "sublayer_profile_space", idx, i); ptl->sub_ptl[i].tier_flag = gf_bs_read_int_log_idx2(bs, 1, "sublayer_tier_flag", idx, i); ptl->sub_ptl[i].profile_idc = gf_bs_read_int_log_idx2(bs, 5, "sublayer_profile_idc", idx, i); ptl->sub_ptl[i].profile_compatibility_flag = gf_bs_read_int_log_idx2(bs, 32, "sublayer_profile_compatibility_flag", idx, i); /*ptl->sub_ptl[i].progressive_source_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_progressive_source_flag", idx, i); /*ptl->sub_ptl[i].interlaced_source_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_interlaced_source_flag", idx, i); /*ptl->sub_ptl[i].non_packed_constraint_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_non_packed_constraint_flag", idx, i); /*ptl->sub_ptl[i].frame_only_constraint_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_frame_only_constraint_flag", idx, i); /*ptl->sub_ptl[i].reserved_44bits =*/ gf_bs_read_long_int(bs, 44); } if (ptl->sub_ptl[i].level_present_flag) ptl->sub_ptl[i].level_idc = gf_bs_read_int_log_idx2(bs, 8, "sublayer_level_idc", idx, i); } } static u32 scalability_type_to_idx(HEVC_VPS *vps, u32 scalability_type) { u32 idx = 0, type; for (type = 0; type < scalability_type; type++) { idx += (vps->scalability_mask[type] ? 
1 : 0); } return idx; } #define LHVC_VIEW_ORDER_INDEX 1 #define LHVC_SCALABILITY_INDEX 2 static u32 lhvc_get_scalability_id(HEVC_VPS *vps, u32 layer_id_in_vps, u32 scalability_type) { u32 idx; if (!vps->scalability_mask[scalability_type]) return 0; idx = scalability_type_to_idx(vps, scalability_type); return vps->dimension_id[layer_id_in_vps][idx]; } static u32 lhvc_get_view_index(HEVC_VPS *vps, u32 id) { return lhvc_get_scalability_id(vps, vps->layer_id_in_vps[id], LHVC_VIEW_ORDER_INDEX); } static u32 lhvc_get_num_views(HEVC_VPS *vps) { u32 numViews = 1, i; for (i = 0; i < vps->max_layers; i++) { u32 layer_id = vps->layer_id_in_nuh[i]; if (i > 0 && (lhvc_get_view_index(vps, layer_id) != lhvc_get_scalability_id(vps, i - 1, LHVC_VIEW_ORDER_INDEX))) { numViews++; } } return numViews; } static void lhvc_parse_rep_format(HEVC_RepFormat *fmt, GF_BitStream *bs, u32 idx) { u8 chroma_bitdepth_present_flag; fmt->pic_width_luma_samples = gf_bs_read_int_log_idx(bs, 16, "pic_width_luma_samples", idx); fmt->pic_height_luma_samples = gf_bs_read_int_log_idx(bs, 16, "pic_height_luma_samples", idx); chroma_bitdepth_present_flag = gf_bs_read_int_log_idx(bs, 1, "chroma_bitdepth_present_flag", idx); if (chroma_bitdepth_present_flag) { fmt->chroma_format_idc = gf_bs_read_int_log_idx(bs, 2, "chroma_format_idc", idx); if (fmt->chroma_format_idc == 3) fmt->separate_colour_plane_flag = gf_bs_read_int_log_idx(bs, 1, "separate_colour_plane_flag", idx); fmt->bit_depth_luma = 8 + gf_bs_read_int_log_idx(bs, 4, "bit_depth_luma_minus8", idx); fmt->bit_depth_chroma = 8 + gf_bs_read_int_log_idx(bs, 4, "bit_depth_chroma_minus8", idx); } if (gf_bs_read_int_log_idx(bs, 1, "conformance_window_vps_flag", idx)) { gf_bs_read_ue_log_idx(bs, "conf_win_vps_left_offset", idx); gf_bs_read_ue_log_idx(bs, "conf_win_vps_right_offset", idx); gf_bs_read_ue_log_idx(bs, "conf_win_vps_top_offset", idx); gf_bs_read_ue_log_idx(bs, "conf_win_vps_bottom_offset", idx); } } static Bool hevc_parse_vps_extension(HEVC_VPS *vps, GF_BitStream *bs) { u8 splitting_flag, vps_nuh_layer_id_present_flag, view_id_len; u32 i, j, num_scalability_types, num_add_olss, num_add_layer_set, num_indepentdent_layers, nb_bits, default_output_layer_idc = 0; u8 dimension_id_len[16], dim_bit_offset[16]; u8 /*avc_base_layer_flag, */NumLayerSets, /*default_one_target_output_layer_flag, */rep_format_idx_present_flag, ols_ids_to_ls_idx; u8 layer_set_idx_for_ols_minus1[MAX_LHVC_LAYERS]; u8 nb_output_layers_in_output_layer_set[MAX_LHVC_LAYERS + 1]; u8 ols_highest_output_layer_id[MAX_LHVC_LAYERS + 1]; u32 k, d, r, p, iNuhLId, jNuhLId; u8 num_direct_ref_layers[64], num_pred_layers[64], num_layers_in_tree_partition[MAX_LHVC_LAYERS]; u8 dependency_flag[MAX_LHVC_LAYERS][MAX_LHVC_LAYERS], id_pred_layers[64][MAX_LHVC_LAYERS]; // u8 num_ref_layers[64]; // u8 tree_partition_layer_id[MAX_LHVC_LAYERS][MAX_LHVC_LAYERS]; // u8 id_ref_layers[64][MAX_LHVC_LAYERS]; // u8 id_direct_ref_layers[64][MAX_LHVC_LAYERS]; u8 layer_id_in_list_flag[64]; Bool OutputLayerFlag[MAX_LHVC_LAYERS][MAX_LHVC_LAYERS]; vps->vps_extension_found = 1; if ((vps->max_layers > 1) && vps->base_layer_internal_flag) hevc_profile_tier_level(bs, 0, vps->max_sub_layers - 1, &vps->ext_ptl[0], 0); splitting_flag = gf_bs_read_int_log(bs, 1, "splitting_flag"); num_scalability_types = 0; for (i = 0; i < 16; i++) { vps->scalability_mask[i] = gf_bs_read_int_log_idx(bs, 1, "scalability_mask", i); num_scalability_types += vps->scalability_mask[i]; } if (num_scalability_types >= 16) { num_scalability_types = 16; } dimension_id_len[0] = 0; 
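	/* Descriptive note: when splitting_flag is set, per-layer dimension_id values are not
	   coded explicitly but derived from bit fields of nuh_layer_id; dim_bit_offset[] adds up
	   the dimension_id_len values so that the last scalability dimension takes whatever bits
	   remain of the 6-bit layer id. Without splitting_flag, dimension_id is read from the
	   bitstream for every layer and every signalled scalability type. */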
for (i = 0; i < (num_scalability_types - splitting_flag); i++) { dimension_id_len[i] = 1 + gf_bs_read_int_log_idx(bs, 3, "dimension_id_len_minus1", i); } if (splitting_flag) { for (i = 0; i < num_scalability_types; i++) { dim_bit_offset[i] = 0; for (j = 0; j < i; j++) dim_bit_offset[i] += dimension_id_len[j]; } dimension_id_len[num_scalability_types - 1] = 1 + (5 - dim_bit_offset[num_scalability_types - 1]); dim_bit_offset[num_scalability_types] = 6; } vps_nuh_layer_id_present_flag = gf_bs_read_int_log(bs, 1, "vps_nuh_layer_id_present_flag"); vps->layer_id_in_nuh[0] = 0; vps->layer_id_in_vps[0] = 0; for (i = 1; i < vps->max_layers; i++) { if (vps_nuh_layer_id_present_flag) { vps->layer_id_in_nuh[i] = gf_bs_read_int_log_idx(bs, 6, "layer_id_in_nuh", i); } else { vps->layer_id_in_nuh[i] = i; } vps->layer_id_in_vps[vps->layer_id_in_nuh[i]] = i; if (!splitting_flag) { for (j = 0; j < num_scalability_types; j++) { vps->dimension_id[i][j] = gf_bs_read_int_log_idx2(bs, dimension_id_len[j], "dimension_id", i, j); } } } if (splitting_flag) { for (i = 0; i < vps->max_layers; i++) for (j = 0; j < num_scalability_types; j++) vps->dimension_id[i][j] = ((vps->layer_id_in_nuh[i] & ((1 << dim_bit_offset[j + 1]) - 1)) >> dim_bit_offset[j]); } else { for (j = 0; j < num_scalability_types; j++) vps->dimension_id[0][j] = 0; } view_id_len = gf_bs_read_int_log(bs, 4, "view_id_len"); if (view_id_len > 0) { for (i = 0; i < lhvc_get_num_views(vps); i++) { gf_bs_read_int_log_idx(bs, view_id_len, "view_id_val", i); } } for (i = 1; i < vps->max_layers; i++) { for (j = 0; j < i; j++) { vps->direct_dependency_flag[i][j] = gf_bs_read_int_log_idx(bs, 1, "direct_dependency_flag", i); } } //we do the test on MAX_LHVC_LAYERS and break in the loop to avoid a wrong GCC 4.8 warning on array bounds for (i = 0; i < MAX_LHVC_LAYERS; i++) { if (i >= vps->max_layers) break; for (j = 0; j < vps->max_layers; j++) { dependency_flag[i][j] = vps->direct_dependency_flag[i][j]; for (k = 0; k < i; k++) if (vps->direct_dependency_flag[i][k] && vps->direct_dependency_flag[k][j]) dependency_flag[i][j] = 1; } } for (i = 0; i < vps->max_layers; i++) { iNuhLId = vps->layer_id_in_nuh[i]; d = r = p = 0; for (j = 0; j < vps->max_layers; j++) { jNuhLId = vps->layer_id_in_nuh[j]; if (vps->direct_dependency_flag[i][j]) { // id_direct_ref_layers[iNuhLId][d] = jNuhLId; d++; } if (dependency_flag[i][j]) { // id_ref_layers[iNuhLId][r] = jNuhLId; r++; } if (dependency_flag[j][i]) id_pred_layers[iNuhLId][p++] = jNuhLId; } num_direct_ref_layers[iNuhLId] = d; // num_ref_layers[iNuhLId] = r; num_pred_layers[iNuhLId] = p; } memset(layer_id_in_list_flag, 0, 64 * sizeof(u8)); k = 0; //num_indepentdent_layers for (i = 0; i < vps->max_layers; i++) { iNuhLId = vps->layer_id_in_nuh[i]; if (!num_direct_ref_layers[iNuhLId]) { u32 h = 1; //tree_partition_layer_id[k][0] = iNuhLId; for (j = 0; j < num_pred_layers[iNuhLId]; j++) { u32 predLId = id_pred_layers[iNuhLId][j]; if (!layer_id_in_list_flag[predLId]) { //tree_partition_layer_id[k][h++] = predLId; layer_id_in_list_flag[predLId] = 1; } } num_layers_in_tree_partition[k++] = h; } } num_indepentdent_layers = k; num_add_layer_set = 0; if (num_indepentdent_layers > 1) num_add_layer_set = gf_bs_read_ue_log(bs, "num_add_layer_set"); for (i = 0; i < num_add_layer_set; i++) for (j = 1; j < num_indepentdent_layers; j++) { nb_bits = 1; while ((1 << nb_bits) < (num_layers_in_tree_partition[j] + 1)) nb_bits++; gf_bs_read_int_log_idx2(bs, nb_bits, "highest_layer_idx_plus1", i, j); } if (gf_bs_read_int_log(bs, 1, 
"vps_sub_layers_max_minus1_present_flag")) { for (i = 0; i < vps->max_layers; i++) { gf_bs_read_int_log_idx(bs, 3, "sub_layers_vps_max_minus1", i); } } if (gf_bs_read_int_log(bs, 1, "max_tid_ref_present_flag")) { for (i = 0; i < (vps->max_layers - 1); i++) { for (j = i + 1; j < vps->max_layers; j++) { if (vps->direct_dependency_flag[j][i]) gf_bs_read_int_log_idx2(bs, 3, "max_tid_il_ref_pics_plus1", i, j); } } } gf_bs_read_int_log(bs, 1, "default_ref_layers_active_flag"); vps->num_profile_tier_level = 1 + gf_bs_read_ue_log(bs, "num_profile_tier_level"); if (vps->num_profile_tier_level > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Wrong number of PTLs in VPS %d\n", vps->num_profile_tier_level)); vps->num_profile_tier_level = 1; return GF_FALSE; } for (i = vps->base_layer_internal_flag ? 2 : 1; i < vps->num_profile_tier_level; i++) { Bool vps_profile_present_flag = gf_bs_read_int_log_idx(bs, 1, "vps_profile_present_flag", i); hevc_profile_tier_level(bs, vps_profile_present_flag, vps->max_sub_layers - 1, &vps->ext_ptl[i - 1], i-1); } NumLayerSets = vps->num_layer_sets + num_add_layer_set; num_add_olss = 0; if (NumLayerSets > 1) { num_add_olss = gf_bs_read_ue_log(bs, "num_add_olss"); default_output_layer_idc = gf_bs_read_int_log(bs, 2, "default_output_layer_idc"); default_output_layer_idc = default_output_layer_idc < 2 ? default_output_layer_idc : 2; } vps->num_output_layer_sets = num_add_olss + NumLayerSets; layer_set_idx_for_ols_minus1[0] = 1; vps->output_layer_flag[0][0] = 1; for (i = 0; i < vps->num_output_layer_sets; i++) { if ((NumLayerSets > 2) && (i >= NumLayerSets)) { nb_bits = 1; while ((1 << nb_bits) < (NumLayerSets - 1)) nb_bits++; layer_set_idx_for_ols_minus1[i] = gf_bs_read_int_log_idx(bs, nb_bits, "layer_set_idx_for_ols_minus1", i); } else layer_set_idx_for_ols_minus1[i] = 0; ols_ids_to_ls_idx = i < NumLayerSets ? 
i : layer_set_idx_for_ols_minus1[i] + 1; if ((i > (vps->num_layer_sets - 1)) || (default_output_layer_idc == 2)) { for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) vps->output_layer_flag[i][j] = gf_bs_read_int_log_idx2(bs, 1, "output_layer_flag", i, j); } if ((default_output_layer_idc == 0) || (default_output_layer_idc == 1)) { for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { if ((default_output_layer_idc == 0) || (vps->LayerSetLayerIdList[i][j] == vps->LayerSetLayerIdListMax[i])) OutputLayerFlag[i][j] = GF_TRUE; else OutputLayerFlag[i][j] = GF_FALSE; } } for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { if (OutputLayerFlag[i][j]) { u32 curLayerID; vps->necessary_layers_flag[i][j] = GF_TRUE; curLayerID = vps->LayerSetLayerIdList[i][j]; for (k = 0; k < j; k++) { u32 refLayerId = vps->LayerSetLayerIdList[i][k]; if (dependency_flag[vps->layer_id_in_vps[curLayerID]][vps->layer_id_in_vps[refLayerId]]) vps->necessary_layers_flag[i][k] = GF_TRUE; } } } vps->num_necessary_layers[i] = 0; for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { if (vps->necessary_layers_flag[i][j]) vps->num_necessary_layers[i] += 1; } if (i == 0) { if (vps->base_layer_internal_flag) { if (vps->max_layers > 1) vps->profile_tier_level_idx[0][0] = 1; else vps->profile_tier_level_idx[0][0] = 0; } continue; } nb_bits = 1; while ((u32)(1 << nb_bits) < vps->num_profile_tier_level) nb_bits++; for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) if (vps->necessary_layers_flag[i][j] && vps->num_profile_tier_level) vps->profile_tier_level_idx[i][j] = gf_bs_read_int_log_idx2(bs, nb_bits, "profile_tier_level_idx", i, j); else vps->profile_tier_level_idx[i][j] = 0; nb_output_layers_in_output_layer_set[i] = 0; for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { nb_output_layers_in_output_layer_set[i] += OutputLayerFlag[i][j]; if (OutputLayerFlag[i][j]) { ols_highest_output_layer_id[i] = vps->LayerSetLayerIdList[ols_ids_to_ls_idx][j]; } } if (nb_output_layers_in_output_layer_set[i] == 1 && ols_highest_output_layer_id[i] > 0) vps->alt_output_layer_flag[i] = gf_bs_read_int_log_idx(bs, 1, "alt_output_layer_flag", i); } vps->num_rep_formats = 1 + gf_bs_read_ue_log(bs, "num_rep_formats_minus1"); if (vps->num_rep_formats > 16) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Wrong number of rep formats in VPS %d\n", vps->num_rep_formats)); vps->num_rep_formats = 0; return GF_FALSE; } for (i = 0; i < vps->num_rep_formats; i++) { lhvc_parse_rep_format(&vps->rep_formats[i], bs, i); } if (vps->num_rep_formats > 1) rep_format_idx_present_flag = gf_bs_read_int_log(bs, 1, "rep_format_idx_present_flag"); else rep_format_idx_present_flag = 0; vps->rep_format_idx[0] = 0; nb_bits = 1; while ((u32)(1 << nb_bits) < vps->num_rep_formats) nb_bits++; for (i = vps->base_layer_internal_flag ? 1 : 0; i < vps->max_layers; i++) { if (rep_format_idx_present_flag) { vps->rep_format_idx[i] = gf_bs_read_int_log_idx(bs, nb_bits, "rep_format_idx", i); } else { vps->rep_format_idx[i] = i < vps->num_rep_formats - 1 ? i : vps->num_rep_formats - 1; } } //TODO - we don't use the rest ... 
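	/* VPS-extension parsing stops here: the remaining syntax elements (per the HEVC spec:
	   DPB sizes, direct dependency types, the VPS VUI, ...) are not read, and only the PTLs,
	   layer/output-layer-set structure and rep_formats gathered above are kept in the
	   HEVC_VPS state. */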
return GF_TRUE; } static void sub_layer_hrd_parameters(GF_BitStream *bs, int subLayerId, u32 cpb_cnt, Bool sub_pic_hrd_params_present_flag, u32 idx1, u32 idx2) { u32 i; if (!gf_bs_available(bs)) return; for (i = 0; i <= cpb_cnt; i++) { gf_bs_read_ue_log_idx3(bs, "bit_rate_value_minus1", idx1, idx2, i); gf_bs_read_ue_log_idx3(bs, "cpb_size_value_minus1", idx1, idx2, i); if (sub_pic_hrd_params_present_flag) { gf_bs_read_ue_log_idx3(bs, "cpb_size_du_value_minus1", idx1, idx2, i); gf_bs_read_ue_log_idx3(bs, "bit_rate_du_value_minus1", idx1, idx2, i); } gf_bs_read_int_log_idx3(bs, 1, "cbr_flag", idx1, idx2, i); } } static void hevc_parse_hrd_parameters(GF_BitStream *bs, Bool commonInfPresentFlag, int maxNumSubLayersMinus1, u32 idx) { int i; Bool nal_hrd_parameters_present_flag = GF_FALSE; Bool vcl_hrd_parameters_present_flag = GF_FALSE; Bool sub_pic_hrd_params_present_flag = GF_FALSE; if (commonInfPresentFlag) { nal_hrd_parameters_present_flag = gf_bs_read_int_log_idx(bs, 1, "nal_hrd_parameters_present_flag", idx); vcl_hrd_parameters_present_flag = gf_bs_read_int_log_idx(bs, 1, "vcl_hrd_parameters_present_flag", idx); if (nal_hrd_parameters_present_flag || vcl_hrd_parameters_present_flag) { sub_pic_hrd_params_present_flag = gf_bs_read_int_log_idx(bs, 1, "sub_pic_hrd_params_present_flag", idx); if (sub_pic_hrd_params_present_flag) { gf_bs_read_int_log_idx(bs, 8, "tick_divisor_minus2", idx); gf_bs_read_int_log_idx(bs, 5, "du_cpb_removal_delay_increment_length_minus1", idx); gf_bs_read_int_log_idx(bs, 1, "sub_pic_cpb_params_in_pic_timing_sei_flag", idx); gf_bs_read_int_log_idx(bs, 5, "dpb_output_delay_du_length_minus1", idx); } gf_bs_read_int_log_idx(bs, 4, "bit_rate_scale", idx); gf_bs_read_int_log_idx(bs, 4, "cpb_size_scale", idx); if (sub_pic_hrd_params_present_flag) { gf_bs_read_int_log_idx(bs, 4, "cpb_size_du_scale", idx); } gf_bs_read_int_log_idx(bs, 5, "initial_cpb_removal_delay_length_minus1", idx); gf_bs_read_int_log_idx(bs, 5, "au_cpb_removal_delay_length_minus1", idx); gf_bs_read_int_log_idx(bs, 5, "dpb_output_delay_length_minus1", idx); } } for (i = 0; i <= maxNumSubLayersMinus1; i++) { Bool fixed_pic_rate_general_flag_i = gf_bs_read_int_log_idx(bs, 1, "fixed_pic_rate_general_flag", idx); Bool fixed_pic_rate_within_cvs_flag_i = GF_TRUE; Bool low_delay_hrd_flag_i = GF_FALSE; u32 cpb_cnt_minus1_i = 0; if (!fixed_pic_rate_general_flag_i) { fixed_pic_rate_within_cvs_flag_i = gf_bs_read_int_log_idx(bs, 1, "fixed_pic_rate_within_cvs_flag", idx); } if (fixed_pic_rate_within_cvs_flag_i) gf_bs_read_ue_log_idx(bs, "elemental_duration_in_tc_minus1", idx); else low_delay_hrd_flag_i = gf_bs_read_int_log_idx(bs, 1, "low_delay_hrd_flag", idx); if (!low_delay_hrd_flag_i) { cpb_cnt_minus1_i = gf_bs_read_ue_log_idx(bs, "cpb_cnt_minus1", idx); } if (nal_hrd_parameters_present_flag) { sub_layer_hrd_parameters(bs, i, cpb_cnt_minus1_i, sub_pic_hrd_params_present_flag, idx, i); } if (vcl_hrd_parameters_present_flag) { sub_layer_hrd_parameters(bs, i, cpb_cnt_minus1_i, sub_pic_hrd_params_present_flag, idx, i); } } } static s32 gf_hevc_read_vps_bs_internal(GF_BitStream *bs, HEVCState *hevc, Bool stop_at_vps_ext) { u8 vps_sub_layer_ordering_info_present_flag, vps_extension_flag; u32 i, j; s32 vps_id; HEVC_VPS *vps; u8 layer_id_included_flag[MAX_LHVC_LAYERS][64]; //nalu header already parsed vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if (vps_id >= 16) return -1; vps = &hevc->vps[vps_id]; vps->bit_pos_vps_extensions = -1; if (!vps->state) { vps->id = vps_id; vps->state = 1; } vps->base_layer_internal_flag = 
gf_bs_read_int_log(bs, 1, "base_layer_internal_flag"); vps->base_layer_available_flag = gf_bs_read_int_log(bs, 1, "base_layer_available_flag"); vps->max_layers = 1 + gf_bs_read_int_log(bs, 6, "max_layers_minus1"); if (vps->max_layers > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] sorry, %d layers in VPS but only %d supported\n", vps->max_layers, MAX_LHVC_LAYERS)); return -1; } vps->max_sub_layers = gf_bs_read_int_log(bs, 3, "max_sub_layers_minus1") + 1; vps->temporal_id_nesting = gf_bs_read_int_log(bs, 1, "temporal_id_nesting"); gf_bs_read_int_log(bs, 16, "vps_reserved_ffff_16bits"); hevc_profile_tier_level(bs, 1, vps->max_sub_layers - 1, &vps->ptl, 0); vps_sub_layer_ordering_info_present_flag = gf_bs_read_int_log(bs, 1, "vps_sub_layer_ordering_info_present_flag"); for (i = (vps_sub_layer_ordering_info_present_flag ? 0 : vps->max_sub_layers - 1); i < vps->max_sub_layers; i++) { gf_bs_read_ue_log_idx(bs, "vps_max_dec_pic_buffering_minus1", i); gf_bs_read_ue_log_idx(bs, "vps_max_num_reorder_pics", i); gf_bs_read_ue_log_idx(bs, "vps_max_latency_increase_plus1", i); } vps->max_layer_id = gf_bs_read_int_log(bs, 6, "max_layer_id"); if (vps->max_layer_id > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] VPS max layer ID %u but GPAC only supports %u\n", vps->max_layer_id, MAX_LHVC_LAYERS)); return -1; } vps->num_layer_sets = gf_bs_read_ue_log(bs, "num_layer_sets_minus1") + 1; if (vps->num_layer_sets > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Wrong number of layer sets in VPS %d\n", vps->num_layer_sets)); return -1; } for (i = 1; i < vps->num_layer_sets; i++) { for (j = 0; j <= vps->max_layer_id; j++) { layer_id_included_flag[i][j] = gf_bs_read_int_log_idx2(bs, 1, "layer_id_included_flag", i, j); } } vps->num_layers_in_id_list[0] = 1; for (i = 1; i < vps->num_layer_sets; i++) { u32 n, m; n = 0; for (m = 0; m <= vps->max_layer_id; m++) { if (layer_id_included_flag[i][m]) { vps->LayerSetLayerIdList[i][n++] = m; if (vps->LayerSetLayerIdListMax[i] < m) vps->LayerSetLayerIdListMax[i] = m; } } vps->num_layers_in_id_list[i] = n; } if (gf_bs_read_int_log(bs, 1, "vps_timing_info_present_flag")) { u32 vps_num_hrd_parameters; gf_bs_read_int_log(bs, 32, "vps_num_units_in_tick"); gf_bs_read_int_log(bs, 32, "vps_time_scale"); if (gf_bs_read_int_log(bs, 1, "vps_poc_proportional_to_timing_flag")) { gf_bs_read_ue_log(bs, "vps_num_ticks_poc_diff_one_minus1"); } vps_num_hrd_parameters = gf_bs_read_ue_log(bs, "vps_num_hrd_parameters"); for (i = 0; i < vps_num_hrd_parameters; i++) { Bool cprms_present_flag = GF_TRUE; gf_bs_read_ue_log_idx(bs, "hrd_layer_set_idx", i); if (i > 0) cprms_present_flag = gf_bs_read_int_log(bs, 1, "cprms_present_flag"); hevc_parse_hrd_parameters(bs, cprms_present_flag, vps->max_sub_layers - 1, i); } } if (stop_at_vps_ext) { return vps_id; } vps_extension_flag = gf_bs_read_int_log(bs, 1, "vps_extension_flag"); if (vps_extension_flag) { Bool res; gf_bs_align(bs); res = hevc_parse_vps_extension(vps, bs); if (res != GF_TRUE) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Failed to parse VPS extensions\n")); return -1; } if (gf_bs_read_int_log(bs, 1, "vps_extension2_flag")) { #if 0 while (gf_bs_available(bs)) { /*vps_extension_data_flag */ gf_bs_read_int(bs, 1); } #endif } } return vps_id; } GF_EXPORT s32 gf_hevc_read_vps_ex(u8 *data, u32 *size, HEVCState *hevc, Bool remove_extensions) { GF_BitStream *bs; char *data_without_emulation_bytes = NULL; u32 data_without_emulation_bytes_size = 0; s32 vps_id = -1; /*still contains emulation bytes*/ 
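	/* Usage sketch (illustrative, not taken from this file): callers typically hand in a
	   complete VPS NAL unit, 2-byte NAL header included, together with a zero-initialised
	   HEVCState, e.g. through the gf_hevc_read_vps() wrapper below. The return value is the
	   parsed vps_id, or -1 on error; when remove_extensions is set, *size may be rewritten
	   to the size of the VPS with its extensions stripped. */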
data_without_emulation_bytes_size = remove_extensions ? gf_media_nalu_emulation_bytes_remove_count(data, (*size)) : 0; if (!data_without_emulation_bytes_size) { bs = gf_bs_new(data, (*size), GF_BITSTREAM_READ); gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); } //when removing VPS ext, we have to get the full buffer without emulation prevention bytes becuase we do a bit-by-bit copy of the vps else { data_without_emulation_bytes = gf_malloc((*size) * sizeof(char)); data_without_emulation_bytes_size = gf_media_nalu_remove_emulation_bytes(data, data_without_emulation_bytes, (*size)); bs = gf_bs_new(data_without_emulation_bytes, data_without_emulation_bytes_size, GF_BITSTREAM_READ); } if (!bs) goto exit; if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) goto exit; vps_id = gf_hevc_read_vps_bs_internal(bs, hevc, remove_extensions); if (vps_id < 0) goto exit; if (remove_extensions) { u8 *new_vps; u32 new_vps_size, emulation_bytes; u32 bit_pos = gf_bs_get_bit_offset(bs); GF_BitStream *w_bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_bs_seek(bs, 0); gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) ); gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) ); gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) ); gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) ); gf_bs_write_u16(w_bs, gf_bs_read_u16(bs) ); bit_pos -= 48; while (bit_pos) { u32 v = gf_bs_read_int(bs, 1); gf_bs_write_int(w_bs, v, 1); bit_pos--; } /*vps extension flag*/ gf_bs_write_int(w_bs, 0, 1); new_vps = NULL; gf_bs_get_content(w_bs, &new_vps, &new_vps_size); gf_bs_del(w_bs); emulation_bytes = gf_media_nalu_emulation_bytes_add_count(new_vps, new_vps_size); if (emulation_bytes + new_vps_size > *size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("Buffer too small to rewrite VPS - skipping rewrite\n")); } else { *size = gf_media_nalu_add_emulation_bytes(new_vps, data, new_vps_size); } if (new_vps) gf_free(new_vps); } exit: if (bs) gf_bs_del(bs); if (data_without_emulation_bytes) gf_free(data_without_emulation_bytes); return vps_id; } GF_EXPORT s32 gf_hevc_read_vps(u8 *data, u32 size, HEVCState *hevc) { return gf_hevc_read_vps_ex(data, &size, hevc, GF_FALSE); } GF_EXPORT s32 gf_hevc_read_vps_bs(GF_BitStream *bs, HEVCState *hevc) { if (!bs || !hevc) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) return -1; return gf_hevc_read_vps_bs_internal(bs, hevc, GF_FALSE); } static void hevc_scaling_list_data(GF_BitStream *bs) { u32 i, sizeId, matrixId; for (sizeId = 0; sizeId < 4; sizeId++) { for (matrixId = 0; matrixId < 6; matrixId += (sizeId == 3) ? 
3 : 1) { u32 idx = sizeId*100 + 10*matrixId; u32 scaling_list_pred_mode_flag_sizeId_matrixId = gf_bs_read_int_log_idx(bs, 1, "scaling_list_pred_mode_flag_sizeId_matrixId", idx); if (!scaling_list_pred_mode_flag_sizeId_matrixId) { gf_bs_read_ue_log_idx(bs, "scaling_list_pred_matrix_id_delta", idx); } else { //u32 nextCoef = 8; u32 coefNum = MIN(64, (1 << (4 + (sizeId << 1)))); if (sizeId > 1) { gf_bs_read_se_log_idx(bs, "scaling_list_dc_coef_minus8", idx); } for (i = 0; i < coefNum; i++) { gf_bs_read_se_log_idx2(bs, "scaling_list_delta_coef", idx, i); } } } } } static const struct { u32 w, h; } hevc_sar[17] = { { 0, 0 }, { 1, 1 }, { 12, 11 }, { 10, 11 }, { 16, 11 }, { 40, 33 }, { 24, 11 }, { 20, 11 }, { 32, 11 }, { 80, 33 }, { 18, 11 }, { 15, 11 }, { 64, 33 }, { 160,99 }, { 4,3}, { 3,2}, { 2,1} }; static s32 gf_hevc_read_sps_bs_internal(GF_BitStream *bs, HEVCState *hevc, u8 layer_id, u32 *vui_flag_pos) { s32 vps_id, sps_id = -1; u32 i, nb_CTUs, depth; HEVC_SPS *sps; HEVC_VPS *vps; HEVC_ProfileTierLevel ptl; Bool multiLayerExtSpsFlag; u8 sps_ext_or_max_sub_layers_minus1, max_sub_layers_minus1; if (vui_flag_pos) *vui_flag_pos = 0; //nalu header already parsed vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if (vps_id >= 16) { return -1; } memset(&ptl, 0, sizeof(ptl)); max_sub_layers_minus1 = 0; sps_ext_or_max_sub_layers_minus1 = 0; if (layer_id == 0) max_sub_layers_minus1 = gf_bs_read_int_log(bs, 3, "max_sub_layers_minus1"); else sps_ext_or_max_sub_layers_minus1 = gf_bs_read_int_log(bs, 3, "sps_ext_or_max_sub_layers_minus1"); multiLayerExtSpsFlag = (layer_id != 0) && (sps_ext_or_max_sub_layers_minus1 == 7); if (!multiLayerExtSpsFlag) { gf_bs_read_int_log(bs, 1, "temporal_id_nesting_flag"); hevc_profile_tier_level(bs, 1, max_sub_layers_minus1, &ptl, 0); } sps_id = gf_bs_read_ue_log(bs, "sps_id"); if ((sps_id < 0) || (sps_id >= 16)) { return -1; } sps = &hevc->sps[sps_id]; if (!sps->state) { sps->state = 1; sps->id = sps_id; sps->vps_id = vps_id; } sps->ptl = ptl; vps = &hevc->vps[vps_id]; sps->max_sub_layers_minus1 = 0; sps->sps_ext_or_max_sub_layers_minus1 = 0; /* default values */ sps->colour_primaries = 2; sps->transfer_characteristic = 2; sps->matrix_coeffs = 2; //sps_rep_format_idx = 0; if (multiLayerExtSpsFlag) { sps->update_rep_format_flag = gf_bs_read_int_log(bs, 1, "update_rep_format_flag"); if (sps->update_rep_format_flag) { sps->rep_format_idx = gf_bs_read_int_log(bs, 8, "rep_format_idx"); } else { sps->rep_format_idx = vps->rep_format_idx[layer_id]; } sps->width = vps->rep_formats[sps->rep_format_idx].pic_width_luma_samples; sps->height = vps->rep_formats[sps->rep_format_idx].pic_height_luma_samples; sps->chroma_format_idc = vps->rep_formats[sps->rep_format_idx].chroma_format_idc; sps->bit_depth_luma = vps->rep_formats[sps->rep_format_idx].bit_depth_luma; sps->bit_depth_chroma = vps->rep_formats[sps->rep_format_idx].bit_depth_chroma; sps->separate_colour_plane_flag = vps->rep_formats[sps->rep_format_idx].separate_colour_plane_flag; //TODO this is crude ... 
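		/* Note: for a multi-layer extension SPS the picture format (dimensions, chroma
		   format, bit depths, separate colour planes) is not coded in the SPS itself but
		   inherited from the VPS rep_format entry selected by rep_format_idx; the PTL
		   below is copied from the first extension PTL of the VPS. */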
sps->ptl = vps->ext_ptl[0]; } else { sps->chroma_format_idc = gf_bs_read_ue_log(bs, "chroma_format_idc"); if (sps->chroma_format_idc == 3) sps->separate_colour_plane_flag = gf_bs_read_int_log(bs, 1, "separate_colour_plane_flag"); sps->width = gf_bs_read_ue_log(bs, "width"); sps->height = gf_bs_read_ue_log(bs, "height"); if ((sps->cw_flag = gf_bs_read_int_log(bs, 1, "conformance_window_flag"))) { u32 SubWidthC, SubHeightC; if (sps->chroma_format_idc == 1) { SubWidthC = SubHeightC = 2; } else if (sps->chroma_format_idc == 2) { SubWidthC = 2; SubHeightC = 1; } else { SubWidthC = SubHeightC = 1; } sps->cw_left = gf_bs_read_ue_log(bs, "conformance_window_left"); sps->cw_right = gf_bs_read_ue_log(bs, "conformance_window_right"); sps->cw_top = gf_bs_read_ue_log(bs, "conformance_window_top"); sps->cw_bottom = gf_bs_read_ue_log(bs, "conformance_window_bottom"); sps->width -= SubWidthC * (sps->cw_left + sps->cw_right); sps->height -= SubHeightC * (sps->cw_top + sps->cw_bottom); } sps->bit_depth_luma = 8 + gf_bs_read_ue_log(bs, "bit_depth_luma_minus8"); sps->bit_depth_chroma = 8 + gf_bs_read_ue_log(bs, "bit_depth_chroma_minus8"); } sps->log2_max_pic_order_cnt_lsb = 4 + gf_bs_read_ue_log(bs, "log2_max_pic_order_cnt_lsb_minus4"); if (!multiLayerExtSpsFlag) { sps->sub_layer_ordering_info_present_flag = gf_bs_read_int_log(bs, 1, "sub_layer_ordering_info_present_flag"); for (i = sps->sub_layer_ordering_info_present_flag ? 0 : sps->max_sub_layers_minus1; i <= sps->max_sub_layers_minus1; i++) { gf_bs_read_ue_log_idx(bs, "max_dec_pic_buffering", i); gf_bs_read_ue_log_idx(bs, "num_reorder_pics", i); gf_bs_read_ue_log_idx(bs, "max_latency_increase", i); } } sps->log2_min_luma_coding_block_size = 3 + gf_bs_read_ue_log(bs, "log2_min_luma_coding_block_size_minus3"); sps->log2_diff_max_min_luma_coding_block_size = gf_bs_read_ue_log(bs, "log2_diff_max_min_luma_coding_block_size"); sps->max_CU_width = (1 << (sps->log2_min_luma_coding_block_size + sps->log2_diff_max_min_luma_coding_block_size)); sps->max_CU_height = (1 << (sps->log2_min_luma_coding_block_size + sps->log2_diff_max_min_luma_coding_block_size)); sps->log2_min_transform_block_size = 2 + gf_bs_read_ue_log(bs, "log2_min_transform_block_size_minus2"); sps->log2_max_transform_block_size = sps->log2_min_transform_block_size + gf_bs_read_ue_log(bs, "log2_max_transform_block_size"); depth = 0; sps->max_transform_hierarchy_depth_inter = gf_bs_read_ue_log(bs, "max_transform_hierarchy_depth_inter"); sps->max_transform_hierarchy_depth_intra = gf_bs_read_ue_log(bs, "max_transform_hierarchy_depth_intra"); while ((u32)(sps->max_CU_width >> sps->log2_diff_max_min_luma_coding_block_size) > (u32)(1 << (sps->log2_min_transform_block_size + depth))) { depth++; } sps->max_CU_depth = sps->log2_diff_max_min_luma_coding_block_size + depth; nb_CTUs = ((sps->width + sps->max_CU_width - 1) / sps->max_CU_width) * ((sps->height + sps->max_CU_height - 1) / sps->max_CU_height); sps->bitsSliceSegmentAddress = 0; while (nb_CTUs > (u32)(1 << sps->bitsSliceSegmentAddress)) { sps->bitsSliceSegmentAddress++; } sps->scaling_list_enable_flag = gf_bs_read_int_log(bs, 1, "scaling_list_enable_flag"); if (sps->scaling_list_enable_flag) { sps->infer_scaling_list_flag = 0; sps->scaling_list_ref_layer_id = 0; if (multiLayerExtSpsFlag) { sps->infer_scaling_list_flag = gf_bs_read_int_log(bs, 1, "infer_scaling_list_flag"); } if (sps->infer_scaling_list_flag) { sps->scaling_list_ref_layer_id = gf_bs_read_int_log(bs, 6, "scaling_list_ref_layer_id"); } else { sps->scaling_list_data_present_flag = 
gf_bs_read_int_log(bs, 1, "scaling_list_data_present_flag"); if (sps->scaling_list_data_present_flag) { hevc_scaling_list_data(bs); } } } sps->asymmetric_motion_partitions_enabled_flag = gf_bs_read_int_log(bs, 1, "asymmetric_motion_partitions_enabled_flag"); sps->sample_adaptive_offset_enabled_flag = gf_bs_read_int_log(bs, 1, "sample_adaptive_offset_enabled_flag"); if ( (sps->pcm_enabled_flag = gf_bs_read_int_log(bs, 1, "pcm_enabled_flag")) ) { sps->pcm_sample_bit_depth_luma_minus1 = gf_bs_read_int_log(bs, 4, "pcm_sample_bit_depth_luma_minus1"); sps->pcm_sample_bit_depth_chroma_minus1 = gf_bs_read_int_log(bs, 4, "pcm_sample_bit_depth_chroma_minus1"); sps->log2_min_pcm_luma_coding_block_size_minus3 = gf_bs_read_ue_log(bs, "log2_min_pcm_luma_coding_block_size_minus3"); sps->log2_diff_max_min_pcm_luma_coding_block_size = gf_bs_read_ue_log(bs, "log2_diff_max_min_pcm_luma_coding_block_size"); sps->pcm_loop_filter_disable_flag = gf_bs_read_int_log(bs, 1, "pcm_loop_filter_disable_flag"); } sps->num_short_term_ref_pic_sets = gf_bs_read_ue_log(bs, "num_short_term_ref_pic_sets"); if (sps->num_short_term_ref_pic_sets > 64) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Invalid number of short term reference picture sets %d\n", sps->num_short_term_ref_pic_sets)); return -1; } for (i = 0; i < sps->num_short_term_ref_pic_sets; i++) { Bool ret = hevc_parse_short_term_ref_pic_set(bs, sps, i); /*cannot parse short_term_ref_pic_set, skip VUI parsing*/ if (!ret) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Invalid short_term_ref_pic_set\n")); return -1; } } sps->long_term_ref_pics_present_flag = gf_bs_read_int_log(bs, 1, "long_term_ref_pics_present_flag"); if (sps->long_term_ref_pics_present_flag) { sps->num_long_term_ref_pic_sps = gf_bs_read_ue_log(bs, "num_long_term_ref_pic_sps"); for (i = 0; i < sps->num_long_term_ref_pic_sps; i++) { gf_bs_read_int_log_idx(bs, sps->log2_max_pic_order_cnt_lsb, "lt_ref_pic_poc_lsb_sps", i); gf_bs_read_int_log_idx(bs, 1, "used_by_curr_pic_lt_sps_flag", i); } } sps->temporal_mvp_enable_flag = gf_bs_read_int_log(bs, 1, "temporal_mvp_enable_flag"); sps->strong_intra_smoothing_enable_flag = gf_bs_read_int_log(bs, 1, "strong_intra_smoothing_enable_flag"); if (vui_flag_pos) *vui_flag_pos = (u32)gf_bs_get_bit_offset(bs); if ((sps->vui_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_parameters_present_flag")) ) { sps->aspect_ratio_info_present_flag = gf_bs_read_int_log(bs, 1, "aspect_ratio_info_present_flag"); if (sps->aspect_ratio_info_present_flag) { sps->sar_idc = gf_bs_read_int_log(bs, 8, "aspect_ratio_idc"); if (sps->sar_idc == 255) { sps->sar_width = gf_bs_read_int_log(bs, 16, "aspect_ratio_width"); sps->sar_height = gf_bs_read_int_log(bs, 16, "aspect_ratio_height"); } else if (sps->sar_idc < 17) { sps->sar_width = hevc_sar[sps->sar_idc].w; sps->sar_height = hevc_sar[sps->sar_idc].h; } } if ((sps->overscan_info_present = gf_bs_read_int_log(bs, 1, "overscan_info_present"))) sps->overscan_appropriate = gf_bs_read_int_log(bs, 1, "overscan_appropriate"); sps->video_signal_type_present_flag = gf_bs_read_int_log(bs, 1, "video_signal_type_present_flag"); if (sps->video_signal_type_present_flag) { sps->video_format = gf_bs_read_int_log(bs, 3, "video_format"); sps->video_full_range_flag = gf_bs_read_int_log(bs, 1, "video_full_range_flag"); if ((sps->colour_description_present_flag = gf_bs_read_int_log(bs, 1, "colour_description_present_flag"))) { sps->colour_primaries = gf_bs_read_int_log(bs, 8, "colour_primaries"); sps->transfer_characteristic = gf_bs_read_int_log(bs, 8, 
"transfer_characteristic"); sps->matrix_coeffs = gf_bs_read_int_log(bs, 8, "matrix_coefficients"); } } if ((sps->chroma_loc_info_present_flag = gf_bs_read_int_log(bs, 1, "chroma_loc_info_present_flag"))) { sps->chroma_sample_loc_type_top_field = gf_bs_read_ue_log(bs, "chroma_sample_loc_type_top_field"); sps->chroma_sample_loc_type_bottom_field = gf_bs_read_ue_log(bs, "chroma_sample_loc_type_bottom_field"); } sps->neutra_chroma_indication_flag = gf_bs_read_int_log(bs, 1, "neutra_chroma_indication_flag"); sps->field_seq_flag = gf_bs_read_int_log(bs, 1, "field_seq_flag"); sps->frame_field_info_present_flag = gf_bs_read_int_log(bs, 1, "frame_field_info_present_flag"); if ((sps->default_display_window_flag = gf_bs_read_int_log(bs, 1, "default_display_window_flag"))) { sps->left_offset = gf_bs_read_ue_log(bs, "display_window_left_offset"); sps->right_offset = gf_bs_read_ue_log(bs, "display_window_right_offset"); sps->top_offset = gf_bs_read_ue_log(bs, "display_window_top_offset"); sps->bottom_offset = gf_bs_read_ue_log(bs, "display_window_bottom_offset"); } sps->has_timing_info = gf_bs_read_int_log(bs, 1, "has_timing_info"); if (sps->has_timing_info) { sps->num_units_in_tick = gf_bs_read_int_log(bs, 32, "num_units_in_tick"); sps->time_scale = gf_bs_read_int_log(bs, 32, "time_scale"); sps->poc_proportional_to_timing_flag = gf_bs_read_int_log(bs, 1, "poc_proportional_to_timing_flag"); if (sps->poc_proportional_to_timing_flag) sps->num_ticks_poc_diff_one_minus1 = gf_bs_read_ue_log(bs, "num_ticks_poc_diff_one_minus1"); if ((sps->hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "hrd_parameters_present_flag"))) { // GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[HEVC] HRD param parsing not implemented\n")); return sps_id; } } if (gf_bs_read_int_log(bs, 1, "bitstream_restriction_flag")) { gf_bs_read_int_log(bs, 1, "tiles_fixed_structure_flag"); gf_bs_read_int_log(bs, 1, "motion_vectors_over_pic_boundaries_flag"); gf_bs_read_int_log(bs, 1, "restricted_ref_pic_lists_flag"); gf_bs_read_ue_log(bs, "min_spatial_segmentation_idc"); gf_bs_read_ue_log(bs, "max_bytes_per_pic_denom"); gf_bs_read_ue_log(bs, "max_bits_per_min_cu_denom"); gf_bs_read_ue_log(bs, "log2_max_mv_length_horizontal"); gf_bs_read_ue_log(bs, "log2_max_mv_length_vertical"); } } if (gf_bs_read_int_log(bs, 1, "sps_extension_flag")) { #if 0 while (gf_bs_available(bs)) { /*sps_extension_data_flag */ gf_bs_read_int(bs, 1); } #endif } return sps_id; } GF_EXPORT s32 gf_hevc_read_sps_ex(char *data, u32 size, HEVCState *hevc, u32 *vui_flag_pos) { GF_BitStream *bs; s32 sps_id = -1; u8 layer_id; if (vui_flag_pos) *vui_flag_pos = 0; bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) goto exit; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, &layer_id)) goto exit; sps_id = gf_hevc_read_sps_bs_internal(bs, hevc, layer_id, vui_flag_pos); exit: if (bs) gf_bs_del(bs); return sps_id; } GF_EXPORT s32 gf_hevc_read_sps(u8 *data, u32 size, HEVCState *hevc) { return gf_hevc_read_sps_ex(data, size, hevc, NULL); } GF_EXPORT s32 gf_hevc_read_sps_bs(GF_BitStream *bs, HEVCState *hevc) { u8 layer_id; if (!bs || !hevc) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, &layer_id)) return -1; return gf_hevc_read_sps_bs_internal(bs, hevc, layer_id, NULL); } static s32 gf_hevc_read_pps_bs_internal(GF_BitStream *bs, HEVCState *hevc) { u32 i; s32 pps_id; HEVC_PPS *pps; //NAL header already read pps_id = gf_bs_read_ue_log(bs, "pps_id"); if ((pps_id < 0) || (pps_id >= 64)) { 
GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] wrong PPS ID %d in PPS\n", pps_id)); return -1; } pps = &hevc->pps[pps_id]; if (!pps->state) { pps->id = pps_id; pps->state = 1; } pps->sps_id = gf_bs_read_ue_log(bs, "sps_id"); if (pps->sps_id >= 16) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] wrong SPS ID %d in PPS\n", pps->sps_id)); pps->sps_id=0; return -1; } hevc->sps_active_idx = pps->sps_id; /*set active sps*/ pps->dependent_slice_segments_enabled_flag = gf_bs_read_int_log(bs, 1, "dependent_slice_segments_enabled_flag"); pps->output_flag_present_flag = gf_bs_read_int_log(bs, 1, "output_flag_present_flag"); pps->num_extra_slice_header_bits = gf_bs_read_int_log(bs, 3, "num_extra_slice_header_bits"); pps->sign_data_hiding_flag = gf_bs_read_int_log(bs, 1, "sign_data_hiding_flag"); pps->cabac_init_present_flag = gf_bs_read_int_log(bs, 1, "cabac_init_present_flag"); pps->num_ref_idx_l0_default_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l0_default_active"); pps->num_ref_idx_l1_default_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l1_default_active"); pps->pic_init_qp_minus26 = gf_bs_read_se_log(bs, "pic_init_qp_minus26"); pps->constrained_intra_pred_flag = gf_bs_read_int_log(bs, 1, "constrained_intra_pred_flag"); pps->transform_skip_enabled_flag = gf_bs_read_int_log(bs, 1, "transform_skip_enabled_flag"); if ((pps->cu_qp_delta_enabled_flag = gf_bs_read_int_log(bs, 1, "cu_qp_delta_enabled_flag"))) pps->diff_cu_qp_delta_depth = gf_bs_read_ue_log(bs, "diff_cu_qp_delta_depth"); pps->pic_cb_qp_offset = gf_bs_read_se_log(bs, "pic_cb_qp_offset"); pps->pic_cr_qp_offset = gf_bs_read_se_log(bs, "pic_cr_qp_offset"); pps->slice_chroma_qp_offsets_present_flag = gf_bs_read_int_log(bs, 1, "slice_chroma_qp_offsets_present_flag"); pps->weighted_pred_flag = gf_bs_read_int_log(bs, 1, "weighted_pred_flag"); pps->weighted_bipred_flag = gf_bs_read_int_log(bs, 1, "weighted_bipred_flag"); pps->transquant_bypass_enable_flag = gf_bs_read_int_log(bs, 1, "transquant_bypass_enable_flag"); pps->tiles_enabled_flag = gf_bs_read_int_log(bs, 1, "tiles_enabled_flag"); pps->entropy_coding_sync_enabled_flag = gf_bs_read_int_log(bs, 1, "entropy_coding_sync_enabled_flag"); if (pps->tiles_enabled_flag) { pps->num_tile_columns = 1 + gf_bs_read_ue_log(bs, "num_tile_columns_minus1"); pps->num_tile_rows = 1 + gf_bs_read_ue_log(bs, "num_tile_rows_minus1"); pps->uniform_spacing_flag = gf_bs_read_int_log(bs, 1, "uniform_spacing_flag"); if (!pps->uniform_spacing_flag) { for (i = 0; i < pps->num_tile_columns - 1; i++) { pps->column_width[i] = 1 + gf_bs_read_ue_log_idx(bs, "column_width_minus1", i); } for (i = 0; i < pps->num_tile_rows - 1; i++) { pps->row_height[i] = 1 + gf_bs_read_ue_log_idx(bs, "row_height_minus1", i); } } pps->loop_filter_across_tiles_enabled_flag = gf_bs_read_int_log(bs, 1, "loop_filter_across_tiles_enabled_flag"); } pps->loop_filter_across_slices_enabled_flag = gf_bs_read_int_log(bs, 1, "loop_filter_across_slices_enabled_flag"); if ((pps->deblocking_filter_control_present_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_control_present_flag"))) { pps->deblocking_filter_override_enabled_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_override_enabled_flag"); if (! 
(pps->pic_disable_deblocking_filter_flag = gf_bs_read_int_log(bs, 1, "pic_disable_deblocking_filter_flag"))) { pps->beta_offset_div2 = gf_bs_read_se_log(bs, "beta_offset_div2"); pps->tc_offset_div2 = gf_bs_read_se_log(bs, "tc_offset_div2"); } } if ((pps->pic_scaling_list_data_present_flag = gf_bs_read_int_log(bs, 1, "pic_scaling_list_data_present_flag"))) { hevc_scaling_list_data(bs); } pps->lists_modification_present_flag = gf_bs_read_int_log(bs, 1, "lists_modification_present_flag"); pps->log2_parallel_merge_level_minus2 = gf_bs_read_ue_log(bs, "log2_parallel_merge_level_minus2"); pps->slice_segment_header_extension_present_flag = gf_bs_read_int_log(bs, 1, "slice_segment_header_extension_present_flag"); if (gf_bs_read_int_log(bs, 1, "pps_extension_flag")) { #if 0 while (gf_bs_available(bs)) { /*pps_extension_data_flag */ gf_bs_read_int(bs, 1); } #endif } return pps_id; } GF_EXPORT s32 gf_hevc_read_pps(u8 *data, u32 size, HEVCState *hevc) { GF_BitStream *bs; s32 pps_id = -1; bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) goto exit; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) goto exit; pps_id = gf_hevc_read_pps_bs_internal(bs, hevc); exit: if (bs) gf_bs_del(bs); return pps_id; } GF_EXPORT s32 gf_hevc_read_pps_bs(GF_BitStream *bs, HEVCState *hevc) { if (!bs || !hevc) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) return -1; return gf_hevc_read_pps_bs_internal(bs, hevc); } GF_EXPORT s32 gf_hevc_parse_nalu_bs(GF_BitStream *bs, HEVCState *hevc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { Bool is_slice = GF_FALSE; s32 ret = -1; HEVCSliceInfo n_state; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); memcpy(&n_state, &hevc->s_info, sizeof(HEVCSliceInfo)); if (!hevc_parse_nal_header(bs, nal_unit_type, temporal_id, layer_id)) return -1; n_state.nal_unit_type = *nal_unit_type; switch (n_state.nal_unit_type) { case GF_HEVC_NALU_ACCESS_UNIT: case GF_HEVC_NALU_END_OF_SEQ: case GF_HEVC_NALU_END_OF_STREAM: ret = 1; break; /*slice_segment_layer_rbsp*/ case GF_HEVC_NALU_SLICE_TRAIL_N: case GF_HEVC_NALU_SLICE_TRAIL_R: case GF_HEVC_NALU_SLICE_TSA_N: case GF_HEVC_NALU_SLICE_TSA_R: case GF_HEVC_NALU_SLICE_STSA_N: case GF_HEVC_NALU_SLICE_STSA_R: case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: case GF_HEVC_NALU_SLICE_CRA: case GF_HEVC_NALU_SLICE_RADL_N: case GF_HEVC_NALU_SLICE_RADL_R: case GF_HEVC_NALU_SLICE_RASL_N: case GF_HEVC_NALU_SLICE_RASL_R: is_slice = GF_TRUE; /* slice - read the info and compare.*/ ret = hevc_parse_slice_segment(bs, hevc, &n_state); if (ret < 0) return ret; hevc_compute_poc(&n_state); ret = 0; if (hevc->s_info.poc != n_state.poc) { ret = 1; break; } if (n_state.first_slice_segment_in_pic_flag) { if (!(*layer_id) || (n_state.prev_layer_id_plus1 && ((*layer_id) <= n_state.prev_layer_id_plus1 - 1))) { ret = 1; break; } } break; case GF_HEVC_NALU_SEQ_PARAM: hevc->last_parsed_sps_id = gf_hevc_read_sps_bs_internal(bs, hevc, *layer_id, NULL); ret = (hevc->last_parsed_sps_id>=0) ? 0 : -1; break; case GF_HEVC_NALU_PIC_PARAM: hevc->last_parsed_pps_id = gf_hevc_read_pps_bs_internal(bs, hevc); ret = (hevc->last_parsed_pps_id>=0) ? 0 : -1; break; case GF_HEVC_NALU_VID_PARAM: hevc->last_parsed_vps_id = gf_hevc_read_vps_bs_internal(bs, hevc, GF_FALSE); ret = (hevc->last_parsed_vps_id>=0) ? 
0 : -1; break; default: ret = 0; break; } /* save _prev values */ if ((ret>0) && hevc->s_info.sps) { n_state.frame_num_offset_prev = hevc->s_info.frame_num_offset; n_state.frame_num_prev = hevc->s_info.frame_num; n_state.poc_lsb_prev = hevc->s_info.poc_lsb; n_state.poc_msb_prev = hevc->s_info.poc_msb; if (is_slice) n_state.prev_layer_id_plus1 = *layer_id + 1; } if (is_slice) hevc_compute_poc(&n_state); memcpy(&hevc->s_info, &n_state, sizeof(HEVCSliceInfo)); return ret; } GF_EXPORT s32 gf_hevc_parse_nalu(u8 *data, u32 size, HEVCState *hevc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { GF_BitStream *bs = NULL; s32 ret = -1; if (!hevc) { if (nal_unit_type) (*nal_unit_type) = (data[0] & 0x7E) >> 1; if (layer_id) { u8 id = data[0] & 1; id <<= 5; id |= (data[1] >> 3) & 0x1F; (*layer_id) = id; } if (temporal_id) (*temporal_id) = (data[1] & 0x7); return -1; } bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); ret = gf_hevc_parse_nalu_bs(bs, hevc, nal_unit_type, temporal_id, layer_id); gf_bs_del(bs); return ret; } GF_EXPORT GF_Err gf_hevc_change_vui(GF_HEVCConfig *hvcc, GF_VUIInfo *vui_info) { GF_BitStream *orig, *mod; HEVCState hevc; u32 i, bit_offset, flag; s32 idx; GF_NALUFFParamArray *spss; GF_NALUFFParam *slc; orig = NULL; memset(&hevc, 0, sizeof(HEVCState)); hevc.sps_active_idx = -1; i = 0; spss = NULL; while ((spss = (GF_NALUFFParamArray *)gf_list_enum(hvcc->param_array, &i))) { if (spss->type == GF_HEVC_NALU_SEQ_PARAM) break; spss = NULL; } if (!spss) return GF_NON_COMPLIANT_BITSTREAM; i = 0; while ((slc = (GF_NALUFFParam *)gf_list_enum(spss->nalus, &i))) { u8 *no_emulation_buf; u32 no_emulation_buf_size, emulation_bytes; /*SPS may still contains emulation bytes*/ no_emulation_buf = gf_malloc((slc->size) * sizeof(char)); no_emulation_buf_size = gf_media_nalu_remove_emulation_bytes(slc->data, no_emulation_buf, slc->size); idx = gf_hevc_read_sps_ex(no_emulation_buf, no_emulation_buf_size, &hevc, &bit_offset); if (idx < 0) { if (orig) gf_bs_del(orig); gf_free(no_emulation_buf); continue; } orig = gf_bs_new(no_emulation_buf, no_emulation_buf_size, GF_BITSTREAM_READ); mod = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); /*copy over till vui flag*/ assert(bit_offset >= 0); while (bit_offset) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); bit_offset--; } avc_hevc_rewrite_vui(vui_info, orig, mod); /*finally copy over remaining*/ while (gf_bs_bits_available(orig)) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); } gf_bs_del(orig); orig = NULL; gf_free(no_emulation_buf); /*set anti-emulation*/ gf_bs_get_content(mod, &no_emulation_buf, &no_emulation_buf_size); emulation_bytes = gf_media_nalu_emulation_bytes_add_count(no_emulation_buf, no_emulation_buf_size); if (no_emulation_buf_size + emulation_bytes > slc->size) slc->data = (char*)gf_realloc(slc->data, no_emulation_buf_size + emulation_bytes); slc->size = gf_media_nalu_add_emulation_bytes(no_emulation_buf, slc->data, no_emulation_buf_size); gf_bs_del(mod); gf_free(no_emulation_buf); } return GF_OK; } GF_EXPORT GF_Err gf_hevc_change_par(GF_HEVCConfig *hvcc, s32 ar_n, s32 ar_d) { GF_VUIInfo vuii; memset(&vuii, 0, sizeof(GF_VUIInfo)); vuii.ar_num = ar_n; vuii.ar_den = ar_d; vuii.fullrange = -1; vuii.video_format = -1; vuii.color_prim = -1; vuii.color_tfc = -1; vuii.color_matrix = -1; return gf_hevc_change_vui(hvcc, &vuii); } GF_EXPORT GF_Err gf_hevc_change_color(GF_HEVCConfig *hvcc, s32 fullrange, s32 vidformat, s32 colorprim, s32 transfer, s32 
colmatrix) { GF_VUIInfo vuii; memset(&vuii, 0, sizeof(GF_VUIInfo)); vuii.ar_num = -1; vuii.ar_den = -1; vuii.fullrange = fullrange; vuii.video_format = vidformat; vuii.color_prim = colorprim; vuii.color_tfc = transfer; vuii.color_matrix = colmatrix; return gf_hevc_change_vui(hvcc, &vuii); } GF_EXPORT GF_Err gf_hevc_get_sps_info_with_state(HEVCState *hevc, u8 *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d) { s32 idx; idx = gf_hevc_read_sps(sps_data, sps_size, hevc); if (idx < 0) { return GF_NON_COMPLIANT_BITSTREAM; } if (sps_id) *sps_id = idx; if (width) *width = hevc->sps[idx].width; if (height) *height = hevc->sps[idx].height; if (par_n) *par_n = hevc->sps[idx].aspect_ratio_info_present_flag ? hevc->sps[idx].sar_width : (u32)-1; if (par_d) *par_d = hevc->sps[idx].aspect_ratio_info_present_flag ? hevc->sps[idx].sar_height : (u32)-1; return GF_OK; } GF_EXPORT GF_Err gf_hevc_get_sps_info(u8 *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d) { HEVCState hevc; memset(&hevc, 0, sizeof(HEVCState)); hevc.sps_active_idx = -1; return gf_hevc_get_sps_info_with_state(&hevc, sps_data, sps_size, sps_id, width, height, par_n, par_d); } #endif //GPAC_DISABLE_HEVC static u32 AC3_FindSyncCode(u8 *buf, u32 buflen) { u32 end = buflen - 6; u32 offset = 0; while (offset <= end) { if (buf[offset] == 0x0b && buf[offset + 1] == 0x77) { return offset; } offset++; } return buflen; } static Bool AC3_FindSyncCodeBS(GF_BitStream *bs) { u8 b1; u64 pos = gf_bs_get_position(bs); u64 end = gf_bs_get_size(bs); pos += 1; b1 = gf_bs_read_u8(bs); while (pos + 1 <= end) { u8 b2 = gf_bs_read_u8(bs); if ((b1 == 0x0b) && (b2 == 0x77)) { gf_bs_seek(bs, pos - 1); return GF_TRUE; } pos++; b1 = b2; } return GF_FALSE; } static const u32 ac3_sizecod_to_bitrate[] = { 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000, 192000, 224000, 256000, 320000, 384000, 448000, 512000, 576000, 640000 }; static const u32 ac3_sizecod2_to_framesize[] = { 96, 120, 144, 168, 192, 240, 288, 336, 384, 480, 576, 672, 768, 960, 1152, 1344, 1536, 1728, 1920 }; static const u32 ac3_sizecod1_to_framesize[] = { 69, 87, 104, 121, 139, 174, 208, 243, 278, 348, 417, 487, 557, 696, 835, 975, 1114, 1253, 1393 }; static const u32 ac3_sizecod0_to_framesize[] = { 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 448, 512, 640, 768, 896, 1024, 1152, 1280 }; static const u32 ac3_mod_to_chans[] = { 2, 1, 2, 3, 3, 4, 4, 5 }; GF_EXPORT u32 gf_ac3_get_channels(u32 acmod) { u32 nb_ch; nb_ch = ac3_mod_to_chans[acmod]; return nb_ch; } GF_EXPORT u32 gf_ac3_get_bitrate(u32 brcode) { return ac3_sizecod_to_bitrate[brcode]; } Bool gf_ac3_parser(u8 *buf, u32 buflen, u32 *pos, GF_AC3Config *hdr, Bool full_parse) { GF_BitStream *bs; Bool ret; if (buflen < 6) return GF_FALSE; (*pos) = AC3_FindSyncCode(buf, buflen); if (*pos >= buflen) return GF_FALSE; bs = gf_bs_new((const char*)(buf + *pos), buflen, GF_BITSTREAM_READ); ret = gf_ac3_parser_bs(bs, hdr, full_parse); gf_bs_del(bs); return ret; } GF_EXPORT Bool gf_ac3_parser_bs(GF_BitStream *bs, GF_AC3Config *hdr, Bool full_parse) { u32 fscod, frmsizecod, bsid, ac3_mod, freq, framesize, bsmod, syncword; u64 pos; if (!hdr || (gf_bs_available(bs) < 6)) return GF_FALSE; if (!AC3_FindSyncCodeBS(bs)) return GF_FALSE; pos = gf_bs_get_position(bs); syncword = gf_bs_read_u16(bs); if (syncword != 0x0B77) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AC3] Wrong sync word detected (0x%X - expecting 0x0B77).\n", syncword)); return GF_FALSE; } 
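	/* Descriptive note (sketch of the syntax being parsed, not from the original source):
	 * AC-3 sync frame header per ATSC A/52 - after the 16-bit syncword come crc1 (16 bits),
	 * fscod (2 bits, sampling-rate code), frmsizecod (6 bits), then the BSI fields
	 * bsid (5), bsmod (3) and acmod (3). frmsizecod/2 indexes the bitrate table and the
	 * per-sample-rate frame-size tables above, which give sizes in 16-bit words, hence
	 * the "* 2" below to obtain the frame size in bytes. */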
gf_bs_read_int_log(bs, 16, "crc1"); fscod = gf_bs_read_int_log(bs, 2, "fscod"); frmsizecod = gf_bs_read_int_log(bs, 6, "frmsizecod"); bsid = gf_bs_read_int_log(bs, 5, "bsid"); bsmod = gf_bs_read_int_log(bs, 3, "bsmod"); ac3_mod = gf_bs_read_int_log(bs, 3, "ac3_mod"); if (frmsizecod >= 2 * sizeof(ac3_sizecod_to_bitrate) / sizeof(u32)) return GF_FALSE; hdr->bitrate = ac3_sizecod_to_bitrate[frmsizecod / 2]; if (bsid > 8) hdr->bitrate = hdr->bitrate >> (bsid - 8); switch (fscod) { case 0: if (frmsizecod >= 2 * sizeof(ac3_sizecod0_to_framesize) / sizeof(u32)) return GF_FALSE; freq = 48000; framesize = ac3_sizecod0_to_framesize[frmsizecod / 2] * 2; break; case 1: if (frmsizecod >= 2 * sizeof(ac3_sizecod1_to_framesize) / sizeof(u32)) return GF_FALSE; freq = 44100; framesize = (ac3_sizecod1_to_framesize[frmsizecod / 2] + (frmsizecod & 0x1)) * 2; break; case 2: if (frmsizecod >= 2 * sizeof(ac3_sizecod2_to_framesize) / sizeof(u32)) return GF_FALSE; freq = 32000; framesize = ac3_sizecod2_to_framesize[frmsizecod / 2] * 2; break; default: return GF_FALSE; } hdr->sample_rate = freq; hdr->framesize = framesize; if (full_parse) { hdr->streams[0].bsid = bsid; hdr->streams[0].bsmod = bsmod; hdr->streams[0].acmod = ac3_mod; hdr->streams[0].lfon = 0; hdr->streams[0].fscod = fscod; hdr->brcode = frmsizecod / 2; } if (ac3_mod >= 2 * sizeof(ac3_mod_to_chans) / sizeof(u32)) return GF_FALSE; hdr->channels = ac3_mod_to_chans[ac3_mod]; if ((ac3_mod & 0x1) && (ac3_mod != 1)) gf_bs_read_int_log(bs, 2, "cmixlev"); if (ac3_mod & 0x4) gf_bs_read_int_log(bs, 2, "surmixlev"); if (ac3_mod == 0x2) gf_bs_read_int_log(bs, 2, "dsurmod"); if (gf_bs_read_int_log(bs, 1, "lfeon")) { hdr->channels += 1; hdr->streams[0].lfon = 1; } gf_bs_seek(bs, pos); return GF_TRUE; } GF_EXPORT Bool gf_eac3_parser_bs(GF_BitStream *bs, GF_AC3Config *hdr, Bool full_parse) { u32 fscod, bsid, ac3_mod, freq, framesize, syncword, substreamid, lfon, channels, numblkscod, strmtyp, frmsiz; u64 pos; u16 chanmap; static u32 numblks[4] = {1, 2, 3, 6}; if (!hdr || (gf_bs_available(bs) < 6)) return GF_FALSE; if (!AC3_FindSyncCodeBS(bs)) return GF_FALSE; pos = gf_bs_get_position(bs); framesize = 0; numblkscod = 0; memset(hdr, 0, sizeof(GF_AC3Config)); block: syncword = gf_bs_read_u16(bs); if (syncword != 0x0B77) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[E-AC3] Wrong sync word detected (0x%X - expecting 0x0B77).\n", syncword)); return GF_FALSE; } strmtyp = gf_bs_read_int_log(bs, 2, "strmtyp"); substreamid = gf_bs_read_int_log(bs, 3, "substreamid"); //next main (independent) AU, done with this frame if ((strmtyp!=0x1) && ((hdr->substreams >> substreamid) & 0x1)) { hdr->framesize = framesize; gf_bs_seek(bs, pos); return GF_TRUE; } frmsiz = gf_bs_read_int_log(bs, 11, "frmsiz"); framesize += 2 * (1 + frmsiz); fscod = gf_bs_read_int_log(bs, 2, "fscod"); if (fscod == 0x3) { fscod = gf_bs_read_int_log(bs, 2, "fscod2"); numblkscod += 6; } else { numblkscod += gf_bs_read_int_log(bs, 2, "numblkscod"); } assert(numblkscod <= 9); if ((hdr->substreams >> substreamid) & 0x1) { //we still have sync frames following if (substreamid) { if (gf_bs_seek(bs, pos + framesize) != GF_OK) { gf_bs_seek(bs, pos); return GF_FALSE; } if ((gf_bs_available(bs) < 6) || !AC3_FindSyncCodeBS(bs)) { gf_bs_seek(bs, pos); return GF_FALSE; } goto block; } } hdr->substreams |= (1 << substreamid); switch (fscod) { case 0: freq = 48000; break; case 1: freq = 44100; break; case 2: freq = 32000; break; default: return GF_FALSE; } ac3_mod = gf_bs_read_int_log(bs, 3, "ac3_mod"); lfon = gf_bs_read_int_log(bs, 
1, "lfon"); bsid = gf_bs_read_int_log(bs, 5, "bsid"); if (!substreamid && (bsid != 16/*E-AC3*/)) return GF_FALSE; gf_bs_read_int_log(bs, 5, "dialnorm"); if (gf_bs_read_int_log(bs, 1, "compre")) { gf_bs_read_int_log(bs, 8, "compr"); } if (ac3_mod==0) { gf_bs_read_int_log(bs, 5, "dialnorm2"); if (gf_bs_read_int_log(bs, 1, "compr2e")) { gf_bs_read_int_log(bs, 8, "compr2"); } } chanmap = 0; if (strmtyp==0x1) { if (gf_bs_read_int_log(bs, 1, "chanmape")) { chanmap = gf_bs_read_int_log(bs, 16, "chanmap"); } } channels = ac3_mod_to_chans[ac3_mod]; if (lfon) channels += 1; hdr->bitrate = 0; hdr->sample_rate = freq; hdr->framesize = framesize; if (strmtyp != 1) { hdr->channels = channels; hdr->streams[substreamid].lfon = lfon; if (full_parse) { hdr->streams[substreamid].bsid = bsid; hdr->streams[substreamid].bsmod = 0; hdr->streams[substreamid].acmod = ac3_mod; hdr->streams[substreamid].fscod = fscod; hdr->brcode = 0; } hdr->nb_streams++; //not clear if this is only for the independent streams hdr->brcode += ((frmsiz+1) * freq) / (numblks[numblkscod]*16) / 1000; if (lfon) hdr->channels += 1; } else { hdr->streams[substreamid].nb_dep_sub = substreamid; hdr->streams[substreamid].chan_loc |= chanmap; } if (numblkscod < 6) { //we need 6 blocks to make a sample if (gf_bs_seek(bs, pos + framesize) != GF_OK) { gf_bs_seek(bs, pos); return GF_FALSE; } if ((gf_bs_available(bs) < 6) || !AC3_FindSyncCodeBS(bs)) return GF_FALSE; goto block; } gf_bs_seek(bs, pos); return GF_TRUE; } #endif /*GPAC_DISABLE_AV_PARSERS*/ u32 gf_id3_read_size(GF_BitStream *bs) { u32 size = 0; gf_bs_read_int(bs, 1); size |= gf_bs_read_int(bs, 7); size<<=7; gf_bs_read_int(bs, 1); size |= gf_bs_read_int(bs, 7); size<<=7; gf_bs_read_int(bs, 1); size |= gf_bs_read_int(bs, 7); size<<=7; gf_bs_read_int(bs, 1); size |= gf_bs_read_int(bs, 7); return size; } #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined (GPAC_DISABLE_OGG) /* Vorbis parser */ static u32 vorbis_book_maptype1_quantvals(u32 entries, u32 dim) { u32 vals = (u32)floor(pow(entries, 1.0 / dim)); while (1) { u32 acc = 1; u32 acc1 = 1; u32 i; for (i = 0; i < dim; i++) { acc *= vals; acc1 *= vals + 1; } if (acc <= entries && acc1 > entries) return (vals); else { if (acc > entries) vals--; else vals++; } } } static u32 ilog(u32 v, Bool dec) { u32 ret = 0; if (dec && v) --v; while (v) { ret++; v >>= 1; } return (ret); } static u32 icount(u32 v) { u32 ret = 0; while (v) { ret += v & 1; v >>= 1; } return(ret); } GF_EXPORT Bool gf_vorbis_parse_header(GF_VorbisParser *vp, u8 *data, u32 data_len) { u32 pack_type, i, j, k, times, nb_part, nb_books, nb_modes; u32 l; char szNAME[8]; oggpack_buffer opb; oggpack_readinit(&opb, (u8*)data, data_len); pack_type = oggpack_read(&opb, 8); i = 0; while (i < 6) { szNAME[i] = oggpack_read(&opb, 8); i++; } szNAME[i] = 0; if (strcmp(szNAME, "vorbis")) { return GF_FALSE; } switch (pack_type) { case 0x01: vp->version = oggpack_read(&opb, 32); if (vp->version != 0) { return GF_FALSE; } vp->channels = oggpack_read(&opb, 8); vp->sample_rate = oggpack_read(&opb, 32); vp->max_r = oggpack_read(&opb, 32); vp->avg_r = oggpack_read(&opb, 32); vp->low_r = oggpack_read(&opb, 32); vp->min_block = 1<<oggpack_read(&opb, 4); vp->max_block = 1<<oggpack_read(&opb, 4); if (vp->sample_rate < 1 || vp->channels < 1 || vp->min_block < 8 || vp->max_block < vp->min_block || oggpack_read(&opb, 1) != 1) { return GF_FALSE; } vp->nb_init=1; return GF_TRUE; case 0x03: /*trash comments*/ vp->nb_init++; return GF_TRUE; case 0x05: /*need at least bitstream header to make sure we're parsing 
the right thing*/ if (!vp->nb_init) return GF_FALSE; break; default: return GF_FALSE; } /*OK parse codebook*/ nb_books = oggpack_read(&opb, 8) + 1; /*skip vorbis static books*/ for (i = 0; i < nb_books; i++) { u32 map_type, qb, qq; u32 entries, dim; oggpack_read(&opb, 24); dim = oggpack_read(&opb, 16); entries = oggpack_read(&opb, 24); if ((s32)entries < 0) entries = 0; if (oggpack_read(&opb, 1) == 0) { if (oggpack_read(&opb, 1)) { for (j = 0; j < entries; j++) { if (oggpack_read(&opb, 1)) { oggpack_read(&opb, 5); } } } else { for (j = 0; j < entries; j++) oggpack_read(&opb, 5); } } else { oggpack_read(&opb, 5); for (j = 0; j < entries;) { u32 num = oggpack_read(&opb, ilog(entries - j, GF_FALSE)); for (k = 0; k < num && j < entries; k++, j++) { } } } switch ((map_type = oggpack_read(&opb, 4))) { case 0: break; case 1: case 2: oggpack_read(&opb, 32); oggpack_read(&opb, 32); qq = oggpack_read(&opb, 4) + 1; oggpack_read(&opb, 1); if (map_type == 1) qb = vorbis_book_maptype1_quantvals(entries, dim); else if (map_type == 2) qb = entries * dim; else qb = 0; for (j = 0; j < qb; j++) oggpack_read(&opb, qq); break; } } times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) oggpack_read(&opb, 16); times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) { u32 type = oggpack_read(&opb, 16); if (type) { u32 *parts, *class_dims, count, rangebits; u32 max_class = 0; nb_part = oggpack_read(&opb, 5); parts = (u32*)gf_malloc(sizeof(u32) * nb_part); for (j = 0; j < nb_part; j++) { parts[j] = oggpack_read(&opb, 4); if (max_class < parts[j]) max_class = parts[j]; } class_dims = (u32*)gf_malloc(sizeof(u32) * (max_class + 1)); for (j = 0; j < max_class + 1; j++) { u32 class_sub; class_dims[j] = oggpack_read(&opb, 3) + 1; class_sub = oggpack_read(&opb, 2); if (class_sub) oggpack_read(&opb, 8); for (k = 0; k < (u32)(1 << class_sub); k++) oggpack_read(&opb, 8); } oggpack_read(&opb, 2); rangebits = oggpack_read(&opb, 4); count = 0; for (j = 0, k = 0; j < nb_part; j++) { count += class_dims[parts[j]]; for (; k < count; k++) oggpack_read(&opb, rangebits); } gf_free(parts); gf_free(class_dims); } else { oggpack_read(&opb, 8 + 16 + 16 + 6 + 8); nb_books = oggpack_read(&opb, 4) + 1; for (j = 0; j < nb_books; j++) oggpack_read(&opb, 8); } } times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) { u32 acc = 0; oggpack_read(&opb, 16);/*type*/ oggpack_read(&opb, 24); oggpack_read(&opb, 24); oggpack_read(&opb, 24); nb_part = oggpack_read(&opb, 6) + 1; oggpack_read(&opb, 8); for (j = 0; j < nb_part; j++) { u32 cascade = oggpack_read(&opb, 3); if (oggpack_read(&opb, 1)) cascade |= (oggpack_read(&opb, 5) << 3); acc += icount(cascade); } for (j = 0; j < acc; j++) oggpack_read(&opb, 8); } times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) { u32 sub_maps = 1; oggpack_read(&opb, 16); if (oggpack_read(&opb, 1)) sub_maps = oggpack_read(&opb, 4) + 1; if (oggpack_read(&opb, 1)) { u32 nb_steps = oggpack_read(&opb, 8) + 1; for (j = 0; j < nb_steps; j++) { oggpack_read(&opb, ilog(vp->channels, GF_TRUE)); oggpack_read(&opb, ilog(vp->channels, GF_TRUE)); } } oggpack_read(&opb, 2); if (sub_maps>1) { for(l=0; l<vp->channels; l++) oggpack_read(&opb, 4); } for (j = 0; j < sub_maps; j++) { oggpack_read(&opb, 8); oggpack_read(&opb, 8); oggpack_read(&opb, 8); } } nb_modes = oggpack_read(&opb, 6) + 1; for (i = 0; i < nb_modes; i++) { vp->mode_flag[i] = oggpack_read(&opb, 1); oggpack_read(&opb, 16); oggpack_read(&opb, 16); oggpack_read(&opb, 8); } vp->modebits = 0; j = nb_modes; while (j > 1) { vp->modebits++; j >>= 
1; } return GF_TRUE; } GF_EXPORT u32 gf_vorbis_check_frame(GF_VorbisParser *vp, u8 *data, u32 data_length) { s32 block_size; oggpack_buffer opb; if (!vp) return 0; oggpack_readinit(&opb, (unsigned char*)data, data_length); /*not audio*/ if (oggpack_read(&opb, 1) != 0) return 0; block_size = oggpack_read(&opb, vp->modebits); if (block_size == -1) return 0; return ((vp->mode_flag[block_size]) ? vp->max_block : vp->min_block) / (2); } /*call with vorbis header packets - initializes the parser on success, leave it to NULL otherwise returns 1 if success, 0 if error.*/ Bool gf_opus_parse_header(GF_OpusParser *opus, u8 *data, u32 data_len) { char tag[9]; GF_BitStream *bs = gf_bs_new(data, data_len, GF_BITSTREAM_READ); gf_bs_read_data(bs, tag, 8); tag[8]=0; if (memcmp(data, "OpusHead", sizeof(char)*8)) { gf_bs_del(bs); return GF_FALSE; } /*Identification Header*/ opus->version = gf_bs_read_u8(bs); /*version*/ if (opus->version != 1) { gf_bs_del(bs); GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Opus] Unsupported version %d\n", opus->version)); return GF_FALSE; } opus->OutputChannelCount = gf_bs_read_u8(bs); opus->PreSkip = gf_bs_read_u16_le(bs); opus->InputSampleRate = gf_bs_read_u32_le(bs); opus->OutputGain = gf_bs_read_u16_le(bs); opus->ChannelMappingFamily = gf_bs_read_u8(bs); if (opus->ChannelMappingFamily != 0) { opus->StreamCount = gf_bs_read_u8(bs); opus->CoupledCount = gf_bs_read_u8(bs); gf_bs_read_data(bs, (char *) opus->ChannelMapping, opus->OutputChannelCount); } gf_bs_del(bs); return GF_TRUE; } /*returns 0 if init error or not a vorbis frame, otherwise returns the number of audio samples in this frame*/ u32 gf_opus_check_frame(GF_OpusParser *op, u8 *data, u32 data_length) { u32 block_size; if (!memcmp(data, "OpusHead", sizeof(char)*8)) return 0; if (!memcmp(data, "OpusTags", sizeof(char)*8)) return 0; /*consider the whole packet as Ogg packets and ISOBMFF samples for Opus are framed similarly*/ static const int OpusFrameDurIn48k[] = { 480, 960, 1920, 2880, 480, 960, 1920, 2880, 480, 960, 1920, 2880, 480, 960, 480, 960, 120, 240, 480, 960, 120, 240, 480, 960, 120, 240, 480, 960, 120, 240, 480, 960, }; int TOC_config = (data[0] & 0xf8) >> 3; //int s = (data[0] & 0x04) >> 2; block_size = OpusFrameDurIn48k[TOC_config]; int c = data[0] & 0x03; if (c == 1 || c == 2) { block_size *= 2; } else if (c == 3) { /*unknown number of frames*/ int num_frames = data[1] & 0x3f; block_size *= num_frames; } return block_size; } #endif /*!defined(GPAC_DISABLE_AV_PARSERS) && !defined (GPAC_DISABLE_OGG)*/ u64 gf_mpegh_escaped_value(GF_BitStream *bs, u32 nBits1, u32 nBits2, u32 nBits3) { u64 value = gf_bs_read_int(bs, nBits1); if (value == (1<<nBits1)-1) { u32 vadd = gf_bs_read_int(bs, nBits2); value += vadd; if (vadd == (1<<nBits2)-1) { vadd = gf_bs_read_int(bs, nBits3); value += vadd; } } return value; } GF_EXPORT s32 gf_mpegh_get_mhas_pl(u8 *ptr, u32 size, u64 *ch_layout) { s32 PL = -1; GF_BitStream *bs; u32 i; s32 sync_pos=-1; for (i=0; i<size-3; i++) { if ((ptr[i]==0xC0) && (ptr[i+1]== 0x01) && (ptr[i+2]==0xA5)) { sync_pos = i; break; } } if (sync_pos<0) return 0; if (ch_layout) *ch_layout = 0; bs = gf_bs_new(ptr, size, GF_BITSTREAM_READ); gf_bs_skip_bytes(bs, sync_pos); while (gf_bs_available(bs)) { u32 type = (u32) gf_mpegh_escaped_value(bs, 3, 8, 8); /*u64 label = */gf_mpegh_escaped_value(bs, 2, 8, 32); u64 mh_size = gf_mpegh_escaped_value(bs, 11, 24, 24); if (mh_size > gf_bs_available(bs)) break; //MHAS config if (type==1) { PL = gf_bs_read_int(bs, 8); if (ch_layout) { u32 idx = gf_bs_read_int(bs, 5); if 
(idx==0x1f) gf_bs_read_int(bs, 24); /*idx = */gf_bs_read_int(bs, 3); gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 1); //speaker config idx = gf_bs_read_int(bs, 2); if (idx == 0) { *ch_layout = gf_audio_fmt_get_layout_from_cicp( gf_bs_read_int(bs, 6) ); } } break; } gf_bs_skip_bytes(bs, mh_size); } gf_bs_del(bs); return PL; } GF_EXPORT void gf_media_vvc_parse_sei(char *buffer, u32 nal_size, VVCState *vvc) { gf_hevc_vvc_parse_sei(buffer, nal_size, NULL, vvc); } static Bool vvc_parse_nal_header(GF_BitStream *bs, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { u32 val; val = gf_bs_read_int_log(bs, 1, "forbidden_zero"); if (val) return GF_FALSE; val = gf_bs_read_int_log(bs, 1, "resevred0"); if (val) return GF_FALSE; val = gf_bs_read_int_log(bs, 6, "layerID"); if (layer_id) *layer_id = val; val = gf_bs_read_int_log(bs, 5, "nuh_type"); if (nal_unit_type) *nal_unit_type = val; val = gf_bs_read_int_log(bs, 3, "temporalID"); if (!val) return GF_FALSE; val -= 1; if (temporal_id) *temporal_id = val; return GF_TRUE; } static void vvc_profile_tier_level(GF_BitStream *bs, VVC_ProfileTierLevel *ptl, u32 idx) { u32 i; if (ptl->pt_present) { ptl->general_profile_idc = gf_bs_read_int_log_idx(bs, 7, "general_profile_idc", idx); ptl->general_tier_flag = gf_bs_read_int_log_idx(bs, 1, "general_tier_flag", idx); } ptl->general_level_idc = gf_bs_read_int_log_idx(bs, 8, "general_level_idc", idx); ptl->frame_only_constraint = gf_bs_read_int_log_idx(bs, 1, "frame_only_constraint", idx); ptl->multilayer_enabled = gf_bs_read_int_log_idx(bs, 1, "multilayer_enabled", idx); //general constraints info - max size if 1 + 81 + 8 + 255 if (ptl->pt_present) { // general_constraints_info ptl->gci_present = gf_bs_read_int_log_idx(bs, 1, "gci_present", idx); if (ptl->gci_present) { u8 res; ptl->gci[0] = 0x80; ptl->gci[0] |= gf_bs_read_int(bs, 7); //81-7 = 74 bits till reserved gf_bs_read_data(bs, ptl->gci+1, 9); ptl->gci[10] = gf_bs_read_int(bs, 2)<<6; //skip extensions ptl->gci[11] = 0; res = gf_bs_read_int(bs, 8); gf_bs_read_int(bs, res); } gf_bs_align(bs); } for (i=ptl->ptl_max_tid; i>0; i--) { ptl->sub_ptl[i-1].level_present_flag = gf_bs_read_int_log_idx2(bs, 1, "level_present_flag", idx, i); } gf_bs_align(bs); for (i=ptl->ptl_max_tid; i>0; i--) { if (ptl->sub_ptl[i-1].level_present_flag) ptl->sub_ptl[i-1].sublayer_level_idc = gf_bs_read_int_log_idx2(bs, 8, "sublayer_level_idc", idx, i); } if (ptl->pt_present) { ptl->num_sub_profiles = gf_bs_read_int_log_idx(bs, 8, "num_sub_profiles", idx); for (i=0; i<ptl->num_sub_profiles; i++) { ptl->sub_profile_idc[i] = gf_bs_read_int_log_idx2(bs, 32, "sub_profile_idc", idx, i); } } } static s32 gf_media_vvc_read_vps_bs_internal(GF_BitStream *bs, VVCState *vvc, Bool stop_at_vps_ext) { u32 i, j; s32 vps_id; VVC_VPS *vps; Bool vps_default_ptl_dpb_hrd_max_tid_flag=0; //nalu header already parsed vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if (vps_id >= 16) return -1; if (!vps_id) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] VPS ID 0 is forbidden\n")); return -1; } vps = &vvc->vps[vps_id]; if (!vps->state) { vps->id = vps_id; vps->state = 1; } vps->max_layers = 1 + gf_bs_read_int_log(bs, 6, "max_layers"); if (vps->max_layers > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] sorry, %d layers in VPS but only %d supported\n", vps->max_layers, MAX_LHVC_LAYERS)); return -1; } vps->max_sub_layers = gf_bs_read_int_log(bs, 3, "max_sub_layers_minus1") + 1; if ((vps->max_layers>1) && (vps->max_sub_layers>1)) vps_default_ptl_dpb_hrd_max_tid_flag = gf_bs_read_int_log(bs, 1, 
"vps_default_ptl_dpb_hrd_max_tid_flag"); if (vps->max_layers>1) vps->all_layers_independent = gf_bs_read_int_log(bs, 1, "all_layers_independent"); for (i=0; i<vps->max_layers; i++) { u32 layer_id = gf_bs_read_int_log_idx(bs, 6, "layer_id", i); if (layer_id>vps->max_layer_id) vps->max_layer_id = layer_id; if (i && !vps->all_layers_independent) { Bool layer_indep = gf_bs_read_int_log_idx(bs, 1, "layer_independent", i); if (!layer_indep) { Bool vps_max_tid_ref_present_flag = gf_bs_read_int_log_idx(bs, 1, "vps_max_tid_ref_present_flag", i); for (j=0; j<i; j++) { Bool vps_direct_ref_layer_flag = gf_bs_read_int_log_idx2(bs, 1, "vps_direct_ref_layer_flag", i, j); if (vps_max_tid_ref_present_flag && vps_direct_ref_layer_flag) { gf_bs_read_int_log_idx2(bs, 3, "vps_max_tid_il_ref_pics_plus1", i, j); } } } } } vps->num_ptl = 1; if (vps->max_layers > 1) { if (vps->all_layers_independent) { vps->each_layer_is_ols = gf_bs_read_int_log(bs, 1, "each_layer_is_ols"); } if (!vps->each_layer_is_ols) { u32 vps_ols_mode_idc = 2; if (!vps->all_layers_independent) { vps_ols_mode_idc = gf_bs_read_int_log(bs, 2, "vps_ols_mode_idc"); } if (vps_ols_mode_idc==2) { u8 vps_num_output_layer_sets = 2 + gf_bs_read_int_log(bs, 8, "vps_num_output_layer_sets_minus2"); for (i=0; i<vps_num_output_layer_sets; i++) { for (j=0; j<vps->max_layers; j++) { gf_bs_read_int_log_idx2(bs, 1, "vps_ols_output_layer_flag", i, j); } } } } vps->num_ptl = 1 + gf_bs_read_int_log(bs, 8, "num_ptl_minus1"); } vps->ptl[0].pt_present = 1; for (i=0; i<vps->num_ptl; i++) { if (i) vps->ptl[i].pt_present = gf_bs_read_int_log_idx(bs, 1, "pt_present", i); if (!vps_default_ptl_dpb_hrd_max_tid_flag) vps->ptl[i].ptl_max_tid = gf_bs_read_int_log_idx(bs, 3, "ptl_max_tid", i); else vps->ptl[i].ptl_max_tid = vps->max_sub_layers - 1;; } //align gf_bs_align(bs); for (i=0; i<vps->num_ptl; i++) { vvc_profile_tier_level(bs, &vps->ptl[i], i); } //TODO, parse multilayer stuff return vps_id; } static s32 gf_media_vvc_read_sps_bs_internal(GF_BitStream *bs, VVCState *vvc, u8 layer_id, u32 *vui_flag_pos) { s32 vps_id, sps_id; u32 i, CtbSizeY; VVC_SPS *sps; u8 sps_ptl_dpb_hrd_params_present_flag; if (vui_flag_pos) *vui_flag_pos = 0; sps_id = gf_bs_read_int_log(bs, 4, "sps_id"); if (sps_id >= 16) { return -1; } vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if (vps_id >= 16) { return -1; } if (!vps_id && !vvc->vps[0].state) { vvc->vps[0].state = 1; vvc->vps[0].num_ptl = 1; vvc->vps[0].max_layers = 1; vvc->vps[0].all_layers_independent = 1; } sps = &vvc->sps[sps_id]; if (!sps->state) { sps->state = 1; sps->id = sps_id; sps->vps_id = vps_id; } sps->max_sublayers = 1 + gf_bs_read_int_log(bs, 3, "max_sublayers_minus1"); sps->chroma_format_idc = gf_bs_read_int_log(bs, 2, "chroma_format_idc"); sps->log2_ctu_size = 5 + gf_bs_read_int_log(bs, 2, "log2_ctu_size_minus5"); CtbSizeY = 1<<sps->log2_ctu_size; sps_ptl_dpb_hrd_params_present_flag = gf_bs_read_int_log(bs, 1, "sps_ptl_dpb_hrd_params_present_flag"); if (sps_ptl_dpb_hrd_params_present_flag) { VVC_ProfileTierLevel ptl, *p_ptl; if (sps->vps_id) { p_ptl = &ptl; } else { p_ptl = &vvc->vps[0].ptl[0]; } memset(p_ptl, 0, sizeof(VVC_ProfileTierLevel)); p_ptl->pt_present = 1; p_ptl->ptl_max_tid = sps->max_sublayers; vvc_profile_tier_level(bs, p_ptl, 0); } sps->gdr_enabled = gf_bs_read_int_log(bs, 1, "gdr_enabled"); sps->ref_pic_resampling = gf_bs_read_int_log(bs, 1, "ref_pic_resampling"); if (sps->ref_pic_resampling) sps->res_change_in_clvs = gf_bs_read_int_log(bs, 1, "res_change_in_clvs"); sps->width = gf_bs_read_ue_log(bs, "width"); 
sps->height = gf_bs_read_ue_log(bs, "height"); sps->conf_window = gf_bs_read_int_log(bs, 1, "conformance_window_present_flag"); if (sps->conf_window) { sps->cw_left = gf_bs_read_ue_log(bs, "conformance_window_left"); sps->cw_right = gf_bs_read_ue_log(bs, "conformance_window_right"); sps->cw_top = gf_bs_read_ue_log(bs, "conformance_window_top"); sps->cw_bottom = gf_bs_read_ue_log(bs, "conformance_window_bottom"); } sps->subpic_info_present = gf_bs_read_int_log(bs, 1, "subpic_info_present"); if (sps->subpic_info_present) { sps->nb_subpics = 1 + gf_bs_read_ue_log(bs, "nb_subpics_minus1"); if (sps->nb_subpics>1) { u32 tmpWidthVal, tmpHeightVal; sps->independent_subpic_flags = gf_bs_read_int_log(bs, 1, "independent_subpic_flags"); sps->subpic_same_size = gf_bs_read_int_log(bs, 1, "subpic_same_size"); tmpWidthVal = (sps->width + CtbSizeY-1) / CtbSizeY; tmpWidthVal = gf_get_bit_size(tmpWidthVal); tmpHeightVal = (sps->height + CtbSizeY-1) / CtbSizeY; tmpHeightVal = gf_get_bit_size(tmpHeightVal); for (i=0; i<sps->nb_subpics; i++) { if( !sps->subpic_same_size || !i) { if (i && (sps->width > CtbSizeY)) gf_bs_read_int_log(bs, tmpWidthVal, "subpic_ctu_top_left_x"); if (i && (sps->height > CtbSizeY)) gf_bs_read_int_log(bs, tmpHeightVal, "subpic_ctu_top_left_y"); if ((i+1 < sps->nb_subpics) && (sps->width > CtbSizeY)) gf_bs_read_int_log(bs, tmpWidthVal, "subpic_width_minus1"); if ((i+1 < sps->nb_subpics) && (sps->height > CtbSizeY)) gf_bs_read_int_log(bs, tmpHeightVal, "subpic_height_minus1"); } if (!sps->independent_subpic_flags) { gf_bs_read_int_log(bs, 1, "subpic_treated_as_pic_flag"); gf_bs_read_int_log(bs, 1, "loop_filter_across_subpic_enabled_flag"); } } sps->subpicid_len = gf_bs_read_ue_log(bs, "subpic_id_len_minus1") + 1; sps->subpicid_mapping_explicit = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_explicitly_signalled_flag"); if (sps->subpicid_mapping_explicit) { sps->subpicid_mapping_present = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_present_flag"); if (sps->subpicid_mapping_present) { for (i=0; i<sps->nb_subpics; i++) { gf_bs_read_ue_log(bs, "subpic_id"); } } } } } sps->bitdepth = gf_bs_read_ue_log(bs, "bitdepth_minus8") + 8; gf_bs_read_int_log(bs, 1, "entropy_coding_sync_enabled_flag"); gf_bs_read_int_log(bs, 1, "entry_point_offsets_present_flag"); sps->log2_max_poc_lsb = 4 + gf_bs_read_int_log(bs, 4, "log2_max_poc_lsb_minus4"); if ((sps->poc_msb_cycle_flag = gf_bs_read_int_log(bs, 1, "poc_msb_cycle_flag"))) sps->poc_msb_cycle_len = 1 + gf_bs_read_ue_log(bs, "poc_msb_cycle_len_minus1"); u8 sps_num_extra_ph_bits = 8 * gf_bs_read_int_log(bs, 2, "sps_num_extra_ph_bytes"); for (i=0; i<sps_num_extra_ph_bits; i++) { if (gf_bs_read_int_log_idx(bs, 1, "extra_ph_bit_present_flag", 1)) sps->ph_num_extra_bits++; } u8 sps_num_extra_sh_bits = 8 * gf_bs_read_int_log(bs, 2, "num_extra_sh_bytes"); for (i=0; i<sps_num_extra_sh_bits; i++) { if (gf_bs_read_int_log_idx(bs, 1, "extra_sh_bit_present_flag", i)) sps->sh_num_extra_bits++; } if (sps_ptl_dpb_hrd_params_present_flag) { u8 sps_sublayer_dpb_params_flag = 0; if (sps->max_sublayers>1) { sps_sublayer_dpb_params_flag = gf_bs_read_int_log(bs, 1, "sps_sublayer_dpb_params_flag"); } for (i=(sps_sublayer_dpb_params_flag ? 
0 : sps->max_sublayers-1); i < sps->max_sublayers; i++ ) { gf_bs_read_ue_log_idx(bs, "dpb_max_dec_pic_buffering_minus1", i); gf_bs_read_ue_log_idx(bs, "dpb_max_num_reorder_pics", i); gf_bs_read_ue_log_idx(bs, "dpb_max_latency_increase_plus1", i); } } gf_bs_read_ue_log(bs, "sps_log2_min_luma_coding_block_size_minus2"); gf_bs_read_int_log(bs, 1, "sps_partition_constraints_override_enabled_flag"); gf_bs_read_ue_log(bs, "sps_log2_min_luma_coding_block_size_minus2"); u8 sps_max_mtt_hierarchy_depth_intra_slice_luma = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_intra_slice_luma"); if (sps_max_mtt_hierarchy_depth_intra_slice_luma != 0) { gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_intra_slice_luma"); gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_intra_slice_luma"); } u8 sps_qtbtt_dual_tree_intra_flag = 0; if (sps->chroma_format_idc) { sps_qtbtt_dual_tree_intra_flag = gf_bs_read_int_log(bs, 1, "sps_qtbtt_dual_tree_intra_flag"); } if (sps_qtbtt_dual_tree_intra_flag) { gf_bs_read_ue_log(bs, "sps_log2_diff_min_qt_min_cb_intra_slice_chroma"); u8 sps_max_mtt_hierarchy_depth_intra_slice_chroma = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_intra_slice_chroma"); if( sps_max_mtt_hierarchy_depth_intra_slice_chroma != 0) { gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_intra_slice_chroma"); gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_intra_slice_chroma"); } } gf_bs_read_ue_log(bs, "sps_log2_diff_min_qt_min_cb_inter_slice"); u8 sps_max_mtt_hierarchy_depth_inter_slice = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_inter_slice"); if (sps_max_mtt_hierarchy_depth_inter_slice != 0) { gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_inter_slice"); gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_inter_slice"); } //u8 sps_max_luma_transform_size_64_flag = 0; if (CtbSizeY > 32) { /*sps_max_luma_transform_size_64_flag = */gf_bs_read_int_log(bs, 1, "sps_max_luma_transform_size_64_flag"); } u8 sps_transform_skip_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_transform_skip_enabled_flag"); if (sps_transform_skip_enabled_flag) { gf_bs_read_ue_log(bs, "sps_log2_transform_skip_max_size_minus2"); gf_bs_read_int_log(bs, 1, "sps_bdpcm_enabled_flag"); } if (gf_bs_read_int_log(bs, 1, "sps_mts_enabled_flag")) { gf_bs_read_int_log(bs, 1, "sps_explicit_mts_intra_enabled_flag"); gf_bs_read_int_log(bs, 1, "sps_explicit_mts_inter_enabled_flag"); } gf_bs_read_int_log(bs, 1, "sps_lfnst_enabled_flag"); if (sps->chroma_format_idc) { u8 sps_joint_cbcr_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_joint_cbcr_enabled_flag"); u8 sps_same_qp_table_for_chroma_flag = gf_bs_read_int_log(bs, 1, "sps_same_qp_table_for_chroma_flag"); u32 numQpTables = sps_same_qp_table_for_chroma_flag ? 1 : (sps_joint_cbcr_enabled_flag ? 3 : 2); for (i=0; i<numQpTables; i++) { gf_bs_read_se_log_idx(bs, "sps_qp_table_start_minus26", i); u32 j, sps_num_points_in_qp_table = 1 + gf_bs_read_ue_log_idx(bs, "sps_num_points_in_qp_table_minus1", i); for (j=0; j<sps_num_points_in_qp_table; j++) { gf_bs_read_ue_log_idx2(bs, "sps_delta_qp_in_val_minus1", i, j); gf_bs_read_ue_log_idx2(bs, "sps_delta_qp_diff_val", i, j); } } } gf_bs_read_int_log(bs, 1, "sps_sao_enabled_flag"); sps->alf_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_alf_enabled_flag"); if (sps->alf_enabled_flag && sps->chroma_format_idc) { gf_bs_read_int_log(bs, 1, "sps_ccalf_enabled_flag"); } /*! 
TODO parse the rest !*/ return sps_id; } static s32 gf_media_vvc_read_pps_bs_internal(GF_BitStream *bs, VVCState *vvc) { u32 i; s32 pps_id; VVC_PPS *pps; //NAL header already read pps_id = gf_bs_read_int_log(bs, 6, "pps_id"); if ((pps_id < 0) || (pps_id >= 64)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] wrong PPS ID %d in PPS\n", pps_id)); return -1; } pps = &vvc->pps[pps_id]; if (!pps->state) { pps->id = pps_id; pps->state = 1; } pps->sps_id = gf_bs_read_int_log(bs, 4, "sps_id"); if (pps->sps_id >= 16) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] wrong SPS ID %d in PPS\n", pps->sps_id)); pps->sps_id=0; return -1; } vvc->sps_active_idx = pps->sps_id; /*set active sps*/ pps->mixed_nal_types = gf_bs_read_int_log(bs, 1, "mixed_nal_types"); pps->width = gf_bs_read_ue_log(bs, "width"); pps->height = gf_bs_read_ue_log(bs, "height"); pps->conf_window = gf_bs_read_int_log(bs, 1, "conformance_window_flag"); if (pps->conf_window) { pps->cw_left = gf_bs_read_ue_log(bs, "conf_win_left_offset"); pps->cw_right = gf_bs_read_ue_log(bs, "conf_win_right_offset"); pps->cw_top = gf_bs_read_ue_log(bs, "conf_win_top_offset"); pps->cw_bottom = gf_bs_read_ue_log(bs, "conf_win_bottom_offset"); } //scaling window if (gf_bs_read_int_log(bs, 1, "scaling_window_explicit_signalling_flag")) { gf_bs_read_se_log(bs, "scaling_win_left_offset"); gf_bs_read_se_log(bs, "scaling_win_right_offset"); gf_bs_read_se_log(bs, "scaling_win_top_offset"); gf_bs_read_se_log(bs, "scaling_win_bottom_offset"); } pps->output_flag_present_flag = gf_bs_read_int_log(bs, 1, "output_flag_present_flag"); pps->no_pic_partition_flag = gf_bs_read_int_log(bs, 1, "no_pic_partition_flag"); pps->subpic_id_mapping_present_flag = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_present_flag"); if (pps->subpic_id_mapping_present_flag) { u32 pps_subpic_id_len, pps_num_subpics=0; if (!pps->no_pic_partition_flag) { pps_num_subpics = 1+gf_bs_read_ue_log(bs, "pps_num_subpics_minus1"); } pps_subpic_id_len = 1 + gf_bs_read_ue(bs); for (i=0; i<pps_num_subpics; i++) { gf_bs_read_int_log_idx(bs, pps_subpic_id_len, "subpic_id", i); } } if (!pps->no_pic_partition_flag) { gf_bs_read_int_log(bs, 2, "pps_log2_ctu_size_minus5"); u32 num_exp_tile_columns = 1 + gf_bs_read_ue_log(bs, "num_exp_tile_columns_minus1"); u32 num_exp_tile_rows = 1 + gf_bs_read_ue_log(bs, "num_exp_tile_rows_minus1"); for (i=0; i<num_exp_tile_columns; i++) gf_bs_read_ue_log_idx(bs, "tile_column_width_minus1", i); for (i=0; i<num_exp_tile_rows; i++) gf_bs_read_ue_log_idx(bs, "tile_row_height_minus1", i); //todo parse the rest return pps_id; } //todo parse the rest return pps_id; } static s32 vvc_parse_picture_header(GF_BitStream *bs, VVCState *vvc, VVCSliceInfo *si) { u32 pps_id; si->irap_or_gdr_pic = gf_bs_read_int_log(bs, 1, "irap_or_gdr_pic"); si->non_ref_pic = gf_bs_read_int_log(bs, 1, "non_ref_pic"); if (si->irap_or_gdr_pic) si->gdr_pic = gf_bs_read_int_log(bs, 1, "gdr_pic"); if ((si->inter_slice_allowed_flag = gf_bs_read_int_log(bs, 1, "inter_slice_allowed_flag"))) si->intra_slice_allowed_flag = gf_bs_read_int_log(bs, 1, "intra_slice_allowed_flag"); pps_id = gf_bs_read_ue_log(bs, "pps_id"); if (pps_id >= 64) return -1; si->pps = &vvc->pps[pps_id]; si->sps = &vvc->sps[si->pps->sps_id]; si->poc_lsb = gf_bs_read_int_log(bs, si->sps->log2_max_poc_lsb, "poc_lsb"); si->recovery_point_valid = 0; si->gdr_recovery_count = 0; if (si->gdr_pic) { si->recovery_point_valid = 1; si->gdr_recovery_count = gf_bs_read_ue_log(bs, "gdr_recovery_count"); } gf_bs_read_int_log(bs, si->sps->ph_num_extra_bits, 
"ph_extra_bits"); if (si->sps->poc_msb_cycle_flag) { if ( (si->poc_msb_cycle_present_flag = gf_bs_read_int_log(bs, 1, "poc_msb_cycle_present_flag"))) { si->poc_msb_cycle = gf_bs_read_int_log(bs, si->sps->poc_msb_cycle_len, "poc_msb_cycle"); } } return 0; } static s32 vvc_parse_slice(GF_BitStream *bs, VVCState *vvc, VVCSliceInfo *si) { // u32 CurrSubpicIdx = 0; si->picture_header_in_slice_header_flag = gf_bs_read_int_log(bs, 1, "picture_header_in_slice_header_flag"); if (si->picture_header_in_slice_header_flag) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CODING, ("[VVC] Picture header in slice header incomplete support, cannot guess slice type\n")); si->slice_type = GF_VVC_SLICE_TYPE_UNKNOWN; return vvc_parse_picture_header(bs, vvc, si); } if (!si->sps) return -1; si->slice_type = GF_VVC_SLICE_TYPE_I; if (gf_bs_read_int_log(bs, 1, "sps_subpic_info_present_flag")) { gf_bs_read_int_log(bs, si->sps->subpicid_len, "subpic_id"); //todo update CurrSubpicIdx } if (si->pps->rect_slice_flag ) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[VVC] tiling parsing not supported - patch welcome\n")); return 0; } gf_bs_read_int_log(bs, si->sps->sh_num_extra_bits, "num_extra_bits"); /* if( !pps_rect_slice_flag && NumTilesInPic − sh_slice_address > 1 ) sh_num_tiles_in_slice_minus1 */ if (si->inter_slice_allowed_flag ) si->slice_type = gf_bs_read_int_log(bs, 2, "slice_type"); return 0; } /*this needs further tests !*/ static void vvc_compute_poc(VVCSliceInfo *si) { u32 max_poc_lsb = 1 << (si->sps->log2_max_poc_lsb); /*POC reset for IDR frames, NOT for CRA*/ if (si->irap_or_gdr_pic && !si->gdr_pic) { si->poc_lsb_prev = 0; si->poc_msb_prev = 0; } if (si->poc_msb_cycle_present_flag) { si->poc_msb = si->poc_msb_cycle; } else { if ((si->poc_lsb < si->poc_lsb_prev) && (si->poc_lsb_prev - si->poc_lsb >= max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev + max_poc_lsb; else if ((si->poc_lsb > si->poc_lsb_prev) && (si->poc_lsb - si->poc_lsb_prev > max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev - max_poc_lsb; else si->poc_msb = si->poc_msb_prev; } si->poc = si->poc_msb + si->poc_lsb; } GF_EXPORT s32 gf_media_vvc_parse_nalu_bs(GF_BitStream *bs, VVCState *vvc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { Bool is_slice = GF_FALSE; s32 ret = -1; VVCSliceInfo n_state; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); memcpy(&n_state, &vvc->s_info, sizeof(VVCSliceInfo)); if (!vvc_parse_nal_header(bs, nal_unit_type, temporal_id, layer_id)) return -1; n_state.nal_unit_type = *nal_unit_type; switch (n_state.nal_unit_type) { case GF_VVC_NALU_ACCESS_UNIT: case GF_VVC_NALU_END_OF_SEQ: case GF_VVC_NALU_END_OF_STREAM: ret = 1; break; case GF_VVC_NALU_SLICE_TRAIL: case GF_VVC_NALU_SLICE_STSA: case GF_VVC_NALU_SLICE_RADL: case GF_VVC_NALU_SLICE_RASL: case GF_VVC_NALU_SLICE_IDR_W_RADL: case GF_VVC_NALU_SLICE_IDR_N_LP: case GF_VVC_NALU_SLICE_CRA: case GF_VVC_NALU_SLICE_GDR: /* slice - read the info and compare.*/ ret = vvc_parse_slice(bs, vvc, &n_state); if (ret < 0) return ret; ret = 0; if (n_state.picture_header_in_slice_header_flag) { is_slice = GF_TRUE; vvc_compute_poc(&n_state); if (vvc->s_info.poc != n_state.poc) { ret = 1; break; } if (!(*layer_id) || (n_state.prev_layer_id_plus1 && ((*layer_id) <= n_state.prev_layer_id_plus1 - 1))) { ret = 1; break; } } break; case GF_VVC_NALU_PIC_HEADER: if (vvc_parse_picture_header(bs, vvc, &n_state)<0) { ret = -1; break; } is_slice = GF_TRUE; vvc_compute_poc(&n_state); if (!(*layer_id) || (n_state.prev_layer_id_plus1 && ((*layer_id) <= n_state.prev_layer_id_plus1 - 1))) { ret = 1; } break; case 
GF_VVC_NALU_SEQ_PARAM: vvc->last_parsed_sps_id = gf_media_vvc_read_sps_bs_internal(bs, vvc, *layer_id, NULL); ret = (vvc->last_parsed_sps_id>=0) ? 0 : -1; break; case GF_VVC_NALU_PIC_PARAM: vvc->last_parsed_pps_id = gf_media_vvc_read_pps_bs_internal(bs, vvc); ret = (vvc->last_parsed_pps_id>=0) ? 0 : -1; break; case GF_VVC_NALU_VID_PARAM: vvc->last_parsed_vps_id = gf_media_vvc_read_vps_bs_internal(bs, vvc, GF_FALSE); ret = (vvc->last_parsed_vps_id>=0) ? 0 : -1; break; case GF_VVC_NALU_DEC_PARAM: ret = 0; break; case GF_VVC_NALU_APS_PREFIX: //we use the mix aps type + aps id (first 8 bits) as unique identifier vvc->last_parsed_aps_id = gf_bs_read_int_log(bs, 8, "aps_id"); ret = 0; break; default: ret = 0; break; } /* save _prev values */ if ((ret>0) && vvc->s_info.sps) { // n_state.frame_num_offset_prev = vvc->s_info.frame_num_offset; // n_state.frame_num_prev = vvc->s_info.frame_num; n_state.poc_lsb_prev = vvc->s_info.poc_lsb; n_state.poc_msb_prev = vvc->s_info.poc_msb; if (is_slice) n_state.prev_layer_id_plus1 = *layer_id + 1; } if (is_slice) vvc_compute_poc(&n_state); memcpy(&vvc->s_info, &n_state, sizeof(VVCSliceInfo)); return ret; } GF_EXPORT s32 gf_media_vvc_parse_nalu(u8 *data, u32 size, VVCState *vvc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { GF_BitStream *bs = NULL; s32 ret; if (!vvc) { if (nal_unit_type) (*nal_unit_type) = data[1] >> 3; if (layer_id) (*layer_id) = data[0] & 0x3f; if (temporal_id) (*temporal_id) = (data[1] & 0x7); return -1; } bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); ret = gf_media_vvc_parse_nalu_bs(bs, vvc, nal_unit_type, temporal_id, layer_id); gf_bs_del(bs); return ret; } Bool gf_media_vvc_slice_is_ref(VVCState *vvc) { if (!vvc->s_info.irap_or_gdr_pic) { return GF_FALSE; } if (vvc->s_info.gdr_pic) { if (vvc->s_info.recovery_point_valid) { vvc->s_info.recovery_point_valid = 0; return GF_TRUE; } return GF_FALSE; } return GF_TRUE; }
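
#if 0 //illustrative usage sketch, not part of the library
/* Minimal sketch of how the VVC NAL parsing entry point above could be driven: each
 * start-code-stripped NAL payload is handed to gf_media_vvc_parse_nalu() and the return
 * value (>0: start of a new access unit/picture, 0: continuation, <0: parse error) is used
 * to count access units. The nals/nal_sizes arrays and the helper name are assumptions for
 * the example only; they are not part of the GPAC API. */
static u32 vvc_count_access_units_example(u8 **nals, u32 *nal_sizes, u32 nb_nals)
{
	VVCState vvc;
	u32 i, nb_aus = 0;
	memset(&vvc, 0, sizeof(VVCState));
	for (i = 0; i < nb_nals; i++) {
		u8 nal_type, temporal_id, layer_id;
		s32 res = gf_media_vvc_parse_nalu(nals[i], nal_sizes[i], &vvc, &nal_type, &temporal_id, &layer_id);
		if (res < 0) continue; /* parse error or unsupported NAL, skip */
		if (res > 0) nb_aus++;  /* parser flagged a new picture / AU boundary */
	}
	return nb_aus;
}
#endif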
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre, Romain Bouqueau, Cyril Concolato * Copyright (c) Telecom ParisTech 2000-2021 * All rights reserved * * This file is part of GPAC / Media Tools sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/internal/media_dev.h> #include <gpac/constants.h> #include <gpac/mpeg4_odf.h> #include <gpac/maths.h> #include <gpac/avparse.h> #ifndef GPAC_DISABLE_OGG #include <gpac/internal/ogg.h> #endif //uncomment/define globally to remove all bitstream parsing logging from code (this will break inspect mode ananlyze=bs) //#define GPAC_DISABLE_AVPARSE_LOGS #ifndef GPAC_DISABLE_AVPARSE_LOGS void gf_bs_log_idx(GF_BitStream *bs, u32 nBits, const char *fname, s64 val, s32 idx1, s32 idx2, s32 idx3); #define gf_bs_log(_bs, _nBits, _fname, _val) gf_bs_log_idx(_bs, _nBits, _fname, _val, -1, -1, -1) u32 gf_bs_read_int_log_idx3(GF_BitStream *bs, u32 nBits, const char *fname, s32 idx1, s32 idx2, s32 idx3) { u32 val = gf_bs_read_int(bs, nBits); gf_bs_log_idx(bs, nBits, fname, val, idx1, idx2, idx3); return val; } #define gf_bs_read_int_log(_bs, _nBits, _fname) gf_bs_read_int_log_idx3(_bs, _nBits, _fname, -1, -1, -1) #define gf_bs_read_int_log_idx(_bs, _nBits, _fname, _idx) gf_bs_read_int_log_idx3(_bs, _nBits, _fname, _idx, -1, -1) #define gf_bs_read_int_log_idx2(_bs, _nBits, _fname, _idx1, _idx2) gf_bs_read_int_log_idx3(_bs, _nBits, _fname, (s32) _idx1, (s32) _idx2, -1) #else #define gf_bs_log(_bs, _nBits, _fname, _val) #define gf_bs_log_idx(_bs, _nBits, _fname, _val, _idx1, _idx2, _idx3) #define gf_bs_read_int_log(_bs, _nbb, _f) gf_bs_read_int(_bs, _nbb) #define gf_bs_read_int_log_idx(_bs, _nbb, _f, _idx) gf_bs_read_int(_bs, _nbb) #define gf_bs_read_int_log_idx2(_bs, _nbb, _f, _idx1, _idx2) gf_bs_read_int(_bs, _nbb) #define gf_bs_read_int_log_idx3(_bs, _nbb, _f, _idx1, _idx2, _idx3) gf_bs_read_int(_bs, _nbb) #endif static const struct { u32 w, h; } std_par[] = { { 4, 3}, {3, 2}, {16, 9}, {5, 3}, {5, 4}, {8, 5}, {2, 1}, {1, 1}, {0, 0}, }; GF_EXPORT void gf_media_reduce_aspect_ratio(u32 *width, u32 *height) { u32 i = 0; u32 w = *width; u32 h = *height; while (std_par[i].w) { if (std_par[i].w * h == std_par[i].h * w) { *width = std_par[i].w; *height = std_par[i].h; return; } i++; } //not standard one, reduce by power of 2 i = 2; while (1) { if (w <= i) return; if (h <= i) return; if (w % i) return; if (h % i) return; *width = w / i; *height = h / i; i *= 2; } } GF_EXPORT void gf_media_get_reduced_frame_rate(u32 *timescale, u32 *sample_dur) { u32 res; if (!*sample_dur) return; res = *timescale / *sample_dur; if (res * (*sample_dur) == *timescale) { *timescale = res; *sample_dur = 1; } else if ((double)(*timescale * 1001 - (res + 1) * *sample_dur * 1000) / ((res + 1) * *sample_dur * 1000) < 0.001) { *timescale = (res + 1) * 1000; *sample_dur = 1001; } } struct __m4v_profile { u32 value; const char 
*name; } M4VProfiles[] = { {0x00, "Reserved (0x00) Profile"}, {0x01, "Simple Profile @ Level 1"}, {0x02, "Simple Profile @ Level 2"}, {0x03, "Simple Profile @ Level 3"}, {0x08, "Simple Profile @ Level 0"}, {0x10, "Simple Scalable Profile @ Level 0"}, {0x11, "Simple Scalable Profile @ Level 1"}, {0x12, "Simple Scalable Profile @ Level 2"}, {0x21, "Core Profile @ Level 1"}, {0x22, "Core Profile @ Level 2"}, {0x32, "Main Profile @ Level 2"}, {0x33, "Main Profile @ Level 3"}, {0x34, "Main Profile @ Level 4"}, {0x42, "N-bit Profile @ Level 2"}, {0x51, "Scalable Texture Profile @ Level 1"}, {0x61, "Simple Face Animation Profile @ Level 1"}, {0x62, "Simple Face Animation Profile @ Level 2"}, {0x63, "Simple FBA Profile @ Level 1"}, {0x64, "Simple FBA Profile @ Level 2"}, {0x71, "Basic Animated Texture Profile @ Level 1"}, {0x72, "Basic Animated Texture Profile @ Level 2"}, {0x7F, "AVC/H264 Profile"}, {0x81, "Hybrid Profile @ Level 1"}, {0x82, "Hybrid Profile @ Level 2"}, {0x91, "Advanced Real Time Simple Profile @ Level 1"}, {0x92, "Advanced Real Time Simple Profile @ Level 2"}, {0x93, "Advanced Real Time Simple Profile @ Level 3"}, {0x94, "Advanced Real Time Simple Profile @ Level 4"}, {0xA1, "Core Scalable Profile @ Level1"}, {0xA2, "Core Scalable Profile @ Level2"}, {0xA3, "Core Scalable Profile @ Level3"}, {0xB1, "Advanced Coding Efficiency Profile @ Level 1"}, {0xB2, "Advanced Coding Efficiency Profile @ Level 2"}, {0xB3, "Advanced Coding Efficiency Profile @ Level 3"}, {0xB4, "Advanced Coding Efficiency Profile @ Level 4"}, {0xC1, "Advanced Core Profile @ Level 1"}, {0xC2, "Advanced Core Profile @ Level 2"}, {0xD1, "Advanced Scalable Texture @ Level1"}, {0xD2, "Advanced Scalable Texture @ Level2"}, {0xE1, "Simple Studio Profile @ Level 1"}, {0xE2, "Simple Studio Profile @ Level 2"}, {0xE3, "Simple Studio Profile @ Level 3"}, {0xE4, "Simple Studio Profile @ Level 4"}, {0xE5, "Core Studio Profile @ Level 1"}, {0xE6, "Core Studio Profile @ Level 2"}, {0xE7, "Core Studio Profile @ Level 3"}, {0xE8, "Core Studio Profile @ Level 4"}, {0xF0, "Advanced Simple Profile @ Level 0"}, {0xF1, "Advanced Simple Profile @ Level 1"}, {0xF2, "Advanced Simple Profile @ Level 2"}, {0xF3, "Advanced Simple Profile @ Level 3"}, {0xF4, "Advanced Simple Profile @ Level 4"}, {0xF5, "Advanced Simple Profile @ Level 5"}, {0xF7, "Advanced Simple Profile @ Level 3b"}, {0xF8, "Fine Granularity Scalable Profile @ Level 0"}, {0xF9, "Fine Granularity Scalable Profile @ Level 1"}, {0xFA, "Fine Granularity Scalable Profile @ Level 2"}, {0xFB, "Fine Granularity Scalable Profile @ Level 3"}, {0xFC, "Fine Granularity Scalable Profile @ Level 4"}, {0xFD, "Fine Granularity Scalable Profile @ Level 5"}, {0xFE, "Not part of MPEG-4 Visual profiles"}, {0xFF, "No visual capability required"} }; GF_EXPORT const char *gf_m4v_get_profile_name(u8 video_pl) { u32 i, count = GF_ARRAY_LENGTH(M4VProfiles); for (i=0; i<count; i++) { if ((u32)video_pl == M4VProfiles[i].value) return M4VProfiles[i].name; } return "ISO Reserved Profile"; } #ifndef GPAC_DISABLE_AV_PARSERS #define MPEG12_START_CODE_PREFIX 0x000001 #define MPEG12_PICTURE_START_CODE 0x00000100 #define MPEG12_SLICE_MIN_START 0x00000101 #define MPEG12_SLICE_MAX_START 0x000001af #define MPEG12_USER_DATA_START_CODE 0x000001b2 #define MPEG12_SEQUENCE_START_CODE 0x000001b3 #define MPEG12_SEQUENCE_ERR_START_CODE 0x000001b4 #define MPEG12_EXT_START_CODE 0x000001b5 #define MPEG12_SEQUENCE_END_START_CODE 0x000001b7 #define MPEG12_GOP_START_CODE 0x000001b8 s32 gf_mv12_next_start_code(unsigned char 
*pbuffer, u32 buflen, u32 *optr, u32 *scode) { u32 value; u32 offset; if (buflen < 4) return -1; for (offset = 0; offset < buflen - 3; offset++, pbuffer++) { #ifdef GPAC_BIG_ENDIAN value = *(u32 *)pbuffer >> 8; #else value = (pbuffer[0] << 16) | (pbuffer[1] << 8) | (pbuffer[2] << 0); #endif if (value == MPEG12_START_CODE_PREFIX) { *optr = offset; *scode = (value << 8) | pbuffer[3]; return 0; } } return -1; } s32 gf_mv12_next_slice_start(unsigned char *pbuffer, u32 startoffset, u32 buflen, u32 *slice_offset) { u32 slicestart, code; while (gf_mv12_next_start_code(pbuffer + startoffset, buflen - startoffset, &slicestart, &code) >= 0) { if ((code >= MPEG12_SLICE_MIN_START) && (code <= MPEG12_SLICE_MAX_START)) { *slice_offset = slicestart + startoffset; return 0; } startoffset += slicestart + 4; } return -1; } /* MPEG-4 video (14496-2) */ struct __tag_m4v_parser { GF_BitStream *bs; Bool mpeg12, step_mode; u32 current_object_type; u32 force_next_obj_type; u64 current_object_start; u32 tc_dec, prev_tc_dec, tc_disp, prev_tc_disp; }; GF_EXPORT GF_M4VParser *gf_m4v_parser_new(u8 *data, u64 data_size, Bool mpeg12video) { GF_M4VParser *tmp; if (!data || !data_size) return NULL; GF_SAFEALLOC(tmp, GF_M4VParser); if (!tmp) return NULL; tmp->bs = gf_bs_new(data, data_size, GF_BITSTREAM_READ); tmp->mpeg12 = mpeg12video; return tmp; } GF_M4VParser *gf_m4v_parser_bs_new(GF_BitStream *bs, Bool mpeg12video) { GF_M4VParser *tmp; GF_SAFEALLOC(tmp, GF_M4VParser); if (!tmp) return NULL; tmp->bs = bs; tmp->mpeg12 = mpeg12video; return tmp; } GF_EXPORT void gf_m4v_parser_del(GF_M4VParser *m4v) { gf_bs_del(m4v->bs); gf_free(m4v); } GF_EXPORT void gf_m4v_parser_del_no_bs(GF_M4VParser *m4v) { gf_free(m4v); } GF_EXPORT void gf_m4v_parser_set_inspect(GF_M4VParser *m4v) { if (m4v) m4v->step_mode = 1; } GF_EXPORT u32 gf_m4v_parser_get_obj_type(GF_M4VParser *m4v) { if (m4v) return m4v->current_object_type; return 0; } #define M4V_CACHE_SIZE 4096 s32 M4V_LoadObject(GF_M4VParser *m4v) { u32 v, bpos, found; char m4v_cache[M4V_CACHE_SIZE]; u64 end, cache_start, load_size; if (!m4v) return 0; if (m4v->force_next_obj_type) { m4v->current_object_type = m4v->force_next_obj_type - 1; m4v->force_next_obj_type = 0; return (s32)m4v->current_object_type; } bpos = 0; found = 0; load_size = 0; end = 0; cache_start = 0; v = 0xffffffff; while (!end) { /*refill cache*/ if (bpos == (u32)load_size) { if (!gf_bs_available(m4v->bs)) break; load_size = gf_bs_available(m4v->bs); if (load_size > M4V_CACHE_SIZE) load_size = M4V_CACHE_SIZE; bpos = 0; cache_start = gf_bs_get_position(m4v->bs); gf_bs_read_data(m4v->bs, m4v_cache, (u32)load_size); } v = ((v << 8) & 0xFFFFFF00) | ((u8)m4v_cache[bpos]); bpos++; if ((v & 0xFFFFFF00) == 0x00000100) { end = cache_start + bpos - 4; found = 1; break; } } if (!found) return -1; m4v->current_object_start = end; gf_bs_seek(m4v->bs, end + 3); m4v->current_object_type = gf_bs_read_u8(m4v->bs); return (s32)m4v->current_object_type; } GF_EXPORT void gf_m4v_rewrite_pl(u8 **o_data, u32 *o_dataLen, u8 PL) { u32 pos = 0; unsigned char *data = (unsigned char *)*o_data; u32 dataLen = *o_dataLen; while (pos + 4 < dataLen) { if (!data[pos] && !data[pos + 1] && (data[pos + 2] == 0x01) && (data[pos + 3] == M4V_VOS_START_CODE)) { data[pos + 4] = PL; return; } pos++; } /*emulate VOS at beggining*/ (*o_data) = (char *)gf_malloc(sizeof(char)*(dataLen + 5)); (*o_data)[0] = 0; (*o_data)[1] = 0; (*o_data)[2] = 1; (*o_data)[3] = (char)M4V_VOS_START_CODE; (*o_data)[4] = PL; memcpy((*o_data + 5), data, sizeof(char)*dataLen); 
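	/* Descriptive note (not from the original source): a 5-byte Visual Object Sequence
	 * header (start-code prefix, VOS start code, new profile/level byte) has just been
	 * written in front of a copy of the original bitstream; the old buffer is released
	 * below and the enlarged copy is returned to the caller. */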
gf_free(data); (*o_dataLen) = dataLen + 5; } static GF_Err M4V_Reset(GF_M4VParser *m4v, u64 start) { gf_bs_seek(m4v->bs, start); assert(start < (u64)1<<31); m4v->current_object_start = (u32)start; m4v->current_object_type = 0; return GF_OK; } void gf_m4v_parser_reset(GF_M4VParser *m4v, u8 sc_type) { m4v->current_object_start = 0; m4v->current_object_type = 0; m4v->force_next_obj_type = sc_type; } static GF_Err gf_m4v_parse_config_mpeg12(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { unsigned char p[4]; u32 ext_type; s32 o_type; u8 go, par; if (!m4v || !dsi) return GF_BAD_PARAM; memset(dsi, 0, sizeof(GF_M4VDecSpecInfo)); dsi->VideoPL = 0; go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { case M2V_SEQ_START_CODE: dsi->RAP_stream = 1; gf_bs_read_data(m4v->bs, (char *)p, 4); dsi->width = (p[0] << 4) | ((p[1] >> 4) & 0xf); dsi->height = ((p[1] & 0xf) << 8) | p[2]; dsi->VideoPL = GF_CODECID_MPEG1; par = (p[3] >> 4) & 0xf; switch (par) { case 2: dsi->par_num = dsi->height / 3; dsi->par_den = dsi->width / 4; break; case 3: dsi->par_num = dsi->height / 9; dsi->par_den = dsi->width / 16; break; case 4: dsi->par_num = dsi->height / 2; dsi->par_den = dsi->width / 21; break; default: dsi->par_den = dsi->par_num = 0; break; } switch (p[3] & 0xf) { case 0: break; case 1: dsi->fps = 24000.0 / 1001.0; break; case 2: dsi->fps = 24.0; break; case 3: dsi->fps = 25.0; break; case 4: dsi->fps = 30000.0 / 1001.0; break; case 5: dsi->fps = 30.0; break; case 6: dsi->fps = 50.0; break; case 7: dsi->fps = ((60.0*1000.0) / 1001.0); break; case 8: dsi->fps = 60.0; break; case 9: dsi->fps = 1; break; case 10: dsi->fps = 5; break; case 11: dsi->fps = 10; break; case 12: dsi->fps = 12; break; case 13: dsi->fps = 15; break; } break; case M2V_EXT_START_CODE: gf_bs_read_data(m4v->bs, (char *)p, 4); ext_type = ((p[0] >> 4) & 0xf); if (ext_type == 1) { dsi->VideoPL = 0x65; dsi->height = ((p[1] & 0x1) << 13) | ((p[2] & 0x80) << 5) | (dsi->height & 0x0fff); dsi->width = (((p[2] >> 5) & 0x3) << 12) | (dsi->width & 0x0fff); } break; case M2V_PIC_START_CODE: if (dsi->width) go = 0; break; default: break; /*EOS*/ case -1: go = 0; m4v->current_object_start = gf_bs_get_position(m4v->bs); break; } } M4V_Reset(m4v, 0); return GF_OK; } static const struct { u32 w, h; } m4v_sar[6] = { { 0, 0 }, { 1, 1 }, { 12, 11 }, { 10, 11 }, { 16, 11 }, { 40, 33 } }; static u8 m4v_get_sar_idx(u32 w, u32 h) { u32 i; for (i = 0; i < 6; i++) { if ((m4v_sar[i].w == w) && (m4v_sar[i].h == h)) return i; } return 0xF; } static void gf_m4v_parse_vol(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { u8 verid, par; s32 clock_rate; u8 vpl = dsi->VideoPL; memset(dsi, 0, sizeof(GF_M4VDecSpecInfo)); dsi->VideoPL = vpl; verid = 0; dsi->RAP_stream = gf_bs_read_int(m4v->bs, 1); dsi->objectType = gf_bs_read_int(m4v->bs, 8); if (gf_bs_read_int(m4v->bs, 1)) { verid = gf_bs_read_int(m4v->bs, 4); gf_bs_read_int(m4v->bs, 3); } par = gf_bs_read_int(m4v->bs, 4); if (par == 0xF) { dsi->par_num = gf_bs_read_int(m4v->bs, 8); dsi->par_den = gf_bs_read_int(m4v->bs, 8); } else if (par<6) { dsi->par_num = m4v_sar[par].w; dsi->par_den = m4v_sar[par].h; } if (gf_bs_read_int(m4v->bs, 1)) { gf_bs_read_int(m4v->bs, 3); if (gf_bs_read_int(m4v->bs, 1)) gf_bs_read_int(m4v->bs, 79); } dsi->has_shape = gf_bs_read_int(m4v->bs, 2); if (dsi->has_shape && (verid!=1) ) gf_bs_read_int(m4v->bs, 4); gf_bs_read_int(m4v->bs, 1); /*clock rate*/ dsi->clock_rate = gf_bs_read_int(m4v->bs, 16); /*marker*/ gf_bs_read_int(m4v->bs, 1); clock_rate = dsi->clock_rate-1; if (clock_rate >= 65536) 
clock_rate = 65535; if (clock_rate > 0) { for (dsi->NumBitsTimeIncrement = 1; dsi->NumBitsTimeIncrement < 16; dsi->NumBitsTimeIncrement++) { if (clock_rate == 1) break; clock_rate = (clock_rate >> 1); } } else { /*fix from vivien for divX*/ dsi->NumBitsTimeIncrement = 1; } /*fixed FPS stream*/ dsi->time_increment = 0; if (gf_bs_read_int(m4v->bs, 1)) { dsi->time_increment = gf_bs_read_int(m4v->bs, dsi->NumBitsTimeIncrement); } if (!dsi->has_shape) { gf_bs_read_int(m4v->bs, 1); dsi->width = gf_bs_read_int(m4v->bs, 13); gf_bs_read_int(m4v->bs, 1); dsi->height = gf_bs_read_int(m4v->bs, 13); } else { dsi->width = dsi->height = 0; } gf_bs_align(m4v->bs); } static GF_Err gf_m4v_parse_config_mpeg4(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { s32 o_type; u8 go; if (!m4v || !dsi) return GF_BAD_PARAM; memset(dsi, 0, sizeof(GF_M4VDecSpecInfo)); go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { /*vosh*/ case M4V_VOS_START_CODE: dsi->VideoPL = (u8)gf_bs_read_u8(m4v->bs); break; case M4V_VOL_START_CODE: gf_m4v_parse_vol(m4v, dsi); /*shape will be done later*/ gf_bs_align(m4v->bs); break; case M4V_VOP_START_CODE: case M4V_GOV_START_CODE: go = 0; break; /*EOS*/ case -1: m4v->current_object_start = gf_bs_get_position(m4v->bs); return GF_EOS; /*don't interest us*/ case M4V_UDTA_START_CODE: default: break; } } return GF_OK; } GF_EXPORT GF_Err gf_m4v_parse_config(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { if (m4v->mpeg12) { return gf_m4v_parse_config_mpeg12(m4v, dsi); } else { return gf_m4v_parse_config_mpeg4(m4v, dsi); } } static GF_Err gf_m4v_parse_frame_mpeg12(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi, u8 *frame_type, u32 *time_inc, u64 *size, u64 *start, Bool *is_coded) { u8 go, hasVOP, firstObj, val; s32 o_type; if (!m4v || !size || !start || !frame_type) return GF_BAD_PARAM; *size = 0; firstObj = 1; hasVOP = 0; *is_coded = GF_FALSE; m4v->current_object_type = (u32)-1; *frame_type = 0; if (!m4v->step_mode) M4V_Reset(m4v, m4v->current_object_start); go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { case M2V_PIC_START_CODE: /*done*/ if (hasVOP) { go = 0; break; } if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } hasVOP = 1; *is_coded = 1; /*val = */gf_bs_read_u8(m4v->bs); val = gf_bs_read_u8(m4v->bs); *frame_type = ((val >> 3) & 0x7) - 1; break; case M2V_GOP_START_CODE: if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } if (hasVOP) go = 0; break; case M2V_SEQ_START_CODE: if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } if (hasVOP) { go = 0; break; } /**/ break; default: break; case -1: *size = gf_bs_get_position(m4v->bs) - *start; return GF_EOS; } if (m4v->step_mode) return GF_OK; } *size = m4v->current_object_start - *start; return GF_OK; } static GF_Err gf_m4v_parse_frame_mpeg4(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi, u8 *frame_type, u32 *time_inc, u64 *size, u64 *start, Bool *is_coded) { u8 go, hasVOP, firstObj, secs; s32 o_type; u32 vop_inc = 0; if (!m4v || !size || !start || !frame_type) return GF_BAD_PARAM; *size = 0; firstObj = 1; hasVOP = 0; *is_coded = 0; m4v->current_object_type = (u32)-1; *frame_type = 0; *start = 0; if (!m4v->step_mode) M4V_Reset(m4v, m4v->current_object_start); go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { case M4V_VOP_START_CODE: /*done*/ if (hasVOP) { go = 0; break; } if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } hasVOP = 1; /*coding type*/ *frame_type = gf_bs_read_int(m4v->bs, 2); /*modulo time base*/ secs = 0; while 
(gf_bs_read_int(m4v->bs, 1) != 0) secs++; /*no support for B frames in parsing*/ secs += (dsi->enh_layer || *frame_type!=2) ? m4v->tc_dec : m4v->tc_disp; /*marker*/ gf_bs_read_int(m4v->bs, 1); /*vop_time_inc*/ if (dsi->NumBitsTimeIncrement) vop_inc = gf_bs_read_int(m4v->bs, dsi->NumBitsTimeIncrement); m4v->prev_tc_dec = m4v->tc_dec; m4v->prev_tc_disp = m4v->tc_disp; if (dsi->enh_layer || *frame_type!=2) { m4v->tc_disp = m4v->tc_dec; m4v->tc_dec = secs; } *time_inc = secs * dsi->clock_rate + vop_inc; /*marker*/ gf_bs_read_int(m4v->bs, 1); /*coded*/ *is_coded = gf_bs_read_int(m4v->bs, 1); gf_bs_align(m4v->bs); break; case M4V_GOV_START_CODE: if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } if (hasVOP) go = 0; break; case M4V_VOL_START_CODE: if (m4v->step_mode) gf_m4v_parse_vol(m4v, dsi); case M4V_VOS_START_CODE: if (hasVOP) { go = 0; } else if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } break; case M4V_VO_START_CODE: default: break; case -1: *size = gf_bs_get_position(m4v->bs) - *start; return GF_EOS; } if (m4v->step_mode) return GF_OK; } assert(m4v->current_object_start >= *start); *size = m4v->current_object_start - *start; return GF_OK; } GF_EXPORT GF_Err gf_m4v_parse_frame(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi, u8 *frame_type, u32 *time_inc, u64 *size, u64 *start, Bool *is_coded) { if (m4v->mpeg12) { return gf_m4v_parse_frame_mpeg12(m4v, dsi, frame_type, time_inc, size, start, is_coded); } else { return gf_m4v_parse_frame_mpeg4(m4v, dsi, frame_type, time_inc, size, start, is_coded); } } GF_Err gf_m4v_rewrite_par(u8 **o_data, u32 *o_dataLen, s32 par_n, s32 par_d) { u64 start, end, size; GF_BitStream *mod; GF_M4VParser *m4v; Bool go = 1; m4v = gf_m4v_parser_new(*o_data, *o_dataLen, 0); mod = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); start = 0; while (go) { u32 type = M4V_LoadObject(m4v); end = gf_bs_get_position(m4v->bs) - 4; size = end - start; /*store previous object*/ if (size) { assert (size < (u64)1<<31); gf_bs_write_data(mod, *o_data + start, (u32)size); start = end; } switch (type) { case M4V_VOL_START_CODE: gf_bs_write_int(mod, 0, 8); gf_bs_write_int(mod, 0, 8); gf_bs_write_int(mod, 1, 8); gf_bs_write_int(mod, M4V_VOL_START_CODE, 8); gf_bs_write_int(mod, gf_bs_read_int(m4v->bs, 1), 1); gf_bs_write_int(mod, gf_bs_read_int(m4v->bs, 8), 8); start = gf_bs_read_int(m4v->bs, 1); gf_bs_write_int(mod, (u32)start, 1); if (start) { gf_bs_write_int(mod, gf_bs_read_int(m4v->bs, 7), 7); } start = gf_bs_read_int(m4v->bs, 4); if (start == 0xF) { gf_bs_read_int(m4v->bs, 8); gf_bs_read_int(m4v->bs, 8); } if ((par_n >= 0) && (par_d >= 0)) { u8 par = m4v_get_sar_idx(par_n, par_d); gf_bs_write_int(mod, par, 4); if (par == 0xF) { gf_bs_write_int(mod, par_n, 8); gf_bs_write_int(mod, par_d, 8); } } else { gf_bs_write_int(mod, 0x0, 4); } case -1: go = 0; break; default: break; } } while (gf_bs_bits_available(m4v->bs)) { u32 b = gf_bs_read_int(m4v->bs, 1); gf_bs_write_int(mod, b, 1); } gf_m4v_parser_del(m4v); gf_free(*o_data); gf_bs_get_content(mod, o_data, o_dataLen); gf_bs_del(mod); return GF_OK; } GF_EXPORT u64 gf_m4v_get_object_start(GF_M4VParser *m4v) { return m4v->current_object_start; } #if 0 //unused Bool gf_m4v_is_valid_object_type(GF_M4VParser *m4v) { return ((s32)m4v->current_object_type == -1) ? 
0 : 1; } #endif GF_EXPORT GF_Err gf_m4v_get_config(u8 *rawdsi, u32 rawdsi_size, GF_M4VDecSpecInfo *dsi) { GF_Err e; GF_M4VParser *vparse; if (!rawdsi || !rawdsi_size) return GF_NON_COMPLIANT_BITSTREAM; vparse = gf_m4v_parser_new(rawdsi, rawdsi_size, 0); e = gf_m4v_parse_config(vparse, dsi); dsi->next_object_start = (u32)vparse->current_object_start; gf_m4v_parser_del(vparse); return e < 0 ? e : GF_OK; } GF_EXPORT GF_Err gf_mpegv12_get_config(u8 *rawdsi, u32 rawdsi_size, GF_M4VDecSpecInfo *dsi) { GF_Err e; GF_M4VParser *vparse; if (!rawdsi || !rawdsi_size) return GF_NON_COMPLIANT_BITSTREAM; vparse = gf_m4v_parser_new(rawdsi, rawdsi_size, GF_TRUE); e = gf_m4v_parse_config(vparse, dsi); dsi->next_object_start = (u32)vparse->current_object_start; gf_m4v_parser_del(vparse); return e; } #endif /* AAC parser */ struct __m4a_oti { u32 type; const char *name; } M4AObjectTypes[] = { {0, "MPEG-4 Audio Reserved"}, {1, "MPEG-4 Audio AAC Main"}, {2, "MPEG-4 Audio AAC LC"}, {3, "MPEG-4 Audio AAC SSR"}, {4, "MPEG-4 Audio AAC LTP"}, {5, "MPEG-4 Audio SBR"}, {6, "MPEG-4 Audio AAC Scalable"}, {7, "MPEG-4 Audio TwinVQ"}, {8, "MPEG-4 Audio CELP"}, {9, "MPEG-4 Audio HVXC"}, {10, "MPEG-4 Audio Reserved"}, {11, "MPEG-4 Audio Reserved"}, {12, "MPEG-4 Audio TTSI"}, {13, "MPEG-4 Audio Main synthetic"}, {14, "MPEG-4 Audio Wavetable synthesis"}, {15, "MPEG-4 Audio General MIDI"}, {16, "MPEG-4 Audio Algorithmic Synthesis and Audio FX"}, {17, "MPEG-4 Audio ER AAC LC"}, {18, "MPEG-4 Audio Reserved"}, {19, "MPEG-4 Audio ER AAC LTP"}, {20, "MPEG-4 Audio ER AAC scalable"}, {21, "MPEG-4 Audio ER TwinVQ"}, {22, "MPEG-4 Audio ER BSAC"}, {23, "MPEG-4 Audio ER AAC LD"}, {24, "MPEG-4 Audio ER CELP"}, {25, "MPEG-4 Audio ER HVXC"}, {26, "MPEG-4 Audio ER HILN"}, {27, "MPEG-4 Audio ER Parametric"}, {28, "MPEG-4 Audio SSC"}, {29, "MPEG-4 Audio ParametricStereo"}, {30, "MPEG-4 Audio Reserved"}, {31, "MPEG-4 Audio Reserved"}, {32, "MPEG-1 Audio Layer-1"}, {33, "MPEG-1 Audio Layer-2"}, {34, "MPEG-1 Audio Layer-3"}, {35, "MPEG-4 Audio DST"}, {36, "MPEG-4 Audio ALS"}, {37, "MPEG-4 Audio SLS"}, {42, "MPEG Audio xHE-AAC"}, }; GF_EXPORT const char *gf_m4a_object_type_name(u32 objectType) { u32 i, count = GF_ARRAY_LENGTH(M4AObjectTypes); for (i=0; i<count; i++) { if (objectType==M4AObjectTypes[i].type) return M4AObjectTypes[i].name; } return "MPEG-4 Audio Unknown"; } struct __m4a_profile { u32 value; const char *name; } M4AProfiles[] = { {0x00, "ISO Reserved (0x00)"}, {0x01, "Main Audio Profile @ Level 1"}, {0x02, "Main Audio Profile @ Level 2"}, {0x03, "Main Audio Profile @ Level 3"}, {0x04, "Main Audio Profile @ Level 4"}, {0x05, "Scalable Audio Profile @ Level 1"}, {0x06, "Scalable Audio Profile @ Level 2"}, {0x07, "Scalable Audio Profile @ Level 3"}, {0x08, "Scalable Audio Profile @ Level 4"}, {0x09, "Speech Audio Profile @ Level 1"}, {0x0A, "Speech Audio Profile @ Level 2"}, {0x0B, "Synthetic Audio Profile @ Level 1"}, {0x0C, "Synthetic Audio Profile @ Level 2"}, {0x0D, "Synthetic Audio Profile @ Level 3"}, {0x0E, "High Quality Audio Profile @ Level 1"}, {0x0F, "High Quality Audio Profile @ Level 2"}, {0x10, "High Quality Audio Profile @ Level 3"}, {0x11, "High Quality Audio Profile @ Level 4"}, {0x12, "High Quality Audio Profile @ Level 5"}, {0x13, "High Quality Audio Profile @ Level 6"}, {0x14, "High Quality Audio Profile @ Level 7"}, {0x15, "High Quality Audio Profile @ Level 8"}, {0x16, "Low Delay Audio Profile @ Level 1"}, {0x17, "Low Delay Audio Profile @ Level 2"}, {0x18, "Low Delay Audio Profile @ Level 3"}, {0x19, "Low Delay 
Audio Profile @ Level 4"}, {0x1A, "Low Delay Audio Profile @ Level 5"}, {0x1B, "Low Delay Audio Profile @ Level 6"}, {0x1C, "Low Delay Audio Profile @ Level 7"}, {0x1D, "Low Delay Audio Profile @ Level 8"}, {0x1E, "Natural Audio Profile @ Level 1"}, {0x1F, "Natural Audio Profile @ Level 2"}, {0x20, "Natural Audio Profile @ Level 3"}, {0x21, "Natural Audio Profile @ Level 4"}, {0x22, "Mobile Audio Internetworking Profile @ Level 1"}, {0x23, "Mobile Audio Internetworking Profile @ Level 2"}, {0x24, "Mobile Audio Internetworking Profile @ Level 3"}, {0x25, "Mobile Audio Internetworking Profile @ Level 4"}, {0x26, "Mobile Audio Internetworking Profile @ Level 5"}, {0x27, "Mobile Audio Internetworking Profile @ Level 6"}, {0x28, "AAC Profile @ Level 1"}, {0x29, "AAC Profile @ Level 2"}, {0x2A, "AAC Profile @ Level 4"}, {0x2B, "AAC Profile @ Level 5"}, {0x2C, "High Efficiency AAC Profile @ Level 2"}, {0x2D, "High Efficiency AAC Profile @ Level 3"}, {0x2E, "High Efficiency AAC Profile @ Level 4"}, {0x2F, "High Efficiency AAC Profile @ Level 5"}, {0x30, "High Efficiency AAC v2 Profile @ Level 2"}, {0x31, "High Efficiency AAC v2 Profile @ Level 3"}, {0x32, "High Efficiency AAC v2 Profile @ Level 4"}, {0x33, "High Efficiency AAC v2 Profile @ Level 5"}, {0x34, "Low Delay AAC Profile"}, {0x35, "Baseline MPEG Surround Profile @ Level 1"}, {0x36, "Baseline MPEG Surround Profile @ Level 2"}, {0x37, "Baseline MPEG Surround Profile @ Level 3"}, {0x38, "Baseline MPEG Surround Profile @ Level 4"}, {0x39, "Baseline MPEG Surround Profile @ Level 5"}, {0x3A, "Baseline MPEG Surround Profile @ Level 6"}, {0x3B, "High Definition AAC Profile @ Level 1"}, {0x3C, "ALS Simple Profile @ Level 1"}, {0x50, "AAC Profile @ Level 6"}, {0x51, "AAC Profile @ Level 7"}, {0x52, "High Efficiency AAC Profile @ Level 6"}, {0x53, "High Efficiency AAC Profile @ Level 7"}, {0x54, "High Efficiency AAC v2 Profile @ Level 6"}, {0x55, "High Efficiency AAC v2 Profile @ Level 7"}, {0x56, "Extended High Efficiency AAC Profile @ Level 6"}, {0x57, "Extended High Efficiency AAC Profile @ Level 7"}, {0xFE, "Not part of MPEG-4 audio profiles"}, {0xFF, "No audio capability required"} }; GF_EXPORT const char *gf_m4a_get_profile_name(u8 audio_pl) { u32 i, count = GF_ARRAY_LENGTH(M4AProfiles); for (i=0; i<count; i++) { if ((u32) audio_pl==M4AProfiles[i].value) return M4AProfiles[i].name; } return "ISO Reserved / User Private"; } #ifndef GPAC_DISABLE_AV_PARSERS GF_EXPORT u32 gf_m4a_get_profile(GF_M4ADecSpecInfo *cfg) { switch (cfg->base_object_type) { case 2: /*AAC LC*/ if (cfg->nb_chan <= 2) return (cfg->base_sr <= 24000) ? 0x28 : 0x29; /*LC@L1 or LC@L2*/ if (cfg->nb_chan <= 5) return (cfg->base_sr <= 48000) ? 0x2A : 0x2B; /*LC@L4 or LC@L5*/ return (cfg->base_sr <= 48000) ? 0x50 : 0x51; /*LC@L4 or LC@L5*/ case 5: /*HE-AAC - SBR*/ if (cfg->nb_chan <= 2) return (cfg->base_sr <= 24000) ? 0x2C : 0x2D; /*HE@L2 or HE@L3*/ if (cfg->nb_chan <= 5) return (cfg->base_sr <= 48000) ? 0x2E : 0x2F; /*HE@L4 or HE@L5*/ return (cfg->base_sr <= 48000) ? 0x52 : 0x53; /*HE@L6 or HE@L7*/ case 29: /*HE-AACv2 - SBR+PS*/ if (cfg->nb_chan <= 2) return (cfg->base_sr <= 24000) ? 0x30 : 0x31; /*HE-AACv2@L2 or HE-AACv2@L3*/ if (cfg->nb_chan <= 5) return (cfg->base_sr <= 48000) ? 0x32 : 0x33; /*HE-AACv2@L4 or HE-AACv2@L5*/ return (cfg->base_sr <= 48000) ? 0x54 : 0x55; /*HE-AACv2@L6 or HE-AACv2@L7*/ /*default to HQ*/ default: if (cfg->nb_chan <= 2) return (cfg->base_sr < 24000) ? 
0x0E : 0x0F; /*HQ@L1 or HQ@L2*/ return 0x10; /*HQ@L3*/ } } GF_EXPORT GF_Err gf_m4a_parse_program_config_element(GF_BitStream *bs, GF_M4ADecSpecInfo *cfg) { u32 i; cfg->program_config_element_present = 1; cfg->cpe_channels = 0; cfg->element_instance_tag = gf_bs_read_int_log(bs, 4, "element_instance_tag"); cfg->object_type = gf_bs_read_int_log(bs, 2, "object_type"); cfg->sampling_frequency_index = gf_bs_read_int_log(bs, 4, "sampling_frequency_index"); cfg->num_front_channel_elements = gf_bs_read_int_log(bs, 4, "num_front_channel_elements"); cfg->num_side_channel_elements = gf_bs_read_int_log(bs, 4, "num_side_channel_elements"); cfg->num_back_channel_elements = gf_bs_read_int_log(bs, 4, "num_back_channel_elements"); cfg->num_lfe_channel_elements = gf_bs_read_int_log(bs, 2, "num_lfe_channel_elements"); cfg->num_assoc_data_elements = gf_bs_read_int_log(bs, 3, "num_assoc_data_elements"); cfg->num_valid_cc_elements = gf_bs_read_int_log(bs, 4, "num_valid_cc_elements"); cfg->mono_mixdown_present = (Bool)gf_bs_read_int_log(bs, 1, "mono_mixdown_present"); if (cfg->mono_mixdown_present) { cfg->mono_mixdown_element_number = gf_bs_read_int_log(bs, 4, "mono_mixdown_element_number"); } cfg->stereo_mixdown_present = gf_bs_read_int_log(bs, 1, "stereo_mixdown_present"); if (cfg->stereo_mixdown_present) { cfg->stereo_mixdown_element_number = gf_bs_read_int_log(bs, 4, "stereo_mixdown_element_number"); } cfg->matrix_mixdown_idx_present = gf_bs_read_int_log(bs, 1, "matrix_mixdown_idx_present"); if (cfg->matrix_mixdown_idx_present) { cfg->matrix_mixdown_idx = gf_bs_read_int_log(bs, 2, "matrix_mixdown_idx"); cfg->pseudo_surround_enable = gf_bs_read_int_log(bs, 1, "pseudo_surround_enable"); } for (i = 0; i < cfg->num_front_channel_elements; i++) { cfg->front_element_is_cpe[i] = gf_bs_read_int_log_idx(bs, 1, "front_element_is_cpe", i); cfg->front_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "front_element_tag_select", i); if (cfg->front_element_is_cpe[i]) cfg->cpe_channels++; } for (i = 0; i < cfg->num_side_channel_elements; i++) { cfg->side_element_is_cpe[i] = gf_bs_read_int_log_idx(bs, 1, "side_element_is_cpe", i); cfg->side_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "side_element_tag_select", i); if (cfg->side_element_is_cpe[i]) cfg->cpe_channels++; } for (i = 0; i < cfg->num_back_channel_elements; i++) { cfg->back_element_is_cpe[i] = gf_bs_read_int_log_idx(bs, 1, "back_element_is_cpe", i); cfg->back_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "back_element_tag_select", i); if (cfg->back_element_is_cpe[i]) cfg->cpe_channels++; } for (i = 0; i < cfg->num_lfe_channel_elements; i++) { cfg->lfe_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "lfe_element_tag_select", i); } for (i = 0; i < cfg->num_assoc_data_elements; i++) { cfg->assoc_data_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "assoc_data_element_tag_select", i); } for (i = 0; i < cfg->num_valid_cc_elements; i++) { cfg->cc_element_is_ind_sw[i] = gf_bs_read_int_log_idx(bs, 1, "cc_element_is_ind_sw", i); cfg->valid_cc_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "valid_cc_element_tag_select", i); } gf_bs_align(bs); cfg->comment_field_bytes = gf_bs_read_int_log(bs, 8, "comment_field_bytes"); gf_bs_read_data(bs, (char *)cfg->comments, cfg->comment_field_bytes); cfg->nb_chan = cfg->num_front_channel_elements + cfg->num_back_channel_elements + cfg->num_side_channel_elements + cfg->num_lfe_channel_elements; cfg->nb_chan += cfg->cpe_channels; return GF_OK; } GF_EXPORT GF_Err gf_m4a_parse_config(GF_BitStream *bs, 
GF_M4ADecSpecInfo *cfg, Bool size_known) { u32 audio_obj_type; memset(cfg, 0, sizeof(GF_M4ADecSpecInfo)); cfg->base_object_type = gf_bs_read_int_log(bs, 5, "base_object_type"); /*extended object type*/ if (cfg->base_object_type == 31) { cfg->base_object_type = 32 + gf_bs_read_int_log(bs, 6, "extended_base_object_type"); } cfg->base_sr_index = gf_bs_read_int_log(bs, 4, "base_samplerate_index"); if (cfg->base_sr_index == 0x0F) { cfg->base_sr = gf_bs_read_int_log(bs, 24, "base_samplerate"); } else { cfg->base_sr = GF_M4ASampleRates[cfg->base_sr_index]; } cfg->chan_cfg = gf_bs_read_int_log(bs, 4, "channel_configuration"); if (cfg->chan_cfg) { cfg->nb_chan = GF_M4ANumChannels[cfg->chan_cfg - 1]; } audio_obj_type = cfg->base_object_type; if (cfg->base_object_type == 5 || cfg->base_object_type == 29) { if (cfg->base_object_type == 29) { cfg->has_ps = 1; cfg->nb_chan = 1; } cfg->has_sbr = GF_TRUE; cfg->sbr_sr_index = gf_bs_read_int_log(bs, 4, "sbr_samplerate_index"); if (cfg->sbr_sr_index == 0x0F) { cfg->sbr_sr = gf_bs_read_int_log(bs, 24, "sbr_samplerate"); } else { cfg->sbr_sr = GF_M4ASampleRates[cfg->sbr_sr_index]; } cfg->sbr_object_type = gf_bs_read_int_log(bs, 5, "sbr_object_type"); if (cfg->sbr_object_type==31) cfg->sbr_object_type = 32 + gf_bs_read_int_log(bs, 6, "audioObjectTypeExt"); audio_obj_type = cfg->sbr_object_type; if (cfg->sbr_object_type==22) { /*ext_chan_cfg = */gf_bs_read_int_log(bs, 4, "channel_configuration"); } } /*object cfg*/ switch (audio_obj_type) { case 1: case 2: case 3: case 4: case 6: case 7: case 17: case 19: case 20: case 21: case 22: case 23: case 42: { Bool ext_flag; gf_bs_read_int_log(bs, 1, "frame_length_flag"); if (gf_bs_read_int_log(bs, 1, "depends_on_core_coder")) gf_bs_read_int_log(bs, 14, "delay"); ext_flag = gf_bs_read_int_log(bs, 1, "extension_flag"); if (!cfg->chan_cfg) { gf_m4a_parse_program_config_element(bs, cfg); } if ((cfg->base_object_type == 6) || (cfg->base_object_type == 20)) { gf_bs_read_int_log(bs, 3, "layerN"); } if (ext_flag) { if (cfg->base_object_type == 22) { gf_bs_read_int_log(bs, 5, "numOfSubFrame"); gf_bs_read_int_log(bs, 11, "layer_length"); } if ((cfg->base_object_type == 17) || (cfg->base_object_type == 19) || (cfg->base_object_type == 20) || (cfg->base_object_type == 23) ) { gf_bs_read_int_log(bs, 1, "aacSectionDataResilienceFlag"); gf_bs_read_int_log(bs, 1, "aacScalefactorDataResilienceFlag"); gf_bs_read_int_log(bs, 1, "aacSpectralDataResilienceFlag"); } gf_bs_read_int_log(bs, 1, "extensionFlag3"); } } break; } /*ER cfg*/ switch (audio_obj_type) { case 17: case 19: case 20: case 21: case 22: case 23: case 24: case 25: case 26: case 27: { u32 epConfig = gf_bs_read_int_log(bs, 2, "epConfig"); if ((epConfig == 2) || (epConfig == 3)) { } if (epConfig == 3) { gf_bs_read_int_log(bs, 1, "directMapping"); } } break; } if (size_known && (cfg->base_object_type != 5) && (cfg->base_object_type != 29)) { while (gf_bs_available(bs) >= 2) { u32 sync = gf_bs_peek_bits(bs, 11, 0); if (sync == 0x2b7) { gf_bs_read_int_log(bs, 11, "syncExtensionType"); cfg->sbr_object_type = gf_bs_read_int_log(bs, 5, "extensionAudioObjectType "); cfg->has_sbr = gf_bs_read_int_log(bs, 1, "sbrPresentFlag"); if (cfg->has_sbr) { cfg->sbr_sr_index = gf_bs_read_int_log(bs, 4, "extensionSamplingFrequencyIndex"); if (cfg->sbr_sr_index == 0x0F) { cfg->sbr_sr = gf_bs_read_int_log(bs, 24, "extensionSamplingFrequency"); } else { cfg->sbr_sr = GF_M4ASampleRates[cfg->sbr_sr_index]; } } } else if (sync == 0x548) { gf_bs_read_int_log(bs, 11, "syncExtensionType"); cfg->has_ps = 
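/* syncExtensionType 0x548: backward-compatible Parametric Stereo (HE-AAC v2) signalling appended after the core config */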
gf_bs_read_int_log(bs, 1, "hasParametricStereo"); if (cfg->has_ps) cfg->nb_chan = 1; } else { break; } } } cfg->audioPL = gf_m4a_get_profile(cfg); return GF_OK; } GF_EXPORT GF_Err gf_m4a_get_config(u8 *dsi, u32 dsi_size, GF_M4ADecSpecInfo *cfg) { GF_BitStream *bs; if (!dsi || !dsi_size || (dsi_size < 2)) return GF_NON_COMPLIANT_BITSTREAM; bs = gf_bs_new(dsi, dsi_size, GF_BITSTREAM_READ); gf_m4a_parse_config(bs, cfg, GF_TRUE); gf_bs_del(bs); return GF_OK; } u32 gf_latm_get_value(GF_BitStream *bs) { u32 i, tmp, value = 0; u32 bytesForValue = gf_bs_read_int(bs, 2); for (i = 0; i <= bytesForValue; i++) { value <<= 8; tmp = gf_bs_read_int(bs, 8); value += tmp; } return value; } GF_EXPORT u32 gf_m4a_get_channel_cfg(u32 nb_chan) { u32 i, count = sizeof(GF_M4ANumChannels) / sizeof(u32); for (i = 0; i < count; i++) { if (GF_M4ANumChannels[i] == nb_chan) return i + 1; } return 0; } GF_EXPORT GF_Err gf_m4a_write_program_config_element_bs(GF_BitStream *bs, GF_M4ADecSpecInfo *cfg) { u32 i; gf_bs_write_int(bs, cfg->element_instance_tag, 4); gf_bs_write_int(bs, cfg->object_type, 2); gf_bs_write_int(bs, cfg->sampling_frequency_index, 4); gf_bs_write_int(bs, cfg->num_front_channel_elements, 4); gf_bs_write_int(bs, cfg->num_side_channel_elements, 4); gf_bs_write_int(bs, cfg->num_back_channel_elements, 4); gf_bs_write_int(bs, cfg->num_lfe_channel_elements, 2); gf_bs_write_int(bs, cfg->num_assoc_data_elements, 3); gf_bs_write_int(bs, cfg->num_valid_cc_elements, 4); gf_bs_write_int(bs, cfg->mono_mixdown_present, 1); if (cfg->mono_mixdown_present) { gf_bs_write_int(bs, cfg->mono_mixdown_element_number, 4); } gf_bs_write_int(bs, cfg->stereo_mixdown_present, 1); if (cfg->stereo_mixdown_present) { gf_bs_write_int(bs, cfg->stereo_mixdown_element_number, 4); } gf_bs_write_int(bs, cfg->matrix_mixdown_idx_present, 1); if (cfg->matrix_mixdown_idx_present) { gf_bs_write_int(bs, cfg->matrix_mixdown_idx, 2); gf_bs_write_int(bs, cfg->pseudo_surround_enable, 1); } for (i = 0; i < cfg->num_front_channel_elements; i++) { gf_bs_write_int(bs, cfg->front_element_is_cpe[i], 1); gf_bs_write_int(bs, cfg->front_element_tag_select[i], 4); } for (i = 0; i < cfg->num_side_channel_elements; i++) { gf_bs_write_int(bs, cfg->side_element_is_cpe[i], 1); gf_bs_write_int(bs, cfg->side_element_tag_select[i], 4); } for (i = 0; i < cfg->num_back_channel_elements; i++) { gf_bs_write_int(bs, cfg->back_element_is_cpe[i], 1); gf_bs_write_int(bs, cfg->back_element_tag_select[i], 4); } for (i = 0; i < cfg->num_lfe_channel_elements; i++) { gf_bs_write_int(bs, cfg->lfe_element_tag_select[i], 4); } for (i = 0; i < cfg->num_assoc_data_elements; i++) { gf_bs_write_int(bs, cfg->assoc_data_element_tag_select[i], 4); } for (i = 0; i < cfg->num_valid_cc_elements; i++) { gf_bs_write_int(bs, cfg->cc_element_is_ind_sw[i], 1); gf_bs_write_int(bs, cfg->valid_cc_element_tag_select[i], 4); } gf_bs_align(bs); gf_bs_write_int(bs, cfg->comment_field_bytes, 8); gf_bs_write_data(bs, (char *)cfg->comments, cfg->comment_field_bytes); return GF_OK; } GF_EXPORT GF_Err gf_m4a_write_config_bs(GF_BitStream *bs, GF_M4ADecSpecInfo *cfg) { if (!cfg->base_sr_index) { if (!cfg->base_sr) return GF_BAD_PARAM; while (GF_M4ASampleRates[cfg->base_sr_index]) { if (GF_M4ASampleRates[cfg->base_sr_index] == cfg->base_sr) break; cfg->base_sr_index++; } } if (cfg->sbr_sr && !cfg->sbr_sr_index) { while (GF_M4ASampleRates[cfg->sbr_sr_index]) { if (GF_M4ASampleRates[cfg->sbr_sr_index] == cfg->sbr_sr) break; cfg->sbr_sr_index++; } } /*extended object type*/ if (cfg->base_object_type >= 32) { 
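/* audio object types above 31 are written as the escape value 31 on 5 bits followed by (AOT - 32) on 6 bits, mirroring the parse side above */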
gf_bs_write_int(bs, 31, 5); gf_bs_write_int(bs, cfg->base_object_type - 32, 6); } else { gf_bs_write_int(bs, cfg->base_object_type, 5); } gf_bs_write_int(bs, cfg->base_sr_index, 4); if (cfg->base_sr_index == 0x0F) { gf_bs_write_int(bs, cfg->base_sr, 24); } if (cfg->program_config_element_present) { gf_bs_write_int(bs, 0, 4); } else { cfg->chan_cfg = gf_m4a_get_channel_cfg(cfg->nb_chan); if (!cfg->chan_cfg) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AAC] Cannot write decoder config, ProgramConfigElement is missing and channel configuration is not a predefined one !\n")); return GF_BAD_PARAM; } gf_bs_write_int(bs, cfg->chan_cfg, 4); } if (cfg->base_object_type == 5 || cfg->base_object_type == 29) { if (cfg->base_object_type == 29) { cfg->has_ps = 1; cfg->nb_chan = 1; } cfg->has_sbr = 1; gf_bs_write_int(bs, cfg->sbr_sr_index, 4); if (cfg->sbr_sr_index == 0x0F) { gf_bs_write_int(bs, cfg->sbr_sr, 24); } gf_bs_write_int(bs, cfg->sbr_object_type, 5); } /*object cfg*/ switch (cfg->base_object_type) { case 1: case 2: case 3: case 4: case 6: case 7: case 17: case 19: case 20: case 21: case 22: case 23: case 42: { /*frame length flag*/ gf_bs_write_int(bs, 0, 1); /*depends on core coder*/ gf_bs_write_int(bs, 0, 1); /*ext flag*/ gf_bs_write_int(bs, 0, 1); if (cfg->program_config_element_present) { gf_m4a_write_program_config_element_bs(bs, cfg); } if ((cfg->base_object_type == 6) || (cfg->base_object_type == 20)) { gf_bs_write_int(bs, 0, 3); } } break; } /*ER cfg - not supported*/ /*implicit sbr/ps signaling not written here, cf reframe_adts*/ return GF_OK; } GF_EXPORT GF_Err gf_m4a_write_config(GF_M4ADecSpecInfo *cfg, u8 **dsi, u32 *dsi_size) { GF_BitStream *bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_m4a_write_config_bs(bs, cfg); gf_bs_get_content(bs, dsi, dsi_size); gf_bs_del(bs); return GF_OK; } /*AV1 parsing*/ static u32 av1_read_ns(GF_BitStream *bs, u32 n, const char *fname) { u32 v, res; Bool extra_bit; int w = (u32)(log(n) / log(2)) + 1; u32 m = (1 << w) - n; assert(w < 32); v = gf_bs_read_int(bs, w - 1); if (v < m) { if (fname) { gf_bs_log(bs, w-1, fname, v); } return v; } extra_bit = gf_bs_read_int(bs, 1); res = (v << 1) - m + extra_bit; if (fname) { gf_bs_log(bs, w, fname, res); } return res; } static void av1_color_config(GF_BitStream *bs, AV1State *state) { state->config->high_bitdepth = gf_bs_read_int_log(bs, 1, "high_bitdepth"); state->bit_depth = 8; if (state->config->seq_profile == 2 && state->config->high_bitdepth) { state->config->twelve_bit = gf_bs_read_int_log(bs, 1, "twelve_bit"); state->bit_depth = state->config->twelve_bit ? 12 : 10; } else if (state->config->seq_profile <= 2) { state->bit_depth = state->config->high_bitdepth ? 10 : 8; } state->config->monochrome = GF_FALSE; if (state->config->seq_profile == 1) { state->config->monochrome = GF_FALSE; } else { state->config->monochrome = gf_bs_read_int_log(bs, 1, "monochrome"); } /*NumPlanes = mono_chrome ? 
1 : 3;*/ state->color_description_present_flag = gf_bs_read_int_log(bs, 1, "color_description_present_flag"); if (state->color_description_present_flag) { state->color_primaries = gf_bs_read_int_log(bs, 8, "color_primaries"); state->transfer_characteristics = gf_bs_read_int_log(bs, 8, "transfer_characteristics"); state->matrix_coefficients = gf_bs_read_int_log(bs, 8, "matrix_coefficients"); } else { state->color_primaries = 2/*CP_UNSPECIFIED*/; state->transfer_characteristics = 2/*TC_UNSPECIFIED*/; state->matrix_coefficients = 2/*MC_UNSPECIFIED*/; } if (state->config->monochrome) { state->color_range = gf_bs_read_int_log(bs, 1, "color_range"); state->config->chroma_subsampling_x = GF_TRUE; state->config->chroma_subsampling_y = GF_TRUE; state->config->chroma_sample_position = 0/*CSP_UNKNOWN*/; state->separate_uv_delta_q = 0; return; } else if (state->color_primaries == 0/*CP_BT_709*/ && state->transfer_characteristics == 13/*TC_SRGB*/ && state->matrix_coefficients == 0/*MC_IDENTITY*/) { state->color_range = GF_TRUE; state->config->chroma_subsampling_x = GF_FALSE; state->config->chroma_subsampling_y = GF_FALSE; } else { state->config->chroma_subsampling_x = GF_FALSE; state->config->chroma_subsampling_y = GF_FALSE; state->color_range = gf_bs_read_int_log(bs, 1, "color_range"); if (state->config->seq_profile == 0) { state->config->chroma_subsampling_x = GF_TRUE; state->config->chroma_subsampling_y = GF_TRUE; } else if (state->config->seq_profile == 1) { state->config->chroma_subsampling_x = GF_FALSE; state->config->chroma_subsampling_y = GF_FALSE; } else { if (state->bit_depth == 12) { state->config->chroma_subsampling_x = gf_bs_read_int_log(bs, 1, "chroma_subsampling_x"); if (state->config->chroma_subsampling_x) state->config->chroma_subsampling_y = gf_bs_read_int_log(bs, 1, "chroma_subsampling_y"); else state->config->chroma_subsampling_y = GF_FALSE; } else { state->config->chroma_subsampling_x = GF_TRUE; state->config->chroma_subsampling_y = GF_FALSE; } } if (state->config->chroma_subsampling_x && state->config->chroma_subsampling_y) { state->config->chroma_sample_position = gf_bs_read_int_log(bs, 2, "chroma_sample_position"); } } state->separate_uv_delta_q = gf_bs_read_int_log(bs, 1, "separate_uv_delta_q"); } static u32 av1_uvlc(GF_BitStream *bs, const char *fname) { u32 res; u8 leadingZeros = 0; while (1) { Bool done = gf_bs_read_int(bs, 1); if (done) break; leadingZeros++; } if (leadingZeros >= 32) { return 0xFFFFFFFF; } res = gf_bs_read_int(bs, leadingZeros) + (1 << leadingZeros) - 1; gf_bs_log(bs, 2*leadingZeros, fname, res); return res; } static void timing_info(GF_BitStream *bs, AV1State *state) { u32 time_scale = 0; u32 num_units_in_display_tick = gf_bs_read_int_log(bs, 32, "num_units_in_display_tick"); if (num_units_in_display_tick == 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] num_units_in_display_tick must be greater than 0.\n")); } time_scale = gf_bs_read_int_log(bs, 32, "time_scale"); if (time_scale == 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] time_scale must be greater than 0.\n")); } state->equal_picture_interval = gf_bs_read_int_log(bs, 1, "equal_picture_interval"); if (state->equal_picture_interval) { u32 num_ticks_per_picture_minus_1 = av1_uvlc(bs, "num_ticks_per_picture_minus_1"); state->tb_num = time_scale; state->tb_den = (num_ticks_per_picture_minus_1 + 1)*num_units_in_display_tick; } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] VFR not supported.\n")); //TODO: upload num_units_in_display_tick (eq. 
to the POC in H264), compute delta between frames, set it as dts_inc in gf_import_aom_av1() } } static void decoder_model_info(AV1State *state, GF_BitStream *bs) { state->buffer_delay_length = 1 + gf_bs_read_int_log(bs, 5, "buffer_delay_length_minus1"); gf_bs_read_int_log(bs, 32, "num_units_in_decoding_tick"); state->buffer_removal_time_length = gf_bs_read_int_log(bs, 5, "buffer_removal_time_length"); state->frame_presentation_time_length = 1 + gf_bs_read_int_log(bs, 5, "frame_presentation_time_length_minus1"); } static void operating_parameters_info(GF_BitStream *bs, const u8 idx, const u8 buffer_delay_length_minus_1) { const u8 n = buffer_delay_length_minus_1 + 1; gf_bs_read_int_log(bs, n, "decoder_buffer_delay"); gf_bs_read_int_log(bs, n, "encoder_buffer_delay"); gf_bs_read_int_log(bs, 1, "low_delay_mode_flag"); } static void av1_parse_sequence_header_obu(GF_BitStream *bs, AV1State *state) { u8 buffer_delay_length_minus_1 = 0; state->frame_state.seen_seq_header = GF_TRUE; state->config->seq_profile = gf_bs_read_int_log(bs, 3, "seq_profile"); state->still_picture = gf_bs_read_int_log(bs, 1, "still_picture"); state->reduced_still_picture_header = gf_bs_read_int_log(bs, 1, "reduced_still_picture_header"); if (state->reduced_still_picture_header) { //timing_info_present_flag = GF_FALSE; //initial_display_delay_present_flag = GF_FALSE; state->operating_points_count = 1; state->config->seq_level_idx_0 = gf_bs_read_int_log(bs, 5, "seq_level_idx_0"); } else { u8 i = 0; Bool initial_display_delay_present_flag; Bool timing_info_present_flag = gf_bs_read_int_log(bs, 1, "timing_info_present_flag"); if (timing_info_present_flag) { timing_info(bs, state); state->decoder_model_info_present_flag = gf_bs_read_int_log(bs, 1, "decoder_model_info_present_flag"); if (state->decoder_model_info_present_flag) { decoder_model_info(state, bs); } } else { state->decoder_model_info_present_flag = GF_FALSE; } initial_display_delay_present_flag = gf_bs_read_int_log(bs, 1, "initial_display_delay_present_flag"); state->operating_points_count = 1 + gf_bs_read_int_log(bs, 5, "operating_points_count_minus1"); for (i = 0; i < state->operating_points_count; i++) { u8 seq_level_idx_i, seq_tier = 0; state->operating_point_idc[i] = gf_bs_read_int_log_idx(bs, 12, "operating_point_idc", i); seq_level_idx_i = gf_bs_read_int_log_idx(bs, 5, "seq_level_idx", i); if (i == 0) state->config->seq_level_idx_0 = seq_level_idx_i; if (seq_level_idx_i > 7) { seq_tier = gf_bs_read_int_log_idx(bs, 1, "seq_tier", i); } if (i == 0) state->config->seq_tier_0 = seq_tier; if (state->decoder_model_info_present_flag) { state->decoder_model_present_for_this_op[i] = gf_bs_read_int_log_idx(bs, 1, "decoder_model_present_for_this_op", i); if (state->decoder_model_present_for_this_op[i]) { operating_parameters_info(bs, i, buffer_delay_length_minus_1); } } else { state->decoder_model_present_for_this_op[i] = 0; } if (initial_display_delay_present_flag) { if (gf_bs_read_int_log_idx(bs, 1, "initial_display_delay_present_for_this_op", i) ) { gf_bs_read_int_log_idx(bs, 4, "initial_display_delay_minus1", i); } } } } //operatingPoint = av1_choose_operating_point(bs); state->OperatingPointIdc = 0;//TODO: operating_point_idc[operatingPoint]; state->frame_width_bits_minus_1 = gf_bs_read_int_log(bs, 4, "frame_width_bits_minus1"); state->frame_height_bits_minus_1 = gf_bs_read_int_log(bs, 4, "frame_height_bits_minus1"); state->width = gf_bs_read_int_log(bs, state->frame_width_bits_minus_1 + 1, "width_minus1") + 1; state->height = gf_bs_read_int_log(bs, 
state->frame_height_bits_minus_1 + 1, "height_minus1") + 1; state->sequence_width = state->width; state->sequence_height = state->height; state->frame_id_numbers_present_flag = GF_FALSE; if (!state->reduced_still_picture_header) { state->frame_id_numbers_present_flag = gf_bs_read_int_log(bs, 1, "frame_id_numbers_present_flag"); } if (state->frame_id_numbers_present_flag) { state->delta_frame_id_length_minus_2 = gf_bs_read_int_log(bs, 4, "delta_frame_id_length_minus2"); state->additional_frame_id_length_minus_1 = gf_bs_read_int_log(bs, 3, "additional_frame_id_length_minus1"); } state->use_128x128_superblock = gf_bs_read_int_log(bs, 1, "use_128x128_superblock"); gf_bs_read_int_log(bs, 1, "enable_filter_intra"); gf_bs_read_int_log(bs, 1, "enable_intra_edge_filter"); if (state->reduced_still_picture_header) { /*enable_interintra_compound = 0; enable_masked_compound = 0; enable_dual_filter = 0; enable_jnt_comp = 0; enable_ref_frame_mvs = 0;*/ state->enable_warped_motion = 0; state->enable_order_hint = GF_FALSE; state->OrderHintBits = 0; state->seq_force_integer_mv = 2/*SELECT_INTEGER_MV*/; state->seq_force_screen_content_tools = 2/*SELECT_SCREEN_CONTENT_TOOLS*/; } else { Bool seq_choose_screen_content_tools; gf_bs_read_int_log(bs, 1, "enable_interintra_compound"); gf_bs_read_int_log(bs, 1, "enable_masked_compound"); state->enable_warped_motion = gf_bs_read_int_log(bs, 1, "enable_warped_motion"); gf_bs_read_int_log(bs, 1, "enable_dual_filter"); state->enable_order_hint = gf_bs_read_int_log(bs, 1, "enable_order_hint"); if (state->enable_order_hint) { gf_bs_read_int_log(bs, 1, "enable_jnt_comp"); state->enable_ref_frame_mvs = gf_bs_read_int_log(bs, 1, "enable_ref_frame_mvs"); } else { /*enable_jnt_comp = 0*/; /*enable_ref_frame_mvs = 0*/; } seq_choose_screen_content_tools = gf_bs_read_int_log(bs, 1, "seq_choose_screen_content_tools"); state->seq_force_screen_content_tools = 0; if (seq_choose_screen_content_tools) { state->seq_force_screen_content_tools = 2/*SELECT_SCREEN_CONTENT_TOOLS*/; } else { state->seq_force_screen_content_tools = gf_bs_read_int_log(bs, 1, "seq_force_screen_content_tools"); } state->seq_force_integer_mv = 0; if (state->seq_force_screen_content_tools > 0) { const Bool seq_choose_integer_mv = gf_bs_read_int_log(bs, 1, "seq_choose_integer_mv"); if (seq_choose_integer_mv) { state->seq_force_integer_mv = 2/*SELECT_INTEGER_MV*/; } else { state->seq_force_integer_mv = gf_bs_read_int_log(bs, 1, "seq_force_integer_mv"); } } else { state->seq_force_integer_mv = 2/*SELECT_INTEGER_MV*/; } if (state->enable_order_hint) { u8 order_hint_bits_minus_1 = gf_bs_read_int_log(bs, 3, "order_hint_bits_minus1"); state->OrderHintBits = order_hint_bits_minus_1 + 1; } else { state->OrderHintBits = 0; } } state->enable_superres = gf_bs_read_int_log(bs, 1, "enable_superres"); state->enable_cdef = gf_bs_read_int_log(bs, 1, "enable_cdef"); state->enable_restoration = gf_bs_read_int_log(bs, 1, "enable_restoration"); av1_color_config(bs, state); state->film_grain_params_present = gf_bs_read_int_log(bs, 1, "film_grain_params_present"); } #define IVF_FILE_HEADER_SIZE 32 Bool gf_media_probe_ivf(GF_BitStream *bs) { u32 dw = 0; if (gf_bs_available(bs) < IVF_FILE_HEADER_SIZE) return GF_FALSE; dw = gf_bs_peek_bits(bs, 32, 0); if (dw != GF_4CC('D', 'K', 'I', 'F')) { return GF_FALSE; } return GF_TRUE; } GF_Err gf_media_parse_ivf_file_header(GF_BitStream *bs, u32 *width, u32 *height, u32 *codec_fourcc, u32 *timebase_num, u32 *timebase_den, u32 *num_frames) { u32 dw = 0; if (!width || !height || !codec_fourcc || 
!timebase_den || !timebase_num || !num_frames) { assert(0); return GF_BAD_PARAM; } if (gf_bs_available(bs) < IVF_FILE_HEADER_SIZE) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Not enough bytes available ("LLU").\n", gf_bs_available(bs))); return GF_NON_COMPLIANT_BITSTREAM; } dw = gf_bs_read_u32(bs); if (dw != GF_4CC('D', 'K', 'I', 'F')) { GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[IVF] Invalid signature\n")); return GF_NON_COMPLIANT_BITSTREAM; } dw = gf_bs_read_u16_le(bs); if (dw != 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Wrong IVF version. 0 expected, got %u\n", dw)); return GF_NON_COMPLIANT_BITSTREAM; } dw = gf_bs_read_u16_le(bs); //length of header in bytes if (dw != IVF_FILE_HEADER_SIZE) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Wrong IVF header length. Expected 32 bytes, got %u\n", dw)); return GF_NON_COMPLIANT_BITSTREAM; } *codec_fourcc = gf_bs_read_u32(bs); *width = gf_bs_read_u16_le(bs); *height = gf_bs_read_u16_le(bs); *timebase_num = gf_bs_read_u32_le(bs); *timebase_den = gf_bs_read_u32_le(bs); *num_frames = gf_bs_read_u32_le(bs); gf_bs_read_u32_le(bs); //skip unused return GF_OK; } GF_Err gf_media_parse_ivf_frame_header(GF_BitStream *bs, u64 *frame_size, u64 *pts) { if (!frame_size) return GF_BAD_PARAM; *frame_size = gf_bs_read_u32_le(bs); if (*frame_size > 256 * 1024 * 1024) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Wrong frame size %u\n", *frame_size)); *frame_size = 0; return GF_NON_COMPLIANT_BITSTREAM; } *pts = gf_bs_read_u64_le(bs); return GF_OK; } GF_Err gf_media_vp9_parse_superframe(GF_BitStream *bs, u64 ivf_frame_size, u32 *num_frames_in_superframe, u32 frame_sizes[VP9_MAX_FRAMES_IN_SUPERFRAME], u32 *superframe_index_size) { u32 byte, bytes_per_framesize; u64 pos = gf_bs_get_position(bs), i = 0; GF_Err e; assert(bs && num_frames_in_superframe); /*initialize like there is no superframe*/ memset(frame_sizes, 0, VP9_MAX_FRAMES_IN_SUPERFRAME * sizeof(frame_sizes[0])); *num_frames_in_superframe = 1; frame_sizes[0] = (u32)ivf_frame_size; *superframe_index_size = 0; e = gf_bs_seek(bs, pos + ivf_frame_size - 1); if (e) return e; byte = gf_bs_read_u8(bs); if ((byte & 0xe0) != 0xc0) goto exit; /*no superframe*/ bytes_per_framesize = 1 + ((byte & 0x18) >> 3); *num_frames_in_superframe = (u32)(1 + (byte & 0x7)); /*superframe_index()*/ *superframe_index_size = 2 + bytes_per_framesize * *num_frames_in_superframe; gf_bs_seek(bs, pos + ivf_frame_size - *superframe_index_size); byte = gf_bs_read_u8(bs); if ((byte & 0xe0) != 0xc0) goto exit; /*no superframe*/ frame_sizes[0] = 0; for (i = 0; i < *num_frames_in_superframe; ++i) { gf_bs_read_data(bs, (char*)(frame_sizes + i), bytes_per_framesize); } exit: gf_bs_seek(bs, pos); return e; } static Bool vp9_frame_sync_code(GF_BitStream *bs) { u8 val = gf_bs_read_int_log(bs, 8, "syncbyte1"); if (val != 0x49) return GF_FALSE; val = gf_bs_read_int_log(bs, 8, "syncbyte2"); if (val != 0x83) return GF_FALSE; val = gf_bs_read_int_log(bs, 8, "syncbyte3"); if (val != 0x42) return GF_FALSE; return GF_TRUE; } typedef enum { CS_UNKNOWN = 0, CS_BT_601 = 1, CS_BT_709 = 2, CS_SMPTE_170 = 3, CS_SMPTE_240 = 4, CS_BT_2020 = 5, CS_RESERVED = 6, CS_RGB = 7, } VP9_color_space; static const int VP9_CS_to_23001_8_colour_primaries[] = { -1/*undefined*/, 5, 1, 6, 7, 9, -1/*reserved*/, 1 }; static const int VP9_CS_to_23001_8_transfer_characteristics[] = { -1/*undefined*/, 5, 1, 6, 7, 9, -1/*reserved*/, 13 }; static const int VP9_CS_to_23001_8_matrix_coefficients[] = { -1/*undefined*/, 6, 1, -1, -1, 9, -1/*reserved*/, 0 }; static GF_Err 
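/* maps the VP9 color_space / subsampling syntax onto the ISO/IEC 23001-8 code points stored in the vpcC configuration */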
vp9_color_config(GF_BitStream *bs, GF_VPConfig *vp9_cfg) { VP9_color_space color_space; if (vp9_cfg->profile >= 2) { Bool ten_or_twelve_bit = gf_bs_read_int_log(bs, 1, "ten_or_twelve_bit"); vp9_cfg->bit_depth = ten_or_twelve_bit ? 12 : 10; } else { vp9_cfg->bit_depth = 8; } color_space = gf_bs_read_int_log(bs, 3, "color_space"); vp9_cfg->colour_primaries = VP9_CS_to_23001_8_colour_primaries[color_space]; vp9_cfg->transfer_characteristics = VP9_CS_to_23001_8_transfer_characteristics[color_space]; vp9_cfg->matrix_coefficients = VP9_CS_to_23001_8_matrix_coefficients[color_space]; if (color_space != CS_RGB) { vp9_cfg->video_fullRange_flag = gf_bs_read_int_log(bs, 1, "video_fullRange_flag"); if (vp9_cfg->profile == 1 || vp9_cfg->profile == 3) { u8 subsampling_x, subsampling_y, subsampling_xy_to_chroma_subsampling[2][2] = { {3, 0}, {2, 0} }; subsampling_x = gf_bs_read_int_log(bs, 1, "subsampling_x"); subsampling_y = gf_bs_read_int_log(bs, 1, "subsampling_x"); vp9_cfg->chroma_subsampling = subsampling_xy_to_chroma_subsampling[subsampling_x][subsampling_y]; Bool reserved_zero = gf_bs_read_int_log(bs, 1, "reserved_zero"); if (reserved_zero) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VP9] color config reserved zero (1) is not zero.\n")); return GF_NON_COMPLIANT_BITSTREAM; } } else { vp9_cfg->chroma_subsampling = 0; } } else { vp9_cfg->video_fullRange_flag = GF_TRUE; if (vp9_cfg->profile == 1 || vp9_cfg->profile == 3) { vp9_cfg->chroma_subsampling = 3; Bool reserved_zero = gf_bs_read_int_log(bs, 1, "reserved_zero"); if (reserved_zero) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VP9] color config reserved zero (2) is not zero.\n")); return GF_NON_COMPLIANT_BITSTREAM; } } } return GF_OK; } static void vp9_compute_image_size(int FrameWidth, int FrameHeight, int *Sb64Cols, int *Sb64Rows) { int MiCols = (FrameWidth + 7) >> 3; int MiRows = (FrameHeight + 7) >> 3; *Sb64Cols = (MiCols + 7) >> 3; *Sb64Rows = (MiRows + 7) >> 3; } static void vp9_frame_size(GF_BitStream *bs, int *FrameWidth, int *FrameHeight, int *Sb64Cols, int *Sb64Rows) { int frame_width_minus_1 = gf_bs_read_int_log(bs, 16, "frame_width_minus_1"); int frame_height_minus_1 = gf_bs_read_int_log(bs, 16, "frame_height_minus_1"); if (frame_width_minus_1 + 1 != *FrameWidth || frame_height_minus_1 + 1 != *FrameHeight) { if (*FrameWidth || *FrameHeight) GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[VP9] inconsistent frame dimensions: previous was %dx%d, new one is %dx%d.\n", *FrameWidth, *FrameHeight, frame_width_minus_1 + 1, frame_height_minus_1 + 1)); } *FrameWidth = frame_width_minus_1 + 1; *FrameHeight = frame_height_minus_1 + 1; vp9_compute_image_size(*FrameWidth, *FrameHeight, Sb64Cols, Sb64Rows); } static void vp9_render_size(GF_BitStream *bs, int FrameWidth, int FrameHeight, int *renderWidth, int *renderHeight) { Bool render_and_frame_size_different = gf_bs_read_int_log(bs, 1, "render_and_frame_size_different"); if (render_and_frame_size_different == 1) { int render_width_minus_1 = gf_bs_read_int_log(bs, 16, "render_width_minus_1"); int render_height_minus_1 = gf_bs_read_int_log(bs, 16, "render_height_minus_1"); *renderWidth = render_width_minus_1 + 1; *renderHeight = render_height_minus_1 + 1; } else { *renderWidth = FrameWidth; *renderHeight = FrameHeight; } } static s64 vp9_s(GF_BitStream *bs, int n, const char *fname, u32 idx) { s64 value = gf_bs_read_int(bs, n); Bool sign = gf_bs_read_int(bs, 1); if (sign) value = -value; gf_bs_log_idx(bs, n+1, fname, value, idx, -1, -1); return value; } static void vp9_loop_filter_params(GF_BitStream *bs) { 
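/* loop filter level, sharpness and the optional per-ref / per-mode delta updates are parsed only to keep the bit position in sync; the values are discarded */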
/*loop_filter_level = */gf_bs_read_int_log(bs, 6, "loop_filter_level"); /*loop_filter_sharpness = */gf_bs_read_int_log(bs, 3, "loop_filter_sharpness"); Bool loop_filter_delta_enabled = gf_bs_read_int_log(bs, 1, "loop_filter_delta_enabled"); if (loop_filter_delta_enabled == 1) { Bool loop_filter_delta_update = gf_bs_read_int_log(bs, 1, "loop_filter_delta_update"); if (loop_filter_delta_update == GF_TRUE) { int i; for (i = 0; i < 4; i++) { Bool update_ref_delta = gf_bs_read_int_log_idx(bs, 1, "update_ref_delta", i); if (update_ref_delta == GF_TRUE) vp9_s(bs, 6, "loop_filter_ref_deltas", i); } for (i = 0; i < 2; i++) { Bool update_mode_delta = gf_bs_read_int_log_idx(bs, 1, "update_mode_delta", i); if (update_mode_delta == GF_TRUE) vp9_s(bs, 6, "loop_filter_mode_deltas", i); } } } } static void vp9_quantization_params(GF_BitStream *bs) { /*base_q_idx = */gf_bs_read_int_log(bs, 8, "base_q_idx"); } #define VP9_MAX_SEGMENTS 8 #define VP9_SEG_LVL_MAX 4 static const int segmentation_feature_bits[VP9_SEG_LVL_MAX] = { 8, 6, 2, 0 }; static const int segmentation_feature_signed[VP9_SEG_LVL_MAX] = { 1, 1, 0, 0 }; #define VP9_MIN_TILE_WIDTH_B64 4 #define VP9_MAX_TILE_WIDTH_B64 64 static void vp9_segmentation_params(GF_BitStream *bs) { Bool segmentation_enabled = gf_bs_read_int_log(bs, 1, "segmentation_enabled"); if (segmentation_enabled == 1) { int i; Bool segmentation_update_map = gf_bs_read_int_log(bs, 1, "segmentation_update_map"); if (segmentation_update_map) { for (i = 0; i < 7; i++) /*segmentation_tree_probs[i] = read_prob()*/ /*segmentation_temporal_update = */gf_bs_read_int_log(bs, 1, "segmentation_temporal_update"); /*for (i = 0; i < 3; i++) segmentation_pred_prob[i] = segmentation_temporal_update ? read_prob() : 255*/ } Bool segmentation_update_data = gf_bs_read_int_log(bs, 1, "segmentation_update_data"); if (segmentation_update_data == 1) { /*segmentation_abs_or_delta_update =*/ gf_bs_read_int_log(bs, 1, "segmentation_abs_or_delta_update"); for (i = 0; i < VP9_MAX_SEGMENTS; i++) { int j; for (j = 0; j < VP9_SEG_LVL_MAX; j++) { /*feature_value = 0*/ Bool feature_enabled = gf_bs_read_int_log(bs, 1, "feature_enabled"); /*FeatureEnabled[i][j] = feature_enabled*/ if (feature_enabled) { int bits_to_read = segmentation_feature_bits[j]; /*feature_value =*/ gf_bs_read_int_log(bs, bits_to_read, "feature_value"); if (segmentation_feature_signed[j] == 1) { /*Bool feature_sign = */gf_bs_read_int_log(bs, 1, "feature_sign"); /*if (feature_sign == 1) feature_value *= -1*/ } } /*FeatureData[i][j] = feature_value*/ } } } } } static int calc_min_log2_tile_cols(int Sb64Cols) { int minLog2 = 0; while ((VP9_MAX_TILE_WIDTH_B64 << minLog2) < Sb64Cols) minLog2++; return minLog2; } static int calc_max_log2_tile_cols(int Sb64Cols) { int maxLog2 = 1; while ((Sb64Cols >> maxLog2) >= VP9_MIN_TILE_WIDTH_B64) maxLog2++; return maxLog2 - 1; } static void vp9_tile_info(GF_BitStream *bs, int Sb64Cols) { Bool tile_rows_log2; int minLog2TileCols = calc_min_log2_tile_cols(Sb64Cols); int maxLog2TileCols = calc_max_log2_tile_cols(Sb64Cols); int tile_cols_log2 = minLog2TileCols; while (tile_cols_log2 < maxLog2TileCols) { Bool increment_tile_cols_log2 = gf_bs_read_int_log(bs, 1, "increment_tile_cols_log2"); if (increment_tile_cols_log2) tile_cols_log2++; else break; } tile_rows_log2 = gf_bs_read_int_log(bs, 1, "tile_rows_log2"); if (tile_rows_log2) { /*Bool increment_tile_rows_log2 = */gf_bs_read_int_log(bs, 1, "increment_tile_rows_log2"); //tile_rows_log2 += increment_tile_rows_log2; } } static void 
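/* for inter frames the size may be inherited from one of the three reference frames (found_ref); otherwise it is coded explicitly, then the render size follows */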
vp9_frame_size_with_refs(GF_BitStream *bs, u8 refresh_frame_flags, u8 * ref_frame_idx, int * RefFrameWidth, int *RefFrameHeight, int *FrameWidth, int *FrameHeight, int *RenderWidth, int *RenderHeight, int *Sb64Cols, int *Sb64Rows) { Bool found_ref; int i; for (i = 0; i < 3; i++) { found_ref = gf_bs_read_int_log(bs, 1, "found_ref"); if (found_ref) { *FrameWidth = RefFrameWidth [ref_frame_idx[i]]; *FrameHeight = RefFrameHeight[ref_frame_idx[i]]; break; } } if (found_ref == 0) { vp9_frame_size(bs, FrameWidth, FrameHeight, Sb64Cols, Sb64Rows); } else { vp9_compute_image_size(*FrameWidth, *FrameHeight, Sb64Cols, Sb64Rows); } vp9_render_size(bs, *FrameWidth, *FrameHeight, RenderWidth, RenderHeight); } static void vp9_read_interpolation_filter(GF_BitStream *bs) { Bool is_filter_switchable = gf_bs_read_int_log(bs, 1, "is_filter_switchable"); if (!is_filter_switchable) { /*raw_interpolation_filter = */gf_bs_read_int_log(bs, 2, "raw_interpolation_filter"); } } #define VP9_KEY_FRAME 0 GF_Err gf_media_vp9_parse_sample(GF_BitStream *bs, GF_VPConfig *vp9_cfg, Bool *key_frame, u32 *FrameWidth, u32 *FrameHeight, u32 *renderWidth, u32 *renderHeight) { Bool FrameIsIntra = GF_FALSE, profile_low_bit, profile_high_bit, show_existing_frame = GF_FALSE, frame_type = GF_FALSE, show_frame = GF_FALSE, error_resilient_mode = GF_FALSE; /*u8 frame_context_idx = 0, reset_frame_context = 0, frame_marker = 0*/; int Sb64Cols = 0, Sb64Rows = 0, i; u8 refresh_frame_flags = 0; assert(bs && key_frame); /*uncompressed header*/ /*frame_marker = */gf_bs_read_int_log(bs, 2, "frame_marker"); profile_low_bit = gf_bs_read_int_log(bs, 1, "profile_low_bit"); profile_high_bit = gf_bs_read_int_log(bs, 1, "profile_high_bit"); vp9_cfg->profile = (profile_high_bit << 1) + profile_low_bit; if (vp9_cfg->profile == 3) { Bool reserved_zero = gf_bs_read_int_log(bs, 1, "reserved_zero"); if (reserved_zero) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VP9] uncompressed header reserved zero is not zero.\n")); return GF_NON_COMPLIANT_BITSTREAM; } } show_existing_frame = gf_bs_read_int_log(bs, 1, "show_existing_frame"); if (show_existing_frame == GF_TRUE) { /*frame_to_show_map_idx = */gf_bs_read_int_log(bs, 3, "frame_to_show_map_idx"); return GF_OK; } frame_type = gf_bs_read_int_log(bs, 1, "frame_type"); show_frame = gf_bs_read_int_log(bs, 1, "show_frame"); error_resilient_mode = gf_bs_read_int_log(bs, 1, "error_resilient_mode"); if (frame_type == VP9_KEY_FRAME) { if (!vp9_frame_sync_code(bs)) return GF_NON_COMPLIANT_BITSTREAM; if (vp9_color_config(bs, vp9_cfg) != GF_OK) return GF_NON_COMPLIANT_BITSTREAM; vp9_frame_size(bs, FrameWidth, FrameHeight, &Sb64Cols, &Sb64Rows); vp9_render_size(bs, *FrameWidth, *FrameHeight, renderWidth, renderHeight); refresh_frame_flags = 0xFF; *key_frame = GF_TRUE; FrameIsIntra = GF_TRUE; } else { Bool intra_only = GF_FALSE; *key_frame = GF_FALSE; if (show_frame == GF_FALSE) { intra_only = gf_bs_read_int_log(bs, 1, "intra_only"); } FrameIsIntra = intra_only; if (error_resilient_mode == GF_FALSE) { /*reset_frame_context = */gf_bs_read_int_log(bs, 2, "reset_frame_context"); } if (intra_only == GF_TRUE) { if (!vp9_frame_sync_code(bs)) return GF_NON_COMPLIANT_BITSTREAM; if (vp9_cfg->profile > 0) { if (vp9_color_config(bs, vp9_cfg) != GF_OK) return GF_NON_COMPLIANT_BITSTREAM; } else { u8 color_space = CS_BT_601; vp9_cfg->colour_primaries = VP9_CS_to_23001_8_colour_primaries[color_space]; vp9_cfg->transfer_characteristics = VP9_CS_to_23001_8_transfer_characteristics[color_space]; vp9_cfg->matrix_coefficients = 
VP9_CS_to_23001_8_matrix_coefficients[color_space]; vp9_cfg->chroma_subsampling = 0; vp9_cfg->bit_depth = 8; } refresh_frame_flags = gf_bs_read_int_log(bs, 8, "refresh_frame_flags"); vp9_frame_size(bs, FrameWidth, FrameHeight, &Sb64Cols, &Sb64Rows); vp9_render_size(bs, *FrameWidth, *FrameHeight, renderWidth, renderHeight); } else { refresh_frame_flags = gf_bs_read_int_log(bs, 8, "refresh_frame_flags"); u8 ref_frame_idx[3]; for (i = 0; i < 3; i++) { ref_frame_idx[i] = gf_bs_read_int_log_idx(bs, 3, "ref_frame_idx", i); /*ref_frame_sign_bias[LAST_FRAME + i] = */gf_bs_read_int_log_idx(bs, 1, "ref_frame_sign_bias", i); } vp9_frame_size_with_refs(bs, refresh_frame_flags, ref_frame_idx, vp9_cfg->RefFrameWidth, vp9_cfg->RefFrameHeight, FrameWidth, FrameHeight, renderWidth, renderHeight, &Sb64Cols, &Sb64Rows); /*allow_high_precision_mv = */gf_bs_read_int_log(bs, 1, "allow_high_precision_mv"); vp9_read_interpolation_filter(bs); } } if (error_resilient_mode == 0) { /*refresh_frame_context = */gf_bs_read_int_log(bs, 1, "refresh_frame_context"); /*frame_parallel_decoding_mode = */gf_bs_read_int_log(bs, 1, "frame_parallel_decoding_mode"); } /*frame_context_idx = */gf_bs_read_int_log(bs, 2, "frame_context_idx"); if (FrameIsIntra || error_resilient_mode) { /*setup_past_independence + save_probs ...*/ //frame_context_idx = 0; } vp9_loop_filter_params(bs); vp9_quantization_params(bs); vp9_segmentation_params(bs); vp9_tile_info(bs, Sb64Cols); /*header_size_in_bytes = */gf_bs_read_int_log(bs, 16, "header_size_in_bytes"); /*Reference frame update process (8.10 - partial)*/ for (i = 0; i < VP9_NUM_REF_FRAMES; i++) { if ((refresh_frame_flags >> i) & 1) { vp9_cfg->RefFrameWidth[i] = *FrameWidth; vp9_cfg->RefFrameHeight[i] = *FrameHeight; } } return GF_OK; } GF_Err gf_av1_parse_obu_header(GF_BitStream *bs, ObuType *obu_type, Bool *obu_extension_flag, Bool *obu_has_size_field, u8 *temporal_id, u8 *spatial_id) { Bool forbidden = gf_bs_read_int(bs, 1); if (forbidden) { return GF_NON_COMPLIANT_BITSTREAM; } *obu_type = gf_bs_read_int(bs, 4); *obu_extension_flag = gf_bs_read_int(bs, 1); *obu_has_size_field = gf_bs_read_int(bs, 1); if (gf_bs_read_int(bs, 1) /*obu_reserved_1bit*/) { return GF_NON_COMPLIANT_BITSTREAM; } if (*obu_extension_flag) { *temporal_id = gf_bs_read_int(bs, 3); *spatial_id = gf_bs_read_int(bs, 2); /*extension_header_reserved_3bits = */gf_bs_read_int(bs, 3); } return GF_OK; } #endif // GPAC_DISABLE_AV_PARSERS GF_EXPORT const char *gf_av1_get_obu_name(ObuType obu_type) { switch (obu_type) { case OBU_SEQUENCE_HEADER: return "seq_header"; case OBU_TEMPORAL_DELIMITER: return "delimiter"; case OBU_FRAME_HEADER: return "frame_header"; case OBU_TILE_GROUP: return "tile_group"; case OBU_METADATA: return "metadata"; case OBU_FRAME: return "frame"; case OBU_REDUNDANT_FRAME_HEADER: return "redundant_frame_header"; case OBU_TILE_LIST: return "tile_list"; case OBU_PADDING: return "padding"; case OBU_RESERVED_0: case OBU_RESERVED_9: case OBU_RESERVED_10: case OBU_RESERVED_11: case OBU_RESERVED_12: case OBU_RESERVED_13: case OBU_RESERVED_14: return "reserved"; default: return "unknown"; } } Bool av1_is_obu_header(ObuType obu_type) { switch (obu_type) { case OBU_SEQUENCE_HEADER: case OBU_METADATA: // TODO add check based on the metadata type return GF_TRUE; default: return GF_FALSE; } } #ifndef GPAC_DISABLE_AV_PARSERS static Bool av1_is_obu_frame(AV1State *state, ObuType obu_type) { switch (obu_type) { case OBU_PADDING: case OBU_REDUNDANT_FRAME_HEADER: return GF_FALSE; case OBU_TEMPORAL_DELIMITER: return 
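/* temporal delimiter OBUs are only kept when the caller explicitly asked for them */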
state->keep_temporal_delim ? GF_TRUE : GF_FALSE; default: return GF_TRUE; } } u64 gf_av1_leb128_read(GF_BitStream *bs, u8 *opt_Leb128Bytes) { u64 value = 0; u8 Leb128Bytes = 0, i = 0; for (i = 0; i < 8; i++) { u8 leb128_byte = gf_bs_read_u8(bs); value |= ( ((u64) (leb128_byte & 0x7f)) << (i * 7)); Leb128Bytes += 1; if (!(leb128_byte & 0x80)) { break; } } if (opt_Leb128Bytes) { *opt_Leb128Bytes = Leb128Bytes; } return value; } u32 gf_av1_leb128_size(u64 value) { u32 gf_av1_leb128_size = 0; do { ++gf_av1_leb128_size; } while ((value >>= 7) != 0); return gf_av1_leb128_size; } u64 gf_av1_leb128_write(GF_BitStream *bs, u64 value) { u32 i, leb_size = gf_av1_leb128_size(value); for (i = 0; i < leb_size; ++i) { u8 byte = value & 0x7f; value >>= 7; if (value != 0) byte |= 0x80; //more bytes follow gf_bs_write_u8(bs, byte); } return leb_size; } #define OBU_BLOCK_SIZE 4096 static void av1_add_obu_internal(GF_BitStream *bs, u64 pos, u64 obu_length, ObuType obu_type, GF_List **obu_list, AV1State *state) { char block[OBU_BLOCK_SIZE]; Bool has_size_field = 0, obu_extension_flag = 0; u8 temporal_id, spatial_id; GF_AV1_OBUArrayEntry *a = NULL; if (state && state->mem_mode) { if (!state->bs) state->bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); else gf_bs_reassign_buffer(state->bs, state->frame_obus, state->frame_obus_alloc); } else { GF_SAFEALLOC(a, GF_AV1_OBUArrayEntry); if (!a) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[AV1] Failed to allocate OBU\n")); return; } } gf_bs_seek(bs, pos); gf_av1_parse_obu_header(bs, &obu_type, &obu_extension_flag, &has_size_field, &temporal_id, &spatial_id); gf_bs_seek(bs, pos); if (has_size_field) { if (a) { a->obu = gf_malloc((size_t)obu_length); gf_bs_read_data(bs, a->obu, (u32)obu_length); a->obu_length = obu_length; } else { u32 remain = (u32)obu_length; while (remain) { u32 block_size = OBU_BLOCK_SIZE; if (block_size > remain) block_size = remain; gf_bs_read_data(bs, block, block_size); gf_bs_write_data(state->bs, block, block_size); remain -= block_size; } return; } } else { u8 i, hdr_size = obu_extension_flag ? 
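/* the OBU header is 1 byte, plus 1 extension byte when temporal/spatial ids are present */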
2 : 1; const u32 leb_size = (u32)gf_av1_leb128_size(obu_length); const u64 obu_size = obu_length - hdr_size; if (a) { a->obu = gf_malloc((size_t)obu_length + leb_size); a->obu_length = obu_length + leb_size; for (i = 0; i < hdr_size; ++i) { a->obu[i] = gf_bs_read_u8(bs); /*add size field flag*/ if (i == 0) a->obu[0] |= 0x02; } { u32 out_size = 0; u8 *output = NULL; GF_BitStream *bsLeb128 = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); /*write size field*/ gf_av1_leb128_write(bsLeb128, obu_size); assert(gf_bs_get_position(bsLeb128) == leb_size); gf_bs_get_content(bsLeb128, &output, &out_size); gf_bs_del(bsLeb128); memcpy(a->obu + hdr_size, output, out_size); gf_free(output); } gf_bs_read_data(bs, a->obu + hdr_size + leb_size, (u32)(obu_size)); assert(gf_bs_get_position(bs) == pos + obu_length); } else { u32 remain; for (i = 0; i < hdr_size; ++i) { u8 hdr_b = gf_bs_read_u8(bs); if (i == 0) hdr_b |= 0x02; /*add size field flag*/ gf_bs_write_u8(state->bs, hdr_b); } /*add size field */ gf_av1_leb128_write(state->bs, obu_size); remain = (u32)obu_length - hdr_size; while (remain) { u32 block_size = OBU_BLOCK_SIZE; if (block_size > remain) block_size = remain; gf_bs_read_data(bs, block, block_size); gf_bs_write_data(state->bs, block, block_size); remain -= block_size; } assert(gf_bs_get_position(bs) == pos + obu_length); return; } } if (!obu_list) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[AV1] internal error, no OBU list cannot add\n")); gf_free(a->obu); gf_free(a); return; } a->obu_type = obu_type; if (! *obu_list) *obu_list = gf_list_new(); gf_list_add(*obu_list, a); } static void av1_populate_state_from_obu(GF_BitStream *bs, u64 pos, u64 obu_length, ObuType obu_type, AV1State *state) { if (av1_is_obu_header(obu_type)) { av1_add_obu_internal(bs, pos, obu_length, obu_type, &state->frame_state.header_obus, NULL); } if (!state->skip_frames && av1_is_obu_frame(state, obu_type)) { if (!state->mem_mode) { av1_add_obu_internal(bs, pos, obu_length, obu_type, &state->frame_state.frame_obus, NULL); } else { av1_add_obu_internal(bs, pos, obu_length, obu_type, NULL, state); } } } GF_Err aom_av1_parse_temporal_unit_from_section5(GF_BitStream *bs, AV1State *state) { if (!state) return GF_BAD_PARAM; state->obu_type = -1; while (state->obu_type != OBU_TEMPORAL_DELIMITER) { GF_Err e; if (!gf_bs_available(bs)) return state->unframed ? 
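/* in unframed mode, running out of data before the temporal delimiter means more input is needed */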
GF_BUFFER_TOO_SMALL : GF_OK; u64 pos = gf_bs_get_position(bs), obu_length = 0; e = gf_av1_parse_obu(bs, &state->obu_type, &obu_length, NULL, state); if (e) return e; if (obu_length != gf_bs_get_position(bs) - pos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] OBU (Section 5) frame size "LLU" different from consumed bytes "LLU".\n", obu_length, gf_bs_get_position(bs) - pos)); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Section5 OBU detected (size "LLU")\n", obu_length)); av1_populate_state_from_obu(bs, pos, obu_length, state->obu_type, state); } return GF_OK; } Bool gf_media_aom_probe_annexb(GF_BitStream *bs) { Bool res = GF_TRUE; u64 init_pos = gf_bs_get_position(bs); u64 sz = gf_av1_leb128_read(bs, NULL); if (!sz) res = GF_FALSE; while (sz > 0) { u8 Leb128Bytes = 0; u64 frame_unit_size = gf_av1_leb128_read(bs, &Leb128Bytes); if (!frame_unit_size) { res = GF_FALSE; break; } if (sz < Leb128Bytes + frame_unit_size) { res = GF_FALSE; break; } sz -= Leb128Bytes + frame_unit_size; while (frame_unit_size > 0) { ObuType obu_type; u64 pos, obu_length = gf_av1_leb128_read(bs, &Leb128Bytes); if (frame_unit_size < Leb128Bytes + obu_length) { res = GF_FALSE; break; } pos = gf_bs_get_position(bs); frame_unit_size -= Leb128Bytes; u8 tid, sid; Bool extflag, has_size; GF_Err e = gf_av1_parse_obu_header(bs, &obu_type, &extflag, &has_size, &tid, &sid); if (e) { res = GF_FALSE; break; } if (has_size) { obu_length = (u32)gf_av1_leb128_read(bs, NULL); } else { if (obu_length >= 1 + extflag) { obu_length = obu_length - 1 - extflag; } else { res = GF_FALSE; break; } } u32 hdr_size = (u32)(gf_bs_get_position(bs) - pos); obu_length += hdr_size; if (frame_unit_size < obu_length) { res = GF_FALSE; break; } frame_unit_size -= obu_length; gf_bs_skip_bytes(bs, obu_length - hdr_size); } if (!res) break; } gf_bs_seek(bs, init_pos); return res; } GF_Err aom_av1_parse_temporal_unit_from_annexb(GF_BitStream *bs, AV1State *state) { GF_Err e; u64 tupos; u64 tusize, sz; if (!bs || !state) return GF_BAD_PARAM; state->bs_overread = GF_FALSE; tusize = sz = gf_av1_leb128_read(bs, NULL); tupos = gf_bs_get_position(bs); if (!sz) { GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[AV1] temporal unit size is 0, likely not annex B\n")); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Annex B temporal unit detected (size "LLU") ***** \n", sz)); while (sz > 0) { u8 Leb128Bytes = 0; u64 frame_unit_size = gf_av1_leb128_read(bs, &Leb128Bytes); if (state->bs_overread) { return GF_BUFFER_TOO_SMALL; } if (sz < Leb128Bytes + frame_unit_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Annex B sz("LLU") < Leb128Bytes("LLU") + frame_unit_size("LLU")\n", sz, Leb128Bytes, frame_unit_size)); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Annex B frame unit detected (size "LLU")\n", frame_unit_size)); sz -= Leb128Bytes + frame_unit_size; while (frame_unit_size > 0) { u64 pos, obu_length = gf_av1_leb128_read(bs, &Leb128Bytes); if (state->bs_overread) { return GF_BUFFER_TOO_SMALL; } if (frame_unit_size < Leb128Bytes + obu_length) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Annex B frame_unit_size("LLU") < Leb128Bytes("LLU") + obu_length("LLU")\n", frame_unit_size, Leb128Bytes, obu_length)); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Annex B OBU detected (size "LLU")\n", obu_length)); pos = gf_bs_get_position(bs); frame_unit_size -= Leb128Bytes; e = gf_av1_parse_obu(bs, &state->obu_type, &obu_length, 
NULL, state); if (e) return e; if (obu_length != gf_bs_get_position(bs) - pos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] Annex B frame size "LLU" different from consumed bytes "LLU".\n", obu_length, gf_bs_get_position(bs) - pos)); return GF_NON_COMPLIANT_BITSTREAM; } av1_populate_state_from_obu(bs, pos, obu_length, state->obu_type, state); if (frame_unit_size < obu_length) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Annex B frame_unit_size("LLU") < OBU size ("LLU")\n", frame_unit_size, obu_length)); return GF_NON_COMPLIANT_BITSTREAM; } frame_unit_size -= obu_length; } } assert(sz == 0); if (tusize != gf_bs_get_position(bs) - tupos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] Annex B TU size "LLU" different from consumed bytes "LLU".\n", tusize, gf_bs_get_position(bs) - tupos)); return GF_NON_COMPLIANT_BITSTREAM; } return GF_OK; } GF_Err aom_av1_parse_temporal_unit_from_ivf(GF_BitStream *bs, AV1State *state) { u64 frame_size, pts_ignored; GF_Err e; if (gf_bs_available(bs)<12) return GF_EOS; e = gf_media_parse_ivf_frame_header(bs, &frame_size, &pts_ignored); if (e) return e; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] IVF frame detected (size "LLU")\n", frame_size)); if (gf_bs_available(bs) < frame_size) return GF_EOS; while (frame_size > 0) { u64 obu_size = 0, pos = gf_bs_get_position(bs); e = gf_av1_parse_obu(bs, &state->obu_type, &obu_size, NULL, state); if (e != GF_OK) return e; if (obu_size != gf_bs_get_position(bs) - pos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] IVF frame size "LLU" different from consumed bytes "LLU".\n", obu_size, gf_bs_get_position(bs) - pos)); return GF_NON_COMPLIANT_BITSTREAM; } av1_populate_state_from_obu(bs, pos, obu_size, state->obu_type, state); frame_size -= obu_size; } return GF_OK; } #define AV1_NUM_REF_FRAMES 8 #define AV1_ALL_FRAMES ((1 << AV1_NUM_REF_FRAMES) - 1) #define AV1_SUPERRES_DENOM_MIN 9 #define AV1_SUPERRES_DENOM_BITS 3 #define AV1_SUPERRES_NUM 8 #define AV1_REFS_PER_FRAME 7 #define AV1_PRIMARY_REF_NONE 7 #define MAX_TILE_WIDTH 4096 #define MAX_TILE_AREA (4096 * 2304) static u32 aom_av1_tile_log2(u32 blkSize, u32 target) { u32 k; for (k = 0; (blkSize << k) < target; k++) { } return k; } static u64 aom_av1_le(GF_BitStream *bs, u32 n, const char *name) { u32 i = 0; u64 t = 0; for (i = 0; i < n; i++) { u8 byte = gf_bs_read_int(bs, 8); t += (byte << (i * 8)); } gf_bs_log(bs, n*8, name, t); return t; } static void av1_parse_tile_info(GF_BitStream *bs, AV1State *state) { u32 i; u32 MiCols = 2 * ((state->width + 7) >> 3); u32 MiRows = 2 * ((state->height + 7) >> 3); u32 sbCols = state->use_128x128_superblock ? ((MiCols + 31) >> 5) : ((MiCols + 15) >> 4); u32 sbRows = state->use_128x128_superblock ? ((MiRows + 31) >> 5) : ((MiRows + 15) >> 4); u32 sbShift = state->use_128x128_superblock ? 
5 : 4; u32 sbSize = sbShift + 2; u32 maxTileWidthSb = MAX_TILE_WIDTH >> sbSize; u32 maxTileAreaSb = MAX_TILE_AREA >> (2 * sbSize); u32 minLog2tileCols = aom_av1_tile_log2(maxTileWidthSb, sbCols); u32 maxLog2tileCols = aom_av1_tile_log2(1, MIN(sbCols, AV1_MAX_TILE_COLS)); u32 maxLog2tileRows = aom_av1_tile_log2(1, MIN(sbRows, AV1_MAX_TILE_ROWS)); u32 minLog2Tiles = MAX(minLog2tileCols, aom_av1_tile_log2(maxTileAreaSb, sbRows * sbCols)); Bool uniform_tile_spacing_flag = gf_bs_read_int_log(bs, 1, "uniform_tile_spacing_flag"); if (uniform_tile_spacing_flag) { u32 startSb, tileWidthSb, tileHeightSb, minLog2tileRows; state->tileColsLog2 = minLog2tileCols; while (state->tileColsLog2 < maxLog2tileCols) { Bool increment_tile_cols_log2 = gf_bs_read_int_log(bs, 1, "increment_tile_cols_log2"); if (increment_tile_cols_log2 == 1) state->tileColsLog2++; else break; } tileWidthSb = (sbCols + (1 << state->tileColsLog2) - 1) >> state->tileColsLog2; i = 0; for (startSb = 0; startSb < sbCols; startSb += tileWidthSb) { i += 1; } state->tileCols = i; minLog2tileRows = MAX((int)(minLog2Tiles - state->tileColsLog2), 0); state->tileRowsLog2 = minLog2tileRows; while (state->tileRowsLog2 < maxLog2tileRows) { Bool increment_tile_rows_log2 = gf_bs_read_int_log(bs, 1, "increment_tile_rows_log2"); if (increment_tile_rows_log2 == 1) state->tileRowsLog2++; else break; } tileHeightSb = (sbRows + (1 << state->tileRowsLog2) - 1) >> state->tileRowsLog2; i = 0; for (startSb = 0; startSb < sbRows; startSb += tileHeightSb) { i += 1; } state->tileRows = i; } else { u32 startSb, maxTileHeightSb, widestTileSb; widestTileSb = 0; startSb = 0; for (i = 0; startSb < sbCols; i++) { u32 maxWidth = MIN((int)(sbCols - startSb), maxTileWidthSb); u32 width_in_sbs_minus_1 = av1_read_ns(bs, maxWidth, "width_in_sbs_minus_1"); u32 sizeSb = width_in_sbs_minus_1 + 1; widestTileSb = MAX(sizeSb, widestTileSb); startSb += sizeSb; } if (!widestTileSb) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] widest tile is 0, broken bitstream\n")); return; } state->tileCols = i; state->tileColsLog2 = aom_av1_tile_log2(1, state->tileCols); if (minLog2Tiles > 0) maxTileAreaSb = (sbRows * sbCols) >> (minLog2Tiles + 1); else maxTileAreaSb = sbRows * sbCols; maxTileHeightSb = MAX(maxTileAreaSb / widestTileSb, 1); startSb = 0; for (i = 0; startSb < sbRows; i++) { u32 maxHeight = MIN((int)(sbRows - startSb), maxTileHeightSb); u32 height_in_sbs_minus_1 = av1_read_ns(bs, maxHeight, "height_in_sbs_minus_1"); u32 sizeSb = height_in_sbs_minus_1 + 1; startSb += sizeSb; } state->tileRows = i; state->tileRowsLog2 = aom_av1_tile_log2(1, state->tileRows); } if (state->tileColsLog2 > 0 || state->tileRowsLog2 > 0) { gf_bs_read_int_log(bs, state->tileRowsLog2 + state->tileColsLog2, "context_update_tile_id"); state->tile_size_bytes = gf_bs_read_int_log(bs, 2, "tile_size_bytes_minus1") + 1; } } static void superres_params(GF_BitStream *bs, AV1State *state) { u32 SuperresDenom; Bool use_superres; if (state->enable_superres) { use_superres = gf_bs_read_int_log(bs, 1, "use_superres"); } else { use_superres = GF_FALSE; } if (use_superres) { u8 coded_denom = gf_bs_read_int_log(bs, AV1_SUPERRES_DENOM_BITS, "coded_denom"); SuperresDenom = coded_denom + AV1_SUPERRES_DENOM_MIN; } else { SuperresDenom = AV1_SUPERRES_NUM; } state->UpscaledWidth = state->width; state->width = (state->UpscaledWidth * AV1_SUPERRES_NUM + (SuperresDenom / 2)) / SuperresDenom; } static void av1_frame_size(GF_BitStream *bs, AV1State *state, Bool frame_size_override_flag) { if (frame_size_override_flag) { u32 
frame_width_minus_1, frame_height_minus_1; u8 n = state->frame_width_bits_minus_1 + 1; frame_width_minus_1 = gf_bs_read_int_log(bs, n, "frame_width_minus_1"); n = state->frame_height_bits_minus_1 + 1; frame_height_minus_1 = gf_bs_read_int_log(bs, n, "frame_height_minus_1"); state->width = frame_width_minus_1 + 1; state->height = frame_height_minus_1 + 1; } else { state->width = state->sequence_width; state->height = state->sequence_height; } superres_params(bs, state); //compute_image_size(); //no bits } static void av1_render_size(GF_BitStream *bs) { Bool render_and_frame_size_different = gf_bs_read_int_log(bs, 1, "render_and_frame_size_different_flag"); if (render_and_frame_size_different == GF_TRUE) { gf_bs_read_int_log(bs, 16, "render_width_minus_1"); gf_bs_read_int_log(bs, 16, "render_height_minus_1"); //RenderWidth = render_width_minus_1 + 1; //RenderHeight = render_height_minus_1 + 1; } else { //RenderWidth = UpscaledWidth; //RenderHeight = FrameHeight; } } static void read_interpolation_filter(GF_BitStream *bs) { Bool is_filter_switchable = gf_bs_read_int_log(bs, 1, "is_filter_switchable"); if (!is_filter_switchable) { /*interpolation_filter =*/ gf_bs_read_int_log(bs, 2, "interpolation_filter"); } } static void frame_size_with_refs(GF_BitStream *bs, AV1State *state, Bool frame_size_override_flag) { Bool found_ref = GF_FALSE; u32 i = 0; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { found_ref = gf_bs_read_int_log_idx(bs, 1, "found_ref", i); if (found_ref == 1) { #if 0 UpscaledWidth = RefUpscaledWidth[ref_frame_idx[i]]; FrameWidth = UpscaledWidth; FrameHeight = RefFrameHeight[ref_frame_idx[i]]; RenderWidth = RefRenderWidth[ref_frame_idx[i]]; RenderHeight = RefRenderHeight[ref_frame_idx[i]]; #endif break; } } if (found_ref == 0) { av1_frame_size(bs, state, frame_size_override_flag); av1_render_size(bs); } else { superres_params(bs, state); //compute_image_size(); } } static s32 av1_delta_q(GF_BitStream *bs, const char *name_flag, const char *name) { Bool delta_coded = gf_bs_read_int_log(bs, 1, name_flag); s32 delta_q = 0; if (delta_coded) { u32 signMask = 1 << (7 - 1); delta_q = gf_bs_read_int_log(bs, 7, name); if (delta_q & signMask) delta_q = delta_q - 2 * signMask; } return delta_q; } static u8 Segmentation_Feature_Bits[] = { 8,6,6,6,6,3,0,0 }; static u8 Segmentation_Feature_Signed[] = { 1, 1, 1, 1, 1, 0, 0, 0 }; static u8 av1_get_qindex(Bool ignoreDeltaQ, u32 segmentId, u32 base_q_idx, u32 delta_q_present, u32 CurrentQIndex, Bool segmentation_enabled, u8 *features_SEG_LVL_ALT_Q_enabled, s32 *features_SEG_LVL_ALT_Q) { //If seg_feature_active_idx( segmentId, SEG_LVL_ALT_Q ) is equal to 1 the following ordered steps apply: if (segmentation_enabled && features_SEG_LVL_ALT_Q_enabled[segmentId]) { //Set the variable data equal to FeatureData[ segmentId ][ SEG_LVL_ALT_Q ]. s32 data = features_SEG_LVL_ALT_Q[segmentId]; s32 qindex = base_q_idx + data; //If ignoreDeltaQ is equal to 0 and delta_q_present is equal to 1, set qindex equal to CurrentQIndex + data. if ((ignoreDeltaQ == 0) && (delta_q_present == 1)) qindex = CurrentQIndex + data; //Return Clip3( 0, 255, qindex ). if (qindex < 0) return 0; else if (qindex > 255) return 255; else return (u8)qindex; } //Otherwise, if ignoreDeltaQ is equal to 0 and delta_q_present is equal to 1, return CurrentQIndex. 
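/* Worked example with illustrative values (not taken from any stream): base_q_idx = 40 and FeatureData[segmentId][SEG_LVL_ALT_Q] = -60 give qindex = -20, which Clip3(0, 255, qindex) saturates to 0 in the branch above; when the segment feature is not active the function simply falls back to CurrentQIndex or base_q_idx below. */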
if ((ignoreDeltaQ == 0) && (delta_q_present == 1)) return CurrentQIndex; //otherwise return base_q_idx; } enum { AV1_RESTORE_NONE = 0, AV1_RESTORE_SWITCHABLE, AV1_RESTORE_WIENER, AV1_RESTORE_SGRPROJ }; #define AV1_GMC_IDENTITY 0 #define AV1_GMC_TRANSLATION 1 #define AV1_GMC_ROTZOOM 2 #define AV1_GMC_AFFINE 3 #define AV1_LAST_FRAME 1 #define AV1_LAST2_FRAME 2 #define AV1_LAST3_FRAME 3 #define AV1_GOLDEN_FRAME 4 #define AV1_BWDREF_FRAME 5 #define AV1_ALTREF2_FRAME 6 #define AV1_ALTREF_FRAME 7 #define GM_ABS_ALPHA_BITS 12 #define GM_ALPHA_PREC_BITS 15 #define GM_ABS_TRANS_ONLY_BITS 9 #define GM_TRANS_ONLY_PREC_BITS 3 #define GM_ABS_TRANS_BITS 12 #define GM_TRANS_PREC_BITS 6 #define WARPEDMODEL_PREC_BITS 16 static u32 av1_decode_subexp(GF_BitStream *bs, s32 numSyms) { s32 i = 0; s32 mk = 0; s32 k = 3; while (1) { s32 b2 = i ? k + i - 1 : k; s32 a = 1 << b2; if (numSyms <= mk + 3 * a) { s32 subexp_final_bits = av1_read_ns(bs, numSyms - mk, NULL); return subexp_final_bits + mk; } else { s32 subexp_more_bits = gf_bs_read_int(bs, 1); if (subexp_more_bits) { i++; mk += a; } else { s32 subexp_bits = gf_bs_read_int(bs, b2); return subexp_bits + mk; } } } } static GFINLINE s32 inverse_recenter(s32 r, u32 v) { if ((s64)v > (s64)(2 * r)) return v; else if (v & 1) return r - ((v + 1) >> 1); else return r + (v >> 1); } static s32 av1_decode_unsigned_subexp_with_ref(GF_BitStream *bs, s32 mx, s32 r) { u32 v = av1_decode_subexp(bs, mx); if ((r < 0) && (-(-r << 1) <= mx)) { return inverse_recenter(r, v); } else if ((r << 1) <= mx) { return inverse_recenter(r, v); } else { return mx - 1 - inverse_recenter(mx - 1 - r, v); } } static s16 av1_decode_signed_subexp_with_ref(GF_BitStream *bs, s32 low, s32 high, s32 r) { s16 x = av1_decode_unsigned_subexp_with_ref(bs, high - low, r - low); return x + low; } static void av1_read_global_param(AV1State *state, GF_BitStream *bs, u8 type, u8 ref, u8 idx) { u8 absBits = GM_ABS_ALPHA_BITS; u8 precBits = GM_ALPHA_PREC_BITS; if (idx < 2) { if (type == AV1_GMC_TRANSLATION) { absBits = GM_ABS_TRANS_ONLY_BITS - (!state->frame_state.allow_high_precision_mv ? 1 : 0); precBits = GM_TRANS_ONLY_PREC_BITS - (!state->frame_state.allow_high_precision_mv ? 1 : 0); } else { absBits = GM_ABS_TRANS_BITS; precBits = GM_TRANS_PREC_BITS; } } s32 precDiff = WARPEDMODEL_PREC_BITS - precBits; s32 round = (idx % 3) == 2 ? (1 << WARPEDMODEL_PREC_BITS) : 0; s32 sub = (idx % 3) == 2 ? (1 << precBits) : 0; s32 mx = (1 << absBits); s32 r = (state->PrevGmParams.coefs[ref][idx] >> precDiff) - sub; s32 val = av1_decode_signed_subexp_with_ref(bs, -mx, mx + 1, r); if (val < 0) { val = -val; state->GmParams.coefs[ref][idx] = (-(val << precDiff) + round); } else { state->GmParams.coefs[ref][idx] = (val << precDiff) + round; } } static s32 av1_get_relative_dist(s32 a, s32 b, AV1State *state) { if (!state->enable_order_hint) return 0; s32 diff = a - b; s32 m = 1 << (state->OrderHintBits - 1); diff = (diff & (m - 1)) - (diff & m); return diff; } static void av1_setup_past_independence(AV1State *state) { u32 ref, i; for (ref = AV1_LAST_FRAME; ref <= AV1_ALTREF_FRAME; ref++) { for (i = 0; i <= 5; i++) { state->PrevGmParams.coefs[ref][i] = ((i % 3 == 2) ? 
1 << WARPEDMODEL_PREC_BITS : 0); } } } static void av1_load_previous(AV1State *state, u8 primary_ref_frame, s8 *ref_frame_idx) { s8 prevFrame = ref_frame_idx[primary_ref_frame]; if (prevFrame < 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] load_previous: prevFrame reference index %d is invalid\n", prevFrame)); } else { state->PrevGmParams = state->SavedGmParams[prevFrame]; // load_loop_filter_params( prevFrame ) // load_segmentation_params( prevFrame ) } } static void av1_decode_frame_wrapup(AV1State *state) { u32 i; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { if ((state->frame_state.refresh_frame_flags >> i) & 1) { state->RefOrderHint[i] = state->frame_state.order_hint; state->SavedGmParams[i] = state->GmParams; state->RefFrameType[i] = state->frame_state.frame_type; } } state->frame_state.seen_frame_header = GF_FALSE; //Otherwise (show_existing_frame is equal to 1), if frame_type is equal to KEY_FRAME, the reference frame loading process as specified in section 7.21 is invoked if ((state->frame_state.show_existing_frame) && (state->frame_state.frame_type == AV1_KEY_FRAME)) { state->frame_state.order_hint = state->RefOrderHint[state->frame_state.frame_to_show_map_idx]; //OrderHints[ j + LAST_FRAME ] is set equal to SavedOrderHints[state->frame_to_show_map_idx ][ j + LAST_FRAME ] for j = 0..REFS_PER_FRAME-1. //gm_params[ ref ][ j ] is set equal to SavedGmParams[ frame_to_show_map_idx ][ ref ][ j ] for ref = LAST_FRAME..ALTREF_FRAME, for j = 0..5. state->GmParams = state->SavedGmParams[state->frame_state.frame_to_show_map_idx]; } } static s32 find_latest_forward(u32 curFrameHint, u8 *shiftedOrderHints, u8 *usedFrame) { u32 i; s32 ref = -1; s32 latestOrderHint = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (!usedFrame[i] && ((u32)hint < curFrameHint) && (ref < 0 || hint >= latestOrderHint)) { ref = i; latestOrderHint = hint; } } return ref; } //see 7.8 of AV1 spec static void av1_set_frame_refs(AV1State *state, u8 last_frame_idx, u8 gold_frame_idx, s8 *ref_frame_idx) { u32 i; u8 usedFrame[AV1_NUM_REF_FRAMES]; u8 shiftedOrderHints[AV1_NUM_REF_FRAMES]; for (i = 0; i < AV1_REFS_PER_FRAME; i++) ref_frame_idx[i] = -1; ref_frame_idx[AV1_LAST_FRAME - AV1_LAST_FRAME] = last_frame_idx; ref_frame_idx[AV1_GOLDEN_FRAME - AV1_LAST_FRAME] = gold_frame_idx; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { usedFrame[i] = 0; } usedFrame[last_frame_idx] = 1; usedFrame[gold_frame_idx] = 1; u32 curFrameHint = 1 << (state->OrderHintBits - 1); for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { shiftedOrderHints[i] = curFrameHint + av1_get_relative_dist(state->RefOrderHint[i], state->frame_state.order_hint, state); } u8 lastOrderHint = shiftedOrderHints[last_frame_idx]; u8 goldOrderHint = shiftedOrderHints[gold_frame_idx]; //It is a requirement of bitstream conformance that lastOrderHint is strictly less than curFrameHint. if (lastOrderHint >= curFrameHint) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] non conformant bitstream detected while setting up frame refs: lastOrderHint(%d) shall be stricly less than curFrameHint(%d)\n", lastOrderHint, curFrameHint)); } //It is a requirement of bitstream conformance that goldOrderHint is strictly less than curFrameHint. 
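/* Example with illustrative values: OrderHintBits = 7 gives curFrameHint = 1 << 6 = 64; a reference whose av1_get_relative_dist() to the current order hint is -3 is shifted to 64 - 3 = 61, so conforming LAST/GOLDEN references always land strictly below curFrameHint in shiftedOrderHints[], which is what the checks below verify. */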
if (goldOrderHint >= curFrameHint) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] non conformant bitstream detected while setting up frame refs: goldOrderHint(%d) shall be strictly less than curFrameHint(%d)\n", goldOrderHint, curFrameHint)); } //find_latest_backward() { s32 ref = -1; s32 latestOrderHint = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (!usedFrame[i] && ((u32)hint >= curFrameHint) && (ref < 0 || hint >= latestOrderHint)) { ref = i; latestOrderHint = hint; } } if (ref >= 0) { ref_frame_idx[AV1_ALTREF_FRAME - AV1_LAST_FRAME] = ref; usedFrame[ref] = 1; } //find_earliest_backward() for BWDREF_FRAME ref = -1; s32 earliestOrderHint = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (!usedFrame[i] && ((u32)hint >= curFrameHint) && (ref < 0 || hint < earliestOrderHint)) { ref = i; earliestOrderHint = hint; } } if (ref >= 0) { ref_frame_idx[AV1_BWDREF_FRAME - AV1_LAST_FRAME] = ref; usedFrame[ref] = 1; } //find_earliest_backward() for ALTREF2_FRAME ref = -1; earliestOrderHint = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (!usedFrame[i] && ((u32)hint >= curFrameHint) && (ref < 0 || hint < earliestOrderHint)) { ref = i; earliestOrderHint = hint; } } if (ref >= 0) { ref_frame_idx[AV1_ALTREF2_FRAME - AV1_LAST_FRAME] = ref; usedFrame[ref] = 1; } //The remaining references are set to be forward references in anti-chronological order as follows: const u8 Ref_Frame_List[AV1_REFS_PER_FRAME - 2] = { AV1_LAST2_FRAME, AV1_LAST3_FRAME, AV1_BWDREF_FRAME, AV1_ALTREF2_FRAME, AV1_ALTREF_FRAME }; for (i = 0; i < AV1_REFS_PER_FRAME - 2; i++) { u8 refFrame = Ref_Frame_List[i]; if (ref_frame_idx[refFrame - AV1_LAST_FRAME] < 0) { s32 last_ref = find_latest_forward(curFrameHint, shiftedOrderHints, usedFrame); if (last_ref >= 0) { ref_frame_idx[refFrame - AV1_LAST_FRAME] = last_ref; usedFrame[last_ref] = 1; } } } //Finally, any remaining references are set to the reference frame with smallest output order as follows: ref = -1; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (ref < 0 || hint < earliestOrderHint) { ref = i; earliestOrderHint = hint; } } for (i = 0; i < AV1_REFS_PER_FRAME; i++) { if (ref_frame_idx[i] < 0) { ref_frame_idx[i] = ref; } } } static void av1_parse_uncompressed_header(GF_BitStream *bs, AV1State *state) { Bool error_resilient_mode = GF_FALSE, allow_screen_content_tools = GF_FALSE, force_integer_mv = GF_FALSE; Bool /*use_ref_frame_mvs = GF_FALSE,*/ FrameIsIntra = GF_FALSE, frame_size_override_flag = GF_FALSE; Bool disable_cdf_update = GF_FALSE; u8 showable_frame; u8 primary_ref_frame; u16 idLen = 0; u32 idx; s8 ref_frame_idx[AV1_REFS_PER_FRAME]; AV1StateFrame *frame_state = &state->frame_state; if (state->frame_id_numbers_present_flag) { idLen = (state->additional_frame_id_length_minus_1 + state->delta_frame_id_length_minus_2 + 3); } frame_state->refresh_frame_flags = 0; showable_frame = 0; if (state->reduced_still_picture_header) { frame_state->key_frame = GF_TRUE; FrameIsIntra = GF_TRUE; frame_state->frame_type = AV1_KEY_FRAME; frame_state->show_frame = GF_TRUE; frame_state->show_existing_frame = 0; } else { frame_state->show_existing_frame = gf_bs_read_int_log(bs, 1, "show_existing_frame"); if (frame_state->show_existing_frame == GF_TRUE) { frame_state->frame_to_show_map_idx = gf_bs_read_int_log(bs, 3, "frame_to_show_map_idx"); frame_state->frame_type = state->RefFrameType[frame_state->frame_to_show_map_idx]; if 
(state->decoder_model_info_present_flag && !state->equal_picture_interval) { gf_bs_read_int_log(bs, state->frame_presentation_time_length, "frame_presentation_time"); } frame_state->refresh_frame_flags = 0; if (state->frame_id_numbers_present_flag) { gf_bs_read_int_log(bs, idLen, "display_frame_id"); } if (frame_state->frame_type == AV1_KEY_FRAME) { frame_state->refresh_frame_flags = AV1_ALL_FRAMES; } /* if (film_grain_params_present) { load_grain_params(frame_to_show_map_idx) }*/ return; } frame_state->frame_type = gf_bs_read_int_log(bs, 2, "frame_type"); FrameIsIntra = (frame_state->frame_type == AV1_INTRA_ONLY_FRAME || frame_state->frame_type == AV1_KEY_FRAME); frame_state->show_frame = gf_bs_read_int_log(bs, 1, "show_frame"); if (frame_state->is_first_frame) { frame_state->key_frame = frame_state->seen_seq_header && frame_state->show_frame && frame_state->frame_type == AV1_KEY_FRAME && frame_state->seen_frame_header; } if (frame_state->show_frame && state->decoder_model_info_present_flag && !state->equal_picture_interval) { gf_bs_read_int_log(bs, state->frame_presentation_time_length, "frame_presentation_time"); } if (frame_state->show_frame) { showable_frame = frame_state->frame_type != AV1_KEY_FRAME; } else { showable_frame = gf_bs_read_int_log(bs, 1, "showable_frame"); } if (frame_state->frame_type == AV1_SWITCH_FRAME || (frame_state->frame_type == AV1_KEY_FRAME && frame_state->show_frame)) error_resilient_mode = GF_TRUE; else error_resilient_mode = gf_bs_read_int_log(bs, 1, "error_resilient_mode"); } if ((frame_state->frame_type == AV1_KEY_FRAME) && frame_state->show_frame) { u32 i; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { state->RefValid[i] = 0; state->RefOrderHint[i] = 0; } for (i = 0; i < AV1_REFS_PER_FRAME; i++) { state->OrderHints[AV1_LAST_FRAME + i] = 0; } } disable_cdf_update = gf_bs_read_int_log(bs, 1, "disable_cdf_update"); if (state->seq_force_screen_content_tools == 2/*SELECT_SCREEN_CONTENT_TOOLS*/) { allow_screen_content_tools = gf_bs_read_int_log(bs, 1, "allow_screen_content_tools"); } else { allow_screen_content_tools = state->seq_force_screen_content_tools; } if (allow_screen_content_tools) { if (state->seq_force_integer_mv == 2/*SELECT_INTEGER_MV*/) { force_integer_mv = gf_bs_read_int_log(bs, 1, "force_integer_mv"); } else { force_integer_mv = state->seq_force_integer_mv; } } else { force_integer_mv = 0; } if (FrameIsIntra) { force_integer_mv = 1; } if (state->frame_id_numbers_present_flag) { gf_bs_read_int_log(bs, idLen, "current_frame_id"); } if (frame_state->frame_type == AV1_SWITCH_FRAME) frame_size_override_flag = GF_TRUE; else if (state->reduced_still_picture_header) frame_size_override_flag = GF_FALSE; else frame_size_override_flag = gf_bs_read_int_log(bs, 1, "frame_size_override_flag"); frame_state->order_hint = gf_bs_read_int(bs, state->OrderHintBits); if (FrameIsIntra || error_resilient_mode) { primary_ref_frame = AV1_PRIMARY_REF_NONE; } else { primary_ref_frame = gf_bs_read_int_log(bs, 3, "primary_ref_frame"); } if (state->decoder_model_info_present_flag) { u8 buffer_removal_time_present_flag = gf_bs_read_int_log(bs, 1, "buffer_removal_time_present_flag"); if (buffer_removal_time_present_flag) { u32 opNum; for (opNum = 0; opNum < state->operating_points_count; opNum++) { if (state->decoder_model_present_for_this_op[opNum]) { u8 opPtIdc = state->operating_point_idc[opNum]; u8 inTemporalLayer = (opPtIdc >> state->temporal_id) & 1; u8 inSpatialLayer = (opPtIdc >> (state->spatial_id + 8)) & 1; if (opPtIdc == 0 || (inTemporalLayer && inSpatialLayer)) { 
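/* operating_point_idc packs the temporal layers of an operating point in bits 0..7 and its spatial layers in bits 8..11, hence the (opPtIdc >> temporal_id) & 1 and (opPtIdc >> (spatial_id + 8)) & 1 tests above; opPtIdc == 0 means the operating point covers all layers. */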
gf_bs_read_int_log_idx(bs, state->buffer_removal_time_length, "buffer_removal_time", opNum); } } } } } if (frame_state->frame_type == AV1_SWITCH_FRAME || (frame_state->frame_type == AV1_KEY_FRAME && frame_state->show_frame)) { frame_state->refresh_frame_flags = AV1_ALL_FRAMES; } else { frame_state->refresh_frame_flags = gf_bs_read_int_log(bs, 8, "refresh_frame_flags"); } if (!FrameIsIntra || frame_state->refresh_frame_flags != AV1_ALL_FRAMES) { if (error_resilient_mode && state->enable_order_hint) { u32 i = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { u8 ref_order_hint = gf_bs_read_int_log_idx(bs, state->OrderHintBits, "ref_order_hint", i); if (ref_order_hint != state->RefOrderHint[i]) { state->RefValid[i] = 0; } state->RefOrderHint[i] = ref_order_hint; } } } u8 allow_intrabc = 0; if (frame_state->frame_type == AV1_KEY_FRAME) { av1_frame_size(bs, state, frame_size_override_flag); av1_render_size(bs); if (allow_screen_content_tools && state->UpscaledWidth == state->width) { allow_intrabc = gf_bs_read_int_log(bs, 1, "allow_intrabc"); } } else { if (frame_state->frame_type == AV1_INTRA_ONLY_FRAME) { av1_frame_size(bs, state, frame_size_override_flag); av1_render_size(bs); if (allow_screen_content_tools && state->UpscaledWidth == state->width) { allow_intrabc = gf_bs_read_int_log(bs, 1, "allow_intrabc"); } } else { u32 i = 0; Bool frame_refs_short_signaling = GF_FALSE; if (state->enable_order_hint) { frame_refs_short_signaling = gf_bs_read_int_log(bs, 1, "frame_refs_short_signaling"); if (frame_refs_short_signaling) { u8 last_frame_idx = gf_bs_read_int_log(bs, 3, "last_frame_idx"); u8 gold_frame_idx = gf_bs_read_int_log(bs, 3, "gold_frame_idx"); av1_set_frame_refs(state, last_frame_idx, gold_frame_idx, ref_frame_idx); } } for (i = 0; i < AV1_REFS_PER_FRAME; i++) { if (!frame_refs_short_signaling) ref_frame_idx[i] = gf_bs_read_int_log_idx(bs, 3, "ref_frame_idx", i); if (state->frame_id_numbers_present_flag) { u32 n = state->delta_frame_id_length_minus_2 + 2; /*delta_frame_id_minus_1 =*/ gf_bs_read_int_log_idx(bs, n, "delta_frame_id_minus1", i); //DeltaFrameId = delta_frame_id_minus_1 + 1; //expectedFrameId[i] = ((current_frame_id + (1 << idLen) - DeltaFrameId) % (1 << idLen)); } } if (frame_size_override_flag && !error_resilient_mode) { frame_size_with_refs(bs, state, frame_size_override_flag); } else { av1_frame_size(bs, state, frame_size_override_flag); av1_render_size(bs); } frame_state->allow_high_precision_mv = 0; if (!force_integer_mv) { frame_state->allow_high_precision_mv = gf_bs_read_int_log(bs, 1, "allow_high_precision_mv"); } read_interpolation_filter(bs); gf_bs_read_int_log(bs, 1, "is_motion_mode_switchable"); if (!(error_resilient_mode || !state->enable_ref_frame_mvs)) { gf_bs_read_int_log(bs, 1, "use_ref_frame_mvs"); } } } if (!FrameIsIntra) { u32 i; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { u8 refFrame = AV1_LAST_FRAME + i; u8 ridx = ref_frame_idx[i]; if (ridx >= 0) { u8 hint = state->RefOrderHint[ridx]; state->OrderHints[refFrame] = hint; /* if ( !enable_order_hint ) { RefFrameSignBias[ refFrame ] = 0; } else { RefFrameSignBias[ refFrame ] = get_relative_dist( hint, OrderHint) > 0; } */ } } } if (!(state->reduced_still_picture_header || disable_cdf_update)) gf_bs_read_int_log(bs, 1, "disable_frame_end_update_cdf"); if (primary_ref_frame == AV1_PRIMARY_REF_NONE) { //init_non_coeff_cdfs(); av1_setup_past_independence(state); } else { //load_cdfs(ref_frame_idx[primary_ref_frame]); av1_load_previous(state, primary_ref_frame, ref_frame_idx); } av1_parse_tile_info(bs, state); 
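/* tileCols/tileRows, their log2 values and tile_size_bytes computed by av1_parse_tile_info() are reused later by av1_parse_tile_group() to split OBU_TILE_GROUP payloads into per-tile offsets and sizes. */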
//quantization_params( ): u8 base_q_idx = gf_bs_read_int(bs, 8); s32 DeltaQUDc = 0; s32 DeltaQUAc = 0; s32 DeltaQVDc = 0; s32 DeltaQVAc = 0; s32 DeltaQYDc = av1_delta_q(bs, "DeltaQYDc_coded", "DeltaQYDc"); if (!state->config->monochrome) { u8 diff_uv_delta = 0; if (state->separate_uv_delta_q) diff_uv_delta = gf_bs_read_int(bs, 1); DeltaQUDc = av1_delta_q(bs, "DeltaQUDc_coded", "DeltaQUDc"); DeltaQUAc = av1_delta_q(bs, "DeltaQUAc_coded", "DeltaQUAc"); if (diff_uv_delta) { DeltaQVDc = av1_delta_q(bs, "DeltaQVDc_coded", "DeltaQVDc"); DeltaQVAc = av1_delta_q(bs, "DeltaQVAc_coded", "DeltaQVAc"); } } if (gf_bs_read_int_log(bs, 1, "using_qmatrix")) { gf_bs_read_int_log(bs, 4, "qm_y"); gf_bs_read_int_log(bs, 4, "qm_u"); if (!state->separate_uv_delta_q) { gf_bs_read_int_log(bs, 4, "qm_v"); } } u8 seg_features_SEG_LVL_ALT_Q_enabled[8] = { 0,0,0,0,0,0,0,0 }; s32 seg_features_SEG_LVL_ALT_Q[8] = { 0,0,0,0,0,0,0,0 }; //segmentation_params( ): u8 segmentation_enabled = gf_bs_read_int_log(bs, 1, "segmentation_enabled"); if (segmentation_enabled) { /*u8 segmentation_temporal_update = 0;*/ u8 segmentation_update_data = 1; if (primary_ref_frame != AV1_PRIMARY_REF_NONE) { u8 segmentation_update_map = gf_bs_read_int_log(bs, 1, "segmentation_update_map"); if (segmentation_update_map == 1) gf_bs_read_int_log(bs, 1, "segmentation_temporal_update"); segmentation_update_data = gf_bs_read_int_log(bs, 1, "segmentation_update_data"); } if (segmentation_update_data == 1) { u32 i, j; for (i = 0; i < 8/*=MAX_SEGMENTS*/; i++) { for (j = 0; j < 8 /*=SEG_LVL_MAX*/; j++) { if (/*feature_enabled = */gf_bs_read_int_log_idx2(bs, 1, "feature_enabled", i, j) == 1) { s32 val; u32 bitsToRead = Segmentation_Feature_Bits[j]; //this is SEG_LVL_ALT_Q if (!j) seg_features_SEG_LVL_ALT_Q_enabled[i] = 1; if (Segmentation_Feature_Signed[j] == 1) { val = gf_bs_read_int_log_idx2(bs, 1 + bitsToRead, "signed_feature_value", i, j); } else { val = gf_bs_read_int_log_idx2(bs, bitsToRead, "feature_value", i, j); } if (!j) seg_features_SEG_LVL_ALT_Q[i] = val; } } } //ignore all init steps } } //delta_q_params(): /*u8 delta_q_res = 0;*/ u8 delta_q_present = 0; if (base_q_idx > 0) { delta_q_present = gf_bs_read_int_log(bs, 1, "delta_q_present"); } if (delta_q_present) { gf_bs_read_int_log(bs, 2, "delta_q_res"); } //delta_lf_params(): u8 delta_lf_present = 0; /*u8 delta_lf_res = 0; u8 delta_lf_multi = 0;*/ if (delta_q_present) { if (!allow_intrabc) { delta_lf_present = gf_bs_read_int_log(bs, 1, "delta_lf_present"); } if (delta_lf_present) { gf_bs_read_int_log(bs, 2, "delta_lf_res"); gf_bs_read_int_log(bs, 1, "delta_lf_multi"); } } //init lossless stuff! 
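/* CodedLossless below is set only if every segment resolves to qindex 0 with all delta-Q terms equal to 0; AllLossless additionally requires that no superres scaling is in use (width == UpscaledWidth). These flags gate the loop filter, CDEF and loop restoration syntax that follows. */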
u8 CodedLossless = 1; for (idx = 0; idx < 8; idx++) { u8 qindex = av1_get_qindex(GF_TRUE, idx, base_q_idx, delta_q_present, 0/*CurrentQIndex always ignored at this level of parsin*/, segmentation_enabled, seg_features_SEG_LVL_ALT_Q_enabled, seg_features_SEG_LVL_ALT_Q); Bool LosslessArray = (qindex == 0) && (DeltaQYDc == 0) && (DeltaQUAc == 0) && (DeltaQUDc == 0) && (DeltaQVAc == 0) && (DeltaQVDc == 0); if (!LosslessArray) CodedLossless = 0; } Bool AllLossless = CodedLossless && (state->width == state->UpscaledWidth); //loop_filter_params(): if (!CodedLossless && !allow_intrabc) { u8 loop_filter_level_0 = gf_bs_read_int_log(bs, 6, "loop_filter_level_0"); u8 loop_filter_level_1 = gf_bs_read_int_log(bs, 6, "loop_filter_level_1"); if (!state->config->monochrome) { if (loop_filter_level_0 || loop_filter_level_1) { gf_bs_read_int_log(bs, 6, "loop_filter_level_2"); gf_bs_read_int_log(bs, 6, "loop_filter_level_3"); } } gf_bs_read_int_log(bs, 3, "loop_filter_sharpness"); u8 loop_filter_delta_enabled = gf_bs_read_int_log(bs, 1, "loop_filter_delta_enabled"); if (loop_filter_delta_enabled == 1) { u8 loop_filter_delta_update = gf_bs_read_int_log(bs, 1, "loop_filter_delta_update"); if (loop_filter_delta_update) { u32 i; for (i = 0; i < 8/*TOTAL_REFS_PER_FRAME*/; i++) { u8 update_ref_delta = gf_bs_read_int_log_idx(bs, 1, "update_ref_delta", i); if (update_ref_delta == 1) { gf_bs_read_int_log_idx(bs, 1 + 6, "loop_filter_ref_deltas", i); } } for (i = 0; i < 2; i++) { u8 update_mode_delta = gf_bs_read_int_log_idx(bs, 1, "update_mode_delta", i); if (update_mode_delta) { gf_bs_read_int_log_idx(bs, 1 + 6, "loop_filter_mode_deltas", i); } } } } } //cdef_params( ): if (!CodedLossless && !allow_intrabc && state->enable_cdef) { gf_bs_read_int_log(bs, 2, "cdef_damping_minus_3"); u8 cdef_bits = gf_bs_read_int_log(bs, 2, "cdef_bits"); u32 i, num_cd = 1 << cdef_bits; for (i = 0; i < num_cd; i++) { gf_bs_read_int_log_idx(bs, 4, "cdef_y_pri_strength", i); gf_bs_read_int_log_idx(bs, 2, "cdef_y_sec_strength", i); if (!state->config->monochrome) { gf_bs_read_int_log_idx(bs, 4, "cdef_uv_pri_strength", i); gf_bs_read_int_log_idx(bs, 2, "cdef_uv_sec_strength", i); } } } //lr_params( ) : if (!AllLossless && !allow_intrabc && state->enable_restoration) { u32 i, nb_planes = state->config->monochrome ? 
1 : 3; u8 UsesLr = 0; u8 usesChromaLr = 0; for (i = 0; i < nb_planes; i++) { u8 lr_type = gf_bs_read_int_log_idx(bs, 2, "lr_type", i); //FrameRestorationType[i] = Remap_Lr_Type[lr_type] if (lr_type != AV1_RESTORE_NONE) { UsesLr = 1; if (i > 0) { usesChromaLr = 1; } } } if (UsesLr) { if (state->use_128x128_superblock) { gf_bs_read_int_log(bs, 1, "lr_unit_shift_minus_1"); } else { u8 lr_unit_shift = gf_bs_read_int_log(bs, 1, "lr_unit_shift"); if (lr_unit_shift) { gf_bs_read_int_log(bs, 1, "lr_unit_extra_shift"); //lr_unit_shift += lr_unit_extra_shift; } } if (state->config->chroma_subsampling_x && state->config->chroma_subsampling_y && usesChromaLr) { gf_bs_read_int_log(bs, 1, "lr_uv_shift"); } } } //read_tx_mode(): if (CodedLossless == 1) { } else { gf_bs_read_int_log(bs, 1, "tx_mode_select"); } //frame_reference_mode( ): u8 reference_select = 0; if (FrameIsIntra) { } else { reference_select = gf_bs_read_int_log(bs, 1, "reference_select"); } //skip_mode_params( ): u8 skipModeAllowed = 0; if (FrameIsIntra || !reference_select || !state->enable_order_hint) { } else { u32 i; s32 forwardIdx = -1; s32 backwardIdx = -1; s32 forwardHint = 0; s32 backwardHint = 0; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { u8 refHint = state->RefOrderHint[ref_frame_idx[i]]; if (av1_get_relative_dist(refHint, frame_state->order_hint, state) < 0) { if (forwardIdx < 0 || av1_get_relative_dist(refHint, forwardHint, state) > 0) { forwardIdx = i; forwardHint = refHint; } } else if (av1_get_relative_dist(refHint, frame_state->order_hint, state) > 0) { if (backwardIdx < 0 || av1_get_relative_dist(refHint, backwardHint, state) < 0) { backwardIdx = i; backwardHint = refHint; } } } if (forwardIdx < 0) { skipModeAllowed = 0; } else if (backwardIdx >= 0) { skipModeAllowed = 1; //SkipModeFrame[0] = AV1_LAST_FRAME + MIN(forwardIdx, backwardIdx); //SkipModeFrame[1] = AV1_LAST_FRAME + MAX(forwardIdx, backwardIdx); } else { s32 secondForwardIdx = -1; s32 secondForwardHint = 0; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { u8 refHint = state->RefOrderHint[ref_frame_idx[i]]; if (av1_get_relative_dist(refHint, forwardHint, state) < 0) { if (secondForwardIdx < 0 || av1_get_relative_dist(refHint, secondForwardHint, state) > 0) { secondForwardIdx = i; secondForwardHint = refHint; } } } if (secondForwardIdx < 0) { skipModeAllowed = 0; } else { skipModeAllowed = 1; //SkipModeFrame[ 0 ] = LAST_FRAME + Min(forwardIdx, secondForwardIdx) //SkipModeFrame[ 1 ] = LAST_FRAME + Max(forwardIdx, secondForwardIdx) } } } if (skipModeAllowed) { gf_bs_read_int_log(bs, 1, "skip_mode_present"); } if (FrameIsIntra || error_resilient_mode || !state->enable_warped_motion) { } else { gf_bs_read_int_log(bs, 1, "allow_warped_motion"); } gf_bs_read_int_log(bs, 1, "reduced_tx"); //global_motion_params( ) u32 ref; for (ref = AV1_LAST_FRAME; ref <= AV1_ALTREF_FRAME; ref++) { u32 i; for (i = 0; i < 6; i++) { state->GmParams.coefs[ref][i] = ((i % 3 == 2) ? 1 << WARPEDMODEL_PREC_BITS : 0); } } if (!FrameIsIntra) { u32 refs; for (refs = AV1_LAST_FRAME; refs <= AV1_ALTREF_FRAME; refs++) { u8 type = AV1_GMC_IDENTITY; Bool is_global = gf_bs_read_int_log_idx(bs, 1, "is_global", refs); if (is_global) { Bool is_rot_zoom = gf_bs_read_int_log_idx(bs, 1, "is_rot_zoom", refs); if (is_rot_zoom) { type = AV1_GMC_ROTZOOM; } else { Bool is_trans = gf_bs_read_int_log_idx(bs, 1, "is_translation", refs); type = is_trans ? 
AV1_GMC_TRANSLATION : AV1_GMC_AFFINE; } } if (type >= AV1_GMC_ROTZOOM) { av1_read_global_param(state, bs, type, refs, 2); av1_read_global_param(state, bs, type, refs, 3); if (type == AV1_GMC_AFFINE) { av1_read_global_param(state, bs, type, refs, 4); av1_read_global_param(state, bs, type, refs, 5); } else { state->GmParams.coefs[refs][4] = -state->GmParams.coefs[refs][3]; state->GmParams.coefs[refs][5] = state->GmParams.coefs[refs][2]; } } if (type >= AV1_GMC_TRANSLATION) { av1_read_global_param(state, bs, type, refs, 0); av1_read_global_param(state, bs, type, refs, 1); } } } //film_grain_params() if (!state->film_grain_params_present || (!state->frame_state.show_frame && !showable_frame)) { } else { u8 apply_grain = gf_bs_read_int_log(bs, 1, "apply_grain"); if (apply_grain) { gf_bs_read_int_log(bs, 16, "grain_seed"); u8 update_grain = 1; if (state->frame_state.frame_type == AV1_INTER_FRAME) { update_grain = gf_bs_read_int_log(bs, 1, "update_grain"); } if (!update_grain) { gf_bs_read_int_log(bs, 3, "film_grain_params_ref_idx"); } else { u32 i, num_y_points = gf_bs_read_int_log(bs, 4, "num_y_points"); for (i = 0; i < num_y_points; i++) { gf_bs_read_int_log_idx(bs, 8, "point_y_value", i); gf_bs_read_int_log_idx(bs, 8, "point_y_scaling", i); } u8 chroma_scaling_from_luma = 0; if (!state->config->monochrome) chroma_scaling_from_luma = gf_bs_read_int_log(bs, 1, "chroma_scaling_from_luma"); u8 num_cb_points = 0; u8 num_cr_points = 0; if (state->config->monochrome || chroma_scaling_from_luma || ((state->config->chroma_subsampling_x == 1) && (state->config->chroma_subsampling_y == 1) && (num_y_points == 0)) ) { } else { num_cb_points = gf_bs_read_int_log(bs, 4, "num_cb_points"); for (i = 0; i < num_cb_points; i++) { gf_bs_read_int_log_idx(bs, 8, "point_cb_value", i); gf_bs_read_int_log_idx(bs, 8, "point_cb_scaling", i); } num_cr_points = gf_bs_read_int_log(bs, 4, "num_cr_points"); for (i = 0; i < num_cr_points; i++) { gf_bs_read_int_log_idx(bs, 8, "point_cr_value", i); gf_bs_read_int_log_idx(bs, 8, "point_cr_scaling", i); } } gf_bs_read_int_log(bs, 2, "grain_scaling_minus_8"); u8 ar_coeff_lag = gf_bs_read_int_log(bs, 2, "ar_coeff_lag"); u16 numPosLuma = 2 * ar_coeff_lag * (ar_coeff_lag + 1); u16 numPosChroma = numPosLuma; if (num_y_points) { numPosChroma = numPosLuma + 1; for (i = 0; i < numPosLuma; i++) { gf_bs_read_int_log_idx(bs, 8, "ar_coeffs_y_plus_128", i); } } if (chroma_scaling_from_luma || num_cb_points) { for (i = 0; i < numPosChroma; i++) { gf_bs_read_int_log_idx(bs, 8, "ar_coeffs_cb_plus_128", i); } } if (chroma_scaling_from_luma || num_cr_points) { for (i = 0; i < numPosChroma; i++) { gf_bs_read_int_log_idx(bs, 8, "ar_coeffs_cr_plus_128", i); } } gf_bs_read_int_log(bs, 2, "ar_coeff_shift_minus_6"); gf_bs_read_int_log(bs, 2, "grain_scale_shift"); if (num_cb_points) { gf_bs_read_int_log(bs, 8, "cb_mult"); gf_bs_read_int_log(bs, 8, "cb_luma_mult"); gf_bs_read_int_log(bs, 9, "cb_offset"); } if (num_cr_points) { gf_bs_read_int_log(bs, 8, "cr_mult"); gf_bs_read_int_log(bs, 8, "cr_luma_mult"); gf_bs_read_int_log(bs, 9, "cr_offset"); } gf_bs_read_int_log(bs, 1, "overlap_flag"); gf_bs_read_int_log(bs, 1, "clip_to_restricted_range"); } } } //end of uncompressed header !! 
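/* Trailing bits are not consumed here: av1_parse_frame() re-aligns the reader with gf_bs_align() before the tile group, and gf_av1_parse_obu() always seeks to the end of the OBU afterwards. */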
} GF_EXPORT void gf_av1_init_state(AV1State *state) { if (!state) return; memset(state, 0, sizeof(AV1State)); state->color_primaries = 2; state->transfer_characteristics = 2; state->matrix_coefficients = 2; } GF_EXPORT void gf_av1_reset_state(AV1State *state, Bool is_destroy) { GF_List *l1, *l2; if (state->frame_state.header_obus) { while (gf_list_count(state->frame_state.header_obus)) { GF_AV1_OBUArrayEntry *a = (GF_AV1_OBUArrayEntry*)gf_list_pop_back(state->frame_state.header_obus); if (a->obu) gf_free(a->obu); gf_free(a); } } if (state->frame_state.frame_obus) { while (gf_list_count(state->frame_state.frame_obus)) { GF_AV1_OBUArrayEntry *a = (GF_AV1_OBUArrayEntry*)gf_list_pop_back(state->frame_state.frame_obus); if (a->obu) gf_free(a->obu); gf_free(a); } } l1 = state->frame_state.frame_obus; l2 = state->frame_state.header_obus; memset(&state->frame_state, 0, sizeof(AV1StateFrame)); state->frame_state.is_first_frame = GF_TRUE; if (is_destroy) { gf_list_del(l1); gf_list_del(l2); if (state->bs) { if (gf_bs_get_position(state->bs)) { u32 size; gf_bs_get_content_no_truncate(state->bs, &state->frame_obus, &size, &state->frame_obus_alloc); } gf_bs_del(state->bs); } state->bs = NULL; } else { state->frame_state.frame_obus = l1; state->frame_state.header_obus = l2; if (state->bs) gf_bs_seek(state->bs, 0); } } static GF_Err av1_parse_tile_group(GF_BitStream *bs, AV1State *state, u64 obu_start, u64 obu_size) { u32 TileNum, tg_start = 0, tg_end = 0; Bool numTiles = state->tileCols * state->tileRows; Bool tile_start_and_end_present_flag = GF_FALSE; GF_Err e = GF_OK; if (numTiles > 1) tile_start_and_end_present_flag = gf_bs_read_int(bs, 1); if (numTiles == 1 || !tile_start_and_end_present_flag) { tg_start = 0; tg_end = numTiles - 1; /*state->frame_state.tg[0].start_idx = 0; state->frame_state.tg[0].end_idx = numTiles - 1;*/ } else { u32 tileBits = state->tileColsLog2 + state->tileRowsLog2; /*state->frame_state.tg[state->frame_state.tg_idx].start_idx*/ tg_start = gf_bs_read_int(bs, tileBits); /*state->frame_state.tg[state->frame_state.tg_idx].end_idx*/ tg_end = gf_bs_read_int(bs, tileBits); } /*state->frame_state.tg_idx++;*/ gf_bs_align(bs); if (tg_end >= GF_ARRAY_LENGTH(state->frame_state.tiles)) return GF_NON_COMPLIANT_BITSTREAM; state->frame_state.nb_tiles_in_obu = 0; for (TileNum = tg_start; TileNum <= tg_end; TileNum++) { u32 tile_start_offset, tile_size; /*u32 tileRow = TileNum / state->tileCols; u32 tileCol = TileNum % state->tileCols;*/ Bool lastTile = TileNum == tg_end; u64 pos = gf_bs_get_position(bs); if (lastTile) { tile_start_offset = (u32)(pos - obu_start); tile_size = (u32)(obu_size - (pos - obu_start)); } else { u64 tile_size_minus_1 = aom_av1_le(bs, state->tile_size_bytes, "tile_size_minus_1"); pos = gf_bs_get_position(bs); tile_start_offset = (u32)(pos - obu_start); tile_size = (u32)(tile_size_minus_1 + 1/* + state->tile_size_bytes*/); } if (tile_start_offset + tile_size > obu_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Error parsing tile group, tile %d start %d + size %d exceeds OBU length %d\n", TileNum, tile_start_offset, tile_size, obu_size)); e = GF_NON_COMPLIANT_BITSTREAM; break; } state->frame_state.tiles[state->frame_state.nb_tiles_in_obu].obu_start_offset = tile_start_offset; state->frame_state.tiles[state->frame_state.nb_tiles_in_obu].size = tile_size; gf_bs_skip_bytes(bs, tile_size); state->frame_state.nb_tiles_in_obu++; } if (tg_end == numTiles - 1) { av1_decode_frame_wrapup(state); } return e; } static void av1_parse_frame_header(GF_BitStream *bs, AV1State 
*state) { AV1StateFrame *frame_state = &state->frame_state; if (frame_state->seen_frame_header == GF_FALSE) { u64 pos = gf_bs_get_position(bs); state->frame_state.show_existing_frame = GF_FALSE; frame_state->seen_frame_header = GF_TRUE; av1_parse_uncompressed_header(bs, state); state->frame_state.is_first_frame = GF_FALSE; state->frame_state.uncompressed_header_bytes = (u32) (gf_bs_get_position(bs) - pos); if (state->frame_state.show_existing_frame) { av1_decode_frame_wrapup(state); frame_state->seen_frame_header = GF_FALSE; } else { //TileNum = 0; frame_state->seen_frame_header = GF_TRUE; } } } static GF_Err av1_parse_frame(GF_BitStream *bs, AV1State *state, u64 obu_start, u64 obu_size) { av1_parse_frame_header(bs, state); //byte alignment gf_bs_align(bs); return av1_parse_tile_group(bs, state, obu_start, obu_size); } static void on_aom_av1_eos(void *_state) { AV1State *state = (AV1State *)_state; state->bs_overread = GF_TRUE; } GF_EXPORT GF_Err gf_av1_parse_obu(GF_BitStream *bs, ObuType *obu_type, u64 *obu_size, u32 *obu_hdr_size, AV1State *state) { GF_Err e = GF_OK; u32 hdr_size; u64 pos = gf_bs_get_position(bs); if (!bs || !obu_type || !state) return GF_BAD_PARAM; state->bs_overread = GF_FALSE; gf_bs_set_eos_callback(bs, on_aom_av1_eos, state); state->obu_extension_flag = state->obu_has_size_field = 0; state->temporal_id = state->spatial_id = 0; state->frame_state.uncompressed_header_bytes = 0; e = gf_av1_parse_obu_header(bs, obu_type, &state->obu_extension_flag, &state->obu_has_size_field, &state->temporal_id, &state->spatial_id); if (e) return e; if (state->obu_has_size_field) { *obu_size = (u32)gf_av1_leb128_read(bs, NULL); } else { if (*obu_size >= 1 + state->obu_extension_flag) { *obu_size = *obu_size - 1 - state->obu_extension_flag; } else { GF_LOG(state->config ? GF_LOG_WARNING : GF_LOG_DEBUG, GF_LOG_CODING, ("[AV1] computed OBU size "LLD" (input value = "LLU"). 
Skipping.\n", *obu_size - 1 - state->obu_extension_flag, *obu_size)); return GF_NON_COMPLIANT_BITSTREAM; } } hdr_size = (u32)(gf_bs_get_position(bs) - pos); if ((gf_bs_available(bs) < *obu_size) || state->bs_overread) { gf_bs_seek(bs, pos); return GF_BUFFER_TOO_SMALL; } *obu_size += hdr_size; if (obu_hdr_size) *obu_hdr_size = hdr_size; if (*obu_type != OBU_SEQUENCE_HEADER && *obu_type != OBU_TEMPORAL_DELIMITER && state->OperatingPointIdc != 0 && state->obu_extension_flag == 1) { u32 inTemporalLayer = (state->OperatingPointIdc >> state->temporal_id) & 1; u32 inSpatialLayer = (state->OperatingPointIdc >> (state->spatial_id + 8)) & 1; if (!inTemporalLayer || !inSpatialLayer) { *obu_type = -1; gf_bs_seek(bs, pos + *obu_size); return GF_OK; } } e = GF_OK; switch (*obu_type) { case OBU_SEQUENCE_HEADER: av1_parse_sequence_header_obu(bs, state); if (gf_bs_get_position(bs) > pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Sequence header parsing consumed too many bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } gf_bs_seek(bs, pos + *obu_size); break; case OBU_METADATA: #if 0 //TODO + sample groups const ObuMetadataType metadata_type = (u32)read_leb128(bs, NULL); we should check for 16 bits limit(AV1MetadataSampleGroupEntry) for ISOBMFF bindings, see https ://github.com/AOMediaCodec/av1-isobmff/pull/86#issuecomment-416659538 if (metadata_type == OBU_METADATA_TYPE_ITUT_T35) { } else if (metadata_type == OBU_METADATA_TYPE_HDR_CLL) { } else if (metadata_type == OBU_METADATA_TYPE_HDR_MDCV) { } else if (metadata_type == OBU_METADATA_TYPE_SCALABILITY) { } else if (metadata_type == METADATA_TYPE_TIMECODE) { } #endif GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[AV1] parsing for metadata is not implemented. Forwarding.\n")); if (gf_bs_get_position(bs) > pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Metadata parsing consumed too many bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } gf_bs_seek(bs, pos + *obu_size); break; case OBU_FRAME_HEADER: case OBU_REDUNDANT_FRAME_HEADER: if (state->config) { av1_parse_frame_header(bs, state); if (gf_bs_get_position(bs) > pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Frame header parsing consumed too many bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } } gf_bs_seek(bs, pos + *obu_size); break; case OBU_FRAME: e = av1_parse_frame(bs, state, pos, *obu_size); if (gf_bs_get_position(bs) != pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Frame parsing did not consume the right number of bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } gf_bs_seek(bs, pos + *obu_size); break; case OBU_TILE_GROUP: if (state->config) { e = av1_parse_tile_group(bs, state, pos, *obu_size); if (gf_bs_get_position(bs) != pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Tile group parsing did not consume the right number of bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } } gf_bs_seek(bs, pos + *obu_size); break; case OBU_TEMPORAL_DELIMITER: state->frame_state.seen_frame_header = GF_FALSE; case OBU_PADDING: gf_bs_seek(bs, pos + *obu_size); break; default: GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] unknown OBU type %u (size "LLU"). 
Skipping.\n", *obu_type, *obu_size)); gf_bs_seek(bs, pos + *obu_size); break; } return e; } GF_EXPORT GF_Err gf_media_prores_parse_bs(GF_BitStream *bs, GF_ProResFrameInfo *prores_frame) { u32 i, j; u64 start, pos; memset(prores_frame, 0, sizeof(GF_ProResFrameInfo)); start = gf_bs_get_position(bs); if (gf_bs_available(bs) < 10) return GF_BUFFER_TOO_SMALL; prores_frame->frame_size = gf_bs_read_u32(bs); prores_frame->frame_identifier = gf_bs_read_u32(bs); if (prores_frame->frame_identifier != GF_4CC('i','c','p','f')) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[ProRes] Invalid frame identifier, expected \"icpf\" got \"%s\"\n", gf_4cc_to_str(prores_frame->frame_identifier) )); gf_bs_seek(bs, start); return GF_NON_COMPLIANT_BITSTREAM; } /*parse frame header*/ pos = gf_bs_get_position(bs); prores_frame->frame_hdr_size = gf_bs_read_u16(bs); if (gf_bs_available(bs) + 2 < prores_frame->frame_hdr_size) { gf_bs_seek(bs, start); return GF_BUFFER_TOO_SMALL; } gf_bs_read_u8(bs); prores_frame->version = gf_bs_read_u8(bs); prores_frame->encoder_id = gf_bs_read_u32(bs); prores_frame->width = gf_bs_read_u16(bs); prores_frame->height = gf_bs_read_u16(bs); prores_frame->chroma_format = gf_bs_read_int(bs, 2); gf_bs_read_int(bs, 2); prores_frame->interlaced_mode = gf_bs_read_int(bs, 2); gf_bs_read_int(bs, 2); prores_frame->aspect_ratio_information = gf_bs_read_int(bs, 4); prores_frame->framerate_code = gf_bs_read_int(bs, 4); prores_frame->color_primaries = gf_bs_read_u8(bs); prores_frame->transfer_characteristics = gf_bs_read_u8(bs); prores_frame->matrix_coefficients = gf_bs_read_u8(bs); gf_bs_read_int(bs, 4); prores_frame->alpha_channel_type = gf_bs_read_int(bs, 4); gf_bs_read_int(bs, 14); prores_frame->load_luma_quant_matrix = gf_bs_read_int(bs, 1); prores_frame->load_chroma_quant_matrix = gf_bs_read_int(bs, 1); if (prores_frame->load_luma_quant_matrix) { for (i=0; i<8; i++) { for (j=0; j<8; j++) { prores_frame->luma_quant_matrix[i][j] = gf_bs_read_u8(bs); } } } if (prores_frame->load_chroma_quant_matrix) { for (i=0; i<8; i++) { for (j=0; j<8; j++) { prores_frame->chroma_quant_matrix[i][j] = gf_bs_read_u8(bs); } } } pos = gf_bs_get_position(bs) - pos; if (pos != prores_frame->frame_hdr_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[ProRes] Invalid frame header size, expected %d got %d\n", prores_frame->frame_hdr_size, (u32) pos)); gf_bs_seek(bs, start); return GF_NON_COMPLIANT_BITSTREAM; } prores_frame->nb_pic = ((prores_frame->interlaced_mode==1) || (prores_frame->interlaced_mode==2)) ? 
2 : 1; gf_bs_seek(bs, start); return GF_OK; } #endif /*GPAC_DISABLE_AV_PARSERS*/ GF_EXPORT u8 gf_mp3_version(u32 hdr) { return ((hdr >> 19) & 0x3); } GF_EXPORT const char *gf_mp3_version_name(u32 hdr) { u32 v = gf_mp3_version(hdr); switch (v) { case 0: return "MPEG-2.5"; case 1: return "Reserved"; case 2: return "MPEG-2"; case 3: return "MPEG-1"; default: return "Unknown"; } } #ifndef GPAC_DISABLE_AV_PARSERS GF_EXPORT u8 gf_mp3_layer(u32 hdr) { return 4 - (((hdr >> 17) & 0x3)); } GF_EXPORT u8 gf_mp3_num_channels(u32 hdr) { if (((hdr >> 6) & 0x3) == 3) return 1; return 2; } GF_EXPORT u16 gf_mp3_sampling_rate(u32 hdr) { u16 res; /* extract the necessary fields from the MP3 header */ u8 version = gf_mp3_version(hdr); u8 sampleRateIndex = (hdr >> 10) & 0x3; switch (sampleRateIndex) { case 0: res = 44100; break; case 1: res = 48000; break; case 2: res = 32000; break; default: GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[MPEG-1/2 Audio] Samplerate index not valid\n")); return 0; } /*reserved or MPEG-1*/ if (version & 1) return res; /*MPEG-2*/ res /= 2; /*MPEG-2.5*/ if (version == 0) res /= 2; return res; } GF_EXPORT u16 gf_mp3_window_size(u32 hdr) { u8 version = gf_mp3_version(hdr); u8 layer = gf_mp3_layer(hdr); if (layer == 3) { if (version == 3) return 1152; return 576; } if (layer == 2) return 1152; return 384; } GF_EXPORT u8 gf_mp3_object_type_indication(u32 hdr) { switch (gf_mp3_version(hdr)) { case 3: return GF_CODECID_MPEG_AUDIO; case 2: case 0: return GF_CODECID_MPEG2_PART3; default: return 0x00; } } /*aligned bitrate parsing with libMAD*/ static u32 const bitrate_table[5][15] = { /* MPEG-1 */ { 0, 32000, 64000, 96000, 128000, 160000, 192000, 224000, /* Layer I */ 256000, 288000, 320000, 352000, 384000, 416000, 448000 }, { 0, 32000, 48000, 56000, 64000, 80000, 96000, 112000, /* Layer II */ 128000, 160000, 192000, 224000, 256000, 320000, 384000 }, { 0, 32000, 40000, 48000, 56000, 64000, 80000, 96000, /* Layer III */ 112000, 128000, 160000, 192000, 224000, 256000, 320000 }, /* MPEG-2 LSF */ { 0, 32000, 48000, 56000, 64000, 80000, 96000, 112000, /* Layer I */ 128000, 144000, 160000, 176000, 192000, 224000, 256000 }, { 0, 8000, 16000, 24000, 32000, 40000, 48000, 56000, /* Layers */ 64000, 80000, 96000, 112000, 128000, 144000, 160000 } /* II & III */ }; u32 gf_mp3_bit_rate(u32 hdr) { u8 version = gf_mp3_version(hdr); u8 layer = gf_mp3_layer(hdr); u8 bitRateIndex = (hdr >> 12) & 0xF; u32 lidx; /*MPEG-1*/ if (version & 1) { if (!layer) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[MPEG-1/2 Audio] layer index not valid\n")); return 0; } lidx = layer - 1; } /*MPEG-2/2.5*/ else { lidx = 3 + (layer >> 1); } if (lidx>4) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[MPEG-1/2 Audio] layer index not valid\n")); return 0; } return bitrate_table[lidx][bitRateIndex]; } GF_EXPORT u16 gf_mp3_frame_size(u32 hdr) { u8 version = gf_mp3_version(hdr); u8 layer = gf_mp3_layer(hdr); u32 pad = ((hdr >> 9) & 0x1) ? 
1 : 0; u32 bitrate = gf_mp3_bit_rate(hdr); u32 samplerate = gf_mp3_sampling_rate(hdr); u32 frameSize = 0; if (!samplerate || !bitrate) return 0; if (layer == 1) { frameSize = ((12 * bitrate / samplerate) + pad) * 4; } else { u32 slots_per_frame = 144; if ((layer == 3) && !(version & 1)) slots_per_frame = 72; frameSize = (slots_per_frame * bitrate / samplerate) + pad; } return (u16)frameSize; } GF_EXPORT u32 gf_mp3_get_next_header(FILE* in) { u8 b, state = 0; u32 dropped = 0; unsigned char bytes[4]; bytes[0] = bytes[1] = bytes[2] = bytes[3] = 0; while (1) { if (gf_fread(&b, 1, in) == 0) return 0; if (state == 3) { bytes[state] = b; return GF_4CC((u32)bytes[0], bytes[1], bytes[2], bytes[3]); } if (state == 2) { if (((b & 0xF0) == 0) || ((b & 0xF0) == 0xF0) || ((b & 0x0C) == 0x0C)) { if (bytes[1] == 0xFF) state = 1; else state = 0; } else { bytes[state] = b; state = 3; } } if (state == 1) { if (((b & 0xE0) == 0xE0) && ((b & 0x18) != 0x08) && ((b & 0x06) != 0)) { bytes[state] = b; state = 2; } else { state = 0; } } if (state == 0) { if (b == 0xFF) { bytes[state] = b; state = 1; } else { if ((dropped == 0) && ((b & 0xE0) == 0xE0) && ((b & 0x18) != 0x08) && ((b & 0x06) != 0)) { bytes[0] = (u8)0xFF; bytes[1] = b; state = 2; } else { dropped++; } } } } return 0; } GF_EXPORT u32 gf_mp3_get_next_header_mem(const u8 *buffer, u32 size, u32 *pos) { u32 cur; u8 b, state = 0; u32 dropped = 0; unsigned char bytes[4]; bytes[0] = bytes[1] = bytes[2] = bytes[3] = 0; cur = 0; *pos = 0; while (cur < size) { b = (u8)buffer[cur]; cur++; if (state == 3) { u32 val; bytes[state] = b; val = GF_4CC((u32)bytes[0], bytes[1], bytes[2], bytes[3]); if (gf_mp3_frame_size(val)) { *pos = dropped; return val; } state = 0; dropped = cur; } if (state == 2) { if (((b & 0xF0) == 0) || ((b & 0xF0) == 0xF0) || ((b & 0x0C) == 0x0C)) { if (bytes[1] == 0xFF) { state = 1; dropped += 1; } else { state = 0; dropped = cur; } } else { bytes[state] = b; state = 3; } } if (state == 1) { if (((b & 0xE0) == 0xE0) && ((b & 0x18) != 0x08) && ((b & 0x06) != 0)) { bytes[state] = b; state = 2; } else { state = 0; dropped = cur; } } if (state == 0) { if (b == 0xFF) { bytes[state] = b; state = 1; } else { dropped++; } } } return 0; } #endif /*GPAC_DISABLE_AV_PARSERS*/ GF_EXPORT Bool gf_avc_is_rext_profile(u8 profile_idc) { switch (profile_idc) { case 100: case 110: case 122: case 244: case 44: case 83: case 86: case 118: case 128: case 138: case 139: case 134: case 135: return GF_TRUE; default: return GF_FALSE; } } GF_EXPORT const char *gf_avc_get_profile_name(u8 video_prof) { switch (video_prof) { case 0x42: return "Baseline"; case 0x4D: return "Main"; case 0x53: return "Scalable Baseline"; case 0x56: return "Scalable High"; case 0x58: return "Extended"; case 0x64: return "High"; case 0x6E: return "High 10"; case 0x7A: return "High 4:2:2"; case 0x90: case 0xF4: return "High 4:4:4"; default: return "Unknown"; } } GF_EXPORT const char *gf_hevc_get_profile_name(u8 video_prof) { switch (video_prof) { case 0x01: return "Main"; case 0x02: return "Main 10"; case 0x03: return "Main Still Picture"; default: return "Unknown"; } } GF_EXPORT const char *gf_avc_hevc_get_chroma_format_name(u8 chroma_format) { switch (chroma_format) { case 1: return "YUV 4:2:0"; case 2: return "YUV 4:2:2"; case 3: return "YUV 4:4:4"; default: return "Unknown"; } } #ifndef GPAC_DISABLE_AV_PARSERS u32 gf_bs_read_ue_log_idx3(GF_BitStream *bs, const char *fname, s32 idx1, s32 idx2, s32 idx3) { u32 val=0, code; s32 nb_lead = -1; u32 bits = 0; for (code=0; !code; nb_lead++) { if 
(nb_lead>=32) { //gf_bs_read_int keeps returning 0 on EOS, so if no more bits available, rbsp was truncated otherwise code is broken in rbsp) //we only test once nb_lead>=32 to avoid testing at each bit read if (!gf_bs_available(bs)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Core] exp-golomb read failed, not enough bits in bitstream !\n")); } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Core] corrupted exp-golomb code, %d leading zeros, max 31 allowed !\n", nb_lead)); } return 0; } code = gf_bs_read_int(bs, 1); bits++; } if (nb_lead) { val = gf_bs_read_int(bs, nb_lead); val += (1 << nb_lead) - 1; bits += nb_lead; } if (fname) { gf_bs_log_idx(bs, bits, fname, val, idx1, idx2, idx3); } return val; } #define gf_bs_read_ue_log_idx2(_bs, _fname, _idx1, _idx2) gf_bs_read_ue_log_idx3(_bs, _fname, (s32) _idx1, (s32) _idx2, -1) #define gf_bs_read_ue_log_idx(_bs, _fname, _idx) gf_bs_read_ue_log_idx3(_bs, _fname, (s32) _idx, -1, -1) #define gf_bs_read_ue_log(_bs, _fname) gf_bs_read_ue_log_idx3(_bs, _fname, -1, -1, -1) u32 gf_bs_read_ue(GF_BitStream *bs) { return gf_bs_read_ue_log(bs, NULL); } s32 gf_bs_read_se(GF_BitStream *bs) { u32 v = gf_bs_read_ue(bs); if ((v & 0x1) == 0) return (s32)(0 - (v >> 1)); return (v + 1) >> 1; } s32 gf_bs_read_se_log_idx2(GF_BitStream *bs, const char *fname, s32 idx1, s32 idx2) { s32 res = gf_bs_read_se(bs); if (fname) gf_bs_log_idx(bs, -1, fname, res, idx1, idx2, -1); return res; } #define gf_bs_read_se_log_idx(_bs, _fname, _idx) gf_bs_read_se_log_idx2(_bs, _fname, (s32) _idx, -1) #define gf_bs_read_se_log(_bs, _fname) gf_bs_read_se_log_idx2(_bs, _fname, -1, -1) void gf_bs_write_ue(GF_BitStream *bs, u32 num) { s32 length = 1; s32 temp = ++num; while (temp != 1) { temp >>= 1; length += 2; } gf_bs_write_int(bs, 0, length >> 1); gf_bs_write_int(bs, num, (length + 1) >> 1); } void gf_bs_write_se(GF_BitStream *bs, s32 num) { u32 v; if (num <= 0) v = (-1 * num) << 1; else v = (num << 1) - 1; gf_bs_write_ue(bs, v); } u32 gf_media_nalu_is_start_code(GF_BitStream *bs) { u8 s1, s2, s3, s4; Bool is_sc = 0; u64 pos = gf_bs_get_position(bs); s1 = gf_bs_read_int(bs, 8); s2 = gf_bs_read_int(bs, 8); if (!s1 && !s2) { s3 = gf_bs_read_int(bs, 8); if (s3 == 0x01) is_sc = 3; else if (!s3) { s4 = gf_bs_read_int(bs, 8); if (s4 == 0x01) is_sc = 4; } } gf_bs_seek(bs, pos + is_sc); return is_sc; } /*read that amount of data at each IO access rather than fetching byte by byte...*/ #define AVC_CACHE_SIZE 4096 static u32 gf_media_nalu_locate_start_code_bs(GF_BitStream *bs, Bool locate_trailing) { u32 v, bpos, nb_cons_zeros = 0; char avc_cache[AVC_CACHE_SIZE]; u64 end, cache_start, load_size; u64 start = gf_bs_get_position(bs); if (start < 3) return 0; load_size = 0; bpos = 0; cache_start = 0; end = 0; v = 0xffffffff; while (!end) { /*refill cache*/ if (bpos == (u32)load_size) { if (!gf_bs_available(bs)) break; load_size = gf_bs_available(bs); if (load_size > AVC_CACHE_SIZE) load_size = AVC_CACHE_SIZE; bpos = 0; cache_start = gf_bs_get_position(bs); gf_bs_read_data(bs, avc_cache, (u32)load_size); } v = ( (v<<8) & 0xFFFFFF00) | ((u32) avc_cache[bpos]); bpos++; if (locate_trailing) { if ((v & 0x000000FF) == 0) nb_cons_zeros++; else nb_cons_zeros = 0; } if (v == 0x00000001) end = cache_start + bpos - 4; else if ((v & 0x00FFFFFF) == 0x00000001) end = cache_start + bpos - 3; } gf_bs_seek(bs, start); if (!end) end = gf_bs_get_size(bs); if (locate_trailing) { if (nb_cons_zeros >= 3) return (u32)(end - start - nb_cons_zeros); } return (u32)(end - start); } GF_EXPORT u32 
gf_media_nalu_next_start_code_bs(GF_BitStream *bs) { return gf_media_nalu_locate_start_code_bs(bs, 0); } GF_EXPORT u32 gf_media_nalu_next_start_code(const u8 *data, u32 data_len, u32 *sc_size) { u32 avail = data_len; const u8 *cur = data; while (cur) { u32 v, bpos; u8 *next_zero = memchr(cur, 0, avail); if (!next_zero) return data_len; v = 0xffffff00; bpos = (u32)(next_zero - data) + 1; while (1) { u8 cval; if (bpos == (u32)data_len) return data_len; cval = data[bpos]; v = ((v << 8) & 0xFFFFFF00) | ((u32)cval); bpos++; if (v == 0x00000001) { *sc_size = 4; return bpos - 4; } else if ((v & 0x00FFFFFF) == 0x00000001) { *sc_size = 3; return bpos - 3; } if (cval) break; } if (bpos >= data_len) break; cur = data + bpos; avail = data_len - bpos; } return data_len; } Bool gf_media_avc_slice_is_intra(AVCState *avc) { switch (avc->s_info.slice_type) { case GF_AVC_TYPE_I: case GF_AVC_TYPE2_I: case GF_AVC_TYPE_SI: case GF_AVC_TYPE2_SI: return 1; default: return 0; } } #if 0 //unused Bool gf_media_avc_slice_is_IDR(AVCState *avc) { if (avc->sei.recovery_point.valid) { avc->sei.recovery_point.valid = 0; return 1; } if (avc->s_info.nal_unit_type != GF_AVC_NALU_IDR_SLICE) return 0; return gf_media_avc_slice_is_intra(avc); } #endif static const struct { u32 w, h; } avc_hevc_sar[] = { { 0, 0 }, { 1, 1 }, { 12, 11 }, { 10, 11 }, { 16, 11 }, { 40, 33 }, { 24, 11 }, { 20, 11 }, { 32, 11 }, { 80, 33 }, { 18, 11 }, { 15, 11 }, { 64, 33 }, { 160,99 }, { 4, 3 }, { 3, 2 }, { 2, 1 } }; /*ISO 14496-10 (N11084) E.1.2*/ static void avc_parse_hrd_parameters(GF_BitStream *bs, AVC_HRD *hrd) { int i, cpb_cnt_minus1; cpb_cnt_minus1 = gf_bs_read_ue_log(bs, "cpb_cnt_minus1"); if (cpb_cnt_minus1 > 31) GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] invalid cpb_cnt_minus1 value: %d (expected in [0;31])\n", cpb_cnt_minus1)); gf_bs_read_int_log(bs, 4, "bit_rate_scale"); gf_bs_read_int_log(bs, 4, "cpb_size_scale"); /*for( SchedSelIdx = 0; SchedSelIdx <= cpb_cnt_minus1; SchedSelIdx++ ) {*/ for (i = 0; i <= cpb_cnt_minus1; i++) { gf_bs_read_ue_log_idx(bs, "bit_rate_value_minus1", i); gf_bs_read_ue_log_idx(bs, "cpb_size_value_minus1", i); gf_bs_read_int_log_idx(bs, 1, "cbr_flag", i); } gf_bs_read_int_log(bs, 5, "initial_cpb_removal_delay_length_minus1"); hrd->cpb_removal_delay_length_minus1 = gf_bs_read_int_log(bs, 5, "cpb_removal_delay_length_minus1"); hrd->dpb_output_delay_length_minus1 = gf_bs_read_int_log(bs, 5, "dpb_output_delay_length_minus1"); hrd->time_offset_length = gf_bs_read_int_log(bs, 5, "time_offset_length"); return; } /*returns the nal_size without emulation prevention bytes*/ u32 gf_media_nalu_emulation_bytes_add_count(u8 *buffer, u32 nal_size) { u32 i = 0, emulation_bytes_count = 0; u8 num_zero = 0; while (i < nal_size) { /*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003 other than the following sequences shall not occur at any byte-aligned position: \96 0x00000300 \96 0x00000301 \96 0x00000302 \96 0x00000303" */ if (num_zero == 2 && (u8)buffer[i] < 0x04) { /*emulation code found*/ num_zero = 0; emulation_bytes_count++; if (!buffer[i]) num_zero = 1; } else { if (!buffer[i]) num_zero++; else num_zero = 0; } i++; } return emulation_bytes_count; } u32 gf_media_nalu_add_emulation_bytes(const u8 *buffer_src, u8 *buffer_dst, u32 nal_size) { u32 i = 0, emulation_bytes_count = 0; u8 num_zero = 0; while (i < nal_size) { /*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003 other than the following sequences shall not occur at any byte-aligned position: 
0x00000300 0x00000301 0x00000302 0x00000303" */ if (num_zero == 2 && (u8)buffer_src[i] < 0x04) { /*add emulation code*/ num_zero = 0; buffer_dst[i + emulation_bytes_count] = 0x03; emulation_bytes_count++; if (!buffer_src[i]) num_zero = 1; } else { if (!buffer_src[i]) num_zero++; else num_zero = 0; } buffer_dst[i + emulation_bytes_count] = buffer_src[i]; i++; } return nal_size + emulation_bytes_count; } /*returns the nal_size without emulation prevention bytes*/ u32 gf_media_nalu_emulation_bytes_remove_count(const u8 *buffer, u32 nal_size) { u32 i = 0, emulation_bytes_count = 0; u8 num_zero = 0; if (!buffer || !nal_size) return 0; while (i < nal_size) { /*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003 other than the following sequences shall not occur at any byte-aligned position: \96 0x00000300 \96 0x00000301 \96 0x00000302 \96 0x00000303" */ if (num_zero == 2 && buffer[i] == 0x03 && i + 1 < nal_size /*next byte is readable*/ && (u8)buffer[i + 1] < 0x04) { /*emulation code found*/ num_zero = 0; emulation_bytes_count++; i++; } if (!buffer[i]) num_zero++; else num_zero = 0; i++; } return emulation_bytes_count; } /*nal_size is updated to allow better error detection*/ GF_EXPORT u32 gf_media_nalu_remove_emulation_bytes(const u8 *buffer_src, u8 *buffer_dst, u32 nal_size) { u32 i = 0, emulation_bytes_count = 0; u8 num_zero = 0; while (i < nal_size) { /*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003 other than the following sequences shall not occur at any byte-aligned position: 0x00000300 0x00000301 0x00000302 0x00000303" */ if (num_zero == 2 && buffer_src[i] == 0x03 && i + 1 < nal_size /*next byte is readable*/ && (u8)buffer_src[i + 1] < 0x04) { /*emulation code found*/ num_zero = 0; emulation_bytes_count++; i++; } buffer_dst[i - emulation_bytes_count] = buffer_src[i]; if (!buffer_src[i]) num_zero++; else num_zero = 0; i++; } return nal_size - emulation_bytes_count; } static s32 gf_avc_read_sps_bs_internal(GF_BitStream *bs, AVCState *avc, u32 subseq_sps, u32 *vui_flag_pos, u32 nal_hdr) { AVC_SPS *sps; s32 mb_width, mb_height, sps_id = -1; u32 profile_idc, level_idc, pcomp, i, chroma_format_idc, cl = 0, cr = 0, ct = 0, cb = 0, luma_bd, chroma_bd; u8 separate_colour_plane_flag = 0; if (!vui_flag_pos) { gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); } if (!bs) { return -1; } if (!nal_hdr) { gf_bs_read_int_log(bs, 1, "forbidden_zero_bit"); gf_bs_read_int_log(bs, 2, "nal_ref_idc"); gf_bs_read_int_log(bs, 5, "nal_unit_type"); } profile_idc = gf_bs_read_int_log(bs, 8, "profile_idc"); pcomp = gf_bs_read_int_log(bs, 8, "profile_compatibility"); /*sanity checks*/ if (pcomp & 0x3) return -1; level_idc = gf_bs_read_int_log(bs, 8, "level_idc"); /*SubsetSps is used to be sure that AVC SPS are not going to be scratched by subset SPS. According to the SVC standard, subset SPS can have the same sps_id than its base layer, but it does not refer to the same SPS. */ sps_id = gf_bs_read_ue_log(bs, "sps_id") + GF_SVC_SSPS_ID_SHIFT * subseq_sps; if ((sps_id < 0) || (sps_id >= 32)) { return -1; } luma_bd = chroma_bd = 0; sps = &avc->sps[sps_id]; chroma_format_idc = sps->ChromaArrayType = 1; sps->state |= subseq_sps ? 
AVC_SUBSPS_PARSED : AVC_SPS_PARSED; /*High Profile and SVC*/ switch (profile_idc) { case 100: case 110: case 122: case 244: case 44: /*sanity checks: note1 from 7.4.2.1.1 of iso/iec 14496-10-N11084*/ if (pcomp & 0xE0) return -1; case 83: case 86: case 118: case 128: chroma_format_idc = gf_bs_read_ue_log(bs, "chroma_format_idc"); sps->ChromaArrayType = chroma_format_idc; if (chroma_format_idc == 3) { separate_colour_plane_flag = gf_bs_read_int_log(bs, 1, "separate_colour_plane_flag"); /* Depending on the value of separate_colour_plane_flag, the value of the variable ChromaArrayType is assigned as follows. \96 If separate_colour_plane_flag is equal to 0, ChromaArrayType is set equal to chroma_format_idc. \96 Otherwise (separate_colour_plane_flag is equal to 1), ChromaArrayType is set equal to 0. */ if (separate_colour_plane_flag) sps->ChromaArrayType = 0; } luma_bd = gf_bs_read_ue_log(bs, "luma_bit_depth"); chroma_bd = gf_bs_read_ue_log(bs, "chroma_bit_depth"); /*qpprime_y_zero_transform_bypass_flag = */ gf_bs_read_int_log(bs, 1, "qpprime_y_zero_transform_bypass_flag"); /*seq_scaling_matrix_present_flag*/ if (gf_bs_read_int_log(bs, 1, "seq_scaling_matrix_present_flag")) { u32 k; for (k = 0; k < 8; k++) { if (gf_bs_read_int_log_idx(bs, 1, "seq_scaling_list_present_flag", k)) { u32 z, last = 8, next = 8; u32 sl = k < 6 ? 16 : 64; for (z = 0; z < sl; z++) { if (next) { s32 delta = gf_bs_read_se(bs); next = (last + delta + 256) % 256; } last = next ? next : last; } } } } break; } sps->profile_idc = profile_idc; sps->level_idc = level_idc; sps->prof_compat = pcomp; sps->log2_max_frame_num = gf_bs_read_ue_log(bs, "log2_max_frame_num") + 4; sps->poc_type = gf_bs_read_ue_log(bs, "poc_type"); sps->chroma_format = chroma_format_idc; sps->luma_bit_depth_m8 = luma_bd; sps->chroma_bit_depth_m8 = chroma_bd; if (sps->poc_type == 0) { sps->log2_max_poc_lsb = gf_bs_read_ue_log(bs, "log2_max_poc_lsb") + 4; } else if (sps->poc_type == 1) { sps->delta_pic_order_always_zero_flag = gf_bs_read_int_log(bs, 1, "delta_pic_order_always_zero_flag"); sps->offset_for_non_ref_pic = gf_bs_read_se_log(bs, "offset_for_non_ref_pic"); sps->offset_for_top_to_bottom_field = gf_bs_read_se_log(bs, "offset_for_top_to_bottom_field"); sps->poc_cycle_length = gf_bs_read_ue_log(bs, "poc_cycle_length"); if (sps->poc_cycle_length > GF_ARRAY_LENGTH(sps->offset_for_ref_frame)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[avc-h264] offset_for_ref_frame overflow from poc_cycle_length\n")); return -1; } for (i = 0; i < sps->poc_cycle_length; i++) sps->offset_for_ref_frame[i] = gf_bs_read_se_log_idx(bs, "offset_for_ref_frame", i); } if (sps->poc_type > 2) { return -1; } sps->max_num_ref_frames = gf_bs_read_ue_log(bs, "max_num_ref_frames"); sps->gaps_in_frame_num_value_allowed_flag = gf_bs_read_int_log(bs, 1, "gaps_in_frame_num_value_allowed_flag"); mb_width = gf_bs_read_ue_log(bs, "pic_width_in_mbs_minus1") + 1; mb_height = gf_bs_read_ue_log(bs, "pic_height_in_map_units_minus1") + 1; sps->frame_mbs_only_flag = gf_bs_read_int_log(bs, 1, "frame_mbs_only_flag"); sps->width = mb_width * 16; sps->height = (2 - sps->frame_mbs_only_flag) * mb_height * 16; if (!sps->frame_mbs_only_flag) sps->mb_adaptive_frame_field_flag = gf_bs_read_int_log(bs, 1, "mb_adaptive_frame_field_flag"); gf_bs_read_int_log(bs, 1, "direct_8x8_inference_flag"); if (gf_bs_read_int_log(bs, 1, "frame_cropping_flag")) { int CropUnitX, CropUnitY, SubWidthC = -1, SubHeightC = -1; if (chroma_format_idc == 1) { SubWidthC = 2; SubHeightC = 2; } else if (chroma_format_idc == 2) { SubWidthC = 
2; SubHeightC = 1; } else if ((chroma_format_idc == 3) && (separate_colour_plane_flag == 0)) { SubWidthC = 1; SubHeightC = 1; } if (sps->ChromaArrayType == 0) { assert(SubWidthC == -1); CropUnitX = 1; CropUnitY = 2 - sps->frame_mbs_only_flag; } else { CropUnitX = SubWidthC; CropUnitY = SubHeightC * (2 - sps->frame_mbs_only_flag); } cl = gf_bs_read_ue_log(bs, "frame_crop_left_offset"); cr = gf_bs_read_ue_log(bs, "frame_crop_right_offset"); ct = gf_bs_read_ue_log(bs, "frame_crop_top_offset"); cb = gf_bs_read_ue_log(bs, "frame_crop_bottom_offset"); sps->width -= CropUnitX * (cl + cr); sps->height -= CropUnitY * (ct + cb); cl *= CropUnitX; cr *= CropUnitX; ct *= CropUnitY; cb *= CropUnitY; } sps->crop.left = cl; sps->crop.right = cr; sps->crop.top = ct; sps->crop.bottom = cb; if (vui_flag_pos) { *vui_flag_pos = (u32)gf_bs_get_bit_offset(bs); } /*vui_parameters_present_flag*/ sps->vui_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_parameters_present_flag"); if (sps->vui_parameters_present_flag) { sps->vui.aspect_ratio_info_present_flag = gf_bs_read_int_log(bs, 1, "aspect_ratio_info_present_flag"); if (sps->vui.aspect_ratio_info_present_flag) { s32 aspect_ratio_idc = gf_bs_read_int_log(bs, 8, "aspect_ratio_idc"); if (aspect_ratio_idc == 255) { sps->vui.par_num = gf_bs_read_int_log(bs, 16, "aspect_ratio_num"); sps->vui.par_den = gf_bs_read_int_log(bs, 16, "aspect_ratio_den"); } else if (aspect_ratio_idc < GF_ARRAY_LENGTH(avc_hevc_sar) ) { sps->vui.par_num = avc_hevc_sar[aspect_ratio_idc].w; sps->vui.par_den = avc_hevc_sar[aspect_ratio_idc].h; } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] Unknown aspect_ratio_idc: your video may have a wrong aspect ratio. Contact the GPAC team!\n")); } } sps->vui.overscan_info_present_flag = gf_bs_read_int_log(bs, 1, "overscan_info_present_flag"); if (sps->vui.overscan_info_present_flag) gf_bs_read_int_log(bs, 1, "overscan_appropriate_flag"); /* default values */ sps->vui.video_format = 5; sps->vui.colour_primaries = 2; sps->vui.transfer_characteristics = 2; sps->vui.matrix_coefficients = 2; /* now read values if possible */ sps->vui.video_signal_type_present_flag = gf_bs_read_int_log(bs, 1, "video_signal_type_present_flag"); if (sps->vui.video_signal_type_present_flag) { sps->vui.video_format = gf_bs_read_int_log(bs, 3, "video_format"); sps->vui.video_full_range_flag = gf_bs_read_int_log(bs, 1, "video_full_range_flag"); sps->vui.colour_description_present_flag = gf_bs_read_int_log(bs, 1, "colour_description_present_flag"); if (sps->vui.colour_description_present_flag) { sps->vui.colour_primaries = gf_bs_read_int_log(bs, 8, "colour_primaries"); sps->vui.transfer_characteristics = gf_bs_read_int_log(bs, 8, "transfer_characteristics"); sps->vui.matrix_coefficients = gf_bs_read_int_log(bs, 8, "matrix_coefficients"); } } if (gf_bs_read_int_log(bs, 1, "chroma_location_info_present_flag")) { gf_bs_read_ue_log(bs, "chroma_sample_location_type_top_field"); gf_bs_read_ue_log(bs, "chroma_sample_location_type_bottom_field"); } sps->vui.timing_info_present_flag = gf_bs_read_int_log(bs, 1, "timing_info_present_flag"); if (sps->vui.timing_info_present_flag) { sps->vui.num_units_in_tick = gf_bs_read_int_log(bs, 32, "num_units_in_tick"); sps->vui.time_scale = gf_bs_read_int_log(bs, 32, "time_scale"); sps->vui.fixed_frame_rate_flag = gf_bs_read_int_log(bs, 1, "fixed_frame_rate_flag"); } sps->vui.nal_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "nal_hrd_parameters_present_flag"); if (sps->vui.nal_hrd_parameters_present_flag) 
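/* NAL HRD parameters (ISO 14496-10 E.1.2): the cpb_removal_delay / dpb_output_delay
   lengths and time_offset_length stored here are reused later when parsing
   pic_timing SEI messages. */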
avc_parse_hrd_parameters(bs, &sps->vui.hrd); sps->vui.vcl_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vcl_hrd_parameters_present_flag"); if (sps->vui.vcl_hrd_parameters_present_flag) avc_parse_hrd_parameters(bs, &sps->vui.hrd); if (sps->vui.nal_hrd_parameters_present_flag || sps->vui.vcl_hrd_parameters_present_flag) sps->vui.low_delay_hrd_flag = gf_bs_read_int_log(bs, 1, "low_delay_hrd_flag"); sps->vui.pic_struct_present_flag = gf_bs_read_int_log(bs, 1, "pic_struct_present_flag"); } /*end of seq_parameter_set_data*/ if (subseq_sps) { if ((profile_idc == 83) || (profile_idc == 86)) { u8 extended_spatial_scalability_idc; /*parsing seq_parameter_set_svc_extension*/ gf_bs_read_int_log(bs, 1, "inter_layer_deblocking_filter_control_present_flag"); extended_spatial_scalability_idc = gf_bs_read_int_log(bs, 2, "extended_spatial_scalability_idc"); if (sps->ChromaArrayType == 1 || sps->ChromaArrayType == 2) { gf_bs_read_int_log(bs, 1, "chroma_phase_x_plus1_flag"); } if (sps->ChromaArrayType == 1) { gf_bs_read_int_log(bs, 2, "chroma_phase_y_plus1"); } if (extended_spatial_scalability_idc == 1) { if (sps->ChromaArrayType > 0) { gf_bs_read_int_log(bs, 1, "seq_ref_layer_chroma_phase_x_plus1_flag"); gf_bs_read_int_log(bs, 2, "seq_ref_layer_chroma_phase_y_plus1"); } gf_bs_read_se_log(bs, "seq_scaled_ref_layer_left_offset"); gf_bs_read_se_log(bs, "seq_scaled_ref_layer_top_offset"); gf_bs_read_se_log(bs, "seq_scaled_ref_layer_right_offset"); gf_bs_read_se_log(bs, "seq_scaled_ref_layer_bottom_offset"); } if (gf_bs_read_int_log(bs, 1, "seq_tcoeff_level_prediction_flag")) { gf_bs_read_int_log(bs, 1, "adaptive_tcoeff_level_prediction_flag"); } gf_bs_read_int_log(bs, 1, "slice_header_restriction_flag"); if (gf_bs_read_int_log(bs, 1, "svc_vui_parameters_present")) { u32 vui_ext_num_entries_minus1 = gf_bs_read_ue_log(bs, "vui_ext_num_entries_minus1"); for (i = 0; i <= vui_ext_num_entries_minus1; i++) { u8 vui_ext_nal_hrd_parameters_present_flag, vui_ext_vcl_hrd_parameters_present_flag, vui_ext_timing_info_present_flag; gf_bs_read_int_log(bs, 3, "vui_ext_dependency_id"); gf_bs_read_int_log(bs, 4, "vui_ext_quality_id"); gf_bs_read_int_log(bs, 3, "vui_ext_temporal_id"); vui_ext_timing_info_present_flag = gf_bs_read_int_log(bs, 1, "vui_ext_timing_info_present_flag"); if (vui_ext_timing_info_present_flag) { gf_bs_read_int_log(bs, 32, "vui_ext_num_units_in_tick"); gf_bs_read_int_log(bs, 32, "vui_ext_time_scale"); gf_bs_read_int_log(bs, 1, "vui_ext_fixed_frame_rate_flag"); } vui_ext_nal_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_ext_nal_hrd_parameters_present_flag"); if (vui_ext_nal_hrd_parameters_present_flag) { //hrd_parameters( ) } vui_ext_vcl_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_ext_vcl_hrd_parameters_present_flag"); if (vui_ext_vcl_hrd_parameters_present_flag) { //hrd_parameters( ) } if (vui_ext_nal_hrd_parameters_present_flag || vui_ext_vcl_hrd_parameters_present_flag) { gf_bs_read_int_log(bs, 1, "vui_ext_low_delay_hrd_flag"); } gf_bs_read_int_log(bs, 1, "vui_ext_pic_struct_present_flag"); } } } else if ((profile_idc == 118) || (profile_idc == 128)) { GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[avc-h264] MVC parsing not implemented - skipping parsing end of Subset SPS\n")); return sps_id; } if (gf_bs_read_int_log(bs, 1, "additional_extension2")) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] skipping parsing end of Subset SPS (additional_extension2)\n")); return sps_id; } } return sps_id; } GF_EXPORT s32 gf_avc_read_sps_bs(GF_BitStream *bs, AVCState *avc, u32 
subseq_sps, u32 *vui_flag_pos) { return gf_avc_read_sps_bs_internal(bs, avc, subseq_sps, vui_flag_pos, 0); } GF_EXPORT s32 gf_avc_read_sps(const u8 *sps_data, u32 sps_size, AVCState *avc, u32 subseq_sps, u32 *vui_flag_pos) { s32 sps_id = -1; GF_BitStream *bs; char *sps_data_without_emulation_bytes = NULL; u32 sps_data_without_emulation_bytes_size = 0; if (vui_flag_pos) { /*SPS still contains emulation bytes*/ sps_data_without_emulation_bytes = gf_malloc(sps_size * sizeof(char)); sps_data_without_emulation_bytes_size = gf_media_nalu_remove_emulation_bytes(sps_data, sps_data_without_emulation_bytes, sps_size); bs = gf_bs_new(sps_data_without_emulation_bytes, sps_data_without_emulation_bytes_size, GF_BITSTREAM_READ); *vui_flag_pos = 0; } else { bs = gf_bs_new(sps_data, sps_size, GF_BITSTREAM_READ); } if (!bs) { sps_id = -1; goto exit; } sps_id = gf_avc_read_sps_bs(bs, avc, subseq_sps, vui_flag_pos); exit: gf_bs_del(bs); if (sps_data_without_emulation_bytes) gf_free(sps_data_without_emulation_bytes); return sps_id; } static s32 gf_avc_read_pps_bs_internal(GF_BitStream *bs, AVCState *avc, u32 nal_hdr) { s32 pps_id; AVC_PPS *pps; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!nal_hdr) { gf_bs_read_int_log(bs, 1, "forbidden_zero_bit"); gf_bs_read_int_log(bs, 2, "nal_ref_idc"); gf_bs_read_int_log(bs, 5, "nal_unit_type"); } pps_id = gf_bs_read_ue_log(bs, "pps_id"); if ((pps_id<0) || (pps_id >= 255)) { return -1; } pps = &avc->pps[pps_id]; pps->id = pps_id; if (!pps->status) pps->status = 1; pps->sps_id = gf_bs_read_ue_log(bs, "sps_id"); if ((pps->sps_id<0) || (pps->sps_id >= 32)) { pps->sps_id = 0; return -1; } /*sps_id may be refer to regular SPS or subseq sps, depending on the coded slice referring to the pps*/ if (!avc->sps[pps->sps_id].state && !avc->sps[pps->sps_id + GF_SVC_SSPS_ID_SHIFT].state) { return -1; } avc->pps_active_idx = pps->id; /*set active sps*/ avc->sps_active_idx = pps->sps_id; /*set active sps*/ pps->entropy_coding_mode_flag = gf_bs_read_int_log(bs, 1, "entropy_coding_mode_flag"); pps->pic_order_present = gf_bs_read_int_log(bs, 1, "pic_order_present"); pps->slice_group_count = gf_bs_read_ue_log(bs, "slice_group_count_minus1") + 1; if (pps->slice_group_count > 1) { u32 iGroup; pps->mb_slice_group_map_type = gf_bs_read_ue_log(bs, "mb_slice_group_map_type"); if (pps->mb_slice_group_map_type == 0) { for (iGroup = 0; iGroup <= pps->slice_group_count - 1; iGroup++) gf_bs_read_ue_log_idx(bs, "run_length_minus1", iGroup); } else if (pps->mb_slice_group_map_type == 2) { for (iGroup = 0; iGroup < pps->slice_group_count - 1; iGroup++) { gf_bs_read_ue_log_idx(bs, "top_left", iGroup); gf_bs_read_ue_log_idx(bs, "bottom_right", iGroup); } } else if (pps->mb_slice_group_map_type == 3 || pps->mb_slice_group_map_type == 4 || pps->mb_slice_group_map_type == 5) { gf_bs_read_int_log(bs, 1, "slice_group_change_direction_flag"); gf_bs_read_ue_log(bs, "slice_group_change_rate_minus1"); } else if (pps->mb_slice_group_map_type == 6) { u32 i; pps->pic_size_in_map_units_minus1 = gf_bs_read_ue_log(bs, "pic_size_in_map_units_minus1"); for (i = 0; i <= pps->pic_size_in_map_units_minus1; i++) { gf_bs_read_int_log_idx(bs, (u32)ceil(log(pps->slice_group_count) / log(2)), "slice_group_id", i); } } } pps->num_ref_idx_l0_default_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l0_default_active_minus1"); pps->num_ref_idx_l1_default_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l1_default_active_minus1"); /* if ((pps->ref_count[0] > 32) || (pps->ref_count[1] > 32)) goto exit; */ 
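/* Remaining PPS syntax elements: weighted prediction, initial QP/QS deltas,
   chroma QP offset, deblocking / constrained-intra / redundant-picture flags.
   Only the flags needed later for slice-header parsing are kept in the PPS state. */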
pps->weighted_pred_flag = gf_bs_read_int_log(bs, 1, "weighted_pred_flag"); gf_bs_read_int_log(bs, 2, "weighted_bipred_idc"); gf_bs_read_se_log(bs, "init_qp_minus26"); gf_bs_read_se_log(bs, "init_qs_minus26"); gf_bs_read_se_log(bs, "chroma_qp_index_offset"); pps->deblocking_filter_control_present_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_control_present_flag"); gf_bs_read_int_log(bs, 1, "constrained_intra_pred"); pps->redundant_pic_cnt_present = gf_bs_read_int_log(bs, 1, "redundant_pic_cnt_present"); return pps_id; } GF_EXPORT s32 gf_avc_read_pps_bs(GF_BitStream *bs, AVCState *avc) { return gf_avc_read_pps_bs_internal(bs, avc, 0); } GF_EXPORT s32 gf_avc_read_pps(const u8 *pps_data, u32 pps_size, AVCState *avc) { GF_BitStream *bs; s32 pps_id; /*PPS still contains emulation bytes*/ bs = gf_bs_new(pps_data, pps_size, GF_BITSTREAM_READ); if (!bs) { return -1; } gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); pps_id = gf_avc_read_pps_bs(bs, avc); gf_bs_del(bs); return pps_id; } #if 0 //unused s32 gf_avc_read_sps_ext(const char *spse_data, u32 spse_size) { GF_BitStream *bs; s32 sps_id; bs = gf_bs_new(spse_data, spse_size, GF_BITSTREAM_READ); sps_id = gf_avc_read_sps_ext_bs(bs); gf_bs_del(bs); return sps_id; } #endif static s32 SVC_ReadNal_header_extension(GF_BitStream *bs, SVC_NALUHeader *NalHeader) { gf_bs_read_int_log(bs, 1, "reserved_one_bit"); NalHeader->idr_pic_flag = gf_bs_read_int_log(bs, 1, "idr_flag"); NalHeader->priority_id = gf_bs_read_int_log(bs, 6, "priority_id"); gf_bs_read_int_log(bs, 1, "no_inter_layer_pred_flag"); NalHeader->dependency_id = gf_bs_read_int_log(bs, 3, "DependencyId"); NalHeader->quality_id = gf_bs_read_int_log(bs, 4, "quality_id"); NalHeader->temporal_id = gf_bs_read_int_log(bs, 3, "temporal_id"); gf_bs_read_int_log(bs, 1, "use_ref_base_pic_flag"); gf_bs_read_int_log(bs, 1, "discardable_flag"); gf_bs_read_int_log(bs, 1, "output_flag"); gf_bs_read_int_log(bs, 2, "reserved_three_2bits"); return 1; } static void ref_pic_list_modification(GF_BitStream *bs, u32 slice_type) { if (slice_type % 5 != 2 && slice_type % 5 != 4) { if (gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l0")) { u32 idx=0, modification_of_pic_nums_idc; do { modification_of_pic_nums_idc = gf_bs_read_ue_log_idx(bs, "modification_of_pic_nums_idc", idx); if (modification_of_pic_nums_idc == 0 || modification_of_pic_nums_idc == 1) { gf_bs_read_ue_log_idx(bs, "abs_diff_pic_num_minus1", idx); } else if (modification_of_pic_nums_idc == 2) { gf_bs_read_ue_log_idx(bs, "long_term_pic_num", idx); } idx++; } while ((modification_of_pic_nums_idc != 3) && gf_bs_available(bs)); } } if (slice_type % 5 == 1) { if (gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l1")) { u32 idx=0, modification_of_pic_nums_idc; do { modification_of_pic_nums_idc = gf_bs_read_ue_log_idx(bs, "modification_of_pic_nums_idc", idx); if (modification_of_pic_nums_idc == 0 || modification_of_pic_nums_idc == 1) { gf_bs_read_ue_log_idx(bs, "abs_diff_pic_num_minus1", idx); } else if (modification_of_pic_nums_idc == 2) { gf_bs_read_ue_log_idx(bs, "long_term_pic_num", idx); } idx++; } while ((modification_of_pic_nums_idc != 3) && gf_bs_available(bs)); } } } static void pred_weight_table(GF_BitStream *bs, u32 slice_type, u32 ChromaArrayType, u32 num_ref_idx_l0_active_minus1, u32 num_ref_idx_l1_active_minus1) { u32 i, j; gf_bs_read_ue_log(bs, "luma_log2_weight_denom"); if (ChromaArrayType != 0) { gf_bs_read_ue_log(bs, "chroma_log2_weight_denom"); } for (i = 0; i <= num_ref_idx_l0_active_minus1; i++) { if 
(gf_bs_read_int_log_idx(bs, 1, "luma_weight_l0_flag", i)) { gf_bs_read_se_log_idx(bs, "luma_weight_l0", i); gf_bs_read_se_log_idx(bs, "luma_offset_l0", i); } if (ChromaArrayType != 0) { if (gf_bs_read_int_log_idx(bs, 1, "chroma_weight_l0_flag", i)) for (j = 0; j < 2; j++) { gf_bs_read_se_log_idx2(bs, "chroma_weight_l0", i, j); gf_bs_read_se_log_idx2(bs, "chroma_offset_l0", i, j); } } } if (slice_type % 5 == 1) { for (i = 0; i <= num_ref_idx_l1_active_minus1; i++) { if (gf_bs_read_int_log_idx(bs, 1, "luma_weight_l1_flag", i)) { gf_bs_read_se_log_idx(bs, "luma_weight_l1", i); gf_bs_read_se_log_idx(bs, "luma_offset_l1", i); } if (ChromaArrayType != 0) { if (gf_bs_read_int_log_idx(bs, 1, "chroma_weight_l1_flag", i)) { for (j = 0; j < 2; j++) { gf_bs_read_se_log_idx2(bs, "chroma_weight_l1", i, j); gf_bs_read_se_log_idx2(bs, "chroma_offset_l1", i, j); } } } } } } static void dec_ref_pic_marking(GF_BitStream *bs, Bool IdrPicFlag) { if (IdrPicFlag) { gf_bs_read_int_log(bs, 1, "no_output_of_prior_pics_flag"); gf_bs_read_int_log(bs, 1, "long_term_reference_flag"); } else { if (gf_bs_read_int_log(bs, 1, "adaptive_ref_pic_marking_mode_flag")) { u32 idx=0, memory_management_control_operation; do { memory_management_control_operation = gf_bs_read_ue_log_idx(bs, "memory_management_control_operation", idx); if (memory_management_control_operation == 1 || memory_management_control_operation == 3) gf_bs_read_ue_log_idx(bs, "difference_of_pic_nums_minus1", idx); if (memory_management_control_operation == 2) gf_bs_read_ue_log_idx(bs, "long_term_pic_num", idx); if (memory_management_control_operation == 3 || memory_management_control_operation == 6) gf_bs_read_ue_log_idx(bs, "long_term_frame_idx", idx); if (memory_management_control_operation == 4) gf_bs_read_ue_log_idx(bs, "max_long_term_frame_idx_plus1", idx); idx++; } while (memory_management_control_operation != 0); } } } static s32 avc_parse_slice(GF_BitStream *bs, AVCState *avc, Bool svc_idr_flag, AVCSliceInfo *si) { s32 pps_id, num_ref_idx_l0_active_minus1 = 0, num_ref_idx_l1_active_minus1 = 0; /*s->current_picture.reference= h->nal_ref_idc != 0;*/ gf_bs_read_ue_log(bs, "first_mb_in_slice"); si->slice_type = gf_bs_read_ue_log(bs, "slice_type"); if (si->slice_type > 9) return -1; pps_id = gf_bs_read_ue_log(bs, "pps_id"); if (pps_id > 255) return -1; si->pps = &avc->pps[pps_id]; if (!si->pps->slice_group_count) return -2; si->sps = &avc->sps[si->pps->sps_id]; if (!si->sps->log2_max_frame_num) return -2; avc->sps_active_idx = si->pps->sps_id; avc->pps_active_idx = pps_id; si->frame_num = gf_bs_read_int_log(bs, si->sps->log2_max_frame_num, "frame_num"); si->field_pic_flag = 0; si->bottom_field_flag = 0; if (!si->sps->frame_mbs_only_flag) { si->field_pic_flag = gf_bs_read_int_log(bs, 1, "field_pic_flag"); if (si->field_pic_flag) si->bottom_field_flag = gf_bs_read_int_log(bs, 1, "bottom_field_flag"); } if ((si->nal_unit_type == GF_AVC_NALU_IDR_SLICE) || svc_idr_flag) si->idr_pic_id = gf_bs_read_ue_log(bs, "idr_pic_id"); if (si->sps->poc_type == 0) { si->poc_lsb = gf_bs_read_int_log(bs, si->sps->log2_max_poc_lsb, "poc_lsb"); if (si->pps->pic_order_present && !si->field_pic_flag) { si->delta_poc_bottom = gf_bs_read_se_log(bs, "poc_lsb"); } } else if ((si->sps->poc_type == 1) && !si->sps->delta_pic_order_always_zero_flag) { si->delta_poc[0] = gf_bs_read_se_log(bs, "delta_poc0"); if ((si->pps->pic_order_present == 1) && !si->field_pic_flag) si->delta_poc[1] = gf_bs_read_se_log(bs, "delta_poc1"); } if (si->pps->redundant_pic_cnt_present) { si->redundant_pic_cnt = 
gf_bs_read_ue_log(bs, "redundant_pic_cnt"); } if (si->slice_type % 5 == GF_AVC_TYPE_B) { gf_bs_read_int_log(bs, 1, "direct_spatial_mv_pred_flag"); } num_ref_idx_l0_active_minus1 = si->pps->num_ref_idx_l0_default_active_minus1; num_ref_idx_l1_active_minus1 = si->pps->num_ref_idx_l1_default_active_minus1; if (si->slice_type % 5 == GF_AVC_TYPE_P || si->slice_type % 5 == GF_AVC_TYPE_SP || si->slice_type % 5 == GF_AVC_TYPE_B) { Bool num_ref_idx_active_override_flag = gf_bs_read_int_log(bs, 1, "num_ref_idx_active_override_flag"); if (num_ref_idx_active_override_flag) { num_ref_idx_l0_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l0_active_minus1"); if (si->slice_type % 5 == GF_AVC_TYPE_B) { num_ref_idx_l1_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l1_active_minus1"); } } } if (si->nal_unit_type == 20 || si->nal_unit_type == 21) { //ref_pic_list_mvc_modification(); /* specified in Annex H */ GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[avc-h264] unimplemented ref_pic_list_mvc_modification() in slide header\n")); assert(0); return -1; } else { ref_pic_list_modification(bs, si->slice_type); } if ((si->pps->weighted_pred_flag && (si->slice_type % 5 == GF_AVC_TYPE_P || si->slice_type % 5 == GF_AVC_TYPE_SP)) || (si->pps->weighted_bipred_idc == 1 && si->slice_type % 5 == GF_AVC_TYPE_B)) { pred_weight_table(bs, si->slice_type, si->sps->ChromaArrayType, num_ref_idx_l0_active_minus1, num_ref_idx_l1_active_minus1); } if (si->nal_ref_idc != 0) { dec_ref_pic_marking(bs, (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE)); } if (si->pps->entropy_coding_mode_flag && si->slice_type % 5 != GF_AVC_TYPE_I && si->slice_type % 5 != GF_AVC_TYPE_SI) { gf_bs_read_ue_log(bs, "cabac_init_idc"); } /*slice_qp_delta = */gf_bs_read_se(bs); if (si->slice_type % 5 == GF_AVC_TYPE_SP || si->slice_type % 5 == GF_AVC_TYPE_SI) { if (si->slice_type % 5 == GF_AVC_TYPE_SP) { gf_bs_read_int_log(bs, 1, "sp_for_switch_flag"); } gf_bs_read_se_log(bs, "slice_qs_delta"); } if (si->pps->deblocking_filter_control_present_flag) { if (gf_bs_read_ue_log(bs, "disable_deblocking_filter_idc") != 1) { gf_bs_read_se_log(bs, "slice_alpha_c0_offset_div2"); gf_bs_read_se_log(bs, "slice_beta_offset_div2"); } } if (si->pps->slice_group_count > 1 && si->pps->mb_slice_group_map_type >= 3 && si->pps->mb_slice_group_map_type <= 5) { gf_bs_read_int_log(bs, (u32)ceil(log1p((si->pps->pic_size_in_map_units_minus1 + 1) / (si->pps->slice_group_change_rate_minus1 + 1) ) / log(2)), "slice_group_change_cycle"); } return 0; } static s32 svc_parse_slice(GF_BitStream *bs, AVCState *avc, AVCSliceInfo *si) { s32 pps_id; /*s->current_picture.reference= h->nal_ref_idc != 0;*/ gf_bs_read_ue_log(bs, "first_mb_in_slice"); si->slice_type = gf_bs_read_ue_log(bs, "slice_type"); if (si->slice_type > 9) return -1; pps_id = gf_bs_read_ue_log(bs, "pps_id"); if (pps_id > 255) return -1; si->pps = &avc->pps[pps_id]; si->pps->id = pps_id; if (!si->pps->slice_group_count) return -2; si->sps = &avc->sps[si->pps->sps_id + GF_SVC_SSPS_ID_SHIFT]; if (!si->sps->log2_max_frame_num) return -2; si->frame_num = gf_bs_read_int_log(bs, si->sps->log2_max_frame_num, "frame_num"); si->field_pic_flag = 0; if (si->sps->frame_mbs_only_flag) { /*s->picture_structure= PICT_FRAME;*/ } else { si->field_pic_flag = gf_bs_read_int_log(bs, 1, "field_pic_flag"); if (si->field_pic_flag) si->bottom_field_flag = gf_bs_read_int_log(bs, 1, "bottom_field_flag"); } if (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE || si->NalHeader.idr_pic_flag) si->idr_pic_id = gf_bs_read_ue_log(bs, "idr_pic_id"); if (si->sps->poc_type == 
0) { si->poc_lsb = gf_bs_read_int_log(bs, si->sps->log2_max_poc_lsb, "poc_lsb"); if (si->pps->pic_order_present && !si->field_pic_flag) { si->delta_poc_bottom = gf_bs_read_se_log(bs, "delta_poc_bottom"); } } else if ((si->sps->poc_type == 1) && !si->sps->delta_pic_order_always_zero_flag) { si->delta_poc[0] = gf_bs_read_se_log(bs, "delta_poc0"); if ((si->pps->pic_order_present == 1) && !si->field_pic_flag) si->delta_poc[1] = gf_bs_read_se_log(bs, "delta_poc1"); } if (si->pps->redundant_pic_cnt_present) { si->redundant_pic_cnt = gf_bs_read_ue_log(bs, "redundant_pic_cnt"); } return 0; } static s32 avc_parse_recovery_point_sei(GF_BitStream *bs, AVCState *avc) { AVCSeiRecoveryPoint *rp = &avc->sei.recovery_point; rp->frame_cnt = gf_bs_read_ue_log(bs, "frame_cnt"); rp->exact_match_flag = gf_bs_read_int_log(bs, 1, "exact_match_flag"); rp->broken_link_flag = gf_bs_read_int_log(bs, 1, "broken_link_flag"); rp->changing_slice_group_idc = gf_bs_read_int_log(bs, 2, "changing_slice_group_idc"); rp->valid = 1; return 0; } /*for interpretation see ISO 14496-10 N.11084, table D-1*/ static s32 avc_parse_pic_timing_sei(GF_BitStream *bs, AVCState *avc) { int sps_id = avc->sps_active_idx; const char NumClockTS[] = { 1, 1, 1, 2, 2, 3, 3, 2, 3 }; AVCSeiPicTiming *pt = &avc->sei.pic_timing; if (sps_id < 0) { /*sps_active_idx equals -1 when no sps has been detected. In this case SEI should not be decoded.*/ assert(0); return 1; } if (avc->sps[sps_id].vui.nal_hrd_parameters_present_flag || avc->sps[sps_id].vui.vcl_hrd_parameters_present_flag) { /*CpbDpbDelaysPresentFlag, see 14496-10(2003) E.11*/ gf_bs_read_int_log(bs, 1 + avc->sps[sps_id].vui.hrd.cpb_removal_delay_length_minus1, "cpb_removal_delay_minus1"); gf_bs_read_int_log(bs, 1 + avc->sps[sps_id].vui.hrd.dpb_output_delay_length_minus1, "dpb_output_delay_minus1"); } /*ISO 14496-10 (2003), D.8.2: we need to get pic_struct in order to know if we display top field first or bottom field first*/ if (avc->sps[sps_id].vui.pic_struct_present_flag) { int i; pt->pic_struct = gf_bs_read_int_log(bs, 4, "pic_struct"); if (pt->pic_struct > 8) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[avc-h264] invalid pic_struct value %d\n", pt->pic_struct)); return 1; } for (i = 0; i < NumClockTS[pt->pic_struct]; i++) { if (gf_bs_read_int_log_idx(bs, 1, "clock_timestamp_flag", i)) { Bool full_timestamp_flag; gf_bs_read_int_log_idx(bs, 2, "ct_type", i); gf_bs_read_int_log_idx(bs, 1, "nuit_field_based_flag", i); gf_bs_read_int_log_idx(bs, 5, "counting_type", i); full_timestamp_flag = gf_bs_read_int_log_idx(bs, 1, "full_timestamp_flag", i); gf_bs_read_int_log_idx(bs, 1, "discontinuity_flag", i); gf_bs_read_int_log_idx(bs, 1, "cnt_dropped_flag", i); gf_bs_read_int_log_idx(bs, 8, "n_frames", i); if (full_timestamp_flag) { gf_bs_read_int_log_idx(bs, 6, "seconds_value", i); gf_bs_read_int_log_idx(bs, 6, "minutes_value", i); gf_bs_read_int_log_idx(bs, 5, "hours_value", i); } else { if (gf_bs_read_int_log_idx(bs, 1, "seconds_flag", i)) { gf_bs_read_int_log_idx(bs, 6, "seconds_value", i); if (gf_bs_read_int_log_idx(bs, 1, "minutes_flag", i)) { gf_bs_read_int_log_idx(bs, 6, "minutes_value", i); if (gf_bs_read_int_log_idx(bs, 1, "hours_flag", i)) { gf_bs_read_int_log_idx(bs, 5, "hours_value", i); } } } if (avc->sps[sps_id].vui.hrd.time_offset_length > 0) gf_bs_read_int_log_idx(bs, avc->sps[sps_id].vui.hrd.time_offset_length, "time_offset", i); } } } } return 0; } #if !defined(GPAC_DISABLE_HEVC) static void avc_parse_itu_t_t35_sei(GF_BitStream* bs, AVCSeiItuTT35DolbyVision *dovi) { u8 
itu_t_t35_country_code = gf_bs_read_u8(bs); u16 terminal_provider_code = gf_bs_read_u16(bs); u32 user_id = gf_bs_read_u32(bs); u8 data_type_code = gf_bs_read_u8(bs); if (itu_t_t35_country_code == 0xB5 && terminal_provider_code == 0x31 && user_id == 0x47413934 && (data_type_code == 0x8 || data_type_code == 0x9)) { dovi->rpu_flag = GF_TRUE; } } #endif static void avc_compute_poc(AVCSliceInfo *si) { enum { AVC_PIC_FRAME, AVC_PIC_FIELD_TOP, AVC_PIC_FIELD_BOTTOM, } pic_type; s32 field_poc[2] = { 0,0 }; s32 max_frame_num; if (!si->sps) return; max_frame_num = 1 << (si->sps->log2_max_frame_num); /* picture type */ if (si->sps->frame_mbs_only_flag || !si->field_pic_flag) pic_type = AVC_PIC_FRAME; else if (si->bottom_field_flag) pic_type = AVC_PIC_FIELD_BOTTOM; else pic_type = AVC_PIC_FIELD_TOP; /* frame_num_offset */ if (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE) { si->poc_lsb_prev = 0; si->poc_msb_prev = 0; si->frame_num_offset = 0; } else { if (si->frame_num < si->frame_num_prev) si->frame_num_offset = si->frame_num_offset_prev + max_frame_num; else si->frame_num_offset = si->frame_num_offset_prev; } /*ISO 14496-10 N.11084 8.2.1.1*/ if (si->sps->poc_type == 0) { const u32 max_poc_lsb = 1 << (si->sps->log2_max_poc_lsb); /*ISO 14496-10 N.11084 eq (8-3)*/ if ((si->poc_lsb < si->poc_lsb_prev) && (si->poc_lsb_prev - si->poc_lsb >= max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev + max_poc_lsb; else if ((si->poc_lsb > si->poc_lsb_prev) && (si->poc_lsb - si->poc_lsb_prev > max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev - max_poc_lsb; else si->poc_msb = si->poc_msb_prev; /*ISO 14496-10 N.11084 eq (8-4)*/ if (pic_type != AVC_PIC_FIELD_BOTTOM) field_poc[0] = si->poc_msb + si->poc_lsb; /*ISO 14496-10 N.11084 eq (8-5)*/ if (pic_type != AVC_PIC_FIELD_TOP) { if (!si->field_pic_flag) field_poc[1] = field_poc[0] + si->delta_poc_bottom; else field_poc[1] = si->poc_msb + si->poc_lsb; } } /*ISO 14496-10 N.11084 8.2.1.2*/ else if (si->sps->poc_type == 1) { u32 i; s32 abs_frame_num, expected_delta_per_poc_cycle, expected_poc; if (si->sps->poc_cycle_length) abs_frame_num = si->frame_num_offset + si->frame_num; else abs_frame_num = 0; if (!si->nal_ref_idc && (abs_frame_num > 0)) abs_frame_num--; expected_delta_per_poc_cycle = 0; for (i = 0; i < si->sps->poc_cycle_length; i++) expected_delta_per_poc_cycle += si->sps->offset_for_ref_frame[i]; if (abs_frame_num > 0) { const u32 poc_cycle_cnt = (abs_frame_num - 1) / si->sps->poc_cycle_length; const u32 frame_num_in_poc_cycle = (abs_frame_num - 1) % si->sps->poc_cycle_length; expected_poc = poc_cycle_cnt * expected_delta_per_poc_cycle; for (i = 0; i <= frame_num_in_poc_cycle; i++) expected_poc += si->sps->offset_for_ref_frame[i]; } else { expected_poc = 0; } if (!si->nal_ref_idc) expected_poc += si->sps->offset_for_non_ref_pic; field_poc[0] = expected_poc + si->delta_poc[0]; field_poc[1] = field_poc[0] + si->sps->offset_for_top_to_bottom_field; if (pic_type == AVC_PIC_FRAME) field_poc[1] += si->delta_poc[1]; } /*ISO 14496-10 N.11084 8.2.1.3*/ else if (si->sps->poc_type == 2) { int poc; if (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE) { poc = 0; } else { const int abs_frame_num = si->frame_num_offset + si->frame_num; poc = 2 * abs_frame_num; if (!si->nal_ref_idc) poc -= 1; } field_poc[0] = poc; field_poc[1] = poc; } /*ISO 14496-10 N.11084 eq (8-1)*/ if (pic_type == AVC_PIC_FRAME) si->poc = MIN(field_poc[0], field_poc[1]); else if (pic_type == AVC_PIC_FIELD_TOP) si->poc = field_poc[0]; else si->poc = field_poc[1]; } GF_EXPORT s32 gf_avc_parse_nalu(GF_BitStream *bs, 
AVCState *avc) { u8 idr_flag; s32 slice, ret; u32 nal_hdr; AVCSliceInfo n_state; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); nal_hdr = gf_bs_read_u8(bs); slice = 0; memcpy(&n_state, &avc->s_info, sizeof(AVCSliceInfo)); avc->last_nal_type_parsed = n_state.nal_unit_type = nal_hdr & 0x1F; n_state.nal_ref_idc = (nal_hdr >> 5) & 0x3; idr_flag = 0; switch (n_state.nal_unit_type) { case GF_AVC_NALU_ACCESS_UNIT: case GF_AVC_NALU_END_OF_SEQ: case GF_AVC_NALU_END_OF_STREAM: ret = 1; break; case GF_AVC_NALU_SVC_SLICE: SVC_ReadNal_header_extension(bs, &n_state.NalHeader); // slice buffer - read the info and compare. /*ret = */svc_parse_slice(bs, avc, &n_state); if (avc->s_info.nal_ref_idc) { n_state.poc_lsb_prev = avc->s_info.poc_lsb; n_state.poc_msb_prev = avc->s_info.poc_msb; } avc_compute_poc(&n_state); if (avc->s_info.poc != n_state.poc) { memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return 1; } memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return 0; case GF_AVC_NALU_SVC_PREFIX_NALU: SVC_ReadNal_header_extension(bs, &n_state.NalHeader); return 0; case GF_AVC_NALU_IDR_SLICE: case GF_AVC_NALU_NON_IDR_SLICE: case GF_AVC_NALU_DP_A_SLICE: case GF_AVC_NALU_DP_B_SLICE: case GF_AVC_NALU_DP_C_SLICE: slice = 1; /* slice buffer - read the info and compare.*/ ret = avc_parse_slice(bs, avc, idr_flag, &n_state); if (ret < 0) return ret; ret = 0; if ( ((avc->s_info.nal_unit_type > GF_AVC_NALU_IDR_SLICE) || (avc->s_info.nal_unit_type < GF_AVC_NALU_NON_IDR_SLICE)) && (avc->s_info.nal_unit_type != GF_AVC_NALU_SVC_SLICE) ) { break; } if (avc->s_info.frame_num != n_state.frame_num) { ret = 1; break; } if (avc->s_info.field_pic_flag != n_state.field_pic_flag) { ret = 1; break; } if ((avc->s_info.nal_ref_idc != n_state.nal_ref_idc) && (!avc->s_info.nal_ref_idc || !n_state.nal_ref_idc)) { ret = 1; break; } assert(avc->s_info.sps); if (avc->s_info.sps->poc_type == n_state.sps->poc_type) { if (!avc->s_info.sps->poc_type) { if (!n_state.bottom_field_flag && (avc->s_info.poc_lsb != n_state.poc_lsb)) { ret = 1; break; } if (avc->s_info.delta_poc_bottom != n_state.delta_poc_bottom) { ret = 1; break; } } else if (avc->s_info.sps->poc_type == 1) { if (avc->s_info.delta_poc[0] != n_state.delta_poc[0]) { ret = 1; break; } if (avc->s_info.delta_poc[1] != n_state.delta_poc[1]) { ret = 1; break; } } } if (n_state.nal_unit_type == GF_AVC_NALU_IDR_SLICE) { if (avc->s_info.nal_unit_type != GF_AVC_NALU_IDR_SLICE) { /*IdrPicFlag differs in value*/ ret = 1; break; } else if (avc->s_info.idr_pic_id != n_state.idr_pic_id) { /*both IDR and idr_pic_id differs*/ ret = 1; break; } } break; case GF_AVC_NALU_SEQ_PARAM: avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 0, NULL, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_PIC_PARAM: avc->last_ps_idx = gf_avc_read_pps_bs_internal(bs, avc, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SVC_SUBSEQ_PARAM: avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 1, NULL, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SEQ_PARAM_EXT: avc->last_ps_idx = (s32) gf_bs_read_ue(bs); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SEI: case GF_AVC_NALU_FILLER_DATA: return 0; default: if (avc->s_info.nal_unit_type <= GF_AVC_NALU_IDR_SLICE) ret = 1; //To detect change of AU when multiple sps and pps in stream else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEI && avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE) ret = 1; else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEQ_PARAM && 
avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE) ret = 1; else ret = 0; break; } /* save _prev values */ if (ret && avc->s_info.sps) { n_state.frame_num_offset_prev = avc->s_info.frame_num_offset; if ((avc->s_info.sps->poc_type != 2) || (avc->s_info.nal_ref_idc != 0)) n_state.frame_num_prev = avc->s_info.frame_num; if (avc->s_info.nal_ref_idc) { n_state.poc_lsb_prev = avc->s_info.poc_lsb; n_state.poc_msb_prev = avc->s_info.poc_msb; } } if (slice) avc_compute_poc(&n_state); memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return ret; } u32 gf_media_avc_reformat_sei(u8 *buffer, u32 nal_size, Bool isobmf_rewrite, AVCState *avc) { u32 ptype, psize, hdr, var; u32 start; GF_BitStream *bs; GF_BitStream *bs_dest = NULL; u8 nhdr; Bool sei_removed = GF_FALSE; char store; hdr = buffer[0]; if ((hdr & 0x1F) != GF_AVC_NALU_SEI) return 0; if (isobmf_rewrite) bs_dest = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); bs = gf_bs_new(buffer, nal_size, GF_BITSTREAM_READ); gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); nhdr = gf_bs_read_int(bs, 8); if (bs_dest) gf_bs_write_int(bs_dest, nhdr, 8); /*parse SEI*/ while (gf_bs_available(bs)) { Bool do_copy; ptype = 0; while (1) { u8 v = gf_bs_read_int(bs, 8); ptype += v; if (v != 0xFF) break; } psize = 0; while (1) { u8 v = gf_bs_read_int(bs, 8); psize += v; if (v != 0xFF) break; } start = (u32)gf_bs_get_position(bs); do_copy = 1; if (start + psize >= nal_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] SEI user message type %d size error (%d but %d remain), keeping full SEI untouched\n", ptype, psize, nal_size - start)); if (bs_dest) gf_bs_del(bs_dest); return nal_size; } switch (ptype) { /*remove SEI messages forbidden in MP4*/ case 3: /*filler data*/ case 10: /*sub_seq info*/ case 11: /*sub_seq_layer char*/ case 12: /*sub_seq char*/ do_copy = 0; sei_removed = GF_TRUE; break; case 5: /*user unregistered */ store = buffer[start + psize]; buffer[start + psize] = 0; GF_LOG(GF_LOG_DEBUG, GF_LOG_CODING, ("[avc-h264] SEI user message %s\n", buffer + start + 16)); buffer[start + psize] = store; break; case 6: /*recovery point*/ avc_parse_recovery_point_sei(bs, avc); break; case 1: /*pic_timing*/ avc_parse_pic_timing_sei(bs, avc); break; case 0: /*buffering period*/ case 2: /*pan scan rect*/ case 4: /*user registered ITU t35*/ case 7: /*def_rec_pic_marking_repetition*/ case 8: /*spare_pic*/ case 9: /*scene info*/ case 13: /*full frame freeze*/ case 14: /*full frame freeze release*/ case 15: /*full frame snapshot*/ case 16: /*progressive refinement segment start*/ case 17: /*progressive refinement segment end*/ case 18: /*motion constrained slice group*/ default: /*add all unknown SEIs*/ break; } if (do_copy && bs_dest) { var = ptype; while (var >= 255) { gf_bs_write_int(bs_dest, 0xFF, 8); var -= 255; } gf_bs_write_int(bs_dest, var, 8); var = psize; while (var >= 255) { gf_bs_write_int(bs_dest, 0xFF, 8); var -= 255; } gf_bs_write_int(bs_dest, var, 8); gf_bs_seek(bs, start); //bs_read_data does not skip EPB, read byte per byte var = psize; while (var) { gf_bs_write_u8(bs_dest, gf_bs_read_u8(bs)); var--; } } else { gf_bs_seek(bs, start); //bs_skip_bytes does not skip EPB, skip byte per byte while (psize) { gf_bs_read_u8(bs); psize--; } } if (gf_bs_available(bs) <= 2) { var = gf_bs_read_int(bs, 8); if (var != 0x80) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] SEI user message has less than 2 bytes remaining but no end of sei found\n")); } if (bs_dest) gf_bs_write_int(bs_dest, 0x80, 8); break; } } gf_bs_del(bs); //we cannot compare final size and original 
size since original may have EPB and final does not yet have them if (bs_dest && sei_removed) { u8 *dst_no_epb = NULL; u32 dst_no_epb_size = 0; gf_bs_get_content(bs_dest, &dst_no_epb, &dst_no_epb_size); nal_size = gf_media_nalu_add_emulation_bytes(buffer, dst_no_epb, dst_no_epb_size); } if (bs_dest) gf_bs_del(bs_dest); return nal_size; } static u8 avc_hevc_get_sar_idx(u32 w, u32 h) { u32 i, count = GF_ARRAY_LENGTH(avc_hevc_sar); for (i = 0; i < count; i++) { if ((avc_hevc_sar[i].w == w) && (avc_hevc_sar[i].h == h)) return i; } return 0xFF; } static void avc_hevc_rewrite_vui(GF_VUIInfo *vui_info, GF_BitStream *orig, GF_BitStream *mod) { /* VUI present flag*/ Bool vui_present_flag = gf_bs_read_int(orig, 1); /*setup default values*/ Bool aspect_ratio_info_present_flag = 0; s32 aspect_ratio_idc = -1; u32 ar_n=0, ar_d=0; Bool overscan_info_present_flag = 0; u32 overscan_info=0; u32 video_signal_type_present_flag=0; u32 video_format = 5; u32 video_full_range_flag = 0; u32 colour_description_present_flag = 0; u32 colour_primaries = 2; u32 transfer_characteristics = 2; u32 matrix_coefficients = 2; //if VUI is present, read all SAR and overscan values if (vui_present_flag) { /* VUI found in input bitstream */ aspect_ratio_info_present_flag = gf_bs_read_int(orig, 1); if (aspect_ratio_info_present_flag) { aspect_ratio_idc = gf_bs_read_int(orig, 8); /*aspect_ratio_idc*/ if (aspect_ratio_idc == 255) { ar_n = gf_bs_read_int(orig, 16); /*sar_width*/ ar_d = gf_bs_read_int(orig, 16); /*sar_height*/ } } /*overscan_info_present_flag */ overscan_info_present_flag = gf_bs_read_int(orig, 1); if(overscan_info_present_flag) { overscan_info = gf_bs_read_int(orig, 1); } /* read all video signal related flags first */ video_signal_type_present_flag = gf_bs_read_int(orig, 1); if(video_signal_type_present_flag) { video_format = gf_bs_read_int(orig, 3); video_full_range_flag = gf_bs_read_int(orig, 1); colour_description_present_flag = gf_bs_read_int(orig, 1); if(colour_description_present_flag) { colour_primaries = gf_bs_read_int(orig, 8); transfer_characteristics = gf_bs_read_int(orig, 8); matrix_coefficients = gf_bs_read_int(orig, 8); } } } //recompute values //no change if ((vui_info->ar_num<0) || (vui_info->ar_den<0)) { } //remove par else if ((vui_info->ar_num==0) || (vui_info->ar_den==0)) { aspect_ratio_info_present_flag = 0; } //set par else { aspect_ratio_info_present_flag = 1; ar_n = vui_info->ar_num; ar_d = vui_info->ar_den; aspect_ratio_idc = avc_hevc_get_sar_idx((u32) ar_n, (u32) ar_d); } if (vui_info->remove_video_info) { video_signal_type_present_flag = 0; } /* correct the values of each flags */ else if ((vui_info->fullrange==0) && (vui_info->video_format==5) && (vui_info->color_prim==2) && (vui_info->color_tfc==2) && (vui_info->color_matrix==2)) { video_signal_type_present_flag = 0; /* all default, nothing to write*/ } else { video_signal_type_present_flag = 1; video_format = (vui_info->video_format < 0) ? video_format : vui_info->video_format; video_full_range_flag = (vui_info->fullrange < 0) ? video_full_range_flag : vui_info->fullrange; if ((vui_info->color_prim==2) && (vui_info->color_tfc==2) && (vui_info->color_matrix==2)) { colour_description_present_flag = 0; } else { colour_description_present_flag = 1; colour_primaries = (vui_info->color_prim < 0) ? colour_primaries : vui_info->color_prim; transfer_characteristics = (vui_info->color_tfc < 0) ? transfer_characteristics : vui_info->color_tfc; matrix_coefficients = (vui_info->color_matrix < 0) ? 
matrix_coefficients : vui_info->color_matrix; } if ((colour_primaries==2) && (transfer_characteristics==2) && (matrix_coefficients==2)) { colour_description_present_flag = 0; if ((video_format==5) && (video_full_range_flag==0)) video_signal_type_present_flag = 0; } } //always rewrite VUI gf_bs_write_int(mod, 1, 1); gf_bs_write_int(mod, aspect_ratio_info_present_flag, 1); if (aspect_ratio_info_present_flag) { gf_bs_write_int(mod, aspect_ratio_idc, 8); if (aspect_ratio_idc == 255) { gf_bs_write_int(mod, ar_n, 16); gf_bs_write_int(mod, ar_d, 16); } if (vui_info->update) { vui_info->ar_num = ar_n; vui_info->ar_den = ar_d; } } gf_bs_write_int(mod, overscan_info_present_flag, 1); if (overscan_info_present_flag) { gf_bs_write_int(mod, overscan_info, 1); } gf_bs_write_int(mod, video_signal_type_present_flag, 1); if (video_signal_type_present_flag) { gf_bs_write_int(mod, video_format, 3); gf_bs_write_int(mod, video_full_range_flag, 1); gf_bs_write_int(mod, colour_description_present_flag, 1); if (colour_description_present_flag) { gf_bs_write_int(mod, colour_primaries, 8); gf_bs_write_int(mod, transfer_characteristics, 8); gf_bs_write_int(mod, matrix_coefficients, 8); } if (vui_info->update) { vui_info->video_format = video_format; vui_info->fullrange = video_full_range_flag; if (colour_description_present_flag) { vui_info->color_prim = colour_primaries; vui_info->color_tfc = transfer_characteristics; vui_info->color_matrix = matrix_coefficients; } } } /*no VUI in input bitstream but we just inserted one, set all remaining vui flags to 0*/ if (!vui_present_flag) { gf_bs_write_int(mod, 0, 1); /*chroma_location_info_present_flag */ gf_bs_write_int(mod, 0, 1); /*timing_info_present_flag*/ gf_bs_write_int(mod, 0, 1); /*nal_hrd_parameters_present*/ gf_bs_write_int(mod, 0, 1); /*vcl_hrd_parameters_present*/ gf_bs_write_int(mod, 0, 1); /*pic_struct_present*/ gf_bs_write_int(mod, 0, 1); /*bitstream_restriction*/ } /*otherwise we copy over th bits from the input bitrate*/ } GF_Err gf_avc_change_vui(GF_AVCConfig *avcc, GF_VUIInfo *vui_info) { GF_BitStream *orig, *mod; AVCState avc; u32 i, bit_offset, flag; s32 idx; GF_AVCConfigSlot *slc; orig = NULL; memset(&avc, 0, sizeof(AVCState)); avc.sps_active_idx = -1; i=0; while ((slc = (GF_AVCConfigSlot *)gf_list_enum(avcc->sequenceParameterSets, &i))) { u8 *no_emulation_buf = NULL; u32 no_emulation_buf_size = 0, emulation_bytes = 0; idx = gf_avc_read_sps(slc->data, slc->size, &avc, 0, &bit_offset); if (idx<0) { if ( orig ) gf_bs_del(orig); continue; } /*SPS still contains emulation bytes*/ no_emulation_buf = gf_malloc((slc->size - 1) * sizeof(char)); no_emulation_buf_size = gf_media_nalu_remove_emulation_bytes(slc->data + 1, no_emulation_buf, slc->size - 1); orig = gf_bs_new(no_emulation_buf, no_emulation_buf_size, GF_BITSTREAM_READ); gf_bs_read_data(orig, no_emulation_buf, no_emulation_buf_size); gf_bs_seek(orig, 0); mod = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); /*copy over till vui flag*/ assert(bit_offset >= 8); while (bit_offset - 8/*bit_offset doesn't take care of the first byte (NALU type)*/) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); bit_offset--; } avc_hevc_rewrite_vui(vui_info, orig, mod); /*finally copy over remaining*/ while (gf_bs_bits_available(orig)) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); } gf_bs_del(orig); orig = NULL; gf_free(no_emulation_buf); /*set anti-emulation*/ gf_bs_get_content(mod, &no_emulation_buf, &flag); emulation_bytes = gf_media_nalu_emulation_bytes_add_count(no_emulation_buf, flag); if 
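/* here 'flag' holds the rewritten SPS payload size returned by gf_bs_get_content;
   grow the slot if that payload plus the re-inserted emulation-prevention bytes
   plus the leading NAL header byte no longer fits */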
(flag+emulation_bytes+1>slc->size) slc->data = (char*)gf_realloc(slc->data, flag+emulation_bytes+1); slc->size = gf_media_nalu_add_emulation_bytes(no_emulation_buf, slc->data + 1, flag) + 1; gf_bs_del(mod); gf_free(no_emulation_buf); } return GF_OK; } GF_EXPORT GF_Err gf_media_avc_change_par(GF_AVCConfig *avcc, s32 ar_n, s32 ar_d) { GF_VUIInfo vuii; memset(&vuii, 0, sizeof(GF_VUIInfo)); vuii.ar_num = ar_n; vuii.ar_den = ar_d; vuii.fullrange = -1; vuii.video_format = -1; vuii.color_prim = -1; vuii.color_tfc = -1; vuii.color_matrix = -1; return gf_avc_change_vui(avcc, &vuii); } GF_EXPORT GF_Err gf_media_avc_change_color(GF_AVCConfig *avcc, s32 fullrange, s32 vidformat, s32 colorprim, s32 transfer, s32 colmatrix) { GF_VUIInfo vuii; memset(&vuii, 0, sizeof(GF_VUIInfo)); vuii.ar_num = -1; vuii.ar_den = -1; vuii.fullrange = fullrange; vuii.video_format = vidformat; vuii.color_prim = colorprim; vuii.color_tfc = transfer; vuii.color_matrix = colmatrix; return gf_avc_change_vui(avcc, &vuii); } GF_EXPORT GF_Err gf_avc_get_sps_info(u8 *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d) { AVCState avc; s32 idx; memset(&avc, 0, sizeof(AVCState)); avc.sps_active_idx = -1; idx = gf_avc_read_sps(sps_data, sps_size, &avc, 0, NULL); if (idx < 0) { return GF_NON_COMPLIANT_BITSTREAM; } if (sps_id) *sps_id = idx; if (width) *width = avc.sps[idx].width; if (height) *height = avc.sps[idx].height; if (par_n) *par_n = avc.sps[idx].vui.par_num ? avc.sps[idx].vui.par_num : (u32)-1; if (par_d) *par_d = avc.sps[idx].vui.par_den ? avc.sps[idx].vui.par_den : (u32)-1; return GF_OK; } GF_EXPORT GF_Err gf_avc_get_pps_info(u8 *pps_data, u32 pps_size, u32 *pps_id, u32 *sps_id) { GF_BitStream *bs; GF_Err e = GF_OK; bs = gf_bs_new(pps_data, pps_size, GF_BITSTREAM_READ); if (!bs) { e = GF_NON_COMPLIANT_BITSTREAM; goto exit; } gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); /*nal hdr*/ gf_bs_read_int(bs, 8); *pps_id = gf_bs_read_ue(bs); *sps_id = gf_bs_read_ue(bs); exit: gf_bs_del(bs); return e; } #ifndef GPAC_DISABLE_HEVC /********** HEVC parsing **********/ Bool gf_hevc_slice_is_intra(HEVCState *hevc) { switch (hevc->s_info.nal_unit_type) { case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: case GF_HEVC_NALU_SLICE_CRA: return GF_TRUE; default: return GF_FALSE; } } Bool gf_hevc_slice_is_IDR(HEVCState *hevc) { if (hevc->sei.recovery_point.valid) { hevc->sei.recovery_point.valid = 0; return GF_TRUE; } switch (hevc->s_info.nal_unit_type) { case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: return GF_TRUE; default: return GF_FALSE; } } static Bool hevc_parse_short_term_ref_pic_set(GF_BitStream *bs, HEVC_SPS *sps, u32 idx_rps) { u32 i; Bool inter_ref_pic_set_prediction_flag = 0; if (idx_rps != 0) inter_ref_pic_set_prediction_flag = gf_bs_read_int_log_idx(bs, 1, "inter_ref_pic_set_prediction_flag", idx_rps); if (inter_ref_pic_set_prediction_flag) { HEVC_ReferencePictureSets *ref_ps, *rps; u32 delta_idx_minus1 = 0; u32 ref_idx; u32 delta_rps_sign; u32 abs_delta_rps_minus1, nb_ref_pics; s32 deltaRPS; u32 k = 0, k0 = 0, k1 = 0; if (idx_rps == sps->num_short_term_ref_pic_sets) delta_idx_minus1 = gf_bs_read_ue_log_idx(bs, "delta_idx_minus1", idx_rps); assert(delta_idx_minus1 <= idx_rps - 1); ref_idx = idx_rps - 1 - delta_idx_minus1; delta_rps_sign = gf_bs_read_int_log_idx(bs, 1, "delta_rps_sign", idx_rps); abs_delta_rps_minus1 = gf_bs_read_ue_log_idx(bs, 
"abs_delta_rps_minus1", idx_rps); deltaRPS = (1 - (delta_rps_sign << 1)) * (abs_delta_rps_minus1 + 1); rps = &sps->rps[idx_rps]; ref_ps = &sps->rps[ref_idx]; nb_ref_pics = ref_ps->num_negative_pics + ref_ps->num_positive_pics; for (i = 0; i <= nb_ref_pics; i++) { s32 ref_idc; s32 used_by_curr_pic_flag = gf_bs_read_int_log_idx2(bs, 1, "used_by_curr_pic_flag", idx_rps, i); ref_idc = used_by_curr_pic_flag ? 1 : 0; if (!used_by_curr_pic_flag) { used_by_curr_pic_flag = gf_bs_read_int_log_idx2(bs, 1, "used_by_curr_pic_flag", idx_rps, i); ref_idc = used_by_curr_pic_flag << 1; } if ((ref_idc == 1) || (ref_idc == 2)) { s32 deltaPOC = deltaRPS; if (i < nb_ref_pics) deltaPOC += ref_ps->delta_poc[i]; rps->delta_poc[k] = deltaPOC; if (deltaPOC < 0) k0++; else k1++; k++; } } rps->num_negative_pics = k0; rps->num_positive_pics = k1; } else { s32 prev = 0, poc; sps->rps[idx_rps].num_negative_pics = gf_bs_read_ue_log_idx(bs, "num_negative_pics", idx_rps); sps->rps[idx_rps].num_positive_pics = gf_bs_read_ue_log_idx(bs, "num_positive_pics", idx_rps); if (sps->rps[idx_rps].num_negative_pics > 16) return GF_FALSE; if (sps->rps[idx_rps].num_positive_pics > 16) return GF_FALSE; for (i = 0; i < sps->rps[idx_rps].num_negative_pics; i++) { u32 delta_poc_s0_minus1 = gf_bs_read_ue_log_idx2(bs, "delta_poc_s0_minus1", idx_rps, i); poc = prev - delta_poc_s0_minus1 - 1; prev = poc; sps->rps[idx_rps].delta_poc[i] = poc; gf_bs_read_int_log_idx2(bs, 1, "delta_poc_s0_minus1", idx_rps, i); } for (i = 0; i < sps->rps[idx_rps].num_positive_pics; i++) { u32 delta_poc_s1_minus1 = gf_bs_read_ue_log_idx2(bs, "delta_poc_s1_minus1" , idx_rps, i); poc = prev + delta_poc_s1_minus1 + 1; prev = poc; sps->rps[idx_rps].delta_poc[i] = poc; gf_bs_read_int_log_idx2(bs, 1, "used_by_curr_pic_s1_flag", idx_rps, i); } } return GF_TRUE; } void hevc_pred_weight_table(GF_BitStream *bs, HEVCState *hevc, HEVCSliceInfo *si, HEVC_PPS *pps, HEVC_SPS *sps, u32 num_ref_idx_l0_active, u32 num_ref_idx_l1_active) { u32 i, num_ref_idx; Bool first_pass = GF_TRUE; u8 luma_weights[20], chroma_weights[20]; u32 ChromaArrayType = sps->separate_colour_plane_flag ? 
0 : sps->chroma_format_idc; num_ref_idx = num_ref_idx_l0_active; gf_bs_read_ue_log(bs, "luma_log2_weight_denom"); if (ChromaArrayType != 0) gf_bs_read_se_log(bs, "delta_chroma_log2_weight_denom"); parse_weights: for (i = 0; i < num_ref_idx; i++) { luma_weights[i] = gf_bs_read_int_log_idx(bs, 1, "luma_weights", i); //infered to be 0 if not present chroma_weights[i] = 0; } if (ChromaArrayType != 0) { for (i = 0; i < num_ref_idx; i++) { chroma_weights[i] = gf_bs_read_int_log_idx(bs, 1, "chroma_weights", i); } } for (i = 0; i < num_ref_idx; i++) { if (luma_weights[i]) { gf_bs_read_se_log_idx(bs, "delta_luma_weight_l0", i); gf_bs_read_se_log_idx(bs, "luma_offset_l0", i); } if (chroma_weights[i]) { gf_bs_read_se_log_idx(bs, "delta_chroma_weight_l0_0", i); gf_bs_read_se_log_idx(bs, "delta_chroma_offset_l0_0", i); gf_bs_read_se_log_idx(bs, "delta_chroma_weight_l0_1", i); gf_bs_read_se_log_idx(bs, "delta_chroma_offset_l0_1", i); } } if (si->slice_type == GF_HEVC_SLICE_TYPE_B) { if (!first_pass) return; first_pass = GF_FALSE; num_ref_idx = num_ref_idx_l1_active; goto parse_weights; } } static Bool ref_pic_lists_modification(GF_BitStream *bs, u32 slice_type, u32 num_ref_idx_l0_active, u32 num_ref_idx_l1_active) { //u32 i; Bool ref_pic_list_modification_flag_l0 = gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l0"); if (ref_pic_list_modification_flag_l0) { /*for (i=0; i<num_ref_idx_l0_active; i++) { list_entry_l0[i] = *//*gf_bs_read_int(bs, (u32)ceil(log(getNumPicTotalCurr())/log(2))); }*/ return GF_FALSE; } if (slice_type == GF_HEVC_SLICE_TYPE_B) { Bool ref_pic_list_modification_flag_l1 = gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l1"); if (ref_pic_list_modification_flag_l1) { /*for (i=0; i<num_ref_idx_l1_active; i++) { list_entry_l1[i] = *//*gf_bs_read_int(bs, (u32)ceil(log(getNumPicTotalCurr()) / log(2))); }*/ return GF_FALSE; } } return GF_TRUE; } static s32 hevc_parse_slice_segment(GF_BitStream *bs, HEVCState *hevc, HEVCSliceInfo *si) { u32 i, j; u32 num_ref_idx_l0_active = 0, num_ref_idx_l1_active = 0; HEVC_PPS *pps; HEVC_SPS *sps; s32 pps_id; Bool RapPicFlag = GF_FALSE; Bool IDRPicFlag = GF_FALSE; si->first_slice_segment_in_pic_flag = gf_bs_read_int_log(bs, 1, "first_slice_segment_in_pic_flag"); switch (si->nal_unit_type) { case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: IDRPicFlag = GF_TRUE; RapPicFlag = GF_TRUE; break; case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: case GF_HEVC_NALU_SLICE_CRA: RapPicFlag = GF_TRUE; break; } if (RapPicFlag) { gf_bs_read_int_log(bs, 1, "no_output_of_prior_pics_flag"); } pps_id = gf_bs_read_ue_log(bs, "pps_id"); if ((pps_id<0) || (pps_id >= 64)) return -1; pps = &hevc->pps[pps_id]; sps = &hevc->sps[pps->sps_id]; si->sps = sps; si->pps = pps; if (!si->first_slice_segment_in_pic_flag && pps->dependent_slice_segments_enabled_flag) { si->dependent_slice_segment_flag = gf_bs_read_int_log(bs, 1, "dependent_slice_segment_flag"); } else { si->dependent_slice_segment_flag = GF_FALSE; } if (!si->first_slice_segment_in_pic_flag) { si->slice_segment_address = gf_bs_read_int_log(bs, sps->bitsSliceSegmentAddress, "slice_segment_address"); } else { si->slice_segment_address = 0; } if (!si->dependent_slice_segment_flag) { Bool deblocking_filter_override_flag = 0; Bool slice_temporal_mvp_enabled_flag = 0; Bool slice_sao_luma_flag = 0; Bool slice_sao_chroma_flag = 0; Bool slice_deblocking_filter_disabled_flag = 0; //"slice_reserved_undetermined_flag[]" gf_bs_read_int_log(bs, 
pps->num_extra_slice_header_bits, "slice_reserved_undetermined_flag"); si->slice_type = gf_bs_read_ue_log(bs, "slice_type"); if (pps->output_flag_present_flag) gf_bs_read_int_log(bs, 1, "pic_output_flag"); if (sps->separate_colour_plane_flag == 1) gf_bs_read_int_log(bs, 2, "colour_plane_id"); if (IDRPicFlag) { si->poc_lsb = 0; //if not asked to parse full header, abort since we know the poc if (!hevc->full_slice_header_parse) return 0; } else { si->poc_lsb = gf_bs_read_int_log(bs, sps->log2_max_pic_order_cnt_lsb, "poc_lsb"); //if not asked to parse full header, abort once we have the poc if (!hevc->full_slice_header_parse) return 0; if (gf_bs_read_int_log(bs, 1, "short_term_ref_pic_set_sps_flag") == 0) { Bool ret = hevc_parse_short_term_ref_pic_set(bs, sps, sps->num_short_term_ref_pic_sets); if (!ret) return -1; } else if (sps->num_short_term_ref_pic_sets > 1) { u32 numbits = 0; while ((u32)(1 << numbits) < sps->num_short_term_ref_pic_sets) numbits++; if (numbits > 0) gf_bs_read_int_log(bs, numbits, "short_term_ref_pic_set_idx"); /*else short_term_ref_pic_set_idx = 0;*/ } if (sps->long_term_ref_pics_present_flag) { u8 DeltaPocMsbCycleLt[32]; u32 num_long_term_sps = 0; u32 num_long_term_pics = 0; memset(DeltaPocMsbCycleLt, 0, sizeof(u8) * 32); if (sps->num_long_term_ref_pic_sps > 0) { num_long_term_sps = gf_bs_read_ue_log(bs, "num_long_term_sps"); } num_long_term_pics = gf_bs_read_ue_log(bs, "num_long_term_pics"); for (i = 0; i < num_long_term_sps + num_long_term_pics; i++) { if (i < num_long_term_sps) { if (sps->num_long_term_ref_pic_sps > 1) gf_bs_read_int_log_idx(bs, gf_get_bit_size(sps->num_long_term_ref_pic_sps), "lt_idx_sps", i); } else { gf_bs_read_int_log_idx(bs, sps->log2_max_pic_order_cnt_lsb, "PocLsbLt", i); gf_bs_read_int_log_idx(bs, 1, "UsedByCurrPicLt", i); } if (gf_bs_read_int_log_idx(bs, 1, "delta_poc_msb_present_flag", i)) { if (i == 0 || i == num_long_term_sps) DeltaPocMsbCycleLt[i] = gf_bs_read_ue_log_idx(bs, "DeltaPocMsbCycleLt", i); else DeltaPocMsbCycleLt[i] = gf_bs_read_ue_log_idx(bs, "DeltaPocMsbCycleLt", i) + DeltaPocMsbCycleLt[i - 1]; } } } if (sps->temporal_mvp_enable_flag) slice_temporal_mvp_enabled_flag = gf_bs_read_int_log(bs, 1, "slice_temporal_mvp_enabled_flag"); } if (sps->sample_adaptive_offset_enabled_flag) { u32 ChromaArrayType = sps->separate_colour_plane_flag ? 
0 : sps->chroma_format_idc; slice_sao_luma_flag = gf_bs_read_int_log(bs, 1, "slice_sao_luma_flag"); if (ChromaArrayType != 0) slice_sao_chroma_flag = gf_bs_read_int_log(bs, 1, "slice_sao_chroma_flag"); } if (si->slice_type == GF_HEVC_SLICE_TYPE_P || si->slice_type == GF_HEVC_SLICE_TYPE_B) { //u32 NumPocTotalCurr; num_ref_idx_l0_active = pps->num_ref_idx_l0_default_active; num_ref_idx_l1_active = 0; if (si->slice_type == GF_HEVC_SLICE_TYPE_B) num_ref_idx_l1_active = pps->num_ref_idx_l1_default_active; if (gf_bs_read_int_log(bs, 1, "num_ref_idx_active_override_flag")) { num_ref_idx_l0_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l0_active"); if (si->slice_type == GF_HEVC_SLICE_TYPE_B) num_ref_idx_l1_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l1_active"); } if (pps->lists_modification_present_flag /*TODO: && NumPicTotalCurr > 1*/) { if (!ref_pic_lists_modification(bs, si->slice_type, num_ref_idx_l0_active, num_ref_idx_l1_active)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[hevc] ref_pic_lists_modification( ) not implemented\n")); return -1; } } if (si->slice_type == GF_HEVC_SLICE_TYPE_B) gf_bs_read_int_log(bs, 1, "mvd_l1_zero_flag"); if (pps->cabac_init_present_flag) gf_bs_read_int_log(bs, 1, "cabac_init_flag"); if (slice_temporal_mvp_enabled_flag) { // When collocated_from_l0_flag is not present, it is inferred to be equal to 1. Bool collocated_from_l0_flag = 1; if (si->slice_type == GF_HEVC_SLICE_TYPE_B) collocated_from_l0_flag = gf_bs_read_int_log(bs, 1, "collocated_from_l0_flag"); if ((collocated_from_l0_flag && (num_ref_idx_l0_active > 1)) || (!collocated_from_l0_flag && (num_ref_idx_l1_active > 1)) ) { gf_bs_read_ue_log(bs, "collocated_ref_idx"); } } if ((pps->weighted_pred_flag && si->slice_type == GF_HEVC_SLICE_TYPE_P) || (pps->weighted_bipred_flag && si->slice_type == GF_HEVC_SLICE_TYPE_B) ) { hevc_pred_weight_table(bs, hevc, si, pps, sps, num_ref_idx_l0_active, num_ref_idx_l1_active); } gf_bs_read_ue_log(bs, "five_minus_max_num_merge_cand"); } si->slice_qp_delta_start_bits = (s32) (gf_bs_get_position(bs) - 1) * 8 + gf_bs_get_bit_position(bs); si->slice_qp_delta = gf_bs_read_se_log(bs, "slice_qp_delta"); if (pps->slice_chroma_qp_offsets_present_flag) { gf_bs_read_se_log(bs, "slice_cb_qp_offset"); gf_bs_read_se_log(bs, "slice_cr_qp_offset"); } if (pps->deblocking_filter_override_enabled_flag) { deblocking_filter_override_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_override_flag"); } if (deblocking_filter_override_flag) { slice_deblocking_filter_disabled_flag = gf_bs_read_int_log(bs, 1, "slice_deblocking_filter_disabled_flag"); if (!slice_deblocking_filter_disabled_flag) { gf_bs_read_se_log(bs, "slice_beta_offset_div2"); gf_bs_read_se_log(bs, "slice_tc_offset_div2"); } } if (pps->loop_filter_across_slices_enabled_flag && (slice_sao_luma_flag || slice_sao_chroma_flag || !slice_deblocking_filter_disabled_flag) ) { gf_bs_read_int_log(bs, 1, "slice_loop_filter_across_slices_enabled_flag"); } } //dependent slice segment else { //if not asked to parse full header, abort if (!hevc->full_slice_header_parse) return 0; } si->entry_point_start_bits = ((u32)gf_bs_get_position(bs) - 1) * 8 + gf_bs_get_bit_position(bs); if (pps->tiles_enabled_flag || pps->entropy_coding_sync_enabled_flag) { u32 num_entry_point_offsets = gf_bs_read_ue_log(bs, "num_entry_point_offsets"); if (num_entry_point_offsets > 0) { u32 offset = gf_bs_read_ue_log(bs, "offset") + 1; u32 segments = offset >> 4; s32 remain = (offset & 15); for (i = 0; i < num_entry_point_offsets; i++) { //u32 res = 0; for (j = 0; j < 
segments; j++) { //res <<= 16; /*res +=*/ gf_bs_read_int(bs, 16); } if (remain) { //res <<= remain; /* res += */ gf_bs_read_int(bs, remain); } // entry_point_offset = val + 1; // +1; // +1 to get the size } } } if (pps->slice_segment_header_extension_present_flag) { u32 size_ext = gf_bs_read_ue_log(bs, "size_ext"); while (size_ext) { gf_bs_read_int(bs, 8); size_ext--; } } si->header_size_bits = (gf_bs_get_position(bs) - 1) * 8 + gf_bs_get_bit_position(bs); // av_parser.c modified on 16 jan. 2019 if (gf_bs_read_int_log(bs, 1, "byte_align") == 0) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("Error parsing slice header: byte_align not found at end of header !\n")); } gf_bs_align(bs); si->payload_start_offset = (s32)gf_bs_get_position(bs); return 0; } static void gf_hevc_vvc_parse_sei(char *buffer, u32 nal_size, HEVCState *hevc, VVCState *vvc) { u32 ptype, psize, hdr; u64 start; GF_BitStream *bs; hdr = buffer[0]; if (((hdr & 0x7e) >> 1) != GF_HEVC_NALU_SEI_PREFIX) return; bs = gf_bs_new(buffer, nal_size, GF_BITSTREAM_READ); gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); gf_bs_read_int(bs, 16); /*parse SEI*/ while (gf_bs_available(bs)) { u32 consumed; ptype = 0; while (gf_bs_peek_bits(bs, 8, 0)==0xFF) { gf_bs_read_int(bs, 8); ptype += 255; } ptype += gf_bs_read_int(bs, 8); psize = 0; while (gf_bs_peek_bits(bs, 8, 0)==0xFF) { gf_bs_read_int(bs, 8); psize += 255; } psize += gf_bs_read_int(bs, 8); start = gf_bs_get_position(bs); if (start+psize >= nal_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[%s] SEI user message type %d size error (%d but %d remain), skipping SEI message\n", hevc ? "HEVC" : "VVC", ptype, psize, nal_size-start)); break; } switch (ptype) { case 4: /*user registered ITU-T T35*/ if (hevc) { avc_parse_itu_t_t35_sei(bs, &hevc->sei.dovi); } break; default: break; } gf_bs_align(bs); consumed = (u32) (gf_bs_get_position(bs) - start); psize-=consumed; gf_bs_skip_bytes(bs, psize); if (gf_bs_available(bs) <= 2) break; } gf_bs_del(bs); } void gf_hevc_parse_sei(char *buffer, u32 nal_size, HEVCState *hevc) { gf_hevc_vvc_parse_sei(buffer, nal_size, hevc, NULL); } static void hevc_compute_poc(HEVCSliceInfo *si) { u32 max_poc_lsb = 1 << (si->sps->log2_max_pic_order_cnt_lsb); /*POC reset for IDR frames, NOT for CRA*/ switch (si->nal_unit_type) { case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: si->poc_lsb_prev = 0; si->poc_msb_prev = 0; break; } if ((si->poc_lsb < si->poc_lsb_prev) && (si->poc_lsb_prev - si->poc_lsb >= max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev + max_poc_lsb; else if ((si->poc_lsb > si->poc_lsb_prev) && (si->poc_lsb - si->poc_lsb_prev > max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev - max_poc_lsb; else si->poc_msb = si->poc_msb_prev; switch (si->nal_unit_type) { case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: si->poc_msb = 0; break; } si->poc = si->poc_msb + si->poc_lsb; } static Bool hevc_parse_nal_header(GF_BitStream *bs, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { u32 val; val = gf_bs_read_int_log(bs, 1, "forbidden_zero"); if (val) return GF_FALSE; val = gf_bs_read_int_log(bs, 6, "nuh_type"); if (nal_unit_type) *nal_unit_type = val; val = gf_bs_read_int_log(bs, 6, "layerID"); if (layer_id) *layer_id = val; val = gf_bs_read_int_log(bs, 3, "temporalID"); if (!val) return GF_FALSE; val -= 1; if (temporal_id) *temporal_id = val; return GF_TRUE; } void hevc_profile_tier_level(GF_BitStream *bs, Bool ProfilePresentFlag, u8 MaxNumSubLayersMinus1, HEVC_ProfileTierLevel *ptl, u32 idx) { u32 i; 
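	/* Parses an HEVC profile_tier_level() structure: the general PTL fields are only
	   read when ProfilePresentFlag is set, followed by general level_idc and then, for
	   each of the MaxNumSubLayersMinus1 sub-layers, the optional sub-layer profile and
	   level information gated by the presence flags read just below. */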
if (ProfilePresentFlag) { ptl->profile_space = gf_bs_read_int_log_idx(bs, 2, "profile_space", idx); ptl->tier_flag = gf_bs_read_int_log_idx(bs, 1, "tier_flag", idx); ptl->profile_idc = gf_bs_read_int_log_idx(bs, 5, "profile_idc", idx); ptl->profile_compatibility_flag = gf_bs_read_int_log_idx(bs, 32, "profile_compatibility_flag", idx); ptl->general_progressive_source_flag = gf_bs_read_int_log_idx(bs, 1, "general_progressive_source_flag", idx); ptl->general_interlaced_source_flag = gf_bs_read_int_log_idx(bs, 1, "general_interlaced_source_flag", idx); ptl->general_non_packed_constraint_flag = gf_bs_read_int_log_idx(bs, 1, "general_non_packed_constraint_flag", idx); ptl->general_frame_only_constraint_flag = gf_bs_read_int_log_idx(bs, 1, "general_frame_only_constraint_flag", idx); ptl->general_reserved_44bits = gf_bs_read_long_int(bs, 44); } ptl->level_idc = gf_bs_read_int_log(bs, 8, "level_idc"); for (i = 0; i < MaxNumSubLayersMinus1; i++) { ptl->sub_ptl[i].profile_present_flag = gf_bs_read_int_log_idx2(bs, 1, "profile_present_flag", idx, i); ptl->sub_ptl[i].level_present_flag = gf_bs_read_int_log_idx2(bs, 1, "level_present_flag", idx, i); } if (MaxNumSubLayersMinus1 > 0) { for (i = MaxNumSubLayersMinus1; i < 8; i++) { /*reserved_zero_2bits*/gf_bs_read_int(bs, 2); } } for (i = 0; i < MaxNumSubLayersMinus1; i++) { if (ptl->sub_ptl[i].profile_present_flag) { ptl->sub_ptl[i].profile_space = gf_bs_read_int_log_idx2(bs, 2, "sublayer_profile_space", idx, i); ptl->sub_ptl[i].tier_flag = gf_bs_read_int_log_idx2(bs, 1, "sublayer_tier_flag", idx, i); ptl->sub_ptl[i].profile_idc = gf_bs_read_int_log_idx2(bs, 5, "sublayer_profile_idc", idx, i); ptl->sub_ptl[i].profile_compatibility_flag = gf_bs_read_int_log_idx2(bs, 32, "sublayer_profile_compatibility_flag", idx, i); /*ptl->sub_ptl[i].progressive_source_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_progressive_source_flag", idx, i); /*ptl->sub_ptl[i].interlaced_source_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_interlaced_source_flag", idx, i); /*ptl->sub_ptl[i].non_packed_constraint_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_non_packed_constraint_flag", idx, i); /*ptl->sub_ptl[i].frame_only_constraint_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_frame_only_constraint_flag", idx, i); /*ptl->sub_ptl[i].reserved_44bits =*/ gf_bs_read_long_int(bs, 44); } if (ptl->sub_ptl[i].level_present_flag) ptl->sub_ptl[i].level_idc = gf_bs_read_int_log_idx2(bs, 8, "sublayer_level_idc", idx, i); } } static u32 scalability_type_to_idx(HEVC_VPS *vps, u32 scalability_type) { u32 idx = 0, type; for (type = 0; type < scalability_type; type++) { idx += (vps->scalability_mask[type] ? 
1 : 0); } return idx; } #define LHVC_VIEW_ORDER_INDEX 1 #define LHVC_SCALABILITY_INDEX 2 static u32 lhvc_get_scalability_id(HEVC_VPS *vps, u32 layer_id_in_vps, u32 scalability_type) { u32 idx; if (!vps->scalability_mask[scalability_type]) return 0; idx = scalability_type_to_idx(vps, scalability_type); return vps->dimension_id[layer_id_in_vps][idx]; } static u32 lhvc_get_view_index(HEVC_VPS *vps, u32 id) { return lhvc_get_scalability_id(vps, vps->layer_id_in_vps[id], LHVC_VIEW_ORDER_INDEX); } static u32 lhvc_get_num_views(HEVC_VPS *vps) { u32 numViews = 1, i; for (i = 0; i < vps->max_layers; i++) { u32 layer_id = vps->layer_id_in_nuh[i]; if (i > 0 && (lhvc_get_view_index(vps, layer_id) != lhvc_get_scalability_id(vps, i - 1, LHVC_VIEW_ORDER_INDEX))) { numViews++; } } return numViews; } static void lhvc_parse_rep_format(HEVC_RepFormat *fmt, GF_BitStream *bs, u32 idx) { u8 chroma_bitdepth_present_flag; fmt->pic_width_luma_samples = gf_bs_read_int_log_idx(bs, 16, "pic_width_luma_samples", idx); fmt->pic_height_luma_samples = gf_bs_read_int_log_idx(bs, 16, "pic_height_luma_samples", idx); chroma_bitdepth_present_flag = gf_bs_read_int_log_idx(bs, 1, "chroma_bitdepth_present_flag", idx); if (chroma_bitdepth_present_flag) { fmt->chroma_format_idc = gf_bs_read_int_log_idx(bs, 2, "chroma_format_idc", idx); if (fmt->chroma_format_idc == 3) fmt->separate_colour_plane_flag = gf_bs_read_int_log_idx(bs, 1, "separate_colour_plane_flag", idx); fmt->bit_depth_luma = 8 + gf_bs_read_int_log_idx(bs, 4, "bit_depth_luma_minus8", idx); fmt->bit_depth_chroma = 8 + gf_bs_read_int_log_idx(bs, 4, "bit_depth_chroma_minus8", idx); } if (gf_bs_read_int_log_idx(bs, 1, "conformance_window_vps_flag", idx)) { gf_bs_read_ue_log_idx(bs, "conf_win_vps_left_offset", idx); gf_bs_read_ue_log_idx(bs, "conf_win_vps_right_offset", idx); gf_bs_read_ue_log_idx(bs, "conf_win_vps_top_offset", idx); gf_bs_read_ue_log_idx(bs, "conf_win_vps_bottom_offset", idx); } } static Bool hevc_parse_vps_extension(HEVC_VPS *vps, GF_BitStream *bs) { u8 splitting_flag, vps_nuh_layer_id_present_flag, view_id_len; u32 i, j, num_scalability_types, num_add_olss, num_add_layer_set, num_indepentdent_layers, nb_bits, default_output_layer_idc = 0; u8 dimension_id_len[16], dim_bit_offset[16]; u8 /*avc_base_layer_flag, */NumLayerSets, /*default_one_target_output_layer_flag, */rep_format_idx_present_flag, ols_ids_to_ls_idx; u8 layer_set_idx_for_ols_minus1[MAX_LHVC_LAYERS]; u8 nb_output_layers_in_output_layer_set[MAX_LHVC_LAYERS + 1]; u8 ols_highest_output_layer_id[MAX_LHVC_LAYERS + 1]; u32 k, d, r, p, iNuhLId, jNuhLId; u8 num_direct_ref_layers[64], num_pred_layers[64], num_layers_in_tree_partition[MAX_LHVC_LAYERS]; u8 dependency_flag[MAX_LHVC_LAYERS][MAX_LHVC_LAYERS], id_pred_layers[64][MAX_LHVC_LAYERS]; // u8 num_ref_layers[64]; // u8 tree_partition_layer_id[MAX_LHVC_LAYERS][MAX_LHVC_LAYERS]; // u8 id_ref_layers[64][MAX_LHVC_LAYERS]; // u8 id_direct_ref_layers[64][MAX_LHVC_LAYERS]; u8 layer_id_in_list_flag[64]; Bool OutputLayerFlag[MAX_LHVC_LAYERS][MAX_LHVC_LAYERS]; vps->vps_extension_found = 1; if ((vps->max_layers > 1) && vps->base_layer_internal_flag) hevc_profile_tier_level(bs, 0, vps->max_sub_layers - 1, &vps->ext_ptl[0], 0); splitting_flag = gf_bs_read_int_log(bs, 1, "splitting_flag"); num_scalability_types = 0; for (i = 0; i < 16; i++) { vps->scalability_mask[i] = gf_bs_read_int_log_idx(bs, 1, "scalability_mask", i); num_scalability_types += vps->scalability_mask[i]; } if (num_scalability_types >= 16) { num_scalability_types = 16; } dimension_id_len[0] = 0; 
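	/* Scalability dimension signalling: when splitting_flag is set, the per-layer
	   dimension_id values are not coded explicitly but derived from bit fields of
	   layer_id_in_nuh using the cumulative dim_bit_offset[] computed below; otherwise
	   each dimension_id is read from the bitstream with dimension_id_len[] bits per type. */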
for (i = 0; i < (num_scalability_types - splitting_flag); i++) { dimension_id_len[i] = 1 + gf_bs_read_int_log_idx(bs, 3, "dimension_id_len_minus1", i); } if (splitting_flag) { for (i = 0; i < num_scalability_types; i++) { dim_bit_offset[i] = 0; for (j = 0; j < i; j++) dim_bit_offset[i] += dimension_id_len[j]; } dimension_id_len[num_scalability_types - 1] = 1 + (5 - dim_bit_offset[num_scalability_types - 1]); dim_bit_offset[num_scalability_types] = 6; } vps_nuh_layer_id_present_flag = gf_bs_read_int_log(bs, 1, "vps_nuh_layer_id_present_flag"); vps->layer_id_in_nuh[0] = 0; vps->layer_id_in_vps[0] = 0; for (i = 1; i < vps->max_layers; i++) { if (vps_nuh_layer_id_present_flag) { vps->layer_id_in_nuh[i] = gf_bs_read_int_log_idx(bs, 6, "layer_id_in_nuh", i); } else { vps->layer_id_in_nuh[i] = i; } vps->layer_id_in_vps[vps->layer_id_in_nuh[i]] = i; if (!splitting_flag) { for (j = 0; j < num_scalability_types; j++) { vps->dimension_id[i][j] = gf_bs_read_int_log_idx2(bs, dimension_id_len[j], "dimension_id", i, j); } } } if (splitting_flag) { for (i = 0; i < vps->max_layers; i++) for (j = 0; j < num_scalability_types; j++) vps->dimension_id[i][j] = ((vps->layer_id_in_nuh[i] & ((1 << dim_bit_offset[j + 1]) - 1)) >> dim_bit_offset[j]); } else { for (j = 0; j < num_scalability_types; j++) vps->dimension_id[0][j] = 0; } view_id_len = gf_bs_read_int_log(bs, 4, "view_id_len"); if (view_id_len > 0) { for (i = 0; i < lhvc_get_num_views(vps); i++) { gf_bs_read_int_log_idx(bs, view_id_len, "view_id_val", i); } } for (i = 1; i < vps->max_layers; i++) { for (j = 0; j < i; j++) { vps->direct_dependency_flag[i][j] = gf_bs_read_int_log_idx(bs, 1, "direct_dependency_flag", i); } } //we do the test on MAX_LHVC_LAYERS and break in the loop to avoid a wrong GCC 4.8 warning on array bounds for (i = 0; i < MAX_LHVC_LAYERS; i++) { if (i >= vps->max_layers) break; for (j = 0; j < vps->max_layers; j++) { dependency_flag[i][j] = vps->direct_dependency_flag[i][j]; for (k = 0; k < i; k++) if (vps->direct_dependency_flag[i][k] && vps->direct_dependency_flag[k][j]) dependency_flag[i][j] = 1; } } for (i = 0; i < vps->max_layers; i++) { iNuhLId = vps->layer_id_in_nuh[i]; d = r = p = 0; for (j = 0; j < vps->max_layers; j++) { jNuhLId = vps->layer_id_in_nuh[j]; if (vps->direct_dependency_flag[i][j]) { // id_direct_ref_layers[iNuhLId][d] = jNuhLId; d++; } if (dependency_flag[i][j]) { // id_ref_layers[iNuhLId][r] = jNuhLId; r++; } if (dependency_flag[j][i]) id_pred_layers[iNuhLId][p++] = jNuhLId; } num_direct_ref_layers[iNuhLId] = d; // num_ref_layers[iNuhLId] = r; num_pred_layers[iNuhLId] = p; } memset(layer_id_in_list_flag, 0, 64 * sizeof(u8)); k = 0; //num_indepentdent_layers for (i = 0; i < vps->max_layers; i++) { iNuhLId = vps->layer_id_in_nuh[i]; if (!num_direct_ref_layers[iNuhLId]) { u32 h = 1; //tree_partition_layer_id[k][0] = iNuhLId; for (j = 0; j < num_pred_layers[iNuhLId]; j++) { u32 predLId = id_pred_layers[iNuhLId][j]; if (!layer_id_in_list_flag[predLId]) { //tree_partition_layer_id[k][h++] = predLId; layer_id_in_list_flag[predLId] = 1; } } num_layers_in_tree_partition[k++] = h; } } num_indepentdent_layers = k; num_add_layer_set = 0; if (num_indepentdent_layers > 1) num_add_layer_set = gf_bs_read_ue_log(bs, "num_add_layer_set"); for (i = 0; i < num_add_layer_set; i++) for (j = 1; j < num_indepentdent_layers; j++) { nb_bits = 1; while ((1 << nb_bits) < (num_layers_in_tree_partition[j] + 1)) nb_bits++; gf_bs_read_int_log_idx2(bs, nb_bits, "highest_layer_idx_plus1", i, j); } if (gf_bs_read_int_log(bs, 1, 
"vps_sub_layers_max_minus1_present_flag")) { for (i = 0; i < vps->max_layers; i++) { gf_bs_read_int_log_idx(bs, 3, "sub_layers_vps_max_minus1", i); } } if (gf_bs_read_int_log(bs, 1, "max_tid_ref_present_flag")) { for (i = 0; i < (vps->max_layers - 1); i++) { for (j = i + 1; j < vps->max_layers; j++) { if (vps->direct_dependency_flag[j][i]) gf_bs_read_int_log_idx2(bs, 3, "max_tid_il_ref_pics_plus1", i, j); } } } gf_bs_read_int_log(bs, 1, "default_ref_layers_active_flag"); vps->num_profile_tier_level = 1 + gf_bs_read_ue_log(bs, "num_profile_tier_level"); if (vps->num_profile_tier_level > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Wrong number of PTLs in VPS %d\n", vps->num_profile_tier_level)); vps->num_profile_tier_level = 1; return GF_FALSE; } for (i = vps->base_layer_internal_flag ? 2 : 1; i < vps->num_profile_tier_level; i++) { Bool vps_profile_present_flag = gf_bs_read_int_log_idx(bs, 1, "vps_profile_present_flag", i); hevc_profile_tier_level(bs, vps_profile_present_flag, vps->max_sub_layers - 1, &vps->ext_ptl[i - 1], i-1); } NumLayerSets = vps->num_layer_sets + num_add_layer_set; num_add_olss = 0; if (NumLayerSets > 1) { num_add_olss = gf_bs_read_ue_log(bs, "num_add_olss"); default_output_layer_idc = gf_bs_read_int_log(bs, 2, "default_output_layer_idc"); default_output_layer_idc = default_output_layer_idc < 2 ? default_output_layer_idc : 2; } vps->num_output_layer_sets = num_add_olss + NumLayerSets; layer_set_idx_for_ols_minus1[0] = 1; vps->output_layer_flag[0][0] = 1; for (i = 0; i < vps->num_output_layer_sets; i++) { if ((NumLayerSets > 2) && (i >= NumLayerSets)) { nb_bits = 1; while ((1 << nb_bits) < (NumLayerSets - 1)) nb_bits++; layer_set_idx_for_ols_minus1[i] = gf_bs_read_int_log_idx(bs, nb_bits, "layer_set_idx_for_ols_minus1", i); } else layer_set_idx_for_ols_minus1[i] = 0; ols_ids_to_ls_idx = i < NumLayerSets ? 
i : layer_set_idx_for_ols_minus1[i] + 1; if ((i > (vps->num_layer_sets - 1)) || (default_output_layer_idc == 2)) { for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) vps->output_layer_flag[i][j] = gf_bs_read_int_log_idx2(bs, 1, "output_layer_flag", i, j); } if ((default_output_layer_idc == 0) || (default_output_layer_idc == 1)) { for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { if ((default_output_layer_idc == 0) || (vps->LayerSetLayerIdList[i][j] == vps->LayerSetLayerIdListMax[i])) OutputLayerFlag[i][j] = GF_TRUE; else OutputLayerFlag[i][j] = GF_FALSE; } } for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { if (OutputLayerFlag[i][j]) { u32 curLayerID; vps->necessary_layers_flag[i][j] = GF_TRUE; curLayerID = vps->LayerSetLayerIdList[i][j]; for (k = 0; k < j; k++) { u32 refLayerId = vps->LayerSetLayerIdList[i][k]; if (dependency_flag[vps->layer_id_in_vps[curLayerID]][vps->layer_id_in_vps[refLayerId]]) vps->necessary_layers_flag[i][k] = GF_TRUE; } } } vps->num_necessary_layers[i] = 0; for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { if (vps->necessary_layers_flag[i][j]) vps->num_necessary_layers[i] += 1; } if (i == 0) { if (vps->base_layer_internal_flag) { if (vps->max_layers > 1) vps->profile_tier_level_idx[0][0] = 1; else vps->profile_tier_level_idx[0][0] = 0; } continue; } nb_bits = 1; while ((u32)(1 << nb_bits) < vps->num_profile_tier_level) nb_bits++; for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) if (vps->necessary_layers_flag[i][j] && vps->num_profile_tier_level) vps->profile_tier_level_idx[i][j] = gf_bs_read_int_log_idx2(bs, nb_bits, "profile_tier_level_idx", i, j); else vps->profile_tier_level_idx[i][j] = 0; nb_output_layers_in_output_layer_set[i] = 0; for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { nb_output_layers_in_output_layer_set[i] += OutputLayerFlag[i][j]; if (OutputLayerFlag[i][j]) { ols_highest_output_layer_id[i] = vps->LayerSetLayerIdList[ols_ids_to_ls_idx][j]; } } if (nb_output_layers_in_output_layer_set[i] == 1 && ols_highest_output_layer_id[i] > 0) vps->alt_output_layer_flag[i] = gf_bs_read_int_log_idx(bs, 1, "alt_output_layer_flag", i); } vps->num_rep_formats = 1 + gf_bs_read_ue_log(bs, "num_rep_formats_minus1"); if (vps->num_rep_formats > 16) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Wrong number of rep formats in VPS %d\n", vps->num_rep_formats)); vps->num_rep_formats = 0; return GF_FALSE; } for (i = 0; i < vps->num_rep_formats; i++) { lhvc_parse_rep_format(&vps->rep_formats[i], bs, i); } if (vps->num_rep_formats > 1) rep_format_idx_present_flag = gf_bs_read_int_log(bs, 1, "rep_format_idx_present_flag"); else rep_format_idx_present_flag = 0; vps->rep_format_idx[0] = 0; nb_bits = 1; while ((u32)(1 << nb_bits) < vps->num_rep_formats) nb_bits++; for (i = vps->base_layer_internal_flag ? 1 : 0; i < vps->max_layers; i++) { if (rep_format_idx_present_flag) { vps->rep_format_idx[i] = gf_bs_read_int_log_idx(bs, nb_bits, "rep_format_idx", i); } else { vps->rep_format_idx[i] = i < vps->num_rep_formats - 1 ? i : vps->num_rep_formats - 1; } } //TODO - we don't use the rest ... 
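	/* The remaining vps_extension() fields are left unparsed (see the TODO above);
	   parsing stops here and the extension is reported as successfully read. */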
return GF_TRUE; } static void sub_layer_hrd_parameters(GF_BitStream *bs, int subLayerId, u32 cpb_cnt, Bool sub_pic_hrd_params_present_flag, u32 idx1, u32 idx2) { u32 i; if (!gf_bs_available(bs)) return; for (i = 0; i <= cpb_cnt; i++) { gf_bs_read_ue_log_idx3(bs, "bit_rate_value_minus1", idx1, idx2, i); gf_bs_read_ue_log_idx3(bs, "cpb_size_value_minus1", idx1, idx2, i); if (sub_pic_hrd_params_present_flag) { gf_bs_read_ue_log_idx3(bs, "cpb_size_du_value_minus1", idx1, idx2, i); gf_bs_read_ue_log_idx3(bs, "bit_rate_du_value_minus1", idx1, idx2, i); } gf_bs_read_int_log_idx3(bs, 1, "cbr_flag", idx1, idx2, i); } } static void hevc_parse_hrd_parameters(GF_BitStream *bs, Bool commonInfPresentFlag, int maxNumSubLayersMinus1, u32 idx) { int i; Bool nal_hrd_parameters_present_flag = GF_FALSE; Bool vcl_hrd_parameters_present_flag = GF_FALSE; Bool sub_pic_hrd_params_present_flag = GF_FALSE; if (commonInfPresentFlag) { nal_hrd_parameters_present_flag = gf_bs_read_int_log_idx(bs, 1, "nal_hrd_parameters_present_flag", idx); vcl_hrd_parameters_present_flag = gf_bs_read_int_log_idx(bs, 1, "vcl_hrd_parameters_present_flag", idx); if (nal_hrd_parameters_present_flag || vcl_hrd_parameters_present_flag) { sub_pic_hrd_params_present_flag = gf_bs_read_int_log_idx(bs, 1, "sub_pic_hrd_params_present_flag", idx); if (sub_pic_hrd_params_present_flag) { gf_bs_read_int_log_idx(bs, 8, "tick_divisor_minus2", idx); gf_bs_read_int_log_idx(bs, 5, "du_cpb_removal_delay_increment_length_minus1", idx); gf_bs_read_int_log_idx(bs, 1, "sub_pic_cpb_params_in_pic_timing_sei_flag", idx); gf_bs_read_int_log_idx(bs, 5, "dpb_output_delay_du_length_minus1", idx); } gf_bs_read_int_log_idx(bs, 4, "bit_rate_scale", idx); gf_bs_read_int_log_idx(bs, 4, "cpb_size_scale", idx); if (sub_pic_hrd_params_present_flag) { gf_bs_read_int_log_idx(bs, 4, "cpb_size_du_scale", idx); } gf_bs_read_int_log_idx(bs, 5, "initial_cpb_removal_delay_length_minus1", idx); gf_bs_read_int_log_idx(bs, 5, "au_cpb_removal_delay_length_minus1", idx); gf_bs_read_int_log_idx(bs, 5, "dpb_output_delay_length_minus1", idx); } } for (i = 0; i <= maxNumSubLayersMinus1; i++) { Bool fixed_pic_rate_general_flag_i = gf_bs_read_int_log_idx(bs, 1, "fixed_pic_rate_general_flag", idx); Bool fixed_pic_rate_within_cvs_flag_i = GF_TRUE; Bool low_delay_hrd_flag_i = GF_FALSE; u32 cpb_cnt_minus1_i = 0; if (!fixed_pic_rate_general_flag_i) { fixed_pic_rate_within_cvs_flag_i = gf_bs_read_int_log_idx(bs, 1, "fixed_pic_rate_within_cvs_flag", idx); } if (fixed_pic_rate_within_cvs_flag_i) gf_bs_read_ue_log_idx(bs, "elemental_duration_in_tc_minus1", idx); else low_delay_hrd_flag_i = gf_bs_read_int_log_idx(bs, 1, "low_delay_hrd_flag", idx); if (!low_delay_hrd_flag_i) { cpb_cnt_minus1_i = gf_bs_read_ue_log_idx(bs, "cpb_cnt_minus1", idx); } if (nal_hrd_parameters_present_flag) { sub_layer_hrd_parameters(bs, i, cpb_cnt_minus1_i, sub_pic_hrd_params_present_flag, idx, i); } if (vcl_hrd_parameters_present_flag) { sub_layer_hrd_parameters(bs, i, cpb_cnt_minus1_i, sub_pic_hrd_params_present_flag, idx, i); } } } static s32 gf_hevc_read_vps_bs_internal(GF_BitStream *bs, HEVCState *hevc, Bool stop_at_vps_ext) { u8 vps_sub_layer_ordering_info_present_flag, vps_extension_flag; u32 i, j; s32 vps_id; HEVC_VPS *vps; u8 layer_id_included_flag[MAX_LHVC_LAYERS][64]; //nalu header already parsed vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if ((vps_id<0) || (vps_id >= 16)) return -1; vps = &hevc->vps[vps_id]; vps->bit_pos_vps_extensions = -1; if (!vps->state) { vps->id = vps_id; vps->state = 1; } 
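	/* VPS main body: base-layer flags, layer/sub-layer counts and the mandatory PTL are
	   parsed next; layer-set membership then fills LayerSetLayerIdList[] and
	   num_layers_in_id_list[], which the extension parser above relies on. */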
vps->base_layer_internal_flag = gf_bs_read_int_log(bs, 1, "base_layer_internal_flag"); vps->base_layer_available_flag = gf_bs_read_int_log(bs, 1, "base_layer_available_flag"); vps->max_layers = 1 + gf_bs_read_int_log(bs, 6, "max_layers_minus1"); if (vps->max_layers > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] sorry, %d layers in VPS but only %d supported\n", vps->max_layers, MAX_LHVC_LAYERS)); return -1; } vps->max_sub_layers = gf_bs_read_int_log(bs, 3, "max_sub_layers_minus1") + 1; vps->temporal_id_nesting = gf_bs_read_int_log(bs, 1, "temporal_id_nesting"); gf_bs_read_int_log(bs, 16, "vps_reserved_ffff_16bits"); hevc_profile_tier_level(bs, 1, vps->max_sub_layers - 1, &vps->ptl, 0); vps_sub_layer_ordering_info_present_flag = gf_bs_read_int_log(bs, 1, "vps_sub_layer_ordering_info_present_flag"); for (i = (vps_sub_layer_ordering_info_present_flag ? 0 : vps->max_sub_layers - 1); i < vps->max_sub_layers; i++) { gf_bs_read_ue_log_idx(bs, "vps_max_dec_pic_buffering_minus1", i); gf_bs_read_ue_log_idx(bs, "vps_max_num_reorder_pics", i); gf_bs_read_ue_log_idx(bs, "vps_max_latency_increase_plus1", i); } vps->max_layer_id = gf_bs_read_int_log(bs, 6, "max_layer_id"); if (vps->max_layer_id > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] VPS max layer ID %u but GPAC only supports %u\n", vps->max_layer_id, MAX_LHVC_LAYERS)); return -1; } vps->num_layer_sets = gf_bs_read_ue_log(bs, "num_layer_sets_minus1") + 1; if (vps->num_layer_sets > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Wrong number of layer sets in VPS %d\n", vps->num_layer_sets)); return -1; } for (i = 1; i < vps->num_layer_sets; i++) { for (j = 0; j <= vps->max_layer_id; j++) { layer_id_included_flag[i][j] = gf_bs_read_int_log_idx2(bs, 1, "layer_id_included_flag", i, j); } } vps->num_layers_in_id_list[0] = 1; for (i = 1; i < vps->num_layer_sets; i++) { u32 n, m; n = 0; for (m = 0; m <= vps->max_layer_id; m++) { if (layer_id_included_flag[i][m]) { vps->LayerSetLayerIdList[i][n++] = m; if (vps->LayerSetLayerIdListMax[i] < m) vps->LayerSetLayerIdListMax[i] = m; } } vps->num_layers_in_id_list[i] = n; } if (gf_bs_read_int_log(bs, 1, "vps_timing_info_present_flag")) { u32 vps_num_hrd_parameters; gf_bs_read_int_log(bs, 32, "vps_num_units_in_tick"); gf_bs_read_int_log(bs, 32, "vps_time_scale"); if (gf_bs_read_int_log(bs, 1, "vps_poc_proportional_to_timing_flag")) { gf_bs_read_ue_log(bs, "vps_num_ticks_poc_diff_one_minus1"); } vps_num_hrd_parameters = gf_bs_read_ue_log(bs, "vps_num_hrd_parameters"); for (i = 0; i < vps_num_hrd_parameters; i++) { Bool cprms_present_flag = GF_TRUE; gf_bs_read_ue_log_idx(bs, "hrd_layer_set_idx", i); if (i > 0) cprms_present_flag = gf_bs_read_int_log(bs, 1, "cprms_present_flag"); hevc_parse_hrd_parameters(bs, cprms_present_flag, vps->max_sub_layers - 1, i); } } if (stop_at_vps_ext) { return vps_id; } vps_extension_flag = gf_bs_read_int_log(bs, 1, "vps_extension_flag"); if (vps_extension_flag) { Bool res; gf_bs_align(bs); res = hevc_parse_vps_extension(vps, bs); if (res != GF_TRUE) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Failed to parse VPS extensions\n")); return -1; } if (gf_bs_read_int_log(bs, 1, "vps_extension2_flag")) { #if 0 while (gf_bs_available(bs)) { /*vps_extension_data_flag */ gf_bs_read_int(bs, 1); } #endif } } return vps_id; } GF_EXPORT s32 gf_hevc_read_vps_ex(u8 *data, u32 *size, HEVCState *hevc, Bool remove_extensions) { GF_BitStream *bs; char *data_without_emulation_bytes = NULL; u32 data_without_emulation_bytes_size = 0; s32 vps_id = -1; 
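	/* Illustrative usage sketch (not part of the original sources; caller-side names are
	   hypothetical): with remove_extensions set, the VPS NAL in `nal` is rewritten in
	   place without its extension and the size pointed to is updated; the parsed VPS id
	   (or -1 on error) is returned:

	       HEVCState hevc;
	       u32 nal_size = size_of_nal;
	       memset(&hevc, 0, sizeof(hevc));
	       s32 vid = gf_hevc_read_vps_ex(nal, &nal_size, &hevc, GF_TRUE);
	       if (vid < 0) { handle_error(); }
	*/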
/*still contains emulation bytes*/ data_without_emulation_bytes_size = remove_extensions ? gf_media_nalu_emulation_bytes_remove_count(data, (*size)) : 0; if (!data_without_emulation_bytes_size) { bs = gf_bs_new(data, (*size), GF_BITSTREAM_READ); gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); } //when removing VPS ext, we have to get the full buffer without emulation prevention bytes becuase we do a bit-by-bit copy of the vps else { data_without_emulation_bytes = gf_malloc((*size) * sizeof(char)); data_without_emulation_bytes_size = gf_media_nalu_remove_emulation_bytes(data, data_without_emulation_bytes, (*size)); bs = gf_bs_new(data_without_emulation_bytes, data_without_emulation_bytes_size, GF_BITSTREAM_READ); } if (!bs) goto exit; if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) goto exit; vps_id = gf_hevc_read_vps_bs_internal(bs, hevc, remove_extensions); if (vps_id < 0) goto exit; if (remove_extensions) { u8 *new_vps; u32 new_vps_size, emulation_bytes; u32 bit_pos = gf_bs_get_bit_offset(bs); GF_BitStream *w_bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_bs_seek(bs, 0); gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) ); gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) ); gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) ); gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) ); gf_bs_write_u16(w_bs, gf_bs_read_u16(bs) ); bit_pos -= 48; while (bit_pos) { u32 v = gf_bs_read_int(bs, 1); gf_bs_write_int(w_bs, v, 1); bit_pos--; } /*vps extension flag*/ gf_bs_write_int(w_bs, 0, 1); new_vps = NULL; gf_bs_get_content(w_bs, &new_vps, &new_vps_size); gf_bs_del(w_bs); emulation_bytes = gf_media_nalu_emulation_bytes_add_count(new_vps, new_vps_size); if (emulation_bytes + new_vps_size > *size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("Buffer too small to rewrite VPS - skipping rewrite\n")); } else { *size = gf_media_nalu_add_emulation_bytes(new_vps, data, new_vps_size); } if (new_vps) gf_free(new_vps); } exit: if (bs) gf_bs_del(bs); if (data_without_emulation_bytes) gf_free(data_without_emulation_bytes); return vps_id; } GF_EXPORT s32 gf_hevc_read_vps(u8 *data, u32 size, HEVCState *hevc) { return gf_hevc_read_vps_ex(data, &size, hevc, GF_FALSE); } GF_EXPORT s32 gf_hevc_read_vps_bs(GF_BitStream *bs, HEVCState *hevc) { if (!bs || !hevc) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) return -1; return gf_hevc_read_vps_bs_internal(bs, hevc, GF_FALSE); } static void hevc_scaling_list_data(GF_BitStream *bs) { u32 i, sizeId, matrixId; for (sizeId = 0; sizeId < 4; sizeId++) { for (matrixId = 0; matrixId < 6; matrixId += (sizeId == 3) ? 
3 : 1) { u32 idx = sizeId*100 + 10*matrixId; u32 scaling_list_pred_mode_flag_sizeId_matrixId = gf_bs_read_int_log_idx(bs, 1, "scaling_list_pred_mode_flag_sizeId_matrixId", idx); if (!scaling_list_pred_mode_flag_sizeId_matrixId) { gf_bs_read_ue_log_idx(bs, "scaling_list_pred_matrix_id_delta", idx); } else { //u32 nextCoef = 8; u32 coefNum = MIN(64, (1 << (4 + (sizeId << 1)))); if (sizeId > 1) { gf_bs_read_se_log_idx(bs, "scaling_list_dc_coef_minus8", idx); } for (i = 0; i < coefNum; i++) { gf_bs_read_se_log_idx2(bs, "scaling_list_delta_coef", idx, i); } } } } } static const struct { u32 w, h; } hevc_sar[17] = { { 0, 0 }, { 1, 1 }, { 12, 11 }, { 10, 11 }, { 16, 11 }, { 40, 33 }, { 24, 11 }, { 20, 11 }, { 32, 11 }, { 80, 33 }, { 18, 11 }, { 15, 11 }, { 64, 33 }, { 160,99 }, { 4,3}, { 3,2}, { 2,1} }; static s32 gf_hevc_read_sps_bs_internal(GF_BitStream *bs, HEVCState *hevc, u8 layer_id, u32 *vui_flag_pos) { s32 vps_id, sps_id = -1; u32 i, nb_CTUs, depth; HEVC_SPS *sps; HEVC_VPS *vps; HEVC_ProfileTierLevel ptl; Bool multiLayerExtSpsFlag; u8 sps_ext_or_max_sub_layers_minus1, max_sub_layers_minus1; if (vui_flag_pos) *vui_flag_pos = 0; //nalu header already parsed vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if ((vps_id<0) || (vps_id >= 16)) { return -1; } memset(&ptl, 0, sizeof(ptl)); max_sub_layers_minus1 = 0; sps_ext_or_max_sub_layers_minus1 = 0; if (layer_id == 0) max_sub_layers_minus1 = gf_bs_read_int_log(bs, 3, "max_sub_layers_minus1"); else sps_ext_or_max_sub_layers_minus1 = gf_bs_read_int_log(bs, 3, "sps_ext_or_max_sub_layers_minus1"); multiLayerExtSpsFlag = (layer_id != 0) && (sps_ext_or_max_sub_layers_minus1 == 7); if (!multiLayerExtSpsFlag) { gf_bs_read_int_log(bs, 1, "temporal_id_nesting_flag"); hevc_profile_tier_level(bs, 1, max_sub_layers_minus1, &ptl, 0); } sps_id = gf_bs_read_ue_log(bs, "sps_id"); if ((sps_id < 0) || (sps_id >= 16)) { return -1; } sps = &hevc->sps[sps_id]; if (!sps->state) { sps->state = 1; sps->id = sps_id; sps->vps_id = vps_id; } sps->ptl = ptl; vps = &hevc->vps[vps_id]; sps->max_sub_layers_minus1 = 0; sps->sps_ext_or_max_sub_layers_minus1 = 0; /* default values */ sps->colour_primaries = 2; sps->transfer_characteristic = 2; sps->matrix_coeffs = 2; //sps_rep_format_idx = 0; if (multiLayerExtSpsFlag) { sps->update_rep_format_flag = gf_bs_read_int_log(bs, 1, "update_rep_format_flag"); if (sps->update_rep_format_flag) { sps->rep_format_idx = gf_bs_read_int_log(bs, 8, "rep_format_idx"); } else { sps->rep_format_idx = vps->rep_format_idx[layer_id]; } sps->width = vps->rep_formats[sps->rep_format_idx].pic_width_luma_samples; sps->height = vps->rep_formats[sps->rep_format_idx].pic_height_luma_samples; sps->chroma_format_idc = vps->rep_formats[sps->rep_format_idx].chroma_format_idc; sps->bit_depth_luma = vps->rep_formats[sps->rep_format_idx].bit_depth_luma; sps->bit_depth_chroma = vps->rep_formats[sps->rep_format_idx].bit_depth_chroma; sps->separate_colour_plane_flag = vps->rep_formats[sps->rep_format_idx].separate_colour_plane_flag; //TODO this is crude ... 
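		/* Multi-layer extension SPS: picture width/height, chroma format and bit depths are
		   inherited from the VPS rep_format entry selected above, and the PTL is approximated
		   with the first extension PTL of the VPS (hence the TODO). */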
sps->ptl = vps->ext_ptl[0]; } else { sps->chroma_format_idc = gf_bs_read_ue_log(bs, "chroma_format_idc"); if (sps->chroma_format_idc == 3) sps->separate_colour_plane_flag = gf_bs_read_int_log(bs, 1, "separate_colour_plane_flag"); sps->width = gf_bs_read_ue_log(bs, "width"); sps->height = gf_bs_read_ue_log(bs, "height"); if ((sps->cw_flag = gf_bs_read_int_log(bs, 1, "conformance_window_flag"))) { u32 SubWidthC, SubHeightC; if (sps->chroma_format_idc == 1) { SubWidthC = SubHeightC = 2; } else if (sps->chroma_format_idc == 2) { SubWidthC = 2; SubHeightC = 1; } else { SubWidthC = SubHeightC = 1; } sps->cw_left = gf_bs_read_ue_log(bs, "conformance_window_left"); sps->cw_right = gf_bs_read_ue_log(bs, "conformance_window_right"); sps->cw_top = gf_bs_read_ue_log(bs, "conformance_window_top"); sps->cw_bottom = gf_bs_read_ue_log(bs, "conformance_window_bottom"); sps->width -= SubWidthC * (sps->cw_left + sps->cw_right); sps->height -= SubHeightC * (sps->cw_top + sps->cw_bottom); } sps->bit_depth_luma = 8 + gf_bs_read_ue_log(bs, "bit_depth_luma_minus8"); sps->bit_depth_chroma = 8 + gf_bs_read_ue_log(bs, "bit_depth_chroma_minus8"); } sps->log2_max_pic_order_cnt_lsb = 4 + gf_bs_read_ue_log(bs, "log2_max_pic_order_cnt_lsb_minus4"); if (!multiLayerExtSpsFlag) { sps->sub_layer_ordering_info_present_flag = gf_bs_read_int_log(bs, 1, "sub_layer_ordering_info_present_flag"); for (i = sps->sub_layer_ordering_info_present_flag ? 0 : sps->max_sub_layers_minus1; i <= sps->max_sub_layers_minus1; i++) { gf_bs_read_ue_log_idx(bs, "max_dec_pic_buffering", i); gf_bs_read_ue_log_idx(bs, "num_reorder_pics", i); gf_bs_read_ue_log_idx(bs, "max_latency_increase", i); } } sps->log2_min_luma_coding_block_size = 3 + gf_bs_read_ue_log(bs, "log2_min_luma_coding_block_size_minus3"); sps->log2_diff_max_min_luma_coding_block_size = gf_bs_read_ue_log(bs, "log2_diff_max_min_luma_coding_block_size"); sps->max_CU_width = (1 << (sps->log2_min_luma_coding_block_size + sps->log2_diff_max_min_luma_coding_block_size)); sps->max_CU_height = (1 << (sps->log2_min_luma_coding_block_size + sps->log2_diff_max_min_luma_coding_block_size)); sps->log2_min_transform_block_size = 2 + gf_bs_read_ue_log(bs, "log2_min_transform_block_size_minus2"); sps->log2_max_transform_block_size = sps->log2_min_transform_block_size + gf_bs_read_ue_log(bs, "log2_max_transform_block_size"); depth = 0; sps->max_transform_hierarchy_depth_inter = gf_bs_read_ue_log(bs, "max_transform_hierarchy_depth_inter"); sps->max_transform_hierarchy_depth_intra = gf_bs_read_ue_log(bs, "max_transform_hierarchy_depth_intra"); while ((u32)(sps->max_CU_width >> sps->log2_diff_max_min_luma_coding_block_size) > (u32)(1 << (sps->log2_min_transform_block_size + depth))) { depth++; } sps->max_CU_depth = sps->log2_diff_max_min_luma_coding_block_size + depth; nb_CTUs = ((sps->width + sps->max_CU_width - 1) / sps->max_CU_width) * ((sps->height + sps->max_CU_height - 1) / sps->max_CU_height); sps->bitsSliceSegmentAddress = 0; while (nb_CTUs > (u32)(1 << sps->bitsSliceSegmentAddress)) { sps->bitsSliceSegmentAddress++; } sps->scaling_list_enable_flag = gf_bs_read_int_log(bs, 1, "scaling_list_enable_flag"); if (sps->scaling_list_enable_flag) { sps->infer_scaling_list_flag = 0; sps->scaling_list_ref_layer_id = 0; if (multiLayerExtSpsFlag) { sps->infer_scaling_list_flag = gf_bs_read_int_log(bs, 1, "infer_scaling_list_flag"); } if (sps->infer_scaling_list_flag) { sps->scaling_list_ref_layer_id = gf_bs_read_int_log(bs, 6, "scaling_list_ref_layer_id"); } else { sps->scaling_list_data_present_flag = 
gf_bs_read_int_log(bs, 1, "scaling_list_data_present_flag"); if (sps->scaling_list_data_present_flag) { hevc_scaling_list_data(bs); } } } sps->asymmetric_motion_partitions_enabled_flag = gf_bs_read_int_log(bs, 1, "asymmetric_motion_partitions_enabled_flag"); sps->sample_adaptive_offset_enabled_flag = gf_bs_read_int_log(bs, 1, "sample_adaptive_offset_enabled_flag"); if ( (sps->pcm_enabled_flag = gf_bs_read_int_log(bs, 1, "pcm_enabled_flag")) ) { sps->pcm_sample_bit_depth_luma_minus1 = gf_bs_read_int_log(bs, 4, "pcm_sample_bit_depth_luma_minus1"); sps->pcm_sample_bit_depth_chroma_minus1 = gf_bs_read_int_log(bs, 4, "pcm_sample_bit_depth_chroma_minus1"); sps->log2_min_pcm_luma_coding_block_size_minus3 = gf_bs_read_ue_log(bs, "log2_min_pcm_luma_coding_block_size_minus3"); sps->log2_diff_max_min_pcm_luma_coding_block_size = gf_bs_read_ue_log(bs, "log2_diff_max_min_pcm_luma_coding_block_size"); sps->pcm_loop_filter_disable_flag = gf_bs_read_int_log(bs, 1, "pcm_loop_filter_disable_flag"); } sps->num_short_term_ref_pic_sets = gf_bs_read_ue_log(bs, "num_short_term_ref_pic_sets"); if (sps->num_short_term_ref_pic_sets > 64) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Invalid number of short term reference picture sets %d\n", sps->num_short_term_ref_pic_sets)); return -1; } for (i = 0; i < sps->num_short_term_ref_pic_sets; i++) { Bool ret = hevc_parse_short_term_ref_pic_set(bs, sps, i); /*cannot parse short_term_ref_pic_set, skip VUI parsing*/ if (!ret) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Invalid short_term_ref_pic_set\n")); return -1; } } sps->long_term_ref_pics_present_flag = gf_bs_read_int_log(bs, 1, "long_term_ref_pics_present_flag"); if (sps->long_term_ref_pics_present_flag) { sps->num_long_term_ref_pic_sps = gf_bs_read_ue_log(bs, "num_long_term_ref_pic_sps"); for (i = 0; i < sps->num_long_term_ref_pic_sps; i++) { gf_bs_read_int_log_idx(bs, sps->log2_max_pic_order_cnt_lsb, "lt_ref_pic_poc_lsb_sps", i); gf_bs_read_int_log_idx(bs, 1, "used_by_curr_pic_lt_sps_flag", i); } } sps->temporal_mvp_enable_flag = gf_bs_read_int_log(bs, 1, "temporal_mvp_enable_flag"); sps->strong_intra_smoothing_enable_flag = gf_bs_read_int_log(bs, 1, "strong_intra_smoothing_enable_flag"); if (vui_flag_pos) *vui_flag_pos = (u32)gf_bs_get_bit_offset(bs); if ((sps->vui_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_parameters_present_flag")) ) { sps->aspect_ratio_info_present_flag = gf_bs_read_int_log(bs, 1, "aspect_ratio_info_present_flag"); if (sps->aspect_ratio_info_present_flag) { sps->sar_idc = gf_bs_read_int_log(bs, 8, "aspect_ratio_idc"); if (sps->sar_idc == 255) { sps->sar_width = gf_bs_read_int_log(bs, 16, "aspect_ratio_width"); sps->sar_height = gf_bs_read_int_log(bs, 16, "aspect_ratio_height"); } else if (sps->sar_idc < 17) { sps->sar_width = hevc_sar[sps->sar_idc].w; sps->sar_height = hevc_sar[sps->sar_idc].h; } } if ((sps->overscan_info_present = gf_bs_read_int_log(bs, 1, "overscan_info_present"))) sps->overscan_appropriate = gf_bs_read_int_log(bs, 1, "overscan_appropriate"); sps->video_signal_type_present_flag = gf_bs_read_int_log(bs, 1, "video_signal_type_present_flag"); if (sps->video_signal_type_present_flag) { sps->video_format = gf_bs_read_int_log(bs, 3, "video_format"); sps->video_full_range_flag = gf_bs_read_int_log(bs, 1, "video_full_range_flag"); if ((sps->colour_description_present_flag = gf_bs_read_int_log(bs, 1, "colour_description_present_flag"))) { sps->colour_primaries = gf_bs_read_int_log(bs, 8, "colour_primaries"); sps->transfer_characteristic = gf_bs_read_int_log(bs, 8, 
"transfer_characteristic"); sps->matrix_coeffs = gf_bs_read_int_log(bs, 8, "matrix_coefficients"); } } if ((sps->chroma_loc_info_present_flag = gf_bs_read_int_log(bs, 1, "chroma_loc_info_present_flag"))) { sps->chroma_sample_loc_type_top_field = gf_bs_read_ue_log(bs, "chroma_sample_loc_type_top_field"); sps->chroma_sample_loc_type_bottom_field = gf_bs_read_ue_log(bs, "chroma_sample_loc_type_bottom_field"); } sps->neutra_chroma_indication_flag = gf_bs_read_int_log(bs, 1, "neutra_chroma_indication_flag"); sps->field_seq_flag = gf_bs_read_int_log(bs, 1, "field_seq_flag"); sps->frame_field_info_present_flag = gf_bs_read_int_log(bs, 1, "frame_field_info_present_flag"); if ((sps->default_display_window_flag = gf_bs_read_int_log(bs, 1, "default_display_window_flag"))) { sps->left_offset = gf_bs_read_ue_log(bs, "display_window_left_offset"); sps->right_offset = gf_bs_read_ue_log(bs, "display_window_right_offset"); sps->top_offset = gf_bs_read_ue_log(bs, "display_window_top_offset"); sps->bottom_offset = gf_bs_read_ue_log(bs, "display_window_bottom_offset"); } sps->has_timing_info = gf_bs_read_int_log(bs, 1, "has_timing_info"); if (sps->has_timing_info) { sps->num_units_in_tick = gf_bs_read_int_log(bs, 32, "num_units_in_tick"); sps->time_scale = gf_bs_read_int_log(bs, 32, "time_scale"); sps->poc_proportional_to_timing_flag = gf_bs_read_int_log(bs, 1, "poc_proportional_to_timing_flag"); if (sps->poc_proportional_to_timing_flag) sps->num_ticks_poc_diff_one_minus1 = gf_bs_read_ue_log(bs, "num_ticks_poc_diff_one_minus1"); if ((sps->hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "hrd_parameters_present_flag"))) { // GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[HEVC] HRD param parsing not implemented\n")); return sps_id; } } if (gf_bs_read_int_log(bs, 1, "bitstream_restriction_flag")) { gf_bs_read_int_log(bs, 1, "tiles_fixed_structure_flag"); gf_bs_read_int_log(bs, 1, "motion_vectors_over_pic_boundaries_flag"); gf_bs_read_int_log(bs, 1, "restricted_ref_pic_lists_flag"); gf_bs_read_ue_log(bs, "min_spatial_segmentation_idc"); gf_bs_read_ue_log(bs, "max_bytes_per_pic_denom"); gf_bs_read_ue_log(bs, "max_bits_per_min_cu_denom"); gf_bs_read_ue_log(bs, "log2_max_mv_length_horizontal"); gf_bs_read_ue_log(bs, "log2_max_mv_length_vertical"); } } if (gf_bs_read_int_log(bs, 1, "sps_extension_flag")) { #if 0 while (gf_bs_available(bs)) { /*sps_extension_data_flag */ gf_bs_read_int(bs, 1); } #endif } return sps_id; } GF_EXPORT s32 gf_hevc_read_sps_ex(char *data, u32 size, HEVCState *hevc, u32 *vui_flag_pos) { GF_BitStream *bs; s32 sps_id = -1; u8 layer_id; if (vui_flag_pos) *vui_flag_pos = 0; bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) goto exit; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, &layer_id)) goto exit; sps_id = gf_hevc_read_sps_bs_internal(bs, hevc, layer_id, vui_flag_pos); exit: if (bs) gf_bs_del(bs); return sps_id; } GF_EXPORT s32 gf_hevc_read_sps(u8 *data, u32 size, HEVCState *hevc) { return gf_hevc_read_sps_ex(data, size, hevc, NULL); } GF_EXPORT s32 gf_hevc_read_sps_bs(GF_BitStream *bs, HEVCState *hevc) { u8 layer_id; if (!bs || !hevc) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, &layer_id)) return -1; return gf_hevc_read_sps_bs_internal(bs, hevc, layer_id, NULL); } static s32 gf_hevc_read_pps_bs_internal(GF_BitStream *bs, HEVCState *hevc) { u32 i; s32 pps_id; HEVC_PPS *pps; //NAL header already read pps_id = gf_bs_read_ue_log(bs, "pps_id"); if ((pps_id < 0) || (pps_id >= 64)) { 
GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] wrong PPS ID %d in PPS\n", pps_id)); return -1; } pps = &hevc->pps[pps_id]; if (!pps->state) { pps->id = pps_id; pps->state = 1; } pps->sps_id = gf_bs_read_ue_log(bs, "sps_id"); if ((pps->sps_id<0) || (pps->sps_id >= 16)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] wrong SPS ID %d in PPS\n", pps->sps_id)); pps->sps_id=0; return -1; } hevc->sps_active_idx = pps->sps_id; /*set active sps*/ pps->dependent_slice_segments_enabled_flag = gf_bs_read_int_log(bs, 1, "dependent_slice_segments_enabled_flag"); pps->output_flag_present_flag = gf_bs_read_int_log(bs, 1, "output_flag_present_flag"); pps->num_extra_slice_header_bits = gf_bs_read_int_log(bs, 3, "num_extra_slice_header_bits"); pps->sign_data_hiding_flag = gf_bs_read_int_log(bs, 1, "sign_data_hiding_flag"); pps->cabac_init_present_flag = gf_bs_read_int_log(bs, 1, "cabac_init_present_flag"); pps->num_ref_idx_l0_default_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l0_default_active"); pps->num_ref_idx_l1_default_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l1_default_active"); pps->pic_init_qp_minus26 = gf_bs_read_se_log(bs, "pic_init_qp_minus26"); pps->constrained_intra_pred_flag = gf_bs_read_int_log(bs, 1, "constrained_intra_pred_flag"); pps->transform_skip_enabled_flag = gf_bs_read_int_log(bs, 1, "transform_skip_enabled_flag"); if ((pps->cu_qp_delta_enabled_flag = gf_bs_read_int_log(bs, 1, "cu_qp_delta_enabled_flag"))) pps->diff_cu_qp_delta_depth = gf_bs_read_ue_log(bs, "diff_cu_qp_delta_depth"); pps->pic_cb_qp_offset = gf_bs_read_se_log(bs, "pic_cb_qp_offset"); pps->pic_cr_qp_offset = gf_bs_read_se_log(bs, "pic_cr_qp_offset"); pps->slice_chroma_qp_offsets_present_flag = gf_bs_read_int_log(bs, 1, "slice_chroma_qp_offsets_present_flag"); pps->weighted_pred_flag = gf_bs_read_int_log(bs, 1, "weighted_pred_flag"); pps->weighted_bipred_flag = gf_bs_read_int_log(bs, 1, "weighted_bipred_flag"); pps->transquant_bypass_enable_flag = gf_bs_read_int_log(bs, 1, "transquant_bypass_enable_flag"); pps->tiles_enabled_flag = gf_bs_read_int_log(bs, 1, "tiles_enabled_flag"); pps->entropy_coding_sync_enabled_flag = gf_bs_read_int_log(bs, 1, "entropy_coding_sync_enabled_flag"); if (pps->tiles_enabled_flag) { pps->num_tile_columns = 1 + gf_bs_read_ue_log(bs, "num_tile_columns_minus1"); pps->num_tile_rows = 1 + gf_bs_read_ue_log(bs, "num_tile_rows_minus1"); pps->uniform_spacing_flag = gf_bs_read_int_log(bs, 1, "uniform_spacing_flag"); if (!pps->uniform_spacing_flag) { for (i = 0; i < pps->num_tile_columns - 1; i++) { pps->column_width[i] = 1 + gf_bs_read_ue_log_idx(bs, "column_width_minus1", i); } for (i = 0; i < pps->num_tile_rows - 1; i++) { pps->row_height[i] = 1 + gf_bs_read_ue_log_idx(bs, "row_height_minus1", i); } } pps->loop_filter_across_tiles_enabled_flag = gf_bs_read_int_log(bs, 1, "loop_filter_across_tiles_enabled_flag"); } pps->loop_filter_across_slices_enabled_flag = gf_bs_read_int_log(bs, 1, "loop_filter_across_slices_enabled_flag"); if ((pps->deblocking_filter_control_present_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_control_present_flag"))) { pps->deblocking_filter_override_enabled_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_override_enabled_flag"); if (! 
(pps->pic_disable_deblocking_filter_flag = gf_bs_read_int_log(bs, 1, "pic_disable_deblocking_filter_flag"))) { pps->beta_offset_div2 = gf_bs_read_se_log(bs, "beta_offset_div2"); pps->tc_offset_div2 = gf_bs_read_se_log(bs, "tc_offset_div2"); } } if ((pps->pic_scaling_list_data_present_flag = gf_bs_read_int_log(bs, 1, "pic_scaling_list_data_present_flag"))) { hevc_scaling_list_data(bs); } pps->lists_modification_present_flag = gf_bs_read_int_log(bs, 1, "lists_modification_present_flag"); pps->log2_parallel_merge_level_minus2 = gf_bs_read_ue_log(bs, "log2_parallel_merge_level_minus2"); pps->slice_segment_header_extension_present_flag = gf_bs_read_int_log(bs, 1, "slice_segment_header_extension_present_flag"); if (gf_bs_read_int_log(bs, 1, "pps_extension_flag")) { #if 0 while (gf_bs_available(bs)) { /*pps_extension_data_flag */ gf_bs_read_int(bs, 1); } #endif } return pps_id; } GF_EXPORT s32 gf_hevc_read_pps(u8 *data, u32 size, HEVCState *hevc) { GF_BitStream *bs; s32 pps_id = -1; bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) goto exit; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) goto exit; pps_id = gf_hevc_read_pps_bs_internal(bs, hevc); exit: if (bs) gf_bs_del(bs); return pps_id; } GF_EXPORT s32 gf_hevc_read_pps_bs(GF_BitStream *bs, HEVCState *hevc) { if (!bs || !hevc) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) return -1; return gf_hevc_read_pps_bs_internal(bs, hevc); } GF_EXPORT s32 gf_hevc_parse_nalu_bs(GF_BitStream *bs, HEVCState *hevc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { Bool is_slice = GF_FALSE; s32 ret = -1; HEVCSliceInfo n_state; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); memcpy(&n_state, &hevc->s_info, sizeof(HEVCSliceInfo)); if (!hevc_parse_nal_header(bs, nal_unit_type, temporal_id, layer_id)) return -1; n_state.nal_unit_type = *nal_unit_type; switch (n_state.nal_unit_type) { case GF_HEVC_NALU_ACCESS_UNIT: case GF_HEVC_NALU_END_OF_SEQ: case GF_HEVC_NALU_END_OF_STREAM: ret = 1; break; /*slice_segment_layer_rbsp*/ case GF_HEVC_NALU_SLICE_TRAIL_N: case GF_HEVC_NALU_SLICE_TRAIL_R: case GF_HEVC_NALU_SLICE_TSA_N: case GF_HEVC_NALU_SLICE_TSA_R: case GF_HEVC_NALU_SLICE_STSA_N: case GF_HEVC_NALU_SLICE_STSA_R: case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: case GF_HEVC_NALU_SLICE_CRA: case GF_HEVC_NALU_SLICE_RADL_N: case GF_HEVC_NALU_SLICE_RADL_R: case GF_HEVC_NALU_SLICE_RASL_N: case GF_HEVC_NALU_SLICE_RASL_R: is_slice = GF_TRUE; /* slice - read the info and compare.*/ ret = hevc_parse_slice_segment(bs, hevc, &n_state); if (ret < 0) return ret; hevc_compute_poc(&n_state); ret = 0; if (hevc->s_info.poc != n_state.poc) { ret = 1; break; } if (n_state.first_slice_segment_in_pic_flag) { if (!(*layer_id) || (n_state.prev_layer_id_plus1 && ((*layer_id) <= n_state.prev_layer_id_plus1 - 1))) { ret = 1; break; } } break; case GF_HEVC_NALU_SEQ_PARAM: hevc->last_parsed_sps_id = gf_hevc_read_sps_bs_internal(bs, hevc, *layer_id, NULL); ret = (hevc->last_parsed_sps_id>=0) ? 0 : -1; break; case GF_HEVC_NALU_PIC_PARAM: hevc->last_parsed_pps_id = gf_hevc_read_pps_bs_internal(bs, hevc); ret = (hevc->last_parsed_pps_id>=0) ? 0 : -1; break; case GF_HEVC_NALU_VID_PARAM: hevc->last_parsed_vps_id = gf_hevc_read_vps_bs_internal(bs, hevc, GF_FALSE); ret = (hevc->last_parsed_vps_id>=0) ? 
0 : -1; break; default: ret = 0; break; } /* save _prev values */ if ((ret>0) && hevc->s_info.sps) { n_state.frame_num_offset_prev = hevc->s_info.frame_num_offset; n_state.frame_num_prev = hevc->s_info.frame_num; n_state.poc_lsb_prev = hevc->s_info.poc_lsb; n_state.poc_msb_prev = hevc->s_info.poc_msb; if (is_slice) n_state.prev_layer_id_plus1 = *layer_id + 1; } if (is_slice) hevc_compute_poc(&n_state); memcpy(&hevc->s_info, &n_state, sizeof(HEVCSliceInfo)); return ret; } GF_EXPORT s32 gf_hevc_parse_nalu(u8 *data, u32 size, HEVCState *hevc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { GF_BitStream *bs = NULL; s32 ret = -1; if (!hevc) { if (nal_unit_type) (*nal_unit_type) = (data[0] & 0x7E) >> 1; if (layer_id) { u8 id = data[0] & 1; id <<= 5; id |= (data[1] >> 3) & 0x1F; (*layer_id) = id; } if (temporal_id) (*temporal_id) = (data[1] & 0x7); return -1; } bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); ret = gf_hevc_parse_nalu_bs(bs, hevc, nal_unit_type, temporal_id, layer_id); gf_bs_del(bs); return ret; } GF_EXPORT GF_Err gf_hevc_change_vui(GF_HEVCConfig *hvcc, GF_VUIInfo *vui_info) { GF_BitStream *orig, *mod; HEVCState hevc; u32 i, bit_offset, flag; s32 idx; GF_NALUFFParamArray *spss; GF_NALUFFParam *slc; orig = NULL; memset(&hevc, 0, sizeof(HEVCState)); hevc.sps_active_idx = -1; i = 0; spss = NULL; while ((spss = (GF_NALUFFParamArray *)gf_list_enum(hvcc->param_array, &i))) { if (spss->type == GF_HEVC_NALU_SEQ_PARAM) break; spss = NULL; } if (!spss) return GF_NON_COMPLIANT_BITSTREAM; i = 0; while ((slc = (GF_NALUFFParam *)gf_list_enum(spss->nalus, &i))) { u8 *no_emulation_buf; u32 no_emulation_buf_size, emulation_bytes; /*SPS may still contains emulation bytes*/ no_emulation_buf = gf_malloc((slc->size) * sizeof(char)); no_emulation_buf_size = gf_media_nalu_remove_emulation_bytes(slc->data, no_emulation_buf, slc->size); idx = gf_hevc_read_sps_ex(no_emulation_buf, no_emulation_buf_size, &hevc, &bit_offset); if (idx < 0) { if (orig) gf_bs_del(orig); gf_free(no_emulation_buf); continue; } orig = gf_bs_new(no_emulation_buf, no_emulation_buf_size, GF_BITSTREAM_READ); mod = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); /*copy over till vui flag*/ assert(bit_offset >= 0); while (bit_offset) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); bit_offset--; } avc_hevc_rewrite_vui(vui_info, orig, mod); /*finally copy over remaining*/ while (gf_bs_bits_available(orig)) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); } gf_bs_del(orig); orig = NULL; gf_free(no_emulation_buf); /*set anti-emulation*/ gf_bs_get_content(mod, &no_emulation_buf, &no_emulation_buf_size); emulation_bytes = gf_media_nalu_emulation_bytes_add_count(no_emulation_buf, no_emulation_buf_size); if (no_emulation_buf_size + emulation_bytes > slc->size) slc->data = (char*)gf_realloc(slc->data, no_emulation_buf_size + emulation_bytes); slc->size = gf_media_nalu_add_emulation_bytes(no_emulation_buf, slc->data, no_emulation_buf_size); gf_bs_del(mod); gf_free(no_emulation_buf); } return GF_OK; } GF_EXPORT GF_Err gf_hevc_change_par(GF_HEVCConfig *hvcc, s32 ar_n, s32 ar_d) { GF_VUIInfo vuii; memset(&vuii, 0, sizeof(GF_VUIInfo)); vuii.ar_num = ar_n; vuii.ar_den = ar_d; vuii.fullrange = -1; vuii.video_format = -1; vuii.color_prim = -1; vuii.color_tfc = -1; vuii.color_matrix = -1; return gf_hevc_change_vui(hvcc, &vuii); } GF_EXPORT GF_Err gf_hevc_change_color(GF_HEVCConfig *hvcc, s32 fullrange, s32 vidformat, s32 colorprim, s32 transfer, s32 
colmatrix) { GF_VUIInfo vuii; memset(&vuii, 0, sizeof(GF_VUIInfo)); vuii.ar_num = -1; vuii.ar_den = -1; vuii.fullrange = fullrange; vuii.video_format = vidformat; vuii.color_prim = colorprim; vuii.color_tfc = transfer; vuii.color_matrix = colmatrix; return gf_hevc_change_vui(hvcc, &vuii); } GF_EXPORT GF_Err gf_hevc_get_sps_info_with_state(HEVCState *hevc, u8 *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d) { s32 idx; idx = gf_hevc_read_sps(sps_data, sps_size, hevc); if (idx < 0) { return GF_NON_COMPLIANT_BITSTREAM; } if (sps_id) *sps_id = idx; if (width) *width = hevc->sps[idx].width; if (height) *height = hevc->sps[idx].height; if (par_n) *par_n = hevc->sps[idx].aspect_ratio_info_present_flag ? hevc->sps[idx].sar_width : (u32)-1; if (par_d) *par_d = hevc->sps[idx].aspect_ratio_info_present_flag ? hevc->sps[idx].sar_height : (u32)-1; return GF_OK; } GF_EXPORT GF_Err gf_hevc_get_sps_info(u8 *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d) { HEVCState hevc; memset(&hevc, 0, sizeof(HEVCState)); hevc.sps_active_idx = -1; return gf_hevc_get_sps_info_with_state(&hevc, sps_data, sps_size, sps_id, width, height, par_n, par_d); } #endif //GPAC_DISABLE_HEVC static u32 AC3_FindSyncCode(u8 *buf, u32 buflen) { u32 end = buflen - 6; u32 offset = 0; while (offset <= end) { if (buf[offset] == 0x0b && buf[offset + 1] == 0x77) { return offset; } offset++; } return buflen; } static Bool AC3_FindSyncCodeBS(GF_BitStream *bs) { u8 b1; u64 pos = gf_bs_get_position(bs); u64 end = gf_bs_get_size(bs); pos += 1; b1 = gf_bs_read_u8(bs); while (pos + 1 <= end) { u8 b2 = gf_bs_read_u8(bs); if ((b1 == 0x0b) && (b2 == 0x77)) { gf_bs_seek(bs, pos - 1); return GF_TRUE; } pos++; b1 = b2; } return GF_FALSE; } static const u32 ac3_sizecod_to_bitrate[] = { 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000, 192000, 224000, 256000, 320000, 384000, 448000, 512000, 576000, 640000 }; static const u32 ac3_sizecod2_to_framesize[] = { 96, 120, 144, 168, 192, 240, 288, 336, 384, 480, 576, 672, 768, 960, 1152, 1344, 1536, 1728, 1920 }; static const u32 ac3_sizecod1_to_framesize[] = { 69, 87, 104, 121, 139, 174, 208, 243, 278, 348, 417, 487, 557, 696, 835, 975, 1114, 1253, 1393 }; static const u32 ac3_sizecod0_to_framesize[] = { 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 448, 512, 640, 768, 896, 1024, 1152, 1280 }; static const u32 ac3_mod_to_chans[] = { 2, 1, 2, 3, 3, 4, 4, 5 }; GF_EXPORT u32 gf_ac3_get_channels(u32 acmod) { u32 nb_ch; nb_ch = ac3_mod_to_chans[acmod]; return nb_ch; } GF_EXPORT u32 gf_ac3_get_bitrate(u32 brcode) { return ac3_sizecod_to_bitrate[brcode]; } Bool gf_ac3_parser(u8 *buf, u32 buflen, u32 *pos, GF_AC3Config *hdr, Bool full_parse) { GF_BitStream *bs; Bool ret; if (buflen < 6) return GF_FALSE; (*pos) = AC3_FindSyncCode(buf, buflen); if (*pos >= buflen) return GF_FALSE; bs = gf_bs_new((const char*)(buf + *pos), buflen, GF_BITSTREAM_READ); ret = gf_ac3_parser_bs(bs, hdr, full_parse); gf_bs_del(bs); return ret; } GF_EXPORT Bool gf_ac3_parser_bs(GF_BitStream *bs, GF_AC3Config *hdr, Bool full_parse) { u32 fscod, frmsizecod, bsid, ac3_mod, freq, framesize, bsmod, syncword; u64 pos; if (!hdr || (gf_bs_available(bs) < 6)) return GF_FALSE; if (!AC3_FindSyncCodeBS(bs)) return GF_FALSE; pos = gf_bs_get_position(bs); syncword = gf_bs_read_u16(bs); if (syncword != 0x0B77) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AC3] Wrong sync word detected (0x%X - expecting 0x0B77).\n", syncword)); return GF_FALSE; } 
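/* Descriptive note (editorial comment, content taken from the code that follows):
   past the 0x0B77 syncword, the A/52 syncinfo fields (crc1, fscod, frmsizecod) and
   the start of the BSI (bsid, bsmod, acmod) are read next; frmsizecod/2 is used to
   index the ac3_sizecod_to_bitrate and ac3_sizecod*_to_framesize tables defined above. */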
gf_bs_read_int_log(bs, 16, "crc1"); fscod = gf_bs_read_int_log(bs, 2, "fscod"); frmsizecod = gf_bs_read_int_log(bs, 6, "frmsizecod"); bsid = gf_bs_read_int_log(bs, 5, "bsid"); bsmod = gf_bs_read_int_log(bs, 3, "bsmod"); ac3_mod = gf_bs_read_int_log(bs, 3, "ac3_mod"); if (frmsizecod >= 2 * sizeof(ac3_sizecod_to_bitrate) / sizeof(u32)) return GF_FALSE; hdr->bitrate = ac3_sizecod_to_bitrate[frmsizecod / 2]; if (bsid > 8) hdr->bitrate = hdr->bitrate >> (bsid - 8); switch (fscod) { case 0: if (frmsizecod >= 2 * sizeof(ac3_sizecod0_to_framesize) / sizeof(u32)) return GF_FALSE; freq = 48000; framesize = ac3_sizecod0_to_framesize[frmsizecod / 2] * 2; break; case 1: if (frmsizecod >= 2 * sizeof(ac3_sizecod1_to_framesize) / sizeof(u32)) return GF_FALSE; freq = 44100; framesize = (ac3_sizecod1_to_framesize[frmsizecod / 2] + (frmsizecod & 0x1)) * 2; break; case 2: if (frmsizecod >= 2 * sizeof(ac3_sizecod2_to_framesize) / sizeof(u32)) return GF_FALSE; freq = 32000; framesize = ac3_sizecod2_to_framesize[frmsizecod / 2] * 2; break; default: return GF_FALSE; } hdr->sample_rate = freq; hdr->framesize = framesize; if (full_parse) { hdr->streams[0].bsid = bsid; hdr->streams[0].bsmod = bsmod; hdr->streams[0].acmod = ac3_mod; hdr->streams[0].lfon = 0; hdr->streams[0].fscod = fscod; hdr->brcode = frmsizecod / 2; } if (ac3_mod >= 2 * sizeof(ac3_mod_to_chans) / sizeof(u32)) return GF_FALSE; hdr->channels = ac3_mod_to_chans[ac3_mod]; if ((ac3_mod & 0x1) && (ac3_mod != 1)) gf_bs_read_int_log(bs, 2, "cmixlev"); if (ac3_mod & 0x4) gf_bs_read_int_log(bs, 2, "surmixlev"); if (ac3_mod == 0x2) gf_bs_read_int_log(bs, 2, "dsurmod"); if (gf_bs_read_int_log(bs, 1, "lfeon")) { hdr->channels += 1; hdr->streams[0].lfon = 1; } gf_bs_seek(bs, pos); return GF_TRUE; } GF_EXPORT Bool gf_eac3_parser_bs(GF_BitStream *bs, GF_AC3Config *hdr, Bool full_parse) { u32 fscod, bsid, ac3_mod, freq, framesize, syncword, substreamid, lfon, channels, numblkscod, strmtyp, frmsiz; u64 pos; u16 chanmap; static u32 numblks[4] = {1, 2, 3, 6}; if (!hdr || (gf_bs_available(bs) < 6)) return GF_FALSE; if (!AC3_FindSyncCodeBS(bs)) return GF_FALSE; pos = gf_bs_get_position(bs); framesize = 0; numblkscod = 0; memset(hdr, 0, sizeof(GF_AC3Config)); block: syncword = gf_bs_read_u16(bs); if (syncword != 0x0B77) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[E-AC3] Wrong sync word detected (0x%X - expecting 0x0B77).\n", syncword)); return GF_FALSE; } strmtyp = gf_bs_read_int_log(bs, 2, "strmtyp"); substreamid = gf_bs_read_int_log(bs, 3, "substreamid"); //next main (independent) AU, done with this frame if ((strmtyp!=0x1) && ((hdr->substreams >> substreamid) & 0x1)) { hdr->framesize = framesize; gf_bs_seek(bs, pos); return GF_TRUE; } frmsiz = gf_bs_read_int_log(bs, 11, "frmsiz"); framesize += 2 * (1 + frmsiz); fscod = gf_bs_read_int_log(bs, 2, "fscod"); if (fscod == 0x3) { fscod = gf_bs_read_int_log(bs, 2, "fscod2"); numblkscod += 6; } else { numblkscod += gf_bs_read_int_log(bs, 2, "numblkscod"); } assert(numblkscod <= 9); if ((hdr->substreams >> substreamid) & 0x1) { //we still have sync frames following if (substreamid) { if (gf_bs_seek(bs, pos + framesize) != GF_OK) { gf_bs_seek(bs, pos); return GF_FALSE; } if ((gf_bs_available(bs) < 6) || !AC3_FindSyncCodeBS(bs)) { gf_bs_seek(bs, pos); return GF_FALSE; } goto block; } } hdr->substreams |= (1 << substreamid); switch (fscod) { case 0: freq = 48000; break; case 1: freq = 44100; break; case 2: freq = 32000; break; default: return GF_FALSE; } ac3_mod = gf_bs_read_int_log(bs, 3, "ac3_mod"); lfon = gf_bs_read_int_log(bs, 
1, "lfon"); bsid = gf_bs_read_int_log(bs, 5, "bsid"); if (!substreamid && (bsid != 16/*E-AC3*/)) return GF_FALSE; gf_bs_read_int_log(bs, 5, "dialnorm"); if (gf_bs_read_int_log(bs, 1, "compre")) { gf_bs_read_int_log(bs, 8, "compr"); } if (ac3_mod==0) { gf_bs_read_int_log(bs, 5, "dialnorm2"); if (gf_bs_read_int_log(bs, 1, "compr2e")) { gf_bs_read_int_log(bs, 8, "compr2"); } } chanmap = 0; if (strmtyp==0x1) { if (gf_bs_read_int_log(bs, 1, "chanmape")) { chanmap = gf_bs_read_int_log(bs, 16, "chanmap"); } } channels = ac3_mod_to_chans[ac3_mod]; if (lfon) channels += 1; hdr->bitrate = 0; hdr->sample_rate = freq; hdr->framesize = framesize; if (strmtyp != 1) { hdr->channels = channels; hdr->streams[substreamid].lfon = lfon; if (full_parse) { hdr->streams[substreamid].bsid = bsid; hdr->streams[substreamid].bsmod = 0; hdr->streams[substreamid].acmod = ac3_mod; hdr->streams[substreamid].fscod = fscod; hdr->brcode = 0; } hdr->nb_streams++; //not clear if this is only for the independent streams hdr->brcode += ((frmsiz+1) * freq) / (numblks[numblkscod]*16) / 1000; if (lfon) hdr->channels += 1; } else { hdr->streams[substreamid].nb_dep_sub = substreamid; hdr->streams[substreamid].chan_loc |= chanmap; } if (numblkscod < 6) { //we need 6 blocks to make a sample if (gf_bs_seek(bs, pos + framesize) != GF_OK) { gf_bs_seek(bs, pos); return GF_FALSE; } if ((gf_bs_available(bs) < 6) || !AC3_FindSyncCodeBS(bs)) return GF_FALSE; goto block; } gf_bs_seek(bs, pos); return GF_TRUE; } #endif /*GPAC_DISABLE_AV_PARSERS*/ u32 gf_id3_read_size(GF_BitStream *bs) { u32 size = 0; gf_bs_read_int(bs, 1); size |= gf_bs_read_int(bs, 7); size<<=7; gf_bs_read_int(bs, 1); size |= gf_bs_read_int(bs, 7); size<<=7; gf_bs_read_int(bs, 1); size |= gf_bs_read_int(bs, 7); size<<=7; gf_bs_read_int(bs, 1); size |= gf_bs_read_int(bs, 7); return size; } #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined (GPAC_DISABLE_OGG) /* Vorbis parser */ static u32 vorbis_book_maptype1_quantvals(u32 entries, u32 dim) { u32 vals = (u32)floor(pow(entries, 1.0 / dim)); while (1) { u32 acc = 1; u32 acc1 = 1; u32 i; for (i = 0; i < dim; i++) { acc *= vals; acc1 *= vals + 1; } if (acc <= entries && acc1 > entries) return (vals); else { if (acc > entries) vals--; else vals++; } } } static u32 ilog(u32 v, Bool dec) { u32 ret = 0; if (dec && v) --v; while (v) { ret++; v >>= 1; } return (ret); } static u32 icount(u32 v) { u32 ret = 0; while (v) { ret += v & 1; v >>= 1; } return(ret); } GF_EXPORT Bool gf_vorbis_parse_header(GF_VorbisParser *vp, u8 *data, u32 data_len) { u32 pack_type, i, j, k, times, nb_part, nb_books, nb_modes; u32 l; char szNAME[8]; oggpack_buffer opb; oggpack_readinit(&opb, (u8*)data, data_len); pack_type = oggpack_read(&opb, 8); i = 0; while (i < 6) { szNAME[i] = oggpack_read(&opb, 8); i++; } szNAME[i] = 0; if (strcmp(szNAME, "vorbis")) { return GF_FALSE; } switch (pack_type) { case 0x01: vp->version = oggpack_read(&opb, 32); if (vp->version != 0) { return GF_FALSE; } vp->channels = oggpack_read(&opb, 8); vp->sample_rate = oggpack_read(&opb, 32); vp->max_r = oggpack_read(&opb, 32); vp->avg_r = oggpack_read(&opb, 32); vp->low_r = oggpack_read(&opb, 32); vp->min_block = 1<<oggpack_read(&opb, 4); vp->max_block = 1<<oggpack_read(&opb, 4); if (vp->sample_rate < 1 || vp->channels < 1 || vp->min_block < 8 || vp->max_block < vp->min_block || oggpack_read(&opb, 1) != 1) { return GF_FALSE; } vp->nb_init=1; return GF_TRUE; case 0x03: /*trash comments*/ vp->nb_init++; return GF_TRUE; case 0x05: /*need at least bitstream header to make sure we're parsing 
the right thing*/ if (!vp->nb_init) return GF_FALSE; break; default: return GF_FALSE; } /*OK parse codebook*/ nb_books = oggpack_read(&opb, 8) + 1; /*skip vorbis static books*/ for (i = 0; i < nb_books; i++) { u32 map_type, qb, qq; u32 entries, dim; oggpack_read(&opb, 24); dim = oggpack_read(&opb, 16); entries = oggpack_read(&opb, 24); if ((s32)entries < 0) entries = 0; if (oggpack_read(&opb, 1) == 0) { if (oggpack_read(&opb, 1)) { for (j = 0; j < entries; j++) { if (oggpack_read(&opb, 1)) { oggpack_read(&opb, 5); } } } else { for (j = 0; j < entries; j++) oggpack_read(&opb, 5); } } else { oggpack_read(&opb, 5); for (j = 0; j < entries;) { u32 num = oggpack_read(&opb, ilog(entries - j, GF_FALSE)); for (k = 0; k < num && j < entries; k++, j++) { } } } switch ((map_type = oggpack_read(&opb, 4))) { case 0: break; case 1: case 2: oggpack_read(&opb, 32); oggpack_read(&opb, 32); qq = oggpack_read(&opb, 4) + 1; oggpack_read(&opb, 1); if (map_type == 1) qb = vorbis_book_maptype1_quantvals(entries, dim); else if (map_type == 2) qb = entries * dim; else qb = 0; for (j = 0; j < qb; j++) oggpack_read(&opb, qq); break; } } times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) oggpack_read(&opb, 16); times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) { u32 type = oggpack_read(&opb, 16); if (type) { u32 *parts, *class_dims, count, rangebits; u32 max_class = 0; nb_part = oggpack_read(&opb, 5); parts = (u32*)gf_malloc(sizeof(u32) * nb_part); for (j = 0; j < nb_part; j++) { parts[j] = oggpack_read(&opb, 4); if (max_class < parts[j]) max_class = parts[j]; } class_dims = (u32*)gf_malloc(sizeof(u32) * (max_class + 1)); for (j = 0; j < max_class + 1; j++) { u32 class_sub; class_dims[j] = oggpack_read(&opb, 3) + 1; class_sub = oggpack_read(&opb, 2); if (class_sub) oggpack_read(&opb, 8); for (k = 0; k < (u32)(1 << class_sub); k++) oggpack_read(&opb, 8); } oggpack_read(&opb, 2); rangebits = oggpack_read(&opb, 4); count = 0; for (j = 0, k = 0; j < nb_part; j++) { count += class_dims[parts[j]]; for (; k < count; k++) oggpack_read(&opb, rangebits); } gf_free(parts); gf_free(class_dims); } else { oggpack_read(&opb, 8 + 16 + 16 + 6 + 8); nb_books = oggpack_read(&opb, 4) + 1; for (j = 0; j < nb_books; j++) oggpack_read(&opb, 8); } } times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) { u32 acc = 0; oggpack_read(&opb, 16);/*type*/ oggpack_read(&opb, 24); oggpack_read(&opb, 24); oggpack_read(&opb, 24); nb_part = oggpack_read(&opb, 6) + 1; oggpack_read(&opb, 8); for (j = 0; j < nb_part; j++) { u32 cascade = oggpack_read(&opb, 3); if (oggpack_read(&opb, 1)) cascade |= (oggpack_read(&opb, 5) << 3); acc += icount(cascade); } for (j = 0; j < acc; j++) oggpack_read(&opb, 8); } times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) { u32 sub_maps = 1; oggpack_read(&opb, 16); if (oggpack_read(&opb, 1)) sub_maps = oggpack_read(&opb, 4) + 1; if (oggpack_read(&opb, 1)) { u32 nb_steps = oggpack_read(&opb, 8) + 1; for (j = 0; j < nb_steps; j++) { oggpack_read(&opb, ilog(vp->channels, GF_TRUE)); oggpack_read(&opb, ilog(vp->channels, GF_TRUE)); } } oggpack_read(&opb, 2); if (sub_maps>1) { for(l=0; l<vp->channels; l++) oggpack_read(&opb, 4); } for (j = 0; j < sub_maps; j++) { oggpack_read(&opb, 8); oggpack_read(&opb, 8); oggpack_read(&opb, 8); } } nb_modes = oggpack_read(&opb, 6) + 1; for (i = 0; i < nb_modes; i++) { vp->mode_flag[i] = oggpack_read(&opb, 1); oggpack_read(&opb, 16); oggpack_read(&opb, 16); oggpack_read(&opb, 8); } vp->modebits = 0; j = nb_modes; while (j > 1) { vp->modebits++; j >>= 
1; } return GF_TRUE; } GF_EXPORT u32 gf_vorbis_check_frame(GF_VorbisParser *vp, u8 *data, u32 data_length) { s32 block_size; oggpack_buffer opb; if (!vp) return 0; oggpack_readinit(&opb, (unsigned char*)data, data_length); /*not audio*/ if (oggpack_read(&opb, 1) != 0) return 0; block_size = oggpack_read(&opb, vp->modebits); if (block_size == -1) return 0; return ((vp->mode_flag[block_size]) ? vp->max_block : vp->min_block) / (2); } /*call with vorbis header packets - initializes the parser on success, leave it to NULL otherwise returns 1 if success, 0 if error.*/ Bool gf_opus_parse_header(GF_OpusParser *opus, u8 *data, u32 data_len) { char tag[9]; GF_BitStream *bs = gf_bs_new(data, data_len, GF_BITSTREAM_READ); gf_bs_read_data(bs, tag, 8); tag[8]=0; if (memcmp(data, "OpusHead", sizeof(char)*8)) { gf_bs_del(bs); return GF_FALSE; } /*Identification Header*/ opus->version = gf_bs_read_u8(bs); /*version*/ if (opus->version != 1) { gf_bs_del(bs); GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Opus] Unsupported version %d\n", opus->version)); return GF_FALSE; } opus->OutputChannelCount = gf_bs_read_u8(bs); opus->PreSkip = gf_bs_read_u16_le(bs); opus->InputSampleRate = gf_bs_read_u32_le(bs); opus->OutputGain = gf_bs_read_u16_le(bs); opus->ChannelMappingFamily = gf_bs_read_u8(bs); if (opus->ChannelMappingFamily != 0) { opus->StreamCount = gf_bs_read_u8(bs); opus->CoupledCount = gf_bs_read_u8(bs); gf_bs_read_data(bs, (char *) opus->ChannelMapping, opus->OutputChannelCount); } gf_bs_del(bs); return GF_TRUE; } /*returns 0 if init error or not a vorbis frame, otherwise returns the number of audio samples in this frame*/ u32 gf_opus_check_frame(GF_OpusParser *op, u8 *data, u32 data_length) { u32 block_size; if (!memcmp(data, "OpusHead", sizeof(char)*8)) return 0; if (!memcmp(data, "OpusTags", sizeof(char)*8)) return 0; /*consider the whole packet as Ogg packets and ISOBMFF samples for Opus are framed similarly*/ static const int OpusFrameDurIn48k[] = { 480, 960, 1920, 2880, 480, 960, 1920, 2880, 480, 960, 1920, 2880, 480, 960, 480, 960, 120, 240, 480, 960, 120, 240, 480, 960, 120, 240, 480, 960, 120, 240, 480, 960, }; int TOC_config = (data[0] & 0xf8) >> 3; //int s = (data[0] & 0x04) >> 2; block_size = OpusFrameDurIn48k[TOC_config]; int c = data[0] & 0x03; if (c == 1 || c == 2) { block_size *= 2; } else if (c == 3) { /*unknown number of frames*/ int num_frames = data[1] & 0x3f; block_size *= num_frames; } return block_size; } #endif /*!defined(GPAC_DISABLE_AV_PARSERS) && !defined (GPAC_DISABLE_OGG)*/ u64 gf_mpegh_escaped_value(GF_BitStream *bs, u32 nBits1, u32 nBits2, u32 nBits3) { u64 value = gf_bs_read_int(bs, nBits1); if (value == (1<<nBits1)-1) { u32 vadd = gf_bs_read_int(bs, nBits2); value += vadd; if (vadd == (1<<nBits2)-1) { vadd = gf_bs_read_int(bs, nBits3); value += vadd; } } return value; } GF_EXPORT s32 gf_mpegh_get_mhas_pl(u8 *ptr, u32 size, u64 *ch_layout) { s32 PL = -1; GF_BitStream *bs; u32 i; s32 sync_pos=-1; for (i=0; i<size-3; i++) { if ((ptr[i]==0xC0) && (ptr[i+1]== 0x01) && (ptr[i+2]==0xA5)) { sync_pos = i; break; } } if (sync_pos<0) return 0; if (ch_layout) *ch_layout = 0; bs = gf_bs_new(ptr, size, GF_BITSTREAM_READ); gf_bs_skip_bytes(bs, sync_pos); while (gf_bs_available(bs)) { u32 type = (u32) gf_mpegh_escaped_value(bs, 3, 8, 8); /*u64 label = */gf_mpegh_escaped_value(bs, 2, 8, 32); u64 mh_size = gf_mpegh_escaped_value(bs, 11, 24, 24); if (mh_size > gf_bs_available(bs)) break; //MHAS config if (type==1) { PL = gf_bs_read_int(bs, 8); if (ch_layout) { u32 idx = gf_bs_read_int(bs, 5); if 
(idx==0x1f) gf_bs_read_int(bs, 24); /*idx = */gf_bs_read_int(bs, 3); gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 1); //speaker config idx = gf_bs_read_int(bs, 2); if (idx == 0) { *ch_layout = gf_audio_fmt_get_layout_from_cicp( gf_bs_read_int(bs, 6) ); } } break; } gf_bs_skip_bytes(bs, mh_size); } gf_bs_del(bs); return PL; } GF_EXPORT void gf_media_vvc_parse_sei(char *buffer, u32 nal_size, VVCState *vvc) { gf_hevc_vvc_parse_sei(buffer, nal_size, NULL, vvc); } static Bool vvc_parse_nal_header(GF_BitStream *bs, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { u32 val; val = gf_bs_read_int_log(bs, 1, "forbidden_zero"); if (val) return GF_FALSE; val = gf_bs_read_int_log(bs, 1, "resevred0"); if (val) return GF_FALSE; val = gf_bs_read_int_log(bs, 6, "layerID"); if (layer_id) *layer_id = val; val = gf_bs_read_int_log(bs, 5, "nuh_type"); if (nal_unit_type) *nal_unit_type = val; val = gf_bs_read_int_log(bs, 3, "temporalID"); if (!val) return GF_FALSE; val -= 1; if (temporal_id) *temporal_id = val; return GF_TRUE; } static void vvc_profile_tier_level(GF_BitStream *bs, VVC_ProfileTierLevel *ptl, u32 idx) { u32 i; if (ptl->pt_present) { ptl->general_profile_idc = gf_bs_read_int_log_idx(bs, 7, "general_profile_idc", idx); ptl->general_tier_flag = gf_bs_read_int_log_idx(bs, 1, "general_tier_flag", idx); } ptl->general_level_idc = gf_bs_read_int_log_idx(bs, 8, "general_level_idc", idx); ptl->frame_only_constraint = gf_bs_read_int_log_idx(bs, 1, "frame_only_constraint", idx); ptl->multilayer_enabled = gf_bs_read_int_log_idx(bs, 1, "multilayer_enabled", idx); //general constraints info - max size if 1 + 81 + 8 + 255 if (ptl->pt_present) { // general_constraints_info ptl->gci_present = gf_bs_read_int_log_idx(bs, 1, "gci_present", idx); if (ptl->gci_present) { u8 res; ptl->gci[0] = 0x80; ptl->gci[0] |= gf_bs_read_int(bs, 7); //81-7 = 74 bits till reserved gf_bs_read_data(bs, ptl->gci+1, 9); ptl->gci[10] = gf_bs_read_int(bs, 2)<<6; //skip extensions ptl->gci[11] = 0; res = gf_bs_read_int(bs, 8); gf_bs_read_int(bs, res); } gf_bs_align(bs); } for (i=ptl->ptl_max_tid; i>0; i--) { ptl->sub_ptl[i-1].level_present_flag = gf_bs_read_int_log_idx2(bs, 1, "level_present_flag", idx, i); } gf_bs_align(bs); for (i=ptl->ptl_max_tid; i>0; i--) { if (ptl->sub_ptl[i-1].level_present_flag) ptl->sub_ptl[i-1].sublayer_level_idc = gf_bs_read_int_log_idx2(bs, 8, "sublayer_level_idc", idx, i); } if (ptl->pt_present) { ptl->num_sub_profiles = gf_bs_read_int_log_idx(bs, 8, "num_sub_profiles", idx); for (i=0; i<ptl->num_sub_profiles; i++) { ptl->sub_profile_idc[i] = gf_bs_read_int_log_idx2(bs, 32, "sub_profile_idc", idx, i); } } } static s32 gf_media_vvc_read_vps_bs_internal(GF_BitStream *bs, VVCState *vvc, Bool stop_at_vps_ext) { u32 i, j; s32 vps_id; VVC_VPS *vps; Bool vps_default_ptl_dpb_hrd_max_tid_flag=0; //nalu header already parsed vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if ((vps_id<0) || (vps_id >= 16)) return -1; if (!vps_id) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] VPS ID 0 is forbidden\n")); return -1; } vps = &vvc->vps[vps_id]; if (!vps->state) { vps->id = vps_id; vps->state = 1; } vps->max_layers = 1 + gf_bs_read_int_log(bs, 6, "max_layers"); if (vps->max_layers > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] sorry, %d layers in VPS but only %d supported\n", vps->max_layers, MAX_LHVC_LAYERS)); return -1; } vps->max_sub_layers = gf_bs_read_int_log(bs, 3, "max_sub_layers_minus1") + 1; if ((vps->max_layers>1) && (vps->max_sub_layers>1)) vps_default_ptl_dpb_hrd_max_tid_flag = gf_bs_read_int_log(bs, 
1, "vps_default_ptl_dpb_hrd_max_tid_flag"); if (vps->max_layers>1) vps->all_layers_independent = gf_bs_read_int_log(bs, 1, "all_layers_independent"); for (i=0; i<vps->max_layers; i++) { u32 layer_id = gf_bs_read_int_log_idx(bs, 6, "layer_id", i); if (layer_id>vps->max_layer_id) vps->max_layer_id = layer_id; if (i && !vps->all_layers_independent) { Bool layer_indep = gf_bs_read_int_log_idx(bs, 1, "layer_independent", i); if (!layer_indep) { Bool vps_max_tid_ref_present_flag = gf_bs_read_int_log_idx(bs, 1, "vps_max_tid_ref_present_flag", i); for (j=0; j<i; j++) { Bool vps_direct_ref_layer_flag = gf_bs_read_int_log_idx2(bs, 1, "vps_direct_ref_layer_flag", i, j); if (vps_max_tid_ref_present_flag && vps_direct_ref_layer_flag) { gf_bs_read_int_log_idx2(bs, 3, "vps_max_tid_il_ref_pics_plus1", i, j); } } } } } vps->num_ptl = 1; if (vps->max_layers > 1) { if (vps->all_layers_independent) { vps->each_layer_is_ols = gf_bs_read_int_log(bs, 1, "each_layer_is_ols"); } if (!vps->each_layer_is_ols) { u32 vps_ols_mode_idc = 2; if (!vps->all_layers_independent) { vps_ols_mode_idc = gf_bs_read_int_log(bs, 2, "vps_ols_mode_idc"); } if (vps_ols_mode_idc==2) { u8 vps_num_output_layer_sets = 2 + gf_bs_read_int_log(bs, 8, "vps_num_output_layer_sets_minus2"); for (i=0; i<vps_num_output_layer_sets; i++) { for (j=0; j<vps->max_layers; j++) { gf_bs_read_int_log_idx2(bs, 1, "vps_ols_output_layer_flag", i, j); } } } } vps->num_ptl = 1 + gf_bs_read_int_log(bs, 8, "num_ptl_minus1"); } vps->ptl[0].pt_present = 1; for (i=0; i<vps->num_ptl; i++) { if (i) vps->ptl[i].pt_present = gf_bs_read_int_log_idx(bs, 1, "pt_present", i); if (!vps_default_ptl_dpb_hrd_max_tid_flag) vps->ptl[i].ptl_max_tid = gf_bs_read_int_log_idx(bs, 3, "ptl_max_tid", i); else vps->ptl[i].ptl_max_tid = vps->max_sub_layers - 1;; } //align gf_bs_align(bs); for (i=0; i<vps->num_ptl; i++) { vvc_profile_tier_level(bs, &vps->ptl[i], i); } //TODO, parse multilayer stuff return vps_id; } static s32 gf_media_vvc_read_sps_bs_internal(GF_BitStream *bs, VVCState *vvc, u8 layer_id, u32 *vui_flag_pos) { s32 vps_id, sps_id; u32 i, CtbSizeY; VVC_SPS *sps; u8 sps_ptl_dpb_hrd_params_present_flag; if (vui_flag_pos) *vui_flag_pos = 0; sps_id = gf_bs_read_int_log(bs, 4, "sps_id"); if ((sps_id<0) || (sps_id >= 16)) { return -1; } vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if ((vps_id<0) || (vps_id >= 16)) { return -1; } if (!vps_id && !vvc->vps[0].state) { vvc->vps[0].state = 1; vvc->vps[0].num_ptl = 1; vvc->vps[0].max_layers = 1; vvc->vps[0].all_layers_independent = 1; } sps = &vvc->sps[sps_id]; if (!sps->state) { sps->state = 1; sps->id = sps_id; sps->vps_id = vps_id; } sps->max_sublayers = 1 + gf_bs_read_int_log(bs, 3, "max_sublayers_minus1"); sps->chroma_format_idc = gf_bs_read_int_log(bs, 2, "chroma_format_idc"); sps->log2_ctu_size = 5 + gf_bs_read_int_log(bs, 2, "log2_ctu_size_minus5"); CtbSizeY = 1<<sps->log2_ctu_size; sps_ptl_dpb_hrd_params_present_flag = gf_bs_read_int_log(bs, 1, "sps_ptl_dpb_hrd_params_present_flag"); if (sps_ptl_dpb_hrd_params_present_flag) { VVC_ProfileTierLevel ptl, *p_ptl; if (sps->vps_id) { p_ptl = &ptl; } else { p_ptl = &vvc->vps[0].ptl[0]; } memset(p_ptl, 0, sizeof(VVC_ProfileTierLevel)); p_ptl->pt_present = 1; p_ptl->ptl_max_tid = sps->max_sublayers; vvc_profile_tier_level(bs, p_ptl, 0); } sps->gdr_enabled = gf_bs_read_int_log(bs, 1, "gdr_enabled"); sps->ref_pic_resampling = gf_bs_read_int_log(bs, 1, "ref_pic_resampling"); if (sps->ref_pic_resampling) sps->res_change_in_clvs = gf_bs_read_int_log(bs, 1, "res_change_in_clvs"); sps->width = 
gf_bs_read_ue_log(bs, "width"); sps->height = gf_bs_read_ue_log(bs, "height"); sps->conf_window = gf_bs_read_int_log(bs, 1, "conformance_window_present_flag"); if (sps->conf_window) { sps->cw_left = gf_bs_read_ue_log(bs, "conformance_window_left"); sps->cw_right = gf_bs_read_ue_log(bs, "conformance_window_right"); sps->cw_top = gf_bs_read_ue_log(bs, "conformance_window_top"); sps->cw_bottom = gf_bs_read_ue_log(bs, "conformance_window_bottom"); } sps->subpic_info_present = gf_bs_read_int_log(bs, 1, "subpic_info_present"); if (sps->subpic_info_present) { sps->nb_subpics = 1 + gf_bs_read_ue_log(bs, "nb_subpics_minus1"); if (sps->nb_subpics>1) { u32 tmpWidthVal, tmpHeightVal; sps->independent_subpic_flags = gf_bs_read_int_log(bs, 1, "independent_subpic_flags"); sps->subpic_same_size = gf_bs_read_int_log(bs, 1, "subpic_same_size"); tmpWidthVal = (sps->width + CtbSizeY-1) / CtbSizeY; tmpWidthVal = gf_get_bit_size(tmpWidthVal); tmpHeightVal = (sps->height + CtbSizeY-1) / CtbSizeY; tmpHeightVal = gf_get_bit_size(tmpHeightVal); for (i=0; i<sps->nb_subpics; i++) { if( !sps->subpic_same_size || !i) { if (i && (sps->width > CtbSizeY)) gf_bs_read_int_log(bs, tmpWidthVal, "subpic_ctu_top_left_x"); if (i && (sps->height > CtbSizeY)) gf_bs_read_int_log(bs, tmpHeightVal, "subpic_ctu_top_left_y"); if ((i+1 < sps->nb_subpics) && (sps->width > CtbSizeY)) gf_bs_read_int_log(bs, tmpWidthVal, "subpic_width_minus1"); if ((i+1 < sps->nb_subpics) && (sps->height > CtbSizeY)) gf_bs_read_int_log(bs, tmpHeightVal, "subpic_height_minus1"); } if (!sps->independent_subpic_flags) { gf_bs_read_int_log(bs, 1, "subpic_treated_as_pic_flag"); gf_bs_read_int_log(bs, 1, "loop_filter_across_subpic_enabled_flag"); } } sps->subpicid_len = gf_bs_read_ue_log(bs, "subpic_id_len_minus1") + 1; sps->subpicid_mapping_explicit = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_explicitly_signalled_flag"); if (sps->subpicid_mapping_explicit) { sps->subpicid_mapping_present = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_present_flag"); if (sps->subpicid_mapping_present) { for (i=0; i<sps->nb_subpics; i++) { gf_bs_read_ue_log(bs, "subpic_id"); } } } } } sps->bitdepth = gf_bs_read_ue_log(bs, "bitdepth_minus8") + 8; gf_bs_read_int_log(bs, 1, "entropy_coding_sync_enabled_flag"); gf_bs_read_int_log(bs, 1, "entry_point_offsets_present_flag"); sps->log2_max_poc_lsb = 4 + gf_bs_read_int_log(bs, 4, "log2_max_poc_lsb_minus4"); if ((sps->poc_msb_cycle_flag = gf_bs_read_int_log(bs, 1, "poc_msb_cycle_flag"))) sps->poc_msb_cycle_len = 1 + gf_bs_read_ue_log(bs, "poc_msb_cycle_len_minus1"); u8 sps_num_extra_ph_bits = 8 * gf_bs_read_int_log(bs, 2, "sps_num_extra_ph_bytes"); for (i=0; i<sps_num_extra_ph_bits; i++) { if (gf_bs_read_int_log_idx(bs, 1, "extra_ph_bit_present_flag", 1)) sps->ph_num_extra_bits++; } u8 sps_num_extra_sh_bits = 8 * gf_bs_read_int_log(bs, 2, "num_extra_sh_bytes"); for (i=0; i<sps_num_extra_sh_bits; i++) { if (gf_bs_read_int_log_idx(bs, 1, "extra_sh_bit_present_flag", i)) sps->sh_num_extra_bits++; } if (sps_ptl_dpb_hrd_params_present_flag) { u8 sps_sublayer_dpb_params_flag = 0; if (sps->max_sublayers>1) { sps_sublayer_dpb_params_flag = gf_bs_read_int_log(bs, 1, "sps_sublayer_dpb_params_flag"); } for (i=(sps_sublayer_dpb_params_flag ? 
0 : sps->max_sublayers-1); i < sps->max_sublayers; i++ ) { gf_bs_read_ue_log_idx(bs, "dpb_max_dec_pic_buffering_minus1", i); gf_bs_read_ue_log_idx(bs, "dpb_max_num_reorder_pics", i); gf_bs_read_ue_log_idx(bs, "dpb_max_latency_increase_plus1", i); } } gf_bs_read_ue_log(bs, "sps_log2_min_luma_coding_block_size_minus2"); gf_bs_read_int_log(bs, 1, "sps_partition_constraints_override_enabled_flag"); gf_bs_read_ue_log(bs, "sps_log2_min_luma_coding_block_size_minus2"); u8 sps_max_mtt_hierarchy_depth_intra_slice_luma = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_intra_slice_luma"); if (sps_max_mtt_hierarchy_depth_intra_slice_luma != 0) { gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_intra_slice_luma"); gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_intra_slice_luma"); } u8 sps_qtbtt_dual_tree_intra_flag = 0; if (sps->chroma_format_idc) { sps_qtbtt_dual_tree_intra_flag = gf_bs_read_int_log(bs, 1, "sps_qtbtt_dual_tree_intra_flag"); } if (sps_qtbtt_dual_tree_intra_flag) { gf_bs_read_ue_log(bs, "sps_log2_diff_min_qt_min_cb_intra_slice_chroma"); u8 sps_max_mtt_hierarchy_depth_intra_slice_chroma = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_intra_slice_chroma"); if( sps_max_mtt_hierarchy_depth_intra_slice_chroma != 0) { gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_intra_slice_chroma"); gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_intra_slice_chroma"); } } gf_bs_read_ue_log(bs, "sps_log2_diff_min_qt_min_cb_inter_slice"); u8 sps_max_mtt_hierarchy_depth_inter_slice = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_inter_slice"); if (sps_max_mtt_hierarchy_depth_inter_slice != 0) { gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_inter_slice"); gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_inter_slice"); } //u8 sps_max_luma_transform_size_64_flag = 0; if (CtbSizeY > 32) { /*sps_max_luma_transform_size_64_flag = */gf_bs_read_int_log(bs, 1, "sps_max_luma_transform_size_64_flag"); } u8 sps_transform_skip_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_transform_skip_enabled_flag"); if (sps_transform_skip_enabled_flag) { gf_bs_read_ue_log(bs, "sps_log2_transform_skip_max_size_minus2"); gf_bs_read_int_log(bs, 1, "sps_bdpcm_enabled_flag"); } if (gf_bs_read_int_log(bs, 1, "sps_mts_enabled_flag")) { gf_bs_read_int_log(bs, 1, "sps_explicit_mts_intra_enabled_flag"); gf_bs_read_int_log(bs, 1, "sps_explicit_mts_inter_enabled_flag"); } gf_bs_read_int_log(bs, 1, "sps_lfnst_enabled_flag"); if (sps->chroma_format_idc) { u8 sps_joint_cbcr_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_joint_cbcr_enabled_flag"); u8 sps_same_qp_table_for_chroma_flag = gf_bs_read_int_log(bs, 1, "sps_same_qp_table_for_chroma_flag"); u32 numQpTables = sps_same_qp_table_for_chroma_flag ? 1 : (sps_joint_cbcr_enabled_flag ? 3 : 2); for (i=0; i<numQpTables; i++) { gf_bs_read_se_log_idx(bs, "sps_qp_table_start_minus26", i); u32 j, sps_num_points_in_qp_table = 1 + gf_bs_read_ue_log_idx(bs, "sps_num_points_in_qp_table_minus1", i); for (j=0; j<sps_num_points_in_qp_table; j++) { gf_bs_read_ue_log_idx2(bs, "sps_delta_qp_in_val_minus1", i, j); gf_bs_read_ue_log_idx2(bs, "sps_delta_qp_diff_val", i, j); } } } gf_bs_read_int_log(bs, 1, "sps_sao_enabled_flag"); sps->alf_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_alf_enabled_flag"); if (sps->alf_enabled_flag && sps->chroma_format_idc) { gf_bs_read_int_log(bs, 1, "sps_ccalf_enabled_flag"); } /*! 
TODO parse the rest !*/ return sps_id; } static s32 gf_media_vvc_read_pps_bs_internal(GF_BitStream *bs, VVCState *vvc) { u32 i; s32 pps_id; VVC_PPS *pps; //NAL header already read pps_id = gf_bs_read_int_log(bs, 6, "pps_id"); if ((pps_id < 0) || (pps_id >= 64)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] wrong PPS ID %d in PPS\n", pps_id)); return -1; } pps = &vvc->pps[pps_id]; if (!pps->state) { pps->id = pps_id; pps->state = 1; } pps->sps_id = gf_bs_read_int_log(bs, 4, "sps_id"); if ((pps->sps_id<0) || (pps->sps_id >= 16)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] wrong SPS ID %d in PPS\n", pps->sps_id)); pps->sps_id=0; return -1; } vvc->sps_active_idx = pps->sps_id; /*set active sps*/ pps->mixed_nal_types = gf_bs_read_int_log(bs, 1, "mixed_nal_types"); pps->width = gf_bs_read_ue_log(bs, "width"); pps->height = gf_bs_read_ue_log(bs, "height"); pps->conf_window = gf_bs_read_int_log(bs, 1, "conformance_window_flag"); if (pps->conf_window) { pps->cw_left = gf_bs_read_ue_log(bs, "conf_win_left_offset"); pps->cw_right = gf_bs_read_ue_log(bs, "conf_win_right_offset"); pps->cw_top = gf_bs_read_ue_log(bs, "conf_win_top_offset"); pps->cw_bottom = gf_bs_read_ue_log(bs, "conf_win_bottom_offset"); } //scaling window if (gf_bs_read_int_log(bs, 1, "scaling_window_explicit_signalling_flag")) { gf_bs_read_se_log(bs, "scaling_win_left_offset"); gf_bs_read_se_log(bs, "scaling_win_right_offset"); gf_bs_read_se_log(bs, "scaling_win_top_offset"); gf_bs_read_se_log(bs, "scaling_win_bottom_offset"); } pps->output_flag_present_flag = gf_bs_read_int_log(bs, 1, "output_flag_present_flag"); pps->no_pic_partition_flag = gf_bs_read_int_log(bs, 1, "no_pic_partition_flag"); pps->subpic_id_mapping_present_flag = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_present_flag"); if (pps->subpic_id_mapping_present_flag) { u32 pps_subpic_id_len, pps_num_subpics=0; if (!pps->no_pic_partition_flag) { pps_num_subpics = 1+gf_bs_read_ue_log(bs, "pps_num_subpics_minus1"); } pps_subpic_id_len = 1 + gf_bs_read_ue(bs); for (i=0; i<pps_num_subpics; i++) { gf_bs_read_int_log_idx(bs, pps_subpic_id_len, "subpic_id", i); } } if (!pps->no_pic_partition_flag) { gf_bs_read_int_log(bs, 2, "pps_log2_ctu_size_minus5"); u32 num_exp_tile_columns = 1 + gf_bs_read_ue_log(bs, "num_exp_tile_columns_minus1"); u32 num_exp_tile_rows = 1 + gf_bs_read_ue_log(bs, "num_exp_tile_rows_minus1"); for (i=0; i<num_exp_tile_columns; i++) gf_bs_read_ue_log_idx(bs, "tile_column_width_minus1", i); for (i=0; i<num_exp_tile_rows; i++) gf_bs_read_ue_log_idx(bs, "tile_row_height_minus1", i); //todo parse the rest return pps_id; } //todo parse the rest return pps_id; } static s32 vvc_parse_picture_header(GF_BitStream *bs, VVCState *vvc, VVCSliceInfo *si) { u32 pps_id; si->irap_or_gdr_pic = gf_bs_read_int_log(bs, 1, "irap_or_gdr_pic"); si->non_ref_pic = gf_bs_read_int_log(bs, 1, "non_ref_pic"); if (si->irap_or_gdr_pic) si->gdr_pic = gf_bs_read_int_log(bs, 1, "gdr_pic"); if ((si->inter_slice_allowed_flag = gf_bs_read_int_log(bs, 1, "inter_slice_allowed_flag"))) si->intra_slice_allowed_flag = gf_bs_read_int_log(bs, 1, "intra_slice_allowed_flag"); pps_id = gf_bs_read_ue_log(bs, "pps_id"); if ((pps_id<0) || (pps_id >= 64)) return -1; si->pps = &vvc->pps[pps_id]; si->sps = &vvc->sps[si->pps->sps_id]; si->poc_lsb = gf_bs_read_int_log(bs, si->sps->log2_max_poc_lsb, "poc_lsb"); si->recovery_point_valid = 0; si->gdr_recovery_count = 0; if (si->gdr_pic) { si->recovery_point_valid = 1; si->gdr_recovery_count = gf_bs_read_ue_log(bs, "gdr_recovery_count"); } gf_bs_read_int_log(bs, 
si->sps->ph_num_extra_bits, "ph_extra_bits"); if (si->sps->poc_msb_cycle_flag) { if ( (si->poc_msb_cycle_present_flag = gf_bs_read_int_log(bs, 1, "poc_msb_cycle_present_flag"))) { si->poc_msb_cycle = gf_bs_read_int_log(bs, si->sps->poc_msb_cycle_len, "poc_msb_cycle"); } } return 0; } static s32 vvc_parse_slice(GF_BitStream *bs, VVCState *vvc, VVCSliceInfo *si) { // u32 CurrSubpicIdx = 0; si->picture_header_in_slice_header_flag = gf_bs_read_int_log(bs, 1, "picture_header_in_slice_header_flag"); if (si->picture_header_in_slice_header_flag) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CODING, ("[VVC] Picture header in slice header incomplete support, cannot guess slice type\n")); si->slice_type = GF_VVC_SLICE_TYPE_UNKNOWN; return vvc_parse_picture_header(bs, vvc, si); } if (!si->sps) return -1; si->slice_type = GF_VVC_SLICE_TYPE_I; if (gf_bs_read_int_log(bs, 1, "sps_subpic_info_present_flag")) { gf_bs_read_int_log(bs, si->sps->subpicid_len, "subpic_id"); //todo update CurrSubpicIdx } if (si->pps->rect_slice_flag ) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[VVC] tiling parsing not supported - patch welcome\n")); return 0; } gf_bs_read_int_log(bs, si->sps->sh_num_extra_bits, "num_extra_bits"); /* if( !pps_rect_slice_flag && NumTilesInPic − sh_slice_address > 1 ) sh_num_tiles_in_slice_minus1 */ if (si->inter_slice_allowed_flag ) si->slice_type = gf_bs_read_int_log(bs, 2, "slice_type"); return 0; } /*this needs further tests !*/ static void vvc_compute_poc(VVCSliceInfo *si) { u32 max_poc_lsb = 1 << (si->sps->log2_max_poc_lsb); /*POC reset for IDR frames, NOT for CRA*/ if (si->irap_or_gdr_pic && !si->gdr_pic) { si->poc_lsb_prev = 0; si->poc_msb_prev = 0; } if (si->poc_msb_cycle_present_flag) { si->poc_msb = si->poc_msb_cycle; } else { if ((si->poc_lsb < si->poc_lsb_prev) && (si->poc_lsb_prev - si->poc_lsb >= max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev + max_poc_lsb; else if ((si->poc_lsb > si->poc_lsb_prev) && (si->poc_lsb - si->poc_lsb_prev > max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev - max_poc_lsb; else si->poc_msb = si->poc_msb_prev; } si->poc = si->poc_msb + si->poc_lsb; } GF_EXPORT s32 gf_media_vvc_parse_nalu_bs(GF_BitStream *bs, VVCState *vvc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { Bool is_slice = GF_FALSE; s32 ret = -1; VVCSliceInfo n_state; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); memcpy(&n_state, &vvc->s_info, sizeof(VVCSliceInfo)); if (!vvc_parse_nal_header(bs, nal_unit_type, temporal_id, layer_id)) return -1; n_state.nal_unit_type = *nal_unit_type; switch (n_state.nal_unit_type) { case GF_VVC_NALU_ACCESS_UNIT: case GF_VVC_NALU_END_OF_SEQ: case GF_VVC_NALU_END_OF_STREAM: ret = 1; break; case GF_VVC_NALU_SLICE_TRAIL: case GF_VVC_NALU_SLICE_STSA: case GF_VVC_NALU_SLICE_RADL: case GF_VVC_NALU_SLICE_RASL: case GF_VVC_NALU_SLICE_IDR_W_RADL: case GF_VVC_NALU_SLICE_IDR_N_LP: case GF_VVC_NALU_SLICE_CRA: case GF_VVC_NALU_SLICE_GDR: /* slice - read the info and compare.*/ ret = vvc_parse_slice(bs, vvc, &n_state); if (ret < 0) return ret; ret = 0; if (n_state.picture_header_in_slice_header_flag) { is_slice = GF_TRUE; vvc_compute_poc(&n_state); if (vvc->s_info.poc != n_state.poc) { ret = 1; break; } if (!(*layer_id) || (n_state.prev_layer_id_plus1 && ((*layer_id) <= n_state.prev_layer_id_plus1 - 1))) { ret = 1; break; } } break; case GF_VVC_NALU_PIC_HEADER: if (vvc_parse_picture_header(bs, vvc, &n_state)<0) { ret = -1; break; } is_slice = GF_TRUE; vvc_compute_poc(&n_state); if (!(*layer_id) || (n_state.prev_layer_id_plus1 && ((*layer_id) <= n_state.prev_layer_id_plus1 - 1))) { ret 
= 1; } break; case GF_VVC_NALU_SEQ_PARAM: vvc->last_parsed_sps_id = gf_media_vvc_read_sps_bs_internal(bs, vvc, *layer_id, NULL); ret = (vvc->last_parsed_sps_id>=0) ? 0 : -1; break; case GF_VVC_NALU_PIC_PARAM: vvc->last_parsed_pps_id = gf_media_vvc_read_pps_bs_internal(bs, vvc); ret = (vvc->last_parsed_pps_id>=0) ? 0 : -1; break; case GF_VVC_NALU_VID_PARAM: vvc->last_parsed_vps_id = gf_media_vvc_read_vps_bs_internal(bs, vvc, GF_FALSE); ret = (vvc->last_parsed_vps_id>=0) ? 0 : -1; break; case GF_VVC_NALU_DEC_PARAM: ret = 0; break; case GF_VVC_NALU_APS_PREFIX: //we use the mix aps type + aps id (first 8 bits) as unique identifier vvc->last_parsed_aps_id = gf_bs_read_int_log(bs, 8, "aps_id"); ret = 0; break; default: ret = 0; break; } /* save _prev values */ if ((ret>0) && vvc->s_info.sps) { // n_state.frame_num_offset_prev = vvc->s_info.frame_num_offset; // n_state.frame_num_prev = vvc->s_info.frame_num; n_state.poc_lsb_prev = vvc->s_info.poc_lsb; n_state.poc_msb_prev = vvc->s_info.poc_msb; if (is_slice) n_state.prev_layer_id_plus1 = *layer_id + 1; } if (is_slice) vvc_compute_poc(&n_state); memcpy(&vvc->s_info, &n_state, sizeof(VVCSliceInfo)); return ret; } GF_EXPORT s32 gf_media_vvc_parse_nalu(u8 *data, u32 size, VVCState *vvc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { GF_BitStream *bs = NULL; s32 ret; if (!vvc) { if (nal_unit_type) (*nal_unit_type) = data[1] >> 3; if (layer_id) (*layer_id) = data[0] & 0x3f; if (temporal_id) (*temporal_id) = (data[1] & 0x7); return -1; } bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); ret = gf_media_vvc_parse_nalu_bs(bs, vvc, nal_unit_type, temporal_id, layer_id); gf_bs_del(bs); return ret; } Bool gf_media_vvc_slice_is_ref(VVCState *vvc) { if (!vvc->s_info.irap_or_gdr_pic) { return GF_FALSE; } if (vvc->s_info.gdr_pic) { if (vvc->s_info.recovery_point_valid) { vvc->s_info.recovery_point_valid = 0; return GF_TRUE; } return GF_FALSE; } return GF_TRUE; }
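/*
 * Editorial sketch, not part of the original GPAC sources: a minimal example of
 * driving the AC-3 probing helper defined above. The helper name probe_ac3_frame
 * and the buf/len parameters are illustrative assumptions; only gf_ac3_parser()
 * and GF_AC3Config come from the code above. Guarded by #if 0 so it never
 * affects compilation.
 */
#if 0
static Bool probe_ac3_frame(u8 *buf, u32 len)
{
	GF_AC3Config hdr;
	u32 sync_pos = 0;
	memset(&hdr, 0, sizeof(GF_AC3Config));
	/* full_parse=GF_TRUE also fills the per-stream bsid/bsmod/acmod fields */
	if (!gf_ac3_parser(buf, len, &sync_pos, &hdr, GF_TRUE))
		return GF_FALSE;
	/* hdr.sample_rate, hdr.channels, hdr.framesize and hdr.bitrate are now set */
	return GF_TRUE;
}
#endif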
null
267
CWE-787
CVE-2021-31254
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre, Cyril Concolato * Copyright (c) Telecom ParisTech 2005-2020 * All rights reserved * * This file is part of GPAC / ISO Media File Format sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/internal/isomedia_dev.h> #ifndef GPAC_DISABLE_ISOM /* ProtectionInfo Box */ GF_Box *sinf_box_new() { ISOM_DECL_BOX_ALLOC(GF_ProtectionSchemeInfoBox, GF_ISOM_BOX_TYPE_SINF); return (GF_Box *)tmp; } void sinf_box_del(GF_Box *s) { gf_free(s); } GF_Err sinf_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem) { GF_ProtectionSchemeInfoBox *ptr = (GF_ProtectionSchemeInfoBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_FRMA: BOX_FIELD_ASSIGN(original_format, GF_OriginalFormatBox) break; case GF_ISOM_BOX_TYPE_SCHM: BOX_FIELD_ASSIGN(scheme_type, GF_SchemeTypeBox) break; case GF_ISOM_BOX_TYPE_SCHI: BOX_FIELD_ASSIGN(info, GF_SchemeInformationBox) break; } return GF_OK; } GF_Err sinf_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err sinf_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err sinf_box_size(GF_Box *s) { u32 pos=0; GF_ProtectionSchemeInfoBox *ptr = (GF_ProtectionSchemeInfoBox *)s; gf_isom_check_position(s, (GF_Box *)ptr->original_format, &pos); gf_isom_check_position(s, (GF_Box *)ptr->scheme_type, &pos); gf_isom_check_position(s, (GF_Box *)ptr->info, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* OriginalFormat Box */ GF_Box *frma_box_new() { ISOM_DECL_BOX_ALLOC(GF_OriginalFormatBox, GF_ISOM_BOX_TYPE_FRMA); return (GF_Box *)tmp; } void frma_box_del(GF_Box *s) { GF_OriginalFormatBox *ptr = (GF_OriginalFormatBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err frma_box_read(GF_Box *s, GF_BitStream *bs) { GF_OriginalFormatBox *ptr = (GF_OriginalFormatBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->data_format = gf_bs_read_u32(bs); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err frma_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_OriginalFormatBox *ptr = (GF_OriginalFormatBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->data_format); return GF_OK; } GF_Err frma_box_size(GF_Box *s) { GF_OriginalFormatBox *ptr = (GF_OriginalFormatBox *)s; ptr->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* SchemeType Box */ GF_Box *schm_box_new() { ISOM_DECL_BOX_ALLOC(GF_SchemeTypeBox, GF_ISOM_BOX_TYPE_SCHM); return (GF_Box *)tmp; } void schm_box_del(GF_Box *s) { GF_SchemeTypeBox *ptr = (GF_SchemeTypeBox *)s; if (ptr == NULL) return; if (ptr->URI) gf_free(ptr->URI); gf_free(ptr); } GF_Err schm_box_read(GF_Box *s, GF_BitStream *bs) { GF_SchemeTypeBox *ptr = (GF_SchemeTypeBox *)s; ISOM_DECREASE_SIZE(ptr, 8); ptr->scheme_type = gf_bs_read_u32(bs); ptr->scheme_version = gf_bs_read_u32(bs); if 
(ptr->size && (ptr->flags & 0x000001)) { u32 len = (u32) (ptr->size); ptr->URI = (char*)gf_malloc(sizeof(char)*len); if (!ptr->URI) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->URI, len); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err schm_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SchemeTypeBox *ptr = (GF_SchemeTypeBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->scheme_type); gf_bs_write_u32(bs, ptr->scheme_version); if (ptr->flags & 0x000001) { if (ptr->URI) gf_bs_write_data(bs, ptr->URI, (u32) strlen(ptr->URI)+1); else gf_bs_write_u8(bs, 0); } return GF_OK; } GF_Err schm_box_size(GF_Box *s) { GF_SchemeTypeBox *ptr = (GF_SchemeTypeBox *) s; if (!s) return GF_BAD_PARAM; ptr->size += 8; if (ptr->flags & 0x000001) ptr->size += 1 + (ptr->URI ? strlen(ptr->URI) : 0); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* SchemeInformation Box */ GF_Box *schi_box_new() { ISOM_DECL_BOX_ALLOC(GF_SchemeInformationBox, GF_ISOM_BOX_TYPE_SCHI); return (GF_Box *)tmp; } void schi_box_del(GF_Box *s) { gf_free(s); } GF_Err schi_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem) { GF_SchemeInformationBox *ptr = (GF_SchemeInformationBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_IKMS: BOX_FIELD_ASSIGN(ikms, GF_ISMAKMSBox) return GF_OK; case GF_ISOM_BOX_TYPE_ISFM: BOX_FIELD_ASSIGN(isfm, GF_ISMASampleFormatBox) return GF_OK; case GF_ISOM_BOX_TYPE_ISLT: BOX_FIELD_ASSIGN(islt, GF_ISMACrypSaltBox) return GF_OK; case GF_ISOM_BOX_TYPE_ODKM: BOX_FIELD_ASSIGN(odkm, GF_OMADRMKMSBox) return GF_OK; case GF_ISOM_BOX_TYPE_TENC: BOX_FIELD_ASSIGN(tenc, GF_TrackEncryptionBox) return GF_OK; case GF_ISOM_BOX_TYPE_ADKM: BOX_FIELD_ASSIGN(adkm, GF_AdobeDRMKeyManagementSystemBox) return GF_OK; case GF_ISOM_BOX_TYPE_UUID: if (((GF_UUIDBox*)a)->internal_4cc==GF_ISOM_BOX_UUID_TENC) { BOX_FIELD_ASSIGN(piff_tenc, GF_PIFFTrackEncryptionBox) return GF_OK; } else { return GF_OK; } } return GF_OK; } GF_Err schi_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err schi_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err schi_box_size(GF_Box *s) { u32 pos=0; GF_SchemeInformationBox *ptr = (GF_SchemeInformationBox *)s; gf_isom_check_position(s, (GF_Box *)ptr->ikms, &pos); gf_isom_check_position(s, (GF_Box *)ptr->isfm, &pos); gf_isom_check_position(s, (GF_Box *)ptr->islt, &pos); gf_isom_check_position(s, (GF_Box *)ptr->odkm, &pos); gf_isom_check_position(s, (GF_Box *)ptr->tenc, &pos); gf_isom_check_position(s, (GF_Box *)ptr->adkm, &pos); gf_isom_check_position(s, (GF_Box *)ptr->piff_tenc, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* ISMAKMS Box */ GF_Box *iKMS_box_new() { ISOM_DECL_BOX_ALLOC(GF_ISMAKMSBox, GF_ISOM_BOX_TYPE_IKMS); return (GF_Box *)tmp; } void iKMS_box_del(GF_Box *s) { GF_ISMAKMSBox *ptr = (GF_ISMAKMSBox *)s; if (ptr == NULL) return; if (ptr->URI) gf_free(ptr->URI); gf_free(ptr); } GF_Err iKMS_box_read(GF_Box *s, GF_BitStream *bs) { u32 len; GF_ISMAKMSBox *ptr = (GF_ISMAKMSBox *)s; len = (u32) (ptr->size); ptr->URI = (char*) gf_malloc(sizeof(char)*len); if (!ptr->URI) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->URI, len); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err iKMS_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ISMAKMSBox *ptr = (GF_ISMAKMSBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->URI) gf_bs_write_data(bs, ptr->URI, (u32) 
strlen(ptr->URI)); gf_bs_write_u8(bs, 0); return GF_OK; } GF_Err iKMS_box_size(GF_Box *s) { GF_ISMAKMSBox *ptr = (GF_ISMAKMSBox *)s; ptr->size += (ptr->URI ? strlen(ptr->URI) : 0) + 1; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* ISMASampleFormat Box */ GF_Box *iSFM_box_new() { ISOM_DECL_BOX_ALLOC(GF_ISMASampleFormatBox, GF_ISOM_BOX_TYPE_ISFM); return (GF_Box *)tmp; } void iSFM_box_del(GF_Box *s) { GF_ISMASampleFormatBox *ptr = (GF_ISMASampleFormatBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err iSFM_box_read(GF_Box *s, GF_BitStream *bs) { GF_ISMASampleFormatBox *ptr = (GF_ISMASampleFormatBox *)s; ISOM_DECREASE_SIZE(ptr, 3); ptr->selective_encryption = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 7); ptr->key_indicator_length = gf_bs_read_u8(bs); ptr->IV_length = gf_bs_read_u8(bs); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err iSFM_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ISMASampleFormatBox *ptr = (GF_ISMASampleFormatBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, ptr->selective_encryption, 1); gf_bs_write_int(bs, 0, 7); gf_bs_write_u8(bs, ptr->key_indicator_length); gf_bs_write_u8(bs, ptr->IV_length); return GF_OK; } GF_Err iSFM_box_size(GF_Box *s) { GF_ISMASampleFormatBox *ptr = (GF_ISMASampleFormatBox *)s; ptr->size += 3; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* ISMASampleFormat Box */ GF_Box *iSLT_box_new() { ISOM_DECL_BOX_ALLOC(GF_ISMACrypSaltBox, GF_ISOM_BOX_TYPE_ISLT); return (GF_Box *)tmp; } void iSLT_box_del(GF_Box *s) { GF_ISMACrypSaltBox *ptr = (GF_ISMACrypSaltBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err iSLT_box_read(GF_Box *s, GF_BitStream *bs) { GF_ISMACrypSaltBox *ptr = (GF_ISMACrypSaltBox *)s; if (ptr == NULL) return GF_BAD_PARAM; ISOM_DECREASE_SIZE(ptr, 8); ptr->salt = gf_bs_read_u64(bs); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err iSLT_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ISMACrypSaltBox *ptr = (GF_ISMACrypSaltBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->salt); return GF_OK; } GF_Err iSLT_box_size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* OMADRMCommonHeader Box */ GF_Box *ohdr_box_new() { ISOM_DECL_BOX_ALLOC(GF_OMADRMCommonHeaderBox, GF_ISOM_BOX_TYPE_OHDR); tmp->child_boxes = gf_list_new(); return (GF_Box *)tmp; } void ohdr_box_del(GF_Box *s) { GF_OMADRMCommonHeaderBox *ptr = (GF_OMADRMCommonHeaderBox*)s; if (ptr == NULL) return; if (ptr->ContentID) gf_free(ptr->ContentID); if (ptr->RightsIssuerURL) gf_free(ptr->RightsIssuerURL); if (ptr->TextualHeaders) gf_free(ptr->TextualHeaders); gf_free(ptr); } GF_Err ohdr_box_read(GF_Box *s, GF_BitStream *bs) { u16 cid_len, ri_len; GF_OMADRMCommonHeaderBox *ptr = (GF_OMADRMCommonHeaderBox*)s; ISOM_DECREASE_SIZE(ptr, (1+1+8+2+2+2) ); ptr->EncryptionMethod = gf_bs_read_u8(bs); ptr->PaddingScheme = gf_bs_read_u8(bs); ptr->PlaintextLength = gf_bs_read_u64(bs); cid_len = gf_bs_read_u16(bs); ri_len = gf_bs_read_u16(bs); ptr->TextualHeadersLen = gf_bs_read_u16(bs); if (ptr->size<cid_len+ri_len+ptr->TextualHeadersLen) return GF_ISOM_INVALID_FILE; if (cid_len) { ptr->ContentID = (char *)gf_malloc(sizeof(char)*(cid_len+1)); if (!ptr->ContentID) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->ContentID, cid_len); ptr->ContentID[cid_len]=0; } if (ri_len) { ptr->RightsIssuerURL = (char *)gf_malloc(sizeof(char)*(ri_len+1)); if (!ptr->RightsIssuerURL) return GF_OUT_OF_MEM; gf_bs_read_data(bs, 
ptr->RightsIssuerURL, ri_len); ptr->RightsIssuerURL[ri_len]=0; } if (ptr->TextualHeadersLen) { ptr->TextualHeaders = (char *)gf_malloc(sizeof(char)*(ptr->TextualHeadersLen+1)); if (!ptr->TextualHeaders) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->TextualHeaders, ptr->TextualHeadersLen); ptr->TextualHeaders[ptr->TextualHeadersLen] = 0; } ISOM_DECREASE_SIZE(ptr, (cid_len+ri_len+ptr->TextualHeadersLen) ); return gf_isom_box_array_read(s, bs); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err ohdr_box_write(GF_Box *s, GF_BitStream *bs) { u16 cid_len, ri_len; GF_Err e; GF_OMADRMCommonHeaderBox *ptr = (GF_OMADRMCommonHeaderBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->EncryptionMethod); gf_bs_write_u8(bs, ptr->PaddingScheme); gf_bs_write_u64(bs, ptr->PlaintextLength); cid_len = ptr->ContentID ? (u16) strlen(ptr->ContentID) : 0; gf_bs_write_u16(bs, cid_len); ri_len = ptr->RightsIssuerURL ? (u16) strlen(ptr->RightsIssuerURL) : 0; gf_bs_write_u16(bs, ri_len); gf_bs_write_u16(bs, ptr->TextualHeadersLen); if (cid_len) gf_bs_write_data(bs, ptr->ContentID, (u32) strlen(ptr->ContentID)); if (ri_len) gf_bs_write_data(bs, ptr->RightsIssuerURL, (u32) strlen(ptr->RightsIssuerURL)); if (ptr->TextualHeadersLen) gf_bs_write_data(bs, ptr->TextualHeaders, ptr->TextualHeadersLen); ISOM_DECREASE_SIZE(ptr, (cid_len+ri_len+ptr->TextualHeadersLen) ); return GF_OK; } GF_Err ohdr_box_size(GF_Box *s) { GF_OMADRMCommonHeaderBox *ptr = (GF_OMADRMCommonHeaderBox *)s; ptr->size += 1+1+8+2+2+2; if (ptr->ContentID) ptr->size += strlen(ptr->ContentID); if (ptr->RightsIssuerURL) ptr->size += strlen(ptr->RightsIssuerURL); if (ptr->TextualHeadersLen) ptr->size += ptr->TextualHeadersLen; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* OMADRMGroupID Box */ GF_Box *grpi_box_new() { ISOM_DECL_BOX_ALLOC(GF_OMADRMGroupIDBox, GF_ISOM_BOX_TYPE_GRPI); return (GF_Box *)tmp; } void grpi_box_del(GF_Box *s) { GF_OMADRMGroupIDBox *ptr = (GF_OMADRMGroupIDBox *)s; if (ptr == NULL) return; if (ptr->GroupID) gf_free(ptr->GroupID); if (ptr->GroupKey) gf_free(ptr->GroupKey); gf_free(ptr); } GF_Err grpi_box_read(GF_Box *s, GF_BitStream *bs) { u16 gid_len; GF_OMADRMGroupIDBox *ptr = (GF_OMADRMGroupIDBox*)s; ISOM_DECREASE_SIZE(ptr, (1+2+2) ); gid_len = gf_bs_read_u16(bs); ptr->GKEncryptionMethod = gf_bs_read_u8(bs); ptr->GKLength = gf_bs_read_u16(bs); if (ptr->size<gid_len+ptr->GKLength) return GF_ISOM_INVALID_FILE; ptr->GroupID = gf_malloc(sizeof(char)*(gid_len+1)); if (!ptr->GroupID) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->GroupID, gid_len); ptr->GroupID[gid_len]=0; ptr->GroupKey = (char *)gf_malloc(sizeof(char)*ptr->GKLength); if (!ptr->GroupKey) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->GroupKey, ptr->GKLength); ISOM_DECREASE_SIZE(ptr, (gid_len+ptr->GKLength) ); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err grpi_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u16 gid_len; GF_OMADRMGroupIDBox *ptr = (GF_OMADRMGroupIDBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gid_len = ptr->GroupID ? 
(u16) strlen(ptr->GroupID) : 0; gf_bs_write_u16(bs, gid_len); gf_bs_write_u8(bs, ptr->GKEncryptionMethod); gf_bs_write_u16(bs, ptr->GKLength); gf_bs_write_data(bs, ptr->GroupID, gid_len); gf_bs_write_data(bs, ptr->GroupKey, ptr->GKLength); return GF_OK; } GF_Err grpi_box_size(GF_Box *s) { GF_OMADRMGroupIDBox *ptr = (GF_OMADRMGroupIDBox *)s; ptr->size += 2+2+1 + ptr->GKLength; if (ptr->GroupID) ptr->size += strlen(ptr->GroupID); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* OMADRMMutableInformation Box */ GF_Box *mdri_box_new() { ISOM_DECL_BOX_ALLOC(GF_OMADRMMutableInformationBox, GF_ISOM_BOX_TYPE_MDRI); return (GF_Box *)tmp; } void mdri_box_del(GF_Box *s) { GF_OMADRMMutableInformationBox*ptr = (GF_OMADRMMutableInformationBox*)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err mdri_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mdri_box_write(GF_Box *s, GF_BitStream *bs) { // GF_OMADRMMutableInformationBox*ptr = (GF_OMADRMMutableInformationBox*)s; GF_Err e = gf_isom_box_write_header(s, bs); if (e) return e; return GF_OK; } GF_Err mdri_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* OMADRMTransactionTracking Box */ GF_Box *odtt_box_new() { ISOM_DECL_BOX_ALLOC(GF_OMADRMTransactionTrackingBox, GF_ISOM_BOX_TYPE_ODTT); return (GF_Box *)tmp; } void odtt_box_del(GF_Box *s) { GF_OMADRMTransactionTrackingBox *ptr = (GF_OMADRMTransactionTrackingBox*)s; gf_free(ptr); } GF_Err odtt_box_read(GF_Box *s, GF_BitStream *bs) { GF_OMADRMTransactionTrackingBox *ptr = (GF_OMADRMTransactionTrackingBox *)s; gf_bs_read_data(bs, ptr->TransactionID, 16); ISOM_DECREASE_SIZE(ptr, 16); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err odtt_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_OMADRMTransactionTrackingBox *ptr = (GF_OMADRMTransactionTrackingBox*)s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_data(bs, ptr->TransactionID, 16); return GF_OK; } GF_Err odtt_box_size(GF_Box *s) { s->size += 16; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* OMADRMRightsObject Box */ GF_Box *odrb_box_new() { ISOM_DECL_BOX_ALLOC(GF_OMADRMRightsObjectBox, GF_ISOM_BOX_TYPE_ODRB); return (GF_Box *)tmp; } void odrb_box_del(GF_Box *s) { GF_OMADRMRightsObjectBox *ptr = (GF_OMADRMRightsObjectBox*)s; if (ptr->oma_ro) gf_free(ptr->oma_ro); gf_free(ptr); } GF_Err odrb_box_read(GF_Box *s, GF_BitStream *bs) { GF_OMADRMRightsObjectBox *ptr = (GF_OMADRMRightsObjectBox *)s; ptr->oma_ro_size = (u32) ptr->size; ptr->oma_ro = (char*) gf_malloc(sizeof(char)*ptr->oma_ro_size); if (!ptr->oma_ro) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->oma_ro, ptr->oma_ro_size); ptr->size = 0; return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err odrb_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_OMADRMRightsObjectBox *ptr = (GF_OMADRMRightsObjectBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_data(bs, ptr->oma_ro, ptr->oma_ro_size); return GF_OK; } GF_Err odrb_box_size(GF_Box *s) { GF_OMADRMRightsObjectBox *ptr = (GF_OMADRMRightsObjectBox *)s; s->size += ptr->oma_ro_size; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* OMADRMKMS Box */ GF_Box *odkm_box_new() { ISOM_DECL_BOX_ALLOC(GF_OMADRMKMSBox, GF_ISOM_BOX_TYPE_ODKM); return (GF_Box *)tmp; } void odkm_box_del(GF_Box *s) { gf_free(s); } GF_Err odkm_Add(GF_Box *s, GF_Box *a, Bool is_rem) { GF_OMADRMKMSBox *ptr = (GF_OMADRMKMSBox *)s; switch (a->type) { case 
GF_ISOM_BOX_TYPE_OHDR: BOX_FIELD_ASSIGN(hdr, GF_OMADRMCommonHeaderBox) return GF_OK; case GF_ISOM_BOX_TYPE_ODAF: BOX_FIELD_ASSIGN(fmt, GF_OMADRMAUFormatBox) return GF_OK; } return GF_OK; } GF_Err odkm_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err odkm_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_full_box_write(s, bs); } GF_Err odkm_box_size(GF_Box *s) { u32 pos=0; GF_OMADRMKMSBox *ptr = (GF_OMADRMKMSBox *)s; gf_isom_check_position(s, (GF_Box *)ptr->hdr, &pos); gf_isom_check_position(s, (GF_Box *)ptr->fmt, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *pssh_box_new() { ISOM_DECL_BOX_ALLOC(GF_ProtectionSystemHeaderBox, GF_ISOM_BOX_TYPE_PSSH); return (GF_Box *)tmp; } void pssh_box_del(GF_Box *s) { GF_ProtectionSystemHeaderBox *ptr = (GF_ProtectionSystemHeaderBox*)s; if (ptr == NULL) return; if (ptr->private_data) gf_free(ptr->private_data); if (ptr->KIDs) gf_free(ptr->KIDs); gf_free(ptr); } GF_Err pssh_box_read(GF_Box *s, GF_BitStream *bs) { GF_ProtectionSystemHeaderBox *ptr = (GF_ProtectionSystemHeaderBox *)s; gf_bs_read_data(bs, (char *) ptr->SystemID, 16); ISOM_DECREASE_SIZE(ptr, 16); if (ptr->version > 0) { ptr->KID_count = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); if (ptr->KID_count) { u32 i; if (ptr->size < ptr->KID_count * sizeof(bin128)) return GF_ISOM_INVALID_FILE; ptr->KIDs = gf_malloc(ptr->KID_count*sizeof(bin128)); if (!ptr->KIDs) return GF_OUT_OF_MEM; for (i=0; i<ptr->KID_count; i++) { gf_bs_read_data(bs, (char *) ptr->KIDs[i], 16); ISOM_DECREASE_SIZE(ptr, 16); } } } ptr->private_data_size = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); if (ptr->private_data_size) { if (ptr->size < ptr->private_data_size) return GF_ISOM_INVALID_FILE; ptr->private_data = gf_malloc(sizeof(char)*ptr->private_data_size); if (!ptr->private_data) return GF_OUT_OF_MEM; gf_bs_read_data(bs, (char *) ptr->private_data, ptr->private_data_size); ISOM_DECREASE_SIZE(ptr, ptr->private_data_size); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err pssh_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ProtectionSystemHeaderBox *ptr = (GF_ProtectionSystemHeaderBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_data(bs, (char *) ptr->SystemID, 16); if (ptr->version > 0) { u32 i; gf_bs_write_u32(bs, ptr->KID_count); for (i=0; i<ptr->KID_count; i++) gf_bs_write_data(bs, (char *) ptr->KIDs[i], 16); } if (ptr->private_data) { gf_bs_write_u32(bs, ptr->private_data_size); gf_bs_write_data(bs, (char *) ptr->private_data, ptr->private_data_size); } else gf_bs_write_u32(bs, 0); return GF_OK; } GF_Err pssh_box_size(GF_Box *s) { GF_ProtectionSystemHeaderBox *ptr = (GF_ProtectionSystemHeaderBox*)s; if (ptr->KID_count && !ptr->version) { ptr->version = 1; } ptr->size += 16; if (ptr->version) ptr->size += 4 + 16*ptr->KID_count; ptr->size += 4 + (ptr->private_data ? 
ptr->private_data_size : 0); return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *tenc_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackEncryptionBox, GF_ISOM_BOX_TYPE_TENC); return (GF_Box *)tmp; } void tenc_box_del(GF_Box *s) { gf_free(s); } GF_Err tenc_box_read(GF_Box *s, GF_BitStream *bs) { u8 iv_size; GF_TrackEncryptionBox *ptr = (GF_TrackEncryptionBox*)s; ISOM_DECREASE_SIZE(ptr, 3); gf_bs_read_u8(bs); //reserved if (!ptr->version) { gf_bs_read_u8(bs); //reserved } else { ptr->crypt_byte_block = gf_bs_read_int(bs, 4); ptr->skip_byte_block = gf_bs_read_int(bs, 4); } ptr->isProtected = gf_bs_read_u8(bs); ISOM_DECREASE_SIZE(ptr, 17); ptr->key_info[0] = 0; ptr->key_info[1] = 0; ptr->key_info[2] = 0; ptr->key_info[3] = iv_size = gf_bs_read_u8(bs); gf_bs_read_data(bs, ptr->key_info+4, 16); if (!iv_size && ptr->isProtected) { ISOM_DECREASE_SIZE(ptr, 1); iv_size = ptr->key_info[20] = gf_bs_read_u8(bs); ISOM_DECREASE_SIZE(ptr, ptr->key_info[20]); gf_bs_read_data(bs, ptr->key_info+21, iv_size); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tenc_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TrackEncryptionBox *ptr = (GF_TrackEncryptionBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u8(bs, 0); //reserved if (!ptr->version) { gf_bs_write_u8(bs, 0); //reserved } else { gf_bs_write_int(bs, ptr->crypt_byte_block, 4); gf_bs_write_int(bs, ptr->skip_byte_block, 4); } gf_bs_write_u8(bs, ptr->isProtected); gf_bs_write_u8(bs, ptr->key_info[3]); gf_bs_write_data(bs, ptr->key_info + 4, 16); if ((ptr->isProtected == 1) && !ptr->key_info[3]) { gf_bs_write_u8(bs, ptr->key_info[20]); gf_bs_write_data(bs, ptr->key_info + 21, ptr->key_info[20]); } return GF_OK; } GF_Err tenc_box_size(GF_Box *s) { GF_TrackEncryptionBox *ptr = (GF_TrackEncryptionBox*)s; ptr->size += 3; ptr->size += 17; if ((ptr->isProtected == 1) && ! 
ptr->key_info[3]) { ptr->size += 1 + ptr->key_info[20]; } return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *piff_tenc_box_new() { ISOM_DECL_BOX_ALLOC(GF_PIFFTrackEncryptionBox, GF_ISOM_BOX_TYPE_UUID); tmp->internal_4cc = GF_ISOM_BOX_UUID_TENC; return (GF_Box *)tmp; } void piff_tenc_box_del(GF_Box *s) { gf_free(s); } GF_Err piff_tenc_box_read(GF_Box *s, GF_BitStream *bs) { GF_PIFFTrackEncryptionBox *ptr = (GF_PIFFTrackEncryptionBox*)s; ISOM_DECREASE_SIZE(ptr, 4); //PIFF TENC extends UUID and fullbox ptr->version = gf_bs_read_u8(bs); ptr->flags = gf_bs_read_u24(bs); ISOM_DECREASE_SIZE(ptr, 20); ptr->AlgorithmID = gf_bs_read_int(bs, 24); ptr->key_info[0] = 0; ptr->key_info[1] = 0; ptr->key_info[2] = 0; ptr->key_info[3] = gf_bs_read_u8(bs); gf_bs_read_data(bs, ptr->key_info+4, 16); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err piff_tenc_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_PIFFTrackEncryptionBox *ptr = (GF_PIFFTrackEncryptionBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->version); gf_bs_write_u24(bs, ptr->flags); gf_bs_write_int(bs, ptr->AlgorithmID, 24); gf_bs_write_u8(bs, ptr->key_info[3]); gf_bs_write_data(bs, ptr->key_info+4, 16); return GF_OK; } GF_Err piff_tenc_box_size(GF_Box *s) { GF_PIFFTrackEncryptionBox *ptr = (GF_PIFFTrackEncryptionBox*)s; ptr->size += 24; return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *piff_psec_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleEncryptionBox, GF_ISOM_BOX_TYPE_UUID); tmp->internal_4cc = GF_ISOM_BOX_UUID_PSEC; tmp->piff_type = 1; return (GF_Box *)tmp; } void piff_psec_box_del(GF_Box *s) { GF_SampleEncryptionBox *ptr = (GF_SampleEncryptionBox *)s; while (gf_list_count(ptr->samp_aux_info)) { GF_CENCSampleAuxInfo *sai = (GF_CENCSampleAuxInfo *)gf_list_get(ptr->samp_aux_info, 0); if (sai) gf_isom_cenc_samp_aux_info_del(sai); gf_list_rem(ptr->samp_aux_info, 0); } if (ptr->samp_aux_info) gf_list_del(ptr->samp_aux_info); gf_free(s); } GF_Err piff_psec_box_read(GF_Box *s, GF_BitStream *bs) { GF_SampleEncryptionBox *ptr = (GF_SampleEncryptionBox *)s; ISOM_DECREASE_SIZE(ptr, 4); //PIFF PSEC extends UUID and fullbox ptr->version = gf_bs_read_u8(bs); ptr->flags = gf_bs_read_u24(bs); if (ptr->flags & 1) { ISOM_DECREASE_SIZE(ptr, 20); ptr->AlgorithmID = gf_bs_read_int(bs, 24); ptr->IV_size = gf_bs_read_u8(bs); gf_bs_read_data(bs, (char *) ptr->KID, 16); } if (ptr->IV_size == 0) ptr->IV_size = 8; //default to 8 ptr->bs_offset = gf_bs_get_position(bs); /*u32 sample_count = */gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); if (ptr->IV_size != 8 && ptr->IV_size != 16) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] PIFF PSEC box incorrect IV size: %u - shall be 8 or 16\n", ptr->IV_size)); return GF_BAD_PARAM; } //as for senc, we skip parsing of the box until we have all saiz/saio info gf_bs_skip_bytes(bs, ptr->size); ptr->size = 0; return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err store_senc_info(GF_SampleEncryptionBox *ptr, GF_BitStream *bs) { GF_Err e; u64 pos, new_pos; if (!ptr->cenc_saio) return GF_OK; pos = gf_bs_get_position(bs); if (pos>0xFFFFFFFFULL) { if (ptr->cenc_saio && !ptr->cenc_saio->version) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] saio offset larger than 32-bits but box version 0 enforced. 
Retry without \"saio32\" option\n")); return GF_BAD_PARAM; } } e = gf_bs_seek(bs, ptr->cenc_saio->offset_first_offset_field); if (e) return e; //force using version 1 for saio box i.e offset has 64 bits #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (ptr->traf) { new_pos = pos - ptr->traf->moof_start_in_bs; } else #endif { new_pos = pos; } if (ptr->cenc_saio->offsets) { u32 i; u64 old_offset = ptr->cenc_saio->offsets[0]; for (i=0; i<ptr->cenc_saio->entry_count; i++) { if (ptr->cenc_saio->version) { gf_bs_write_u64(bs, new_pos + ptr->cenc_saio->offsets[i] - old_offset); } else { gf_bs_write_u32(bs, (u32) (new_pos + ptr->cenc_saio->offsets[i] - old_offset)); } ptr->cenc_saio->offsets[i] = new_pos + ptr->cenc_saio->offsets[i] - old_offset; } } else { if (ptr->cenc_saio->version) { gf_bs_write_u64(bs, new_pos); } else { gf_bs_write_u32(bs, (u32) new_pos); } } return gf_bs_seek(bs, pos); } GF_Err piff_psec_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 sample_count; GF_SampleEncryptionBox *ptr = (GF_SampleEncryptionBox *) s; if (!s) return GF_BAD_PARAM; sample_count = gf_list_count(ptr->samp_aux_info); if (!sample_count) { ptr->size = 0; return GF_OK; } e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->version); gf_bs_write_u24(bs, ptr->flags); if (ptr->flags & 1) { gf_bs_write_int(bs, ptr->AlgorithmID, 24); gf_bs_write_u8(bs, ptr->IV_size); gf_bs_write_data(bs, (char *) ptr->KID, 16); } sample_count = gf_list_count(ptr->samp_aux_info); gf_bs_write_u32(bs, sample_count); if (sample_count) { u32 i; e = store_senc_info((GF_SampleEncryptionBox *)ptr, bs); if (e) return e; for (i = 0; i < sample_count; i++) { GF_CENCSampleAuxInfo *sai = (GF_CENCSampleAuxInfo *)gf_list_get(ptr->samp_aux_info, i); if (! sai->cenc_data_size) continue; gf_bs_write_data(bs, (char *)sai->cenc_data, sai->cenc_data_size); } } return GF_OK; } GF_Err piff_psec_box_size(GF_Box *s) { u32 i, sample_count; GF_SampleEncryptionBox *ptr = (GF_SampleEncryptionBox*)s; sample_count = gf_list_count(ptr->samp_aux_info); if (!sample_count) { ptr->size = 0; return GF_OK; } ptr->size += 4; if (ptr->flags & 1) { ptr->size += 20; } ptr->size += 4; for (i = 0; i < sample_count; i++) { GF_CENCSampleAuxInfo *sai = (GF_CENCSampleAuxInfo *)gf_list_get(ptr->samp_aux_info, i); if (! 
sai->cenc_data_size) continue; ptr->size += sai->cenc_data_size; } return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *piff_pssh_box_new() { ISOM_DECL_BOX_ALLOC(GF_PIFFProtectionSystemHeaderBox, GF_ISOM_BOX_TYPE_UUID); tmp->internal_4cc = GF_ISOM_BOX_UUID_PSSH; return (GF_Box *)tmp; } void piff_pssh_box_del(GF_Box *s) { GF_PIFFProtectionSystemHeaderBox *ptr = (GF_PIFFProtectionSystemHeaderBox*)s; if (ptr->private_data) gf_free(ptr->private_data); gf_free(s); } GF_Err piff_pssh_box_read(GF_Box *s, GF_BitStream *bs) { GF_PIFFProtectionSystemHeaderBox *ptr = (GF_PIFFProtectionSystemHeaderBox*)s; ISOM_DECREASE_SIZE(ptr, 24); //PIFF PSSH extends UUID and fullbox ptr->version = gf_bs_read_u8(bs); ptr->flags = gf_bs_read_u24(bs); gf_bs_read_data(bs, (char *) ptr->SystemID, 16); ptr->private_data_size = gf_bs_read_u32(bs); if (ptr->size < sizeof(char)*ptr->private_data_size) return GF_ISOM_INVALID_FILE; ptr->private_data = gf_malloc(sizeof(char)*ptr->private_data_size); if (!ptr->private_data) return GF_OUT_OF_MEM; ISOM_DECREASE_SIZE(ptr, ptr->private_data_size); gf_bs_read_data(bs, (char *) ptr->private_data, ptr->private_data_size); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err piff_pssh_box_write(GF_Box *s, GF_BitStream *bs) { GF_PIFFProtectionSystemHeaderBox *ptr = (GF_PIFFProtectionSystemHeaderBox *) s; GF_Err e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->version); gf_bs_write_u24(bs, ptr->flags); gf_bs_write_data(bs, (char *) ptr->SystemID, 16); gf_bs_write_u32(bs, ptr->private_data_size); gf_bs_write_data(bs, (char *) ptr->private_data, ptr->private_data_size); return GF_OK; } GF_Err piff_pssh_box_size(GF_Box *s) { GF_PIFFProtectionSystemHeaderBox *ptr = (GF_PIFFProtectionSystemHeaderBox*)s; ptr->size += 24 + ptr->private_data_size; return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *senc_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleEncryptionBox, GF_ISOM_BOX_TYPE_SENC); return (GF_Box *)tmp; } void senc_box_del(GF_Box *s) { GF_SampleEncryptionBox *ptr = (GF_SampleEncryptionBox *)s; while (gf_list_count(ptr->samp_aux_info)) { GF_CENCSampleAuxInfo *sai = (GF_CENCSampleAuxInfo *)gf_list_get(ptr->samp_aux_info, 0); if (sai) gf_isom_cenc_samp_aux_info_del(sai); gf_list_rem(ptr->samp_aux_info, 0); } if (ptr->samp_aux_info) gf_list_del(ptr->samp_aux_info); gf_free(s); } u8 key_info_get_iv_size(const u8 *key_info, u32 nb_keys, u32 idx, u8 *const_iv_size, const u8 **const_iv) { u32 i, kpos=3; if (const_iv_size) *const_iv_size = 0; if (const_iv) *const_iv = NULL; for (i=0; i<nb_keys; i++) { u8 civ_size=0; const u8 *civ = NULL; u8 iv_size = key_info[kpos]; kpos += 17; if (!iv_size) { civ_size = key_info[kpos]; civ = key_info + kpos + 1; kpos += 1 + iv_size; } if (i+1==idx) { if (const_iv_size) *const_iv_size = civ_size; if (const_iv) *const_iv = civ; return iv_size; } } return 0; } #ifndef GPAC_DISABLE_ISOM_FRAGMENTS GF_Err senc_Parse(GF_BitStream *bs, GF_TrackBox *trak, GF_TrackFragmentBox *traf, GF_SampleEncryptionBox *senc) #else GF_Err senc_Parse(GF_BitStream *bs, GF_TrackBox *trak, void *traf, GF_SampleEncryptionBox *senc) #endif { GF_Err e; Bool parse_failed = GF_FALSE; u32 i, count, sample_number; u32 senc_size = (u32) senc->size; u32 subs_size = 0, def_IV_size; u64 pos = gf_bs_get_position(bs); Bool do_warn = GF_TRUE; Bool use_multikey = GF_FALSE; #ifdef GPAC_DISABLE_ISOM_FRAGMENTS if (!traf) return GF_BAD_PARAM; #endif //BOX + version/flags if (senc_size<12) return GF_BAD_PARAM; senc_size -= 12; if (senc->piff_type==1) { //UUID if (senc_size<16) 
return GF_BAD_PARAM; senc_size -= 16; } else if (!senc->piff_type) { if (senc->version==1) use_multikey = GF_TRUE; } if (senc->flags & 2) subs_size = 8; if (senc_size<4) return GF_BAD_PARAM; sample_number = 1; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (trak) sample_number += trak->sample_count_at_seg_start; #endif gf_bs_seek(bs, senc->bs_offset); count = gf_bs_read_u32(bs); senc_size -= 4; def_IV_size = 0; //check the target size if we have one subsample if (senc_size >= count * (16 + subs_size)) { def_IV_size = 16; } else if (senc_size >= count * (8 + subs_size)) { def_IV_size = 8; } else if (senc_size >= count * (subs_size)) { def_IV_size = 0; } if (!senc->samp_aux_info) senc->samp_aux_info = gf_list_new(); for (i=0; i<count; i++) { const u8 *key_info=NULL; u32 key_info_size=0; Bool is_encrypted; GF_CENCSampleAuxInfo *sai; u8 IV_size=0; u32 nb_keys = 0; u32 nb_bytes_subsample = 6; u32 nb_subs_bits = 16; GF_SAFEALLOC(sai, GF_CENCSampleAuxInfo); if (!sai) { gf_bs_seek(bs, pos); return GF_OUT_OF_MEM; } if (trak) { e = gf_isom_get_sample_cenc_info_internal(trak, traf, senc, sample_number, &is_encrypted, NULL, NULL, &key_info, &key_info_size); if (! key_info) { IV_size = key_info_size; //piff default use_multikey = GF_FALSE; senc->piff_type = 2; } else if (use_multikey) { nb_keys = key_info[1]; nb_keys <<= 8; nb_keys |= key_info[2]; nb_bytes_subsample = 8; nb_subs_bits = 32; } else { IV_size = key_info[3]; } if (e) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[isobmf] could not get cenc info for sample %d: %s\n", sample_number, gf_error_to_string(e) )); gf_isom_cenc_samp_aux_info_del(sai); gf_bs_seek(bs, pos); if (trak->moov->mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) return GF_OK; return e; } } //no init movie setup (segment dump/inspaction, assume default encrypted and 16 bytes IV else { if (do_warn) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[isobmf] no moov found, cannot get cenc default info, assuming isEncrypted, IV size %d (computed from senc size)\n", def_IV_size)); do_warn = GF_FALSE; } is_encrypted = GF_TRUE; IV_size = def_IV_size; } if (senc_size < IV_size) { parse_failed = GF_TRUE; gf_isom_cenc_samp_aux_info_del(sai); break; } sample_number++; //subsample info is only signaled for encrypted samples if (is_encrypted) { u64 sai_start = gf_bs_get_position(bs); u32 nb_subs = 0; if (use_multikey) { u32 j; u32 nb_iv_init = gf_bs_read_u16(bs); for (j=0; j<nb_iv_init; j++) { u32 idx = gf_bs_read_u16(bs); IV_size = key_info_get_iv_size(key_info, nb_keys, idx, NULL, NULL); if (!IV_size) { gf_isom_cenc_samp_aux_info_del(sai); GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[isobmf] Failed to parse SENC box, invalid SAI multikey with IV size 0\n" )); gf_bs_seek(bs, pos); return GF_ISOM_INVALID_FILE; } gf_bs_skip_bytes(bs, IV_size); } } else { if (IV_size > 16) { gf_isom_cenc_samp_aux_info_del(sai); GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[isobmf] Failed to parse SENC box, invalid SAI size\n" )); gf_bs_seek(bs, pos); return GF_ISOM_INVALID_FILE; } if (IV_size) { gf_bs_skip_bytes(bs, IV_size); } } if (senc->flags & 0x00000002) { nb_subs = gf_bs_read_int(bs, nb_subs_bits); } sai->cenc_data_size = (u32) (gf_bs_get_position(bs) - sai_start); sai->cenc_data_size += nb_subs * nb_bytes_subsample; gf_bs_seek(bs, sai_start); if ((s32) senc_size < sai->cenc_data_size) { parse_failed = GF_TRUE; gf_isom_cenc_samp_aux_info_del(sai); break; } sai->cenc_data = gf_malloc(sizeof(u8) * sai->cenc_data_size); if (!sai->cenc_data) { gf_isom_cenc_samp_aux_info_del(sai); gf_bs_seek(bs, pos); return GF_OUT_OF_MEM; } 
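/* At this point sai->cenc_data_size has been computed by probing the
 * per-sample IV data and, when the subsample flag (0x2) is set, the
 * subsample count; the stream was then rewound to sai_start and the size
 * was checked against the remaining senc_size. The raw SAI bytes (IV data,
 * subsample count and subsample entries) are now copied verbatim into
 * sai->cenc_data. */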
gf_bs_read_data(bs, sai->cenc_data, sai->cenc_data_size); senc_size -= sai->cenc_data_size; } else { i--; sai->isNotProtected = 1; } if (senc->internal_4cc == GF_ISOM_BOX_UUID_PSEC) { sai->key_info_size = IV_size; } else { sai->key_info = key_info; sai->key_info_size = key_info_size; } gf_list_add(senc->samp_aux_info, sai); } gf_bs_seek(bs, pos); if (parse_failed) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[isobmf] cannot parse senc, missing IV/crypto state\n")); } return GF_OK; } GF_Err senc_box_read(GF_Box *s, GF_BitStream *bs) { GF_SampleEncryptionBox *ptr = (GF_SampleEncryptionBox *)s; ISOM_DECREASE_SIZE(ptr, 4); //WARNING - PSEC (UUID) IS TYPECASTED TO SENC (FULL BOX) SO WE CANNOT USE USUAL FULL BOX FUNCTIONS ptr->version = gf_bs_read_u8(bs); ptr->flags = gf_bs_read_u24(bs); ptr->bs_offset = gf_bs_get_position(bs); gf_bs_skip_bytes(bs, ptr->size); ptr->size = 0; ptr->load_needed = GF_TRUE; return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err senc_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; u32 sample_count, nb_crypt_samples; GF_SampleEncryptionBox *ptr = (GF_SampleEncryptionBox *) s; sample_count = gf_list_count(ptr->samp_aux_info); //temp patch until we cleanup the spec... nb_crypt_samples = 0; for (i = 0; i < sample_count; i++) { GF_CENCSampleAuxInfo *sai = (GF_CENCSampleAuxInfo *)gf_list_get(ptr->samp_aux_info, i); if (!sai->isNotProtected) nb_crypt_samples++; } if (!nb_crypt_samples) { ptr->size = 0; return GF_OK; } e = gf_isom_box_write_header(s, bs); if (e) return e; //WARNING - PSEC (UUID) IS TYPECASTED TO SENC (FULL BOX) SO WE CANNOT USE USUAL FULL BOX FUNCTIONS gf_bs_write_u8(bs, ptr->version); gf_bs_write_u24(bs, ptr->flags); gf_bs_write_u32(bs, nb_crypt_samples); e = store_senc_info(ptr, bs); if (e) return e; for (i = 0; i < sample_count; i++) { GF_CENCSampleAuxInfo *sai = (GF_CENCSampleAuxInfo *)gf_list_get(ptr->samp_aux_info, i); if (sai->isNotProtected || !sai->cenc_data_size) continue; gf_bs_write_data(bs, sai->cenc_data, sai->cenc_data_size); } return GF_OK; } GF_Err senc_box_size(GF_Box *s) { u32 sample_count; u32 i, nb_crypt_samples; GF_SampleEncryptionBox *ptr = (GF_SampleEncryptionBox*)s; sample_count = gf_list_count(ptr->samp_aux_info); //temp patch until we cleanup the spec... 
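/* The size pass mirrors senc_box_write(): only samples whose auxiliary
 * info is not flagged isNotProtected are counted. If none remain, the box
 * size is forced to 0 so the box is skipped at write time; otherwise 4
 * bytes of version/flags and 4 bytes of sample count are added before
 * accumulating each protected sample's cenc_data_size. */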
nb_crypt_samples=0; for (i = 0; i < sample_count; i++) { GF_CENCSampleAuxInfo *sai = (GF_CENCSampleAuxInfo *)gf_list_get(ptr->samp_aux_info, i); if (!sai->isNotProtected) nb_crypt_samples++; } if (!nb_crypt_samples) { ptr->size = 0; return GF_OK; } //WARNING - PSEC (UUID) IS TYPECASTED TO SENC (FULL BOX) SO WE CANNOT USE USUAL FULL BOX FUNCTIONS ptr->size += 4; //version and flags ptr->size += 4; //sample count for (i = 0; i < sample_count; i++) { GF_CENCSampleAuxInfo *sai = (GF_CENCSampleAuxInfo *)gf_list_get(ptr->samp_aux_info, i); if (sai->isNotProtected) continue; ptr->size += sai->cenc_data_size; } return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *adkm_box_new() { ISOM_DECL_BOX_ALLOC(GF_AdobeDRMKeyManagementSystemBox, GF_ISOM_BOX_TYPE_ADKM); tmp->version = 1; tmp->flags = 0; return (GF_Box *)tmp; } void adkm_box_del(GF_Box *s) { GF_AdobeDRMKeyManagementSystemBox *ptr = (GF_AdobeDRMKeyManagementSystemBox *)s; if (!ptr) return; gf_free(s); } GF_Err adkm_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem) { GF_AdobeDRMKeyManagementSystemBox *ptr = (GF_AdobeDRMKeyManagementSystemBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_AHDR: BOX_FIELD_ASSIGN(header, GF_AdobeDRMHeaderBox) break; case GF_ISOM_BOX_TYPE_ADAF: BOX_FIELD_ASSIGN(au_format, GF_AdobeDRMAUFormatBox) break; } return GF_OK; } GF_Err adkm_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err adkm_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_full_box_write(s, bs); } GF_Err adkm_box_size(GF_Box *s) { u32 pos=0; GF_AdobeDRMKeyManagementSystemBox *ptr = (GF_AdobeDRMKeyManagementSystemBox *)s; gf_isom_check_position(s, (GF_Box *)ptr->header, &pos); gf_isom_check_position(s, (GF_Box *)ptr->au_format, &pos); return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *ahdr_box_new() { ISOM_DECL_BOX_ALLOC(GF_AdobeDRMHeaderBox, GF_ISOM_BOX_TYPE_AHDR); tmp->version = 2; tmp->flags = 0; return (GF_Box *)tmp; } void ahdr_box_del(GF_Box *s) { gf_free(s); } GF_Err ahdr_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem) { GF_AdobeDRMHeaderBox *ptr = (GF_AdobeDRMHeaderBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_APRM: BOX_FIELD_ASSIGN(std_enc_params, GF_AdobeStdEncryptionParamsBox) break; } return GF_OK; } GF_Err ahdr_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err ahdr_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_full_box_write(s, bs); } GF_Err ahdr_box_size(GF_Box *s) { u32 pos=0; GF_AdobeDRMHeaderBox *ptr = (GF_AdobeDRMHeaderBox *)s; gf_isom_check_position(s, (GF_Box *)ptr->std_enc_params, &pos); return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *aprm_box_new() { ISOM_DECL_BOX_ALLOC(GF_AdobeStdEncryptionParamsBox, GF_ISOM_BOX_TYPE_APRM); tmp->version = 1; tmp->flags = 0; return (GF_Box *)tmp; } void aprm_box_del(GF_Box *s) { gf_free(s); } GF_Err aprm_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem) { GF_AdobeStdEncryptionParamsBox *ptr = (GF_AdobeStdEncryptionParamsBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_AEIB: BOX_FIELD_ASSIGN(enc_info, GF_AdobeEncryptionInfoBox) break; case GF_ISOM_BOX_TYPE_AKEY: BOX_FIELD_ASSIGN(key_info, GF_AdobeKeyInfoBox) break; } return GF_OK; } GF_Err aprm_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err aprm_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_full_box_write(s, bs); } GF_Err aprm_box_size(GF_Box *s) { u32 pos=0; GF_AdobeStdEncryptionParamsBox *ptr = 
(GF_AdobeStdEncryptionParamsBox *)s; gf_isom_check_position(s, (GF_Box *)ptr->enc_info, &pos); gf_isom_check_position(s, (GF_Box *)ptr->key_info, &pos); return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *aeib_box_new() { ISOM_DECL_BOX_ALLOC(GF_AdobeEncryptionInfoBox, GF_ISOM_BOX_TYPE_AEIB); tmp->version = 1; tmp->flags = 0; return (GF_Box *)tmp; } void aeib_box_del(GF_Box *s) { GF_AdobeEncryptionInfoBox *ptr = (GF_AdobeEncryptionInfoBox*)s; if (!ptr) return; if (ptr->enc_algo) gf_free(ptr->enc_algo); gf_free(ptr); } GF_Err aeib_box_read(GF_Box *s, GF_BitStream *bs) { GF_AdobeEncryptionInfoBox *ptr = (GF_AdobeEncryptionInfoBox*)s; u32 len; len = (u32) ptr->size - 1; if (len) { ptr->enc_algo = (char *)gf_malloc(len*sizeof(char)); if (!ptr->enc_algo) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->enc_algo, len); } ptr->key_length = gf_bs_read_u8(bs); ptr->size = 0; return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err aeib_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_AdobeEncryptionInfoBox *ptr = (GF_AdobeEncryptionInfoBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->enc_algo) { gf_bs_write_data(bs, (char *) ptr->enc_algo, (u32) strlen(ptr->enc_algo)); gf_bs_write_u8(bs, 0); //string end } gf_bs_write_u8(bs, ptr->key_length); return GF_OK; } GF_Err aeib_box_size(GF_Box *s) { GF_AdobeEncryptionInfoBox *ptr = (GF_AdobeEncryptionInfoBox*)s; if (ptr->enc_algo) ptr->size += strlen(ptr->enc_algo) + 1; ptr->size += 1; //KeyLength return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *akey_box_new() { ISOM_DECL_BOX_ALLOC(GF_AdobeKeyInfoBox, GF_ISOM_BOX_TYPE_AKEY); tmp->version = 1; tmp->flags = 0; return (GF_Box *)tmp; } void akey_box_del(GF_Box *s) { gf_free(s); } GF_Err akey_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem) { GF_AdobeKeyInfoBox *ptr = (GF_AdobeKeyInfoBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_FLXS: BOX_FIELD_ASSIGN(params, GF_AdobeFlashAccessParamsBox) break; } return GF_OK; } GF_Err akey_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err akey_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_full_box_write(s, bs); } GF_Err akey_box_size(GF_Box *s) { u32 pos=0; GF_AdobeKeyInfoBox *ptr = (GF_AdobeKeyInfoBox *)s; gf_isom_check_position(s, (GF_Box *)ptr->params, &pos); return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *flxs_box_new() { ISOM_DECL_BOX_ALLOC(GF_AdobeFlashAccessParamsBox, GF_ISOM_BOX_TYPE_FLXS); return (GF_Box *)tmp; } void flxs_box_del(GF_Box *s) { GF_AdobeFlashAccessParamsBox *ptr = (GF_AdobeFlashAccessParamsBox*)s; if (!ptr) return; if (ptr->metadata) gf_free(ptr->metadata); gf_free(ptr); } GF_Err flxs_box_read(GF_Box *s, GF_BitStream *bs) { GF_AdobeFlashAccessParamsBox *ptr = (GF_AdobeFlashAccessParamsBox*)s; u32 len; len = (u32) ptr->size; if (len) { ptr->metadata = (char *)gf_malloc(len*sizeof(char)); if (!ptr->metadata) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->metadata, len); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err flxs_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_AdobeFlashAccessParamsBox *ptr = (GF_AdobeFlashAccessParamsBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; if (ptr->metadata) { gf_bs_write_data(bs, ptr->metadata, (u32) strlen(ptr->metadata)); gf_bs_write_u8(bs, 0); //string end } return GF_OK; } GF_Err flxs_box_size(GF_Box *s) { GF_AdobeFlashAccessParamsBox *ptr = (GF_AdobeFlashAccessParamsBox*)s; if (ptr->metadata) ptr->size 
+= strlen(ptr->metadata) + 1; return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *adaf_box_new() { ISOM_DECL_BOX_ALLOC(GF_AdobeDRMAUFormatBox, GF_ISOM_BOX_TYPE_ADAF); return (GF_Box *)tmp; } void adaf_box_del(GF_Box *s) { gf_free(s); } GF_Err adaf_box_read(GF_Box *s, GF_BitStream *bs) { GF_AdobeDRMAUFormatBox *ptr = (GF_AdobeDRMAUFormatBox*)s; ISOM_DECREASE_SIZE(ptr, 3); ptr->selective_enc = gf_bs_read_u8(bs); gf_bs_read_u8(bs);//resersed ptr->IV_length = gf_bs_read_u8(bs); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err adaf_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_AdobeDRMAUFormatBox *ptr = (GF_AdobeDRMAUFormatBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->selective_enc); gf_bs_write_u8(bs, 0x0); gf_bs_write_u8(bs, ptr->IV_length); return GF_OK; } GF_Err adaf_box_size(GF_Box *s) { GF_AdobeDRMAUFormatBox *ptr = (GF_AdobeDRMAUFormatBox*)s; ptr->size += 3; return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE #endif /*GPAC_DISABLE_ISOM*/
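/*
 * Illustrative sketch, not part of the original file: a minimal usage
 * example of the key_info layout consumed by key_info_get_iv_size() above,
 * as produced by tenc_box_read(): three reserved bytes, then for each key
 * one byte of per-sample IV size followed by a 16-byte KID, and, when that
 * IV size is 0, one byte of constant-IV size followed by the constant IV
 * itself. The buffer contents and the key_info_demo() name are hypothetical
 * and for illustration only; key_info_get_iv_size() is the function defined
 * earlier in this file.
 */
#ifndef GPAC_DISABLE_ISOM
void key_info_demo(void)
{
	/* one key, 16-byte per-sample IV, zeroed KID for the example */
	u8 key_info[3 + 1 + 16] = {0};
	u8 iv_size, const_iv_size;
	const u8 *const_iv;

	key_info[3] = 16; /* per-sample IV size of key #1; bytes 4..19 carry the KID */
	iv_size = key_info_get_iv_size(key_info, 1, 1, &const_iv_size, &const_iv);
	/* iv_size is 16; const_iv_size is 0 and const_iv is NULL because a
	 * per-sample IV is present for this key */
	(void) iv_size;
	(void) const_iv_size;
	(void) const_iv;
}
#endif /*GPAC_DISABLE_ISOM*/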
null
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre, Cyril Concolato * Copyright (c) Telecom ParisTech 2005-2020 * All rights reserved * * This file is part of GPAC / ISO Media File Format sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/internal/isomedia_dev.h> #ifndef GPAC_DISABLE_ISOM /* ProtectionInfo Box */ GF_Box *sinf_box_new() { ISOM_DECL_BOX_ALLOC(GF_ProtectionSchemeInfoBox, GF_ISOM_BOX_TYPE_SINF); return (GF_Box *)tmp; } void sinf_box_del(GF_Box *s) { gf_free(s); } GF_Err sinf_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem) { GF_ProtectionSchemeInfoBox *ptr = (GF_ProtectionSchemeInfoBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_FRMA: BOX_FIELD_ASSIGN(original_format, GF_OriginalFormatBox) break; case GF_ISOM_BOX_TYPE_SCHM: BOX_FIELD_ASSIGN(scheme_type, GF_SchemeTypeBox) break; case GF_ISOM_BOX_TYPE_SCHI: BOX_FIELD_ASSIGN(info, GF_SchemeInformationBox) break; } return GF_OK; } GF_Err sinf_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err sinf_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err sinf_box_size(GF_Box *s) { u32 pos=0; GF_ProtectionSchemeInfoBox *ptr = (GF_ProtectionSchemeInfoBox *)s; gf_isom_check_position(s, (GF_Box *)ptr->original_format, &pos); gf_isom_check_position(s, (GF_Box *)ptr->scheme_type, &pos); gf_isom_check_position(s, (GF_Box *)ptr->info, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* OriginalFormat Box */ GF_Box *frma_box_new() { ISOM_DECL_BOX_ALLOC(GF_OriginalFormatBox, GF_ISOM_BOX_TYPE_FRMA); return (GF_Box *)tmp; } void frma_box_del(GF_Box *s) { GF_OriginalFormatBox *ptr = (GF_OriginalFormatBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err frma_box_read(GF_Box *s, GF_BitStream *bs) { GF_OriginalFormatBox *ptr = (GF_OriginalFormatBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->data_format = gf_bs_read_u32(bs); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err frma_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_OriginalFormatBox *ptr = (GF_OriginalFormatBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->data_format); return GF_OK; } GF_Err frma_box_size(GF_Box *s) { GF_OriginalFormatBox *ptr = (GF_OriginalFormatBox *)s; ptr->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* SchemeType Box */ GF_Box *schm_box_new() { ISOM_DECL_BOX_ALLOC(GF_SchemeTypeBox, GF_ISOM_BOX_TYPE_SCHM); return (GF_Box *)tmp; } void schm_box_del(GF_Box *s) { GF_SchemeTypeBox *ptr = (GF_SchemeTypeBox *)s; if (ptr == NULL) return; if (ptr->URI) gf_free(ptr->URI); gf_free(ptr); } GF_Err schm_box_read(GF_Box *s, GF_BitStream *bs) { GF_SchemeTypeBox *ptr = (GF_SchemeTypeBox *)s; ISOM_DECREASE_SIZE(ptr, 8); ptr->scheme_type = gf_bs_read_u32(bs); ptr->scheme_version = gf_bs_read_u32(bs); if 
(ptr->size && (ptr->flags & 0x000001)) { u32 len = (u32) (ptr->size); ptr->URI = (char*)gf_malloc(sizeof(char)*len); if (!ptr->URI) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->URI, len); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err schm_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SchemeTypeBox *ptr = (GF_SchemeTypeBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->scheme_type); gf_bs_write_u32(bs, ptr->scheme_version); if (ptr->flags & 0x000001) { if (ptr->URI) gf_bs_write_data(bs, ptr->URI, (u32) strlen(ptr->URI)+1); else gf_bs_write_u8(bs, 0); } return GF_OK; } GF_Err schm_box_size(GF_Box *s) { GF_SchemeTypeBox *ptr = (GF_SchemeTypeBox *) s; if (!s) return GF_BAD_PARAM; ptr->size += 8; if (ptr->flags & 0x000001) ptr->size += 1 + (ptr->URI ? strlen(ptr->URI) : 0); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* SchemeInformation Box */ GF_Box *schi_box_new() { ISOM_DECL_BOX_ALLOC(GF_SchemeInformationBox, GF_ISOM_BOX_TYPE_SCHI); return (GF_Box *)tmp; } void schi_box_del(GF_Box *s) { gf_free(s); } GF_Err schi_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem) { GF_SchemeInformationBox *ptr = (GF_SchemeInformationBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_IKMS: BOX_FIELD_ASSIGN(ikms, GF_ISMAKMSBox) return GF_OK; case GF_ISOM_BOX_TYPE_ISFM: BOX_FIELD_ASSIGN(isfm, GF_ISMASampleFormatBox) return GF_OK; case GF_ISOM_BOX_TYPE_ISLT: BOX_FIELD_ASSIGN(islt, GF_ISMACrypSaltBox) return GF_OK; case GF_ISOM_BOX_TYPE_ODKM: BOX_FIELD_ASSIGN(odkm, GF_OMADRMKMSBox) return GF_OK; case GF_ISOM_BOX_TYPE_TENC: BOX_FIELD_ASSIGN(tenc, GF_TrackEncryptionBox) return GF_OK; case GF_ISOM_BOX_TYPE_ADKM: BOX_FIELD_ASSIGN(adkm, GF_AdobeDRMKeyManagementSystemBox) return GF_OK; case GF_ISOM_BOX_TYPE_UUID: if (((GF_UUIDBox*)a)->internal_4cc==GF_ISOM_BOX_UUID_TENC) { BOX_FIELD_ASSIGN(piff_tenc, GF_PIFFTrackEncryptionBox) return GF_OK; } else { return GF_OK; } } return GF_OK; } GF_Err schi_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err schi_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err schi_box_size(GF_Box *s) { u32 pos=0; GF_SchemeInformationBox *ptr = (GF_SchemeInformationBox *)s; gf_isom_check_position(s, (GF_Box *)ptr->ikms, &pos); gf_isom_check_position(s, (GF_Box *)ptr->isfm, &pos); gf_isom_check_position(s, (GF_Box *)ptr->islt, &pos); gf_isom_check_position(s, (GF_Box *)ptr->odkm, &pos); gf_isom_check_position(s, (GF_Box *)ptr->tenc, &pos); gf_isom_check_position(s, (GF_Box *)ptr->adkm, &pos); gf_isom_check_position(s, (GF_Box *)ptr->piff_tenc, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* ISMAKMS Box */ GF_Box *iKMS_box_new() { ISOM_DECL_BOX_ALLOC(GF_ISMAKMSBox, GF_ISOM_BOX_TYPE_IKMS); return (GF_Box *)tmp; } void iKMS_box_del(GF_Box *s) { GF_ISMAKMSBox *ptr = (GF_ISMAKMSBox *)s; if (ptr == NULL) return; if (ptr->URI) gf_free(ptr->URI); gf_free(ptr); } GF_Err iKMS_box_read(GF_Box *s, GF_BitStream *bs) { u32 len; GF_ISMAKMSBox *ptr = (GF_ISMAKMSBox *)s; len = (u32) (ptr->size); ptr->URI = (char*) gf_malloc(sizeof(char)*len); if (!ptr->URI) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->URI, len); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err iKMS_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ISMAKMSBox *ptr = (GF_ISMAKMSBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->URI) gf_bs_write_data(bs, ptr->URI, (u32) 
strlen(ptr->URI)); gf_bs_write_u8(bs, 0); return GF_OK; } GF_Err iKMS_box_size(GF_Box *s) { GF_ISMAKMSBox *ptr = (GF_ISMAKMSBox *)s; ptr->size += (ptr->URI ? strlen(ptr->URI) : 0) + 1; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* ISMASampleFormat Box */ GF_Box *iSFM_box_new() { ISOM_DECL_BOX_ALLOC(GF_ISMASampleFormatBox, GF_ISOM_BOX_TYPE_ISFM); return (GF_Box *)tmp; } void iSFM_box_del(GF_Box *s) { GF_ISMASampleFormatBox *ptr = (GF_ISMASampleFormatBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err iSFM_box_read(GF_Box *s, GF_BitStream *bs) { GF_ISMASampleFormatBox *ptr = (GF_ISMASampleFormatBox *)s; ISOM_DECREASE_SIZE(ptr, 3); ptr->selective_encryption = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 7); ptr->key_indicator_length = gf_bs_read_u8(bs); ptr->IV_length = gf_bs_read_u8(bs); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err iSFM_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ISMASampleFormatBox *ptr = (GF_ISMASampleFormatBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, ptr->selective_encryption, 1); gf_bs_write_int(bs, 0, 7); gf_bs_write_u8(bs, ptr->key_indicator_length); gf_bs_write_u8(bs, ptr->IV_length); return GF_OK; } GF_Err iSFM_box_size(GF_Box *s) { GF_ISMASampleFormatBox *ptr = (GF_ISMASampleFormatBox *)s; ptr->size += 3; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* ISMASampleFormat Box */ GF_Box *iSLT_box_new() { ISOM_DECL_BOX_ALLOC(GF_ISMACrypSaltBox, GF_ISOM_BOX_TYPE_ISLT); return (GF_Box *)tmp; } void iSLT_box_del(GF_Box *s) { GF_ISMACrypSaltBox *ptr = (GF_ISMACrypSaltBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err iSLT_box_read(GF_Box *s, GF_BitStream *bs) { GF_ISMACrypSaltBox *ptr = (GF_ISMACrypSaltBox *)s; if (ptr == NULL) return GF_BAD_PARAM; ISOM_DECREASE_SIZE(ptr, 8); ptr->salt = gf_bs_read_u64(bs); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err iSLT_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ISMACrypSaltBox *ptr = (GF_ISMACrypSaltBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->salt); return GF_OK; } GF_Err iSLT_box_size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* OMADRMCommonHeader Box */ GF_Box *ohdr_box_new() { ISOM_DECL_BOX_ALLOC(GF_OMADRMCommonHeaderBox, GF_ISOM_BOX_TYPE_OHDR); tmp->child_boxes = gf_list_new(); return (GF_Box *)tmp; } void ohdr_box_del(GF_Box *s) { GF_OMADRMCommonHeaderBox *ptr = (GF_OMADRMCommonHeaderBox*)s; if (ptr == NULL) return; if (ptr->ContentID) gf_free(ptr->ContentID); if (ptr->RightsIssuerURL) gf_free(ptr->RightsIssuerURL); if (ptr->TextualHeaders) gf_free(ptr->TextualHeaders); gf_free(ptr); } GF_Err ohdr_box_read(GF_Box *s, GF_BitStream *bs) { u16 cid_len, ri_len; GF_OMADRMCommonHeaderBox *ptr = (GF_OMADRMCommonHeaderBox*)s; ISOM_DECREASE_SIZE(ptr, (1+1+8+2+2+2) ); ptr->EncryptionMethod = gf_bs_read_u8(bs); ptr->PaddingScheme = gf_bs_read_u8(bs); ptr->PlaintextLength = gf_bs_read_u64(bs); cid_len = gf_bs_read_u16(bs); ri_len = gf_bs_read_u16(bs); ptr->TextualHeadersLen = gf_bs_read_u16(bs); if (ptr->size<cid_len+ri_len+ptr->TextualHeadersLen) return GF_ISOM_INVALID_FILE; if (cid_len) { ptr->ContentID = (char *)gf_malloc(sizeof(char)*(cid_len+1)); if (!ptr->ContentID) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->ContentID, cid_len); ptr->ContentID[cid_len]=0; } if (ri_len) { ptr->RightsIssuerURL = (char *)gf_malloc(sizeof(char)*(ri_len+1)); if (!ptr->RightsIssuerURL) return GF_OUT_OF_MEM; gf_bs_read_data(bs, 
ptr->RightsIssuerURL, ri_len); ptr->RightsIssuerURL[ri_len]=0; } if (ptr->TextualHeadersLen) { ptr->TextualHeaders = (char *)gf_malloc(sizeof(char)*(ptr->TextualHeadersLen+1)); if (!ptr->TextualHeaders) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->TextualHeaders, ptr->TextualHeadersLen); ptr->TextualHeaders[ptr->TextualHeadersLen] = 0; } ISOM_DECREASE_SIZE(ptr, (cid_len+ri_len+ptr->TextualHeadersLen) ); return gf_isom_box_array_read(s, bs); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err ohdr_box_write(GF_Box *s, GF_BitStream *bs) { u16 cid_len, ri_len; GF_Err e; GF_OMADRMCommonHeaderBox *ptr = (GF_OMADRMCommonHeaderBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->EncryptionMethod); gf_bs_write_u8(bs, ptr->PaddingScheme); gf_bs_write_u64(bs, ptr->PlaintextLength); cid_len = ptr->ContentID ? (u16) strlen(ptr->ContentID) : 0; gf_bs_write_u16(bs, cid_len); ri_len = ptr->RightsIssuerURL ? (u16) strlen(ptr->RightsIssuerURL) : 0; gf_bs_write_u16(bs, ri_len); gf_bs_write_u16(bs, ptr->TextualHeadersLen); if (cid_len) gf_bs_write_data(bs, ptr->ContentID, (u32) strlen(ptr->ContentID)); if (ri_len) gf_bs_write_data(bs, ptr->RightsIssuerURL, (u32) strlen(ptr->RightsIssuerURL)); if (ptr->TextualHeadersLen) gf_bs_write_data(bs, ptr->TextualHeaders, ptr->TextualHeadersLen); ISOM_DECREASE_SIZE(ptr, (cid_len+ri_len+ptr->TextualHeadersLen) ); return GF_OK; } GF_Err ohdr_box_size(GF_Box *s) { GF_OMADRMCommonHeaderBox *ptr = (GF_OMADRMCommonHeaderBox *)s; ptr->size += 1+1+8+2+2+2; if (ptr->ContentID) ptr->size += strlen(ptr->ContentID); if (ptr->RightsIssuerURL) ptr->size += strlen(ptr->RightsIssuerURL); if (ptr->TextualHeadersLen) ptr->size += ptr->TextualHeadersLen; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* OMADRMGroupID Box */ GF_Box *grpi_box_new() { ISOM_DECL_BOX_ALLOC(GF_OMADRMGroupIDBox, GF_ISOM_BOX_TYPE_GRPI); return (GF_Box *)tmp; } void grpi_box_del(GF_Box *s) { GF_OMADRMGroupIDBox *ptr = (GF_OMADRMGroupIDBox *)s; if (ptr == NULL) return; if (ptr->GroupID) gf_free(ptr->GroupID); if (ptr->GroupKey) gf_free(ptr->GroupKey); gf_free(ptr); } GF_Err grpi_box_read(GF_Box *s, GF_BitStream *bs) { u16 gid_len; GF_OMADRMGroupIDBox *ptr = (GF_OMADRMGroupIDBox*)s; ISOM_DECREASE_SIZE(ptr, (1+2+2) ); gid_len = gf_bs_read_u16(bs); ptr->GKEncryptionMethod = gf_bs_read_u8(bs); ptr->GKLength = gf_bs_read_u16(bs); if (ptr->size<gid_len+ptr->GKLength) return GF_ISOM_INVALID_FILE; ptr->GroupID = gf_malloc(sizeof(char)*(gid_len+1)); if (!ptr->GroupID) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->GroupID, gid_len); ptr->GroupID[gid_len]=0; ptr->GroupKey = (char *)gf_malloc(sizeof(char)*ptr->GKLength); if (!ptr->GroupKey) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->GroupKey, ptr->GKLength); ISOM_DECREASE_SIZE(ptr, (gid_len+ptr->GKLength) ); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err grpi_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u16 gid_len; GF_OMADRMGroupIDBox *ptr = (GF_OMADRMGroupIDBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gid_len = ptr->GroupID ? 
(u16) strlen(ptr->GroupID) : 0; gf_bs_write_u16(bs, gid_len); gf_bs_write_u8(bs, ptr->GKEncryptionMethod); gf_bs_write_u16(bs, ptr->GKLength); gf_bs_write_data(bs, ptr->GroupID, gid_len); gf_bs_write_data(bs, ptr->GroupKey, ptr->GKLength); return GF_OK; } GF_Err grpi_box_size(GF_Box *s) { GF_OMADRMGroupIDBox *ptr = (GF_OMADRMGroupIDBox *)s; ptr->size += 2+2+1 + ptr->GKLength; if (ptr->GroupID) ptr->size += strlen(ptr->GroupID); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* OMADRMMutableInformation Box */ GF_Box *mdri_box_new() { ISOM_DECL_BOX_ALLOC(GF_OMADRMMutableInformationBox, GF_ISOM_BOX_TYPE_MDRI); return (GF_Box *)tmp; } void mdri_box_del(GF_Box *s) { GF_OMADRMMutableInformationBox*ptr = (GF_OMADRMMutableInformationBox*)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err mdri_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mdri_box_write(GF_Box *s, GF_BitStream *bs) { // GF_OMADRMMutableInformationBox*ptr = (GF_OMADRMMutableInformationBox*)s; GF_Err e = gf_isom_box_write_header(s, bs); if (e) return e; return GF_OK; } GF_Err mdri_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* OMADRMTransactionTracking Box */ GF_Box *odtt_box_new() { ISOM_DECL_BOX_ALLOC(GF_OMADRMTransactionTrackingBox, GF_ISOM_BOX_TYPE_ODTT); return (GF_Box *)tmp; } void odtt_box_del(GF_Box *s) { GF_OMADRMTransactionTrackingBox *ptr = (GF_OMADRMTransactionTrackingBox*)s; gf_free(ptr); } GF_Err odtt_box_read(GF_Box *s, GF_BitStream *bs) { GF_OMADRMTransactionTrackingBox *ptr = (GF_OMADRMTransactionTrackingBox *)s; gf_bs_read_data(bs, ptr->TransactionID, 16); ISOM_DECREASE_SIZE(ptr, 16); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err odtt_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_OMADRMTransactionTrackingBox *ptr = (GF_OMADRMTransactionTrackingBox*)s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_data(bs, ptr->TransactionID, 16); return GF_OK; } GF_Err odtt_box_size(GF_Box *s) { s->size += 16; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* OMADRMRightsObject Box */ GF_Box *odrb_box_new() { ISOM_DECL_BOX_ALLOC(GF_OMADRMRightsObjectBox, GF_ISOM_BOX_TYPE_ODRB); return (GF_Box *)tmp; } void odrb_box_del(GF_Box *s) { GF_OMADRMRightsObjectBox *ptr = (GF_OMADRMRightsObjectBox*)s; if (ptr->oma_ro) gf_free(ptr->oma_ro); gf_free(ptr); } GF_Err odrb_box_read(GF_Box *s, GF_BitStream *bs) { GF_OMADRMRightsObjectBox *ptr = (GF_OMADRMRightsObjectBox *)s; ptr->oma_ro_size = (u32) ptr->size; ptr->oma_ro = (char*) gf_malloc(sizeof(char)*ptr->oma_ro_size); if (!ptr->oma_ro) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->oma_ro, ptr->oma_ro_size); ptr->size = 0; return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err odrb_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_OMADRMRightsObjectBox *ptr = (GF_OMADRMRightsObjectBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_data(bs, ptr->oma_ro, ptr->oma_ro_size); return GF_OK; } GF_Err odrb_box_size(GF_Box *s) { GF_OMADRMRightsObjectBox *ptr = (GF_OMADRMRightsObjectBox *)s; s->size += ptr->oma_ro_size; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* OMADRMKMS Box */ GF_Box *odkm_box_new() { ISOM_DECL_BOX_ALLOC(GF_OMADRMKMSBox, GF_ISOM_BOX_TYPE_ODKM); return (GF_Box *)tmp; } void odkm_box_del(GF_Box *s) { gf_free(s); } GF_Err odkm_Add(GF_Box *s, GF_Box *a, Bool is_rem) { GF_OMADRMKMSBox *ptr = (GF_OMADRMKMSBox *)s; switch (a->type) { case 
GF_ISOM_BOX_TYPE_OHDR: BOX_FIELD_ASSIGN(hdr, GF_OMADRMCommonHeaderBox) return GF_OK; case GF_ISOM_BOX_TYPE_ODAF: BOX_FIELD_ASSIGN(fmt, GF_OMADRMAUFormatBox) return GF_OK; } return GF_OK; } GF_Err odkm_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err odkm_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_full_box_write(s, bs); } GF_Err odkm_box_size(GF_Box *s) { u32 pos=0; GF_OMADRMKMSBox *ptr = (GF_OMADRMKMSBox *)s; gf_isom_check_position(s, (GF_Box *)ptr->hdr, &pos); gf_isom_check_position(s, (GF_Box *)ptr->fmt, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *pssh_box_new() { ISOM_DECL_BOX_ALLOC(GF_ProtectionSystemHeaderBox, GF_ISOM_BOX_TYPE_PSSH); return (GF_Box *)tmp; } void pssh_box_del(GF_Box *s) { GF_ProtectionSystemHeaderBox *ptr = (GF_ProtectionSystemHeaderBox*)s; if (ptr == NULL) return; if (ptr->private_data) gf_free(ptr->private_data); if (ptr->KIDs) gf_free(ptr->KIDs); gf_free(ptr); } GF_Err pssh_box_read(GF_Box *s, GF_BitStream *bs) { GF_ProtectionSystemHeaderBox *ptr = (GF_ProtectionSystemHeaderBox *)s; gf_bs_read_data(bs, (char *) ptr->SystemID, 16); ISOM_DECREASE_SIZE(ptr, 16); if (ptr->version > 0) { ptr->KID_count = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); if (ptr->KID_count) { u32 i; if (ptr->size < ptr->KID_count * sizeof(bin128)) return GF_ISOM_INVALID_FILE; ptr->KIDs = gf_malloc(ptr->KID_count*sizeof(bin128)); if (!ptr->KIDs) return GF_OUT_OF_MEM; for (i=0; i<ptr->KID_count; i++) { gf_bs_read_data(bs, (char *) ptr->KIDs[i], 16); ISOM_DECREASE_SIZE(ptr, 16); } } } ptr->private_data_size = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); if (ptr->private_data_size) { if (ptr->size < ptr->private_data_size) return GF_ISOM_INVALID_FILE; ptr->private_data = gf_malloc(sizeof(char)*ptr->private_data_size); if (!ptr->private_data) return GF_OUT_OF_MEM; gf_bs_read_data(bs, (char *) ptr->private_data, ptr->private_data_size); ISOM_DECREASE_SIZE(ptr, ptr->private_data_size); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err pssh_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ProtectionSystemHeaderBox *ptr = (GF_ProtectionSystemHeaderBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_data(bs, (char *) ptr->SystemID, 16); if (ptr->version > 0) { u32 i; gf_bs_write_u32(bs, ptr->KID_count); for (i=0; i<ptr->KID_count; i++) gf_bs_write_data(bs, (char *) ptr->KIDs[i], 16); } if (ptr->private_data) { gf_bs_write_u32(bs, ptr->private_data_size); gf_bs_write_data(bs, (char *) ptr->private_data, ptr->private_data_size); } else gf_bs_write_u32(bs, 0); return GF_OK; } GF_Err pssh_box_size(GF_Box *s) { GF_ProtectionSystemHeaderBox *ptr = (GF_ProtectionSystemHeaderBox*)s; if (ptr->KID_count && !ptr->version) { ptr->version = 1; } ptr->size += 16; if (ptr->version) ptr->size += 4 + 16*ptr->KID_count; ptr->size += 4 + (ptr->private_data ? 
ptr->private_data_size : 0); return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *tenc_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackEncryptionBox, GF_ISOM_BOX_TYPE_TENC); return (GF_Box *)tmp; } void tenc_box_del(GF_Box *s) { gf_free(s); } GF_Err tenc_box_read(GF_Box *s, GF_BitStream *bs) { u8 iv_size; GF_TrackEncryptionBox *ptr = (GF_TrackEncryptionBox*)s; ISOM_DECREASE_SIZE(ptr, 3); gf_bs_read_u8(bs); //reserved if (!ptr->version) { gf_bs_read_u8(bs); //reserved } else { ptr->crypt_byte_block = gf_bs_read_int(bs, 4); ptr->skip_byte_block = gf_bs_read_int(bs, 4); } ptr->isProtected = gf_bs_read_u8(bs); ISOM_DECREASE_SIZE(ptr, 17); ptr->key_info[0] = 0; ptr->key_info[1] = 0; ptr->key_info[2] = 0; ptr->key_info[3] = iv_size = gf_bs_read_u8(bs); gf_bs_read_data(bs, ptr->key_info+4, 16); if (!iv_size && ptr->isProtected) { ISOM_DECREASE_SIZE(ptr, 1); iv_size = ptr->key_info[20] = gf_bs_read_u8(bs); ISOM_DECREASE_SIZE(ptr, ptr->key_info[20]); if ((iv_size!=8) && (iv_size!=16)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid constant IV size %d, must be 8 or 16\n", (u32) iv_size)); ptr->key_info[20] = 16; return GF_NON_COMPLIANT_BITSTREAM; } gf_bs_read_data(bs, ptr->key_info+21, iv_size); } else if ((iv_size!=0) && (iv_size!=8) && (iv_size!=16)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid IV size %d, must be 0, 8 or 16\n", (u32) iv_size)); return GF_NON_COMPLIANT_BITSTREAM; } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tenc_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TrackEncryptionBox *ptr = (GF_TrackEncryptionBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u8(bs, 0); //reserved if (!ptr->version) { gf_bs_write_u8(bs, 0); //reserved } else { gf_bs_write_int(bs, ptr->crypt_byte_block, 4); gf_bs_write_int(bs, ptr->skip_byte_block, 4); } gf_bs_write_u8(bs, ptr->isProtected); gf_bs_write_u8(bs, ptr->key_info[3]); gf_bs_write_data(bs, ptr->key_info + 4, 16); if ((ptr->isProtected == 1) && !ptr->key_info[3]) { gf_bs_write_u8(bs, ptr->key_info[20]); gf_bs_write_data(bs, ptr->key_info + 21, ptr->key_info[20]); } return GF_OK; } GF_Err tenc_box_size(GF_Box *s) { GF_TrackEncryptionBox *ptr = (GF_TrackEncryptionBox*)s; ptr->size += 3; ptr->size += 17; if ((ptr->isProtected == 1) && ! 
ptr->key_info[3]) { ptr->size += 1 + ptr->key_info[20]; } return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *piff_tenc_box_new() { ISOM_DECL_BOX_ALLOC(GF_PIFFTrackEncryptionBox, GF_ISOM_BOX_TYPE_UUID); tmp->internal_4cc = GF_ISOM_BOX_UUID_TENC; return (GF_Box *)tmp; } void piff_tenc_box_del(GF_Box *s) { gf_free(s); } GF_Err piff_tenc_box_read(GF_Box *s, GF_BitStream *bs) { GF_PIFFTrackEncryptionBox *ptr = (GF_PIFFTrackEncryptionBox*)s; ISOM_DECREASE_SIZE(ptr, 4); //PIFF TENC extends UUID and fullbox ptr->version = gf_bs_read_u8(bs); ptr->flags = gf_bs_read_u24(bs); ISOM_DECREASE_SIZE(ptr, 20); ptr->AlgorithmID = gf_bs_read_int(bs, 24); ptr->key_info[0] = 0; ptr->key_info[1] = 0; ptr->key_info[2] = 0; ptr->key_info[3] = gf_bs_read_u8(bs); gf_bs_read_data(bs, ptr->key_info+4, 16); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err piff_tenc_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_PIFFTrackEncryptionBox *ptr = (GF_PIFFTrackEncryptionBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->version); gf_bs_write_u24(bs, ptr->flags); gf_bs_write_int(bs, ptr->AlgorithmID, 24); gf_bs_write_u8(bs, ptr->key_info[3]); gf_bs_write_data(bs, ptr->key_info+4, 16); return GF_OK; } GF_Err piff_tenc_box_size(GF_Box *s) { GF_PIFFTrackEncryptionBox *ptr = (GF_PIFFTrackEncryptionBox*)s; ptr->size += 24; return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *piff_psec_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleEncryptionBox, GF_ISOM_BOX_TYPE_UUID); tmp->internal_4cc = GF_ISOM_BOX_UUID_PSEC; tmp->piff_type = 1; return (GF_Box *)tmp; } void piff_psec_box_del(GF_Box *s) { GF_SampleEncryptionBox *ptr = (GF_SampleEncryptionBox *)s; while (gf_list_count(ptr->samp_aux_info)) { GF_CENCSampleAuxInfo *sai = (GF_CENCSampleAuxInfo *)gf_list_get(ptr->samp_aux_info, 0); if (sai) gf_isom_cenc_samp_aux_info_del(sai); gf_list_rem(ptr->samp_aux_info, 0); } if (ptr->samp_aux_info) gf_list_del(ptr->samp_aux_info); gf_free(s); } GF_Err piff_psec_box_read(GF_Box *s, GF_BitStream *bs) { GF_SampleEncryptionBox *ptr = (GF_SampleEncryptionBox *)s; ISOM_DECREASE_SIZE(ptr, 4); //PIFF PSEC extends UUID and fullbox ptr->version = gf_bs_read_u8(bs); ptr->flags = gf_bs_read_u24(bs); if (ptr->flags & 1) { ISOM_DECREASE_SIZE(ptr, 20); ptr->AlgorithmID = gf_bs_read_int(bs, 24); ptr->IV_size = gf_bs_read_u8(bs); gf_bs_read_data(bs, (char *) ptr->KID, 16); } if (ptr->IV_size == 0) ptr->IV_size = 8; //default to 8 ptr->bs_offset = gf_bs_get_position(bs); /*u32 sample_count = */gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); if (ptr->IV_size != 8 && ptr->IV_size != 16) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] PIFF PSEC box incorrect IV size: %u - shall be 8 or 16\n", ptr->IV_size)); return GF_BAD_PARAM; } //as for senc, we skip parsing of the box until we have all saiz/saio info gf_bs_skip_bytes(bs, ptr->size); ptr->size = 0; return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err store_senc_info(GF_SampleEncryptionBox *ptr, GF_BitStream *bs) { GF_Err e; u64 pos, new_pos; if (!ptr->cenc_saio) return GF_OK; pos = gf_bs_get_position(bs); if (pos>0xFFFFFFFFULL) { if (ptr->cenc_saio && !ptr->cenc_saio->version) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] saio offset larger than 32-bits but box version 0 enforced. 
Retry without \"saio32\" option\n")); return GF_BAD_PARAM; } } e = gf_bs_seek(bs, ptr->cenc_saio->offset_first_offset_field); if (e) return e; //force using version 1 for saio box i.e offset has 64 bits #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (ptr->traf) { new_pos = pos - ptr->traf->moof_start_in_bs; } else #endif { new_pos = pos; } if (ptr->cenc_saio->offsets) { u32 i; u64 old_offset = ptr->cenc_saio->offsets[0]; for (i=0; i<ptr->cenc_saio->entry_count; i++) { if (ptr->cenc_saio->version) { gf_bs_write_u64(bs, new_pos + ptr->cenc_saio->offsets[i] - old_offset); } else { gf_bs_write_u32(bs, (u32) (new_pos + ptr->cenc_saio->offsets[i] - old_offset)); } ptr->cenc_saio->offsets[i] = new_pos + ptr->cenc_saio->offsets[i] - old_offset; } } else { if (ptr->cenc_saio->version) { gf_bs_write_u64(bs, new_pos); } else { gf_bs_write_u32(bs, (u32) new_pos); } } return gf_bs_seek(bs, pos); } GF_Err piff_psec_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 sample_count; GF_SampleEncryptionBox *ptr = (GF_SampleEncryptionBox *) s; if (!s) return GF_BAD_PARAM; sample_count = gf_list_count(ptr->samp_aux_info); if (!sample_count) { ptr->size = 0; return GF_OK; } e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->version); gf_bs_write_u24(bs, ptr->flags); if (ptr->flags & 1) { gf_bs_write_int(bs, ptr->AlgorithmID, 24); gf_bs_write_u8(bs, ptr->IV_size); gf_bs_write_data(bs, (char *) ptr->KID, 16); } sample_count = gf_list_count(ptr->samp_aux_info); gf_bs_write_u32(bs, sample_count); if (sample_count) { u32 i; e = store_senc_info((GF_SampleEncryptionBox *)ptr, bs); if (e) return e; for (i = 0; i < sample_count; i++) { GF_CENCSampleAuxInfo *sai = (GF_CENCSampleAuxInfo *)gf_list_get(ptr->samp_aux_info, i); if (! sai->cenc_data_size) continue; gf_bs_write_data(bs, (char *)sai->cenc_data, sai->cenc_data_size); } } return GF_OK; } GF_Err piff_psec_box_size(GF_Box *s) { u32 i, sample_count; GF_SampleEncryptionBox *ptr = (GF_SampleEncryptionBox*)s; sample_count = gf_list_count(ptr->samp_aux_info); if (!sample_count) { ptr->size = 0; return GF_OK; } ptr->size += 4; if (ptr->flags & 1) { ptr->size += 20; } ptr->size += 4; for (i = 0; i < sample_count; i++) { GF_CENCSampleAuxInfo *sai = (GF_CENCSampleAuxInfo *)gf_list_get(ptr->samp_aux_info, i); if (! 
sai->cenc_data_size) continue; ptr->size += sai->cenc_data_size; } return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *piff_pssh_box_new() { ISOM_DECL_BOX_ALLOC(GF_PIFFProtectionSystemHeaderBox, GF_ISOM_BOX_TYPE_UUID); tmp->internal_4cc = GF_ISOM_BOX_UUID_PSSH; return (GF_Box *)tmp; } void piff_pssh_box_del(GF_Box *s) { GF_PIFFProtectionSystemHeaderBox *ptr = (GF_PIFFProtectionSystemHeaderBox*)s; if (ptr->private_data) gf_free(ptr->private_data); gf_free(s); } GF_Err piff_pssh_box_read(GF_Box *s, GF_BitStream *bs) { GF_PIFFProtectionSystemHeaderBox *ptr = (GF_PIFFProtectionSystemHeaderBox*)s; ISOM_DECREASE_SIZE(ptr, 24); //PIFF PSSH extends UUID and fullbox ptr->version = gf_bs_read_u8(bs); ptr->flags = gf_bs_read_u24(bs); gf_bs_read_data(bs, (char *) ptr->SystemID, 16); ptr->private_data_size = gf_bs_read_u32(bs); if (ptr->size < sizeof(char)*ptr->private_data_size) return GF_ISOM_INVALID_FILE; ptr->private_data = gf_malloc(sizeof(char)*ptr->private_data_size); if (!ptr->private_data) return GF_OUT_OF_MEM; ISOM_DECREASE_SIZE(ptr, ptr->private_data_size); gf_bs_read_data(bs, (char *) ptr->private_data, ptr->private_data_size); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err piff_pssh_box_write(GF_Box *s, GF_BitStream *bs) { GF_PIFFProtectionSystemHeaderBox *ptr = (GF_PIFFProtectionSystemHeaderBox *) s; GF_Err e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->version); gf_bs_write_u24(bs, ptr->flags); gf_bs_write_data(bs, (char *) ptr->SystemID, 16); gf_bs_write_u32(bs, ptr->private_data_size); gf_bs_write_data(bs, (char *) ptr->private_data, ptr->private_data_size); return GF_OK; } GF_Err piff_pssh_box_size(GF_Box *s) { GF_PIFFProtectionSystemHeaderBox *ptr = (GF_PIFFProtectionSystemHeaderBox*)s; ptr->size += 24 + ptr->private_data_size; return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *senc_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleEncryptionBox, GF_ISOM_BOX_TYPE_SENC); return (GF_Box *)tmp; } void senc_box_del(GF_Box *s) { GF_SampleEncryptionBox *ptr = (GF_SampleEncryptionBox *)s; while (gf_list_count(ptr->samp_aux_info)) { GF_CENCSampleAuxInfo *sai = (GF_CENCSampleAuxInfo *)gf_list_get(ptr->samp_aux_info, 0); if (sai) gf_isom_cenc_samp_aux_info_del(sai); gf_list_rem(ptr->samp_aux_info, 0); } if (ptr->samp_aux_info) gf_list_del(ptr->samp_aux_info); gf_free(s); } u8 key_info_get_iv_size(const u8 *key_info, u32 nb_keys, u32 idx, u8 *const_iv_size, const u8 **const_iv) { u32 i, kpos=3; if (const_iv_size) *const_iv_size = 0; if (const_iv) *const_iv = NULL; for (i=0; i<nb_keys; i++) { u8 civ_size=0; const u8 *civ = NULL; u8 iv_size = key_info[kpos]; kpos += 17; if (!iv_size) { civ_size = key_info[kpos]; civ = key_info + kpos + 1; kpos += 1 + iv_size; } if (i+1==idx) { if (const_iv_size) *const_iv_size = civ_size; if (const_iv) *const_iv = civ; return iv_size; } } return 0; } #ifndef GPAC_DISABLE_ISOM_FRAGMENTS GF_Err senc_Parse(GF_BitStream *bs, GF_TrackBox *trak, GF_TrackFragmentBox *traf, GF_SampleEncryptionBox *senc) #else GF_Err senc_Parse(GF_BitStream *bs, GF_TrackBox *trak, void *traf, GF_SampleEncryptionBox *senc) #endif { GF_Err e; Bool parse_failed = GF_FALSE; u32 i, count, sample_number; u32 senc_size = (u32) senc->size; u32 subs_size = 0, def_IV_size; u64 pos = gf_bs_get_position(bs); Bool do_warn = GF_TRUE; Bool use_multikey = GF_FALSE; #ifdef GPAC_DISABLE_ISOM_FRAGMENTS if (!traf) return GF_BAD_PARAM; #endif //BOX + version/flags if (senc_size<12) return GF_BAD_PARAM; senc_size -= 12; if (senc->piff_type==1) { //UUID if (senc_size<16) 
return GF_BAD_PARAM; senc_size -= 16; } else if (!senc->piff_type) { if (senc->version==1) use_multikey = GF_TRUE; } if (senc->flags & 2) subs_size = 8; if (senc_size<4) return GF_BAD_PARAM; sample_number = 1; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (trak) sample_number += trak->sample_count_at_seg_start; #endif gf_bs_seek(bs, senc->bs_offset); count = gf_bs_read_u32(bs); senc_size -= 4; def_IV_size = 0; //check the target size if we have one subsample if (senc_size >= count * (16 + subs_size)) { def_IV_size = 16; } else if (senc_size >= count * (8 + subs_size)) { def_IV_size = 8; } else if (senc_size >= count * (subs_size)) { def_IV_size = 0; } if (!senc->samp_aux_info) senc->samp_aux_info = gf_list_new(); for (i=0; i<count; i++) { const u8 *key_info=NULL; u32 key_info_size=0; Bool is_encrypted; GF_CENCSampleAuxInfo *sai; u8 IV_size=0; u32 nb_keys = 0; u32 nb_bytes_subsample = 6; u32 nb_subs_bits = 16; GF_SAFEALLOC(sai, GF_CENCSampleAuxInfo); if (!sai) { gf_bs_seek(bs, pos); return GF_OUT_OF_MEM; } if (trak) { e = gf_isom_get_sample_cenc_info_internal(trak, traf, senc, sample_number, &is_encrypted, NULL, NULL, &key_info, &key_info_size); if (! key_info) { IV_size = key_info_size; //piff default use_multikey = GF_FALSE; senc->piff_type = 2; } else if (use_multikey) { nb_keys = key_info[1]; nb_keys <<= 8; nb_keys |= key_info[2]; nb_bytes_subsample = 8; nb_subs_bits = 32; } else { IV_size = key_info[3]; } if (e) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[isobmf] could not get cenc info for sample %d: %s\n", sample_number, gf_error_to_string(e) )); gf_isom_cenc_samp_aux_info_del(sai); gf_bs_seek(bs, pos); if (trak->moov->mov->FragmentsFlags & GF_ISOM_FRAG_READ_DEBUG) return GF_OK; return e; } } //no init movie setup (segment dump/inspaction, assume default encrypted and 16 bytes IV else { if (do_warn) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[isobmf] no moov found, cannot get cenc default info, assuming isEncrypted, IV size %d (computed from senc size)\n", def_IV_size)); do_warn = GF_FALSE; } is_encrypted = GF_TRUE; IV_size = def_IV_size; } if (senc_size < IV_size) { parse_failed = GF_TRUE; gf_isom_cenc_samp_aux_info_del(sai); break; } sample_number++; //subsample info is only signaled for encrypted samples if (is_encrypted) { u64 sai_start = gf_bs_get_position(bs); u32 nb_subs = 0; if (use_multikey) { u32 j; u32 nb_iv_init = gf_bs_read_u16(bs); for (j=0; j<nb_iv_init; j++) { u32 idx = gf_bs_read_u16(bs); IV_size = key_info_get_iv_size(key_info, nb_keys, idx, NULL, NULL); if (!IV_size) { gf_isom_cenc_samp_aux_info_del(sai); GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[isobmf] Failed to parse SENC box, invalid SAI multikey with IV size 0\n" )); gf_bs_seek(bs, pos); return GF_ISOM_INVALID_FILE; } gf_bs_skip_bytes(bs, IV_size); } } else { if (IV_size > 16) { gf_isom_cenc_samp_aux_info_del(sai); GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[isobmf] Failed to parse SENC box, invalid SAI size\n" )); gf_bs_seek(bs, pos); return GF_ISOM_INVALID_FILE; } if (IV_size) { gf_bs_skip_bytes(bs, IV_size); } } if (senc->flags & 0x00000002) { nb_subs = gf_bs_read_int(bs, nb_subs_bits); } sai->cenc_data_size = (u32) (gf_bs_get_position(bs) - sai_start); sai->cenc_data_size += nb_subs * nb_bytes_subsample; gf_bs_seek(bs, sai_start); if ((s32) senc_size < sai->cenc_data_size) { parse_failed = GF_TRUE; gf_isom_cenc_samp_aux_info_del(sai); break; } sai->cenc_data = gf_malloc(sizeof(u8) * sai->cenc_data_size); if (!sai->cenc_data) { gf_isom_cenc_samp_aux_info_del(sai); gf_bs_seek(bs, pos); return GF_OUT_OF_MEM; } 
gf_bs_read_data(bs, sai->cenc_data, sai->cenc_data_size); senc_size -= sai->cenc_data_size; } else { i--; sai->isNotProtected = 1; } if (senc->internal_4cc == GF_ISOM_BOX_UUID_PSEC) { sai->key_info_size = IV_size; } else { sai->key_info = key_info; sai->key_info_size = key_info_size; } gf_list_add(senc->samp_aux_info, sai); } gf_bs_seek(bs, pos); if (parse_failed) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[isobmf] cannot parse senc, missing IV/crypto state\n")); } return GF_OK; } GF_Err senc_box_read(GF_Box *s, GF_BitStream *bs) { GF_SampleEncryptionBox *ptr = (GF_SampleEncryptionBox *)s; ISOM_DECREASE_SIZE(ptr, 4); //WARNING - PSEC (UUID) IS TYPECASTED TO SENC (FULL BOX) SO WE CANNOT USE USUAL FULL BOX FUNCTIONS ptr->version = gf_bs_read_u8(bs); ptr->flags = gf_bs_read_u24(bs); ptr->bs_offset = gf_bs_get_position(bs); gf_bs_skip_bytes(bs, ptr->size); ptr->size = 0; ptr->load_needed = GF_TRUE; return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err senc_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; u32 sample_count, nb_crypt_samples; GF_SampleEncryptionBox *ptr = (GF_SampleEncryptionBox *) s; sample_count = gf_list_count(ptr->samp_aux_info); //temp patch until we cleanup the spec... nb_crypt_samples = 0; for (i = 0; i < sample_count; i++) { GF_CENCSampleAuxInfo *sai = (GF_CENCSampleAuxInfo *)gf_list_get(ptr->samp_aux_info, i); if (!sai->isNotProtected) nb_crypt_samples++; } if (!nb_crypt_samples) { ptr->size = 0; return GF_OK; } e = gf_isom_box_write_header(s, bs); if (e) return e; //WARNING - PSEC (UUID) IS TYPECASTED TO SENC (FULL BOX) SO WE CANNOT USE USUAL FULL BOX FUNCTIONS gf_bs_write_u8(bs, ptr->version); gf_bs_write_u24(bs, ptr->flags); gf_bs_write_u32(bs, nb_crypt_samples); e = store_senc_info(ptr, bs); if (e) return e; for (i = 0; i < sample_count; i++) { GF_CENCSampleAuxInfo *sai = (GF_CENCSampleAuxInfo *)gf_list_get(ptr->samp_aux_info, i); if (sai->isNotProtected || !sai->cenc_data_size) continue; gf_bs_write_data(bs, sai->cenc_data, sai->cenc_data_size); } return GF_OK; } GF_Err senc_box_size(GF_Box *s) { u32 sample_count; u32 i, nb_crypt_samples; GF_SampleEncryptionBox *ptr = (GF_SampleEncryptionBox*)s; sample_count = gf_list_count(ptr->samp_aux_info); //temp patch until we cleanup the spec... 
nb_crypt_samples=0; for (i = 0; i < sample_count; i++) { GF_CENCSampleAuxInfo *sai = (GF_CENCSampleAuxInfo *)gf_list_get(ptr->samp_aux_info, i); if (!sai->isNotProtected) nb_crypt_samples++; } if (!nb_crypt_samples) { ptr->size = 0; return GF_OK; } //WARNING - PSEC (UUID) IS TYPECASTED TO SENC (FULL BOX) SO WE CANNOT USE USUAL FULL BOX FUNCTIONS ptr->size += 4; //version and flags ptr->size += 4; //sample count for (i = 0; i < sample_count; i++) { GF_CENCSampleAuxInfo *sai = (GF_CENCSampleAuxInfo *)gf_list_get(ptr->samp_aux_info, i); if (sai->isNotProtected) continue; ptr->size += sai->cenc_data_size; } return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *adkm_box_new() { ISOM_DECL_BOX_ALLOC(GF_AdobeDRMKeyManagementSystemBox, GF_ISOM_BOX_TYPE_ADKM); tmp->version = 1; tmp->flags = 0; return (GF_Box *)tmp; } void adkm_box_del(GF_Box *s) { GF_AdobeDRMKeyManagementSystemBox *ptr = (GF_AdobeDRMKeyManagementSystemBox *)s; if (!ptr) return; gf_free(s); } GF_Err adkm_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem) { GF_AdobeDRMKeyManagementSystemBox *ptr = (GF_AdobeDRMKeyManagementSystemBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_AHDR: BOX_FIELD_ASSIGN(header, GF_AdobeDRMHeaderBox) break; case GF_ISOM_BOX_TYPE_ADAF: BOX_FIELD_ASSIGN(au_format, GF_AdobeDRMAUFormatBox) break; } return GF_OK; } GF_Err adkm_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err adkm_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_full_box_write(s, bs); } GF_Err adkm_box_size(GF_Box *s) { u32 pos=0; GF_AdobeDRMKeyManagementSystemBox *ptr = (GF_AdobeDRMKeyManagementSystemBox *)s; gf_isom_check_position(s, (GF_Box *)ptr->header, &pos); gf_isom_check_position(s, (GF_Box *)ptr->au_format, &pos); return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *ahdr_box_new() { ISOM_DECL_BOX_ALLOC(GF_AdobeDRMHeaderBox, GF_ISOM_BOX_TYPE_AHDR); tmp->version = 2; tmp->flags = 0; return (GF_Box *)tmp; } void ahdr_box_del(GF_Box *s) { gf_free(s); } GF_Err ahdr_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem) { GF_AdobeDRMHeaderBox *ptr = (GF_AdobeDRMHeaderBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_APRM: BOX_FIELD_ASSIGN(std_enc_params, GF_AdobeStdEncryptionParamsBox) break; } return GF_OK; } GF_Err ahdr_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err ahdr_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_full_box_write(s, bs); } GF_Err ahdr_box_size(GF_Box *s) { u32 pos=0; GF_AdobeDRMHeaderBox *ptr = (GF_AdobeDRMHeaderBox *)s; gf_isom_check_position(s, (GF_Box *)ptr->std_enc_params, &pos); return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *aprm_box_new() { ISOM_DECL_BOX_ALLOC(GF_AdobeStdEncryptionParamsBox, GF_ISOM_BOX_TYPE_APRM); tmp->version = 1; tmp->flags = 0; return (GF_Box *)tmp; } void aprm_box_del(GF_Box *s) { gf_free(s); } GF_Err aprm_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem) { GF_AdobeStdEncryptionParamsBox *ptr = (GF_AdobeStdEncryptionParamsBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_AEIB: BOX_FIELD_ASSIGN(enc_info, GF_AdobeEncryptionInfoBox) break; case GF_ISOM_BOX_TYPE_AKEY: BOX_FIELD_ASSIGN(key_info, GF_AdobeKeyInfoBox) break; } return GF_OK; } GF_Err aprm_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err aprm_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_full_box_write(s, bs); } GF_Err aprm_box_size(GF_Box *s) { u32 pos=0; GF_AdobeStdEncryptionParamsBox *ptr = 
(GF_AdobeStdEncryptionParamsBox *)s; gf_isom_check_position(s, (GF_Box *)ptr->enc_info, &pos); gf_isom_check_position(s, (GF_Box *)ptr->key_info, &pos); return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *aeib_box_new() { ISOM_DECL_BOX_ALLOC(GF_AdobeEncryptionInfoBox, GF_ISOM_BOX_TYPE_AEIB); tmp->version = 1; tmp->flags = 0; return (GF_Box *)tmp; } void aeib_box_del(GF_Box *s) { GF_AdobeEncryptionInfoBox *ptr = (GF_AdobeEncryptionInfoBox*)s; if (!ptr) return; if (ptr->enc_algo) gf_free(ptr->enc_algo); gf_free(ptr); } GF_Err aeib_box_read(GF_Box *s, GF_BitStream *bs) { GF_AdobeEncryptionInfoBox *ptr = (GF_AdobeEncryptionInfoBox*)s; u32 len; len = (u32) ptr->size - 1; if (len) { ptr->enc_algo = (char *)gf_malloc(len*sizeof(char)); if (!ptr->enc_algo) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->enc_algo, len); } ptr->key_length = gf_bs_read_u8(bs); ptr->size = 0; return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err aeib_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_AdobeEncryptionInfoBox *ptr = (GF_AdobeEncryptionInfoBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->enc_algo) { gf_bs_write_data(bs, (char *) ptr->enc_algo, (u32) strlen(ptr->enc_algo)); gf_bs_write_u8(bs, 0); //string end } gf_bs_write_u8(bs, ptr->key_length); return GF_OK; } GF_Err aeib_box_size(GF_Box *s) { GF_AdobeEncryptionInfoBox *ptr = (GF_AdobeEncryptionInfoBox*)s; if (ptr->enc_algo) ptr->size += strlen(ptr->enc_algo) + 1; ptr->size += 1; //KeyLength return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *akey_box_new() { ISOM_DECL_BOX_ALLOC(GF_AdobeKeyInfoBox, GF_ISOM_BOX_TYPE_AKEY); tmp->version = 1; tmp->flags = 0; return (GF_Box *)tmp; } void akey_box_del(GF_Box *s) { gf_free(s); } GF_Err akey_on_child_box(GF_Box *s, GF_Box *a, Bool is_rem) { GF_AdobeKeyInfoBox *ptr = (GF_AdobeKeyInfoBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_FLXS: BOX_FIELD_ASSIGN(params, GF_AdobeFlashAccessParamsBox) break; } return GF_OK; } GF_Err akey_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err akey_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_full_box_write(s, bs); } GF_Err akey_box_size(GF_Box *s) { u32 pos=0; GF_AdobeKeyInfoBox *ptr = (GF_AdobeKeyInfoBox *)s; gf_isom_check_position(s, (GF_Box *)ptr->params, &pos); return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *flxs_box_new() { ISOM_DECL_BOX_ALLOC(GF_AdobeFlashAccessParamsBox, GF_ISOM_BOX_TYPE_FLXS); return (GF_Box *)tmp; } void flxs_box_del(GF_Box *s) { GF_AdobeFlashAccessParamsBox *ptr = (GF_AdobeFlashAccessParamsBox*)s; if (!ptr) return; if (ptr->metadata) gf_free(ptr->metadata); gf_free(ptr); } GF_Err flxs_box_read(GF_Box *s, GF_BitStream *bs) { GF_AdobeFlashAccessParamsBox *ptr = (GF_AdobeFlashAccessParamsBox*)s; u32 len; len = (u32) ptr->size; if (len) { ptr->metadata = (char *)gf_malloc(len*sizeof(char)); if (!ptr->metadata) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->metadata, len); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err flxs_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_AdobeFlashAccessParamsBox *ptr = (GF_AdobeFlashAccessParamsBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; if (ptr->metadata) { gf_bs_write_data(bs, ptr->metadata, (u32) strlen(ptr->metadata)); gf_bs_write_u8(bs, 0); //string end } return GF_OK; } GF_Err flxs_box_size(GF_Box *s) { GF_AdobeFlashAccessParamsBox *ptr = (GF_AdobeFlashAccessParamsBox*)s; if (ptr->metadata) ptr->size 
+= strlen(ptr->metadata) + 1;
	return GF_OK;
}
#endif //GPAC_DISABLE_ISOM_WRITE

GF_Box *adaf_box_new()
{
	ISOM_DECL_BOX_ALLOC(GF_AdobeDRMAUFormatBox, GF_ISOM_BOX_TYPE_ADAF);
	return (GF_Box *)tmp;
}

void adaf_box_del(GF_Box *s)
{
	gf_free(s);
}

GF_Err adaf_box_read(GF_Box *s, GF_BitStream *bs)
{
	GF_AdobeDRMAUFormatBox *ptr = (GF_AdobeDRMAUFormatBox*)s;
	ISOM_DECREASE_SIZE(ptr, 3);
	ptr->selective_enc = gf_bs_read_u8(bs);
	gf_bs_read_u8(bs); //reserved
	ptr->IV_length = gf_bs_read_u8(bs);
	return GF_OK;
}

#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err adaf_box_write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	GF_AdobeDRMAUFormatBox *ptr = (GF_AdobeDRMAUFormatBox *) s;
	if (!s) return GF_BAD_PARAM;
	e = gf_isom_full_box_write(s, bs);
	if (e) return e;
	gf_bs_write_u8(bs, ptr->selective_enc);
	gf_bs_write_u8(bs, 0x0);
	gf_bs_write_u8(bs, ptr->IV_length);
	return GF_OK;
}

GF_Err adaf_box_size(GF_Box *s)
{
	GF_AdobeDRMAUFormatBox *ptr = (GF_AdobeDRMAUFormatBox*)s;
	ptr->size += 3;
	return GF_OK;
}
#endif //GPAC_DISABLE_ISOM_WRITE

#endif /*GPAC_DISABLE_ISOM*/
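/*
 * Editor's note -- a minimal standalone sketch, not part of the GPAC sources
 * above: it illustrates the key_info layout that tenc_box_read() and
 * key_info_get_iv_size() walk (per key: a 1-byte IV size, a 16-byte KID, and
 * a 1-byte constant-IV size followed by the constant IV when the IV size is
 * 0).  The explicit bounds checks and the walk_key_info() name are additions
 * made for illustration only.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static int walk_key_info(const uint8_t *ki, size_t len, uint32_t nb_keys)
{
	size_t pos = 3;                           /* 3 reserved bytes precede the first key */
	uint32_t i;

	for (i = 0; i < nb_keys; i++) {
		uint8_t iv_size;

		if (pos + 17 > len) return -1;        /* need IV size byte + 16-byte KID */
		iv_size = ki[pos];
		pos += 17;

		if (iv_size == 0) {
			uint8_t civ_size;
			if (pos + 1 > len) return -1;     /* constant-IV size byte */
			civ_size = ki[pos];
			if (civ_size != 8 && civ_size != 16) return -1;
			if (pos + 1 + civ_size > len) return -1;
			pos += 1 + civ_size;              /* skip the constant IV itself */
		} else if (iv_size != 8 && iv_size != 16) {
			return -1;                        /* same 0/8/16 validation as the box reader */
		}
		printf("key %u: iv_size=%u\n", (unsigned)i, (unsigned)iv_size);
	}
	return 0;
}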
null
268
CWE-787
CVE-2021-31916
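/*
 * Editor's note -- a minimal, hypothetical sketch, not taken from the kernel
 * record that follows: the dm-ioctl code below checks the user-supplied
 * data_size against the fixed dm_ioctl header size before allocating and
 * copying the parameter block (see copy_params()).  The same idea in
 * isolation; HDR_SIZE, MAX_PARAMS and copy_param_block() are made-up names.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define HDR_SIZE   32u           /* assumed fixed header size */
#define MAX_PARAMS (16u << 20)   /* assumed sanity cap on the total size */

/* Returns a heap copy of the caller's block, or NULL if the size is bogus. */
static void *copy_param_block(const void *src, uint32_t claimed_size)
{
	void *buf;

	if (claimed_size < HDR_SIZE || claimed_size > MAX_PARAMS)
		return NULL;             /* too small to hold the header, or absurdly large */

	buf = malloc(claimed_size);
	if (!buf)
		return NULL;

	memcpy(buf, src, claimed_size);
	return buf;
}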
/* * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. * Copyright (C) 2004 - 2006 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ #include "dm-core.h" #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/miscdevice.h> #include <linux/sched/mm.h> #include <linux/init.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/dm-ioctl.h> #include <linux/hdreg.h> #include <linux/compat.h> #include <linux/uaccess.h> #define DM_MSG_PREFIX "ioctl" #define DM_DRIVER_EMAIL "dm-devel@redhat.com" struct dm_file { /* * poll will wait until the global event number is greater than * this value. */ volatile unsigned global_event_nr; }; /*----------------------------------------------------------------- * The ioctl interface needs to be able to look up devices by * name or uuid. *---------------------------------------------------------------*/ struct hash_cell { struct list_head name_list; struct list_head uuid_list; char *name; char *uuid; struct mapped_device *md; struct dm_table *new_map; }; struct vers_iter { size_t param_size; struct dm_target_versions *vers, *old_vers; char *end; uint32_t flags; }; #define NUM_BUCKETS 64 #define MASK_BUCKETS (NUM_BUCKETS - 1) static struct list_head _name_buckets[NUM_BUCKETS]; static struct list_head _uuid_buckets[NUM_BUCKETS]; static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred); /* * Guards access to both hash tables. */ static DECLARE_RWSEM(_hash_lock); /* * Protects use of mdptr to obtain hash cell name and uuid from mapped device. */ static DEFINE_MUTEX(dm_hash_cells_mutex); static void init_buckets(struct list_head *buckets) { unsigned int i; for (i = 0; i < NUM_BUCKETS; i++) INIT_LIST_HEAD(buckets + i); } static int dm_hash_init(void) { init_buckets(_name_buckets); init_buckets(_uuid_buckets); return 0; } static void dm_hash_exit(void) { dm_hash_remove_all(false, false, false); } /*----------------------------------------------------------------- * Hash function: * We're not really concerned with the str hash function being * fast since it's only used by the ioctl interface. *---------------------------------------------------------------*/ static unsigned int hash_str(const char *str) { const unsigned int hash_mult = 2654435387U; unsigned int h = 0; while (*str) h = (h + (unsigned int) *str++) * hash_mult; return h & MASK_BUCKETS; } /*----------------------------------------------------------------- * Code for looking up a device by name *---------------------------------------------------------------*/ static struct hash_cell *__get_name_cell(const char *str) { struct hash_cell *hc; unsigned int h = hash_str(str); list_for_each_entry (hc, _name_buckets + h, name_list) if (!strcmp(hc->name, str)) { dm_get(hc->md); return hc; } return NULL; } static struct hash_cell *__get_uuid_cell(const char *str) { struct hash_cell *hc; unsigned int h = hash_str(str); list_for_each_entry (hc, _uuid_buckets + h, uuid_list) if (!strcmp(hc->uuid, str)) { dm_get(hc->md); return hc; } return NULL; } static struct hash_cell *__get_dev_cell(uint64_t dev) { struct mapped_device *md; struct hash_cell *hc; md = dm_get_md(huge_decode_dev(dev)); if (!md) return NULL; hc = dm_get_mdptr(md); if (!hc) { dm_put(md); return NULL; } return hc; } /*----------------------------------------------------------------- * Inserting, removing and renaming a device. 
*---------------------------------------------------------------*/ static struct hash_cell *alloc_cell(const char *name, const char *uuid, struct mapped_device *md) { struct hash_cell *hc; hc = kmalloc(sizeof(*hc), GFP_KERNEL); if (!hc) return NULL; hc->name = kstrdup(name, GFP_KERNEL); if (!hc->name) { kfree(hc); return NULL; } if (!uuid) hc->uuid = NULL; else { hc->uuid = kstrdup(uuid, GFP_KERNEL); if (!hc->uuid) { kfree(hc->name); kfree(hc); return NULL; } } INIT_LIST_HEAD(&hc->name_list); INIT_LIST_HEAD(&hc->uuid_list); hc->md = md; hc->new_map = NULL; return hc; } static void free_cell(struct hash_cell *hc) { if (hc) { kfree(hc->name); kfree(hc->uuid); kfree(hc); } } /* * The kdev_t and uuid of a device can never change once it is * initially inserted. */ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md) { struct hash_cell *cell, *hc; /* * Allocate the new cells. */ cell = alloc_cell(name, uuid, md); if (!cell) return -ENOMEM; /* * Insert the cell into both hash tables. */ down_write(&_hash_lock); hc = __get_name_cell(name); if (hc) { dm_put(hc->md); goto bad; } list_add(&cell->name_list, _name_buckets + hash_str(name)); if (uuid) { hc = __get_uuid_cell(uuid); if (hc) { list_del(&cell->name_list); dm_put(hc->md); goto bad; } list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid)); } dm_get(md); mutex_lock(&dm_hash_cells_mutex); dm_set_mdptr(md, cell); mutex_unlock(&dm_hash_cells_mutex); up_write(&_hash_lock); return 0; bad: up_write(&_hash_lock); free_cell(cell); return -EBUSY; } static struct dm_table *__hash_remove(struct hash_cell *hc) { struct dm_table *table; int srcu_idx; /* remove from the dev hash */ list_del(&hc->uuid_list); list_del(&hc->name_list); mutex_lock(&dm_hash_cells_mutex); dm_set_mdptr(hc->md, NULL); mutex_unlock(&dm_hash_cells_mutex); table = dm_get_live_table(hc->md, &srcu_idx); if (table) dm_table_event(table); dm_put_live_table(hc->md, srcu_idx); table = NULL; if (hc->new_map) table = hc->new_map; dm_put(hc->md); free_cell(hc); return table; } static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred) { int i, dev_skipped; struct hash_cell *hc; struct mapped_device *md; struct dm_table *t; retry: dev_skipped = 0; down_write(&_hash_lock); for (i = 0; i < NUM_BUCKETS; i++) { list_for_each_entry(hc, _name_buckets + i, name_list) { md = hc->md; dm_get(md); if (keep_open_devices && dm_lock_for_deletion(md, mark_deferred, only_deferred)) { dm_put(md); dev_skipped++; continue; } t = __hash_remove(hc); up_write(&_hash_lock); if (t) { dm_sync_table(md); dm_table_destroy(t); } dm_put(md); if (likely(keep_open_devices)) dm_destroy(md); else dm_destroy_immediate(md); /* * Some mapped devices may be using other mapped * devices, so repeat until we make no further * progress. If a new mapped device is created * here it will also get removed. */ goto retry; } } up_write(&_hash_lock); if (dev_skipped) DMWARN("remove_all left %d open device(s)", dev_skipped); } /* * Set the uuid of a hash_cell that isn't already set. */ static void __set_cell_uuid(struct hash_cell *hc, char *new_uuid) { mutex_lock(&dm_hash_cells_mutex); hc->uuid = new_uuid; mutex_unlock(&dm_hash_cells_mutex); list_add(&hc->uuid_list, _uuid_buckets + hash_str(new_uuid)); } /* * Changes the name of a hash_cell and returns the old name for * the caller to free. */ static char *__change_cell_name(struct hash_cell *hc, char *new_name) { char *old_name; /* * Rename and move the name cell. 
*/ list_del(&hc->name_list); old_name = hc->name; mutex_lock(&dm_hash_cells_mutex); hc->name = new_name; mutex_unlock(&dm_hash_cells_mutex); list_add(&hc->name_list, _name_buckets + hash_str(new_name)); return old_name; } static struct mapped_device *dm_hash_rename(struct dm_ioctl *param, const char *new) { char *new_data, *old_name = NULL; struct hash_cell *hc; struct dm_table *table; struct mapped_device *md; unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0; int srcu_idx; /* * duplicate new. */ new_data = kstrdup(new, GFP_KERNEL); if (!new_data) return ERR_PTR(-ENOMEM); down_write(&_hash_lock); /* * Is new free ? */ if (change_uuid) hc = __get_uuid_cell(new); else hc = __get_name_cell(new); if (hc) { DMWARN("Unable to change %s on mapped device %s to one that " "already exists: %s", change_uuid ? "uuid" : "name", param->name, new); dm_put(hc->md); up_write(&_hash_lock); kfree(new_data); return ERR_PTR(-EBUSY); } /* * Is there such a device as 'old' ? */ hc = __get_name_cell(param->name); if (!hc) { DMWARN("Unable to rename non-existent device, %s to %s%s", param->name, change_uuid ? "uuid " : "", new); up_write(&_hash_lock); kfree(new_data); return ERR_PTR(-ENXIO); } /* * Does this device already have a uuid? */ if (change_uuid && hc->uuid) { DMWARN("Unable to change uuid of mapped device %s to %s " "because uuid is already set to %s", param->name, new, hc->uuid); dm_put(hc->md); up_write(&_hash_lock); kfree(new_data); return ERR_PTR(-EINVAL); } if (change_uuid) __set_cell_uuid(hc, new_data); else old_name = __change_cell_name(hc, new_data); /* * Wake up any dm event waiters. */ table = dm_get_live_table(hc->md, &srcu_idx); if (table) dm_table_event(table); dm_put_live_table(hc->md, srcu_idx); if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr)) param->flags |= DM_UEVENT_GENERATED_FLAG; md = hc->md; up_write(&_hash_lock); kfree(old_name); return md; } void dm_deferred_remove(void) { dm_hash_remove_all(true, false, true); } /*----------------------------------------------------------------- * Implementation of the ioctl commands *---------------------------------------------------------------*/ /* * All the ioctl commands get dispatched to functions with this * prototype. */ typedef int (*ioctl_fn)(struct file *filp, struct dm_ioctl *param, size_t param_size); static int remove_all(struct file *filp, struct dm_ioctl *param, size_t param_size) { dm_hash_remove_all(true, !!(param->flags & DM_DEFERRED_REMOVE), false); param->data_size = 0; return 0; } /* * Round up the ptr to an 8-byte boundary. */ #define ALIGN_MASK 7 static inline size_t align_val(size_t val) { return (val + ALIGN_MASK) & ~ALIGN_MASK; } static inline void *align_ptr(void *ptr) { return (void *)align_val((size_t)ptr); } /* * Retrieves the data payload buffer from an already allocated * struct dm_ioctl. */ static void *get_result_buffer(struct dm_ioctl *param, size_t param_size, size_t *len) { param->data_start = align_ptr(param + 1) - (void *) param; if (param->data_start < param_size) *len = param_size - param->data_start; else *len = 0; return ((void *) param) + param->data_start; } static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_size) { unsigned int i; struct hash_cell *hc; size_t len, needed = 0; struct gendisk *disk; struct dm_name_list *orig_nl, *nl, *old_nl = NULL; uint32_t *event_nr; down_write(&_hash_lock); /* * Loop through all the devices working out how much * space we need. 
*/ for (i = 0; i < NUM_BUCKETS; i++) { list_for_each_entry (hc, _name_buckets + i, name_list) { needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1); needed += align_val(sizeof(uint32_t)); } } /* * Grab our output buffer. */ nl = orig_nl = get_result_buffer(param, param_size, &len); if (len < needed) { param->flags |= DM_BUFFER_FULL_FLAG; goto out; } param->data_size = param->data_start + needed; nl->dev = 0; /* Flags no data */ /* * Now loop through filling out the names. */ for (i = 0; i < NUM_BUCKETS; i++) { list_for_each_entry (hc, _name_buckets + i, name_list) { if (old_nl) old_nl->next = (uint32_t) ((void *) nl - (void *) old_nl); disk = dm_disk(hc->md); nl->dev = huge_encode_dev(disk_devt(disk)); nl->next = 0; strcpy(nl->name, hc->name); old_nl = nl; event_nr = align_ptr(nl->name + strlen(hc->name) + 1); *event_nr = dm_get_event_nr(hc->md); nl = align_ptr(event_nr + 1); } } /* * If mismatch happens, security may be compromised due to buffer * overflow, so it's better to crash. */ BUG_ON((char *)nl - (char *)orig_nl != needed); out: up_write(&_hash_lock); return 0; } static void list_version_get_needed(struct target_type *tt, void *needed_param) { size_t *needed = needed_param; *needed += sizeof(struct dm_target_versions); *needed += strlen(tt->name); *needed += ALIGN_MASK; } static void list_version_get_info(struct target_type *tt, void *param) { struct vers_iter *info = param; /* Check space - it might have changed since the first iteration */ if ((char *)info->vers + sizeof(tt->version) + strlen(tt->name) + 1 > info->end) { info->flags = DM_BUFFER_FULL_FLAG; return; } if (info->old_vers) info->old_vers->next = (uint32_t) ((void *)info->vers - (void *)info->old_vers); info->vers->version[0] = tt->version[0]; info->vers->version[1] = tt->version[1]; info->vers->version[2] = tt->version[2]; info->vers->next = 0; strcpy(info->vers->name, tt->name); info->old_vers = info->vers; info->vers = align_ptr(((void *) ++info->vers) + strlen(tt->name) + 1); } static int __list_versions(struct dm_ioctl *param, size_t param_size, const char *name) { size_t len, needed = 0; struct dm_target_versions *vers; struct vers_iter iter_info; struct target_type *tt = NULL; if (name) { tt = dm_get_target_type(name); if (!tt) return -EINVAL; } /* * Loop through all the devices working out how much * space we need. */ if (!tt) dm_target_iterate(list_version_get_needed, &needed); else list_version_get_needed(tt, &needed); /* * Grab our output buffer. */ vers = get_result_buffer(param, param_size, &len); if (len < needed) { param->flags |= DM_BUFFER_FULL_FLAG; goto out; } param->data_size = param->data_start + needed; iter_info.param_size = param_size; iter_info.old_vers = NULL; iter_info.vers = vers; iter_info.flags = 0; iter_info.end = (char *)vers+len; /* * Now loop through filling out the names & versions. 
*/ if (!tt) dm_target_iterate(list_version_get_info, &iter_info); else list_version_get_info(tt, &iter_info); param->flags |= iter_info.flags; out: if (tt) dm_put_target_type(tt); return 0; } static int list_versions(struct file *filp, struct dm_ioctl *param, size_t param_size) { return __list_versions(param, param_size, NULL); } static int get_target_version(struct file *filp, struct dm_ioctl *param, size_t param_size) { return __list_versions(param, param_size, param->name); } static int check_name(const char *name) { if (strchr(name, '/')) { DMWARN("invalid device name"); return -EINVAL; } return 0; } /* * On successful return, the caller must not attempt to acquire * _hash_lock without first calling dm_put_live_table, because dm_table_destroy * waits for this dm_put_live_table and could be called under this lock. */ static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *srcu_idx) { struct hash_cell *hc; struct dm_table *table = NULL; /* increment rcu count, we don't care about the table pointer */ dm_get_live_table(md, srcu_idx); down_read(&_hash_lock); hc = dm_get_mdptr(md); if (!hc || hc->md != md) { DMWARN("device has been removed from the dev hash table."); goto out; } table = hc->new_map; out: up_read(&_hash_lock); return table; } static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md, struct dm_ioctl *param, int *srcu_idx) { return (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) ? dm_get_inactive_table(md, srcu_idx) : dm_get_live_table(md, srcu_idx); } /* * Fills in a dm_ioctl structure, ready for sending back to * userland. */ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param) { struct gendisk *disk = dm_disk(md); struct dm_table *table; int srcu_idx; param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG | DM_ACTIVE_PRESENT_FLAG | DM_INTERNAL_SUSPEND_FLAG); if (dm_suspended_md(md)) param->flags |= DM_SUSPEND_FLAG; if (dm_suspended_internally_md(md)) param->flags |= DM_INTERNAL_SUSPEND_FLAG; if (dm_test_deferred_remove_flag(md)) param->flags |= DM_DEFERRED_REMOVE; param->dev = huge_encode_dev(disk_devt(disk)); /* * Yes, this will be out of date by the time it gets back * to userland, but it is still very useful for * debugging. */ param->open_count = dm_open_count(md); param->event_nr = dm_get_event_nr(md); param->target_count = 0; table = dm_get_live_table(md, &srcu_idx); if (table) { if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) { if (get_disk_ro(disk)) param->flags |= DM_READONLY_FLAG; param->target_count = dm_table_get_num_targets(table); } param->flags |= DM_ACTIVE_PRESENT_FLAG; } dm_put_live_table(md, srcu_idx); if (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) { int srcu_idx; table = dm_get_inactive_table(md, &srcu_idx); if (table) { if (!(dm_table_get_mode(table) & FMODE_WRITE)) param->flags |= DM_READONLY_FLAG; param->target_count = dm_table_get_num_targets(table); } dm_put_live_table(md, srcu_idx); } } static int dev_create(struct file *filp, struct dm_ioctl *param, size_t param_size) { int r, m = DM_ANY_MINOR; struct mapped_device *md; r = check_name(param->name); if (r) return r; if (param->flags & DM_PERSISTENT_DEV_FLAG) m = MINOR(huge_decode_dev(param->dev)); r = dm_create(m, &md); if (r) return r; r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md); if (r) { dm_put(md); dm_destroy(md); return r; } param->flags &= ~DM_INACTIVE_PRESENT_FLAG; __dev_status(md, param); dm_put(md); return 0; } /* * Always use UUID for lookups if it's present, otherwise use name or dev. 
*/ static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param) { struct hash_cell *hc = NULL; if (*param->uuid) { if (*param->name || param->dev) return NULL; hc = __get_uuid_cell(param->uuid); if (!hc) return NULL; } else if (*param->name) { if (param->dev) return NULL; hc = __get_name_cell(param->name); if (!hc) return NULL; } else if (param->dev) { hc = __get_dev_cell(param->dev); if (!hc) return NULL; } else return NULL; /* * Sneakily write in both the name and the uuid * while we have the cell. */ strlcpy(param->name, hc->name, sizeof(param->name)); if (hc->uuid) strlcpy(param->uuid, hc->uuid, sizeof(param->uuid)); else param->uuid[0] = '\0'; if (hc->new_map) param->flags |= DM_INACTIVE_PRESENT_FLAG; else param->flags &= ~DM_INACTIVE_PRESENT_FLAG; return hc; } static struct mapped_device *find_device(struct dm_ioctl *param) { struct hash_cell *hc; struct mapped_device *md = NULL; down_read(&_hash_lock); hc = __find_device_hash_cell(param); if (hc) md = hc->md; up_read(&_hash_lock); return md; } static int dev_remove(struct file *filp, struct dm_ioctl *param, size_t param_size) { struct hash_cell *hc; struct mapped_device *md; int r; struct dm_table *t; down_write(&_hash_lock); hc = __find_device_hash_cell(param); if (!hc) { DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table."); up_write(&_hash_lock); return -ENXIO; } md = hc->md; /* * Ensure the device is not open and nothing further can open it. */ r = dm_lock_for_deletion(md, !!(param->flags & DM_DEFERRED_REMOVE), false); if (r) { if (r == -EBUSY && param->flags & DM_DEFERRED_REMOVE) { up_write(&_hash_lock); dm_put(md); return 0; } DMDEBUG_LIMIT("unable to remove open device %s", hc->name); up_write(&_hash_lock); dm_put(md); return r; } t = __hash_remove(hc); up_write(&_hash_lock); if (t) { dm_sync_table(md); dm_table_destroy(t); } param->flags &= ~DM_DEFERRED_REMOVE; if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr)) param->flags |= DM_UEVENT_GENERATED_FLAG; dm_put(md); dm_destroy(md); return 0; } /* * Check a string doesn't overrun the chunk of * memory we copied from userland. */ static int invalid_str(char *str, void *end) { while ((void *) str < end) if (!*str++) return 0; return -EINVAL; } static int dev_rename(struct file *filp, struct dm_ioctl *param, size_t param_size) { int r; char *new_data = (char *) param + param->data_start; struct mapped_device *md; unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0; if (new_data < param->data || invalid_str(new_data, (void *) param + param_size) || !*new_data || strlen(new_data) > (change_uuid ? 
DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) { DMWARN("Invalid new mapped device name or uuid string supplied."); return -EINVAL; } if (!change_uuid) { r = check_name(new_data); if (r) return r; } md = dm_hash_rename(param, new_data); if (IS_ERR(md)) return PTR_ERR(md); __dev_status(md, param); dm_put(md); return 0; } static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t param_size) { int r = -EINVAL, x; struct mapped_device *md; struct hd_geometry geometry; unsigned long indata[4]; char *geostr = (char *) param + param->data_start; char dummy; md = find_device(param); if (!md) return -ENXIO; if (geostr < param->data || invalid_str(geostr, (void *) param + param_size)) { DMWARN("Invalid geometry supplied."); goto out; } x = sscanf(geostr, "%lu %lu %lu %lu%c", indata, indata + 1, indata + 2, indata + 3, &dummy); if (x != 4) { DMWARN("Unable to interpret geometry settings."); goto out; } if (indata[0] > 65535 || indata[1] > 255 || indata[2] > 255 || indata[3] > ULONG_MAX) { DMWARN("Geometry exceeds range limits."); goto out; } geometry.cylinders = indata[0]; geometry.heads = indata[1]; geometry.sectors = indata[2]; geometry.start = indata[3]; r = dm_set_geometry(md, &geometry); param->data_size = 0; out: dm_put(md); return r; } static int do_suspend(struct dm_ioctl *param) { int r = 0; unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG; struct mapped_device *md; md = find_device(param); if (!md) return -ENXIO; if (param->flags & DM_SKIP_LOCKFS_FLAG) suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; if (param->flags & DM_NOFLUSH_FLAG) suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; if (!dm_suspended_md(md)) { r = dm_suspend(md, suspend_flags); if (r) goto out; } __dev_status(md, param); out: dm_put(md); return r; } static int do_resume(struct dm_ioctl *param) { int r = 0; unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG; struct hash_cell *hc; struct mapped_device *md; struct dm_table *new_map, *old_map = NULL; down_write(&_hash_lock); hc = __find_device_hash_cell(param); if (!hc) { DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table."); up_write(&_hash_lock); return -ENXIO; } md = hc->md; new_map = hc->new_map; hc->new_map = NULL; param->flags &= ~DM_INACTIVE_PRESENT_FLAG; up_write(&_hash_lock); /* Do we need to load a new map ? */ if (new_map) { /* Suspend if it isn't already suspended */ if (param->flags & DM_SKIP_LOCKFS_FLAG) suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; if (param->flags & DM_NOFLUSH_FLAG) suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; if (!dm_suspended_md(md)) dm_suspend(md, suspend_flags); old_map = dm_swap_table(md, new_map); if (IS_ERR(old_map)) { dm_sync_table(md); dm_table_destroy(new_map); dm_put(md); return PTR_ERR(old_map); } if (dm_table_get_mode(new_map) & FMODE_WRITE) set_disk_ro(dm_disk(md), 0); else set_disk_ro(dm_disk(md), 1); } if (dm_suspended_md(md)) { r = dm_resume(md); if (!r && !dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr)) param->flags |= DM_UEVENT_GENERATED_FLAG; } /* * Since dm_swap_table synchronizes RCU, nobody should be in * read-side critical section already. */ if (old_map) dm_table_destroy(old_map); if (!r) __dev_status(md, param); dm_put(md); return r; } /* * Set or unset the suspension state of a device. * If the device already is in the requested state we just return its status. 
*/ static int dev_suspend(struct file *filp, struct dm_ioctl *param, size_t param_size) { if (param->flags & DM_SUSPEND_FLAG) return do_suspend(param); return do_resume(param); } /* * Copies device info back to user space, used by * the create and info ioctls. */ static int dev_status(struct file *filp, struct dm_ioctl *param, size_t param_size) { struct mapped_device *md; md = find_device(param); if (!md) return -ENXIO; __dev_status(md, param); dm_put(md); return 0; } /* * Build up the status struct for each target */ static void retrieve_status(struct dm_table *table, struct dm_ioctl *param, size_t param_size) { unsigned int i, num_targets; struct dm_target_spec *spec; char *outbuf, *outptr; status_type_t type; size_t remaining, len, used = 0; unsigned status_flags = 0; outptr = outbuf = get_result_buffer(param, param_size, &len); if (param->flags & DM_STATUS_TABLE_FLAG) type = STATUSTYPE_TABLE; else type = STATUSTYPE_INFO; /* Get all the target info */ num_targets = dm_table_get_num_targets(table); for (i = 0; i < num_targets; i++) { struct dm_target *ti = dm_table_get_target(table, i); size_t l; remaining = len - (outptr - outbuf); if (remaining <= sizeof(struct dm_target_spec)) { param->flags |= DM_BUFFER_FULL_FLAG; break; } spec = (struct dm_target_spec *) outptr; spec->status = 0; spec->sector_start = ti->begin; spec->length = ti->len; strncpy(spec->target_type, ti->type->name, sizeof(spec->target_type) - 1); outptr += sizeof(struct dm_target_spec); remaining = len - (outptr - outbuf); if (remaining <= 0) { param->flags |= DM_BUFFER_FULL_FLAG; break; } /* Get the status/table string from the target driver */ if (ti->type->status) { if (param->flags & DM_NOFLUSH_FLAG) status_flags |= DM_STATUS_NOFLUSH_FLAG; ti->type->status(ti, type, status_flags, outptr, remaining); } else outptr[0] = '\0'; l = strlen(outptr) + 1; if (l == remaining) { param->flags |= DM_BUFFER_FULL_FLAG; break; } outptr += l; used = param->data_start + (outptr - outbuf); outptr = align_ptr(outptr); spec->next = outptr - outbuf; } if (used) param->data_size = used; param->target_count = num_targets; } /* * Wait for a device to report an event */ static int dev_wait(struct file *filp, struct dm_ioctl *param, size_t param_size) { int r = 0; struct mapped_device *md; struct dm_table *table; int srcu_idx; md = find_device(param); if (!md) return -ENXIO; /* * Wait for a notification event */ if (dm_wait_event(md, param->event_nr)) { r = -ERESTARTSYS; goto out; } /* * The userland program is going to want to know what * changed to trigger the event, so we may as well tell * him and save an ioctl. */ __dev_status(md, param); table = dm_get_live_or_inactive_table(md, param, &srcu_idx); if (table) retrieve_status(table, param, param_size); dm_put_live_table(md, srcu_idx); out: dm_put(md); return r; } /* * Remember the global event number and make it possible to poll * for further events. 
*/ static int dev_arm_poll(struct file *filp, struct dm_ioctl *param, size_t param_size) { struct dm_file *priv = filp->private_data; priv->global_event_nr = atomic_read(&dm_global_event_nr); return 0; } static inline fmode_t get_mode(struct dm_ioctl *param) { fmode_t mode = FMODE_READ | FMODE_WRITE; if (param->flags & DM_READONLY_FLAG) mode = FMODE_READ; return mode; } static int next_target(struct dm_target_spec *last, uint32_t next, void *end, struct dm_target_spec **spec, char **target_params) { *spec = (struct dm_target_spec *) ((unsigned char *) last + next); *target_params = (char *) (*spec + 1); if (*spec < (last + 1)) return -EINVAL; return invalid_str(*target_params, end); } static int populate_table(struct dm_table *table, struct dm_ioctl *param, size_t param_size) { int r; unsigned int i = 0; struct dm_target_spec *spec = (struct dm_target_spec *) param; uint32_t next = param->data_start; void *end = (void *) param + param_size; char *target_params; if (!param->target_count) { DMWARN("populate_table: no targets specified"); return -EINVAL; } for (i = 0; i < param->target_count; i++) { r = next_target(spec, next, end, &spec, &target_params); if (r) { DMWARN("unable to find target"); return r; } r = dm_table_add_target(table, spec->target_type, (sector_t) spec->sector_start, (sector_t) spec->length, target_params); if (r) { DMWARN("error adding target to table"); return r; } next = spec->next; } return dm_table_complete(table); } static bool is_valid_type(enum dm_queue_mode cur, enum dm_queue_mode new) { if (cur == new || (cur == DM_TYPE_BIO_BASED && new == DM_TYPE_DAX_BIO_BASED)) return true; return false; } static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_size) { int r; struct hash_cell *hc; struct dm_table *t, *old_map = NULL; struct mapped_device *md; struct target_type *immutable_target_type; md = find_device(param); if (!md) return -ENXIO; r = dm_table_create(&t, get_mode(param), param->target_count, md); if (r) goto err; /* Protect md->type and md->queue against concurrent table loads. */ dm_lock_md_type(md); r = populate_table(t, param, param_size); if (r) goto err_unlock_md_type; immutable_target_type = dm_get_immutable_target_type(md); if (immutable_target_type && (immutable_target_type != dm_table_get_immutable_target_type(t)) && !dm_table_get_wildcard_target(t)) { DMWARN("can't replace immutable target type %s", immutable_target_type->name); r = -EINVAL; goto err_unlock_md_type; } if (dm_get_md_type(md) == DM_TYPE_NONE) { /* Initial table load: acquire type of table. 
*/ dm_set_md_type(md, dm_table_get_type(t)); /* setup md->queue to reflect md's type (may block) */ r = dm_setup_md_queue(md, t); if (r) { DMWARN("unable to set up device queue for new table."); goto err_unlock_md_type; } } else if (!is_valid_type(dm_get_md_type(md), dm_table_get_type(t))) { DMWARN("can't change device type (old=%u vs new=%u) after initial table load.", dm_get_md_type(md), dm_table_get_type(t)); r = -EINVAL; goto err_unlock_md_type; } dm_unlock_md_type(md); /* stage inactive table */ down_write(&_hash_lock); hc = dm_get_mdptr(md); if (!hc || hc->md != md) { DMWARN("device has been removed from the dev hash table."); up_write(&_hash_lock); r = -ENXIO; goto err_destroy_table; } if (hc->new_map) old_map = hc->new_map; hc->new_map = t; up_write(&_hash_lock); param->flags |= DM_INACTIVE_PRESENT_FLAG; __dev_status(md, param); if (old_map) { dm_sync_table(md); dm_table_destroy(old_map); } dm_put(md); return 0; err_unlock_md_type: dm_unlock_md_type(md); err_destroy_table: dm_table_destroy(t); err: dm_put(md); return r; } static int table_clear(struct file *filp, struct dm_ioctl *param, size_t param_size) { struct hash_cell *hc; struct mapped_device *md; struct dm_table *old_map = NULL; down_write(&_hash_lock); hc = __find_device_hash_cell(param); if (!hc) { DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table."); up_write(&_hash_lock); return -ENXIO; } if (hc->new_map) { old_map = hc->new_map; hc->new_map = NULL; } param->flags &= ~DM_INACTIVE_PRESENT_FLAG; __dev_status(hc->md, param); md = hc->md; up_write(&_hash_lock); if (old_map) { dm_sync_table(md); dm_table_destroy(old_map); } dm_put(md); return 0; } /* * Retrieves a list of devices used by a particular dm device. */ static void retrieve_deps(struct dm_table *table, struct dm_ioctl *param, size_t param_size) { unsigned int count = 0; struct list_head *tmp; size_t len, needed; struct dm_dev_internal *dd; struct dm_target_deps *deps; deps = get_result_buffer(param, param_size, &len); /* * Count the devices. */ list_for_each (tmp, dm_table_get_devices(table)) count++; /* * Check we have enough space. */ needed = struct_size(deps, dev, count); if (len < needed) { param->flags |= DM_BUFFER_FULL_FLAG; return; } /* * Fill in the devices. */ deps->count = count; count = 0; list_for_each_entry (dd, dm_table_get_devices(table), list) deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev); param->data_size = param->data_start + needed; } static int table_deps(struct file *filp, struct dm_ioctl *param, size_t param_size) { struct mapped_device *md; struct dm_table *table; int srcu_idx; md = find_device(param); if (!md) return -ENXIO; __dev_status(md, param); table = dm_get_live_or_inactive_table(md, param, &srcu_idx); if (table) retrieve_deps(table, param, param_size); dm_put_live_table(md, srcu_idx); dm_put(md); return 0; } /* * Return the status of a device as a text string for each * target. */ static int table_status(struct file *filp, struct dm_ioctl *param, size_t param_size) { struct mapped_device *md; struct dm_table *table; int srcu_idx; md = find_device(param); if (!md) return -ENXIO; __dev_status(md, param); table = dm_get_live_or_inactive_table(md, param, &srcu_idx); if (table) retrieve_status(table, param, param_size); dm_put_live_table(md, srcu_idx); dm_put(md); return 0; } /* * Process device-mapper dependent messages. Messages prefixed with '@' * are processed by the DM core. All others are delivered to the target. * Returns a number <= 1 if message was processed by device mapper. 
* Returns 2 if message should be delivered to the target. */ static int message_for_md(struct mapped_device *md, unsigned argc, char **argv, char *result, unsigned maxlen) { int r; if (**argv != '@') return 2; /* no '@' prefix, deliver to target */ if (!strcasecmp(argv[0], "@cancel_deferred_remove")) { if (argc != 1) { DMERR("Invalid arguments for @cancel_deferred_remove"); return -EINVAL; } return dm_cancel_deferred_remove(md); } r = dm_stats_message(md, argc, argv, result, maxlen); if (r < 2) return r; DMERR("Unsupported message sent to DM core: %s", argv[0]); return -EINVAL; } /* * Pass a message to the target that's at the supplied device offset. */ static int target_message(struct file *filp, struct dm_ioctl *param, size_t param_size) { int r, argc; char **argv; struct mapped_device *md; struct dm_table *table; struct dm_target *ti; struct dm_target_msg *tmsg = (void *) param + param->data_start; size_t maxlen; char *result = get_result_buffer(param, param_size, &maxlen); int srcu_idx; md = find_device(param); if (!md) return -ENXIO; if (tmsg < (struct dm_target_msg *) param->data || invalid_str(tmsg->message, (void *) param + param_size)) { DMWARN("Invalid target message parameters."); r = -EINVAL; goto out; } r = dm_split_args(&argc, &argv, tmsg->message); if (r) { DMWARN("Failed to split target message parameters"); goto out; } if (!argc) { DMWARN("Empty message received."); r = -EINVAL; goto out_argv; } r = message_for_md(md, argc, argv, result, maxlen); if (r <= 1) goto out_argv; table = dm_get_live_table(md, &srcu_idx); if (!table) goto out_table; if (dm_deleting_md(md)) { r = -ENXIO; goto out_table; } ti = dm_table_find_target(table, tmsg->sector); if (!ti) { DMWARN("Target message sector outside device."); r = -EINVAL; } else if (ti->type->message) r = ti->type->message(ti, argc, argv, result, maxlen); else { DMWARN("Target type does not support messages"); r = -EINVAL; } out_table: dm_put_live_table(md, srcu_idx); out_argv: kfree(argv); out: if (r >= 0) __dev_status(md, param); if (r == 1) { param->flags |= DM_DATA_OUT_FLAG; if (dm_message_test_buffer_overflow(result, maxlen)) param->flags |= DM_BUFFER_FULL_FLAG; else param->data_size = param->data_start + strlen(result) + 1; r = 0; } dm_put(md); return r; } /* * The ioctl parameter block consists of two parts, a dm_ioctl struct * followed by a data buffer. This flag is set if the second part, * which has a variable size, is not used by the function processing * the ioctl. */ #define IOCTL_FLAGS_NO_PARAMS 1 #define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT 2 /*----------------------------------------------------------------- * Implementation of open/close/ioctl on the special char * device. 
*---------------------------------------------------------------*/ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags) { static const struct { int cmd; int flags; ioctl_fn fn; } _ioctls[] = { {DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */ {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, remove_all}, {DM_LIST_DEVICES_CMD, 0, list_devices}, {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_create}, {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_remove}, {DM_DEV_RENAME_CMD, IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_rename}, {DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend}, {DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status}, {DM_DEV_WAIT_CMD, 0, dev_wait}, {DM_TABLE_LOAD_CMD, 0, table_load}, {DM_TABLE_CLEAR_CMD, IOCTL_FLAGS_NO_PARAMS, table_clear}, {DM_TABLE_DEPS_CMD, 0, table_deps}, {DM_TABLE_STATUS_CMD, 0, table_status}, {DM_LIST_VERSIONS_CMD, 0, list_versions}, {DM_TARGET_MSG_CMD, 0, target_message}, {DM_DEV_SET_GEOMETRY_CMD, 0, dev_set_geometry}, {DM_DEV_ARM_POLL, IOCTL_FLAGS_NO_PARAMS, dev_arm_poll}, {DM_GET_TARGET_VERSION, 0, get_target_version}, }; if (unlikely(cmd >= ARRAY_SIZE(_ioctls))) return NULL; *ioctl_flags = _ioctls[cmd].flags; return _ioctls[cmd].fn; } /* * As well as checking the version compatibility this always * copies the kernel interface version out. */ static int check_version(unsigned int cmd, struct dm_ioctl __user *user) { uint32_t version[3]; int r = 0; if (copy_from_user(version, user->version, sizeof(version))) return -EFAULT; if ((DM_VERSION_MAJOR != version[0]) || (DM_VERSION_MINOR < version[1])) { DMWARN("ioctl interface mismatch: " "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)", DM_VERSION_MAJOR, DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, version[0], version[1], version[2], cmd); r = -EINVAL; } /* * Fill in the kernel version. */ version[0] = DM_VERSION_MAJOR; version[1] = DM_VERSION_MINOR; version[2] = DM_VERSION_PATCHLEVEL; if (copy_to_user(user->version, version, sizeof(version))) return -EFAULT; return r; } #define DM_PARAMS_MALLOC 0x0001 /* Params allocated with kvmalloc() */ #define DM_WIPE_BUFFER 0x0010 /* Wipe input buffer before returning from ioctl */ static void free_params(struct dm_ioctl *param, size_t param_size, int param_flags) { if (param_flags & DM_WIPE_BUFFER) memset(param, 0, param_size); if (param_flags & DM_PARAMS_MALLOC) kvfree(param); } static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel, int ioctl_flags, struct dm_ioctl **param, int *param_flags) { struct dm_ioctl *dmi; int secure_data; const size_t minimum_data_size = offsetof(struct dm_ioctl, data); unsigned noio_flag; if (copy_from_user(param_kernel, user, minimum_data_size)) return -EFAULT; if (param_kernel->data_size < minimum_data_size) return -EINVAL; secure_data = param_kernel->flags & DM_SECURE_DATA_FLAG; *param_flags = secure_data ? DM_WIPE_BUFFER : 0; if (ioctl_flags & IOCTL_FLAGS_NO_PARAMS) { dmi = param_kernel; dmi->data_size = minimum_data_size; goto data_copied; } /* * Use __GFP_HIGH to avoid low memory issues when a device is * suspended and the ioctl is needed to resume it. * Use kmalloc() rather than vmalloc() when we can. 
*/ dmi = NULL; noio_flag = memalloc_noio_save(); dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL | __GFP_HIGH); memalloc_noio_restore(noio_flag); if (!dmi) { if (secure_data && clear_user(user, param_kernel->data_size)) return -EFAULT; return -ENOMEM; } *param_flags |= DM_PARAMS_MALLOC; /* Copy from param_kernel (which was already copied from user) */ memcpy(dmi, param_kernel, minimum_data_size); if (copy_from_user(&dmi->data, (char __user *)user + minimum_data_size, param_kernel->data_size - minimum_data_size)) goto bad; data_copied: /* Wipe the user buffer so we do not return it to userspace */ if (secure_data && clear_user(user, param_kernel->data_size)) goto bad; *param = dmi; return 0; bad: free_params(dmi, param_kernel->data_size, *param_flags); return -EFAULT; } static int validate_params(uint cmd, struct dm_ioctl *param) { /* Always clear this flag */ param->flags &= ~DM_BUFFER_FULL_FLAG; param->flags &= ~DM_UEVENT_GENERATED_FLAG; param->flags &= ~DM_SECURE_DATA_FLAG; param->flags &= ~DM_DATA_OUT_FLAG; /* Ignores parameters */ if (cmd == DM_REMOVE_ALL_CMD || cmd == DM_LIST_DEVICES_CMD || cmd == DM_LIST_VERSIONS_CMD) return 0; if (cmd == DM_DEV_CREATE_CMD) { if (!*param->name) { DMWARN("name not supplied when creating device"); return -EINVAL; } } else if (*param->uuid && *param->name) { DMWARN("only supply one of name or uuid, cmd(%u)", cmd); return -EINVAL; } /* Ensure strings are terminated */ param->name[DM_NAME_LEN - 1] = '\0'; param->uuid[DM_UUID_LEN - 1] = '\0'; return 0; } static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *user) { int r = 0; int ioctl_flags; int param_flags; unsigned int cmd; struct dm_ioctl *param; ioctl_fn fn = NULL; size_t input_param_size; struct dm_ioctl param_kernel; /* only root can play with this */ if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (_IOC_TYPE(command) != DM_IOCTL) return -ENOTTY; cmd = _IOC_NR(command); /* * Check the interface version passed in. This also * writes out the kernel's interface version. */ r = check_version(cmd, user); if (r) return r; /* * Nothing more to do for the version command. */ if (cmd == DM_VERSION_CMD) return 0; fn = lookup_ioctl(cmd, &ioctl_flags); if (!fn) { DMWARN("dm_ctl_ioctl: unknown command 0x%x", command); return -ENOTTY; } /* * Copy the parameters into kernel space. */ r = copy_params(user, &param_kernel, ioctl_flags, &param, &param_flags); if (r) return r; input_param_size = param->data_size; r = validate_params(cmd, param); if (r) goto out; param->data_size = offsetof(struct dm_ioctl, data); r = fn(file, param, input_param_size); if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) && unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS)) DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd); if (!r && ioctl_flags & IOCTL_FLAGS_ISSUE_GLOBAL_EVENT) dm_issue_global_event(); /* * Copy the results back to userland. 
*/ if (!r && copy_to_user(user, param, param->data_size)) r = -EFAULT; out: free_params(param, input_param_size, param_flags); return r; } static long dm_ctl_ioctl(struct file *file, uint command, ulong u) { return (long)ctl_ioctl(file, command, (struct dm_ioctl __user *)u); } #ifdef CONFIG_COMPAT static long dm_compat_ctl_ioctl(struct file *file, uint command, ulong u) { return (long)dm_ctl_ioctl(file, command, (ulong) compat_ptr(u)); } #else #define dm_compat_ctl_ioctl NULL #endif static int dm_open(struct inode *inode, struct file *filp) { int r; struct dm_file *priv; r = nonseekable_open(inode, filp); if (unlikely(r)) return r; priv = filp->private_data = kmalloc(sizeof(struct dm_file), GFP_KERNEL); if (!priv) return -ENOMEM; priv->global_event_nr = atomic_read(&dm_global_event_nr); return 0; } static int dm_release(struct inode *inode, struct file *filp) { kfree(filp->private_data); return 0; } static __poll_t dm_poll(struct file *filp, poll_table *wait) { struct dm_file *priv = filp->private_data; __poll_t mask = 0; poll_wait(filp, &dm_global_eventq, wait); if ((int)(atomic_read(&dm_global_event_nr) - priv->global_event_nr) > 0) mask |= EPOLLIN; return mask; } static const struct file_operations _ctl_fops = { .open = dm_open, .release = dm_release, .poll = dm_poll, .unlocked_ioctl = dm_ctl_ioctl, .compat_ioctl = dm_compat_ctl_ioctl, .owner = THIS_MODULE, .llseek = noop_llseek, }; static struct miscdevice _dm_misc = { .minor = MAPPER_CTRL_MINOR, .name = DM_NAME, .nodename = DM_DIR "/" DM_CONTROL_NODE, .fops = &_ctl_fops }; MODULE_ALIAS_MISCDEV(MAPPER_CTRL_MINOR); MODULE_ALIAS("devname:" DM_DIR "/" DM_CONTROL_NODE); /* * Create misc character device and link to DM_DIR/control. */ int __init dm_interface_init(void) { int r; r = dm_hash_init(); if (r) return r; r = misc_register(&_dm_misc); if (r) { DMERR("misc_register failed for control device"); dm_hash_exit(); return r; } DMINFO("%d.%d.%d%s initialised: %s", DM_VERSION_MAJOR, DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, DM_VERSION_EXTRA, DM_DRIVER_EMAIL); return 0; } void dm_interface_exit(void) { misc_deregister(&_dm_misc); dm_hash_exit(); } /** * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers * @md: Pointer to mapped_device * @name: Buffer (size DM_NAME_LEN) for name * @uuid: Buffer (size DM_UUID_LEN) for uuid or empty string if uuid not defined */ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid) { int r = 0; struct hash_cell *hc; if (!md) return -ENXIO; mutex_lock(&dm_hash_cells_mutex); hc = dm_get_mdptr(md); if (!hc || hc->md != md) { r = -ENXIO; goto out; } if (name) strcpy(name, hc->name); if (uuid) strcpy(uuid, hc->uuid ? : ""); out: mutex_unlock(&dm_hash_cells_mutex); return r; } EXPORT_SYMBOL_GPL(dm_copy_name_and_uuid); /** * dm_early_create - create a mapped device in early boot. * * @dmi: Contains main information of the device mapping to be created. * @spec_array: array of pointers to struct dm_target_spec. Describes the * mapping table of the device. * @target_params_array: array of strings with the parameters to a specific * target. * * Instead of having the struct dm_target_spec and the parameters for every * target embedded at the end of struct dm_ioctl (as performed in a normal * ioctl), pass them as arguments, so the caller doesn't need to serialize them. * The size of the spec_array and target_params_array is given by * @dmi->target_count. 
* This function is supposed to be called in early boot, so locking mechanisms * to protect against concurrent loads are not required. */ int __init dm_early_create(struct dm_ioctl *dmi, struct dm_target_spec **spec_array, char **target_params_array) { int r, m = DM_ANY_MINOR; struct dm_table *t, *old_map; struct mapped_device *md; unsigned int i; if (!dmi->target_count) return -EINVAL; r = check_name(dmi->name); if (r) return r; if (dmi->flags & DM_PERSISTENT_DEV_FLAG) m = MINOR(huge_decode_dev(dmi->dev)); /* alloc dm device */ r = dm_create(m, &md); if (r) return r; /* hash insert */ r = dm_hash_insert(dmi->name, *dmi->uuid ? dmi->uuid : NULL, md); if (r) goto err_destroy_dm; /* alloc table */ r = dm_table_create(&t, get_mode(dmi), dmi->target_count, md); if (r) goto err_hash_remove; /* add targets */ for (i = 0; i < dmi->target_count; i++) { r = dm_table_add_target(t, spec_array[i]->target_type, (sector_t) spec_array[i]->sector_start, (sector_t) spec_array[i]->length, target_params_array[i]); if (r) { DMWARN("error adding target to table"); goto err_destroy_table; } } /* finish table */ r = dm_table_complete(t); if (r) goto err_destroy_table; md->type = dm_table_get_type(t); /* setup md->queue to reflect md's type (may block) */ r = dm_setup_md_queue(md, t); if (r) { DMWARN("unable to set up device queue for new table."); goto err_destroy_table; } /* Set new map */ dm_suspend(md, 0); old_map = dm_swap_table(md, t); if (IS_ERR(old_map)) { r = PTR_ERR(old_map); goto err_destroy_table; } set_disk_ro(dm_disk(md), !!(dmi->flags & DM_READONLY_FLAG)); /* resume device */ r = dm_resume(md); if (r) goto err_destroy_table; DMINFO("%s (%s) is ready", md->disk->disk_name, dmi->name); dm_put(md); return 0; err_destroy_table: dm_table_destroy(t); err_hash_remove: (void) __hash_remove(__get_name_cell(dmi->name)); /* release reference from __get_name_cell */ dm_put(md); err_destroy_dm: dm_put(md); dm_destroy(md); return r; }
null
/* * Copyright (C) 2001, 2002 Sistina Software (UK) Limited. * Copyright (C) 2004 - 2006 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ #include "dm-core.h" #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/miscdevice.h> #include <linux/sched/mm.h> #include <linux/init.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/dm-ioctl.h> #include <linux/hdreg.h> #include <linux/compat.h> #include <linux/uaccess.h> #define DM_MSG_PREFIX "ioctl" #define DM_DRIVER_EMAIL "dm-devel@redhat.com" struct dm_file { /* * poll will wait until the global event number is greater than * this value. */ volatile unsigned global_event_nr; }; /*----------------------------------------------------------------- * The ioctl interface needs to be able to look up devices by * name or uuid. *---------------------------------------------------------------*/ struct hash_cell { struct list_head name_list; struct list_head uuid_list; char *name; char *uuid; struct mapped_device *md; struct dm_table *new_map; }; struct vers_iter { size_t param_size; struct dm_target_versions *vers, *old_vers; char *end; uint32_t flags; }; #define NUM_BUCKETS 64 #define MASK_BUCKETS (NUM_BUCKETS - 1) static struct list_head _name_buckets[NUM_BUCKETS]; static struct list_head _uuid_buckets[NUM_BUCKETS]; static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred); /* * Guards access to both hash tables. */ static DECLARE_RWSEM(_hash_lock); /* * Protects use of mdptr to obtain hash cell name and uuid from mapped device. */ static DEFINE_MUTEX(dm_hash_cells_mutex); static void init_buckets(struct list_head *buckets) { unsigned int i; for (i = 0; i < NUM_BUCKETS; i++) INIT_LIST_HEAD(buckets + i); } static int dm_hash_init(void) { init_buckets(_name_buckets); init_buckets(_uuid_buckets); return 0; } static void dm_hash_exit(void) { dm_hash_remove_all(false, false, false); } /*----------------------------------------------------------------- * Hash function: * We're not really concerned with the str hash function being * fast since it's only used by the ioctl interface. *---------------------------------------------------------------*/ static unsigned int hash_str(const char *str) { const unsigned int hash_mult = 2654435387U; unsigned int h = 0; while (*str) h = (h + (unsigned int) *str++) * hash_mult; return h & MASK_BUCKETS; } /*----------------------------------------------------------------- * Code for looking up a device by name *---------------------------------------------------------------*/ static struct hash_cell *__get_name_cell(const char *str) { struct hash_cell *hc; unsigned int h = hash_str(str); list_for_each_entry (hc, _name_buckets + h, name_list) if (!strcmp(hc->name, str)) { dm_get(hc->md); return hc; } return NULL; } static struct hash_cell *__get_uuid_cell(const char *str) { struct hash_cell *hc; unsigned int h = hash_str(str); list_for_each_entry (hc, _uuid_buckets + h, uuid_list) if (!strcmp(hc->uuid, str)) { dm_get(hc->md); return hc; } return NULL; } static struct hash_cell *__get_dev_cell(uint64_t dev) { struct mapped_device *md; struct hash_cell *hc; md = dm_get_md(huge_decode_dev(dev)); if (!md) return NULL; hc = dm_get_mdptr(md); if (!hc) { dm_put(md); return NULL; } return hc; } /*----------------------------------------------------------------- * Inserting, removing and renaming a device. 
*---------------------------------------------------------------*/ static struct hash_cell *alloc_cell(const char *name, const char *uuid, struct mapped_device *md) { struct hash_cell *hc; hc = kmalloc(sizeof(*hc), GFP_KERNEL); if (!hc) return NULL; hc->name = kstrdup(name, GFP_KERNEL); if (!hc->name) { kfree(hc); return NULL; } if (!uuid) hc->uuid = NULL; else { hc->uuid = kstrdup(uuid, GFP_KERNEL); if (!hc->uuid) { kfree(hc->name); kfree(hc); return NULL; } } INIT_LIST_HEAD(&hc->name_list); INIT_LIST_HEAD(&hc->uuid_list); hc->md = md; hc->new_map = NULL; return hc; } static void free_cell(struct hash_cell *hc) { if (hc) { kfree(hc->name); kfree(hc->uuid); kfree(hc); } } /* * The kdev_t and uuid of a device can never change once it is * initially inserted. */ static int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md) { struct hash_cell *cell, *hc; /* * Allocate the new cells. */ cell = alloc_cell(name, uuid, md); if (!cell) return -ENOMEM; /* * Insert the cell into both hash tables. */ down_write(&_hash_lock); hc = __get_name_cell(name); if (hc) { dm_put(hc->md); goto bad; } list_add(&cell->name_list, _name_buckets + hash_str(name)); if (uuid) { hc = __get_uuid_cell(uuid); if (hc) { list_del(&cell->name_list); dm_put(hc->md); goto bad; } list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid)); } dm_get(md); mutex_lock(&dm_hash_cells_mutex); dm_set_mdptr(md, cell); mutex_unlock(&dm_hash_cells_mutex); up_write(&_hash_lock); return 0; bad: up_write(&_hash_lock); free_cell(cell); return -EBUSY; } static struct dm_table *__hash_remove(struct hash_cell *hc) { struct dm_table *table; int srcu_idx; /* remove from the dev hash */ list_del(&hc->uuid_list); list_del(&hc->name_list); mutex_lock(&dm_hash_cells_mutex); dm_set_mdptr(hc->md, NULL); mutex_unlock(&dm_hash_cells_mutex); table = dm_get_live_table(hc->md, &srcu_idx); if (table) dm_table_event(table); dm_put_live_table(hc->md, srcu_idx); table = NULL; if (hc->new_map) table = hc->new_map; dm_put(hc->md); free_cell(hc); return table; } static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred) { int i, dev_skipped; struct hash_cell *hc; struct mapped_device *md; struct dm_table *t; retry: dev_skipped = 0; down_write(&_hash_lock); for (i = 0; i < NUM_BUCKETS; i++) { list_for_each_entry(hc, _name_buckets + i, name_list) { md = hc->md; dm_get(md); if (keep_open_devices && dm_lock_for_deletion(md, mark_deferred, only_deferred)) { dm_put(md); dev_skipped++; continue; } t = __hash_remove(hc); up_write(&_hash_lock); if (t) { dm_sync_table(md); dm_table_destroy(t); } dm_put(md); if (likely(keep_open_devices)) dm_destroy(md); else dm_destroy_immediate(md); /* * Some mapped devices may be using other mapped * devices, so repeat until we make no further * progress. If a new mapped device is created * here it will also get removed. */ goto retry; } } up_write(&_hash_lock); if (dev_skipped) DMWARN("remove_all left %d open device(s)", dev_skipped); } /* * Set the uuid of a hash_cell that isn't already set. */ static void __set_cell_uuid(struct hash_cell *hc, char *new_uuid) { mutex_lock(&dm_hash_cells_mutex); hc->uuid = new_uuid; mutex_unlock(&dm_hash_cells_mutex); list_add(&hc->uuid_list, _uuid_buckets + hash_str(new_uuid)); } /* * Changes the name of a hash_cell and returns the old name for * the caller to free. */ static char *__change_cell_name(struct hash_cell *hc, char *new_name) { char *old_name; /* * Rename and move the name cell. 
*/ list_del(&hc->name_list); old_name = hc->name; mutex_lock(&dm_hash_cells_mutex); hc->name = new_name; mutex_unlock(&dm_hash_cells_mutex); list_add(&hc->name_list, _name_buckets + hash_str(new_name)); return old_name; } static struct mapped_device *dm_hash_rename(struct dm_ioctl *param, const char *new) { char *new_data, *old_name = NULL; struct hash_cell *hc; struct dm_table *table; struct mapped_device *md; unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0; int srcu_idx; /* * duplicate new. */ new_data = kstrdup(new, GFP_KERNEL); if (!new_data) return ERR_PTR(-ENOMEM); down_write(&_hash_lock); /* * Is new free ? */ if (change_uuid) hc = __get_uuid_cell(new); else hc = __get_name_cell(new); if (hc) { DMWARN("Unable to change %s on mapped device %s to one that " "already exists: %s", change_uuid ? "uuid" : "name", param->name, new); dm_put(hc->md); up_write(&_hash_lock); kfree(new_data); return ERR_PTR(-EBUSY); } /* * Is there such a device as 'old' ? */ hc = __get_name_cell(param->name); if (!hc) { DMWARN("Unable to rename non-existent device, %s to %s%s", param->name, change_uuid ? "uuid " : "", new); up_write(&_hash_lock); kfree(new_data); return ERR_PTR(-ENXIO); } /* * Does this device already have a uuid? */ if (change_uuid && hc->uuid) { DMWARN("Unable to change uuid of mapped device %s to %s " "because uuid is already set to %s", param->name, new, hc->uuid); dm_put(hc->md); up_write(&_hash_lock); kfree(new_data); return ERR_PTR(-EINVAL); } if (change_uuid) __set_cell_uuid(hc, new_data); else old_name = __change_cell_name(hc, new_data); /* * Wake up any dm event waiters. */ table = dm_get_live_table(hc->md, &srcu_idx); if (table) dm_table_event(table); dm_put_live_table(hc->md, srcu_idx); if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr)) param->flags |= DM_UEVENT_GENERATED_FLAG; md = hc->md; up_write(&_hash_lock); kfree(old_name); return md; } void dm_deferred_remove(void) { dm_hash_remove_all(true, false, true); } /*----------------------------------------------------------------- * Implementation of the ioctl commands *---------------------------------------------------------------*/ /* * All the ioctl commands get dispatched to functions with this * prototype. */ typedef int (*ioctl_fn)(struct file *filp, struct dm_ioctl *param, size_t param_size); static int remove_all(struct file *filp, struct dm_ioctl *param, size_t param_size) { dm_hash_remove_all(true, !!(param->flags & DM_DEFERRED_REMOVE), false); param->data_size = 0; return 0; } /* * Round up the ptr to an 8-byte boundary. */ #define ALIGN_MASK 7 static inline size_t align_val(size_t val) { return (val + ALIGN_MASK) & ~ALIGN_MASK; } static inline void *align_ptr(void *ptr) { return (void *)align_val((size_t)ptr); } /* * Retrieves the data payload buffer from an already allocated * struct dm_ioctl. */ static void *get_result_buffer(struct dm_ioctl *param, size_t param_size, size_t *len) { param->data_start = align_ptr(param + 1) - (void *) param; if (param->data_start < param_size) *len = param_size - param->data_start; else *len = 0; return ((void *) param) + param->data_start; } static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_size) { unsigned int i; struct hash_cell *hc; size_t len, needed = 0; struct gendisk *disk; struct dm_name_list *orig_nl, *nl, *old_nl = NULL; uint32_t *event_nr; down_write(&_hash_lock); /* * Loop through all the devices working out how much * space we need. 
*/ for (i = 0; i < NUM_BUCKETS; i++) { list_for_each_entry (hc, _name_buckets + i, name_list) { needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1); needed += align_val(sizeof(uint32_t)); } } /* * Grab our output buffer. */ nl = orig_nl = get_result_buffer(param, param_size, &len); if (len < needed || len < sizeof(nl->dev)) { param->flags |= DM_BUFFER_FULL_FLAG; goto out; } param->data_size = param->data_start + needed; nl->dev = 0; /* Flags no data */ /* * Now loop through filling out the names. */ for (i = 0; i < NUM_BUCKETS; i++) { list_for_each_entry (hc, _name_buckets + i, name_list) { if (old_nl) old_nl->next = (uint32_t) ((void *) nl - (void *) old_nl); disk = dm_disk(hc->md); nl->dev = huge_encode_dev(disk_devt(disk)); nl->next = 0; strcpy(nl->name, hc->name); old_nl = nl; event_nr = align_ptr(nl->name + strlen(hc->name) + 1); *event_nr = dm_get_event_nr(hc->md); nl = align_ptr(event_nr + 1); } } /* * If mismatch happens, security may be compromised due to buffer * overflow, so it's better to crash. */ BUG_ON((char *)nl - (char *)orig_nl != needed); out: up_write(&_hash_lock); return 0; } static void list_version_get_needed(struct target_type *tt, void *needed_param) { size_t *needed = needed_param; *needed += sizeof(struct dm_target_versions); *needed += strlen(tt->name); *needed += ALIGN_MASK; } static void list_version_get_info(struct target_type *tt, void *param) { struct vers_iter *info = param; /* Check space - it might have changed since the first iteration */ if ((char *)info->vers + sizeof(tt->version) + strlen(tt->name) + 1 > info->end) { info->flags = DM_BUFFER_FULL_FLAG; return; } if (info->old_vers) info->old_vers->next = (uint32_t) ((void *)info->vers - (void *)info->old_vers); info->vers->version[0] = tt->version[0]; info->vers->version[1] = tt->version[1]; info->vers->version[2] = tt->version[2]; info->vers->next = 0; strcpy(info->vers->name, tt->name); info->old_vers = info->vers; info->vers = align_ptr(((void *) ++info->vers) + strlen(tt->name) + 1); } static int __list_versions(struct dm_ioctl *param, size_t param_size, const char *name) { size_t len, needed = 0; struct dm_target_versions *vers; struct vers_iter iter_info; struct target_type *tt = NULL; if (name) { tt = dm_get_target_type(name); if (!tt) return -EINVAL; } /* * Loop through all the devices working out how much * space we need. */ if (!tt) dm_target_iterate(list_version_get_needed, &needed); else list_version_get_needed(tt, &needed); /* * Grab our output buffer. */ vers = get_result_buffer(param, param_size, &len); if (len < needed) { param->flags |= DM_BUFFER_FULL_FLAG; goto out; } param->data_size = param->data_start + needed; iter_info.param_size = param_size; iter_info.old_vers = NULL; iter_info.vers = vers; iter_info.flags = 0; iter_info.end = (char *)vers+len; /* * Now loop through filling out the names & versions. 
*/ if (!tt) dm_target_iterate(list_version_get_info, &iter_info); else list_version_get_info(tt, &iter_info); param->flags |= iter_info.flags; out: if (tt) dm_put_target_type(tt); return 0; } static int list_versions(struct file *filp, struct dm_ioctl *param, size_t param_size) { return __list_versions(param, param_size, NULL); } static int get_target_version(struct file *filp, struct dm_ioctl *param, size_t param_size) { return __list_versions(param, param_size, param->name); } static int check_name(const char *name) { if (strchr(name, '/')) { DMWARN("invalid device name"); return -EINVAL; } return 0; } /* * On successful return, the caller must not attempt to acquire * _hash_lock without first calling dm_put_live_table, because dm_table_destroy * waits for this dm_put_live_table and could be called under this lock. */ static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *srcu_idx) { struct hash_cell *hc; struct dm_table *table = NULL; /* increment rcu count, we don't care about the table pointer */ dm_get_live_table(md, srcu_idx); down_read(&_hash_lock); hc = dm_get_mdptr(md); if (!hc || hc->md != md) { DMWARN("device has been removed from the dev hash table."); goto out; } table = hc->new_map; out: up_read(&_hash_lock); return table; } static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md, struct dm_ioctl *param, int *srcu_idx) { return (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) ? dm_get_inactive_table(md, srcu_idx) : dm_get_live_table(md, srcu_idx); } /* * Fills in a dm_ioctl structure, ready for sending back to * userland. */ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param) { struct gendisk *disk = dm_disk(md); struct dm_table *table; int srcu_idx; param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG | DM_ACTIVE_PRESENT_FLAG | DM_INTERNAL_SUSPEND_FLAG); if (dm_suspended_md(md)) param->flags |= DM_SUSPEND_FLAG; if (dm_suspended_internally_md(md)) param->flags |= DM_INTERNAL_SUSPEND_FLAG; if (dm_test_deferred_remove_flag(md)) param->flags |= DM_DEFERRED_REMOVE; param->dev = huge_encode_dev(disk_devt(disk)); /* * Yes, this will be out of date by the time it gets back * to userland, but it is still very useful for * debugging. */ param->open_count = dm_open_count(md); param->event_nr = dm_get_event_nr(md); param->target_count = 0; table = dm_get_live_table(md, &srcu_idx); if (table) { if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) { if (get_disk_ro(disk)) param->flags |= DM_READONLY_FLAG; param->target_count = dm_table_get_num_targets(table); } param->flags |= DM_ACTIVE_PRESENT_FLAG; } dm_put_live_table(md, srcu_idx); if (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) { int srcu_idx; table = dm_get_inactive_table(md, &srcu_idx); if (table) { if (!(dm_table_get_mode(table) & FMODE_WRITE)) param->flags |= DM_READONLY_FLAG; param->target_count = dm_table_get_num_targets(table); } dm_put_live_table(md, srcu_idx); } } static int dev_create(struct file *filp, struct dm_ioctl *param, size_t param_size) { int r, m = DM_ANY_MINOR; struct mapped_device *md; r = check_name(param->name); if (r) return r; if (param->flags & DM_PERSISTENT_DEV_FLAG) m = MINOR(huge_decode_dev(param->dev)); r = dm_create(m, &md); if (r) return r; r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md); if (r) { dm_put(md); dm_destroy(md); return r; } param->flags &= ~DM_INACTIVE_PRESENT_FLAG; __dev_status(md, param); dm_put(md); return 0; } /* * Always use UUID for lookups if it's present, otherwise use name or dev. 
*/ static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param) { struct hash_cell *hc = NULL; if (*param->uuid) { if (*param->name || param->dev) return NULL; hc = __get_uuid_cell(param->uuid); if (!hc) return NULL; } else if (*param->name) { if (param->dev) return NULL; hc = __get_name_cell(param->name); if (!hc) return NULL; } else if (param->dev) { hc = __get_dev_cell(param->dev); if (!hc) return NULL; } else return NULL; /* * Sneakily write in both the name and the uuid * while we have the cell. */ strlcpy(param->name, hc->name, sizeof(param->name)); if (hc->uuid) strlcpy(param->uuid, hc->uuid, sizeof(param->uuid)); else param->uuid[0] = '\0'; if (hc->new_map) param->flags |= DM_INACTIVE_PRESENT_FLAG; else param->flags &= ~DM_INACTIVE_PRESENT_FLAG; return hc; } static struct mapped_device *find_device(struct dm_ioctl *param) { struct hash_cell *hc; struct mapped_device *md = NULL; down_read(&_hash_lock); hc = __find_device_hash_cell(param); if (hc) md = hc->md; up_read(&_hash_lock); return md; } static int dev_remove(struct file *filp, struct dm_ioctl *param, size_t param_size) { struct hash_cell *hc; struct mapped_device *md; int r; struct dm_table *t; down_write(&_hash_lock); hc = __find_device_hash_cell(param); if (!hc) { DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table."); up_write(&_hash_lock); return -ENXIO; } md = hc->md; /* * Ensure the device is not open and nothing further can open it. */ r = dm_lock_for_deletion(md, !!(param->flags & DM_DEFERRED_REMOVE), false); if (r) { if (r == -EBUSY && param->flags & DM_DEFERRED_REMOVE) { up_write(&_hash_lock); dm_put(md); return 0; } DMDEBUG_LIMIT("unable to remove open device %s", hc->name); up_write(&_hash_lock); dm_put(md); return r; } t = __hash_remove(hc); up_write(&_hash_lock); if (t) { dm_sync_table(md); dm_table_destroy(t); } param->flags &= ~DM_DEFERRED_REMOVE; if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr)) param->flags |= DM_UEVENT_GENERATED_FLAG; dm_put(md); dm_destroy(md); return 0; } /* * Check a string doesn't overrun the chunk of * memory we copied from userland. */ static int invalid_str(char *str, void *end) { while ((void *) str < end) if (!*str++) return 0; return -EINVAL; } static int dev_rename(struct file *filp, struct dm_ioctl *param, size_t param_size) { int r; char *new_data = (char *) param + param->data_start; struct mapped_device *md; unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0; if (new_data < param->data || invalid_str(new_data, (void *) param + param_size) || !*new_data || strlen(new_data) > (change_uuid ? 
DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) { DMWARN("Invalid new mapped device name or uuid string supplied."); return -EINVAL; } if (!change_uuid) { r = check_name(new_data); if (r) return r; } md = dm_hash_rename(param, new_data); if (IS_ERR(md)) return PTR_ERR(md); __dev_status(md, param); dm_put(md); return 0; } static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t param_size) { int r = -EINVAL, x; struct mapped_device *md; struct hd_geometry geometry; unsigned long indata[4]; char *geostr = (char *) param + param->data_start; char dummy; md = find_device(param); if (!md) return -ENXIO; if (geostr < param->data || invalid_str(geostr, (void *) param + param_size)) { DMWARN("Invalid geometry supplied."); goto out; } x = sscanf(geostr, "%lu %lu %lu %lu%c", indata, indata + 1, indata + 2, indata + 3, &dummy); if (x != 4) { DMWARN("Unable to interpret geometry settings."); goto out; } if (indata[0] > 65535 || indata[1] > 255 || indata[2] > 255 || indata[3] > ULONG_MAX) { DMWARN("Geometry exceeds range limits."); goto out; } geometry.cylinders = indata[0]; geometry.heads = indata[1]; geometry.sectors = indata[2]; geometry.start = indata[3]; r = dm_set_geometry(md, &geometry); param->data_size = 0; out: dm_put(md); return r; } static int do_suspend(struct dm_ioctl *param) { int r = 0; unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG; struct mapped_device *md; md = find_device(param); if (!md) return -ENXIO; if (param->flags & DM_SKIP_LOCKFS_FLAG) suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; if (param->flags & DM_NOFLUSH_FLAG) suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; if (!dm_suspended_md(md)) { r = dm_suspend(md, suspend_flags); if (r) goto out; } __dev_status(md, param); out: dm_put(md); return r; } static int do_resume(struct dm_ioctl *param) { int r = 0; unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG; struct hash_cell *hc; struct mapped_device *md; struct dm_table *new_map, *old_map = NULL; down_write(&_hash_lock); hc = __find_device_hash_cell(param); if (!hc) { DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table."); up_write(&_hash_lock); return -ENXIO; } md = hc->md; new_map = hc->new_map; hc->new_map = NULL; param->flags &= ~DM_INACTIVE_PRESENT_FLAG; up_write(&_hash_lock); /* Do we need to load a new map ? */ if (new_map) { /* Suspend if it isn't already suspended */ if (param->flags & DM_SKIP_LOCKFS_FLAG) suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG; if (param->flags & DM_NOFLUSH_FLAG) suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG; if (!dm_suspended_md(md)) dm_suspend(md, suspend_flags); old_map = dm_swap_table(md, new_map); if (IS_ERR(old_map)) { dm_sync_table(md); dm_table_destroy(new_map); dm_put(md); return PTR_ERR(old_map); } if (dm_table_get_mode(new_map) & FMODE_WRITE) set_disk_ro(dm_disk(md), 0); else set_disk_ro(dm_disk(md), 1); } if (dm_suspended_md(md)) { r = dm_resume(md); if (!r && !dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr)) param->flags |= DM_UEVENT_GENERATED_FLAG; } /* * Since dm_swap_table synchronizes RCU, nobody should be in * read-side critical section already. */ if (old_map) dm_table_destroy(old_map); if (!r) __dev_status(md, param); dm_put(md); return r; } /* * Set or unset the suspension state of a device. * If the device already is in the requested state we just return its status. 
*/ static int dev_suspend(struct file *filp, struct dm_ioctl *param, size_t param_size) { if (param->flags & DM_SUSPEND_FLAG) return do_suspend(param); return do_resume(param); } /* * Copies device info back to user space, used by * the create and info ioctls. */ static int dev_status(struct file *filp, struct dm_ioctl *param, size_t param_size) { struct mapped_device *md; md = find_device(param); if (!md) return -ENXIO; __dev_status(md, param); dm_put(md); return 0; } /* * Build up the status struct for each target */ static void retrieve_status(struct dm_table *table, struct dm_ioctl *param, size_t param_size) { unsigned int i, num_targets; struct dm_target_spec *spec; char *outbuf, *outptr; status_type_t type; size_t remaining, len, used = 0; unsigned status_flags = 0; outptr = outbuf = get_result_buffer(param, param_size, &len); if (param->flags & DM_STATUS_TABLE_FLAG) type = STATUSTYPE_TABLE; else type = STATUSTYPE_INFO; /* Get all the target info */ num_targets = dm_table_get_num_targets(table); for (i = 0; i < num_targets; i++) { struct dm_target *ti = dm_table_get_target(table, i); size_t l; remaining = len - (outptr - outbuf); if (remaining <= sizeof(struct dm_target_spec)) { param->flags |= DM_BUFFER_FULL_FLAG; break; } spec = (struct dm_target_spec *) outptr; spec->status = 0; spec->sector_start = ti->begin; spec->length = ti->len; strncpy(spec->target_type, ti->type->name, sizeof(spec->target_type) - 1); outptr += sizeof(struct dm_target_spec); remaining = len - (outptr - outbuf); if (remaining <= 0) { param->flags |= DM_BUFFER_FULL_FLAG; break; } /* Get the status/table string from the target driver */ if (ti->type->status) { if (param->flags & DM_NOFLUSH_FLAG) status_flags |= DM_STATUS_NOFLUSH_FLAG; ti->type->status(ti, type, status_flags, outptr, remaining); } else outptr[0] = '\0'; l = strlen(outptr) + 1; if (l == remaining) { param->flags |= DM_BUFFER_FULL_FLAG; break; } outptr += l; used = param->data_start + (outptr - outbuf); outptr = align_ptr(outptr); spec->next = outptr - outbuf; } if (used) param->data_size = used; param->target_count = num_targets; } /* * Wait for a device to report an event */ static int dev_wait(struct file *filp, struct dm_ioctl *param, size_t param_size) { int r = 0; struct mapped_device *md; struct dm_table *table; int srcu_idx; md = find_device(param); if (!md) return -ENXIO; /* * Wait for a notification event */ if (dm_wait_event(md, param->event_nr)) { r = -ERESTARTSYS; goto out; } /* * The userland program is going to want to know what * changed to trigger the event, so we may as well tell * him and save an ioctl. */ __dev_status(md, param); table = dm_get_live_or_inactive_table(md, param, &srcu_idx); if (table) retrieve_status(table, param, param_size); dm_put_live_table(md, srcu_idx); out: dm_put(md); return r; } /* * Remember the global event number and make it possible to poll * for further events. 
*/ static int dev_arm_poll(struct file *filp, struct dm_ioctl *param, size_t param_size) { struct dm_file *priv = filp->private_data; priv->global_event_nr = atomic_read(&dm_global_event_nr); return 0; } static inline fmode_t get_mode(struct dm_ioctl *param) { fmode_t mode = FMODE_READ | FMODE_WRITE; if (param->flags & DM_READONLY_FLAG) mode = FMODE_READ; return mode; } static int next_target(struct dm_target_spec *last, uint32_t next, void *end, struct dm_target_spec **spec, char **target_params) { *spec = (struct dm_target_spec *) ((unsigned char *) last + next); *target_params = (char *) (*spec + 1); if (*spec < (last + 1)) return -EINVAL; return invalid_str(*target_params, end); } static int populate_table(struct dm_table *table, struct dm_ioctl *param, size_t param_size) { int r; unsigned int i = 0; struct dm_target_spec *spec = (struct dm_target_spec *) param; uint32_t next = param->data_start; void *end = (void *) param + param_size; char *target_params; if (!param->target_count) { DMWARN("populate_table: no targets specified"); return -EINVAL; } for (i = 0; i < param->target_count; i++) { r = next_target(spec, next, end, &spec, &target_params); if (r) { DMWARN("unable to find target"); return r; } r = dm_table_add_target(table, spec->target_type, (sector_t) spec->sector_start, (sector_t) spec->length, target_params); if (r) { DMWARN("error adding target to table"); return r; } next = spec->next; } return dm_table_complete(table); } static bool is_valid_type(enum dm_queue_mode cur, enum dm_queue_mode new) { if (cur == new || (cur == DM_TYPE_BIO_BASED && new == DM_TYPE_DAX_BIO_BASED)) return true; return false; } static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_size) { int r; struct hash_cell *hc; struct dm_table *t, *old_map = NULL; struct mapped_device *md; struct target_type *immutable_target_type; md = find_device(param); if (!md) return -ENXIO; r = dm_table_create(&t, get_mode(param), param->target_count, md); if (r) goto err; /* Protect md->type and md->queue against concurrent table loads. */ dm_lock_md_type(md); r = populate_table(t, param, param_size); if (r) goto err_unlock_md_type; immutable_target_type = dm_get_immutable_target_type(md); if (immutable_target_type && (immutable_target_type != dm_table_get_immutable_target_type(t)) && !dm_table_get_wildcard_target(t)) { DMWARN("can't replace immutable target type %s", immutable_target_type->name); r = -EINVAL; goto err_unlock_md_type; } if (dm_get_md_type(md) == DM_TYPE_NONE) { /* Initial table load: acquire type of table. 
*/ dm_set_md_type(md, dm_table_get_type(t)); /* setup md->queue to reflect md's type (may block) */ r = dm_setup_md_queue(md, t); if (r) { DMWARN("unable to set up device queue for new table."); goto err_unlock_md_type; } } else if (!is_valid_type(dm_get_md_type(md), dm_table_get_type(t))) { DMWARN("can't change device type (old=%u vs new=%u) after initial table load.", dm_get_md_type(md), dm_table_get_type(t)); r = -EINVAL; goto err_unlock_md_type; } dm_unlock_md_type(md); /* stage inactive table */ down_write(&_hash_lock); hc = dm_get_mdptr(md); if (!hc || hc->md != md) { DMWARN("device has been removed from the dev hash table."); up_write(&_hash_lock); r = -ENXIO; goto err_destroy_table; } if (hc->new_map) old_map = hc->new_map; hc->new_map = t; up_write(&_hash_lock); param->flags |= DM_INACTIVE_PRESENT_FLAG; __dev_status(md, param); if (old_map) { dm_sync_table(md); dm_table_destroy(old_map); } dm_put(md); return 0; err_unlock_md_type: dm_unlock_md_type(md); err_destroy_table: dm_table_destroy(t); err: dm_put(md); return r; } static int table_clear(struct file *filp, struct dm_ioctl *param, size_t param_size) { struct hash_cell *hc; struct mapped_device *md; struct dm_table *old_map = NULL; down_write(&_hash_lock); hc = __find_device_hash_cell(param); if (!hc) { DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table."); up_write(&_hash_lock); return -ENXIO; } if (hc->new_map) { old_map = hc->new_map; hc->new_map = NULL; } param->flags &= ~DM_INACTIVE_PRESENT_FLAG; __dev_status(hc->md, param); md = hc->md; up_write(&_hash_lock); if (old_map) { dm_sync_table(md); dm_table_destroy(old_map); } dm_put(md); return 0; } /* * Retrieves a list of devices used by a particular dm device. */ static void retrieve_deps(struct dm_table *table, struct dm_ioctl *param, size_t param_size) { unsigned int count = 0; struct list_head *tmp; size_t len, needed; struct dm_dev_internal *dd; struct dm_target_deps *deps; deps = get_result_buffer(param, param_size, &len); /* * Count the devices. */ list_for_each (tmp, dm_table_get_devices(table)) count++; /* * Check we have enough space. */ needed = struct_size(deps, dev, count); if (len < needed) { param->flags |= DM_BUFFER_FULL_FLAG; return; } /* * Fill in the devices. */ deps->count = count; count = 0; list_for_each_entry (dd, dm_table_get_devices(table), list) deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev); param->data_size = param->data_start + needed; } static int table_deps(struct file *filp, struct dm_ioctl *param, size_t param_size) { struct mapped_device *md; struct dm_table *table; int srcu_idx; md = find_device(param); if (!md) return -ENXIO; __dev_status(md, param); table = dm_get_live_or_inactive_table(md, param, &srcu_idx); if (table) retrieve_deps(table, param, param_size); dm_put_live_table(md, srcu_idx); dm_put(md); return 0; } /* * Return the status of a device as a text string for each * target. */ static int table_status(struct file *filp, struct dm_ioctl *param, size_t param_size) { struct mapped_device *md; struct dm_table *table; int srcu_idx; md = find_device(param); if (!md) return -ENXIO; __dev_status(md, param); table = dm_get_live_or_inactive_table(md, param, &srcu_idx); if (table) retrieve_status(table, param, param_size); dm_put_live_table(md, srcu_idx); dm_put(md); return 0; } /* * Process device-mapper dependent messages. Messages prefixed with '@' * are processed by the DM core. All others are delivered to the target. * Returns a number <= 1 if message was processed by device mapper. 
* Returns 2 if message should be delivered to the target. */ static int message_for_md(struct mapped_device *md, unsigned argc, char **argv, char *result, unsigned maxlen) { int r; if (**argv != '@') return 2; /* no '@' prefix, deliver to target */ if (!strcasecmp(argv[0], "@cancel_deferred_remove")) { if (argc != 1) { DMERR("Invalid arguments for @cancel_deferred_remove"); return -EINVAL; } return dm_cancel_deferred_remove(md); } r = dm_stats_message(md, argc, argv, result, maxlen); if (r < 2) return r; DMERR("Unsupported message sent to DM core: %s", argv[0]); return -EINVAL; } /* * Pass a message to the target that's at the supplied device offset. */ static int target_message(struct file *filp, struct dm_ioctl *param, size_t param_size) { int r, argc; char **argv; struct mapped_device *md; struct dm_table *table; struct dm_target *ti; struct dm_target_msg *tmsg = (void *) param + param->data_start; size_t maxlen; char *result = get_result_buffer(param, param_size, &maxlen); int srcu_idx; md = find_device(param); if (!md) return -ENXIO; if (tmsg < (struct dm_target_msg *) param->data || invalid_str(tmsg->message, (void *) param + param_size)) { DMWARN("Invalid target message parameters."); r = -EINVAL; goto out; } r = dm_split_args(&argc, &argv, tmsg->message); if (r) { DMWARN("Failed to split target message parameters"); goto out; } if (!argc) { DMWARN("Empty message received."); r = -EINVAL; goto out_argv; } r = message_for_md(md, argc, argv, result, maxlen); if (r <= 1) goto out_argv; table = dm_get_live_table(md, &srcu_idx); if (!table) goto out_table; if (dm_deleting_md(md)) { r = -ENXIO; goto out_table; } ti = dm_table_find_target(table, tmsg->sector); if (!ti) { DMWARN("Target message sector outside device."); r = -EINVAL; } else if (ti->type->message) r = ti->type->message(ti, argc, argv, result, maxlen); else { DMWARN("Target type does not support messages"); r = -EINVAL; } out_table: dm_put_live_table(md, srcu_idx); out_argv: kfree(argv); out: if (r >= 0) __dev_status(md, param); if (r == 1) { param->flags |= DM_DATA_OUT_FLAG; if (dm_message_test_buffer_overflow(result, maxlen)) param->flags |= DM_BUFFER_FULL_FLAG; else param->data_size = param->data_start + strlen(result) + 1; r = 0; } dm_put(md); return r; } /* * The ioctl parameter block consists of two parts, a dm_ioctl struct * followed by a data buffer. This flag is set if the second part, * which has a variable size, is not used by the function processing * the ioctl. */ #define IOCTL_FLAGS_NO_PARAMS 1 #define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT 2 /*----------------------------------------------------------------- * Implementation of open/close/ioctl on the special char * device. 
*---------------------------------------------------------------*/ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags) { static const struct { int cmd; int flags; ioctl_fn fn; } _ioctls[] = { {DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */ {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, remove_all}, {DM_LIST_DEVICES_CMD, 0, list_devices}, {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_create}, {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_remove}, {DM_DEV_RENAME_CMD, IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_rename}, {DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend}, {DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status}, {DM_DEV_WAIT_CMD, 0, dev_wait}, {DM_TABLE_LOAD_CMD, 0, table_load}, {DM_TABLE_CLEAR_CMD, IOCTL_FLAGS_NO_PARAMS, table_clear}, {DM_TABLE_DEPS_CMD, 0, table_deps}, {DM_TABLE_STATUS_CMD, 0, table_status}, {DM_LIST_VERSIONS_CMD, 0, list_versions}, {DM_TARGET_MSG_CMD, 0, target_message}, {DM_DEV_SET_GEOMETRY_CMD, 0, dev_set_geometry}, {DM_DEV_ARM_POLL, IOCTL_FLAGS_NO_PARAMS, dev_arm_poll}, {DM_GET_TARGET_VERSION, 0, get_target_version}, }; if (unlikely(cmd >= ARRAY_SIZE(_ioctls))) return NULL; *ioctl_flags = _ioctls[cmd].flags; return _ioctls[cmd].fn; } /* * As well as checking the version compatibility this always * copies the kernel interface version out. */ static int check_version(unsigned int cmd, struct dm_ioctl __user *user) { uint32_t version[3]; int r = 0; if (copy_from_user(version, user->version, sizeof(version))) return -EFAULT; if ((DM_VERSION_MAJOR != version[0]) || (DM_VERSION_MINOR < version[1])) { DMWARN("ioctl interface mismatch: " "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)", DM_VERSION_MAJOR, DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, version[0], version[1], version[2], cmd); r = -EINVAL; } /* * Fill in the kernel version. */ version[0] = DM_VERSION_MAJOR; version[1] = DM_VERSION_MINOR; version[2] = DM_VERSION_PATCHLEVEL; if (copy_to_user(user->version, version, sizeof(version))) return -EFAULT; return r; } #define DM_PARAMS_MALLOC 0x0001 /* Params allocated with kvmalloc() */ #define DM_WIPE_BUFFER 0x0010 /* Wipe input buffer before returning from ioctl */ static void free_params(struct dm_ioctl *param, size_t param_size, int param_flags) { if (param_flags & DM_WIPE_BUFFER) memset(param, 0, param_size); if (param_flags & DM_PARAMS_MALLOC) kvfree(param); } static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel, int ioctl_flags, struct dm_ioctl **param, int *param_flags) { struct dm_ioctl *dmi; int secure_data; const size_t minimum_data_size = offsetof(struct dm_ioctl, data); unsigned noio_flag; if (copy_from_user(param_kernel, user, minimum_data_size)) return -EFAULT; if (param_kernel->data_size < minimum_data_size) return -EINVAL; secure_data = param_kernel->flags & DM_SECURE_DATA_FLAG; *param_flags = secure_data ? DM_WIPE_BUFFER : 0; if (ioctl_flags & IOCTL_FLAGS_NO_PARAMS) { dmi = param_kernel; dmi->data_size = minimum_data_size; goto data_copied; } /* * Use __GFP_HIGH to avoid low memory issues when a device is * suspended and the ioctl is needed to resume it. * Use kmalloc() rather than vmalloc() when we can. 
*/ dmi = NULL; noio_flag = memalloc_noio_save(); dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL | __GFP_HIGH); memalloc_noio_restore(noio_flag); if (!dmi) { if (secure_data && clear_user(user, param_kernel->data_size)) return -EFAULT; return -ENOMEM; } *param_flags |= DM_PARAMS_MALLOC; /* Copy from param_kernel (which was already copied from user) */ memcpy(dmi, param_kernel, minimum_data_size); if (copy_from_user(&dmi->data, (char __user *)user + minimum_data_size, param_kernel->data_size - minimum_data_size)) goto bad; data_copied: /* Wipe the user buffer so we do not return it to userspace */ if (secure_data && clear_user(user, param_kernel->data_size)) goto bad; *param = dmi; return 0; bad: free_params(dmi, param_kernel->data_size, *param_flags); return -EFAULT; } static int validate_params(uint cmd, struct dm_ioctl *param) { /* Always clear this flag */ param->flags &= ~DM_BUFFER_FULL_FLAG; param->flags &= ~DM_UEVENT_GENERATED_FLAG; param->flags &= ~DM_SECURE_DATA_FLAG; param->flags &= ~DM_DATA_OUT_FLAG; /* Ignores parameters */ if (cmd == DM_REMOVE_ALL_CMD || cmd == DM_LIST_DEVICES_CMD || cmd == DM_LIST_VERSIONS_CMD) return 0; if (cmd == DM_DEV_CREATE_CMD) { if (!*param->name) { DMWARN("name not supplied when creating device"); return -EINVAL; } } else if (*param->uuid && *param->name) { DMWARN("only supply one of name or uuid, cmd(%u)", cmd); return -EINVAL; } /* Ensure strings are terminated */ param->name[DM_NAME_LEN - 1] = '\0'; param->uuid[DM_UUID_LEN - 1] = '\0'; return 0; } static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *user) { int r = 0; int ioctl_flags; int param_flags; unsigned int cmd; struct dm_ioctl *param; ioctl_fn fn = NULL; size_t input_param_size; struct dm_ioctl param_kernel; /* only root can play with this */ if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (_IOC_TYPE(command) != DM_IOCTL) return -ENOTTY; cmd = _IOC_NR(command); /* * Check the interface version passed in. This also * writes out the kernel's interface version. */ r = check_version(cmd, user); if (r) return r; /* * Nothing more to do for the version command. */ if (cmd == DM_VERSION_CMD) return 0; fn = lookup_ioctl(cmd, &ioctl_flags); if (!fn) { DMWARN("dm_ctl_ioctl: unknown command 0x%x", command); return -ENOTTY; } /* * Copy the parameters into kernel space. */ r = copy_params(user, &param_kernel, ioctl_flags, &param, &param_flags); if (r) return r; input_param_size = param->data_size; r = validate_params(cmd, param); if (r) goto out; param->data_size = offsetof(struct dm_ioctl, data); r = fn(file, param, input_param_size); if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) && unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS)) DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd); if (!r && ioctl_flags & IOCTL_FLAGS_ISSUE_GLOBAL_EVENT) dm_issue_global_event(); /* * Copy the results back to userland. 
*/ if (!r && copy_to_user(user, param, param->data_size)) r = -EFAULT; out: free_params(param, input_param_size, param_flags); return r; } static long dm_ctl_ioctl(struct file *file, uint command, ulong u) { return (long)ctl_ioctl(file, command, (struct dm_ioctl __user *)u); } #ifdef CONFIG_COMPAT static long dm_compat_ctl_ioctl(struct file *file, uint command, ulong u) { return (long)dm_ctl_ioctl(file, command, (ulong) compat_ptr(u)); } #else #define dm_compat_ctl_ioctl NULL #endif static int dm_open(struct inode *inode, struct file *filp) { int r; struct dm_file *priv; r = nonseekable_open(inode, filp); if (unlikely(r)) return r; priv = filp->private_data = kmalloc(sizeof(struct dm_file), GFP_KERNEL); if (!priv) return -ENOMEM; priv->global_event_nr = atomic_read(&dm_global_event_nr); return 0; } static int dm_release(struct inode *inode, struct file *filp) { kfree(filp->private_data); return 0; } static __poll_t dm_poll(struct file *filp, poll_table *wait) { struct dm_file *priv = filp->private_data; __poll_t mask = 0; poll_wait(filp, &dm_global_eventq, wait); if ((int)(atomic_read(&dm_global_event_nr) - priv->global_event_nr) > 0) mask |= EPOLLIN; return mask; } static const struct file_operations _ctl_fops = { .open = dm_open, .release = dm_release, .poll = dm_poll, .unlocked_ioctl = dm_ctl_ioctl, .compat_ioctl = dm_compat_ctl_ioctl, .owner = THIS_MODULE, .llseek = noop_llseek, }; static struct miscdevice _dm_misc = { .minor = MAPPER_CTRL_MINOR, .name = DM_NAME, .nodename = DM_DIR "/" DM_CONTROL_NODE, .fops = &_ctl_fops }; MODULE_ALIAS_MISCDEV(MAPPER_CTRL_MINOR); MODULE_ALIAS("devname:" DM_DIR "/" DM_CONTROL_NODE); /* * Create misc character device and link to DM_DIR/control. */ int __init dm_interface_init(void) { int r; r = dm_hash_init(); if (r) return r; r = misc_register(&_dm_misc); if (r) { DMERR("misc_register failed for control device"); dm_hash_exit(); return r; } DMINFO("%d.%d.%d%s initialised: %s", DM_VERSION_MAJOR, DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, DM_VERSION_EXTRA, DM_DRIVER_EMAIL); return 0; } void dm_interface_exit(void) { misc_deregister(&_dm_misc); dm_hash_exit(); } /** * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers * @md: Pointer to mapped_device * @name: Buffer (size DM_NAME_LEN) for name * @uuid: Buffer (size DM_UUID_LEN) for uuid or empty string if uuid not defined */ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid) { int r = 0; struct hash_cell *hc; if (!md) return -ENXIO; mutex_lock(&dm_hash_cells_mutex); hc = dm_get_mdptr(md); if (!hc || hc->md != md) { r = -ENXIO; goto out; } if (name) strcpy(name, hc->name); if (uuid) strcpy(uuid, hc->uuid ? : ""); out: mutex_unlock(&dm_hash_cells_mutex); return r; } EXPORT_SYMBOL_GPL(dm_copy_name_and_uuid); /** * dm_early_create - create a mapped device in early boot. * * @dmi: Contains main information of the device mapping to be created. * @spec_array: array of pointers to struct dm_target_spec. Describes the * mapping table of the device. * @target_params_array: array of strings with the parameters to a specific * target. * * Instead of having the struct dm_target_spec and the parameters for every * target embedded at the end of struct dm_ioctl (as performed in a normal * ioctl), pass them as arguments, so the caller doesn't need to serialize them. * The size of the spec_array and target_params_array is given by * @dmi->target_count. 
* This function is supposed to be called in early boot, so locking mechanisms * to protect against concurrent loads are not required. */ int __init dm_early_create(struct dm_ioctl *dmi, struct dm_target_spec **spec_array, char **target_params_array) { int r, m = DM_ANY_MINOR; struct dm_table *t, *old_map; struct mapped_device *md; unsigned int i; if (!dmi->target_count) return -EINVAL; r = check_name(dmi->name); if (r) return r; if (dmi->flags & DM_PERSISTENT_DEV_FLAG) m = MINOR(huge_decode_dev(dmi->dev)); /* alloc dm device */ r = dm_create(m, &md); if (r) return r; /* hash insert */ r = dm_hash_insert(dmi->name, *dmi->uuid ? dmi->uuid : NULL, md); if (r) goto err_destroy_dm; /* alloc table */ r = dm_table_create(&t, get_mode(dmi), dmi->target_count, md); if (r) goto err_hash_remove; /* add targets */ for (i = 0; i < dmi->target_count; i++) { r = dm_table_add_target(t, spec_array[i]->target_type, (sector_t) spec_array[i]->sector_start, (sector_t) spec_array[i]->length, target_params_array[i]); if (r) { DMWARN("error adding target to table"); goto err_destroy_table; } } /* finish table */ r = dm_table_complete(t); if (r) goto err_destroy_table; md->type = dm_table_get_type(t); /* setup md->queue to reflect md's type (may block) */ r = dm_setup_md_queue(md, t); if (r) { DMWARN("unable to set up device queue for new table."); goto err_destroy_table; } /* Set new map */ dm_suspend(md, 0); old_map = dm_swap_table(md, t); if (IS_ERR(old_map)) { r = PTR_ERR(old_map); goto err_destroy_table; } set_disk_ro(dm_disk(md), !!(dmi->flags & DM_READONLY_FLAG)); /* resume device */ r = dm_resume(md); if (r) goto err_destroy_table; DMINFO("%s (%s) is ready", md->disk->disk_name, dmi->name); dm_put(md); return 0; err_destroy_table: dm_table_destroy(t); err_hash_remove: (void) __hash_remove(__get_name_cell(dmi->name)); /* release reference from __get_name_cell */ dm_put(md); err_destroy_dm: dm_put(md); dm_destroy(md); return r; }
null
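The record above is built around the device-mapper control interface in drivers/md/dm-ioctl.c, whose ctl_ioctl() path validates the caller (CAP_SYS_ADMIN), checks the interface version with check_version(), dispatches through lookup_ioctl(), and copies parameters in and out with copy_params(). As a rough illustration of how that path is driven from user space, the following is a minimal sketch, assuming a Linux system exposing /dev/mapper/control and a process running as root; it simply issues the DM_VERSION command and prints the interface version the kernel writes back. The file name and flow are assumptions for illustration, not part of the dataset record.

/* Query the kernel device-mapper interface version via /dev/mapper/control.
 * Exercises the ctl_ioctl() -> check_version() path shown above; DM_VERSION
 * returns before any parameter payload is processed. Requires root. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dm-ioctl.h>

int main(void)
{
	struct dm_ioctl dmi;
	int fd = open("/dev/mapper/control", O_RDWR);

	if (fd < 0) {
		perror("open /dev/mapper/control");
		return 1;
	}

	memset(&dmi, 0, sizeof(dmi));
	/* check_version() compares these against the kernel's own version
	 * and rewrites them with the kernel values on return. */
	dmi.version[0] = DM_VERSION_MAJOR;
	dmi.version[1] = DM_VERSION_MINOR;
	dmi.version[2] = DM_VERSION_PATCHLEVEL;
	/* copy_params() rejects data_size smaller than offsetof(struct dm_ioctl, data);
	 * harmless for DM_VERSION, which returns before parameters are copied. */
	dmi.data_size = sizeof(dmi);

	if (ioctl(fd, DM_VERSION, &dmi) < 0) {
		perror("DM_VERSION");
		close(fd);
		return 1;
	}

	printf("dm interface %u.%u.%u\n",
	       dmi.version[0], dmi.version[1], dmi.version[2]);
	close(fd);
	return 0;
}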
269
CWE-787
CVE-2021-32136
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2021 * All rights reserved * * This file is part of GPAC / mp4box application * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include "mp4box.h" #if defined(GPAC_DISABLE_ISOM) || defined(GPAC_DISABLE_ISOM_WRITE) #error "Cannot compile MP4Box if GPAC is not built with ISO File Format support" #else #ifndef GPAC_DISABLE_X3D #include <gpac/nodes_x3d.h> #endif #ifndef GPAC_DISABLE_BIFS #include <gpac/internal/bifs_dev.h> #endif #ifndef GPAC_DISABLE_VRML #include <gpac/nodes_mpeg4.h> #endif #include <gpac/constants.h> #include <gpac/avparse.h> #include <gpac/internal/media_dev.h> /*ISO 639 languages*/ #include <gpac/iso639.h> #include <gpac/mpegts.h> #ifndef GPAC_DISABLE_SMGR #include <gpac/scene_manager.h> #endif #include <gpac/internal/media_dev.h> #include <gpac/media_tools.h> /*for built-in box printing*/ #include <gpac/internal/isomedia_dev.h> extern u32 swf_flags; extern Float swf_flatten_angle; extern GF_FileType get_file_type_by_ext(char *inName); extern u32 fs_dump_flags; void scene_coding_log(void *cbk, GF_LOG_Level log_level, GF_LOG_Tool log_tool, const char *fmt, va_list vlist); #ifdef GPAC_DISABLE_LOG void mp4box_log(const char *fmt, ...) { va_list vl; va_start(vl, fmt); vfprintf(stderr, fmt, vlist); fflush(stderr); va_end(vl); } #endif u32 PrintLanguages(char *val, u32 opt) { u32 i=0, count = gf_lang_get_count(); fprintf(stderr, "Supported ISO 639 languages and codes:\n\n"); for (i=0; i<count; i++) { if (gf_lang_get_2cc(i)) { fprintf(stderr, "%s (%s - %s)\n", gf_lang_get_name(i), gf_lang_get_3cc(i), gf_lang_get_2cc(i)); } } return 1; } static const char *GetLanguage(char *lcode) { s32 idx = gf_lang_find(lcode); if (idx>=0) return gf_lang_get_name(idx); return lcode; } GF_Err dump_isom_cover_art(GF_ISOFile *file, char *inName, Bool is_final_name) { const u8 *tag; FILE *t; u32 tag_len; GF_Err e = gf_isom_apple_get_tag(file, GF_ISOM_ITUNE_COVER_ART, &tag, &tag_len); if (e!=GF_OK) { if (e==GF_URL_ERROR) { M4_LOG(GF_LOG_WARNING, ("No cover art found\n")); return GF_OK; } return e; } if (inName) { char szName[1024]; if (is_final_name) { strcpy(szName, inName); } else { sprintf(szName, "%s.%s", inName, (tag_len>>31) ? 
"png" : "jpg"); } t = gf_fopen(szName, "wb"); if (!t) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szName)); return GF_IO_ERR; } } else { t = stdout; } gf_fwrite(tag, tag_len & 0x7FFFFFFF, t); if (inName) gf_fclose(t); return GF_OK; } #ifndef GPAC_DISABLE_SCENE_DUMP GF_Err dump_isom_scene(char *file, char *inName, Bool is_final_name, GF_SceneDumpFormat dump_mode, Bool do_log, Bool no_odf_conv) { GF_Err e; GF_SceneManager *ctx; GF_SceneGraph *sg; GF_SceneLoader load; GF_FileType ftype; gf_log_cbk prev_logs = NULL; FILE *logs = NULL; sg = gf_sg_new(); ctx = gf_sm_new(sg); memset(&load, 0, sizeof(GF_SceneLoader)); load.fileName = file; load.ctx = ctx; load.swf_import_flags = swf_flags; if (dump_mode == GF_SM_DUMP_SVG) { load.swf_import_flags |= GF_SM_SWF_USE_SVG; load.svgOutFile = inName; } load.swf_flatten_limit = swf_flatten_angle; ftype = get_file_type_by_ext(file); if (ftype == GF_FILE_TYPE_ISO_MEDIA) { load.isom = gf_isom_open(file, GF_ISOM_OPEN_READ, NULL); if (!load.isom) { e = gf_isom_last_error(NULL); M4_LOG(GF_LOG_ERROR, ("Error opening file: %s\n", gf_error_to_string(e))); gf_sm_del(ctx); gf_sg_del(sg); return e; } if (no_odf_conv) gf_isom_disable_odf_conversion(load.isom, GF_TRUE); } else if (ftype==GF_FILE_TYPE_LSR_SAF) { load.isom = gf_isom_open("saf_conv", GF_ISOM_WRITE_EDIT, NULL); #ifndef GPAC_DISABLE_MEDIA_IMPORT if (load.isom) { GF_Fraction _frac = {0,0}; e = import_file(load.isom, file, 0, _frac, 0, NULL, NULL, 0); } else #else M4_LOG(GF_LOG_WARNING, ("Warning: GPAC was compiled without Media Import support\n")); #endif e = gf_isom_last_error(NULL); if (e) { M4_LOG(GF_LOG_ERROR, ("Error importing file: %s\n", gf_error_to_string(e))); gf_sm_del(ctx); gf_sg_del(sg); if (load.isom) gf_isom_delete(load.isom); return e; } } if (do_log) { char szLog[GF_MAX_PATH]; sprintf(szLog, "%s_dec.logs", inName); logs = gf_fopen(szLog, "wt"); gf_log_set_tool_level(GF_LOG_CODING, GF_LOG_DEBUG); prev_logs = gf_log_set_callback(logs, scene_coding_log); } e = gf_sm_load_init(&load); if (!e) e = gf_sm_load_run(&load); gf_sm_load_done(&load); if (logs) { gf_log_set_tool_level(GF_LOG_CODING, GF_LOG_ERROR); gf_log_set_callback(NULL, prev_logs); gf_fclose(logs); } if (!e && dump_mode != GF_SM_DUMP_SVG) { u32 count = gf_list_count(ctx->streams); if (count) fprintf(stderr, "Scene loaded - dumping %d systems streams\n", count); else fprintf(stderr, "Scene loaded - dumping root scene\n"); e = gf_sm_dump(ctx, inName, is_final_name, dump_mode); } gf_sm_del(ctx); gf_sg_del(sg); if (e) M4_LOG(GF_LOG_ERROR, ("Error loading scene: %s\n", gf_error_to_string(e))); if (load.isom) gf_isom_delete(load.isom); return e; } #endif #ifndef GPAC_DISABLE_SCENE_STATS static void dump_stats(FILE *dump, const GF_SceneStatistics *stats) { u32 i; s32 created, count, draw_created, draw_count, deleted, draw_deleted; created = count = draw_created = draw_count = deleted = draw_deleted = 0; fprintf(dump, "<NodeStatistics>\n"); fprintf(dump, "<General NumberOfNodeTypes=\"%d\"/>\n", gf_list_count(stats->node_stats)); for (i=0; i<gf_list_count(stats->node_stats); i++) { GF_NodeStats *ptr = gf_list_get(stats->node_stats, i); fprintf(dump, "<NodeStat NodeName=\"%s\">\n", ptr->name); switch (ptr->tag) { #ifndef GPAC_DISABLE_VRML case TAG_MPEG4_Bitmap: case TAG_MPEG4_Background2D: case TAG_MPEG4_Background: case TAG_MPEG4_Box: case TAG_MPEG4_Circle: case TAG_MPEG4_CompositeTexture2D: case TAG_MPEG4_CompositeTexture3D: case TAG_MPEG4_Cylinder: case TAG_MPEG4_Cone: case TAG_MPEG4_Curve2D: case TAG_MPEG4_Extrusion: case 
TAG_MPEG4_ElevationGrid: case TAG_MPEG4_IndexedFaceSet2D: case TAG_MPEG4_IndexedFaceSet: case TAG_MPEG4_IndexedLineSet2D: case TAG_MPEG4_IndexedLineSet: case TAG_MPEG4_PointSet2D: case TAG_MPEG4_PointSet: case TAG_MPEG4_Rectangle: case TAG_MPEG4_Sphere: case TAG_MPEG4_Text: case TAG_MPEG4_Ellipse: case TAG_MPEG4_XCurve2D: draw_count += ptr->nb_created + ptr->nb_used - ptr->nb_del; draw_deleted += ptr->nb_del; draw_created += ptr->nb_created; break; #endif /*GPAC_DISABLE_VRML*/ } fprintf(dump, "<Instanciation NbObjects=\"%d\" NbUse=\"%d\" NbDestroy=\"%d\"/>\n", ptr->nb_created, ptr->nb_used, ptr->nb_del); count += ptr->nb_created + ptr->nb_used; deleted += ptr->nb_del; created += ptr->nb_created; fprintf(dump, "</NodeStat>\n"); } if (i) { fprintf(dump, "<CumulatedStat TotalNumberOfNodes=\"%d\" ReallyAllocatedNodes=\"%d\" DeletedNodes=\"%d\" NumberOfAttributes=\"%d\"/>\n", count, created, deleted, stats->nb_svg_attributes); fprintf(dump, "<DrawableNodesCumulatedStat TotalNumberOfNodes=\"%d\" ReallyAllocatedNodes=\"%d\" DeletedNodes=\"%d\"/>\n", draw_count, draw_created, draw_deleted); } fprintf(dump, "</NodeStatistics>\n"); created = count = deleted = 0; if (gf_list_count(stats->proto_stats)) { fprintf(dump, "<ProtoStatistics NumberOfProtoUsed=\"%d\">\n", gf_list_count(stats->proto_stats)); for (i=0; i<gf_list_count(stats->proto_stats); i++) { GF_NodeStats *ptr = gf_list_get(stats->proto_stats, i); fprintf(dump, "<ProtoStat ProtoName=\"%s\">\n", ptr->name); fprintf(dump, "<Instanciation NbObjects=\"%d\" NbUse=\"%d\" NbDestroy=\"%d\"/>\n", ptr->nb_created, ptr->nb_used, ptr->nb_del); count += ptr->nb_created + ptr->nb_used; deleted += ptr->nb_del; created += ptr->nb_created; fprintf(dump, "</ProtoStat>\n"); } if (i) fprintf(dump, "<CumulatedStat TotalNumberOfProtos=\"%d\" ReallyAllocatedProtos=\"%d\" DeletedProtos=\"%d\"/>\n", count, created, deleted); fprintf(dump, "</ProtoStatistics>\n"); } fprintf(dump, "<FixedValues min=\"%f\" max=\"%f\">\n", FIX2FLT( stats->min_fixed) , FIX2FLT( stats->max_fixed )); fprintf(dump, "<Resolutions scaleIntegerPart=\"%d\" scaleFracPart=\"%d\" coordIntegerPart=\"%d\" coordFracPart=\"%d\"/>\n", stats->scale_int_res_2d, stats->scale_frac_res_2d, stats->int_res_2d, stats->frac_res_2d); fprintf(dump, "</FixedValues>\n"); fprintf(dump, "<FieldStatistic FieldType=\"MFVec2f\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\" NumRemoved=\"%d\"/>\n", stats->count_2d, stats->rem_2d); if (stats->count_2d) { fprintf(dump, "<ExtendInfo MinVec2f=\"%f %f\" MaxVec2f=\"%f %f\"/>\n", FIX2FLT( stats->min_2d.x) , FIX2FLT( stats->min_2d.y ), FIX2FLT( stats->max_2d.x ), FIX2FLT( stats->max_2d.y ) ); } fprintf(dump, "</FieldStatistic>\n"); fprintf(dump, "<FieldStatistic FieldType=\"MFVec3f\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\" NumRemoved=\"%d\"/>", stats->count_3d, stats->rem_3d); if (stats->count_3d) { fprintf(dump, "<ExtendInfo MinVec3f=\"%f %f %f\" MaxVec3f=\"%f %f %f\"/>\n", FIX2FLT( stats->min_3d.x ), FIX2FLT( stats->min_3d.y ), FIX2FLT( stats->min_3d.z ), FIX2FLT( stats->max_3d.x ), FIX2FLT( stats->max_3d.y ), FIX2FLT( stats->max_3d.z ) ); } fprintf(dump, "</FieldStatistic>\n"); fprintf(dump, "<FieldStatistic FieldType=\"MF/SFColor\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\" NumRemoved=\"%d\"/>", stats->count_color, stats->rem_color); fprintf(dump, "</FieldStatistic>\n"); fprintf(dump, "<FieldStatistic FieldType=\"MF/SFFloat\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\" NumRemoved=\"%d\"/>", stats->count_float, stats->rem_float); fprintf(dump, 
"</FieldStatistic>\n"); fprintf(dump, "<FieldStatistic FieldType=\"SFVec2f\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\"/>", stats->count_2f); fprintf(dump, "</FieldStatistic>\n"); fprintf(dump, "<FieldStatistic FieldType=\"SFVec3f\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\"/>", stats->count_3f); fprintf(dump, "</FieldStatistic>\n"); } static void ReorderAU(GF_List *sample_list, GF_AUContext *au) { u32 i; for (i=0; i<gf_list_count(sample_list); i++) { GF_AUContext *ptr = gf_list_get(sample_list, i); if ( /*time ordered*/ (ptr->timing_sec > au->timing_sec) /*set bifs first*/ || ((ptr->timing_sec == au->timing_sec) && (ptr->owner->streamType < au->owner->streamType)) ) { gf_list_insert(sample_list, au, i); return; } } gf_list_add(sample_list, au); } void dump_isom_scene_stats(char *file, char *inName, Bool is_final_name, u32 stat_level) { GF_Err e; FILE *dump; Bool close; u32 i, j, count; char szBuf[1024]; GF_SceneManager *ctx; GF_SceneLoader load; GF_StatManager *sm; GF_List *sample_list; GF_SceneGraph *scene_graph; dump = NULL; sm = NULL; sample_list = NULL; close = 0; scene_graph = gf_sg_new(); ctx = gf_sm_new(scene_graph); memset(&load, 0, sizeof(GF_SceneLoader)); load.fileName = file; load.ctx = ctx; if (get_file_type_by_ext(file) == 1) { load.isom = gf_isom_open(file, GF_ISOM_OPEN_READ, NULL); if (!load.isom) { M4_LOG(GF_LOG_ERROR, ("Cannot open file: %s\n", gf_error_to_string(gf_isom_last_error(NULL)))); gf_sm_del(ctx); gf_sg_del(scene_graph); return; } } e = gf_sm_load_init(&load); if (!e) e = gf_sm_load_run(&load); gf_sm_load_done(&load); if (e<0) goto exit; if (inName) { strcpy(szBuf, inName); if (!is_final_name) strcat(szBuf, "_stat.xml"); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } close = 1; } else { dump = stdout; close = 0; } fprintf(stderr, "Analysing Scene\n"); fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"); fprintf(dump, "<!-- Scene Graph Statistics Generated by MP4Box - GPAC "); if (! gf_sys_is_test_mode()) fprintf(dump, "%s ", gf_gpac_version()); fprintf(dump, "-->\n"); fprintf(dump, "<SceneStatistics file=\"%s\" DumpType=\"%s\">\n", gf_file_basename(file), (stat_level==1) ? "full scene" : ((stat_level==2) ? 
"AccessUnit based" : "SceneGraph after each AU")); sm = gf_sm_stats_new(); /*stat level 1: complete scene stat*/ if (stat_level == 1) { e = gf_sm_stats_for_scene(sm, ctx); if (!e) dump_stats(dump, gf_sm_stats_get(sm) ); goto exit; } /*re_order all BIFS-AUs*/ sample_list = gf_list_new(); /*configure all systems streams we're dumping*/ for (i=0; i<gf_list_count(ctx->streams); i++) { GF_StreamContext *sc = gf_list_get(ctx->streams, i); if (sc->streamType != GF_STREAM_SCENE) continue; for (j=0; j<gf_list_count(sc->AUs); j++) { GF_AUContext *au = gf_list_get(sc->AUs, j); ReorderAU(sample_list, au); } } count = gf_list_count(sample_list); for (i=0; i<count; i++) { GF_AUContext *au = gf_list_get(sample_list, i); for (j=0; j<gf_list_count(au->commands); j++) { GF_Command *com = gf_list_get(au->commands, j); /*stat level 2 - get command stats*/ if (stat_level==2) { e = gf_sm_stats_for_command(sm, com); if (e) goto exit; } /*stat level 3 - apply command*/ if (stat_level==3) gf_sg_command_apply(scene_graph, com, 0); } /*stat level 3: get graph stat*/ if (stat_level==3) { e = gf_sm_stats_for_graph(sm, scene_graph); if (e) goto exit; } if (stat_level==2) { fprintf(dump, "<AUStatistics StreamID=\"%d\" AUTime=\""LLD"\">\n", au->owner->ESID, au->timing); } else { fprintf(dump, "<GraphStatistics StreamID=\"%d\" AUTime=\""LLD"\">\n", au->owner->ESID, au->timing); } /*dump stats*/ dump_stats(dump, gf_sm_stats_get(sm) ); /*reset stats*/ gf_sm_stats_reset(sm); if (stat_level==2) { fprintf(dump, "</AUStatistics>\n"); } else { fprintf(dump, "</GraphStatistics>\n"); } gf_set_progress("Analysing AU", i+1, count); } exit: if (sample_list) gf_list_del(sample_list); if (sm) gf_sm_stats_del(sm); gf_sm_del(ctx); gf_sg_del(scene_graph); if (load.isom) gf_isom_delete(load.isom); if (e) { M4_LOG(GF_LOG_ERROR, ("Stats error: %s\n", gf_error_to_string(e))); } else { fprintf(dump, "</SceneStatistics>\n"); } if (dump && close) gf_fclose(dump); fprintf(stderr, "done\n"); } #endif /*GPAC_DISABLE_SCENE_STATS*/ #ifndef GPAC_DISABLE_VRML static void PrintFixed(Fixed val, Bool add_space) { if (add_space) fprintf(stderr, " "); if (val==FIX_MIN) fprintf(stderr, "-I"); else if (val==FIX_MAX) fprintf(stderr, "+I"); else fprintf(stderr, "%g", FIX2FLT(val)); } static void PrintNodeSFField(u32 type, void *far_ptr) { if (!far_ptr) return; switch (type) { case GF_SG_VRML_SFBOOL: fprintf(stderr, "%s", (*(SFBool *)far_ptr) ? 
"TRUE" : "FALSE"); break; case GF_SG_VRML_SFINT32: fprintf(stderr, "%d", (*(SFInt32 *)far_ptr)); break; case GF_SG_VRML_SFFLOAT: PrintFixed((*(SFFloat *)far_ptr), 0); break; case GF_SG_VRML_SFTIME: fprintf(stderr, "%g", (*(SFTime *)far_ptr)); break; case GF_SG_VRML_SFVEC2F: PrintFixed(((SFVec2f *)far_ptr)->x, 0); PrintFixed(((SFVec2f *)far_ptr)->y, 1); break; case GF_SG_VRML_SFVEC3F: PrintFixed(((SFVec3f *)far_ptr)->x, 0); PrintFixed(((SFVec3f *)far_ptr)->y, 1); PrintFixed(((SFVec3f *)far_ptr)->z, 1); break; case GF_SG_VRML_SFROTATION: PrintFixed(((SFRotation *)far_ptr)->x, 0); PrintFixed(((SFRotation *)far_ptr)->y, 1); PrintFixed(((SFRotation *)far_ptr)->z, 1); PrintFixed(((SFRotation *)far_ptr)->q, 1); break; case GF_SG_VRML_SFCOLOR: PrintFixed(((SFColor *)far_ptr)->red, 0); PrintFixed(((SFColor *)far_ptr)->green, 1); PrintFixed(((SFColor *)far_ptr)->blue, 1); break; case GF_SG_VRML_SFSTRING: if (((SFString*)far_ptr)->buffer) fprintf(stderr, "\"%s\"", ((SFString*)far_ptr)->buffer); else fprintf(stderr, "NULL"); break; } } #endif #ifndef GPAC_DISABLE_VRML static void do_print_node(GF_Node *node, GF_SceneGraph *sg, const char *name, u32 graph_type, Bool is_nodefield, Bool do_cov) { u32 nbF, i; GF_FieldInfo f; #ifndef GPAC_DISABLE_BIFS u8 qt, at; Fixed bmin, bmax; u32 nbBits; #endif /*GPAC_DISABLE_BIFS*/ nbF = gf_node_get_field_count(node); if (is_nodefield) { char szField[1024]; u32 tfirst, tlast; if (gf_node_get_field_by_name(node, szField, &f) != GF_OK) { M4_LOG(GF_LOG_ERROR, ("Field %s is not a member of node %s\n", szField, name)); return; } fprintf(stderr, "Allowed nodes in %s.%s:\n", name, szField); if (graph_type==1) { tfirst = GF_NODE_RANGE_FIRST_X3D; tlast = GF_NODE_RANGE_LAST_X3D; } else { tfirst = GF_NODE_RANGE_FIRST_MPEG4; tlast = GF_NODE_RANGE_LAST_MPEG4; } for (i=tfirst; i<tlast; i++) { GF_Node *tmp = gf_node_new(sg, i); gf_node_register(tmp, NULL); if (gf_node_in_table_by_tag(i, f.NDTtype)) { const char *nname = gf_node_get_class_name(tmp); if (nname && strcmp(nname, "Unknown Node")) { fprintf(stderr, "\t%s\n", nname); } } gf_node_unregister(tmp, NULL); } return; } if (do_cov) { u32 ndt; if (graph_type==0) { u32 all; gf_node_mpeg4_type_by_class_name(name); gf_bifs_get_child_table(node); all = gf_node_get_num_fields_in_mode(node, GF_SG_FIELD_CODING_ALL); for (i=0; i<all; i++) { u32 res; gf_sg_script_get_field_index(node, i, GF_SG_FIELD_CODING_ALL, &res); } gf_node_get_num_fields_in_mode(node, GF_SG_FIELD_CODING_DEF); gf_node_get_num_fields_in_mode(node, GF_SG_FIELD_CODING_IN); gf_node_get_num_fields_in_mode(node, GF_SG_FIELD_CODING_OUT); gf_node_get_num_fields_in_mode(node, GF_SG_FIELD_CODING_DYN); } else if (graph_type==1) gf_node_x3d_type_by_class_name(name); for (ndt=NDT_SFWorldNode; ndt<NDT_LAST; ndt++) { gf_node_in_table_by_tag(gf_node_get_tag(node), ndt); } } fprintf(stderr, "%s {\n", name); for (i=0; i<nbF; i++) { gf_node_get_field(node, i, &f); if (graph_type==2) { fprintf(stderr, "\t%s=\"...\"\n", f.name); continue; } fprintf(stderr, "\t%s %s %s", gf_sg_vrml_get_event_type_name(f.eventType, 0), gf_sg_vrml_get_field_type_name(f.fieldType), f.name); if (f.fieldType==GF_SG_VRML_SFNODE) fprintf(stderr, " NULL"); else if (f.fieldType==GF_SG_VRML_MFNODE) fprintf(stderr, " []"); else if (gf_sg_vrml_is_sf_field(f.fieldType)) { fprintf(stderr, " "); PrintNodeSFField(f.fieldType, f.far_ptr); } else { void *ptr; u32 j, sftype; GenMFField *mffield = (GenMFField *) f.far_ptr; fprintf(stderr, " ["); sftype = gf_sg_vrml_get_sf_type(f.fieldType); for (j=0; j<mffield->count; j++) { 
if (j) fprintf(stderr, " "); gf_sg_vrml_mf_get_item(f.far_ptr, f.fieldType, &ptr, j); PrintNodeSFField(sftype, ptr); } fprintf(stderr, "]"); } #ifndef GPAC_DISABLE_BIFS if (gf_bifs_get_aq_info(node, i, &qt, &at, &bmin, &bmax, &nbBits)) { if (qt) { fprintf(stderr, " #QP=%d", qt); if (qt==13) fprintf(stderr, " NbBits=%d", nbBits); if (bmin && bmax) { fprintf(stderr, " Bounds=["); PrintFixed(bmin, 0); fprintf(stderr, ","); PrintFixed(bmax, 0); fprintf(stderr, "]"); } } } #endif /*GPAC_DISABLE_BIFS*/ fprintf(stderr, "\n"); if (do_cov) { gf_node_get_field_by_name(node, (char *) f.name, &f); } } fprintf(stderr, "}\n\n"); } #endif u32 PrintNode(const char *name, u32 graph_type) { #ifdef GPAC_DISABLE_VRML M4_LOG(GF_LOG_ERROR, ("VRML/MPEG-4/X3D scene graph is disabled in this build of GPAC\n")); return 2; #else const char *std_name; GF_Node *node; GF_SceneGraph *sg; u32 tag; #ifndef GPAC_DISABLE_BIFS #endif /*GPAC_DISABLE_BIFS*/ Bool is_nodefield = 0; char *sep = strchr(name, '.'); if (sep) { sep[0] = 0; is_nodefield = 1; } if (graph_type==1) { #ifndef GPAC_DISABLE_X3D tag = gf_node_x3d_type_by_class_name(name); std_name = "X3D"; #else M4_LOG(GF_LOG_ERROR, ("X3D node printing is not supported (X3D support disabled)\n")); return 2; #endif } else { tag = gf_node_mpeg4_type_by_class_name(name); std_name = "MPEG4"; } if (!tag) { M4_LOG(GF_LOG_ERROR, ("Unknown %s node %s\n", std_name, name)); return 2; } sg = gf_sg_new(); node = gf_node_new(sg, tag); gf_node_register(node, NULL); name = gf_node_get_class_name(node); if (!node) { M4_LOG(GF_LOG_ERROR, ("Node %s not supported in current built\n", name)); return 2; } do_print_node(node, sg, name, graph_type, is_nodefield, GF_FALSE); gf_node_unregister(node, NULL); gf_sg_del(sg); #endif /*GPAC_DISABLE_VRML*/ return 1; } u32 PrintBuiltInNodes(char *arg_val, u32 dump_type) { #if !defined(GPAC_DISABLE_VRML) && !defined(GPAC_DISABLE_X3D) && !defined(GPAC_DISABLE_SVG) GF_SceneGraph *sg; u32 i, nb_in, nb_not_in, start_tag, end_tag; u32 graph_type; Bool dump_nodes = ((dump_type==1) || (dump_type==3)) ? 
1 : 0; if (dump_type==4) graph_type = 2; else if ((dump_type==2) || (dump_type==3)) graph_type = 1; else graph_type = 0; if (graph_type==1) { #if !defined(GPAC_DISABLE_VRML) && !defined(GPAC_DISABLE_X3D) start_tag = GF_NODE_RANGE_FIRST_X3D; end_tag = TAG_LastImplementedX3D; #else M4_LOG(GF_LOG_ERROR, ("X3D scene graph disabled in this build of GPAC\n")); return 2; #endif } else if (graph_type==2) { #ifdef GPAC_DISABLE_SVG M4_LOG(GF_LOG_ERROR, ("SVG scene graph disabled in this build of GPAC\n")); return 2; #else start_tag = GF_NODE_RANGE_FIRST_SVG; end_tag = GF_NODE_RANGE_LAST_SVG; #endif } else { #ifdef GPAC_DISABLE_VRML M4_LOG(GF_LOG_ERROR, ("VRML/MPEG-4 scene graph disabled in this build of GPAC\n")); return 2; #else start_tag = GF_NODE_RANGE_FIRST_MPEG4; end_tag = TAG_LastImplementedMPEG4; #endif } nb_in = nb_not_in = 0; sg = gf_sg_new(); if (graph_type==1) { fprintf(stderr, "Available X3D nodes in this build (dumping):\n"); } else if (graph_type==2) { fprintf(stderr, "Available SVG nodes in this build (dumping and LASeR coding):\n"); } else { fprintf(stderr, "Available MPEG-4 nodes in this build (encoding/decoding/dumping):\n"); } for (i=start_tag; i<end_tag; i++) { GF_Node *node = gf_node_new(sg, i); if (node) { gf_node_register(node, NULL); if (dump_nodes) { do_print_node(node, sg, gf_node_get_class_name(node), graph_type, GF_FALSE, GF_TRUE); } else { fprintf(stderr, " %s\n", gf_node_get_class_name(node)); } gf_node_unregister(node, NULL); nb_in++; } else { if (graph_type==2) break; nb_not_in++; } } gf_sg_del(sg); if (graph_type==2) { fprintf(stderr, "\n%d nodes supported\n", nb_in); } else { fprintf(stderr, "\n%d nodes supported - %d nodes not supported\n", nb_in, nb_not_in); } //coverage if (dump_nodes) { for (i=GF_SG_VRML_SFBOOL; i<GF_SG_VRML_SCRIPT_FUNCTION; i++) { void *fp = gf_sg_vrml_field_pointer_new(i); if (fp) { if (i==GF_SG_VRML_SFSCRIPT) gf_free(fp); else gf_sg_vrml_field_pointer_del(fp, i); } } } #else M4_LOG(GF_LOG_ERROR, ("No scene graph enabled in this MP4Box build\n")); #endif return 1; } u32 PrintBuiltInBoxes(char *argval, u32 do_cov) { u32 i, count=gf_isom_get_num_supported_boxes(); fprintf(stdout, "<Boxes>\n"); //index 0 is our internal unknown box handler for (i=1; i<count; i++) { gf_isom_dump_supported_box(i, stdout); if (do_cov) { u32 btype = gf_isom_get_supported_box_type(i); GF_Box *b=gf_isom_box_new(btype); if (b) { GF_Box *c=NULL; gf_isom_clone_box(b, &c); if (c) gf_isom_box_del(c); gf_isom_box_del(b); } } } fprintf(stdout, "</Boxes>\n"); return 1; } #if !defined(GPAC_DISABLE_ISOM_HINTING) && !defined(GPAC_DISABLE_ISOM_DUMP) void dump_isom_rtp(GF_ISOFile *file, char *inName, Bool is_final_name) { u32 i, j, size; FILE *dump; const char *sdp; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) strcat(szBuf, "_rtp.xml"); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s\n", szBuf)); return; } } else { dump = stdout; } fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"); fprintf(dump, "<!-- MP4Box RTP trace -->\n"); fprintf(dump, "<RTPFile>\n"); for (i=0; i<gf_isom_get_track_count(file); i++) { if (gf_isom_get_media_type(file, i+1) != GF_ISOM_MEDIA_HINT) continue; fprintf(dump, "<RTPHintTrack trackID=\"%d\">\n", gf_isom_get_track_id(file, i+1)); gf_isom_sdp_track_get(file, i+1, &sdp, &size); fprintf(dump, "<SDPInfo>%s</SDPInfo>", sdp); #ifndef GPAC_DISABLE_ISOM_HINTING for (j=0; j<gf_isom_get_sample_count(file, i+1); j++) { gf_isom_dump_hint_sample(file, i+1, j+1, dump); } #endif fprintf(dump, 
"</RTPHintTrack>\n"); } fprintf(dump, "</RTPFile>\n"); if (inName) gf_fclose(dump); } #endif void dump_isom_timestamps(GF_ISOFile *file, char *inName, Bool is_final_name, u32 dump_mode) { u32 i, j, k, count; Bool has_ctts_error, is_fragmented=GF_FALSE; FILE *dump; Bool skip_offset = ((dump_mode==2) || (dump_mode==4)) ? GF_TRUE : GF_FALSE; Bool check_ts = ((dump_mode==3) || (dump_mode==4)) ? GF_TRUE : GF_FALSE; struct _ts_info { u64 dts; s64 cts; }; struct _ts_info *timings = NULL; u32 nb_timings=0, nb_timings_alloc = 0; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) strcat(szBuf, "_ts.txt"); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s\n", szBuf)); return; } } else { dump = stdout; } if (gf_isom_is_fragmented(file)) is_fragmented = GF_TRUE; has_ctts_error = GF_FALSE; for (i=0; i<gf_isom_get_track_count(file); i++) { s64 cts_dts_shift = gf_isom_get_cts_to_dts_shift(file, i+1); u32 has_cts_offset = gf_isom_has_time_offset(file, i+1); fprintf(dump, "#dumping track ID %d timing:\n", gf_isom_get_track_id(file, i + 1)); fprintf(dump, "Num\tDTS\tCTS\tSize\tRAP%s\tisLeading\tDependsOn\tDependedOn\tRedundant\tRAP-SampleGroup\tRoll-SampleGroup\tRoll-Distance", skip_offset ? "" : "\tOffset"); if (is_fragmented) { fprintf(dump, "\tfrag_start"); } fprintf(dump, "\n"); count = gf_isom_get_sample_count(file, i+1); if (has_cts_offset && check_ts) { if (nb_timings_alloc<count) { nb_timings_alloc = count; timings = gf_realloc(timings, sizeof (struct _ts_info) * count); } nb_timings = 0; } for (j=0; j<count; j++) { s64 cts; u64 dts, offset; u32 isLeading, dependsOn, dependedOn, redundant; Bool is_rap; GF_ISOSampleRollType roll_type; s32 roll_distance; u32 index; GF_ISOSample *samp = gf_isom_get_sample_info(file, i+1, j+1, &index, &offset); if (!samp) { fprintf(dump, " SAMPLE #%d IN TRACK #%d NOT THERE !!!\n", j+1, i+1); continue; } gf_isom_get_sample_flags(file, i+1, j+1, &isLeading, &dependsOn, &dependedOn, &redundant); gf_isom_get_sample_rap_roll_info(file, i+1, j+1, &is_rap, &roll_type, &roll_distance); dts = samp->DTS; cts = dts + (s32) samp->CTS_Offset; fprintf(dump, "Sample %d\tDTS "LLU"\tCTS "LLD"\t%d\t%d", j+1, dts, cts, samp->dataLength, samp->IsRAP); if (!skip_offset) fprintf(dump, "\t"LLU, offset); fprintf(dump, "\t%d\t%d\t%d\t%d\t%d\t%d\t%d", isLeading, dependsOn, dependedOn, redundant, is_rap, roll_type, roll_distance); if (cts< (s64) dts) { if (has_cts_offset==2) { if (cts_dts_shift && (cts+cts_dts_shift < (s64) dts)) { fprintf(dump, " #NEGATIVE CTS OFFSET!!!"); has_ctts_error = 1; } else if (!cts_dts_shift) { fprintf(dump, " #possible negative CTS offset (no cslg in file)"); } } else { fprintf(dump, " #NEGATIVE CTS OFFSET!!!"); has_ctts_error = 1; } } if (has_cts_offset && check_ts) { for (k=0; k<nb_timings; k++) { if (timings[k].dts==dts) { fprintf(dump, " #SAME DTS USED!!!"); has_ctts_error = 1; } if (timings[k].cts==cts) { fprintf(dump, " #SAME CTS USED!!! 
"); has_ctts_error = 1; } } timings[nb_timings].dts = dts; timings[nb_timings].cts = cts; nb_timings++; } gf_isom_sample_del(&samp); if (is_fragmented) { fprintf(dump, "\t%d", gf_isom_sample_is_fragment_start(file, i+1, j+1, NULL) ); } fprintf(dump, "\n"); gf_set_progress("Dumping track timing", j+1, count); } fprintf(dump, "\n\n"); gf_set_progress("Dumping track timing", count, count); } if (timings) gf_free(timings); if (inName) gf_fclose(dump); if (has_ctts_error) { M4_LOG(GF_LOG_ERROR, ("\tFile has CTTS table errors\n")); } } static u32 read_nal_size_hdr(u8 *ptr, u32 nalh_size) { u32 nal_size=0; u32 v = nalh_size; while (v) { nal_size |= (u8) *ptr; ptr++; v-=1; if (v) nal_size <<= 8; } return nal_size; } #ifndef GPAC_DISABLE_AV_PARSERS void gf_inspect_dump_nalu(FILE *dump, u8 *ptr, u32 ptr_size, Bool is_svc, HEVCState *hevc, AVCState *avc, VVCState *vvc, u32 nalh_size, Bool dump_crc, Bool is_encrypted); #endif static void dump_isom_nal_ex(GF_ISOFile *file, GF_ISOTrackID trackID, FILE *dump, u32 dump_flags) { u32 i, j, count, nb_descs, track, nalh_size, timescale, cur_extract_mode; s32 countRef; Bool is_adobe_protected = GF_FALSE; Bool is_cenc_protected = GF_FALSE; Bool is_hevc = GF_FALSE; Bool is_vvc = GF_FALSE; #ifndef GPAC_DISABLE_AV_PARSERS AVCState *avc_state = NULL; HEVCState *hevc_state = NULL; VVCState *vvc_state = NULL; #endif GF_AVCConfig *avccfg, *svccfg; GF_HEVCConfig *hevccfg, *lhvccfg; GF_VVCConfig *vvccfg; GF_NALUFFParam *slc; Bool has_svcc = GF_FALSE; track = gf_isom_get_track_by_id(file, trackID); count = gf_isom_get_sample_count(file, track); timescale = gf_isom_get_media_timescale(file, track); cur_extract_mode = gf_isom_get_nalu_extract_mode(file, track); nb_descs = gf_isom_get_sample_description_count(file, track); if (!nb_descs) { M4_LOG(GF_LOG_ERROR, ("Error: Track #%d has no sample description so is likely not NALU-based!\n", trackID)); return; } fprintf(dump, "<NALUTrack trackID=\"%d\" SampleCount=\"%d\" TimeScale=\"%d\">\n", trackID, count, timescale); #ifndef GPAC_DISABLE_AV_PARSERS #define DUMP_ARRAY(arr, name, loc, _is_svc)\ if (arr) {\ fprintf(dump, " <%sArray location=\"%s\">\n", name, loc);\ for (i=0; i<gf_list_count(arr); i++) {\ slc = gf_list_get(arr, i);\ fprintf(dump, " <NALU size=\"%d\" ", slc->size);\ gf_inspect_dump_nalu(dump, (u8 *) slc->data, slc->size, _is_svc, is_hevc ? hevc_state : NULL, avc_state, is_vvc ? vvc_state : NULL, nalh_size, (dump_flags&1) ? GF_TRUE : GF_FALSE, GF_FALSE);\ }\ fprintf(dump, " </%sArray>\n", name);\ }\ #else #define DUMP_ARRAY(arr, name, loc, _is_svc)\ if (arr) {\ fprintf(dump, " <%sArray location=\"%s\">\n", name, loc);\ for (i=0; i<gf_list_count(arr); i++) {\ slc = gf_list_get(arr, i);\ fprintf(dump, " <NALU size=\"%d\" ", slc->size);\ fprintf(dump, "/>\n");\ }\ fprintf(dump, " </%sArray>\n", name);\ }\ #endif nalh_size = 0; for (j=0; j<nb_descs; j++) { GF_AVCConfig *mvccfg; Bool is_svc; avccfg = gf_isom_avc_config_get(file, track, j+1); svccfg = gf_isom_svc_config_get(file, track, j+1); mvccfg = gf_isom_mvc_config_get(file, track, j+1); hevccfg = gf_isom_hevc_config_get(file, track, j+1); lhvccfg = gf_isom_lhvc_config_get(file, track, j+1); vvccfg = gf_isom_vvc_config_get(file, track, j+1); is_svc = (svccfg!=NULL) ? 
1:0; if (hevccfg || lhvccfg) { is_hevc = 1; #ifndef GPAC_DISABLE_AV_PARSERS GF_SAFEALLOC(hevc_state, HEVCState) #endif } else if (vvccfg) { is_vvc = 1; #ifndef GPAC_DISABLE_AV_PARSERS GF_SAFEALLOC(vvc_state, VVCState) #endif } else if (avccfg || svccfg || mvccfg) { #ifndef GPAC_DISABLE_AV_PARSERS GF_SAFEALLOC(avc_state, AVCState) #endif } //for tile tracks the hvcC is stored in the 'tbas' track if (!hevccfg && gf_isom_get_reference_count(file, track, GF_ISOM_REF_TBAS)) { u32 tk = 0; gf_isom_get_reference(file, track, GF_ISOM_REF_TBAS, 1, &tk); hevccfg = gf_isom_hevc_config_get(file, tk, 1); } fprintf(dump, " <NALUConfig>\n"); if (!avccfg && !svccfg && !hevccfg && !lhvccfg && !vvccfg) { M4_LOG(GF_LOG_ERROR, ("Error: Track #%d is not NALU or OBU based!\n", trackID)); return; } if (avccfg) { nalh_size = avccfg->nal_unit_size; DUMP_ARRAY(avccfg->sequenceParameterSets, "AVCSPS", "avcC", is_svc); DUMP_ARRAY(avccfg->pictureParameterSets, "AVCPPS", "avcC", is_svc) DUMP_ARRAY(avccfg->sequenceParameterSetExtensions, "AVCSPSEx", "avcC", is_svc) } if (is_svc) { if (!nalh_size) nalh_size = svccfg->nal_unit_size; DUMP_ARRAY(svccfg->sequenceParameterSets, "SVCSPS", "svcC", is_svc) DUMP_ARRAY(svccfg->pictureParameterSets, "SVCPPS", "svcC", is_svc) } if (mvccfg) { if (!nalh_size) nalh_size = mvccfg->nal_unit_size; DUMP_ARRAY(mvccfg->sequenceParameterSets, "SVCSPS", "mvcC", is_svc) DUMP_ARRAY(mvccfg->pictureParameterSets, "SVCPPS", "mvcC", is_svc) } if (hevccfg) { u32 idx; nalh_size = hevccfg->nal_unit_size; for (idx=0; idx<gf_list_count(hevccfg->param_array); idx++) { GF_NALUFFParamArray *ar = gf_list_get(hevccfg->param_array, idx); if (ar->type==GF_HEVC_NALU_SEQ_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCSPS", "hvcC", 0) } else if (ar->type==GF_HEVC_NALU_PIC_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCPPS", "hvcC", 0) } else if (ar->type==GF_HEVC_NALU_VID_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCVPS", "hvcC", 0) } else { DUMP_ARRAY(ar->nalus, "HEVCUnknownPS", "hvcC", 0) } } } if (vvccfg) { u32 idx; nalh_size = vvccfg->nal_unit_size; for (idx=0; idx<gf_list_count(vvccfg->param_array); idx++) { GF_NALUFFParamArray *ar = gf_list_get(vvccfg->param_array, idx); if (ar->type==GF_VVC_NALU_SEQ_PARAM) { DUMP_ARRAY(ar->nalus, "VVCSPS", "vvcC", 0) } else if (ar->type==GF_VVC_NALU_PIC_PARAM) { DUMP_ARRAY(ar->nalus, "VVCPPS", "vvcC", 0) } else if (ar->type==GF_VVC_NALU_VID_PARAM) { DUMP_ARRAY(ar->nalus, "VVCVPS", "vvcC", 0) } else { DUMP_ARRAY(ar->nalus, "VVCUnknownPS", "vvcC", 0) } } } if (lhvccfg) { u32 idx; nalh_size = lhvccfg->nal_unit_size; for (idx=0; idx<gf_list_count(lhvccfg->param_array); idx++) { GF_NALUFFParamArray *ar = gf_list_get(lhvccfg->param_array, idx); if (ar->type==GF_HEVC_NALU_SEQ_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCSPS", "lhcC", 0) } else if (ar->type==GF_HEVC_NALU_PIC_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCPPS", "lhcC", 0) } else if (ar->type==GF_HEVC_NALU_VID_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCVPS", "lhcC", 0) } else { DUMP_ARRAY(ar->nalus, "HEVCUnknownPS", "lhcC", 0) } } } fprintf(dump, " </NALUConfig>\n"); if (avccfg) gf_odf_avc_cfg_del(avccfg); if (svccfg) { gf_odf_avc_cfg_del(svccfg); has_svcc = GF_TRUE; } if (hevccfg) gf_odf_hevc_cfg_del(hevccfg); if (vvccfg) gf_odf_vvc_cfg_del(vvccfg); if (lhvccfg) gf_odf_hevc_cfg_del(lhvccfg); } /*fixme: for dumping encrypted track: we don't have neither avccfg nor svccfg*/ if (!nalh_size) nalh_size = 4; /*for testing dependency*/ countRef = gf_isom_get_reference_count(file, track, GF_ISOM_REF_SCAL); if (countRef > 0) { GF_ISOTrackID refTrackID; fprintf(dump, " 
<SCALReferences>\n"); for (i = 1; i <= (u32) countRef; i++) { gf_isom_get_reference_ID(file, track, GF_ISOM_REF_SCAL, i, &refTrackID); fprintf(dump, " <SCALReference number=\"%d\" refTrackID=\"%d\"/>\n", i, refTrackID); } fprintf(dump, " </SCALReferences>\n"); } fprintf(dump, " <NALUSamples>\n"); gf_isom_set_nalu_extract_mode(file, track, GF_ISOM_NALU_EXTRACT_INSPECT); is_adobe_protected = gf_isom_is_adobe_protection_media(file, track, 1); is_cenc_protected = gf_isom_is_cenc_media(file, track, 1); for (i=0; i<count; i++) { u64 dts, cts; Bool is_rap; u32 size, nal_size, idx, di; u8 *ptr; GF_ISOSample *samp = gf_isom_get_sample(file, track, i+1, &di); if (!samp) { fprintf(dump, "<!-- Unable to fetch sample %d -->\n", i+1); continue; } dts = samp->DTS; cts = dts + (s32) samp->CTS_Offset; is_rap = samp->IsRAP; if (!is_rap) gf_isom_get_sample_rap_roll_info(file, track, i+1, &is_rap, NULL, NULL); if (dump_flags&2) { fprintf(dump, " <Sample size=\"%d\" RAP=\"%d\"", samp->dataLength, is_rap); } else { fprintf(dump, " <Sample DTS=\""LLD"\" CTS=\""LLD"\" size=\"%d\" RAP=\"%d\"", dts, cts, samp->dataLength, is_rap); } if (nb_descs>1) fprintf(dump, " sample_description=\"%d\"", di); fprintf(dump, " >\n"); if (cts<dts) fprintf(dump, "<!-- NEGATIVE CTS OFFSET! -->\n"); idx = 1; ptr = samp->data; size = samp->dataLength; if (is_adobe_protected) { u8 encrypted_au = ptr[0]; if (encrypted_au) { fprintf(dump, " <!-- Sample number %d is an Adobe's protected sample: can not be dumped -->\n", i+1); fprintf(dump, " </Sample>\n\n"); continue; } else { ptr++; size--; } } while (size) { nal_size = read_nal_size_hdr(ptr, nalh_size); ptr += nalh_size; if (nal_size >= UINT_MAX-nalh_size || nalh_size + nal_size > size) { fprintf(dump, " <!-- NALU number %d is corrupted: size is %d but only %d remains -->\n", idx, nal_size, size); break; } else { fprintf(dump, " <NALU size=\"%d\" ", nal_size); #ifndef GPAC_DISABLE_AV_PARSERS Bool is_encrypted = 0; if (is_cenc_protected) { GF_Err e = gf_isom_get_sample_cenc_info(file, track, i + 1, &is_encrypted, NULL, NULL, NULL, NULL); if (e != GF_OK) { fprintf(dump, "dump_msg=\"Error %s while fetching encryption info for sample, assuming sample is encrypted\" ", gf_error_to_string(e) ); is_encrypted = GF_TRUE; } } gf_inspect_dump_nalu(dump, ptr, nal_size, has_svcc ? 
1 : 0, hevc_state, avc_state, vvc_state, nalh_size, dump_flags, is_encrypted); #else fprintf(dump, "/>\n"); #endif } idx++; ptr+=nal_size; size -= nal_size + nalh_size; } fprintf(dump, " </Sample>\n"); gf_isom_sample_del(&samp); fprintf(dump, "\n"); gf_set_progress("Analysing Track NALUs", i+1, count); } fprintf(dump, " </NALUSamples>\n"); fprintf(dump, "</NALUTrack>\n"); gf_isom_set_nalu_extract_mode(file, track, cur_extract_mode); #ifndef GPAC_DISABLE_AV_PARSERS if (hevc_state) gf_free(hevc_state); if (vvc_state) gf_free(vvc_state); if (avc_state) gf_free(avc_state); #endif } static void dump_isom_obu(GF_ISOFile *file, GF_ISOTrackID trackID, FILE *dump, Bool dump_crc); static void dump_qt_prores(GF_ISOFile *file, GF_ISOTrackID trackID, FILE *dump, Bool dump_crc); void dump_isom_nal(GF_ISOFile *file, GF_ISOTrackID trackID, char *inName, Bool is_final_name, u32 dump_flags) { Bool is_av1 = GF_FALSE; Bool is_prores = GF_FALSE; FILE *dump; if (inName) { GF_ESD* esd; char szBuf[GF_MAX_PATH]; strcpy(szBuf, inName); u32 track = gf_isom_get_track_by_id(file, trackID); esd = gf_isom_get_esd(file, track, 1); if (!esd || !esd->decoderConfig) { switch (gf_isom_get_media_subtype(file, track, 1)) { case GF_ISOM_SUBTYPE_AV01: is_av1 = GF_TRUE; break; case GF_QT_SUBTYPE_APCH: case GF_QT_SUBTYPE_APCO: case GF_QT_SUBTYPE_APCN: case GF_QT_SUBTYPE_APCS: case GF_QT_SUBTYPE_AP4X: case GF_QT_SUBTYPE_AP4H: is_prores = GF_TRUE; break; } } else if (esd->decoderConfig->objectTypeIndication == GF_CODECID_AV1) { is_av1 = GF_TRUE; } if (esd) gf_odf_desc_del((GF_Descriptor*)esd); if (!is_final_name) sprintf(szBuf, "%s_%d_%s.xml", inName, trackID, is_av1 ? "obu" : "nalu"); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } } else { dump = stdout; } if (is_av1) dump_isom_obu(file, trackID, dump, dump_flags); else if (is_prores) dump_qt_prores(file, trackID, dump, dump_flags); else dump_isom_nal_ex(file, trackID, dump, dump_flags); if (inName) gf_fclose(dump); } #ifndef GPAC_DISABLE_AV_PARSERS void gf_inspect_dump_obu(FILE *dump, AV1State *av1, u8 *obu, u64 obu_length, ObuType obu_type, u64 obu_size, u32 hdr_size, Bool dump_crc); #endif static void dump_isom_obu(GF_ISOFile *file, GF_ISOTrackID trackID, FILE *dump, Bool dump_crc) { #ifndef GPAC_DISABLE_AV_PARSERS u32 i, count, track, timescale; AV1State av1; ObuType obu_type; u64 obu_size; u32 hdr_size; GF_BitStream *bs; u32 idx; track = gf_isom_get_track_by_id(file, trackID); gf_av1_init_state(&av1); av1.config = gf_isom_av1_config_get(file, track, 1); if (!av1.config) { M4_LOG(GF_LOG_ERROR, ("Error: Track #%d is not AV1!\n", trackID)); return; } count = gf_isom_get_sample_count(file, track); timescale = gf_isom_get_media_timescale(file, track); fprintf(dump, "<OBUTrack trackID=\"%d\" SampleCount=\"%d\" TimeScale=\"%d\">\n", trackID, count, timescale); fprintf(dump, " <OBUConfig>\n"); for (i=0; i<gf_list_count(av1.config->obu_array); i++) { GF_AV1_OBUArrayEntry *obu = gf_list_get(av1.config->obu_array, i); bs = gf_bs_new(obu->obu, (u32) obu->obu_length, GF_BITSTREAM_READ); gf_av1_parse_obu(bs, &obu_type, &obu_size, &hdr_size, &av1); gf_inspect_dump_obu(dump, &av1, obu->obu, obu->obu_length, obu_type, obu_size, hdr_size, dump_crc); gf_bs_del(bs); } fprintf(dump, " </OBUConfig>\n"); fprintf(dump, " <OBUSamples>\n"); for (i=0; i<count; i++) { u64 dts, cts; u32 size; u8 *ptr; GF_ISOSample *samp = gf_isom_get_sample(file, track, i+1, NULL); if (!samp) { fprintf(dump, "<!-- Unable to fetch sample %d -->\n", 
i+1); continue; } dts = samp->DTS; cts = dts + (s32) samp->CTS_Offset; fprintf(dump, " <Sample number=\"%d\" DTS=\""LLD"\" CTS=\""LLD"\" size=\"%d\" RAP=\"%d\" >\n", i+1, dts, cts, samp->dataLength, samp->IsRAP); if (cts<dts) fprintf(dump, "<!-- NEGATIVE CTS OFFSET! -->\n"); idx = 1; ptr = samp->data; size = samp->dataLength; bs = gf_bs_new(ptr, size, GF_BITSTREAM_READ); while (size) { gf_av1_parse_obu(bs, &obu_type, &obu_size, &hdr_size, &av1); if (obu_size > size) { fprintf(dump, " <!-- OBU number %d is corrupted: size is %d but only %d remains -->\n", idx, (u32) obu_size, size); break; } gf_inspect_dump_obu(dump, &av1, ptr, obu_size, obu_type, obu_size, hdr_size, dump_crc); ptr += obu_size; size -= (u32)obu_size; idx++; } gf_bs_del(bs); fprintf(dump, " </Sample>\n"); gf_isom_sample_del(&samp); fprintf(dump, "\n"); gf_set_progress("Analysing Track OBUs", i+1, count); } fprintf(dump, " </OBUSamples>\n"); fprintf(dump, "</OBUTrack>\n"); if (av1.config) gf_odf_av1_cfg_del(av1.config); gf_av1_reset_state(&av1, GF_TRUE); #endif } static void dump_qt_prores(GF_ISOFile *file, u32 trackID, FILE *dump, Bool dump_crc) { #ifndef GPAC_DISABLE_AV_PARSERS u32 i, count, track, timescale; track = gf_isom_get_track_by_id(file, trackID); count = gf_isom_get_sample_count(file, track); timescale = gf_isom_get_media_timescale(file, track); fprintf(dump, "<ProResTrack trackID=\"%d\" SampleCount=\"%d\" TimeScale=\"%d\">\n", trackID, count, timescale); for (i=0; i<count; i++) { void gf_inspect_dump_prores(FILE *dump, u8 *ptr, u64 frame_size, Bool dump_crc); u64 dts, cts; GF_ISOSample *samp = gf_isom_get_sample(file, track, i+1, NULL); if (!samp) { fprintf(dump, "<!-- Unable to fetch sample %d -->\n", i+1); continue; } dts = samp->DTS; cts = dts + (s32) samp->CTS_Offset; if (cts!=dts) fprintf(dump, "<!-- Wrong timing info (CTS "LLD" vs DTS "LLD") ! -->\n", cts, dts); if (!samp->IsRAP) fprintf(dump, "<!-- Wrong sync sample info, sample is not SAP1 ! 
-->\n"); fprintf(dump, " <Sample number=\"%d\" CTS=\""LLD"\" size=\"%d\">\n", i+1, cts, samp->dataLength); gf_inspect_dump_prores(dump, samp->data, samp->dataLength, dump_crc); fprintf(dump, " </Sample>\n"); gf_isom_sample_del(&samp); fprintf(dump, "\n"); gf_set_progress("Analysing ProRes Track", i+1, count); } fprintf(dump, "</ProResTrack>\n"); #endif } void dump_isom_saps(GF_ISOFile *file, GF_ISOTrackID trackID, u32 dump_saps_mode, char *inName, Bool is_final_name) { FILE *dump; u32 i, count; s64 media_offset=0; u32 track = gf_isom_get_track_by_id(file, trackID); if (inName) { char szBuf[GF_MAX_PATH]; strcpy(szBuf, inName); if (!is_final_name) sprintf(szBuf, "%s_%d_cues.xml", inName, trackID); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } } else { dump = stdout; } fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"); fprintf(dump, "<DASHCues xmlns=\"urn:gpac:dash:schema:cues:2018\">\n"); fprintf(dump, "<Stream id=\"%d\" timescale=\"%d\"", trackID, gf_isom_get_media_timescale(file, track) ); if (dump_saps_mode==4) { fprintf(dump, " mode=\"edit\""); gf_isom_get_edit_list_type(file, track, &media_offset); } fprintf(dump, ">\n"); count = gf_isom_get_sample_count(file, track); for (i=0; i<count; i++) { s64 cts, dts; u32 di; Bool traf_start = 0; u32 sap_type = 0; u64 doffset; GF_ISOSample *samp = gf_isom_get_sample_info(file, track, i+1, &di, &doffset); traf_start = gf_isom_sample_is_fragment_start(file, track, i+1, NULL); sap_type = samp->IsRAP; if (!sap_type) { Bool is_rap; GF_ISOSampleRollType roll_type; s32 roll_dist; gf_isom_get_sample_rap_roll_info(file, track, i+1, &is_rap, &roll_type, &roll_dist); if (roll_type) sap_type = SAP_TYPE_4; else if (is_rap) sap_type = SAP_TYPE_3; } if (!sap_type) { gf_isom_sample_del(&samp); continue; } dts = cts = samp->DTS; cts += samp->CTS_Offset; fprintf(dump, "<Cue sap=\"%d\"", sap_type); if (dump_saps_mode==4) { cts += media_offset; fprintf(dump, " cts=\""LLD"\"", cts); } else { if (!dump_saps_mode || (dump_saps_mode==1)) fprintf(dump, " sample=\"%d\"", i+1); if (!dump_saps_mode || (dump_saps_mode==2)) fprintf(dump, " cts=\""LLD"\"", cts); if (!dump_saps_mode || (dump_saps_mode==3)) fprintf(dump, " dts=\""LLD"\"", dts); } if (traf_start) fprintf(dump, " wasFragStart=\"yes\""); fprintf(dump, "/>\n"); gf_isom_sample_del(&samp); } fprintf(dump, "</Stream>\n"); fprintf(dump, "</DASHCues>\n"); if (inName) gf_fclose(dump); } #ifndef GPAC_DISABLE_ISOM_DUMP void dump_isom_ismacryp(GF_ISOFile *file, char *inName, Bool is_final_name) { u32 i, j; FILE *dump; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) strcat(szBuf, "_ismacryp.xml"); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } } else { dump = stdout; } fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"); fprintf(dump, "<!-- MP4Box ISMACryp trace -->\n"); fprintf(dump, "<ISMACrypFile>\n"); for (i=0; i<gf_isom_get_track_count(file); i++) { if (gf_isom_get_media_subtype(file, i+1, 1) != GF_ISOM_SUBTYPE_MPEG4_CRYP) continue; gf_isom_dump_ismacryp_protection(file, i+1, dump); fprintf(dump, "<ISMACrypTrack trackID=\"%d\">\n", gf_isom_get_track_id(file, i+1)); for (j=0; j<gf_isom_get_sample_count(file, i+1); j++) { gf_isom_dump_ismacryp_sample(file, i+1, j+1, dump); } fprintf(dump, "</ISMACrypTrack >\n"); } fprintf(dump, "</ISMACrypFile>\n"); if (inName) gf_fclose(dump); } void dump_isom_timed_text(GF_ISOFile *file, 
GF_ISOTrackID trackID, char *inName, Bool is_final_name, Bool is_convert, GF_TextDumpType dump_type) { FILE *dump; GF_Err e; u32 track; track = gf_isom_get_track_by_id(file, trackID); if (!track) { M4_LOG(GF_LOG_ERROR, ("Cannot find track ID %d\n", trackID)); return; } switch (gf_isom_get_media_type(file, track)) { case GF_ISOM_MEDIA_TEXT: case GF_ISOM_MEDIA_SUBT: break; default: M4_LOG(GF_LOG_ERROR, ("Track ID %d is not a 3GPP text track\n", trackID)); return; } if (inName) { char szBuf[1024]; char *ext; ext = ((dump_type==GF_TEXTDUMPTYPE_SVG) ? "svg" : ((dump_type==GF_TEXTDUMPTYPE_SRT) ? "srt" : "ttxt")); if (is_final_name) { strcpy(szBuf, inName) ; } else if (is_convert) sprintf(szBuf, "%s.%s", inName, ext) ; else sprintf(szBuf, "%s_%d_text.%s", inName, trackID, ext); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } } else { dump = stdout; } e = gf_isom_text_dump(file, track, dump, dump_type); if (inName) gf_fclose(dump); if (e) { M4_LOG(GF_LOG_ERROR, ("Conversion failed (%s)\n", gf_error_to_string(e))); } else { fprintf(stderr, "Conversion done\n"); } } #endif /*GPAC_DISABLE_ISOM_DUMP*/ #ifndef GPAC_DISABLE_ISOM_HINTING void dump_isom_sdp(GF_ISOFile *file, char *inName, Bool is_final_name) { const char *sdp; u32 size, i; FILE *dump; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) { char *ext = strchr(szBuf, '.'); if (ext) ext[0] = 0; strcat(szBuf, "_sdp.txt"); } dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } } else { dump = stdout; fprintf(dump, "# File SDP content \n\n"); } //get the movie SDP gf_isom_sdp_get(file, &sdp, &size); if (sdp && size) fprintf(dump, "%s", sdp); fprintf(dump, "\r\n"); //then tracks for (i=0; i<gf_isom_get_track_count(file); i++) { if (gf_isom_get_media_type(file, i+1) != GF_ISOM_MEDIA_HINT) continue; gf_isom_sdp_track_get(file, i+1, &sdp, &size); fprintf(dump, "%s", sdp); } fprintf(dump, "\n\n"); if (inName) gf_fclose(dump); } #endif #ifndef GPAC_DISABLE_ISOM_DUMP GF_Err dump_isom_xml(GF_ISOFile *file, char *inName, Bool is_final_name, Bool do_track_dump, Bool merge_vtt_cues, Bool skip_init, Bool skip_samples) { GF_Err e; FILE *dump = stdout; Bool do_close=GF_FALSE; if (!file) return GF_ISOM_INVALID_FILE; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) { strcat(szBuf, do_track_dump ? 
"_dump.xml" : "_info.xml"); } dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s\n", szBuf)); return GF_IO_ERR; } do_close=GF_TRUE; } fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"); if (do_track_dump) { fprintf(dump, "<ISOBaseMediaFileTrace>\n"); } e = gf_isom_dump(file, dump, skip_init, skip_samples); if (e) { M4_LOG(GF_LOG_ERROR, ("Error dumping ISO structure\n")); } if (do_track_dump) { #ifndef GPAC_DISABLE_MEDIA_EXPORT u32 i; //because of dump mode we need to reopen in regular read mode to avoid mem leaks GF_ISOFile *the_file = gf_isom_open(gf_isom_get_filename(file), GF_ISOM_OPEN_READ, NULL); u32 tcount = gf_isom_get_track_count(the_file); fprintf(dump, "<Tracks>\n"); for (i=0; i<tcount; i++) { GF_MediaExporter dumper; GF_ISOTrackID trackID = gf_isom_get_track_id(the_file, i+1); u32 mtype = gf_isom_get_media_type(the_file, i+1); u32 msubtype = gf_isom_get_media_subtype(the_file, i+1, 1); Bool fmt_handled = GF_FALSE; memset(&dumper, 0, sizeof(GF_MediaExporter)); dumper.file = the_file; dumper.trackID = trackID; dumper.dump_file = dump; if (mtype == GF_ISOM_MEDIA_HINT) { #ifndef GPAC_DISABLE_ISOM_HINTING char *name=NULL; if (msubtype==GF_ISOM_SUBTYPE_RTP) name = "RTPHintTrack"; else if (msubtype==GF_ISOM_SUBTYPE_SRTP) name = "SRTPHintTrack"; else if (msubtype==GF_ISOM_SUBTYPE_RRTP) name = "RTPReceptionHintTrack"; else if (msubtype==GF_ISOM_SUBTYPE_RTCP) name = "RTCPReceptionHintTrack"; else if (msubtype==GF_ISOM_SUBTYPE_FLUTE) name = "FLUTEReceptionHintTrack"; else name = "UnknownHintTrack"; fprintf(dump, "<%s trackID=\"%d\">\n", name, trackID); #ifndef GPAC_DISABLE_ISOM_HINTING u32 j, scount=gf_isom_get_sample_count(the_file, i+1); for (j=0; j<scount; j++) { gf_isom_dump_hint_sample(the_file, i+1, j+1, dump); } #endif fprintf(dump, "</%s>\n", name); fmt_handled = GF_TRUE; #endif /*GPAC_DISABLE_ISOM_HINTING*/ } else if (gf_isom_get_avc_svc_type(the_file, i+1, 1) || gf_isom_get_hevc_lhvc_type(the_file, i+1, 1)) { dump_isom_nal_ex(the_file, trackID, dump, GF_FALSE); fmt_handled = GF_TRUE; } else if ((mtype==GF_ISOM_MEDIA_TEXT) || (mtype==GF_ISOM_MEDIA_SUBT) ) { if (msubtype==GF_ISOM_SUBTYPE_WVTT) { gf_webvtt_dump_iso_track(&dumper, i+1, merge_vtt_cues, GF_TRUE); fmt_handled = GF_TRUE; } else if ((msubtype==GF_ISOM_SUBTYPE_TX3G) || (msubtype==GF_ISOM_SUBTYPE_TEXT)) { gf_isom_text_dump(the_file, i+1, dump, GF_TEXTDUMPTYPE_TTXT_BOXES); fmt_handled = GF_TRUE; } } if (!fmt_handled) { dumper.flags = GF_EXPORT_NHML | GF_EXPORT_NHML_FULL; dumper.print_stats_graph = fs_dump_flags; gf_media_export(&dumper); } } #else return GF_NOT_SUPPORTED; #endif /*GPAC_DISABLE_MEDIA_EXPORT*/ gf_isom_delete(the_file); fprintf(dump, "</Tracks>\n"); fprintf(dump, "</ISOBaseMediaFileTrace>\n"); } if (do_close) gf_fclose(dump); return e; } #endif static char *format_duration(u64 dur, u32 timescale, char *szDur) { u32 h, m, s, ms; if ((dur==(u64) -1) || (dur==(u32) -1)) { strcpy(szDur, "Unknown"); return szDur; } dur = (u64) (( ((Double) (s64) dur)/timescale)*1000); h = (u32) (dur / 3600000); m = (u32) (dur/ 60000) - h*60; s = (u32) (dur/1000) - h*3600 - m*60; ms = (u32) (dur) - h*3600000 - m*60000 - s*1000; if (h<=24) { sprintf(szDur, "%02d:%02d:%02d.%03d", h, m, s, ms); } else { u32 d = (u32) (dur / 3600000 / 24); h = (u32) (dur/3600000)-24*d; if (d<=365) { sprintf(szDur, "%d Days, %02d:%02d:%02d.%03d", d, h, m, s, ms); } else { u32 y=0; while (d>365) { y++; d-=365; if (y%4) d--; } sprintf(szDur, "%d Years %d Days, %02d:%02d:%02d.%03d", y, d, h, m, s, ms); } } return 
szDur; } static char *format_date(u64 time, char *szTime) { time_t now; if (!time) { strcpy(szTime, "UNKNOWN DATE"); } else { time -= 2082844800; now = (u32) time; sprintf(szTime, "GMT %s", asctime(gf_gmtime(&now)) ); } return szTime; } void print_udta(GF_ISOFile *file, u32 track_number, Bool has_itags) { u32 i, count; count = gf_isom_get_udta_count(file, track_number); if (!count) return; if (has_itags) { for (i=0; i<count; i++) { u32 type; bin128 uuid; gf_isom_get_udta_type(file, track_number, i+1, &type, &uuid); if (type == GF_ISOM_BOX_TYPE_META) { count--; break; } } if (!count) return; } fprintf(stderr, "%d UDTA types: ", count); for (i=0; i<count; i++) { u32 j, type, nb_items, first=GF_TRUE; bin128 uuid; gf_isom_get_udta_type(file, track_number, i+1, &type, &uuid); nb_items = gf_isom_get_user_data_count(file, track_number, type, uuid); fprintf(stderr, "%s (%d) ", gf_4cc_to_str(type), nb_items); for (j=0; j<nb_items; j++) { u8 *udta=NULL; u32 udta_size; gf_isom_get_user_data(file, track_number, type, uuid, j+1, &udta, &udta_size); if (!udta) continue; if (gf_utf8_is_legal(udta, udta_size)) { if (first) { fprintf(stderr, "\n"); first = GF_FALSE; } fprintf(stderr, "\t%s\n", (char *) udta); } gf_free(udta); } } fprintf(stderr, "\n"); } GF_Err dump_isom_udta(GF_ISOFile *file, char *inName, Bool is_final_name, u32 dump_udta_type, u32 dump_udta_track) { u8 *data; FILE *t; bin128 uuid; u32 count, res; GF_Err e; memset(uuid, 0, 16); count = gf_isom_get_user_data_count(file, dump_udta_track, dump_udta_type, uuid); if (!count) { M4_LOG(GF_LOG_ERROR, ("No UDTA for type %s found\n", gf_4cc_to_str(dump_udta_type) )); return GF_NOT_FOUND; } data = NULL; count = 0; e = gf_isom_get_user_data(file, dump_udta_track, dump_udta_type, uuid, 0, &data, &count); if (e) { M4_LOG(GF_LOG_ERROR, ("Error dumping UDTA %s: %s\n", gf_4cc_to_str(dump_udta_type), gf_error_to_string(e) )); return e; } if (inName) { char szName[1024]; if (is_final_name) strcpy(szName, inName); else sprintf(szName, "%s_%s.udta", inName, gf_4cc_to_str(dump_udta_type) ); t = gf_fopen(szName, "wb"); if (!t) { gf_free(data); M4_LOG(GF_LOG_ERROR, ("Cannot open file %s\n", szName )); return GF_IO_ERR; } } else { t = stdout; } res = (u32) gf_fwrite(data+8, count-8, t); if (inName) gf_fclose(t); gf_free(data); if (count-8 != res) { M4_LOG(GF_LOG_ERROR, ("Error writing udta to file\n")); return GF_IO_ERR; } return GF_OK; } GF_Err dump_isom_chapters(GF_ISOFile *file, char *inName, Bool is_final_name, u32 dump_mode) { FILE *t; u32 i, count; u32 chap_tk = 0; count = gf_isom_get_chapter_count(file, 0); if (dump_mode==2) dump_mode = GF_TEXTDUMPTYPE_OGG_CHAP; else if (dump_mode==3) dump_mode = GF_TEXTDUMPTYPE_ZOOM_CHAP; else dump_mode = GF_TEXTDUMPTYPE_TTXT_CHAP; if (!count) { for (i=0; i<gf_isom_get_track_count(file); i++) { if (gf_isom_get_reference_count(file, i+1, GF_ISOM_REF_CHAP)) { GF_Err e = gf_isom_get_reference(file, i+1, GF_ISOM_REF_CHAP, 1, &chap_tk); if (!e) break; } } if (!chap_tk) { M4_LOG(GF_LOG_WARNING, ("No chapters or chapters track found in file\n")); return GF_OK; } fprintf(stderr, "Dumping chapter track %d\n", chap_tk); dump_isom_timed_text(file, gf_isom_get_track_id(file, chap_tk), inName, is_final_name, GF_FALSE, dump_mode); return GF_OK; } if (inName) { char szName[1024]; strcpy(szName, inName); if (!is_final_name) { if (dump_mode==GF_TEXTDUMPTYPE_OGG_CHAP) { strcat(szName, ".txt"); } else if (dump_mode==GF_TEXTDUMPTYPE_ZOOM_CHAP) { strcat(szName, ".txt"); } else { strcat(szName, ".ttxt"); } } t = gf_fopen(szName, "wt"); if 
(!t) return GF_IO_ERR; } else { t = stdout; } if (dump_mode==GF_TEXTDUMPTYPE_TTXT_CHAP) { fprintf(t, "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"); fprintf(t, "<TextStream version=\"1.1\">\n"); fprintf(t, "<TextStreamHeader width=\"0\" height=\"0\" layer=\"0\" translation_x=\"0\" translation_y=\"0\">\n"); fprintf(t, "<TextSampleDescription horizontalJustification=\"left\" backColor=\"0 0 0\" scroll=\"None\"/>\n"); fprintf(t, "</TextStreamHeader>\n"); } for (i=0; i<count; i++) { char szDur[20]; u64 chapter_time; const char *name; gf_isom_get_chapter(file, 0, i+1, &chapter_time, &name); if (dump_mode==GF_TEXTDUMPTYPE_OGG_CHAP) { fprintf(t, "CHAPTER%02d=%s\n", i+1, format_duration(chapter_time, 1000, szDur)); fprintf(t, "CHAPTER%02dNAME=%s\n", i+1, name); } else if (dump_mode==GF_TEXTDUMPTYPE_ZOOM_CHAP) { chapter_time /= 1000; fprintf(t, "AddChapterBySecond("LLD",%s)\n", chapter_time, name); } else { fprintf(t, "<TextSample sampleTime=\"%s\" sampleDescriptionIndex=\"1\" xml:space=\"preserve\">%s</TextSample>\n" , format_duration(chapter_time, 1000, szDur), name); } } if (dump_mode==GF_TEXTDUMPTYPE_TTXT_CHAP) { fprintf(t, "</TextStream>\n"); } if (inName) gf_fclose(t); return GF_OK; } static void dump_key_info(const u8 *key_info, u32 key_info_size, Bool is_protected) { if (!key_info) return; u32 j, k, kpos=3; u32 nb_keys = 1; if (key_info[0]) { nb_keys = key_info[1]; nb_keys <<= 8; nb_keys |= key_info[2]; } for (k=0; k<nb_keys; k++) { u8 constant_iv_size=0; u8 iv_size=key_info[kpos+1]; fprintf(stderr, "\t\tKID"); if (nb_keys>1) fprintf(stderr, "%d", k+1); fprintf(stderr, " "); for (j=0; j<16; j++) fprintf(stderr, "%02X", key_info[kpos+1+j]); kpos+=17; if (!iv_size && is_protected) { constant_iv_size = key_info[1]; kpos += 1 + constant_iv_size; } fprintf(stderr, " - %sIV size %d \n", constant_iv_size ? "const " : "", constant_iv_size ? constant_iv_size : iv_size); } } static void DumpMetaItem(GF_ISOFile *file, Bool root_meta, u32 tk_num, char *name) { char szInd[2]; u32 i, count, primary_id; u32 meta_type = gf_isom_get_meta_type(file, root_meta, tk_num); if (name[0]=='\t') { szInd[0] = '\t'; szInd[1] = 0; } else { szInd[0] = 0; } count = gf_isom_get_meta_item_count(file, root_meta, tk_num); primary_id = gf_isom_get_meta_primary_item_id(file, root_meta, tk_num); fprintf(stderr, "%s type: \"%s\" - %d resource item(s)\n", name, meta_type ? 
gf_4cc_to_str(meta_type) : "undefined", (count+(primary_id>0))); switch (gf_isom_has_meta_xml(file, root_meta, tk_num)) { case 1: fprintf(stderr, "%sMeta has XML resource\n", szInd); break; case 2: fprintf(stderr, "%sMeta has BinaryXML resource\n", szInd); break; } if (primary_id) { fprintf(stderr, "%sPrimary Item - ID %d\n", szInd, primary_id); } for (i=0; i<count; i++) { const char *it_name, *mime, *enc, *url, *urn; Bool self_ref; u32 ID; u32 it_type, cenc_scheme, cenc_version; GF_Err e = gf_isom_get_meta_item_info(file, root_meta, tk_num, i+1, &ID, &it_type, &cenc_scheme, &cenc_version, &self_ref, &it_name, &mime, &enc, &url, &urn); if (e) { fprintf(stderr, "%sItem #%d fetch info error: %s\n", szInd, i+1, gf_error_to_string(e) ); continue; } fprintf(stderr, "%sItem #%d: ID %d type %s", szInd, i+1, ID, gf_4cc_to_str(it_type)); if (self_ref) fprintf(stderr, " Self-Reference"); else if (it_name && it_name[0]) fprintf(stderr, " Name \"%s\"", it_name); if (mime) fprintf(stderr, " MIME: \"%s\"", mime); if (enc) fprintf(stderr, " ContentEncoding: \"%s\"", enc); if (meta_type == GF_META_ITEM_TYPE_PICT) { GF_ImageItemProperties img_props; e = gf_isom_get_meta_image_props(file, root_meta, tk_num, ID, &img_props); if (e) { fprintf(stderr, " invalid image properties !"); } else { u32 j; Bool chan_diff = 0; if (img_props.width && img_props.height) { fprintf(stderr, " size %ux%u", img_props.width, img_props.height); } if (img_props.hSpacing && img_props.vSpacing) { fprintf(stderr, " SAR %u/%u", img_props.hSpacing, img_props.vSpacing); } if (img_props.num_channels) { fprintf(stderr, " %d channel%s (", img_props.num_channels, (img_props.num_channels>1) ? "s" : ""); for (j=1; j<img_props.num_channels; j++) { if (img_props.bits_per_channel[0] != img_props.bits_per_channel[j]) chan_diff = 1; } if (chan_diff) { for (j=0; j<img_props.num_channels; j++) { if (j) fprintf(stderr, ","); fprintf(stderr, "%d", img_props.bits_per_channel[j]); } } else { fprintf(stderr, "%d", img_props.bits_per_channel[0]); } fprintf(stderr, " bpc)"); } if (img_props.hOffset || img_props.vOffset) fprintf(stderr, " Offset %ux%u", img_props.hOffset, img_props.vOffset); if (img_props.alpha) fprintf(stderr, " Alpha"); if (img_props.hidden) fprintf(stderr, " Hidden"); if (img_props.angle) fprintf(stderr, " Rotate %d", img_props.angle); if (img_props.mirror) fprintf(stderr, " Mirror %d", img_props.mirror); if (img_props.clap_hden || img_props.clap_wden) fprintf(stderr, " Clap %d/%d,%d/%d,%d/%d,%d/%d", img_props.clap_wnum, img_props.clap_wden, img_props.clap_hnum, img_props.clap_hden, img_props.clap_honum, img_props.clap_hoden, img_props.clap_vonum, img_props.clap_voden); } } if (cenc_scheme) { Bool is_protected; u8 skip_byte_block, crypt_byte_block; const u8 *key_info; u32 key_info_size; fprintf(stderr, " - Protection scheme: %s v0x%08X", gf_4cc_to_str(cenc_scheme), cenc_version); gf_isom_extract_meta_item_get_cenc_info(file, root_meta, tk_num, ID, &is_protected, &skip_byte_block, &crypt_byte_block, &key_info, &key_info_size, NULL, NULL, NULL, NULL); if (skip_byte_block && crypt_byte_block) fprintf(stderr, " - Pattern %d:%d", skip_byte_block, crypt_byte_block); fprintf(stderr, "\n"); dump_key_info(key_info, key_info_size, is_protected); } fprintf(stderr, "\n"); if (url) fprintf(stderr, "%sURL: %s\n", szInd, url); if (urn) fprintf(stderr, "%sURN: %s\n", szInd, urn); } } static void print_config_hash(GF_List *xps_array, char *szName) { u32 i, j; u8 hash[20]; for (i=0; i<gf_list_count(xps_array); i++) { GF_NALUFFParam *slc = 
gf_list_get(xps_array, i); gf_sha1_csum((u8 *) slc->data, slc->size, hash); fprintf(stderr, "\t%s#%d hash: ", szName, i+1); for (j=0; j<20; j++) fprintf(stderr, "%02X", hash[j]); fprintf(stderr, "\n"); } } void dump_hevc_track_info(GF_ISOFile *file, u32 trackNum, GF_HEVCConfig *hevccfg #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) , HEVCState *hevc_state #endif /*GPAC_DISABLE_AV_PARSERS && defined(GPAC_DISABLE_HEVC)*/ ) { #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) u32 idx; #endif u32 k; Bool non_hevc_base_layer=GF_FALSE; fprintf(stderr, "\t%s Info:", hevccfg->is_lhvc ? "LHVC" : "HEVC"); if (!hevccfg->is_lhvc) fprintf(stderr, " Profile %s @ Level %g - Chroma Format %s\n", gf_hevc_get_profile_name(hevccfg->profile_idc), ((Double)hevccfg->level_idc) / 30.0, gf_avc_hevc_get_chroma_format_name(hevccfg->chromaFormat)); fprintf(stderr, "\n"); fprintf(stderr, "\tNAL Unit length bits: %d", 8*hevccfg->nal_unit_size); if (!hevccfg->is_lhvc) fprintf(stderr, " - general profile compatibility 0x%08X\n", hevccfg->general_profile_compatibility_flags); fprintf(stderr, "\n"); fprintf(stderr, "\tParameter Sets: "); for (k=0; k<gf_list_count(hevccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(hevccfg->param_array, k); if (ar->type==GF_HEVC_NALU_SEQ_PARAM) { fprintf(stderr, "%d SPS ", gf_list_count(ar->nalus)); } if (ar->type==GF_HEVC_NALU_PIC_PARAM) { fprintf(stderr, "%d PPS ", gf_list_count(ar->nalus)); } if (ar->type==GF_HEVC_NALU_VID_PARAM) { fprintf(stderr, "%d VPS ", gf_list_count(ar->nalus)); #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) for (idx=0; idx<gf_list_count(ar->nalus); idx++) { GF_NALUFFParam *vps = gf_list_get(ar->nalus, idx); s32 ps_idx=gf_hevc_read_vps(vps->data, vps->size, hevc_state); if (hevccfg->is_lhvc && (ps_idx>=0)) { non_hevc_base_layer = ! hevc_state->vps[ps_idx].base_layer_internal_flag; } } #endif } } fprintf(stderr, "\n"); #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) for (k=0; k<gf_list_count(hevccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(hevccfg->param_array, k); u32 width, height; s32 par_n, par_d; if (ar->type !=GF_HEVC_NALU_SEQ_PARAM) continue; for (idx=0; idx<gf_list_count(ar->nalus); idx++) { GF_Err e; GF_NALUFFParam *sps = gf_list_get(ar->nalus, idx); par_n = par_d = -1; e = gf_hevc_get_sps_info_with_state(hevc_state, sps->data, sps->size, NULL, &width, &height, &par_n, &par_d); if (e==GF_OK) { fprintf(stderr, "\tSPS resolution %dx%d", width, height); if ((par_n>0) && (par_d>0)) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, " - Pixel Aspect Ratio %d:%d - Indicated track size %d x %d", par_n, par_d, tw, th); } fprintf(stderr, "\n"); } else { M4_LOG(GF_LOG_ERROR, ("Failed to read SPS: %s\n\n", gf_error_to_string(e) )); } } } #endif if (!hevccfg->is_lhvc) fprintf(stderr, "\tBit Depth luma %d - Chroma %d - %d temporal layers\n", hevccfg->luma_bit_depth, hevccfg->chroma_bit_depth, hevccfg->numTemporalLayers); else fprintf(stderr, "\t%d temporal layers\n", hevccfg->numTemporalLayers); if (hevccfg->is_lhvc) { fprintf(stderr, "\t%sHEVC base layer - Complete representation %d\n", non_hevc_base_layer ? 
"Non-" : "", hevccfg->complete_representation); } for (k=0; k<gf_list_count(hevccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(hevccfg->param_array, k); if (ar->type==GF_HEVC_NALU_SEQ_PARAM) print_config_hash(ar->nalus, "SPS"); else if (ar->type==GF_HEVC_NALU_PIC_PARAM) print_config_hash(ar->nalus, "PPS"); else if (ar->type==GF_HEVC_NALU_VID_PARAM) print_config_hash(ar->nalus, "VPS"); } } void dump_vvc_track_info(GF_ISOFile *file, u32 trackNum, GF_VVCConfig *vvccfg #if !defined(GPAC_DISABLE_AV_PARSERS) , VVCState *vvc_state #endif /*GPAC_DISABLE_AV_PARSERS && defined(GPAC_DISABLE_HEVC)*/ ) { #if !defined(GPAC_DISABLE_AV_PARSERS) u32 idx; #endif u32 k; fprintf(stderr, "\tVVC Info:"); fprintf(stderr, " Profile %d @ Level %d - Chroma Format %s\n", vvccfg->general_profile_idc, vvccfg->general_level_idc, vvccfg->chromaformat_plus_one ? gf_avc_hevc_get_chroma_format_name(vvccfg->chromaformat_plus_one-1) : "n/a"); fprintf(stderr, "\n"); fprintf(stderr, "\tNAL Unit length bits: %d", 8*vvccfg->nal_unit_size); if (vvccfg->general_constraint_info && vvccfg->num_constraint_info && vvccfg->general_constraint_info[0]) { fprintf(stderr, " - general constraint info 0x"); for (idx=0; idx<vvccfg->num_constraint_info; idx++) { fprintf(stderr, "%02X", vvccfg->general_constraint_info[idx]); } } fprintf(stderr, "\n"); fprintf(stderr, "\tParameter Sets: "); for (k=0; k<gf_list_count(vvccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(vvccfg->param_array, k); if (ar->type==GF_VVC_NALU_SEQ_PARAM) { fprintf(stderr, "%d SPS ", gf_list_count(ar->nalus)); } if (ar->type==GF_VVC_NALU_PIC_PARAM) { fprintf(stderr, "%d PPS ", gf_list_count(ar->nalus)); } if (ar->type==GF_VVC_NALU_VID_PARAM) { fprintf(stderr, "%d VPS ", gf_list_count(ar->nalus)); #if !defined(GPAC_DISABLE_AV_PARSERS) && 0 //TODO for (idx=0; idx<gf_list_count(ar->nalus); idx++) { GF_NALUFFParam *vps = gf_list_get(ar->nalus, idx); s32 ps_idx=gf_hevc_read_vps(vps->data, vps->size, hevc_state); if (hevccfg->is_lhvc && (ps_idx>=0)) { non_hevc_base_layer = ! 
hevc_state->vps[ps_idx].base_layer_internal_flag; } } #endif } } fprintf(stderr, "\n"); #if !defined(GPAC_DISABLE_AV_PARSERS) && 0 //TODO for (k=0; k<gf_list_count(vvccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(vvccfg->param_array, k); u32 width, height; s32 par_n, par_d; if (ar->type !=GF_VVC_NALU_SEQ_PARAM) continue; for (idx=0; idx<gf_list_count(ar->nalus); idx++) { GF_Err e; GF_NALUFFParam *sps = gf_list_get(ar->nalus, idx); par_n = par_d = -1; e = gf_vvc_get_sps_info_with_state(vvc_state, sps->data, sps->size, NULL, &width, &height, &par_n, &par_d); if (e==GF_OK) { fprintf(stderr, "\tSPS resolution %dx%d", width, height); if ((par_n>0) && (par_d>0)) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, " - Pixel Aspect Ratio %d:%d - Indicated track size %d x %d", par_n, par_d, tw, th); } fprintf(stderr, "\n"); } else { M4_LOG(GF_LOG_ERROR, ("\nFailed to read SPS: %s\n\n", gf_error_to_string(e) )); } } } #endif fprintf(stderr, "\tBit Depth %d - %d temporal layers\n", vvccfg->bit_depth_plus_one-1, vvccfg->numTemporalLayers); for (k=0; k<gf_list_count(vvccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(vvccfg->param_array, k); if (ar->type==GF_VVC_NALU_SEQ_PARAM) print_config_hash(ar->nalus, "SPS"); else if (ar->type==GF_VVC_NALU_PIC_PARAM) print_config_hash(ar->nalus, "PPS"); else if (ar->type==GF_VVC_NALU_VID_PARAM) print_config_hash(ar->nalus, "VPS"); } } void gf_inspect_format_timecode(const u8 *data, u32 size, u32 tmcd_flags, u32 tc_num, u32 tc_den, u32 tmcd_fpt, char szFmt[100]); void DumpTrackInfo(GF_ISOFile *file, GF_ISOTrackID trackID, Bool full_dump, Bool is_track_num, Bool dump_m4sys) { char szCodec[RFC6381_CODEC_NAME_SIZE_MAX]; Double scale, max_rate, rate; Bool is_od_track = 0; u32 trackNum, i, j, ts, mtype, msub_type, timescale, sr, nb_ch, count, alt_group, nb_groups, nb_edits, cdur, csize, bps, pfmt, codecid; u64 time_slice, dur, size; s32 cts_shift; GF_ESD *esd; char szDur[50]; char *lang; if (!is_track_num) { trackNum = gf_isom_get_track_by_id(file, trackID); } else { trackNum = trackID; trackID = gf_isom_get_track_id(file, trackNum); } if (!trackNum) { M4_LOG(GF_LOG_ERROR, ("No track with ID %d found\n", trackID)); return; } timescale = gf_isom_get_media_timescale(file, trackNum); fprintf(stderr, "# Track %d Info - ID %d - TimeScale %d\n", trackNum, trackID, timescale); dur = gf_isom_get_media_original_duration(file, trackNum); size = gf_isom_get_media_duration(file, trackNum); fprintf(stderr, "Media Duration %s ", format_duration(dur, timescale, szDur)); if (dur != size) fprintf(stderr, " (recomputed %s)", format_duration(size, timescale, szDur)); fprintf(stderr, "\n"); if (gf_isom_check_data_reference(file, trackNum, 1) != GF_OK) { M4_LOG(GF_LOG_WARNING, ("Track uses external data reference not supported by GPAC!\n")); } nb_edits = gf_isom_get_edits_count(file, trackNum); if (nb_edits) fprintf(stderr, "Track has %d edits: track duration is %s\n", nb_edits, format_duration(gf_isom_get_track_duration(file, trackNum), gf_isom_get_timescale(file), szDur)); cts_shift = gf_isom_get_composition_offset_shift(file, trackNum); if (cts_shift) fprintf(stderr, "Track composition offset shift (negative CTS offset): %d\n", cts_shift); if (gf_isom_is_track_in_root_od(file, trackNum) ) fprintf(stderr, "Track is present in Root OD\n"); if (!gf_isom_is_track_enabled(file, trackNum)) fprintf(stderr, "Track is disabled\n"); gf_isom_get_media_language(file, trackNum, &lang); fprintf(stderr, "Media Info: 
Language \"%s (%s)\" - ", GetLanguage(lang), lang ); gf_free(lang); mtype = gf_isom_get_media_type(file, trackNum); fprintf(stderr, "Type \"%s:", gf_4cc_to_str(mtype)); msub_type = gf_isom_get_mpeg4_subtype(file, trackNum, 1); if (!msub_type) msub_type = gf_isom_get_media_subtype(file, trackNum, 1); fprintf(stderr, "%s\" - %d samples\n", gf_4cc_to_str(msub_type), gf_isom_get_sample_count(file, trackNum)); pfmt = gf_pixel_fmt_from_qt_type(msub_type); codecid = gf_codec_id_from_isobmf(msub_type); count = gf_isom_get_track_kind_count(file, trackNum); for (i = 0; i < count; i++) { char *kind_scheme, *kind_value; gf_isom_get_track_kind(file, trackNum, i, &kind_scheme, &kind_value); fprintf(stderr, "Kind: %s - %s\n", kind_scheme ? kind_scheme : "null", kind_value ? kind_value : "null"); if (kind_scheme) gf_free(kind_scheme); if (kind_value) gf_free(kind_value); } if (gf_isom_is_track_fragmented(file, trackID) ) { u32 defaultDuration, defaultSize, defaultDescriptionIndex, defaultRandomAccess; u8 defaultPadding; u16 defaultDegradationPriority; u32 frag_samples; u64 frag_duration; gf_isom_get_fragmented_samples_info(file, trackID, &frag_samples, &frag_duration); fprintf(stderr, "Fragmented track: %d samples - Media Duration %s\n", frag_samples, format_duration(frag_duration, timescale, szDur)); gf_isom_get_fragment_defaults(file, trackNum, &defaultDuration, &defaultSize, &defaultDescriptionIndex, &defaultRandomAccess, &defaultPadding, &defaultDegradationPriority); fprintf(stderr, "Fragment sample defaults: duration %d size %d stsd %d sync %d padding %d degradation_priority %d\n", defaultDuration, defaultSize, defaultDescriptionIndex, defaultRandomAccess, (u32) defaultPadding, (u32) defaultDegradationPriority ); } if (!gf_isom_is_self_contained(file, trackNum, 1)) { const char *url, *urn; gf_isom_get_data_reference(file, trackNum, 1, &url, &urn); fprintf(stderr, "Media Data Location: %s\n", url ? 
url : urn); } if (full_dump) { const char *handler_name; gf_isom_get_handler_name(file, trackNum, &handler_name); fprintf(stderr, "Handler name: %s\n", handler_name); } print_udta(file, trackNum, GF_FALSE); if (gf_isom_is_video_handler_type(mtype) ) { s32 tx, ty; u32 w, h; u16 bit_depth; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); gf_isom_get_visual_bit_depth(file, trackNum, 1, &bit_depth); fprintf(stderr, "Visual Sample Entry Info: width=%d height=%d (depth=%d bits)\n", w, h, (int)bit_depth); gf_isom_get_track_layout_info(file, trackNum, &w, &h, &tx, &ty, NULL); fprintf(stderr, "Visual Track layout: x=%d y=%d width=%d height=%d\n", tx, ty, w, h); } gf_isom_get_audio_info(file, trackNum, 1, &sr, &nb_ch, &bps); gf_isom_set_nalu_extract_mode(file, trackNum, GF_ISOM_NALU_EXTRACT_INSPECT); msub_type = gf_isom_get_media_subtype(file, trackNum, 1); if (msub_type==GF_ISOM_SUBTYPE_MPEG4_CRYP) gf_isom_get_original_format_type(file, trackNum, 1, &msub_type); if ((msub_type==GF_ISOM_SUBTYPE_MPEG4) || (msub_type==GF_ISOM_SUBTYPE_AVC_H264) || (msub_type==GF_ISOM_SUBTYPE_AVC2_H264) || (msub_type==GF_ISOM_SUBTYPE_AVC3_H264) || (msub_type==GF_ISOM_SUBTYPE_AVC4_H264) || (msub_type==GF_ISOM_SUBTYPE_SVC_H264) || (msub_type==GF_ISOM_SUBTYPE_MVC_H264) || (msub_type==GF_ISOM_SUBTYPE_LSR1) || (msub_type==GF_ISOM_SUBTYPE_HVC1) || (msub_type==GF_ISOM_SUBTYPE_HEV1) || (msub_type==GF_ISOM_SUBTYPE_HVC2) || (msub_type==GF_ISOM_SUBTYPE_HEV2) || (msub_type==GF_ISOM_SUBTYPE_LHV1) || (msub_type==GF_ISOM_SUBTYPE_LHE1) || (msub_type==GF_ISOM_SUBTYPE_HVT1) ) { esd = gf_isom_get_esd(file, trackNum, 1); if (!esd || !esd->decoderConfig) { M4_LOG(GF_LOG_WARNING, ("WARNING: Broken MPEG-4 Track\n")); if (esd) gf_odf_desc_del((GF_Descriptor *)esd); } else { const char *st = gf_stream_type_name(esd->decoderConfig->streamType); if (dump_m4sys) { if (st) { fprintf(stderr, "MPEG-4 Config%s%s Stream - ObjectTypeIndication 0x%02x\n", full_dump ? "\n\t" : ": ", st, esd->decoderConfig->objectTypeIndication); } else { fprintf(stderr, "MPEG-4 Config%sStream Type 0x%02x - ObjectTypeIndication 0x%02x\n", full_dump ? 
"\n\t" : ": ", esd->decoderConfig->streamType, esd->decoderConfig->objectTypeIndication); } } if (esd->decoderConfig->streamType==GF_STREAM_OD) is_od_track=1; if (esd->decoderConfig->streamType==GF_STREAM_VISUAL) { u32 w, h; u16 rvc_predef; w = h = 0; if (esd->decoderConfig->objectTypeIndication==GF_CODECID_MPEG4_PART2) { #ifndef GPAC_DISABLE_AV_PARSERS if (!esd->decoderConfig->decoderSpecificInfo) { #else gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "MPEG-4 Visual Size %d x %d\n", w, h); #endif M4_LOG(GF_LOG_WARNING, ("Non-compliant MPEG-4 Visual track: video_object_layer infos not found in sample description\n")); #ifndef GPAC_DISABLE_AV_PARSERS } else { GF_M4VDecSpecInfo dsi; gf_m4v_get_config(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, &dsi); if (full_dump) fprintf(stderr, "\t"); w = dsi.width; h = dsi.height; fprintf(stderr, "MPEG-4 Visual Size %d x %d - %s\n", w, h, gf_m4v_get_profile_name(dsi.VideoPL)); if (dsi.par_den && dsi.par_num) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, "Pixel Aspect Ratio %d:%d - Indicated track size %d x %d\n", dsi.par_num, dsi.par_den, tw, th); } } #endif } else if (gf_isom_get_avc_svc_type(file, trackNum, 1) != GF_ISOM_AVCTYPE_NONE) { GF_AVCConfig *avccfg, *svccfg, *mvccfg; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "AVC/H264 Video - Visual Size %d x %d\n", w, h); avccfg = gf_isom_avc_config_get(file, trackNum, 1); svccfg = gf_isom_svc_config_get(file, trackNum, 1); mvccfg = gf_isom_mvc_config_get(file, trackNum, 1); if (!avccfg && !svccfg && !mvccfg) { M4_LOG(GF_LOG_ERROR, ("\tNon-compliant AVC track: SPS/PPS not found in sample description\n")); } else if (avccfg) { fprintf(stderr, "\tAVC Info: %d SPS - %d PPS", gf_list_count(avccfg->sequenceParameterSets) , gf_list_count(avccfg->pictureParameterSets) ); fprintf(stderr, " - Profile %s @ Level %g\n", gf_avc_get_profile_name(avccfg->AVCProfileIndication), ((Double)avccfg->AVCLevelIndication)/10.0 ); fprintf(stderr, "\tNAL Unit length bits: %d\n", 8*avccfg->nal_unit_size); #ifndef GPAC_DISABLE_AV_PARSERS for (i=0; i<gf_list_count(avccfg->sequenceParameterSets); i++) { s32 par_n, par_d; GF_NALUFFParam *slc = gf_list_get(avccfg->sequenceParameterSets, i); gf_avc_get_sps_info(slc->data, slc->size, NULL, NULL, NULL, &par_n, &par_d); if ((par_n>0) && (par_d>0)) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, "\tPixel Aspect Ratio %d:%d - Indicated track size %d x %d\n", par_n, par_d, tw, th); } if (!full_dump) break; } #endif if (avccfg->chroma_bit_depth) { fprintf(stderr, "\tChroma format %s - Luma bit depth %d - chroma bit depth %d\n", gf_avc_hevc_get_chroma_format_name(avccfg->chroma_format), avccfg->luma_bit_depth, avccfg->chroma_bit_depth); } print_config_hash(avccfg->sequenceParameterSets, "SPS"); print_config_hash(avccfg->pictureParameterSets, "PPS"); gf_odf_avc_cfg_del(avccfg); } if (svccfg) { fprintf(stderr, "\n\tSVC Info: %d SPS - %d PPS - Profile %s @ Level %g\n", gf_list_count(svccfg->sequenceParameterSets) , gf_list_count(svccfg->pictureParameterSets), gf_avc_get_profile_name(svccfg->AVCProfileIndication), ((Double)svccfg->AVCLevelIndication)/10.0 ); fprintf(stderr, "\tSVC NAL Unit length bits: %d\n", 8*svccfg->nal_unit_size); #ifndef GPAC_DISABLE_AV_PARSERS for (i=0; i<gf_list_count(svccfg->sequenceParameterSets); i++) { GF_NALUFFParam *slc 
= gf_list_get(svccfg->sequenceParameterSets, i); if (slc) { s32 par_n, par_d; u32 s_w, s_h, sps_id; gf_avc_get_sps_info(slc->data, slc->size, &sps_id, &s_w, &s_h, &par_n, &par_d); fprintf(stderr, "\t\tSPS ID %d - Visual Size %d x %d\n", sps_id, s_w, s_h); if ((par_n>0) && (par_d>0)) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, "\tPixel Aspect Ratio %d:%d - Indicated track size %d x %d\n", par_n, par_d, tw, th); } } } #endif print_config_hash(svccfg->sequenceParameterSets, "SPS"); print_config_hash(svccfg->pictureParameterSets, "PPS"); print_config_hash(svccfg->sequenceParameterSetExtensions, "SPSEx"); gf_odf_avc_cfg_del(svccfg); } if (mvccfg) { fprintf(stderr, "\n\tMVC Info: %d SPS - %d PPS - Profile %s @ Level %g\n", gf_list_count(mvccfg->sequenceParameterSets) , gf_list_count(mvccfg->pictureParameterSets), gf_avc_get_profile_name(mvccfg->AVCProfileIndication), ((Double)mvccfg->AVCLevelIndication)/10.0 ); fprintf(stderr, "\tMVC NAL Unit length bits: %d\n", 8*mvccfg->nal_unit_size); #ifndef GPAC_DISABLE_AV_PARSERS for (i=0; i<gf_list_count(mvccfg->sequenceParameterSets); i++) { GF_NALUFFParam *slc = gf_list_get(mvccfg->sequenceParameterSets, i); if (slc) { u32 s_w, s_h, sps_id; s32 par_n, par_d; gf_avc_get_sps_info(slc->data, slc->size, &sps_id, &s_w, &s_h, &par_n, &par_d); fprintf(stderr, "\t\tSPS ID %d - Visual Size %d x %d\n", sps_id, s_w, s_h); if ((par_n>0) && (par_d>0)) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, "\tPixel Aspect Ratio %d:%d - Indicated track size %d x %d\n", par_n, par_d, tw, th); } } } #endif print_config_hash(mvccfg->sequenceParameterSets, "SPS"); print_config_hash(mvccfg->pictureParameterSets, "PPS"); gf_odf_avc_cfg_del(mvccfg); } } else if ((esd->decoderConfig->objectTypeIndication==GF_CODECID_HEVC) || (esd->decoderConfig->objectTypeIndication==GF_CODECID_LHVC) ) { GF_HEVCConfig *hevccfg, *lhvccfg; GF_OperatingPointsInformation *oinf; #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) HEVCState hevc_state; memset(&hevc_state, 0, sizeof(HEVCState)); hevc_state.sps_active_idx = -1; #endif gf_isom_get_visual_info(file, trackNum, 1, &w, &h); if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "HEVC Video - Visual Size %d x %d\n", w, h); hevccfg = gf_isom_hevc_config_get(file, trackNum, 1); lhvccfg = gf_isom_lhvc_config_get(file, trackNum, 1); if (msub_type==GF_ISOM_SUBTYPE_HVT1) { const u8 *data; u32 tsize; u32 is_default, tx,ty,tw,th, id, independent; Bool full_frame; if (gf_isom_get_tile_info(file, trackNum, 1, &is_default, &id, &independent, &full_frame, &tx, &ty, &tw, &th)) { fprintf(stderr, "\tHEVC Tile - ID %d independent %d (x,y,w,h)=%d,%d,%d,%d \n", id, independent, tx, ty, tw, th); } else if (gf_isom_get_sample_group_info(file, trackNum, 1, GF_ISOM_SAMPLE_GROUP_TRIF, &is_default, &data, &tsize)) { fprintf(stderr, "\tHEVC Tile track containing a tile set\n"); } else { fprintf(stderr, "\tHEVC Tile track without tiling info\n"); } } else if (!hevccfg && !lhvccfg) { M4_LOG(GF_LOG_ERROR, ("\tNon-compliant HEVC track: No hvcC or shcC found in sample description\n")); } if (gf_isom_get_reference_count(file, trackNum, GF_ISOM_REF_SABT)) { fprintf(stderr, "\tHEVC Tile base track\n"); } if (hevccfg) { dump_hevc_track_info(file, trackNum, hevccfg #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) , &hevc_state #endif ); gf_odf_hevc_cfg_del(hevccfg); fprintf(stderr, "\n"); } if (lhvccfg) { 
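/* An L-HEVC (layered HEVC) sample description is also present: dump it with the
   same hevc_state used for the base hvcC above, so parameter sets already parsed
   (VPS/SPS) are reused when resolving the enhancement-layer information. */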
dump_hevc_track_info(file, trackNum, lhvccfg #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) , &hevc_state #endif ); gf_odf_hevc_cfg_del(lhvccfg); } if (gf_isom_get_oinf_info(file, trackNum, &oinf)) { fprintf(stderr, "\n\tOperating Points Information -"); fprintf(stderr, " scalability_mask %d (", oinf->scalability_mask); switch (oinf->scalability_mask) { case 2: fprintf(stderr, "Multiview"); break; case 4: fprintf(stderr, "Spatial scalability"); break; case 8: fprintf(stderr, "Auxilary"); break; default: fprintf(stderr, "unknown"); } //TODO: need to dump more info ? fprintf(stderr, ") num_profile_tier_level %d ", gf_list_count(oinf->profile_tier_levels) ); fprintf(stderr, " num_operating_points %d dependency layers %d \n", gf_list_count(oinf->operating_points), gf_list_count(oinf->dependency_layers) ); } } /*OGG media*/ else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_THEORA) { char *szName; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); if (full_dump) fprintf(stderr, "\t"); if (!strnicmp((char *) &esd->decoderConfig->decoderSpecificInfo->data[3], "theora", 6)) szName = "Theora"; else szName = "Unknown"; fprintf(stderr, "Ogg/%s video / GPAC Mux - Visual Size %d x %d\n", szName, w, h); } else { //check if we know this codec from its OTI u32 codec_id = gf_codecid_from_oti(GF_STREAM_VISUAL, esd->decoderConfig->objectTypeIndication); if (codec_id) { gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "%s - Visual Size %d x %d\n", gf_codecid_name(codec_id), w, h); } } if (!w || !h) { gf_isom_get_visual_info(file, trackNum, 1, &w, &h); if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "Visual Size %d x %d\n", w, h); } if (gf_isom_get_rvc_config(file, trackNum, 1, &rvc_predef, NULL, NULL, NULL)==GF_OK) { fprintf(stderr, "Has RVC signaled - Predefined configuration %d\n", rvc_predef); } } else if (esd->decoderConfig->streamType==GF_STREAM_AUDIO) { #ifndef GPAC_DISABLE_AV_PARSERS GF_M4ADecSpecInfo a_cfg; GF_Err e; u32 oti; #endif u32 codec_id; Bool is_mp2 = GF_FALSE; switch (esd->decoderConfig->objectTypeIndication) { case GF_CODECID_AAC_MPEG2_MP: case GF_CODECID_AAC_MPEG2_LCP: case GF_CODECID_AAC_MPEG2_SSRP: is_mp2 = GF_TRUE; case GF_CODECID_AAC_MPEG4: #ifndef GPAC_DISABLE_AV_PARSERS if (!esd->decoderConfig->decoderSpecificInfo) e = GF_NON_COMPLIANT_BITSTREAM; else e = gf_m4a_get_config(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, &a_cfg); if (full_dump) fprintf(stderr, "\t"); if (e) { M4_LOG(GF_LOG_ERROR, ("Corrupted AAC Config\n")); } else { char *signaling = "implicit"; char *heaac = ""; if (!is_mp2 && a_cfg.has_sbr) { if (a_cfg.has_ps) heaac = "(HE-AAC v2) "; else heaac = "(HE-AAC v1) "; } if (a_cfg.base_object_type==2) { if (a_cfg.has_ps || a_cfg.has_sbr) signaling = "backward compatible"; } else { signaling = "hierarchical"; } fprintf(stderr, "%s (AOT=%d %s) %s- %d Channel(s) - SampleRate %d", gf_m4a_object_type_name(a_cfg.base_object_type), a_cfg.base_object_type, signaling, heaac, a_cfg.nb_chan, a_cfg.base_sr); if (is_mp2) fprintf(stderr, " (MPEG-2 Signaling)"); if (a_cfg.has_sbr) fprintf(stderr, " - SBR: SampleRate %d Type %s", a_cfg.sbr_sr, gf_m4a_object_type_name(a_cfg.sbr_object_type)); if (a_cfg.has_ps) fprintf(stderr, " - PS"); fprintf(stderr, "\n"); } #else fprintf(stderr, "MPEG-2/4 Audio - %d Channels - SampleRate %d\n", nb_ch, sr); #endif break; case GF_CODECID_MPEG2_PART3: case GF_CODECID_MPEG_AUDIO: if (msub_type == GF_ISOM_SUBTYPE_MPEG4_CRYP) { fprintf(stderr, 
"MPEG-1/2 Audio - %d Channels - SampleRate %d\n", nb_ch, sr); } else { #ifndef GPAC_DISABLE_AV_PARSERS GF_ISOSample *samp = gf_isom_get_sample(file, trackNum, 1, &oti); if (samp) { u32 mhdr = GF_4CC((u8)samp->data[0], (u8)samp->data[1], (u8)samp->data[2], (u8)samp->data[3]); if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "%s Audio - %d Channel(s) - SampleRate %d - Layer %d\n", gf_mp3_version_name(mhdr), gf_mp3_num_channels(mhdr), gf_mp3_sampling_rate(mhdr), gf_mp3_layer(mhdr) ); gf_isom_sample_del(&samp); } else { M4_LOG(GF_LOG_ERROR, ("Error fetching sample: %s\n", gf_error_to_string(gf_isom_last_error(file)) )); } #else fprintf(stderr, "MPEG-1/2 Audio - %d Channels - SampleRate %d\n", nb_ch, sr); #endif } break; case GF_CODECID_EVRC: fprintf(stderr, "EVRC Audio - Sample Rate 8000 - 1 channel\n"); break; case GF_CODECID_SMV: fprintf(stderr, "SMV Audio - Sample Rate 8000 - 1 channel\n"); break; case GF_CODECID_QCELP: fprintf(stderr, "QCELP Audio - Sample Rate 8000 - 1 channel\n"); break; /*packetVideo hack for EVRC...*/ case GF_CODECID_EVRC_PV: if (esd->decoderConfig->decoderSpecificInfo && (esd->decoderConfig->decoderSpecificInfo->dataLength==8) && !strnicmp((char *)esd->decoderConfig->decoderSpecificInfo->data, "pvmm", 4)) { if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "EVRC Audio (PacketVideo Mux) - Sample Rate 8000 - 1 channel\n"); } break; default: codec_id = gf_codecid_from_oti(GF_STREAM_AUDIO, esd->decoderConfig->objectTypeIndication); if (codec_id) { fprintf(stderr, "%s - Sample Rate %d - %d channel(s)\n", gf_codecid_name(codec_id), sr, nb_ch); } break; } } else if (esd->decoderConfig->streamType==GF_STREAM_SCENE) { if (esd->decoderConfig->objectTypeIndication<=4) { GF_BIFSConfig *b_cfg = gf_odf_get_bifs_config(esd->decoderConfig->decoderSpecificInfo, esd->decoderConfig->objectTypeIndication); fprintf(stderr, "BIFS Scene description - %s stream\n", b_cfg->elementaryMasks ? "Animation" : "Command"); if (full_dump && !b_cfg->elementaryMasks) { fprintf(stderr, "\tWidth %d Height %d Pixel Metrics %s\n", b_cfg->pixelWidth, b_cfg->pixelHeight, b_cfg->pixelMetrics ? "yes" : "no"); } gf_odf_desc_del((GF_Descriptor *)b_cfg); } else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_AFX) { u8 tag = esd->decoderConfig->decoderSpecificInfo ? esd->decoderConfig->decoderSpecificInfo->data[0] : 0xFF; const char *afxtype = gf_stream_type_afx_name(tag); fprintf(stderr, "AFX Stream - type %s (%d)\n", afxtype, tag); } else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_FONT) { fprintf(stderr, "Font Data stream\n"); } else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_LASER) { GF_LASERConfig l_cfg; gf_odf_get_laser_config(esd->decoderConfig->decoderSpecificInfo, &l_cfg); fprintf(stderr, "LASER Stream - %s\n", l_cfg.newSceneIndicator ? 
"Full Scene" : "Scene Segment"); } else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_TEXT_MPEG4) { fprintf(stderr, "MPEG-4 Streaming Text stream\n"); } else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_SYNTHESIZED_TEXTURE) { fprintf(stderr, "Synthetized Texture stream stream\n"); } else { M4_LOG(GF_LOG_WARNING, ("Unknown Systems stream OTI %d\n", esd->decoderConfig->objectTypeIndication)); } } /*sync is only valid if we open all tracks to take care of default MP4 sync..*/ if (!full_dump) { if (dump_m4sys) { if (!esd->OCRESID || (esd->OCRESID == esd->ESID)) fprintf(stderr, "Self-synchronized\n"); else fprintf(stderr, "Synchronized on stream %d\n", esd->OCRESID); } } else { fprintf(stderr, "\tDecoding Buffer size %d - Bitrate: avg %d - max %d kbps\n", esd->decoderConfig->bufferSizeDB, esd->decoderConfig->avgBitrate/1000, esd->decoderConfig->maxBitrate/1000); if (esd->dependsOnESID) fprintf(stderr, "\tDepends on stream %d for decoding\n", esd->dependsOnESID); else fprintf(stderr, "\tNo stream dependencies for decoding\n"); fprintf(stderr, "\tStreamPriority %d\n", esd->streamPriority); if (esd->URLString) fprintf(stderr, "\tRemote Data Source %s\n", esd->URLString); } gf_odf_desc_del((GF_Descriptor *) esd); } } else if (msub_type == GF_ISOM_SUBTYPE_AV01) { GF_AV1Config *av1c; u32 w, h; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "\tAOM AV1 stream - Resolution %d x %d\n", w, h); av1c = gf_isom_av1_config_get(file, trackNum, 1); fprintf(stderr, "\tversion=%u, profile=%u, level_idx0=%u, tier=%u\n", (u32)av1c->version, (u32)av1c->seq_profile, (u32)av1c->seq_level_idx_0, (u32)av1c->seq_tier_0); fprintf(stderr, "\thigh_bitdepth=%u, twelve_bit=%u, monochrome=%u\n", (u32)av1c->high_bitdepth, (u32)av1c->twelve_bit, (u32)av1c->monochrome); fprintf(stderr, "\tchroma: subsampling_x=%u, subsampling_y=%u, sample_position=%u\n", (u32)av1c->chroma_subsampling_x, (u32)av1c->chroma_subsampling_y, (u32)av1c->chroma_sample_position); if (av1c->initial_presentation_delay_present) fprintf(stderr, "\tInitial presentation delay %u\n", (u32) av1c->initial_presentation_delay_minus_one+1); count = gf_list_count(av1c->obu_array); for (i=0; i<count; i++) { u8 hash[20]; GF_AV1_OBUArrayEntry *obu = gf_list_get(av1c->obu_array, i); gf_sha1_csum((u8*)obu->obu, (u32)obu->obu_length, hash); fprintf(stderr, "\tOBU#%d %s hash: ", i+1, gf_av1_get_obu_name(obu->obu_type) ); for (j=0; j<20; j++) fprintf(stderr, "%02X", hash[j]); fprintf(stderr, "\n"); } gf_odf_av1_cfg_del(av1c); } else if (msub_type == GF_ISOM_SUBTYPE_3GP_H263) { u32 w, h; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "\t3GPP H263 stream - Resolution %d x %d\n", w, h); } else if (msub_type == GF_ISOM_SUBTYPE_MJP2) { u32 w, h; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "\tMotionJPEG2000 stream - Resolution %d x %d\n", w, h); } else if ((msub_type == GF_ISOM_SUBTYPE_3GP_AMR) || (msub_type == GF_ISOM_SUBTYPE_3GP_AMR_WB)) { fprintf(stderr, "\t3GPP AMR%s stream - Sample Rate %d - %d channel(s) %d bps\n", (msub_type == GF_ISOM_SUBTYPE_3GP_AMR_WB) ? 
" Wide Band" : "", sr, nb_ch, (u32) bps); } else if (msub_type == GF_ISOM_SUBTYPE_3GP_EVRC) { fprintf(stderr, "\t3GPP EVRC stream - Sample Rate %d - %d channel(s) %d bps\n", sr, nb_ch, (u32) bps); } else if (msub_type == GF_ISOM_SUBTYPE_3GP_QCELP) { fprintf(stderr, "\t3GPP QCELP stream - Sample Rate %d - %d channel(s) %d bps\n", sr, nb_ch, (u32) bps); } else if (msub_type == GF_ISOM_SUBTYPE_MP3) { fprintf(stderr, "\tMPEG 1/2 Audio stream - Sample Rate %d - %d channel(s) %d bps\n", sr, nb_ch, (u32) bps); } else if ((msub_type == GF_ISOM_SUBTYPE_AC3) || (msub_type == GF_ISOM_SUBTYPE_EC3)) { u32 br = 0; const char *lfe = ""; Bool is_ec3 = (msub_type == GF_ISOM_SUBTYPE_EC3) ? GF_TRUE : GF_FALSE; #ifndef GPAC_DISABLE_AV_PARSERS GF_AC3Config *ac3 = gf_isom_ac3_config_get(file, trackNum, 1); if (ac3) { nb_ch = gf_ac3_get_channels(ac3->streams[0].acmod); for (i=0; i<ac3->streams[0].nb_dep_sub; ++i) { assert(ac3->streams[0].nb_dep_sub == 1); nb_ch += gf_ac3_get_channels(ac3->streams[0].chan_loc); } if (ac3->streams[0].lfon) lfe = ".1"; br = ac3->is_ec3 ? ac3->brcode : gf_ac3_get_bitrate(ac3->brcode); is_ec3 = ac3->is_ec3; gf_free(ac3); } #endif fprintf(stderr, "\t%s stream - Sample Rate %d - %d%s channel(s) - bitrate %d\n", is_ec3 ? "EC-3" : "AC-3", sr, nb_ch, lfe, br); } else if (msub_type == GF_ISOM_SUBTYPE_3GP_SMV) { fprintf(stderr, "\t3GPP SMV stream - Sample Rate %d - %d channel(s) %d bits per samples\n", sr, nb_ch, (u32) bps); } else if (msub_type == GF_ISOM_SUBTYPE_3GP_DIMS) { u32 w, h; GF_DIMSDescription dims; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); gf_isom_get_dims_description(file, trackNum, 1, &dims); fprintf(stderr, "\t3GPP DIMS stream - size %d x %d - Profile %d - Level %d\n", w, h, dims.profile, dims.level); fprintf(stderr, "\tpathComponents: %d - useFullRequestHost: %s\n", dims.pathComponents, dims.fullRequestHost ? "yes" : "no"); fprintf(stderr, "\tstream type: %s - redundant: %s\n", dims.streamType ? "primary" : "secondary", (dims.containsRedundant==1) ? "main" : ((dims.containsRedundant==2) ? "redundant" : "main+redundant") ); if (dims.textEncoding[0]) fprintf(stderr, "\ttext encoding %s\n", dims.textEncoding); if (dims.contentEncoding[0]) fprintf(stderr, "\tcontent encoding %s\n", dims.contentEncoding); if (dims.content_script_types) fprintf(stderr, "\tscript languages %s\n", dims.content_script_types); } else if (mtype==GF_ISOM_MEDIA_HINT) { u32 refTrack; s32 refCount = gf_isom_get_reference_count(file, trackNum, GF_ISOM_REF_HINT); if (refCount>0) { fprintf(stderr, "Streaming Hint Track for track%s ", (refCount>1) ? 
"s" :""); for (i=0; i<(u32) refCount; i++) { gf_isom_get_reference(file, trackNum, GF_ISOM_REF_HINT, i+1, &refTrack); if (i) fprintf(stderr, " - "); fprintf(stderr, "ID %d", gf_isom_get_track_id(file, refTrack)); } fprintf(stderr, "\n"); } else { fprintf(stderr, "Streaming Hint Track (no refs)\n"); } #ifndef GPAC_DISABLE_ISOM_HINTING refCount = gf_isom_get_payt_count(file, trackNum); if (refCount>0) { for (i=0; i<(u32) refCount; i++) { const char *name = gf_isom_get_payt_info(file, trackNum, i+1, &refTrack); fprintf(stderr, "\tPayload ID %d: type %s\n", refTrack, name); } } #endif } else if (mtype==GF_ISOM_MEDIA_FLASH) { fprintf(stderr, "Macromedia Flash Movie\n"); } else if ((mtype==GF_ISOM_MEDIA_TEXT) || (mtype==GF_ISOM_MEDIA_SUBT) || (mtype==GF_ISOM_MEDIA_MPEG_SUBT)) { u32 w, h; s16 l; s32 tx, ty; const char *content_encoding = NULL; const char *mime = NULL; const char *config = NULL; const char *_namespace = NULL; const char *schema_loc = NULL; const char *auxiliary_mimes = NULL; gf_isom_get_track_layout_info(file, trackNum, &w, &h, &tx, &ty, &l); if (msub_type == GF_ISOM_SUBTYPE_SBTT) { gf_isom_stxt_get_description(file, trackNum, 1, &mime, &content_encoding, &config); fprintf(stderr, "Textual Subtitle Stream "); fprintf(stderr, "- mime %s", mime); if (content_encoding != NULL) { fprintf(stderr, " - encoding %s", content_encoding); } if (config != NULL) { fprintf(stderr, " - %d bytes config", (u32) strlen(config)); } } else if (msub_type == GF_ISOM_SUBTYPE_STXT) { gf_isom_stxt_get_description(file, trackNum, 1, &mime, &content_encoding, &config); fprintf(stderr, "Simple Timed Text Stream "); fprintf(stderr, "- mime %s", mime); if (content_encoding != NULL) { fprintf(stderr, " - encoding %s", content_encoding); } if (config != NULL) { fprintf(stderr, " - %d bytes config", (u32) strlen(config)); } } else if (msub_type == GF_ISOM_SUBTYPE_STPP) { gf_isom_xml_subtitle_get_description(file, trackNum, 1, &_namespace, &schema_loc, &auxiliary_mimes); fprintf(stderr, "XML Subtitle Stream "); fprintf(stderr, "- namespace %s", _namespace); if (schema_loc != NULL) { fprintf(stderr, " - schema-location %s", schema_loc); } if (auxiliary_mimes != NULL) { fprintf(stderr, " - auxiliary-mime-types %s", auxiliary_mimes); } } else { fprintf(stderr, "Unknown Text Stream"); } fprintf(stderr, "\n Size %d x %d - Translation X=%d Y=%d - Layer %d\n", w, h, tx, ty, l); } else if (mtype == GF_ISOM_MEDIA_META) { const char *content_encoding = NULL; if (msub_type == GF_ISOM_SUBTYPE_METT) { const char *mime = NULL; const char *config = NULL; gf_isom_stxt_get_description(file, trackNum, 1, &mime, &content_encoding, &config); fprintf(stderr, "Textual Metadata Stream - mime %s", mime); if (content_encoding != NULL) { fprintf(stderr, " - encoding %s", content_encoding); } if (config != NULL) { fprintf(stderr, " - %d bytes config", (u32) strlen(config)); } fprintf(stderr, "\n"); } else if (msub_type == GF_ISOM_SUBTYPE_METX) { const char *_namespace = NULL; const char *schema_loc = NULL; gf_isom_get_xml_metadata_description(file, trackNum, 1, &_namespace, &schema_loc, &content_encoding); fprintf(stderr, "XML Metadata Stream - namespace %s", _namespace); if (content_encoding != NULL) { fprintf(stderr, " - encoding %s", content_encoding); } if (schema_loc != NULL) { fprintf(stderr, " - schema-location %s", schema_loc); } fprintf(stderr, "\n"); } else { fprintf(stderr, "Unknown Metadata Stream\n"); } } else if ((msub_type==GF_ISOM_SUBTYPE_VVC1) || (msub_type==GF_ISOM_SUBTYPE_VVI1)) { GF_VVCConfig *vvccfg; u32 w, h; #if 
!defined(GPAC_DISABLE_AV_PARSERS) VVCState *vvc_state; GF_SAFEALLOC(vvc_state, VVCState); if (vvc_state) vvc_state->sps_active_idx = -1; #endif gf_isom_get_visual_info(file, trackNum, 1, &w, &h); if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "VVC Video - Visual Size %d x %d\n", w, h); vvccfg = gf_isom_vvc_config_get(file, trackNum, 1); if (!vvccfg) { M4_LOG(GF_LOG_ERROR, ("Non-compliant VVC track: No vvcC found in sample description\n")); } else { dump_vvc_track_info(file, trackNum, vvccfg #if !defined(GPAC_DISABLE_AV_PARSERS) , vvc_state #endif ); gf_odf_vvc_cfg_del(vvccfg); fprintf(stderr, "\n"); } #if !defined(GPAC_DISABLE_AV_PARSERS) if (vvc_state) gf_free(vvc_state); #endif } else if ((msub_type == GF_ISOM_SUBTYPE_MH3D_MHA1) || (msub_type == GF_ISOM_SUBTYPE_MH3D_MHA2) || (msub_type == GF_ISOM_SUBTYPE_MH3D_MHM1) || (msub_type == GF_ISOM_SUBTYPE_MH3D_MHM2) ) { const u8 *compat_profiles; u32 nb_compat_profiles; Bool valid = GF_FALSE; Bool allow_inband = GF_FALSE; if ( (msub_type == GF_ISOM_SUBTYPE_MH3D_MHM1) || (msub_type == GF_ISOM_SUBTYPE_MH3D_MHM2)) allow_inband = GF_TRUE; fprintf(stderr, "\tMPEG-H Audio stream - Sample Rate %d\n", sr); esd = gf_media_map_esd(file, trackNum, 1); if (!esd || !esd->decoderConfig || !esd->decoderConfig->decoderSpecificInfo || !esd->decoderConfig->decoderSpecificInfo->data ) { if (allow_inband) { GF_ISOSample *samp = gf_isom_get_sample(file, trackNum, 1, NULL); if (samp) { u64 ch_layout=0; s32 PL = gf_mpegh_get_mhas_pl(samp->data, samp->dataLength, &ch_layout); if (PL>=0) { fprintf(stderr, "\tProfileLevelIndication: 0x%02X", PL); if (ch_layout) fprintf(stderr, " - Reference Channel Layout %s", gf_audio_fmt_get_layout_name(ch_layout) ); fprintf(stderr, "\n"); } gf_isom_sample_del(&samp); } valid = GF_TRUE; } } else if (esd->decoderConfig->decoderSpecificInfo->dataLength>=5) { fprintf(stderr, "\tProfileLevelIndication: 0x%02X - Reference Channel Layout %s\n", esd->decoderConfig->decoderSpecificInfo->data[1] , gf_audio_fmt_get_layout_name_from_cicp(esd->decoderConfig->decoderSpecificInfo->data[2]) ); valid = GF_TRUE; } if (!valid) { M4_LOG(GF_LOG_ERROR, ("Invalid MPEG-H audio config\n")); } if (esd) gf_odf_desc_del((GF_Descriptor *)esd); compat_profiles = gf_isom_get_mpegh_compatible_profiles(file, trackNum, 1, &nb_compat_profiles); for (i=0; i<nb_compat_profiles; i++) { if (!i) fprintf(stderr, "\tCompatible profiles:"); fprintf(stderr, " 0x%02X", compat_profiles[i]); } if (i) fprintf(stderr, "\n"); } else if (msub_type==GF_ISOM_SUBTYPE_MLPA) { u32 fmt, prate; if (gf_isom_truehd_config_get(file, trackNum, 1, &fmt, &prate) != GF_OK) { fprintf(stderr, "\tInvalid TrueHD audio config\n"); } fprintf(stderr, "TrueHD Audio stream - Sample Rate %u - channels %u - format %u peak rate %u\n", sr, nb_ch, fmt, prate); } else if (codecid) { if (gf_isom_is_video_handler_type(mtype) ) { u32 w, h; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "%s - Resolution %d x %d\n", gf_codecid_name(codecid), w, h); } else if (mtype==GF_ISOM_MEDIA_AUDIO) { gf_isom_get_audio_info(file, trackNum, 1, &sr, &nb_ch, NULL); fprintf(stderr, "%s - Sample Rate %d - %d channel(s)\n", gf_codecid_name(codecid), sr, nb_ch); } else { fprintf(stderr, "%s\n", gf_codecid_name(codecid) ); } } else if (pfmt) { u32 w, h; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "Raw video %s - Resolution %d x %d\n", gf_pixel_fmt_name(pfmt), w, h); } else if (msub_type==GF_QT_SUBTYPE_TMCD) { u32 stsd_idx; GF_ISOSample *sample = gf_isom_get_sample(file, trackNum, 1, 
&stsd_idx); fprintf(stderr, "Time Code stream\n"); if (sample) { char szTimecode[100]; u32 tmcd_flags, tmcd_num, tmcd_den, tmcd_fpt; gf_isom_get_tmcd_config(file, trackNum, stsd_idx, &tmcd_flags, &tmcd_num, &tmcd_den, &tmcd_fpt); gf_inspect_format_timecode(sample->data, sample->dataLength, tmcd_flags, tmcd_num, tmcd_den, tmcd_fpt, szTimecode); gf_isom_sample_del(&sample); fprintf(stderr, "\tFirst timecode: %s\n", szTimecode); } } else { GF_GenericSampleDescription *udesc; udesc = gf_isom_get_generic_sample_description(file, trackNum, 1); if (udesc) { if (gf_isom_is_video_handler_type(mtype) ) { fprintf(stderr, "%s - Compressor \"%s\" - Resolution %d x %d\n", ( (mtype == GF_ISOM_MEDIA_VISUAL ? "Visual" : "Auxiliary Video") ), udesc->compressor_name, udesc->width, udesc->height); } else if (mtype==GF_ISOM_MEDIA_AUDIO) { fprintf(stderr, "Audio - Sample Rate %d - %d channel(s)\n", udesc->samplerate, udesc->nb_channels); } else { fprintf(stderr, "Unknown media type\n"); } if (udesc->vendor_code) fprintf(stderr, "\tVendor code \"%s\" - Version %d - revision %d\n", gf_4cc_to_str(udesc->vendor_code), udesc->version, udesc->revision); if (udesc->extension_buf) { fprintf(stderr, "\tCodec configuration data size: %d bytes\n", udesc->extension_buf_size); gf_free(udesc->extension_buf); } gf_free(udesc); } else { fprintf(stderr, "Unknown track type\n"); } } /*Crypto info*/ if (gf_isom_is_track_encrypted(file, trackNum)) { const char *scheme_URI, *KMS_URI; u32 scheme_type, version; u32 IV_size; Bool use_sel_enc; if (gf_isom_is_ismacryp_media(file, trackNum, 1)) { gf_isom_get_ismacryp_info(file, trackNum, 1, NULL, &scheme_type, &version, &scheme_URI, &KMS_URI, &use_sel_enc, &IV_size, NULL); fprintf(stderr, "\n\tProtected by ISMA E&A scheme %s (version %d)\n", gf_4cc_to_str(scheme_type), version); if (scheme_URI) fprintf(stderr, "scheme location: %s\n", scheme_URI); if (KMS_URI) { if (!strnicmp(KMS_URI, "(key)", 5)) fprintf(stderr, "\tKMS location: key in file\n"); else fprintf(stderr, "\tKMS location: %s\n", KMS_URI); } fprintf(stderr, "\tSelective Encryption: %s\n", use_sel_enc ? "Yes" : "No"); if (IV_size) fprintf(stderr, "\tInitialization Vector size: %d bits\n", IV_size*8); } else if (gf_isom_is_omadrm_media(file, trackNum, 1)) { const char *textHdrs; u32 enc_type, hdr_len; u64 orig_len; gf_isom_get_omadrm_info(file, trackNum, 1, NULL, &scheme_type, &version, &scheme_URI, &KMS_URI, &textHdrs, &hdr_len, &orig_len, &enc_type, &use_sel_enc, &IV_size, NULL); fprintf(stderr, "\n\tProtected by OMA DRM scheme %s (version %d)\n", gf_4cc_to_str(scheme_type), version); fprintf(stderr, "\tRights Issuer: %s\n", KMS_URI); fprintf(stderr, "\tContent ID: %s\n", scheme_URI); if (textHdrs) { u32 offset; const char *start = textHdrs; fprintf(stderr, "\tOMA Textual Headers:\n"); i=0; offset=0; while (i<hdr_len) { if (start[i]==0) { fprintf(stderr, "\t\t%s\n", start+offset); offset=i+1; } i++; } fprintf(stderr, "\t\t%s\n", start+offset); } if (orig_len) fprintf(stderr, "\tOriginal media size "LLD"\n", orig_len); fprintf(stderr, "\tEncryption algorithm %s\n", (enc_type==1) ? "AEA 128 CBC" : (enc_type ? "AEA 128 CTR" : "None")); fprintf(stderr, "\tSelective Encryption: %s\n", use_sel_enc ? 
"Yes" : "No"); if (IV_size) fprintf(stderr, "\tInitialization Vector size: %d bits\n", IV_size*8); } else if(gf_isom_is_cenc_media(file, trackNum, 1)) { const u8 *def_key; u32 def_key_size; Bool IsEncrypted; u8 crypt_byte_block, skip_byte_block; IV_size = 0; gf_isom_get_cenc_info(file, trackNum, 1, NULL, &scheme_type, &version); gf_isom_cenc_get_default_info(file, trackNum, 1, NULL, &IsEncrypted, &crypt_byte_block, &skip_byte_block, &def_key, &def_key_size); fprintf(stderr, "\n\tProtected by CENC scheme %s version 0x%08X", gf_4cc_to_str(scheme_type), version); if (crypt_byte_block && skip_byte_block) fprintf(stderr, " - Pattern %d:%d", (u32) skip_byte_block, (u32) crypt_byte_block); if (def_key && def_key[0]) fprintf(stderr, " - MultiKey"); fprintf(stderr, "\n"); dump_key_info(def_key, def_key_size, IsEncrypted); } else if(gf_isom_is_adobe_protection_media(file, trackNum, 1)) { gf_isom_get_adobe_protection_info(file, trackNum, 1, NULL, &scheme_type, &version, NULL); fprintf(stderr, "\nProtected by Adobe scheme %s (version %d)\n", gf_4cc_to_str(scheme_type), version); } else { fprintf(stderr, "\nProtected by unknown scheme %s\n", gf_4cc_to_str(gf_isom_is_media_encrypted(file, trackNum, 0) )); } fprintf(stderr, "\n"); } if ( gf_media_get_rfc_6381_codec_name(file, trackNum, szCodec, GF_FALSE, GF_FALSE) == GF_OK) { fprintf(stderr, "\tRFC6381 Codec Parameters: %s\n", szCodec); } DumpMetaItem(file, 0, trackNum, "\tTrack Meta"); gf_isom_get_track_switch_group_count(file, trackNum, &alt_group, &nb_groups); if (alt_group) { fprintf(stderr, "Alternate Group ID %d\n", alt_group); for (i=0; i<nb_groups; i++) { u32 nb_crit, switchGroupID; const u32 *criterias = gf_isom_get_track_switch_parameter(file, trackNum, i+1, &switchGroupID, &nb_crit); if (!nb_crit) { fprintf(stderr, "\tNo criteria in %s group\n", switchGroupID ? "switch" : "alternate"); } else { if (switchGroupID) { fprintf(stderr, "\tSwitchGroup ID %d criterias: ", switchGroupID); } else { fprintf(stderr, "\tAlternate Group criterias: "); } for (j=0; j<nb_crit; j++) { if (j) fprintf(stderr, " "); fprintf(stderr, "%s", gf_4cc_to_str(criterias[j]) ); } fprintf(stderr, "\n"); } } } switch (gf_isom_has_sync_points(file, trackNum)) { case 0: fprintf(stderr, "\tAll samples are sync\n"); break; case 1: { u32 nb_sync = gf_isom_get_sync_point_count(file, trackNum) - 1; if (! 
nb_sync) { fprintf(stderr, "\tOnly one sync sample\n"); } else { fprintf(stderr, "\tAverage GOP length: %d samples\n", gf_isom_get_sample_count(file, trackNum) / nb_sync); } } break; case 2: fprintf(stderr, "\tNo sync sample found\n"); break; } fprintf(stderr, "\tMax sample duration: %d / %d\n", gf_isom_get_max_sample_delta(file, trackNum), timescale); if (!full_dump) { fprintf(stderr, "\n"); return; } dur = size = 0; max_rate = rate = 0; time_slice = 0; ts = gf_isom_get_media_timescale(file, trackNum); csize = gf_isom_get_constant_sample_size(file, trackNum); cdur = gf_isom_get_constant_sample_duration(file, trackNum); count = gf_isom_get_sample_count(file, trackNum); if (csize && cdur) { size = count * csize; dur = cdur * count; } else { for (j=0; j<count; j++) { GF_ISOSample *samp; if (is_od_track) { samp = gf_isom_get_sample(file, trackNum, j+1, NULL); } else { samp = gf_isom_get_sample_info(file, trackNum, j+1, NULL, NULL); } if (!samp) { M4_LOG(GF_LOG_ERROR, ("Failed to fetch sample %d\n", j+1)); return; } dur = samp->DTS+samp->CTS_Offset; size += samp->dataLength; rate += samp->dataLength; if (samp->DTS - time_slice > ts) { Double max_tmp = rate * ts / (samp->DTS - time_slice); if (max_rate < max_tmp ) max_rate = max_tmp; rate = 0; time_slice = samp->DTS; } gf_isom_sample_del(&samp); } } fprintf(stderr, "\nComputed info from media:\n"); if (csize && cdur) { fprintf(stderr, "\tConstant sample size %d bytes and dur %d / %d\n", csize, cdur, ts); } scale = 1000.0 / ts; dur = (u64) (scale * dur); fprintf(stderr, "\tTotal size "LLU" bytes - Total samples duration "LLU" ms\n", size, dur); if (!dur) { fprintf(stderr, "\n"); return; } /*rate in byte, dur is in ms*/ rate = 8000.0 * size / dur; if (!max_rate) max_rate = rate; else max_rate *= 8.0; if (rate >= 1500) { fprintf(stderr, "\tAverage rate %.2f kbps - Max Rate %.2f kbps\n", rate/1000, max_rate/1000); } else { fprintf(stderr, "\tAverage rate %.2f bps - Max Rate %.2f bps\n", rate, max_rate); } { u32 dmin, dmax, davg, smin, smax, savg; gf_isom_get_chunks_infos(file, trackNum, &dmin, &davg, &dmax, &smin, &savg, &smax); fprintf(stderr, "\tChunk durations: min %d ms - max %d ms - average %d ms\n", (1000*dmin)/ts, (1000*dmax)/ts, (1000*davg)/ts); fprintf(stderr, "\tChunk sizes (bytes): min %d - max %d - average %d\n", smin, smax, savg); } fprintf(stderr, "\n"); count = gf_isom_get_chapter_count(file, trackNum); if (count) { const char *name; u64 time; fprintf(stderr, "\nChapters:\n"); for (j=0; j<count; j++) { gf_isom_get_chapter(file, trackNum, j+1, &time, &name); fprintf(stderr, "\tChapter #%d - %s - \"%s\"\n", j+1, format_duration(time, 1000, szDur), name); } } } void DumpMovieInfo(GF_ISOFile *file) { GF_InitialObjectDescriptor *iod; Bool dump_m4sys = GF_FALSE; u32 i, brand, min, timescale, count, data_len; const u8 *data; u64 create, modif; Bool has_itags = GF_FALSE; char szDur[50]; DumpMetaItem(file, 1, 0, "# File Meta"); if (!gf_isom_has_movie(file)) { if (gf_isom_has_segment(file, &brand, &min)) { count = gf_isom_segment_get_fragment_count(file); fprintf(stderr, "File is a segment - %d movie fragments - Brand %s (version %d):\n", count, gf_4cc_to_str(brand), min); for (i=0; i<count; i++) { u32 j, traf_count = gf_isom_segment_get_track_fragment_count(file, i+1); for (j=0; j<traf_count; j++) { u32 ID; u64 tfdt; ID = gf_isom_segment_get_track_fragment_decode_time(file, i+1, j+1, &tfdt); fprintf(stderr, "\tFragment #%d Track ID %d - TFDT "LLU"\n", i+1, ID, tfdt); } } } else { fprintf(stderr, "File has no movie (moov) - static data 
container\n"); } return; } timescale = gf_isom_get_timescale(file); i=gf_isom_get_track_count(file); fprintf(stderr, "# Movie Info - %d track%s - TimeScale %d\n", i, i>1 ? "s" : "", timescale); modif = gf_isom_get_duration(file); create = gf_isom_get_original_duration(file); fprintf(stderr, "Duration %s", format_duration(create, timescale, szDur)); if (create!=modif) { fprintf(stderr, " (recomputed %s)", format_duration(modif, timescale, szDur)); } fprintf(stderr, "\n"); #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (gf_isom_is_fragmented(file)) { fprintf(stderr, "Fragmented: yes - duration %s\n%d fragments - %d SegmentIndexes\n", format_duration(gf_isom_get_fragmented_duration(file), timescale, szDur), gf_isom_get_fragments_count(file, 0) , gf_isom_get_fragments_count(file, 1) ); } else { fprintf(stderr, "Fragmented: no\n"); } #endif if (gf_isom_moov_first(file)) fprintf(stderr, "Progressive (moov before mdat)\n"); if (gf_isom_get_brand_info(file, &brand, &min, &count) == GF_OK) { fprintf(stderr, "Major Brand %s - version %d - compatible brands:", gf_4cc_to_str(brand), min); for (i=0; i<count;i++) { if (gf_isom_get_alternate_brand(file, i+1, &brand)==GF_OK) fprintf(stderr, " %s", gf_4cc_to_str(brand) ); } fprintf(stderr, "\n"); } gf_isom_get_creation_time(file, &create, &modif); fprintf(stderr, "Created: %s", format_date(create, szDur)); if (create != modif) fprintf(stderr, "Modified: %s", format_date(modif, szDur)); fprintf(stderr, "\n"); DumpMetaItem(file, 0, 0, "# Movie Meta"); iod = (GF_InitialObjectDescriptor *) gf_isom_get_root_od(file); if (iod) { u32 desc_size = gf_odf_desc_size((GF_Descriptor *)iod); if (iod->tag == GF_ODF_IOD_TAG) { fprintf(stderr, "File has root IOD (%d bytes)\n", desc_size); fprintf(stderr, "Scene PL 0x%02x - Graphics PL 0x%02x - OD PL 0x%02x\n", iod->scene_profileAndLevel, iod->graphics_profileAndLevel, iod->OD_profileAndLevel); fprintf(stderr, "Visual PL: %s (0x%02x)\n", gf_m4v_get_profile_name(iod->visual_profileAndLevel), iod->visual_profileAndLevel); fprintf(stderr, "Audio PL: %s (0x%02x)\n", gf_m4a_get_profile_name(iod->audio_profileAndLevel), iod->audio_profileAndLevel); //fprintf(stderr, "inline profiles included %s\n", iod->inlineProfileFlag ? 
"yes" : "no"); } else { fprintf(stderr, "File has root OD (%d bytes)\n", desc_size); } if (!gf_list_count(iod->ESDescriptors)) fprintf(stderr, "No streams included in root OD\n"); else dump_m4sys = GF_TRUE; gf_odf_desc_del((GF_Descriptor *) iod); } if (gf_isom_is_JPEG2000(file)) fprintf(stderr, "File is JPEG 2000\n"); count = gf_isom_get_copyright_count(file); if (count) { const char *lang, *note; fprintf(stderr, "\nCopyrights:\n"); for (i=0; i<count; i++) { gf_isom_get_copyright(file, i+1, &lang, &note); fprintf(stderr, "\t(%s) %s\n", lang, note); } } count = gf_isom_get_chapter_count(file, 0); if (count) { const char *name; u64 time; fprintf(stderr, "\nChapters:\n"); for (i=0; i<count; i++) { gf_isom_get_chapter(file, 0, i+1, &time, &name); fprintf(stderr, "\tChapter #%d - %s - \"%s\"\n", i+1, format_duration(time, 1000, szDur), name); } } if (gf_isom_apple_get_tag(file, 0, &data, &data_len) == GF_OK) { has_itags = GF_TRUE; fprintf(stderr, "\niTunes Info:\n"); i=0; while (1) { u32 int_val2, flags, itype; GF_ISOiTunesTag tag; u64 int_val; s32 tag_idx; GF_Err e = gf_isom_apple_enum_tag(file, i, &tag, &data, &data_len, &int_val, &int_val2, &flags); if (e) break; i++; tag_idx = gf_itags_find_by_itag(tag); if (tag_idx<0) { fprintf(stderr, "\t%s: %s\n", gf_4cc_to_str(tag), data); continue; } fprintf(stderr, "\t%s: ", gf_itags_get_name(tag_idx) ); itype = gf_itags_get_type(tag_idx); switch (itype) { case GF_ITAG_BOOL: fprintf(stderr, int_val ? "yes" : "no"); break; case GF_ITAG_INT8: case GF_ITAG_INT16: case GF_ITAG_INT32: case GF_ITAG_INT64: fprintf(stderr, LLU, int_val); break; case GF_ITAG_FRAC6: case GF_ITAG_FRAC8: fprintf(stderr, LLU" / %u", int_val, int_val2); break; case GF_ITAG_FILE: if (flags==14) fprintf(stderr, "PNG File"); else if (flags==13) fprintf(stderr, "JPEG File"); else fprintf(stderr, "unknown (flags %d)", flags); break; case GF_ITAG_ID3_GENRE: if (int_val) { fprintf(stderr, "%s", gf_id3_get_genre((u32) int_val) ); break; } //fallthrough default: if (data) fprintf(stderr, "%s", data); else fprintf(stderr, data_len ? 
"none" : "unknown"); break; } fprintf(stderr, "\n"); } } i=0; while (1) { u32 type, version; char *wmatag; GF_Err e = gf_isom_wma_enum_tag(file, i, &wmatag, &data, &data_len, &version, &type); if (e) break; if (!i) { fprintf(stderr, "\nWMA Info:\n"); } i++; fprintf(stderr, "\t%s", wmatag); if (version!=1) fprintf(stderr, " (version %d)", version); fprintf(stderr, ": "); if (type) { fprintf(stderr, "unknown type %d\n", type); } else { u16 *src_str = (u16 *) data; u32 len = (u32) ( UTF8_MAX_BYTES_PER_CHAR * gf_utf8_wcslen(src_str) ); char *utf8str = (char *)gf_malloc(len + 1); u32 res_len = (u32) gf_utf8_wcstombs(utf8str, len, (const unsigned short **) &src_str); utf8str[res_len] = 0; fprintf(stderr, "%s\n", utf8str); gf_free(utf8str); } } print_udta(file, 0, has_itags); fprintf(stderr, "\n"); for (i=0; i<gf_isom_get_track_count(file); i++) { DumpTrackInfo(file, i+1, 0, GF_TRUE, dump_m4sys); } } #endif /*defined(GPAC_DISABLE_ISOM) || defined(GPAC_DISABLE_ISOM_WRITE)*/ #ifndef GPAC_DISABLE_MPEG2TS typedef struct { /* when writing to file */ FILE *pes_out; char dump[100]; #if 0 FILE *pes_out_nhml; char nhml[100]; FILE *pes_out_info; char info[100]; #endif Bool is_info_dumped; u32 prog_number; /* For logging timing information (PCR, PTS/DTS) */ FILE *timestamps_info_file; char timestamps_info_name[100]; /* when dumping TS information */ u32 dump_pid; Bool has_seen_pat; } GF_M2TS_Dump; static void on_m2ts_dump_event(GF_M2TS_Demuxer *ts, u32 evt_type, void *par) { u32 i, count; GF_M2TS_Program *prog; GF_M2TS_PES_PCK *pck; GF_M2TS_Dump *dumper = (GF_M2TS_Dump *)ts->user; switch (evt_type) { case GF_M2TS_EVT_PAT_FOUND: if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } break; case GF_M2TS_EVT_PAT_UPDATE: if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } break; case GF_M2TS_EVT_PAT_REPEAT: /* WARNING: We detect the pat on a repetition, probably to ensure that we also have seen all the PMT To be checked */ dumper->has_seen_pat = 1; if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } // fprintf(stderr, "Repeated PAT found - %d programs\n", gf_list_count(ts->programs) ); break; case GF_M2TS_EVT_CAT_FOUND: if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } break; case GF_M2TS_EVT_CAT_UPDATE: if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } break; case GF_M2TS_EVT_CAT_REPEAT: if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } break; case GF_M2TS_EVT_PMT_FOUND: prog = (GF_M2TS_Program*)par; if (gf_list_count(ts->programs)>1 && prog->number!=dumper->prog_number) break; count = gf_list_count(prog->streams); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("Program number %d found - %d streams:\n", prog->number, count)); for (i=0; i<count; i++) { GF_M2TS_ES *es = gf_list_get(prog->streams, i); if (es->pid == prog->pmt_pid) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("\tPID %d: Program Map Table\n", es->pid)); } else { GF_M2TS_PES *pes = (GF_M2TS_PES *)es; gf_m2ts_set_pes_framing(pes, dumper->pes_out ? 
GF_M2TS_PES_FRAMING_RAW : GF_M2TS_PES_FRAMING_DEFAULT); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("\tPID %d: %s ", pes->pid, gf_m2ts_get_stream_name(pes->stream_type) )); if (pes->mpeg4_es_id) GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, (" - MPEG-4 ES ID %d", pes->mpeg4_es_id)); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("\n")); } } if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, prog->pmt_pid); } break; case GF_M2TS_EVT_PMT_UPDATE: prog = (GF_M2TS_Program*)par; if (gf_list_count(ts->programs)>1 && prog->number!=dumper->prog_number) break; if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, prog->pmt_pid); } break; case GF_M2TS_EVT_PMT_REPEAT: prog = (GF_M2TS_Program*)par; if (gf_list_count(ts->programs)>1 && prog->number!=dumper->prog_number) break; if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, prog->pmt_pid); } break; case GF_M2TS_EVT_SDT_FOUND: #ifndef GPAC_DISABLE_LOG count = gf_list_count(ts->SDTs) ; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("Program Description found - %d desc:\n", count)); for (i=0; i<count; i++) { GF_M2TS_SDT *sdt = gf_list_get(ts->SDTs, i); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("\tServiceID %d - Provider %s - Name %s\n", sdt->service_id, sdt->provider, sdt->service)); } #endif break; case GF_M2TS_EVT_SDT_UPDATE: #ifndef GPAC_DISABLE_LOG count = gf_list_count(ts->SDTs) ; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("Program Description updated - %d desc\n", count)); for (i=0; i<count; i++) { GF_M2TS_SDT *sdt = gf_list_get(ts->SDTs, i); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("\tServiceID %d - Provider %s - Name %s\n", sdt->service_id, sdt->provider, sdt->service)); } #endif break; case GF_M2TS_EVT_SDT_REPEAT: break; case GF_M2TS_EVT_PES_TIMING: pck = par; if (gf_list_count(ts->programs)>1 && pck->stream->program->number != dumper->prog_number) break; break; case GF_M2TS_EVT_PES_PCK: pck = par; if (gf_list_count(ts->programs)>1 && pck->stream->program->number != dumper->prog_number) break; if (dumper->has_seen_pat) { /*We need the interpolated PCR for the pcrb, hence moved this calculus out, and saving the calculated value in index_info to put it in the pcrb*/ GF_M2TS_PES *pes = pck->stream; /*FIXME : not used GF_M2TS_Program *prog = pes->program; */ /* Interpolated PCR value for the TS packet containing the PES header start */ u64 interpolated_pcr_value = 0; if (pes->last_pcr_value && pes->before_last_pcr_value_pck_number && pes->last_pcr_value > pes->before_last_pcr_value) { u32 delta_pcr_pck_num = pes->last_pcr_value_pck_number - pes->before_last_pcr_value_pck_number; u32 delta_pts_pcr_pck_num = pes->pes_start_packet_number - pes->last_pcr_value_pck_number; u64 delta_pcr_value = pes->last_pcr_value - pes->before_last_pcr_value; if ((pes->pes_start_packet_number > pes->last_pcr_value_pck_number) && (pes->last_pcr_value > pes->before_last_pcr_value)) { pes->last_pcr_value = pes->before_last_pcr_value; } /* we can compute the interpolated pcr value for the packet containing the PES header */ interpolated_pcr_value = pes->last_pcr_value + (u64)((delta_pcr_value*delta_pts_pcr_pck_num*1.0)/delta_pcr_pck_num); } if (dumper->timestamps_info_file) { Double diff; fprintf(dumper->timestamps_info_file, "%u\t%d\t", pck->stream->pes_start_packet_number, pck->stream->pid); if (interpolated_pcr_value) fprintf(dumper->timestamps_info_file, "%f", interpolated_pcr_value/(300.0 * 90000)); fprintf(dumper->timestamps_info_file, "\t"); if 
(pck->DTS) fprintf(dumper->timestamps_info_file, "%f", (pck->DTS / 90000.0)); fprintf(dumper->timestamps_info_file, "\t%f\t%d\t%d", pck->PTS / 90000.0, (pck->flags & GF_M2TS_PES_PCK_RAP) ? 1 : 0, (pck->flags & GF_M2TS_PES_PCK_DISCONTINUITY) ? 1 : 0); if (interpolated_pcr_value) { diff = (pck->DTS ? pck->DTS : pck->PTS) / 90000.0; diff -= pes->last_pcr_value / (300.0 * 90000); fprintf(dumper->timestamps_info_file, "\t%f\n", diff); if (diff<0) { M4_LOG(GF_LOG_WARNING, ("Warning: detected PTS/DTS value less than current PCR of %g sec\n", diff)); } } else { fprintf(dumper->timestamps_info_file, "\t\n"); } } } if (dumper->has_seen_pat && dumper->pes_out && (dumper->dump_pid == pck->stream->pid)) { gf_fwrite(pck->data, pck->data_len, dumper->pes_out); } break; case GF_M2TS_EVT_PES_PCR: pck = par; if (gf_list_count(ts->programs)>1 && pck->stream->program->number != dumper->prog_number) break; if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\t%f\t\t\t\t%d\n", pck->stream->program->last_pcr_value_pck_number, pck->stream->pid, pck->PTS / (300*90000.0), (pck->flags & GF_M2TS_PES_PCK_DISCONTINUITY) ? 1 : 0); } break; case GF_M2TS_EVT_SL_PCK: #if 0 { GF_M2TS_SL_PCK *sl_pck = par; if (dumper->pes_out && (dumper->dump_pid == sl_pck->stream->pid)) { GF_SLHeader header; u32 header_len; if (sl_pck->stream->mpeg4_es_id) { GF_ESD *esd = ((GF_M2TS_PES*)sl_pck->stream)->esd; if (!dumper->is_info_dumped) { if (esd->decoderConfig->decoderSpecificInfo) gf_fwrite(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, dumper->pes_out_info); dumper->is_info_dumped = 1; fprintf(dumper->pes_out_nhml, "<NHNTStream version=\"1.0\" "); fprintf(dumper->pes_out_nhml, "timeScale=\"%d\" ", esd->slConfig->timestampResolution); fprintf(dumper->pes_out_nhml, "streamType=\"%d\" ", esd->decoderConfig->streamType); fprintf(dumper->pes_out_nhml, "objectTypeIndication=\"%d\" ", esd->decoderConfig->objectTypeIndication); if (esd->decoderConfig->decoderSpecificInfo) fprintf(dumper->pes_out_nhml, "specificInfoFile=\"%s\" ", dumper->info); fprintf(dumper->pes_out_nhml, "baseMediaFile=\"%s\" ", dumper->dump); fprintf(dumper->pes_out_nhml, "inRootOD=\"yes\">\n"); } gf_sl_depacketize(esd->slConfig, &header, sl_pck->data, sl_pck->data_len, &header_len); gf_fwrite(sl_pck->data+header_len, sl_pck->data_len-header_len, dumper->pes_out); fprintf(dumper->pes_out_nhml, "<NHNTSample DTS=\""LLD"\" dataLength=\"%d\" isRAP=\"%s\"/>\n", header.decodingTimeStamp, sl_pck->data_len-header_len, (header.randomAccessPointFlag?"yes":"no")); } } } #endif break; } } void dump_mpeg2_ts(char *mpeg2ts_file, char *out_name, Bool prog_num) { u8 data[188]; GF_M2TS_Dump dumper; u32 size; u64 fsize, fdone; GF_M2TS_Demuxer *ts; FILE *src; if (!prog_num && !out_name) { fprintf(stderr, "No program number nor output filename specified. 
No timestamp file will be generated."); } src = gf_fopen(mpeg2ts_file, "rb"); if (!src) { M4_LOG(GF_LOG_ERROR, ("Cannot open %s: no such file\n", mpeg2ts_file)); return; } ts = gf_m2ts_demux_new(); ts->on_event = on_m2ts_dump_event; ts->notify_pes_timing = 1; memset(&dumper, 0, sizeof(GF_M2TS_Dump)); ts->user = &dumper; dumper.prog_number = prog_num; /*PES dumping*/ if (out_name) { char *pid = strrchr(out_name, '#'); if (pid) { dumper.dump_pid = atoi(pid+1); pid[0] = 0; sprintf(dumper.dump, "%s_%d.raw", out_name, dumper.dump_pid); dumper.pes_out = gf_fopen(dumper.dump, "wb"); #if 0 sprintf(dumper.nhml, "%s_%d.nhml", pes_out_name, dumper.dump_pid); dumper.pes_out_nhml = gf_fopen(dumper.nhml, "wt"); sprintf(dumper.info, "%s_%d.info", pes_out_name, dumper.dump_pid); dumper.pes_out_info = gf_fopen(dumper.info, "wb"); #endif pid[0] = '#'; } } gf_fseek(src, 0, SEEK_END); fsize = gf_ftell(src); gf_fseek(src, 0, SEEK_SET); /* first loop to process all packets between two PAT, and assume all signaling was found between these 2 PATs */ while (!feof(src)) { size = (u32) gf_fread(data, 188, src); if (size<188) break; gf_m2ts_process_data(ts, data, size); if (dumper.has_seen_pat) break; } dumper.has_seen_pat = GF_TRUE; if (!prog_num) { GF_M2TS_Program *p = gf_list_get(ts->programs, 0); if (p) prog_num = p->number; fprintf(stderr, "No program number specified, defaulting to first program\n"); } if (!prog_num && !out_name) { fprintf(stderr, "No program number nor output filename specified. No timestamp file will be generated\n"); } if (prog_num) { sprintf(dumper.timestamps_info_name, "%s_prog_%d_timestamps.txt", mpeg2ts_file, prog_num/*, mpeg2ts_file*/); dumper.timestamps_info_file = gf_fopen(dumper.timestamps_info_name, "wt"); if (!dumper.timestamps_info_file) { M4_LOG(GF_LOG_ERROR, ("Cannot open file %s\n", dumper.timestamps_info_name)); return; } fprintf(dumper.timestamps_info_file, "PCK#\tPID\tPCR\tDTS\tPTS\tRAP\tDiscontinuity\tDTS-PCR Diff\n"); } gf_m2ts_reset_parsers(ts); gf_fseek(src, 0, SEEK_SET); fdone = 0; while (!feof(src)) { size = (u32) gf_fread(data, 188, src); if (size<188) break; gf_m2ts_process_data(ts, data, size); fdone += size; gf_set_progress("MPEG-2 TS Parsing", fdone, fsize); } gf_fclose(src); gf_m2ts_demux_del(ts); if (dumper.pes_out) gf_fclose(dumper.pes_out); #if 0 if (dumper.pes_out_nhml) { if (dumper.is_info_dumped) fprintf(dumper.pes_out_nhml, "</NHNTStream>\n"); gf_fclose(dumper.pes_out_nhml); gf_fclose(dumper.pes_out_info); } #endif if (dumper.timestamps_info_file) gf_fclose(dumper.timestamps_info_file); } #endif /*GPAC_DISABLE_MPEG2TS*/ #include <gpac/download.h> #include <gpac/mpd.h> void get_file_callback(void *usr_cbk, GF_NETIO_Parameter *parameter) { if (parameter->msg_type==GF_NETIO_DATA_EXCHANGE) { u64 tot_size, done, max; u32 bps; gf_dm_sess_get_stats(parameter->sess, NULL, NULL, &tot_size, &done, &bps, NULL); if (tot_size) { max = done; max *= 100; max /= tot_size; fprintf(stderr, "download %02d %% at %05d kpbs\r", (u32) max, bps*8/1000); } } } static GF_DownloadSession *get_file(const char *url, GF_DownloadManager *dm, GF_Err *e) { GF_DownloadSession *sess; sess = gf_dm_sess_new(dm, url, GF_NETIO_SESSION_NOT_THREADED, get_file_callback, NULL, e); if (!sess) return NULL; *e = gf_dm_sess_process(sess); if (*e) { gf_dm_sess_del(sess); return NULL; } return sess; } static void revert_cache_file(char *item_path) { char szPATH[GF_MAX_PATH]; const char *url; GF_Config *cached; if (!strstr(item_path, "gpac_cache_")) { fprintf(stderr, "%s is not a gpac cache file\n", 
item_path); return; } if (!strncmp(item_path, "./", 2) || !strncmp(item_path, ".\\", 2)) item_path += 2; strcpy(szPATH, item_path); strcat(szPATH, ".txt"); cached = gf_cfg_new(NULL, szPATH); url = gf_cfg_get_key(cached, "cache", "url"); if (url) url = strstr(url, "://"); if (url) { u32 i, len, dir_len=0, k=0; char *sep; char *dst_name; sep = strstr(item_path, "gpac_cache_"); if (sep) { sep[0] = 0; dir_len = (u32) strlen(item_path); sep[0] = 'g'; } url+=3; len = (u32) strlen(url); dst_name = gf_malloc(len+dir_len+1); memset(dst_name, 0, len+dir_len+1); strncpy(dst_name, item_path, dir_len); k=dir_len; for (i=0; i<len; i++) { dst_name[k] = url[i]; if (dst_name[k]==':') dst_name[k]='_'; else if (dst_name[k]=='/') { if (!gf_dir_exists(dst_name)) gf_mkdir(dst_name); } k++; } if (gf_file_exists(item_path)) { gf_file_move(item_path, dst_name); } gf_free(dst_name); } else { M4_LOG(GF_LOG_ERROR, ("Failed to reverse %s cache file\n", item_path)); } gf_cfg_del(cached); gf_file_delete(szPATH); } GF_Err rip_mpd(const char *mpd_src, const char *output_dir) { GF_DownloadSession *sess; u32 i, connect_time, reply_time, download_time, req_hdr_size, rsp_hdr_size; GF_Err e; GF_DOMParser *mpd_parser=NULL; GF_MPD *mpd=NULL; GF_MPD_Period *period; GF_MPD_AdaptationSet *as; GF_MPD_Representation *rep; char szName[GF_MAX_PATH]; GF_DownloadManager *dm; if (output_dir) { char *sep; strcpy(szName, output_dir); sep = gf_file_basename(szName); if (sep) sep[0] = 0; gf_opts_set_key("temp", "cache", szName); } else { gf_opts_set_key("temp", "cache", "."); } gf_opts_set_key("temp", "clean-cache", "true"); dm = gf_dm_new(NULL); /* char *name = strrchr(mpd_src, '/'); if (!name) name = strrchr(mpd_src, '\\'); if (!name) name = "manifest.mpd"; else name ++; if (strchr(name, '?') || strchr(name, '&')) name = "manifest.mpd"; */ fprintf(stderr, "Downloading %s\n", mpd_src); sess = get_file(mpd_src, dm, &e); if (!sess) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error downloading MPD file %s: %s\n", mpd_src, gf_error_to_string(e) )); goto err_exit; } strcpy(szName, gf_dm_sess_get_cache_name(sess) ); gf_dm_sess_get_header_sizes_and_times(sess, &req_hdr_size, &rsp_hdr_size, &connect_time, &reply_time, &download_time); gf_dm_sess_del(sess); if (e) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error fetching MPD file %s: %s\n", mpd_src, gf_error_to_string(e))); goto err_exit; } else { GF_LOG(GF_LOG_INFO, GF_LOG_APP, ("Fetched file %s\n", mpd_src)); } GF_LOG(GF_LOG_DEBUG, GF_LOG_APP, ("GET Header size %d - Reply header size %d\n", req_hdr_size, rsp_hdr_size)); GF_LOG(GF_LOG_DEBUG, GF_LOG_APP, ("GET time: Connect Time %d - Reply Time %d - Download Time %d\n", connect_time, reply_time, download_time)); mpd_parser = gf_xml_dom_new(); e = gf_xml_dom_parse(mpd_parser, szName, NULL, NULL); if (e != GF_OK) { gf_xml_dom_del(mpd_parser); GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error parsing MPD %s : %s\n", mpd_src, gf_error_to_string(e))); return e; } mpd = gf_mpd_new(); e = gf_mpd_init_from_dom(gf_xml_dom_get_root(mpd_parser), mpd, mpd_src); gf_xml_dom_del(mpd_parser); mpd_parser=NULL; if (e) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error initializing MPD %s : %s\n", mpd_src, gf_error_to_string(e))); goto err_exit; } else { GF_LOG(GF_LOG_DEBUG, GF_LOG_APP, ("MPD %s initialized: %s\n", szName, gf_error_to_string(e))); } revert_cache_file(szName); if (mpd->type==GF_MPD_TYPE_DYNAMIC) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("MPD rip is not supported on live sources\n")); e = GF_NOT_SUPPORTED; goto err_exit; } i=0; while ((period = (GF_MPD_Period *) gf_list_enum(mpd->periods, 
&i))) { char *initTemplate = NULL; Bool segment_base = GF_FALSE; u32 j=0; if (period->segment_base) segment_base=GF_TRUE; if (period->segment_template && period->segment_template->initialization) { initTemplate = period->segment_template->initialization; } while ((as = gf_list_enum(period->adaptation_sets, &j))) { u32 k=0; if (!initTemplate && as->segment_template && as->segment_template->initialization) { initTemplate = as->segment_template->initialization; } if (as->segment_base) segment_base=GF_TRUE; while ((rep = gf_list_enum(as->representations, &k))) { u64 out_range_start, out_range_end, segment_duration; Bool is_in_base_url; char *seg_url; u32 seg_idx=0; if (rep->segment_template && rep->segment_template->initialization) { initTemplate = rep->segment_template->initialization; } else if (k>1) { initTemplate = NULL; } if (rep->segment_base) segment_base=GF_TRUE; e = gf_mpd_resolve_url(mpd, rep, as, period, mpd_src, 0, GF_MPD_RESOLVE_URL_INIT, 0, 0, &seg_url, &out_range_start, &out_range_end, &segment_duration, &is_in_base_url, NULL, NULL, NULL); if (e) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error resolving init segment name : %s\n", gf_error_to_string(e))); continue; } //not a byte range, replace URL if (segment_base) { } else if (out_range_start || out_range_end || !seg_url) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("byte range rip not yet implemented\n")); if (seg_url) gf_free(seg_url); e = GF_NOT_SUPPORTED; goto err_exit; } fprintf(stderr, "Downloading %s\n", seg_url); sess = get_file(seg_url, dm, &e); if (e) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error downloading init segment %s from MPD %s : %s\n", seg_url, mpd_src, gf_error_to_string(e))); goto err_exit; } revert_cache_file((char *) gf_dm_sess_get_cache_name(sess) ); gf_free(seg_url); gf_dm_sess_del(sess); if (segment_base) continue; while (1) { e = gf_mpd_resolve_url(mpd, rep, as, period, mpd_src, 0, GF_MPD_RESOLVE_URL_MEDIA, seg_idx, 0, &seg_url, &out_range_start, &out_range_end, &segment_duration, NULL, NULL, NULL, NULL); if (e) { if (e<0) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error resolving segment name : %s\n", gf_error_to_string(e))); } break; } seg_idx++; if (out_range_start || out_range_end || !seg_url) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("byte range rip not yet implemented\n")); if (seg_url) gf_free(seg_url); break; } fprintf(stderr, "Downloading %s\n", seg_url); sess = get_file(seg_url, dm, &e); if (e) { gf_free(seg_url); if (e != GF_URL_ERROR) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error downloading segment %s: %s\n", seg_url, gf_error_to_string(e))); } else { //todo, properly detect end of dash representation e = GF_OK; } break; } revert_cache_file((char *) gf_dm_sess_get_cache_name(sess) ); gf_free(seg_url); gf_dm_sess_del(sess); } } } } err_exit: if (mpd) gf_mpd_del(mpd); gf_dm_del(dm); return e; }
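/*
 * Illustrative sketch only (not part of MP4Box): one way the rip_mpd()
 * helper above could be driven. The manifest URL and output directory
 * below are hypothetical placeholders.
 */
#if 0
static GF_Err rip_mpd_example(void)
{
	/* fetch a static (VoD) MPD, then every init and media segment it
	   references; rip_mpd() refuses dynamic (live) manifests, and the
	   downloaded files are renamed back to their URL path layout by
	   revert_cache_file() */
	return rip_mpd("http://example.com/dash/manifest.mpd", "./ripped/");
}
#endif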
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2021 * All rights reserved * * This file is part of GPAC / mp4box application * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include "mp4box.h" #if defined(GPAC_DISABLE_ISOM) || defined(GPAC_DISABLE_ISOM_WRITE) #error "Cannot compile MP4Box if GPAC is not built with ISO File Format support" #else #ifndef GPAC_DISABLE_X3D #include <gpac/nodes_x3d.h> #endif #ifndef GPAC_DISABLE_BIFS #include <gpac/internal/bifs_dev.h> #endif #ifndef GPAC_DISABLE_VRML #include <gpac/nodes_mpeg4.h> #endif #include <gpac/constants.h> #include <gpac/avparse.h> #include <gpac/internal/media_dev.h> /*ISO 639 languages*/ #include <gpac/iso639.h> #include <gpac/mpegts.h> #ifndef GPAC_DISABLE_SMGR #include <gpac/scene_manager.h> #endif #include <gpac/internal/media_dev.h> #include <gpac/media_tools.h> /*for built-in box printing*/ #include <gpac/internal/isomedia_dev.h> extern u32 swf_flags; extern Float swf_flatten_angle; extern GF_FileType get_file_type_by_ext(char *inName); extern u32 fs_dump_flags; void scene_coding_log(void *cbk, GF_LOG_Level log_level, GF_LOG_Tool log_tool, const char *fmt, va_list vlist); #ifdef GPAC_DISABLE_LOG void mp4box_log(const char *fmt, ...) { va_list vl; va_start(vl, fmt); vfprintf(stderr, fmt, vlist); fflush(stderr); va_end(vl); } #endif u32 PrintLanguages(char *val, u32 opt) { u32 i=0, count = gf_lang_get_count(); fprintf(stderr, "Supported ISO 639 languages and codes:\n\n"); for (i=0; i<count; i++) { if (gf_lang_get_2cc(i)) { fprintf(stderr, "%s (%s - %s)\n", gf_lang_get_name(i), gf_lang_get_3cc(i), gf_lang_get_2cc(i)); } } return 1; } static const char *GetLanguage(char *lcode) { s32 idx = gf_lang_find(lcode); if (idx>=0) return gf_lang_get_name(idx); return lcode; } GF_Err dump_isom_cover_art(GF_ISOFile *file, char *inName, Bool is_final_name) { const u8 *tag; FILE *t; u32 tag_len; GF_Err e = gf_isom_apple_get_tag(file, GF_ISOM_ITUNE_COVER_ART, &tag, &tag_len); if (e!=GF_OK) { if (e==GF_URL_ERROR) { M4_LOG(GF_LOG_WARNING, ("No cover art found\n")); return GF_OK; } return e; } if (inName) { char szName[1024]; if (is_final_name) { strcpy(szName, inName); } else { sprintf(szName, "%s.%s", inName, (tag_len>>31) ? 
"png" : "jpg"); } t = gf_fopen(szName, "wb"); if (!t) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szName)); return GF_IO_ERR; } } else { t = stdout; } gf_fwrite(tag, tag_len & 0x7FFFFFFF, t); if (inName) gf_fclose(t); return GF_OK; } #ifndef GPAC_DISABLE_SCENE_DUMP GF_Err dump_isom_scene(char *file, char *inName, Bool is_final_name, GF_SceneDumpFormat dump_mode, Bool do_log, Bool no_odf_conv) { GF_Err e; GF_SceneManager *ctx; GF_SceneGraph *sg; GF_SceneLoader load; GF_FileType ftype; gf_log_cbk prev_logs = NULL; FILE *logs = NULL; sg = gf_sg_new(); ctx = gf_sm_new(sg); memset(&load, 0, sizeof(GF_SceneLoader)); load.fileName = file; load.ctx = ctx; load.swf_import_flags = swf_flags; if (dump_mode == GF_SM_DUMP_SVG) { load.swf_import_flags |= GF_SM_SWF_USE_SVG; load.svgOutFile = inName; } load.swf_flatten_limit = swf_flatten_angle; ftype = get_file_type_by_ext(file); if (ftype == GF_FILE_TYPE_ISO_MEDIA) { load.isom = gf_isom_open(file, GF_ISOM_OPEN_READ, NULL); if (!load.isom) { e = gf_isom_last_error(NULL); M4_LOG(GF_LOG_ERROR, ("Error opening file: %s\n", gf_error_to_string(e))); gf_sm_del(ctx); gf_sg_del(sg); return e; } if (no_odf_conv) gf_isom_disable_odf_conversion(load.isom, GF_TRUE); } else if (ftype==GF_FILE_TYPE_LSR_SAF) { load.isom = gf_isom_open("saf_conv", GF_ISOM_WRITE_EDIT, NULL); #ifndef GPAC_DISABLE_MEDIA_IMPORT if (load.isom) { GF_Fraction _frac = {0,0}; e = import_file(load.isom, file, 0, _frac, 0, NULL, NULL, 0); } else #else M4_LOG(GF_LOG_WARNING, ("Warning: GPAC was compiled without Media Import support\n")); #endif e = gf_isom_last_error(NULL); if (e) { M4_LOG(GF_LOG_ERROR, ("Error importing file: %s\n", gf_error_to_string(e))); gf_sm_del(ctx); gf_sg_del(sg); if (load.isom) gf_isom_delete(load.isom); return e; } } if (do_log) { char szLog[GF_MAX_PATH]; sprintf(szLog, "%s_dec.logs", inName); logs = gf_fopen(szLog, "wt"); gf_log_set_tool_level(GF_LOG_CODING, GF_LOG_DEBUG); prev_logs = gf_log_set_callback(logs, scene_coding_log); } e = gf_sm_load_init(&load); if (!e) e = gf_sm_load_run(&load); gf_sm_load_done(&load); if (logs) { gf_log_set_tool_level(GF_LOG_CODING, GF_LOG_ERROR); gf_log_set_callback(NULL, prev_logs); gf_fclose(logs); } if (!e && dump_mode != GF_SM_DUMP_SVG) { u32 count = gf_list_count(ctx->streams); if (count) fprintf(stderr, "Scene loaded - dumping %d systems streams\n", count); else fprintf(stderr, "Scene loaded - dumping root scene\n"); e = gf_sm_dump(ctx, inName, is_final_name, dump_mode); } gf_sm_del(ctx); gf_sg_del(sg); if (e) M4_LOG(GF_LOG_ERROR, ("Error loading scene: %s\n", gf_error_to_string(e))); if (load.isom) gf_isom_delete(load.isom); return e; } #endif #ifndef GPAC_DISABLE_SCENE_STATS static void dump_stats(FILE *dump, const GF_SceneStatistics *stats) { u32 i; s32 created, count, draw_created, draw_count, deleted, draw_deleted; created = count = draw_created = draw_count = deleted = draw_deleted = 0; fprintf(dump, "<NodeStatistics>\n"); fprintf(dump, "<General NumberOfNodeTypes=\"%d\"/>\n", gf_list_count(stats->node_stats)); for (i=0; i<gf_list_count(stats->node_stats); i++) { GF_NodeStats *ptr = gf_list_get(stats->node_stats, i); fprintf(dump, "<NodeStat NodeName=\"%s\">\n", ptr->name); switch (ptr->tag) { #ifndef GPAC_DISABLE_VRML case TAG_MPEG4_Bitmap: case TAG_MPEG4_Background2D: case TAG_MPEG4_Background: case TAG_MPEG4_Box: case TAG_MPEG4_Circle: case TAG_MPEG4_CompositeTexture2D: case TAG_MPEG4_CompositeTexture3D: case TAG_MPEG4_Cylinder: case TAG_MPEG4_Cone: case TAG_MPEG4_Curve2D: case TAG_MPEG4_Extrusion: case 
TAG_MPEG4_ElevationGrid: case TAG_MPEG4_IndexedFaceSet2D: case TAG_MPEG4_IndexedFaceSet: case TAG_MPEG4_IndexedLineSet2D: case TAG_MPEG4_IndexedLineSet: case TAG_MPEG4_PointSet2D: case TAG_MPEG4_PointSet: case TAG_MPEG4_Rectangle: case TAG_MPEG4_Sphere: case TAG_MPEG4_Text: case TAG_MPEG4_Ellipse: case TAG_MPEG4_XCurve2D: draw_count += ptr->nb_created + ptr->nb_used - ptr->nb_del; draw_deleted += ptr->nb_del; draw_created += ptr->nb_created; break; #endif /*GPAC_DISABLE_VRML*/ } fprintf(dump, "<Instanciation NbObjects=\"%d\" NbUse=\"%d\" NbDestroy=\"%d\"/>\n", ptr->nb_created, ptr->nb_used, ptr->nb_del); count += ptr->nb_created + ptr->nb_used; deleted += ptr->nb_del; created += ptr->nb_created; fprintf(dump, "</NodeStat>\n"); } if (i) { fprintf(dump, "<CumulatedStat TotalNumberOfNodes=\"%d\" ReallyAllocatedNodes=\"%d\" DeletedNodes=\"%d\" NumberOfAttributes=\"%d\"/>\n", count, created, deleted, stats->nb_svg_attributes); fprintf(dump, "<DrawableNodesCumulatedStat TotalNumberOfNodes=\"%d\" ReallyAllocatedNodes=\"%d\" DeletedNodes=\"%d\"/>\n", draw_count, draw_created, draw_deleted); } fprintf(dump, "</NodeStatistics>\n"); created = count = deleted = 0; if (gf_list_count(stats->proto_stats)) { fprintf(dump, "<ProtoStatistics NumberOfProtoUsed=\"%d\">\n", gf_list_count(stats->proto_stats)); for (i=0; i<gf_list_count(stats->proto_stats); i++) { GF_NodeStats *ptr = gf_list_get(stats->proto_stats, i); fprintf(dump, "<ProtoStat ProtoName=\"%s\">\n", ptr->name); fprintf(dump, "<Instanciation NbObjects=\"%d\" NbUse=\"%d\" NbDestroy=\"%d\"/>\n", ptr->nb_created, ptr->nb_used, ptr->nb_del); count += ptr->nb_created + ptr->nb_used; deleted += ptr->nb_del; created += ptr->nb_created; fprintf(dump, "</ProtoStat>\n"); } if (i) fprintf(dump, "<CumulatedStat TotalNumberOfProtos=\"%d\" ReallyAllocatedProtos=\"%d\" DeletedProtos=\"%d\"/>\n", count, created, deleted); fprintf(dump, "</ProtoStatistics>\n"); } fprintf(dump, "<FixedValues min=\"%f\" max=\"%f\">\n", FIX2FLT( stats->min_fixed) , FIX2FLT( stats->max_fixed )); fprintf(dump, "<Resolutions scaleIntegerPart=\"%d\" scaleFracPart=\"%d\" coordIntegerPart=\"%d\" coordFracPart=\"%d\"/>\n", stats->scale_int_res_2d, stats->scale_frac_res_2d, stats->int_res_2d, stats->frac_res_2d); fprintf(dump, "</FixedValues>\n"); fprintf(dump, "<FieldStatistic FieldType=\"MFVec2f\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\" NumRemoved=\"%d\"/>\n", stats->count_2d, stats->rem_2d); if (stats->count_2d) { fprintf(dump, "<ExtendInfo MinVec2f=\"%f %f\" MaxVec2f=\"%f %f\"/>\n", FIX2FLT( stats->min_2d.x) , FIX2FLT( stats->min_2d.y ), FIX2FLT( stats->max_2d.x ), FIX2FLT( stats->max_2d.y ) ); } fprintf(dump, "</FieldStatistic>\n"); fprintf(dump, "<FieldStatistic FieldType=\"MFVec3f\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\" NumRemoved=\"%d\"/>", stats->count_3d, stats->rem_3d); if (stats->count_3d) { fprintf(dump, "<ExtendInfo MinVec3f=\"%f %f %f\" MaxVec3f=\"%f %f %f\"/>\n", FIX2FLT( stats->min_3d.x ), FIX2FLT( stats->min_3d.y ), FIX2FLT( stats->min_3d.z ), FIX2FLT( stats->max_3d.x ), FIX2FLT( stats->max_3d.y ), FIX2FLT( stats->max_3d.z ) ); } fprintf(dump, "</FieldStatistic>\n"); fprintf(dump, "<FieldStatistic FieldType=\"MF/SFColor\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\" NumRemoved=\"%d\"/>", stats->count_color, stats->rem_color); fprintf(dump, "</FieldStatistic>\n"); fprintf(dump, "<FieldStatistic FieldType=\"MF/SFFloat\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\" NumRemoved=\"%d\"/>", stats->count_float, stats->rem_float); fprintf(dump, 
"</FieldStatistic>\n"); fprintf(dump, "<FieldStatistic FieldType=\"SFVec2f\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\"/>", stats->count_2f); fprintf(dump, "</FieldStatistic>\n"); fprintf(dump, "<FieldStatistic FieldType=\"SFVec3f\">\n"); fprintf(dump, "<ParsingInfo NumParsed=\"%d\"/>", stats->count_3f); fprintf(dump, "</FieldStatistic>\n"); } static void ReorderAU(GF_List *sample_list, GF_AUContext *au) { u32 i; for (i=0; i<gf_list_count(sample_list); i++) { GF_AUContext *ptr = gf_list_get(sample_list, i); if ( /*time ordered*/ (ptr->timing_sec > au->timing_sec) /*set bifs first*/ || ((ptr->timing_sec == au->timing_sec) && (ptr->owner->streamType < au->owner->streamType)) ) { gf_list_insert(sample_list, au, i); return; } } gf_list_add(sample_list, au); } void dump_isom_scene_stats(char *file, char *inName, Bool is_final_name, u32 stat_level) { GF_Err e; FILE *dump; Bool close; u32 i, j, count; char szBuf[1024]; GF_SceneManager *ctx; GF_SceneLoader load; GF_StatManager *sm; GF_List *sample_list; GF_SceneGraph *scene_graph; dump = NULL; sm = NULL; sample_list = NULL; close = 0; scene_graph = gf_sg_new(); ctx = gf_sm_new(scene_graph); memset(&load, 0, sizeof(GF_SceneLoader)); load.fileName = file; load.ctx = ctx; if (get_file_type_by_ext(file) == 1) { load.isom = gf_isom_open(file, GF_ISOM_OPEN_READ, NULL); if (!load.isom) { M4_LOG(GF_LOG_ERROR, ("Cannot open file: %s\n", gf_error_to_string(gf_isom_last_error(NULL)))); gf_sm_del(ctx); gf_sg_del(scene_graph); return; } } e = gf_sm_load_init(&load); if (!e) e = gf_sm_load_run(&load); gf_sm_load_done(&load); if (e<0) goto exit; if (inName) { strcpy(szBuf, inName); if (!is_final_name) strcat(szBuf, "_stat.xml"); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } close = 1; } else { dump = stdout; close = 0; } fprintf(stderr, "Analysing Scene\n"); fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"); fprintf(dump, "<!-- Scene Graph Statistics Generated by MP4Box - GPAC "); if (! gf_sys_is_test_mode()) fprintf(dump, "%s ", gf_gpac_version()); fprintf(dump, "-->\n"); fprintf(dump, "<SceneStatistics file=\"%s\" DumpType=\"%s\">\n", gf_file_basename(file), (stat_level==1) ? "full scene" : ((stat_level==2) ? 
"AccessUnit based" : "SceneGraph after each AU")); sm = gf_sm_stats_new(); /*stat level 1: complete scene stat*/ if (stat_level == 1) { e = gf_sm_stats_for_scene(sm, ctx); if (!e) dump_stats(dump, gf_sm_stats_get(sm) ); goto exit; } /*re_order all BIFS-AUs*/ sample_list = gf_list_new(); /*configure all systems streams we're dumping*/ for (i=0; i<gf_list_count(ctx->streams); i++) { GF_StreamContext *sc = gf_list_get(ctx->streams, i); if (sc->streamType != GF_STREAM_SCENE) continue; for (j=0; j<gf_list_count(sc->AUs); j++) { GF_AUContext *au = gf_list_get(sc->AUs, j); ReorderAU(sample_list, au); } } count = gf_list_count(sample_list); for (i=0; i<count; i++) { GF_AUContext *au = gf_list_get(sample_list, i); for (j=0; j<gf_list_count(au->commands); j++) { GF_Command *com = gf_list_get(au->commands, j); /*stat level 2 - get command stats*/ if (stat_level==2) { e = gf_sm_stats_for_command(sm, com); if (e) goto exit; } /*stat level 3 - apply command*/ if (stat_level==3) gf_sg_command_apply(scene_graph, com, 0); } /*stat level 3: get graph stat*/ if (stat_level==3) { e = gf_sm_stats_for_graph(sm, scene_graph); if (e) goto exit; } if (stat_level==2) { fprintf(dump, "<AUStatistics StreamID=\"%d\" AUTime=\""LLD"\">\n", au->owner->ESID, au->timing); } else { fprintf(dump, "<GraphStatistics StreamID=\"%d\" AUTime=\""LLD"\">\n", au->owner->ESID, au->timing); } /*dump stats*/ dump_stats(dump, gf_sm_stats_get(sm) ); /*reset stats*/ gf_sm_stats_reset(sm); if (stat_level==2) { fprintf(dump, "</AUStatistics>\n"); } else { fprintf(dump, "</GraphStatistics>\n"); } gf_set_progress("Analysing AU", i+1, count); } exit: if (sample_list) gf_list_del(sample_list); if (sm) gf_sm_stats_del(sm); gf_sm_del(ctx); gf_sg_del(scene_graph); if (load.isom) gf_isom_delete(load.isom); if (e) { M4_LOG(GF_LOG_ERROR, ("Stats error: %s\n", gf_error_to_string(e))); } else { fprintf(dump, "</SceneStatistics>\n"); } if (dump && close) gf_fclose(dump); fprintf(stderr, "done\n"); } #endif /*GPAC_DISABLE_SCENE_STATS*/ #ifndef GPAC_DISABLE_VRML static void PrintFixed(Fixed val, Bool add_space) { if (add_space) fprintf(stderr, " "); if (val==FIX_MIN) fprintf(stderr, "-I"); else if (val==FIX_MAX) fprintf(stderr, "+I"); else fprintf(stderr, "%g", FIX2FLT(val)); } static void PrintNodeSFField(u32 type, void *far_ptr) { if (!far_ptr) return; switch (type) { case GF_SG_VRML_SFBOOL: fprintf(stderr, "%s", (*(SFBool *)far_ptr) ? 
"TRUE" : "FALSE"); break; case GF_SG_VRML_SFINT32: fprintf(stderr, "%d", (*(SFInt32 *)far_ptr)); break; case GF_SG_VRML_SFFLOAT: PrintFixed((*(SFFloat *)far_ptr), 0); break; case GF_SG_VRML_SFTIME: fprintf(stderr, "%g", (*(SFTime *)far_ptr)); break; case GF_SG_VRML_SFVEC2F: PrintFixed(((SFVec2f *)far_ptr)->x, 0); PrintFixed(((SFVec2f *)far_ptr)->y, 1); break; case GF_SG_VRML_SFVEC3F: PrintFixed(((SFVec3f *)far_ptr)->x, 0); PrintFixed(((SFVec3f *)far_ptr)->y, 1); PrintFixed(((SFVec3f *)far_ptr)->z, 1); break; case GF_SG_VRML_SFROTATION: PrintFixed(((SFRotation *)far_ptr)->x, 0); PrintFixed(((SFRotation *)far_ptr)->y, 1); PrintFixed(((SFRotation *)far_ptr)->z, 1); PrintFixed(((SFRotation *)far_ptr)->q, 1); break; case GF_SG_VRML_SFCOLOR: PrintFixed(((SFColor *)far_ptr)->red, 0); PrintFixed(((SFColor *)far_ptr)->green, 1); PrintFixed(((SFColor *)far_ptr)->blue, 1); break; case GF_SG_VRML_SFSTRING: if (((SFString*)far_ptr)->buffer) fprintf(stderr, "\"%s\"", ((SFString*)far_ptr)->buffer); else fprintf(stderr, "NULL"); break; } } #endif #ifndef GPAC_DISABLE_VRML static void do_print_node(GF_Node *node, GF_SceneGraph *sg, const char *name, u32 graph_type, Bool is_nodefield, Bool do_cov) { u32 nbF, i; GF_FieldInfo f; #ifndef GPAC_DISABLE_BIFS u8 qt, at; Fixed bmin, bmax; u32 nbBits; #endif /*GPAC_DISABLE_BIFS*/ nbF = gf_node_get_field_count(node); if (is_nodefield) { char szField[1024]; u32 tfirst, tlast; if (gf_node_get_field_by_name(node, szField, &f) != GF_OK) { M4_LOG(GF_LOG_ERROR, ("Field %s is not a member of node %s\n", szField, name)); return; } fprintf(stderr, "Allowed nodes in %s.%s:\n", name, szField); if (graph_type==1) { tfirst = GF_NODE_RANGE_FIRST_X3D; tlast = GF_NODE_RANGE_LAST_X3D; } else { tfirst = GF_NODE_RANGE_FIRST_MPEG4; tlast = GF_NODE_RANGE_LAST_MPEG4; } for (i=tfirst; i<tlast; i++) { GF_Node *tmp = gf_node_new(sg, i); gf_node_register(tmp, NULL); if (gf_node_in_table_by_tag(i, f.NDTtype)) { const char *nname = gf_node_get_class_name(tmp); if (nname && strcmp(nname, "Unknown Node")) { fprintf(stderr, "\t%s\n", nname); } } gf_node_unregister(tmp, NULL); } return; } if (do_cov) { u32 ndt; if (graph_type==0) { u32 all; gf_node_mpeg4_type_by_class_name(name); gf_bifs_get_child_table(node); all = gf_node_get_num_fields_in_mode(node, GF_SG_FIELD_CODING_ALL); for (i=0; i<all; i++) { u32 res; gf_sg_script_get_field_index(node, i, GF_SG_FIELD_CODING_ALL, &res); } gf_node_get_num_fields_in_mode(node, GF_SG_FIELD_CODING_DEF); gf_node_get_num_fields_in_mode(node, GF_SG_FIELD_CODING_IN); gf_node_get_num_fields_in_mode(node, GF_SG_FIELD_CODING_OUT); gf_node_get_num_fields_in_mode(node, GF_SG_FIELD_CODING_DYN); } else if (graph_type==1) gf_node_x3d_type_by_class_name(name); for (ndt=NDT_SFWorldNode; ndt<NDT_LAST; ndt++) { gf_node_in_table_by_tag(gf_node_get_tag(node), ndt); } } fprintf(stderr, "%s {\n", name); for (i=0; i<nbF; i++) { gf_node_get_field(node, i, &f); if (graph_type==2) { fprintf(stderr, "\t%s=\"...\"\n", f.name); continue; } fprintf(stderr, "\t%s %s %s", gf_sg_vrml_get_event_type_name(f.eventType, 0), gf_sg_vrml_get_field_type_name(f.fieldType), f.name); if (f.fieldType==GF_SG_VRML_SFNODE) fprintf(stderr, " NULL"); else if (f.fieldType==GF_SG_VRML_MFNODE) fprintf(stderr, " []"); else if (gf_sg_vrml_is_sf_field(f.fieldType)) { fprintf(stderr, " "); PrintNodeSFField(f.fieldType, f.far_ptr); } else { void *ptr; u32 j, sftype; GenMFField *mffield = (GenMFField *) f.far_ptr; fprintf(stderr, " ["); sftype = gf_sg_vrml_get_sf_type(f.fieldType); for (j=0; j<mffield->count; j++) { 
if (j) fprintf(stderr, " "); gf_sg_vrml_mf_get_item(f.far_ptr, f.fieldType, &ptr, j); PrintNodeSFField(sftype, ptr); } fprintf(stderr, "]"); } #ifndef GPAC_DISABLE_BIFS if (gf_bifs_get_aq_info(node, i, &qt, &at, &bmin, &bmax, &nbBits)) { if (qt) { fprintf(stderr, " #QP=%d", qt); if (qt==13) fprintf(stderr, " NbBits=%d", nbBits); if (bmin && bmax) { fprintf(stderr, " Bounds=["); PrintFixed(bmin, 0); fprintf(stderr, ","); PrintFixed(bmax, 0); fprintf(stderr, "]"); } } } #endif /*GPAC_DISABLE_BIFS*/ fprintf(stderr, "\n"); if (do_cov) { gf_node_get_field_by_name(node, (char *) f.name, &f); } } fprintf(stderr, "}\n\n"); } #endif u32 PrintNode(const char *name, u32 graph_type) { #ifdef GPAC_DISABLE_VRML M4_LOG(GF_LOG_ERROR, ("VRML/MPEG-4/X3D scene graph is disabled in this build of GPAC\n")); return 2; #else const char *std_name; GF_Node *node; GF_SceneGraph *sg; u32 tag; #ifndef GPAC_DISABLE_BIFS #endif /*GPAC_DISABLE_BIFS*/ Bool is_nodefield = 0; char *sep = strchr(name, '.'); if (sep) { sep[0] = 0; is_nodefield = 1; } if (graph_type==1) { #ifndef GPAC_DISABLE_X3D tag = gf_node_x3d_type_by_class_name(name); std_name = "X3D"; #else M4_LOG(GF_LOG_ERROR, ("X3D node printing is not supported (X3D support disabled)\n")); return 2; #endif } else { tag = gf_node_mpeg4_type_by_class_name(name); std_name = "MPEG4"; } if (!tag) { M4_LOG(GF_LOG_ERROR, ("Unknown %s node %s\n", std_name, name)); return 2; } sg = gf_sg_new(); node = gf_node_new(sg, tag); gf_node_register(node, NULL); name = gf_node_get_class_name(node); if (!node) { M4_LOG(GF_LOG_ERROR, ("Node %s not supported in current built\n", name)); return 2; } do_print_node(node, sg, name, graph_type, is_nodefield, GF_FALSE); gf_node_unregister(node, NULL); gf_sg_del(sg); #endif /*GPAC_DISABLE_VRML*/ return 1; } u32 PrintBuiltInNodes(char *arg_val, u32 dump_type) { #if !defined(GPAC_DISABLE_VRML) && !defined(GPAC_DISABLE_X3D) && !defined(GPAC_DISABLE_SVG) GF_SceneGraph *sg; u32 i, nb_in, nb_not_in, start_tag, end_tag; u32 graph_type; Bool dump_nodes = ((dump_type==1) || (dump_type==3)) ? 
1 : 0; if (dump_type==4) graph_type = 2; else if ((dump_type==2) || (dump_type==3)) graph_type = 1; else graph_type = 0; if (graph_type==1) { #if !defined(GPAC_DISABLE_VRML) && !defined(GPAC_DISABLE_X3D) start_tag = GF_NODE_RANGE_FIRST_X3D; end_tag = TAG_LastImplementedX3D; #else M4_LOG(GF_LOG_ERROR, ("X3D scene graph disabled in this build of GPAC\n")); return 2; #endif } else if (graph_type==2) { #ifdef GPAC_DISABLE_SVG M4_LOG(GF_LOG_ERROR, ("SVG scene graph disabled in this build of GPAC\n")); return 2; #else start_tag = GF_NODE_RANGE_FIRST_SVG; end_tag = GF_NODE_RANGE_LAST_SVG; #endif } else { #ifdef GPAC_DISABLE_VRML M4_LOG(GF_LOG_ERROR, ("VRML/MPEG-4 scene graph disabled in this build of GPAC\n")); return 2; #else start_tag = GF_NODE_RANGE_FIRST_MPEG4; end_tag = TAG_LastImplementedMPEG4; #endif } nb_in = nb_not_in = 0; sg = gf_sg_new(); if (graph_type==1) { fprintf(stderr, "Available X3D nodes in this build (dumping):\n"); } else if (graph_type==2) { fprintf(stderr, "Available SVG nodes in this build (dumping and LASeR coding):\n"); } else { fprintf(stderr, "Available MPEG-4 nodes in this build (encoding/decoding/dumping):\n"); } for (i=start_tag; i<end_tag; i++) { GF_Node *node = gf_node_new(sg, i); if (node) { gf_node_register(node, NULL); if (dump_nodes) { do_print_node(node, sg, gf_node_get_class_name(node), graph_type, GF_FALSE, GF_TRUE); } else { fprintf(stderr, " %s\n", gf_node_get_class_name(node)); } gf_node_unregister(node, NULL); nb_in++; } else { if (graph_type==2) break; nb_not_in++; } } gf_sg_del(sg); if (graph_type==2) { fprintf(stderr, "\n%d nodes supported\n", nb_in); } else { fprintf(stderr, "\n%d nodes supported - %d nodes not supported\n", nb_in, nb_not_in); } //coverage if (dump_nodes) { for (i=GF_SG_VRML_SFBOOL; i<GF_SG_VRML_SCRIPT_FUNCTION; i++) { void *fp = gf_sg_vrml_field_pointer_new(i); if (fp) { if (i==GF_SG_VRML_SFSCRIPT) gf_free(fp); else gf_sg_vrml_field_pointer_del(fp, i); } } } #else M4_LOG(GF_LOG_ERROR, ("No scene graph enabled in this MP4Box build\n")); #endif return 1; } u32 PrintBuiltInBoxes(char *argval, u32 do_cov) { u32 i, count=gf_isom_get_num_supported_boxes(); fprintf(stdout, "<Boxes>\n"); //index 0 is our internal unknown box handler for (i=1; i<count; i++) { gf_isom_dump_supported_box(i, stdout); if (do_cov) { u32 btype = gf_isom_get_supported_box_type(i); GF_Box *b=gf_isom_box_new(btype); if (b) { GF_Box *c=NULL; gf_isom_clone_box(b, &c); if (c) gf_isom_box_del(c); gf_isom_box_del(b); } } } fprintf(stdout, "</Boxes>\n"); return 1; } #if !defined(GPAC_DISABLE_ISOM_HINTING) && !defined(GPAC_DISABLE_ISOM_DUMP) void dump_isom_rtp(GF_ISOFile *file, char *inName, Bool is_final_name) { u32 i, j, size; FILE *dump; const char *sdp; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) strcat(szBuf, "_rtp.xml"); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s\n", szBuf)); return; } } else { dump = stdout; } fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"); fprintf(dump, "<!-- MP4Box RTP trace -->\n"); fprintf(dump, "<RTPFile>\n"); for (i=0; i<gf_isom_get_track_count(file); i++) { if (gf_isom_get_media_type(file, i+1) != GF_ISOM_MEDIA_HINT) continue; fprintf(dump, "<RTPHintTrack trackID=\"%d\">\n", gf_isom_get_track_id(file, i+1)); gf_isom_sdp_track_get(file, i+1, &sdp, &size); fprintf(dump, "<SDPInfo>%s</SDPInfo>", sdp); #ifndef GPAC_DISABLE_ISOM_HINTING for (j=0; j<gf_isom_get_sample_count(file, i+1); j++) { gf_isom_dump_hint_sample(file, i+1, j+1, dump); } #endif fprintf(dump, 
"</RTPHintTrack>\n"); } fprintf(dump, "</RTPFile>\n"); if (inName) gf_fclose(dump); } #endif void dump_isom_timestamps(GF_ISOFile *file, char *inName, Bool is_final_name, u32 dump_mode) { u32 i, j, k, count; Bool has_ctts_error, is_fragmented=GF_FALSE; FILE *dump; Bool skip_offset = ((dump_mode==2) || (dump_mode==4)) ? GF_TRUE : GF_FALSE; Bool check_ts = ((dump_mode==3) || (dump_mode==4)) ? GF_TRUE : GF_FALSE; struct _ts_info { u64 dts; s64 cts; }; struct _ts_info *timings = NULL; u32 nb_timings=0, nb_timings_alloc = 0; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) strcat(szBuf, "_ts.txt"); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s\n", szBuf)); return; } } else { dump = stdout; } if (gf_isom_is_fragmented(file)) is_fragmented = GF_TRUE; has_ctts_error = GF_FALSE; for (i=0; i<gf_isom_get_track_count(file); i++) { s64 cts_dts_shift = gf_isom_get_cts_to_dts_shift(file, i+1); u32 has_cts_offset = gf_isom_has_time_offset(file, i+1); fprintf(dump, "#dumping track ID %d timing:\n", gf_isom_get_track_id(file, i + 1)); fprintf(dump, "Num\tDTS\tCTS\tSize\tRAP%s\tisLeading\tDependsOn\tDependedOn\tRedundant\tRAP-SampleGroup\tRoll-SampleGroup\tRoll-Distance", skip_offset ? "" : "\tOffset"); if (is_fragmented) { fprintf(dump, "\tfrag_start"); } fprintf(dump, "\n"); count = gf_isom_get_sample_count(file, i+1); if (has_cts_offset && check_ts) { if (nb_timings_alloc<count) { nb_timings_alloc = count; timings = gf_realloc(timings, sizeof (struct _ts_info) * count); } nb_timings = 0; } for (j=0; j<count; j++) { s64 cts; u64 dts, offset; u32 isLeading, dependsOn, dependedOn, redundant; Bool is_rap; GF_ISOSampleRollType roll_type; s32 roll_distance; u32 index; GF_ISOSample *samp = gf_isom_get_sample_info(file, i+1, j+1, &index, &offset); if (!samp) { fprintf(dump, " SAMPLE #%d IN TRACK #%d NOT THERE !!!\n", j+1, i+1); continue; } gf_isom_get_sample_flags(file, i+1, j+1, &isLeading, &dependsOn, &dependedOn, &redundant); gf_isom_get_sample_rap_roll_info(file, i+1, j+1, &is_rap, &roll_type, &roll_distance); dts = samp->DTS; cts = dts + (s32) samp->CTS_Offset; fprintf(dump, "Sample %d\tDTS "LLU"\tCTS "LLD"\t%d\t%d", j+1, dts, cts, samp->dataLength, samp->IsRAP); if (!skip_offset) fprintf(dump, "\t"LLU, offset); fprintf(dump, "\t%d\t%d\t%d\t%d\t%d\t%d\t%d", isLeading, dependsOn, dependedOn, redundant, is_rap, roll_type, roll_distance); if (cts< (s64) dts) { if (has_cts_offset==2) { if (cts_dts_shift && (cts+cts_dts_shift < (s64) dts)) { fprintf(dump, " #NEGATIVE CTS OFFSET!!!"); has_ctts_error = 1; } else if (!cts_dts_shift) { fprintf(dump, " #possible negative CTS offset (no cslg in file)"); } } else { fprintf(dump, " #NEGATIVE CTS OFFSET!!!"); has_ctts_error = 1; } } if (has_cts_offset && check_ts) { for (k=0; k<nb_timings; k++) { if (timings[k].dts==dts) { fprintf(dump, " #SAME DTS USED!!!"); has_ctts_error = 1; } if (timings[k].cts==cts) { fprintf(dump, " #SAME CTS USED!!! 
"); has_ctts_error = 1; } } timings[nb_timings].dts = dts; timings[nb_timings].cts = cts; nb_timings++; } gf_isom_sample_del(&samp); if (is_fragmented) { fprintf(dump, "\t%d", gf_isom_sample_is_fragment_start(file, i+1, j+1, NULL) ); } fprintf(dump, "\n"); gf_set_progress("Dumping track timing", j+1, count); } fprintf(dump, "\n\n"); gf_set_progress("Dumping track timing", count, count); } if (timings) gf_free(timings); if (inName) gf_fclose(dump); if (has_ctts_error) { M4_LOG(GF_LOG_ERROR, ("\tFile has CTTS table errors\n")); } } static u32 read_nal_size_hdr(u8 *ptr, u32 nalh_size) { u32 nal_size=0; u32 v = nalh_size; while (v) { nal_size |= (u8) *ptr; ptr++; v-=1; if (v) nal_size <<= 8; } return nal_size; } #ifndef GPAC_DISABLE_AV_PARSERS void gf_inspect_dump_nalu(FILE *dump, u8 *ptr, u32 ptr_size, Bool is_svc, HEVCState *hevc, AVCState *avc, VVCState *vvc, u32 nalh_size, Bool dump_crc, Bool is_encrypted); #endif static void dump_isom_nal_ex(GF_ISOFile *file, GF_ISOTrackID trackID, FILE *dump, u32 dump_flags) { u32 i, j, count, nb_descs, track, nalh_size, timescale, cur_extract_mode; s32 countRef; Bool is_adobe_protected = GF_FALSE; Bool is_cenc_protected = GF_FALSE; Bool is_hevc = GF_FALSE; Bool is_vvc = GF_FALSE; #ifndef GPAC_DISABLE_AV_PARSERS AVCState *avc_state = NULL; HEVCState *hevc_state = NULL; VVCState *vvc_state = NULL; #endif GF_AVCConfig *avccfg, *svccfg; GF_HEVCConfig *hevccfg, *lhvccfg; GF_VVCConfig *vvccfg; GF_NALUFFParam *slc; Bool has_svcc = GF_FALSE; track = gf_isom_get_track_by_id(file, trackID); count = gf_isom_get_sample_count(file, track); timescale = gf_isom_get_media_timescale(file, track); cur_extract_mode = gf_isom_get_nalu_extract_mode(file, track); nb_descs = gf_isom_get_sample_description_count(file, track); if (!nb_descs) { M4_LOG(GF_LOG_ERROR, ("Error: Track #%d has no sample description so is likely not NALU-based!\n", trackID)); return; } fprintf(dump, "<NALUTrack trackID=\"%d\" SampleCount=\"%d\" TimeScale=\"%d\">\n", trackID, count, timescale); #ifndef GPAC_DISABLE_AV_PARSERS #define DUMP_ARRAY(arr, name, loc, _is_svc)\ if (arr) {\ fprintf(dump, " <%sArray location=\"%s\">\n", name, loc);\ for (i=0; i<gf_list_count(arr); i++) {\ slc = gf_list_get(arr, i);\ fprintf(dump, " <NALU size=\"%d\" ", slc->size);\ gf_inspect_dump_nalu(dump, (u8 *) slc->data, slc->size, _is_svc, is_hevc ? hevc_state : NULL, avc_state, is_vvc ? vvc_state : NULL, nalh_size, (dump_flags&1) ? GF_TRUE : GF_FALSE, GF_FALSE);\ }\ fprintf(dump, " </%sArray>\n", name);\ }\ #else #define DUMP_ARRAY(arr, name, loc, _is_svc)\ if (arr) {\ fprintf(dump, " <%sArray location=\"%s\">\n", name, loc);\ for (i=0; i<gf_list_count(arr); i++) {\ slc = gf_list_get(arr, i);\ fprintf(dump, " <NALU size=\"%d\" ", slc->size);\ fprintf(dump, "/>\n");\ }\ fprintf(dump, " </%sArray>\n", name);\ }\ #endif nalh_size = 0; for (j=0; j<nb_descs; j++) { GF_AVCConfig *mvccfg; Bool is_svc; avccfg = gf_isom_avc_config_get(file, track, j+1); svccfg = gf_isom_svc_config_get(file, track, j+1); mvccfg = gf_isom_mvc_config_get(file, track, j+1); hevccfg = gf_isom_hevc_config_get(file, track, j+1); lhvccfg = gf_isom_lhvc_config_get(file, track, j+1); vvccfg = gf_isom_vvc_config_get(file, track, j+1); is_svc = (svccfg!=NULL) ? 
1:0; if (hevccfg || lhvccfg) { is_hevc = 1; #ifndef GPAC_DISABLE_AV_PARSERS GF_SAFEALLOC(hevc_state, HEVCState) #endif } else if (vvccfg) { is_vvc = 1; #ifndef GPAC_DISABLE_AV_PARSERS GF_SAFEALLOC(vvc_state, VVCState) #endif } else if (avccfg || svccfg || mvccfg) { #ifndef GPAC_DISABLE_AV_PARSERS GF_SAFEALLOC(avc_state, AVCState) #endif } //for tile tracks the hvcC is stored in the 'tbas' track if (!hevccfg && gf_isom_get_reference_count(file, track, GF_ISOM_REF_TBAS)) { u32 tk = 0; gf_isom_get_reference(file, track, GF_ISOM_REF_TBAS, 1, &tk); hevccfg = gf_isom_hevc_config_get(file, tk, 1); } fprintf(dump, " <NALUConfig>\n"); if (!avccfg && !svccfg && !hevccfg && !lhvccfg && !vvccfg) { M4_LOG(GF_LOG_ERROR, ("Error: Track #%d is not NALU or OBU based!\n", trackID)); return; } if (avccfg) { nalh_size = avccfg->nal_unit_size; DUMP_ARRAY(avccfg->sequenceParameterSets, "AVCSPS", "avcC", is_svc); DUMP_ARRAY(avccfg->pictureParameterSets, "AVCPPS", "avcC", is_svc) DUMP_ARRAY(avccfg->sequenceParameterSetExtensions, "AVCSPSEx", "avcC", is_svc) } if (is_svc) { if (!nalh_size) nalh_size = svccfg->nal_unit_size; DUMP_ARRAY(svccfg->sequenceParameterSets, "SVCSPS", "svcC", is_svc) DUMP_ARRAY(svccfg->pictureParameterSets, "SVCPPS", "svcC", is_svc) } if (mvccfg) { if (!nalh_size) nalh_size = mvccfg->nal_unit_size; DUMP_ARRAY(mvccfg->sequenceParameterSets, "SVCSPS", "mvcC", is_svc) DUMP_ARRAY(mvccfg->pictureParameterSets, "SVCPPS", "mvcC", is_svc) } if (hevccfg) { u32 idx; nalh_size = hevccfg->nal_unit_size; for (idx=0; idx<gf_list_count(hevccfg->param_array); idx++) { GF_NALUFFParamArray *ar = gf_list_get(hevccfg->param_array, idx); if (ar->type==GF_HEVC_NALU_SEQ_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCSPS", "hvcC", 0) } else if (ar->type==GF_HEVC_NALU_PIC_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCPPS", "hvcC", 0) } else if (ar->type==GF_HEVC_NALU_VID_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCVPS", "hvcC", 0) } else { DUMP_ARRAY(ar->nalus, "HEVCUnknownPS", "hvcC", 0) } } } if (vvccfg) { u32 idx; nalh_size = vvccfg->nal_unit_size; for (idx=0; idx<gf_list_count(vvccfg->param_array); idx++) { GF_NALUFFParamArray *ar = gf_list_get(vvccfg->param_array, idx); if (ar->type==GF_VVC_NALU_SEQ_PARAM) { DUMP_ARRAY(ar->nalus, "VVCSPS", "vvcC", 0) } else if (ar->type==GF_VVC_NALU_PIC_PARAM) { DUMP_ARRAY(ar->nalus, "VVCPPS", "vvcC", 0) } else if (ar->type==GF_VVC_NALU_VID_PARAM) { DUMP_ARRAY(ar->nalus, "VVCVPS", "vvcC", 0) } else { DUMP_ARRAY(ar->nalus, "VVCUnknownPS", "vvcC", 0) } } } if (lhvccfg) { u32 idx; nalh_size = lhvccfg->nal_unit_size; for (idx=0; idx<gf_list_count(lhvccfg->param_array); idx++) { GF_NALUFFParamArray *ar = gf_list_get(lhvccfg->param_array, idx); if (ar->type==GF_HEVC_NALU_SEQ_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCSPS", "lhcC", 0) } else if (ar->type==GF_HEVC_NALU_PIC_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCPPS", "lhcC", 0) } else if (ar->type==GF_HEVC_NALU_VID_PARAM) { DUMP_ARRAY(ar->nalus, "HEVCVPS", "lhcC", 0) } else { DUMP_ARRAY(ar->nalus, "HEVCUnknownPS", "lhcC", 0) } } } fprintf(dump, " </NALUConfig>\n"); if (avccfg) gf_odf_avc_cfg_del(avccfg); if (svccfg) { gf_odf_avc_cfg_del(svccfg); has_svcc = GF_TRUE; } if (hevccfg) gf_odf_hevc_cfg_del(hevccfg); if (vvccfg) gf_odf_vvc_cfg_del(vvccfg); if (lhvccfg) gf_odf_hevc_cfg_del(lhvccfg); } /*fixme: for dumping encrypted track: we don't have neither avccfg nor svccfg*/ if (!nalh_size) nalh_size = 4; /*for testing dependency*/ countRef = gf_isom_get_reference_count(file, track, GF_ISOM_REF_SCAL); if (countRef > 0) { GF_ISOTrackID refTrackID; fprintf(dump, " 
<SCALReferences>\n"); for (i = 1; i <= (u32) countRef; i++) { gf_isom_get_reference_ID(file, track, GF_ISOM_REF_SCAL, i, &refTrackID); fprintf(dump, " <SCALReference number=\"%d\" refTrackID=\"%d\"/>\n", i, refTrackID); } fprintf(dump, " </SCALReferences>\n"); } fprintf(dump, " <NALUSamples>\n"); gf_isom_set_nalu_extract_mode(file, track, GF_ISOM_NALU_EXTRACT_INSPECT); is_adobe_protected = gf_isom_is_adobe_protection_media(file, track, 1); is_cenc_protected = gf_isom_is_cenc_media(file, track, 1); for (i=0; i<count; i++) { u64 dts, cts; Bool is_rap; u32 size, nal_size, idx, di; u8 *ptr; GF_ISOSample *samp = gf_isom_get_sample(file, track, i+1, &di); if (!samp) { fprintf(dump, "<!-- Unable to fetch sample %d -->\n", i+1); continue; } dts = samp->DTS; cts = dts + (s32) samp->CTS_Offset; is_rap = samp->IsRAP; if (!is_rap) gf_isom_get_sample_rap_roll_info(file, track, i+1, &is_rap, NULL, NULL); if (dump_flags&2) { fprintf(dump, " <Sample size=\"%d\" RAP=\"%d\"", samp->dataLength, is_rap); } else { fprintf(dump, " <Sample DTS=\""LLD"\" CTS=\""LLD"\" size=\"%d\" RAP=\"%d\"", dts, cts, samp->dataLength, is_rap); } if (nb_descs>1) fprintf(dump, " sample_description=\"%d\"", di); fprintf(dump, " >\n"); if (cts<dts) fprintf(dump, "<!-- NEGATIVE CTS OFFSET! -->\n"); idx = 1; ptr = samp->data; size = samp->dataLength; if (is_adobe_protected) { u8 encrypted_au = ptr[0]; if (encrypted_au) { fprintf(dump, " <!-- Sample number %d is an Adobe's protected sample: can not be dumped -->\n", i+1); fprintf(dump, " </Sample>\n\n"); continue; } else { ptr++; size--; } } while (size) { nal_size = read_nal_size_hdr(ptr, nalh_size); ptr += nalh_size; if (nal_size >= UINT_MAX-nalh_size || nalh_size + nal_size > size) { fprintf(dump, " <!-- NALU number %d is corrupted: size is %d but only %d remains -->\n", idx, nal_size, size); break; } else { fprintf(dump, " <NALU size=\"%d\" ", nal_size); #ifndef GPAC_DISABLE_AV_PARSERS Bool is_encrypted = 0; if (is_cenc_protected) { GF_Err e = gf_isom_get_sample_cenc_info(file, track, i + 1, &is_encrypted, NULL, NULL, NULL, NULL); if (e != GF_OK) { fprintf(dump, "dump_msg=\"Error %s while fetching encryption info for sample, assuming sample is encrypted\" ", gf_error_to_string(e) ); is_encrypted = GF_TRUE; } } gf_inspect_dump_nalu(dump, ptr, nal_size, has_svcc ? 
1 : 0, hevc_state, avc_state, vvc_state, nalh_size, dump_flags, is_encrypted); #else fprintf(dump, "/>\n"); #endif } idx++; ptr+=nal_size; size -= nal_size + nalh_size; } fprintf(dump, " </Sample>\n"); gf_isom_sample_del(&samp); fprintf(dump, "\n"); gf_set_progress("Analysing Track NALUs", i+1, count); } fprintf(dump, " </NALUSamples>\n"); fprintf(dump, "</NALUTrack>\n"); gf_isom_set_nalu_extract_mode(file, track, cur_extract_mode); #ifndef GPAC_DISABLE_AV_PARSERS if (hevc_state) gf_free(hevc_state); if (vvc_state) gf_free(vvc_state); if (avc_state) gf_free(avc_state); #endif } static void dump_isom_obu(GF_ISOFile *file, GF_ISOTrackID trackID, FILE *dump, Bool dump_crc); static void dump_qt_prores(GF_ISOFile *file, GF_ISOTrackID trackID, FILE *dump, Bool dump_crc); void dump_isom_nal(GF_ISOFile *file, GF_ISOTrackID trackID, char *inName, Bool is_final_name, u32 dump_flags) { Bool is_av1 = GF_FALSE; Bool is_prores = GF_FALSE; FILE *dump; if (inName) { GF_ESD* esd; char szBuf[GF_MAX_PATH]; strcpy(szBuf, inName); u32 track = gf_isom_get_track_by_id(file, trackID); esd = gf_isom_get_esd(file, track, 1); if (!esd || !esd->decoderConfig) { switch (gf_isom_get_media_subtype(file, track, 1)) { case GF_ISOM_SUBTYPE_AV01: is_av1 = GF_TRUE; break; case GF_QT_SUBTYPE_APCH: case GF_QT_SUBTYPE_APCO: case GF_QT_SUBTYPE_APCN: case GF_QT_SUBTYPE_APCS: case GF_QT_SUBTYPE_AP4X: case GF_QT_SUBTYPE_AP4H: is_prores = GF_TRUE; break; } } else if (esd->decoderConfig->objectTypeIndication == GF_CODECID_AV1) { is_av1 = GF_TRUE; } if (esd) gf_odf_desc_del((GF_Descriptor*)esd); if (!is_final_name) sprintf(szBuf, "%s_%d_%s.xml", inName, trackID, is_av1 ? "obu" : "nalu"); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } } else { dump = stdout; } if (is_av1) dump_isom_obu(file, trackID, dump, dump_flags); else if (is_prores) dump_qt_prores(file, trackID, dump, dump_flags); else dump_isom_nal_ex(file, trackID, dump, dump_flags); if (inName) gf_fclose(dump); } #ifndef GPAC_DISABLE_AV_PARSERS void gf_inspect_dump_obu(FILE *dump, AV1State *av1, u8 *obu, u64 obu_length, ObuType obu_type, u64 obu_size, u32 hdr_size, Bool dump_crc); #endif static void dump_isom_obu(GF_ISOFile *file, GF_ISOTrackID trackID, FILE *dump, Bool dump_crc) { #ifndef GPAC_DISABLE_AV_PARSERS u32 i, count, track, timescale; AV1State av1; ObuType obu_type; u64 obu_size; u32 hdr_size; GF_BitStream *bs; u32 idx; track = gf_isom_get_track_by_id(file, trackID); gf_av1_init_state(&av1); av1.config = gf_isom_av1_config_get(file, track, 1); if (!av1.config) { M4_LOG(GF_LOG_ERROR, ("Error: Track #%d is not AV1!\n", trackID)); return; } count = gf_isom_get_sample_count(file, track); timescale = gf_isom_get_media_timescale(file, track); fprintf(dump, "<OBUTrack trackID=\"%d\" SampleCount=\"%d\" TimeScale=\"%d\">\n", trackID, count, timescale); fprintf(dump, " <OBUConfig>\n"); for (i=0; i<gf_list_count(av1.config->obu_array); i++) { GF_AV1_OBUArrayEntry *obu = gf_list_get(av1.config->obu_array, i); bs = gf_bs_new(obu->obu, (u32) obu->obu_length, GF_BITSTREAM_READ); gf_av1_parse_obu(bs, &obu_type, &obu_size, &hdr_size, &av1); gf_inspect_dump_obu(dump, &av1, obu->obu, obu->obu_length, obu_type, obu_size, hdr_size, dump_crc); gf_bs_del(bs); } fprintf(dump, " </OBUConfig>\n"); fprintf(dump, " <OBUSamples>\n"); for (i=0; i<count; i++) { u64 dts, cts; u32 size; u8 *ptr; GF_ISOSample *samp = gf_isom_get_sample(file, track, i+1, NULL); if (!samp) { fprintf(dump, "<!-- Unable to fetch sample %d -->\n", 
i+1); continue; } dts = samp->DTS; cts = dts + (s32) samp->CTS_Offset; fprintf(dump, " <Sample number=\"%d\" DTS=\""LLD"\" CTS=\""LLD"\" size=\"%d\" RAP=\"%d\" >\n", i+1, dts, cts, samp->dataLength, samp->IsRAP); if (cts<dts) fprintf(dump, "<!-- NEGATIVE CTS OFFSET! -->\n"); idx = 1; ptr = samp->data; size = samp->dataLength; bs = gf_bs_new(ptr, size, GF_BITSTREAM_READ); while (size) { gf_av1_parse_obu(bs, &obu_type, &obu_size, &hdr_size, &av1); if (obu_size > size) { fprintf(dump, " <!-- OBU number %d is corrupted: size is %d but only %d remains -->\n", idx, (u32) obu_size, size); break; } gf_inspect_dump_obu(dump, &av1, ptr, obu_size, obu_type, obu_size, hdr_size, dump_crc); ptr += obu_size; size -= (u32)obu_size; idx++; } gf_bs_del(bs); fprintf(dump, " </Sample>\n"); gf_isom_sample_del(&samp); fprintf(dump, "\n"); gf_set_progress("Analysing Track OBUs", i+1, count); } fprintf(dump, " </OBUSamples>\n"); fprintf(dump, "</OBUTrack>\n"); if (av1.config) gf_odf_av1_cfg_del(av1.config); gf_av1_reset_state(&av1, GF_TRUE); #endif } static void dump_qt_prores(GF_ISOFile *file, u32 trackID, FILE *dump, Bool dump_crc) { #ifndef GPAC_DISABLE_AV_PARSERS u32 i, count, track, timescale; track = gf_isom_get_track_by_id(file, trackID); count = gf_isom_get_sample_count(file, track); timescale = gf_isom_get_media_timescale(file, track); fprintf(dump, "<ProResTrack trackID=\"%d\" SampleCount=\"%d\" TimeScale=\"%d\">\n", trackID, count, timescale); for (i=0; i<count; i++) { void gf_inspect_dump_prores(FILE *dump, u8 *ptr, u64 frame_size, Bool dump_crc); u64 dts, cts; GF_ISOSample *samp = gf_isom_get_sample(file, track, i+1, NULL); if (!samp) { fprintf(dump, "<!-- Unable to fetch sample %d -->\n", i+1); continue; } dts = samp->DTS; cts = dts + (s32) samp->CTS_Offset; if (cts!=dts) fprintf(dump, "<!-- Wrong timing info (CTS "LLD" vs DTS "LLD") ! -->\n", cts, dts); if (!samp->IsRAP) fprintf(dump, "<!-- Wrong sync sample info, sample is not SAP1 ! 
-->\n"); fprintf(dump, " <Sample number=\"%d\" CTS=\""LLD"\" size=\"%d\">\n", i+1, cts, samp->dataLength); gf_inspect_dump_prores(dump, samp->data, samp->dataLength, dump_crc); fprintf(dump, " </Sample>\n"); gf_isom_sample_del(&samp); fprintf(dump, "\n"); gf_set_progress("Analysing ProRes Track", i+1, count); } fprintf(dump, "</ProResTrack>\n"); #endif } void dump_isom_saps(GF_ISOFile *file, GF_ISOTrackID trackID, u32 dump_saps_mode, char *inName, Bool is_final_name) { FILE *dump; u32 i, count; s64 media_offset=0; u32 track = gf_isom_get_track_by_id(file, trackID); if (inName) { char szBuf[GF_MAX_PATH]; strcpy(szBuf, inName); if (!is_final_name) sprintf(szBuf, "%s_%d_cues.xml", inName, trackID); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } } else { dump = stdout; } fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"); fprintf(dump, "<DASHCues xmlns=\"urn:gpac:dash:schema:cues:2018\">\n"); fprintf(dump, "<Stream id=\"%d\" timescale=\"%d\"", trackID, gf_isom_get_media_timescale(file, track) ); if (dump_saps_mode==4) { fprintf(dump, " mode=\"edit\""); gf_isom_get_edit_list_type(file, track, &media_offset); } fprintf(dump, ">\n"); count = gf_isom_get_sample_count(file, track); for (i=0; i<count; i++) { s64 cts, dts; u32 di; Bool traf_start = 0; u32 sap_type = 0; u64 doffset; GF_ISOSample *samp = gf_isom_get_sample_info(file, track, i+1, &di, &doffset); traf_start = gf_isom_sample_is_fragment_start(file, track, i+1, NULL); sap_type = samp->IsRAP; if (!sap_type) { Bool is_rap; GF_ISOSampleRollType roll_type; s32 roll_dist; gf_isom_get_sample_rap_roll_info(file, track, i+1, &is_rap, &roll_type, &roll_dist); if (roll_type) sap_type = SAP_TYPE_4; else if (is_rap) sap_type = SAP_TYPE_3; } if (!sap_type) { gf_isom_sample_del(&samp); continue; } dts = cts = samp->DTS; cts += samp->CTS_Offset; fprintf(dump, "<Cue sap=\"%d\"", sap_type); if (dump_saps_mode==4) { cts += media_offset; fprintf(dump, " cts=\""LLD"\"", cts); } else { if (!dump_saps_mode || (dump_saps_mode==1)) fprintf(dump, " sample=\"%d\"", i+1); if (!dump_saps_mode || (dump_saps_mode==2)) fprintf(dump, " cts=\""LLD"\"", cts); if (!dump_saps_mode || (dump_saps_mode==3)) fprintf(dump, " dts=\""LLD"\"", dts); } if (traf_start) fprintf(dump, " wasFragStart=\"yes\""); fprintf(dump, "/>\n"); gf_isom_sample_del(&samp); } fprintf(dump, "</Stream>\n"); fprintf(dump, "</DASHCues>\n"); if (inName) gf_fclose(dump); } #ifndef GPAC_DISABLE_ISOM_DUMP void dump_isom_ismacryp(GF_ISOFile *file, char *inName, Bool is_final_name) { u32 i, j; FILE *dump; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) strcat(szBuf, "_ismacryp.xml"); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } } else { dump = stdout; } fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"); fprintf(dump, "<!-- MP4Box ISMACryp trace -->\n"); fprintf(dump, "<ISMACrypFile>\n"); for (i=0; i<gf_isom_get_track_count(file); i++) { if (gf_isom_get_media_subtype(file, i+1, 1) != GF_ISOM_SUBTYPE_MPEG4_CRYP) continue; gf_isom_dump_ismacryp_protection(file, i+1, dump); fprintf(dump, "<ISMACrypTrack trackID=\"%d\">\n", gf_isom_get_track_id(file, i+1)); for (j=0; j<gf_isom_get_sample_count(file, i+1); j++) { gf_isom_dump_ismacryp_sample(file, i+1, j+1, dump); } fprintf(dump, "</ISMACrypTrack >\n"); } fprintf(dump, "</ISMACrypFile>\n"); if (inName) gf_fclose(dump); } void dump_isom_timed_text(GF_ISOFile *file, 
GF_ISOTrackID trackID, char *inName, Bool is_final_name, Bool is_convert, GF_TextDumpType dump_type) { FILE *dump; GF_Err e; u32 track; track = gf_isom_get_track_by_id(file, trackID); if (!track) { M4_LOG(GF_LOG_ERROR, ("Cannot find track ID %d\n", trackID)); return; } switch (gf_isom_get_media_type(file, track)) { case GF_ISOM_MEDIA_TEXT: case GF_ISOM_MEDIA_SUBT: break; default: M4_LOG(GF_LOG_ERROR, ("Track ID %d is not a 3GPP text track\n", trackID)); return; } if (inName) { char szBuf[1024]; char *ext; ext = ((dump_type==GF_TEXTDUMPTYPE_SVG) ? "svg" : ((dump_type==GF_TEXTDUMPTYPE_SRT) ? "srt" : "ttxt")); if (is_final_name) { strcpy(szBuf, inName) ; } else if (is_convert) sprintf(szBuf, "%s.%s", inName, ext) ; else sprintf(szBuf, "%s_%d_text.%s", inName, trackID, ext); dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } } else { dump = stdout; } e = gf_isom_text_dump(file, track, dump, dump_type); if (inName) gf_fclose(dump); if (e) { M4_LOG(GF_LOG_ERROR, ("Conversion failed (%s)\n", gf_error_to_string(e))); } else { fprintf(stderr, "Conversion done\n"); } } #endif /*GPAC_DISABLE_ISOM_DUMP*/ #ifndef GPAC_DISABLE_ISOM_HINTING void dump_isom_sdp(GF_ISOFile *file, char *inName, Bool is_final_name) { const char *sdp; u32 size, i; FILE *dump; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) { char *ext = strchr(szBuf, '.'); if (ext) ext[0] = 0; strcat(szBuf, "_sdp.txt"); } dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s for dumping\n", szBuf)); return; } } else { dump = stdout; fprintf(dump, "# File SDP content \n\n"); } //get the movie SDP gf_isom_sdp_get(file, &sdp, &size); if (sdp && size) fprintf(dump, "%s", sdp); fprintf(dump, "\r\n"); //then tracks for (i=0; i<gf_isom_get_track_count(file); i++) { if (gf_isom_get_media_type(file, i+1) != GF_ISOM_MEDIA_HINT) continue; gf_isom_sdp_track_get(file, i+1, &sdp, &size); fprintf(dump, "%s", sdp); } fprintf(dump, "\n\n"); if (inName) gf_fclose(dump); } #endif #ifndef GPAC_DISABLE_ISOM_DUMP GF_Err dump_isom_xml(GF_ISOFile *file, char *inName, Bool is_final_name, Bool do_track_dump, Bool merge_vtt_cues, Bool skip_init, Bool skip_samples) { GF_Err e; FILE *dump = stdout; Bool do_close=GF_FALSE; if (!file) return GF_ISOM_INVALID_FILE; if (inName) { char szBuf[1024]; strcpy(szBuf, inName); if (!is_final_name) { strcat(szBuf, do_track_dump ? 
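			/* output suffix reflects the dump mode: full track/sample dump vs box structure info only */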
"_dump.xml" : "_info.xml"); } dump = gf_fopen(szBuf, "wt"); if (!dump) { M4_LOG(GF_LOG_ERROR, ("Failed to open %s\n", szBuf)); return GF_IO_ERR; } do_close=GF_TRUE; } fprintf(dump, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"); if (do_track_dump) { fprintf(dump, "<ISOBaseMediaFileTrace>\n"); } e = gf_isom_dump(file, dump, skip_init, skip_samples); if (e) { M4_LOG(GF_LOG_ERROR, ("Error dumping ISO structure\n")); } if (do_track_dump) { #ifndef GPAC_DISABLE_MEDIA_EXPORT u32 i; //because of dump mode we need to reopen in regular read mode to avoid mem leaks GF_ISOFile *the_file = gf_isom_open(gf_isom_get_filename(file), GF_ISOM_OPEN_READ, NULL); u32 tcount = gf_isom_get_track_count(the_file); fprintf(dump, "<Tracks>\n"); for (i=0; i<tcount; i++) { GF_MediaExporter dumper; GF_ISOTrackID trackID = gf_isom_get_track_id(the_file, i+1); u32 mtype = gf_isom_get_media_type(the_file, i+1); u32 msubtype = gf_isom_get_media_subtype(the_file, i+1, 1); Bool fmt_handled = GF_FALSE; memset(&dumper, 0, sizeof(GF_MediaExporter)); dumper.file = the_file; dumper.trackID = trackID; dumper.dump_file = dump; if (mtype == GF_ISOM_MEDIA_HINT) { #ifndef GPAC_DISABLE_ISOM_HINTING char *name=NULL; if (msubtype==GF_ISOM_SUBTYPE_RTP) name = "RTPHintTrack"; else if (msubtype==GF_ISOM_SUBTYPE_SRTP) name = "SRTPHintTrack"; else if (msubtype==GF_ISOM_SUBTYPE_RRTP) name = "RTPReceptionHintTrack"; else if (msubtype==GF_ISOM_SUBTYPE_RTCP) name = "RTCPReceptionHintTrack"; else if (msubtype==GF_ISOM_SUBTYPE_FLUTE) name = "FLUTEReceptionHintTrack"; else name = "UnknownHintTrack"; fprintf(dump, "<%s trackID=\"%d\">\n", name, trackID); #ifndef GPAC_DISABLE_ISOM_HINTING u32 j, scount=gf_isom_get_sample_count(the_file, i+1); for (j=0; j<scount; j++) { gf_isom_dump_hint_sample(the_file, i+1, j+1, dump); } #endif fprintf(dump, "</%s>\n", name); fmt_handled = GF_TRUE; #endif /*GPAC_DISABLE_ISOM_HINTING*/ } else if (gf_isom_get_avc_svc_type(the_file, i+1, 1) || gf_isom_get_hevc_lhvc_type(the_file, i+1, 1)) { dump_isom_nal_ex(the_file, trackID, dump, GF_FALSE); fmt_handled = GF_TRUE; } else if ((mtype==GF_ISOM_MEDIA_TEXT) || (mtype==GF_ISOM_MEDIA_SUBT) ) { if (msubtype==GF_ISOM_SUBTYPE_WVTT) { gf_webvtt_dump_iso_track(&dumper, i+1, merge_vtt_cues, GF_TRUE); fmt_handled = GF_TRUE; } else if ((msubtype==GF_ISOM_SUBTYPE_TX3G) || (msubtype==GF_ISOM_SUBTYPE_TEXT)) { gf_isom_text_dump(the_file, i+1, dump, GF_TEXTDUMPTYPE_TTXT_BOXES); fmt_handled = GF_TRUE; } } if (!fmt_handled) { dumper.flags = GF_EXPORT_NHML | GF_EXPORT_NHML_FULL; dumper.print_stats_graph = fs_dump_flags; gf_media_export(&dumper); } } #else return GF_NOT_SUPPORTED; #endif /*GPAC_DISABLE_MEDIA_EXPORT*/ gf_isom_delete(the_file); fprintf(dump, "</Tracks>\n"); fprintf(dump, "</ISOBaseMediaFileTrace>\n"); } if (do_close) gf_fclose(dump); return e; } #endif static char *format_duration(u64 dur, u32 timescale, char *szDur) { u32 h, m, s, ms; if ((dur==(u64) -1) || (dur==(u32) -1)) { strcpy(szDur, "Unknown"); return szDur; } dur = (u64) (( ((Double) (s64) dur)/timescale)*1000); h = (u32) (dur / 3600000); m = (u32) (dur/ 60000) - h*60; s = (u32) (dur/1000) - h*3600 - m*60; ms = (u32) (dur) - h*3600000 - m*60000 - s*1000; if (h<=24) { sprintf(szDur, "%02d:%02d:%02d.%03d", h, m, s, ms); } else { u32 d = (u32) (dur / 3600000 / 24); h = (u32) (dur/3600000)-24*d; if (d<=365) { sprintf(szDur, "%d Days, %02d:%02d:%02d.%03d", d, h, m, s, ms); } else { u32 y=0; while (d>365) { y++; d-=365; if (y%4) d--; } sprintf(szDur, "%d Years %d Days, %02d:%02d:%02d.%03d", y, d, h, m, s, ms); } } return 
szDur; } static char *format_date(u64 time, char *szTime) { time_t now; if (!time) { strcpy(szTime, "UNKNOWN DATE"); } else { time -= 2082844800; now = (u32) time; sprintf(szTime, "GMT %s", asctime(gf_gmtime(&now)) ); } return szTime; } void print_udta(GF_ISOFile *file, u32 track_number, Bool has_itags) { u32 i, count; count = gf_isom_get_udta_count(file, track_number); if (!count) return; if (has_itags) { for (i=0; i<count; i++) { u32 type; bin128 uuid; gf_isom_get_udta_type(file, track_number, i+1, &type, &uuid); if (type == GF_ISOM_BOX_TYPE_META) { count--; break; } } if (!count) return; } fprintf(stderr, "%d UDTA types: ", count); for (i=0; i<count; i++) { u32 j, type, nb_items, first=GF_TRUE; bin128 uuid; gf_isom_get_udta_type(file, track_number, i+1, &type, &uuid); nb_items = gf_isom_get_user_data_count(file, track_number, type, uuid); fprintf(stderr, "%s (%d) ", gf_4cc_to_str(type), nb_items); for (j=0; j<nb_items; j++) { u8 *udta=NULL; u32 udta_size; gf_isom_get_user_data(file, track_number, type, uuid, j+1, &udta, &udta_size); if (!udta) continue; if (udta_size && gf_utf8_is_legal(udta, udta_size)) { u32 idx; if (first) { fprintf(stderr, "\n"); first = GF_FALSE; } fprintf(stderr, "\t"); for (idx=0; idx<udta_size; idx++) { if (!udta[idx]) break; fprintf(stderr, "%c", udta[idx]); } fprintf(stderr, "\n"); } gf_free(udta); } } fprintf(stderr, "\n"); } GF_Err dump_isom_udta(GF_ISOFile *file, char *inName, Bool is_final_name, u32 dump_udta_type, u32 dump_udta_track) { u8 *data; FILE *t; bin128 uuid; u32 count, res; GF_Err e; memset(uuid, 0, 16); count = gf_isom_get_user_data_count(file, dump_udta_track, dump_udta_type, uuid); if (!count) { M4_LOG(GF_LOG_ERROR, ("No UDTA for type %s found\n", gf_4cc_to_str(dump_udta_type) )); return GF_NOT_FOUND; } data = NULL; count = 0; e = gf_isom_get_user_data(file, dump_udta_track, dump_udta_type, uuid, 0, &data, &count); if (e) { M4_LOG(GF_LOG_ERROR, ("Error dumping UDTA %s: %s\n", gf_4cc_to_str(dump_udta_type), gf_error_to_string(e) )); return e; } if (inName) { char szName[1024]; if (is_final_name) strcpy(szName, inName); else sprintf(szName, "%s_%s.udta", inName, gf_4cc_to_str(dump_udta_type) ); t = gf_fopen(szName, "wb"); if (!t) { gf_free(data); M4_LOG(GF_LOG_ERROR, ("Cannot open file %s\n", szName )); return GF_IO_ERR; } } else { t = stdout; } res = (u32) gf_fwrite(data+8, count-8, t); if (inName) gf_fclose(t); gf_free(data); if (count-8 != res) { M4_LOG(GF_LOG_ERROR, ("Error writing udta to file\n")); return GF_IO_ERR; } return GF_OK; } GF_Err dump_isom_chapters(GF_ISOFile *file, char *inName, Bool is_final_name, u32 dump_mode) { FILE *t; u32 i, count; u32 chap_tk = 0; count = gf_isom_get_chapter_count(file, 0); if (dump_mode==2) dump_mode = GF_TEXTDUMPTYPE_OGG_CHAP; else if (dump_mode==3) dump_mode = GF_TEXTDUMPTYPE_ZOOM_CHAP; else dump_mode = GF_TEXTDUMPTYPE_TTXT_CHAP; if (!count) { for (i=0; i<gf_isom_get_track_count(file); i++) { if (gf_isom_get_reference_count(file, i+1, GF_ISOM_REF_CHAP)) { GF_Err e = gf_isom_get_reference(file, i+1, GF_ISOM_REF_CHAP, 1, &chap_tk); if (!e) break; } } if (!chap_tk) { M4_LOG(GF_LOG_WARNING, ("No chapters or chapters track found in file\n")); return GF_OK; } fprintf(stderr, "Dumping chapter track %d\n", chap_tk); dump_isom_timed_text(file, gf_isom_get_track_id(file, chap_tk), inName, is_final_name, GF_FALSE, dump_mode); return GF_OK; } if (inName) { char szName[1024]; strcpy(szName, inName); if (!is_final_name) { if (dump_mode==GF_TEXTDUMPTYPE_OGG_CHAP) { strcat(szName, ".txt"); } else if 
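			/* ZoomPlayer chapter scripts also use a plain .txt extension; TTXT chapters are written as .ttxt */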
(dump_mode==GF_TEXTDUMPTYPE_ZOOM_CHAP) {
				strcat(szName, ".txt");
			} else {
				strcat(szName, ".ttxt");
			}
		}
		t = gf_fopen(szName, "wt");
		if (!t) return GF_IO_ERR;
	} else {
		t = stdout;
	}

	if (dump_mode==GF_TEXTDUMPTYPE_TTXT_CHAP) {
		fprintf(t, "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n");
		fprintf(t, "<TextStream version=\"1.1\">\n");
		fprintf(t, "<TextStreamHeader width=\"0\" height=\"0\" layer=\"0\" translation_x=\"0\" translation_y=\"0\">\n");
		fprintf(t, "<TextSampleDescription horizontalJustification=\"left\" backColor=\"0 0 0\" scroll=\"None\"/>\n");
		fprintf(t, "</TextStreamHeader>\n");
	}
	for (i=0; i<count; i++) {
		char szDur[20];
		u64 chapter_time;
		const char *name;
		gf_isom_get_chapter(file, 0, i+1, &chapter_time, &name);
		if (dump_mode==GF_TEXTDUMPTYPE_OGG_CHAP) {
			fprintf(t, "CHAPTER%02d=%s\n", i+1, format_duration(chapter_time, 1000, szDur));
			fprintf(t, "CHAPTER%02dNAME=%s\n", i+1, name);
		} else if (dump_mode==GF_TEXTDUMPTYPE_ZOOM_CHAP) {
			chapter_time /= 1000;
			fprintf(t, "AddChapterBySecond("LLD",%s)\n", chapter_time, name);
		} else {
			fprintf(t, "<TextSample sampleTime=\"%s\" sampleDescriptionIndex=\"1\" xml:space=\"preserve\">%s</TextSample>\n", format_duration(chapter_time, 1000, szDur), name);
		}
	}
	if (dump_mode==GF_TEXTDUMPTYPE_TTXT_CHAP) {
		fprintf(t, "</TextStream>\n");
	}
	if (inName) gf_fclose(t);
	return GF_OK;
}

static void dump_key_info(const u8 *key_info, u32 key_info_size, Bool is_protected)
{
	if (!key_info) return;
	u32 j, k, kpos=3;
	u32 nb_keys = 1;
	if (key_info[0]) {
		nb_keys = key_info[1];
		nb_keys <<= 8;
		nb_keys |= key_info[2];
	}
	for (k=0; k<nb_keys; k++) {
		u8 constant_iv_size=0;
		/* each key entry is a 1-byte IV size followed by a 16-byte KID */
		u8 iv_size = key_info[kpos];
		fprintf(stderr, "\t\tKID");
		if (nb_keys>1) fprintf(stderr, "%d", k+1);
		fprintf(stderr, " ");
		for (j=0; j<16; j++) fprintf(stderr, "%02X", key_info[kpos+1+j]);
		kpos += 17;
		if (!iv_size && is_protected) {
			/* IV size 0 on protected content means a constant IV follows the KID */
			constant_iv_size = key_info[kpos];
			kpos += 1 + constant_iv_size;
		}
		fprintf(stderr, " - %sIV size %d \n", constant_iv_size ? "const " : "", constant_iv_size ? constant_iv_size : iv_size);
	}
}

static void DumpMetaItem(GF_ISOFile *file, Bool root_meta, u32 tk_num, char *name)
{
	char szInd[2];
	u32 i, count, primary_id;
	u32 meta_type = gf_isom_get_meta_type(file, root_meta, tk_num);
	if (name[0]=='\t') {
		szInd[0] = '\t';
		szInd[1] = 0;
	} else {
		szInd[0] = 0;
	}
	count = gf_isom_get_meta_item_count(file, root_meta, tk_num);
	primary_id = gf_isom_get_meta_primary_item_id(file, root_meta, tk_num);
	fprintf(stderr, "%s type: \"%s\" - %d resource item(s)\n", name, meta_type ?
gf_4cc_to_str(meta_type) : "undefined", (count+(primary_id>0))); switch (gf_isom_has_meta_xml(file, root_meta, tk_num)) { case 1: fprintf(stderr, "%sMeta has XML resource\n", szInd); break; case 2: fprintf(stderr, "%sMeta has BinaryXML resource\n", szInd); break; } if (primary_id) { fprintf(stderr, "%sPrimary Item - ID %d\n", szInd, primary_id); } for (i=0; i<count; i++) { const char *it_name, *mime, *enc, *url, *urn; Bool self_ref; u32 ID; u32 it_type, cenc_scheme, cenc_version; GF_Err e = gf_isom_get_meta_item_info(file, root_meta, tk_num, i+1, &ID, &it_type, &cenc_scheme, &cenc_version, &self_ref, &it_name, &mime, &enc, &url, &urn); if (e) { fprintf(stderr, "%sItem #%d fetch info error: %s\n", szInd, i+1, gf_error_to_string(e) ); continue; } fprintf(stderr, "%sItem #%d: ID %d type %s", szInd, i+1, ID, gf_4cc_to_str(it_type)); if (self_ref) fprintf(stderr, " Self-Reference"); else if (it_name && it_name[0]) fprintf(stderr, " Name \"%s\"", it_name); if (mime) fprintf(stderr, " MIME: \"%s\"", mime); if (enc) fprintf(stderr, " ContentEncoding: \"%s\"", enc); if (meta_type == GF_META_ITEM_TYPE_PICT) { GF_ImageItemProperties img_props; e = gf_isom_get_meta_image_props(file, root_meta, tk_num, ID, &img_props); if (e) { fprintf(stderr, " invalid image properties !"); } else { u32 j; Bool chan_diff = 0; if (img_props.width && img_props.height) { fprintf(stderr, " size %ux%u", img_props.width, img_props.height); } if (img_props.hSpacing && img_props.vSpacing) { fprintf(stderr, " SAR %u/%u", img_props.hSpacing, img_props.vSpacing); } if (img_props.num_channels) { fprintf(stderr, " %d channel%s (", img_props.num_channels, (img_props.num_channels>1) ? "s" : ""); for (j=1; j<img_props.num_channels; j++) { if (img_props.bits_per_channel[0] != img_props.bits_per_channel[j]) chan_diff = 1; } if (chan_diff) { for (j=0; j<img_props.num_channels; j++) { if (j) fprintf(stderr, ","); fprintf(stderr, "%d", img_props.bits_per_channel[j]); } } else { fprintf(stderr, "%d", img_props.bits_per_channel[0]); } fprintf(stderr, " bpc)"); } if (img_props.hOffset || img_props.vOffset) fprintf(stderr, " Offset %ux%u", img_props.hOffset, img_props.vOffset); if (img_props.alpha) fprintf(stderr, " Alpha"); if (img_props.hidden) fprintf(stderr, " Hidden"); if (img_props.angle) fprintf(stderr, " Rotate %d", img_props.angle); if (img_props.mirror) fprintf(stderr, " Mirror %d", img_props.mirror); if (img_props.clap_hden || img_props.clap_wden) fprintf(stderr, " Clap %d/%d,%d/%d,%d/%d,%d/%d", img_props.clap_wnum, img_props.clap_wden, img_props.clap_hnum, img_props.clap_hden, img_props.clap_honum, img_props.clap_hoden, img_props.clap_vonum, img_props.clap_voden); } } if (cenc_scheme) { Bool is_protected; u8 skip_byte_block, crypt_byte_block; const u8 *key_info; u32 key_info_size; fprintf(stderr, " - Protection scheme: %s v0x%08X", gf_4cc_to_str(cenc_scheme), cenc_version); gf_isom_extract_meta_item_get_cenc_info(file, root_meta, tk_num, ID, &is_protected, &skip_byte_block, &crypt_byte_block, &key_info, &key_info_size, NULL, NULL, NULL, NULL); if (skip_byte_block && crypt_byte_block) fprintf(stderr, " - Pattern %d:%d", skip_byte_block, crypt_byte_block); fprintf(stderr, "\n"); dump_key_info(key_info, key_info_size, is_protected); } fprintf(stderr, "\n"); if (url) fprintf(stderr, "%sURL: %s\n", szInd, url); if (urn) fprintf(stderr, "%sURN: %s\n", szInd, urn); } } static void print_config_hash(GF_List *xps_array, char *szName) { u32 i, j; u8 hash[20]; for (i=0; i<gf_list_count(xps_array); i++) { GF_NALUFFParam *slc = 
gf_list_get(xps_array, i); gf_sha1_csum((u8 *) slc->data, slc->size, hash); fprintf(stderr, "\t%s#%d hash: ", szName, i+1); for (j=0; j<20; j++) fprintf(stderr, "%02X", hash[j]); fprintf(stderr, "\n"); } } void dump_hevc_track_info(GF_ISOFile *file, u32 trackNum, GF_HEVCConfig *hevccfg #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) , HEVCState *hevc_state #endif /*GPAC_DISABLE_AV_PARSERS && defined(GPAC_DISABLE_HEVC)*/ ) { #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) u32 idx; #endif u32 k; Bool non_hevc_base_layer=GF_FALSE; fprintf(stderr, "\t%s Info:", hevccfg->is_lhvc ? "LHVC" : "HEVC"); if (!hevccfg->is_lhvc) fprintf(stderr, " Profile %s @ Level %g - Chroma Format %s\n", gf_hevc_get_profile_name(hevccfg->profile_idc), ((Double)hevccfg->level_idc) / 30.0, gf_avc_hevc_get_chroma_format_name(hevccfg->chromaFormat)); fprintf(stderr, "\n"); fprintf(stderr, "\tNAL Unit length bits: %d", 8*hevccfg->nal_unit_size); if (!hevccfg->is_lhvc) fprintf(stderr, " - general profile compatibility 0x%08X\n", hevccfg->general_profile_compatibility_flags); fprintf(stderr, "\n"); fprintf(stderr, "\tParameter Sets: "); for (k=0; k<gf_list_count(hevccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(hevccfg->param_array, k); if (ar->type==GF_HEVC_NALU_SEQ_PARAM) { fprintf(stderr, "%d SPS ", gf_list_count(ar->nalus)); } if (ar->type==GF_HEVC_NALU_PIC_PARAM) { fprintf(stderr, "%d PPS ", gf_list_count(ar->nalus)); } if (ar->type==GF_HEVC_NALU_VID_PARAM) { fprintf(stderr, "%d VPS ", gf_list_count(ar->nalus)); #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) for (idx=0; idx<gf_list_count(ar->nalus); idx++) { GF_NALUFFParam *vps = gf_list_get(ar->nalus, idx); s32 ps_idx=gf_hevc_read_vps(vps->data, vps->size, hevc_state); if (hevccfg->is_lhvc && (ps_idx>=0)) { non_hevc_base_layer = ! hevc_state->vps[ps_idx].base_layer_internal_flag; } } #endif } } fprintf(stderr, "\n"); #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) for (k=0; k<gf_list_count(hevccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(hevccfg->param_array, k); u32 width, height; s32 par_n, par_d; if (ar->type !=GF_HEVC_NALU_SEQ_PARAM) continue; for (idx=0; idx<gf_list_count(ar->nalus); idx++) { GF_Err e; GF_NALUFFParam *sps = gf_list_get(ar->nalus, idx); par_n = par_d = -1; e = gf_hevc_get_sps_info_with_state(hevc_state, sps->data, sps->size, NULL, &width, &height, &par_n, &par_d); if (e==GF_OK) { fprintf(stderr, "\tSPS resolution %dx%d", width, height); if ((par_n>0) && (par_d>0)) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, " - Pixel Aspect Ratio %d:%d - Indicated track size %d x %d", par_n, par_d, tw, th); } fprintf(stderr, "\n"); } else { M4_LOG(GF_LOG_ERROR, ("Failed to read SPS: %s\n\n", gf_error_to_string(e) )); } } } #endif if (!hevccfg->is_lhvc) fprintf(stderr, "\tBit Depth luma %d - Chroma %d - %d temporal layers\n", hevccfg->luma_bit_depth, hevccfg->chroma_bit_depth, hevccfg->numTemporalLayers); else fprintf(stderr, "\t%d temporal layers\n", hevccfg->numTemporalLayers); if (hevccfg->is_lhvc) { fprintf(stderr, "\t%sHEVC base layer - Complete representation %d\n", non_hevc_base_layer ? 
"Non-" : "", hevccfg->complete_representation); } for (k=0; k<gf_list_count(hevccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(hevccfg->param_array, k); if (ar->type==GF_HEVC_NALU_SEQ_PARAM) print_config_hash(ar->nalus, "SPS"); else if (ar->type==GF_HEVC_NALU_PIC_PARAM) print_config_hash(ar->nalus, "PPS"); else if (ar->type==GF_HEVC_NALU_VID_PARAM) print_config_hash(ar->nalus, "VPS"); } } void dump_vvc_track_info(GF_ISOFile *file, u32 trackNum, GF_VVCConfig *vvccfg #if !defined(GPAC_DISABLE_AV_PARSERS) , VVCState *vvc_state #endif /*GPAC_DISABLE_AV_PARSERS && defined(GPAC_DISABLE_HEVC)*/ ) { #if !defined(GPAC_DISABLE_AV_PARSERS) u32 idx; #endif u32 k; fprintf(stderr, "\tVVC Info:"); fprintf(stderr, " Profile %d @ Level %d - Chroma Format %s\n", vvccfg->general_profile_idc, vvccfg->general_level_idc, vvccfg->chromaformat_plus_one ? gf_avc_hevc_get_chroma_format_name(vvccfg->chromaformat_plus_one-1) : "n/a"); fprintf(stderr, "\n"); fprintf(stderr, "\tNAL Unit length bits: %d", 8*vvccfg->nal_unit_size); if (vvccfg->general_constraint_info && vvccfg->num_constraint_info && vvccfg->general_constraint_info[0]) { fprintf(stderr, " - general constraint info 0x"); for (idx=0; idx<vvccfg->num_constraint_info; idx++) { fprintf(stderr, "%02X", vvccfg->general_constraint_info[idx]); } } fprintf(stderr, "\n"); fprintf(stderr, "\tParameter Sets: "); for (k=0; k<gf_list_count(vvccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(vvccfg->param_array, k); if (ar->type==GF_VVC_NALU_SEQ_PARAM) { fprintf(stderr, "%d SPS ", gf_list_count(ar->nalus)); } if (ar->type==GF_VVC_NALU_PIC_PARAM) { fprintf(stderr, "%d PPS ", gf_list_count(ar->nalus)); } if (ar->type==GF_VVC_NALU_VID_PARAM) { fprintf(stderr, "%d VPS ", gf_list_count(ar->nalus)); #if !defined(GPAC_DISABLE_AV_PARSERS) && 0 //TODO for (idx=0; idx<gf_list_count(ar->nalus); idx++) { GF_NALUFFParam *vps = gf_list_get(ar->nalus, idx); s32 ps_idx=gf_hevc_read_vps(vps->data, vps->size, hevc_state); if (hevccfg->is_lhvc && (ps_idx>=0)) { non_hevc_base_layer = ! 
hevc_state->vps[ps_idx].base_layer_internal_flag; } } #endif } } fprintf(stderr, "\n"); #if !defined(GPAC_DISABLE_AV_PARSERS) && 0 //TODO for (k=0; k<gf_list_count(vvccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(vvccfg->param_array, k); u32 width, height; s32 par_n, par_d; if (ar->type !=GF_VVC_NALU_SEQ_PARAM) continue; for (idx=0; idx<gf_list_count(ar->nalus); idx++) { GF_Err e; GF_NALUFFParam *sps = gf_list_get(ar->nalus, idx); par_n = par_d = -1; e = gf_vvc_get_sps_info_with_state(vvc_state, sps->data, sps->size, NULL, &width, &height, &par_n, &par_d); if (e==GF_OK) { fprintf(stderr, "\tSPS resolution %dx%d", width, height); if ((par_n>0) && (par_d>0)) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, " - Pixel Aspect Ratio %d:%d - Indicated track size %d x %d", par_n, par_d, tw, th); } fprintf(stderr, "\n"); } else { M4_LOG(GF_LOG_ERROR, ("\nFailed to read SPS: %s\n\n", gf_error_to_string(e) )); } } } #endif fprintf(stderr, "\tBit Depth %d - %d temporal layers\n", vvccfg->bit_depth_plus_one-1, vvccfg->numTemporalLayers); for (k=0; k<gf_list_count(vvccfg->param_array); k++) { GF_NALUFFParamArray *ar=gf_list_get(vvccfg->param_array, k); if (ar->type==GF_VVC_NALU_SEQ_PARAM) print_config_hash(ar->nalus, "SPS"); else if (ar->type==GF_VVC_NALU_PIC_PARAM) print_config_hash(ar->nalus, "PPS"); else if (ar->type==GF_VVC_NALU_VID_PARAM) print_config_hash(ar->nalus, "VPS"); } } void gf_inspect_format_timecode(const u8 *data, u32 size, u32 tmcd_flags, u32 tc_num, u32 tc_den, u32 tmcd_fpt, char szFmt[100]); void DumpTrackInfo(GF_ISOFile *file, GF_ISOTrackID trackID, Bool full_dump, Bool is_track_num, Bool dump_m4sys) { char szCodec[RFC6381_CODEC_NAME_SIZE_MAX]; Double scale, max_rate, rate; Bool is_od_track = 0; u32 trackNum, i, j, ts, mtype, msub_type, timescale, sr, nb_ch, count, alt_group, nb_groups, nb_edits, cdur, csize, bps, pfmt, codecid; u64 time_slice, dur, size; s32 cts_shift; GF_ESD *esd; char szDur[50]; char *lang; if (!is_track_num) { trackNum = gf_isom_get_track_by_id(file, trackID); } else { trackNum = trackID; trackID = gf_isom_get_track_id(file, trackNum); } if (!trackNum) { M4_LOG(GF_LOG_ERROR, ("No track with ID %d found\n", trackID)); return; } timescale = gf_isom_get_media_timescale(file, trackNum); fprintf(stderr, "# Track %d Info - ID %d - TimeScale %d\n", trackNum, trackID, timescale); dur = gf_isom_get_media_original_duration(file, trackNum); size = gf_isom_get_media_duration(file, trackNum); fprintf(stderr, "Media Duration %s ", format_duration(dur, timescale, szDur)); if (dur != size) fprintf(stderr, " (recomputed %s)", format_duration(size, timescale, szDur)); fprintf(stderr, "\n"); if (gf_isom_check_data_reference(file, trackNum, 1) != GF_OK) { M4_LOG(GF_LOG_WARNING, ("Track uses external data reference not supported by GPAC!\n")); } nb_edits = gf_isom_get_edits_count(file, trackNum); if (nb_edits) fprintf(stderr, "Track has %d edits: track duration is %s\n", nb_edits, format_duration(gf_isom_get_track_duration(file, trackNum), gf_isom_get_timescale(file), szDur)); cts_shift = gf_isom_get_composition_offset_shift(file, trackNum); if (cts_shift) fprintf(stderr, "Track composition offset shift (negative CTS offset): %d\n", cts_shift); if (gf_isom_is_track_in_root_od(file, trackNum) ) fprintf(stderr, "Track is present in Root OD\n"); if (!gf_isom_is_track_enabled(file, trackNum)) fprintf(stderr, "Track is disabled\n"); gf_isom_get_media_language(file, trackNum, &lang); fprintf(stderr, "Media Info: 
Language \"%s (%s)\" - ", GetLanguage(lang), lang ); gf_free(lang); mtype = gf_isom_get_media_type(file, trackNum); fprintf(stderr, "Type \"%s:", gf_4cc_to_str(mtype)); msub_type = gf_isom_get_mpeg4_subtype(file, trackNum, 1); if (!msub_type) msub_type = gf_isom_get_media_subtype(file, trackNum, 1); fprintf(stderr, "%s\" - %d samples\n", gf_4cc_to_str(msub_type), gf_isom_get_sample_count(file, trackNum)); pfmt = gf_pixel_fmt_from_qt_type(msub_type); codecid = gf_codec_id_from_isobmf(msub_type); count = gf_isom_get_track_kind_count(file, trackNum); for (i = 0; i < count; i++) { char *kind_scheme, *kind_value; gf_isom_get_track_kind(file, trackNum, i, &kind_scheme, &kind_value); fprintf(stderr, "Kind: %s - %s\n", kind_scheme ? kind_scheme : "null", kind_value ? kind_value : "null"); if (kind_scheme) gf_free(kind_scheme); if (kind_value) gf_free(kind_value); } if (gf_isom_is_track_fragmented(file, trackID) ) { u32 defaultDuration, defaultSize, defaultDescriptionIndex, defaultRandomAccess; u8 defaultPadding; u16 defaultDegradationPriority; u32 frag_samples; u64 frag_duration; gf_isom_get_fragmented_samples_info(file, trackID, &frag_samples, &frag_duration); fprintf(stderr, "Fragmented track: %d samples - Media Duration %s\n", frag_samples, format_duration(frag_duration, timescale, szDur)); gf_isom_get_fragment_defaults(file, trackNum, &defaultDuration, &defaultSize, &defaultDescriptionIndex, &defaultRandomAccess, &defaultPadding, &defaultDegradationPriority); fprintf(stderr, "Fragment sample defaults: duration %d size %d stsd %d sync %d padding %d degradation_priority %d\n", defaultDuration, defaultSize, defaultDescriptionIndex, defaultRandomAccess, (u32) defaultPadding, (u32) defaultDegradationPriority ); } if (!gf_isom_is_self_contained(file, trackNum, 1)) { const char *url, *urn; gf_isom_get_data_reference(file, trackNum, 1, &url, &urn); fprintf(stderr, "Media Data Location: %s\n", url ? 
url : urn); } if (full_dump) { const char *handler_name; gf_isom_get_handler_name(file, trackNum, &handler_name); fprintf(stderr, "Handler name: %s\n", handler_name); } print_udta(file, trackNum, GF_FALSE); if (gf_isom_is_video_handler_type(mtype) ) { s32 tx, ty; u32 w, h; u16 bit_depth; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); gf_isom_get_visual_bit_depth(file, trackNum, 1, &bit_depth); fprintf(stderr, "Visual Sample Entry Info: width=%d height=%d (depth=%d bits)\n", w, h, (int)bit_depth); gf_isom_get_track_layout_info(file, trackNum, &w, &h, &tx, &ty, NULL); fprintf(stderr, "Visual Track layout: x=%d y=%d width=%d height=%d\n", tx, ty, w, h); } gf_isom_get_audio_info(file, trackNum, 1, &sr, &nb_ch, &bps); gf_isom_set_nalu_extract_mode(file, trackNum, GF_ISOM_NALU_EXTRACT_INSPECT); msub_type = gf_isom_get_media_subtype(file, trackNum, 1); if (msub_type==GF_ISOM_SUBTYPE_MPEG4_CRYP) gf_isom_get_original_format_type(file, trackNum, 1, &msub_type); if ((msub_type==GF_ISOM_SUBTYPE_MPEG4) || (msub_type==GF_ISOM_SUBTYPE_AVC_H264) || (msub_type==GF_ISOM_SUBTYPE_AVC2_H264) || (msub_type==GF_ISOM_SUBTYPE_AVC3_H264) || (msub_type==GF_ISOM_SUBTYPE_AVC4_H264) || (msub_type==GF_ISOM_SUBTYPE_SVC_H264) || (msub_type==GF_ISOM_SUBTYPE_MVC_H264) || (msub_type==GF_ISOM_SUBTYPE_LSR1) || (msub_type==GF_ISOM_SUBTYPE_HVC1) || (msub_type==GF_ISOM_SUBTYPE_HEV1) || (msub_type==GF_ISOM_SUBTYPE_HVC2) || (msub_type==GF_ISOM_SUBTYPE_HEV2) || (msub_type==GF_ISOM_SUBTYPE_LHV1) || (msub_type==GF_ISOM_SUBTYPE_LHE1) || (msub_type==GF_ISOM_SUBTYPE_HVT1) ) { esd = gf_isom_get_esd(file, trackNum, 1); if (!esd || !esd->decoderConfig) { M4_LOG(GF_LOG_WARNING, ("WARNING: Broken MPEG-4 Track\n")); if (esd) gf_odf_desc_del((GF_Descriptor *)esd); } else { const char *st = gf_stream_type_name(esd->decoderConfig->streamType); if (dump_m4sys) { if (st) { fprintf(stderr, "MPEG-4 Config%s%s Stream - ObjectTypeIndication 0x%02x\n", full_dump ? "\n\t" : ": ", st, esd->decoderConfig->objectTypeIndication); } else { fprintf(stderr, "MPEG-4 Config%sStream Type 0x%02x - ObjectTypeIndication 0x%02x\n", full_dump ? 
"\n\t" : ": ", esd->decoderConfig->streamType, esd->decoderConfig->objectTypeIndication); } } if (esd->decoderConfig->streamType==GF_STREAM_OD) is_od_track=1; if (esd->decoderConfig->streamType==GF_STREAM_VISUAL) { u32 w, h; u16 rvc_predef; w = h = 0; if (esd->decoderConfig->objectTypeIndication==GF_CODECID_MPEG4_PART2) { #ifndef GPAC_DISABLE_AV_PARSERS if (!esd->decoderConfig->decoderSpecificInfo) { #else gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "MPEG-4 Visual Size %d x %d\n", w, h); #endif M4_LOG(GF_LOG_WARNING, ("Non-compliant MPEG-4 Visual track: video_object_layer infos not found in sample description\n")); #ifndef GPAC_DISABLE_AV_PARSERS } else { GF_M4VDecSpecInfo dsi; gf_m4v_get_config(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, &dsi); if (full_dump) fprintf(stderr, "\t"); w = dsi.width; h = dsi.height; fprintf(stderr, "MPEG-4 Visual Size %d x %d - %s\n", w, h, gf_m4v_get_profile_name(dsi.VideoPL)); if (dsi.par_den && dsi.par_num) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, "Pixel Aspect Ratio %d:%d - Indicated track size %d x %d\n", dsi.par_num, dsi.par_den, tw, th); } } #endif } else if (gf_isom_get_avc_svc_type(file, trackNum, 1) != GF_ISOM_AVCTYPE_NONE) { GF_AVCConfig *avccfg, *svccfg, *mvccfg; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "AVC/H264 Video - Visual Size %d x %d\n", w, h); avccfg = gf_isom_avc_config_get(file, trackNum, 1); svccfg = gf_isom_svc_config_get(file, trackNum, 1); mvccfg = gf_isom_mvc_config_get(file, trackNum, 1); if (!avccfg && !svccfg && !mvccfg) { M4_LOG(GF_LOG_ERROR, ("\tNon-compliant AVC track: SPS/PPS not found in sample description\n")); } else if (avccfg) { fprintf(stderr, "\tAVC Info: %d SPS - %d PPS", gf_list_count(avccfg->sequenceParameterSets) , gf_list_count(avccfg->pictureParameterSets) ); fprintf(stderr, " - Profile %s @ Level %g\n", gf_avc_get_profile_name(avccfg->AVCProfileIndication), ((Double)avccfg->AVCLevelIndication)/10.0 ); fprintf(stderr, "\tNAL Unit length bits: %d\n", 8*avccfg->nal_unit_size); #ifndef GPAC_DISABLE_AV_PARSERS for (i=0; i<gf_list_count(avccfg->sequenceParameterSets); i++) { s32 par_n, par_d; GF_NALUFFParam *slc = gf_list_get(avccfg->sequenceParameterSets, i); gf_avc_get_sps_info(slc->data, slc->size, NULL, NULL, NULL, &par_n, &par_d); if ((par_n>0) && (par_d>0)) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, "\tPixel Aspect Ratio %d:%d - Indicated track size %d x %d\n", par_n, par_d, tw, th); } if (!full_dump) break; } #endif if (avccfg->chroma_bit_depth) { fprintf(stderr, "\tChroma format %s - Luma bit depth %d - chroma bit depth %d\n", gf_avc_hevc_get_chroma_format_name(avccfg->chroma_format), avccfg->luma_bit_depth, avccfg->chroma_bit_depth); } print_config_hash(avccfg->sequenceParameterSets, "SPS"); print_config_hash(avccfg->pictureParameterSets, "PPS"); gf_odf_avc_cfg_del(avccfg); } if (svccfg) { fprintf(stderr, "\n\tSVC Info: %d SPS - %d PPS - Profile %s @ Level %g\n", gf_list_count(svccfg->sequenceParameterSets) , gf_list_count(svccfg->pictureParameterSets), gf_avc_get_profile_name(svccfg->AVCProfileIndication), ((Double)svccfg->AVCLevelIndication)/10.0 ); fprintf(stderr, "\tSVC NAL Unit length bits: %d\n", 8*svccfg->nal_unit_size); #ifndef GPAC_DISABLE_AV_PARSERS for (i=0; i<gf_list_count(svccfg->sequenceParameterSets); i++) { GF_NALUFFParam *slc 
= gf_list_get(svccfg->sequenceParameterSets, i); if (slc) { s32 par_n, par_d; u32 s_w, s_h, sps_id; gf_avc_get_sps_info(slc->data, slc->size, &sps_id, &s_w, &s_h, &par_n, &par_d); fprintf(stderr, "\t\tSPS ID %d - Visual Size %d x %d\n", sps_id, s_w, s_h); if ((par_n>0) && (par_d>0)) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, "\tPixel Aspect Ratio %d:%d - Indicated track size %d x %d\n", par_n, par_d, tw, th); } } } #endif print_config_hash(svccfg->sequenceParameterSets, "SPS"); print_config_hash(svccfg->pictureParameterSets, "PPS"); print_config_hash(svccfg->sequenceParameterSetExtensions, "SPSEx"); gf_odf_avc_cfg_del(svccfg); } if (mvccfg) { fprintf(stderr, "\n\tMVC Info: %d SPS - %d PPS - Profile %s @ Level %g\n", gf_list_count(mvccfg->sequenceParameterSets) , gf_list_count(mvccfg->pictureParameterSets), gf_avc_get_profile_name(mvccfg->AVCProfileIndication), ((Double)mvccfg->AVCLevelIndication)/10.0 ); fprintf(stderr, "\tMVC NAL Unit length bits: %d\n", 8*mvccfg->nal_unit_size); #ifndef GPAC_DISABLE_AV_PARSERS for (i=0; i<gf_list_count(mvccfg->sequenceParameterSets); i++) { GF_NALUFFParam *slc = gf_list_get(mvccfg->sequenceParameterSets, i); if (slc) { u32 s_w, s_h, sps_id; s32 par_n, par_d; gf_avc_get_sps_info(slc->data, slc->size, &sps_id, &s_w, &s_h, &par_n, &par_d); fprintf(stderr, "\t\tSPS ID %d - Visual Size %d x %d\n", sps_id, s_w, s_h); if ((par_n>0) && (par_d>0)) { u32 tw, th; gf_isom_get_track_layout_info(file, trackNum, &tw, &th, NULL, NULL, NULL); fprintf(stderr, "\tPixel Aspect Ratio %d:%d - Indicated track size %d x %d\n", par_n, par_d, tw, th); } } } #endif print_config_hash(mvccfg->sequenceParameterSets, "SPS"); print_config_hash(mvccfg->pictureParameterSets, "PPS"); gf_odf_avc_cfg_del(mvccfg); } } else if ((esd->decoderConfig->objectTypeIndication==GF_CODECID_HEVC) || (esd->decoderConfig->objectTypeIndication==GF_CODECID_LHVC) ) { GF_HEVCConfig *hevccfg, *lhvccfg; GF_OperatingPointsInformation *oinf; #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) HEVCState hevc_state; memset(&hevc_state, 0, sizeof(HEVCState)); hevc_state.sps_active_idx = -1; #endif gf_isom_get_visual_info(file, trackNum, 1, &w, &h); if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "HEVC Video - Visual Size %d x %d\n", w, h); hevccfg = gf_isom_hevc_config_get(file, trackNum, 1); lhvccfg = gf_isom_lhvc_config_get(file, trackNum, 1); if (msub_type==GF_ISOM_SUBTYPE_HVT1) { const u8 *data; u32 tsize; u32 is_default, tx,ty,tw,th, id, independent; Bool full_frame; if (gf_isom_get_tile_info(file, trackNum, 1, &is_default, &id, &independent, &full_frame, &tx, &ty, &tw, &th)) { fprintf(stderr, "\tHEVC Tile - ID %d independent %d (x,y,w,h)=%d,%d,%d,%d \n", id, independent, tx, ty, tw, th); } else if (gf_isom_get_sample_group_info(file, trackNum, 1, GF_ISOM_SAMPLE_GROUP_TRIF, &is_default, &data, &tsize)) { fprintf(stderr, "\tHEVC Tile track containing a tile set\n"); } else { fprintf(stderr, "\tHEVC Tile track without tiling info\n"); } } else if (!hevccfg && !lhvccfg) { M4_LOG(GF_LOG_ERROR, ("\tNon-compliant HEVC track: No hvcC or shcC found in sample description\n")); } if (gf_isom_get_reference_count(file, trackNum, GF_ISOM_REF_SABT)) { fprintf(stderr, "\tHEVC Tile base track\n"); } if (hevccfg) { dump_hevc_track_info(file, trackNum, hevccfg #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) , &hevc_state #endif ); gf_odf_hevc_cfg_del(hevccfg); fprintf(stderr, "\n"); } if (lhvccfg) { 
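				/* dump the L-HEVC enhancement-layer config, reusing the HEVC parsing state built for the base layer */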
dump_hevc_track_info(file, trackNum, lhvccfg #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC) , &hevc_state #endif ); gf_odf_hevc_cfg_del(lhvccfg); } if (gf_isom_get_oinf_info(file, trackNum, &oinf)) { fprintf(stderr, "\n\tOperating Points Information -"); fprintf(stderr, " scalability_mask %d (", oinf->scalability_mask); switch (oinf->scalability_mask) { case 2: fprintf(stderr, "Multiview"); break; case 4: fprintf(stderr, "Spatial scalability"); break; case 8: fprintf(stderr, "Auxilary"); break; default: fprintf(stderr, "unknown"); } //TODO: need to dump more info ? fprintf(stderr, ") num_profile_tier_level %d ", gf_list_count(oinf->profile_tier_levels) ); fprintf(stderr, " num_operating_points %d dependency layers %d \n", gf_list_count(oinf->operating_points), gf_list_count(oinf->dependency_layers) ); } } /*OGG media*/ else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_THEORA) { char *szName; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); if (full_dump) fprintf(stderr, "\t"); if (!strnicmp((char *) &esd->decoderConfig->decoderSpecificInfo->data[3], "theora", 6)) szName = "Theora"; else szName = "Unknown"; fprintf(stderr, "Ogg/%s video / GPAC Mux - Visual Size %d x %d\n", szName, w, h); } else { //check if we know this codec from its OTI u32 codec_id = gf_codecid_from_oti(GF_STREAM_VISUAL, esd->decoderConfig->objectTypeIndication); if (codec_id) { gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "%s - Visual Size %d x %d\n", gf_codecid_name(codec_id), w, h); } } if (!w || !h) { gf_isom_get_visual_info(file, trackNum, 1, &w, &h); if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "Visual Size %d x %d\n", w, h); } if (gf_isom_get_rvc_config(file, trackNum, 1, &rvc_predef, NULL, NULL, NULL)==GF_OK) { fprintf(stderr, "Has RVC signaled - Predefined configuration %d\n", rvc_predef); } } else if (esd->decoderConfig->streamType==GF_STREAM_AUDIO) { #ifndef GPAC_DISABLE_AV_PARSERS GF_M4ADecSpecInfo a_cfg; GF_Err e; u32 oti; #endif u32 codec_id; Bool is_mp2 = GF_FALSE; switch (esd->decoderConfig->objectTypeIndication) { case GF_CODECID_AAC_MPEG2_MP: case GF_CODECID_AAC_MPEG2_LCP: case GF_CODECID_AAC_MPEG2_SSRP: is_mp2 = GF_TRUE; case GF_CODECID_AAC_MPEG4: #ifndef GPAC_DISABLE_AV_PARSERS if (!esd->decoderConfig->decoderSpecificInfo) e = GF_NON_COMPLIANT_BITSTREAM; else e = gf_m4a_get_config(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, &a_cfg); if (full_dump) fprintf(stderr, "\t"); if (e) { M4_LOG(GF_LOG_ERROR, ("Corrupted AAC Config\n")); } else { char *signaling = "implicit"; char *heaac = ""; if (!is_mp2 && a_cfg.has_sbr) { if (a_cfg.has_ps) heaac = "(HE-AAC v2) "; else heaac = "(HE-AAC v1) "; } if (a_cfg.base_object_type==2) { if (a_cfg.has_ps || a_cfg.has_sbr) signaling = "backward compatible"; } else { signaling = "hierarchical"; } fprintf(stderr, "%s (AOT=%d %s) %s- %d Channel(s) - SampleRate %d", gf_m4a_object_type_name(a_cfg.base_object_type), a_cfg.base_object_type, signaling, heaac, a_cfg.nb_chan, a_cfg.base_sr); if (is_mp2) fprintf(stderr, " (MPEG-2 Signaling)"); if (a_cfg.has_sbr) fprintf(stderr, " - SBR: SampleRate %d Type %s", a_cfg.sbr_sr, gf_m4a_object_type_name(a_cfg.sbr_object_type)); if (a_cfg.has_ps) fprintf(stderr, " - PS"); fprintf(stderr, "\n"); } #else fprintf(stderr, "MPEG-2/4 Audio - %d Channels - SampleRate %d\n", nb_ch, sr); #endif break; case GF_CODECID_MPEG2_PART3: case GF_CODECID_MPEG_AUDIO: if (msub_type == GF_ISOM_SUBTYPE_MPEG4_CRYP) { fprintf(stderr, 
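				/* ISMA-encrypted entry: samples cannot be parsed, so only the declared channel count and sample rate are reported */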
"MPEG-1/2 Audio - %d Channels - SampleRate %d\n", nb_ch, sr); } else { #ifndef GPAC_DISABLE_AV_PARSERS GF_ISOSample *samp = gf_isom_get_sample(file, trackNum, 1, &oti); if (samp) { u32 mhdr = GF_4CC((u8)samp->data[0], (u8)samp->data[1], (u8)samp->data[2], (u8)samp->data[3]); if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "%s Audio - %d Channel(s) - SampleRate %d - Layer %d\n", gf_mp3_version_name(mhdr), gf_mp3_num_channels(mhdr), gf_mp3_sampling_rate(mhdr), gf_mp3_layer(mhdr) ); gf_isom_sample_del(&samp); } else { M4_LOG(GF_LOG_ERROR, ("Error fetching sample: %s\n", gf_error_to_string(gf_isom_last_error(file)) )); } #else fprintf(stderr, "MPEG-1/2 Audio - %d Channels - SampleRate %d\n", nb_ch, sr); #endif } break; case GF_CODECID_EVRC: fprintf(stderr, "EVRC Audio - Sample Rate 8000 - 1 channel\n"); break; case GF_CODECID_SMV: fprintf(stderr, "SMV Audio - Sample Rate 8000 - 1 channel\n"); break; case GF_CODECID_QCELP: fprintf(stderr, "QCELP Audio - Sample Rate 8000 - 1 channel\n"); break; /*packetVideo hack for EVRC...*/ case GF_CODECID_EVRC_PV: if (esd->decoderConfig->decoderSpecificInfo && (esd->decoderConfig->decoderSpecificInfo->dataLength==8) && !strnicmp((char *)esd->decoderConfig->decoderSpecificInfo->data, "pvmm", 4)) { if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "EVRC Audio (PacketVideo Mux) - Sample Rate 8000 - 1 channel\n"); } break; default: codec_id = gf_codecid_from_oti(GF_STREAM_AUDIO, esd->decoderConfig->objectTypeIndication); if (codec_id) { fprintf(stderr, "%s - Sample Rate %d - %d channel(s)\n", gf_codecid_name(codec_id), sr, nb_ch); } break; } } else if (esd->decoderConfig->streamType==GF_STREAM_SCENE) { if (esd->decoderConfig->objectTypeIndication<=4) { GF_BIFSConfig *b_cfg = gf_odf_get_bifs_config(esd->decoderConfig->decoderSpecificInfo, esd->decoderConfig->objectTypeIndication); fprintf(stderr, "BIFS Scene description - %s stream\n", b_cfg->elementaryMasks ? "Animation" : "Command"); if (full_dump && !b_cfg->elementaryMasks) { fprintf(stderr, "\tWidth %d Height %d Pixel Metrics %s\n", b_cfg->pixelWidth, b_cfg->pixelHeight, b_cfg->pixelMetrics ? "yes" : "no"); } gf_odf_desc_del((GF_Descriptor *)b_cfg); } else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_AFX) { u8 tag = esd->decoderConfig->decoderSpecificInfo ? esd->decoderConfig->decoderSpecificInfo->data[0] : 0xFF; const char *afxtype = gf_stream_type_afx_name(tag); fprintf(stderr, "AFX Stream - type %s (%d)\n", afxtype, tag); } else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_FONT) { fprintf(stderr, "Font Data stream\n"); } else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_LASER) { GF_LASERConfig l_cfg; gf_odf_get_laser_config(esd->decoderConfig->decoderSpecificInfo, &l_cfg); fprintf(stderr, "LASER Stream - %s\n", l_cfg.newSceneIndicator ? 
"Full Scene" : "Scene Segment"); } else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_TEXT_MPEG4) { fprintf(stderr, "MPEG-4 Streaming Text stream\n"); } else if (esd->decoderConfig->objectTypeIndication==GF_CODECID_SYNTHESIZED_TEXTURE) { fprintf(stderr, "Synthetized Texture stream stream\n"); } else { M4_LOG(GF_LOG_WARNING, ("Unknown Systems stream OTI %d\n", esd->decoderConfig->objectTypeIndication)); } } /*sync is only valid if we open all tracks to take care of default MP4 sync..*/ if (!full_dump) { if (dump_m4sys) { if (!esd->OCRESID || (esd->OCRESID == esd->ESID)) fprintf(stderr, "Self-synchronized\n"); else fprintf(stderr, "Synchronized on stream %d\n", esd->OCRESID); } } else { fprintf(stderr, "\tDecoding Buffer size %d - Bitrate: avg %d - max %d kbps\n", esd->decoderConfig->bufferSizeDB, esd->decoderConfig->avgBitrate/1000, esd->decoderConfig->maxBitrate/1000); if (esd->dependsOnESID) fprintf(stderr, "\tDepends on stream %d for decoding\n", esd->dependsOnESID); else fprintf(stderr, "\tNo stream dependencies for decoding\n"); fprintf(stderr, "\tStreamPriority %d\n", esd->streamPriority); if (esd->URLString) fprintf(stderr, "\tRemote Data Source %s\n", esd->URLString); } gf_odf_desc_del((GF_Descriptor *) esd); } } else if (msub_type == GF_ISOM_SUBTYPE_AV01) { GF_AV1Config *av1c; u32 w, h; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "\tAOM AV1 stream - Resolution %d x %d\n", w, h); av1c = gf_isom_av1_config_get(file, trackNum, 1); fprintf(stderr, "\tversion=%u, profile=%u, level_idx0=%u, tier=%u\n", (u32)av1c->version, (u32)av1c->seq_profile, (u32)av1c->seq_level_idx_0, (u32)av1c->seq_tier_0); fprintf(stderr, "\thigh_bitdepth=%u, twelve_bit=%u, monochrome=%u\n", (u32)av1c->high_bitdepth, (u32)av1c->twelve_bit, (u32)av1c->monochrome); fprintf(stderr, "\tchroma: subsampling_x=%u, subsampling_y=%u, sample_position=%u\n", (u32)av1c->chroma_subsampling_x, (u32)av1c->chroma_subsampling_y, (u32)av1c->chroma_sample_position); if (av1c->initial_presentation_delay_present) fprintf(stderr, "\tInitial presentation delay %u\n", (u32) av1c->initial_presentation_delay_minus_one+1); count = gf_list_count(av1c->obu_array); for (i=0; i<count; i++) { u8 hash[20]; GF_AV1_OBUArrayEntry *obu = gf_list_get(av1c->obu_array, i); gf_sha1_csum((u8*)obu->obu, (u32)obu->obu_length, hash); fprintf(stderr, "\tOBU#%d %s hash: ", i+1, gf_av1_get_obu_name(obu->obu_type) ); for (j=0; j<20; j++) fprintf(stderr, "%02X", hash[j]); fprintf(stderr, "\n"); } gf_odf_av1_cfg_del(av1c); } else if (msub_type == GF_ISOM_SUBTYPE_3GP_H263) { u32 w, h; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "\t3GPP H263 stream - Resolution %d x %d\n", w, h); } else if (msub_type == GF_ISOM_SUBTYPE_MJP2) { u32 w, h; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "\tMotionJPEG2000 stream - Resolution %d x %d\n", w, h); } else if ((msub_type == GF_ISOM_SUBTYPE_3GP_AMR) || (msub_type == GF_ISOM_SUBTYPE_3GP_AMR_WB)) { fprintf(stderr, "\t3GPP AMR%s stream - Sample Rate %d - %d channel(s) %d bps\n", (msub_type == GF_ISOM_SUBTYPE_3GP_AMR_WB) ? 
" Wide Band" : "", sr, nb_ch, (u32) bps); } else if (msub_type == GF_ISOM_SUBTYPE_3GP_EVRC) { fprintf(stderr, "\t3GPP EVRC stream - Sample Rate %d - %d channel(s) %d bps\n", sr, nb_ch, (u32) bps); } else if (msub_type == GF_ISOM_SUBTYPE_3GP_QCELP) { fprintf(stderr, "\t3GPP QCELP stream - Sample Rate %d - %d channel(s) %d bps\n", sr, nb_ch, (u32) bps); } else if (msub_type == GF_ISOM_SUBTYPE_MP3) { fprintf(stderr, "\tMPEG 1/2 Audio stream - Sample Rate %d - %d channel(s) %d bps\n", sr, nb_ch, (u32) bps); } else if ((msub_type == GF_ISOM_SUBTYPE_AC3) || (msub_type == GF_ISOM_SUBTYPE_EC3)) { u32 br = 0; const char *lfe = ""; Bool is_ec3 = (msub_type == GF_ISOM_SUBTYPE_EC3) ? GF_TRUE : GF_FALSE; #ifndef GPAC_DISABLE_AV_PARSERS GF_AC3Config *ac3 = gf_isom_ac3_config_get(file, trackNum, 1); if (ac3) { nb_ch = gf_ac3_get_channels(ac3->streams[0].acmod); for (i=0; i<ac3->streams[0].nb_dep_sub; ++i) { assert(ac3->streams[0].nb_dep_sub == 1); nb_ch += gf_ac3_get_channels(ac3->streams[0].chan_loc); } if (ac3->streams[0].lfon) lfe = ".1"; br = ac3->is_ec3 ? ac3->brcode : gf_ac3_get_bitrate(ac3->brcode); is_ec3 = ac3->is_ec3; gf_free(ac3); } #endif fprintf(stderr, "\t%s stream - Sample Rate %d - %d%s channel(s) - bitrate %d\n", is_ec3 ? "EC-3" : "AC-3", sr, nb_ch, lfe, br); } else if (msub_type == GF_ISOM_SUBTYPE_3GP_SMV) { fprintf(stderr, "\t3GPP SMV stream - Sample Rate %d - %d channel(s) %d bits per samples\n", sr, nb_ch, (u32) bps); } else if (msub_type == GF_ISOM_SUBTYPE_3GP_DIMS) { u32 w, h; GF_DIMSDescription dims; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); gf_isom_get_dims_description(file, trackNum, 1, &dims); fprintf(stderr, "\t3GPP DIMS stream - size %d x %d - Profile %d - Level %d\n", w, h, dims.profile, dims.level); fprintf(stderr, "\tpathComponents: %d - useFullRequestHost: %s\n", dims.pathComponents, dims.fullRequestHost ? "yes" : "no"); fprintf(stderr, "\tstream type: %s - redundant: %s\n", dims.streamType ? "primary" : "secondary", (dims.containsRedundant==1) ? "main" : ((dims.containsRedundant==2) ? "redundant" : "main+redundant") ); if (dims.textEncoding[0]) fprintf(stderr, "\ttext encoding %s\n", dims.textEncoding); if (dims.contentEncoding[0]) fprintf(stderr, "\tcontent encoding %s\n", dims.contentEncoding); if (dims.content_script_types) fprintf(stderr, "\tscript languages %s\n", dims.content_script_types); } else if (mtype==GF_ISOM_MEDIA_HINT) { u32 refTrack; s32 refCount = gf_isom_get_reference_count(file, trackNum, GF_ISOM_REF_HINT); if (refCount>0) { fprintf(stderr, "Streaming Hint Track for track%s ", (refCount>1) ? 
"s" :""); for (i=0; i<(u32) refCount; i++) { gf_isom_get_reference(file, trackNum, GF_ISOM_REF_HINT, i+1, &refTrack); if (i) fprintf(stderr, " - "); fprintf(stderr, "ID %d", gf_isom_get_track_id(file, refTrack)); } fprintf(stderr, "\n"); } else { fprintf(stderr, "Streaming Hint Track (no refs)\n"); } #ifndef GPAC_DISABLE_ISOM_HINTING refCount = gf_isom_get_payt_count(file, trackNum); if (refCount>0) { for (i=0; i<(u32) refCount; i++) { const char *name = gf_isom_get_payt_info(file, trackNum, i+1, &refTrack); fprintf(stderr, "\tPayload ID %d: type %s\n", refTrack, name); } } #endif } else if (mtype==GF_ISOM_MEDIA_FLASH) { fprintf(stderr, "Macromedia Flash Movie\n"); } else if ((mtype==GF_ISOM_MEDIA_TEXT) || (mtype==GF_ISOM_MEDIA_SUBT) || (mtype==GF_ISOM_MEDIA_MPEG_SUBT)) { u32 w, h; s16 l; s32 tx, ty; const char *content_encoding = NULL; const char *mime = NULL; const char *config = NULL; const char *_namespace = NULL; const char *schema_loc = NULL; const char *auxiliary_mimes = NULL; gf_isom_get_track_layout_info(file, trackNum, &w, &h, &tx, &ty, &l); if (msub_type == GF_ISOM_SUBTYPE_SBTT) { gf_isom_stxt_get_description(file, trackNum, 1, &mime, &content_encoding, &config); fprintf(stderr, "Textual Subtitle Stream "); fprintf(stderr, "- mime %s", mime); if (content_encoding != NULL) { fprintf(stderr, " - encoding %s", content_encoding); } if (config != NULL) { fprintf(stderr, " - %d bytes config", (u32) strlen(config)); } } else if (msub_type == GF_ISOM_SUBTYPE_STXT) { gf_isom_stxt_get_description(file, trackNum, 1, &mime, &content_encoding, &config); fprintf(stderr, "Simple Timed Text Stream "); fprintf(stderr, "- mime %s", mime); if (content_encoding != NULL) { fprintf(stderr, " - encoding %s", content_encoding); } if (config != NULL) { fprintf(stderr, " - %d bytes config", (u32) strlen(config)); } } else if (msub_type == GF_ISOM_SUBTYPE_STPP) { gf_isom_xml_subtitle_get_description(file, trackNum, 1, &_namespace, &schema_loc, &auxiliary_mimes); fprintf(stderr, "XML Subtitle Stream "); fprintf(stderr, "- namespace %s", _namespace); if (schema_loc != NULL) { fprintf(stderr, " - schema-location %s", schema_loc); } if (auxiliary_mimes != NULL) { fprintf(stderr, " - auxiliary-mime-types %s", auxiliary_mimes); } } else { fprintf(stderr, "Unknown Text Stream"); } fprintf(stderr, "\n Size %d x %d - Translation X=%d Y=%d - Layer %d\n", w, h, tx, ty, l); } else if (mtype == GF_ISOM_MEDIA_META) { const char *content_encoding = NULL; if (msub_type == GF_ISOM_SUBTYPE_METT) { const char *mime = NULL; const char *config = NULL; gf_isom_stxt_get_description(file, trackNum, 1, &mime, &content_encoding, &config); fprintf(stderr, "Textual Metadata Stream - mime %s", mime); if (content_encoding != NULL) { fprintf(stderr, " - encoding %s", content_encoding); } if (config != NULL) { fprintf(stderr, " - %d bytes config", (u32) strlen(config)); } fprintf(stderr, "\n"); } else if (msub_type == GF_ISOM_SUBTYPE_METX) { const char *_namespace = NULL; const char *schema_loc = NULL; gf_isom_get_xml_metadata_description(file, trackNum, 1, &_namespace, &schema_loc, &content_encoding); fprintf(stderr, "XML Metadata Stream - namespace %s", _namespace); if (content_encoding != NULL) { fprintf(stderr, " - encoding %s", content_encoding); } if (schema_loc != NULL) { fprintf(stderr, " - schema-location %s", schema_loc); } fprintf(stderr, "\n"); } else { fprintf(stderr, "Unknown Metadata Stream\n"); } } else if ((msub_type==GF_ISOM_SUBTYPE_VVC1) || (msub_type==GF_ISOM_SUBTYPE_VVI1)) { GF_VVCConfig *vvccfg; u32 w, h; #if 
!defined(GPAC_DISABLE_AV_PARSERS) VVCState *vvc_state; GF_SAFEALLOC(vvc_state, VVCState); if (vvc_state) vvc_state->sps_active_idx = -1; #endif gf_isom_get_visual_info(file, trackNum, 1, &w, &h); if (full_dump) fprintf(stderr, "\t"); fprintf(stderr, "VVC Video - Visual Size %d x %d\n", w, h); vvccfg = gf_isom_vvc_config_get(file, trackNum, 1); if (!vvccfg) { M4_LOG(GF_LOG_ERROR, ("Non-compliant VVC track: No vvcC found in sample description\n")); } else { dump_vvc_track_info(file, trackNum, vvccfg #if !defined(GPAC_DISABLE_AV_PARSERS) , vvc_state #endif ); gf_odf_vvc_cfg_del(vvccfg); fprintf(stderr, "\n"); } #if !defined(GPAC_DISABLE_AV_PARSERS) if (vvc_state) gf_free(vvc_state); #endif } else if ((msub_type == GF_ISOM_SUBTYPE_MH3D_MHA1) || (msub_type == GF_ISOM_SUBTYPE_MH3D_MHA2) || (msub_type == GF_ISOM_SUBTYPE_MH3D_MHM1) || (msub_type == GF_ISOM_SUBTYPE_MH3D_MHM2) ) { const u8 *compat_profiles; u32 nb_compat_profiles; Bool valid = GF_FALSE; Bool allow_inband = GF_FALSE; if ( (msub_type == GF_ISOM_SUBTYPE_MH3D_MHM1) || (msub_type == GF_ISOM_SUBTYPE_MH3D_MHM2)) allow_inband = GF_TRUE; fprintf(stderr, "\tMPEG-H Audio stream - Sample Rate %d\n", sr); esd = gf_media_map_esd(file, trackNum, 1); if (!esd || !esd->decoderConfig || !esd->decoderConfig->decoderSpecificInfo || !esd->decoderConfig->decoderSpecificInfo->data ) { if (allow_inband) { GF_ISOSample *samp = gf_isom_get_sample(file, trackNum, 1, NULL); if (samp) { u64 ch_layout=0; s32 PL = gf_mpegh_get_mhas_pl(samp->data, samp->dataLength, &ch_layout); if (PL>=0) { fprintf(stderr, "\tProfileLevelIndication: 0x%02X", PL); if (ch_layout) fprintf(stderr, " - Reference Channel Layout %s", gf_audio_fmt_get_layout_name(ch_layout) ); fprintf(stderr, "\n"); } gf_isom_sample_del(&samp); } valid = GF_TRUE; } } else if (esd->decoderConfig->decoderSpecificInfo->dataLength>=5) { fprintf(stderr, "\tProfileLevelIndication: 0x%02X - Reference Channel Layout %s\n", esd->decoderConfig->decoderSpecificInfo->data[1] , gf_audio_fmt_get_layout_name_from_cicp(esd->decoderConfig->decoderSpecificInfo->data[2]) ); valid = GF_TRUE; } if (!valid) { M4_LOG(GF_LOG_ERROR, ("Invalid MPEG-H audio config\n")); } if (esd) gf_odf_desc_del((GF_Descriptor *)esd); compat_profiles = gf_isom_get_mpegh_compatible_profiles(file, trackNum, 1, &nb_compat_profiles); for (i=0; i<nb_compat_profiles; i++) { if (!i) fprintf(stderr, "\tCompatible profiles:"); fprintf(stderr, " 0x%02X", compat_profiles[i]); } if (i) fprintf(stderr, "\n"); } else if (msub_type==GF_ISOM_SUBTYPE_MLPA) { u32 fmt, prate; if (gf_isom_truehd_config_get(file, trackNum, 1, &fmt, &prate) != GF_OK) { fprintf(stderr, "\tInvalid TrueHD audio config\n"); } fprintf(stderr, "TrueHD Audio stream - Sample Rate %u - channels %u - format %u peak rate %u\n", sr, nb_ch, fmt, prate); } else if (codecid) { if (gf_isom_is_video_handler_type(mtype) ) { u32 w, h; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "%s - Resolution %d x %d\n", gf_codecid_name(codecid), w, h); } else if (mtype==GF_ISOM_MEDIA_AUDIO) { gf_isom_get_audio_info(file, trackNum, 1, &sr, &nb_ch, NULL); fprintf(stderr, "%s - Sample Rate %d - %d channel(s)\n", gf_codecid_name(codecid), sr, nb_ch); } else { fprintf(stderr, "%s\n", gf_codecid_name(codecid) ); } } else if (pfmt) { u32 w, h; gf_isom_get_visual_info(file, trackNum, 1, &w, &h); fprintf(stderr, "Raw video %s - Resolution %d x %d\n", gf_pixel_fmt_name(pfmt), w, h); } else if (msub_type==GF_QT_SUBTYPE_TMCD) { u32 stsd_idx; GF_ISOSample *sample = gf_isom_get_sample(file, trackNum, 1, 
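		/* QT timecode ('tmcd') track: the first sample holds the frame counter, formatted below using the tmcd sample description settings */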
&stsd_idx); fprintf(stderr, "Time Code stream\n"); if (sample) { char szTimecode[100]; u32 tmcd_flags, tmcd_num, tmcd_den, tmcd_fpt; gf_isom_get_tmcd_config(file, trackNum, stsd_idx, &tmcd_flags, &tmcd_num, &tmcd_den, &tmcd_fpt); gf_inspect_format_timecode(sample->data, sample->dataLength, tmcd_flags, tmcd_num, tmcd_den, tmcd_fpt, szTimecode); gf_isom_sample_del(&sample); fprintf(stderr, "\tFirst timecode: %s\n", szTimecode); } } else { GF_GenericSampleDescription *udesc; udesc = gf_isom_get_generic_sample_description(file, trackNum, 1); if (udesc) { if (gf_isom_is_video_handler_type(mtype) ) { fprintf(stderr, "%s - Compressor \"%s\" - Resolution %d x %d\n", ( (mtype == GF_ISOM_MEDIA_VISUAL ? "Visual" : "Auxiliary Video") ), udesc->compressor_name, udesc->width, udesc->height); } else if (mtype==GF_ISOM_MEDIA_AUDIO) { fprintf(stderr, "Audio - Sample Rate %d - %d channel(s)\n", udesc->samplerate, udesc->nb_channels); } else { fprintf(stderr, "Unknown media type\n"); } if (udesc->vendor_code) fprintf(stderr, "\tVendor code \"%s\" - Version %d - revision %d\n", gf_4cc_to_str(udesc->vendor_code), udesc->version, udesc->revision); if (udesc->extension_buf) { fprintf(stderr, "\tCodec configuration data size: %d bytes\n", udesc->extension_buf_size); gf_free(udesc->extension_buf); } gf_free(udesc); } else { fprintf(stderr, "Unknown track type\n"); } } /*Crypto info*/ if (gf_isom_is_track_encrypted(file, trackNum)) { const char *scheme_URI, *KMS_URI; u32 scheme_type, version; u32 IV_size; Bool use_sel_enc; if (gf_isom_is_ismacryp_media(file, trackNum, 1)) { gf_isom_get_ismacryp_info(file, trackNum, 1, NULL, &scheme_type, &version, &scheme_URI, &KMS_URI, &use_sel_enc, &IV_size, NULL); fprintf(stderr, "\n\tProtected by ISMA E&A scheme %s (version %d)\n", gf_4cc_to_str(scheme_type), version); if (scheme_URI) fprintf(stderr, "scheme location: %s\n", scheme_URI); if (KMS_URI) { if (!strnicmp(KMS_URI, "(key)", 5)) fprintf(stderr, "\tKMS location: key in file\n"); else fprintf(stderr, "\tKMS location: %s\n", KMS_URI); } fprintf(stderr, "\tSelective Encryption: %s\n", use_sel_enc ? "Yes" : "No"); if (IV_size) fprintf(stderr, "\tInitialization Vector size: %d bits\n", IV_size*8); } else if (gf_isom_is_omadrm_media(file, trackNum, 1)) { const char *textHdrs; u32 enc_type, hdr_len; u64 orig_len; gf_isom_get_omadrm_info(file, trackNum, 1, NULL, &scheme_type, &version, &scheme_URI, &KMS_URI, &textHdrs, &hdr_len, &orig_len, &enc_type, &use_sel_enc, &IV_size, NULL); fprintf(stderr, "\n\tProtected by OMA DRM scheme %s (version %d)\n", gf_4cc_to_str(scheme_type), version); fprintf(stderr, "\tRights Issuer: %s\n", KMS_URI); fprintf(stderr, "\tContent ID: %s\n", scheme_URI); if (textHdrs) { u32 offset; const char *start = textHdrs; fprintf(stderr, "\tOMA Textual Headers:\n"); i=0; offset=0; while (i<hdr_len) { if (start[i]==0) { fprintf(stderr, "\t\t%s\n", start+offset); offset=i+1; } i++; } fprintf(stderr, "\\tt%s\n", start+offset); } if (orig_len) fprintf(stderr, "\tOriginal media size "LLD"\n", orig_len); fprintf(stderr, "\tEncryption algorithm %s\n", (enc_type==1) ? "AEA 128 CBC" : (enc_type ? "AEA 128 CTR" : "None")); fprintf(stderr, "\tSelective Encryption: %s\n", use_sel_enc ? 
"Yes" : "No"); if (IV_size) fprintf(stderr, "\tInitialization Vector size: %d bits\n", IV_size*8); } else if(gf_isom_is_cenc_media(file, trackNum, 1)) { const u8 *def_key; u32 def_key_size; Bool IsEncrypted; u8 crypt_byte_block, skip_byte_block; IV_size = 0; gf_isom_get_cenc_info(file, trackNum, 1, NULL, &scheme_type, &version); gf_isom_cenc_get_default_info(file, trackNum, 1, NULL, &IsEncrypted, &crypt_byte_block, &skip_byte_block, &def_key, &def_key_size); fprintf(stderr, "\n\tProtected by CENC scheme %s version 0x%08X", gf_4cc_to_str(scheme_type), version); if (crypt_byte_block && skip_byte_block) fprintf(stderr, " - Pattern %d:%d", (u32) skip_byte_block, (u32) crypt_byte_block); if (def_key && def_key[0]) fprintf(stderr, " - MultiKey"); fprintf(stderr, "\n"); dump_key_info(def_key, def_key_size, IsEncrypted); } else if(gf_isom_is_adobe_protection_media(file, trackNum, 1)) { gf_isom_get_adobe_protection_info(file, trackNum, 1, NULL, &scheme_type, &version, NULL); fprintf(stderr, "\nProtected by Adobe scheme %s (version %d)\n", gf_4cc_to_str(scheme_type), version); } else { fprintf(stderr, "\nProtected by unknown scheme %s\n", gf_4cc_to_str(gf_isom_is_media_encrypted(file, trackNum, 0) )); } fprintf(stderr, "\n"); } if ( gf_media_get_rfc_6381_codec_name(file, trackNum, szCodec, GF_FALSE, GF_FALSE) == GF_OK) { fprintf(stderr, "\tRFC6381 Codec Parameters: %s\n", szCodec); } DumpMetaItem(file, 0, trackNum, "\tTrack Meta"); gf_isom_get_track_switch_group_count(file, trackNum, &alt_group, &nb_groups); if (alt_group) { fprintf(stderr, "Alternate Group ID %d\n", alt_group); for (i=0; i<nb_groups; i++) { u32 nb_crit, switchGroupID; const u32 *criterias = gf_isom_get_track_switch_parameter(file, trackNum, i+1, &switchGroupID, &nb_crit); if (!nb_crit) { fprintf(stderr, "\tNo criteria in %s group\n", switchGroupID ? "switch" : "alternate"); } else { if (switchGroupID) { fprintf(stderr, "\tSwitchGroup ID %d criterias: ", switchGroupID); } else { fprintf(stderr, "\tAlternate Group criterias: "); } for (j=0; j<nb_crit; j++) { if (j) fprintf(stderr, " "); fprintf(stderr, "%s", gf_4cc_to_str(criterias[j]) ); } fprintf(stderr, "\n"); } } } switch (gf_isom_has_sync_points(file, trackNum)) { case 0: fprintf(stderr, "\tAll samples are sync\n"); break; case 1: { u32 nb_sync = gf_isom_get_sync_point_count(file, trackNum) - 1; if (! 
nb_sync) { fprintf(stderr, "\tOnly one sync sample\n"); } else { fprintf(stderr, "\tAverage GOP length: %d samples\n", gf_isom_get_sample_count(file, trackNum) / nb_sync); } } break; case 2: fprintf(stderr, "\tNo sync sample found\n"); break; } fprintf(stderr, "\tMax sample duration: %d / %d\n", gf_isom_get_max_sample_delta(file, trackNum), timescale); if (!full_dump) { fprintf(stderr, "\n"); return; } dur = size = 0; max_rate = rate = 0; time_slice = 0; ts = gf_isom_get_media_timescale(file, trackNum); csize = gf_isom_get_constant_sample_size(file, trackNum); cdur = gf_isom_get_constant_sample_duration(file, trackNum); count = gf_isom_get_sample_count(file, trackNum); if (csize && cdur) { size = count * csize; dur = cdur * count; } else { for (j=0; j<count; j++) { GF_ISOSample *samp; if (is_od_track) { samp = gf_isom_get_sample(file, trackNum, j+1, NULL); } else { samp = gf_isom_get_sample_info(file, trackNum, j+1, NULL, NULL); } if (!samp) { M4_LOG(GF_LOG_ERROR, ("Failed to fetch sample %d\n", j+1)); return; } dur = samp->DTS+samp->CTS_Offset; size += samp->dataLength; rate += samp->dataLength; if (samp->DTS - time_slice > ts) { Double max_tmp = rate * ts / (samp->DTS - time_slice); if (max_rate < max_tmp ) max_rate = max_tmp; rate = 0; time_slice = samp->DTS; } gf_isom_sample_del(&samp); } } fprintf(stderr, "\nComputed info from media:\n"); if (csize && cdur) { fprintf(stderr, "\tConstant sample size %d bytes and dur %d / %d\n", csize, cdur, ts); } scale = 1000.0 / ts; dur = (u64) (scale * dur); fprintf(stderr, "\tTotal size "LLU" bytes - Total samples duration "LLU" ms\n", size, dur); if (!dur) { fprintf(stderr, "\n"); return; } /*rate in byte, dur is in ms*/ rate = 8000.0 * size / dur; if (!max_rate) max_rate = rate; else max_rate *= 8.0; if (rate >= 1500) { fprintf(stderr, "\tAverage rate %.2f kbps - Max Rate %.2f kbps\n", rate/1000, max_rate/1000); } else { fprintf(stderr, "\tAverage rate %.2f bps - Max Rate %.2f bps\n", rate, max_rate); } { u32 dmin, dmax, davg, smin, smax, savg; gf_isom_get_chunks_infos(file, trackNum, &dmin, &davg, &dmax, &smin, &savg, &smax); fprintf(stderr, "\tChunk durations: min %d ms - max %d ms - average %d ms\n", (1000*dmin)/ts, (1000*dmax)/ts, (1000*davg)/ts); fprintf(stderr, "\tChunk sizes (bytes): min %d - max %d - average %d\n", smin, smax, savg); } fprintf(stderr, "\n"); count = gf_isom_get_chapter_count(file, trackNum); if (count) { const char *name; u64 time; fprintf(stderr, "\nChapters:\n"); for (j=0; j<count; j++) { gf_isom_get_chapter(file, trackNum, j+1, &time, &name); fprintf(stderr, "\tChapter #%d - %s - \"%s\"\n", j+1, format_duration(time, 1000, szDur), name); } } } void DumpMovieInfo(GF_ISOFile *file) { GF_InitialObjectDescriptor *iod; Bool dump_m4sys = GF_FALSE; u32 i, brand, min, timescale, count, data_len; const u8 *data; u64 create, modif; Bool has_itags = GF_FALSE; char szDur[50]; DumpMetaItem(file, 1, 0, "# File Meta"); if (!gf_isom_has_movie(file)) { if (gf_isom_has_segment(file, &brand, &min)) { count = gf_isom_segment_get_fragment_count(file); fprintf(stderr, "File is a segment - %d movie fragments - Brand %s (version %d):\n", count, gf_4cc_to_str(brand), min); for (i=0; i<count; i++) { u32 j, traf_count = gf_isom_segment_get_track_fragment_count(file, i+1); for (j=0; j<traf_count; j++) { u32 ID; u64 tfdt; ID = gf_isom_segment_get_track_fragment_decode_time(file, i+1, j+1, &tfdt); fprintf(stderr, "\tFragment #%d Track ID %d - TFDT "LLU"\n", i+1, ID, tfdt); } } } else { fprintf(stderr, "File has no movie (moov) - static data 
container\n"); } return; } timescale = gf_isom_get_timescale(file); i=gf_isom_get_track_count(file); fprintf(stderr, "# Movie Info - %d track%s - TimeScale %d\n", i, i>1 ? "s" : "", timescale); modif = gf_isom_get_duration(file); create = gf_isom_get_original_duration(file); fprintf(stderr, "Duration %s", format_duration(create, timescale, szDur)); if (create!=modif) { fprintf(stderr, " (recomputed %s)", format_duration(modif, timescale, szDur)); } fprintf(stderr, "\n"); #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (gf_isom_is_fragmented(file)) { fprintf(stderr, "Fragmented: yes - duration %s\n%d fragments - %d SegmentIndexes\n", format_duration(gf_isom_get_fragmented_duration(file), timescale, szDur), gf_isom_get_fragments_count(file, 0) , gf_isom_get_fragments_count(file, 1) ); } else { fprintf(stderr, "Fragmented: no\n"); } #endif if (gf_isom_moov_first(file)) fprintf(stderr, "Progressive (moov before mdat)\n"); if (gf_isom_get_brand_info(file, &brand, &min, &count) == GF_OK) { fprintf(stderr, "Major Brand %s - version %d - compatible brands:", gf_4cc_to_str(brand), min); for (i=0; i<count;i++) { if (gf_isom_get_alternate_brand(file, i+1, &brand)==GF_OK) fprintf(stderr, " %s", gf_4cc_to_str(brand) ); } fprintf(stderr, "\n"); } gf_isom_get_creation_time(file, &create, &modif); fprintf(stderr, "Created: %s", format_date(create, szDur)); if (create != modif) fprintf(stderr, "Modified: %s", format_date(modif, szDur)); fprintf(stderr, "\n"); DumpMetaItem(file, 0, 0, "# Movie Meta"); iod = (GF_InitialObjectDescriptor *) gf_isom_get_root_od(file); if (iod) { u32 desc_size = gf_odf_desc_size((GF_Descriptor *)iod); if (iod->tag == GF_ODF_IOD_TAG) { fprintf(stderr, "File has root IOD (%d bytes)\n", desc_size); fprintf(stderr, "Scene PL 0x%02x - Graphics PL 0x%02x - OD PL 0x%02x\n", iod->scene_profileAndLevel, iod->graphics_profileAndLevel, iod->OD_profileAndLevel); fprintf(stderr, "Visual PL: %s (0x%02x)\n", gf_m4v_get_profile_name(iod->visual_profileAndLevel), iod->visual_profileAndLevel); fprintf(stderr, "Audio PL: %s (0x%02x)\n", gf_m4a_get_profile_name(iod->audio_profileAndLevel), iod->audio_profileAndLevel); //fprintf(stderr, "inline profiles included %s\n", iod->inlineProfileFlag ? 
"yes" : "no"); } else { fprintf(stderr, "File has root OD (%d bytes)\n", desc_size); } if (!gf_list_count(iod->ESDescriptors)) fprintf(stderr, "No streams included in root OD\n"); else dump_m4sys = GF_TRUE; gf_odf_desc_del((GF_Descriptor *) iod); } if (gf_isom_is_JPEG2000(file)) fprintf(stderr, "File is JPEG 2000\n"); count = gf_isom_get_copyright_count(file); if (count) { const char *lang, *note; fprintf(stderr, "\nCopyrights:\n"); for (i=0; i<count; i++) { gf_isom_get_copyright(file, i+1, &lang, &note); fprintf(stderr, "\t(%s) %s\n", lang, note); } } count = gf_isom_get_chapter_count(file, 0); if (count) { const char *name; u64 time; fprintf(stderr, "\nChapters:\n"); for (i=0; i<count; i++) { gf_isom_get_chapter(file, 0, i+1, &time, &name); fprintf(stderr, "\tChapter #%d - %s - \"%s\"\n", i+1, format_duration(time, 1000, szDur), name); } } if (gf_isom_apple_get_tag(file, 0, &data, &data_len) == GF_OK) { has_itags = GF_TRUE; fprintf(stderr, "\niTunes Info:\n"); i=0; while (1) { u32 int_val2, flags, itype; GF_ISOiTunesTag tag; u64 int_val; s32 tag_idx; GF_Err e = gf_isom_apple_enum_tag(file, i, &tag, &data, &data_len, &int_val, &int_val2, &flags); if (e) break; i++; tag_idx = gf_itags_find_by_itag(tag); if (tag_idx<0) { fprintf(stderr, "\t%s: %s\n", gf_4cc_to_str(tag), data); continue; } fprintf(stderr, "\t%s: ", gf_itags_get_name(tag_idx) ); itype = gf_itags_get_type(tag_idx); switch (itype) { case GF_ITAG_BOOL: fprintf(stderr, int_val ? "yes" : "no"); break; case GF_ITAG_INT8: case GF_ITAG_INT16: case GF_ITAG_INT32: case GF_ITAG_INT64: fprintf(stderr, LLU, int_val); break; case GF_ITAG_FRAC6: case GF_ITAG_FRAC8: fprintf(stderr, LLU" / %u", int_val, int_val2); break; case GF_ITAG_FILE: if (flags==14) fprintf(stderr, "PNG File"); else if (flags==13) fprintf(stderr, "JPEG File"); else fprintf(stderr, "unknown (flags %d)", flags); break; case GF_ITAG_ID3_GENRE: if (int_val) { fprintf(stderr, "%s", gf_id3_get_genre((u32) int_val) ); break; } //fallthrough default: if (data) fprintf(stderr, "%s", data); else fprintf(stderr, data_len ? 
"none" : "unknown"); break; } fprintf(stderr, "\n"); } } i=0; while (1) { u32 type, version; char *wmatag; GF_Err e = gf_isom_wma_enum_tag(file, i, &wmatag, &data, &data_len, &version, &type); if (e) break; if (!i) { fprintf(stderr, "\nWMA Info:\n"); } i++; fprintf(stderr, "\t%s", wmatag); if (version!=1) fprintf(stderr, " (version %d)", version); fprintf(stderr, ": "); if (type) { fprintf(stderr, "unknown type %d\n", type); } else { u16 *src_str = (u16 *) data; u32 len = (u32) ( UTF8_MAX_BYTES_PER_CHAR * gf_utf8_wcslen(src_str) ); char *utf8str = (char *)gf_malloc(len + 1); u32 res_len = (u32) gf_utf8_wcstombs(utf8str, len, (const unsigned short **) &src_str); utf8str[res_len] = 0; fprintf(stderr, "%s\n", utf8str); gf_free(utf8str); } } print_udta(file, 0, has_itags); fprintf(stderr, "\n"); for (i=0; i<gf_isom_get_track_count(file); i++) { DumpTrackInfo(file, i+1, 0, GF_TRUE, dump_m4sys); } } #endif /*defined(GPAC_DISABLE_ISOM) || defined(GPAC_DISABLE_ISOM_WRITE)*/ #ifndef GPAC_DISABLE_MPEG2TS typedef struct { /* when writing to file */ FILE *pes_out; char dump[100]; #if 0 FILE *pes_out_nhml; char nhml[100]; FILE *pes_out_info; char info[100]; #endif Bool is_info_dumped; u32 prog_number; /* For logging timing information (PCR, PTS/DTS) */ FILE *timestamps_info_file; char timestamps_info_name[100]; /* when dumping TS information */ u32 dump_pid; Bool has_seen_pat; } GF_M2TS_Dump; static void on_m2ts_dump_event(GF_M2TS_Demuxer *ts, u32 evt_type, void *par) { u32 i, count; GF_M2TS_Program *prog; GF_M2TS_PES_PCK *pck; GF_M2TS_Dump *dumper = (GF_M2TS_Dump *)ts->user; switch (evt_type) { case GF_M2TS_EVT_PAT_FOUND: if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } break; case GF_M2TS_EVT_PAT_UPDATE: if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } break; case GF_M2TS_EVT_PAT_REPEAT: /* WARNING: We detect the pat on a repetition, probably to ensure that we also have seen all the PMT To be checked */ dumper->has_seen_pat = 1; if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } // fprintf(stderr, "Repeated PAT found - %d programs\n", gf_list_count(ts->programs) ); break; case GF_M2TS_EVT_CAT_FOUND: if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } break; case GF_M2TS_EVT_CAT_UPDATE: if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } break; case GF_M2TS_EVT_CAT_REPEAT: if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, 0); } break; case GF_M2TS_EVT_PMT_FOUND: prog = (GF_M2TS_Program*)par; if (gf_list_count(ts->programs)>1 && prog->number!=dumper->prog_number) break; count = gf_list_count(prog->streams); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("Program number %d found - %d streams:\n", prog->number, count)); for (i=0; i<count; i++) { GF_M2TS_ES *es = gf_list_get(prog->streams, i); if (es->pid == prog->pmt_pid) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("\tPID %d: Program Map Table\n", es->pid)); } else { GF_M2TS_PES *pes = (GF_M2TS_PES *)es; gf_m2ts_set_pes_framing(pes, dumper->pes_out ? 
GF_M2TS_PES_FRAMING_RAW : GF_M2TS_PES_FRAMING_DEFAULT); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("\tPID %d: %s ", pes->pid, gf_m2ts_get_stream_name(pes->stream_type) )); if (pes->mpeg4_es_id) GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, (" - MPEG-4 ES ID %d", pes->mpeg4_es_id)); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("\n")); } } if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, prog->pmt_pid); } break; case GF_M2TS_EVT_PMT_UPDATE: prog = (GF_M2TS_Program*)par; if (gf_list_count(ts->programs)>1 && prog->number!=dumper->prog_number) break; if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, prog->pmt_pid); } break; case GF_M2TS_EVT_PMT_REPEAT: prog = (GF_M2TS_Program*)par; if (gf_list_count(ts->programs)>1 && prog->number!=dumper->prog_number) break; if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\n", ts->pck_number, prog->pmt_pid); } break; case GF_M2TS_EVT_SDT_FOUND: #ifndef GPAC_DISABLE_LOG count = gf_list_count(ts->SDTs) ; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("Program Description found - %d desc:\n", count)); for (i=0; i<count; i++) { GF_M2TS_SDT *sdt = gf_list_get(ts->SDTs, i); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("\tServiceID %d - Provider %s - Name %s\n", sdt->service_id, sdt->provider, sdt->service)); } #endif break; case GF_M2TS_EVT_SDT_UPDATE: #ifndef GPAC_DISABLE_LOG count = gf_list_count(ts->SDTs) ; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("Program Description updated - %d desc\n", count)); for (i=0; i<count; i++) { GF_M2TS_SDT *sdt = gf_list_get(ts->SDTs, i); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("\tServiceID %d - Provider %s - Name %s\n", sdt->service_id, sdt->provider, sdt->service)); } #endif break; case GF_M2TS_EVT_SDT_REPEAT: break; case GF_M2TS_EVT_PES_TIMING: pck = par; if (gf_list_count(ts->programs)>1 && pck->stream->program->number != dumper->prog_number) break; break; case GF_M2TS_EVT_PES_PCK: pck = par; if (gf_list_count(ts->programs)>1 && pck->stream->program->number != dumper->prog_number) break; if (dumper->has_seen_pat) { /*We need the interpolated PCR for the pcrb, hence moved this calculus out, and saving the calculated value in index_info to put it in the pcrb*/ GF_M2TS_PES *pes = pck->stream; /*FIXME : not used GF_M2TS_Program *prog = pes->program; */ /* Interpolated PCR value for the TS packet containing the PES header start */ u64 interpolated_pcr_value = 0; if (pes->last_pcr_value && pes->before_last_pcr_value_pck_number && pes->last_pcr_value > pes->before_last_pcr_value) { u32 delta_pcr_pck_num = pes->last_pcr_value_pck_number - pes->before_last_pcr_value_pck_number; u32 delta_pts_pcr_pck_num = pes->pes_start_packet_number - pes->last_pcr_value_pck_number; u64 delta_pcr_value = pes->last_pcr_value - pes->before_last_pcr_value; if ((pes->pes_start_packet_number > pes->last_pcr_value_pck_number) && (pes->last_pcr_value > pes->before_last_pcr_value)) { pes->last_pcr_value = pes->before_last_pcr_value; } /* we can compute the interpolated pcr value for the packet containing the PES header */ interpolated_pcr_value = pes->last_pcr_value + (u64)((delta_pcr_value*delta_pts_pcr_pck_num*1.0)/delta_pcr_pck_num); } if (dumper->timestamps_info_file) { Double diff; fprintf(dumper->timestamps_info_file, "%u\t%d\t", pck->stream->pes_start_packet_number, pck->stream->pid); if (interpolated_pcr_value) fprintf(dumper->timestamps_info_file, "%f", interpolated_pcr_value/(300.0 * 90000)); fprintf(dumper->timestamps_info_file, "\t"); if 
(pck->DTS) fprintf(dumper->timestamps_info_file, "%f", (pck->DTS / 90000.0)); fprintf(dumper->timestamps_info_file, "\t%f\t%d\t%d", pck->PTS / 90000.0, (pck->flags & GF_M2TS_PES_PCK_RAP) ? 1 : 0, (pck->flags & GF_M2TS_PES_PCK_DISCONTINUITY) ? 1 : 0); if (interpolated_pcr_value) { diff = (pck->DTS ? pck->DTS : pck->PTS) / 90000.0; diff -= pes->last_pcr_value / (300.0 * 90000); fprintf(dumper->timestamps_info_file, "\t%f\n", diff); if (diff<0) { M4_LOG(GF_LOG_WARNING, ("Warning: detected PTS/DTS value less than current PCR of %g sec\n", diff)); } } else { fprintf(dumper->timestamps_info_file, "\t\n"); } } } if (dumper->has_seen_pat && dumper->pes_out && (dumper->dump_pid == pck->stream->pid)) { gf_fwrite(pck->data, pck->data_len, dumper->pes_out); } break; case GF_M2TS_EVT_PES_PCR: pck = par; if (gf_list_count(ts->programs)>1 && pck->stream->program->number != dumper->prog_number) break; if (dumper->timestamps_info_file) { fprintf(dumper->timestamps_info_file, "%u\t%d\t%f\t\t\t\t%d\n", pck->stream->program->last_pcr_value_pck_number, pck->stream->pid, pck->PTS / (300*90000.0), (pck->flags & GF_M2TS_PES_PCK_DISCONTINUITY) ? 1 : 0); } break; case GF_M2TS_EVT_SL_PCK: #if 0 { GF_M2TS_SL_PCK *sl_pck = par; if (dumper->pes_out && (dumper->dump_pid == sl_pck->stream->pid)) { GF_SLHeader header; u32 header_len; if (sl_pck->stream->mpeg4_es_id) { GF_ESD *esd = ((GF_M2TS_PES*)sl_pck->stream)->esd; if (!dumper->is_info_dumped) { if (esd->decoderConfig->decoderSpecificInfo) gf_fwrite(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, dumper->pes_out_info); dumper->is_info_dumped = 1; fprintf(dumper->pes_out_nhml, "<NHNTStream version=\"1.0\" "); fprintf(dumper->pes_out_nhml, "timeScale=\"%d\" ", esd->slConfig->timestampResolution); fprintf(dumper->pes_out_nhml, "streamType=\"%d\" ", esd->decoderConfig->streamType); fprintf(dumper->pes_out_nhml, "objectTypeIndication=\"%d\" ", esd->decoderConfig->objectTypeIndication); if (esd->decoderConfig->decoderSpecificInfo) fprintf(dumper->pes_out_nhml, "specificInfoFile=\"%s\" ", dumper->info); fprintf(dumper->pes_out_nhml, "baseMediaFile=\"%s\" ", dumper->dump); fprintf(dumper->pes_out_nhml, "inRootOD=\"yes\">\n"); } gf_sl_depacketize(esd->slConfig, &header, sl_pck->data, sl_pck->data_len, &header_len); gf_fwrite(sl_pck->data+header_len, sl_pck->data_len-header_len, dumper->pes_out); fprintf(dumper->pes_out_nhml, "<NHNTSample DTS=\""LLD"\" dataLength=\"%d\" isRAP=\"%s\"/>\n", header.decodingTimeStamp, sl_pck->data_len-header_len, (header.randomAccessPointFlag?"yes":"no")); } } } #endif break; } } void dump_mpeg2_ts(char *mpeg2ts_file, char *out_name, Bool prog_num) { u8 data[188]; GF_M2TS_Dump dumper; u32 size; u64 fsize, fdone; GF_M2TS_Demuxer *ts; FILE *src; if (!prog_num && !out_name) { fprintf(stderr, "No program number nor output filename specified. 
No timestamp file will be generated."); } src = gf_fopen(mpeg2ts_file, "rb"); if (!src) { M4_LOG(GF_LOG_ERROR, ("Cannot open %s: no such file\n", mpeg2ts_file)); return; } ts = gf_m2ts_demux_new(); ts->on_event = on_m2ts_dump_event; ts->notify_pes_timing = 1; memset(&dumper, 0, sizeof(GF_M2TS_Dump)); ts->user = &dumper; dumper.prog_number = prog_num; /*PES dumping*/ if (out_name) { char *pid = strrchr(out_name, '#'); if (pid) { dumper.dump_pid = atoi(pid+1); pid[0] = 0; sprintf(dumper.dump, "%s_%d.raw", out_name, dumper.dump_pid); dumper.pes_out = gf_fopen(dumper.dump, "wb"); #if 0 sprintf(dumper.nhml, "%s_%d.nhml", pes_out_name, dumper.dump_pid); dumper.pes_out_nhml = gf_fopen(dumper.nhml, "wt"); sprintf(dumper.info, "%s_%d.info", pes_out_name, dumper.dump_pid); dumper.pes_out_info = gf_fopen(dumper.info, "wb"); #endif pid[0] = '#'; } } gf_fseek(src, 0, SEEK_END); fsize = gf_ftell(src); gf_fseek(src, 0, SEEK_SET); /* first loop to process all packets between two PAT, and assume all signaling was found between these 2 PATs */ while (!feof(src)) { size = (u32) gf_fread(data, 188, src); if (size<188) break; gf_m2ts_process_data(ts, data, size); if (dumper.has_seen_pat) break; } dumper.has_seen_pat = GF_TRUE; if (!prog_num) { GF_M2TS_Program *p = gf_list_get(ts->programs, 0); if (p) prog_num = p->number; fprintf(stderr, "No program number specified, defaulting to first program\n"); } if (!prog_num && !out_name) { fprintf(stderr, "No program number nor output filename specified. No timestamp file will be generated\n"); } if (prog_num) { sprintf(dumper.timestamps_info_name, "%s_prog_%d_timestamps.txt", mpeg2ts_file, prog_num/*, mpeg2ts_file*/); dumper.timestamps_info_file = gf_fopen(dumper.timestamps_info_name, "wt"); if (!dumper.timestamps_info_file) { M4_LOG(GF_LOG_ERROR, ("Cannot open file %s\n", dumper.timestamps_info_name)); return; } fprintf(dumper.timestamps_info_file, "PCK#\tPID\tPCR\tDTS\tPTS\tRAP\tDiscontinuity\tDTS-PCR Diff\n"); } gf_m2ts_reset_parsers(ts); gf_fseek(src, 0, SEEK_SET); fdone = 0; while (!feof(src)) { size = (u32) gf_fread(data, 188, src); if (size<188) break; gf_m2ts_process_data(ts, data, size); fdone += size; gf_set_progress("MPEG-2 TS Parsing", fdone, fsize); } gf_fclose(src); gf_m2ts_demux_del(ts); if (dumper.pes_out) gf_fclose(dumper.pes_out); #if 0 if (dumper.pes_out_nhml) { if (dumper.is_info_dumped) fprintf(dumper.pes_out_nhml, "</NHNTStream>\n"); gf_fclose(dumper.pes_out_nhml); gf_fclose(dumper.pes_out_info); } #endif if (dumper.timestamps_info_file) gf_fclose(dumper.timestamps_info_file); } #endif /*GPAC_DISABLE_MPEG2TS*/ #include <gpac/download.h> #include <gpac/mpd.h> void get_file_callback(void *usr_cbk, GF_NETIO_Parameter *parameter) { if (parameter->msg_type==GF_NETIO_DATA_EXCHANGE) { u64 tot_size, done, max; u32 bps; gf_dm_sess_get_stats(parameter->sess, NULL, NULL, &tot_size, &done, &bps, NULL); if (tot_size) { max = done; max *= 100; max /= tot_size; fprintf(stderr, "download %02d %% at %05d kpbs\r", (u32) max, bps*8/1000); } } } static GF_DownloadSession *get_file(const char *url, GF_DownloadManager *dm, GF_Err *e) { GF_DownloadSession *sess; sess = gf_dm_sess_new(dm, url, GF_NETIO_SESSION_NOT_THREADED, get_file_callback, NULL, e); if (!sess) return NULL; *e = gf_dm_sess_process(sess); if (*e) { gf_dm_sess_del(sess); return NULL; } return sess; } static void revert_cache_file(char *item_path) { char szPATH[GF_MAX_PATH]; const char *url; GF_Config *cached; if (!strstr(item_path, "gpac_cache_")) { fprintf(stderr, "%s is not a gpac cache file\n", 
item_path); return; } if (!strncmp(item_path, "./", 2) || !strncmp(item_path, ".\\", 2)) item_path += 2; strcpy(szPATH, item_path); strcat(szPATH, ".txt"); cached = gf_cfg_new(NULL, szPATH); url = gf_cfg_get_key(cached, "cache", "url"); if (url) url = strstr(url, "://"); if (url) { u32 i, len, dir_len=0, k=0; char *sep; char *dst_name; sep = strstr(item_path, "gpac_cache_"); if (sep) { sep[0] = 0; dir_len = (u32) strlen(item_path); sep[0] = 'g'; } url+=3; len = (u32) strlen(url); dst_name = gf_malloc(len+dir_len+1); memset(dst_name, 0, len+dir_len+1); strncpy(dst_name, item_path, dir_len); k=dir_len; for (i=0; i<len; i++) { dst_name[k] = url[i]; if (dst_name[k]==':') dst_name[k]='_'; else if (dst_name[k]=='/') { if (!gf_dir_exists(dst_name)) gf_mkdir(dst_name); } k++; } if (gf_file_exists(item_path)) { gf_file_move(item_path, dst_name); } gf_free(dst_name); } else { M4_LOG(GF_LOG_ERROR, ("Failed to reverse %s cache file\n", item_path)); } gf_cfg_del(cached); gf_file_delete(szPATH); } GF_Err rip_mpd(const char *mpd_src, const char *output_dir) { GF_DownloadSession *sess; u32 i, connect_time, reply_time, download_time, req_hdr_size, rsp_hdr_size; GF_Err e; GF_DOMParser *mpd_parser=NULL; GF_MPD *mpd=NULL; GF_MPD_Period *period; GF_MPD_AdaptationSet *as; GF_MPD_Representation *rep; char szName[GF_MAX_PATH]; GF_DownloadManager *dm; if (output_dir) { char *sep; strcpy(szName, output_dir); sep = gf_file_basename(szName); if (sep) sep[0] = 0; gf_opts_set_key("temp", "cache", szName); } else { gf_opts_set_key("temp", "cache", "."); } gf_opts_set_key("temp", "clean-cache", "true"); dm = gf_dm_new(NULL); /* char *name = strrchr(mpd_src, '/'); if (!name) name = strrchr(mpd_src, '\\'); if (!name) name = "manifest.mpd"; else name ++; if (strchr(name, '?') || strchr(name, '&')) name = "manifest.mpd"; */ fprintf(stderr, "Downloading %s\n", mpd_src); sess = get_file(mpd_src, dm, &e); if (!sess) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error downloading MPD file %s: %s\n", mpd_src, gf_error_to_string(e) )); goto err_exit; } strcpy(szName, gf_dm_sess_get_cache_name(sess) ); gf_dm_sess_get_header_sizes_and_times(sess, &req_hdr_size, &rsp_hdr_size, &connect_time, &reply_time, &download_time); gf_dm_sess_del(sess); if (e) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error fetching MPD file %s: %s\n", mpd_src, gf_error_to_string(e))); goto err_exit; } else { GF_LOG(GF_LOG_INFO, GF_LOG_APP, ("Fetched file %s\n", mpd_src)); } GF_LOG(GF_LOG_DEBUG, GF_LOG_APP, ("GET Header size %d - Reply header size %d\n", req_hdr_size, rsp_hdr_size)); GF_LOG(GF_LOG_DEBUG, GF_LOG_APP, ("GET time: Connect Time %d - Reply Time %d - Download Time %d\n", connect_time, reply_time, download_time)); mpd_parser = gf_xml_dom_new(); e = gf_xml_dom_parse(mpd_parser, szName, NULL, NULL); if (e != GF_OK) { gf_xml_dom_del(mpd_parser); GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error parsing MPD %s : %s\n", mpd_src, gf_error_to_string(e))); return e; } mpd = gf_mpd_new(); e = gf_mpd_init_from_dom(gf_xml_dom_get_root(mpd_parser), mpd, mpd_src); gf_xml_dom_del(mpd_parser); mpd_parser=NULL; if (e) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error initializing MPD %s : %s\n", mpd_src, gf_error_to_string(e))); goto err_exit; } else { GF_LOG(GF_LOG_DEBUG, GF_LOG_APP, ("MPD %s initialized: %s\n", szName, gf_error_to_string(e))); } revert_cache_file(szName); if (mpd->type==GF_MPD_TYPE_DYNAMIC) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("MPD rip is not supported on live sources\n")); e = GF_NOT_SUPPORTED; goto err_exit; } i=0; while ((period = (GF_MPD_Period *) gf_list_enum(mpd->periods, 
&i))) { char *initTemplate = NULL; Bool segment_base = GF_FALSE; u32 j=0; if (period->segment_base) segment_base=GF_TRUE; if (period->segment_template && period->segment_template->initialization) { initTemplate = period->segment_template->initialization; } while ((as = gf_list_enum(period->adaptation_sets, &j))) { u32 k=0; if (!initTemplate && as->segment_template && as->segment_template->initialization) { initTemplate = as->segment_template->initialization; } if (as->segment_base) segment_base=GF_TRUE; while ((rep = gf_list_enum(as->representations, &k))) { u64 out_range_start, out_range_end, segment_duration; Bool is_in_base_url; char *seg_url; u32 seg_idx=0; if (rep->segment_template && rep->segment_template->initialization) { initTemplate = rep->segment_template->initialization; } else if (k>1) { initTemplate = NULL; } if (rep->segment_base) segment_base=GF_TRUE; e = gf_mpd_resolve_url(mpd, rep, as, period, mpd_src, 0, GF_MPD_RESOLVE_URL_INIT, 0, 0, &seg_url, &out_range_start, &out_range_end, &segment_duration, &is_in_base_url, NULL, NULL, NULL); if (e) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error resolving init segment name : %s\n", gf_error_to_string(e))); continue; } //not a byte range, replace URL if (segment_base) { } else if (out_range_start || out_range_end || !seg_url) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("byte range rip not yet implemented\n")); if (seg_url) gf_free(seg_url); e = GF_NOT_SUPPORTED; goto err_exit; } fprintf(stderr, "Downloading %s\n", seg_url); sess = get_file(seg_url, dm, &e); if (e) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error downloading init segment %s from MPD %s : %s\n", seg_url, mpd_src, gf_error_to_string(e))); goto err_exit; } revert_cache_file((char *) gf_dm_sess_get_cache_name(sess) ); gf_free(seg_url); gf_dm_sess_del(sess); if (segment_base) continue; while (1) { e = gf_mpd_resolve_url(mpd, rep, as, period, mpd_src, 0, GF_MPD_RESOLVE_URL_MEDIA, seg_idx, 0, &seg_url, &out_range_start, &out_range_end, &segment_duration, NULL, NULL, NULL, NULL); if (e) { if (e<0) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error resolving segment name : %s\n", gf_error_to_string(e))); } break; } seg_idx++; if (out_range_start || out_range_end || !seg_url) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("byte range rip not yet implemented\n")); if (seg_url) gf_free(seg_url); break; } fprintf(stderr, "Downloading %s\n", seg_url); sess = get_file(seg_url, dm, &e); if (e) { gf_free(seg_url); if (e != GF_URL_ERROR) { GF_LOG(GF_LOG_ERROR, GF_LOG_APP, ("Error downloading segment %s: %s\n", seg_url, gf_error_to_string(e))); } else { //todo, properly detect end of dash representation e = GF_OK; } break; } revert_cache_file((char *) gf_dm_sess_get_cache_name(sess) ); gf_free(seg_url); gf_dm_sess_del(sess); } } } } err_exit: if (mpd) gf_mpd_del(mpd); gf_dm_del(dm); return e; }
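/* Hypothetical caller sketch for the ripper above (URL and output path are
 * placeholders, not taken from these sources): for a static MPD the function
 * fetches the manifest plus every init and media segment it can resolve, and
 * revert_cache_file() rebuilds a local directory tree from each segment URL.
 *
 *   GF_Err e = rip_mpd("http://example.com/vod/manifest.mpd", "rip/out.mpd");
 *   if (e) fprintf(stderr, "MPD rip failed: %s\n", gf_error_to_string(e));
 *
 * Live manifests (GF_MPD_TYPE_DYNAMIC) are rejected with GF_NOT_SUPPORTED, as
 * checked near the top of the function. */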
null
270
CWE-787
CVE-2021-32137
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2019 * All rights reserved * * This file is part of GPAC / ISO Media File Format sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/internal/isomedia_dev.h> #include <gpac/constants.h> #include <gpac/avparse.h> #ifndef GPAC_DISABLE_ISOM GF_Err Media_GetSampleDesc(GF_MediaBox *mdia, u32 SampleDescIndex, GF_SampleEntryBox **out_entry, u32 *dataRefIndex) { GF_SampleDescriptionBox *stsd; GF_SampleEntryBox *entry = NULL; if (!mdia) return GF_ISOM_INVALID_FILE; stsd = mdia->information->sampleTable->SampleDescription; if (!stsd) return GF_ISOM_INVALID_FILE; if (!SampleDescIndex || (SampleDescIndex > gf_list_count(stsd->child_boxes)) ) return GF_BAD_PARAM; entry = (GF_SampleEntryBox*)gf_list_get(stsd->child_boxes, SampleDescIndex - 1); if (!entry) return GF_ISOM_INVALID_FILE; if (out_entry) *out_entry = entry; if (dataRefIndex) *dataRefIndex = entry->dataReferenceIndex; return GF_OK; } GF_Err Media_GetSampleDescIndex(GF_MediaBox *mdia, u64 DTS, u32 *sampleDescIndex) { GF_Err e; u32 sampleNumber, prevSampleNumber, num; u64 offset; if (sampleDescIndex == NULL) return GF_BAD_PARAM; //find the sample for this time e = stbl_findEntryForTime(mdia->information->sampleTable, (u32) DTS, 0, &sampleNumber, &prevSampleNumber); if (e) return e; if (!sampleNumber && !prevSampleNumber) { //we have to assume the track was created to be used... If we have a sampleDesc, OK if (gf_list_count(mdia->information->sampleTable->SampleDescription->child_boxes)) { (*sampleDescIndex) = 1; return GF_OK; } return GF_BAD_PARAM; } return stbl_GetSampleInfos(mdia->information->sampleTable, ( sampleNumber ? 
sampleNumber : prevSampleNumber), &offset, &num, sampleDescIndex, NULL); } static GF_Err gf_isom_get_3gpp_audio_esd(GF_SampleTableBox *stbl, u32 type, GF_GenericAudioSampleEntryBox *entry, GF_ESD **out_esd) { (*out_esd) = gf_odf_desc_esd_new(2); (*out_esd)->decoderConfig->streamType = GF_STREAM_AUDIO; /*official mapping to MPEG-4*/ switch (type) { case GF_ISOM_SUBTYPE_3GP_EVRC: (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_EVRC; return GF_OK; case GF_ISOM_SUBTYPE_3GP_QCELP: { u32 block_size, sample_rate, sample_size, i; GF_SttsEntry *ent; GF_BitStream *bs; char szName[80]; /*only map CBR*/ sample_size = stbl->SampleSize->sampleSize; (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_QCELP; bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_bs_write_data(bs, "QLCMfmt ", 8); gf_bs_write_u32_le(bs, 150);/*fmt chunk size*/ gf_bs_write_u8(bs, 1); gf_bs_write_u8(bs, 0); /*QCELP GUID*/ gf_bs_write_data(bs, "\x41\x6D\x7F\x5E\x15\xB1\xD0\x11\xBA\x91\x00\x80\x5F\xB4\xB9\x7E", 16); gf_bs_write_u16_le(bs, 1); memset(szName, 0, 80); strcpy(szName, "QCELP-13K(GPAC-emulated)"); gf_bs_write_data(bs, szName, 80); ent = &stbl->TimeToSample->entries[0]; sample_rate = entry->samplerate_hi; block_size = ent ? ent->sampleDelta : 160; gf_bs_write_u16_le(bs, 8*sample_size*sample_rate/block_size); gf_bs_write_u16_le(bs, sample_size); gf_bs_write_u16_le(bs, block_size); gf_bs_write_u16_le(bs, sample_rate); gf_bs_write_u16_le(bs, entry->bitspersample); gf_bs_write_u32_le(bs, sample_size ? 0 : 7); /**/ for (i=0; i<7; i++) { static const u32 qcelp_r2s [] = {0, 1, 1, 4, 2, 8, 3, 17, 4, 35, 5, 8, 14, 1}; if (sample_size) { gf_bs_write_u16(bs, 0); } else { gf_bs_write_u8(bs, qcelp_r2s[2*i+1]); gf_bs_write_u8(bs, qcelp_r2s[2*i]); } } gf_bs_write_u16(bs, 0); memset(szName, 0, 80); gf_bs_write_data(bs, szName, 20);/*reserved*/ gf_bs_get_content(bs, & (*out_esd)->decoderConfig->decoderSpecificInfo->data, & (*out_esd)->decoderConfig->decoderSpecificInfo->dataLength); gf_bs_del(bs); } return GF_OK; case GF_ISOM_SUBTYPE_3GP_SMV: (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_SMV; return GF_OK; case GF_ISOM_SUBTYPE_3GP_AMR: (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_AMR; return GF_OK; case GF_ISOM_SUBTYPE_3GP_AMR_WB: (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_AMR_WB; return GF_OK; default: GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] unsupported sample description type %s\n", gf_4cc_to_str(entry->type))); break; } return GF_OK; } GF_Err Media_GetESD(GF_MediaBox *mdia, u32 sampleDescIndex, GF_ESD **out_esd, Bool true_desc_only) { u32 type; GF_ESD *esd; GF_MPEGSampleEntryBox *entry = NULL; GF_ESDBox *ESDa; GF_ProtectionSchemeInfoBox *sinf; GF_SampleDescriptionBox *stsd = mdia->information->sampleTable->SampleDescription; *out_esd = NULL; if (!stsd || !stsd->child_boxes || !sampleDescIndex || (sampleDescIndex > gf_list_count(stsd->child_boxes)) ) return GF_BAD_PARAM; esd = NULL; entry = (GF_MPEGSampleEntryBox*)gf_list_get(stsd->child_boxes, sampleDescIndex - 1); if (! 
entry) return GF_ISOM_INVALID_MEDIA; *out_esd = NULL; ESDa = NULL; type = entry->type; switch (type) { case GF_ISOM_BOX_TYPE_ENCV: case GF_ISOM_BOX_TYPE_ENCA: case GF_ISOM_BOX_TYPE_ENCS: case GF_ISOM_BOX_TYPE_ENCF: case GF_ISOM_BOX_TYPE_ENCM: case GF_ISOM_BOX_TYPE_ENCT: sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_SINF); if (sinf && sinf->original_format) { type = sinf->original_format->data_format; } break; case GF_ISOM_BOX_TYPE_RESV: sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_RINF); if (sinf && sinf->original_format) { type = sinf->original_format->data_format; } break; } switch (type) { case GF_ISOM_BOX_TYPE_MP4V: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; ESDa = ((GF_MPEGVisualSampleEntryBox*)entry)->esd; if (ESDa) esd = (GF_ESD *) ESDa->desc; /*avc1 encrypted*/ else esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_AVC1: case GF_ISOM_BOX_TYPE_AVC2: case GF_ISOM_BOX_TYPE_AVC3: case GF_ISOM_BOX_TYPE_AVC4: case GF_ISOM_BOX_TYPE_HVC1: case GF_ISOM_BOX_TYPE_HEV1: case GF_ISOM_BOX_TYPE_HVC2: case GF_ISOM_BOX_TYPE_HEV2: case GF_ISOM_BOX_TYPE_HVT1: case GF_ISOM_BOX_TYPE_264B: case GF_ISOM_BOX_TYPE_265B: case GF_ISOM_BOX_TYPE_DVHE: case GF_ISOM_BOX_TYPE_VVC1: case GF_ISOM_BOX_TYPE_VVI1: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_SVC1: case GF_ISOM_BOX_TYPE_MVC1: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; if ((mdia->mediaTrack->extractor_mode & 0x0000FFFF) != GF_ISOM_NALU_EXTRACT_INSPECT) AVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, mdia); else AVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, NULL); esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_LHE1: case GF_ISOM_BOX_TYPE_LHV1: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; if ((mdia->mediaTrack->extractor_mode & 0x0000FFFF) != GF_ISOM_NALU_EXTRACT_INSPECT) HEVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, mdia); else HEVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, NULL); esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_AV01: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; AV1_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*)entry, mdia); esd = ((GF_MPEGVisualSampleEntryBox*)entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_VP08: case GF_ISOM_BOX_TYPE_VP09: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; VP9_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*)entry, mdia); esd = ((GF_MPEGVisualSampleEntryBox*)entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_MP4A: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_AUDIO) return GF_ISOM_INVALID_MEDIA; { GF_MPEGAudioSampleEntryBox *ase = (GF_MPEGAudioSampleEntryBox*)entry; ESDa = ase->esd; if (ESDa) { esd = (GF_ESD *) ESDa->desc; } else if (!true_desc_only) { Bool make_mp4a = GF_FALSE; sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_SINF); if (sinf && sinf->original_format) { if (sinf->original_format->data_format==GF_ISOM_BOX_TYPE_MP4A) { make_mp4a = GF_TRUE; } } else { // Assuming that if no ESD is provided the stream is Basic MPEG-4 AAC LC 
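/* In that fallback case, a synthetic AudioSpecificConfig is built below from
 * the sample entry fields alone (channel_count, samplerate_hi) with the object
 * type forced to AAC LC, and serialized via gf_m4a_write_config(), so callers
 * still receive a usable ESD when the mp4a entry carries no esds box. */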
make_mp4a = GF_TRUE; } if (make_mp4a) { GF_M4ADecSpecInfo aacinfo; memset(&aacinfo, 0, sizeof(GF_M4ADecSpecInfo)); aacinfo.nb_chan = ase->channel_count; aacinfo.base_object_type = GF_M4A_AAC_LC; aacinfo.base_sr = ase->samplerate_hi; *out_esd = gf_odf_desc_esd_new(0); (*out_esd)->decoderConfig->streamType = GF_STREAM_AUDIO; (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_AAC_MPEG4; gf_m4a_write_config(&aacinfo, &(*out_esd)->decoderConfig->decoderSpecificInfo->data, &(*out_esd)->decoderConfig->decoderSpecificInfo->dataLength); } } } break; case GF_ISOM_BOX_TYPE_MP4S: if (entry->internal_type==GF_ISOM_SAMPLE_ENTRY_MP4S) { ESDa = entry->esd; if (ESDa) esd = (GF_ESD *) ESDa->desc; } break; #ifndef GPAC_DISABLE_TTXT case GF_ISOM_BOX_TYPE_TX3G: case GF_ISOM_BOX_TYPE_TEXT: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_MP4S) return GF_ISOM_INVALID_MEDIA; if (!true_desc_only && mdia->mediaTrack->moov->mov->convert_streaming_text) { GF_Err e = gf_isom_get_ttxt_esd(mdia, out_esd); if (e) return e; break; } else return GF_ISOM_INVALID_MEDIA; #endif #ifndef GPAC_DISABLE_VTT case GF_ISOM_BOX_TYPE_WVTT: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_MP4S) return GF_ISOM_INVALID_MEDIA; { GF_WebVTTSampleEntryBox*vtte = (GF_WebVTTSampleEntryBox*)entry; esd = gf_odf_desc_esd_new(2); *out_esd = esd; esd->decoderConfig->streamType = GF_STREAM_TEXT; esd->decoderConfig->objectTypeIndication = GF_CODECID_WEBVTT; if (vtte->config) { esd->decoderConfig->decoderSpecificInfo->dataLength = (u32) strlen(vtte->config->string); esd->decoderConfig->decoderSpecificInfo->data = gf_malloc(sizeof(char)*esd->decoderConfig->decoderSpecificInfo->dataLength); memcpy(esd->decoderConfig->decoderSpecificInfo->data, vtte->config->string, esd->decoderConfig->decoderSpecificInfo->dataLength); } } break; case GF_ISOM_BOX_TYPE_STPP: case GF_ISOM_BOX_TYPE_SBTT: case GF_ISOM_BOX_TYPE_STXT: break; #endif case GF_ISOM_SUBTYPE_3GP_AMR: case GF_ISOM_SUBTYPE_3GP_AMR_WB: case GF_ISOM_SUBTYPE_3GP_EVRC: case GF_ISOM_SUBTYPE_3GP_QCELP: case GF_ISOM_SUBTYPE_3GP_SMV: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_AUDIO) return GF_ISOM_INVALID_MEDIA; if (!true_desc_only) { GF_Err e = gf_isom_get_3gpp_audio_esd(mdia->information->sampleTable, type, (GF_GenericAudioSampleEntryBox*)entry, out_esd); if (e) return e; break; } else return GF_ISOM_INVALID_MEDIA; case GF_ISOM_SUBTYPE_OPUS: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_AUDIO) return GF_ISOM_INVALID_MEDIA; { GF_OpusSpecificBox *e = ((GF_MPEGAudioSampleEntryBox*)entry)->cfg_opus; GF_BitStream *bs_out; if (!e) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("ESD not found for Opus\n)")); break; } *out_esd = gf_odf_desc_esd_new(2); (*out_esd)->decoderConfig->streamType = GF_STREAM_AUDIO; (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_OPUS; //serialize box with header - compatibility with ffmpeg bs_out = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_isom_box_size((GF_Box *) e); gf_isom_box_write((GF_Box *) e, bs_out); gf_bs_get_content(bs_out, & (*out_esd)->decoderConfig->decoderSpecificInfo->data, & (*out_esd)->decoderConfig->decoderSpecificInfo->dataLength); gf_bs_del(bs_out); break; } case GF_ISOM_SUBTYPE_3GP_H263: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; if (true_desc_only) { return GF_ISOM_INVALID_MEDIA; } else { esd = gf_odf_desc_esd_new(2); *out_esd = esd; esd->decoderConfig->streamType = GF_STREAM_VISUAL; esd->decoderConfig->objectTypeIndication = GF_CODECID_H263; break; } case GF_ISOM_SUBTYPE_MP3: if 
(entry->internal_type != GF_ISOM_SAMPLE_ENTRY_AUDIO) return GF_ISOM_INVALID_MEDIA; if (true_desc_only) { return GF_ISOM_INVALID_MEDIA; } else { esd = gf_odf_desc_esd_new(2); *out_esd = esd; esd->decoderConfig->streamType = GF_STREAM_AUDIO; esd->decoderConfig->objectTypeIndication = GF_CODECID_MPEG_AUDIO; break; } case GF_ISOM_SUBTYPE_LSR1: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_MP4S) return GF_ISOM_INVALID_MEDIA; if (true_desc_only) { return GF_ISOM_INVALID_MEDIA; } else { GF_LASeRSampleEntryBox*ptr = (GF_LASeRSampleEntryBox*)entry; esd = gf_odf_desc_esd_new(2); *out_esd = esd; esd->decoderConfig->streamType = GF_STREAM_SCENE; esd->decoderConfig->objectTypeIndication = GF_CODECID_LASER; esd->decoderConfig->decoderSpecificInfo->dataLength = ptr->lsr_config->hdr_size; esd->decoderConfig->decoderSpecificInfo->data = gf_malloc(sizeof(char)*ptr->lsr_config->hdr_size); if (!esd->decoderConfig->decoderSpecificInfo->data) return GF_OUT_OF_MEM; memcpy(esd->decoderConfig->decoderSpecificInfo->data, ptr->lsr_config->hdr, sizeof(char)*ptr->lsr_config->hdr_size); break; } case GF_ISOM_SUBTYPE_MH3D_MHA1: case GF_ISOM_SUBTYPE_MH3D_MHA2: case GF_ISOM_SUBTYPE_MH3D_MHM1: case GF_ISOM_SUBTYPE_MH3D_MHM2: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_AUDIO) return GF_ISOM_INVALID_MEDIA; if (true_desc_only) { return GF_ISOM_INVALID_MEDIA; } else { GF_MPEGAudioSampleEntryBox*ptr = (GF_MPEGAudioSampleEntryBox*)entry; esd = gf_odf_desc_esd_new(2); *out_esd = esd; esd->decoderConfig->streamType = GF_STREAM_AUDIO; if ((type==GF_ISOM_SUBTYPE_MH3D_MHA1) || (type==GF_ISOM_SUBTYPE_MH3D_MHA2)) esd->decoderConfig->objectTypeIndication = GF_CODECID_MPHA; else esd->decoderConfig->objectTypeIndication = GF_CODECID_MHAS; if (ptr->cfg_mha) { GF_BitStream *bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_bs_write_u8(bs, ptr->cfg_mha->configuration_version); gf_bs_write_u8(bs, ptr->cfg_mha->mha_pl_indication); gf_bs_write_u8(bs, ptr->cfg_mha->reference_channel_layout); gf_bs_write_u16(bs, ptr->cfg_mha->mha_config ? ptr->cfg_mha->mha_config_size : 0); if (ptr->cfg_mha->mha_config && ptr->cfg_mha->mha_config_size) gf_bs_write_data(bs, ptr->cfg_mha->mha_config, ptr->cfg_mha->mha_config_size); gf_bs_get_content(bs, &esd->decoderConfig->decoderSpecificInfo->data, &esd->decoderConfig->decoderSpecificInfo->dataLength); gf_bs_del(bs); } } break; default: return GF_ISOM_INVALID_MEDIA; } if (true_desc_only) { if (!esd) return GF_ISOM_INVALID_MEDIA; *out_esd = esd; return GF_OK; } else { if (!esd && !*out_esd) return GF_ISOM_INVALID_MEDIA; if (*out_esd == NULL) return gf_odf_desc_copy((GF_Descriptor *)esd, (GF_Descriptor **)out_esd); } return GF_OK; } Bool Media_IsSampleSyncShadow(GF_ShadowSyncBox *stsh, u32 sampleNumber) { u32 i; GF_StshEntry *ent; if (!stsh) return 0; i=0; while ((ent = (GF_StshEntry*)gf_list_enum(stsh->entries, &i))) { if ((u32) ent->syncSampleNumber == sampleNumber) return 1; else if ((u32) ent->syncSampleNumber > sampleNumber) return 0; } return 0; } GF_Err Media_GetSample(GF_MediaBox *mdia, u32 sampleNumber, GF_ISOSample **samp, u32 *sIDX, Bool no_data, u64 *out_offset) { GF_Err e; u32 bytesRead; u32 dataRefIndex, chunkNumber; u64 offset, new_size; u32 sdesc_idx; GF_SampleEntryBox *entry; GF_StscEntry *stsc_entry; if (!mdia || !mdia->information->sampleTable) return GF_BAD_PARAM; if (!mdia->information->sampleTable->SampleSize) return GF_ISOM_INVALID_FILE; //OK, here we go.... 
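/* Sample retrieval below walks the sample tables in order: stsc/stco via
 * stbl_GetSampleInfos() for the file offset, chunk and sample description
 * index, then stts for the DTS, ctts for the CTS offset, stsz for the size,
 * and stss/sdtp/stsh to derive the RAP flag. Absent tables fall back to the
 * spec defaults (DTS 0, no CTS offset, all samples sync). */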
if (sampleNumber > mdia->information->sampleTable->SampleSize->sampleCount) return GF_BAD_PARAM; //the data info if (!sIDX && !no_data) return GF_BAD_PARAM; e = stbl_GetSampleInfos(mdia->information->sampleTable, sampleNumber, &offset, &chunkNumber, &sdesc_idx, &stsc_entry); if (e) return e; if (sIDX) (*sIDX) = sdesc_idx; if (out_offset) *out_offset = offset; if (!samp ) return GF_OK; if (mdia->information->sampleTable->TimeToSample) { //get the DTS e = stbl_GetSampleDTS(mdia->information->sampleTable->TimeToSample, sampleNumber, &(*samp)->DTS); if (e) return e; } else { (*samp)->DTS=0; } //the CTS offset if (mdia->information->sampleTable->CompositionOffset) { e = stbl_GetSampleCTS(mdia->information->sampleTable->CompositionOffset , sampleNumber, &(*samp)->CTS_Offset); if (e) return e; } else { (*samp)->CTS_Offset = 0; } //the size e = stbl_GetSampleSize(mdia->information->sampleTable->SampleSize, sampleNumber, &(*samp)->dataLength); if (e) return e; //the RAP if (mdia->information->sampleTable->SyncSample) { e = stbl_GetSampleRAP(mdia->information->sampleTable->SyncSample, sampleNumber, &(*samp)->IsRAP, NULL, NULL); if (e) return e; } else { //if no SyncSample, all samples are sync (cf spec) (*samp)->IsRAP = RAP; } if (mdia->information->sampleTable->SampleDep) { u32 isLeading, dependsOn, dependedOn, redundant; e = stbl_GetSampleDepType(mdia->information->sampleTable->SampleDep, sampleNumber, &isLeading, &dependsOn, &dependedOn, &redundant); if (!e) { if (dependsOn==1) (*samp)->IsRAP = RAP_NO; //commenting following code since it is wrong - an I frame is not always a SAP1, it can be a SAP2 or SAP3. //Keeping this code breaks AVC / HEVC openGOP import when writing sample dependencies //else if (dependsOn==2) (*samp)->IsRAP = RAP; /*if not depended upon and redundant, mark as carousel sample*/ if ((dependedOn==2) && (redundant==1)) (*samp)->IsRAP = RAP_REDUNDANT; /*TODO FIXME - we must enhance the IsRAP semantics to carry disposable info ... */ } } /*get sync shadow*/ if (Media_IsSampleSyncShadow(mdia->information->sampleTable->ShadowSync, sampleNumber)) (*samp)->IsRAP = RAP_REDUNDANT; //the data info if (!sIDX && !no_data) return GF_BAD_PARAM; if (!sIDX && !out_offset) return GF_OK; if (!sIDX) return GF_OK; (*sIDX) = sdesc_idx; // e = stbl_GetSampleInfos(mdia->information->sampleTable, sampleNumber, &offset, &chunkNumber, sIDX, &stsc_entry); // if (e) return e; //then get the DataRef e = Media_GetSampleDesc(mdia, sdesc_idx, &entry, &dataRefIndex); if (e) return e; //if moov is compressed, remove offset if sample is after moov in this file if (mdia->mediaTrack->moov->compressed_diff) { GF_DataEntryBox *ent = (GF_DataEntryBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, dataRefIndex - 1); if (ent && (ent->flags&1) && (offset>=mdia->mediaTrack->moov->file_offset)) { offset -= mdia->mediaTrack->moov->compressed_diff; } } if (no_data) { if ( ((*samp)->dataLength != 0) && mdia->mediaTrack->pack_num_samples) { u32 idx_in_chunk = sampleNumber - mdia->information->sampleTable->SampleToChunk->firstSampleInCurrentChunk; u32 left_in_chunk = stsc_entry->samplesPerChunk - idx_in_chunk; if (left_in_chunk > mdia->mediaTrack->pack_num_samples) left_in_chunk = mdia->mediaTrack->pack_num_samples; (*samp)->dataLength *= left_in_chunk; (*samp)->nb_pack = left_in_chunk; } return GF_OK; } // Open the data handler - check our mode, don't reopen in read only if this is //the same entry. 
In other modes we have no choice because the main data map is //divided into the original and the edition files if (mdia->mediaTrack->moov->mov->openMode == GF_ISOM_OPEN_READ) { //same as last call in read mode if (!mdia->information->dataHandler) { e = gf_isom_datamap_open(mdia, dataRefIndex, stsc_entry->isEdited); if (e) return e; } mdia->information->dataEntryIndex = dataRefIndex; } else { e = gf_isom_datamap_open(mdia, dataRefIndex, stsc_entry->isEdited); if (e) return e; } if ( mdia->mediaTrack->moov->mov->read_byte_offset || mdia->mediaTrack->moov->mov->bytes_removed) { GF_DataEntryBox *ent = (GF_DataEntryBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, dataRefIndex - 1); if (ent && (ent->flags&1)) { u64 real_offset = mdia->mediaTrack->moov->mov->read_byte_offset + mdia->mediaTrack->moov->mov->bytes_removed; if (offset < real_offset) return GF_IO_ERR; if (mdia->information->dataHandler->last_read_offset != mdia->mediaTrack->moov->mov->read_byte_offset) { mdia->information->dataHandler->last_read_offset = mdia->mediaTrack->moov->mov->read_byte_offset; gf_bs_get_refreshed_size(mdia->information->dataHandler->bs); } offset -= real_offset; } } if ((*samp)->dataLength != 0) { if (mdia->mediaTrack->pack_num_samples) { u32 idx_in_chunk = sampleNumber - mdia->information->sampleTable->SampleToChunk->firstSampleInCurrentChunk; u32 left_in_chunk = stsc_entry->samplesPerChunk - idx_in_chunk; if (left_in_chunk > mdia->mediaTrack->pack_num_samples) left_in_chunk = mdia->mediaTrack->pack_num_samples; (*samp)->dataLength *= left_in_chunk; (*samp)->nb_pack = left_in_chunk; } /*and finally get the data, include padding if needed*/ if ((*samp)->alloc_size) { if ((*samp)->alloc_size < (*samp)->dataLength + mdia->mediaTrack->padding_bytes) { (*samp)->data = (char *) gf_realloc((*samp)->data, sizeof(char) * ( (*samp)->dataLength + mdia->mediaTrack->padding_bytes) ); if (! (*samp)->data) return GF_OUT_OF_MEM; (*samp)->alloc_size = (*samp)->dataLength + mdia->mediaTrack->padding_bytes; } } else { (*samp)->data = (char *) gf_malloc(sizeof(char) * ( (*samp)->dataLength + mdia->mediaTrack->padding_bytes) ); if (! (*samp)->data) return GF_OUT_OF_MEM; } if (mdia->mediaTrack->padding_bytes) memset((*samp)->data + (*samp)->dataLength, 0, sizeof(char) * mdia->mediaTrack->padding_bytes); //check if we can get the sample (make sure we have enougth data...) 
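/* For progressive (http/ftp) sources the cached bitstream size can lag behind
 * the real file size, so the size is refreshed once before giving up; if the
 * sample still extends past the end, BytesMissing records how many bytes are
 * lacking and GF_ISOM_INCOMPLETE_FILE is returned rather than a short read. */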
new_size = gf_bs_get_size(mdia->information->dataHandler->bs); if (offset + (*samp)->dataLength > new_size) { //always refresh the size to avoid wrong info on http/ftp new_size = gf_bs_get_refreshed_size(mdia->information->dataHandler->bs); if (offset + (*samp)->dataLength > new_size) { mdia->BytesMissing = offset + (*samp)->dataLength - new_size; return GF_ISOM_INCOMPLETE_FILE; } } bytesRead = gf_isom_datamap_get_data(mdia->information->dataHandler, (*samp)->data, (*samp)->dataLength, offset); //if bytesRead != sampleSize, we have an IO err if (bytesRead < (*samp)->dataLength) { return GF_IO_ERR; } mdia->BytesMissing = 0; } //finally rewrite the sample if this is an OD Access Unit or NAL-based one //we do this even if sample size is zero because of sample implicit reconstruction rules (especially tile tracks) if (mdia->handler->handlerType == GF_ISOM_MEDIA_OD) { if (!mdia->mediaTrack->moov->mov->disable_odf_translate) { e = Media_RewriteODFrame(mdia, *samp); if (e) return e; } } else if (gf_isom_is_nalu_based_entry(mdia, entry) && !gf_isom_is_encrypted_entry(entry->type) ) { e = gf_isom_nalu_sample_rewrite(mdia, *samp, sampleNumber, (GF_MPEGVisualSampleEntryBox *)entry); if (e) return e; } else if (mdia->mediaTrack->moov->mov->convert_streaming_text && ((mdia->handler->handlerType == GF_ISOM_MEDIA_TEXT) || (mdia->handler->handlerType == GF_ISOM_MEDIA_SCENE) || (mdia->handler->handlerType == GF_ISOM_MEDIA_SUBT)) && (entry->type == GF_ISOM_BOX_TYPE_TX3G || entry->type == GF_ISOM_BOX_TYPE_TEXT) ) { u64 dur; if (sampleNumber == mdia->information->sampleTable->SampleSize->sampleCount) { dur = mdia->mediaHeader->duration - (*samp)->DTS; } else { stbl_GetSampleDTS(mdia->information->sampleTable->TimeToSample, sampleNumber+1, &dur); dur -= (*samp)->DTS; } e = gf_isom_rewrite_text_sample(*samp, sdesc_idx, (u32) dur); if (e) return e; } return GF_OK; } GF_Err Media_CheckDataEntry(GF_MediaBox *mdia, u32 dataEntryIndex) { GF_DataEntryURLBox *entry; GF_DataMap *map; GF_Err e; if (!mdia || !dataEntryIndex || dataEntryIndex > gf_list_count(mdia->information->dataInformation->dref->child_boxes)) return GF_BAD_PARAM; entry = (GF_DataEntryURLBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, dataEntryIndex - 1); if (!entry) return GF_ISOM_INVALID_FILE; if (entry->flags == 1) return GF_OK; //ok, not self contained, let's go for it... 
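/* The external reference is validated by actually opening it: a temporary
 * read-only data map is created with gf_isom_datamap_new() and immediately
 * released, so this check fails with the same error a later sample fetch
 * through that reference would hit. */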
//we don't know what's a URN yet if (entry->type == GF_ISOM_BOX_TYPE_URN) return GF_NOT_SUPPORTED; if (mdia->mediaTrack->moov->mov->openMode == GF_ISOM_OPEN_WRITE) { e = gf_isom_datamap_new(entry->location, NULL, GF_ISOM_DATA_MAP_READ, &map); } else { e = gf_isom_datamap_new(entry->location, mdia->mediaTrack->moov->mov->fileName, GF_ISOM_DATA_MAP_READ, &map); } if (e) return e; gf_isom_datamap_del(map); return GF_OK; } Bool Media_IsSelfContained(GF_MediaBox *mdia, u32 StreamDescIndex) { u32 drefIndex=0; GF_FullBox *a=NULL; GF_SampleEntryBox *se = NULL; Media_GetSampleDesc(mdia, StreamDescIndex, &se, &drefIndex); if (!drefIndex) return 0; if (mdia && mdia->information && mdia->information->dataInformation && mdia->information->dataInformation->dref ) { a = (GF_FullBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, drefIndex - 1); } if (!a) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] broken file: Data reference index set to %d but no data reference entry found\n", drefIndex)); return 1; } if (a->flags & 1) return 1; /*QT specific*/ if (a->type == GF_QT_BOX_TYPE_ALIS) return 1; return 0; } GF_ISOMDataRefAllType Media_SelfContainedType(GF_MediaBox *mdia) { u32 nb_ext, nb_self; u32 i, count; nb_ext = nb_self = 0; count = mdia->information->sampleTable->SampleDescription ? gf_list_count(mdia->information->sampleTable->SampleDescription->child_boxes) : 0; for (i=0; i<count; i++) { if (Media_IsSelfContained(mdia, i+1)) nb_self++; else nb_ext++; } if (nb_ext==count) return ISOM_DREF_EXT; if (nb_self==count) return ISOM_DREF_SELF; return ISOM_DREF_MIXED; } //look for a sync sample from a given point in media time GF_Err Media_FindSyncSample(GF_SampleTableBox *stbl, u32 searchFromSample, u32 *sampleNumber, u8 mode) { GF_ISOSAPType isRAP; u32 next, prev, next_in_sap, prev_in_sap; if (!stbl || !stbl->SyncSample) return GF_BAD_PARAM; //set to current sample if we don't find a RAP *sampleNumber = searchFromSample; //this is not the exact sample, but the prev move to next sample if enough samples.... if ( (mode == GF_ISOM_SEARCH_SYNC_FORWARD) && (searchFromSample == stbl->SampleSize->sampleCount) ) { return GF_OK; } if ( (mode == GF_ISOM_SEARCH_SYNC_BACKWARD) && !searchFromSample) { *sampleNumber = 1; return GF_OK; } //get the entry stbl_GetSampleRAP(stbl->SyncSample, searchFromSample, &isRAP, &prev, &next); if (isRAP) { (*sampleNumber) = searchFromSample; return GF_OK; } /*check sample groups - prev & next are overwritten if RAP group is found, but are not re-initialized otherwise*/ stbl_SearchSAPs(stbl, searchFromSample, &isRAP, &prev_in_sap, &next_in_sap); if (isRAP) { (*sampleNumber) = searchFromSample; return GF_OK; } if (prev_in_sap > prev) prev = prev_in_sap; if (next_in_sap && next_in_sap < next) next = next_in_sap; //nothing yet, go for next time... 
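/* At this point the stss lookup and the SAP sample-group scan above have been
 * merged into a single prev/next candidate pair; the search direction now only
 * decides which candidate (if any) replaces the default answer of staying on
 * the current sample. */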
if (mode == GF_ISOM_SEARCH_SYNC_FORWARD) { if (next) *sampleNumber = next; } else { if (prev) *sampleNumber = prev; } return GF_OK; } //create a DataReference if not existing (only for WRITE-edit mode) GF_Err Media_FindDataRef(GF_DataReferenceBox *dref, char *URLname, char *URNname, u32 *dataRefIndex) { u32 i; GF_DataEntryURLBox *entry; if (!dref) return GF_BAD_PARAM; *dataRefIndex = 0; i=0; while ((entry = (GF_DataEntryURLBox*)gf_list_enum(dref->child_boxes, &i))) { if (entry->type == GF_ISOM_BOX_TYPE_URL) { //self-contained case if (entry->flags == 1) { //if nothing specified, get the dataRef if (!URLname && !URNname) { *dataRefIndex = i; return GF_OK; } } else { //OK, check if we have URL if (URLname && !strcmp(URLname, entry->location)) { *dataRefIndex = i; return GF_OK; } } } else { //this is a URN one, only check the URN name (URL optional) if (URNname && !strcmp(URNname, ((GF_DataEntryURNBox *)entry)->nameURN)) { *dataRefIndex = i; return GF_OK; } } } return GF_OK; } //Get the total media duration based on the TimeToSample table GF_Err Media_SetDuration(GF_TrackBox *trak) { GF_Err e; GF_ESD *esd; u64 DTS; GF_SttsEntry *ent; u32 nbSamp; if (!trak || !trak->Media || !trak->Media->information || !trak->Media->information->sampleTable) return GF_ISOM_INVALID_FILE; if (!trak->Media->information->sampleTable->SampleSize || !trak->Media->information->sampleTable->TimeToSample) return GF_ISOM_INVALID_FILE; nbSamp = trak->Media->information->sampleTable->SampleSize->sampleCount; //we need to check how many samples we have. // == 1 -> last sample duration == default duration // > 1 -> last sample duration == prev sample duration switch (nbSamp) { case 0: trak->Media->mediaHeader->duration = 0; if (Track_IsMPEG4Stream(trak->Media->handler->handlerType)) { Media_GetESD(trak->Media, 1, &esd, 1); if (esd && esd->URLString) trak->Media->mediaHeader->duration = (u64) -1; } return GF_OK; // case 1: // trak->Media->mediaHeader->duration = trak->Media->mediaHeader->timeScale; // return GF_OK; default: //we assume a constant frame rate for the media and assume the last sample //will be hold the same time as the prev one e = stbl_GetSampleDTS(trak->Media->information->sampleTable->TimeToSample, nbSamp, &DTS); if (e < 0) { return e; } if (trak->Media->information->sampleTable->TimeToSample->nb_entries > 0) { ent = &trak->Media->information->sampleTable->TimeToSample->entries[trak->Media->information->sampleTable->TimeToSample->nb_entries-1]; } else { ent = NULL; } trak->Media->mediaHeader->duration = DTS; #if 1 if (ent) trak->Media->mediaHeader->duration += ent->sampleDelta; #else if (!ent) { u64 DTSprev; stbl_GetSampleDTS(trak->Media->information->sampleTable->TimeToSample, nbSamp-1, &DTSprev); trak->Media->mediaHeader->duration += (DTS - DTSprev); } else { #ifndef GPAC_DISABLE_ISOM_WRITE if (trak->moov->mov->editFileMap && trak->Media->information->sampleTable->CompositionOffset) { u32 count, i; u64 max_ts; GF_DttsEntry *cts_ent; GF_CompositionOffsetBox *ctts = trak->Media->information->sampleTable->CompositionOffset; if (ctts->w_LastSampleNumber==nbSamp) { count = gf_list_count(ctts->entryList); max_ts = trak->Media->mediaHeader->duration; while (count) { count -= 1; cts_ent = gf_list_get(ctts->entryList, count); if (nbSamp<cts_ent->sampleCount) break; for (i=0; i<cts_ent->sampleCount; i++) { stbl_GetSampleDTS(trak->Media->information->sampleTable->TimeToSample, nbSamp-i, &DTS); if ((s32) cts_ent->decodingOffset < 0) max_ts = DTS; else max_ts = DTS + cts_ent->decodingOffset; if 
(max_ts>=trak->Media->mediaHeader->duration) { trak->Media->mediaHeader->duration = max_ts; } else { break; } } if (max_ts<trak->Media->mediaHeader->duration) { break; } nbSamp-=cts_ent->sampleCount; } } } #endif /*GPAC_DISABLE_ISOM_WRITE*/ trak->Media->mediaHeader->duration += ent->sampleDelta; } #endif return GF_OK; } } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err Media_SetDrefURL(GF_DataEntryURLBox *dref_entry, const char *origName, const char *finalName) { //for now we only support dref created in same folder for relative URLs if (strstr(origName, "://") || ((origName[1]==':') && (origName[2]=='\\')) || (origName[0]=='/') || (origName[0]=='\\') ) { dref_entry->location = gf_strdup(origName); } else { char *fname = strrchr(origName, '/'); if (!fname) fname = strrchr(origName, '\\'); if (fname) fname++; if (!fname) { dref_entry->location = gf_strdup(origName); } else { u32 len = (u32) (fname - origName); if (!finalName || strncmp(origName, finalName, len)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Concatenation of relative path %s with relative path %s not supported, use absolute URLs\n", origName, finalName)); return GF_NOT_SUPPORTED; } else { dref_entry->location = gf_strdup(fname); } } } return GF_OK; } GF_Err Media_CreateDataRef(GF_ISOFile *movie, GF_DataReferenceBox *dref, char *URLname, char *URNname, u32 *dataRefIndex) { GF_Err e; Bool use_alis=GF_FALSE; GF_DataEntryURLBox *entry; if (URLname && !strcmp(URLname, "alis")) { URLname = NULL; use_alis=GF_TRUE; } if (!URLname && !URNname) { //THIS IS SELF CONTAIN, create a regular entry if needed entry = (GF_DataEntryURLBox *) gf_isom_box_new_parent(&dref->child_boxes, use_alis ? GF_QT_BOX_TYPE_ALIS : GF_ISOM_BOX_TYPE_URL); if (!entry) return GF_OUT_OF_MEM; entry->flags = 1; *dataRefIndex = gf_list_count(dref->child_boxes); return GF_OK; } else if (!URNname && URLname) { //THIS IS URL entry = (GF_DataEntryURLBox *) gf_isom_box_new_parent(&dref->child_boxes, GF_ISOM_BOX_TYPE_URL); if (!entry) return GF_OUT_OF_MEM; entry->flags = 0; e = Media_SetDrefURL(entry, URLname, movie->fileName ? movie->fileName : movie->finalName); if (! entry->location) { gf_isom_box_del_parent(&dref->child_boxes, (GF_Box *)entry); return e ? e : GF_OUT_OF_MEM; } *dataRefIndex = gf_list_count(dref->child_boxes); return GF_OK; } else { //THIS IS URN entry = (GF_DataEntryURLBox *) gf_isom_box_new_parent(&dref->child_boxes, GF_ISOM_BOX_TYPE_URN); if (!entry) return GF_OUT_OF_MEM; ((GF_DataEntryURNBox *)entry)->flags = 0; ((GF_DataEntryURNBox *)entry)->nameURN = (char*)gf_malloc(strlen(URNname)+1); if (! ((GF_DataEntryURNBox *)entry)->nameURN) { gf_isom_box_del_parent(&dref->child_boxes, (GF_Box *)entry); return GF_OUT_OF_MEM; } strcpy(((GF_DataEntryURNBox *)entry)->nameURN, URNname); //check for URL if (URLname) { ((GF_DataEntryURNBox *)entry)->location = (char*)gf_malloc(strlen(URLname)+1); if (! 
((GF_DataEntryURNBox *)entry)->location) { gf_isom_box_del_parent(&dref->child_boxes, (GF_Box *)entry); return GF_OUT_OF_MEM; } strcpy(((GF_DataEntryURNBox *)entry)->location, URLname); } *dataRefIndex = gf_list_count(dref->child_boxes); return GF_OK; } return GF_OK; } GF_Err Media_AddSample(GF_MediaBox *mdia, u64 data_offset, const GF_ISOSample *sample, u32 StreamDescIndex, u32 syncShadowNumber) { GF_Err e; GF_SampleTableBox *stbl; u32 sampleNumber, i; if (!mdia || !sample) return GF_BAD_PARAM; stbl = mdia->information->sampleTable; //get a valid sampleNumber for this new guy e = stbl_AddDTS(stbl, sample->DTS, &sampleNumber, mdia->mediaHeader->timeScale, sample->nb_pack); if (e) return e; //add size e = stbl_AddSize(stbl->SampleSize, sampleNumber, sample->dataLength, sample->nb_pack); if (e) return e; //adds CTS offset if (sample->CTS_Offset) { //if we don't have a CTS table, add it... if (!stbl->CompositionOffset) { stbl->CompositionOffset = (GF_CompositionOffsetBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_CTTS); if (!stbl->CompositionOffset) return GF_OUT_OF_MEM; } //then add our CTS (the prev samples with no CTS offset will be automatically added... e = stbl_AddCTS(stbl, sampleNumber, sample->CTS_Offset); if (e) return e; } else if (stbl->CompositionOffset) { e = stbl_AddCTS(stbl, sampleNumber, sample->CTS_Offset); if (e) return e; } //The first non sync sample we see must create a syncTable if (sample->IsRAP) { //insert it only if we have a sync table and if we have an IDR slice if (stbl->SyncSample && (sample->IsRAP == RAP)) { e = stbl_AddRAP(stbl->SyncSample, sampleNumber); if (e) return e; } } else { //non-sync sample. Create a SyncSample table if needed if (!stbl->SyncSample) { stbl->SyncSample = (GF_SyncSampleBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_STSS); if (!stbl->SyncSample) return GF_OUT_OF_MEM; //all the prev samples are sync for (i=0; i<stbl->SampleSize->sampleCount; i++) { if (i+1 != sampleNumber) { e = stbl_AddRAP(stbl->SyncSample, i+1); if (e) return e; } } } } if (sample->IsRAP==RAP_REDUNDANT) { e = stbl_AddRedundant(stbl, sampleNumber); if (e) return e; } if (!mdia->mediaTrack->chunk_cache) { //and update the chunks e = stbl_AddChunkOffset(mdia, sampleNumber, StreamDescIndex, data_offset, sample->nb_pack); if (e) return e; } if (!syncShadowNumber) return GF_OK; if (!stbl->ShadowSync) { stbl->ShadowSync = (GF_ShadowSyncBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_STSH); if (!stbl->ShadowSync) return GF_OUT_OF_MEM; } return stbl_AddShadow(mdia->information->sampleTable->ShadowSync, sampleNumber, syncShadowNumber); } static GF_Err UpdateSample(GF_MediaBox *mdia, u32 sampleNumber, u32 size, s32 CTS, u64 offset, u8 isRap) { u32 i; GF_SampleTableBox *stbl = mdia->information->sampleTable; //set size, offset, RAP, CTS ... stbl_SetSampleSize(stbl->SampleSize, sampleNumber, size); stbl_SetChunkOffset(mdia, sampleNumber, offset); //do we have a CTS? if (stbl->CompositionOffset) { stbl_SetSampleCTS(stbl, sampleNumber, CTS); } else { //do we need one ?? if (CTS) { stbl->CompositionOffset = (GF_CompositionOffsetBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_CTTS); if (!stbl->CompositionOffset) return GF_OUT_OF_MEM; stbl_AddCTS(stbl, sampleNumber, CTS); } } //do we have a sync ??? if (stbl->SyncSample) { stbl_SetSampleRAP(stbl->SyncSample, sampleNumber, isRap); } else { //do we need one if (! 
isRap) { stbl->SyncSample = (GF_SyncSampleBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_STSS); if (!stbl->SyncSample) return GF_OUT_OF_MEM; //what a pain: all the sample we had have to be sync ... for (i=0; i<stbl->SampleSize->sampleCount; i++) { if (i+1 != sampleNumber) stbl_AddRAP(stbl->SyncSample, i+1); } } } if (isRap==2) { stbl_SetRedundant(stbl, sampleNumber); } return GF_OK; } GF_Err Media_UpdateSample(GF_MediaBox *mdia, u32 sampleNumber, GF_ISOSample *sample, Bool data_only) { GF_Err e; u32 drefIndex, chunkNum, descIndex; u64 newOffset, DTS; GF_DataEntryURLBox *Dentry; GF_SampleTableBox *stbl; if (!mdia || !sample || !sampleNumber || !mdia->mediaTrack->moov->mov->editFileMap) return GF_BAD_PARAM; stbl = mdia->information->sampleTable; if (!data_only) { //check we have the sampe dts e = stbl_GetSampleDTS(stbl->TimeToSample, sampleNumber, &DTS); if (e) return e; if (DTS != sample->DTS) return GF_BAD_PARAM; } //get our infos stbl_GetSampleInfos(stbl, sampleNumber, &newOffset, &chunkNum, &descIndex, NULL); //then check the data ref e = Media_GetSampleDesc(mdia, descIndex, NULL, &drefIndex); if (e) return e; Dentry = (GF_DataEntryURLBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, drefIndex - 1); if (!Dentry) return GF_ISOM_INVALID_FILE; if (Dentry->flags != 1) return GF_BAD_PARAM; //MEDIA DATA EDIT: write this new sample to the edit temp file newOffset = gf_isom_datamap_get_offset(mdia->mediaTrack->moov->mov->editFileMap); if (sample->dataLength) { e = gf_isom_datamap_add_data(mdia->mediaTrack->moov->mov->editFileMap, sample->data, sample->dataLength); if (e) return e; } if (data_only) { stbl_SetSampleSize(stbl->SampleSize, sampleNumber, sample->dataLength); return stbl_SetChunkOffset(mdia, sampleNumber, newOffset); } return UpdateSample(mdia, sampleNumber, sample->dataLength, sample->CTS_Offset, newOffset, sample->IsRAP); } GF_Err Media_UpdateSampleReference(GF_MediaBox *mdia, u32 sampleNumber, GF_ISOSample *sample, u64 data_offset) { GF_Err e; u32 drefIndex, chunkNum, descIndex; u64 off, DTS; GF_DataEntryURLBox *Dentry; GF_SampleTableBox *stbl; if (!mdia) return GF_BAD_PARAM; stbl = mdia->information->sampleTable; //check we have the sampe dts e = stbl_GetSampleDTS(stbl->TimeToSample, sampleNumber, &DTS); if (e) return e; if (DTS != sample->DTS) return GF_BAD_PARAM; //get our infos stbl_GetSampleInfos(stbl, sampleNumber, &off, &chunkNum, &descIndex, NULL); //then check the data ref e = Media_GetSampleDesc(mdia, descIndex, NULL, &drefIndex); if (e) return e; Dentry = (GF_DataEntryURLBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, drefIndex - 1); if (!Dentry) return GF_ISOM_INVALID_FILE; //we only modify self-contained data if (Dentry->flags == 1) return GF_ISOM_INVALID_MODE; //and we don't modify the media data return UpdateSample(mdia, sampleNumber, sample->dataLength, sample->CTS_Offset, data_offset, sample->IsRAP); } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM*/
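The sync-sample lookup in the cell above (Media_FindSyncSample) walks the stss table for the nearest random access point before or after the requested sample, then lets SAP sample groups override that choice. Below is a minimal, self-contained sketch of that nearest-RAP search, with a plain sorted array standing in for the stss box and hypothetical names that are not part of the GPAC API:

#include <stdio.h>

typedef enum { SEARCH_BACKWARD, SEARCH_FORWARD } sync_mode;

/* Returns the sync sample chosen for `from`: `from` itself when it is already
 * a sync sample, otherwise the nearest one in the requested direction, or 0
 * when no sync sample exists in that direction. `sync` is sorted ascending. */
static unsigned find_sync(const unsigned *sync, unsigned nb_sync,
                          unsigned from, sync_mode mode)
{
    unsigned prev = 0, next = 0, i;

    for (i = 0; i < nb_sync; i++) {
        if (sync[i] == from) return from;      /* already a RAP */
        if (sync[i] < from)  prev = sync[i];   /* best candidate going backward */
        else { next = sync[i]; break; }        /* first candidate going forward */
    }
    return (mode == SEARCH_FORWARD) ? next : prev;
}

int main(void)
{
    const unsigned stss[] = { 1, 25, 49, 73 }; /* sync sample numbers, 1-based */

    printf("backward from 30 -> %u\n", find_sync(stss, 4, 30, SEARCH_BACKWARD)); /* 25 */
    printf("forward  from 30 -> %u\n", find_sync(stss, 4, 30, SEARCH_FORWARD));  /* 49 */
    return 0;
}

Under these assumptions, a backward search from sample 30 resolves to 25 and a forward search to 49, matching the prev/next selection at the end of Media_FindSyncSample.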
null
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2019 * All rights reserved * * This file is part of GPAC / ISO Media File Format sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/internal/isomedia_dev.h> #include <gpac/constants.h> #include <gpac/avparse.h> #ifndef GPAC_DISABLE_ISOM GF_Err Media_GetSampleDesc(GF_MediaBox *mdia, u32 SampleDescIndex, GF_SampleEntryBox **out_entry, u32 *dataRefIndex) { GF_SampleDescriptionBox *stsd; GF_SampleEntryBox *entry = NULL; if (!mdia) return GF_ISOM_INVALID_FILE; stsd = mdia->information->sampleTable->SampleDescription; if (!stsd) return GF_ISOM_INVALID_FILE; if (!SampleDescIndex || (SampleDescIndex > gf_list_count(stsd->child_boxes)) ) return GF_BAD_PARAM; entry = (GF_SampleEntryBox*)gf_list_get(stsd->child_boxes, SampleDescIndex - 1); if (!entry) return GF_ISOM_INVALID_FILE; if (out_entry) *out_entry = entry; if (dataRefIndex) *dataRefIndex = entry->dataReferenceIndex; return GF_OK; } GF_Err Media_GetSampleDescIndex(GF_MediaBox *mdia, u64 DTS, u32 *sampleDescIndex) { GF_Err e; u32 sampleNumber, prevSampleNumber, num; u64 offset; if (sampleDescIndex == NULL) return GF_BAD_PARAM; //find the sample for this time e = stbl_findEntryForTime(mdia->information->sampleTable, (u32) DTS, 0, &sampleNumber, &prevSampleNumber); if (e) return e; if (!sampleNumber && !prevSampleNumber) { //we have to assume the track was created to be used... If we have a sampleDesc, OK if (gf_list_count(mdia->information->sampleTable->SampleDescription->child_boxes)) { (*sampleDescIndex) = 1; return GF_OK; } return GF_BAD_PARAM; } return stbl_GetSampleInfos(mdia->information->sampleTable, ( sampleNumber ? 
sampleNumber : prevSampleNumber), &offset, &num, sampleDescIndex, NULL); } static GF_Err gf_isom_get_3gpp_audio_esd(GF_SampleTableBox *stbl, u32 type, GF_GenericAudioSampleEntryBox *entry, GF_ESD **out_esd) { (*out_esd) = gf_odf_desc_esd_new(2); (*out_esd)->decoderConfig->streamType = GF_STREAM_AUDIO; /*official mapping to MPEG-4*/ switch (type) { case GF_ISOM_SUBTYPE_3GP_EVRC: (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_EVRC; return GF_OK; case GF_ISOM_SUBTYPE_3GP_QCELP: { u32 block_size, sample_rate, sample_size, i; GF_SttsEntry *ent; GF_BitStream *bs; char szName[80]; /*only map CBR*/ sample_size = stbl->SampleSize->sampleSize; (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_QCELP; bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_bs_write_data(bs, "QLCMfmt ", 8); gf_bs_write_u32_le(bs, 150);/*fmt chunk size*/ gf_bs_write_u8(bs, 1); gf_bs_write_u8(bs, 0); /*QCELP GUID*/ gf_bs_write_data(bs, "\x41\x6D\x7F\x5E\x15\xB1\xD0\x11\xBA\x91\x00\x80\x5F\xB4\xB9\x7E", 16); gf_bs_write_u16_le(bs, 1); memset(szName, 0, 80); strcpy(szName, "QCELP-13K(GPAC-emulated)"); gf_bs_write_data(bs, szName, 80); ent = &stbl->TimeToSample->entries[0]; sample_rate = entry->samplerate_hi; block_size = ent ? ent->sampleDelta : 160; gf_bs_write_u16_le(bs, 8*sample_size*sample_rate/block_size); gf_bs_write_u16_le(bs, sample_size); gf_bs_write_u16_le(bs, block_size); gf_bs_write_u16_le(bs, sample_rate); gf_bs_write_u16_le(bs, entry->bitspersample); gf_bs_write_u32_le(bs, sample_size ? 0 : 7); /**/ for (i=0; i<7; i++) { static const u32 qcelp_r2s [] = {0, 1, 1, 4, 2, 8, 3, 17, 4, 35, 5, 8, 14, 1}; if (sample_size) { gf_bs_write_u16(bs, 0); } else { gf_bs_write_u8(bs, qcelp_r2s[2*i+1]); gf_bs_write_u8(bs, qcelp_r2s[2*i]); } } gf_bs_write_u16(bs, 0); memset(szName, 0, 80); gf_bs_write_data(bs, szName, 20);/*reserved*/ gf_bs_get_content(bs, & (*out_esd)->decoderConfig->decoderSpecificInfo->data, & (*out_esd)->decoderConfig->decoderSpecificInfo->dataLength); gf_bs_del(bs); } return GF_OK; case GF_ISOM_SUBTYPE_3GP_SMV: (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_SMV; return GF_OK; case GF_ISOM_SUBTYPE_3GP_AMR: (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_AMR; return GF_OK; case GF_ISOM_SUBTYPE_3GP_AMR_WB: (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_AMR_WB; return GF_OK; default: GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] unsupported sample description type %s\n", gf_4cc_to_str(entry->type))); break; } return GF_OK; } GF_Err Media_GetESD(GF_MediaBox *mdia, u32 sampleDescIndex, GF_ESD **out_esd, Bool true_desc_only) { u32 type; GF_ESD *esd; GF_MPEGSampleEntryBox *entry = NULL; GF_ESDBox *ESDa; GF_ProtectionSchemeInfoBox *sinf; GF_SampleDescriptionBox *stsd = mdia->information->sampleTable->SampleDescription; *out_esd = NULL; if (!stsd || !stsd->child_boxes || !sampleDescIndex || (sampleDescIndex > gf_list_count(stsd->child_boxes)) ) return GF_BAD_PARAM; esd = NULL; entry = (GF_MPEGSampleEntryBox*)gf_list_get(stsd->child_boxes, sampleDescIndex - 1); if (! 
entry) return GF_ISOM_INVALID_MEDIA; *out_esd = NULL; ESDa = NULL; type = entry->type; switch (type) { case GF_ISOM_BOX_TYPE_ENCV: case GF_ISOM_BOX_TYPE_ENCA: case GF_ISOM_BOX_TYPE_ENCS: case GF_ISOM_BOX_TYPE_ENCF: case GF_ISOM_BOX_TYPE_ENCM: case GF_ISOM_BOX_TYPE_ENCT: sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_SINF); if (sinf && sinf->original_format) { type = sinf->original_format->data_format; } break; case GF_ISOM_BOX_TYPE_RESV: sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_RINF); if (sinf && sinf->original_format) { type = sinf->original_format->data_format; } break; } switch (type) { case GF_ISOM_BOX_TYPE_MP4V: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; ESDa = ((GF_MPEGVisualSampleEntryBox*)entry)->esd; if (ESDa) esd = (GF_ESD *) ESDa->desc; /*avc1 encrypted*/ else esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_AVC1: case GF_ISOM_BOX_TYPE_AVC2: case GF_ISOM_BOX_TYPE_AVC3: case GF_ISOM_BOX_TYPE_AVC4: case GF_ISOM_BOX_TYPE_HVC1: case GF_ISOM_BOX_TYPE_HEV1: case GF_ISOM_BOX_TYPE_HVC2: case GF_ISOM_BOX_TYPE_HEV2: case GF_ISOM_BOX_TYPE_HVT1: case GF_ISOM_BOX_TYPE_264B: case GF_ISOM_BOX_TYPE_265B: case GF_ISOM_BOX_TYPE_DVHE: case GF_ISOM_BOX_TYPE_VVC1: case GF_ISOM_BOX_TYPE_VVI1: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_SVC1: case GF_ISOM_BOX_TYPE_MVC1: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; if ((mdia->mediaTrack->extractor_mode & 0x0000FFFF) != GF_ISOM_NALU_EXTRACT_INSPECT) AVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, mdia); else AVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, NULL); esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_LHE1: case GF_ISOM_BOX_TYPE_LHV1: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; if ((mdia->mediaTrack->extractor_mode & 0x0000FFFF) != GF_ISOM_NALU_EXTRACT_INSPECT) HEVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, mdia); else HEVC_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*) entry, NULL); esd = ((GF_MPEGVisualSampleEntryBox*) entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_AV01: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; AV1_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*)entry, mdia); esd = ((GF_MPEGVisualSampleEntryBox*)entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_VP08: case GF_ISOM_BOX_TYPE_VP09: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; VP9_RewriteESDescriptorEx((GF_MPEGVisualSampleEntryBox*)entry, mdia); esd = ((GF_MPEGVisualSampleEntryBox*)entry)->emul_esd; break; case GF_ISOM_BOX_TYPE_MP4A: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_AUDIO) return GF_ISOM_INVALID_MEDIA; { GF_MPEGAudioSampleEntryBox *ase = (GF_MPEGAudioSampleEntryBox*)entry; ESDa = ase->esd; if (ESDa) { esd = (GF_ESD *) ESDa->desc; } else if (!true_desc_only) { Bool make_mp4a = GF_FALSE; sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_SINF); if (sinf && sinf->original_format) { if (sinf->original_format->data_format==GF_ISOM_BOX_TYPE_MP4A) { make_mp4a = GF_TRUE; } } else { // Assuming that if no ESD is provided the stream is Basic MPEG-4 AAC LC 
make_mp4a = GF_TRUE; } if (make_mp4a) { GF_M4ADecSpecInfo aacinfo; memset(&aacinfo, 0, sizeof(GF_M4ADecSpecInfo)); aacinfo.nb_chan = ase->channel_count; aacinfo.base_object_type = GF_M4A_AAC_LC; aacinfo.base_sr = ase->samplerate_hi; *out_esd = gf_odf_desc_esd_new(0); (*out_esd)->decoderConfig->streamType = GF_STREAM_AUDIO; (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_AAC_MPEG4; gf_m4a_write_config(&aacinfo, &(*out_esd)->decoderConfig->decoderSpecificInfo->data, &(*out_esd)->decoderConfig->decoderSpecificInfo->dataLength); } } } break; case GF_ISOM_BOX_TYPE_MP4S: if (entry->internal_type==GF_ISOM_SAMPLE_ENTRY_MP4S) { ESDa = entry->esd; if (ESDa) esd = (GF_ESD *) ESDa->desc; } break; #ifndef GPAC_DISABLE_TTXT case GF_ISOM_BOX_TYPE_TX3G: case GF_ISOM_BOX_TYPE_TEXT: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_MP4S) return GF_ISOM_INVALID_MEDIA; if (!true_desc_only && mdia->mediaTrack->moov->mov->convert_streaming_text) { GF_Err e = gf_isom_get_ttxt_esd(mdia, out_esd); if (e) return e; break; } else return GF_ISOM_INVALID_MEDIA; #endif #ifndef GPAC_DISABLE_VTT case GF_ISOM_BOX_TYPE_WVTT: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_MP4S) return GF_ISOM_INVALID_MEDIA; { GF_WebVTTSampleEntryBox*vtte = (GF_WebVTTSampleEntryBox*)entry; esd = gf_odf_desc_esd_new(2); *out_esd = esd; esd->decoderConfig->streamType = GF_STREAM_TEXT; esd->decoderConfig->objectTypeIndication = GF_CODECID_WEBVTT; if (vtte->config) { esd->decoderConfig->decoderSpecificInfo->dataLength = (u32) strlen(vtte->config->string); esd->decoderConfig->decoderSpecificInfo->data = gf_malloc(sizeof(char)*esd->decoderConfig->decoderSpecificInfo->dataLength); memcpy(esd->decoderConfig->decoderSpecificInfo->data, vtte->config->string, esd->decoderConfig->decoderSpecificInfo->dataLength); } } break; case GF_ISOM_BOX_TYPE_STPP: case GF_ISOM_BOX_TYPE_SBTT: case GF_ISOM_BOX_TYPE_STXT: break; #endif case GF_ISOM_SUBTYPE_3GP_AMR: case GF_ISOM_SUBTYPE_3GP_AMR_WB: case GF_ISOM_SUBTYPE_3GP_EVRC: case GF_ISOM_SUBTYPE_3GP_QCELP: case GF_ISOM_SUBTYPE_3GP_SMV: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_AUDIO) return GF_ISOM_INVALID_MEDIA; if (!true_desc_only) { GF_Err e = gf_isom_get_3gpp_audio_esd(mdia->information->sampleTable, type, (GF_GenericAudioSampleEntryBox*)entry, out_esd); if (e) return e; break; } else return GF_ISOM_INVALID_MEDIA; case GF_ISOM_SUBTYPE_OPUS: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_AUDIO) return GF_ISOM_INVALID_MEDIA; { GF_OpusSpecificBox *e = ((GF_MPEGAudioSampleEntryBox*)entry)->cfg_opus; GF_BitStream *bs_out; if (!e) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("ESD not found for Opus\n)")); break; } *out_esd = gf_odf_desc_esd_new(2); (*out_esd)->decoderConfig->streamType = GF_STREAM_AUDIO; (*out_esd)->decoderConfig->objectTypeIndication = GF_CODECID_OPUS; //serialize box with header - compatibility with ffmpeg bs_out = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_isom_box_size((GF_Box *) e); gf_isom_box_write((GF_Box *) e, bs_out); gf_bs_get_content(bs_out, & (*out_esd)->decoderConfig->decoderSpecificInfo->data, & (*out_esd)->decoderConfig->decoderSpecificInfo->dataLength); gf_bs_del(bs_out); break; } case GF_ISOM_SUBTYPE_3GP_H263: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_VIDEO) return GF_ISOM_INVALID_MEDIA; if (true_desc_only) { return GF_ISOM_INVALID_MEDIA; } else { esd = gf_odf_desc_esd_new(2); *out_esd = esd; esd->decoderConfig->streamType = GF_STREAM_VISUAL; esd->decoderConfig->objectTypeIndication = GF_CODECID_H263; break; } case GF_ISOM_SUBTYPE_MP3: if 
(entry->internal_type != GF_ISOM_SAMPLE_ENTRY_AUDIO) return GF_ISOM_INVALID_MEDIA; if (true_desc_only) { return GF_ISOM_INVALID_MEDIA; } else { esd = gf_odf_desc_esd_new(2); *out_esd = esd; esd->decoderConfig->streamType = GF_STREAM_AUDIO; esd->decoderConfig->objectTypeIndication = GF_CODECID_MPEG_AUDIO; break; } case GF_ISOM_SUBTYPE_LSR1: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_MP4S) return GF_ISOM_INVALID_MEDIA; if (true_desc_only) { return GF_ISOM_INVALID_MEDIA; } else { GF_LASeRSampleEntryBox*ptr = (GF_LASeRSampleEntryBox*)entry; esd = gf_odf_desc_esd_new(2); *out_esd = esd; esd->decoderConfig->streamType = GF_STREAM_SCENE; esd->decoderConfig->objectTypeIndication = GF_CODECID_LASER; esd->decoderConfig->decoderSpecificInfo->dataLength = ptr->lsr_config->hdr_size; esd->decoderConfig->decoderSpecificInfo->data = gf_malloc(sizeof(char)*ptr->lsr_config->hdr_size); if (!esd->decoderConfig->decoderSpecificInfo->data) return GF_OUT_OF_MEM; memcpy(esd->decoderConfig->decoderSpecificInfo->data, ptr->lsr_config->hdr, sizeof(char)*ptr->lsr_config->hdr_size); break; } case GF_ISOM_SUBTYPE_MH3D_MHA1: case GF_ISOM_SUBTYPE_MH3D_MHA2: case GF_ISOM_SUBTYPE_MH3D_MHM1: case GF_ISOM_SUBTYPE_MH3D_MHM2: if (entry->internal_type != GF_ISOM_SAMPLE_ENTRY_AUDIO) return GF_ISOM_INVALID_MEDIA; if (true_desc_only) { return GF_ISOM_INVALID_MEDIA; } else { GF_MPEGAudioSampleEntryBox*ptr = (GF_MPEGAudioSampleEntryBox*)entry; esd = gf_odf_desc_esd_new(2); *out_esd = esd; esd->decoderConfig->streamType = GF_STREAM_AUDIO; if ((type==GF_ISOM_SUBTYPE_MH3D_MHA1) || (type==GF_ISOM_SUBTYPE_MH3D_MHA2)) esd->decoderConfig->objectTypeIndication = GF_CODECID_MPHA; else esd->decoderConfig->objectTypeIndication = GF_CODECID_MHAS; if (ptr->cfg_mha) { GF_BitStream *bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_bs_write_u8(bs, ptr->cfg_mha->configuration_version); gf_bs_write_u8(bs, ptr->cfg_mha->mha_pl_indication); gf_bs_write_u8(bs, ptr->cfg_mha->reference_channel_layout); gf_bs_write_u16(bs, ptr->cfg_mha->mha_config ? ptr->cfg_mha->mha_config_size : 0); if (ptr->cfg_mha->mha_config && ptr->cfg_mha->mha_config_size) gf_bs_write_data(bs, ptr->cfg_mha->mha_config, ptr->cfg_mha->mha_config_size); gf_bs_get_content(bs, &esd->decoderConfig->decoderSpecificInfo->data, &esd->decoderConfig->decoderSpecificInfo->dataLength); gf_bs_del(bs); } } break; default: return GF_ISOM_INVALID_MEDIA; } if (true_desc_only) { if (!esd) return GF_ISOM_INVALID_MEDIA; *out_esd = esd; return GF_OK; } else { if (!esd && !*out_esd) return GF_ISOM_INVALID_MEDIA; if (*out_esd == NULL) return gf_odf_desc_copy((GF_Descriptor *)esd, (GF_Descriptor **)out_esd); } return GF_OK; } Bool Media_IsSampleSyncShadow(GF_ShadowSyncBox *stsh, u32 sampleNumber) { u32 i; GF_StshEntry *ent; if (!stsh) return 0; i=0; while ((ent = (GF_StshEntry*)gf_list_enum(stsh->entries, &i))) { if ((u32) ent->syncSampleNumber == sampleNumber) return 1; else if ((u32) ent->syncSampleNumber > sampleNumber) return 0; } return 0; } GF_Err Media_GetSample(GF_MediaBox *mdia, u32 sampleNumber, GF_ISOSample **samp, u32 *sIDX, Bool no_data, u64 *out_offset) { GF_Err e; u32 bytesRead; u32 dataRefIndex, chunkNumber; u64 offset, new_size; u32 sdesc_idx; GF_SampleEntryBox *entry; GF_StscEntry *stsc_entry; if (!mdia || !mdia->information->sampleTable) return GF_BAD_PARAM; if (!mdia->information->sampleTable->SampleSize) return GF_ISOM_INVALID_FILE; //OK, here we go.... 
if (sampleNumber > mdia->information->sampleTable->SampleSize->sampleCount) return GF_BAD_PARAM; //the data info if (!sIDX && !no_data) return GF_BAD_PARAM; e = stbl_GetSampleInfos(mdia->information->sampleTable, sampleNumber, &offset, &chunkNumber, &sdesc_idx, &stsc_entry); if (e) return e; if (sIDX) (*sIDX) = sdesc_idx; if (out_offset) *out_offset = offset; if (!samp ) return GF_OK; if (mdia->information->sampleTable->TimeToSample) { //get the DTS e = stbl_GetSampleDTS(mdia->information->sampleTable->TimeToSample, sampleNumber, &(*samp)->DTS); if (e) return e; } else { (*samp)->DTS=0; } //the CTS offset if (mdia->information->sampleTable->CompositionOffset) { e = stbl_GetSampleCTS(mdia->information->sampleTable->CompositionOffset , sampleNumber, &(*samp)->CTS_Offset); if (e) return e; } else { (*samp)->CTS_Offset = 0; } //the size e = stbl_GetSampleSize(mdia->information->sampleTable->SampleSize, sampleNumber, &(*samp)->dataLength); if (e) return e; //the RAP if (mdia->information->sampleTable->SyncSample) { e = stbl_GetSampleRAP(mdia->information->sampleTable->SyncSample, sampleNumber, &(*samp)->IsRAP, NULL, NULL); if (e) return e; } else { //if no SyncSample, all samples are sync (cf spec) (*samp)->IsRAP = RAP; } if (mdia->information->sampleTable->SampleDep) { u32 isLeading, dependsOn, dependedOn, redundant; e = stbl_GetSampleDepType(mdia->information->sampleTable->SampleDep, sampleNumber, &isLeading, &dependsOn, &dependedOn, &redundant); if (!e) { if (dependsOn==1) (*samp)->IsRAP = RAP_NO; //commenting following code since it is wrong - an I frame is not always a SAP1, it can be a SAP2 or SAP3. //Keeping this code breaks AVC / HEVC openGOP import when writing sample dependencies //else if (dependsOn==2) (*samp)->IsRAP = RAP; /*if not depended upon and redundant, mark as carousel sample*/ if ((dependedOn==2) && (redundant==1)) (*samp)->IsRAP = RAP_REDUNDANT; /*TODO FIXME - we must enhance the IsRAP semantics to carry disposable info ... */ } } /*get sync shadow*/ if (Media_IsSampleSyncShadow(mdia->information->sampleTable->ShadowSync, sampleNumber)) (*samp)->IsRAP = RAP_REDUNDANT; //the data info if (!sIDX && !no_data) return GF_BAD_PARAM; if (!sIDX && !out_offset) return GF_OK; if (!sIDX) return GF_OK; (*sIDX) = sdesc_idx; // e = stbl_GetSampleInfos(mdia->information->sampleTable, sampleNumber, &offset, &chunkNumber, sIDX, &stsc_entry); // if (e) return e; //then get the DataRef e = Media_GetSampleDesc(mdia, sdesc_idx, &entry, &dataRefIndex); if (e) return e; //if moov is compressed, remove offset if sample is after moov in this file if (mdia->mediaTrack->moov->compressed_diff) { GF_DataEntryBox *ent = (GF_DataEntryBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, dataRefIndex - 1); if (ent && (ent->flags&1) && (offset>=mdia->mediaTrack->moov->file_offset)) { offset -= mdia->mediaTrack->moov->compressed_diff; } } if (no_data) { if ( ((*samp)->dataLength != 0) && mdia->mediaTrack->pack_num_samples) { u32 idx_in_chunk = sampleNumber - mdia->information->sampleTable->SampleToChunk->firstSampleInCurrentChunk; u32 left_in_chunk = stsc_entry->samplesPerChunk - idx_in_chunk; if (left_in_chunk > mdia->mediaTrack->pack_num_samples) left_in_chunk = mdia->mediaTrack->pack_num_samples; (*samp)->dataLength *= left_in_chunk; (*samp)->nb_pack = left_in_chunk; } return GF_OK; } // Open the data handler - check our mode, don't reopen in read only if this is //the same entry. 
In other modes we have no choice because the main data map is //divided into the original and the edition files if (mdia->mediaTrack->moov->mov->openMode == GF_ISOM_OPEN_READ) { //same as last call in read mode if (!mdia->information->dataHandler) { e = gf_isom_datamap_open(mdia, dataRefIndex, stsc_entry->isEdited); if (e) return e; } mdia->information->dataEntryIndex = dataRefIndex; } else { e = gf_isom_datamap_open(mdia, dataRefIndex, stsc_entry->isEdited); if (e) return e; } if ( mdia->mediaTrack->moov->mov->read_byte_offset || mdia->mediaTrack->moov->mov->bytes_removed) { GF_DataEntryBox *ent = (GF_DataEntryBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, dataRefIndex - 1); if (ent && (ent->flags&1)) { u64 real_offset = mdia->mediaTrack->moov->mov->read_byte_offset + mdia->mediaTrack->moov->mov->bytes_removed; if (offset < real_offset) return GF_IO_ERR; if (mdia->information->dataHandler->last_read_offset != mdia->mediaTrack->moov->mov->read_byte_offset) { mdia->information->dataHandler->last_read_offset = mdia->mediaTrack->moov->mov->read_byte_offset; gf_bs_get_refreshed_size(mdia->information->dataHandler->bs); } offset -= real_offset; } } if ((*samp)->dataLength != 0) { if (mdia->mediaTrack->pack_num_samples) { u32 idx_in_chunk = sampleNumber - mdia->information->sampleTable->SampleToChunk->firstSampleInCurrentChunk; u32 left_in_chunk = stsc_entry->samplesPerChunk - idx_in_chunk; if (left_in_chunk > mdia->mediaTrack->pack_num_samples) left_in_chunk = mdia->mediaTrack->pack_num_samples; (*samp)->dataLength *= left_in_chunk; (*samp)->nb_pack = left_in_chunk; } /*and finally get the data, include padding if needed*/ if ((*samp)->alloc_size) { if ((*samp)->alloc_size < (*samp)->dataLength + mdia->mediaTrack->padding_bytes) { (*samp)->data = (char *) gf_realloc((*samp)->data, sizeof(char) * ( (*samp)->dataLength + mdia->mediaTrack->padding_bytes) ); if (! (*samp)->data) return GF_OUT_OF_MEM; (*samp)->alloc_size = (*samp)->dataLength + mdia->mediaTrack->padding_bytes; } } else { (*samp)->data = (char *) gf_malloc(sizeof(char) * ( (*samp)->dataLength + mdia->mediaTrack->padding_bytes) ); if (! (*samp)->data) return GF_OUT_OF_MEM; } if (mdia->mediaTrack->padding_bytes) memset((*samp)->data + (*samp)->dataLength, 0, sizeof(char) * mdia->mediaTrack->padding_bytes); //check if we can get the sample (make sure we have enougth data...) 
new_size = gf_bs_get_size(mdia->information->dataHandler->bs); if (offset + (*samp)->dataLength > new_size) { //always refresh the size to avoid wrong info on http/ftp new_size = gf_bs_get_refreshed_size(mdia->information->dataHandler->bs); if (offset + (*samp)->dataLength > new_size) { mdia->BytesMissing = offset + (*samp)->dataLength - new_size; return GF_ISOM_INCOMPLETE_FILE; } } bytesRead = gf_isom_datamap_get_data(mdia->information->dataHandler, (*samp)->data, (*samp)->dataLength, offset); //if bytesRead != sampleSize, we have an IO err if (bytesRead < (*samp)->dataLength) { return GF_IO_ERR; } mdia->BytesMissing = 0; } //finally rewrite the sample if this is an OD Access Unit or NAL-based one //we do this even if sample size is zero because of sample implicit reconstruction rules (especially tile tracks) if (mdia->handler->handlerType == GF_ISOM_MEDIA_OD) { if (!mdia->mediaTrack->moov->mov->disable_odf_translate) { e = Media_RewriteODFrame(mdia, *samp); if (e) return e; } } else if (gf_isom_is_nalu_based_entry(mdia, entry) && !gf_isom_is_encrypted_entry(entry->type) ) { e = gf_isom_nalu_sample_rewrite(mdia, *samp, sampleNumber, (GF_MPEGVisualSampleEntryBox *)entry); if (e) return e; } else if (mdia->mediaTrack->moov->mov->convert_streaming_text && ((mdia->handler->handlerType == GF_ISOM_MEDIA_TEXT) || (mdia->handler->handlerType == GF_ISOM_MEDIA_SCENE) || (mdia->handler->handlerType == GF_ISOM_MEDIA_SUBT)) && (entry->type == GF_ISOM_BOX_TYPE_TX3G || entry->type == GF_ISOM_BOX_TYPE_TEXT) ) { u64 dur; if (sampleNumber == mdia->information->sampleTable->SampleSize->sampleCount) { dur = mdia->mediaHeader->duration - (*samp)->DTS; } else { stbl_GetSampleDTS(mdia->information->sampleTable->TimeToSample, sampleNumber+1, &dur); dur -= (*samp)->DTS; } e = gf_isom_rewrite_text_sample(*samp, sdesc_idx, (u32) dur); if (e) return e; } return GF_OK; } GF_Err Media_CheckDataEntry(GF_MediaBox *mdia, u32 dataEntryIndex) { GF_DataEntryURLBox *entry; GF_DataMap *map; GF_Err e; if (!mdia || !dataEntryIndex || dataEntryIndex > gf_list_count(mdia->information->dataInformation->dref->child_boxes)) return GF_BAD_PARAM; entry = (GF_DataEntryURLBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, dataEntryIndex - 1); if (!entry) return GF_ISOM_INVALID_FILE; if (entry->flags == 1) return GF_OK; //ok, not self contained, let's go for it... 
//we only support alias and URL boxes if ((entry->type != GF_ISOM_BOX_TYPE_URL) && (entry->type != GF_QT_BOX_TYPE_ALIS) ) return GF_NOT_SUPPORTED; if (mdia->mediaTrack->moov->mov->openMode == GF_ISOM_OPEN_WRITE) { e = gf_isom_datamap_new(entry->location, NULL, GF_ISOM_DATA_MAP_READ, &map); } else { e = gf_isom_datamap_new(entry->location, mdia->mediaTrack->moov->mov->fileName, GF_ISOM_DATA_MAP_READ, &map); } if (e) return e; gf_isom_datamap_del(map); return GF_OK; } Bool Media_IsSelfContained(GF_MediaBox *mdia, u32 StreamDescIndex) { u32 drefIndex=0; GF_FullBox *a=NULL; GF_SampleEntryBox *se = NULL; Media_GetSampleDesc(mdia, StreamDescIndex, &se, &drefIndex); if (!drefIndex) return 0; if (mdia && mdia->information && mdia->information->dataInformation && mdia->information->dataInformation->dref ) { a = (GF_FullBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, drefIndex - 1); } if (!a) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] broken file: Data reference index set to %d but no data reference entry found\n", drefIndex)); return 1; } if (a->flags & 1) return 1; /*QT specific*/ if (a->type == GF_QT_BOX_TYPE_ALIS) return 1; return 0; } GF_ISOMDataRefAllType Media_SelfContainedType(GF_MediaBox *mdia) { u32 nb_ext, nb_self; u32 i, count; nb_ext = nb_self = 0; count = mdia->information->sampleTable->SampleDescription ? gf_list_count(mdia->information->sampleTable->SampleDescription->child_boxes) : 0; for (i=0; i<count; i++) { if (Media_IsSelfContained(mdia, i+1)) nb_self++; else nb_ext++; } if (nb_ext==count) return ISOM_DREF_EXT; if (nb_self==count) return ISOM_DREF_SELF; return ISOM_DREF_MIXED; } //look for a sync sample from a given point in media time GF_Err Media_FindSyncSample(GF_SampleTableBox *stbl, u32 searchFromSample, u32 *sampleNumber, u8 mode) { GF_ISOSAPType isRAP; u32 next, prev, next_in_sap, prev_in_sap; if (!stbl || !stbl->SyncSample) return GF_BAD_PARAM; //set to current sample if we don't find a RAP *sampleNumber = searchFromSample; //this is not the exact sample, but the prev move to next sample if enough samples.... if ( (mode == GF_ISOM_SEARCH_SYNC_FORWARD) && (searchFromSample == stbl->SampleSize->sampleCount) ) { return GF_OK; } if ( (mode == GF_ISOM_SEARCH_SYNC_BACKWARD) && !searchFromSample) { *sampleNumber = 1; return GF_OK; } //get the entry stbl_GetSampleRAP(stbl->SyncSample, searchFromSample, &isRAP, &prev, &next); if (isRAP) { (*sampleNumber) = searchFromSample; return GF_OK; } /*check sample groups - prev & next are overwritten if RAP group is found, but are not re-initialized otherwise*/ stbl_SearchSAPs(stbl, searchFromSample, &isRAP, &prev_in_sap, &next_in_sap); if (isRAP) { (*sampleNumber) = searchFromSample; return GF_OK; } if (prev_in_sap > prev) prev = prev_in_sap; if (next_in_sap && next_in_sap < next) next = next_in_sap; //nothing yet, go for next time... 
if (mode == GF_ISOM_SEARCH_SYNC_FORWARD) { if (next) *sampleNumber = next; } else { if (prev) *sampleNumber = prev; } return GF_OK; } //create a DataReference if not existing (only for WRITE-edit mode) GF_Err Media_FindDataRef(GF_DataReferenceBox *dref, char *URLname, char *URNname, u32 *dataRefIndex) { u32 i; GF_DataEntryURLBox *entry; if (!dref) return GF_BAD_PARAM; *dataRefIndex = 0; i=0; while ((entry = (GF_DataEntryURLBox*)gf_list_enum(dref->child_boxes, &i))) { if (entry->type == GF_ISOM_BOX_TYPE_URL) { //self-contained case if (entry->flags == 1) { //if nothing specified, get the dataRef if (!URLname && !URNname) { *dataRefIndex = i; return GF_OK; } } else { //OK, check if we have URL if (URLname && !strcmp(URLname, entry->location)) { *dataRefIndex = i; return GF_OK; } } } else { //this is a URN one, only check the URN name (URL optional) if (URNname && !strcmp(URNname, ((GF_DataEntryURNBox *)entry)->nameURN)) { *dataRefIndex = i; return GF_OK; } } } return GF_OK; } //Get the total media duration based on the TimeToSample table GF_Err Media_SetDuration(GF_TrackBox *trak) { GF_Err e; GF_ESD *esd; u64 DTS; GF_SttsEntry *ent; u32 nbSamp; if (!trak || !trak->Media || !trak->Media->information || !trak->Media->information->sampleTable) return GF_ISOM_INVALID_FILE; if (!trak->Media->information->sampleTable->SampleSize || !trak->Media->information->sampleTable->TimeToSample) return GF_ISOM_INVALID_FILE; nbSamp = trak->Media->information->sampleTable->SampleSize->sampleCount; //we need to check how many samples we have. // == 1 -> last sample duration == default duration // > 1 -> last sample duration == prev sample duration switch (nbSamp) { case 0: trak->Media->mediaHeader->duration = 0; if (Track_IsMPEG4Stream(trak->Media->handler->handlerType)) { Media_GetESD(trak->Media, 1, &esd, 1); if (esd && esd->URLString) trak->Media->mediaHeader->duration = (u64) -1; } return GF_OK; // case 1: // trak->Media->mediaHeader->duration = trak->Media->mediaHeader->timeScale; // return GF_OK; default: //we assume a constant frame rate for the media and assume the last sample //will be hold the same time as the prev one e = stbl_GetSampleDTS(trak->Media->information->sampleTable->TimeToSample, nbSamp, &DTS); if (e < 0) { return e; } if (trak->Media->information->sampleTable->TimeToSample->nb_entries > 0) { ent = &trak->Media->information->sampleTable->TimeToSample->entries[trak->Media->information->sampleTable->TimeToSample->nb_entries-1]; } else { ent = NULL; } trak->Media->mediaHeader->duration = DTS; #if 1 if (ent) trak->Media->mediaHeader->duration += ent->sampleDelta; #else if (!ent) { u64 DTSprev; stbl_GetSampleDTS(trak->Media->information->sampleTable->TimeToSample, nbSamp-1, &DTSprev); trak->Media->mediaHeader->duration += (DTS - DTSprev); } else { #ifndef GPAC_DISABLE_ISOM_WRITE if (trak->moov->mov->editFileMap && trak->Media->information->sampleTable->CompositionOffset) { u32 count, i; u64 max_ts; GF_DttsEntry *cts_ent; GF_CompositionOffsetBox *ctts = trak->Media->information->sampleTable->CompositionOffset; if (ctts->w_LastSampleNumber==nbSamp) { count = gf_list_count(ctts->entryList); max_ts = trak->Media->mediaHeader->duration; while (count) { count -= 1; cts_ent = gf_list_get(ctts->entryList, count); if (nbSamp<cts_ent->sampleCount) break; for (i=0; i<cts_ent->sampleCount; i++) { stbl_GetSampleDTS(trak->Media->information->sampleTable->TimeToSample, nbSamp-i, &DTS); if ((s32) cts_ent->decodingOffset < 0) max_ts = DTS; else max_ts = DTS + cts_ent->decodingOffset; if 
(max_ts>=trak->Media->mediaHeader->duration) { trak->Media->mediaHeader->duration = max_ts; } else { break; } } if (max_ts<trak->Media->mediaHeader->duration) { break; } nbSamp-=cts_ent->sampleCount; } } } #endif /*GPAC_DISABLE_ISOM_WRITE*/ trak->Media->mediaHeader->duration += ent->sampleDelta; } #endif return GF_OK; } } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err Media_SetDrefURL(GF_DataEntryURLBox *dref_entry, const char *origName, const char *finalName) { //for now we only support dref created in same folder for relative URLs if (strstr(origName, "://") || ((origName[1]==':') && (origName[2]=='\\')) || (origName[0]=='/') || (origName[0]=='\\') ) { dref_entry->location = gf_strdup(origName); } else { char *fname = strrchr(origName, '/'); if (!fname) fname = strrchr(origName, '\\'); if (fname) fname++; if (!fname) { dref_entry->location = gf_strdup(origName); } else { u32 len = (u32) (fname - origName); if (!finalName || strncmp(origName, finalName, len)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("Concatenation of relative path %s with relative path %s not supported, use absolute URLs\n", origName, finalName)); return GF_NOT_SUPPORTED; } else { dref_entry->location = gf_strdup(fname); } } } return GF_OK; } GF_Err Media_CreateDataRef(GF_ISOFile *movie, GF_DataReferenceBox *dref, char *URLname, char *URNname, u32 *dataRefIndex) { GF_Err e; Bool use_alis=GF_FALSE; GF_DataEntryURLBox *entry; if (URLname && !strcmp(URLname, "alis")) { URLname = NULL; use_alis=GF_TRUE; } if (!URLname && !URNname) { //THIS IS SELF CONTAIN, create a regular entry if needed entry = (GF_DataEntryURLBox *) gf_isom_box_new_parent(&dref->child_boxes, use_alis ? GF_QT_BOX_TYPE_ALIS : GF_ISOM_BOX_TYPE_URL); if (!entry) return GF_OUT_OF_MEM; entry->flags = 1; *dataRefIndex = gf_list_count(dref->child_boxes); return GF_OK; } else if (!URNname && URLname) { //THIS IS URL entry = (GF_DataEntryURLBox *) gf_isom_box_new_parent(&dref->child_boxes, GF_ISOM_BOX_TYPE_URL); if (!entry) return GF_OUT_OF_MEM; entry->flags = 0; e = Media_SetDrefURL(entry, URLname, movie->fileName ? movie->fileName : movie->finalName); if (! entry->location) { gf_isom_box_del_parent(&dref->child_boxes, (GF_Box *)entry); return e ? e : GF_OUT_OF_MEM; } *dataRefIndex = gf_list_count(dref->child_boxes); return GF_OK; } else { //THIS IS URN entry = (GF_DataEntryURLBox *) gf_isom_box_new_parent(&dref->child_boxes, GF_ISOM_BOX_TYPE_URN); if (!entry) return GF_OUT_OF_MEM; ((GF_DataEntryURNBox *)entry)->flags = 0; ((GF_DataEntryURNBox *)entry)->nameURN = (char*)gf_malloc(strlen(URNname)+1); if (! ((GF_DataEntryURNBox *)entry)->nameURN) { gf_isom_box_del_parent(&dref->child_boxes, (GF_Box *)entry); return GF_OUT_OF_MEM; } strcpy(((GF_DataEntryURNBox *)entry)->nameURN, URNname); //check for URL if (URLname) { ((GF_DataEntryURNBox *)entry)->location = (char*)gf_malloc(strlen(URLname)+1); if (! 
((GF_DataEntryURNBox *)entry)->location) { gf_isom_box_del_parent(&dref->child_boxes, (GF_Box *)entry); return GF_OUT_OF_MEM; } strcpy(((GF_DataEntryURNBox *)entry)->location, URLname); } *dataRefIndex = gf_list_count(dref->child_boxes); return GF_OK; } return GF_OK; } GF_Err Media_AddSample(GF_MediaBox *mdia, u64 data_offset, const GF_ISOSample *sample, u32 StreamDescIndex, u32 syncShadowNumber) { GF_Err e; GF_SampleTableBox *stbl; u32 sampleNumber, i; if (!mdia || !sample) return GF_BAD_PARAM; stbl = mdia->information->sampleTable; //get a valid sampleNumber for this new guy e = stbl_AddDTS(stbl, sample->DTS, &sampleNumber, mdia->mediaHeader->timeScale, sample->nb_pack); if (e) return e; //add size e = stbl_AddSize(stbl->SampleSize, sampleNumber, sample->dataLength, sample->nb_pack); if (e) return e; //adds CTS offset if (sample->CTS_Offset) { //if we don't have a CTS table, add it... if (!stbl->CompositionOffset) { stbl->CompositionOffset = (GF_CompositionOffsetBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_CTTS); if (!stbl->CompositionOffset) return GF_OUT_OF_MEM; } //then add our CTS (the prev samples with no CTS offset will be automatically added... e = stbl_AddCTS(stbl, sampleNumber, sample->CTS_Offset); if (e) return e; } else if (stbl->CompositionOffset) { e = stbl_AddCTS(stbl, sampleNumber, sample->CTS_Offset); if (e) return e; } //The first non sync sample we see must create a syncTable if (sample->IsRAP) { //insert it only if we have a sync table and if we have an IDR slice if (stbl->SyncSample && (sample->IsRAP == RAP)) { e = stbl_AddRAP(stbl->SyncSample, sampleNumber); if (e) return e; } } else { //non-sync sample. Create a SyncSample table if needed if (!stbl->SyncSample) { stbl->SyncSample = (GF_SyncSampleBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_STSS); if (!stbl->SyncSample) return GF_OUT_OF_MEM; //all the prev samples are sync for (i=0; i<stbl->SampleSize->sampleCount; i++) { if (i+1 != sampleNumber) { e = stbl_AddRAP(stbl->SyncSample, i+1); if (e) return e; } } } } if (sample->IsRAP==RAP_REDUNDANT) { e = stbl_AddRedundant(stbl, sampleNumber); if (e) return e; } if (!mdia->mediaTrack->chunk_cache) { //and update the chunks e = stbl_AddChunkOffset(mdia, sampleNumber, StreamDescIndex, data_offset, sample->nb_pack); if (e) return e; } if (!syncShadowNumber) return GF_OK; if (!stbl->ShadowSync) { stbl->ShadowSync = (GF_ShadowSyncBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_STSH); if (!stbl->ShadowSync) return GF_OUT_OF_MEM; } return stbl_AddShadow(mdia->information->sampleTable->ShadowSync, sampleNumber, syncShadowNumber); } static GF_Err UpdateSample(GF_MediaBox *mdia, u32 sampleNumber, u32 size, s32 CTS, u64 offset, u8 isRap) { u32 i; GF_SampleTableBox *stbl = mdia->information->sampleTable; //set size, offset, RAP, CTS ... stbl_SetSampleSize(stbl->SampleSize, sampleNumber, size); stbl_SetChunkOffset(mdia, sampleNumber, offset); //do we have a CTS? if (stbl->CompositionOffset) { stbl_SetSampleCTS(stbl, sampleNumber, CTS); } else { //do we need one ?? if (CTS) { stbl->CompositionOffset = (GF_CompositionOffsetBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_CTTS); if (!stbl->CompositionOffset) return GF_OUT_OF_MEM; stbl_AddCTS(stbl, sampleNumber, CTS); } } //do we have a sync ??? if (stbl->SyncSample) { stbl_SetSampleRAP(stbl->SyncSample, sampleNumber, isRap); } else { //do we need one if (! 
isRap) { stbl->SyncSample = (GF_SyncSampleBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_STSS); if (!stbl->SyncSample) return GF_OUT_OF_MEM; //what a pain: all the sample we had have to be sync ... for (i=0; i<stbl->SampleSize->sampleCount; i++) { if (i+1 != sampleNumber) stbl_AddRAP(stbl->SyncSample, i+1); } } } if (isRap==2) { stbl_SetRedundant(stbl, sampleNumber); } return GF_OK; } GF_Err Media_UpdateSample(GF_MediaBox *mdia, u32 sampleNumber, GF_ISOSample *sample, Bool data_only) { GF_Err e; u32 drefIndex, chunkNum, descIndex; u64 newOffset, DTS; GF_DataEntryURLBox *Dentry; GF_SampleTableBox *stbl; if (!mdia || !sample || !sampleNumber || !mdia->mediaTrack->moov->mov->editFileMap) return GF_BAD_PARAM; stbl = mdia->information->sampleTable; if (!data_only) { //check we have the sampe dts e = stbl_GetSampleDTS(stbl->TimeToSample, sampleNumber, &DTS); if (e) return e; if (DTS != sample->DTS) return GF_BAD_PARAM; } //get our infos stbl_GetSampleInfos(stbl, sampleNumber, &newOffset, &chunkNum, &descIndex, NULL); //then check the data ref e = Media_GetSampleDesc(mdia, descIndex, NULL, &drefIndex); if (e) return e; Dentry = (GF_DataEntryURLBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, drefIndex - 1); if (!Dentry) return GF_ISOM_INVALID_FILE; if (Dentry->flags != 1) return GF_BAD_PARAM; //MEDIA DATA EDIT: write this new sample to the edit temp file newOffset = gf_isom_datamap_get_offset(mdia->mediaTrack->moov->mov->editFileMap); if (sample->dataLength) { e = gf_isom_datamap_add_data(mdia->mediaTrack->moov->mov->editFileMap, sample->data, sample->dataLength); if (e) return e; } if (data_only) { stbl_SetSampleSize(stbl->SampleSize, sampleNumber, sample->dataLength); return stbl_SetChunkOffset(mdia, sampleNumber, newOffset); } return UpdateSample(mdia, sampleNumber, sample->dataLength, sample->CTS_Offset, newOffset, sample->IsRAP); } GF_Err Media_UpdateSampleReference(GF_MediaBox *mdia, u32 sampleNumber, GF_ISOSample *sample, u64 data_offset) { GF_Err e; u32 drefIndex, chunkNum, descIndex; u64 off, DTS; GF_DataEntryURLBox *Dentry; GF_SampleTableBox *stbl; if (!mdia) return GF_BAD_PARAM; stbl = mdia->information->sampleTable; //check we have the sampe dts e = stbl_GetSampleDTS(stbl->TimeToSample, sampleNumber, &DTS); if (e) return e; if (DTS != sample->DTS) return GF_BAD_PARAM; //get our infos stbl_GetSampleInfos(stbl, sampleNumber, &off, &chunkNum, &descIndex, NULL); //then check the data ref e = Media_GetSampleDesc(mdia, descIndex, NULL, &drefIndex); if (e) return e; Dentry = (GF_DataEntryURLBox*)gf_list_get(mdia->information->dataInformation->dref->child_boxes, drefIndex - 1); if (!Dentry) return GF_ISOM_INVALID_FILE; //we only modify self-contained data if (Dentry->flags == 1) return GF_ISOM_INVALID_MODE; //and we don't modify the media data return UpdateSample(mdia, sampleNumber, sample->dataLength, sample->CTS_Offset, data_offset, sample->IsRAP); } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM*/
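Media_SetDuration in the cell above derives the media duration as the DTS of the last sample plus the delta of the last stts entry, i.e. the last sample is assumed to last as long as its TimeToSample run declares. Since the DTS of the last sample is the sum of all earlier deltas, the duration is simply the sum of sampleCount * sampleDelta over the whole table. A minimal sketch under that assumption, with hypothetical names rather than the GPAC stts structures:

#include <stdio.h>

/* One run of the TimeToSample (stts) table: sampleCount consecutive samples
 * each lasting sampleDelta media ticks. Hypothetical stand-in for GF_SttsEntry. */
typedef struct { unsigned sampleCount; unsigned sampleDelta; } stts_entry;

/* Duration = DTS of the last sample + delta of the last entry, which equals
 * the sum of sampleCount * sampleDelta over the whole table. */
static unsigned long long media_duration(const stts_entry *stts, unsigned nb_entries)
{
    unsigned long long dur = 0;
    unsigned i;

    for (i = 0; i < nb_entries; i++)
        dur += (unsigned long long)stts[i].sampleCount * stts[i].sampleDelta;
    return dur;
}

int main(void)
{
    /* Three samples: two of 40 ticks, then one of 50 ticks. Last DTS is 80,
     * so the reported duration is 80 + 50 = 130 ticks. */
    const stts_entry stts[] = { { 2, 40 }, { 1, 50 } };

    printf("duration = %llu ticks\n", media_duration(stts, 2));
    return 0;
}

For example, an stts table of {2 x 40, 1 x 50} ticks gives a last-sample DTS of 80 and a reported duration of 130 ticks, which is what the loop returns.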
null
271
CWE-787
CVE-2021-32268
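The box parsers in the next source cell (co64_box_read, ctts_box_read, chpl_box_read) guard the declared entry count against the bytes remaining in the box before allocating or filling the table; that guard is the generic mitigation for the CWE-787 out-of-bounds writes this row is tagged with. The sketch below is a standalone illustration of the pattern only, not a claim about the exact patch for CVE-2021-32268; the byte-buffer reader and all names are hypothetical stand-ins for GF_BitStream:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Reads a co64-style table (u32 entry count followed by that many u64 chunk
 * offsets) from an in-memory box payload. Returns NULL when the payload is
 * truncated or the declared count cannot fit in the remaining bytes. */
static uint64_t *read_offset_table(const uint8_t *payload, size_t remaining,
                                   uint32_t *out_count)
{
    uint32_t count;
    uint64_t *offsets;
    size_t i, b;

    *out_count = 0;
    if (remaining < 4) return NULL;
    count = ((uint32_t)payload[0] << 24) | ((uint32_t)payload[1] << 16) |
            ((uint32_t)payload[2] << 8)  |  (uint32_t)payload[3];
    payload += 4;
    remaining -= 4;

    /* The guard: each entry needs 8 bytes, so any count above remaining / 8
     * would read (and later let callers write) past the end of the box. */
    if (count > remaining / 8) return NULL;
    if (!count) return NULL;

    offsets = (uint64_t *)malloc((size_t)count * sizeof(uint64_t));
    if (!offsets) return NULL;

    for (i = 0; i < count; i++) {
        uint64_t v = 0;
        for (b = 0; b < 8; b++)
            v = (v << 8) | payload[i * 8 + b];
        offsets[i] = v;
    }
    *out_count = count;
    return offsets;
}

int main(void)
{
    /* A payload claiming 0x40000000 entries but carrying only 4 data bytes is
     * rejected instead of driving a huge allocation and out-of-bounds copy. */
    const uint8_t bogus[] = { 0x40, 0x00, 0x00, 0x00, 0xde, 0xad, 0xbe, 0xef };
    uint32_t n;
    uint64_t *tab = read_offset_table(bogus, sizeof(bogus), &n);

    printf("table %s, %u entries\n", tab ? "accepted" : "rejected", n);
    free(tab);
    return 0;
}

The key line is the count > remaining / 8 comparison: dividing the remaining payload by the per-entry size rejects counts that could not possibly fit in the box without risking a multiplication overflow.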
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2019 * All rights reserved * * This file is part of GPAC / ISO Media File Format sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/internal/isomedia_dev.h> #ifndef GPAC_DISABLE_ISOM void co64_box_del(GF_Box *s) { GF_ChunkLargeOffsetBox *ptr; ptr = (GF_ChunkLargeOffsetBox *) s; if (ptr == NULL) return; if (ptr->offsets) gf_free(ptr->offsets); gf_free(ptr); } GF_Err co64_box_read(GF_Box *s,GF_BitStream *bs) { u32 entries; GF_ChunkLargeOffsetBox *ptr = (GF_ChunkLargeOffsetBox *) s; ptr->nb_entries = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4) if (ptr->nb_entries > ptr->size / 8) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in co64\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } ptr->offsets = (u64 *) gf_malloc(ptr->nb_entries * sizeof(u64) ); if (ptr->offsets == NULL) return GF_OUT_OF_MEM; ptr->alloc_size = ptr->nb_entries; for (entries = 0; entries < ptr->nb_entries; entries++) { ptr->offsets[entries] = gf_bs_read_u64(bs); } return GF_OK; } GF_Box *co64_box_new() { ISOM_DECL_BOX_ALLOC(GF_ChunkLargeOffsetBox, GF_ISOM_BOX_TYPE_CO64); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err co64_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_ChunkLargeOffsetBox *ptr = (GF_ChunkLargeOffsetBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nb_entries); for (i = 0; i < ptr->nb_entries; i++ ) { gf_bs_write_u64(bs, ptr->offsets[i]); } return GF_OK; } GF_Err co64_box_size(GF_Box *s) { GF_ChunkLargeOffsetBox *ptr = (GF_ChunkLargeOffsetBox *) s; ptr->size += 4 + (8 * ptr->nb_entries); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void cprt_box_del(GF_Box *s) { GF_CopyrightBox *ptr = (GF_CopyrightBox *) s; if (ptr == NULL) return; if (ptr->notice) gf_free(ptr->notice); gf_free(ptr); } GF_Box *chpl_box_new() { ISOM_DECL_BOX_ALLOC(GF_ChapterListBox, GF_ISOM_BOX_TYPE_CHPL); tmp->list = gf_list_new(); tmp->version = 1; return (GF_Box *)tmp; } void chpl_box_del(GF_Box *s) { GF_ChapterListBox *ptr = (GF_ChapterListBox *) s; if (ptr == NULL) return; while (gf_list_count(ptr->list)) { GF_ChapterEntry *ce = (GF_ChapterEntry *)gf_list_get(ptr->list, 0); if (ce->name) gf_free(ce->name); gf_free(ce); gf_list_rem(ptr->list, 0); } gf_list_del(ptr->list); gf_free(ptr); } /*this is using chpl format according to some NeroRecode samples*/ GF_Err chpl_box_read(GF_Box *s,GF_BitStream *bs) { GF_ChapterEntry *ce; u32 nb_chaps, len, i, count; GF_ChapterListBox *ptr = (GF_ChapterListBox *)s; ISOM_DECREASE_SIZE(ptr, 5) /*reserved or ???*/ gf_bs_read_u32(bs); nb_chaps = gf_bs_read_u8(bs); count = 0; while (nb_chaps) { GF_SAFEALLOC(ce, GF_ChapterEntry); if (!ce) return GF_OUT_OF_MEM; ISOM_DECREASE_SIZE(ptr, 9) ce->start_time = gf_bs_read_u64(bs); len = 
gf_bs_read_u8(bs); if (ptr->size<len) return GF_ISOM_INVALID_FILE; if (len) { ce->name = (char *)gf_malloc(sizeof(char)*(len+1)); if (!ce->name) return GF_OUT_OF_MEM; ISOM_DECREASE_SIZE(ptr, len) gf_bs_read_data(bs, ce->name, len); ce->name[len] = 0; } else { ce->name = gf_strdup(""); } for (i=0; i<count; i++) { GF_ChapterEntry *ace = (GF_ChapterEntry *) gf_list_get(ptr->list, i); if (ace->start_time >= ce->start_time) { gf_list_insert(ptr->list, ce, i); ce = NULL; break; } } if (ce) gf_list_add(ptr->list, ce); count++; nb_chaps--; } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err chpl_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 count, i; GF_ChapterListBox *ptr = (GF_ChapterListBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; count = gf_list_count(ptr->list); gf_bs_write_u32(bs, 0); gf_bs_write_u8(bs, count); for (i=0; i<count; i++) { u32 len; GF_ChapterEntry *ce = (GF_ChapterEntry *)gf_list_get(ptr->list, i); gf_bs_write_u64(bs, ce->start_time); if (ce->name) { len = (u32) strlen(ce->name); if (len>255) len = 255; gf_bs_write_u8(bs, len); gf_bs_write_data(bs, ce->name, len); } else { gf_bs_write_u8(bs, 0); } } return GF_OK; } GF_Err chpl_box_size(GF_Box *s) { u32 count, i; GF_ChapterListBox *ptr = (GF_ChapterListBox *)s; ptr->size += 5; count = gf_list_count(ptr->list); for (i=0; i<count; i++) { GF_ChapterEntry *ce = (GF_ChapterEntry *)gf_list_get(ptr->list, i); ptr->size += 9; /*64bit time stamp + 8bit str len*/ if (ce->name) ptr->size += strlen(ce->name); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Err cprt_box_read(GF_Box *s,GF_BitStream *bs) { GF_CopyrightBox *ptr = (GF_CopyrightBox *)s; ISOM_DECREASE_SIZE(ptr, 2); gf_bs_read_int(bs, 1); //the spec is unclear here, just says "the value 0 is interpreted as undetermined" ptr->packedLanguageCode[0] = gf_bs_read_int(bs, 5); ptr->packedLanguageCode[1] = gf_bs_read_int(bs, 5); ptr->packedLanguageCode[2] = gf_bs_read_int(bs, 5); //but before or after compaction ?? 
We assume before if (ptr->packedLanguageCode[0] || ptr->packedLanguageCode[1] || ptr->packedLanguageCode[2]) { ptr->packedLanguageCode[0] += 0x60; ptr->packedLanguageCode[1] += 0x60; ptr->packedLanguageCode[2] += 0x60; } else { ptr->packedLanguageCode[0] = 'u'; ptr->packedLanguageCode[1] = 'n'; ptr->packedLanguageCode[2] = 'd'; } if (ptr->size) { u32 bytesToRead = (u32) ptr->size; ptr->notice = (char*)gf_malloc(bytesToRead * sizeof(char)); if (ptr->notice == NULL) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->notice, bytesToRead); } return GF_OK; } GF_Box *cprt_box_new() { ISOM_DECL_BOX_ALLOC(GF_CopyrightBox, GF_ISOM_BOX_TYPE_CPRT); tmp->packedLanguageCode[0] = 'u'; tmp->packedLanguageCode[1] = 'n'; tmp->packedLanguageCode[2] = 'd'; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err cprt_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_CopyrightBox *ptr = (GF_CopyrightBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, 0, 1); if (ptr->packedLanguageCode[0]) { gf_bs_write_int(bs, ptr->packedLanguageCode[0] - 0x60, 5); gf_bs_write_int(bs, ptr->packedLanguageCode[1] - 0x60, 5); gf_bs_write_int(bs, ptr->packedLanguageCode[2] - 0x60, 5); } else { gf_bs_write_int(bs, 0, 15); } if (ptr->notice) { gf_bs_write_data(bs, ptr->notice, (u32) (strlen(ptr->notice) + 1) ); } return GF_OK; } GF_Err cprt_box_size(GF_Box *s) { GF_CopyrightBox *ptr = (GF_CopyrightBox *)s; ptr->size += 2; if (ptr->notice) ptr->size += strlen(ptr->notice) + 1; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void kind_box_del(GF_Box *s) { GF_KindBox *ptr = (GF_KindBox *) s; if (ptr == NULL) return; if (ptr->schemeURI) gf_free(ptr->schemeURI); if (ptr->value) gf_free(ptr->value); gf_free(ptr); } GF_Err kind_box_read(GF_Box *s,GF_BitStream *bs) { GF_KindBox *ptr = (GF_KindBox *)s; if (ptr->size) { u32 bytesToRead = (u32) ptr->size; char *data; u32 schemeURIlen; data = (char*)gf_malloc(bytesToRead * sizeof(char)); if (!data) return GF_OUT_OF_MEM; gf_bs_read_data(bs, data, bytesToRead); /*safety check in case the string is not null-terminated*/ if (data[bytesToRead-1]) { data = (char*)gf_realloc(data, sizeof(char)*(bytesToRead + 1)); if (!data) return GF_OUT_OF_MEM; data[bytesToRead] = 0; bytesToRead++; } ptr->schemeURI = gf_strdup(data); if (!ptr->schemeURI) return GF_OUT_OF_MEM; schemeURIlen = (u32) strlen(data); if (bytesToRead > schemeURIlen+1) { /* read the value */ char *data_value = data + schemeURIlen +1; ptr->value = gf_strdup(data_value); if (!ptr->value) return GF_OUT_OF_MEM; } gf_free(data); } return GF_OK; } GF_Box *kind_box_new() { ISOM_DECL_BOX_ALLOC(GF_KindBox, GF_ISOM_BOX_TYPE_KIND); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err kind_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_KindBox *ptr = (GF_KindBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->schemeURI) gf_bs_write_data(bs, ptr->schemeURI, (u32) (strlen(ptr->schemeURI) + 1 )); else gf_bs_write_u8(bs, 0); if (ptr->value) { gf_bs_write_data(bs, ptr->value, (u32) (strlen(ptr->value) + 1) ); } return GF_OK; } GF_Err kind_box_size(GF_Box *s) { GF_KindBox *ptr = (GF_KindBox *)s; ptr->size += (ptr->schemeURI ? 
strlen(ptr->schemeURI) : 0) + 1; if (ptr->value) { ptr->size += strlen(ptr->value) + 1; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void ctts_box_del(GF_Box *s) { GF_CompositionOffsetBox *ptr = (GF_CompositionOffsetBox *)s; if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err ctts_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; u32 sampleCount; GF_CompositionOffsetBox *ptr = (GF_CompositionOffsetBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->nb_entries = gf_bs_read_u32(bs); if (ptr->nb_entries > ptr->size / 8) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in ctts\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } ptr->alloc_size = ptr->nb_entries; ptr->entries = (GF_DttsEntry *)gf_malloc(sizeof(GF_DttsEntry)*ptr->alloc_size); if (!ptr->entries) return GF_OUT_OF_MEM; sampleCount = 0; for (i=0; i<ptr->nb_entries; i++) { ISOM_DECREASE_SIZE(ptr, 8); ptr->entries[i].sampleCount = gf_bs_read_u32(bs); if (ptr->version) ptr->entries[i].decodingOffset = gf_bs_read_int(bs, 32); else ptr->entries[i].decodingOffset = (s32) gf_bs_read_u32(bs); sampleCount += ptr->entries[i].sampleCount; if (ptr->max_ts_delta < ABS(ptr->entries[i].decodingOffset)) ptr->max_ts_delta = ABS(ptr->entries[i].decodingOffset); } #ifndef GPAC_DISABLE_ISOM_WRITE ptr->w_LastSampleNumber = sampleCount; #endif return GF_OK; } GF_Box *ctts_box_new() { ISOM_DECL_BOX_ALLOC(GF_CompositionOffsetBox, GF_ISOM_BOX_TYPE_CTTS); return (GF_Box *) tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err ctts_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_CompositionOffsetBox *ptr = (GF_CompositionOffsetBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nb_entries); for (i=0; i<ptr->nb_entries; i++ ) { gf_bs_write_u32(bs, ptr->entries[i].sampleCount); if (ptr->version) { gf_bs_write_int(bs, ptr->entries[i].decodingOffset, 32); } else { gf_bs_write_u32(bs, (u32) ptr->entries[i].decodingOffset); } } return GF_OK; } GF_Err ctts_box_size(GF_Box *s) { GF_CompositionOffsetBox *ptr = (GF_CompositionOffsetBox *) s; ptr->size += 4 + (8 * ptr->nb_entries); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void cslg_box_del(GF_Box *s) { GF_CompositionToDecodeBox *ptr = (GF_CompositionToDecodeBox *)s; if (ptr == NULL) return; gf_free(ptr); return; } GF_Err cslg_box_read(GF_Box *s, GF_BitStream *bs) { GF_CompositionToDecodeBox *ptr = (GF_CompositionToDecodeBox *)s; ISOM_DECREASE_SIZE(ptr, 20); ptr->compositionToDTSShift = gf_bs_read_int(bs, 32); ptr->leastDecodeToDisplayDelta = gf_bs_read_int(bs, 32); ptr->greatestDecodeToDisplayDelta = gf_bs_read_int(bs, 32); ptr->compositionStartTime = gf_bs_read_int(bs, 32); ptr->compositionEndTime = gf_bs_read_int(bs, 32); return GF_OK; } GF_Box *cslg_box_new() { ISOM_DECL_BOX_ALLOC(GF_CompositionToDecodeBox, GF_ISOM_BOX_TYPE_CSLG); return (GF_Box *) tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err cslg_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_CompositionToDecodeBox *ptr = (GF_CompositionToDecodeBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, ptr->compositionToDTSShift, 32); gf_bs_write_int(bs, ptr->leastDecodeToDisplayDelta, 32); gf_bs_write_int(bs, ptr->greatestDecodeToDisplayDelta, 32); gf_bs_write_int(bs, ptr->compositionStartTime, 32); gf_bs_write_int(bs, ptr->compositionEndTime, 32); return GF_OK; } GF_Err cslg_box_size(GF_Box *s) { GF_CompositionToDecodeBox *ptr = (GF_CompositionToDecodeBox *)s; ptr->size += 20; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void 
ccst_box_del(GF_Box *s) { GF_CodingConstraintsBox *ptr = (GF_CodingConstraintsBox *)s; if (ptr) gf_free(ptr); return; } GF_Err ccst_box_read(GF_Box *s, GF_BitStream *bs) { GF_CodingConstraintsBox *ptr = (GF_CodingConstraintsBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->all_ref_pics_intra = gf_bs_read_int(bs, 1); ptr->intra_pred_used = gf_bs_read_int(bs, 1); ptr->max_ref_per_pic = gf_bs_read_int(bs, 4); ptr->reserved = gf_bs_read_int(bs, 26); return GF_OK; } GF_Box *ccst_box_new() { ISOM_DECL_BOX_ALLOC(GF_CodingConstraintsBox, GF_ISOM_BOX_TYPE_CCST); return (GF_Box *) tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err ccst_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_CodingConstraintsBox *ptr = (GF_CodingConstraintsBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, ptr->all_ref_pics_intra, 1); gf_bs_write_int(bs, ptr->intra_pred_used, 1); gf_bs_write_int(bs, ptr->max_ref_per_pic, 4); gf_bs_write_int(bs, 0, 26); return GF_OK; } GF_Err ccst_box_size(GF_Box *s) { GF_CodingConstraintsBox *ptr = (GF_CodingConstraintsBox *)s; ptr->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void url_box_del(GF_Box *s) { GF_DataEntryURLBox *ptr = (GF_DataEntryURLBox *)s; if (ptr == NULL) return; if (ptr->location) gf_free(ptr->location); gf_free(ptr); return; } GF_Err url_box_read(GF_Box *s, GF_BitStream *bs) { GF_DataEntryURLBox *ptr = (GF_DataEntryURLBox *)s; if (ptr->size) { ptr->location = (char*)gf_malloc((u32) ptr->size); if (! ptr->location) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->location, (u32)ptr->size); } return GF_OK; } GF_Box *url_box_new() { ISOM_DECL_BOX_ALLOC(GF_DataEntryURLBox, GF_ISOM_BOX_TYPE_URL); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err url_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_DataEntryURLBox *ptr = (GF_DataEntryURLBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; //the flag set indicates we have a string (WE HAVE TO for URLs) if ( !(ptr->flags & 1)) { if (ptr->location) { gf_bs_write_data(bs, ptr->location, (u32)strlen(ptr->location) + 1); } } return GF_OK; } GF_Err url_box_size(GF_Box *s) { GF_DataEntryURLBox *ptr = (GF_DataEntryURLBox *)s; if ( !(ptr->flags & 1)) { if (ptr->location) ptr->size += 1 + strlen(ptr->location); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void urn_box_del(GF_Box *s) { GF_DataEntryURNBox *ptr = (GF_DataEntryURNBox *)s; if (ptr == NULL) return; if (ptr->location) gf_free(ptr->location); if (ptr->nameURN) gf_free(ptr->nameURN); gf_free(ptr); } GF_Err urn_box_read(GF_Box *s, GF_BitStream *bs) { u32 i, to_read; char *tmpName; GF_DataEntryURNBox *ptr = (GF_DataEntryURNBox *)s; if (! 
ptr->size ) return GF_OK; //here we have to handle that in a clever way to_read = (u32) ptr->size; tmpName = (char*)gf_malloc(sizeof(char) * to_read); if (!tmpName) return GF_OUT_OF_MEM; //get the data gf_bs_read_data(bs, tmpName, to_read); //then get the break i = 0; while ( (i < to_read) && (tmpName[i] != 0) ) { i++; } //check the data is consistent if (i == to_read) { gf_free(tmpName); return GF_ISOM_INVALID_FILE; } //no NULL char, URL is not specified if (i == to_read - 1) { ptr->nameURN = tmpName; ptr->location = NULL; return GF_OK; } //OK, this has both URN and URL ptr->nameURN = (char*)gf_malloc(sizeof(char) * (i+1)); if (!ptr->nameURN) { gf_free(tmpName); return GF_OUT_OF_MEM; } memcpy(ptr->nameURN, tmpName, i + 1); if (tmpName[to_read - 1] != 0) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] urn box contains invalid location field\n" )); } else { ptr->location = (char*)gf_malloc(sizeof(char) * (to_read - i - 1)); if (!ptr->location) { gf_free(tmpName); gf_free(ptr->nameURN); ptr->nameURN = NULL; return GF_OUT_OF_MEM; } memcpy(ptr->location, tmpName + i + 1, (to_read - i - 1)); } gf_free(tmpName); return GF_OK; } GF_Box *urn_box_new() { ISOM_DECL_BOX_ALLOC(GF_DataEntryURNBox, GF_ISOM_BOX_TYPE_URN); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err urn_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_DataEntryURNBox *ptr = (GF_DataEntryURNBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; //the flag set indicates we have a string (WE HAVE TO for URLs) if ( !(ptr->flags & 1)) { //to check, the spec says: First name, then location if (ptr->nameURN) { gf_bs_write_data(bs, ptr->nameURN, (u32)strlen(ptr->nameURN) + 1); } if (ptr->location) { gf_bs_write_data(bs, ptr->location, (u32)strlen(ptr->location) + 1); } } return GF_OK; } GF_Err urn_box_size(GF_Box *s) { GF_DataEntryURNBox *ptr = (GF_DataEntryURNBox *)s; if ( !(ptr->flags & 1)) { if (ptr->nameURN) ptr->size += 1 + strlen(ptr->nameURN); if (ptr->location) ptr->size += 1 + strlen(ptr->location); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void unkn_box_del(GF_Box *s) { GF_UnknownBox *ptr = (GF_UnknownBox *) s; if (!s) return; if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Err unkn_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 bytesToRead, sub_size, sub_a; GF_BitStream *sub_bs; GF_UnknownBox *ptr = (GF_UnknownBox *)s; if (ptr->size > 0xFFFFFFFF) return GF_ISOM_INVALID_FILE; bytesToRead = (u32) (ptr->size); if (!bytesToRead) return GF_OK; if (bytesToRead>1000000) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Unknown box %s (0x%08X) with payload larger than 1 MBytes, ignoring\n", gf_4cc_to_str(ptr->type), ptr->type )); gf_bs_skip_bytes(bs, ptr->dataSize); return GF_OK; } ptr->data = (char*)gf_malloc(bytesToRead); if (ptr->data == NULL ) return GF_OUT_OF_MEM; ptr->dataSize = bytesToRead; gf_bs_read_data(bs, ptr->data, ptr->dataSize); //try to parse container boxes, check if next 8 bytes match a subbox sub_bs = gf_bs_new(ptr->data, ptr->dataSize, GF_BITSTREAM_READ); sub_size = gf_bs_read_u32(sub_bs); sub_a = gf_bs_read_u8(sub_bs); e = (sub_size && (sub_size <= ptr->dataSize)) ? GF_OK : GF_NOT_SUPPORTED; if (! isalnum(sub_a)) e = GF_NOT_SUPPORTED; sub_a = gf_bs_read_u8(sub_bs); if (! isalnum(sub_a)) e = GF_NOT_SUPPORTED; sub_a = gf_bs_read_u8(sub_bs); if (! isalnum(sub_a)) e = GF_NOT_SUPPORTED; sub_a = gf_bs_read_u8(sub_bs); if (! 
isalnum(sub_a)) e = GF_NOT_SUPPORTED; if (e == GF_OK) { gf_bs_seek(sub_bs, 0); gf_bs_set_cookie(sub_bs, GF_ISOM_BS_COOKIE_NO_LOGS); e = gf_isom_box_array_read(s, sub_bs, NULL); } gf_bs_del(sub_bs); if (e==GF_OK) { gf_free(ptr->data); ptr->data = NULL; ptr->dataSize = 0; } else if (s->child_boxes) { gf_isom_box_array_del(s->child_boxes); s->child_boxes=NULL; } return GF_OK; } GF_Box *unkn_box_new() { ISOM_DECL_BOX_ALLOC(GF_UnknownBox, GF_ISOM_BOX_TYPE_UNKNOWN); return (GF_Box *) tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err unkn_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 type; GF_UnknownBox *ptr = (GF_UnknownBox *)s; if (!s) return GF_BAD_PARAM; type = s->type; ptr->type = ptr->original_4cc; e = gf_isom_box_write_header(s, bs); ptr->type = type; if (e) return e; if (ptr->dataSize && ptr->data) { gf_bs_write_data(bs, ptr->data, ptr->dataSize); } return GF_OK; } GF_Err unkn_box_size(GF_Box *s) { GF_UnknownBox *ptr = (GF_UnknownBox *)s; if (ptr->dataSize && ptr->data) { ptr->size += ptr->dataSize; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void def_parent_box_del(GF_Box *s) { if (s) gf_free(s); } GF_Err def_parent_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, NULL); } GF_Box *def_parent_box_new() { ISOM_DECL_BOX_ALLOC(GF_Box, 0); return (GF_Box *) tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err def_parent_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err def_parent_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void def_parent_full_box_del(GF_Box *s) { if (s) gf_free(s); } GF_Err def_parent_full_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, NULL); } GF_Box *def_parent_full_box_new() { ISOM_DECL_BOX_ALLOC(GF_Box, 0); return (GF_Box *) tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err def_parent_full_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_full_box_write(s, bs); } GF_Err def_parent_full_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void uuid_box_del(GF_Box *s) { GF_UnknownUUIDBox *ptr = (GF_UnknownUUIDBox *) s; if (!s) return; if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Err uuid_box_read(GF_Box *s, GF_BitStream *bs) { u32 bytesToRead; GF_UnknownUUIDBox *ptr = (GF_UnknownUUIDBox *)s; if (ptr->size > 0xFFFFFFFF) return GF_ISOM_INVALID_FILE; bytesToRead = (u32) (ptr->size); if (bytesToRead) { ptr->data = (char*)gf_malloc(bytesToRead); if (ptr->data == NULL ) return GF_OUT_OF_MEM; ptr->dataSize = bytesToRead; gf_bs_read_data(bs, ptr->data, ptr->dataSize); } return GF_OK; } GF_Box *uuid_box_new() { ISOM_DECL_BOX_ALLOC(GF_UnknownUUIDBox, GF_ISOM_BOX_TYPE_UUID); return (GF_Box *) tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err uuid_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_UnknownUUIDBox *ptr = (GF_UnknownUUIDBox*)s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; if (ptr->data) { gf_bs_write_data(bs, ptr->data, ptr->dataSize); } return GF_OK; } GF_Err uuid_box_size(GF_Box *s) { GF_UnknownUUIDBox*ptr = (GF_UnknownUUIDBox*)s; ptr->size += ptr->dataSize; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void dinf_box_del(GF_Box *s) { gf_free(s); } GF_Err dinf_on_child_box(GF_Box *s, GF_Box *a) { GF_DataInformationBox *ptr = (GF_DataInformationBox *)s; switch(a->type) { case GF_ISOM_BOX_TYPE_DREF: if (ptr->dref) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->dref = (GF_DataReferenceBox *)a; return GF_OK; } return GF_OK; } GF_Err dinf_box_read(GF_Box *s,
GF_BitStream *bs) { GF_DataInformationBox *dinf; GF_Err e = gf_isom_box_array_read(s, bs, dinf_on_child_box); if (e) { return e; } dinf = (GF_DataInformationBox *)s; if (!dinf->dref) { if (! (gf_bs_get_cookie(bs) & GF_ISOM_BS_COOKIE_NO_LOGS) ) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing dref box in dinf\n")); } dinf->dref = (GF_DataReferenceBox *) gf_isom_box_new_parent(&dinf->child_boxes, GF_ISOM_BOX_TYPE_DREF); } return GF_OK; } GF_Box *dinf_box_new() { ISOM_DECL_BOX_ALLOC(GF_DataInformationBox, GF_ISOM_BOX_TYPE_DINF); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dinf_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err dinf_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void dref_box_del(GF_Box *s) { GF_DataReferenceBox *ptr = (GF_DataReferenceBox *) s; if (ptr == NULL) return; gf_free(ptr); } GF_Err dref_box_read(GF_Box *s, GF_BitStream *bs) { GF_DataReferenceBox *ptr = (GF_DataReferenceBox *)s; ISOM_DECREASE_SIZE(ptr, 4); gf_bs_read_u32(bs); return gf_isom_box_array_read(s, bs, NULL); } GF_Box *dref_box_new() { ISOM_DECL_BOX_ALLOC(GF_DataReferenceBox, GF_ISOM_BOX_TYPE_DREF); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dref_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 count; GF_DataReferenceBox *ptr = (GF_DataReferenceBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; count = ptr->child_boxes ? gf_list_count(ptr->child_boxes) : 0; gf_bs_write_u32(bs, count); return GF_OK; } GF_Err dref_box_size(GF_Box *s) { GF_DataReferenceBox *ptr = (GF_DataReferenceBox *)s; if (!s) return GF_BAD_PARAM; ptr->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void edts_box_del(GF_Box *s) { gf_free(s); } GF_Err edts_on_child_box(GF_Box *s, GF_Box *a) { GF_EditBox *ptr = (GF_EditBox *)s; if (a->type == GF_ISOM_BOX_TYPE_ELST) { if (ptr->editList) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->editList = (GF_EditListBox *)a; return GF_OK; } else { return GF_OK; } return GF_OK; } GF_Err edts_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, edts_on_child_box); } GF_Box *edts_box_new() { ISOM_DECL_BOX_ALLOC(GF_EditBox, GF_ISOM_BOX_TYPE_EDTS); return (GF_Box *) tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err edts_box_write(GF_Box *s, GF_BitStream *bs) { GF_EditBox *ptr = (GF_EditBox *)s; //here we have a trick: if editList is empty, skip the box if (ptr->editList && gf_list_count(ptr->editList->entryList)) { return gf_isom_box_write_header(s, bs); } else { s->size = 0; } return GF_OK; } GF_Err edts_box_size(GF_Box *s) { GF_EditBox *ptr = (GF_EditBox *)s; //here we have a trick: if editList is empty, skip the box if (!ptr->editList || ! 
gf_list_count(ptr->editList->entryList)) { ptr->size = 0; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void elst_box_del(GF_Box *s) { GF_EditListBox *ptr; u32 nb_entries; u32 i; ptr = (GF_EditListBox *)s; if (ptr == NULL) return; nb_entries = gf_list_count(ptr->entryList); for (i = 0; i < nb_entries; i++) { GF_EdtsEntry *p = (GF_EdtsEntry*)gf_list_get(ptr->entryList, i); if (p) gf_free(p); } gf_list_del(ptr->entryList); gf_free(ptr); } GF_Err elst_box_read(GF_Box *s, GF_BitStream *bs) { u32 entries; s32 tr; u32 nb_entries; GF_EditListBox *ptr = (GF_EditListBox *)s; ISOM_DECREASE_SIZE(ptr, 4); nb_entries = gf_bs_read_u32(bs); if (ptr->version == 1) { if (nb_entries > ptr->size / 20) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in ctts\n", nb_entries)); return GF_ISOM_INVALID_FILE; } } else { if (nb_entries > ptr->size / 12) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in ctts\n", nb_entries)); return GF_ISOM_INVALID_FILE; } } for (entries = 0; entries < nb_entries; entries++) { GF_EdtsEntry *p = (GF_EdtsEntry *) gf_malloc(sizeof(GF_EdtsEntry)); if (!p) return GF_OUT_OF_MEM; if (ptr->version == 1) { ISOM_DECREASE_SIZE(ptr, 16); p->segmentDuration = gf_bs_read_u64(bs); p->mediaTime = (s64) gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 8); p->segmentDuration = gf_bs_read_u32(bs); tr = gf_bs_read_u32(bs); p->mediaTime = (s64) tr; } ISOM_DECREASE_SIZE(ptr, 4); p->mediaRate = gf_bs_read_u16(bs); gf_bs_read_u16(bs); gf_list_add(ptr->entryList, p); } return GF_OK; } GF_Box *elst_box_new() { ISOM_DECL_BOX_ALLOC(GF_EditListBox, GF_ISOM_BOX_TYPE_ELST); tmp->entryList = gf_list_new(); if (!tmp->entryList) { gf_free(tmp); return NULL; } return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err elst_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; u32 nb_entries; GF_EditListBox *ptr = (GF_EditListBox *)s; if (!ptr) return GF_BAD_PARAM; nb_entries = gf_list_count(ptr->entryList); e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, nb_entries); for (i = 0; i < nb_entries; i++ ) { GF_EdtsEntry *p = (GF_EdtsEntry*)gf_list_get(ptr->entryList, i); if (ptr->version == 1) { gf_bs_write_u64(bs, p->segmentDuration); gf_bs_write_u64(bs, p->mediaTime); } else { gf_bs_write_u32(bs, (u32) p->segmentDuration); gf_bs_write_u32(bs, (s32) p->mediaTime); } gf_bs_write_u16(bs, p->mediaRate); gf_bs_write_u16(bs, 0); } return GF_OK; } GF_Err elst_box_size(GF_Box *s) { u32 durtimebytes; u32 i, nb_entries; GF_EditListBox *ptr = (GF_EditListBox *)s; //entry count ptr->size += 4; nb_entries = gf_list_count(ptr->entryList); ptr->version = 0; for (i=0; i<nb_entries; i++) { GF_EdtsEntry *p = (GF_EdtsEntry*)gf_list_get(ptr->entryList, i); if ((p->segmentDuration>0xFFFFFFFF) || (p->mediaTime>0xFFFFFFFF)) { ptr->version = 1; break; } } durtimebytes = (ptr->version == 1 ? 
16 : 8) + 4; ptr->size += (nb_entries * durtimebytes); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void esds_box_del(GF_Box *s) { GF_ESDBox *ptr = (GF_ESDBox *)s; if (ptr == NULL) return; if (ptr->desc) gf_odf_desc_del((GF_Descriptor *)ptr->desc); gf_free(ptr); } GF_Err esds_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e=GF_OK; u32 descSize; GF_ESDBox *ptr = (GF_ESDBox *)s; descSize = (u32) (ptr->size); if (descSize) { char *enc_desc = (char*)gf_malloc(sizeof(char) * descSize); if (!enc_desc) return GF_OUT_OF_MEM; //get the payload gf_bs_read_data(bs, enc_desc, descSize); //send it to the OD Codec e = gf_odf_desc_read(enc_desc, descSize, (GF_Descriptor **) &ptr->desc); //OK, free our desc gf_free(enc_desc); if (ptr->desc && (ptr->desc->tag!=GF_ODF_ESD_TAG) ) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid descriptor tag 0x%x in esds\n", ptr->desc->tag)); gf_odf_desc_del((GF_Descriptor*)ptr->desc); ptr->desc=NULL; return GF_ISOM_INVALID_FILE; } if (e) { ptr->desc = NULL; } else { /*fix broken files*/ if (ptr->desc && !ptr->desc->URLString) { if (!ptr->desc->slConfig) { ptr->desc->slConfig = (GF_SLConfig *) gf_odf_desc_new(GF_ODF_SLC_TAG); ptr->desc->slConfig->predefined = SLPredef_MP4; } else if (ptr->desc->slConfig->predefined != SLPredef_MP4) { ptr->desc->slConfig->predefined = SLPredef_MP4; gf_odf_slc_set_pref(ptr->desc->slConfig); } } } } return e; } GF_Box *esds_box_new() { ISOM_DECL_BOX_ALLOC(GF_ESDBox, GF_ISOM_BOX_TYPE_ESDS); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err esds_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u8 *enc_desc; u32 descSize = 0; GF_ESDBox *ptr = (GF_ESDBox *)s; //make sure we write with no ESID and no OCRESID if (ptr->desc) { ptr->desc->ESID = 0; ptr->desc->OCRESID = 0; } e = gf_isom_full_box_write(s, bs); if (e) return e; e = gf_odf_desc_write((GF_Descriptor *)ptr->desc, &enc_desc, &descSize); if (e) return e; gf_bs_write_data(bs, enc_desc, descSize); //free our buffer gf_free(enc_desc); return GF_OK; } GF_Err esds_box_size(GF_Box *s) { u32 descSize = 0; GF_ESDBox *ptr = (GF_ESDBox *)s; descSize = gf_odf_desc_size((GF_Descriptor *)ptr->desc); ptr->size += descSize; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void free_box_del(GF_Box *s) { GF_FreeSpaceBox *ptr = (GF_FreeSpaceBox *)s; if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Err free_box_read(GF_Box *s, GF_BitStream *bs) { u32 bytesToRead; GF_FreeSpaceBox *ptr = (GF_FreeSpaceBox *)s; if (ptr->size > 0xFFFFFFFF) return GF_IO_ERR; bytesToRead = (u32) (ptr->size); if (bytesToRead) { ptr->data = (char*)gf_malloc(bytesToRead * sizeof(char)); if (!ptr->data) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->data, bytesToRead); ptr->dataSize = bytesToRead; } return GF_OK; } GF_Box *free_box_new() { ISOM_DECL_BOX_ALLOC(GF_FreeSpaceBox, GF_ISOM_BOX_TYPE_FREE); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err free_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_FreeSpaceBox *ptr = (GF_FreeSpaceBox *)s; if (ptr->original_4cc) { u32 t = s->type; s->type=ptr->original_4cc; e = gf_isom_box_write_header(s, bs); s->type=t; } else { e = gf_isom_box_write_header(s, bs); } if (e) return e; if (ptr->dataSize) { if (ptr->data) { gf_bs_write_data(bs, ptr->data, ptr->dataSize); } else { u32 i = 0; while (i<ptr->dataSize) { gf_bs_write_u8(bs, 0); i++; } } } return GF_OK; } GF_Err free_box_size(GF_Box *s) { GF_FreeSpaceBox *ptr = (GF_FreeSpaceBox *)s; ptr->size += ptr->dataSize; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void ftyp_box_del(GF_Box 
*s) { GF_FileTypeBox *ptr = (GF_FileTypeBox *) s; if (ptr->altBrand) gf_free(ptr->altBrand); gf_free(ptr); } GF_Box *ftyp_box_new() { ISOM_DECL_BOX_ALLOC(GF_FileTypeBox, GF_ISOM_BOX_TYPE_FTYP); return (GF_Box *)tmp; } GF_Err ftyp_box_read(GF_Box *s,GF_BitStream *bs) { u32 i; GF_FileTypeBox *ptr = (GF_FileTypeBox *)s; ISOM_DECREASE_SIZE(ptr, 8); ptr->majorBrand = gf_bs_read_u32(bs); ptr->minorVersion = gf_bs_read_u32(bs); if (ptr->size % 4) return GF_ISOM_INVALID_FILE; ptr->altCount = ( (u32) (ptr->size)) / 4; if (!ptr->altCount) return GF_OK; ptr->altBrand = (u32*)gf_malloc(sizeof(u32)*ptr->altCount); if (!ptr->altBrand) return GF_OUT_OF_MEM; for (i = 0; i<ptr->altCount; i++) { ptr->altBrand[i] = gf_bs_read_u32(bs); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err ftyp_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_FileTypeBox *ptr = (GF_FileTypeBox *) s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->majorBrand); gf_bs_write_u32(bs, ptr->minorVersion); for (i=0; i<ptr->altCount; i++) { gf_bs_write_u32(bs, ptr->altBrand[i]); } return GF_OK; } GF_Err ftyp_box_size(GF_Box *s) { GF_FileTypeBox *ptr = (GF_FileTypeBox *)s; ptr->size += 8 + ptr->altCount * 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void gnrm_box_del(GF_Box *s) { GF_GenericSampleEntryBox *ptr = (GF_GenericSampleEntryBox *)s; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)ptr); if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Box *gnrm_box_new() { ISOM_DECL_BOX_ALLOC(GF_GenericSampleEntryBox, GF_ISOM_BOX_TYPE_GNRM); gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp); return (GF_Box *)tmp; } //dummy GF_Err gnrm_box_read(GF_Box *s, GF_BitStream *bs) { return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err gnrm_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_GenericSampleEntryBox *ptr = (GF_GenericSampleEntryBox *)s; //carefull we are not writing the box type but the entry type so switch for write ptr->type = ptr->EntryType; e = gf_isom_box_write_header(s, bs); if (e) return e; ptr->type = GF_ISOM_BOX_TYPE_GNRM; gf_bs_write_data(bs, ptr->reserved, 6); gf_bs_write_u16(bs, ptr->dataReferenceIndex); gf_bs_write_data(bs, ptr->data, ptr->data_size); return GF_OK; } GF_Err gnrm_box_size(GF_Box *s) { GF_GenericSampleEntryBox *ptr = (GF_GenericSampleEntryBox *)s; s->type = GF_ISOM_BOX_TYPE_GNRM; ptr->size += 8+ptr->data_size; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void gnrv_box_del(GF_Box *s) { GF_GenericVisualSampleEntryBox *ptr = (GF_GenericVisualSampleEntryBox *)s; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)ptr); if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Box *gnrv_box_new() { ISOM_DECL_BOX_ALLOC(GF_GenericVisualSampleEntryBox, GF_ISOM_BOX_TYPE_GNRV); gf_isom_video_sample_entry_init((GF_VisualSampleEntryBox*) tmp); return (GF_Box *)tmp; } //dummy GF_Err gnrv_box_read(GF_Box *s, GF_BitStream *bs) { return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err gnrv_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_GenericVisualSampleEntryBox *ptr = (GF_GenericVisualSampleEntryBox *)s; //carefull we are not writing the box type but the entry type so switch for write ptr->type = ptr->EntryType; e = gf_isom_box_write_header(s, bs); if (e) return e; ptr->type = GF_ISOM_BOX_TYPE_GNRV; gf_isom_video_sample_entry_write((GF_VisualSampleEntryBox *)ptr, bs); gf_bs_write_data(bs, ptr->data, ptr->data_size); return GF_OK; } GF_Err gnrv_box_size(GF_Box *s) { GF_GenericVisualSampleEntryBox *ptr = (GF_GenericVisualSampleEntryBox *)s; s->type = 
GF_ISOM_BOX_TYPE_GNRV; gf_isom_video_sample_entry_size((GF_VisualSampleEntryBox *)s); ptr->size += ptr->data_size; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void gnra_box_del(GF_Box *s) { GF_GenericAudioSampleEntryBox *ptr = (GF_GenericAudioSampleEntryBox *)s; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)ptr); if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Box *gnra_box_new() { ISOM_DECL_BOX_ALLOC(GF_GenericAudioSampleEntryBox, GF_ISOM_BOX_TYPE_GNRA); gf_isom_audio_sample_entry_init((GF_AudioSampleEntryBox*) tmp); return (GF_Box *)tmp; } //dummy GF_Err gnra_box_read(GF_Box *s, GF_BitStream *bs) { return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err gnra_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_GenericAudioSampleEntryBox *ptr = (GF_GenericAudioSampleEntryBox *)s; //carefull we are not writing the box type but the entry type so switch for write ptr->type = ptr->EntryType; e = gf_isom_box_write_header(s, bs); if (e) return e; ptr->type = GF_ISOM_BOX_TYPE_GNRA; gf_isom_audio_sample_entry_write((GF_AudioSampleEntryBox *)ptr, bs); if (ptr->data) { gf_bs_write_data(bs, ptr->data, ptr->data_size); } return GF_OK; } GF_Err gnra_box_size(GF_Box *s) { GF_GenericAudioSampleEntryBox *ptr = (GF_GenericAudioSampleEntryBox *)s; s->type = GF_ISOM_BOX_TYPE_GNRA; gf_isom_audio_sample_entry_size((GF_AudioSampleEntryBox *)s); ptr->size += ptr->data_size; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void hdlr_box_del(GF_Box *s) { GF_HandlerBox *ptr = (GF_HandlerBox *)s; if (ptr == NULL) return; if (ptr->nameUTF8) gf_free(ptr->nameUTF8); gf_free(ptr); } GF_Err hdlr_box_read(GF_Box *s, GF_BitStream *bs) { u64 cookie; GF_HandlerBox *ptr = (GF_HandlerBox *)s; ISOM_DECREASE_SIZE(ptr, 20); ptr->reserved1 = gf_bs_read_u32(bs); ptr->handlerType = gf_bs_read_u32(bs); gf_bs_read_data(bs, (char*)ptr->reserved2, 12); cookie = gf_bs_get_cookie(bs); if (ptr->handlerType==GF_ISOM_MEDIA_VISUAL) cookie |= GF_ISOM_BS_COOKIE_VISUAL_TRACK; else cookie &= ~GF_ISOM_BS_COOKIE_VISUAL_TRACK; gf_bs_set_cookie(bs, cookie); if (ptr->size) { ptr->nameUTF8 = (char*)gf_malloc((u32) ptr->size); if (!ptr->nameUTF8) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->nameUTF8, (u32) ptr->size); //patch for old QT files - we cannot rely on checking if str[0]==len(str+1) since we may have //cases where the first character of the string decimal value is indeed the same as the string length!! 
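//(for example, a buffer whose first byte is 0x0B followed by 11 more characters passes the str[0]==len(str+1) test whether it is a QT counted string or simply a name that starts with that byte)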
//we had this issue with encryption_import test //we therefore only check if last char is null, and if not so assume old QT style if (ptr->nameUTF8[ptr->size-1]) { memmove(ptr->nameUTF8, ptr->nameUTF8+1, sizeof(char) * (u32) (ptr->size-1) ); ptr->nameUTF8[ptr->size-1] = 0; ptr->store_counted_string = GF_TRUE; } } return GF_OK; } GF_Box *hdlr_box_new() { ISOM_DECL_BOX_ALLOC(GF_HandlerBox, GF_ISOM_BOX_TYPE_HDLR); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err hdlr_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_HandlerBox *ptr = (GF_HandlerBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->reserved1); gf_bs_write_u32(bs, ptr->handlerType); gf_bs_write_data(bs, (char*)ptr->reserved2, 12); if (ptr->nameUTF8) { u32 len = (u32)strlen(ptr->nameUTF8); if (ptr->store_counted_string) { gf_bs_write_u8(bs, len); gf_bs_write_data(bs, ptr->nameUTF8, len); } else { gf_bs_write_data(bs, ptr->nameUTF8, len); gf_bs_write_u8(bs, 0); } } else { gf_bs_write_u8(bs, 0); } return GF_OK; } GF_Err hdlr_box_size(GF_Box *s) { GF_HandlerBox *ptr = (GF_HandlerBox *)s; ptr->size += 20 + 1; //null term or counted string if (ptr->nameUTF8) { ptr->size += strlen(ptr->nameUTF8); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void hinf_box_del(GF_Box *s) { GF_HintInfoBox *hinf = (GF_HintInfoBox *)s; gf_free(hinf); } GF_Box *hinf_box_new() { ISOM_DECL_BOX_ALLOC(GF_HintInfoBox, GF_ISOM_BOX_TYPE_HINF); tmp->child_boxes = gf_list_new(); return (GF_Box *)tmp; } GF_Err hinf_on_child_box(GF_Box *s, GF_Box *a) { GF_MAXRBox *maxR; GF_HintInfoBox *hinf = (GF_HintInfoBox *)s; u32 i; switch (a->type) { case GF_ISOM_BOX_TYPE_MAXR: i=0; while ((maxR = (GF_MAXRBox *)gf_list_enum(hinf->child_boxes, &i))) { if ((maxR->type==GF_ISOM_BOX_TYPE_MAXR) && (maxR->granularity == ((GF_MAXRBox *)a)->granularity)) ERROR_ON_DUPLICATED_BOX(a, s) } break; } return GF_OK; } GF_Err hinf_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, hinf_on_child_box); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err hinf_box_write(GF_Box *s, GF_BitStream *bs) { // GF_HintInfoBox *ptr = (GF_HintInfoBox *)s; if (!s) return GF_BAD_PARAM; return gf_isom_box_write_header(s, bs); } GF_Err hinf_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void hmhd_box_del(GF_Box *s) { GF_HintMediaHeaderBox *ptr = (GF_HintMediaHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err hmhd_box_read(GF_Box *s,GF_BitStream *bs) { GF_HintMediaHeaderBox *ptr = (GF_HintMediaHeaderBox *)s; ISOM_DECREASE_SIZE(ptr, 16); ptr->maxPDUSize = gf_bs_read_u16(bs); ptr->avgPDUSize = gf_bs_read_u16(bs); ptr->maxBitrate = gf_bs_read_u32(bs); ptr->avgBitrate = gf_bs_read_u32(bs); ptr->slidingAverageBitrate = gf_bs_read_u32(bs); return GF_OK; } GF_Box *hmhd_box_new() { ISOM_DECL_BOX_ALLOC(GF_HintMediaHeaderBox, GF_ISOM_BOX_TYPE_HMHD); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err hmhd_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_HintMediaHeaderBox *ptr = (GF_HintMediaHeaderBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u16(bs, ptr->maxPDUSize); gf_bs_write_u16(bs, ptr->avgPDUSize); gf_bs_write_u32(bs, ptr->maxBitrate); gf_bs_write_u32(bs, ptr->avgBitrate); gf_bs_write_u32(bs, ptr->slidingAverageBitrate); return GF_OK; } GF_Err hmhd_box_size(GF_Box *s) { GF_HintMediaHeaderBox *ptr = (GF_HintMediaHeaderBox *)s; ptr->size += 16; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *hnti_box_new() { ISOM_DECL_BOX_ALLOC(GF_HintTrackInfoBox, 
GF_ISOM_BOX_TYPE_HNTI); return (GF_Box *)tmp; } void hnti_box_del(GF_Box *a) { gf_free(a); } GF_Err hnti_on_child_box(GF_Box *s, GF_Box *a) { GF_HintTrackInfoBox *hnti = (GF_HintTrackInfoBox *)s; if (!hnti || !a) return GF_BAD_PARAM; switch (a->type) { //this is the value for GF_RTPBox - same as HintSampleEntry for RTP !!! case GF_ISOM_BOX_TYPE_RTP: case GF_ISOM_BOX_TYPE_SDP: if (hnti->SDP) ERROR_ON_DUPLICATED_BOX(a, s) hnti->SDP = a; break; default: break; } return GF_OK; } GF_Err hnti_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read_ex(s, bs, hnti_on_child_box, s->type); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err hnti_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err hnti_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** GF_SDPBox **********************************************************/ void sdp_box_del(GF_Box *s) { GF_SDPBox *ptr = (GF_SDPBox *)s; if (ptr->sdpText) gf_free(ptr->sdpText); gf_free(ptr); } GF_Err sdp_box_read(GF_Box *s, GF_BitStream *bs) { u32 length; GF_SDPBox *ptr = (GF_SDPBox *)s; if (ptr == NULL) return GF_BAD_PARAM; length = (u32) (ptr->size); //sdp text has no delimiter !!! ptr->sdpText = (char*)gf_malloc(sizeof(char) * (length+1)); if (!ptr->sdpText) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->sdpText, length); ptr->sdpText[length] = 0; return GF_OK; } GF_Box *sdp_box_new() { ISOM_DECL_BOX_ALLOC(GF_SDPBox, GF_ISOM_BOX_TYPE_SDP); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err sdp_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SDPBox *ptr = (GF_SDPBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; //don't write the NULL char!!! if (ptr->sdpText) gf_bs_write_data(bs, ptr->sdpText, (u32) strlen(ptr->sdpText)); return GF_OK; } GF_Err sdp_box_size(GF_Box *s) { GF_SDPBox *ptr = (GF_SDPBox *)s; //don't count the NULL char!!! if (ptr->sdpText) ptr->size += strlen(ptr->sdpText); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void rtp_hnti_box_del(GF_Box *s) { GF_RTPBox *ptr = (GF_RTPBox *)s; if (ptr->sdpText) gf_free(ptr->sdpText); gf_free(ptr); } GF_Err rtp_hnti_box_read(GF_Box *s, GF_BitStream *bs) { u32 length; GF_RTPBox *ptr = (GF_RTPBox *)s; if (ptr == NULL) return GF_BAD_PARAM; ISOM_DECREASE_SIZE(ptr, 4) ptr->subType = gf_bs_read_u32(bs); length = (u32) (ptr->size); //sdp text has no delimiter !!! ptr->sdpText = (char*)gf_malloc(sizeof(char) * (length+1)); if (!ptr->sdpText) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->sdpText, length); ptr->sdpText[length] = 0; return GF_OK; } GF_Box *rtp_hnti_box_new() { ISOM_DECL_BOX_ALLOC(GF_RTPBox, GF_ISOM_BOX_TYPE_RTP); tmp->subType = GF_ISOM_BOX_TYPE_SDP; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err rtp_hnti_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_RTPBox *ptr = (GF_RTPBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->subType); //don't write the NULL char!!! 
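//note: unlike sdp_box_write above, the rtp_hnti write/size code below does not test ptr->sdpText for NULL before calling strlen(); rtp_hnti_box_read always allocates the string, so a NULL here would mean the box was built without any SDP text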
gf_bs_write_data(bs, ptr->sdpText, (u32) strlen(ptr->sdpText)); return GF_OK; } GF_Err rtp_hnti_box_size(GF_Box *s) { GF_RTPBox *ptr = (GF_RTPBox *)s; ptr->size += 4 + strlen(ptr->sdpText); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** TRPY GF_Box **********************************************************/ void trpy_box_del(GF_Box *s) { gf_free((GF_TRPYBox *)s); } GF_Err trpy_box_read(GF_Box *s, GF_BitStream *bs) { GF_TRPYBox *ptr = (GF_TRPYBox *)s; ISOM_DECREASE_SIZE(ptr, 8); ptr->nbBytes = gf_bs_read_u64(bs); return GF_OK; } GF_Box *trpy_box_new() { ISOM_DECL_BOX_ALLOC(GF_TRPYBox, GF_ISOM_BOX_TYPE_TRPY); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err trpy_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TRPYBox *ptr = (GF_TRPYBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->nbBytes); return GF_OK; } GF_Err trpy_box_size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** TOTL GF_Box **********************************************************/ void totl_box_del(GF_Box *s) { gf_free((GF_TRPYBox *)s); } GF_Err totl_box_read(GF_Box *s, GF_BitStream *bs) { GF_TOTLBox *ptr = (GF_TOTLBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->nbBytes = gf_bs_read_u32(bs); return GF_OK; } GF_Box *totl_box_new() { ISOM_DECL_BOX_ALLOC(GF_TOTLBox, GF_ISOM_BOX_TYPE_TOTL); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err totl_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TOTLBox *ptr = (GF_TOTLBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nbBytes); return GF_OK; } GF_Err totl_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** NUMP GF_Box **********************************************************/ void nump_box_del(GF_Box *s) { gf_free((GF_NUMPBox *)s); } GF_Err nump_box_read(GF_Box *s, GF_BitStream *bs) { GF_NUMPBox *ptr = (GF_NUMPBox *)s; ISOM_DECREASE_SIZE(ptr, 8); ptr->nbPackets = gf_bs_read_u64(bs); return GF_OK; } GF_Box *nump_box_new() { ISOM_DECL_BOX_ALLOC(GF_NUMPBox, GF_ISOM_BOX_TYPE_NUMP); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err nump_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_NUMPBox *ptr = (GF_NUMPBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->nbPackets); return GF_OK; } GF_Err nump_box_size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** NPCK GF_Box **********************************************************/ void npck_box_del(GF_Box *s) { gf_free((GF_NPCKBox *)s); } GF_Err npck_box_read(GF_Box *s, GF_BitStream *bs) { GF_NPCKBox *ptr = (GF_NPCKBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->nbPackets = gf_bs_read_u32(bs); return GF_OK; } GF_Box *npck_box_new() { ISOM_DECL_BOX_ALLOC(GF_NPCKBox, GF_ISOM_BOX_TYPE_NPCK); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err npck_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_NPCKBox *ptr = (GF_NPCKBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nbPackets); return GF_OK; } GF_Err npck_box_size(GF_Box *s) { s->size += 4; return GF_OK; } 
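/* npck mirrors nump above: same read/write/size template, with the packet counter stored on 32 bits instead of 64 */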
#endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** TPYL GF_Box **********************************************************/ void tpyl_box_del(GF_Box *s) { gf_free((GF_NTYLBox *)s); } GF_Err tpyl_box_read(GF_Box *s, GF_BitStream *bs) { GF_NTYLBox *ptr = (GF_NTYLBox *)s; if (ptr == NULL) return GF_BAD_PARAM; ISOM_DECREASE_SIZE(ptr, 8); ptr->nbBytes = gf_bs_read_u64(bs); return GF_OK; } GF_Box *tpyl_box_new() { ISOM_DECL_BOX_ALLOC(GF_NTYLBox, GF_ISOM_BOX_TYPE_TPYL); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tpyl_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_NTYLBox *ptr = (GF_NTYLBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->nbBytes); return GF_OK; } GF_Err tpyl_box_size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** TPAY GF_Box **********************************************************/ void tpay_box_del(GF_Box *s) { gf_free((GF_TPAYBox *)s); } GF_Err tpay_box_read(GF_Box *s, GF_BitStream *bs) { GF_TPAYBox *ptr = (GF_TPAYBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->nbBytes = gf_bs_read_u32(bs); return GF_OK; } GF_Box *tpay_box_new() { ISOM_DECL_BOX_ALLOC(GF_TPAYBox, GF_ISOM_BOX_TYPE_TPAY); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tpay_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TPAYBox *ptr = (GF_TPAYBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nbBytes); return GF_OK; } GF_Err tpay_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** MAXR GF_Box **********************************************************/ void maxr_box_del(GF_Box *s) { gf_free((GF_MAXRBox *)s); } GF_Err maxr_box_read(GF_Box *s, GF_BitStream *bs) { GF_MAXRBox *ptr = (GF_MAXRBox *)s; if (ptr == NULL) return GF_BAD_PARAM; ISOM_DECREASE_SIZE(ptr, 8); ptr->granularity = gf_bs_read_u32(bs); ptr->maxDataRate = gf_bs_read_u32(bs); return GF_OK; } GF_Box *maxr_box_new() { ISOM_DECL_BOX_ALLOC(GF_MAXRBox, GF_ISOM_BOX_TYPE_MAXR); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err maxr_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MAXRBox *ptr = (GF_MAXRBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->granularity); gf_bs_write_u32(bs, ptr->maxDataRate); return GF_OK; } GF_Err maxr_box_size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** DMED GF_Box **********************************************************/ void dmed_box_del(GF_Box *s) { gf_free((GF_DMEDBox *)s); } GF_Err dmed_box_read(GF_Box *s, GF_BitStream *bs) { GF_DMEDBox *ptr = (GF_DMEDBox *)s; ISOM_DECREASE_SIZE(ptr, 8); ptr->nbBytes = gf_bs_read_u64(bs); return GF_OK; } GF_Box *dmed_box_new() { ISOM_DECL_BOX_ALLOC(GF_DMEDBox, GF_ISOM_BOX_TYPE_DMED); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dmed_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_DMEDBox *ptr = (GF_DMEDBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->nbBytes); return GF_OK; } GF_Err dmed_box_size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ 
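/*
 NOTE (summary of the hint statistics boxes in this section): each of these boxes carries one
 fixed-size counter on top of the box header, read in *_box_read, mirrored in *_box_write and
 accounted for in *_box_size:
	64-bit payload: trpy, nump, tpyl, dmed (above), dimm, drep (below)
	32-bit payload: totl, npck, tpay (above), tmin, tmax, pmax, dmax (below)
	maxr (above) is the exception, with two 32-bit fields (granularity, maxDataRate)
*/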
/********************************************************** DIMM GF_Box **********************************************************/ void dimm_box_del(GF_Box *s) { gf_free((GF_DIMMBox *)s); } GF_Err dimm_box_read(GF_Box *s, GF_BitStream *bs) { GF_DIMMBox *ptr = (GF_DIMMBox *)s; ISOM_DECREASE_SIZE(ptr, 8) ptr->nbBytes = gf_bs_read_u64(bs); return GF_OK; } GF_Box *dimm_box_new() { ISOM_DECL_BOX_ALLOC(GF_DIMMBox, GF_ISOM_BOX_TYPE_DIMM); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dimm_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_DIMMBox *ptr = (GF_DIMMBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->nbBytes); return GF_OK; } GF_Err dimm_box_size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** DREP GF_Box **********************************************************/ void drep_box_del(GF_Box *s) { gf_free((GF_DREPBox *)s); } GF_Err drep_box_read(GF_Box *s, GF_BitStream *bs) { GF_DREPBox *ptr = (GF_DREPBox *)s; ISOM_DECREASE_SIZE(ptr, 8) ptr->nbBytes = gf_bs_read_u64(bs); return GF_OK; } GF_Box *drep_box_new() { ISOM_DECL_BOX_ALLOC(GF_DREPBox, GF_ISOM_BOX_TYPE_DREP); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err drep_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_DREPBox *ptr = (GF_DREPBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->nbBytes); return GF_OK; } GF_Err drep_box_size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** TMIN GF_Box **********************************************************/ void tmin_box_del(GF_Box *s) { gf_free((GF_TMINBox *)s); } GF_Err tmin_box_read(GF_Box *s, GF_BitStream *bs) { GF_TMINBox *ptr = (GF_TMINBox *)s; ISOM_DECREASE_SIZE(ptr, 4) ptr->minTime = gf_bs_read_u32(bs); return GF_OK; } GF_Box *tmin_box_new() { ISOM_DECL_BOX_ALLOC(GF_TMINBox, GF_ISOM_BOX_TYPE_TMIN); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tmin_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TMINBox *ptr = (GF_TMINBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->minTime); return GF_OK; } GF_Err tmin_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** TMAX GF_Box **********************************************************/ void tmax_box_del(GF_Box *s) { gf_free((GF_TMAXBox *)s); } GF_Err tmax_box_read(GF_Box *s, GF_BitStream *bs) { GF_TMAXBox *ptr = (GF_TMAXBox *)s; ISOM_DECREASE_SIZE(ptr, 4) ptr->maxTime = gf_bs_read_u32(bs); return GF_OK; } GF_Box *tmax_box_new() { ISOM_DECL_BOX_ALLOC(GF_TMAXBox, GF_ISOM_BOX_TYPE_TMAX); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tmax_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TMAXBox *ptr = (GF_TMAXBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->maxTime); return GF_OK; } GF_Err tmax_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** PMAX GF_Box **********************************************************/ void pmax_box_del(GF_Box *s) { gf_free((GF_PMAXBox *)s); } GF_Err pmax_box_read(GF_Box *s, 
GF_BitStream *bs) { GF_PMAXBox *ptr = (GF_PMAXBox *)s; ISOM_DECREASE_SIZE(ptr, 4) ptr->maxSize = gf_bs_read_u32(bs); return GF_OK; } GF_Box *pmax_box_new() { ISOM_DECL_BOX_ALLOC(GF_PMAXBox, GF_ISOM_BOX_TYPE_PMAX); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err pmax_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_PMAXBox *ptr = (GF_PMAXBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->maxSize); return GF_OK; } GF_Err pmax_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** DMAX GF_Box **********************************************************/ void dmax_box_del(GF_Box *s) { gf_free((GF_DMAXBox *)s); } GF_Err dmax_box_read(GF_Box *s, GF_BitStream *bs) { GF_DMAXBox *ptr = (GF_DMAXBox *)s; ISOM_DECREASE_SIZE(ptr, 4) ptr->maxDur = gf_bs_read_u32(bs); return GF_OK; } GF_Box *dmax_box_new() { ISOM_DECL_BOX_ALLOC(GF_DMAXBox, GF_ISOM_BOX_TYPE_DMAX); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dmax_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_DMAXBox *ptr = (GF_DMAXBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->maxDur); return GF_OK; } GF_Err dmax_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** PAYT GF_Box **********************************************************/ void payt_box_del(GF_Box *s) { GF_PAYTBox *payt = (GF_PAYTBox *)s; if (payt->payloadString) gf_free(payt->payloadString); gf_free(payt); } GF_Err payt_box_read(GF_Box *s, GF_BitStream *bs) { u32 length; GF_PAYTBox *ptr = (GF_PAYTBox *)s; ISOM_DECREASE_SIZE(ptr, 5 ); ptr->payloadCode = gf_bs_read_u32(bs); length = gf_bs_read_u8(bs); ISOM_DECREASE_SIZE(ptr, length); ptr->payloadString = (char*)gf_malloc(sizeof(char) * (length+1) ); if (! ptr->payloadString) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->payloadString, length); ptr->payloadString[length] = 0; return GF_OK; } GF_Box *payt_box_new() { ISOM_DECL_BOX_ALLOC(GF_PAYTBox, GF_ISOM_BOX_TYPE_PAYT); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err payt_box_write(GF_Box *s, GF_BitStream *bs) { u32 len; GF_Err e; GF_PAYTBox *ptr = (GF_PAYTBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->payloadCode); len = ptr->payloadString ? (u32) strlen(ptr->payloadString) : 0; gf_bs_write_u8(bs, len); if (len) gf_bs_write_data(bs, ptr->payloadString, len); return GF_OK; } GF_Err payt_box_size(GF_Box *s) { GF_PAYTBox *ptr = (GF_PAYTBox *)s; s->size += 4 + 1; if (ptr->payloadString) ptr->size += strlen(ptr->payloadString); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** PAYT GF_Box **********************************************************/ void name_box_del(GF_Box *s) { GF_NameBox *name = (GF_NameBox *)s; if (name->string) gf_free(name->string); gf_free(name); } GF_Err name_box_read(GF_Box *s, GF_BitStream *bs) { u32 length; GF_NameBox *ptr = (GF_NameBox *)s; length = (u32) (ptr->size); ptr->string = (char*)gf_malloc(sizeof(char) * (length+1)); if (! 
ptr->string) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->string, length); ptr->string[length] = 0; return GF_OK; } GF_Box *name_box_new() { ISOM_DECL_BOX_ALLOC(GF_NameBox, GF_ISOM_BOX_TYPE_NAME); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err name_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_NameBox *ptr = (GF_NameBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; if (ptr->string) { gf_bs_write_data(bs, ptr->string, (u32) strlen(ptr->string) + 1); } return GF_OK; } GF_Err name_box_size(GF_Box *s) { GF_NameBox *ptr = (GF_NameBox *)s; if (ptr->string) ptr->size += strlen(ptr->string) + 1; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void tssy_box_del(GF_Box *s) { gf_free(s); } GF_Err tssy_box_read(GF_Box *s, GF_BitStream *bs) { GF_TimeStampSynchronyBox *ptr = (GF_TimeStampSynchronyBox *)s; ISOM_DECREASE_SIZE(ptr, 1) gf_bs_read_int(bs, 6); ptr->timestamp_sync = gf_bs_read_int(bs, 2); return GF_OK; } GF_Box *tssy_box_new() { ISOM_DECL_BOX_ALLOC(GF_TimeStampSynchronyBox, GF_ISOM_BOX_TYPE_TSSY); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tssy_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TimeStampSynchronyBox *ptr = (GF_TimeStampSynchronyBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_int(bs, 0, 6); gf_bs_write_int(bs, ptr->timestamp_sync, 2); return GF_OK; } GF_Err tssy_box_size(GF_Box *s) { s->size += 1; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void srpp_box_del(GF_Box *s) { gf_free(s); } GF_Err srpp_on_child_box(GF_Box *s, GF_Box *a) { GF_SRTPProcessBox *ptr = (GF_SRTPProcessBox *)s; switch(a->type) { case GF_ISOM_BOX_TYPE_SCHI: if (ptr->info) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->info = (GF_SchemeInformationBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_SCHM: if (ptr->scheme_type) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->scheme_type = (GF_SchemeTypeBox *)a; return GF_OK; } return GF_OK; } GF_Err srpp_box_read(GF_Box *s, GF_BitStream *bs) { GF_SRTPProcessBox *ptr = (GF_SRTPProcessBox *)s; ISOM_DECREASE_SIZE(s, 16) ptr->encryption_algorithm_rtp = gf_bs_read_u32(bs); ptr->encryption_algorithm_rtcp = gf_bs_read_u32(bs); ptr->integrity_algorithm_rtp = gf_bs_read_u32(bs); ptr->integrity_algorithm_rtcp = gf_bs_read_u32(bs); return gf_isom_box_array_read(s, bs, srpp_on_child_box); } GF_Box *srpp_box_new() { ISOM_DECL_BOX_ALLOC(GF_SRTPProcessBox, GF_ISOM_BOX_TYPE_SRPP); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err srpp_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SRTPProcessBox *ptr = (GF_SRTPProcessBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->encryption_algorithm_rtp); gf_bs_write_u32(bs, ptr->encryption_algorithm_rtcp); gf_bs_write_u32(bs, ptr->integrity_algorithm_rtp); gf_bs_write_u32(bs, ptr->integrity_algorithm_rtcp); return GF_OK; } GF_Err srpp_box_size(GF_Box *s) { u32 pos = 0; GF_SRTPProcessBox *ptr = (GF_SRTPProcessBox *)s; s->size += 16; gf_isom_check_position(s, (GF_Box*)ptr->info, &pos); gf_isom_check_position(s, (GF_Box*)ptr->scheme_type, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void rssr_box_del(GF_Box *s) { gf_free(s); } GF_Err rssr_box_read(GF_Box *s, GF_BitStream *bs) { GF_ReceivedSsrcBox *ptr = (GF_ReceivedSsrcBox *)s; ISOM_DECREASE_SIZE(ptr, 4) ptr->ssrc = gf_bs_read_u32(bs); return GF_OK; } GF_Box *rssr_box_new() { ISOM_DECL_BOX_ALLOC(GF_ReceivedSsrcBox, GF_ISOM_BOX_TYPE_RSSR); 
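/* nothing else to set up for rssr: the single ssrc field is filled in by rssr_box_read when the payload is parsed */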
return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err rssr_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ReceivedSsrcBox *ptr = (GF_ReceivedSsrcBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->ssrc); return GF_OK; } GF_Err rssr_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void iods_box_del(GF_Box *s) { GF_ObjectDescriptorBox *ptr = (GF_ObjectDescriptorBox *)s; if (ptr == NULL) return; if (ptr->descriptor) gf_odf_desc_del(ptr->descriptor); gf_free(ptr); } GF_Err iods_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 descSize; char *desc; GF_ObjectDescriptorBox *ptr = (GF_ObjectDescriptorBox *)s; //use the OD codec... descSize = (u32) (ptr->size); desc = (char*)gf_malloc(sizeof(char) * descSize); if (!desc) return GF_OUT_OF_MEM; gf_bs_read_data(bs, desc, descSize); e = gf_odf_desc_read(desc, descSize, &ptr->descriptor); //OK, free our desc gf_free(desc); return e; } GF_Box *iods_box_new() { ISOM_DECL_BOX_ALLOC(GF_ObjectDescriptorBox, GF_ISOM_BOX_TYPE_IODS); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err iods_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 descSize; u8 *desc; GF_ObjectDescriptorBox *ptr = (GF_ObjectDescriptorBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; //call our OD codec e = gf_odf_desc_write(ptr->descriptor, &desc, &descSize); if (e) return e; gf_bs_write_data(bs, desc, descSize); //and free our stuff maybe!! gf_free(desc); return GF_OK; } GF_Err iods_box_size(GF_Box *s) { GF_ObjectDescriptorBox *ptr = (GF_ObjectDescriptorBox *)s; ptr->size += gf_odf_desc_size(ptr->descriptor); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mdat_box_del(GF_Box *s) { GF_MediaDataBox *ptr = (GF_MediaDataBox *)s; if (!s) return; if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Err mdat_box_read(GF_Box *s, GF_BitStream *bs) { GF_MediaDataBox *ptr = (GF_MediaDataBox *)s; if (ptr == NULL) return GF_BAD_PARAM; ptr->dataSize = s->size; ptr->bsOffset = gf_bs_get_position(bs); //then skip these bytes gf_bs_skip_bytes(bs, ptr->dataSize); return GF_OK; } GF_Box *mdat_box_new() { ISOM_DECL_BOX_ALLOC(GF_MediaDataBox, GF_ISOM_BOX_TYPE_MDAT); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mdat_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MediaDataBox *ptr = (GF_MediaDataBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; //make sure we have some data ... 
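//(mdat_box_read above never loads the payload: it only records dataSize and the bitstream offset, then skips the bytes, so ptr->data is only set when the writer filled it in explicitly)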
//if not, we handle that independently (edit files) if (ptr->data) { gf_bs_write_data(bs, ptr->data, (u32) ptr->dataSize); } return GF_OK; } GF_Err mdat_box_size(GF_Box *s) { GF_MediaDataBox *ptr = (GF_MediaDataBox *)s; ptr->size += ptr->dataSize; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mdhd_box_del(GF_Box *s) { GF_MediaHeaderBox *ptr = (GF_MediaHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err mdhd_box_read(GF_Box *s, GF_BitStream *bs) { GF_MediaHeaderBox *ptr = (GF_MediaHeaderBox *)s; if (ptr->version == 1) { ISOM_DECREASE_SIZE(ptr, 28) ptr->creationTime = gf_bs_read_u64(bs); ptr->modificationTime = gf_bs_read_u64(bs); ptr->timeScale = gf_bs_read_u32(bs); ptr->duration = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 16) ptr->creationTime = gf_bs_read_u32(bs); ptr->modificationTime = gf_bs_read_u32(bs); ptr->timeScale = gf_bs_read_u32(bs); ptr->duration = gf_bs_read_u32(bs); } if (!ptr->timeScale) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Media header timescale is 0 - defaulting to 90000\n" )); ptr->timeScale = 90000; } ptr->original_duration = ptr->duration; ISOM_DECREASE_SIZE(ptr, 4) //our padding bit gf_bs_read_int(bs, 1); //the spec is unclear here, just says "the value 0 is interpreted as undetermined" ptr->packedLanguage[0] = gf_bs_read_int(bs, 5); ptr->packedLanguage[1] = gf_bs_read_int(bs, 5); ptr->packedLanguage[2] = gf_bs_read_int(bs, 5); //but before or after compaction ?? We assume before if (ptr->packedLanguage[0] || ptr->packedLanguage[1] || ptr->packedLanguage[2]) { ptr->packedLanguage[0] += 0x60; ptr->packedLanguage[1] += 0x60; ptr->packedLanguage[2] += 0x60; } else { ptr->packedLanguage[0] = 'u'; ptr->packedLanguage[1] = 'n'; ptr->packedLanguage[2] = 'd'; } ptr->reserved = gf_bs_read_u16(bs); return GF_OK; } GF_Box *mdhd_box_new() { ISOM_DECL_BOX_ALLOC(GF_MediaHeaderBox, GF_ISOM_BOX_TYPE_MDHD); tmp->packedLanguage[0] = 'u'; tmp->packedLanguage[1] = 'n'; tmp->packedLanguage[2] = 'd'; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mdhd_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MediaHeaderBox *ptr = (GF_MediaHeaderBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->version == 1) { gf_bs_write_u64(bs, ptr->creationTime); gf_bs_write_u64(bs, ptr->modificationTime); gf_bs_write_u32(bs, ptr->timeScale); gf_bs_write_u64(bs, ptr->duration); } else { gf_bs_write_u32(bs, (u32) ptr->creationTime); gf_bs_write_u32(bs, (u32) ptr->modificationTime); gf_bs_write_u32(bs, ptr->timeScale); gf_bs_write_u32(bs, (u32) ptr->duration); } //SPECS: BIT(1) of padding gf_bs_write_int(bs, 0, 1); gf_bs_write_int(bs, ptr->packedLanguage[0] - 0x60, 5); gf_bs_write_int(bs, ptr->packedLanguage[1] - 0x60, 5); gf_bs_write_int(bs, ptr->packedLanguage[2] - 0x60, 5); gf_bs_write_u16(bs, ptr->reserved); return GF_OK; } GF_Err mdhd_box_size(GF_Box *s) { GF_MediaHeaderBox *ptr = (GF_MediaHeaderBox *)s; ptr->version = (ptr->duration>0xFFFFFFFF) ? 1 : 0; ptr->size += 4; ptr->size += (ptr->version == 1) ? 
28 : 16; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mdia_box_del(GF_Box *s) { GF_MediaBox *ptr = (GF_MediaBox *)s; if (ptr == NULL) return; if (ptr->nalu_parser) gf_bs_del(ptr->nalu_parser); if (ptr->nalu_out_bs) gf_bs_del(ptr->nalu_out_bs); if (ptr->nalu_ps_bs) gf_bs_del(ptr->nalu_ps_bs); if (ptr->extracted_bs) gf_bs_del(ptr->extracted_bs); if (ptr->extracted_samp) gf_isom_sample_del(&ptr->extracted_samp); if (ptr->in_sample_buffer) gf_free(ptr->in_sample_buffer); if (ptr->tmp_nal_copy_buffer) gf_free(ptr->tmp_nal_copy_buffer); gf_free(ptr); } GF_Err mdia_on_child_box(GF_Box *s, GF_Box *a) { GF_MediaBox *ptr = (GF_MediaBox *)s; switch(a->type) { case GF_ISOM_BOX_TYPE_MDHD: if (ptr->mediaHeader) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->mediaHeader = (GF_MediaHeaderBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_HDLR: if (ptr->handler) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->handler = (GF_HandlerBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_MINF: if (ptr->information) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->information = (GF_MediaInformationBox *)a; return GF_OK; } return GF_OK; } GF_Err mdia_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; u64 cookie = gf_bs_get_cookie(bs); cookie &= ~GF_ISOM_BS_COOKIE_VISUAL_TRACK; gf_bs_set_cookie(bs, cookie); e = gf_isom_box_array_read(s, bs, mdia_on_child_box); gf_bs_set_cookie(bs, cookie); if (e) return e; if (!((GF_MediaBox *)s)->information) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MediaInformationBox\n")); return GF_ISOM_INVALID_FILE; } if (!((GF_MediaBox *)s)->handler) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing HandlerBox\n")); return GF_ISOM_INVALID_FILE; } if (!((GF_MediaBox *)s)->mediaHeader) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MediaHeaderBox\n")); return GF_ISOM_INVALID_FILE; } return GF_OK; } GF_Box *mdia_box_new() { ISOM_DECL_BOX_ALLOC(GF_MediaBox, GF_ISOM_BOX_TYPE_MDIA); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mdia_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err mdia_box_size(GF_Box *s) { u32 pos = 0; GF_MediaBox *ptr = (GF_MediaBox *)s; //Header first gf_isom_check_position(s, (GF_Box*)ptr->mediaHeader, &pos); //then handler gf_isom_check_position(s, (GF_Box*)ptr->handler, &pos); //then info gf_isom_check_position(s, (GF_Box*)ptr->information, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mfra_box_del(GF_Box *s) { GF_MovieFragmentRandomAccessBox *ptr = (GF_MovieFragmentRandomAccessBox *)s; if (ptr == NULL) return; gf_list_del(ptr->tfra_list); gf_free(ptr); } GF_Box *mfra_box_new() { ISOM_DECL_BOX_ALLOC(GF_MovieFragmentRandomAccessBox, GF_ISOM_BOX_TYPE_MFRA); tmp->tfra_list = gf_list_new(); return (GF_Box *)tmp; } GF_Err mfra_on_child_box(GF_Box *s, GF_Box *a) { GF_MovieFragmentRandomAccessBox *ptr = (GF_MovieFragmentRandomAccessBox *)s; switch(a->type) { case GF_ISOM_BOX_TYPE_TFRA: return gf_list_add(ptr->tfra_list, a); case GF_ISOM_BOX_TYPE_MFRO: if (ptr->mfro) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->mfro = (GF_MovieFragmentRandomAccessOffsetBox *)a; return GF_OK; } return GF_OK; } GF_Err mfra_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, mfra_on_child_box); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mfra_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err mfra_box_size(GF_Box *s) { u32 pos=0; GF_MovieFragmentRandomAccessBox *ptr = (GF_MovieFragmentRandomAccessBox *)s; gf_isom_check_position_list(s, ptr->tfra_list, &pos); 
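/* Keep mfro last inside mfra: its container_size field stores the byte size of the enclosing mfra, so a demuxer can locate mfra by scanning back from the end of the file. Illustrative sketch, not part of the original source, assuming mfra is the last top-level box and bs covers the whole file:
	u64 fsize = gf_bs_get_size(bs);
	gf_bs_seek(bs, fsize - 4);           //last 4 bytes of mfro = size of the enclosing mfra
	u32 mfra_size = gf_bs_read_u32(bs);
	gf_bs_seek(bs, fsize - mfra_size);   //start of the mfra box
*/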
gf_isom_check_position(s, (GF_Box *)ptr->mfro, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void tfra_box_del(GF_Box *s) { GF_TrackFragmentRandomAccessBox *ptr = (GF_TrackFragmentRandomAccessBox *)s; if (ptr == NULL) return; if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Box *tfra_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackFragmentRandomAccessBox, GF_ISOM_BOX_TYPE_TFRA); return (GF_Box *)tmp; } GF_Err tfra_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_RandomAccessEntry *p = 0; GF_TrackFragmentRandomAccessBox *ptr = (GF_TrackFragmentRandomAccessBox *)s; ISOM_DECREASE_SIZE(ptr, 12); ptr->track_id = gf_bs_read_u32(bs); if (gf_bs_read_int(bs, 26) != 0) return GF_ISOM_INVALID_FILE; ptr->traf_bits = (gf_bs_read_int(bs, 2) + 1) * 8; ptr->trun_bits = (gf_bs_read_int(bs, 2) + 1) * 8; ptr->sample_bits = (gf_bs_read_int(bs, 2) + 1) * 8; ptr->nb_entries = gf_bs_read_u32(bs); if (ptr->version == 1) { if (ptr->nb_entries > ptr->size / (16+(ptr->traf_bits+ptr->trun_bits+ptr->sample_bits)/8)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in traf\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } } else { if (ptr->nb_entries > ptr->size / (8+(ptr->traf_bits+ptr->trun_bits+ptr->sample_bits)/8)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in traf\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } } if (ptr->nb_entries) { p = (GF_RandomAccessEntry *) gf_malloc(sizeof(GF_RandomAccessEntry) * ptr->nb_entries); if (!p) return GF_OUT_OF_MEM; } ptr->entries = p; for (i=0; i<ptr->nb_entries; i++) { memset(p, 0, sizeof(GF_RandomAccessEntry)); if (ptr->version == 1) { p->time = gf_bs_read_u64(bs); p->moof_offset = gf_bs_read_u64(bs); } else { p->time = gf_bs_read_u32(bs); p->moof_offset = gf_bs_read_u32(bs); } p->traf_number = gf_bs_read_int(bs, ptr->traf_bits); p->trun_number = gf_bs_read_int(bs, ptr->trun_bits); p->sample_number = gf_bs_read_int(bs, ptr->sample_bits); ++p; } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tfra_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i, sap_nb_entries; GF_TrackFragmentRandomAccessBox *ptr = (GF_TrackFragmentRandomAccessBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->track_id); gf_bs_write_int(bs, 0, 26); gf_bs_write_int(bs, ptr->traf_bits/8 - 1, 2); gf_bs_write_int(bs, ptr->trun_bits/8 - 1, 2); gf_bs_write_int(bs, ptr->sample_bits/8 - 1, 2); sap_nb_entries = 0; for (i=0; i<ptr->nb_entries; i++) { GF_RandomAccessEntry *p = &ptr->entries[i]; //no sap found, do not store if (p->trun_number) sap_nb_entries++; } gf_bs_write_u32(bs, sap_nb_entries); for (i=0; i<ptr->nb_entries; i++) { GF_RandomAccessEntry *p = &ptr->entries[i]; //no sap found, do not store if (!p->trun_number) continue; if (ptr->version==1) { gf_bs_write_u64(bs, p->time); gf_bs_write_u64(bs, p->moof_offset); } else { gf_bs_write_u32(bs, (u32) p->time); gf_bs_write_u32(bs, (u32) p->moof_offset); } gf_bs_write_int(bs, p->traf_number, ptr->traf_bits); gf_bs_write_int(bs, p->trun_number, ptr->trun_bits); gf_bs_write_int(bs, p->sample_number, ptr->sample_bits); } return GF_OK; } GF_Err tfra_box_size(GF_Box *s) { u32 i; GF_TrackFragmentRandomAccessBox *ptr = (GF_TrackFragmentRandomAccessBox *)s; ptr->size += 12; for (i=0; i<ptr->nb_entries; i++) { GF_RandomAccessEntry *p = &ptr->entries[i]; //no sap found, do not store if (!p->trun_number) continue; ptr->size += ((ptr->version==1) ? 
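/* per entry: time + moof_offset take 16 bytes (two u64) in version 1, 8 bytes (two u32) otherwise, plus the traf/trun/sample numbers at their configured byte widths */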
16 : 8 ) + ptr->traf_bits/8 + ptr->trun_bits/8 + ptr->sample_bits/8; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mfro_box_del(GF_Box *s) { GF_MovieFragmentRandomAccessOffsetBox *ptr = (GF_MovieFragmentRandomAccessOffsetBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Box *mfro_box_new() { ISOM_DECL_BOX_ALLOC(GF_MovieFragmentRandomAccessOffsetBox, GF_ISOM_BOX_TYPE_MFRO); return (GF_Box *)tmp; } GF_Err mfro_box_read(GF_Box *s, GF_BitStream *bs) { GF_MovieFragmentRandomAccessOffsetBox *ptr = (GF_MovieFragmentRandomAccessOffsetBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->container_size = gf_bs_read_u32(bs); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mfro_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MovieFragmentRandomAccessOffsetBox *ptr = (GF_MovieFragmentRandomAccessOffsetBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->container_size); return GF_OK; } GF_Err mfro_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void elng_box_del(GF_Box *s) { GF_ExtendedLanguageBox *ptr = (GF_ExtendedLanguageBox *)s; if (ptr == NULL) return; if (ptr->extended_language) gf_free(ptr->extended_language); gf_free(ptr); } GF_Err elng_box_read(GF_Box *s, GF_BitStream *bs) { GF_ExtendedLanguageBox *ptr = (GF_ExtendedLanguageBox *)s; if (ptr->size) { ptr->extended_language = (char*)gf_malloc((u32) ptr->size); if (ptr->extended_language == NULL) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->extended_language, (u32) ptr->size); /*safety check in case the string is not null-terminated*/ if (ptr->extended_language[ptr->size-1]) { char *str = (char*)gf_malloc((u32) ptr->size + 1); if (!str) return GF_OUT_OF_MEM; memcpy(str, ptr->extended_language, (u32) ptr->size); str[ptr->size] = 0; gf_free(ptr->extended_language); ptr->extended_language = str; } } return GF_OK; } GF_Box *elng_box_new() { ISOM_DECL_BOX_ALLOC(GF_MediaBox, GF_ISOM_BOX_TYPE_ELNG); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err elng_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ExtendedLanguageBox *ptr = (GF_ExtendedLanguageBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->extended_language) { gf_bs_write_data(bs, ptr->extended_language, (u32)(strlen(ptr->extended_language)+1)); } return GF_OK; } GF_Err elng_box_size(GF_Box *s) { GF_ExtendedLanguageBox *ptr = (GF_ExtendedLanguageBox *)s; if (ptr->extended_language) { ptr->size += strlen(ptr->extended_language)+1; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void mfhd_box_del(GF_Box *s) { GF_MovieFragmentHeaderBox *ptr = (GF_MovieFragmentHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err mfhd_box_read(GF_Box *s, GF_BitStream *bs) { GF_MovieFragmentHeaderBox *ptr = (GF_MovieFragmentHeaderBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->sequence_number = gf_bs_read_u32(bs); return GF_OK; } GF_Box *mfhd_box_new() { ISOM_DECL_BOX_ALLOC(GF_MovieFragmentHeaderBox, GF_ISOM_BOX_TYPE_MFHD); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mfhd_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MovieFragmentHeaderBox *ptr = (GF_MovieFragmentHeaderBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->sequence_number); return GF_OK; } GF_Err mfhd_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ void minf_box_del(GF_Box *s) { GF_MediaInformationBox *ptr = 
(GF_MediaInformationBox *)s; if (ptr == NULL) return; //if we have a Handler not self-contained, delete it (the self-contained belongs to the movie) if (ptr->dataHandler) { gf_isom_datamap_close(ptr); } gf_free(ptr); } GF_Err minf_on_child_box(GF_Box *s, GF_Box *a) { GF_MediaInformationBox *ptr = (GF_MediaInformationBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_NMHD: case GF_ISOM_BOX_TYPE_STHD: case GF_ISOM_BOX_TYPE_VMHD: case GF_ISOM_BOX_TYPE_SMHD: case GF_ISOM_BOX_TYPE_HMHD: case GF_ISOM_BOX_TYPE_GMHD: if (ptr->InfoHeader) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->InfoHeader = a; return GF_OK; case GF_ISOM_BOX_TYPE_DINF: if (ptr->dataInformation) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->dataInformation = (GF_DataInformationBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_STBL: if (ptr->sampleTable ) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->sampleTable = (GF_SampleTableBox *)a; return GF_OK; } return GF_OK; } GF_Err minf_box_read(GF_Box *s, GF_BitStream *bs) { GF_MediaInformationBox *ptr = (GF_MediaInformationBox *)s; GF_Err e; e = gf_isom_box_array_read(s, bs, minf_on_child_box); if (!e && ! ptr->dataInformation) { GF_Box *url; GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing DataInformationBox\n")); //commented on purpose, we are still able to handle the file, we only throw an error but keep processing // e = GF_ISOM_INVALID_FILE; //add a dinf box to avoid any access to a null dinf ptr->dataInformation = (GF_DataInformationBox *) gf_isom_box_new_parent(&ptr->child_boxes, GF_ISOM_BOX_TYPE_DINF); if (!ptr->dataInformation) return GF_OUT_OF_MEM; ptr->dataInformation->dref = (GF_DataReferenceBox *) gf_isom_box_new_parent(&ptr->dataInformation->child_boxes, GF_ISOM_BOX_TYPE_DREF); if (!ptr->dataInformation->dref) return GF_OUT_OF_MEM; url = gf_isom_box_new_parent(&ptr->dataInformation->dref->child_boxes, GF_ISOM_BOX_TYPE_URL); if (!url) return GF_OUT_OF_MEM; ((GF_FullBox*)url)->flags = 1; } return e; } GF_Box *minf_box_new() { ISOM_DECL_BOX_ALLOC(GF_MediaInformationBox, GF_ISOM_BOX_TYPE_MINF); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err minf_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err minf_box_size(GF_Box *s) { u32 pos=0; GF_MediaInformationBox *ptr = (GF_MediaInformationBox *)s; //Header first gf_isom_check_position(s, (GF_Box *)ptr->InfoHeader, &pos); //then dataInfo gf_isom_check_position(s, (GF_Box *)ptr->dataInformation, &pos); gf_isom_check_position(s, gf_isom_box_find_child(s->child_boxes, GF_ISOM_BOX_TYPE_MVCI), &pos); //then sampleTable gf_isom_check_position(s, (GF_Box *)ptr->sampleTable, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void moof_box_del(GF_Box *s) { GF_MovieFragmentBox *ptr = (GF_MovieFragmentBox *)s; if (ptr == NULL) return; gf_list_del(ptr->TrackList); if (ptr->PSSHs) gf_list_del(ptr->PSSHs); if (ptr->mdat) gf_free(ptr->mdat); gf_free(ptr); } GF_Err moof_on_child_box(GF_Box *s, GF_Box *a) { GF_MovieFragmentBox *ptr = (GF_MovieFragmentBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_MFHD: if (ptr->mfhd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->mfhd = (GF_MovieFragmentHeaderBox *) a; return GF_OK; case GF_ISOM_BOX_TYPE_TRAF: return gf_list_add(ptr->TrackList, a); case GF_ISOM_BOX_TYPE_PSSH: if (!ptr->PSSHs) ptr->PSSHs = gf_list_new(); return gf_list_add(ptr->PSSHs, a); } return GF_OK; } GF_Err moof_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, moof_on_child_box); } GF_Box *moof_box_new() { ISOM_DECL_BOX_ALLOC(GF_MovieFragmentBox, 
GF_ISOM_BOX_TYPE_MOOF); tmp->TrackList = gf_list_new(); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err moof_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err moof_box_size(GF_Box *s) { u32 pos=0; GF_MovieFragmentBox *ptr = (GF_MovieFragmentBox *) s; if (!s) return GF_BAD_PARAM; //Header First gf_isom_check_position(s, (GF_Box *)ptr->mfhd, &pos); //then PSSH gf_isom_check_position_list(s, ptr->PSSHs, &pos); //then the track list gf_isom_check_position_list(s, ptr->TrackList, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ void moov_box_del(GF_Box *s) { GF_MovieBox *ptr = (GF_MovieBox *)s; if (ptr == NULL) return; gf_list_del(ptr->trackList); gf_free(ptr); } GF_Err moov_on_child_box(GF_Box *s, GF_Box *a) { GF_MovieBox *ptr = (GF_MovieBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_IODS: if (ptr->iods) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->iods = (GF_ObjectDescriptorBox *)a; //if no IOD, delete the box if (!ptr->iods->descriptor) { ptr->iods = NULL; gf_isom_box_del_parent(&s->child_boxes, a); } return GF_OK; case GF_ISOM_BOX_TYPE_MVHD: if (ptr->mvhd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->mvhd = (GF_MovieHeaderBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_UDTA: if (ptr->udta) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->udta = (GF_UserDataBox *)a; return GF_OK; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS case GF_ISOM_BOX_TYPE_MVEX: if (ptr->mvex) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->mvex = (GF_MovieExtendsBox *)a; ptr->mvex->mov = ptr->mov; return GF_OK; #endif case GF_ISOM_BOX_TYPE_META: if (ptr->meta) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->meta = (GF_MetaBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_TRAK: //set our pointer to this obj ((GF_TrackBox *)a)->moov = ptr; return gf_list_add(ptr->trackList, a); } return GF_OK; } GF_Err moov_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, moov_on_child_box); } GF_Box *moov_box_new() { ISOM_DECL_BOX_ALLOC(GF_MovieBox, GF_ISOM_BOX_TYPE_MOOV); tmp->trackList = gf_list_new(); if (!tmp->trackList) { gf_free(tmp); return NULL; } return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err moov_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err moov_box_size(GF_Box *s) { u32 pos=0; GF_MovieBox *ptr = (GF_MovieBox *)s; gf_isom_check_position(s, (GF_Box *) ptr->mvhd, &pos); gf_isom_check_position(s, (GF_Box *) ptr->iods, &pos); gf_isom_check_position(s, (GF_Box *) ptr->meta, &pos); #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (ptr->mvex && !ptr->mvex_after_traks) { gf_isom_check_position(s, (GF_Box *) ptr->mvex, &pos); } #endif gf_isom_check_position_list(s, ptr->trackList, &pos); #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (ptr->mvex && ptr->mvex_after_traks) { gf_isom_check_position(s, (GF_Box *) ptr->mvex, &pos); } #endif return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void audio_sample_entry_box_del(GF_Box *s) { GF_MPEGAudioSampleEntryBox *ptr = (GF_MPEGAudioSampleEntryBox *)s; if (ptr == NULL) return; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s); if (ptr->slc) gf_odf_desc_del((GF_Descriptor *)ptr->slc); gf_free(ptr); } GF_Err audio_sample_entry_on_child_box(GF_Box *s, GF_Box *a) { GF_UnknownBox *wave = NULL; Bool drop_wave=GF_FALSE; GF_MPEGAudioSampleEntryBox *ptr = (GF_MPEGAudioSampleEntryBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_ESDS: if (ptr->esd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->esd = (GF_ESDBox *)a; ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_NONE; break; case GF_ISOM_BOX_TYPE_DAMR: case 
GF_ISOM_BOX_TYPE_DEVC: case GF_ISOM_BOX_TYPE_DQCP: case GF_ISOM_BOX_TYPE_DSMV: if (ptr->cfg_3gpp) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->cfg_3gpp = (GF_3GPPConfigBox *) a; /*for 3GP config, remember sample entry type in config*/ ptr->cfg_3gpp->cfg.type = ptr->type; ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_NONE; break; case GF_ISOM_BOX_TYPE_DOPS: if (ptr->cfg_opus) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->cfg_opus = (GF_OpusSpecificBox *)a; ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_NONE; break; case GF_ISOM_BOX_TYPE_DAC3: if (ptr->cfg_ac3) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->cfg_ac3 = (GF_AC3ConfigBox *) a; ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_NONE; break; case GF_ISOM_BOX_TYPE_DEC3: if (ptr->cfg_ac3) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->cfg_ac3 = (GF_AC3ConfigBox *) a; break; case GF_ISOM_BOX_TYPE_MHAC: if (ptr->cfg_mha) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->cfg_mha = (GF_MHAConfigBox *) a; ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_NONE; break; case GF_ISOM_BOX_TYPE_DFLA: if (ptr->cfg_flac) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->cfg_flac = (GF_FLACConfigBox *) a; ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_NONE; break; case GF_ISOM_BOX_TYPE_UNKNOWN: wave = (GF_UnknownBox *)a; /*HACK for QT files: get the esds box from the track*/ if (s->type == GF_ISOM_BOX_TYPE_MP4A) { if (ptr->esd) ERROR_ON_DUPLICATED_BOX(a, ptr) //wave subboxes may have been properly parsed if ((wave->original_4cc == GF_QT_BOX_TYPE_WAVE) && gf_list_count(wave->child_boxes)) { u32 i; for (i =0; i<gf_list_count(wave->child_boxes); i++) { GF_Box *inner_box = (GF_Box *)gf_list_get(wave->child_boxes, i); if (inner_box->type == GF_ISOM_BOX_TYPE_ESDS) { ptr->esd = (GF_ESDBox *)inner_box; if (ptr->qtff_mode & GF_ISOM_AUDIO_QTFF_CONVERT_FLAG) { gf_list_rem(a->child_boxes, i); drop_wave=GF_TRUE; ptr->compression_id = 0; gf_list_add(ptr->child_boxes, inner_box); } } } if (drop_wave) { gf_isom_box_del_parent(&ptr->child_boxes, a); ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_NONE; ptr->version = 0; return GF_OK; } ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_ON_EXT_VALID; return GF_OK; } gf_isom_box_del_parent(&ptr->child_boxes, a); return GF_ISOM_INVALID_MEDIA; } ptr->qtff_mode &= ~GF_ISOM_AUDIO_QTFF_CONVERT_FLAG; if ((wave->original_4cc == GF_QT_BOX_TYPE_WAVE) && gf_list_count(wave->child_boxes)) { ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_ON_NOEXT; } return GF_OK; case GF_QT_BOX_TYPE_WAVE: { u32 subtype = 0; GF_Box **cfg_ptr = NULL; if (s->type == GF_ISOM_BOX_TYPE_MP4A) { cfg_ptr = (GF_Box **) &ptr->esd; subtype = GF_ISOM_BOX_TYPE_ESDS; } else if (s->type == GF_ISOM_BOX_TYPE_AC3) { cfg_ptr = (GF_Box **) &ptr->cfg_ac3; subtype = GF_ISOM_BOX_TYPE_DAC3; } else if (s->type == GF_ISOM_BOX_TYPE_EC3) { cfg_ptr = (GF_Box **) &ptr->cfg_ac3; subtype = GF_ISOM_BOX_TYPE_DEC3; } else if (s->type == GF_ISOM_BOX_TYPE_OPUS) { cfg_ptr = (GF_Box **) &ptr->cfg_opus; subtype = GF_ISOM_BOX_TYPE_DOPS; } else if ((s->type == GF_ISOM_BOX_TYPE_MHA1) || (s->type == GF_ISOM_BOX_TYPE_MHA2) || (s->type == GF_ISOM_BOX_TYPE_MHM1) || (s->type == GF_ISOM_BOX_TYPE_MHM2) ) { cfg_ptr = (GF_Box **) &ptr->cfg_mha; subtype = GF_ISOM_BOX_TYPE_MHAC; } if (cfg_ptr) { if (*cfg_ptr) ERROR_ON_DUPLICATED_BOX(a, ptr) //wave subboxes may have been properly parsed if (gf_list_count(a->child_boxes)) { u32 i; for (i =0; i<gf_list_count(a->child_boxes); i++) { GF_Box *inner_box = (GF_Box *)gf_list_get(a->child_boxes, i); if (inner_box->type == subtype) { *cfg_ptr = inner_box; if (ptr->qtff_mode & GF_ISOM_AUDIO_QTFF_CONVERT_FLAG) { gf_list_rem(a->child_boxes, i); drop_wave=GF_TRUE; gf_list_add(ptr->child_boxes, inner_box); } break; } } if 
(drop_wave) { gf_isom_box_del_parent(&ptr->child_boxes, a); ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_NONE; ptr->compression_id = 0; ptr->version = 0; return GF_OK; } ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_ON_EXT_VALID; return GF_OK; } } } ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_ON_EXT_VALID; return GF_OK; } return GF_OK; } GF_Err audio_sample_entry_box_read(GF_Box *s, GF_BitStream *bs) { GF_MPEGAudioSampleEntryBox *ptr; char *data; u8 a, b, c, d; u32 i, size, v, nb_alnum; GF_Err e; u64 pos, start; ptr = (GF_MPEGAudioSampleEntryBox *)s; start = gf_bs_get_position(bs); gf_bs_seek(bs, start + 8); v = gf_bs_read_u16(bs); if (v) ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_ON_NOEXT; //try to disambiguate QTFF v1 and MP4 v1 audio sample entries ... if (v==1) { //go to end of ISOM audio sample entry, skip 4 byte (box size field), read 4 bytes (box type) and check if this looks like a box gf_bs_seek(bs, start + 8 + 20 + 4); a = gf_bs_read_u8(bs); b = gf_bs_read_u8(bs); c = gf_bs_read_u8(bs); d = gf_bs_read_u8(bs); nb_alnum = 0; if (isalnum(a)) nb_alnum++; if (isalnum(b)) nb_alnum++; if (isalnum(c)) nb_alnum++; if (isalnum(d)) nb_alnum++; if (nb_alnum>2) ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_NONE; } gf_bs_seek(bs, start); e = gf_isom_audio_sample_entry_read((GF_AudioSampleEntryBox*)s, bs); if (e) return e; pos = gf_bs_get_position(bs); size = (u32) s->size; //when cookie is set on bs, always convert qtff-style mp4a to isobmff-style //since the conversion is done in addBox and we don't have the bitstream there (arg...), flag the box if (gf_bs_get_cookie(bs) & GF_ISOM_BS_COOKIE_QT_CONV) { ptr->qtff_mode |= GF_ISOM_AUDIO_QTFF_CONVERT_FLAG; } e = gf_isom_box_array_read(s, bs, audio_sample_entry_on_child_box); if (!e) { if (s->type==GF_ISOM_BOX_TYPE_ENCA) { GF_ProtectionSchemeInfoBox *sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(s->child_boxes, GF_ISOM_BOX_TYPE_SINF); if (sinf && sinf->original_format) { u32 type = sinf->original_format->data_format; switch (type) { case GF_ISOM_SUBTYPE_3GP_AMR: case GF_ISOM_SUBTYPE_3GP_AMR_WB: case GF_ISOM_SUBTYPE_3GP_EVRC: case GF_ISOM_SUBTYPE_3GP_QCELP: case GF_ISOM_SUBTYPE_3GP_SMV: if (ptr->cfg_3gpp) ptr->cfg_3gpp->cfg.type = type; break; } } } return GF_OK; } if (size<8) return GF_ISOM_INVALID_FILE; /*hack for some weird files (possibly recorded with live.com tools, needs further investigations)*/ gf_bs_seek(bs, pos); data = (char*)gf_malloc(sizeof(char) * size); if (!data) return GF_OUT_OF_MEM; gf_bs_read_data(bs, data, size); for (i=0; i<size-8; i++) { if (GF_4CC((u32)data[i+4], (u8)data[i+5], (u8)data[i+6], (u8)data[i+7]) == GF_ISOM_BOX_TYPE_ESDS) { GF_BitStream *mybs = gf_bs_new(data + i, size - i, GF_BITSTREAM_READ); if (ptr->esd) gf_isom_box_del_parent(&ptr->child_boxes, (GF_Box *)ptr->esd); ptr->esd = NULL; e = gf_isom_box_parse((GF_Box **)&ptr->esd, mybs); gf_bs_del(mybs); if (e==GF_OK) { if (!ptr->child_boxes) ptr->child_boxes = gf_list_new(); gf_list_add(ptr->child_boxes, ptr->esd); } else if (ptr->esd) { gf_isom_box_del((GF_Box *)ptr->esd); ptr->esd = NULL; } break; } } gf_free(data); return e; } GF_Box *audio_sample_entry_box_new() { ISOM_DECL_BOX_ALLOC(GF_MPEGAudioSampleEntryBox, GF_ISOM_BOX_TYPE_MP4A); gf_isom_audio_sample_entry_init((GF_AudioSampleEntryBox*)tmp); return (GF_Box *)tmp; } GF_Box *enca_box_new() { ISOM_DECL_BOX_ALLOC(GF_MPEGAudioSampleEntryBox, GF_ISOM_BOX_TYPE_ENCA); gf_isom_audio_sample_entry_init((GF_AudioSampleEntryBox*)tmp); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err audio_sample_entry_box_write(GF_Box *s, GF_BitStream 
*bs) { GF_Err e; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_isom_audio_sample_entry_write((GF_AudioSampleEntryBox*)s, bs); return GF_OK; } GF_Err audio_sample_entry_box_size(GF_Box *s) { u32 pos=0; GF_MPEGAudioSampleEntryBox *ptr = (GF_MPEGAudioSampleEntryBox *)s; gf_isom_audio_sample_entry_size((GF_AudioSampleEntryBox*)s); if (ptr->qtff_mode) return GF_OK; gf_isom_check_position(s, (GF_Box *)ptr->esd, &pos); gf_isom_check_position(s, (GF_Box *)ptr->cfg_3gpp, &pos); gf_isom_check_position(s, (GF_Box *)ptr->cfg_opus, &pos); gf_isom_check_position(s, (GF_Box *)ptr->cfg_ac3, &pos); gf_isom_check_position(s, (GF_Box *)ptr->cfg_flac, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void gen_sample_entry_box_del(GF_Box *s) { GF_SampleEntryBox *ptr = (GF_SampleEntryBox *)s; if (ptr == NULL) return; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s); gf_free(ptr); } GF_Err gen_sample_entry_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)s, bs); if (e) return e; ISOM_DECREASE_SIZE(s, 8); return gf_isom_box_array_read(s, bs, NULL); } GF_Box *gen_sample_entry_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleEntryBox, GF_QT_SUBTYPE_C608);//type will be overriten gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err gen_sample_entry_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SampleEntryBox *ptr = (GF_SampleEntryBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_data(bs, ptr->reserved, 6); gf_bs_write_u16(bs, ptr->dataReferenceIndex); return GF_OK; } GF_Err gen_sample_entry_box_size(GF_Box *s) { GF_SampleEntryBox *ptr = (GF_SampleEntryBox *)s; ptr->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mp4s_box_del(GF_Box *s) { GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s; if (ptr == NULL) return; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s); if (ptr->slc) gf_odf_desc_del((GF_Descriptor *)ptr->slc); gf_free(ptr); } GF_Err mp4s_on_child_box(GF_Box *s, GF_Box *a) { GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_ESDS: if (ptr->esd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->esd = (GF_ESDBox *)a; break; } return GF_OK; } GF_Err mp4s_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s; e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)ptr, bs); if (e) return e; ISOM_DECREASE_SIZE(ptr, 8); return gf_isom_box_array_read(s, bs, mp4s_on_child_box); } GF_Box *mp4s_box_new() { ISOM_DECL_BOX_ALLOC(GF_MPEGSampleEntryBox, GF_ISOM_BOX_TYPE_MP4S); gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp); tmp->internal_type = GF_ISOM_SAMPLE_ENTRY_MP4S; return (GF_Box *)tmp; } GF_Box *encs_box_new() { ISOM_DECL_BOX_ALLOC(GF_MPEGSampleEntryBox, GF_ISOM_BOX_TYPE_ENCS); gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp); tmp->internal_type = GF_ISOM_SAMPLE_ENTRY_MP4S; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mp4s_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_data(bs, ptr->reserved, 6); gf_bs_write_u16(bs, ptr->dataReferenceIndex); return GF_OK; } GF_Err mp4s_box_size(GF_Box *s) { u32 pos=0; GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s; s->size += 8; gf_isom_check_position(s, (GF_Box *)ptr->esd, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void 
video_sample_entry_box_del(GF_Box *s) { GF_MPEGVisualSampleEntryBox *ptr = (GF_MPEGVisualSampleEntryBox *)s; if (ptr == NULL) return; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s); if (ptr->slc) gf_odf_desc_del((GF_Descriptor *)ptr->slc); /*for publishing*/ if (ptr->emul_esd) gf_odf_desc_del((GF_Descriptor *)ptr->emul_esd); gf_free(ptr); } GF_Err video_sample_entry_on_child_box(GF_Box *s, GF_Box *a) { GF_MPEGVisualSampleEntryBox *ptr = (GF_MPEGVisualSampleEntryBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_ESDS: if (ptr->esd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->esd = (GF_ESDBox *)a; break; case GF_ISOM_BOX_TYPE_RINF: if (ptr->rinf) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->rinf = (GF_RestrictedSchemeInfoBox *) a; break; case GF_ISOM_BOX_TYPE_AVCC: if (ptr->avc_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->avc_config = (GF_AVCConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_HVCC: if (ptr->hevc_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->hevc_config = (GF_HEVCConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_SVCC: if (ptr->svc_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->svc_config = (GF_AVCConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_MVCC: if (ptr->mvc_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->mvc_config = (GF_AVCConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_LHVC: if (ptr->lhvc_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->lhvc_config = (GF_HEVCConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_AV1C: if (ptr->av1_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->av1_config = (GF_AV1ConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_VPCC: if (ptr->vp_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->vp_config = (GF_VPConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_DVCC: if (ptr->dovi_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->dovi_config = (GF_DOVIConfigurationBox*)a; break; case GF_ISOM_BOX_TYPE_UUID: if (! 
memcmp(((GF_UnknownUUIDBox*)a)->uuid, GF_ISOM_IPOD_EXT, 16)) { if (ptr->ipod_ext) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->ipod_ext = (GF_UnknownUUIDBox *)a; } else { return GF_OK; } break; case GF_ISOM_BOX_TYPE_D263: if (ptr->cfg_3gpp) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->cfg_3gpp = (GF_3GPPConfigBox *)a; /*for 3GP config, remember sample entry type in config*/ ptr->cfg_3gpp->cfg.type = ptr->type; break; case GF_ISOM_BOX_TYPE_JP2H: if (ptr->jp2h) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->jp2h = (GF_J2KHeaderBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_PASP: case GF_ISOM_BOX_TYPE_CLAP: case GF_ISOM_BOX_TYPE_COLR: case GF_ISOM_BOX_TYPE_MDCV: case GF_ISOM_BOX_TYPE_CLLI: case GF_ISOM_BOX_TYPE_CCST: case GF_ISOM_BOX_TYPE_AUXI: case GF_ISOM_BOX_TYPE_RVCC: case GF_ISOM_BOX_TYPE_M4DS: if (!gf_isom_box_check_unique(s->child_boxes, a)) { ERROR_ON_DUPLICATED_BOX(a, ptr) } return GF_OK; } return GF_OK; } GF_Err video_sample_entry_box_read(GF_Box *s, GF_BitStream *bs) { GF_MPEGVisualSampleEntryBox *mp4v = (GF_MPEGVisualSampleEntryBox*)s; GF_Err e; e = gf_isom_video_sample_entry_read((GF_VisualSampleEntryBox *)s, bs); if (e) return e; e = gf_isom_box_array_read(s, bs, video_sample_entry_on_child_box); if (e) return e; /*this is an AVC sample desc*/ if (mp4v->avc_config || mp4v->svc_config || mp4v->mvc_config) AVC_RewriteESDescriptor(mp4v); /*this is an HEVC sample desc*/ if (mp4v->hevc_config || mp4v->lhvc_config || (mp4v->type==GF_ISOM_BOX_TYPE_HVT1)) HEVC_RewriteESDescriptor(mp4v); /*this is an AV1 sample desc*/ if (mp4v->av1_config) AV1_RewriteESDescriptor(mp4v); /*this is a VP8-9 sample desc*/ if (mp4v->vp_config) VP9_RewriteESDescriptor(mp4v); if (s->type==GF_ISOM_BOX_TYPE_ENCV) { GF_ProtectionSchemeInfoBox *sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(s->child_boxes, GF_ISOM_BOX_TYPE_SINF); if (sinf && sinf->original_format) { u32 type = sinf->original_format->data_format; switch (type) { case GF_ISOM_SUBTYPE_3GP_H263: if (mp4v->cfg_3gpp) mp4v->cfg_3gpp->cfg.type = type; break; } } } return GF_OK; } GF_Box *video_sample_entry_box_new() { GF_MPEGVisualSampleEntryBox *tmp; GF_SAFEALLOC(tmp, GF_MPEGVisualSampleEntryBox); if (tmp == NULL) return NULL; gf_isom_video_sample_entry_init((GF_VisualSampleEntryBox *)tmp); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err video_sample_entry_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_isom_video_sample_entry_write((GF_VisualSampleEntryBox *)s, bs); return GF_OK; } GF_Err video_sample_entry_box_size(GF_Box *s) { u32 pos=0; GF_MPEGVisualSampleEntryBox *ptr = (GF_MPEGVisualSampleEntryBox *)s; gf_isom_video_sample_entry_size((GF_VisualSampleEntryBox *)s); /*make sure we write the config box first, we don't care about the rest*/ /*mp4v*/ gf_isom_check_position(s, (GF_Box *)ptr->esd, &pos); gf_isom_check_position(s, (GF_Box *)ptr->cfg_3gpp, &pos); /*avc / SVC + MVC*/ gf_isom_check_position(s, (GF_Box *)ptr->avc_config, &pos); gf_isom_check_position(s, (GF_Box *)ptr->svc_config, &pos); if (ptr->mvc_config) { gf_isom_check_position(s, gf_isom_box_find_child(s->child_boxes, GF_ISOM_BOX_TYPE_VWID), &pos); gf_isom_check_position(s, (GF_Box *)ptr->mvc_config, &pos); } /*HEVC*/ gf_isom_check_position(s, (GF_Box *)ptr->hevc_config, &pos); gf_isom_check_position(s, (GF_Box *)ptr->lhvc_config, &pos); /*AV1*/ gf_isom_check_position(s, (GF_Box *)ptr->av1_config, &pos); /*VPx*/ gf_isom_check_position(s, (GF_Box *)ptr->vp_config, &pos); /*JP2H*/ gf_isom_check_position(s, (GF_Box *)ptr->jp2h, 
&pos); /*DolbyVision*/ gf_isom_check_position(s, (GF_Box *)ptr->dovi_config, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void mvex_box_del(GF_Box *s) { GF_MovieExtendsBox *ptr = (GF_MovieExtendsBox *)s; if (ptr == NULL) return; gf_list_del(ptr->TrackExList); gf_list_del(ptr->TrackExPropList); gf_free(ptr); } GF_Err mvex_on_child_box(GF_Box *s, GF_Box *a) { GF_MovieExtendsBox *ptr = (GF_MovieExtendsBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_TREX: return gf_list_add(ptr->TrackExList, a); case GF_ISOM_BOX_TYPE_TREP: return gf_list_add(ptr->TrackExPropList, a); case GF_ISOM_BOX_TYPE_MEHD: if (ptr->mehd) ERROR_ON_DUPLICATED_BOX(a, s) ptr->mehd = (GF_MovieExtendsHeaderBox*)a; return GF_OK; } return GF_OK; } GF_Err mvex_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, mvex_on_child_box); } GF_Box *mvex_box_new() { ISOM_DECL_BOX_ALLOC(GF_MovieExtendsBox, GF_ISOM_BOX_TYPE_MVEX); tmp->TrackExList = gf_list_new(); if (!tmp->TrackExList) { gf_free(tmp); return NULL; } tmp->TrackExPropList = gf_list_new(); if (!tmp->TrackExPropList) { gf_list_del(tmp->TrackExList); gf_free(tmp); return NULL; } return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mvex_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err mvex_box_size(GF_Box *s) { u32 pos=0; GF_MovieExtendsBox *ptr = (GF_MovieExtendsBox *) s; gf_isom_check_position(s, (GF_Box *)ptr->mehd, &pos); gf_isom_check_position_list(s, ptr->TrackExList, &pos); gf_isom_check_position_list(s, ptr->TrackExPropList, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *mehd_box_new() { ISOM_DECL_BOX_ALLOC(GF_MovieExtendsHeaderBox, GF_ISOM_BOX_TYPE_MEHD); return (GF_Box *)tmp; } void mehd_box_del(GF_Box *s) { gf_free(s); } GF_Err mehd_box_read(GF_Box *s, GF_BitStream *bs) { GF_MovieExtendsHeaderBox *ptr = (GF_MovieExtendsHeaderBox *)s; if (ptr->version==1) { ISOM_DECREASE_SIZE(ptr, 8); ptr->fragment_duration = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 4); ptr->fragment_duration = (u64) gf_bs_read_u32(bs); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mehd_box_write(GF_Box *s, GF_BitStream *bs) { GF_MovieExtendsHeaderBox *ptr = (GF_MovieExtendsHeaderBox *)s; GF_Err e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->version == 1) { gf_bs_write_u64(bs, ptr->fragment_duration); } else { gf_bs_write_u32(bs, (u32) ptr->fragment_duration); } return GF_OK; } GF_Err mehd_box_size(GF_Box *s) { GF_MovieExtendsHeaderBox *ptr = (GF_MovieExtendsHeaderBox *)s; ptr->version = (ptr->fragment_duration>0xFFFFFFFF) ? 1 : 0; s->size += (ptr->version == 1) ? 
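/* fragment_duration is stored on 64 bits (8 bytes) in version 1, 32 bits (4 bytes) in version 0 */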
8 : 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ void mvhd_box_del(GF_Box *s) { GF_MovieHeaderBox *ptr = (GF_MovieHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err mvhd_box_read(GF_Box *s, GF_BitStream *bs) { GF_MovieHeaderBox *ptr = (GF_MovieHeaderBox *)s; if (ptr == NULL) return GF_BAD_PARAM; if (ptr->version == 1) { ISOM_DECREASE_SIZE(ptr, 28); ptr->creationTime = gf_bs_read_u64(bs); ptr->modificationTime = gf_bs_read_u64(bs); ptr->timeScale = gf_bs_read_u32(bs); ptr->duration = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 16); ptr->creationTime = gf_bs_read_u32(bs); ptr->modificationTime = gf_bs_read_u32(bs); ptr->timeScale = gf_bs_read_u32(bs); ptr->duration = gf_bs_read_u32(bs); } if (!ptr->timeScale) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Movie header timescale is invalid (0) - defaulting to 600\n" )); ptr->timeScale = 600; } ISOM_DECREASE_SIZE(ptr, 80); ptr->preferredRate = gf_bs_read_u32(bs); ptr->preferredVolume = gf_bs_read_u16(bs); gf_bs_read_data(bs, ptr->reserved, 10); ptr->matrixA = gf_bs_read_u32(bs); ptr->matrixB = gf_bs_read_u32(bs); ptr->matrixU = gf_bs_read_u32(bs); ptr->matrixC = gf_bs_read_u32(bs); ptr->matrixD = gf_bs_read_u32(bs); ptr->matrixV = gf_bs_read_u32(bs); ptr->matrixX = gf_bs_read_u32(bs); ptr->matrixY = gf_bs_read_u32(bs); ptr->matrixW = gf_bs_read_u32(bs); ptr->previewTime = gf_bs_read_u32(bs); ptr->previewDuration = gf_bs_read_u32(bs); ptr->posterTime = gf_bs_read_u32(bs); ptr->selectionTime = gf_bs_read_u32(bs); ptr->selectionDuration = gf_bs_read_u32(bs); ptr->currentTime = gf_bs_read_u32(bs); ptr->nextTrackID = gf_bs_read_u32(bs); ptr->original_duration = ptr->duration; return GF_OK; } GF_Box *mvhd_box_new() { ISOM_DECL_BOX_ALLOC(GF_MovieHeaderBox, GF_ISOM_BOX_TYPE_MVHD); tmp->preferredRate = (1<<16); tmp->preferredVolume = (1<<8); tmp->matrixA = (1<<16); tmp->matrixD = (1<<16); tmp->matrixW = (1<<30); tmp->nextTrackID = 1; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mvhd_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MovieHeaderBox *ptr = (GF_MovieHeaderBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->version == 1) { gf_bs_write_u64(bs, ptr->creationTime); gf_bs_write_u64(bs, ptr->modificationTime); gf_bs_write_u32(bs, ptr->timeScale); gf_bs_write_u64(bs, ptr->duration); } else { gf_bs_write_u32(bs, (u32) ptr->creationTime); gf_bs_write_u32(bs, (u32) ptr->modificationTime); gf_bs_write_u32(bs, ptr->timeScale); gf_bs_write_u32(bs, (u32) ptr->duration); } gf_bs_write_u32(bs, ptr->preferredRate); gf_bs_write_u16(bs, ptr->preferredVolume); gf_bs_write_data(bs, ptr->reserved, 10); gf_bs_write_u32(bs, ptr->matrixA); gf_bs_write_u32(bs, ptr->matrixB); gf_bs_write_u32(bs, ptr->matrixU); gf_bs_write_u32(bs, ptr->matrixC); gf_bs_write_u32(bs, ptr->matrixD); gf_bs_write_u32(bs, ptr->matrixV); gf_bs_write_u32(bs, ptr->matrixX); gf_bs_write_u32(bs, ptr->matrixY); gf_bs_write_u32(bs, ptr->matrixW); gf_bs_write_u32(bs, ptr->previewTime); gf_bs_write_u32(bs, ptr->previewDuration); gf_bs_write_u32(bs, ptr->posterTime); gf_bs_write_u32(bs, ptr->selectionTime); gf_bs_write_u32(bs, ptr->selectionDuration); gf_bs_write_u32(bs, ptr->currentTime); gf_bs_write_u32(bs, ptr->nextTrackID); return GF_OK; } GF_Err mvhd_box_size(GF_Box *s) { GF_MovieHeaderBox *ptr = (GF_MovieHeaderBox *)s; if (ptr->duration==(u64) -1) ptr->version = 0; else ptr->version = (ptr->duration>0xFFFFFFFF) ? 1 : 0; ptr->size += (ptr->version == 1) ? 
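/* 64-bit creation/modification/duration in version 1: 8+8+4+8 = 28 bytes; 32-bit fields in version 0: 16 bytes. The fixed 80-byte tail (rate, volume, reserved bytes, matrix, preview/selection times and next track ID) is added next. */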
28 : 16; ptr->size += 80; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void nmhd_box_del(GF_Box *s) { GF_MPEGMediaHeaderBox *ptr = (GF_MPEGMediaHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err nmhd_box_read(GF_Box *s, GF_BitStream *bs) { return GF_OK; } GF_Box *nmhd_box_new() { ISOM_DECL_BOX_ALLOC(GF_MPEGMediaHeaderBox, GF_ISOM_BOX_TYPE_NMHD); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err nmhd_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_full_box_write(s, bs); } GF_Err nmhd_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void padb_box_del(GF_Box *s) { GF_PaddingBitsBox *ptr = (GF_PaddingBitsBox *) s; if (ptr == NULL) return; if (ptr->padbits) gf_free(ptr->padbits); gf_free(ptr); } GF_Err padb_box_read(GF_Box *s,GF_BitStream *bs) { u32 i; GF_PaddingBitsBox *ptr = (GF_PaddingBitsBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->SampleCount = gf_bs_read_u32(bs); if (ptr->size < ptr->SampleCount/2) //half byte per sample return GF_ISOM_INVALID_FILE; ptr->padbits = (u8 *)gf_malloc(sizeof(u8)*ptr->SampleCount); if (!ptr->padbits) return GF_OUT_OF_MEM; for (i=0; i<ptr->SampleCount; i += 2) { gf_bs_read_int(bs, 1); if (i+1 < ptr->SampleCount) { ptr->padbits[i+1] = gf_bs_read_int(bs, 3); } else { gf_bs_read_int(bs, 3); } gf_bs_read_int(bs, 1); ptr->padbits[i] = gf_bs_read_int(bs, 3); } return GF_OK; } GF_Box *padb_box_new() { ISOM_DECL_BOX_ALLOC(GF_PaddingBitsBox, GF_ISOM_BOX_TYPE_PADB); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err padb_box_write(GF_Box *s, GF_BitStream *bs) { u32 i; GF_Err e; GF_PaddingBitsBox *ptr = (GF_PaddingBitsBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, ptr->SampleCount, 32); for (i=0 ; i<ptr->SampleCount; i += 2) { gf_bs_write_int(bs, 0, 1); if (i+1 < ptr->SampleCount) { gf_bs_write_int(bs, ptr->padbits[i+1], 3); } else { gf_bs_write_int(bs, 0, 3); } gf_bs_write_int(bs, 0, 1); gf_bs_write_int(bs, ptr->padbits[i], 3); } return GF_OK; } GF_Err padb_box_size(GF_Box *s) { GF_PaddingBitsBox *ptr = (GF_PaddingBitsBox *)s; ptr->size += 4; if (ptr->SampleCount) ptr->size += (ptr->SampleCount + 1) / 2; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void rely_box_del(GF_Box *s) { GF_RelyHintBox *rely = (GF_RelyHintBox *)s; gf_free(rely); } GF_Err rely_box_read(GF_Box *s, GF_BitStream *bs) { GF_RelyHintBox *ptr = (GF_RelyHintBox *)s; ISOM_DECREASE_SIZE(ptr, 1); ptr->reserved = gf_bs_read_int(bs, 6); ptr->preferred = gf_bs_read_int(bs, 1); ptr->required = gf_bs_read_int(bs, 1); return GF_OK; } GF_Box *rely_box_new() { ISOM_DECL_BOX_ALLOC(GF_RelyHintBox, GF_ISOM_BOX_TYPE_RELY); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err rely_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_RelyHintBox *ptr = (GF_RelyHintBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_int(bs, ptr->reserved, 6); gf_bs_write_int(bs, ptr->preferred, 1); gf_bs_write_int(bs, ptr->required, 1); return GF_OK; } GF_Err rely_box_size(GF_Box *s) { s->size += 1; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void rtpo_box_del(GF_Box *s) { GF_RTPOBox *rtpo = (GF_RTPOBox *)s; gf_free(rtpo); } GF_Err rtpo_box_read(GF_Box *s, GF_BitStream *bs) { GF_RTPOBox *ptr = (GF_RTPOBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->timeOffset = gf_bs_read_u32(bs); return GF_OK; } GF_Box *rtpo_box_new() { ISOM_DECL_BOX_ALLOC(GF_RTPOBox, GF_ISOM_BOX_TYPE_RTPO); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err 
rtpo_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_RTPOBox *ptr = (GF_RTPOBox *)s; if (ptr == NULL) return GF_BAD_PARAM; //here we have no pb, just remembed that some entries will have to //be 4-bytes aligned ... e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->timeOffset); return GF_OK; } GF_Err rtpo_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void smhd_box_del(GF_Box *s) { GF_SoundMediaHeaderBox *ptr = (GF_SoundMediaHeaderBox *)s; if (ptr == NULL ) return; gf_free(ptr); } GF_Err smhd_box_read(GF_Box *s, GF_BitStream *bs) { GF_SoundMediaHeaderBox *ptr = (GF_SoundMediaHeaderBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->balance = gf_bs_read_u16(bs); ptr->reserved = gf_bs_read_u16(bs); return GF_OK; } GF_Box *smhd_box_new() { ISOM_DECL_BOX_ALLOC(GF_SoundMediaHeaderBox, GF_ISOM_BOX_TYPE_SMHD); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err smhd_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SoundMediaHeaderBox *ptr = (GF_SoundMediaHeaderBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u16(bs, ptr->balance); gf_bs_write_u16(bs, ptr->reserved); return GF_OK; } GF_Err smhd_box_size(GF_Box *s) { GF_SoundMediaHeaderBox *ptr = (GF_SoundMediaHeaderBox *)s; ptr->reserved = 0; ptr->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void snro_box_del(GF_Box *s) { GF_SeqOffHintEntryBox *snro = (GF_SeqOffHintEntryBox *)s; gf_free(snro); } GF_Err snro_box_read(GF_Box *s, GF_BitStream *bs) { GF_SeqOffHintEntryBox *ptr = (GF_SeqOffHintEntryBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->SeqOffset = gf_bs_read_u32(bs); return GF_OK; } GF_Box *snro_box_new() { ISOM_DECL_BOX_ALLOC(GF_SeqOffHintEntryBox, GF_ISOM_BOX_TYPE_SNRO); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err snro_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SeqOffHintEntryBox *ptr = (GF_SeqOffHintEntryBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->SeqOffset); return GF_OK; } GF_Err snro_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stbl_box_del(GF_Box *s) { GF_SampleTableBox *ptr = (GF_SampleTableBox *)s; if (ptr == NULL) return; if (ptr->sub_samples) gf_list_del(ptr->sub_samples); if (ptr->sampleGroups) gf_list_del(ptr->sampleGroups); if (ptr->sampleGroupsDescription) gf_list_del(ptr->sampleGroupsDescription); if (ptr->sai_sizes) gf_list_del(ptr->sai_sizes); if (ptr->sai_offsets) gf_list_del(ptr->sai_offsets); if (ptr->traf_map) { if (ptr->traf_map->frag_starts) { u32 i; for (i=0; i<ptr->traf_map->nb_entries; i++) { if (ptr->traf_map->frag_starts[i].moof_template) gf_free(ptr->traf_map->frag_starts[i].moof_template); } gf_free(ptr->traf_map->frag_starts); } gf_free(ptr->traf_map); } gf_free(ptr); } GF_Err stbl_on_child_box(GF_Box *s, GF_Box *a) { GF_SampleTableBox *ptr = (GF_SampleTableBox *)s; if (!a) return GF_OK; switch (a->type) { case GF_ISOM_BOX_TYPE_STTS: if (ptr->TimeToSample) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->TimeToSample = (GF_TimeToSampleBox *)a; break; case GF_ISOM_BOX_TYPE_CTTS: if (ptr->CompositionOffset) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->CompositionOffset = (GF_CompositionOffsetBox *)a; break; case GF_ISOM_BOX_TYPE_CSLG: if (ptr->CompositionToDecode) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->CompositionToDecode = (GF_CompositionToDecodeBox *)a; break; case GF_ISOM_BOX_TYPE_STSS: if (ptr->SyncSample) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->SyncSample = 
(GF_SyncSampleBox *)a; break; case GF_ISOM_BOX_TYPE_STSD: if (ptr->SampleDescription) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->SampleDescription =(GF_SampleDescriptionBox *)a; break; case GF_ISOM_BOX_TYPE_STZ2: case GF_ISOM_BOX_TYPE_STSZ: if (ptr->SampleSize) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->SampleSize = (GF_SampleSizeBox *)a; break; case GF_ISOM_BOX_TYPE_STSC: if (ptr->SampleToChunk) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->SampleToChunk = (GF_SampleToChunkBox *)a; break; case GF_ISOM_BOX_TYPE_PADB: if (ptr->PaddingBits) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->PaddingBits = (GF_PaddingBitsBox *) a; break; //WARNING: AS THIS MAY CHANGE DYNAMICALLY DURING EDIT, case GF_ISOM_BOX_TYPE_CO64: case GF_ISOM_BOX_TYPE_STCO: if (ptr->ChunkOffset) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->ChunkOffset = a; return GF_OK; case GF_ISOM_BOX_TYPE_STSH: if (ptr->ShadowSync) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->ShadowSync = (GF_ShadowSyncBox *)a; break; case GF_ISOM_BOX_TYPE_STDP: if (ptr->DegradationPriority) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->DegradationPriority = (GF_DegradationPriorityBox *)a; break; case GF_ISOM_BOX_TYPE_SDTP: if (ptr->SampleDep) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->SampleDep = (GF_SampleDependencyTypeBox *)a; break; case GF_ISOM_BOX_TYPE_SUBS: if (!ptr->sub_samples) ptr->sub_samples = gf_list_new(); gf_list_add(ptr->sub_samples, a); //check subsample box { GF_SubSampleInformationBox *subs = (GF_SubSampleInformationBox *)a; GF_SubSampleInfoEntry *ent = gf_list_get(subs->Samples, 0); if (!ent) { gf_list_rem(subs->Samples, 0); GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] first entry in SubSample in track SampleTable is invalid\n")); } else if (ent->sample_delta==0) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] first entry in SubSample in track SampleTable has sample_delta of 0, should be one. Fixing\n")); ent->sample_delta = 1; } } break; case GF_ISOM_BOX_TYPE_SBGP: if (!ptr->sampleGroups) ptr->sampleGroups = gf_list_new(); gf_list_add(ptr->sampleGroups, a); break; case GF_ISOM_BOX_TYPE_SGPD: if (!ptr->sampleGroupsDescription) ptr->sampleGroupsDescription = gf_list_new(); gf_list_add(ptr->sampleGroupsDescription, a); break; case GF_ISOM_BOX_TYPE_SAIZ: if (!ptr->sai_sizes) ptr->sai_sizes = gf_list_new(); gf_list_add(ptr->sai_sizes, a); break; case GF_ISOM_BOX_TYPE_SAIO: if (!ptr->sai_offsets) ptr->sai_offsets = gf_list_new(); gf_list_add(ptr->sai_offsets, a); break; } return GF_OK; } GF_Err stbl_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; //we need to parse DegPrior in a special way GF_SampleTableBox *ptr = (GF_SampleTableBox *)s; e = gf_isom_box_array_read(s, bs, stbl_on_child_box); if (e) return e; if (!ptr->SyncSample) ptr->no_sync_found = 1; ptr->nb_sgpd_in_stbl = gf_list_count(ptr->sampleGroupsDescription); ptr->nb_stbl_boxes = gf_list_count(ptr->child_boxes); if (gf_bs_get_cookie(bs) & GF_ISOM_BS_COOKIE_CLONE_TRACK) return GF_OK; // return GF_OK; //these boxes are mandatory ! 
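//stts, stsc, stsz/stz2 and stco/co64 must all be present for a usable sample table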
if (!ptr->SampleToChunk || !ptr->SampleSize || !ptr->ChunkOffset || !ptr->TimeToSample) return GF_ISOM_INVALID_FILE; //sanity check if (ptr->SampleSize->sampleCount) { if (!ptr->TimeToSample->nb_entries || !ptr->SampleToChunk->nb_entries) return GF_ISOM_INVALID_FILE; } return GF_OK; } GF_Box *stbl_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleTableBox, GF_ISOM_BOX_TYPE_STBL); //maxSamplePer chunk is 10 by default tmp->MaxSamplePerChunk = 10; tmp->groupID = 1; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stbl_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err stbl_box_size(GF_Box *s) { u32 pos=0; GF_SampleTableBox *ptr = (GF_SampleTableBox *)s; gf_isom_check_position(s, (GF_Box *)ptr->SampleDescription, &pos); gf_isom_check_position(s, (GF_Box *)ptr->TimeToSample, &pos); gf_isom_check_position(s, (GF_Box *)ptr->CompositionOffset, &pos); gf_isom_check_position(s, (GF_Box *)ptr->CompositionToDecode, &pos); gf_isom_check_position(s, (GF_Box *)ptr->SyncSample, &pos); gf_isom_check_position(s, (GF_Box *)ptr->ShadowSync, &pos); gf_isom_check_position(s, (GF_Box *)ptr->SampleToChunk, &pos); gf_isom_check_position(s, (GF_Box *)ptr->SampleSize, &pos); gf_isom_check_position(s, (GF_Box *)ptr->ChunkOffset, &pos); gf_isom_check_position(s, (GF_Box *)ptr->DegradationPriority, &pos); gf_isom_check_position(s, (GF_Box *)ptr->SampleDep, &pos); gf_isom_check_position(s, (GF_Box *)ptr->PaddingBits, &pos); if (ptr->sub_samples) { gf_isom_check_position_list(s, ptr->sub_samples, &pos); } if (ptr->sampleGroupsDescription) { gf_isom_check_position_list(s, ptr->sampleGroupsDescription, &pos); } if (ptr->sampleGroups) { gf_isom_check_position_list(s, ptr->sampleGroups, &pos); } if (ptr->sai_sizes) { gf_isom_check_position_list(s, ptr->sai_sizes, &pos); } if (ptr->sai_offsets) { gf_isom_check_position_list(s, ptr->sai_offsets, &pos); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stco_box_del(GF_Box *s) { GF_ChunkOffsetBox *ptr = (GF_ChunkOffsetBox *)s; if (ptr == NULL) return; if (ptr->offsets) gf_free(ptr->offsets); gf_free(ptr); } GF_Err stco_box_read(GF_Box *s, GF_BitStream *bs) { u32 entries; GF_ChunkOffsetBox *ptr = (GF_ChunkOffsetBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->nb_entries = gf_bs_read_u32(bs); if (ptr->nb_entries > ptr->size / 4) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stco\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } if (ptr->nb_entries) { ptr->offsets = (u32 *) gf_malloc(ptr->nb_entries * sizeof(u32) ); if (ptr->offsets == NULL) return GF_OUT_OF_MEM; ptr->alloc_size = ptr->nb_entries; for (entries = 0; entries < ptr->nb_entries; entries++) { ptr->offsets[entries] = gf_bs_read_u32(bs); } } return GF_OK; } GF_Box *stco_box_new() { ISOM_DECL_BOX_ALLOC(GF_ChunkOffsetBox, GF_ISOM_BOX_TYPE_STCO); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stco_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_ChunkOffsetBox *ptr = (GF_ChunkOffsetBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nb_entries); for (i = 0; i < ptr->nb_entries; i++) { gf_bs_write_u32(bs, ptr->offsets[i]); } return GF_OK; } GF_Err stco_box_size(GF_Box *s) { GF_ChunkOffsetBox *ptr = (GF_ChunkOffsetBox *)s; ptr->size += 4 + (4 * ptr->nb_entries); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stdp_box_del(GF_Box *s) { GF_DegradationPriorityBox *ptr = (GF_DegradationPriorityBox *)s; if (ptr == NULL ) return; if (ptr->priorities) gf_free(ptr->priorities); 
gf_free(ptr); } //this is called through stbl_read... GF_Err stdp_box_read(GF_Box *s, GF_BitStream *bs) { u32 entry; GF_DegradationPriorityBox *ptr = (GF_DegradationPriorityBox *)s; /*out-of-order stdp, assume no padding at the end and take the entire remaining data for entries*/ if (!ptr->nb_entries) ptr->nb_entries = (u32) ptr->size / 2; else if (ptr->nb_entries > ptr->size / 2) return GF_ISOM_INVALID_FILE; ptr->priorities = (u16 *) gf_malloc(ptr->nb_entries * sizeof(u16)); if (ptr->priorities == NULL) return GF_OUT_OF_MEM; for (entry = 0; entry < ptr->nb_entries; entry++) { ptr->priorities[entry] = gf_bs_read_u16(bs); } ISOM_DECREASE_SIZE(ptr, (2*ptr->nb_entries) ); return GF_OK; } GF_Box *stdp_box_new() { ISOM_DECL_BOX_ALLOC(GF_DegradationPriorityBox, GF_ISOM_BOX_TYPE_STDP); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stdp_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_DegradationPriorityBox *ptr = (GF_DegradationPriorityBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; for (i = 0; i < ptr->nb_entries; i++) { gf_bs_write_u16(bs, ptr->priorities[i]); } return GF_OK; } GF_Err stdp_box_size(GF_Box *s) { GF_DegradationPriorityBox *ptr = (GF_DegradationPriorityBox *)s; ptr->size += (2 * ptr->nb_entries); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stsc_box_del(GF_Box *s) { GF_SampleToChunkBox *ptr = (GF_SampleToChunkBox *)s; if (ptr == NULL) return; if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err stsc_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_SampleToChunkBox *ptr = (GF_SampleToChunkBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->nb_entries = gf_bs_read_u32(bs); if (ptr->nb_entries > ptr->size / 12) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stsc\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } ptr->alloc_size = ptr->nb_entries; ptr->entries = NULL; if (ptr->nb_entries) { ptr->entries = gf_malloc(sizeof(GF_StscEntry)*ptr->alloc_size); if (!ptr->entries) return GF_OUT_OF_MEM; } for (i = 0; i < ptr->nb_entries; i++) { ptr->entries[i].firstChunk = gf_bs_read_u32(bs); ptr->entries[i].samplesPerChunk = gf_bs_read_u32(bs); ptr->entries[i].sampleDescriptionIndex = gf_bs_read_u32(bs); ptr->entries[i].isEdited = 0; ptr->entries[i].nextChunk = 0; if (!ptr->entries[i].firstChunk) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] invalid first chunk 0 in stsc entry\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } //update the next chunk in the previous entry if (i) ptr->entries[i-1].nextChunk = ptr->entries[i].firstChunk; } ptr->currentIndex = 0; ptr->firstSampleInCurrentChunk = 0; ptr->currentChunk = 0; ptr->ghostNumber = 0; return GF_OK; } GF_Box *stsc_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleToChunkBox, GF_ISOM_BOX_TYPE_STSC); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stsc_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_SampleToChunkBox *ptr = (GF_SampleToChunkBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nb_entries); for (i=0; i<ptr->nb_entries; i++) { gf_bs_write_u32(bs, ptr->entries[i].firstChunk); gf_bs_write_u32(bs, ptr->entries[i].samplesPerChunk); gf_bs_write_u32(bs, ptr->entries[i].sampleDescriptionIndex); } return GF_OK; } GF_Err stsc_box_size(GF_Box *s) { GF_SampleToChunkBox *ptr = (GF_SampleToChunkBox *)s; ptr->size += 4 + (12 * ptr->nb_entries); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stsd_box_del(GF_Box *s) { GF_SampleDescriptionBox *ptr = (GF_SampleDescriptionBox *)s; 
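/* sample entry children are kept in child_boxes and released by the generic box destructor; only the box struct itself is freed here */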
if (ptr == NULL) return; gf_free(ptr); } GF_Err stsd_on_child_box(GF_Box *s, GF_Box *a) { GF_UnknownBox *def; if (!a) return GF_OK; if (gf_box_valid_in_parent(a, "stsd")) { return GF_OK; } switch (a->type) { //unknown sample description: we need a specific box to handle the data ref index //rather than a default box ... case GF_ISOM_BOX_TYPE_UNKNOWN: def = (GF_UnknownBox *)a; /*we need at least 8 bytes for unknown sample entries*/ if (def->dataSize < 8) { gf_isom_box_del_parent(&s->child_boxes, a); return GF_ISOM_INVALID_MEDIA; } return GF_OK; default: GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Cannot process box of type %s\n", gf_4cc_to_str(a->type))); return GF_ISOM_INVALID_FILE; } } GF_Err stsd_box_read(GF_Box *s, GF_BitStream *bs) { ISOM_DECREASE_SIZE(s, 4) gf_bs_read_u32(bs); return gf_isom_box_array_read_ex(s, bs, stsd_on_child_box, GF_ISOM_BOX_TYPE_STSD); } GF_Box *stsd_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleDescriptionBox, GF_ISOM_BOX_TYPE_STSD); tmp->child_boxes = gf_list_new(); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stsd_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 nb_entries; GF_SampleDescriptionBox *ptr = (GF_SampleDescriptionBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; nb_entries = gf_list_count(ptr->child_boxes); gf_bs_write_u32(bs, nb_entries); return GF_OK; } GF_Err stsd_box_size(GF_Box *s) { GF_SampleDescriptionBox *ptr = (GF_SampleDescriptionBox *)s; ptr->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stsh_box_del(GF_Box *s) { u32 i = 0; GF_StshEntry *ent; GF_ShadowSyncBox *ptr = (GF_ShadowSyncBox *)s; if (ptr == NULL) return; while ( (ent = (GF_StshEntry *)gf_list_enum(ptr->entries, &i)) ) { gf_free(ent); } gf_list_del(ptr->entries); gf_free(ptr); } GF_Err stsh_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 count, i; GF_ShadowSyncBox *ptr = (GF_ShadowSyncBox *)s; ISOM_DECREASE_SIZE(s, 4) count = gf_bs_read_u32(bs); if (ptr->size < count*8) return GF_ISOM_INVALID_FILE; for (i = 0; i < count; i++) { GF_StshEntry *ent = (GF_StshEntry *) gf_malloc(sizeof(GF_StshEntry)); if (!ent) return GF_OUT_OF_MEM; ent->shadowedSampleNumber = gf_bs_read_u32(bs); ent->syncSampleNumber = gf_bs_read_u32(bs); e = gf_list_add(ptr->entries, ent); if (e) return e; } return GF_OK; } GF_Box *stsh_box_new() { ISOM_DECL_BOX_ALLOC(GF_ShadowSyncBox, GF_ISOM_BOX_TYPE_STSH); tmp->entries = gf_list_new(); if (!tmp->entries) { gf_free(tmp); return NULL; } return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stsh_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_StshEntry *ent; GF_ShadowSyncBox *ptr = (GF_ShadowSyncBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, gf_list_count(ptr->entries)); i=0; while ((ent = (GF_StshEntry *)gf_list_enum(ptr->entries, &i))) { gf_bs_write_u32(bs, ent->shadowedSampleNumber); gf_bs_write_u32(bs, ent->syncSampleNumber); } return GF_OK; } GF_Err stsh_box_size(GF_Box *s) { GF_ShadowSyncBox *ptr = (GF_ShadowSyncBox *)s; ptr->size += 4 + (8 * gf_list_count(ptr->entries)); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stss_box_del(GF_Box *s) { GF_SyncSampleBox *ptr = (GF_SyncSampleBox *)s; if (ptr == NULL) return; if (ptr->sampleNumbers) gf_free(ptr->sampleNumbers); gf_free(ptr); } GF_Err stss_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_SyncSampleBox *ptr = (GF_SyncSampleBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->nb_entries = gf_bs_read_u32(bs); if (ptr->size < ptr->nb_entries * 4) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, 
("[iso file] Invalid number of entries %d in stss\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } ptr->alloc_size = ptr->nb_entries; ptr->sampleNumbers = (u32 *) gf_malloc( ptr->alloc_size * sizeof(u32)); if (ptr->sampleNumbers == NULL) return GF_OUT_OF_MEM; for (i = 0; i < ptr->nb_entries; i++) { ptr->sampleNumbers[i] = gf_bs_read_u32(bs); } return GF_OK; } GF_Box *stss_box_new() { ISOM_DECL_BOX_ALLOC(GF_SyncSampleBox, GF_ISOM_BOX_TYPE_STSS); return (GF_Box*)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stss_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_SyncSampleBox *ptr = (GF_SyncSampleBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nb_entries); for (i = 0; i < ptr->nb_entries; i++) { gf_bs_write_u32(bs, ptr->sampleNumbers[i]); } return GF_OK; } GF_Err stss_box_size(GF_Box *s) { GF_SyncSampleBox *ptr = (GF_SyncSampleBox *)s; ptr->size += 4 + (4 * ptr->nb_entries); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stsz_box_del(GF_Box *s) { GF_SampleSizeBox *ptr = (GF_SampleSizeBox *)s; if (ptr == NULL) return; if (ptr->sizes) gf_free(ptr->sizes); gf_free(ptr); } GF_Err stsz_box_read(GF_Box *s, GF_BitStream *bs) { u32 i, estSize; GF_SampleSizeBox *ptr = (GF_SampleSizeBox *)s; if (ptr == NULL) return GF_BAD_PARAM; //support for CompactSizes if (s->type == GF_ISOM_BOX_TYPE_STSZ) { ISOM_DECREASE_SIZE(ptr, 8); ptr->sampleSize = gf_bs_read_u32(bs); ptr->sampleCount = gf_bs_read_u32(bs); } else { //24-reserved ISOM_DECREASE_SIZE(ptr, 8); gf_bs_read_int(bs, 24); i = gf_bs_read_u8(bs); ptr->sampleCount = gf_bs_read_u32(bs); switch (i) { case 4: case 8: case 16: ptr->sampleSize = i; break; default: //try to fix the file //no samples, no parsing pb if (!ptr->sampleCount) { ptr->sampleSize = 16; return GF_OK; } estSize = (u32) (ptr->size) / ptr->sampleCount; if (!estSize && ((ptr->sampleCount+1)/2 == (ptr->size)) ) { ptr->sampleSize = 4; break; } else if (estSize == 1 || estSize == 2) { ptr->sampleSize = 8 * estSize; } else { return GF_ISOM_INVALID_FILE; } } } if (s->type == GF_ISOM_BOX_TYPE_STSZ) { if (! ptr->sampleSize && ptr->sampleCount) { if (ptr->sampleCount > ptr->size / 4) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stsz\n", ptr->sampleCount)); return GF_ISOM_INVALID_FILE; } ptr->sizes = (u32 *) gf_malloc(ptr->sampleCount * sizeof(u32)); if (! ptr->sizes) return GF_OUT_OF_MEM; ptr->alloc_size = ptr->sampleCount; for (i = 0; i < ptr->sampleCount; i++) { ptr->sizes[i] = gf_bs_read_u32(bs); if (ptr->max_size < ptr->sizes[i]) ptr->max_size = ptr->sizes[i]; ptr->total_size += ptr->sizes[i]; ptr->total_samples++; } } } else { if (ptr->sampleSize==4) { if (ptr->sampleCount / 2 > ptr->size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stsz\n", ptr->sampleCount)); return GF_ISOM_INVALID_FILE; } } else { if (ptr->sampleCount > ptr->size / (ptr->sampleSize/8)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stsz\n", ptr->sampleCount)); return GF_ISOM_INVALID_FILE; } } //note we could optimize the mem usage by keeping the table compact //in memory. But that would complicate both caching and editing //we therefore keep all sizes as u32 and uncompress the table ptr->sizes = (u32 *) gf_malloc(ptr->sampleCount * sizeof(u32)); if (! 
ptr->sizes) return GF_OUT_OF_MEM; ptr->alloc_size = ptr->sampleCount; for (i = 0; i < ptr->sampleCount; ) { u32 nb_read = 1; switch (ptr->sampleSize) { case 4: ptr->sizes[i] = gf_bs_read_int(bs, 4); if (i+1 < ptr->sampleCount) { ptr->sizes[i+1] = gf_bs_read_int(bs, 4); nb_read = 2; } else { //0 padding in odd sample count gf_bs_read_int(bs, 4); } break; default: ptr->sizes[i] = gf_bs_read_int(bs, ptr->sampleSize); break; } /*update stats per written entry before moving i past it, otherwise sizes[i] would be read beyond the last written slot*/ while (nb_read) { if (ptr->max_size < ptr->sizes[i]) ptr->max_size = ptr->sizes[i]; ptr->total_size += ptr->sizes[i]; ptr->total_samples++; i++; nb_read--; } } } return GF_OK; }
GF_Box *stsz_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleSizeBox, 0); //type is unknown here, can be regular or compact table return (GF_Box *)tmp; }
#ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stsz_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_SampleSizeBox *ptr = (GF_SampleSizeBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; //in both versions this is still valid if (ptr->type == GF_ISOM_BOX_TYPE_STSZ) { gf_bs_write_u32(bs, ptr->sampleSize); } else { gf_bs_write_u24(bs, 0); gf_bs_write_u8(bs, ptr->sampleSize); } gf_bs_write_u32(bs, ptr->sampleCount); if (ptr->type == GF_ISOM_BOX_TYPE_STSZ) { if (! ptr->sampleSize) { for (i = 0; i < ptr->sampleCount; i++) { gf_bs_write_u32(bs, ptr->sizes ? ptr->sizes[i] : 0); } } } else { for (i = 0; i < ptr->sampleCount; ) { switch (ptr->sampleSize) { case 4: gf_bs_write_int(bs, ptr->sizes[i], 4); if (i+1 < ptr->sampleCount) { gf_bs_write_int(bs, ptr->sizes[i+1], 4); } else { //0 padding in odd sample count gf_bs_write_int(bs, 0, 4); } i += 2; break; default: gf_bs_write_int(bs, ptr->sizes[i], ptr->sampleSize); i += 1; break; } } } return GF_OK; }
GF_Err stsz_box_size(GF_Box *s) { u32 i, fieldSize, size; GF_SampleSizeBox *ptr = (GF_SampleSizeBox *)s; ptr->size += 8; if (!ptr->sampleCount) return GF_OK; //regular table if (ptr->type == GF_ISOM_BOX_TYPE_STSZ) { if (ptr->sampleSize) return GF_OK; ptr->size += (4 * ptr->sampleCount); return GF_OK; } fieldSize = 4; size = ptr->sizes[0]; for (i=0; i < ptr->sampleCount; i++) { if (ptr->sizes[i] <= 0xF) continue; //switch to 8-bit table else if (ptr->sizes[i] <= 0xFF) { fieldSize = 8; } //switch to 16-bit table else if (ptr->sizes[i] <= 0xFFFF) { fieldSize = 16; } //switch to 32-bit table else { fieldSize = 32; } //check the size if (size != ptr->sizes[i]) size = 0; } //if all samples are of the same size, switch to regular (more compact) if (size) { ptr->type = GF_ISOM_BOX_TYPE_STSZ; ptr->sampleSize = size; gf_free(ptr->sizes); ptr->sizes = NULL; } if (fieldSize == 32) { //oops, doesn't fit in a compact table ptr->type = GF_ISOM_BOX_TYPE_STSZ; ptr->size += (4 * ptr->sampleCount); return GF_OK; } //make sure we are a compact table (no need to change the mem representation) ptr->type = GF_ISOM_BOX_TYPE_STZ2; ptr->sampleSize = fieldSize; if (fieldSize == 4) { //do not forget the 0 padding field for odd count ptr->size += (ptr->sampleCount + 1) / 2; } else { ptr->size += (ptr->sampleCount) * (fieldSize/8); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/
void stts_box_del(GF_Box *s) { GF_TimeToSampleBox *ptr = (GF_TimeToSampleBox *)s; if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err stts_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_TimeToSampleBox *ptr = (GF_TimeToSampleBox *)s; #ifndef GPAC_DISABLE_ISOM_WRITE ptr->w_LastDTS = 0; #endif ISOM_DECREASE_SIZE(ptr, 4); ptr->nb_entries = gf_bs_read_u32(bs); if (ptr->size < ptr->nb_entries * 8) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in
stts\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } ptr->alloc_size = ptr->nb_entries; ptr->entries = gf_malloc(sizeof(GF_SttsEntry)*ptr->alloc_size); if (!ptr->entries) return GF_OUT_OF_MEM; for (i=0; i<ptr->nb_entries; i++) { ptr->entries[i].sampleCount = gf_bs_read_u32(bs); ptr->entries[i].sampleDelta = gf_bs_read_u32(bs); #ifndef GPAC_DISABLE_ISOM_WRITE ptr->w_currentSampleNum += ptr->entries[i].sampleCount; ptr->w_LastDTS += (u64)ptr->entries[i].sampleCount * ptr->entries[i].sampleDelta; #endif if (ptr->max_ts_delta<ptr->entries[i].sampleDelta) ptr->max_ts_delta = ptr->entries[i].sampleDelta; if (!ptr->entries[i].sampleDelta) { if ((i+1<ptr->nb_entries) ) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Found stts entry with sample_delta=0 - forbidden ! Fixing to 1\n" )); ptr->entries[i].sampleDelta = 1; } else if (ptr->entries[i].sampleCount>1) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] more than one stts entry at the end of the track with sample_delta=0 - forbidden ! Fixing to 1\n" )); ptr->entries[i].sampleDelta = 1; } } else if ((s32) ptr->entries[i].sampleDelta < 0) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] stts entry %d has negative duration %d - forbidden ! Fixing to 1, sync may get lost (consider reimport raw media)\n", i, (s32) ptr->entries[i].sampleDelta )); ptr->entries[i].sampleDelta = 1; } } if (ptr->size<(ptr->nb_entries*8)) return GF_ISOM_INVALID_FILE; ISOM_DECREASE_SIZE(ptr, ptr->nb_entries*8); //remove the last sample delta. #ifndef GPAC_DISABLE_ISOM_WRITE if (ptr->nb_entries) ptr->w_LastDTS -= ptr->entries[ptr->nb_entries-1].sampleDelta; #endif return GF_OK; } GF_Box *stts_box_new() { ISOM_DECL_BOX_ALLOC(GF_TimeToSampleBox, GF_ISOM_BOX_TYPE_STTS); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stts_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_TimeToSampleBox *ptr = (GF_TimeToSampleBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nb_entries); for (i=0; i<ptr->nb_entries; i++) { gf_bs_write_u32(bs, ptr->entries[i].sampleCount); gf_bs_write_u32(bs, ptr->entries[i].sampleDelta); } return GF_OK; } GF_Err stts_box_size(GF_Box *s) { GF_TimeToSampleBox *ptr = (GF_TimeToSampleBox *)s; ptr->size += 4 + (8 * ptr->nb_entries); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void tfhd_box_del(GF_Box *s) { GF_TrackFragmentHeaderBox *ptr = (GF_TrackFragmentHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err tfhd_box_read(GF_Box *s, GF_BitStream *bs) { GF_TrackFragmentHeaderBox *ptr = (GF_TrackFragmentHeaderBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->trackID = gf_bs_read_u32(bs); //The rest depends on the flags if (ptr->flags & GF_ISOM_TRAF_BASE_OFFSET) { ISOM_DECREASE_SIZE(ptr, 8); ptr->base_data_offset = gf_bs_read_u64(bs); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DESC) { ISOM_DECREASE_SIZE(ptr, 4); ptr->sample_desc_index = gf_bs_read_u32(bs); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DUR) { ISOM_DECREASE_SIZE(ptr, 4); ptr->def_sample_duration = gf_bs_read_u32(bs); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_SIZE) { ISOM_DECREASE_SIZE(ptr, 4); ptr->def_sample_size = gf_bs_read_u32(bs); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_FLAGS) { ISOM_DECREASE_SIZE(ptr, 4); ptr->def_sample_flags = gf_bs_read_u32(bs); } return GF_OK; } GF_Box *tfhd_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackFragmentHeaderBox, GF_ISOM_BOX_TYPE_TFHD); //NO FLAGS SET BY DEFAULT return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tfhd_box_write(GF_Box *s, 
GF_BitStream *bs) { GF_Err e; GF_TrackFragmentHeaderBox *ptr = (GF_TrackFragmentHeaderBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->trackID); //The rest depends on the flags if (ptr->flags & GF_ISOM_TRAF_BASE_OFFSET) { gf_bs_write_u64(bs, ptr->base_data_offset); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DESC) { gf_bs_write_u32(bs, ptr->sample_desc_index); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DUR) { gf_bs_write_u32(bs, ptr->def_sample_duration); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_SIZE) { gf_bs_write_u32(bs, ptr->def_sample_size); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_FLAGS) { gf_bs_write_u32(bs, ptr->def_sample_flags); } return GF_OK; } GF_Err tfhd_box_size(GF_Box *s) { GF_TrackFragmentHeaderBox *ptr = (GF_TrackFragmentHeaderBox *)s; ptr->size += 4; //The rest depends on the flags if (ptr->flags & GF_ISOM_TRAF_BASE_OFFSET) ptr->size += 8; if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DESC) ptr->size += 4; if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DUR) ptr->size += 4; if (ptr->flags & GF_ISOM_TRAF_SAMPLE_SIZE) ptr->size += 4; if (ptr->flags & GF_ISOM_TRAF_SAMPLE_FLAGS) ptr->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ void tims_box_del(GF_Box *s) { GF_TSHintEntryBox *tims = (GF_TSHintEntryBox *)s; gf_free(tims); } GF_Err tims_box_read(GF_Box *s, GF_BitStream *bs) { GF_TSHintEntryBox *ptr = (GF_TSHintEntryBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->timeScale = gf_bs_read_u32(bs); return GF_OK; } GF_Box *tims_box_new() { ISOM_DECL_BOX_ALLOC(GF_TSHintEntryBox, GF_ISOM_BOX_TYPE_TIMS); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tims_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TSHintEntryBox *ptr = (GF_TSHintEntryBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->timeScale); return GF_OK; } GF_Err tims_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void tkhd_box_del(GF_Box *s) { GF_TrackHeaderBox *ptr = (GF_TrackHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); return; } GF_Err tkhd_box_read(GF_Box *s, GF_BitStream *bs) { GF_TrackHeaderBox *ptr = (GF_TrackHeaderBox *)s; if (ptr->version == 1) { ISOM_DECREASE_SIZE(ptr, 32); ptr->creationTime = gf_bs_read_u64(bs); ptr->modificationTime = gf_bs_read_u64(bs); ptr->trackID = gf_bs_read_u32(bs); ptr->reserved1 = gf_bs_read_u32(bs); ptr->duration = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 20); ptr->creationTime = gf_bs_read_u32(bs); ptr->modificationTime = gf_bs_read_u32(bs); ptr->trackID = gf_bs_read_u32(bs); ptr->reserved1 = gf_bs_read_u32(bs); ptr->duration = gf_bs_read_u32(bs); } ptr->initial_duration = ptr->duration; ISOM_DECREASE_SIZE(ptr, 60); ptr->reserved2[0] = gf_bs_read_u32(bs); ptr->reserved2[1] = gf_bs_read_u32(bs); ptr->layer = gf_bs_read_u16(bs); ptr->alternate_group = gf_bs_read_u16(bs); ptr->volume = gf_bs_read_u16(bs); ptr->reserved3 = gf_bs_read_u16(bs); ptr->matrix[0] = gf_bs_read_u32(bs); ptr->matrix[1] = gf_bs_read_u32(bs); ptr->matrix[2] = gf_bs_read_u32(bs); ptr->matrix[3] = gf_bs_read_u32(bs); ptr->matrix[4] = gf_bs_read_u32(bs); ptr->matrix[5] = gf_bs_read_u32(bs); ptr->matrix[6] = gf_bs_read_u32(bs); ptr->matrix[7] = gf_bs_read_u32(bs); ptr->matrix[8] = gf_bs_read_u32(bs); ptr->width = gf_bs_read_u32(bs); ptr->height = gf_bs_read_u32(bs); return GF_OK; } GF_Box *tkhd_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackHeaderBox, GF_ISOM_BOX_TYPE_TKHD); tmp->matrix[0] = 
0x00010000; tmp->matrix[4] = 0x00010000; tmp->matrix[8] = 0x40000000; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tkhd_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TrackHeaderBox *ptr = (GF_TrackHeaderBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->version == 1) { gf_bs_write_u64(bs, ptr->creationTime); gf_bs_write_u64(bs, ptr->modificationTime); gf_bs_write_u32(bs, ptr->trackID); gf_bs_write_u32(bs, ptr->reserved1); gf_bs_write_u64(bs, ptr->duration); } else { gf_bs_write_u32(bs, (u32) ptr->creationTime); gf_bs_write_u32(bs, (u32) ptr->modificationTime); gf_bs_write_u32(bs, ptr->trackID); gf_bs_write_u32(bs, ptr->reserved1); gf_bs_write_u32(bs, (u32) ptr->duration); } gf_bs_write_u32(bs, ptr->reserved2[0]); gf_bs_write_u32(bs, ptr->reserved2[1]); gf_bs_write_u16(bs, ptr->layer); gf_bs_write_u16(bs, ptr->alternate_group); gf_bs_write_u16(bs, ptr->volume); gf_bs_write_u16(bs, ptr->reserved3); gf_bs_write_u32(bs, ptr->matrix[0]); gf_bs_write_u32(bs, ptr->matrix[1]); gf_bs_write_u32(bs, ptr->matrix[2]); gf_bs_write_u32(bs, ptr->matrix[3]); gf_bs_write_u32(bs, ptr->matrix[4]); gf_bs_write_u32(bs, ptr->matrix[5]); gf_bs_write_u32(bs, ptr->matrix[6]); gf_bs_write_u32(bs, ptr->matrix[7]); gf_bs_write_u32(bs, ptr->matrix[8]); gf_bs_write_u32(bs, ptr->width); gf_bs_write_u32(bs, ptr->height); return GF_OK; } GF_Err tkhd_box_size(GF_Box *s) { GF_TrackHeaderBox *ptr = (GF_TrackHeaderBox *)s; if (ptr->duration==(u64) -1) ptr->version = 0; else ptr->version = (ptr->duration>0xFFFFFFFF) ? 1 : 0; ptr->size += (ptr->version == 1) ? 32 : 20; ptr->size += 60; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void traf_box_del(GF_Box *s) { GF_TrackFragmentBox *ptr = (GF_TrackFragmentBox *)s; if (ptr == NULL) return; if (ptr->sub_samples) gf_list_del(ptr->sub_samples); gf_list_del(ptr->TrackRuns); if (ptr->sampleGroups) gf_list_del(ptr->sampleGroups); if (ptr->sampleGroupsDescription) gf_list_del(ptr->sampleGroupsDescription); if (ptr->sai_sizes) gf_list_del(ptr->sai_sizes); if (ptr->sai_offsets) gf_list_del(ptr->sai_offsets); gf_free(ptr); } GF_Err traf_on_child_box(GF_Box *s, GF_Box *a) { GF_TrackFragmentBox *ptr = (GF_TrackFragmentBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_TFHD: if (ptr->tfhd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->tfhd = (GF_TrackFragmentHeaderBox *) a; return GF_OK; case GF_ISOM_BOX_TYPE_TRUN: return gf_list_add(ptr->TrackRuns, a); case GF_ISOM_BOX_TYPE_SDTP: if (ptr->sdtp) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->sdtp = (GF_SampleDependencyTypeBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_TFDT: if (ptr->tfdt) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->tfdt = (GF_TFBaseMediaDecodeTimeBox*) a; return GF_OK; case GF_ISOM_BOX_TYPE_SUBS: if (!ptr->sub_samples) ptr->sub_samples = gf_list_new(); return gf_list_add(ptr->sub_samples, a); case GF_ISOM_BOX_TYPE_SBGP: if (!ptr->sampleGroups) ptr->sampleGroups = gf_list_new(); gf_list_add(ptr->sampleGroups, a); return GF_OK; case GF_ISOM_BOX_TYPE_SGPD: if (!ptr->sampleGroupsDescription) ptr->sampleGroupsDescription = gf_list_new(); gf_list_add(ptr->sampleGroupsDescription, a); return GF_OK; case GF_ISOM_BOX_TYPE_SAIZ: if (!ptr->sai_sizes) ptr->sai_sizes = gf_list_new(); gf_list_add(ptr->sai_sizes, a); return GF_OK; case GF_ISOM_BOX_TYPE_SAIO: if (!ptr->sai_offsets) ptr->sai_offsets = gf_list_new(); gf_list_add(ptr->sai_offsets, a); return GF_OK; //we will throw an error if both PIFF_PSEC and SENC are found. 
Not such files seen yet case GF_ISOM_BOX_TYPE_UUID: if ( ((GF_UUIDBox *)a)->internal_4cc==GF_ISOM_BOX_UUID_PSEC) { if (ptr->sample_encryption) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->sample_encryption = (GF_SampleEncryptionBox *)a; ptr->sample_encryption->traf = ptr; return GF_OK; } else { return GF_OK; } case GF_ISOM_BOX_TYPE_SENC: if (ptr->sample_encryption) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->sample_encryption = (GF_SampleEncryptionBox *)a; ptr->sample_encryption->traf = ptr; return GF_OK; } return GF_OK; } GF_Err traf_box_read(GF_Box *s, GF_BitStream *bs) { GF_TrackFragmentBox *ptr = (GF_TrackFragmentBox *)s; GF_Err e = gf_isom_box_array_read(s, bs, traf_on_child_box); if (e) return e; if (!ptr->tfhd) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing TrackFragmentHeaderBox \n")); return GF_ISOM_INVALID_FILE; } return GF_OK; } GF_Box *traf_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackFragmentBox, GF_ISOM_BOX_TYPE_TRAF); tmp->TrackRuns = gf_list_new(); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Box *tfxd_box_new() { ISOM_DECL_BOX_ALLOC(GF_MSSTimeExtBox, GF_ISOM_BOX_TYPE_UUID); tmp->internal_4cc = GF_ISOM_BOX_UUID_TFXD; return (GF_Box *)tmp; } void tfxd_box_del(GF_Box *s) { gf_free(s); } GF_Err tfxd_box_read(GF_Box *s, GF_BitStream *bs) { GF_MSSTimeExtBox *ptr = (GF_MSSTimeExtBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->version = gf_bs_read_u8(bs); ptr->flags = gf_bs_read_u24(bs); if (ptr->version == 0x01) { ISOM_DECREASE_SIZE(ptr, 16); ptr->absolute_time_in_track_timescale = gf_bs_read_u64(bs); ptr->fragment_duration_in_track_timescale = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 8); ptr->absolute_time_in_track_timescale = gf_bs_read_u32(bs); ptr->fragment_duration_in_track_timescale = gf_bs_read_u32(bs); } return GF_OK; } GF_Err tfxd_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MSSTimeExtBox *uuid = (GF_MSSTimeExtBox*)s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u8(bs, 1); gf_bs_write_u24(bs, 0); gf_bs_write_u64(bs, uuid->absolute_time_in_track_timescale); gf_bs_write_u64(bs, uuid->fragment_duration_in_track_timescale); return GF_OK; } GF_Err tfxd_box_size(GF_Box *s) { s->size += 20; return GF_OK; } GF_Err traf_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err traf_box_size(GF_Box *s) { u32 pos=0; GF_TrackFragmentBox *ptr = (GF_TrackFragmentBox *) s; //Header first gf_isom_check_position(s, (GF_Box *)ptr->tfhd, &pos); gf_isom_check_position_list(s, ptr->sub_samples, &pos); gf_isom_check_position(s, (GF_Box *)ptr->tfdt, &pos); gf_isom_check_position_list(s, ptr->sampleGroupsDescription, &pos); gf_isom_check_position_list(s, ptr->sampleGroups, &pos); gf_isom_check_position_list(s, ptr->sai_sizes, &pos); gf_isom_check_position_list(s, ptr->sai_offsets, &pos); gf_isom_check_position(s, (GF_Box *)ptr->sample_encryption, &pos); gf_isom_check_position_list(s, ptr->TrackRuns, &pos); //when sdtp is present (smooth-like) write it after the trun box gf_isom_check_position(s, (GF_Box *)ptr->sdtp, &pos); //tfxd should be last ... 
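/* resulting traf child order enforced here: tfhd, subs, tfdt, sgpd, sbgp, saiz, saio, senc, trun(s), sdtp, tfxd - gf_isom_check_position presumably moves each listed child to the running position 'pos' in child_boxes so they are serialized in that order */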
if (ptr->tfxd) gf_isom_check_position(s, (GF_Box *)ptr->tfxd, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ void trak_box_del(GF_Box *s) { #ifndef GPAC_DISABLE_ISOM_WRITE GF_TrackBox *ptr = (GF_TrackBox *)s; if (ptr->chunk_cache) gf_bs_del(ptr->chunk_cache); #endif gf_free(s); } static void gf_isom_check_sample_desc(GF_TrackBox *trak) { GF_BitStream *bs; GF_UnknownBox *a; u32 i; GF_Err e; GF_SampleTableBox *stbl; if (!trak->Media || !trak->Media->information) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Track with no media box !\n" )); return; } if (!trak->Media->information->sampleTable) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Track with no sample table !\n" )); trak->Media->information->sampleTable = (GF_SampleTableBox *) gf_isom_box_new_parent(&trak->Media->information->child_boxes, GF_ISOM_BOX_TYPE_STBL); } stbl = trak->Media->information->sampleTable; if (!stbl->SampleDescription) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Track with no sample description box !\n" )); stbl->SampleDescription = (GF_SampleDescriptionBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_STSD); return; } i=0; while ((a = (GF_UnknownBox*)gf_list_enum(trak->Media->information->sampleTable->SampleDescription->child_boxes, &i))) { switch (a->type) { case GF_ISOM_BOX_TYPE_MP4S: case GF_ISOM_BOX_TYPE_ENCS: case GF_ISOM_BOX_TYPE_MP4A: case GF_ISOM_BOX_TYPE_ENCA: case GF_ISOM_BOX_TYPE_MP4V: case GF_ISOM_BOX_TYPE_ENCV: case GF_ISOM_BOX_TYPE_RESV: case GF_ISOM_SUBTYPE_3GP_AMR: case GF_ISOM_SUBTYPE_3GP_AMR_WB: case GF_ISOM_SUBTYPE_3GP_EVRC: case GF_ISOM_SUBTYPE_3GP_QCELP: case GF_ISOM_SUBTYPE_3GP_SMV: case GF_ISOM_SUBTYPE_3GP_H263: case GF_ISOM_BOX_TYPE_GHNT: case GF_ISOM_BOX_TYPE_RTP_STSD: case GF_ISOM_BOX_TYPE_SRTP_STSD: case GF_ISOM_BOX_TYPE_FDP_STSD: case GF_ISOM_BOX_TYPE_RRTP_STSD: case GF_ISOM_BOX_TYPE_RTCP_STSD: case GF_ISOM_BOX_TYPE_METX: case GF_ISOM_BOX_TYPE_METT: case GF_ISOM_BOX_TYPE_STXT: case GF_ISOM_BOX_TYPE_AVC1: case GF_ISOM_BOX_TYPE_AVC2: case GF_ISOM_BOX_TYPE_AVC3: case GF_ISOM_BOX_TYPE_AVC4: case GF_ISOM_BOX_TYPE_SVC1: case GF_ISOM_BOX_TYPE_MVC1: case GF_ISOM_BOX_TYPE_HVC1: case GF_ISOM_BOX_TYPE_HEV1: case GF_ISOM_BOX_TYPE_HVC2: case GF_ISOM_BOX_TYPE_HEV2: case GF_ISOM_BOX_TYPE_HVT1: case GF_ISOM_BOX_TYPE_LHV1: case GF_ISOM_BOX_TYPE_LHE1: case GF_ISOM_BOX_TYPE_AV01: case GF_ISOM_BOX_TYPE_VP08: case GF_ISOM_BOX_TYPE_VP09: case GF_ISOM_BOX_TYPE_AV1C: case GF_ISOM_BOX_TYPE_TX3G: case GF_ISOM_BOX_TYPE_TEXT: case GF_ISOM_BOX_TYPE_ENCT: case GF_ISOM_BOX_TYPE_DIMS: case GF_ISOM_BOX_TYPE_OPUS: case GF_ISOM_BOX_TYPE_AC3: case GF_ISOM_BOX_TYPE_EC3: case GF_ISOM_BOX_TYPE_LSR1: case GF_ISOM_BOX_TYPE_WVTT: case GF_ISOM_BOX_TYPE_STPP: case GF_ISOM_BOX_TYPE_SBTT: case GF_ISOM_BOX_TYPE_MP3: case GF_ISOM_BOX_TYPE_JPEG: case GF_ISOM_BOX_TYPE_PNG: case GF_ISOM_BOX_TYPE_JP2K: case GF_ISOM_BOX_TYPE_MHA1: case GF_ISOM_BOX_TYPE_MHA2: case GF_ISOM_BOX_TYPE_MHM1: case GF_ISOM_BOX_TYPE_MHM2: case GF_ISOM_BOX_TYPE_MJP2: case GF_QT_SUBTYPE_RAW_AUD: case GF_QT_SUBTYPE_TWOS: case GF_QT_SUBTYPE_SOWT: case GF_QT_SUBTYPE_FL32: case GF_QT_SUBTYPE_FL64: case GF_QT_SUBTYPE_IN24: case GF_QT_SUBTYPE_IN32: case GF_QT_SUBTYPE_ULAW: case GF_QT_SUBTYPE_ALAW: case GF_QT_SUBTYPE_ADPCM: case GF_QT_SUBTYPE_IMA_ADPCM: case GF_QT_SUBTYPE_DVCA: case GF_QT_SUBTYPE_QDMC: case GF_QT_SUBTYPE_QDMC2: case GF_QT_SUBTYPE_QCELP: case GF_QT_SUBTYPE_kMP3: case GF_QT_SUBTYPE_RAW_VID: case GF_QT_SUBTYPE_APCH: case GF_QT_SUBTYPE_APCO: case 
GF_QT_SUBTYPE_APCN: case GF_QT_SUBTYPE_APCS: case GF_QT_SUBTYPE_AP4X: case GF_QT_SUBTYPE_AP4H: case GF_QT_SUBTYPE_YUV422: case GF_QT_SUBTYPE_YUV444: case GF_QT_SUBTYPE_YUV422_10: case GF_QT_SUBTYPE_YUV444_10: case GF_ISOM_BOX_TYPE_IPCM: case GF_ISOM_BOX_TYPE_FPCM: continue; case GF_ISOM_BOX_TYPE_UNKNOWN: break; default: if (gf_box_valid_in_parent((GF_Box *) a, "stsd")) { continue; } GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Unexpected box %s in stsd!\n", gf_4cc_to_str(a->type))); continue; } //we are sure to have an unknown box here assert(a->type==GF_ISOM_BOX_TYPE_UNKNOWN); if (!a->data || (a->dataSize<8) ) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Sample description %s does not have at least 8 bytes!\n", gf_4cc_to_str(a->original_4cc) )); continue; } else if (a->dataSize > a->size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Sample description %s has wrong data size %d!\n", gf_4cc_to_str(a->original_4cc), a->dataSize)); continue; } #define STSD_SWITCH_BOX(_box) \ if (gf_bs_available(bs)) { \ u64 pos = gf_bs_get_position(bs); \ u32 count_subb = 0; \ gf_bs_set_cookie(bs, GF_ISOM_BS_COOKIE_NO_LOGS);\ e = gf_isom_box_array_read((GF_Box *) _box, bs, NULL); \ count_subb = _box->child_boxes ? gf_list_count(_box->child_boxes) : 0; \ if (!count_subb || e) { \ gf_bs_seek(bs, pos); \ _box->data_size = (u32) gf_bs_available(bs); \ if (_box->data_size) { \ _box->data = a->data; \ a->data = NULL; \ memmove(_box->data, _box->data + pos, _box->data_size); \ } \ } else { \ _box->data_size = 0; \ } \ } \ gf_bs_del(bs); \ if (!_box->data_size && _box->data) { \ gf_free(_box->data); \ _box->data = NULL; \ } \ _box->size = 0; \ _box->EntryType = a->original_4cc; \ gf_list_rem(trak->Media->information->sampleTable->SampleDescription->child_boxes, i-1); \ gf_isom_box_del((GF_Box *)a); \ gf_list_insert(trak->Media->information->sampleTable->SampleDescription->child_boxes, _box, i-1); \ /*only process visual or audio note: no need for new_box_parent here since we always store sample descriptions in child_boxes*/ switch (trak->Media->handler->handlerType) { case GF_ISOM_MEDIA_VISUAL: case GF_ISOM_MEDIA_AUXV: case GF_ISOM_MEDIA_PICT: { GF_GenericVisualSampleEntryBox *genv = (GF_GenericVisualSampleEntryBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_GNRV); bs = gf_bs_new(a->data, a->dataSize, GF_BITSTREAM_READ); genv->size = a->size-8; gf_isom_video_sample_entry_read((GF_VisualSampleEntryBox *) genv, bs); STSD_SWITCH_BOX(genv) } break; case GF_ISOM_MEDIA_AUDIO: { GF_GenericAudioSampleEntryBox *gena = (GF_GenericAudioSampleEntryBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_GNRA); gena->size = a->size-8; bs = gf_bs_new(a->data, a->dataSize, GF_BITSTREAM_READ); gf_isom_audio_sample_entry_read((GF_AudioSampleEntryBox *) gena, bs); STSD_SWITCH_BOX(gena) } break; default: { GF_GenericSampleEntryBox *genm = (GF_GenericSampleEntryBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_GNRM); genm->size = a->size-8; bs = gf_bs_new(a->data, a->dataSize, GF_BITSTREAM_READ); e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)genm, bs); if (e) return; STSD_SWITCH_BOX(genm) } break; } } } GF_Err trak_on_child_box(GF_Box *s, GF_Box *a) { GF_TrackBox *ptr = (GF_TrackBox *)s; if (!a) return GF_OK; switch(a->type) { case GF_ISOM_BOX_TYPE_TKHD: if (ptr->Header) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->Header = (GF_TrackHeaderBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_EDTS: if (ptr->editBox) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->editBox = (GF_EditBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_UDTA: if (ptr->udta) 
ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->udta = (GF_UserDataBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_META: if (ptr->meta) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->meta = (GF_MetaBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_TREF: if (ptr->References) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->References = (GF_TrackReferenceBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_MDIA: if (ptr->Media) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->Media = (GF_MediaBox *)a; ((GF_MediaBox *)a)->mediaTrack = ptr; return GF_OK; case GF_ISOM_BOX_TYPE_TRGR: if (ptr->groups) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->groups = (GF_TrackGroupBox *)a; return GF_OK; case GF_QT_BOX_TYPE_TAPT: if (ptr->Aperture) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->Aperture = (GF_Box *)a; return GF_OK; case GF_ISOM_BOX_TYPE_SENC: ptr->sample_encryption = (GF_SampleEncryptionBox*)a; return GF_OK; case GF_ISOM_BOX_TYPE_UUID: if (((GF_UnknownUUIDBox *)a)->internal_4cc == GF_ISOM_BOX_UUID_PSEC) { ptr->sample_encryption = (GF_SampleEncryptionBox*) a; return GF_OK; } } return GF_OK; } GF_Err trak_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_TrackBox *ptr = (GF_TrackBox *)s; e = gf_isom_box_array_read(s, bs, trak_on_child_box); if (e) return e; gf_isom_check_sample_desc(ptr); if (!ptr->Header) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing TrackHeaderBox\n")); return GF_ISOM_INVALID_FILE; } if (!ptr->Media) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MediaBox\n")); return GF_ISOM_INVALID_FILE; } if (!ptr->Media->information || !ptr->Media->information->sampleTable) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid MediaBox\n")); return GF_ISOM_INVALID_FILE; } if (!ptr->Media->information->sampleTable->SampleSize || (ptr->Media->information->sampleTable->SampleSize->sampleCount==0)) { if (ptr->Header->initial_duration) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[iso file] Track with no samples but duration defined, ignoring duration\n")); ptr->Header->initial_duration = 0; } } for (i=0; i<gf_list_count(ptr->Media->information->sampleTable->child_boxes); i++) { GF_Box *a = gf_list_get(ptr->Media->information->sampleTable->child_boxes, i); if ((a->type ==GF_ISOM_BOX_TYPE_UUID) && (((GF_UUIDBox *)a)->internal_4cc == GF_ISOM_BOX_UUID_PSEC)) { ptr->sample_encryption = (struct __sample_encryption_box *) a; break; } else if (a->type == GF_ISOM_BOX_TYPE_SENC) { ptr->sample_encryption = (struct __sample_encryption_box *)a; break; } } return e; } GF_Box *trak_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackBox, GF_ISOM_BOX_TYPE_TRAK); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err trak_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err trak_box_size(GF_Box *s) { u32 pos=0; GF_TrackBox *ptr = (GF_TrackBox *)s; if (ptr->sample_encryption && ptr->sample_encryption->load_needed) { GF_Err e = senc_Parse(ptr->moov->mov->movieFileMap->bs, ptr, NULL, ptr->sample_encryption); if (e) return e; } gf_isom_check_position(s, (GF_Box *)ptr->Header, &pos); gf_isom_check_position(s, (GF_Box *)ptr->Aperture, &pos); gf_isom_check_position(s, (GF_Box *)ptr->References, &pos); gf_isom_check_position(s, (GF_Box *)ptr->editBox, &pos); gf_isom_check_position(s, (GF_Box *)ptr->Media, &pos); gf_isom_check_position(s, (GF_Box *)ptr->meta, &pos); gf_isom_check_position(s, (GF_Box *)ptr->groups, &pos); gf_isom_check_position(s, (GF_Box *)ptr->udta, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stri_box_del(GF_Box *s) { GF_SubTrackInformationBox *ptr = (GF_SubTrackInformationBox *)s; 
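/* 'stri' payload: u16 switch_group, u16 alternate_group, u32 sub_track_id, then a u32 attribute_list[] filling the rest of the box - stri_box_read below derives attribute_count from ptr->size / 4 once the fixed 8 bytes have been consumed */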
if (ptr == NULL) return; if (ptr->attribute_list) gf_free(ptr->attribute_list); gf_free(ptr); } GF_Err stri_box_read(GF_Box *s, GF_BitStream *bs) { size_t i; GF_SubTrackInformationBox *ptr = (GF_SubTrackInformationBox *)s; ISOM_DECREASE_SIZE(ptr, 8) ptr->switch_group = gf_bs_read_u16(bs); ptr->alternate_group = gf_bs_read_u16(bs); ptr->sub_track_id = gf_bs_read_u32(bs); ptr->attribute_count = ptr->size / 4; GF_SAFE_ALLOC_N(ptr->attribute_list, (size_t)ptr->attribute_count, u32); if (!ptr->attribute_list) return GF_OUT_OF_MEM; for (i = 0; i < ptr->attribute_count; i++) { ISOM_DECREASE_SIZE(ptr, 4) ptr->attribute_list[i] = gf_bs_read_u32(bs); } return GF_OK; } GF_Box *stri_box_new() { ISOM_DECL_BOX_ALLOC(GF_SubTrackInformationBox, GF_ISOM_BOX_TYPE_STRI); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stri_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_SubTrackInformationBox *ptr = (GF_SubTrackInformationBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u16(bs, ptr->switch_group); gf_bs_write_u16(bs, ptr->alternate_group); gf_bs_write_u32(bs, ptr->sub_track_id); for (i = 0; i < ptr->attribute_count; i++) { gf_bs_write_u32(bs, ptr->attribute_list[i]); } return GF_OK; } GF_Err stri_box_size(GF_Box *s) { GF_SubTrackInformationBox *ptr = (GF_SubTrackInformationBox *)s; ptr->size += 8 + 4 * ptr->attribute_count; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stsg_box_del(GF_Box *s) { GF_SubTrackSampleGroupBox *ptr = (GF_SubTrackSampleGroupBox *)s; if (ptr == NULL) return; if (ptr->group_description_index) gf_free(ptr->group_description_index); gf_free(ptr); } GF_Err stsg_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_SubTrackSampleGroupBox *ptr = (GF_SubTrackSampleGroupBox *)s; ISOM_DECREASE_SIZE(s, 6); ptr->grouping_type = gf_bs_read_u32(bs); ptr->nb_groups = gf_bs_read_u16(bs); ISOM_DECREASE_SIZE(s, ptr->nb_groups*4); GF_SAFE_ALLOC_N(ptr->group_description_index, ptr->nb_groups, u32); if (!ptr->group_description_index) return GF_OUT_OF_MEM; for (i = 0; i < ptr->nb_groups; i++) { ptr->group_description_index[i] = gf_bs_read_u32(bs); } return GF_OK; } GF_Box *stsg_box_new() { ISOM_DECL_BOX_ALLOC(GF_SubTrackSampleGroupBox, GF_ISOM_BOX_TYPE_STSG); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stsg_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_SubTrackSampleGroupBox *ptr = (GF_SubTrackSampleGroupBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->grouping_type); gf_bs_write_u16(bs, ptr->nb_groups); for (i = 0; i < ptr->nb_groups; i++) { gf_bs_write_u32(bs, ptr->group_description_index[i]); } return GF_OK; } GF_Err stsg_box_size(GF_Box *s) { GF_SubTrackSampleGroupBox *ptr = (GF_SubTrackSampleGroupBox *)s; ptr->size += 6 + 4 * ptr->nb_groups; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void strk_box_del(GF_Box *s) { gf_free(s); } GF_Err strk_on_child_box(GF_Box *s, GF_Box *a) { GF_SubTrackBox *ptr = (GF_SubTrackBox *)s; if (!a) return GF_OK; switch (a->type) { case GF_ISOM_BOX_TYPE_STRI: if (ptr->info) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->info = (GF_SubTrackInformationBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_STRD: if (ptr->strd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->strd = a; return GF_OK; } return GF_OK; } GF_Err strk_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SubTrackBox *ptr = (GF_SubTrackBox *)s; e = gf_isom_box_array_read(s, bs, strk_on_child_box); if (e) return e; if (!ptr->info) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing 
SubTrackInformationBox\n")); return GF_ISOM_INVALID_FILE; } return GF_OK; } GF_Box *strk_box_new() { ISOM_DECL_BOX_ALLOC(GF_SubTrackBox, GF_ISOM_BOX_TYPE_STRK); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err strk_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err strk_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void tref_box_del(GF_Box *s) { GF_TrackReferenceBox *ptr = (GF_TrackReferenceBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err tref_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read_ex(s, bs, NULL, s->type); } GF_Box *tref_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackReferenceBox, GF_ISOM_BOX_TYPE_TREF); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tref_box_write(GF_Box *s, GF_BitStream *bs) { // GF_TrackReferenceBox *ptr = (GF_TrackReferenceBox *)s; return gf_isom_box_write_header(s, bs); } GF_Err tref_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void reftype_box_del(GF_Box *s) { GF_TrackReferenceTypeBox *ptr = (GF_TrackReferenceTypeBox *)s; if (!ptr) return; if (ptr->trackIDs) gf_free(ptr->trackIDs); gf_free(ptr); } GF_Err reftype_box_read(GF_Box *s, GF_BitStream *bs) { u32 bytesToRead; u32 i; GF_TrackReferenceTypeBox *ptr = (GF_TrackReferenceTypeBox *)s; bytesToRead = (u32) (ptr->size); if (!bytesToRead) return GF_OK; ptr->trackIDCount = (u32) (bytesToRead) / sizeof(u32); ptr->trackIDs = (GF_ISOTrackID *) gf_malloc(ptr->trackIDCount * sizeof(GF_ISOTrackID)); if (!ptr->trackIDs) return GF_OUT_OF_MEM; for (i = 0; i < ptr->trackIDCount; i++) { ptr->trackIDs[i] = gf_bs_read_u32(bs); } return GF_OK; } GF_Box *reftype_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackReferenceTypeBox, GF_ISOM_BOX_TYPE_REFT); return (GF_Box *)tmp; } GF_Err reftype_AddRefTrack(GF_TrackReferenceTypeBox *ref, GF_ISOTrackID trackID, u16 *outRefIndex) { u32 i; if (!ref || !trackID) return GF_BAD_PARAM; if (outRefIndex) *outRefIndex = 0; //don't add a dep if already here !! 
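/* e.g. adding trackID 3 twice: the second reftype_AddRefTrack(ref, 3, &idx) finds it in the scan below and simply returns its 1-based position in idx; otherwise the array is grown by one entry via gf_realloc and idx becomes the new trackIDCount */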
for (i = 0; i < ref->trackIDCount; i++) { if (ref->trackIDs[i] == trackID) { if (outRefIndex) *outRefIndex = i+1; return GF_OK; } } ref->trackIDs = (GF_ISOTrackID *) gf_realloc(ref->trackIDs, (ref->trackIDCount + 1) * sizeof(GF_ISOTrackID) ); if (!ref->trackIDs) return GF_OUT_OF_MEM; ref->trackIDs[ref->trackIDCount] = trackID; ref->trackIDCount++; if (outRefIndex) *outRefIndex = ref->trackIDCount; return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err reftype_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_TrackReferenceTypeBox *ptr = (GF_TrackReferenceTypeBox *)s; ptr->type = ptr->reference_type; e = gf_isom_box_write_header(s, bs); ptr->type = GF_ISOM_BOX_TYPE_REFT; if (e) return e; for (i = 0; i < ptr->trackIDCount; i++) { gf_bs_write_u32(bs, ptr->trackIDs[i]); } return GF_OK; } GF_Err reftype_box_size(GF_Box *s) { GF_TrackReferenceTypeBox *ptr = (GF_TrackReferenceTypeBox *)s; if (ptr->trackIDCount) ptr->size += (ptr->trackIDCount * sizeof(u32)); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void trex_box_del(GF_Box *s) { GF_TrackExtendsBox *ptr = (GF_TrackExtendsBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err trex_box_read(GF_Box *s, GF_BitStream *bs) { GF_TrackExtendsBox *ptr = (GF_TrackExtendsBox *)s; ISOM_DECREASE_SIZE(ptr, 20); ptr->trackID = gf_bs_read_u32(bs); ptr->def_sample_desc_index = gf_bs_read_u32(bs); ptr->def_sample_duration = gf_bs_read_u32(bs); ptr->def_sample_size = gf_bs_read_u32(bs); ptr->def_sample_flags = gf_bs_read_u32(bs); return GF_OK; } GF_Box *trex_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackExtendsBox, GF_ISOM_BOX_TYPE_TREX); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err trex_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TrackExtendsBox *ptr = (GF_TrackExtendsBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->trackID); //we always write 1 in trex default sample desc as using 0 breaks chrome/opera/... gf_bs_write_u32(bs, ptr->def_sample_desc_index ? 
ptr->def_sample_desc_index : 1); gf_bs_write_u32(bs, ptr->def_sample_duration); gf_bs_write_u32(bs, ptr->def_sample_size); gf_bs_write_u32(bs, ptr->def_sample_flags); return GF_OK; } GF_Err trex_box_size(GF_Box *s) { GF_TrackExtendsBox *ptr = (GF_TrackExtendsBox *)s; ptr->size += 20; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void trep_box_del(GF_Box *s) { GF_TrackExtensionPropertiesBox *ptr = (GF_TrackExtensionPropertiesBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err trep_box_read(GF_Box *s, GF_BitStream *bs) { GF_TrackExtensionPropertiesBox *ptr = (GF_TrackExtensionPropertiesBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->trackID = gf_bs_read_u32(bs); return gf_isom_box_array_read(s, bs, NULL); } GF_Box *trep_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackExtensionPropertiesBox, GF_ISOM_BOX_TYPE_TREP); tmp->child_boxes = gf_list_new(); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err trep_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TrackExtensionPropertiesBox *ptr = (GF_TrackExtensionPropertiesBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->trackID); return GF_OK; } GF_Err trep_box_size(GF_Box *s) { GF_TrackExtensionPropertiesBox *ptr = (GF_TrackExtensionPropertiesBox *)s; ptr->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void trun_box_del(GF_Box *s) { GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *)s; if (ptr == NULL) return; if (ptr->samples) gf_free(ptr->samples); if (ptr->cache) gf_bs_del(ptr->cache); if (ptr->sample_order) gf_free(ptr->sample_order); gf_free(ptr); } #ifdef GF_ENABLE_CTRN static u32 ctrn_field_size(u32 field_idx) { if (field_idx==3) return 4; return field_idx; } u32 gf_isom_ctrn_field_size_bits(u32 field_idx) { if (field_idx==3) return 32; return field_idx*8; } static u32 ctrn_read_flags(GF_BitStream *bs, u32 nbbits) { u32 val = gf_bs_read_int(bs, nbbits); if (nbbits==16) val <<= 16; else if (nbbits==8) val <<= 24; return val; } static GF_Err ctrn_box_read(GF_Box *s, GF_BitStream *bs) { u32 i, count, flags, first_idx=0; Bool inherit_dur, inherit_size, inherit_flags, inherit_ctso; GF_TrunEntry *ent; GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *)s; flags = ptr->flags; ptr->ctrn_flags = flags; ptr->flags = 0; ptr->sample_count = gf_bs_read_u16(bs); ISOM_DECREASE_SIZE(ptr, 2); if (flags & GF_ISOM_TRUN_DATA_OFFSET) { if (flags & GF_ISOM_CTRN_DATAOFFSET_16) { ptr->data_offset = gf_bs_read_u16(bs); ISOM_DECREASE_SIZE(ptr, 2); } else { ptr->data_offset = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); } ptr->flags |= GF_ISOM_TRUN_DATA_OFFSET; } if (flags & GF_ISOM_CTRN_CTSO_MULTIPLIER) { ptr->ctso_multiplier = gf_bs_read_u16(bs); ISOM_DECREASE_SIZE(ptr, 2); } /*no sample dur/sample_flag/size/ctso for first or following, create a pack sample */ if (! 
(flags & 0x00FFFF00)) { GF_SAFEALLOC(ent, GF_TrunEntry); if (!ent) return GF_OUT_OF_MEM; ent->nb_pack = ptr->sample_count; gf_list_add(ptr->entries, ent); return GF_OK; } /*allocate all entries*/ for (i=0; i<ptr->sample_count; i++) { GF_SAFEALLOC(ent, GF_TrunEntry); if (!ent) return GF_OUT_OF_MEM; gf_list_add(ptr->entries, ent); } //unpack flags ptr->ctrn_first_dur = (flags>>22) & 0x3; ptr->ctrn_first_size = (flags>>20) & 0x3; ptr->ctrn_first_sample_flags = (flags>>18) & 0x3; ptr->ctrn_first_ctts = (flags>>16) & 0x3; ptr->ctrn_dur = (flags>>14) & 0x3; ptr->ctrn_size = (flags>>12) & 0x3; ptr->ctrn_sample_flags = (flags>>10) & 0x3; ptr->ctrn_ctts = (flags>>8) & 0x3; inherit_dur = flags & GF_ISOM_CTRN_INHERIT_DUR; inherit_size = flags & GF_ISOM_CTRN_INHERIT_SIZE; inherit_flags = flags & GF_ISOM_CTRN_INHERIT_FLAGS; inherit_ctso = flags & GF_ISOM_CTRN_INHERIT_CTSO; if (flags & GF_ISOM_CTRN_FIRST_SAMPLE) { ent = gf_list_get(ptr->entries, 0); first_idx = 1; if (!inherit_dur && ptr->ctrn_first_dur) { ent->Duration = gf_bs_read_int(bs, gf_isom_ctrn_field_size_bits(ptr->ctrn_first_dur) ); ISOM_DECREASE_SIZE(ptr, ctrn_field_size(ptr->ctrn_first_dur) ); } if (!inherit_size && ptr->ctrn_first_size) { ent->size = gf_bs_read_int(bs, gf_isom_ctrn_field_size_bits(ptr->ctrn_first_size) ); ISOM_DECREASE_SIZE(ptr, ctrn_field_size(ptr->ctrn_first_size) ); } if (!inherit_flags && ptr->ctrn_first_sample_flags) { ent->flags = ctrn_read_flags(bs, gf_isom_ctrn_field_size_bits(ptr->ctrn_first_sample_flags) ); ISOM_DECREASE_SIZE(ptr, ctrn_field_size(ptr->ctrn_first_sample_flags) ); } if (!inherit_ctso && ptr->ctrn_first_ctts) { ent->CTS_Offset = gf_bs_read_int(bs, gf_isom_ctrn_field_size_bits(ptr->ctrn_first_ctts) ); ISOM_DECREASE_SIZE(ptr, ctrn_field_size(ptr->ctrn_first_ctts) ); if (ptr->ctso_multiplier) ent->CTS_Offset *= (s32) ptr->ctso_multiplier; } } count = ptr->sample_count - first_idx; if (!inherit_dur && ptr->ctrn_dur) { u32 nbbits = gf_isom_ctrn_field_size_bits(ptr->ctrn_dur); ISOM_DECREASE_SIZE(ptr, count * nbbits / 8); for (i=first_idx; i<ptr->sample_count; i++) { ent = gf_list_get(ptr->entries, i); ent->Duration = gf_bs_read_int(bs, nbbits); } } if (!inherit_size && ptr->ctrn_size) { u32 nbbits = gf_isom_ctrn_field_size_bits(ptr->ctrn_size); ISOM_DECREASE_SIZE(ptr, count * nbbits / 8); for (i=first_idx; i<ptr->sample_count; i++) { ent = gf_list_get(ptr->entries, i); ent->size = gf_bs_read_int(bs, nbbits); } } if (!inherit_flags && ptr->ctrn_sample_flags) { u32 nbbits = gf_isom_ctrn_field_size_bits(ptr->ctrn_sample_flags); ISOM_DECREASE_SIZE(ptr, count * nbbits / 8); for (i=first_idx; i<ptr->sample_count; i++) { ent = gf_list_get(ptr->entries, i); ent->flags = ctrn_read_flags(bs, nbbits); } } if (!inherit_ctso && ptr->ctrn_ctts) { u32 nbbits = gf_isom_ctrn_field_size_bits(ptr->ctrn_ctts); ISOM_DECREASE_SIZE(ptr, count * nbbits / 8); for (i=first_idx; i<ptr->sample_count; i++) { ent = gf_list_get(ptr->entries, i); ent->CTS_Offset = gf_bs_read_int(bs, nbbits); if (ptr->ctso_multiplier) ent->CTS_Offset *= (s32) ptr->ctso_multiplier; } } return GF_OK; } #endif GF_Err trun_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_TrunEntry *p; GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *)s; #ifdef GF_ENABLE_CTRN if (ptr->type == GF_ISOM_BOX_TYPE_CTRN) { ptr->type = GF_ISOM_BOX_TYPE_TRUN; ptr->use_ctrn = GF_TRUE; return ctrn_box_read(s, bs); } #endif //check this is a good file if ((ptr->flags & GF_ISOM_TRUN_FIRST_FLAG) && (ptr->flags & GF_ISOM_TRUN_FLAGS)) return GF_ISOM_INVALID_FILE; 
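/* trun payload read below: u32 sample_count, optional u32 data_offset (GF_ISOM_TRUN_DATA_OFFSET) and u32 first_sample_flags (GF_ISOM_TRUN_FIRST_FLAG), then one record per sample carrying 4 bytes for each of the DURATION/SIZE/FLAGS/CTS_OFFSET fields whose flag is set; when none of those per-sample flags is set, a single packed entry with nb_pack = sample_count is stored instead */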
ISOM_DECREASE_SIZE(ptr, 4); ptr->sample_count = gf_bs_read_u32(bs); //The rest depends on the flags if (ptr->flags & GF_ISOM_TRUN_DATA_OFFSET) { ISOM_DECREASE_SIZE(ptr, 4); ptr->data_offset = gf_bs_read_u32(bs); } if (ptr->flags & GF_ISOM_TRUN_FIRST_FLAG) { ISOM_DECREASE_SIZE(ptr, 4); ptr->first_sample_flags = gf_bs_read_u32(bs); } if (! (ptr->flags & (GF_ISOM_TRUN_DURATION | GF_ISOM_TRUN_SIZE | GF_ISOM_TRUN_FLAGS | GF_ISOM_TRUN_CTS_OFFSET) ) ) { ptr->samples = gf_malloc(sizeof(GF_TrunEntry)); if (!ptr->samples) return GF_OUT_OF_MEM; ptr->sample_alloc = ptr->nb_samples = 1; ptr->samples[0].nb_pack = ptr->sample_count; } else { //if we get here, at least one flag (so at least 4 bytes) is set, check size if (ptr->sample_count * 4 > ptr->size) { ISOM_DECREASE_SIZE(ptr, ptr->sample_count*4); } ptr->samples = gf_malloc(sizeof(GF_TrunEntry) * ptr->sample_count); if (!ptr->samples) return GF_OUT_OF_MEM; ptr->sample_alloc = ptr->nb_samples = ptr->sample_count; //read each entry (even though nothing may be written) for (i=0; i<ptr->sample_count; i++) { u32 trun_size = 0; p = &ptr->samples[i]; memset(p, 0, sizeof(GF_TrunEntry)); if (ptr->flags & GF_ISOM_TRUN_DURATION) { p->Duration = gf_bs_read_u32(bs); trun_size += 4; } if (ptr->flags & GF_ISOM_TRUN_SIZE) { p->size = gf_bs_read_u32(bs); trun_size += 4; } //SHOULDN'T BE USED IF GF_ISOM_TRUN_FIRST_FLAG IS DEFINED if (ptr->flags & GF_ISOM_TRUN_FLAGS) { p->flags = gf_bs_read_u32(bs); trun_size += 4; } if (ptr->flags & GF_ISOM_TRUN_CTS_OFFSET) { if (ptr->version==0) { p->CTS_Offset = (u32) gf_bs_read_u32(bs); } else { p->CTS_Offset = (s32) gf_bs_read_u32(bs); } trun_size += 4; } ISOM_DECREASE_SIZE(ptr, trun_size); } } /*todo parse sample reorder*/ if (ptr->size) { gf_bs_skip_bytes(bs, ptr->size); ptr->size = 0; } return GF_OK; } GF_Box *trun_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackFragmentRunBox, GF_ISOM_BOX_TYPE_TRUN); //NO FLAGS SET BY DEFAULT return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE #ifdef GF_ENABLE_CTRN static void ctrn_write_sample_flags(GF_BitStream *bs, u32 flags, u32 field_size) { if (!field_size) return; if (field_size==8) flags = flags>>24; else if (field_size==16) flags = flags>>16; gf_bs_write_int(bs, flags, field_size); } static void ctrn_write_ctso(GF_TrackFragmentRunBox *ctrn, GF_BitStream *bs, u32 ctso, u32 field_size) { if (!field_size) return; if (ctrn->ctso_multiplier) { gf_bs_write_int(bs, ctso / ctrn->ctso_multiplier, field_size); } else { gf_bs_write_int(bs, ctso, field_size); } } GF_Err ctrn_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i, count, flags; GF_TrunEntry *ent; GF_TrackFragmentRunBox *ctrn = (GF_TrackFragmentRunBox *) s; if (!s) return GF_BAD_PARAM; flags = ctrn->flags; ctrn->flags = ctrn->ctrn_flags; ctrn->type = GF_ISOM_BOX_TYPE_CTRN; e = gf_isom_full_box_write(s, bs); if (e) return e; ctrn->flags = flags; ctrn->type = GF_ISOM_BOX_TYPE_TRUN; gf_bs_write_u16(bs, ctrn->sample_count); if (ctrn->flags & GF_ISOM_TRUN_DATA_OFFSET) { if (ctrn->ctrn_flags & GF_ISOM_CTRN_DATAOFFSET_16) { gf_bs_write_u16(bs, ctrn->data_offset); } else { gf_bs_write_u32(bs, ctrn->data_offset); } } if (ctrn->ctso_multiplier) { gf_bs_write_u16(bs, ctrn->ctso_multiplier); } /*we always write first sample using first flags*/ ent = gf_list_get(ctrn->entries, 0); gf_bs_write_int(bs, ent->Duration, gf_isom_ctrn_field_size_bits(ctrn->ctrn_first_dur) ); gf_bs_write_int(bs, ent->size, gf_isom_ctrn_field_size_bits(ctrn->ctrn_first_size) ); ctrn_write_sample_flags(bs, ent->flags, 
gf_isom_ctrn_field_size_bits(ctrn->ctrn_first_sample_flags) ); ctrn_write_ctso(ctrn,bs, ent->CTS_Offset, gf_isom_ctrn_field_size_bits(ctrn->ctrn_first_ctts) ); count = gf_list_count(ctrn->entries); if (ctrn->ctrn_dur) { u32 nbbits = gf_isom_ctrn_field_size_bits(ctrn->ctrn_dur); for (i=1; i<count; i++) { GF_TrunEntry *a_ent = gf_list_get(ctrn->entries, i); gf_bs_write_int(bs, a_ent->Duration, nbbits); } } if (ctrn->ctrn_size) { u32 nbbits = gf_isom_ctrn_field_size_bits(ctrn->ctrn_size); for (i=1; i<count; i++) { GF_TrunEntry *a_ent = gf_list_get(ctrn->entries, i); gf_bs_write_int(bs, a_ent->size, nbbits); } } if (ctrn->ctrn_sample_flags) { u32 nbbits = gf_isom_ctrn_field_size_bits(ctrn->ctrn_sample_flags); for (i=1; i<count; i++) { GF_TrunEntry *a_ent = gf_list_get(ctrn->entries, i); ctrn_write_sample_flags(bs, a_ent->flags, nbbits); } } if (ctrn->ctrn_ctts) { u32 nbbits = gf_isom_ctrn_field_size_bits(ctrn->ctrn_ctts); for (i=1; i<count; i++) { GF_TrunEntry *a_ent = gf_list_get(ctrn->entries, i); ctrn_write_ctso(ctrn, bs, a_ent->CTS_Offset, nbbits); } } return GF_OK; } #endif GF_Err trun_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *) s; if (!s) return GF_BAD_PARAM; #ifdef GF_ENABLE_CTRN if (ptr->use_ctrn) return ctrn_box_write(s, bs); #endif e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->sample_count); //The rest depends on the flags if (ptr->flags & GF_ISOM_TRUN_DATA_OFFSET) { gf_bs_write_u32(bs, ptr->data_offset); } if (ptr->flags & GF_ISOM_TRUN_FIRST_FLAG) { gf_bs_write_u32(bs, ptr->first_sample_flags); } if (ptr->flags & (GF_ISOM_TRUN_DURATION | GF_ISOM_TRUN_SIZE | GF_ISOM_TRUN_FLAGS | GF_ISOM_TRUN_CTS_OFFSET) ) { for (i=0; i<ptr->nb_samples; i++) { GF_TrunEntry *p = &ptr->samples[i]; if (ptr->flags & GF_ISOM_TRUN_DURATION) { gf_bs_write_u32(bs, p->Duration); } if (ptr->flags & GF_ISOM_TRUN_SIZE) { gf_bs_write_u32(bs, p->size); } //SHOULDN'T BE USED IF GF_ISOM_TRUN_FIRST_FLAG IS DEFINED if (ptr->flags & GF_ISOM_TRUN_FLAGS) { gf_bs_write_u32(bs, p->flags); } if (ptr->flags & GF_ISOM_TRUN_CTS_OFFSET) { if (ptr->version==0) { gf_bs_write_u32(bs, p->CTS_Offset); } else { gf_bs_write_u32(bs, (u32) p->CTS_Offset); } } } } if (ptr->sample_order) { u32 nb_bits = 8; if (ptr->sample_count>0xFFFFFF) nb_bits = 32; else if (ptr->sample_count>0xFFFF) nb_bits = 24; else if (ptr->sample_count>0xFF) nb_bits = 16; for (i=0; i<ptr->sample_count; i++) { gf_bs_write_int(bs, ptr->sample_order[i], nb_bits); } } return GF_OK; } #ifdef GF_ENABLE_CTRN static u32 ctrn_sample_flags_to_index(u32 val) { if (!val) return 0; if (val & 0x0000FFFF) return 3; if (val & 0x00FF0000) return 2; return 1; } static u32 ctrn_u32_to_index(u32 val) { if (!val) return 0; if (val<=255) return 1; if (val<=65535) return 2; return 3; } static u32 ctrn_s32_to_index(s32 val) { if (!val) return 0; if (ABS(val)<=127) return 1; if (ABS(val)<=32767) return 2; return 3; } static u32 ctrn_ctts_to_index(GF_TrackFragmentRunBox *ctrn, s32 ctts) { if (!(ctrn->flags & GF_ISOM_TRUN_CTS_OFFSET)) return 0; if (!ctts) return 0; if (ctrn->version) { if (ctrn->ctso_multiplier) return ctrn_s32_to_index(ctts / ctrn->ctso_multiplier); return ctrn_s32_to_index(ctts); } assert(ctts>0); if (ctrn->ctso_multiplier) return ctrn_u32_to_index((u32)ctts / ctrn->ctso_multiplier); return ctrn_s32_to_index((u32)ctts); } static GF_Err ctrn_box_size(GF_TrackFragmentRunBox *ctrn) { Bool use_ctso_multi = GF_TRUE; u32 i, count; GF_TrunEntry *ent; ctrn->ctrn_flags = 0; 
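/* compact trun sizing: each per-sample field gets a 2-bit code (0 = absent, 1 = 8-bit, 2 = 16-bit, 3 = 32-bit) packed into ctrn_flags; ctrn_field_size() maps the code to 0/1/2/4 bytes and gf_isom_ctrn_field_size_bits() to 0/8/16/32 bits. The loops below pick, per field, the smallest code able to hold every entry, e.g. durations all <= 255 give code 1 and cost one byte per sample */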
ctrn->ctrn_first_dur = ctrn->ctrn_first_size = ctrn->ctrn_first_sample_flags = ctrn->ctrn_first_ctts = 0; ctrn->ctrn_dur = ctrn->ctrn_size = ctrn->ctrn_sample_flags = ctrn->ctrn_ctts = 0; ctrn->size += 2; //16 bits for sample count if (ctrn->flags & GF_ISOM_TRUN_DATA_OFFSET) { ctrn->ctrn_flags |= GF_ISOM_TRUN_DATA_OFFSET; if (ABS(ctrn->data_offset) < 32767) { ctrn->size += 2; ctrn->ctrn_flags |= GF_ISOM_CTRN_DATAOFFSET_16; } else ctrn->size += 4; } count = gf_list_count(ctrn->entries); if (ctrn->ctso_multiplier && (ctrn->flags & GF_ISOM_TRUN_CTS_OFFSET) && (ctrn->ctso_multiplier<=0xFFFF) ) { for (i=0; i<count; i++) { GF_TrunEntry *a_ent = gf_list_get(ctrn->entries, i); if (a_ent->CTS_Offset % ctrn->ctso_multiplier) { use_ctso_multi = GF_FALSE; break; } } } else { use_ctso_multi = GF_FALSE; } if (ctrn->use_inherit) { use_ctso_multi = GF_FALSE; ctrn->ctrn_flags |= 0xB0; //duration=1,size=0,flags=1,cts=1 << 4 } if (use_ctso_multi) { ctrn->size += 2; ctrn->ctrn_flags |= GF_ISOM_CTRN_CTSO_MULTIPLIER; } else { ctrn->ctso_multiplier = 0; } /*we always write first sample using first flags*/ ent = gf_list_get(ctrn->entries, 0); ctrn->ctrn_flags |= GF_ISOM_CTRN_FIRST_SAMPLE; if (!ctrn->use_inherit && (ctrn->flags & GF_ISOM_TRUN_DURATION)) { ctrn->ctrn_first_dur = ctrn_u32_to_index(ent->Duration); if (ctrn->ctrn_first_dur) { ctrn->size += ctrn_field_size(ctrn->ctrn_first_dur); ctrn->ctrn_flags |= ctrn->ctrn_first_dur<<22; } } if (ctrn->flags & GF_ISOM_TRUN_SIZE) { ctrn->ctrn_first_size = ctrn_u32_to_index(ent->size); if (ctrn->ctrn_first_size) { ctrn->size += ctrn_field_size(ctrn->ctrn_first_size); ctrn->ctrn_flags |= ctrn->ctrn_first_size<<20; } } if (!ctrn->use_inherit && (ctrn->flags & GF_ISOM_TRUN_FLAGS)) { ctrn->ctrn_first_sample_flags = ctrn_sample_flags_to_index(ent->flags); if (ctrn->ctrn_first_sample_flags) { ctrn->size += ctrn_field_size(ctrn->ctrn_first_sample_flags); ctrn->ctrn_flags |= ctrn->ctrn_first_sample_flags<<18; } } if (!ctrn->use_inherit && (ctrn->flags & GF_ISOM_TRUN_CTS_OFFSET)) { ctrn->ctrn_first_ctts = ctrn_ctts_to_index(ctrn, ent->CTS_Offset); if (ctrn->ctrn_first_ctts) { ctrn->size += ctrn_field_size(ctrn->ctrn_first_ctts); ctrn->ctrn_flags |= ctrn->ctrn_first_ctts<<16; } } for (i=1; i<count; i++) { u8 field_idx; GF_TrunEntry *a_ent = gf_list_get(ctrn->entries, i); if (!ctrn->use_inherit && (ctrn->flags & GF_ISOM_TRUN_DURATION)) { field_idx = ctrn_u32_to_index(a_ent->Duration); if (ctrn->ctrn_dur < field_idx) ctrn->ctrn_dur = field_idx; } if (ctrn->flags & GF_ISOM_TRUN_SIZE) { field_idx = ctrn_u32_to_index(a_ent->size); if (ctrn->ctrn_size < field_idx) ctrn->ctrn_size = field_idx; } if (!ctrn->use_inherit && (ctrn->flags & GF_ISOM_TRUN_FLAGS)) { field_idx = ctrn_sample_flags_to_index(a_ent->flags); if (ctrn->ctrn_sample_flags < field_idx) ctrn->ctrn_sample_flags = field_idx; } if (!ctrn->use_inherit) { field_idx = ctrn_ctts_to_index(ctrn, a_ent->CTS_Offset); if (ctrn->ctrn_ctts < field_idx) ctrn->ctrn_ctts = field_idx; } } count-=1; if (ctrn->ctrn_dur) { ctrn->size += count * ctrn_field_size(ctrn->ctrn_dur); ctrn->ctrn_flags |= ctrn->ctrn_dur<<14; } if (ctrn->ctrn_size) { ctrn->size += count * ctrn_field_size(ctrn->ctrn_size); ctrn->ctrn_flags |= ctrn->ctrn_size<<12; } if (ctrn->ctrn_sample_flags) { ctrn->size += count * ctrn_field_size(ctrn->ctrn_sample_flags); ctrn->ctrn_flags |= ctrn->ctrn_sample_flags<<10; } if (ctrn->ctrn_ctts) { ctrn->size += count * ctrn_field_size(ctrn->ctrn_ctts); ctrn->ctrn_flags |= ctrn->ctrn_ctts<<8; } return GF_OK; } #endif GF_Err 
trun_box_size(GF_Box *s) { GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *)s; #ifdef GF_ENABLE_CTRN if (ptr->use_ctrn) return ctrn_box_size(ptr); #endif ptr->size += 4; //The rest depends on the flags if (ptr->flags & GF_ISOM_TRUN_DATA_OFFSET) ptr->size += 4; if (ptr->flags & GF_ISOM_TRUN_FIRST_FLAG) ptr->size += 4; if (ptr->sample_order) { u32 nb_bytes = 1; if (ptr->sample_count>0xFFFFFF) nb_bytes = 4; else if (ptr->sample_count>0xFFFF) nb_bytes = 3; else if (ptr->sample_count>0xFF) nb_bytes = 2; ptr->size += ptr->sample_count*nb_bytes; } if (! (ptr->flags & (GF_ISOM_TRUN_DURATION | GF_ISOM_TRUN_SIZE | GF_ISOM_TRUN_FLAGS | GF_ISOM_TRUN_CTS_OFFSET) ) ) { return GF_OK; } //if nothing to do, this will be skipped automatically if (ptr->flags & GF_ISOM_TRUN_DURATION) ptr->size += 4*ptr->nb_samples; if (ptr->flags & GF_ISOM_TRUN_SIZE) ptr->size += 4*ptr->nb_samples; //SHOULDN'T BE USED IF GF_ISOM_TRUN_FIRST_FLAG IS DEFINED if (ptr->flags & GF_ISOM_TRUN_FLAGS) ptr->size += 4*ptr->nb_samples; if (ptr->flags & GF_ISOM_TRUN_CTS_OFFSET) ptr->size += 4*ptr->nb_samples; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ void tsro_box_del(GF_Box *s) { GF_TimeOffHintEntryBox *tsro = (GF_TimeOffHintEntryBox *)s; gf_free(tsro); } GF_Err tsro_box_read(GF_Box *s, GF_BitStream *bs) { GF_TimeOffHintEntryBox *ptr = (GF_TimeOffHintEntryBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->TimeOffset = gf_bs_read_u32(bs); return GF_OK; } GF_Box *tsro_box_new() { ISOM_DECL_BOX_ALLOC(GF_TimeOffHintEntryBox, GF_ISOM_BOX_TYPE_TSRO); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tsro_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TimeOffHintEntryBox *ptr = (GF_TimeOffHintEntryBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->TimeOffset); return GF_OK; } GF_Err tsro_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void udta_box_del(GF_Box *s) { u32 i; GF_UserDataMap *map; GF_UserDataBox *ptr = (GF_UserDataBox *)s; if (ptr == NULL) return; i=0; while ((map = (GF_UserDataMap *)gf_list_enum(ptr->recordList, &i))) { gf_isom_box_array_del(map->boxes); gf_free(map); } gf_list_del(ptr->recordList); gf_free(ptr); } GF_UserDataMap *udta_getEntry(GF_UserDataBox *ptr, u32 box_type, bin128 *uuid) { u32 i; GF_UserDataMap *map; if (ptr == NULL) return NULL; i=0; while ((map = (GF_UserDataMap *)gf_list_enum(ptr->recordList, &i))) { if (map->boxType == box_type) { if ((box_type != GF_ISOM_BOX_TYPE_UUID) || !uuid) return map; if (!memcmp(map->uuid, *uuid, 16)) return map; } } return NULL; } GF_Err udta_on_child_box(GF_Box *s, GF_Box *a) { GF_Err e; u32 box_type; GF_UserDataMap *map; GF_UserDataBox *ptr = (GF_UserDataBox *)s; if (!ptr) return GF_BAD_PARAM; if (!a) return GF_OK; //detach from parent list if any gf_list_del_item(ptr->child_boxes, a); /* for unknown udta boxes, we reference them by their original box type */ box_type = a->type; if (box_type == GF_ISOM_BOX_TYPE_UNKNOWN) { GF_UnknownBox* unkn = (GF_UnknownBox *)a; box_type = unkn->original_4cc; } map = udta_getEntry(ptr, box_type, (a->type==GF_ISOM_BOX_TYPE_UUID) ? 
& ((GF_UUIDBox *)a)->uuid : NULL); if (map == NULL) { map = (GF_UserDataMap *) gf_malloc(sizeof(GF_UserDataMap)); if (map == NULL) return GF_OUT_OF_MEM; memset(map, 0, sizeof(GF_UserDataMap)); map->boxType = box_type; if (a->type == GF_ISOM_BOX_TYPE_UUID) memcpy(map->uuid, ((GF_UUIDBox *)a)->uuid, 16); map->boxes = gf_list_new(); if (!map->boxes) { gf_free(map); return GF_OUT_OF_MEM; } e = gf_list_add(ptr->recordList, map); if (e) return e; } return gf_list_add(map->boxes, a); } GF_Err udta_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e = gf_isom_box_array_read(s, bs, udta_on_child_box); if (e) return e; if (s->size==4) { u32 val = gf_bs_read_u32(bs); s->size = 0; if (val) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] udta has 4 remaining bytes set to %08X but they should be 0\n", val)); } } return GF_OK; } GF_Box *udta_box_new() { ISOM_DECL_BOX_ALLOC(GF_UserDataBox, GF_ISOM_BOX_TYPE_UDTA); tmp->recordList = gf_list_new(); if (!tmp->recordList) { gf_free(tmp); return NULL; } return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err udta_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_UserDataMap *map; GF_UserDataBox *ptr = (GF_UserDataBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; i=0; while ((map = (GF_UserDataMap *)gf_list_enum(ptr->recordList, &i))) { //warning: here we are not passing the actual "parent" of the list //but the UDTA box. The parent itself is not an box, we don't care about it e = gf_isom_box_array_write(s, map->boxes, bs); if (e) return e; } return GF_OK; } GF_Err udta_box_size(GF_Box *s) { GF_Err e; u32 i; GF_UserDataMap *map; GF_UserDataBox *ptr = (GF_UserDataBox *)s; i=0; while ((map = (GF_UserDataMap *)gf_list_enum(ptr->recordList, &i))) { //warning: here we are not passing the actual "parent" of the list //but the UDTA box. 
The parent itself is not an box, we don't care about it e = gf_isom_box_array_size(s, map->boxes); if (e) return e; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void vmhd_box_del(GF_Box *s) { GF_VideoMediaHeaderBox *ptr = (GF_VideoMediaHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err vmhd_box_read(GF_Box *s, GF_BitStream *bs) { GF_VideoMediaHeaderBox *ptr = (GF_VideoMediaHeaderBox *)s; ISOM_DECREASE_SIZE(ptr, 8); ptr->reserved = gf_bs_read_u64(bs); return GF_OK; } GF_Box *vmhd_box_new() { ISOM_DECL_BOX_ALLOC(GF_VideoMediaHeaderBox, GF_ISOM_BOX_TYPE_VMHD); tmp->flags = 1; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err vmhd_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_VideoMediaHeaderBox *ptr = (GF_VideoMediaHeaderBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->reserved); return GF_OK; } GF_Err vmhd_box_size(GF_Box *s) { GF_VideoMediaHeaderBox *ptr = (GF_VideoMediaHeaderBox *)s; ptr->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void void_box_del(GF_Box *s) { gf_free(s); } GF_Err void_box_read(GF_Box *s, GF_BitStream *bs) { if (s->size) return GF_ISOM_INVALID_FILE; return GF_OK; } GF_Box *void_box_new() { ISOM_DECL_BOX_ALLOC(GF_Box, GF_ISOM_BOX_TYPE_VOID); return tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err void_box_write(GF_Box *s, GF_BitStream *bs) { gf_bs_write_u32(bs, 0); return GF_OK; } GF_Err void_box_size(GF_Box *s) { s->size = 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *pdin_box_new() { ISOM_DECL_BOX_ALLOC(GF_ProgressiveDownloadBox, GF_ISOM_BOX_TYPE_PDIN); return (GF_Box *)tmp; } void pdin_box_del(GF_Box *s) { GF_ProgressiveDownloadBox *ptr = (GF_ProgressiveDownloadBox*)s; if (ptr == NULL) return; if (ptr->rates) gf_free(ptr->rates); if (ptr->times) gf_free(ptr->times); gf_free(ptr); } GF_Err pdin_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_ProgressiveDownloadBox *ptr = (GF_ProgressiveDownloadBox*)s; ptr->count = (u32) (ptr->size) / 8; ptr->rates = (u32*)gf_malloc(sizeof(u32)*ptr->count); if (!ptr->rates) return GF_OUT_OF_MEM; ptr->times = (u32*)gf_malloc(sizeof(u32)*ptr->count); if (!ptr->times) return GF_OUT_OF_MEM; for (i=0; i<ptr->count; i++) { ptr->rates[i] = gf_bs_read_u32(bs); ptr->times[i] = gf_bs_read_u32(bs); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err pdin_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_ProgressiveDownloadBox *ptr = (GF_ProgressiveDownloadBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; for (i=0; i<ptr->count; i++) { gf_bs_write_u32(bs, ptr->rates[i]); gf_bs_write_u32(bs, ptr->times[i]); } return GF_OK; } GF_Err pdin_box_size(GF_Box *s) { GF_ProgressiveDownloadBox *ptr = (GF_ProgressiveDownloadBox *)s; ptr->size += 8*ptr->count; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *sdtp_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleDependencyTypeBox, GF_ISOM_BOX_TYPE_SDTP); return (GF_Box *)tmp; } void sdtp_box_del(GF_Box *s) { GF_SampleDependencyTypeBox *ptr = (GF_SampleDependencyTypeBox*)s; if (ptr == NULL) return; if (ptr->sample_info) gf_free(ptr->sample_info); gf_free(ptr); } GF_Err sdtp_box_read(GF_Box *s, GF_BitStream *bs) { GF_SampleDependencyTypeBox *ptr = (GF_SampleDependencyTypeBox*)s; /*out-of-order sdtp, assume no padding at the end*/ if (!ptr->sampleCount) ptr->sampleCount = (u32) ptr->size; else if (ptr->sampleCount > (u32) ptr->size) return GF_ISOM_INVALID_FILE; ptr->sample_info = (u8 *) gf_malloc(sizeof(u8)*ptr->sampleCount); if (!ptr->sample_info) return GF_OUT_OF_MEM; 
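	/* When sampleCount is still 0 here, the box was met before the sample
	 * count was known ("out-of-order sdtp" above) and the count is taken
	 * from the remaining payload, one flag byte per sample; the
	 * gf_bs_read_data() below therefore consumes exactly sampleCount bytes,
	 * which ISOM_DECREASE_SIZE then accounts for. */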
	ptr->sample_alloc = ptr->sampleCount;
	gf_bs_read_data(bs, (char*)ptr->sample_info, ptr->sampleCount);
	ISOM_DECREASE_SIZE(ptr, ptr->sampleCount);
	return GF_OK;
}

#ifndef GPAC_DISABLE_ISOM_WRITE

GF_Err sdtp_box_write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	GF_SampleDependencyTypeBox *ptr = (GF_SampleDependencyTypeBox *)s;
	e = gf_isom_full_box_write(s, bs);
	if (e) return e;
	gf_bs_write_data(bs, (char*)ptr->sample_info, ptr->sampleCount);
	return GF_OK;
}

GF_Err sdtp_box_size(GF_Box *s)
{
	GF_SampleDependencyTypeBox *ptr = (GF_SampleDependencyTypeBox *)s;
	ptr->size += ptr->sampleCount;
	return GF_OK;
}

#endif /*GPAC_DISABLE_ISOM_WRITE*/

GF_Box *pasp_box_new()
{
	ISOM_DECL_BOX_ALLOC(GF_PixelAspectRatioBox, GF_ISOM_BOX_TYPE_PASP);
	return (GF_Box *)tmp;
}

void pasp_box_del(GF_Box *s)
{
	GF_PixelAspectRatioBox *ptr = (GF_PixelAspectRatioBox*)s;
	if (ptr == NULL) return;
	gf_free(ptr);
}

GF_Err pasp_box_read(GF_Box *s, GF_BitStream *bs)
{
	GF_PixelAspectRatioBox *ptr = (GF_PixelAspectRatioBox*)s;
	ISOM_DECREASE_SIZE(ptr, 8);
	ptr->hSpacing = gf_bs_read_u32(bs);
	ptr->vSpacing = gf_bs_read_u32(bs);
	return GF_OK;
}

#ifndef GPAC_DISABLE_ISOM_WRITE

GF_Err pasp_box_write(GF_Box *s, GF_BitStream *bs)
{
	GF_PixelAspectRatioBox *ptr = (GF_PixelAspectRatioBox *)s;
	GF_Err e = gf_isom_box_write_header(s, bs);
	if (e) return e;
	gf_bs_write_u32(bs, ptr->hSpacing);
	gf_bs_write_u32(bs, ptr->vSpacing);
	return GF_OK;
}

GF_Err pasp_box_size(GF_Box *s)
{
	s->size += 8;
	return GF_OK;
}

#endif /*GPAC_DISABLE_ISOM_WRITE*/

GF_Box *clap_box_new()
{
	ISOM_DECL_BOX_ALLOC(GF_CleanApertureBox, GF_ISOM_BOX_TYPE_CLAP);
	return (GF_Box *)tmp;
}

void clap_box_del(GF_Box *s)
{
	GF_CleanApertureBox *ptr = (GF_CleanApertureBox*)s;
	if (ptr == NULL) return;
	gf_free(ptr);
}

GF_Err clap_box_read(GF_Box *s, GF_BitStream *bs)
{
	GF_CleanApertureBox *ptr = (GF_CleanApertureBox*)s;
	ISOM_DECREASE_SIZE(ptr, 32);
	ptr->cleanApertureWidthN = gf_bs_read_u32(bs);
	ptr->cleanApertureWidthD = gf_bs_read_u32(bs);
	ptr->cleanApertureHeightN = gf_bs_read_u32(bs);
	ptr->cleanApertureHeightD = gf_bs_read_u32(bs);
	ptr->horizOffN = gf_bs_read_u32(bs);
	ptr->horizOffD = gf_bs_read_u32(bs);
	ptr->vertOffN = gf_bs_read_u32(bs);
	ptr->vertOffD = gf_bs_read_u32(bs);
	return GF_OK;
}

#ifndef GPAC_DISABLE_ISOM_WRITE

GF_Err clap_box_write(GF_Box *s, GF_BitStream *bs)
{
	GF_CleanApertureBox *ptr = (GF_CleanApertureBox *)s;
	GF_Err e = gf_isom_box_write_header(s, bs);
	if (e) return e;
	gf_bs_write_u32(bs, ptr->cleanApertureWidthN);
	gf_bs_write_u32(bs, ptr->cleanApertureWidthD);
	gf_bs_write_u32(bs, ptr->cleanApertureHeightN);
	gf_bs_write_u32(bs, ptr->cleanApertureHeightD);
	gf_bs_write_u32(bs, ptr->horizOffN);
	gf_bs_write_u32(bs, ptr->horizOffD);
	gf_bs_write_u32(bs, ptr->vertOffN);
	gf_bs_write_u32(bs, ptr->vertOffD);
	return GF_OK;
}

GF_Err clap_box_size(GF_Box *s)
{
	s->size += 32;
	return GF_OK;
}

#endif /*GPAC_DISABLE_ISOM_WRITE*/

GF_Box *metx_box_new()
{
	//type is overridden by the box constructor
	ISOM_DECL_BOX_ALLOC(GF_MetaDataSampleEntryBox, GF_ISOM_BOX_TYPE_METX);
	gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp);
	return (GF_Box *)tmp;
}

void metx_box_del(GF_Box *s)
{
	GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox*)s;
	if (ptr == NULL) return;
	gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s);
	if (ptr->content_encoding) gf_free(ptr->content_encoding);
	if (ptr->xml_namespace) gf_free(ptr->xml_namespace);
	if (ptr->xml_schema_loc) gf_free(ptr->xml_schema_loc);
	if (ptr->mime_type) gf_free(ptr->mime_type);
	gf_free(ptr);
}

GF_Err metx_on_child_box(GF_Box *s, GF_Box *a)
{
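	/* Only the optional text config ('txtc') child is tracked here; the
	 * 'sinf' case is passed through untouched. The metx family of sample
	 * entries (metx/stpp vs mett/sbtt/stxt) then carries up to three
	 * NUL-terminated strings whose meaning depends on the entry type
	 * (content encoding, XML namespace/schema location, MIME type), as
	 * parsed in metx_box_read() below. */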
GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_SINF: return GF_OK; case GF_ISOM_BOX_TYPE_TXTC: //we allow the config box on metx if (ptr->config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->config = (GF_TextConfigBox *)a; break; } return GF_OK; } GF_Err metx_box_read(GF_Box *s, GF_BitStream *bs) { u32 size, i; GF_Err e; char *str; GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox*)s; e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)ptr, bs); if (e) return e; ISOM_DECREASE_SIZE(ptr, 8); size = (u32) ptr->size; str = gf_malloc(sizeof(char)*size); if (!str) return GF_OUT_OF_MEM; i=0; while (size) { str[i] = gf_bs_read_u8(bs); size--; if (!str[i]) { i++; break; } i++; } if (!size && i>1 && str[i-1]) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] metx read invalid string\n")); gf_free(str); return GF_ISOM_INVALID_FILE; } if (i>1) { if (ptr->type==GF_ISOM_BOX_TYPE_STPP) { ptr->xml_namespace = gf_strdup(str); } else { ptr->content_encoding = gf_strdup(str); } } i=0; while (size) { str[i] = gf_bs_read_u8(bs); size--; if (!str[i]) { i++; break; } i++; } if (!size && i>1 && str[i-1]) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] metx read invalid string\n")); gf_free(str); return GF_ISOM_INVALID_FILE; } if ((ptr->type==GF_ISOM_BOX_TYPE_METX) || (ptr->type==GF_ISOM_BOX_TYPE_STPP)) { if (i>1) { if (ptr->type==GF_ISOM_BOX_TYPE_STPP) { ptr->xml_schema_loc = gf_strdup(str); } else { ptr->xml_namespace = gf_strdup(str); } } i=0; while (size) { str[i] = gf_bs_read_u8(bs); size--; if (!str[i]) { i++; break; } i++; } if (!size && i>1 && str[i-1]) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] metx read invalid string\n")); gf_free(str); return GF_ISOM_INVALID_FILE; } if (i>1) { if (ptr->type==GF_ISOM_BOX_TYPE_STPP) { ptr->mime_type = gf_strdup(str); } else { ptr->xml_schema_loc = gf_strdup(str); } } } //mett, sbtt, stxt, stpp else { if (i>1) ptr->mime_type = gf_strdup(str); } ptr->size = size; gf_free(str); return gf_isom_box_array_read(s, bs, metx_on_child_box); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err metx_box_write(GF_Box *s, GF_BitStream *bs) { GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox *)s; GF_Err e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_data(bs, ptr->reserved, 6); gf_bs_write_u16(bs, ptr->dataReferenceIndex); if (ptr->type!=GF_ISOM_BOX_TYPE_STPP) { if (ptr->content_encoding) gf_bs_write_data(bs, ptr->content_encoding, (u32) strlen(ptr->content_encoding)); gf_bs_write_u8(bs, 0); } if ((ptr->type==GF_ISOM_BOX_TYPE_METX) || (ptr->type==GF_ISOM_BOX_TYPE_STPP)) { if (ptr->xml_namespace) gf_bs_write_data(bs, ptr->xml_namespace, (u32) strlen(ptr->xml_namespace)); gf_bs_write_u8(bs, 0); if (ptr->xml_schema_loc) gf_bs_write_data(bs, ptr->xml_schema_loc, (u32) strlen(ptr->xml_schema_loc)); gf_bs_write_u8(bs, 0); if (ptr->type==GF_ISOM_BOX_TYPE_STPP) { if (ptr->mime_type) gf_bs_write_data(bs, ptr->mime_type, (u32) strlen(ptr->mime_type)); gf_bs_write_u8(bs, 0); } } //mett, sbtt, stxt else { if (ptr->mime_type) gf_bs_write_data(bs, ptr->mime_type, (u32) strlen(ptr->mime_type)); gf_bs_write_u8(bs, 0); } return GF_OK; } GF_Err metx_box_size(GF_Box *s) { GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox *)s; ptr->size += 8; if (ptr->type!=GF_ISOM_BOX_TYPE_STPP) { if (ptr->content_encoding) ptr->size += strlen(ptr->content_encoding); ptr->size++; } if ((ptr->type==GF_ISOM_BOX_TYPE_METX) || (ptr->type==GF_ISOM_BOX_TYPE_STPP)) { if (ptr->xml_namespace) ptr->size += 
strlen(ptr->xml_namespace); ptr->size++; if (ptr->xml_schema_loc) ptr->size += strlen(ptr->xml_schema_loc); ptr->size++; if (ptr->type==GF_ISOM_BOX_TYPE_STPP) { if (ptr->mime_type) ptr->size += strlen(ptr->mime_type); ptr->size++; } } //mett, sbtt, stxt else { if (ptr->mime_type) ptr->size += strlen(ptr->mime_type); ptr->size++; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* SimpleTextSampleEntry */ GF_Box *txtc_box_new() { ISOM_DECL_BOX_ALLOC(GF_TextConfigBox, GF_ISOM_BOX_TYPE_TXTC); return (GF_Box *)tmp; } void txtc_box_del(GF_Box *s) { GF_TextConfigBox *ptr = (GF_TextConfigBox*)s; if (ptr == NULL) return; if (ptr->config) gf_free(ptr->config); gf_free(ptr); } GF_Err txtc_box_read(GF_Box *s, GF_BitStream *bs) { GF_TextConfigBox *ptr = (GF_TextConfigBox*)s; ptr->config = (char *)gf_malloc(sizeof(char)*((u32) ptr->size+1)); if (!ptr->config) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->config, (u32) ptr->size); ptr->config[ptr->size] = 0; return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err txtc_box_write(GF_Box *s, GF_BitStream *bs) { GF_TextConfigBox *ptr = (GF_TextConfigBox *)s; GF_Err e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->config) gf_bs_write_data(bs, ptr->config, (u32) strlen(ptr->config)); gf_bs_write_u8(bs, 0); return GF_OK; } GF_Err txtc_box_size(GF_Box *s) { GF_TextConfigBox *ptr = (GF_TextConfigBox *)s; if (ptr->config) ptr->size += strlen(ptr->config); ptr->size++; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *dac3_box_new() { ISOM_DECL_BOX_ALLOC(GF_AC3ConfigBox, GF_ISOM_BOX_TYPE_DAC3); return (GF_Box *)tmp; } GF_Box *dec3_box_new() { ISOM_DECL_BOX_ALLOC(GF_AC3ConfigBox, GF_ISOM_BOX_TYPE_DAC3); tmp->cfg.is_ec3 = 1; return (GF_Box *)tmp; } void dac3_box_del(GF_Box *s) { GF_AC3ConfigBox *ptr = (GF_AC3ConfigBox *)s; gf_free(ptr); } GF_Err gf_isom_ac3_config_parse_bs(GF_BitStream *bs, Bool is_ec3, GF_AC3Config *cfg); GF_Err dac3_box_read(GF_Box *s, GF_BitStream *bs) { GF_AC3ConfigBox *ptr = (GF_AC3ConfigBox *)s; if (ptr == NULL) return GF_BAD_PARAM; return gf_isom_ac3_config_parse_bs(bs, ptr->cfg.is_ec3, &ptr->cfg); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dac3_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_AC3ConfigBox *ptr = (GF_AC3ConfigBox *)s; if (ptr->cfg.is_ec3) s->type = GF_ISOM_BOX_TYPE_DEC3; e = gf_isom_box_write_header(s, bs); if (ptr->cfg.is_ec3) s->type = GF_ISOM_BOX_TYPE_DAC3; if (e) return e; if (ptr->cfg.is_ec3) { u32 i; gf_bs_write_int(bs, ptr->cfg.brcode, 13); gf_bs_write_int(bs, ptr->cfg.nb_streams - 1, 3); for (i=0; i<ptr->cfg.nb_streams; i++) { gf_bs_write_int(bs, ptr->cfg.streams[i].fscod, 2); gf_bs_write_int(bs, ptr->cfg.streams[i].bsid, 5); gf_bs_write_int(bs, ptr->cfg.streams[i].bsmod, 5); gf_bs_write_int(bs, ptr->cfg.streams[i].acmod, 3); gf_bs_write_int(bs, ptr->cfg.streams[i].lfon, 1); gf_bs_write_int(bs, 0, 3); gf_bs_write_int(bs, ptr->cfg.streams[i].nb_dep_sub, 4); if (ptr->cfg.streams[i].nb_dep_sub) { gf_bs_write_int(bs, ptr->cfg.streams[i].chan_loc, 9); } else { gf_bs_write_int(bs, 0, 1); } } } else { gf_bs_write_int(bs, ptr->cfg.streams[0].fscod, 2); gf_bs_write_int(bs, ptr->cfg.streams[0].bsid, 5); gf_bs_write_int(bs, ptr->cfg.streams[0].bsmod, 3); gf_bs_write_int(bs, ptr->cfg.streams[0].acmod, 3); gf_bs_write_int(bs, ptr->cfg.streams[0].lfon, 1); gf_bs_write_int(bs, ptr->cfg.brcode, 5); gf_bs_write_int(bs, 0, 5); } return GF_OK; } GF_Err dac3_box_size(GF_Box *s) { GF_AC3ConfigBox *ptr = (GF_AC3ConfigBox *)s; if (ptr->cfg.is_ec3) { u32 i; s->size += 2; for (i=0; i<ptr->cfg.nb_streams; i++) { 
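		/* Per independent substream: fscod(2) + bsid(5) + bsmod(5) + acmod(3)
		 * + lfon(1) + reserved(3) + num_dep_sub(4) = 23 bits, followed by
		 * either a 9-bit chan_loc (when dependent substreams are present) or
		 * a single reserved bit - i.e. 3 bytes, or 4 when nb_dep_sub is set -
		 * matching the dac3_box_write() bit layout above. The 2 bytes added
		 * before this loop cover the 13-bit data rate and the 3-bit
		 * substream count. */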
s->size += 3; if (ptr->cfg.streams[i].nb_dep_sub) s->size += 1; } } else { s->size += 3; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void lsrc_box_del(GF_Box *s) { GF_LASERConfigurationBox *ptr = (GF_LASERConfigurationBox *)s; if (ptr == NULL) return; if (ptr->hdr) gf_free(ptr->hdr); gf_free(ptr); } GF_Err lsrc_box_read(GF_Box *s, GF_BitStream *bs) { GF_LASERConfigurationBox *ptr = (GF_LASERConfigurationBox *)s; ptr->hdr_size = (u32) ptr->size; ptr->hdr = gf_malloc(sizeof(char)*ptr->hdr_size); if (!ptr->hdr) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->hdr, ptr->hdr_size); return GF_OK; } GF_Box *lsrc_box_new() { ISOM_DECL_BOX_ALLOC(GF_LASERConfigurationBox, GF_ISOM_BOX_TYPE_LSRC); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err lsrc_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_LASERConfigurationBox *ptr = (GF_LASERConfigurationBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_data(bs, ptr->hdr, ptr->hdr_size); return GF_OK; } GF_Err lsrc_box_size(GF_Box *s) { GF_LASERConfigurationBox *ptr = (GF_LASERConfigurationBox *)s; ptr->size += ptr->hdr_size; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void lsr1_box_del(GF_Box *s) { GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox *)s; if (ptr == NULL) return; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s); if (ptr->slc) gf_odf_desc_del((GF_Descriptor *)ptr->slc); gf_free(ptr); } GF_Err lsr1_on_child_box(GF_Box *s, GF_Box *a) { GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_LSRC: if (ptr->lsr_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->lsr_config = (GF_LASERConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_M4DS: if (ptr->descr) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->descr = (GF_MPEG4ExtensionDescriptorsBox *)a; break; } return GF_OK; } GF_Err lsr1_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox*)s; e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)ptr, bs); if (e) return e; ISOM_DECREASE_SIZE(ptr, 8); return gf_isom_box_array_read(s, bs, lsr1_on_child_box); } GF_Box *lsr1_box_new() { ISOM_DECL_BOX_ALLOC(GF_LASeRSampleEntryBox, GF_ISOM_BOX_TYPE_LSR1); gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err lsr1_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_data(bs, ptr->reserved, 6); gf_bs_write_u16(bs, ptr->dataReferenceIndex); return GF_OK; } GF_Err lsr1_box_size(GF_Box *s) { u32 pos=0; GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox *)s; s->size += 8; gf_isom_check_position(s, (GF_Box *)ptr->lsr_config, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void sidx_box_del(GF_Box *s) { GF_SegmentIndexBox *ptr = (GF_SegmentIndexBox *) s; if (ptr == NULL) return; if (ptr->refs) gf_free(ptr->refs); gf_free(ptr); } GF_Err sidx_box_read(GF_Box *s,GF_BitStream *bs) { u32 i; GF_SegmentIndexBox *ptr = (GF_SegmentIndexBox*) s; ISOM_DECREASE_SIZE(ptr, 8); ptr->reference_ID = gf_bs_read_u32(bs); ptr->timescale = gf_bs_read_u32(bs); if (ptr->version==0) { ISOM_DECREASE_SIZE(ptr, 8); ptr->earliest_presentation_time = gf_bs_read_u32(bs); ptr->first_offset = gf_bs_read_u32(bs); } else { ISOM_DECREASE_SIZE(ptr, 16); ptr->earliest_presentation_time = gf_bs_read_u64(bs); ptr->first_offset = gf_bs_read_u64(bs); } ISOM_DECREASE_SIZE(ptr, 4); gf_bs_read_u16(bs); /* reserved */ 
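	/* Each of the nb_refs entries that follow is 12 bytes: a 1-bit
	 * reference_type plus a 31-bit referenced size, a 32-bit
	 * subsegment_duration, and 1-bit starts_with_SAP + 3-bit SAP_type +
	 * 28-bit SAP_delta_time - hence the ISOM_DECREASE_SIZE(ptr, 12) per
	 * loop iteration below. */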
ptr->nb_refs = gf_bs_read_u16(bs); ptr->refs = gf_malloc(sizeof(GF_SIDXReference)*ptr->nb_refs); if (!ptr->refs) return GF_OUT_OF_MEM; for (i=0; i<ptr->nb_refs; i++) { ptr->refs[i].reference_type = gf_bs_read_int(bs, 1); ptr->refs[i].reference_size = gf_bs_read_int(bs, 31); ptr->refs[i].subsegment_duration = gf_bs_read_u32(bs); ptr->refs[i].starts_with_SAP = gf_bs_read_int(bs, 1); ptr->refs[i].SAP_type = gf_bs_read_int(bs, 3); ptr->refs[i].SAP_delta_time = gf_bs_read_int(bs, 28); ISOM_DECREASE_SIZE(ptr, 12); } return GF_OK; } GF_Box *sidx_box_new() { ISOM_DECL_BOX_ALLOC(GF_SegmentIndexBox, GF_ISOM_BOX_TYPE_SIDX); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err sidx_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_SegmentIndexBox *ptr = (GF_SegmentIndexBox*) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->reference_ID); gf_bs_write_u32(bs, ptr->timescale); if (ptr->version==0) { gf_bs_write_u32(bs, (u32) ptr->earliest_presentation_time); gf_bs_write_u32(bs, (u32) ptr->first_offset); } else { gf_bs_write_u64(bs, ptr->earliest_presentation_time); gf_bs_write_u64(bs, ptr->first_offset); } gf_bs_write_u16(bs, 0); gf_bs_write_u16(bs, ptr->nb_refs); for (i=0; i<ptr->nb_refs; i++ ) { gf_bs_write_int(bs, ptr->refs[i].reference_type, 1); gf_bs_write_int(bs, ptr->refs[i].reference_size, 31); gf_bs_write_u32(bs, ptr->refs[i].subsegment_duration); gf_bs_write_int(bs, ptr->refs[i].starts_with_SAP, 1); gf_bs_write_int(bs, ptr->refs[i].SAP_type, 3); gf_bs_write_int(bs, ptr->refs[i].SAP_delta_time, 28); } return GF_OK; } GF_Err sidx_box_size(GF_Box *s) { GF_SegmentIndexBox *ptr = (GF_SegmentIndexBox*) s; ptr->size += 12; if (ptr->version==0) { ptr->size += 8; } else { ptr->size += 16; } ptr->size += ptr->nb_refs * 12; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void ssix_box_del(GF_Box *s) { u32 i; GF_SubsegmentIndexBox *ptr = (GF_SubsegmentIndexBox *)s; if (ptr == NULL) return; if (ptr->subsegments) { for (i = 0; i < ptr->subsegment_alloc; i++) { GF_SubsegmentInfo *subsegment = &ptr->subsegments[i]; if (subsegment->ranges) gf_free(subsegment->ranges); } gf_free(ptr->subsegments); } gf_free(ptr); } GF_Err ssix_box_read(GF_Box *s, GF_BitStream *bs) { u32 i,j; GF_SubsegmentIndexBox *ptr = (GF_SubsegmentIndexBox*)s; ISOM_DECREASE_SIZE(ptr, 4) ptr->subsegment_count = gf_bs_read_u32(bs); //each subseg has at least one range_count (4 bytes), abort if not enough bytes (broken box) if (ptr->size < ptr->subsegment_count*4) return GF_ISOM_INVALID_FILE; GF_SAFE_ALLOC_N(ptr->subsegments, ptr->subsegment_count, GF_SubsegmentInfo); if (!ptr->subsegments) return GF_OUT_OF_MEM; for (i = 0; i < ptr->subsegment_count; i++) { GF_SubsegmentInfo *subseg = &ptr->subsegments[i]; ISOM_DECREASE_SIZE(ptr, 4) subseg->range_count = gf_bs_read_u32(bs); //each range is 4 bytes, abort if not enough bytes if (ptr->size < subseg->range_count*4) return GF_ISOM_INVALID_FILE; subseg->ranges = (GF_SubsegmentRangeInfo*) gf_malloc(sizeof(GF_SubsegmentRangeInfo) * subseg->range_count); if (!subseg->ranges) return GF_OUT_OF_MEM; for (j = 0; j < subseg->range_count; j++) { ISOM_DECREASE_SIZE(ptr, 4) subseg->ranges[j].level = gf_bs_read_u8(bs); subseg->ranges[j].range_size = gf_bs_read_u24(bs); } } return GF_OK; } GF_Box *ssix_box_new() { ISOM_DECL_BOX_ALLOC(GF_SubsegmentIndexBox, GF_ISOM_BOX_TYPE_SSIX); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err ssix_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i, j; GF_SubsegmentIndexBox *ptr = 
(GF_SubsegmentIndexBox*)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->subsegment_count); for (i = 0; i<ptr->subsegment_count; i++) { gf_bs_write_u32(bs, ptr->subsegments[i].range_count); for (j = 0; j < ptr->subsegments[i].range_count; j++) { gf_bs_write_u8(bs, ptr->subsegments[i].ranges[j].level); gf_bs_write_u24(bs, ptr->subsegments[i].ranges[j].range_size); } } return GF_OK; } GF_Err ssix_box_size(GF_Box *s) { u32 i; GF_SubsegmentIndexBox *ptr = (GF_SubsegmentIndexBox*)s; ptr->size += 4; for (i = 0; i < ptr->subsegment_count; i++) { ptr->size += 4 + 4 * ptr->subsegments[i].range_count; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void leva_box_del(GF_Box *s) { GF_LevelAssignmentBox *ptr = (GF_LevelAssignmentBox *)s; if (ptr == NULL) return; if (ptr->levels) gf_free(ptr->levels); gf_free(ptr); } GF_Err leva_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_LevelAssignmentBox *ptr = (GF_LevelAssignmentBox*)s; ISOM_DECREASE_SIZE(ptr, 1) ptr->level_count = gf_bs_read_u8(bs); //each level is at least 5 bytes if (ptr->size < ptr->level_count * 5) return GF_ISOM_INVALID_FILE; GF_SAFE_ALLOC_N(ptr->levels, ptr->level_count, GF_LevelAssignment); if (!ptr->levels) return GF_OUT_OF_MEM; for (i = 0; i < ptr->level_count; i++) { GF_LevelAssignment *level = &ptr->levels[i]; u8 tmp; if (!level || ptr->size < 5) return GF_BAD_PARAM; ISOM_DECREASE_SIZE(ptr, 5) level->track_id = gf_bs_read_u32(bs); tmp = gf_bs_read_u8(bs); level->padding_flag = tmp >> 7; level->type = tmp & 0x7F; if (level->type == 0) { ISOM_DECREASE_SIZE(ptr, 4) level->grouping_type = gf_bs_read_u32(bs); } else if (level->type == 1) { ISOM_DECREASE_SIZE(ptr, 8) level->grouping_type = gf_bs_read_u32(bs); level->grouping_type_parameter = gf_bs_read_u32(bs); } else if (level->type == 4) { ISOM_DECREASE_SIZE(ptr, 4) level->sub_track_id = gf_bs_read_u32(bs); } } return GF_OK; } GF_Box *leva_box_new() { ISOM_DECL_BOX_ALLOC(GF_LevelAssignmentBox, GF_ISOM_BOX_TYPE_LEVA); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err leva_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_LevelAssignmentBox *ptr = (GF_LevelAssignmentBox*)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->level_count); for (i = 0; i<ptr->level_count; i++) { gf_bs_write_u32(bs, ptr->levels[i].track_id); gf_bs_write_u8(bs, ptr->levels[i].padding_flag << 7 | (ptr->levels[i].type & 0x7F)); if (ptr->levels[i].type == 0) { gf_bs_write_u32(bs, ptr->levels[i].grouping_type); } else if (ptr->levels[i].type == 1) { gf_bs_write_u32(bs, ptr->levels[i].grouping_type); gf_bs_write_u32(bs, ptr->levels[i].grouping_type_parameter); } else if (ptr->levels[i].type == 4) { gf_bs_write_u32(bs, ptr->levels[i].sub_track_id); } } return GF_OK; } GF_Err leva_box_size(GF_Box *s) { u32 i; GF_LevelAssignmentBox *ptr = (GF_LevelAssignmentBox*)s; ptr->size += 1; for (i = 0; i < ptr->level_count; i++) { ptr->size += 5; if (ptr->levels[i].type == 0 || ptr->levels[i].type == 4) { ptr->size += 4; } else if (ptr->levels[i].type == 1) { ptr->size += 8; } } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *pcrb_box_new() { ISOM_DECL_BOX_ALLOC(GF_PcrInfoBox, GF_ISOM_BOX_TYPE_PCRB); return (GF_Box *)tmp; } void pcrb_box_del(GF_Box *s) { GF_PcrInfoBox *ptr = (GF_PcrInfoBox *) s; if (ptr == NULL) return; if (ptr->pcr_values) gf_free(ptr->pcr_values); gf_free(ptr); } GF_Err pcrb_box_read(GF_Box *s,GF_BitStream *bs) { u32 i; GF_PcrInfoBox *ptr = (GF_PcrInfoBox*) s; ISOM_DECREASE_SIZE(ptr, 4); ptr->subsegment_count = 
gf_bs_read_u32(bs); ptr->pcr_values = gf_malloc(sizeof(u64)*ptr->subsegment_count); if (!ptr->pcr_values) return GF_OUT_OF_MEM; for (i=0; i<ptr->subsegment_count; i++) { u64 data1 = gf_bs_read_u32(bs); u64 data2 = gf_bs_read_u16(bs); ISOM_DECREASE_SIZE(ptr, 6); ptr->pcr_values[i] = (data1 << 10) | (data2 >> 6); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err pcrb_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_PcrInfoBox *ptr = (GF_PcrInfoBox*) s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->subsegment_count); for (i=0; i<ptr->subsegment_count; i++ ) { u32 data1 = (u32) (ptr->pcr_values[i] >> 10); u16 data2 = (u16) (ptr->pcr_values[i] << 6); gf_bs_write_u32(bs, data1); gf_bs_write_u16(bs, data2); } return GF_OK; } GF_Err pcrb_box_size(GF_Box *s) { GF_PcrInfoBox *ptr = (GF_PcrInfoBox*) s; ptr->size += 4; ptr->size += ptr->subsegment_count * 6; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *subs_box_new() { ISOM_DECL_BOX_ALLOC(GF_SubSampleInformationBox, GF_ISOM_BOX_TYPE_SUBS); tmp->Samples = gf_list_new(); return (GF_Box *)tmp; } void subs_box_del(GF_Box *s) { GF_SubSampleInformationBox *ptr = (GF_SubSampleInformationBox *)s; if (ptr == NULL) return; while (gf_list_count(ptr->Samples)) { GF_SubSampleInfoEntry *pSamp; pSamp = (GF_SubSampleInfoEntry*)gf_list_get(ptr->Samples, 0); while (gf_list_count(pSamp->SubSamples)) { GF_SubSampleEntry *pSubSamp; pSubSamp = (GF_SubSampleEntry*) gf_list_get(pSamp->SubSamples, 0); gf_free(pSubSamp); gf_list_rem(pSamp->SubSamples, 0); } gf_list_del(pSamp->SubSamples); gf_free(pSamp); gf_list_rem(ptr->Samples, 0); } gf_list_del(ptr->Samples); gf_free(ptr); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err subs_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i, j, entry_count; u16 subsample_count; GF_SubSampleEntry *pSubSamp; GF_SubSampleInformationBox *ptr = (GF_SubSampleInformationBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; entry_count = gf_list_count(ptr->Samples); gf_bs_write_u32(bs, entry_count); for (i=0; i<entry_count; i++) { GF_SubSampleInfoEntry *pSamp = (GF_SubSampleInfoEntry*) gf_list_get(ptr->Samples, i); subsample_count = gf_list_count(pSamp->SubSamples); gf_bs_write_u32(bs, pSamp->sample_delta); gf_bs_write_u16(bs, subsample_count); for (j=0; j<subsample_count; j++) { pSubSamp = (GF_SubSampleEntry*) gf_list_get(pSamp->SubSamples, j); if (ptr->version == 1) { gf_bs_write_u32(bs, pSubSamp->subsample_size); } else { gf_bs_write_u16(bs, pSubSamp->subsample_size); } gf_bs_write_u8(bs, pSubSamp->subsample_priority); gf_bs_write_u8(bs, pSubSamp->discardable); gf_bs_write_u32(bs, pSubSamp->reserved); } } return e; } GF_Err subs_box_size(GF_Box *s) { GF_SubSampleInformationBox *ptr = (GF_SubSampleInformationBox *) s; u32 entry_count, i; u16 subsample_count; // add 4 byte for entry_count ptr->size += 4; entry_count = gf_list_count(ptr->Samples); for (i=0; i<entry_count; i++) { GF_SubSampleInfoEntry *pSamp = (GF_SubSampleInfoEntry*) gf_list_get(ptr->Samples, i); subsample_count = gf_list_count(pSamp->SubSamples); // 4 byte for sample_delta, 2 byte for subsample_count // and 6 + (4 or 2) bytes for each subsample ptr->size += 4 + 2 + subsample_count * (6 + (ptr->version==1 ? 
4 : 2)); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Err subs_box_read(GF_Box *s, GF_BitStream *bs) { GF_SubSampleInformationBox *ptr = (GF_SubSampleInformationBox *)s; u32 entry_count, i, j; u16 subsample_count; ISOM_DECREASE_SIZE(ptr, 4); entry_count = gf_bs_read_u32(bs); for (i=0; i<entry_count; i++) { u32 subs_size=0; GF_SubSampleInfoEntry *pSamp = (GF_SubSampleInfoEntry*) gf_malloc(sizeof(GF_SubSampleInfoEntry)); if (!pSamp) return GF_OUT_OF_MEM; memset(pSamp, 0, sizeof(GF_SubSampleInfoEntry)); pSamp->SubSamples = gf_list_new(); pSamp->sample_delta = gf_bs_read_u32(bs); subsample_count = gf_bs_read_u16(bs); subs_size=6; for (j=0; j<subsample_count; j++) { GF_SubSampleEntry *pSubSamp = (GF_SubSampleEntry*) gf_malloc(sizeof(GF_SubSampleEntry)); if (!pSubSamp) return GF_OUT_OF_MEM; memset(pSubSamp, 0, sizeof(GF_SubSampleEntry)); if (ptr->version==1) { pSubSamp->subsample_size = gf_bs_read_u32(bs); subs_size+=4; } else { pSubSamp->subsample_size = gf_bs_read_u16(bs); subs_size+=2; } pSubSamp->subsample_priority = gf_bs_read_u8(bs); pSubSamp->discardable = gf_bs_read_u8(bs); pSubSamp->reserved = gf_bs_read_u32(bs); subs_size+=6; gf_list_add(pSamp->SubSamples, pSubSamp); } gf_list_add(ptr->Samples, pSamp); ISOM_DECREASE_SIZE(ptr, subs_size); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_FRAGMENTS GF_Box *tfdt_box_new() { ISOM_DECL_BOX_ALLOC(GF_TFBaseMediaDecodeTimeBox, GF_ISOM_BOX_TYPE_TFDT); return (GF_Box *)tmp; } void tfdt_box_del(GF_Box *s) { gf_free(s); } /*this is using chpl format according to some NeroRecode samples*/ GF_Err tfdt_box_read(GF_Box *s,GF_BitStream *bs) { GF_TFBaseMediaDecodeTimeBox *ptr = (GF_TFBaseMediaDecodeTimeBox *)s; if (ptr->version==1) { ISOM_DECREASE_SIZE(ptr, 8); ptr->baseMediaDecodeTime = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 4); ptr->baseMediaDecodeTime = (u32) gf_bs_read_u32(bs); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tfdt_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TFBaseMediaDecodeTimeBox *ptr = (GF_TFBaseMediaDecodeTimeBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->version==1) { gf_bs_write_u64(bs, ptr->baseMediaDecodeTime); } else { gf_bs_write_u32(bs, (u32) ptr->baseMediaDecodeTime); } return GF_OK; } GF_Err tfdt_box_size(GF_Box *s) { GF_TFBaseMediaDecodeTimeBox *ptr = (GF_TFBaseMediaDecodeTimeBox *)s; if (ptr->baseMediaDecodeTime<=0xFFFFFFFF) { ptr->version = 0; ptr->size += 4; } else { ptr->version = 1; ptr->size += 8; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ GF_Box *rvcc_box_new() { ISOM_DECL_BOX_ALLOC(GF_RVCConfigurationBox, GF_ISOM_BOX_TYPE_RVCC); return (GF_Box *)tmp; } void rvcc_box_del(GF_Box *s) { gf_free(s); } GF_Err rvcc_box_read(GF_Box *s,GF_BitStream *bs) { GF_RVCConfigurationBox *ptr = (GF_RVCConfigurationBox*)s; ISOM_DECREASE_SIZE(ptr, 2); ptr->predefined_rvc_config = gf_bs_read_u16(bs); if (!ptr->predefined_rvc_config) { ISOM_DECREASE_SIZE(ptr, 2); ptr->rvc_meta_idx = gf_bs_read_u16(bs); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err rvcc_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_RVCConfigurationBox *ptr = (GF_RVCConfigurationBox*) s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u16(bs, ptr->predefined_rvc_config); if (!ptr->predefined_rvc_config) { gf_bs_write_u16(bs, ptr->rvc_meta_idx); } return GF_OK; } GF_Err rvcc_box_size(GF_Box *s) { GF_RVCConfigurationBox *ptr = (GF_RVCConfigurationBox *)s; ptr->size += 2; if (! 
ptr->predefined_rvc_config) ptr->size += 2; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *sbgp_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleGroupBox, GF_ISOM_BOX_TYPE_SBGP); return (GF_Box *)tmp; } void sbgp_box_del(GF_Box *a) { GF_SampleGroupBox *p = (GF_SampleGroupBox *)a; if (p->sample_entries) gf_free(p->sample_entries); gf_free(p); } GF_Err sbgp_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_SampleGroupBox *ptr = (GF_SampleGroupBox *)s; ISOM_DECREASE_SIZE(ptr, 8); ptr->grouping_type = gf_bs_read_u32(bs); if (ptr->version==1) { ISOM_DECREASE_SIZE(ptr, 4); ptr->grouping_type_parameter = gf_bs_read_u32(bs); } ptr->entry_count = gf_bs_read_u32(bs); if (ptr->size < sizeof(GF_SampleGroupEntry)*ptr->entry_count) return GF_ISOM_INVALID_FILE; ptr->sample_entries = gf_malloc(sizeof(GF_SampleGroupEntry)*ptr->entry_count); if (!ptr->sample_entries) return GF_OUT_OF_MEM; for (i=0; i<ptr->entry_count; i++) { ISOM_DECREASE_SIZE(ptr, 8); ptr->sample_entries[i].sample_count = gf_bs_read_u32(bs); ptr->sample_entries[i].group_description_index = gf_bs_read_u32(bs); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err sbgp_box_write(GF_Box *s, GF_BitStream *bs) { u32 i; GF_Err e; GF_SampleGroupBox *p = (GF_SampleGroupBox*)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, p->grouping_type); if (p->version==1) gf_bs_write_u32(bs, p->grouping_type_parameter); gf_bs_write_u32(bs, p->entry_count); for (i = 0; i<p->entry_count; i++ ) { gf_bs_write_u32(bs, p->sample_entries[i].sample_count); gf_bs_write_u32(bs, p->sample_entries[i].group_description_index); } return GF_OK; } GF_Err sbgp_box_size(GF_Box *s) { GF_SampleGroupBox *p = (GF_SampleGroupBox*)s; p->size += 8; if (p->grouping_type_parameter) p->version=1; if (p->version==1) p->size += 4; p->size += 8*p->entry_count; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ static void *sgpd_parse_entry(u32 grouping_type, GF_BitStream *bs, u32 entry_size, u32 *total_bytes) { Bool null_size_ok = GF_FALSE; GF_DefaultSampleGroupDescriptionEntry *def_ptr; switch (grouping_type) { case GF_ISOM_SAMPLE_GROUP_ROLL: case GF_ISOM_SAMPLE_GROUP_PROL: { GF_RollRecoveryEntry *ptr; GF_SAFEALLOC(ptr, GF_RollRecoveryEntry); if (!ptr) return NULL; ptr->roll_distance = gf_bs_read_int(bs, 16); *total_bytes = 2; return ptr; } case GF_ISOM_SAMPLE_GROUP_RAP: { GF_VisualRandomAccessEntry *ptr; GF_SAFEALLOC(ptr, GF_VisualRandomAccessEntry); if (!ptr) return NULL; ptr->num_leading_samples_known = gf_bs_read_int(bs, 1); ptr->num_leading_samples = gf_bs_read_int(bs, 7); *total_bytes = 1; return ptr; } case GF_ISOM_SAMPLE_GROUP_SAP: { GF_SAPEntry *ptr; GF_SAFEALLOC(ptr, GF_SAPEntry); if (!ptr) return NULL; ptr->dependent_flag = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 3); ptr->SAP_type = gf_bs_read_int(bs, 4); *total_bytes = 1; return ptr; } case GF_ISOM_SAMPLE_GROUP_SYNC: { GF_SYNCEntry *ptr; GF_SAFEALLOC(ptr, GF_SYNCEntry); if (!ptr) return NULL; gf_bs_read_int(bs, 2); ptr->NALU_type = gf_bs_read_int(bs, 6); *total_bytes = 1; return ptr; } case GF_ISOM_SAMPLE_GROUP_TELE: { GF_TemporalLevelEntry *ptr; GF_SAFEALLOC(ptr, GF_TemporalLevelEntry); if (!ptr) return NULL; ptr->level_independently_decodable = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 7); *total_bytes = 1; return ptr; } case GF_ISOM_SAMPLE_GROUP_SEIG: { GF_CENCSampleEncryptionGroupEntry *ptr; GF_SAFEALLOC(ptr, GF_CENCSampleEncryptionGroupEntry); if (!ptr) return NULL; gf_bs_read_u8(bs); //reserved ptr->crypt_byte_block = gf_bs_read_int(bs, 4); ptr->skip_byte_block = gf_bs_read_int(bs, 4); 
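		/* 'seig' (CENC sample encryption group) entry layout as read here:
		 * one reserved byte, 4-bit crypt_byte_block + 4-bit skip_byte_block,
		 * one byte isProtected, one byte Per_Sample_IV_size and a 16-byte
		 * KID (20 bytes so far); when isProtected==1 with a zero per-sample
		 * IV size, a constant IV size (8 or 16) and the constant IV itself
		 * follow. */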
ptr->IsProtected = gf_bs_read_u8(bs); ptr->Per_Sample_IV_size = gf_bs_read_u8(bs); gf_bs_read_data(bs, (char *)ptr->KID, 16); *total_bytes = 20; if ((ptr->IsProtected == 1) && !ptr->Per_Sample_IV_size) { ptr->constant_IV_size = gf_bs_read_u8(bs); if ((ptr->constant_IV_size != 8) && (ptr->constant_IV_size != 16)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] seig sample group have invalid constant_IV size\n")); gf_free(ptr); return NULL; } gf_bs_read_data(bs, (char *)ptr->constant_IV, ptr->constant_IV_size); *total_bytes += 1 + ptr->constant_IV_size; } if (!entry_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] seig sample group does not indicate entry size, deprecated in spec\n")); } return ptr; } case GF_ISOM_SAMPLE_GROUP_OINF: { GF_OperatingPointsInformation *ptr = gf_isom_oinf_new_entry(); u32 s = (u32) gf_bs_get_position(bs); gf_isom_oinf_read_entry(ptr, bs); *total_bytes = (u32) gf_bs_get_position(bs) - s; if (!entry_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] oinf sample group does not indicate entry size, deprecated in spec\n")); } return ptr; } case GF_ISOM_SAMPLE_GROUP_LINF: { GF_LHVCLayerInformation *ptr = gf_isom_linf_new_entry(); u32 s = (u32) gf_bs_get_position(bs); gf_isom_linf_read_entry(ptr, bs); *total_bytes = (u32) gf_bs_get_position(bs) - s; if (!entry_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] linf sample group does not indicate entry size, deprecated in spec\n")); } return ptr; } case GF_ISOM_SAMPLE_GROUP_TRIF: if (! entry_size) { u32 flags = gf_bs_peek_bits(bs, 24, 0); if (flags & 0x10000) entry_size=3; else { if (flags & 0x80000) entry_size=7; else entry_size=11; //have dependency list if (flags & 0x200000) { u32 nb_entries = gf_bs_peek_bits(bs, 16, entry_size); entry_size += 2 + 2*nb_entries; } } GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] trif sample group does not indicate entry size, deprecated in spec\n")); } break; case GF_ISOM_SAMPLE_GROUP_NALM: if (! entry_size) { u64 start = gf_bs_get_position(bs); Bool rle, large_size; u32 entry_count; gf_bs_read_int(bs, 6); large_size = gf_bs_read_int(bs, 1); rle = gf_bs_read_int(bs, 1); entry_count = gf_bs_read_int(bs, large_size ? 16 : 8); gf_bs_seek(bs, start); entry_size = 1 + (large_size ? 2 : 1); entry_size += entry_count * 2; if (rle) entry_size += entry_count * (large_size ? 2 : 1); GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] nalm sample group does not indicate entry size, deprecated in spec\n")); } break; case GF_ISOM_SAMPLE_GROUP_TSAS: case GF_ISOM_SAMPLE_GROUP_STSA: null_size_ok = GF_TRUE; break; //TODO, add support for these ones ? 
case GF_ISOM_SAMPLE_GROUP_TSCL: entry_size = 20; break; case GF_ISOM_SAMPLE_GROUP_LBLI: entry_size = 2; break; default: break; } if (!entry_size && !null_size_ok) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] %s sample group does not indicate entry size and is not implemented, cannot parse!\n", gf_4cc_to_str( grouping_type) )); return NULL; } GF_SAFEALLOC(def_ptr, GF_DefaultSampleGroupDescriptionEntry); if (!def_ptr) return NULL; if (entry_size) { def_ptr->length = entry_size; def_ptr->data = (u8 *) gf_malloc(sizeof(u8)*def_ptr->length); if (!def_ptr->data) { gf_free(def_ptr); return NULL; } gf_bs_read_data(bs, (char *) def_ptr->data, def_ptr->length); *total_bytes = entry_size; } return def_ptr; } static void sgpd_del_entry(u32 grouping_type, void *entry) { switch (grouping_type) { case GF_ISOM_SAMPLE_GROUP_SYNC: case GF_ISOM_SAMPLE_GROUP_ROLL: case GF_ISOM_SAMPLE_GROUP_PROL: case GF_ISOM_SAMPLE_GROUP_RAP: case GF_ISOM_SAMPLE_GROUP_SEIG: case GF_ISOM_SAMPLE_GROUP_TELE: case GF_ISOM_SAMPLE_GROUP_SAP: gf_free(entry); return; case GF_ISOM_SAMPLE_GROUP_OINF: gf_isom_oinf_del_entry(entry); return; case GF_ISOM_SAMPLE_GROUP_LINF: gf_isom_linf_del_entry(entry); return; default: { GF_DefaultSampleGroupDescriptionEntry *ptr = (GF_DefaultSampleGroupDescriptionEntry *)entry; if (ptr->data) gf_free(ptr->data); gf_free(ptr); } } } void sgpd_write_entry(u32 grouping_type, void *entry, GF_BitStream *bs) { switch (grouping_type) { case GF_ISOM_SAMPLE_GROUP_ROLL: case GF_ISOM_SAMPLE_GROUP_PROL: gf_bs_write_int(bs, ((GF_RollRecoveryEntry*)entry)->roll_distance, 16); return; case GF_ISOM_SAMPLE_GROUP_RAP: gf_bs_write_int(bs, ((GF_VisualRandomAccessEntry*)entry)->num_leading_samples_known, 1); gf_bs_write_int(bs, ((GF_VisualRandomAccessEntry*)entry)->num_leading_samples, 7); return; case GF_ISOM_SAMPLE_GROUP_SAP: gf_bs_write_int(bs, ((GF_SAPEntry*)entry)->dependent_flag, 1); gf_bs_write_int(bs, 0, 3); gf_bs_write_int(bs, ((GF_SAPEntry*)entry)->SAP_type, 4); return; case GF_ISOM_SAMPLE_GROUP_SYNC: gf_bs_write_int(bs, 0, 2); gf_bs_write_int(bs, ((GF_SYNCEntry*)entry)->NALU_type, 6); return; case GF_ISOM_SAMPLE_GROUP_TELE: gf_bs_write_int(bs, ((GF_TemporalLevelEntry*)entry)->level_independently_decodable, 1); gf_bs_write_int(bs, 0, 7); return; case GF_ISOM_SAMPLE_GROUP_SEIG: gf_bs_write_u8(bs, 0x0); gf_bs_write_int(bs, ((GF_CENCSampleEncryptionGroupEntry*)entry)->crypt_byte_block, 4); gf_bs_write_int(bs, ((GF_CENCSampleEncryptionGroupEntry*)entry)->skip_byte_block, 4); gf_bs_write_u8(bs, ((GF_CENCSampleEncryptionGroupEntry *)entry)->IsProtected); gf_bs_write_u8(bs, ((GF_CENCSampleEncryptionGroupEntry *)entry)->Per_Sample_IV_size); gf_bs_write_data(bs, (char *)((GF_CENCSampleEncryptionGroupEntry *)entry)->KID, 16); if ((((GF_CENCSampleEncryptionGroupEntry *)entry)->IsProtected == 1) && !((GF_CENCSampleEncryptionGroupEntry *)entry)->Per_Sample_IV_size) { gf_bs_write_u8(bs, ((GF_CENCSampleEncryptionGroupEntry *)entry)->constant_IV_size); gf_bs_write_data(bs, (char *)((GF_CENCSampleEncryptionGroupEntry *)entry)->constant_IV, ((GF_CENCSampleEncryptionGroupEntry *)entry)->constant_IV_size); } return; case GF_ISOM_SAMPLE_GROUP_OINF: gf_isom_oinf_write_entry(entry, bs); return; case GF_ISOM_SAMPLE_GROUP_LINF: gf_isom_linf_write_entry(entry, bs); return; default: { GF_DefaultSampleGroupDescriptionEntry *ptr = (GF_DefaultSampleGroupDescriptionEntry *)entry; if (ptr->length) gf_bs_write_data(bs, (char *) ptr->data, ptr->length); } } } #ifndef GPAC_DISABLE_ISOM_WRITE static u32 sgpd_size_entry(u32 grouping_type, 
void *entry) { switch (grouping_type) { case GF_ISOM_SAMPLE_GROUP_ROLL: case GF_ISOM_SAMPLE_GROUP_PROL: return 2; case GF_ISOM_SAMPLE_GROUP_TELE: case GF_ISOM_SAMPLE_GROUP_RAP: case GF_ISOM_SAMPLE_GROUP_SAP: case GF_ISOM_SAMPLE_GROUP_SYNC: return 1; case GF_ISOM_SAMPLE_GROUP_TSCL: return 20; case GF_ISOM_SAMPLE_GROUP_LBLI: return 2; case GF_ISOM_SAMPLE_GROUP_TSAS: case GF_ISOM_SAMPLE_GROUP_STSA: return 0; case GF_ISOM_SAMPLE_GROUP_SEIG: return ((((GF_CENCSampleEncryptionGroupEntry *)entry)->IsProtected == 1) && !((GF_CENCSampleEncryptionGroupEntry *)entry)->Per_Sample_IV_size) ? 21 + ((GF_CENCSampleEncryptionGroupEntry *)entry)->constant_IV_size : 20; case GF_ISOM_SAMPLE_GROUP_OINF: return gf_isom_oinf_size_entry(entry); case GF_ISOM_SAMPLE_GROUP_LINF: return gf_isom_linf_size_entry(entry); default: return ((GF_DefaultSampleGroupDescriptionEntry *)entry)->length; } } #endif GF_Box *sgpd_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleGroupDescriptionBox, GF_ISOM_BOX_TYPE_SGPD); /*version 0 is deprecated, use v1 by default*/ tmp->version = 1; tmp->group_descriptions = gf_list_new(); return (GF_Box *)tmp; } void sgpd_box_del(GF_Box *a) { GF_SampleGroupDescriptionBox *p = (GF_SampleGroupDescriptionBox *)a; while (gf_list_count(p->group_descriptions)) { void *ptr = gf_list_last(p->group_descriptions); sgpd_del_entry(p->grouping_type, ptr); gf_list_rem_last(p->group_descriptions); } gf_list_del(p->group_descriptions); gf_free(p); } GF_Err sgpd_box_read(GF_Box *s, GF_BitStream *bs) { u32 entry_count; GF_SampleGroupDescriptionBox *p = (GF_SampleGroupDescriptionBox *)s; ISOM_DECREASE_SIZE(p, 8); p->grouping_type = gf_bs_read_u32(bs); if (p->version>=1) { ISOM_DECREASE_SIZE(p, 4); p->default_length = gf_bs_read_u32(bs); } if (p->version>=2) { ISOM_DECREASE_SIZE(p, 4); p->default_description_index = gf_bs_read_u32(bs); } entry_count = gf_bs_read_u32(bs); if (entry_count>p->size) return GF_ISOM_INVALID_FILE; while (entry_count) { void *ptr; u32 parsed_bytes=0; u32 size = p->default_length; if ((p->version>=1) && !size) { size = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(p, 4); } ptr = sgpd_parse_entry(p->grouping_type, bs, size, &parsed_bytes); //don't return an error, just stop parsing so that we skip over the sgpd box if (!ptr) return GF_OK; ISOM_DECREASE_SIZE(p, parsed_bytes); gf_list_add(p->group_descriptions, ptr); entry_count--; } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err sgpd_box_write(GF_Box *s, GF_BitStream *bs) { u32 i; GF_SampleGroupDescriptionBox *p = (GF_SampleGroupDescriptionBox *)s; GF_Err e; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, p->grouping_type); if (p->version>=1) gf_bs_write_u32(bs, p->default_length); if (p->version>=2) gf_bs_write_u32(bs, p->default_description_index); gf_bs_write_u32(bs, gf_list_count(p->group_descriptions) ); for (i=0; i<gf_list_count(p->group_descriptions); i++) { void *ptr = gf_list_get(p->group_descriptions, i); if ((p->version >= 1) && !p->default_length) { u32 size = sgpd_size_entry(p->grouping_type, ptr); gf_bs_write_u32(bs, size); } sgpd_write_entry(p->grouping_type, ptr, bs); } return GF_OK; } GF_Err sgpd_box_size(GF_Box *s) { u32 i; GF_SampleGroupDescriptionBox *p = (GF_SampleGroupDescriptionBox *)s; p->size += 8; //we force all sample groups to version 1, v0 being deprecated p->version=1; p->size += 4; if (p->version>=2) p->size += 4; p->default_length = 0; for (i=0; i<gf_list_count(p->group_descriptions); i++) { void *ptr = gf_list_get(p->group_descriptions, i); u32 size = sgpd_size_entry(p->grouping_type, ptr); 
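		/* default_length is negotiated across entries: it is set from the
		 * first entry and cleared whenever an entry of a different size is
		 * found; with a zero default_length, each version>=1 description is
		 * prefixed by its own 32-bit length, accounted for after this loop
		 * and written in sgpd_box_write() above. */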
p->size += size; if (!p->default_length) { p->default_length = size; } else if (p->default_length != size) { p->default_length = 0; } } if (p->version>=1) { if (!p->default_length) p->size += gf_list_count(p->group_descriptions)*4; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void saiz_box_del(GF_Box *s) { GF_SampleAuxiliaryInfoSizeBox*ptr = (GF_SampleAuxiliaryInfoSizeBox*)s; if (ptr == NULL) return; if (ptr->sample_info_size) gf_free(ptr->sample_info_size); gf_free(ptr); } GF_Err saiz_box_read(GF_Box *s, GF_BitStream *bs) { GF_SampleAuxiliaryInfoSizeBox*ptr = (GF_SampleAuxiliaryInfoSizeBox*)s; if (ptr->flags & 1) { ISOM_DECREASE_SIZE(ptr, 8); ptr->aux_info_type = gf_bs_read_u32(bs); ptr->aux_info_type_parameter = gf_bs_read_u32(bs); } ISOM_DECREASE_SIZE(ptr, 5); ptr->default_sample_info_size = gf_bs_read_u8(bs); ptr->sample_count = gf_bs_read_u32(bs); if (ptr->default_sample_info_size == 0) { if (ptr->size < sizeof(u8)*ptr->sample_count) return GF_ISOM_INVALID_FILE; ptr->sample_info_size = gf_malloc(sizeof(u8)*ptr->sample_count); ptr->sample_alloc = ptr->sample_count; if (!ptr->sample_info_size) return GF_OUT_OF_MEM; ISOM_DECREASE_SIZE(ptr, ptr->sample_count); gf_bs_read_data(bs, (char *) ptr->sample_info_size, ptr->sample_count); } return GF_OK; } GF_Box *saiz_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleAuxiliaryInfoSizeBox, GF_ISOM_BOX_TYPE_SAIZ); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err saiz_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SampleAuxiliaryInfoSizeBox*ptr = (GF_SampleAuxiliaryInfoSizeBox*) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->flags & 1) { gf_bs_write_u32(bs, ptr->aux_info_type); gf_bs_write_u32(bs, ptr->aux_info_type_parameter); } gf_bs_write_u8(bs, ptr->default_sample_info_size); gf_bs_write_u32(bs, ptr->sample_count); if (!ptr->default_sample_info_size) { if (!ptr->sample_info_size) gf_bs_write_u8(bs, 0); else gf_bs_write_data(bs, (char *) ptr->sample_info_size, ptr->sample_count); } return GF_OK; } GF_Err saiz_box_size(GF_Box *s) { GF_SampleAuxiliaryInfoSizeBox *ptr = (GF_SampleAuxiliaryInfoSizeBox*)s; if (ptr->aux_info_type || ptr->aux_info_type_parameter) { ptr->flags |= 1; } if (ptr->flags & 1) ptr->size += 8; ptr->size += 5; if (ptr->default_sample_info_size==0) ptr->size += ptr->sample_count; return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE void saio_box_del(GF_Box *s) { GF_SampleAuxiliaryInfoOffsetBox *ptr = (GF_SampleAuxiliaryInfoOffsetBox*)s; if (ptr == NULL) return; if (ptr->offsets) gf_free(ptr->offsets); if (ptr->cached_data) gf_free(ptr->cached_data); gf_free(ptr); } GF_Err saio_box_read(GF_Box *s, GF_BitStream *bs) { GF_SampleAuxiliaryInfoOffsetBox *ptr = (GF_SampleAuxiliaryInfoOffsetBox *)s; if (ptr->flags & 1) { ISOM_DECREASE_SIZE(ptr, 8); ptr->aux_info_type = gf_bs_read_u32(bs); ptr->aux_info_type_parameter = gf_bs_read_u32(bs); } ISOM_DECREASE_SIZE(ptr, 4); ptr->entry_count = gf_bs_read_u32(bs); if (ptr->entry_count) { u32 i; if (ptr->size < (ptr->version == 0 ? 
4 : 8) * ptr->entry_count) return GF_ISOM_INVALID_FILE; ptr->offsets = gf_malloc(sizeof(u64)*ptr->entry_count); if (!ptr->offsets) return GF_OUT_OF_MEM; ptr->entry_alloc = ptr->entry_count; if (ptr->version==0) { ISOM_DECREASE_SIZE(ptr, 4*ptr->entry_count); for (i=0; i<ptr->entry_count; i++) ptr->offsets[i] = gf_bs_read_u32(bs); } else { ISOM_DECREASE_SIZE(ptr, 8*ptr->entry_count); for (i=0; i<ptr->entry_count; i++) ptr->offsets[i] = gf_bs_read_u64(bs); } } return GF_OK; } GF_Box *saio_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleAuxiliaryInfoOffsetBox, GF_ISOM_BOX_TYPE_SAIO); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err saio_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SampleAuxiliaryInfoOffsetBox *ptr = (GF_SampleAuxiliaryInfoOffsetBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->flags & 1) { gf_bs_write_u32(bs, ptr->aux_info_type); gf_bs_write_u32(bs, ptr->aux_info_type_parameter); } gf_bs_write_u32(bs, ptr->entry_count); if (ptr->entry_count) { u32 i; //store position in bitstream before writing data - offsets can be NULL if a single offset is rewritten later on (cf senc_box_write) ptr->offset_first_offset_field = gf_bs_get_position(bs); if (ptr->version==0) { if (!ptr->offsets) { gf_bs_write_u32(bs, 0); } else { for (i=0; i<ptr->entry_count; i++) gf_bs_write_u32(bs, (u32) ptr->offsets[i]); } } else { if (!ptr->offsets) { gf_bs_write_u64(bs, 0); } else { for (i=0; i<ptr->entry_count; i++) gf_bs_write_u64(bs, ptr->offsets[i]); } } } return GF_OK; } GF_Err saio_box_size(GF_Box *s) { GF_SampleAuxiliaryInfoOffsetBox *ptr = (GF_SampleAuxiliaryInfoOffsetBox*)s; if (ptr->aux_info_type || ptr->aux_info_type_parameter) { ptr->flags |= 1; } if (ptr->flags & 1) ptr->size += 8; ptr->size += 4; //a little optim here: in cenc, the saio always points to a single data block, only one entry is needed switch (ptr->aux_info_type) { case GF_ISOM_CENC_SCHEME: case GF_ISOM_CBC_SCHEME: case GF_ISOM_CENS_SCHEME: case GF_ISOM_CBCS_SCHEME: if (ptr->offsets) gf_free(ptr->offsets); ptr->offsets = NULL; ptr->entry_alloc = 0; ptr->entry_count = 1; break; } ptr->size += ((ptr->version==1) ? 8 : 4) * ptr->entry_count; return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE void prft_box_del(GF_Box *s) { gf_free(s); } GF_Err prft_box_read(GF_Box *s,GF_BitStream *bs) { GF_ProducerReferenceTimeBox *ptr = (GF_ProducerReferenceTimeBox *) s; ISOM_DECREASE_SIZE(ptr, 12); ptr->refTrackID = gf_bs_read_u32(bs); ptr->ntp = gf_bs_read_u64(bs); if (ptr->version==0) { ISOM_DECREASE_SIZE(ptr, 4); ptr->timestamp = gf_bs_read_u32(bs); } else { ISOM_DECREASE_SIZE(ptr, 8); ptr->timestamp = gf_bs_read_u64(bs); } return GF_OK; } GF_Box *prft_box_new() { ISOM_DECL_BOX_ALLOC(GF_ProducerReferenceTimeBox, GF_ISOM_BOX_TYPE_PRFT); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err prft_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ProducerReferenceTimeBox *ptr = (GF_ProducerReferenceTimeBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->refTrackID); gf_bs_write_u64(bs, ptr->ntp); if (ptr->version==0) { gf_bs_write_u32(bs, (u32) ptr->timestamp); } else { gf_bs_write_u64(bs, ptr->timestamp); } return GF_OK; } GF_Err prft_box_size(GF_Box *s) { GF_ProducerReferenceTimeBox *ptr = (GF_ProducerReferenceTimeBox*)s; ptr->size += 4+8+ (ptr->version ? 
8 : 4); return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *trgr_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackGroupBox, GF_ISOM_BOX_TYPE_TRGR); tmp->groups = gf_list_new(); if (!tmp->groups) { gf_free(tmp); return NULL; } return (GF_Box *)tmp; } void trgr_box_del(GF_Box *s) { GF_TrackGroupBox *ptr = (GF_TrackGroupBox *)s; if (ptr == NULL) return; gf_list_del(ptr->groups); gf_free(ptr); } GF_Err trgr_on_child_box(GF_Box *s, GF_Box *a) { GF_TrackGroupBox *ptr = (GF_TrackGroupBox *)s; return gf_list_add(ptr->groups, a); } GF_Err trgr_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read_ex(s, bs, trgr_on_child_box, s->type); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err trgr_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err trgr_box_size(GF_Box *s) { u32 pos=0; GF_TrackGroupBox *ptr = (GF_TrackGroupBox *) s; gf_isom_check_position_list(s, ptr->groups, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *trgt_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackGroupTypeBox, GF_ISOM_BOX_TYPE_TRGT); return (GF_Box *)tmp; } void trgt_box_del(GF_Box *s) { GF_TrackGroupTypeBox *ptr = (GF_TrackGroupTypeBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err trgt_box_read(GF_Box *s, GF_BitStream *bs) { GF_TrackGroupTypeBox *ptr = (GF_TrackGroupTypeBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->track_group_id = gf_bs_read_u32(bs); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err trgt_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TrackGroupTypeBox *ptr = (GF_TrackGroupTypeBox *) s; if (!s) return GF_BAD_PARAM; s->type = ptr->group_type; e = gf_isom_full_box_write(s, bs); s->type = GF_ISOM_BOX_TYPE_TRGT; if (e) return e; gf_bs_write_u32(bs, ptr->track_group_id); return GF_OK; } GF_Err trgt_box_size(GF_Box *s) { GF_TrackGroupBox *ptr = (GF_TrackGroupBox *)s; ptr->size+= 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *stvi_box_new() { ISOM_DECL_BOX_ALLOC(GF_StereoVideoBox, GF_ISOM_BOX_TYPE_STVI); return (GF_Box *)tmp; } void stvi_box_del(GF_Box *s) { GF_StereoVideoBox *ptr = (GF_StereoVideoBox *)s; if (ptr == NULL) return; if (ptr->stereo_indication_type) gf_free(ptr->stereo_indication_type); gf_free(ptr); } GF_Err stvi_box_read(GF_Box *s, GF_BitStream *bs) { GF_StereoVideoBox *ptr = (GF_StereoVideoBox *)s; ISOM_DECREASE_SIZE(ptr, 12); gf_bs_read_int(bs, 30); ptr->single_view_allowed = gf_bs_read_int(bs, 2); ptr->stereo_scheme = gf_bs_read_u32(bs); ptr->sit_len = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, ptr->sit_len); ptr->stereo_indication_type = gf_malloc(sizeof(char)*ptr->sit_len); if (!ptr->stereo_indication_type) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->stereo_indication_type, ptr->sit_len); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stvi_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_StereoVideoBox *ptr = (GF_StereoVideoBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, 0, 30); gf_bs_write_int(bs, ptr->single_view_allowed, 2); gf_bs_write_u32(bs, ptr->stereo_scheme); gf_bs_write_u32(bs, ptr->sit_len); gf_bs_write_data(bs, ptr->stereo_indication_type, ptr->sit_len); return GF_OK; } GF_Err stvi_box_size(GF_Box *s) { GF_StereoVideoBox *ptr = (GF_StereoVideoBox *)s; ptr->size+= 12 + ptr->sit_len; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *fiin_box_new() { ISOM_DECL_BOX_ALLOC(FDItemInformationBox, GF_ISOM_BOX_TYPE_FIIN); return (GF_Box *)tmp; } void fiin_box_del(GF_Box *s) { FDItemInformationBox *ptr = (FDItemInformationBox 
*)s; if (ptr == NULL) return; if (ptr->partition_entries) gf_list_del(ptr->partition_entries); gf_free(ptr); } GF_Err fiin_on_child_box(GF_Box *s, GF_Box *a) { FDItemInformationBox *ptr = (FDItemInformationBox *)s; switch(a->type) { case GF_ISOM_BOX_TYPE_PAEN: if (!ptr->partition_entries) ptr->partition_entries = gf_list_new(); return gf_list_add(ptr->partition_entries, a); case GF_ISOM_BOX_TYPE_SEGR: if (ptr->session_info) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->session_info = (FDSessionGroupBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_GITN: if (ptr->group_id_to_name) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->group_id_to_name = (GroupIdToNameBox *)a; return GF_OK; } return GF_OK; } GF_Err fiin_box_read(GF_Box *s, GF_BitStream *bs) { FDItemInformationBox *ptr = (FDItemInformationBox *)s; ISOM_DECREASE_SIZE(ptr, 2); gf_bs_read_u16(bs); return gf_isom_box_array_read(s, bs, fiin_on_child_box); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err fiin_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; FDItemInformationBox *ptr = (FDItemInformationBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u16(bs, gf_list_count(ptr->partition_entries) ); return GF_OK; } GF_Err fiin_box_size(GF_Box *s) { u32 pos=0; FDItemInformationBox *ptr = (FDItemInformationBox *) s; s->size+= 2; gf_isom_check_position_list(s, ptr->partition_entries, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *paen_box_new() { ISOM_DECL_BOX_ALLOC(FDPartitionEntryBox, GF_ISOM_BOX_TYPE_PAEN); return (GF_Box *)tmp; } void paen_box_del(GF_Box *s) { gf_free(s); } GF_Err paen_on_child_box(GF_Box *s, GF_Box *a) { FDPartitionEntryBox *ptr = (FDPartitionEntryBox *)s; switch(a->type) { case GF_ISOM_BOX_TYPE_FPAR: if (ptr->blocks_and_symbols) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->blocks_and_symbols = (FilePartitionBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_FECR: if (ptr->FEC_symbol_locations) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->FEC_symbol_locations = (FECReservoirBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_FIRE: if (ptr->File_symbol_locations) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->File_symbol_locations = (FileReservoirBox *)a; return GF_OK; } return GF_OK; } GF_Err paen_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, fiin_on_child_box); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err paen_box_write(GF_Box *s, GF_BitStream *bs) { if (!s) return GF_BAD_PARAM; return gf_isom_box_write_header(s, bs); } GF_Err paen_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *fpar_box_new() { ISOM_DECL_BOX_ALLOC(FilePartitionBox, GF_ISOM_BOX_TYPE_FPAR); return (GF_Box *)tmp; } void fpar_box_del(GF_Box *s) { FilePartitionBox *ptr = (FilePartitionBox *)s; if (ptr == NULL) return; if (ptr->scheme_specific_info) gf_free(ptr->scheme_specific_info); if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err gf_isom_read_null_terminated_string(GF_Box *s, GF_BitStream *bs, u64 size, char **out_str) { u32 len=10; u32 i=0; *out_str = gf_malloc(sizeof(char)*len); if (! 
*out_str) return GF_OUT_OF_MEM; while (1) { ISOM_DECREASE_SIZE(s, 1 ); (*out_str)[i] = gf_bs_read_u8(bs); if (!(*out_str)[i]) break; i++; if (i==len) { len += 10; *out_str = gf_realloc(*out_str, sizeof(char)*len); } if (gf_bs_available(bs) == 0) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] missing null character in null terminated string\n")); (*out_str)[i] = 0; return GF_OK; } if (i >= size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] string bigger than container, probably missing null character\n")); (*out_str)[i] = 0; return GF_OK; } } return GF_OK; } GF_Err fpar_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_Err e; FilePartitionBox *ptr = (FilePartitionBox *)s; ISOM_DECREASE_SIZE(ptr, ((ptr->version ? 4 : 2) + 12) ); ptr->itemID = gf_bs_read_int(bs, ptr->version ? 32 : 16); ptr->packet_payload_size = gf_bs_read_u16(bs); gf_bs_read_u8(bs); ptr->FEC_encoding_ID = gf_bs_read_u8(bs); ptr->FEC_instance_ID = gf_bs_read_u16(bs); ptr->max_source_block_length = gf_bs_read_u16(bs); ptr->encoding_symbol_length = gf_bs_read_u16(bs); ptr->max_number_of_encoding_symbols = gf_bs_read_u16(bs); e = gf_isom_read_null_terminated_string(s, bs, ptr->size, &ptr->scheme_specific_info); if (e) return e; ISOM_DECREASE_SIZE(ptr, (ptr->version ? 4 : 2) ); ptr->nb_entries = gf_bs_read_int(bs, ptr->version ? 32 : 16); if (ptr->nb_entries > UINT_MAX / 6) return GF_ISOM_INVALID_FILE; ISOM_DECREASE_SIZE(ptr, ptr->nb_entries * 6 ); GF_SAFE_ALLOC_N(ptr->entries, ptr->nb_entries, FilePartitionEntry); if (!ptr->entries) return GF_OUT_OF_MEM; for (i=0;i < ptr->nb_entries; i++) { ptr->entries[i].block_count = gf_bs_read_u16(bs); ptr->entries[i].block_size = gf_bs_read_u32(bs); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err fpar_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; FilePartitionBox *ptr = (FilePartitionBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, ptr->itemID, ptr->version ? 32 : 16); gf_bs_write_u16(bs, ptr->packet_payload_size); gf_bs_write_u8(bs, 0); gf_bs_write_u8(bs, ptr->FEC_encoding_ID); gf_bs_write_u16(bs, ptr->FEC_instance_ID); gf_bs_write_u16(bs, ptr->max_source_block_length); gf_bs_write_u16(bs, ptr->encoding_symbol_length); gf_bs_write_u16(bs, ptr->max_number_of_encoding_symbols); if (ptr->scheme_specific_info) { gf_bs_write_data(bs, ptr->scheme_specific_info, (u32)strlen(ptr->scheme_specific_info) ); } //null terminated string gf_bs_write_u8(bs, 0); gf_bs_write_int(bs, ptr->nb_entries, ptr->version ? 32 : 16); for (i=0;i < ptr->nb_entries; i++) { gf_bs_write_u16(bs, ptr->entries[i].block_count); gf_bs_write_u32(bs, ptr->entries[i].block_size); } return GF_OK; } GF_Err fpar_box_size(GF_Box *s) { FilePartitionBox *ptr = (FilePartitionBox *)s; ptr->size += 13 + (ptr->version ? 8 : 4); if (ptr->scheme_specific_info) ptr->size += strlen(ptr->scheme_specific_info); ptr->size+= ptr->nb_entries * 6; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *fecr_box_new() { ISOM_DECL_BOX_ALLOC(FECReservoirBox, GF_ISOM_BOX_TYPE_FECR); return (GF_Box *)tmp; } void fecr_box_del(GF_Box *s) { FECReservoirBox *ptr = (FECReservoirBox *)s; if (ptr == NULL) return; if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err fecr_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; FECReservoirBox *ptr = (FECReservoirBox *)s; ISOM_DECREASE_SIZE(ptr, (ptr->version ? 4 : 2) ); ptr->nb_entries = gf_bs_read_int(bs, ptr->version ? 32 : 16); ISOM_DECREASE_SIZE(ptr, ptr->nb_entries * (ptr->version ? 
8 : 6) ); GF_SAFE_ALLOC_N(ptr->entries, ptr->nb_entries, FECReservoirEntry); if (!ptr->entries) return GF_OUT_OF_MEM; for (i=0; i<ptr->nb_entries; i++) { ptr->entries[i].item_id = gf_bs_read_int(bs, ptr->version ? 32 : 16); ptr->entries[i].symbol_count = gf_bs_read_u32(bs); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err fecr_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; FECReservoirBox *ptr = (FECReservoirBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, ptr->nb_entries, ptr->version ? 32 : 16); for (i=0; i<ptr->nb_entries; i++) { gf_bs_write_int(bs, ptr->entries[i].item_id, ptr->version ? 32 : 16); gf_bs_write_u32(bs, ptr->entries[i].symbol_count); } return GF_OK; } GF_Err fecr_box_size(GF_Box *s) { FECReservoirBox *ptr = (FECReservoirBox *)s; ptr->size += (ptr->version ? 4 : 2) + ptr->nb_entries * (ptr->version ? 8 : 6); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *segr_box_new() { ISOM_DECL_BOX_ALLOC(FDSessionGroupBox, GF_ISOM_BOX_TYPE_SEGR); return (GF_Box *)tmp; } void segr_box_del(GF_Box *s) { u32 i; FDSessionGroupBox *ptr = (FDSessionGroupBox *)s; if (ptr == NULL) return; for (i=0; i<ptr->num_session_groups; i++) { if (ptr->session_groups[i].group_ids) gf_free(ptr->session_groups[i].group_ids); if (ptr->session_groups[i].channels) gf_free(ptr->session_groups[i].channels); } if (ptr->session_groups) gf_free(ptr->session_groups); gf_free(ptr); } GF_Err segr_box_read(GF_Box *s, GF_BitStream *bs) { u32 i, k; FDSessionGroupBox *ptr = (FDSessionGroupBox *)s; ISOM_DECREASE_SIZE(ptr, 2); ptr->num_session_groups = gf_bs_read_u16(bs); if (ptr->size < ptr->num_session_groups) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in segr\n", ptr->num_session_groups)); ptr->num_session_groups = 0; return GF_ISOM_INVALID_FILE; } GF_SAFE_ALLOC_N(ptr->session_groups, ptr->num_session_groups, SessionGroupEntry); if (!ptr->session_groups) return GF_OUT_OF_MEM; for (i=0; i<ptr->num_session_groups; i++) { ptr->session_groups[i].nb_groups = gf_bs_read_u8(bs); ISOM_DECREASE_SIZE(ptr, 1); ISOM_DECREASE_SIZE(ptr, ptr->session_groups[i].nb_groups*4); GF_SAFE_ALLOC_N(ptr->session_groups[i].group_ids, ptr->session_groups[i].nb_groups, u32); if (!ptr->session_groups[i].group_ids) return GF_OUT_OF_MEM; for (k=0; k<ptr->session_groups[i].nb_groups; k++) { ptr->session_groups[i].group_ids[k] = gf_bs_read_u32(bs); } ptr->session_groups[i].nb_channels = gf_bs_read_u16(bs); ISOM_DECREASE_SIZE(ptr, ptr->session_groups[i].nb_channels*4); GF_SAFE_ALLOC_N(ptr->session_groups[i].channels, ptr->session_groups[i].nb_channels, u32); if (!ptr->session_groups[i].channels) return GF_OUT_OF_MEM; for (k=0; k<ptr->session_groups[i].nb_channels; k++) { ptr->session_groups[i].channels[k] = gf_bs_read_u32(bs); } } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err segr_box_write(GF_Box *s, GF_BitStream *bs) { u32 i, k; GF_Err e; FDSessionGroupBox *ptr = (FDSessionGroupBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u16(bs, ptr->num_session_groups); for (i=0; i<ptr->num_session_groups; i++) { gf_bs_write_u8(bs, ptr->session_groups[i].nb_groups); for (k=0; k<ptr->session_groups[i].nb_groups; k++) { gf_bs_write_u32(bs, ptr->session_groups[i].group_ids[k]); } gf_bs_write_u16(bs, ptr->session_groups[i].nb_channels); for (k=0; k<ptr->session_groups[i].nb_channels; k++) { gf_bs_write_u32(bs, ptr->session_groups[i].channels[k]); } } return GF_OK; } GF_Err 
segr_box_size(GF_Box *s) { u32 i; FDSessionGroupBox *ptr = (FDSessionGroupBox *)s; ptr->size += 2; for (i=0; i<ptr->num_session_groups; i++) { ptr->size += 1 + 4*ptr->session_groups[i].nb_groups; ptr->size += 2 + 4*ptr->session_groups[i].nb_channels; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *gitn_box_new() { ISOM_DECL_BOX_ALLOC(GroupIdToNameBox, GF_ISOM_BOX_TYPE_GITN); return (GF_Box *)tmp; } void gitn_box_del(GF_Box *s) { u32 i; GroupIdToNameBox *ptr = (GroupIdToNameBox *)s; if (ptr == NULL) return; for (i=0; i<ptr->nb_entries; i++) { if (ptr->entries[i].name) gf_free(ptr->entries[i].name); } if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err gitn_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_Err e; GroupIdToNameBox *ptr = (GroupIdToNameBox *)s; ISOM_DECREASE_SIZE(ptr, 2); ptr->nb_entries = gf_bs_read_u16(bs); if (ptr->size < ptr->nb_entries*4) return GF_ISOM_INVALID_FILE; GF_SAFE_ALLOC_N(ptr->entries, ptr->nb_entries, GroupIdNameEntry); if (!ptr->entries) return GF_OUT_OF_MEM; for (i=0; i<ptr->nb_entries; i++) { ISOM_DECREASE_SIZE(ptr, 4); ptr->entries[i].group_id = gf_bs_read_u32(bs); e = gf_isom_read_null_terminated_string(s, bs, ptr->size, &ptr->entries[i].name); if (e) return e; } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err gitn_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GroupIdToNameBox *ptr = (GroupIdToNameBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u16(bs, ptr->nb_entries); for (i=0; i<ptr->nb_entries; i++) { gf_bs_write_u32(bs, ptr->entries[i].group_id); if (ptr->entries[i].name) gf_bs_write_data(bs, ptr->entries[i].name, (u32)strlen(ptr->entries[i].name) ); gf_bs_write_u8(bs, 0); } return GF_OK; } GF_Err gitn_box_size(GF_Box *s) { u32 i; GroupIdToNameBox *ptr = (GroupIdToNameBox *)s; ptr->size += 2; for (i=0; i<ptr->nb_entries; i++) { ptr->size += 5; if (ptr->entries[i].name) ptr->size += strlen(ptr->entries[i].name); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #ifndef GPAC_DISABLE_ISOM_HINTING GF_Box *fdpa_box_new() { ISOM_DECL_BOX_ALLOC(GF_FDpacketBox, GF_ISOM_BOX_TYPE_FDPA); return (GF_Box *)tmp; } void fdpa_box_del(GF_Box *s) { u32 i; GF_FDpacketBox *ptr = (GF_FDpacketBox *)s; if (ptr == NULL) return; if (ptr->headers) { for (i=0; i<ptr->header_ext_count; i++) { if (ptr->headers[i].data) gf_free(ptr->headers[i].data); } gf_free(ptr->headers); } gf_free(ptr); } GF_Err fdpa_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_FDpacketBox *ptr = (GF_FDpacketBox *)s; ISOM_DECREASE_SIZE(ptr, 3); ptr->info.sender_current_time_present = gf_bs_read_int(bs, 1); ptr->info.expected_residual_time_present = gf_bs_read_int(bs, 1); ptr->info.session_close_bit = gf_bs_read_int(bs, 1); ptr->info.object_close_bit = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 4); ptr->info.transport_object_identifier = gf_bs_read_u16(bs); ISOM_DECREASE_SIZE(ptr, 2); ptr->header_ext_count = gf_bs_read_u16(bs); if (ptr->size < ptr->header_ext_count*2) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in fdpa\n", ptr->header_ext_count)); return GF_ISOM_INVALID_FILE; } GF_SAFE_ALLOC_N(ptr->headers, ptr->header_ext_count, GF_LCTheaderExtension); if (!ptr->headers) return GF_OUT_OF_MEM; for (i=0; i<ptr->header_ext_count; i++) { ptr->headers[i].header_extension_type = gf_bs_read_u8(bs); ISOM_DECREASE_SIZE(ptr, 1); if (ptr->headers[i].header_extension_type > 127) { ISOM_DECREASE_SIZE(ptr, 3); gf_bs_read_data(bs, (char *) ptr->headers[i].content, 3); } else { 
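/* LCT header extensions with type > 127 carry a fixed 3-byte payload (read just above); smaller types are followed by a length byte expressed in 32-bit words, so the variable payload read below is 4*len - 2 bytes once the type and length bytes already consumed are deducted */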
ISOM_DECREASE_SIZE(ptr, 1); ptr->headers[i].data_length = gf_bs_read_u8(bs); if (ptr->headers[i].data_length) { ptr->headers[i].data_length = 4*ptr->headers[i].data_length - 2; if (ptr->size < sizeof(char) * ptr->headers[i].data_length) return GF_ISOM_INVALID_FILE; ptr->headers[i].data = gf_malloc(sizeof(char) * ptr->headers[i].data_length); if (!ptr->headers[i].data) return GF_OUT_OF_MEM; ISOM_DECREASE_SIZE(ptr, ptr->headers[i].data_length); gf_bs_read_data(bs, ptr->headers[i].data, ptr->headers[i].data_length); } } } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err fdpa_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_FDpacketBox *ptr = (GF_FDpacketBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_int(bs, ptr->info.sender_current_time_present, 1); gf_bs_write_int(bs, ptr->info.expected_residual_time_present, 1); gf_bs_write_int(bs, ptr->info.session_close_bit, 1); gf_bs_write_int(bs, ptr->info.object_close_bit, 1); gf_bs_write_int(bs, 0, 4); gf_bs_write_u16(bs, ptr->info.transport_object_identifier); gf_bs_write_u16(bs, ptr->header_ext_count); for (i=0; i<ptr->header_ext_count; i++) { gf_bs_write_u8(bs, ptr->headers[i].header_extension_type); if (ptr->headers[i].header_extension_type > 127) { gf_bs_write_data(bs, (const char *) ptr->headers[i].content, 3); } else { gf_bs_write_u8(bs, ptr->headers[i].data_length ? (ptr->headers[i].data_length+2)/4 : 0); if (ptr->headers[i].data_length) { gf_bs_write_data(bs, ptr->headers[i].data, ptr->headers[i].data_length); } } } return GF_OK; } GF_Err fdpa_box_size(GF_Box *s) { u32 i; GF_FDpacketBox *ptr = (GF_FDpacketBox *)s; ptr->size += 5; for (i=0; i<ptr->header_ext_count; i++) { ptr->size += 1; if (ptr->headers[i].header_extension_type > 127) { ptr->size += 3; } else { ptr->size += 1 + ptr->headers[i].data_length; } } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *extr_box_new() { ISOM_DECL_BOX_ALLOC(GF_ExtraDataBox, GF_ISOM_BOX_TYPE_EXTR); return (GF_Box *)tmp; } void extr_box_del(GF_Box *s) { GF_ExtraDataBox *ptr = (GF_ExtraDataBox *)s; if (ptr == NULL) return; if (ptr->feci) gf_isom_box_del((GF_Box*)ptr->feci); if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Err extr_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ExtraDataBox *ptr = (GF_ExtraDataBox *)s; e = gf_isom_box_parse((GF_Box**) &ptr->feci, bs); if (e) return e; if (!ptr->feci || ptr->feci->size > ptr->size) return GF_ISOM_INVALID_MEDIA; ptr->data_length = (u32) (ptr->size - ptr->feci->size); ptr->data = gf_malloc(sizeof(char)*ptr->data_length); if (!ptr->data) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->data, ptr->data_length); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err extr_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ExtraDataBox *ptr = (GF_ExtraDataBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; if (ptr->feci) { e = gf_isom_box_write((GF_Box *)ptr->feci, bs); if (e) return e; } gf_bs_write_data(bs, ptr->data, ptr->data_length); return GF_OK; } GF_Err extr_box_size(GF_Box *s) { GF_ExtraDataBox *ptr = (GF_ExtraDataBox *) s; ptr->size += ptr->data_length; if (ptr->feci) { GF_Err e = gf_isom_box_size((GF_Box*)ptr->feci); if (e) return e; ptr->size += ptr->feci->size; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *fdsa_box_new() { ISOM_DECL_BOX_ALLOC(GF_HintSample, GF_ISOM_BOX_TYPE_FDSA); if (!tmp) return NULL; tmp->packetTable = gf_list_new(); tmp->hint_subtype = GF_ISOM_BOX_TYPE_FDP_STSD; return (GF_Box*)tmp; 
} void fdsa_box_del(GF_Box *s) { GF_HintSample *ptr = (GF_HintSample *)s; gf_list_del(ptr->packetTable); gf_free(ptr); } GF_Err fdsa_on_child_box(GF_Box *s, GF_Box *a) { GF_HintSample *ptr = (GF_HintSample *)s; switch(a->type) { case GF_ISOM_BOX_TYPE_FDPA: gf_list_add(ptr->packetTable, a); break; case GF_ISOM_BOX_TYPE_EXTR: if (ptr->extra_data) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->extra_data = (GF_ExtraDataBox*)a; break; } return GF_OK; } GF_Err fdsa_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, fdsa_on_child_box); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err fdsa_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_HintSample *ptr = (GF_HintSample *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; e = gf_isom_box_array_write(s, ptr->packetTable, bs); if (e) return e; if (ptr->extra_data) { e = gf_isom_box_write((GF_Box *)ptr->extra_data, bs); if (e) return e; } return GF_OK; } GF_Err fdsa_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_HINTING*/ void trik_box_del(GF_Box *s) { GF_TrickPlayBox *ptr = (GF_TrickPlayBox *) s; if (ptr == NULL) return; if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err trik_box_read(GF_Box *s,GF_BitStream *bs) { u32 i; GF_TrickPlayBox *ptr = (GF_TrickPlayBox *) s; ptr->entry_count = (u32) ptr->size; ptr->entries = (GF_TrickPlayBoxEntry *) gf_malloc(ptr->entry_count * sizeof(GF_TrickPlayBoxEntry) ); if (!ptr->entries) return GF_OUT_OF_MEM; for (i=0; i< ptr->entry_count; i++) { ptr->entries[i].pic_type = gf_bs_read_int(bs, 2); ptr->entries[i].dependency_level = gf_bs_read_int(bs, 6); } return GF_OK; } GF_Box *trik_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrickPlayBox, GF_ISOM_BOX_TYPE_TRIK); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err trik_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_TrickPlayBox *ptr = (GF_TrickPlayBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; for (i=0; i < ptr->entry_count; i++ ) { gf_bs_write_int(bs, ptr->entries[i].pic_type, 2); gf_bs_write_int(bs, ptr->entries[i].dependency_level, 6); } return GF_OK; } GF_Err trik_box_size(GF_Box *s) { GF_TrickPlayBox *ptr = (GF_TrickPlayBox *) s; ptr->size += 8 * ptr->entry_count; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void bloc_box_del(GF_Box *s) { gf_free(s); } GF_Err bloc_box_read(GF_Box *s,GF_BitStream *bs) { GF_BaseLocationBox *ptr = (GF_BaseLocationBox *) s; ISOM_DECREASE_SIZE(s, 256) gf_bs_read_data(bs, (char *) ptr->baseLocation, 256); ISOM_DECREASE_SIZE(s, 256) gf_bs_read_data(bs, (char *) ptr->basePurlLocation, 256); ISOM_DECREASE_SIZE(s, 512) gf_bs_skip_bytes(bs, 512); return GF_OK; } GF_Box *bloc_box_new() { ISOM_DECL_BOX_ALLOC(GF_BaseLocationBox, GF_ISOM_BOX_TYPE_TRIK); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err bloc_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_BaseLocationBox *ptr = (GF_BaseLocationBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_data(bs, (const char *) ptr->baseLocation, 256); gf_bs_write_data(bs, (const char *) ptr->basePurlLocation, 256); for (i=0; i < 64; i++ ) { gf_bs_write_u64(bs, 0); } return GF_OK; } GF_Err bloc_box_size(GF_Box *s) { s->size += 1024; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void ainf_box_del(GF_Box *s) { GF_AssetInformationBox *ptr = (GF_AssetInformationBox *) s; if (ptr->APID) gf_free(ptr->APID); gf_free(s); } GF_Err ainf_box_read(GF_Box *s,GF_BitStream *bs) { GF_AssetInformationBox *ptr = 
(GF_AssetInformationBox *) s; ISOM_DECREASE_SIZE(s, 4) ptr->profile_version = gf_bs_read_u32(bs); return gf_isom_read_null_terminated_string(s, bs, s->size, &ptr->APID); } GF_Box *ainf_box_new() { ISOM_DECL_BOX_ALLOC(GF_AssetInformationBox, GF_ISOM_BOX_TYPE_AINF); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err ainf_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_AssetInformationBox *ptr = (GF_AssetInformationBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->profile_version); if (ptr->APID) gf_bs_write_data(bs, ptr->APID, (u32) strlen(ptr->APID) ); gf_bs_write_u8(bs, 0); return GF_OK; } GF_Err ainf_box_size(GF_Box *s) { GF_AssetInformationBox *ptr = (GF_AssetInformationBox *) s; s->size += 4 + (ptr->APID ? strlen(ptr->APID) : 0 ) + 1; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mhac_box_del(GF_Box *s) { GF_MHAConfigBox *ptr = (GF_MHAConfigBox *) s; if (ptr->mha_config) gf_free(ptr->mha_config); gf_free(s); } GF_Err mhac_box_read(GF_Box *s,GF_BitStream *bs) { GF_MHAConfigBox *ptr = (GF_MHAConfigBox *) s; ISOM_DECREASE_SIZE(s, 5) ptr->configuration_version = gf_bs_read_u8(bs); ptr->mha_pl_indication = gf_bs_read_u8(bs); ptr->reference_channel_layout = gf_bs_read_u8(bs); ptr->mha_config_size = gf_bs_read_u16(bs); if (ptr->mha_config_size) { ISOM_DECREASE_SIZE(s, ptr->mha_config_size) ptr->mha_config = gf_malloc(sizeof(char)*ptr->mha_config_size); if (!ptr->mha_config) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->mha_config, ptr->mha_config_size); } return GF_OK; } GF_Box *mhac_box_new() { ISOM_DECL_BOX_ALLOC(GF_MHAConfigBox, GF_ISOM_BOX_TYPE_MHAC); tmp->configuration_version = 1; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mhac_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MHAConfigBox *ptr = (GF_MHAConfigBox *) s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->configuration_version); gf_bs_write_u8(bs, ptr->mha_pl_indication); gf_bs_write_u8(bs, ptr->reference_channel_layout); gf_bs_write_u16(bs, ptr->mha_config ? 
ptr->mha_config_size : 0); if (ptr->mha_config && ptr->mha_config_size) gf_bs_write_data(bs, ptr->mha_config, ptr->mha_config_size); return GF_OK; } GF_Err mhac_box_size(GF_Box *s) { GF_MHAConfigBox *ptr = (GF_MHAConfigBox *) s; s->size += 5; if (ptr->mha_config_size && ptr->mha_config) s->size += ptr->mha_config_size; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void jp2h_box_del(GF_Box *s) { gf_free(s); } GF_Err jp2h_on_child_box(GF_Box *s, GF_Box *a) { GF_J2KHeaderBox *ptr = (GF_J2KHeaderBox *)s; switch(a->type) { case GF_ISOM_BOX_TYPE_IHDR: if (ptr->ihdr) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->ihdr = (GF_J2KImageHeaderBox*)a; return GF_OK; case GF_ISOM_BOX_TYPE_COLR: if (ptr->colr) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->colr = (GF_ColourInformationBox*)a; return GF_OK; } return GF_OK; } GF_Err jp2h_box_read(GF_Box *s,GF_BitStream *bs) { return gf_isom_box_array_read_ex(s, bs, jp2h_on_child_box, s->type); } GF_Box *jp2h_box_new() { ISOM_DECL_BOX_ALLOC(GF_J2KHeaderBox, GF_ISOM_BOX_TYPE_JP2H); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err jp2h_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err jp2h_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void ihdr_box_del(GF_Box *s) { gf_free(s); } GF_Err ihdr_box_read(GF_Box *s,GF_BitStream *bs) { GF_J2KImageHeaderBox *ptr = (GF_J2KImageHeaderBox *) s; ISOM_DECREASE_SIZE(s, 14) ptr->height = gf_bs_read_u32(bs); ptr->width = gf_bs_read_u32(bs); ptr->nb_comp = gf_bs_read_u16(bs); ptr->bpc = gf_bs_read_u8(bs); ptr->Comp = gf_bs_read_u8(bs); ptr->UnkC = gf_bs_read_u8(bs); ptr->IPR = gf_bs_read_u8(bs); return GF_OK; } GF_Box *ihdr_box_new() { ISOM_DECL_BOX_ALLOC(GF_J2KImageHeaderBox, GF_ISOM_BOX_TYPE_IHDR); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err ihdr_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_J2KImageHeaderBox *ptr = (GF_J2KImageHeaderBox *) s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->height); gf_bs_write_u32(bs, ptr->width); gf_bs_write_u16(bs, ptr->nb_comp); gf_bs_write_u8(bs, ptr->bpc); gf_bs_write_u8(bs, ptr->Comp); gf_bs_write_u8(bs, ptr->UnkC); gf_bs_write_u8(bs, ptr->IPR); return GF_OK; } GF_Err ihdr_box_size(GF_Box *s) { s->size += 14; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* Dolby Vision */ GF_Box *dvcC_box_new() { GF_DOVIConfigurationBox *tmp = (GF_DOVIConfigurationBox *)gf_malloc(sizeof(GF_DOVIConfigurationBox)); if (tmp == NULL) return NULL; memset(tmp, 0, sizeof(GF_DOVIConfigurationBox)); tmp->type = GF_ISOM_BOX_TYPE_DVCC; return (GF_Box *)tmp; } void dvcC_box_del(GF_Box *s) { GF_DOVIConfigurationBox *ptr = (GF_DOVIConfigurationBox*)s; gf_free(ptr); } GF_Err dvcC_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; u32 data[5]; GF_DOVIConfigurationBox *ptr = (GF_DOVIConfigurationBox *)s; //GF_DOVIDecoderConfigurationRecord ISOM_DECREASE_SIZE(ptr, 24) ptr->DOVIConfig.dv_version_major = gf_bs_read_u8(bs); ptr->DOVIConfig.dv_version_minor = gf_bs_read_u8(bs); ptr->DOVIConfig.dv_profile = gf_bs_read_int(bs, 7); ptr->DOVIConfig.dv_level = gf_bs_read_int(bs, 6); ptr->DOVIConfig.rpu_present_flag = gf_bs_read_int(bs, 1); ptr->DOVIConfig.el_present_flag = gf_bs_read_int(bs, 1); ptr->DOVIConfig.bl_present_flag = gf_bs_read_int(bs, 1); memset(data, 0, sizeof(u32)*5); gf_bs_read_data(bs, (char*)data, 20); for (i = 0; i < 5; ++i) { if (data[i] != 0) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] dvcC reserved bytes are not zero\n")); //return GF_ISOM_INVALID_FILE; } } return GF_OK; 
} #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dvcC_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_DOVIConfigurationBox *ptr = (GF_DOVIConfigurationBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; //GF_DOVIDecoderConfigurationRecord gf_bs_write_u8(bs, ptr->DOVIConfig.dv_version_major); gf_bs_write_u8(bs, ptr->DOVIConfig.dv_version_minor); gf_bs_write_int(bs, ptr->DOVIConfig.dv_profile, 7); gf_bs_write_int(bs, ptr->DOVIConfig.dv_level, 6); gf_bs_write_int(bs, ptr->DOVIConfig.rpu_present_flag, 1); gf_bs_write_int(bs, ptr->DOVIConfig.el_present_flag, 1); gf_bs_write_int(bs, ptr->DOVIConfig.bl_present_flag, 1); gf_bs_write_u32(bs, 0); gf_bs_write_u32(bs, 0); gf_bs_write_u32(bs, 0); gf_bs_write_u32(bs, 0); gf_bs_write_u32(bs, 0); return GF_OK; } GF_Err dvcC_box_size(GF_Box *s) { GF_DOVIConfigurationBox *ptr = (GF_DOVIConfigurationBox *)s; ptr->size += 24; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *dOps_box_new() { ISOM_DECL_BOX_ALLOC(GF_OpusSpecificBox, GF_ISOM_BOX_TYPE_DOPS); return (GF_Box *)tmp; } void dOps_box_del(GF_Box *s) { GF_OpusSpecificBox *ptr = (GF_OpusSpecificBox *)s; if (ptr) gf_free(ptr); } GF_Err dOps_box_read(GF_Box *s, GF_BitStream *bs) { GF_OpusSpecificBox *ptr = (GF_OpusSpecificBox *)s; ptr->version = gf_bs_read_u8(bs); ptr->OutputChannelCount = gf_bs_read_u8(bs); ptr->PreSkip = gf_bs_read_u16(bs); ptr->InputSampleRate = gf_bs_read_u32(bs); ptr->OutputGain = gf_bs_read_u16(bs); ptr->ChannelMappingFamily = gf_bs_read_u8(bs); ISOM_DECREASE_SIZE(ptr, 11) if (ptr->size) { ISOM_DECREASE_SIZE(ptr, 2+ptr->OutputChannelCount); ptr->StreamCount = gf_bs_read_u8(bs); ptr->CoupledCount = gf_bs_read_u8(bs); gf_bs_read_data(bs, (char *) ptr->ChannelMapping, ptr->OutputChannelCount); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dOps_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_OpusSpecificBox *ptr = (GF_OpusSpecificBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->version); gf_bs_write_u8(bs, ptr->OutputChannelCount); gf_bs_write_u16(bs, ptr->PreSkip); gf_bs_write_u32(bs, ptr->InputSampleRate); gf_bs_write_u16(bs, ptr->OutputGain); gf_bs_write_u8(bs, ptr->ChannelMappingFamily); if (ptr->ChannelMappingFamily) { gf_bs_write_u8(bs, ptr->StreamCount); gf_bs_write_u8(bs, ptr->CoupledCount); gf_bs_write_data(bs, (char *) ptr->ChannelMapping, ptr->OutputChannelCount); } return GF_OK; } GF_Err dOps_box_size(GF_Box *s) { GF_OpusSpecificBox *ptr = (GF_OpusSpecificBox *)s; ptr->size += 11; if (ptr->ChannelMappingFamily) ptr->size += 2 + ptr->OutputChannelCount; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void dfla_box_del(GF_Box *s) { GF_FLACConfigBox *ptr = (GF_FLACConfigBox *) s; if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Err dfla_box_read(GF_Box *s,GF_BitStream *bs) { GF_FLACConfigBox *ptr = (GF_FLACConfigBox *) s; ptr->dataSize = (u32) ptr->size; ptr->size=0; ptr->data = gf_malloc(ptr->dataSize); gf_bs_read_data(bs, ptr->data, ptr->dataSize); return GF_OK; } GF_Box *dfla_box_new() { ISOM_DECL_BOX_ALLOC(GF_FLACConfigBox, GF_ISOM_BOX_TYPE_DFLA); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dfla_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_FLACConfigBox *ptr = (GF_FLACConfigBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_data(bs, ptr->data, ptr->dataSize); return GF_OK; } GF_Err dfla_box_size(GF_Box *s) { GF_FLACConfigBox *ptr = (GF_FLACConfigBox *) s; ptr->size += ptr->dataSize; 
return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mvcg_box_del(GF_Box *s) { GF_MultiviewGroupBox *ptr = (GF_MultiviewGroupBox *) s; if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err mvcg_box_read(GF_Box *s,GF_BitStream *bs) { u32 i; GF_MultiviewGroupBox *ptr = (GF_MultiviewGroupBox *) s; ISOM_DECREASE_SIZE(s, 7) ptr->multiview_group_id = gf_bs_read_u32(bs); ptr->num_entries = gf_bs_read_u16(bs); gf_bs_read_u8(bs); ptr->entries = gf_malloc(ptr->num_entries * sizeof(MVCIEntry)); memset(ptr->entries, 0, ptr->num_entries * sizeof(MVCIEntry)); for (i=0; i<ptr->num_entries; i++) { ISOM_DECREASE_SIZE(s, 1) ptr->entries[i].entry_type = gf_bs_read_u8(bs); switch (ptr->entries[i].entry_type) { case 0: ISOM_DECREASE_SIZE(s, 4) ptr->entries[i].trackID = gf_bs_read_u32(bs); break; case 1: ISOM_DECREASE_SIZE(s, 6) ptr->entries[i].trackID = gf_bs_read_u32(bs); ptr->entries[i].tierID = gf_bs_read_u16(bs); break; case 2: ISOM_DECREASE_SIZE(s, 2) gf_bs_read_int(bs, 6); ptr->entries[i].output_view_id = gf_bs_read_int(bs, 10); break; case 3: ISOM_DECREASE_SIZE(s, 4) gf_bs_read_int(bs, 6) ; ptr->entries[i].start_view_id = gf_bs_read_int(bs, 10); ptr->entries[i].view_count = gf_bs_read_u16(bs); break; } } return gf_isom_box_array_read(s, bs, NULL); } GF_Box *mvcg_box_new() { ISOM_DECL_BOX_ALLOC(GF_MultiviewGroupBox, GF_ISOM_BOX_TYPE_MVCG); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mvcg_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_MultiviewGroupBox *ptr = (GF_MultiviewGroupBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->multiview_group_id); gf_bs_write_u16(bs, ptr->num_entries); gf_bs_write_u8(bs, 0); for (i=0; i<ptr->num_entries; i++) { gf_bs_write_u8(bs, ptr->entries[i].entry_type); switch (ptr->entries[i].entry_type) { case 0: gf_bs_write_u32(bs, ptr->entries[i].trackID); break; case 1: gf_bs_write_u32(bs, ptr->entries[i].trackID); gf_bs_write_u16(bs, ptr->entries[i].tierID); break; case 2: gf_bs_write_int(bs, 0, 6); gf_bs_write_int(bs, ptr->entries[i].output_view_id, 10); break; case 3: gf_bs_write_int(bs, 0, 6) ; gf_bs_write_int(bs, ptr->entries[i].start_view_id, 10); gf_bs_write_u16(bs, ptr->entries[i].view_count); break; } } return GF_OK; } GF_Err mvcg_box_size(GF_Box *s) { u32 i; GF_MultiviewGroupBox *ptr = (GF_MultiviewGroupBox *) s; ptr->size += 7; for (i=0; i<ptr->num_entries; i++) { switch (ptr->entries[i].entry_type) { case 0: ptr->size += 1 + 4; break; case 1: ptr->size += 1 + 6; break; case 2: ptr->size += 1 + 2; break; case 3: ptr->size += 1 + 4; break; } } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void vwid_box_del(GF_Box *s) { u32 i; GF_ViewIdentifierBox *ptr = (GF_ViewIdentifierBox *) s; if (ptr->views) { for (i=0; i<ptr->num_views; i++) { if (ptr->views[i].view_refs) gf_free(ptr->views[i].view_refs); } gf_free(ptr->views); } gf_free(ptr); } GF_Err vwid_box_read(GF_Box *s,GF_BitStream *bs) { u32 i; GF_ViewIdentifierBox *ptr = (GF_ViewIdentifierBox *) s; ISOM_DECREASE_SIZE(s, 3) gf_bs_read_int(bs, 2); ptr->min_temporal_id = gf_bs_read_int(bs, 3); ptr->max_temporal_id = gf_bs_read_int(bs, 3); ptr->num_views = gf_bs_read_u16(bs); if (6 * ptr->num_views > ptr->size) return GF_ISOM_INVALID_FILE; ptr->views = gf_malloc(sizeof(ViewIDEntry)*ptr->num_views); for (i=0; i<ptr->num_views; i++) { u32 j; ISOM_DECREASE_SIZE(s, 6) gf_bs_read_int(bs, 6); ptr->views[i].view_id = gf_bs_read_int(bs, 10); gf_bs_read_int(bs, 6); ptr->views[i].view_order_index = gf_bs_read_int(bs, 10); 
ptr->views[i].texture_in_stream = gf_bs_read_int(bs, 1); ptr->views[i].texture_in_track = gf_bs_read_int(bs, 1); ptr->views[i].depth_in_stream = gf_bs_read_int(bs, 1); ptr->views[i].depth_in_track = gf_bs_read_int(bs, 1); ptr->views[i].base_view_type = gf_bs_read_int(bs, 2); ptr->views[i].num_ref_views = gf_bs_read_int(bs, 10); if (2 * ptr->views[i].num_ref_views > ptr->size) return GF_ISOM_INVALID_FILE; ptr->views[i].view_refs = gf_malloc(sizeof(ViewIDRefViewEntry)*ptr->views[i].num_ref_views); for (j=0; j<ptr->views[i].num_ref_views; j++) { ISOM_DECREASE_SIZE(s, 2) gf_bs_read_int(bs, 4); ptr->views[i].view_refs[j].dep_comp_idc = gf_bs_read_int(bs, 2); ptr->views[i].view_refs[j].ref_view_id = gf_bs_read_int(bs, 10); } } return GF_OK; } GF_Box *vwid_box_new() { ISOM_DECL_BOX_ALLOC(GF_ViewIdentifierBox, GF_ISOM_BOX_TYPE_VWID); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err vwid_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i, j; GF_ViewIdentifierBox *ptr = (GF_ViewIdentifierBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, 0, 2); gf_bs_write_int(bs, ptr->min_temporal_id, 3); gf_bs_write_int(bs, ptr->max_temporal_id, 3); gf_bs_write_u16(bs, ptr->num_views); for (i=0; i<ptr->num_views; i++) { gf_bs_write_int(bs, 0, 6); gf_bs_write_int(bs, ptr->views[i].view_id, 10); gf_bs_write_int(bs, 0, 6); gf_bs_write_int(bs, ptr->views[i].view_order_index, 10); gf_bs_write_int(bs, ptr->views[i].texture_in_stream, 1); gf_bs_write_int(bs, ptr->views[i].texture_in_track, 1); gf_bs_write_int(bs, ptr->views[i].depth_in_stream, 1); gf_bs_write_int(bs, ptr->views[i].depth_in_track, 1); gf_bs_write_int(bs, ptr->views[i].base_view_type, 2); gf_bs_write_int(bs, ptr->views[i].num_ref_views, 10); for (j=0; j<ptr->views[i].num_ref_views; j++) { gf_bs_write_int(bs, 0, 4); gf_bs_write_int(bs, ptr->views[i].view_refs[j].dep_comp_idc, 2); gf_bs_write_int(bs, ptr->views[i].view_refs[j].ref_view_id, 10); } } return GF_OK; } GF_Err vwid_box_size(GF_Box *s) { u32 i; GF_ViewIdentifierBox *ptr = (GF_ViewIdentifierBox *) s; ptr->size += 3; for (i=0; i<ptr->num_views; i++) { ptr->size += 6 + 2 * ptr->views[i].num_ref_views; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void pcmC_box_del(GF_Box *s) { gf_free(s); } GF_Err pcmC_box_read(GF_Box *s,GF_BitStream *bs) { GF_PCMConfigBox *ptr = (GF_PCMConfigBox *) s; ISOM_DECREASE_SIZE(s, 2) ptr->format_flags = gf_bs_read_u8(bs); ptr->PCM_sample_size = gf_bs_read_u8(bs); return GF_OK; } GF_Box *pcmC_box_new() { ISOM_DECL_BOX_ALLOC(GF_PCMConfigBox, GF_ISOM_BOX_TYPE_PCMC); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err pcmC_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_PCMConfigBox *ptr = (GF_PCMConfigBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->format_flags); gf_bs_write_u8(bs, ptr->PCM_sample_size); return GF_OK; } GF_Err pcmC_box_size(GF_Box *s) { s->size += 2; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void chnl_box_del(GF_Box *s) { gf_free(s); } GF_Err chnl_box_read(GF_Box *s,GF_BitStream *bs) { GF_ChannelLayoutBox *ptr = (GF_ChannelLayoutBox *) s; ISOM_DECREASE_SIZE(s, 1) ptr->layout.stream_structure = gf_bs_read_u8(bs); if (ptr->layout.stream_structure & 1) { ISOM_DECREASE_SIZE(s, 1) ptr->layout.definedLayout = gf_bs_read_u8(bs); if (ptr->layout.definedLayout) { u32 remain = (u32) ptr->size; if (ptr->layout.stream_structure & 2) remain--; ptr->layout.channels_count = 0; while (remain) { ISOM_DECREASE_SIZE(s, 1) 
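/* chnl: each remaining payload byte is one speaker position; the value 126 signals an explicitly positioned speaker and is followed by a 16-bit azimuth and an 8-bit elevation */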
ptr->layout.layouts[ptr->layout.channels_count].position = gf_bs_read_u8(bs); remain--; if (ptr->layout.layouts[ptr->layout.channels_count].position == 126) { ISOM_DECREASE_SIZE(s, 3) ptr->layout.layouts[ptr->layout.channels_count].azimuth = gf_bs_read_int(bs, 16); ptr->layout.layouts[ptr->layout.channels_count].elevation = gf_bs_read_int(bs, 8); remain-=3; } } } else { ISOM_DECREASE_SIZE(s, 8) ptr->layout.omittedChannelsMap = gf_bs_read_u64(bs); } } if (ptr->layout.stream_structure & 2) { ISOM_DECREASE_SIZE(s, 1) ptr->layout.object_count = gf_bs_read_u8(bs); } return GF_OK; } GF_Box *chnl_box_new() { ISOM_DECL_BOX_ALLOC(GF_ChannelLayoutBox, GF_ISOM_BOX_TYPE_CHNL); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err chnl_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ChannelLayoutBox *ptr = (GF_ChannelLayoutBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->layout.stream_structure); if (ptr->layout.stream_structure & 1) { gf_bs_write_u8(bs, ptr->layout.definedLayout); if (ptr->layout.definedLayout==0) { u32 i; for (i=0; i<ptr->layout.channels_count; i++) { gf_bs_write_u8(bs, ptr->layout.layouts[i].position); if (ptr->layout.layouts[i].position==126) { gf_bs_write_int(bs, ptr->layout.layouts[i].azimuth, 16); gf_bs_write_int(bs, ptr->layout.layouts[i].elevation, 8); } } } else { gf_bs_write_u64(bs, ptr->layout.omittedChannelsMap); } } if (ptr->layout.stream_structure & 2) { gf_bs_write_u8(bs, ptr->layout.object_count); } return GF_OK; } GF_Err chnl_box_size(GF_Box *s) { GF_ChannelLayoutBox *ptr = (GF_ChannelLayoutBox *) s; s->size += 1; if (ptr->layout.stream_structure & 1) { s->size += 1; if (ptr->layout.definedLayout==0) { u32 i; for (i=0; i<ptr->layout.channels_count; i++) { s->size+=1; if (ptr->layout.layouts[i].position==126) s->size+=3; } } else { s->size += 8; } } if (ptr->layout.stream_structure & 2) { s->size += 1; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *emsg_box_new() { ISOM_DECL_BOX_ALLOC(GF_EventMessageBox, GF_ISOM_BOX_TYPE_EMSG); return (GF_Box *)tmp; } void emsg_box_del(GF_Box *s) { GF_EventMessageBox *ptr = (GF_EventMessageBox *) s; if (ptr == NULL) return; if (ptr->scheme_id_uri) gf_free(ptr->scheme_id_uri); if (ptr->value) gf_free(ptr->value); if (ptr->message_data) gf_free(ptr->message_data); gf_free(ptr); } GF_Err emsg_box_read(GF_Box *s,GF_BitStream *bs) { GF_Err e; GF_EventMessageBox *ptr = (GF_EventMessageBox*) s; if (ptr->version==0) { e = gf_isom_read_null_terminated_string(s, bs, ptr->size, &ptr->scheme_id_uri); if (e) return e; e = gf_isom_read_null_terminated_string(s, bs, ptr->size, &ptr->value); if (e) return e; ISOM_DECREASE_SIZE(ptr, 16); ptr->timescale = gf_bs_read_u32(bs); ptr->presentation_time_delta = gf_bs_read_u32(bs); ptr->event_duration = gf_bs_read_u32(bs); ptr->event_id = gf_bs_read_u32(bs); } else if (ptr->version==1) { ISOM_DECREASE_SIZE(ptr, 20); ptr->timescale = gf_bs_read_u32(bs); ptr->presentation_time_delta = gf_bs_read_u64(bs); ptr->event_duration = gf_bs_read_u32(bs); ptr->event_id = gf_bs_read_u32(bs); e = gf_isom_read_null_terminated_string(s, bs, ptr->size, &ptr->scheme_id_uri); if (e) return e; e = gf_isom_read_null_terminated_string(s, bs, ptr->size, &ptr->value); if (e) return e; } else { return GF_OK; } if (ptr->size) { if (ptr->size>0xFFFFFFFUL) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[IsoMedia] emsg message data size too big ("LLU") to be loaded\n", ptr->size)); return GF_OUT_OF_MEM; } ptr->message_data_size = (u32) ptr->size; ptr->message_data = 
gf_malloc(ptr->message_data_size); if (!ptr->message_data) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->message_data, ptr->message_data_size); ptr->size = 0; } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err emsg_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 len; GF_EventMessageBox *ptr = (GF_EventMessageBox*) s; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->version==1) { gf_bs_write_u32(bs, ptr->timescale); gf_bs_write_u64(bs, ptr->presentation_time_delta); gf_bs_write_u32(bs, ptr->event_duration); gf_bs_write_u32(bs, ptr->event_id); } len = ptr->scheme_id_uri ? (u32) strlen(ptr->scheme_id_uri) : 0; if (len) gf_bs_write_data(bs, ptr->scheme_id_uri, len); gf_bs_write_u8(bs, 0); len = ptr->value ? (u32) strlen(ptr->value) : 0; if (len) gf_bs_write_data(bs, ptr->value, len); gf_bs_write_u8(bs, 0); if (ptr->version==0) { gf_bs_write_u32(bs, ptr->timescale); gf_bs_write_u32(bs, (u32) ptr->presentation_time_delta); gf_bs_write_u32(bs, ptr->event_duration); gf_bs_write_u32(bs, ptr->event_id); } if (ptr->message_data) gf_bs_write_data(bs, ptr->message_data, ptr->message_data_size); return GF_OK; } GF_Err emsg_box_size(GF_Box *s) { GF_EventMessageBox *ptr = (GF_EventMessageBox*) s; ptr->size += 4; if (ptr->version) { ptr->size += 20; } else { ptr->size += 16; } ptr->size+=2; //1 NULL-terminated strings if (ptr->scheme_id_uri) ptr->size += strlen(ptr->scheme_id_uri); if (ptr->value) ptr->size += strlen(ptr->value); if (ptr->message_data) ptr->size += ptr->message_data_size; return GF_OK; } #endif // GPAC_DISABLE_ISOM_WRITE #endif /*GPAC_DISABLE_ISOM*/
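/*
 * Illustrative sketch (not GPAC API): the box readers above repeat one
 * defensive pattern - an entry count read from the payload is checked
 * against the bytes still left in the box, either through ISOM_DECREASE_SIZE
 * or an explicit comparison such as "nb_entries > size / 8", before it is
 * allowed to drive an allocation or a read loop.  The helper below restates
 * that pattern with plain stdlib types; bs_t, bs_remaining, bs_read_u32 and
 * read_entry_table are hypothetical names used only for this sketch.
 */
#include <stdint.h>
#include <stdlib.h>

typedef struct { const uint8_t *buf; size_t len, pos; } bs_t; /* minimal cursor over a byte buffer */

static size_t bs_remaining(const bs_t *bs) { return bs->len - bs->pos; }

static uint32_t bs_read_u32(bs_t *bs)
{
    uint32_t v = 0;
    for (int i = 0; i < 4; i++) v = (v << 8) | bs->buf[bs->pos++];
    return v;
}

/* Read a count-prefixed table of 32-bit entries, refusing any count that
 * could not fit in the remaining payload - the same guard the readers above
 * apply before gf_malloc'ing their entry arrays. */
static uint32_t *read_entry_table(bs_t *bs, uint32_t *out_count)
{
    uint32_t count, *entries, i;

    if (bs_remaining(bs) < 4) return NULL;
    count = bs_read_u32(bs);

    /* dividing the remaining length instead of multiplying the count keeps
     * the check itself free of integer overflow */
    if (count > bs_remaining(bs) / 4) return NULL;

    entries = malloc(count ? count * sizeof(*entries) : sizeof(*entries));
    if (!entries) return NULL;

    for (i = 0; i < count; i++) entries[i] = bs_read_u32(bs);
    *out_count = count;
    return entries;
}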
null
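/*
 * Illustrative sketch (not GPAC API): gf_isom_read_null_terminated_string
 * above grows its buffer in steps of 10 and stops at the NUL byte, when the
 * bitstream runs dry, or once the string would exceed the bytes left in the
 * enclosing box.  read_cstring_bounded below is a simplified, hypothetical
 * restatement of that loop over a plain byte buffer.
 */
#include <stdlib.h>

static char *read_cstring_bounded(const unsigned char *buf, size_t avail,
                                  size_t limit, size_t *consumed)
{
    size_t cap = 10, i = 0;
    char *out = malloc(cap);
    if (!out) return NULL;

    if (limit > avail) limit = avail;      /* never read past the buffer */

    while (i < limit) {
        out[i] = (char)buf[i];
        if (!out[i]) { *consumed = i + 1; return out; }
        i++;
        if (i == cap) {                    /* grow before the next write */
            char *tmp = realloc(out, cap += 10);
            if (!tmp) { free(out); return NULL; }
            out = tmp;
        }
    }
    out[i] = 0;                            /* no terminator found: add one */
    *consumed = i;
    return out;
}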
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2019 * All rights reserved * * This file is part of GPAC / ISO Media File Format sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/internal/isomedia_dev.h> #ifndef GPAC_DISABLE_ISOM void co64_box_del(GF_Box *s) { GF_ChunkLargeOffsetBox *ptr; ptr = (GF_ChunkLargeOffsetBox *) s; if (ptr == NULL) return; if (ptr->offsets) gf_free(ptr->offsets); gf_free(ptr); } GF_Err co64_box_read(GF_Box *s,GF_BitStream *bs) { u32 entries; GF_ChunkLargeOffsetBox *ptr = (GF_ChunkLargeOffsetBox *) s; ptr->nb_entries = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4) if (ptr->nb_entries > ptr->size / 8) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in co64\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } ptr->offsets = (u64 *) gf_malloc(ptr->nb_entries * sizeof(u64) ); if (ptr->offsets == NULL) return GF_OUT_OF_MEM; ptr->alloc_size = ptr->nb_entries; for (entries = 0; entries < ptr->nb_entries; entries++) { ptr->offsets[entries] = gf_bs_read_u64(bs); } return GF_OK; } GF_Box *co64_box_new() { ISOM_DECL_BOX_ALLOC(GF_ChunkLargeOffsetBox, GF_ISOM_BOX_TYPE_CO64); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err co64_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_ChunkLargeOffsetBox *ptr = (GF_ChunkLargeOffsetBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nb_entries); for (i = 0; i < ptr->nb_entries; i++ ) { gf_bs_write_u64(bs, ptr->offsets[i]); } return GF_OK; } GF_Err co64_box_size(GF_Box *s) { GF_ChunkLargeOffsetBox *ptr = (GF_ChunkLargeOffsetBox *) s; ptr->size += 4 + (8 * ptr->nb_entries); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void cprt_box_del(GF_Box *s) { GF_CopyrightBox *ptr = (GF_CopyrightBox *) s; if (ptr == NULL) return; if (ptr->notice) gf_free(ptr->notice); gf_free(ptr); } GF_Box *chpl_box_new() { ISOM_DECL_BOX_ALLOC(GF_ChapterListBox, GF_ISOM_BOX_TYPE_CHPL); tmp->list = gf_list_new(); tmp->version = 1; return (GF_Box *)tmp; } void chpl_box_del(GF_Box *s) { GF_ChapterListBox *ptr = (GF_ChapterListBox *) s; if (ptr == NULL) return; while (gf_list_count(ptr->list)) { GF_ChapterEntry *ce = (GF_ChapterEntry *)gf_list_get(ptr->list, 0); if (ce->name) gf_free(ce->name); gf_free(ce); gf_list_rem(ptr->list, 0); } gf_list_del(ptr->list); gf_free(ptr); } /*this is using chpl format according to some NeroRecode samples*/ GF_Err chpl_box_read(GF_Box *s,GF_BitStream *bs) { GF_ChapterEntry *ce; u32 nb_chaps, len, i, count; GF_ChapterListBox *ptr = (GF_ChapterListBox *)s; ISOM_DECREASE_SIZE(ptr, 5) /*reserved or ???*/ gf_bs_read_u32(bs); nb_chaps = gf_bs_read_u8(bs); count = 0; while (nb_chaps) { GF_SAFEALLOC(ce, GF_ChapterEntry); if (!ce) return GF_OUT_OF_MEM; ISOM_DECREASE_SIZE(ptr, 9) ce->start_time = gf_bs_read_u64(bs); len = 
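/* each Nero chpl entry is a 64-bit start time, an 8-bit name length, then that many name bytes, all bounded by the bytes left in the box */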
gf_bs_read_u8(bs); if (ptr->size<len) return GF_ISOM_INVALID_FILE; if (len) { ce->name = (char *)gf_malloc(sizeof(char)*(len+1)); if (!ce->name) return GF_OUT_OF_MEM; ISOM_DECREASE_SIZE(ptr, len) gf_bs_read_data(bs, ce->name, len); ce->name[len] = 0; } else { ce->name = gf_strdup(""); } for (i=0; i<count; i++) { GF_ChapterEntry *ace = (GF_ChapterEntry *) gf_list_get(ptr->list, i); if (ace->start_time >= ce->start_time) { gf_list_insert(ptr->list, ce, i); ce = NULL; break; } } if (ce) gf_list_add(ptr->list, ce); count++; nb_chaps--; } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err chpl_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 count, i; GF_ChapterListBox *ptr = (GF_ChapterListBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; count = gf_list_count(ptr->list); gf_bs_write_u32(bs, 0); gf_bs_write_u8(bs, count); for (i=0; i<count; i++) { u32 len; GF_ChapterEntry *ce = (GF_ChapterEntry *)gf_list_get(ptr->list, i); gf_bs_write_u64(bs, ce->start_time); if (ce->name) { len = (u32) strlen(ce->name); if (len>255) len = 255; gf_bs_write_u8(bs, len); gf_bs_write_data(bs, ce->name, len); } else { gf_bs_write_u8(bs, 0); } } return GF_OK; } GF_Err chpl_box_size(GF_Box *s) { u32 count, i; GF_ChapterListBox *ptr = (GF_ChapterListBox *)s; ptr->size += 5; count = gf_list_count(ptr->list); for (i=0; i<count; i++) { GF_ChapterEntry *ce = (GF_ChapterEntry *)gf_list_get(ptr->list, i); ptr->size += 9; /*64bit time stamp + 8bit str len*/ if (ce->name) ptr->size += strlen(ce->name); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Err cprt_box_read(GF_Box *s,GF_BitStream *bs) { GF_CopyrightBox *ptr = (GF_CopyrightBox *)s; ISOM_DECREASE_SIZE(ptr, 2); gf_bs_read_int(bs, 1); //the spec is unclear here, just says "the value 0 is interpreted as undetermined" ptr->packedLanguageCode[0] = gf_bs_read_int(bs, 5); ptr->packedLanguageCode[1] = gf_bs_read_int(bs, 5); ptr->packedLanguageCode[2] = gf_bs_read_int(bs, 5); //but before or after compaction ?? 
We assume before if (ptr->packedLanguageCode[0] || ptr->packedLanguageCode[1] || ptr->packedLanguageCode[2]) { ptr->packedLanguageCode[0] += 0x60; ptr->packedLanguageCode[1] += 0x60; ptr->packedLanguageCode[2] += 0x60; } else { ptr->packedLanguageCode[0] = 'u'; ptr->packedLanguageCode[1] = 'n'; ptr->packedLanguageCode[2] = 'd'; } if (ptr->size) { u32 bytesToRead = (u32) ptr->size; ptr->notice = (char*)gf_malloc(bytesToRead * sizeof(char)); if (ptr->notice == NULL) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->notice, bytesToRead); } return GF_OK; } GF_Box *cprt_box_new() { ISOM_DECL_BOX_ALLOC(GF_CopyrightBox, GF_ISOM_BOX_TYPE_CPRT); tmp->packedLanguageCode[0] = 'u'; tmp->packedLanguageCode[1] = 'n'; tmp->packedLanguageCode[2] = 'd'; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err cprt_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_CopyrightBox *ptr = (GF_CopyrightBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, 0, 1); if (ptr->packedLanguageCode[0]) { gf_bs_write_int(bs, ptr->packedLanguageCode[0] - 0x60, 5); gf_bs_write_int(bs, ptr->packedLanguageCode[1] - 0x60, 5); gf_bs_write_int(bs, ptr->packedLanguageCode[2] - 0x60, 5); } else { gf_bs_write_int(bs, 0, 15); } if (ptr->notice) { gf_bs_write_data(bs, ptr->notice, (u32) (strlen(ptr->notice) + 1) ); } return GF_OK; } GF_Err cprt_box_size(GF_Box *s) { GF_CopyrightBox *ptr = (GF_CopyrightBox *)s; ptr->size += 2; if (ptr->notice) ptr->size += strlen(ptr->notice) + 1; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void kind_box_del(GF_Box *s) { GF_KindBox *ptr = (GF_KindBox *) s; if (ptr == NULL) return; if (ptr->schemeURI) gf_free(ptr->schemeURI); if (ptr->value) gf_free(ptr->value); gf_free(ptr); } GF_Err kind_box_read(GF_Box *s,GF_BitStream *bs) { GF_KindBox *ptr = (GF_KindBox *)s; if (ptr->size) { u32 bytesToRead = (u32) ptr->size; char *data; u32 schemeURIlen; data = (char*)gf_malloc(bytesToRead * sizeof(char)); if (!data) return GF_OUT_OF_MEM; gf_bs_read_data(bs, data, bytesToRead); /*safety check in case the string is not null-terminated*/ if (data[bytesToRead-1]) { data = (char*)gf_realloc(data, sizeof(char)*(bytesToRead + 1)); if (!data) return GF_OUT_OF_MEM; data[bytesToRead] = 0; bytesToRead++; } ptr->schemeURI = gf_strdup(data); if (!ptr->schemeURI) return GF_OUT_OF_MEM; schemeURIlen = (u32) strlen(data); if (bytesToRead > schemeURIlen+1) { /* read the value */ char *data_value = data + schemeURIlen +1; ptr->value = gf_strdup(data_value); if (!ptr->value) return GF_OUT_OF_MEM; } gf_free(data); } return GF_OK; } GF_Box *kind_box_new() { ISOM_DECL_BOX_ALLOC(GF_KindBox, GF_ISOM_BOX_TYPE_KIND); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err kind_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_KindBox *ptr = (GF_KindBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->schemeURI) gf_bs_write_data(bs, ptr->schemeURI, (u32) (strlen(ptr->schemeURI) + 1 )); else gf_bs_write_u8(bs, 0); if (ptr->value) { gf_bs_write_data(bs, ptr->value, (u32) (strlen(ptr->value) + 1) ); } return GF_OK; } GF_Err kind_box_size(GF_Box *s) { GF_KindBox *ptr = (GF_KindBox *)s; ptr->size += (ptr->schemeURI ? 
strlen(ptr->schemeURI) : 0) + 1; if (ptr->value) { ptr->size += strlen(ptr->value) + 1; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void ctts_box_del(GF_Box *s) { GF_CompositionOffsetBox *ptr = (GF_CompositionOffsetBox *)s; if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err ctts_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; u32 sampleCount; GF_CompositionOffsetBox *ptr = (GF_CompositionOffsetBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->nb_entries = gf_bs_read_u32(bs); if (ptr->nb_entries > ptr->size / 8) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in ctts\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } ptr->alloc_size = ptr->nb_entries; ptr->entries = (GF_DttsEntry *)gf_malloc(sizeof(GF_DttsEntry)*ptr->alloc_size); if (!ptr->entries) return GF_OUT_OF_MEM; sampleCount = 0; for (i=0; i<ptr->nb_entries; i++) { ISOM_DECREASE_SIZE(ptr, 8); ptr->entries[i].sampleCount = gf_bs_read_u32(bs); if (ptr->version) ptr->entries[i].decodingOffset = gf_bs_read_int(bs, 32); else ptr->entries[i].decodingOffset = (s32) gf_bs_read_u32(bs); sampleCount += ptr->entries[i].sampleCount; if (ptr->max_ts_delta < ABS(ptr->entries[i].decodingOffset)) ptr->max_ts_delta = ABS(ptr->entries[i].decodingOffset); } #ifndef GPAC_DISABLE_ISOM_WRITE ptr->w_LastSampleNumber = sampleCount; #endif return GF_OK; } GF_Box *ctts_box_new() { ISOM_DECL_BOX_ALLOC(GF_CompositionOffsetBox, GF_ISOM_BOX_TYPE_CTTS); return (GF_Box *) tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err ctts_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_CompositionOffsetBox *ptr = (GF_CompositionOffsetBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nb_entries); for (i=0; i<ptr->nb_entries; i++ ) { gf_bs_write_u32(bs, ptr->entries[i].sampleCount); if (ptr->version) { gf_bs_write_int(bs, ptr->entries[i].decodingOffset, 32); } else { gf_bs_write_u32(bs, (u32) ptr->entries[i].decodingOffset); } } return GF_OK; } GF_Err ctts_box_size(GF_Box *s) { GF_CompositionOffsetBox *ptr = (GF_CompositionOffsetBox *) s; ptr->size += 4 + (8 * ptr->nb_entries); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void cslg_box_del(GF_Box *s) { GF_CompositionToDecodeBox *ptr = (GF_CompositionToDecodeBox *)s; if (ptr == NULL) return; gf_free(ptr); return; } GF_Err cslg_box_read(GF_Box *s, GF_BitStream *bs) { GF_CompositionToDecodeBox *ptr = (GF_CompositionToDecodeBox *)s; ISOM_DECREASE_SIZE(ptr, 20); ptr->compositionToDTSShift = gf_bs_read_int(bs, 32); ptr->leastDecodeToDisplayDelta = gf_bs_read_int(bs, 32); ptr->greatestDecodeToDisplayDelta = gf_bs_read_int(bs, 32); ptr->compositionStartTime = gf_bs_read_int(bs, 32); ptr->compositionEndTime = gf_bs_read_int(bs, 32); return GF_OK; } GF_Box *cslg_box_new() { ISOM_DECL_BOX_ALLOC(GF_CompositionToDecodeBox, GF_ISOM_BOX_TYPE_CSLG); return (GF_Box *) tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err cslg_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_CompositionToDecodeBox *ptr = (GF_CompositionToDecodeBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, ptr->compositionToDTSShift, 32); gf_bs_write_int(bs, ptr->leastDecodeToDisplayDelta, 32); gf_bs_write_int(bs, ptr->greatestDecodeToDisplayDelta, 32); gf_bs_write_int(bs, ptr->compositionStartTime, 32); gf_bs_write_int(bs, ptr->compositionEndTime, 32); return GF_OK; } GF_Err cslg_box_size(GF_Box *s) { GF_CompositionToDecodeBox *ptr = (GF_CompositionToDecodeBox *)s; ptr->size += 20; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void 
ccst_box_del(GF_Box *s) { GF_CodingConstraintsBox *ptr = (GF_CodingConstraintsBox *)s; if (ptr) gf_free(ptr); return; } GF_Err ccst_box_read(GF_Box *s, GF_BitStream *bs) { GF_CodingConstraintsBox *ptr = (GF_CodingConstraintsBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->all_ref_pics_intra = gf_bs_read_int(bs, 1); ptr->intra_pred_used = gf_bs_read_int(bs, 1); ptr->max_ref_per_pic = gf_bs_read_int(bs, 4); ptr->reserved = gf_bs_read_int(bs, 26); return GF_OK; } GF_Box *ccst_box_new() { ISOM_DECL_BOX_ALLOC(GF_CodingConstraintsBox, GF_ISOM_BOX_TYPE_CCST); return (GF_Box *) tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err ccst_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_CodingConstraintsBox *ptr = (GF_CodingConstraintsBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, ptr->all_ref_pics_intra, 1); gf_bs_write_int(bs, ptr->intra_pred_used, 1); gf_bs_write_int(bs, ptr->max_ref_per_pic, 4); gf_bs_write_int(bs, 0, 26); return GF_OK; } GF_Err ccst_box_size(GF_Box *s) { GF_CodingConstraintsBox *ptr = (GF_CodingConstraintsBox *)s; ptr->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void url_box_del(GF_Box *s) { GF_DataEntryURLBox *ptr = (GF_DataEntryURLBox *)s; if (ptr == NULL) return; if (ptr->location) gf_free(ptr->location); gf_free(ptr); return; } GF_Err url_box_read(GF_Box *s, GF_BitStream *bs) { GF_DataEntryURLBox *ptr = (GF_DataEntryURLBox *)s; if (ptr->size) { ptr->location = (char*)gf_malloc((u32) ptr->size); if (! ptr->location) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->location, (u32)ptr->size); if (ptr->location[ptr->size-1]) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] url box location is not 0-terminated\n" )); return GF_ISOM_INVALID_FILE; } } return GF_OK; } GF_Box *url_box_new() { ISOM_DECL_BOX_ALLOC(GF_DataEntryURLBox, GF_ISOM_BOX_TYPE_URL); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err url_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_DataEntryURLBox *ptr = (GF_DataEntryURLBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; //the flag set indicates we have a string (WE HAVE TO for URLs) if ( !(ptr->flags & 1)) { if (ptr->location) { gf_bs_write_data(bs, ptr->location, (u32)strlen(ptr->location) + 1); } } return GF_OK; } GF_Err url_box_size(GF_Box *s) { GF_DataEntryURLBox *ptr = (GF_DataEntryURLBox *)s; if ( !(ptr->flags & 1)) { if (ptr->location) ptr->size += 1 + strlen(ptr->location); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void urn_box_del(GF_Box *s) { GF_DataEntryURNBox *ptr = (GF_DataEntryURNBox *)s; if (ptr == NULL) return; if (ptr->location) gf_free(ptr->location); if (ptr->nameURN) gf_free(ptr->nameURN); gf_free(ptr); } GF_Err urn_box_read(GF_Box *s, GF_BitStream *bs) { u32 i, to_read; char *tmpName; GF_DataEntryURNBox *ptr = (GF_DataEntryURNBox *)s; if (! 
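/* an empty urn box is legal; otherwise the payload is a NUL-terminated name, optionally followed by a NUL-terminated location */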
ptr->size ) return GF_OK; //here we have to handle that in a clever way to_read = (u32) ptr->size; tmpName = (char*)gf_malloc(sizeof(char) * to_read); if (!tmpName) return GF_OUT_OF_MEM; //get the data gf_bs_read_data(bs, tmpName, to_read); //then get the break i = 0; while ( (i < to_read) && (tmpName[i] != 0) ) { i++; } //check the data is consistent if (i == to_read) { gf_free(tmpName); return GF_ISOM_INVALID_FILE; } //no NULL char, URL is not specified if (i == to_read - 1) { ptr->nameURN = tmpName; ptr->location = NULL; return GF_OK; } //OK, this has both URN and URL ptr->nameURN = (char*)gf_malloc(sizeof(char) * (i+1)); if (!ptr->nameURN) { gf_free(tmpName); return GF_OUT_OF_MEM; } memcpy(ptr->nameURN, tmpName, i + 1); if (tmpName[to_read - 1] != 0) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] urn box contains invalid location field\n" )); } else { ptr->location = (char*)gf_malloc(sizeof(char) * (to_read - i - 1)); if (!ptr->location) { gf_free(tmpName); gf_free(ptr->nameURN); ptr->nameURN = NULL; return GF_OUT_OF_MEM; } memcpy(ptr->location, tmpName + i + 1, (to_read - i - 1)); } gf_free(tmpName); return GF_OK; } GF_Box *urn_box_new() { ISOM_DECL_BOX_ALLOC(GF_DataEntryURNBox, GF_ISOM_BOX_TYPE_URN); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err urn_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_DataEntryURNBox *ptr = (GF_DataEntryURNBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; //the flag set indicates we have a string (WE HAVE TO for URLs) if ( !(ptr->flags & 1)) { //to check, the spec says: First name, then location if (ptr->nameURN) { gf_bs_write_data(bs, ptr->nameURN, (u32)strlen(ptr->nameURN) + 1); } if (ptr->location) { gf_bs_write_data(bs, ptr->location, (u32)strlen(ptr->location) + 1); } } return GF_OK; } GF_Err urn_box_size(GF_Box *s) { GF_DataEntryURNBox *ptr = (GF_DataEntryURNBox *)s; if ( !(ptr->flags & 1)) { if (ptr->nameURN) ptr->size += 1 + strlen(ptr->nameURN); if (ptr->location) ptr->size += 1 + strlen(ptr->location); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void unkn_box_del(GF_Box *s) { GF_UnknownBox *ptr = (GF_UnknownBox *) s; if (!s) return; if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Err unkn_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 bytesToRead, sub_size, sub_a; GF_BitStream *sub_bs; GF_UnknownBox *ptr = (GF_UnknownBox *)s; if (ptr->size > 0xFFFFFFFF) return GF_ISOM_INVALID_FILE; bytesToRead = (u32) (ptr->size); if (!bytesToRead) return GF_OK; if (bytesToRead>1000000) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Unknown box %s (0x%08X) with payload larger than 1 MBytes, ignoring\n", gf_4cc_to_str(ptr->type), ptr->type )); gf_bs_skip_bytes(bs, ptr->dataSize); return GF_OK; } ptr->data = (char*)gf_malloc(bytesToRead); if (ptr->data == NULL ) return GF_OUT_OF_MEM; ptr->dataSize = bytesToRead; gf_bs_read_data(bs, ptr->data, ptr->dataSize); //try to parse container boxes, check if next 8 bytes match a subbox sub_bs = gf_bs_new(ptr->data, ptr->dataSize, GF_BITSTREAM_READ); sub_size = gf_bs_read_u32(sub_bs); sub_a = gf_bs_read_u8(sub_bs); e = (sub_size && (sub_size <= ptr->dataSize)) ? GF_OK : GF_NOT_SUPPORTED; if (! isalnum(sub_a)) e = GF_NOT_SUPPORTED; sub_a = gf_bs_read_u8(sub_bs); if (! isalnum(sub_a)) e = GF_NOT_SUPPORTED; sub_a = gf_bs_read_u8(sub_bs); if (! isalnum(sub_a)) e = GF_NOT_SUPPORTED; sub_a = gf_bs_read_u8(sub_bs); if (! 
isalnum(sub_a)) e = GF_NOT_SUPPORTED; if (e == GF_OK) { gf_bs_seek(sub_bs, 0); gf_bs_set_cookie(sub_bs, GF_ISOM_BS_COOKIE_NO_LOGS); e = gf_isom_box_array_read(s, sub_bs, NULL); } gf_bs_del(sub_bs); if (e==GF_OK) { gf_free(ptr->data); ptr->data = NULL; ptr->dataSize = 0; } else if (s->child_boxes) { gf_isom_box_array_del(s->child_boxes); s->child_boxes=NULL; } return GF_OK; } GF_Box *unkn_box_new() { ISOM_DECL_BOX_ALLOC(GF_UnknownBox, GF_ISOM_BOX_TYPE_UNKNOWN); return (GF_Box *) tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err unkn_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 type; GF_UnknownBox *ptr = (GF_UnknownBox *)s; if (!s) return GF_BAD_PARAM; type = s->type; ptr->type = ptr->original_4cc; e = gf_isom_box_write_header(s, bs); ptr->type = type; if (e) return e; if (ptr->dataSize && ptr->data) { gf_bs_write_data(bs, ptr->data, ptr->dataSize); } return GF_OK; } GF_Err unkn_box_size(GF_Box *s) { GF_UnknownBox *ptr = (GF_UnknownBox *)s; if (ptr->dataSize && ptr->data) { ptr->size += ptr->dataSize; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void def_parent_box_del(GF_Box *s) { if (s) gf_free(s); } GF_Err def_parent_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, NULL); } GF_Box *def_parent_box_new() { ISOM_DECL_BOX_ALLOC(GF_Box, 0); return (GF_Box *) tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err def_parent_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err def_parent_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void def_parent_full_box_del(GF_Box *s) { if (s) gf_free(s); } GF_Err def_parent_full_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, NULL); } GF_Box *def_parent_full_box_new() { ISOM_DECL_BOX_ALLOC(GF_Box, 0); return (GF_Box *) tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err def_parent_full_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_full_box_write(s, bs); } GF_Err def_parent_full_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void uuid_box_del(GF_Box *s) { GF_UnknownUUIDBox *ptr = (GF_UnknownUUIDBox *) s; if (!s) return; if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Err uuid_box_read(GF_Box *s, GF_BitStream *bs) { u32 bytesToRead; GF_UnknownUUIDBox *ptr = (GF_UnknownUUIDBox *)s; if (ptr->size > 0xFFFFFFFF) return GF_ISOM_INVALID_FILE; bytesToRead = (u32) (ptr->size); if (bytesToRead) { ptr->data = (char*)gf_malloc(bytesToRead); if (ptr->data == NULL ) return GF_OUT_OF_MEM; ptr->dataSize = bytesToRead; gf_bs_read_data(bs, ptr->data, ptr->dataSize); } return GF_OK; } GF_Box *uuid_box_new() { ISOM_DECL_BOX_ALLOC(GF_UnknownUUIDBox, GF_ISOM_BOX_TYPE_UUID); return (GF_Box *) tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err uuid_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_UnknownUUIDBox *ptr = (GF_UnknownUUIDBox*)s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; if (ptr->data) { gf_bs_write_data(bs, ptr->data, ptr->dataSize); } return GF_OK; } GF_Err uuid_box_size(GF_Box *s) { GF_UnknownUUIDBox*ptr = (GF_UnknownUUIDBox*)s; ptr->size += ptr->dataSize; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void dinf_box_del(GF_Box *s) { gf_free(s); } GF_Err dinf_on_child_box(GF_Box *s, GF_Box *a) { GF_DataInformationBox *ptr = (GF_DataInformationBox *)s; switch(a->type) { case GF_ISOM_BOX_TYPE_DREF: if (ptr->dref) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->dref = (GF_DataReferenceBox *)a; return GF_OK; } return GF_OK; } GF_Err dinf_box_read(GF_Box *s,
GF_BitStream *bs) { GF_DataInformationBox *dinf; GF_Err e = gf_isom_box_array_read(s, bs, dinf_on_child_box); if (e) { return e; } dinf = (GF_DataInformationBox *)s; if (!dinf->dref) { if (! (gf_bs_get_cookie(bs) & GF_ISOM_BS_COOKIE_NO_LOGS) ) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing dref box in dinf\n")); } dinf->dref = (GF_DataReferenceBox *) gf_isom_box_new_parent(&dinf->child_boxes, GF_ISOM_BOX_TYPE_DREF); } return GF_OK; } GF_Box *dinf_box_new() { ISOM_DECL_BOX_ALLOC(GF_DataInformationBox, GF_ISOM_BOX_TYPE_DINF); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dinf_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err dinf_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void dref_box_del(GF_Box *s) { GF_DataReferenceBox *ptr = (GF_DataReferenceBox *) s; if (ptr == NULL) return; gf_free(ptr); } GF_Err dref_box_read(GF_Box *s, GF_BitStream *bs) { GF_DataReferenceBox *ptr = (GF_DataReferenceBox *)s; ISOM_DECREASE_SIZE(ptr, 4); gf_bs_read_u32(bs); return gf_isom_box_array_read(s, bs, NULL); } GF_Box *dref_box_new() { ISOM_DECL_BOX_ALLOC(GF_DataReferenceBox, GF_ISOM_BOX_TYPE_DREF); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dref_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 count; GF_DataReferenceBox *ptr = (GF_DataReferenceBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; count = ptr->child_boxes ? gf_list_count(ptr->child_boxes) : 0; gf_bs_write_u32(bs, count); return GF_OK; } GF_Err dref_box_size(GF_Box *s) { GF_DataReferenceBox *ptr = (GF_DataReferenceBox *)s; if (!s) return GF_BAD_PARAM; ptr->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void edts_box_del(GF_Box *s) { gf_free(s); } GF_Err edts_on_child_box(GF_Box *s, GF_Box *a) { GF_EditBox *ptr = (GF_EditBox *)s; if (a->type == GF_ISOM_BOX_TYPE_ELST) { if (ptr->editList) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->editList = (GF_EditListBox *)a; return GF_OK; } else { return GF_OK; } return GF_OK; } GF_Err edts_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, edts_on_child_box); } GF_Box *edts_box_new() { ISOM_DECL_BOX_ALLOC(GF_EditBox, GF_ISOM_BOX_TYPE_EDTS); return (GF_Box *) tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err edts_box_write(GF_Box *s, GF_BitStream *bs) { GF_EditBox *ptr = (GF_EditBox *)s; //here we have a trick: if editList is empty, skip the box if (ptr->editList && gf_list_count(ptr->editList->entryList)) { return gf_isom_box_write_header(s, bs); } else { s->size = 0; } return GF_OK; } GF_Err edts_box_size(GF_Box *s) { GF_EditBox *ptr = (GF_EditBox *)s; //here we have a trick: if editList is empty, skip the box if (!ptr->editList || ! 
gf_list_count(ptr->editList->entryList)) { ptr->size = 0; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void elst_box_del(GF_Box *s) { GF_EditListBox *ptr; u32 nb_entries; u32 i; ptr = (GF_EditListBox *)s; if (ptr == NULL) return; nb_entries = gf_list_count(ptr->entryList); for (i = 0; i < nb_entries; i++) { GF_EdtsEntry *p = (GF_EdtsEntry*)gf_list_get(ptr->entryList, i); if (p) gf_free(p); } gf_list_del(ptr->entryList); gf_free(ptr); } GF_Err elst_box_read(GF_Box *s, GF_BitStream *bs) { u32 entries; s32 tr; u32 nb_entries; GF_EditListBox *ptr = (GF_EditListBox *)s; ISOM_DECREASE_SIZE(ptr, 4); nb_entries = gf_bs_read_u32(bs); if (ptr->version == 1) { if (nb_entries > ptr->size / 20) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in ctts\n", nb_entries)); return GF_ISOM_INVALID_FILE; } } else { if (nb_entries > ptr->size / 12) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in ctts\n", nb_entries)); return GF_ISOM_INVALID_FILE; } } for (entries = 0; entries < nb_entries; entries++) { GF_EdtsEntry *p = (GF_EdtsEntry *) gf_malloc(sizeof(GF_EdtsEntry)); if (!p) return GF_OUT_OF_MEM; if (ptr->version == 1) { ISOM_DECREASE_SIZE(ptr, 16); p->segmentDuration = gf_bs_read_u64(bs); p->mediaTime = (s64) gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 8); p->segmentDuration = gf_bs_read_u32(bs); tr = gf_bs_read_u32(bs); p->mediaTime = (s64) tr; } ISOM_DECREASE_SIZE(ptr, 4); p->mediaRate = gf_bs_read_u16(bs); gf_bs_read_u16(bs); gf_list_add(ptr->entryList, p); } return GF_OK; } GF_Box *elst_box_new() { ISOM_DECL_BOX_ALLOC(GF_EditListBox, GF_ISOM_BOX_TYPE_ELST); tmp->entryList = gf_list_new(); if (!tmp->entryList) { gf_free(tmp); return NULL; } return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err elst_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; u32 nb_entries; GF_EditListBox *ptr = (GF_EditListBox *)s; if (!ptr) return GF_BAD_PARAM; nb_entries = gf_list_count(ptr->entryList); e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, nb_entries); for (i = 0; i < nb_entries; i++ ) { GF_EdtsEntry *p = (GF_EdtsEntry*)gf_list_get(ptr->entryList, i); if (ptr->version == 1) { gf_bs_write_u64(bs, p->segmentDuration); gf_bs_write_u64(bs, p->mediaTime); } else { gf_bs_write_u32(bs, (u32) p->segmentDuration); gf_bs_write_u32(bs, (s32) p->mediaTime); } gf_bs_write_u16(bs, p->mediaRate); gf_bs_write_u16(bs, 0); } return GF_OK; } GF_Err elst_box_size(GF_Box *s) { u32 durtimebytes; u32 i, nb_entries; GF_EditListBox *ptr = (GF_EditListBox *)s; //entry count ptr->size += 4; nb_entries = gf_list_count(ptr->entryList); ptr->version = 0; for (i=0; i<nb_entries; i++) { GF_EdtsEntry *p = (GF_EdtsEntry*)gf_list_get(ptr->entryList, i); if ((p->segmentDuration>0xFFFFFFFF) || (p->mediaTime>0xFFFFFFFF)) { ptr->version = 1; break; } } durtimebytes = (ptr->version == 1 ? 
16 : 8) + 4; ptr->size += (nb_entries * durtimebytes); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void esds_box_del(GF_Box *s) { GF_ESDBox *ptr = (GF_ESDBox *)s; if (ptr == NULL) return; if (ptr->desc) gf_odf_desc_del((GF_Descriptor *)ptr->desc); gf_free(ptr); } GF_Err esds_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e=GF_OK; u32 descSize; GF_ESDBox *ptr = (GF_ESDBox *)s; descSize = (u32) (ptr->size); if (descSize) { char *enc_desc = (char*)gf_malloc(sizeof(char) * descSize); if (!enc_desc) return GF_OUT_OF_MEM; //get the payload gf_bs_read_data(bs, enc_desc, descSize); //send it to the OD Codec e = gf_odf_desc_read(enc_desc, descSize, (GF_Descriptor **) &ptr->desc); //OK, free our desc gf_free(enc_desc); if (ptr->desc && (ptr->desc->tag!=GF_ODF_ESD_TAG) ) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid descriptor tag 0x%x in esds\n", ptr->desc->tag)); gf_odf_desc_del((GF_Descriptor*)ptr->desc); ptr->desc=NULL; return GF_ISOM_INVALID_FILE; } if (e) { ptr->desc = NULL; } else { /*fix broken files*/ if (ptr->desc && !ptr->desc->URLString) { if (!ptr->desc->slConfig) { ptr->desc->slConfig = (GF_SLConfig *) gf_odf_desc_new(GF_ODF_SLC_TAG); ptr->desc->slConfig->predefined = SLPredef_MP4; } else if (ptr->desc->slConfig->predefined != SLPredef_MP4) { ptr->desc->slConfig->predefined = SLPredef_MP4; gf_odf_slc_set_pref(ptr->desc->slConfig); } } } } return e; } GF_Box *esds_box_new() { ISOM_DECL_BOX_ALLOC(GF_ESDBox, GF_ISOM_BOX_TYPE_ESDS); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err esds_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u8 *enc_desc; u32 descSize = 0; GF_ESDBox *ptr = (GF_ESDBox *)s; //make sure we write with no ESID and no OCRESID if (ptr->desc) { ptr->desc->ESID = 0; ptr->desc->OCRESID = 0; } e = gf_isom_full_box_write(s, bs); if (e) return e; e = gf_odf_desc_write((GF_Descriptor *)ptr->desc, &enc_desc, &descSize); if (e) return e; gf_bs_write_data(bs, enc_desc, descSize); //free our buffer gf_free(enc_desc); return GF_OK; } GF_Err esds_box_size(GF_Box *s) { u32 descSize = 0; GF_ESDBox *ptr = (GF_ESDBox *)s; descSize = gf_odf_desc_size((GF_Descriptor *)ptr->desc); ptr->size += descSize; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void free_box_del(GF_Box *s) { GF_FreeSpaceBox *ptr = (GF_FreeSpaceBox *)s; if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Err free_box_read(GF_Box *s, GF_BitStream *bs) { u32 bytesToRead; GF_FreeSpaceBox *ptr = (GF_FreeSpaceBox *)s; if (ptr->size > 0xFFFFFFFF) return GF_IO_ERR; bytesToRead = (u32) (ptr->size); if (bytesToRead) { ptr->data = (char*)gf_malloc(bytesToRead * sizeof(char)); if (!ptr->data) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->data, bytesToRead); ptr->dataSize = bytesToRead; } return GF_OK; } GF_Box *free_box_new() { ISOM_DECL_BOX_ALLOC(GF_FreeSpaceBox, GF_ISOM_BOX_TYPE_FREE); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err free_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_FreeSpaceBox *ptr = (GF_FreeSpaceBox *)s; if (ptr->original_4cc) { u32 t = s->type; s->type=ptr->original_4cc; e = gf_isom_box_write_header(s, bs); s->type=t; } else { e = gf_isom_box_write_header(s, bs); } if (e) return e; if (ptr->dataSize) { if (ptr->data) { gf_bs_write_data(bs, ptr->data, ptr->dataSize); } else { u32 i = 0; while (i<ptr->dataSize) { gf_bs_write_u8(bs, 0); i++; } } } return GF_OK; } GF_Err free_box_size(GF_Box *s) { GF_FreeSpaceBox *ptr = (GF_FreeSpaceBox *)s; ptr->size += ptr->dataSize; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void ftyp_box_del(GF_Box 
*s) { GF_FileTypeBox *ptr = (GF_FileTypeBox *) s; if (ptr->altBrand) gf_free(ptr->altBrand); gf_free(ptr); } GF_Box *ftyp_box_new() { ISOM_DECL_BOX_ALLOC(GF_FileTypeBox, GF_ISOM_BOX_TYPE_FTYP); return (GF_Box *)tmp; } GF_Err ftyp_box_read(GF_Box *s,GF_BitStream *bs) { u32 i; GF_FileTypeBox *ptr = (GF_FileTypeBox *)s; ISOM_DECREASE_SIZE(ptr, 8); ptr->majorBrand = gf_bs_read_u32(bs); ptr->minorVersion = gf_bs_read_u32(bs); if (ptr->size % 4) return GF_ISOM_INVALID_FILE; ptr->altCount = ( (u32) (ptr->size)) / 4; if (!ptr->altCount) return GF_OK; ptr->altBrand = (u32*)gf_malloc(sizeof(u32)*ptr->altCount); if (!ptr->altBrand) return GF_OUT_OF_MEM; for (i = 0; i<ptr->altCount; i++) { ptr->altBrand[i] = gf_bs_read_u32(bs); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err ftyp_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_FileTypeBox *ptr = (GF_FileTypeBox *) s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->majorBrand); gf_bs_write_u32(bs, ptr->minorVersion); for (i=0; i<ptr->altCount; i++) { gf_bs_write_u32(bs, ptr->altBrand[i]); } return GF_OK; } GF_Err ftyp_box_size(GF_Box *s) { GF_FileTypeBox *ptr = (GF_FileTypeBox *)s; ptr->size += 8 + ptr->altCount * 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void gnrm_box_del(GF_Box *s) { GF_GenericSampleEntryBox *ptr = (GF_GenericSampleEntryBox *)s; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)ptr); if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Box *gnrm_box_new() { ISOM_DECL_BOX_ALLOC(GF_GenericSampleEntryBox, GF_ISOM_BOX_TYPE_GNRM); gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp); return (GF_Box *)tmp; } //dummy GF_Err gnrm_box_read(GF_Box *s, GF_BitStream *bs) { return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err gnrm_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_GenericSampleEntryBox *ptr = (GF_GenericSampleEntryBox *)s; //carefull we are not writing the box type but the entry type so switch for write ptr->type = ptr->EntryType; e = gf_isom_box_write_header(s, bs); if (e) return e; ptr->type = GF_ISOM_BOX_TYPE_GNRM; gf_bs_write_data(bs, ptr->reserved, 6); gf_bs_write_u16(bs, ptr->dataReferenceIndex); gf_bs_write_data(bs, ptr->data, ptr->data_size); return GF_OK; } GF_Err gnrm_box_size(GF_Box *s) { GF_GenericSampleEntryBox *ptr = (GF_GenericSampleEntryBox *)s; s->type = GF_ISOM_BOX_TYPE_GNRM; ptr->size += 8+ptr->data_size; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void gnrv_box_del(GF_Box *s) { GF_GenericVisualSampleEntryBox *ptr = (GF_GenericVisualSampleEntryBox *)s; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)ptr); if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Box *gnrv_box_new() { ISOM_DECL_BOX_ALLOC(GF_GenericVisualSampleEntryBox, GF_ISOM_BOX_TYPE_GNRV); gf_isom_video_sample_entry_init((GF_VisualSampleEntryBox*) tmp); return (GF_Box *)tmp; } //dummy GF_Err gnrv_box_read(GF_Box *s, GF_BitStream *bs) { return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err gnrv_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_GenericVisualSampleEntryBox *ptr = (GF_GenericVisualSampleEntryBox *)s; //carefull we are not writing the box type but the entry type so switch for write ptr->type = ptr->EntryType; e = gf_isom_box_write_header(s, bs); if (e) return e; ptr->type = GF_ISOM_BOX_TYPE_GNRV; gf_isom_video_sample_entry_write((GF_VisualSampleEntryBox *)ptr, bs); gf_bs_write_data(bs, ptr->data, ptr->data_size); return GF_OK; } GF_Err gnrv_box_size(GF_Box *s) { GF_GenericVisualSampleEntryBox *ptr = (GF_GenericVisualSampleEntryBox *)s; s->type = 
GF_ISOM_BOX_TYPE_GNRV; gf_isom_video_sample_entry_size((GF_VisualSampleEntryBox *)s); ptr->size += ptr->data_size; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void gnra_box_del(GF_Box *s) { GF_GenericAudioSampleEntryBox *ptr = (GF_GenericAudioSampleEntryBox *)s; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)ptr); if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Box *gnra_box_new() { ISOM_DECL_BOX_ALLOC(GF_GenericAudioSampleEntryBox, GF_ISOM_BOX_TYPE_GNRA); gf_isom_audio_sample_entry_init((GF_AudioSampleEntryBox*) tmp); return (GF_Box *)tmp; } //dummy GF_Err gnra_box_read(GF_Box *s, GF_BitStream *bs) { return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err gnra_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_GenericAudioSampleEntryBox *ptr = (GF_GenericAudioSampleEntryBox *)s; //carefull we are not writing the box type but the entry type so switch for write ptr->type = ptr->EntryType; e = gf_isom_box_write_header(s, bs); if (e) return e; ptr->type = GF_ISOM_BOX_TYPE_GNRA; gf_isom_audio_sample_entry_write((GF_AudioSampleEntryBox *)ptr, bs); if (ptr->data) { gf_bs_write_data(bs, ptr->data, ptr->data_size); } return GF_OK; } GF_Err gnra_box_size(GF_Box *s) { GF_GenericAudioSampleEntryBox *ptr = (GF_GenericAudioSampleEntryBox *)s; s->type = GF_ISOM_BOX_TYPE_GNRA; gf_isom_audio_sample_entry_size((GF_AudioSampleEntryBox *)s); ptr->size += ptr->data_size; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void hdlr_box_del(GF_Box *s) { GF_HandlerBox *ptr = (GF_HandlerBox *)s; if (ptr == NULL) return; if (ptr->nameUTF8) gf_free(ptr->nameUTF8); gf_free(ptr); } GF_Err hdlr_box_read(GF_Box *s, GF_BitStream *bs) { u64 cookie; GF_HandlerBox *ptr = (GF_HandlerBox *)s; ISOM_DECREASE_SIZE(ptr, 20); ptr->reserved1 = gf_bs_read_u32(bs); ptr->handlerType = gf_bs_read_u32(bs); gf_bs_read_data(bs, (char*)ptr->reserved2, 12); cookie = gf_bs_get_cookie(bs); if (ptr->handlerType==GF_ISOM_MEDIA_VISUAL) cookie |= GF_ISOM_BS_COOKIE_VISUAL_TRACK; else cookie &= ~GF_ISOM_BS_COOKIE_VISUAL_TRACK; gf_bs_set_cookie(bs, cookie); if (ptr->size) { ptr->nameUTF8 = (char*)gf_malloc((u32) ptr->size); if (!ptr->nameUTF8) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->nameUTF8, (u32) ptr->size); //patch for old QT files - we cannot rely on checking if str[0]==len(str+1) since we may have //cases where the first character of the string decimal value is indeed the same as the string length!! 
//we had this issue with encryption_import test //we therefore only check if last char is null, and if not so assume old QT style if (ptr->nameUTF8[ptr->size-1]) { memmove(ptr->nameUTF8, ptr->nameUTF8+1, sizeof(char) * (u32) (ptr->size-1) ); ptr->nameUTF8[ptr->size-1] = 0; ptr->store_counted_string = GF_TRUE; } } return GF_OK; } GF_Box *hdlr_box_new() { ISOM_DECL_BOX_ALLOC(GF_HandlerBox, GF_ISOM_BOX_TYPE_HDLR); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err hdlr_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_HandlerBox *ptr = (GF_HandlerBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->reserved1); gf_bs_write_u32(bs, ptr->handlerType); gf_bs_write_data(bs, (char*)ptr->reserved2, 12); if (ptr->nameUTF8) { u32 len = (u32)strlen(ptr->nameUTF8); if (ptr->store_counted_string) { gf_bs_write_u8(bs, len); gf_bs_write_data(bs, ptr->nameUTF8, len); } else { gf_bs_write_data(bs, ptr->nameUTF8, len); gf_bs_write_u8(bs, 0); } } else { gf_bs_write_u8(bs, 0); } return GF_OK; } GF_Err hdlr_box_size(GF_Box *s) { GF_HandlerBox *ptr = (GF_HandlerBox *)s; ptr->size += 20 + 1; //null term or counted string if (ptr->nameUTF8) { ptr->size += strlen(ptr->nameUTF8); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void hinf_box_del(GF_Box *s) { GF_HintInfoBox *hinf = (GF_HintInfoBox *)s; gf_free(hinf); } GF_Box *hinf_box_new() { ISOM_DECL_BOX_ALLOC(GF_HintInfoBox, GF_ISOM_BOX_TYPE_HINF); tmp->child_boxes = gf_list_new(); return (GF_Box *)tmp; } GF_Err hinf_on_child_box(GF_Box *s, GF_Box *a) { GF_MAXRBox *maxR; GF_HintInfoBox *hinf = (GF_HintInfoBox *)s; u32 i; switch (a->type) { case GF_ISOM_BOX_TYPE_MAXR: i=0; while ((maxR = (GF_MAXRBox *)gf_list_enum(hinf->child_boxes, &i))) { if ((maxR->type==GF_ISOM_BOX_TYPE_MAXR) && (maxR->granularity == ((GF_MAXRBox *)a)->granularity)) ERROR_ON_DUPLICATED_BOX(a, s) } break; } return GF_OK; } GF_Err hinf_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, hinf_on_child_box); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err hinf_box_write(GF_Box *s, GF_BitStream *bs) { // GF_HintInfoBox *ptr = (GF_HintInfoBox *)s; if (!s) return GF_BAD_PARAM; return gf_isom_box_write_header(s, bs); } GF_Err hinf_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void hmhd_box_del(GF_Box *s) { GF_HintMediaHeaderBox *ptr = (GF_HintMediaHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err hmhd_box_read(GF_Box *s,GF_BitStream *bs) { GF_HintMediaHeaderBox *ptr = (GF_HintMediaHeaderBox *)s; ISOM_DECREASE_SIZE(ptr, 16); ptr->maxPDUSize = gf_bs_read_u16(bs); ptr->avgPDUSize = gf_bs_read_u16(bs); ptr->maxBitrate = gf_bs_read_u32(bs); ptr->avgBitrate = gf_bs_read_u32(bs); ptr->slidingAverageBitrate = gf_bs_read_u32(bs); return GF_OK; } GF_Box *hmhd_box_new() { ISOM_DECL_BOX_ALLOC(GF_HintMediaHeaderBox, GF_ISOM_BOX_TYPE_HMHD); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err hmhd_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_HintMediaHeaderBox *ptr = (GF_HintMediaHeaderBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u16(bs, ptr->maxPDUSize); gf_bs_write_u16(bs, ptr->avgPDUSize); gf_bs_write_u32(bs, ptr->maxBitrate); gf_bs_write_u32(bs, ptr->avgBitrate); gf_bs_write_u32(bs, ptr->slidingAverageBitrate); return GF_OK; } GF_Err hmhd_box_size(GF_Box *s) { GF_HintMediaHeaderBox *ptr = (GF_HintMediaHeaderBox *)s; ptr->size += 16; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *hnti_box_new() { ISOM_DECL_BOX_ALLOC(GF_HintTrackInfoBox, 
GF_ISOM_BOX_TYPE_HNTI); return (GF_Box *)tmp; } void hnti_box_del(GF_Box *a) { gf_free(a); } GF_Err hnti_on_child_box(GF_Box *s, GF_Box *a) { GF_HintTrackInfoBox *hnti = (GF_HintTrackInfoBox *)s; if (!hnti || !a) return GF_BAD_PARAM; switch (a->type) { //this is the value for GF_RTPBox - same as HintSampleEntry for RTP !!! case GF_ISOM_BOX_TYPE_RTP: case GF_ISOM_BOX_TYPE_SDP: if (hnti->SDP) ERROR_ON_DUPLICATED_BOX(a, s) hnti->SDP = a; break; default: break; } return GF_OK; } GF_Err hnti_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read_ex(s, bs, hnti_on_child_box, s->type); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err hnti_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err hnti_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** GF_SDPBox **********************************************************/ void sdp_box_del(GF_Box *s) { GF_SDPBox *ptr = (GF_SDPBox *)s; if (ptr->sdpText) gf_free(ptr->sdpText); gf_free(ptr); } GF_Err sdp_box_read(GF_Box *s, GF_BitStream *bs) { u32 length; GF_SDPBox *ptr = (GF_SDPBox *)s; if (ptr == NULL) return GF_BAD_PARAM; length = (u32) (ptr->size); //sdp text has no delimiter !!! ptr->sdpText = (char*)gf_malloc(sizeof(char) * (length+1)); if (!ptr->sdpText) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->sdpText, length); ptr->sdpText[length] = 0; return GF_OK; } GF_Box *sdp_box_new() { ISOM_DECL_BOX_ALLOC(GF_SDPBox, GF_ISOM_BOX_TYPE_SDP); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err sdp_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SDPBox *ptr = (GF_SDPBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; //don't write the NULL char!!! if (ptr->sdpText) gf_bs_write_data(bs, ptr->sdpText, (u32) strlen(ptr->sdpText)); return GF_OK; } GF_Err sdp_box_size(GF_Box *s) { GF_SDPBox *ptr = (GF_SDPBox *)s; //don't count the NULL char!!! if (ptr->sdpText) ptr->size += strlen(ptr->sdpText); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void rtp_hnti_box_del(GF_Box *s) { GF_RTPBox *ptr = (GF_RTPBox *)s; if (ptr->sdpText) gf_free(ptr->sdpText); gf_free(ptr); } GF_Err rtp_hnti_box_read(GF_Box *s, GF_BitStream *bs) { u32 length; GF_RTPBox *ptr = (GF_RTPBox *)s; if (ptr == NULL) return GF_BAD_PARAM; ISOM_DECREASE_SIZE(ptr, 4) ptr->subType = gf_bs_read_u32(bs); length = (u32) (ptr->size); //sdp text has no delimiter !!! ptr->sdpText = (char*)gf_malloc(sizeof(char) * (length+1)); if (!ptr->sdpText) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->sdpText, length); ptr->sdpText[length] = 0; return GF_OK; } GF_Box *rtp_hnti_box_new() { ISOM_DECL_BOX_ALLOC(GF_RTPBox, GF_ISOM_BOX_TYPE_RTP); tmp->subType = GF_ISOM_BOX_TYPE_SDP; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err rtp_hnti_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_RTPBox *ptr = (GF_RTPBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->subType); //don't write the NULL char!!! 
gf_bs_write_data(bs, ptr->sdpText, (u32) strlen(ptr->sdpText)); return GF_OK; } GF_Err rtp_hnti_box_size(GF_Box *s) { GF_RTPBox *ptr = (GF_RTPBox *)s; ptr->size += 4 + strlen(ptr->sdpText); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** TRPY GF_Box **********************************************************/ void trpy_box_del(GF_Box *s) { gf_free((GF_TRPYBox *)s); } GF_Err trpy_box_read(GF_Box *s, GF_BitStream *bs) { GF_TRPYBox *ptr = (GF_TRPYBox *)s; ISOM_DECREASE_SIZE(ptr, 8); ptr->nbBytes = gf_bs_read_u64(bs); return GF_OK; } GF_Box *trpy_box_new() { ISOM_DECL_BOX_ALLOC(GF_TRPYBox, GF_ISOM_BOX_TYPE_TRPY); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err trpy_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TRPYBox *ptr = (GF_TRPYBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->nbBytes); return GF_OK; } GF_Err trpy_box_size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** TOTL GF_Box **********************************************************/ void totl_box_del(GF_Box *s) { gf_free((GF_TRPYBox *)s); } GF_Err totl_box_read(GF_Box *s, GF_BitStream *bs) { GF_TOTLBox *ptr = (GF_TOTLBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->nbBytes = gf_bs_read_u32(bs); return GF_OK; } GF_Box *totl_box_new() { ISOM_DECL_BOX_ALLOC(GF_TOTLBox, GF_ISOM_BOX_TYPE_TOTL); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err totl_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TOTLBox *ptr = (GF_TOTLBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nbBytes); return GF_OK; } GF_Err totl_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** NUMP GF_Box **********************************************************/ void nump_box_del(GF_Box *s) { gf_free((GF_NUMPBox *)s); } GF_Err nump_box_read(GF_Box *s, GF_BitStream *bs) { GF_NUMPBox *ptr = (GF_NUMPBox *)s; ISOM_DECREASE_SIZE(ptr, 8); ptr->nbPackets = gf_bs_read_u64(bs); return GF_OK; } GF_Box *nump_box_new() { ISOM_DECL_BOX_ALLOC(GF_NUMPBox, GF_ISOM_BOX_TYPE_NUMP); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err nump_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_NUMPBox *ptr = (GF_NUMPBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->nbPackets); return GF_OK; } GF_Err nump_box_size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** NPCK GF_Box **********************************************************/ void npck_box_del(GF_Box *s) { gf_free((GF_NPCKBox *)s); } GF_Err npck_box_read(GF_Box *s, GF_BitStream *bs) { GF_NPCKBox *ptr = (GF_NPCKBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->nbPackets = gf_bs_read_u32(bs); return GF_OK; } GF_Box *npck_box_new() { ISOM_DECL_BOX_ALLOC(GF_NPCKBox, GF_ISOM_BOX_TYPE_NPCK); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err npck_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_NPCKBox *ptr = (GF_NPCKBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nbPackets); return GF_OK; } GF_Err npck_box_size(GF_Box *s) { s->size += 4; return GF_OK; } 
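/* Note: trpy, totl, nump and npck above, like tpyl, tpay, maxr, dmed, dimm, drep, tmin,
 * tmax, pmax and dmax further below, are the hint-track statistics boxes. They all share
 * one shape: the reader consumes a single fixed-size counter after an ISOM_DECREASE_SIZE()
 * guard, so a box whose declared size is smaller than the field is flagged as invalid
 * instead of being read past its payload; the writer emits the box header followed by the
 * counter; and the matching _size function adds the same fixed byte count. */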
#endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** TPYL GF_Box **********************************************************/ void tpyl_box_del(GF_Box *s) { gf_free((GF_NTYLBox *)s); } GF_Err tpyl_box_read(GF_Box *s, GF_BitStream *bs) { GF_NTYLBox *ptr = (GF_NTYLBox *)s; if (ptr == NULL) return GF_BAD_PARAM; ISOM_DECREASE_SIZE(ptr, 8); ptr->nbBytes = gf_bs_read_u64(bs); return GF_OK; } GF_Box *tpyl_box_new() { ISOM_DECL_BOX_ALLOC(GF_NTYLBox, GF_ISOM_BOX_TYPE_TPYL); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tpyl_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_NTYLBox *ptr = (GF_NTYLBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->nbBytes); return GF_OK; } GF_Err tpyl_box_size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** TPAY GF_Box **********************************************************/ void tpay_box_del(GF_Box *s) { gf_free((GF_TPAYBox *)s); } GF_Err tpay_box_read(GF_Box *s, GF_BitStream *bs) { GF_TPAYBox *ptr = (GF_TPAYBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->nbBytes = gf_bs_read_u32(bs); return GF_OK; } GF_Box *tpay_box_new() { ISOM_DECL_BOX_ALLOC(GF_TPAYBox, GF_ISOM_BOX_TYPE_TPAY); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tpay_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TPAYBox *ptr = (GF_TPAYBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nbBytes); return GF_OK; } GF_Err tpay_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** MAXR GF_Box **********************************************************/ void maxr_box_del(GF_Box *s) { gf_free((GF_MAXRBox *)s); } GF_Err maxr_box_read(GF_Box *s, GF_BitStream *bs) { GF_MAXRBox *ptr = (GF_MAXRBox *)s; if (ptr == NULL) return GF_BAD_PARAM; ISOM_DECREASE_SIZE(ptr, 8); ptr->granularity = gf_bs_read_u32(bs); ptr->maxDataRate = gf_bs_read_u32(bs); return GF_OK; } GF_Box *maxr_box_new() { ISOM_DECL_BOX_ALLOC(GF_MAXRBox, GF_ISOM_BOX_TYPE_MAXR); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err maxr_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MAXRBox *ptr = (GF_MAXRBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->granularity); gf_bs_write_u32(bs, ptr->maxDataRate); return GF_OK; } GF_Err maxr_box_size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** DMED GF_Box **********************************************************/ void dmed_box_del(GF_Box *s) { gf_free((GF_DMEDBox *)s); } GF_Err dmed_box_read(GF_Box *s, GF_BitStream *bs) { GF_DMEDBox *ptr = (GF_DMEDBox *)s; ISOM_DECREASE_SIZE(ptr, 8); ptr->nbBytes = gf_bs_read_u64(bs); return GF_OK; } GF_Box *dmed_box_new() { ISOM_DECL_BOX_ALLOC(GF_DMEDBox, GF_ISOM_BOX_TYPE_DMED); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dmed_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_DMEDBox *ptr = (GF_DMEDBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->nbBytes); return GF_OK; } GF_Err dmed_box_size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ 
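/*
 * Illustrative sketch only - not part of GPAC. It shows, stand-alone, the remaining-size
 * accounting that the ISOM_DECREASE_SIZE() guard performs in the box readers of this file:
 * the declared payload size is decremented before every fixed-size read, so a box that
 * lies about (or truncates) its size is rejected instead of being read out of bounds.
 * All demo_* names below are hypothetical helpers for this example only, not GPAC APIs.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct {
	const uint8_t *buf;   /* box payload; assumed at least box_size bytes long in this demo */
	size_t pos;           /* current read position */
	uint64_t box_size;    /* remaining declared payload, plays the role of ptr->size above */
} demo_reader_t;

/* same idea as ISOM_DECREASE_SIZE(ptr, n): refuse the read if fewer than n bytes remain */
static int demo_decrease_size(demo_reader_t *r, uint64_t n)
{
	if (r->box_size < n) return -1;   /* declared size too small: invalid box */
	r->box_size -= n;
	return 0;
}

/* big-endian 32-bit read; caller must have validated the 4 bytes via demo_decrease_size */
static uint32_t demo_read_u32(demo_reader_t *r)
{
	uint32_t v = ((uint32_t)r->buf[r->pos] << 24) | ((uint32_t)r->buf[r->pos + 1] << 16) |
	             ((uint32_t)r->buf[r->pos + 2] << 8) | (uint32_t)r->buf[r->pos + 3];
	r->pos += 4;
	return v;
}

/* shape shared by the simple counter boxes in this file (totl, npck, tmin, tmax, pmax, dmax) */
static int demo_counter_box_read(demo_reader_t *r, uint32_t *value)
{
	if (demo_decrease_size(r, 4)) return -1;
	*value = demo_read_u32(r);
	return 0;
}

int main(void)
{
	const uint8_t payload[4] = { 0x00, 0x00, 0x01, 0x00 };
	demo_reader_t r = { payload, 0, sizeof(payload) };
	uint32_t value;

	if (demo_counter_box_read(&r, &value) == 0)
		printf("counter = %u, declared payload left = %llu bytes\n",
		       value, (unsigned long long)r.box_size);
	else
		printf("invalid box: declared size smaller than the field\n");
	return 0;
}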
/********************************************************** DIMM GF_Box **********************************************************/ void dimm_box_del(GF_Box *s) { gf_free((GF_DIMMBox *)s); } GF_Err dimm_box_read(GF_Box *s, GF_BitStream *bs) { GF_DIMMBox *ptr = (GF_DIMMBox *)s; ISOM_DECREASE_SIZE(ptr, 8) ptr->nbBytes = gf_bs_read_u64(bs); return GF_OK; } GF_Box *dimm_box_new() { ISOM_DECL_BOX_ALLOC(GF_DIMMBox, GF_ISOM_BOX_TYPE_DIMM); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dimm_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_DIMMBox *ptr = (GF_DIMMBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->nbBytes); return GF_OK; } GF_Err dimm_box_size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** DREP GF_Box **********************************************************/ void drep_box_del(GF_Box *s) { gf_free((GF_DREPBox *)s); } GF_Err drep_box_read(GF_Box *s, GF_BitStream *bs) { GF_DREPBox *ptr = (GF_DREPBox *)s; ISOM_DECREASE_SIZE(ptr, 8) ptr->nbBytes = gf_bs_read_u64(bs); return GF_OK; } GF_Box *drep_box_new() { ISOM_DECL_BOX_ALLOC(GF_DREPBox, GF_ISOM_BOX_TYPE_DREP); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err drep_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_DREPBox *ptr = (GF_DREPBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->nbBytes); return GF_OK; } GF_Err drep_box_size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** TMIN GF_Box **********************************************************/ void tmin_box_del(GF_Box *s) { gf_free((GF_TMINBox *)s); } GF_Err tmin_box_read(GF_Box *s, GF_BitStream *bs) { GF_TMINBox *ptr = (GF_TMINBox *)s; ISOM_DECREASE_SIZE(ptr, 4) ptr->minTime = gf_bs_read_u32(bs); return GF_OK; } GF_Box *tmin_box_new() { ISOM_DECL_BOX_ALLOC(GF_TMINBox, GF_ISOM_BOX_TYPE_TMIN); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tmin_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TMINBox *ptr = (GF_TMINBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->minTime); return GF_OK; } GF_Err tmin_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** TMAX GF_Box **********************************************************/ void tmax_box_del(GF_Box *s) { gf_free((GF_TMAXBox *)s); } GF_Err tmax_box_read(GF_Box *s, GF_BitStream *bs) { GF_TMAXBox *ptr = (GF_TMAXBox *)s; ISOM_DECREASE_SIZE(ptr, 4) ptr->maxTime = gf_bs_read_u32(bs); return GF_OK; } GF_Box *tmax_box_new() { ISOM_DECL_BOX_ALLOC(GF_TMAXBox, GF_ISOM_BOX_TYPE_TMAX); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tmax_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TMAXBox *ptr = (GF_TMAXBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->maxTime); return GF_OK; } GF_Err tmax_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** PMAX GF_Box **********************************************************/ void pmax_box_del(GF_Box *s) { gf_free((GF_PMAXBox *)s); } GF_Err pmax_box_read(GF_Box *s, 
GF_BitStream *bs) { GF_PMAXBox *ptr = (GF_PMAXBox *)s; ISOM_DECREASE_SIZE(ptr, 4) ptr->maxSize = gf_bs_read_u32(bs); return GF_OK; } GF_Box *pmax_box_new() { ISOM_DECL_BOX_ALLOC(GF_PMAXBox, GF_ISOM_BOX_TYPE_PMAX); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err pmax_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_PMAXBox *ptr = (GF_PMAXBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->maxSize); return GF_OK; } GF_Err pmax_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** DMAX GF_Box **********************************************************/ void dmax_box_del(GF_Box *s) { gf_free((GF_DMAXBox *)s); } GF_Err dmax_box_read(GF_Box *s, GF_BitStream *bs) { GF_DMAXBox *ptr = (GF_DMAXBox *)s; ISOM_DECREASE_SIZE(ptr, 4) ptr->maxDur = gf_bs_read_u32(bs); return GF_OK; } GF_Box *dmax_box_new() { ISOM_DECL_BOX_ALLOC(GF_DMAXBox, GF_ISOM_BOX_TYPE_DMAX); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dmax_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_DMAXBox *ptr = (GF_DMAXBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->maxDur); return GF_OK; } GF_Err dmax_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** PAYT GF_Box **********************************************************/ void payt_box_del(GF_Box *s) { GF_PAYTBox *payt = (GF_PAYTBox *)s; if (payt->payloadString) gf_free(payt->payloadString); gf_free(payt); } GF_Err payt_box_read(GF_Box *s, GF_BitStream *bs) { u32 length; GF_PAYTBox *ptr = (GF_PAYTBox *)s; ISOM_DECREASE_SIZE(ptr, 5 ); ptr->payloadCode = gf_bs_read_u32(bs); length = gf_bs_read_u8(bs); ISOM_DECREASE_SIZE(ptr, length); ptr->payloadString = (char*)gf_malloc(sizeof(char) * (length+1) ); if (! ptr->payloadString) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->payloadString, length); ptr->payloadString[length] = 0; return GF_OK; } GF_Box *payt_box_new() { ISOM_DECL_BOX_ALLOC(GF_PAYTBox, GF_ISOM_BOX_TYPE_PAYT); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err payt_box_write(GF_Box *s, GF_BitStream *bs) { u32 len; GF_Err e; GF_PAYTBox *ptr = (GF_PAYTBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->payloadCode); len = ptr->payloadString ? (u32) strlen(ptr->payloadString) : 0; gf_bs_write_u8(bs, len); if (len) gf_bs_write_data(bs, ptr->payloadString, len); return GF_OK; } GF_Err payt_box_size(GF_Box *s) { GF_PAYTBox *ptr = (GF_PAYTBox *)s; s->size += 4 + 1; if (ptr->payloadString) ptr->size += strlen(ptr->payloadString); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /********************************************************** PAYT GF_Box **********************************************************/ void name_box_del(GF_Box *s) { GF_NameBox *name = (GF_NameBox *)s; if (name->string) gf_free(name->string); gf_free(name); } GF_Err name_box_read(GF_Box *s, GF_BitStream *bs) { u32 length; GF_NameBox *ptr = (GF_NameBox *)s; length = (u32) (ptr->size); ptr->string = (char*)gf_malloc(sizeof(char) * (length+1)); if (! 
ptr->string) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->string, length); ptr->string[length] = 0; return GF_OK; } GF_Box *name_box_new() { ISOM_DECL_BOX_ALLOC(GF_NameBox, GF_ISOM_BOX_TYPE_NAME); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err name_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_NameBox *ptr = (GF_NameBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; if (ptr->string) { gf_bs_write_data(bs, ptr->string, (u32) strlen(ptr->string) + 1); } return GF_OK; } GF_Err name_box_size(GF_Box *s) { GF_NameBox *ptr = (GF_NameBox *)s; if (ptr->string) ptr->size += strlen(ptr->string) + 1; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void tssy_box_del(GF_Box *s) { gf_free(s); } GF_Err tssy_box_read(GF_Box *s, GF_BitStream *bs) { GF_TimeStampSynchronyBox *ptr = (GF_TimeStampSynchronyBox *)s; ISOM_DECREASE_SIZE(ptr, 1) gf_bs_read_int(bs, 6); ptr->timestamp_sync = gf_bs_read_int(bs, 2); return GF_OK; } GF_Box *tssy_box_new() { ISOM_DECL_BOX_ALLOC(GF_TimeStampSynchronyBox, GF_ISOM_BOX_TYPE_TSSY); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tssy_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TimeStampSynchronyBox *ptr = (GF_TimeStampSynchronyBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_int(bs, 0, 6); gf_bs_write_int(bs, ptr->timestamp_sync, 2); return GF_OK; } GF_Err tssy_box_size(GF_Box *s) { s->size += 1; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void srpp_box_del(GF_Box *s) { gf_free(s); } GF_Err srpp_on_child_box(GF_Box *s, GF_Box *a) { GF_SRTPProcessBox *ptr = (GF_SRTPProcessBox *)s; switch(a->type) { case GF_ISOM_BOX_TYPE_SCHI: if (ptr->info) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->info = (GF_SchemeInformationBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_SCHM: if (ptr->scheme_type) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->scheme_type = (GF_SchemeTypeBox *)a; return GF_OK; } return GF_OK; } GF_Err srpp_box_read(GF_Box *s, GF_BitStream *bs) { GF_SRTPProcessBox *ptr = (GF_SRTPProcessBox *)s; ISOM_DECREASE_SIZE(s, 16) ptr->encryption_algorithm_rtp = gf_bs_read_u32(bs); ptr->encryption_algorithm_rtcp = gf_bs_read_u32(bs); ptr->integrity_algorithm_rtp = gf_bs_read_u32(bs); ptr->integrity_algorithm_rtcp = gf_bs_read_u32(bs); return gf_isom_box_array_read(s, bs, srpp_on_child_box); } GF_Box *srpp_box_new() { ISOM_DECL_BOX_ALLOC(GF_SRTPProcessBox, GF_ISOM_BOX_TYPE_SRPP); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err srpp_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SRTPProcessBox *ptr = (GF_SRTPProcessBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->encryption_algorithm_rtp); gf_bs_write_u32(bs, ptr->encryption_algorithm_rtcp); gf_bs_write_u32(bs, ptr->integrity_algorithm_rtp); gf_bs_write_u32(bs, ptr->integrity_algorithm_rtcp); return GF_OK; } GF_Err srpp_box_size(GF_Box *s) { u32 pos = 0; GF_SRTPProcessBox *ptr = (GF_SRTPProcessBox *)s; s->size += 16; gf_isom_check_position(s, (GF_Box*)ptr->info, &pos); gf_isom_check_position(s, (GF_Box*)ptr->scheme_type, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void rssr_box_del(GF_Box *s) { gf_free(s); } GF_Err rssr_box_read(GF_Box *s, GF_BitStream *bs) { GF_ReceivedSsrcBox *ptr = (GF_ReceivedSsrcBox *)s; ISOM_DECREASE_SIZE(ptr, 4) ptr->ssrc = gf_bs_read_u32(bs); return GF_OK; } GF_Box *rssr_box_new() { ISOM_DECL_BOX_ALLOC(GF_ReceivedSsrcBox, GF_ISOM_BOX_TYPE_RSSR); 
return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err rssr_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ReceivedSsrcBox *ptr = (GF_ReceivedSsrcBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->ssrc); return GF_OK; } GF_Err rssr_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void iods_box_del(GF_Box *s) { GF_ObjectDescriptorBox *ptr = (GF_ObjectDescriptorBox *)s; if (ptr == NULL) return; if (ptr->descriptor) gf_odf_desc_del(ptr->descriptor); gf_free(ptr); } GF_Err iods_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 descSize; char *desc; GF_ObjectDescriptorBox *ptr = (GF_ObjectDescriptorBox *)s; //use the OD codec... descSize = (u32) (ptr->size); desc = (char*)gf_malloc(sizeof(char) * descSize); if (!desc) return GF_OUT_OF_MEM; gf_bs_read_data(bs, desc, descSize); e = gf_odf_desc_read(desc, descSize, &ptr->descriptor); //OK, free our desc gf_free(desc); return e; } GF_Box *iods_box_new() { ISOM_DECL_BOX_ALLOC(GF_ObjectDescriptorBox, GF_ISOM_BOX_TYPE_IODS); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err iods_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 descSize; u8 *desc; GF_ObjectDescriptorBox *ptr = (GF_ObjectDescriptorBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; //call our OD codec e = gf_odf_desc_write(ptr->descriptor, &desc, &descSize); if (e) return e; gf_bs_write_data(bs, desc, descSize); //and free our stuff maybe!! gf_free(desc); return GF_OK; } GF_Err iods_box_size(GF_Box *s) { GF_ObjectDescriptorBox *ptr = (GF_ObjectDescriptorBox *)s; ptr->size += gf_odf_desc_size(ptr->descriptor); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mdat_box_del(GF_Box *s) { GF_MediaDataBox *ptr = (GF_MediaDataBox *)s; if (!s) return; if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Err mdat_box_read(GF_Box *s, GF_BitStream *bs) { GF_MediaDataBox *ptr = (GF_MediaDataBox *)s; if (ptr == NULL) return GF_BAD_PARAM; ptr->dataSize = s->size; ptr->bsOffset = gf_bs_get_position(bs); //then skip these bytes gf_bs_skip_bytes(bs, ptr->dataSize); return GF_OK; } GF_Box *mdat_box_new() { ISOM_DECL_BOX_ALLOC(GF_MediaDataBox, GF_ISOM_BOX_TYPE_MDAT); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mdat_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MediaDataBox *ptr = (GF_MediaDataBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; //make sure we have some data ... 
//if not, we handle that independently (edit files) if (ptr->data) { gf_bs_write_data(bs, ptr->data, (u32) ptr->dataSize); } return GF_OK; } GF_Err mdat_box_size(GF_Box *s) { GF_MediaDataBox *ptr = (GF_MediaDataBox *)s; ptr->size += ptr->dataSize; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mdhd_box_del(GF_Box *s) { GF_MediaHeaderBox *ptr = (GF_MediaHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err mdhd_box_read(GF_Box *s, GF_BitStream *bs) { GF_MediaHeaderBox *ptr = (GF_MediaHeaderBox *)s; if (ptr->version == 1) { ISOM_DECREASE_SIZE(ptr, 28) ptr->creationTime = gf_bs_read_u64(bs); ptr->modificationTime = gf_bs_read_u64(bs); ptr->timeScale = gf_bs_read_u32(bs); ptr->duration = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 16) ptr->creationTime = gf_bs_read_u32(bs); ptr->modificationTime = gf_bs_read_u32(bs); ptr->timeScale = gf_bs_read_u32(bs); ptr->duration = gf_bs_read_u32(bs); } if (!ptr->timeScale) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Media header timescale is 0 - defaulting to 90000\n" )); ptr->timeScale = 90000; } ptr->original_duration = ptr->duration; ISOM_DECREASE_SIZE(ptr, 4) //our padding bit gf_bs_read_int(bs, 1); //the spec is unclear here, just says "the value 0 is interpreted as undetermined" ptr->packedLanguage[0] = gf_bs_read_int(bs, 5); ptr->packedLanguage[1] = gf_bs_read_int(bs, 5); ptr->packedLanguage[2] = gf_bs_read_int(bs, 5); //but before or after compaction ?? We assume before if (ptr->packedLanguage[0] || ptr->packedLanguage[1] || ptr->packedLanguage[2]) { ptr->packedLanguage[0] += 0x60; ptr->packedLanguage[1] += 0x60; ptr->packedLanguage[2] += 0x60; } else { ptr->packedLanguage[0] = 'u'; ptr->packedLanguage[1] = 'n'; ptr->packedLanguage[2] = 'd'; } ptr->reserved = gf_bs_read_u16(bs); return GF_OK; } GF_Box *mdhd_box_new() { ISOM_DECL_BOX_ALLOC(GF_MediaHeaderBox, GF_ISOM_BOX_TYPE_MDHD); tmp->packedLanguage[0] = 'u'; tmp->packedLanguage[1] = 'n'; tmp->packedLanguage[2] = 'd'; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mdhd_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MediaHeaderBox *ptr = (GF_MediaHeaderBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->version == 1) { gf_bs_write_u64(bs, ptr->creationTime); gf_bs_write_u64(bs, ptr->modificationTime); gf_bs_write_u32(bs, ptr->timeScale); gf_bs_write_u64(bs, ptr->duration); } else { gf_bs_write_u32(bs, (u32) ptr->creationTime); gf_bs_write_u32(bs, (u32) ptr->modificationTime); gf_bs_write_u32(bs, ptr->timeScale); gf_bs_write_u32(bs, (u32) ptr->duration); } //SPECS: BIT(1) of padding gf_bs_write_int(bs, 0, 1); gf_bs_write_int(bs, ptr->packedLanguage[0] - 0x60, 5); gf_bs_write_int(bs, ptr->packedLanguage[1] - 0x60, 5); gf_bs_write_int(bs, ptr->packedLanguage[2] - 0x60, 5); gf_bs_write_u16(bs, ptr->reserved); return GF_OK; } GF_Err mdhd_box_size(GF_Box *s) { GF_MediaHeaderBox *ptr = (GF_MediaHeaderBox *)s; ptr->version = (ptr->duration>0xFFFFFFFF) ? 1 : 0; ptr->size += 4; ptr->size += (ptr->version == 1) ? 
28 : 16; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mdia_box_del(GF_Box *s) { GF_MediaBox *ptr = (GF_MediaBox *)s; if (ptr == NULL) return; if (ptr->nalu_parser) gf_bs_del(ptr->nalu_parser); if (ptr->nalu_out_bs) gf_bs_del(ptr->nalu_out_bs); if (ptr->nalu_ps_bs) gf_bs_del(ptr->nalu_ps_bs); if (ptr->extracted_bs) gf_bs_del(ptr->extracted_bs); if (ptr->extracted_samp) gf_isom_sample_del(&ptr->extracted_samp); if (ptr->in_sample_buffer) gf_free(ptr->in_sample_buffer); if (ptr->tmp_nal_copy_buffer) gf_free(ptr->tmp_nal_copy_buffer); gf_free(ptr); } GF_Err mdia_on_child_box(GF_Box *s, GF_Box *a) { GF_MediaBox *ptr = (GF_MediaBox *)s; switch(a->type) { case GF_ISOM_BOX_TYPE_MDHD: if (ptr->mediaHeader) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->mediaHeader = (GF_MediaHeaderBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_HDLR: if (ptr->handler) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->handler = (GF_HandlerBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_MINF: if (ptr->information) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->information = (GF_MediaInformationBox *)a; return GF_OK; } return GF_OK; } GF_Err mdia_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; u64 cookie = gf_bs_get_cookie(bs); cookie &= ~GF_ISOM_BS_COOKIE_VISUAL_TRACK; gf_bs_set_cookie(bs, cookie); e = gf_isom_box_array_read(s, bs, mdia_on_child_box); gf_bs_set_cookie(bs, cookie); if (e) return e; if (!((GF_MediaBox *)s)->information) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MediaInformationBox\n")); return GF_ISOM_INVALID_FILE; } if (!((GF_MediaBox *)s)->handler) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing HandlerBox\n")); return GF_ISOM_INVALID_FILE; } if (!((GF_MediaBox *)s)->mediaHeader) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MediaHeaderBox\n")); return GF_ISOM_INVALID_FILE; } return GF_OK; } GF_Box *mdia_box_new() { ISOM_DECL_BOX_ALLOC(GF_MediaBox, GF_ISOM_BOX_TYPE_MDIA); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mdia_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err mdia_box_size(GF_Box *s) { u32 pos = 0; GF_MediaBox *ptr = (GF_MediaBox *)s; //Header first gf_isom_check_position(s, (GF_Box*)ptr->mediaHeader, &pos); //then handler gf_isom_check_position(s, (GF_Box*)ptr->handler, &pos); //then info gf_isom_check_position(s, (GF_Box*)ptr->information, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mfra_box_del(GF_Box *s) { GF_MovieFragmentRandomAccessBox *ptr = (GF_MovieFragmentRandomAccessBox *)s; if (ptr == NULL) return; gf_list_del(ptr->tfra_list); gf_free(ptr); } GF_Box *mfra_box_new() { ISOM_DECL_BOX_ALLOC(GF_MovieFragmentRandomAccessBox, GF_ISOM_BOX_TYPE_MFRA); tmp->tfra_list = gf_list_new(); return (GF_Box *)tmp; } GF_Err mfra_on_child_box(GF_Box *s, GF_Box *a) { GF_MovieFragmentRandomAccessBox *ptr = (GF_MovieFragmentRandomAccessBox *)s; switch(a->type) { case GF_ISOM_BOX_TYPE_TFRA: return gf_list_add(ptr->tfra_list, a); case GF_ISOM_BOX_TYPE_MFRO: if (ptr->mfro) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->mfro = (GF_MovieFragmentRandomAccessOffsetBox *)a; return GF_OK; } return GF_OK; } GF_Err mfra_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, mfra_on_child_box); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mfra_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err mfra_box_size(GF_Box *s) { u32 pos=0; GF_MovieFragmentRandomAccessBox *ptr = (GF_MovieFragmentRandomAccessBox *)s; gf_isom_check_position_list(s, ptr->tfra_list, &pos); 
gf_isom_check_position(s, (GF_Box *)ptr->mfro, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void tfra_box_del(GF_Box *s) { GF_TrackFragmentRandomAccessBox *ptr = (GF_TrackFragmentRandomAccessBox *)s; if (ptr == NULL) return; if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Box *tfra_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackFragmentRandomAccessBox, GF_ISOM_BOX_TYPE_TFRA); return (GF_Box *)tmp; } GF_Err tfra_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_RandomAccessEntry *p = 0; GF_TrackFragmentRandomAccessBox *ptr = (GF_TrackFragmentRandomAccessBox *)s; ISOM_DECREASE_SIZE(ptr, 12); ptr->track_id = gf_bs_read_u32(bs); if (gf_bs_read_int(bs, 26) != 0) return GF_ISOM_INVALID_FILE; ptr->traf_bits = (gf_bs_read_int(bs, 2) + 1) * 8; ptr->trun_bits = (gf_bs_read_int(bs, 2) + 1) * 8; ptr->sample_bits = (gf_bs_read_int(bs, 2) + 1) * 8; ptr->nb_entries = gf_bs_read_u32(bs); if (ptr->version == 1) { if (ptr->nb_entries > ptr->size / (16+(ptr->traf_bits+ptr->trun_bits+ptr->sample_bits)/8)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in traf\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } } else { if (ptr->nb_entries > ptr->size / (8+(ptr->traf_bits+ptr->trun_bits+ptr->sample_bits)/8)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in traf\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } } if (ptr->nb_entries) { p = (GF_RandomAccessEntry *) gf_malloc(sizeof(GF_RandomAccessEntry) * ptr->nb_entries); if (!p) return GF_OUT_OF_MEM; } ptr->entries = p; for (i=0; i<ptr->nb_entries; i++) { memset(p, 0, sizeof(GF_RandomAccessEntry)); if (ptr->version == 1) { p->time = gf_bs_read_u64(bs); p->moof_offset = gf_bs_read_u64(bs); } else { p->time = gf_bs_read_u32(bs); p->moof_offset = gf_bs_read_u32(bs); } p->traf_number = gf_bs_read_int(bs, ptr->traf_bits); p->trun_number = gf_bs_read_int(bs, ptr->trun_bits); p->sample_number = gf_bs_read_int(bs, ptr->sample_bits); ++p; } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tfra_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i, sap_nb_entries; GF_TrackFragmentRandomAccessBox *ptr = (GF_TrackFragmentRandomAccessBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->track_id); gf_bs_write_int(bs, 0, 26); gf_bs_write_int(bs, ptr->traf_bits/8 - 1, 2); gf_bs_write_int(bs, ptr->trun_bits/8 - 1, 2); gf_bs_write_int(bs, ptr->sample_bits/8 - 1, 2); sap_nb_entries = 0; for (i=0; i<ptr->nb_entries; i++) { GF_RandomAccessEntry *p = &ptr->entries[i]; //no sap found, do not store if (p->trun_number) sap_nb_entries++; } gf_bs_write_u32(bs, sap_nb_entries); for (i=0; i<ptr->nb_entries; i++) { GF_RandomAccessEntry *p = &ptr->entries[i]; //no sap found, do not store if (!p->trun_number) continue; if (ptr->version==1) { gf_bs_write_u64(bs, p->time); gf_bs_write_u64(bs, p->moof_offset); } else { gf_bs_write_u32(bs, (u32) p->time); gf_bs_write_u32(bs, (u32) p->moof_offset); } gf_bs_write_int(bs, p->traf_number, ptr->traf_bits); gf_bs_write_int(bs, p->trun_number, ptr->trun_bits); gf_bs_write_int(bs, p->sample_number, ptr->sample_bits); } return GF_OK; } GF_Err tfra_box_size(GF_Box *s) { u32 i; GF_TrackFragmentRandomAccessBox *ptr = (GF_TrackFragmentRandomAccessBox *)s; ptr->size += 12; for (i=0; i<ptr->nb_entries; i++) { GF_RandomAccessEntry *p = &ptr->entries[i]; //no sap found, do not store if (!p->trun_number) continue; ptr->size += ((ptr->version==1) ? 
16 : 8 ) + ptr->traf_bits/8 + ptr->trun_bits/8 + ptr->sample_bits/8; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mfro_box_del(GF_Box *s) { GF_MovieFragmentRandomAccessOffsetBox *ptr = (GF_MovieFragmentRandomAccessOffsetBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Box *mfro_box_new() { ISOM_DECL_BOX_ALLOC(GF_MovieFragmentRandomAccessOffsetBox, GF_ISOM_BOX_TYPE_MFRO); return (GF_Box *)tmp; } GF_Err mfro_box_read(GF_Box *s, GF_BitStream *bs) { GF_MovieFragmentRandomAccessOffsetBox *ptr = (GF_MovieFragmentRandomAccessOffsetBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->container_size = gf_bs_read_u32(bs); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mfro_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MovieFragmentRandomAccessOffsetBox *ptr = (GF_MovieFragmentRandomAccessOffsetBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->container_size); return GF_OK; } GF_Err mfro_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void elng_box_del(GF_Box *s) { GF_ExtendedLanguageBox *ptr = (GF_ExtendedLanguageBox *)s; if (ptr == NULL) return; if (ptr->extended_language) gf_free(ptr->extended_language); gf_free(ptr); } GF_Err elng_box_read(GF_Box *s, GF_BitStream *bs) { GF_ExtendedLanguageBox *ptr = (GF_ExtendedLanguageBox *)s; if (ptr->size) { ptr->extended_language = (char*)gf_malloc((u32) ptr->size); if (ptr->extended_language == NULL) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->extended_language, (u32) ptr->size); /*safety check in case the string is not null-terminated*/ if (ptr->extended_language[ptr->size-1]) { char *str = (char*)gf_malloc((u32) ptr->size + 1); if (!str) return GF_OUT_OF_MEM; memcpy(str, ptr->extended_language, (u32) ptr->size); str[ptr->size] = 0; gf_free(ptr->extended_language); ptr->extended_language = str; } } return GF_OK; } GF_Box *elng_box_new() { ISOM_DECL_BOX_ALLOC(GF_MediaBox, GF_ISOM_BOX_TYPE_ELNG); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err elng_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ExtendedLanguageBox *ptr = (GF_ExtendedLanguageBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->extended_language) { gf_bs_write_data(bs, ptr->extended_language, (u32)(strlen(ptr->extended_language)+1)); } return GF_OK; } GF_Err elng_box_size(GF_Box *s) { GF_ExtendedLanguageBox *ptr = (GF_ExtendedLanguageBox *)s; if (ptr->extended_language) { ptr->size += strlen(ptr->extended_language)+1; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void mfhd_box_del(GF_Box *s) { GF_MovieFragmentHeaderBox *ptr = (GF_MovieFragmentHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err mfhd_box_read(GF_Box *s, GF_BitStream *bs) { GF_MovieFragmentHeaderBox *ptr = (GF_MovieFragmentHeaderBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->sequence_number = gf_bs_read_u32(bs); return GF_OK; } GF_Box *mfhd_box_new() { ISOM_DECL_BOX_ALLOC(GF_MovieFragmentHeaderBox, GF_ISOM_BOX_TYPE_MFHD); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mfhd_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MovieFragmentHeaderBox *ptr = (GF_MovieFragmentHeaderBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->sequence_number); return GF_OK; } GF_Err mfhd_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ void minf_box_del(GF_Box *s) { GF_MediaInformationBox *ptr = 
(GF_MediaInformationBox *)s; if (ptr == NULL) return; //if we have a Handler not self-contained, delete it (the self-contained belongs to the movie) if (ptr->dataHandler) { gf_isom_datamap_close(ptr); } gf_free(ptr); } GF_Err minf_on_child_box(GF_Box *s, GF_Box *a) { GF_MediaInformationBox *ptr = (GF_MediaInformationBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_NMHD: case GF_ISOM_BOX_TYPE_STHD: case GF_ISOM_BOX_TYPE_VMHD: case GF_ISOM_BOX_TYPE_SMHD: case GF_ISOM_BOX_TYPE_HMHD: case GF_ISOM_BOX_TYPE_GMHD: if (ptr->InfoHeader) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->InfoHeader = a; return GF_OK; case GF_ISOM_BOX_TYPE_DINF: if (ptr->dataInformation) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->dataInformation = (GF_DataInformationBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_STBL: if (ptr->sampleTable ) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->sampleTable = (GF_SampleTableBox *)a; return GF_OK; } return GF_OK; } GF_Err minf_box_read(GF_Box *s, GF_BitStream *bs) { GF_MediaInformationBox *ptr = (GF_MediaInformationBox *)s; GF_Err e; e = gf_isom_box_array_read(s, bs, minf_on_child_box); if (!e && ! ptr->dataInformation) { GF_Box *url; GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing DataInformationBox\n")); //commented on purpose, we are still able to handle the file, we only throw an error but keep processing // e = GF_ISOM_INVALID_FILE; //add a dinf box to avoid any access to a null dinf ptr->dataInformation = (GF_DataInformationBox *) gf_isom_box_new_parent(&ptr->child_boxes, GF_ISOM_BOX_TYPE_DINF); if (!ptr->dataInformation) return GF_OUT_OF_MEM; ptr->dataInformation->dref = (GF_DataReferenceBox *) gf_isom_box_new_parent(&ptr->dataInformation->child_boxes, GF_ISOM_BOX_TYPE_DREF); if (!ptr->dataInformation->dref) return GF_OUT_OF_MEM; url = gf_isom_box_new_parent(&ptr->dataInformation->dref->child_boxes, GF_ISOM_BOX_TYPE_URL); if (!url) return GF_OUT_OF_MEM; ((GF_FullBox*)url)->flags = 1; } return e; } GF_Box *minf_box_new() { ISOM_DECL_BOX_ALLOC(GF_MediaInformationBox, GF_ISOM_BOX_TYPE_MINF); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err minf_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err minf_box_size(GF_Box *s) { u32 pos=0; GF_MediaInformationBox *ptr = (GF_MediaInformationBox *)s; //Header first gf_isom_check_position(s, (GF_Box *)ptr->InfoHeader, &pos); //then dataInfo gf_isom_check_position(s, (GF_Box *)ptr->dataInformation, &pos); gf_isom_check_position(s, gf_isom_box_find_child(s->child_boxes, GF_ISOM_BOX_TYPE_MVCI), &pos); //then sampleTable gf_isom_check_position(s, (GF_Box *)ptr->sampleTable, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void moof_box_del(GF_Box *s) { GF_MovieFragmentBox *ptr = (GF_MovieFragmentBox *)s; if (ptr == NULL) return; gf_list_del(ptr->TrackList); if (ptr->PSSHs) gf_list_del(ptr->PSSHs); if (ptr->mdat) gf_free(ptr->mdat); gf_free(ptr); } GF_Err moof_on_child_box(GF_Box *s, GF_Box *a) { GF_MovieFragmentBox *ptr = (GF_MovieFragmentBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_MFHD: if (ptr->mfhd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->mfhd = (GF_MovieFragmentHeaderBox *) a; return GF_OK; case GF_ISOM_BOX_TYPE_TRAF: return gf_list_add(ptr->TrackList, a); case GF_ISOM_BOX_TYPE_PSSH: if (!ptr->PSSHs) ptr->PSSHs = gf_list_new(); return gf_list_add(ptr->PSSHs, a); } return GF_OK; } GF_Err moof_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, moof_on_child_box); } GF_Box *moof_box_new() { ISOM_DECL_BOX_ALLOC(GF_MovieFragmentBox, 
GF_ISOM_BOX_TYPE_MOOF); tmp->TrackList = gf_list_new(); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err moof_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err moof_box_size(GF_Box *s) { u32 pos=0; GF_MovieFragmentBox *ptr = (GF_MovieFragmentBox *) s; if (!s) return GF_BAD_PARAM; //Header First gf_isom_check_position(s, (GF_Box *)ptr->mfhd, &pos); //then PSSH gf_isom_check_position_list(s, ptr->PSSHs, &pos); //then the track list gf_isom_check_position_list(s, ptr->TrackList, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ void moov_box_del(GF_Box *s) { GF_MovieBox *ptr = (GF_MovieBox *)s; if (ptr == NULL) return; gf_list_del(ptr->trackList); gf_free(ptr); } GF_Err moov_on_child_box(GF_Box *s, GF_Box *a) { GF_MovieBox *ptr = (GF_MovieBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_IODS: if (ptr->iods) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->iods = (GF_ObjectDescriptorBox *)a; //if no IOD, delete the box if (!ptr->iods->descriptor) { ptr->iods = NULL; gf_isom_box_del_parent(&s->child_boxes, a); } return GF_OK; case GF_ISOM_BOX_TYPE_MVHD: if (ptr->mvhd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->mvhd = (GF_MovieHeaderBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_UDTA: if (ptr->udta) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->udta = (GF_UserDataBox *)a; return GF_OK; #ifndef GPAC_DISABLE_ISOM_FRAGMENTS case GF_ISOM_BOX_TYPE_MVEX: if (ptr->mvex) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->mvex = (GF_MovieExtendsBox *)a; ptr->mvex->mov = ptr->mov; return GF_OK; #endif case GF_ISOM_BOX_TYPE_META: if (ptr->meta) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->meta = (GF_MetaBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_TRAK: //set our pointer to this obj ((GF_TrackBox *)a)->moov = ptr; return gf_list_add(ptr->trackList, a); } return GF_OK; } GF_Err moov_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, moov_on_child_box); } GF_Box *moov_box_new() { ISOM_DECL_BOX_ALLOC(GF_MovieBox, GF_ISOM_BOX_TYPE_MOOV); tmp->trackList = gf_list_new(); if (!tmp->trackList) { gf_free(tmp); return NULL; } return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err moov_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err moov_box_size(GF_Box *s) { u32 pos=0; GF_MovieBox *ptr = (GF_MovieBox *)s; gf_isom_check_position(s, (GF_Box *) ptr->mvhd, &pos); gf_isom_check_position(s, (GF_Box *) ptr->iods, &pos); gf_isom_check_position(s, (GF_Box *) ptr->meta, &pos); #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (ptr->mvex && !ptr->mvex_after_traks) { gf_isom_check_position(s, (GF_Box *) ptr->mvex, &pos); } #endif gf_isom_check_position_list(s, ptr->trackList, &pos); #ifndef GPAC_DISABLE_ISOM_FRAGMENTS if (ptr->mvex && ptr->mvex_after_traks) { gf_isom_check_position(s, (GF_Box *) ptr->mvex, &pos); } #endif return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void audio_sample_entry_box_del(GF_Box *s) { GF_MPEGAudioSampleEntryBox *ptr = (GF_MPEGAudioSampleEntryBox *)s; if (ptr == NULL) return; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s); if (ptr->slc) gf_odf_desc_del((GF_Descriptor *)ptr->slc); gf_free(ptr); } GF_Err audio_sample_entry_on_child_box(GF_Box *s, GF_Box *a) { GF_UnknownBox *wave = NULL; Bool drop_wave=GF_FALSE; GF_MPEGAudioSampleEntryBox *ptr = (GF_MPEGAudioSampleEntryBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_ESDS: if (ptr->esd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->esd = (GF_ESDBox *)a; ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_NONE; break; case GF_ISOM_BOX_TYPE_DAMR: case 
GF_ISOM_BOX_TYPE_DEVC: case GF_ISOM_BOX_TYPE_DQCP: case GF_ISOM_BOX_TYPE_DSMV: if (ptr->cfg_3gpp) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->cfg_3gpp = (GF_3GPPConfigBox *) a; /*for 3GP config, remember sample entry type in config*/ ptr->cfg_3gpp->cfg.type = ptr->type; ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_NONE; break; case GF_ISOM_BOX_TYPE_DOPS: if (ptr->cfg_opus) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->cfg_opus = (GF_OpusSpecificBox *)a; ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_NONE; break; case GF_ISOM_BOX_TYPE_DAC3: if (ptr->cfg_ac3) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->cfg_ac3 = (GF_AC3ConfigBox *) a; ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_NONE; break; case GF_ISOM_BOX_TYPE_DEC3: if (ptr->cfg_ac3) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->cfg_ac3 = (GF_AC3ConfigBox *) a; break; case GF_ISOM_BOX_TYPE_MHAC: if (ptr->cfg_mha) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->cfg_mha = (GF_MHAConfigBox *) a; ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_NONE; break; case GF_ISOM_BOX_TYPE_DFLA: if (ptr->cfg_flac) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->cfg_flac = (GF_FLACConfigBox *) a; ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_NONE; break; case GF_ISOM_BOX_TYPE_UNKNOWN: wave = (GF_UnknownBox *)a; /*HACK for QT files: get the esds box from the track*/ if (s->type == GF_ISOM_BOX_TYPE_MP4A) { if (ptr->esd) ERROR_ON_DUPLICATED_BOX(a, ptr) //wave subboxes may have been properly parsed if ((wave->original_4cc == GF_QT_BOX_TYPE_WAVE) && gf_list_count(wave->child_boxes)) { u32 i; for (i =0; i<gf_list_count(wave->child_boxes); i++) { GF_Box *inner_box = (GF_Box *)gf_list_get(wave->child_boxes, i); if (inner_box->type == GF_ISOM_BOX_TYPE_ESDS) { ptr->esd = (GF_ESDBox *)inner_box; if (ptr->qtff_mode & GF_ISOM_AUDIO_QTFF_CONVERT_FLAG) { gf_list_rem(a->child_boxes, i); drop_wave=GF_TRUE; ptr->compression_id = 0; gf_list_add(ptr->child_boxes, inner_box); } } } if (drop_wave) { gf_isom_box_del_parent(&ptr->child_boxes, a); ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_NONE; ptr->version = 0; return GF_OK; } ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_ON_EXT_VALID; return GF_OK; } gf_isom_box_del_parent(&ptr->child_boxes, a); return GF_ISOM_INVALID_MEDIA; } ptr->qtff_mode &= ~GF_ISOM_AUDIO_QTFF_CONVERT_FLAG; if ((wave->original_4cc == GF_QT_BOX_TYPE_WAVE) && gf_list_count(wave->child_boxes)) { ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_ON_NOEXT; } return GF_OK; case GF_QT_BOX_TYPE_WAVE: { u32 subtype = 0; GF_Box **cfg_ptr = NULL; if (s->type == GF_ISOM_BOX_TYPE_MP4A) { cfg_ptr = (GF_Box **) &ptr->esd; subtype = GF_ISOM_BOX_TYPE_ESDS; } else if (s->type == GF_ISOM_BOX_TYPE_AC3) { cfg_ptr = (GF_Box **) &ptr->cfg_ac3; subtype = GF_ISOM_BOX_TYPE_DAC3; } else if (s->type == GF_ISOM_BOX_TYPE_EC3) { cfg_ptr = (GF_Box **) &ptr->cfg_ac3; subtype = GF_ISOM_BOX_TYPE_DEC3; } else if (s->type == GF_ISOM_BOX_TYPE_OPUS) { cfg_ptr = (GF_Box **) &ptr->cfg_opus; subtype = GF_ISOM_BOX_TYPE_DOPS; } else if ((s->type == GF_ISOM_BOX_TYPE_MHA1) || (s->type == GF_ISOM_BOX_TYPE_MHA2) || (s->type == GF_ISOM_BOX_TYPE_MHM1) || (s->type == GF_ISOM_BOX_TYPE_MHM2) ) { cfg_ptr = (GF_Box **) &ptr->cfg_mha; subtype = GF_ISOM_BOX_TYPE_MHAC; } if (cfg_ptr) { if (*cfg_ptr) ERROR_ON_DUPLICATED_BOX(a, ptr) //wave subboxes may have been properly parsed if (gf_list_count(a->child_boxes)) { u32 i; for (i =0; i<gf_list_count(a->child_boxes); i++) { GF_Box *inner_box = (GF_Box *)gf_list_get(a->child_boxes, i); if (inner_box->type == subtype) { *cfg_ptr = inner_box; if (ptr->qtff_mode & GF_ISOM_AUDIO_QTFF_CONVERT_FLAG) { gf_list_rem(a->child_boxes, i); drop_wave=GF_TRUE; gf_list_add(ptr->child_boxes, inner_box); } break; } } if 
(drop_wave) { gf_isom_box_del_parent(&ptr->child_boxes, a); ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_NONE; ptr->compression_id = 0; ptr->version = 0; return GF_OK; } ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_ON_EXT_VALID; return GF_OK; } } } ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_ON_EXT_VALID; return GF_OK; } return GF_OK; } GF_Err audio_sample_entry_box_read(GF_Box *s, GF_BitStream *bs) { GF_MPEGAudioSampleEntryBox *ptr; char *data; u8 a, b, c, d; u32 i, size, v, nb_alnum; GF_Err e; u64 pos, start; ptr = (GF_MPEGAudioSampleEntryBox *)s; start = gf_bs_get_position(bs); gf_bs_seek(bs, start + 8); v = gf_bs_read_u16(bs); if (v) ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_ON_NOEXT; //try to disambiguate QTFF v1 and MP4 v1 audio sample entries ... if (v==1) { //go to end of ISOM audio sample entry, skip 4 byte (box size field), read 4 bytes (box type) and check if this looks like a box gf_bs_seek(bs, start + 8 + 20 + 4); a = gf_bs_read_u8(bs); b = gf_bs_read_u8(bs); c = gf_bs_read_u8(bs); d = gf_bs_read_u8(bs); nb_alnum = 0; if (isalnum(a)) nb_alnum++; if (isalnum(b)) nb_alnum++; if (isalnum(c)) nb_alnum++; if (isalnum(d)) nb_alnum++; if (nb_alnum>2) ptr->qtff_mode = GF_ISOM_AUDIO_QTFF_NONE; } gf_bs_seek(bs, start); e = gf_isom_audio_sample_entry_read((GF_AudioSampleEntryBox*)s, bs); if (e) return e; pos = gf_bs_get_position(bs); size = (u32) s->size; //when cookie is set on bs, always convert qtff-style mp4a to isobmff-style //since the conversion is done in addBox and we don't have the bitstream there (arg...), flag the box if (gf_bs_get_cookie(bs) & GF_ISOM_BS_COOKIE_QT_CONV) { ptr->qtff_mode |= GF_ISOM_AUDIO_QTFF_CONVERT_FLAG; } e = gf_isom_box_array_read(s, bs, audio_sample_entry_on_child_box); if (!e) { if (s->type==GF_ISOM_BOX_TYPE_ENCA) { GF_ProtectionSchemeInfoBox *sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(s->child_boxes, GF_ISOM_BOX_TYPE_SINF); if (sinf && sinf->original_format) { u32 type = sinf->original_format->data_format; switch (type) { case GF_ISOM_SUBTYPE_3GP_AMR: case GF_ISOM_SUBTYPE_3GP_AMR_WB: case GF_ISOM_SUBTYPE_3GP_EVRC: case GF_ISOM_SUBTYPE_3GP_QCELP: case GF_ISOM_SUBTYPE_3GP_SMV: if (ptr->cfg_3gpp) ptr->cfg_3gpp->cfg.type = type; break; } } } return GF_OK; } if (size<8) return GF_ISOM_INVALID_FILE; /*hack for some weird files (possibly recorded with live.com tools, needs further investigations)*/ gf_bs_seek(bs, pos); data = (char*)gf_malloc(sizeof(char) * size); if (!data) return GF_OUT_OF_MEM; gf_bs_read_data(bs, data, size); for (i=0; i<size-8; i++) { if (GF_4CC((u32)data[i+4], (u8)data[i+5], (u8)data[i+6], (u8)data[i+7]) == GF_ISOM_BOX_TYPE_ESDS) { GF_BitStream *mybs = gf_bs_new(data + i, size - i, GF_BITSTREAM_READ); if (ptr->esd) gf_isom_box_del_parent(&ptr->child_boxes, (GF_Box *)ptr->esd); ptr->esd = NULL; e = gf_isom_box_parse((GF_Box **)&ptr->esd, mybs); gf_bs_del(mybs); if (e==GF_OK) { if (!ptr->child_boxes) ptr->child_boxes = gf_list_new(); gf_list_add(ptr->child_boxes, ptr->esd); } else if (ptr->esd) { gf_isom_box_del((GF_Box *)ptr->esd); ptr->esd = NULL; } break; } } gf_free(data); return e; } GF_Box *audio_sample_entry_box_new() { ISOM_DECL_BOX_ALLOC(GF_MPEGAudioSampleEntryBox, GF_ISOM_BOX_TYPE_MP4A); gf_isom_audio_sample_entry_init((GF_AudioSampleEntryBox*)tmp); return (GF_Box *)tmp; } GF_Box *enca_box_new() { ISOM_DECL_BOX_ALLOC(GF_MPEGAudioSampleEntryBox, GF_ISOM_BOX_TYPE_ENCA); gf_isom_audio_sample_entry_init((GF_AudioSampleEntryBox*)tmp); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err audio_sample_entry_box_write(GF_Box *s, GF_BitStream 
*bs) { GF_Err e; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_isom_audio_sample_entry_write((GF_AudioSampleEntryBox*)s, bs); return GF_OK; } GF_Err audio_sample_entry_box_size(GF_Box *s) { u32 pos=0; GF_MPEGAudioSampleEntryBox *ptr = (GF_MPEGAudioSampleEntryBox *)s; gf_isom_audio_sample_entry_size((GF_AudioSampleEntryBox*)s); if (ptr->qtff_mode) return GF_OK; gf_isom_check_position(s, (GF_Box *)ptr->esd, &pos); gf_isom_check_position(s, (GF_Box *)ptr->cfg_3gpp, &pos); gf_isom_check_position(s, (GF_Box *)ptr->cfg_opus, &pos); gf_isom_check_position(s, (GF_Box *)ptr->cfg_ac3, &pos); gf_isom_check_position(s, (GF_Box *)ptr->cfg_flac, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void gen_sample_entry_box_del(GF_Box *s) { GF_SampleEntryBox *ptr = (GF_SampleEntryBox *)s; if (ptr == NULL) return; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s); gf_free(ptr); } GF_Err gen_sample_entry_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)s, bs); if (e) return e; ISOM_DECREASE_SIZE(s, 8); return gf_isom_box_array_read(s, bs, NULL); } GF_Box *gen_sample_entry_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleEntryBox, GF_QT_SUBTYPE_C608);//type will be overriten gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err gen_sample_entry_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SampleEntryBox *ptr = (GF_SampleEntryBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_data(bs, ptr->reserved, 6); gf_bs_write_u16(bs, ptr->dataReferenceIndex); return GF_OK; } GF_Err gen_sample_entry_box_size(GF_Box *s) { GF_SampleEntryBox *ptr = (GF_SampleEntryBox *)s; ptr->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mp4s_box_del(GF_Box *s) { GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s; if (ptr == NULL) return; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s); if (ptr->slc) gf_odf_desc_del((GF_Descriptor *)ptr->slc); gf_free(ptr); } GF_Err mp4s_on_child_box(GF_Box *s, GF_Box *a) { GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_ESDS: if (ptr->esd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->esd = (GF_ESDBox *)a; break; } return GF_OK; } GF_Err mp4s_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s; e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)ptr, bs); if (e) return e; ISOM_DECREASE_SIZE(ptr, 8); return gf_isom_box_array_read(s, bs, mp4s_on_child_box); } GF_Box *mp4s_box_new() { ISOM_DECL_BOX_ALLOC(GF_MPEGSampleEntryBox, GF_ISOM_BOX_TYPE_MP4S); gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp); tmp->internal_type = GF_ISOM_SAMPLE_ENTRY_MP4S; return (GF_Box *)tmp; } GF_Box *encs_box_new() { ISOM_DECL_BOX_ALLOC(GF_MPEGSampleEntryBox, GF_ISOM_BOX_TYPE_ENCS); gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp); tmp->internal_type = GF_ISOM_SAMPLE_ENTRY_MP4S; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mp4s_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_data(bs, ptr->reserved, 6); gf_bs_write_u16(bs, ptr->dataReferenceIndex); return GF_OK; } GF_Err mp4s_box_size(GF_Box *s) { u32 pos=0; GF_MPEGSampleEntryBox *ptr = (GF_MPEGSampleEntryBox *)s; s->size += 8; gf_isom_check_position(s, (GF_Box *)ptr->esd, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void 
video_sample_entry_box_del(GF_Box *s) { GF_MPEGVisualSampleEntryBox *ptr = (GF_MPEGVisualSampleEntryBox *)s; if (ptr == NULL) return; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s); if (ptr->slc) gf_odf_desc_del((GF_Descriptor *)ptr->slc); /*for publishing*/ if (ptr->emul_esd) gf_odf_desc_del((GF_Descriptor *)ptr->emul_esd); gf_free(ptr); } GF_Err video_sample_entry_on_child_box(GF_Box *s, GF_Box *a) { GF_MPEGVisualSampleEntryBox *ptr = (GF_MPEGVisualSampleEntryBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_ESDS: if (ptr->esd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->esd = (GF_ESDBox *)a; break; case GF_ISOM_BOX_TYPE_RINF: if (ptr->rinf) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->rinf = (GF_RestrictedSchemeInfoBox *) a; break; case GF_ISOM_BOX_TYPE_AVCC: if (ptr->avc_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->avc_config = (GF_AVCConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_HVCC: if (ptr->hevc_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->hevc_config = (GF_HEVCConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_SVCC: if (ptr->svc_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->svc_config = (GF_AVCConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_MVCC: if (ptr->mvc_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->mvc_config = (GF_AVCConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_LHVC: if (ptr->lhvc_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->lhvc_config = (GF_HEVCConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_AV1C: if (ptr->av1_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->av1_config = (GF_AV1ConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_VPCC: if (ptr->vp_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->vp_config = (GF_VPConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_DVCC: if (ptr->dovi_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->dovi_config = (GF_DOVIConfigurationBox*)a; break; case GF_ISOM_BOX_TYPE_UUID: if (! 
memcmp(((GF_UnknownUUIDBox*)a)->uuid, GF_ISOM_IPOD_EXT, 16)) { if (ptr->ipod_ext) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->ipod_ext = (GF_UnknownUUIDBox *)a; } else { return GF_OK; } break; case GF_ISOM_BOX_TYPE_D263: if (ptr->cfg_3gpp) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->cfg_3gpp = (GF_3GPPConfigBox *)a; /*for 3GP config, remember sample entry type in config*/ ptr->cfg_3gpp->cfg.type = ptr->type; break; case GF_ISOM_BOX_TYPE_JP2H: if (ptr->jp2h) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->jp2h = (GF_J2KHeaderBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_PASP: case GF_ISOM_BOX_TYPE_CLAP: case GF_ISOM_BOX_TYPE_COLR: case GF_ISOM_BOX_TYPE_MDCV: case GF_ISOM_BOX_TYPE_CLLI: case GF_ISOM_BOX_TYPE_CCST: case GF_ISOM_BOX_TYPE_AUXI: case GF_ISOM_BOX_TYPE_RVCC: case GF_ISOM_BOX_TYPE_M4DS: if (!gf_isom_box_check_unique(s->child_boxes, a)) { ERROR_ON_DUPLICATED_BOX(a, ptr) } return GF_OK; } return GF_OK; } GF_Err video_sample_entry_box_read(GF_Box *s, GF_BitStream *bs) { GF_MPEGVisualSampleEntryBox *mp4v = (GF_MPEGVisualSampleEntryBox*)s; GF_Err e; e = gf_isom_video_sample_entry_read((GF_VisualSampleEntryBox *)s, bs); if (e) return e; e = gf_isom_box_array_read(s, bs, video_sample_entry_on_child_box); if (e) return e; /*this is an AVC sample desc*/ if (mp4v->avc_config || mp4v->svc_config || mp4v->mvc_config) AVC_RewriteESDescriptor(mp4v); /*this is an HEVC sample desc*/ if (mp4v->hevc_config || mp4v->lhvc_config || (mp4v->type==GF_ISOM_BOX_TYPE_HVT1)) HEVC_RewriteESDescriptor(mp4v); /*this is an AV1 sample desc*/ if (mp4v->av1_config) AV1_RewriteESDescriptor(mp4v); /*this is a VP8-9 sample desc*/ if (mp4v->vp_config) VP9_RewriteESDescriptor(mp4v); if (s->type==GF_ISOM_BOX_TYPE_ENCV) { GF_ProtectionSchemeInfoBox *sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(s->child_boxes, GF_ISOM_BOX_TYPE_SINF); if (sinf && sinf->original_format) { u32 type = sinf->original_format->data_format; switch (type) { case GF_ISOM_SUBTYPE_3GP_H263: if (mp4v->cfg_3gpp) mp4v->cfg_3gpp->cfg.type = type; break; } } } return GF_OK; } GF_Box *video_sample_entry_box_new() { GF_MPEGVisualSampleEntryBox *tmp; GF_SAFEALLOC(tmp, GF_MPEGVisualSampleEntryBox); if (tmp == NULL) return NULL; gf_isom_video_sample_entry_init((GF_VisualSampleEntryBox *)tmp); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err video_sample_entry_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_isom_video_sample_entry_write((GF_VisualSampleEntryBox *)s, bs); return GF_OK; } GF_Err video_sample_entry_box_size(GF_Box *s) { u32 pos=0; GF_MPEGVisualSampleEntryBox *ptr = (GF_MPEGVisualSampleEntryBox *)s; gf_isom_video_sample_entry_size((GF_VisualSampleEntryBox *)s); /*make sure we write the config box first, we don't care about the rest*/ /*mp4v*/ gf_isom_check_position(s, (GF_Box *)ptr->esd, &pos); gf_isom_check_position(s, (GF_Box *)ptr->cfg_3gpp, &pos); /*avc / SVC + MVC*/ gf_isom_check_position(s, (GF_Box *)ptr->avc_config, &pos); gf_isom_check_position(s, (GF_Box *)ptr->svc_config, &pos); if (ptr->mvc_config) { gf_isom_check_position(s, gf_isom_box_find_child(s->child_boxes, GF_ISOM_BOX_TYPE_VWID), &pos); gf_isom_check_position(s, (GF_Box *)ptr->mvc_config, &pos); } /*HEVC*/ gf_isom_check_position(s, (GF_Box *)ptr->hevc_config, &pos); gf_isom_check_position(s, (GF_Box *)ptr->lhvc_config, &pos); /*AV1*/ gf_isom_check_position(s, (GF_Box *)ptr->av1_config, &pos); /*VPx*/ gf_isom_check_position(s, (GF_Box *)ptr->vp_config, &pos); /*JP2H*/ gf_isom_check_position(s, (GF_Box *)ptr->jp2h, 
&pos); /*DolbyVision*/ gf_isom_check_position(s, (GF_Box *)ptr->dovi_config, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void mvex_box_del(GF_Box *s) { GF_MovieExtendsBox *ptr = (GF_MovieExtendsBox *)s; if (ptr == NULL) return; gf_list_del(ptr->TrackExList); gf_list_del(ptr->TrackExPropList); gf_free(ptr); } GF_Err mvex_on_child_box(GF_Box *s, GF_Box *a) { GF_MovieExtendsBox *ptr = (GF_MovieExtendsBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_TREX: return gf_list_add(ptr->TrackExList, a); case GF_ISOM_BOX_TYPE_TREP: return gf_list_add(ptr->TrackExPropList, a); case GF_ISOM_BOX_TYPE_MEHD: if (ptr->mehd) ERROR_ON_DUPLICATED_BOX(a, s) ptr->mehd = (GF_MovieExtendsHeaderBox*)a; return GF_OK; } return GF_OK; } GF_Err mvex_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, mvex_on_child_box); } GF_Box *mvex_box_new() { ISOM_DECL_BOX_ALLOC(GF_MovieExtendsBox, GF_ISOM_BOX_TYPE_MVEX); tmp->TrackExList = gf_list_new(); if (!tmp->TrackExList) { gf_free(tmp); return NULL; } tmp->TrackExPropList = gf_list_new(); if (!tmp->TrackExPropList) { gf_list_del(tmp->TrackExList); gf_free(tmp); return NULL; } return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mvex_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err mvex_box_size(GF_Box *s) { u32 pos=0; GF_MovieExtendsBox *ptr = (GF_MovieExtendsBox *) s; gf_isom_check_position(s, (GF_Box *)ptr->mehd, &pos); gf_isom_check_position_list(s, ptr->TrackExList, &pos); gf_isom_check_position_list(s, ptr->TrackExPropList, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *mehd_box_new() { ISOM_DECL_BOX_ALLOC(GF_MovieExtendsHeaderBox, GF_ISOM_BOX_TYPE_MEHD); return (GF_Box *)tmp; } void mehd_box_del(GF_Box *s) { gf_free(s); } GF_Err mehd_box_read(GF_Box *s, GF_BitStream *bs) { GF_MovieExtendsHeaderBox *ptr = (GF_MovieExtendsHeaderBox *)s; if (ptr->version==1) { ISOM_DECREASE_SIZE(ptr, 8); ptr->fragment_duration = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 4); ptr->fragment_duration = (u64) gf_bs_read_u32(bs); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mehd_box_write(GF_Box *s, GF_BitStream *bs) { GF_MovieExtendsHeaderBox *ptr = (GF_MovieExtendsHeaderBox *)s; GF_Err e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->version == 1) { gf_bs_write_u64(bs, ptr->fragment_duration); } else { gf_bs_write_u32(bs, (u32) ptr->fragment_duration); } return GF_OK; } GF_Err mehd_box_size(GF_Box *s) { GF_MovieExtendsHeaderBox *ptr = (GF_MovieExtendsHeaderBox *)s; ptr->version = (ptr->fragment_duration>0xFFFFFFFF) ? 1 : 0; s->size += (ptr->version == 1) ? 
8 : 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ void mvhd_box_del(GF_Box *s) { GF_MovieHeaderBox *ptr = (GF_MovieHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err mvhd_box_read(GF_Box *s, GF_BitStream *bs) { GF_MovieHeaderBox *ptr = (GF_MovieHeaderBox *)s; if (ptr == NULL) return GF_BAD_PARAM; if (ptr->version == 1) { ISOM_DECREASE_SIZE(ptr, 28); ptr->creationTime = gf_bs_read_u64(bs); ptr->modificationTime = gf_bs_read_u64(bs); ptr->timeScale = gf_bs_read_u32(bs); ptr->duration = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 16); ptr->creationTime = gf_bs_read_u32(bs); ptr->modificationTime = gf_bs_read_u32(bs); ptr->timeScale = gf_bs_read_u32(bs); ptr->duration = gf_bs_read_u32(bs); } if (!ptr->timeScale) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Movie header timescale is invalid (0) - defaulting to 600\n" )); ptr->timeScale = 600; } ISOM_DECREASE_SIZE(ptr, 80); ptr->preferredRate = gf_bs_read_u32(bs); ptr->preferredVolume = gf_bs_read_u16(bs); gf_bs_read_data(bs, ptr->reserved, 10); ptr->matrixA = gf_bs_read_u32(bs); ptr->matrixB = gf_bs_read_u32(bs); ptr->matrixU = gf_bs_read_u32(bs); ptr->matrixC = gf_bs_read_u32(bs); ptr->matrixD = gf_bs_read_u32(bs); ptr->matrixV = gf_bs_read_u32(bs); ptr->matrixX = gf_bs_read_u32(bs); ptr->matrixY = gf_bs_read_u32(bs); ptr->matrixW = gf_bs_read_u32(bs); ptr->previewTime = gf_bs_read_u32(bs); ptr->previewDuration = gf_bs_read_u32(bs); ptr->posterTime = gf_bs_read_u32(bs); ptr->selectionTime = gf_bs_read_u32(bs); ptr->selectionDuration = gf_bs_read_u32(bs); ptr->currentTime = gf_bs_read_u32(bs); ptr->nextTrackID = gf_bs_read_u32(bs); ptr->original_duration = ptr->duration; return GF_OK; } GF_Box *mvhd_box_new() { ISOM_DECL_BOX_ALLOC(GF_MovieHeaderBox, GF_ISOM_BOX_TYPE_MVHD); tmp->preferredRate = (1<<16); tmp->preferredVolume = (1<<8); tmp->matrixA = (1<<16); tmp->matrixD = (1<<16); tmp->matrixW = (1<<30); tmp->nextTrackID = 1; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mvhd_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MovieHeaderBox *ptr = (GF_MovieHeaderBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->version == 1) { gf_bs_write_u64(bs, ptr->creationTime); gf_bs_write_u64(bs, ptr->modificationTime); gf_bs_write_u32(bs, ptr->timeScale); gf_bs_write_u64(bs, ptr->duration); } else { gf_bs_write_u32(bs, (u32) ptr->creationTime); gf_bs_write_u32(bs, (u32) ptr->modificationTime); gf_bs_write_u32(bs, ptr->timeScale); gf_bs_write_u32(bs, (u32) ptr->duration); } gf_bs_write_u32(bs, ptr->preferredRate); gf_bs_write_u16(bs, ptr->preferredVolume); gf_bs_write_data(bs, ptr->reserved, 10); gf_bs_write_u32(bs, ptr->matrixA); gf_bs_write_u32(bs, ptr->matrixB); gf_bs_write_u32(bs, ptr->matrixU); gf_bs_write_u32(bs, ptr->matrixC); gf_bs_write_u32(bs, ptr->matrixD); gf_bs_write_u32(bs, ptr->matrixV); gf_bs_write_u32(bs, ptr->matrixX); gf_bs_write_u32(bs, ptr->matrixY); gf_bs_write_u32(bs, ptr->matrixW); gf_bs_write_u32(bs, ptr->previewTime); gf_bs_write_u32(bs, ptr->previewDuration); gf_bs_write_u32(bs, ptr->posterTime); gf_bs_write_u32(bs, ptr->selectionTime); gf_bs_write_u32(bs, ptr->selectionDuration); gf_bs_write_u32(bs, ptr->currentTime); gf_bs_write_u32(bs, ptr->nextTrackID); return GF_OK; } GF_Err mvhd_box_size(GF_Box *s) { GF_MovieHeaderBox *ptr = (GF_MovieHeaderBox *)s; if (ptr->duration==(u64) -1) ptr->version = 0; else ptr->version = (ptr->duration>0xFFFFFFFF) ? 1 : 0; ptr->size += (ptr->version == 1) ? 
28 : 16; ptr->size += 80; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void nmhd_box_del(GF_Box *s) { GF_MPEGMediaHeaderBox *ptr = (GF_MPEGMediaHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err nmhd_box_read(GF_Box *s, GF_BitStream *bs) { return GF_OK; } GF_Box *nmhd_box_new() { ISOM_DECL_BOX_ALLOC(GF_MPEGMediaHeaderBox, GF_ISOM_BOX_TYPE_NMHD); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err nmhd_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_full_box_write(s, bs); } GF_Err nmhd_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void padb_box_del(GF_Box *s) { GF_PaddingBitsBox *ptr = (GF_PaddingBitsBox *) s; if (ptr == NULL) return; if (ptr->padbits) gf_free(ptr->padbits); gf_free(ptr); } GF_Err padb_box_read(GF_Box *s,GF_BitStream *bs) { u32 i; GF_PaddingBitsBox *ptr = (GF_PaddingBitsBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->SampleCount = gf_bs_read_u32(bs); if (ptr->size < ptr->SampleCount/2) //half byte per sample return GF_ISOM_INVALID_FILE; ptr->padbits = (u8 *)gf_malloc(sizeof(u8)*ptr->SampleCount); if (!ptr->padbits) return GF_OUT_OF_MEM; for (i=0; i<ptr->SampleCount; i += 2) { gf_bs_read_int(bs, 1); if (i+1 < ptr->SampleCount) { ptr->padbits[i+1] = gf_bs_read_int(bs, 3); } else { gf_bs_read_int(bs, 3); } gf_bs_read_int(bs, 1); ptr->padbits[i] = gf_bs_read_int(bs, 3); } return GF_OK; } GF_Box *padb_box_new() { ISOM_DECL_BOX_ALLOC(GF_PaddingBitsBox, GF_ISOM_BOX_TYPE_PADB); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err padb_box_write(GF_Box *s, GF_BitStream *bs) { u32 i; GF_Err e; GF_PaddingBitsBox *ptr = (GF_PaddingBitsBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, ptr->SampleCount, 32); for (i=0 ; i<ptr->SampleCount; i += 2) { gf_bs_write_int(bs, 0, 1); if (i+1 < ptr->SampleCount) { gf_bs_write_int(bs, ptr->padbits[i+1], 3); } else { gf_bs_write_int(bs, 0, 3); } gf_bs_write_int(bs, 0, 1); gf_bs_write_int(bs, ptr->padbits[i], 3); } return GF_OK; } GF_Err padb_box_size(GF_Box *s) { GF_PaddingBitsBox *ptr = (GF_PaddingBitsBox *)s; ptr->size += 4; if (ptr->SampleCount) ptr->size += (ptr->SampleCount + 1) / 2; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void rely_box_del(GF_Box *s) { GF_RelyHintBox *rely = (GF_RelyHintBox *)s; gf_free(rely); } GF_Err rely_box_read(GF_Box *s, GF_BitStream *bs) { GF_RelyHintBox *ptr = (GF_RelyHintBox *)s; ISOM_DECREASE_SIZE(ptr, 1); ptr->reserved = gf_bs_read_int(bs, 6); ptr->preferred = gf_bs_read_int(bs, 1); ptr->required = gf_bs_read_int(bs, 1); return GF_OK; } GF_Box *rely_box_new() { ISOM_DECL_BOX_ALLOC(GF_RelyHintBox, GF_ISOM_BOX_TYPE_RELY); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err rely_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_RelyHintBox *ptr = (GF_RelyHintBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_int(bs, ptr->reserved, 6); gf_bs_write_int(bs, ptr->preferred, 1); gf_bs_write_int(bs, ptr->required, 1); return GF_OK; } GF_Err rely_box_size(GF_Box *s) { s->size += 1; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void rtpo_box_del(GF_Box *s) { GF_RTPOBox *rtpo = (GF_RTPOBox *)s; gf_free(rtpo); } GF_Err rtpo_box_read(GF_Box *s, GF_BitStream *bs) { GF_RTPOBox *ptr = (GF_RTPOBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->timeOffset = gf_bs_read_u32(bs); return GF_OK; } GF_Box *rtpo_box_new() { ISOM_DECL_BOX_ALLOC(GF_RTPOBox, GF_ISOM_BOX_TYPE_RTPO); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err 
rtpo_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_RTPOBox *ptr = (GF_RTPOBox *)s; if (ptr == NULL) return GF_BAD_PARAM; //here we have no pb, just remembed that some entries will have to //be 4-bytes aligned ... e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->timeOffset); return GF_OK; } GF_Err rtpo_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void smhd_box_del(GF_Box *s) { GF_SoundMediaHeaderBox *ptr = (GF_SoundMediaHeaderBox *)s; if (ptr == NULL ) return; gf_free(ptr); } GF_Err smhd_box_read(GF_Box *s, GF_BitStream *bs) { GF_SoundMediaHeaderBox *ptr = (GF_SoundMediaHeaderBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->balance = gf_bs_read_u16(bs); ptr->reserved = gf_bs_read_u16(bs); return GF_OK; } GF_Box *smhd_box_new() { ISOM_DECL_BOX_ALLOC(GF_SoundMediaHeaderBox, GF_ISOM_BOX_TYPE_SMHD); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err smhd_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SoundMediaHeaderBox *ptr = (GF_SoundMediaHeaderBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u16(bs, ptr->balance); gf_bs_write_u16(bs, ptr->reserved); return GF_OK; } GF_Err smhd_box_size(GF_Box *s) { GF_SoundMediaHeaderBox *ptr = (GF_SoundMediaHeaderBox *)s; ptr->reserved = 0; ptr->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void snro_box_del(GF_Box *s) { GF_SeqOffHintEntryBox *snro = (GF_SeqOffHintEntryBox *)s; gf_free(snro); } GF_Err snro_box_read(GF_Box *s, GF_BitStream *bs) { GF_SeqOffHintEntryBox *ptr = (GF_SeqOffHintEntryBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->SeqOffset = gf_bs_read_u32(bs); return GF_OK; } GF_Box *snro_box_new() { ISOM_DECL_BOX_ALLOC(GF_SeqOffHintEntryBox, GF_ISOM_BOX_TYPE_SNRO); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err snro_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SeqOffHintEntryBox *ptr = (GF_SeqOffHintEntryBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->SeqOffset); return GF_OK; } GF_Err snro_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stbl_box_del(GF_Box *s) { GF_SampleTableBox *ptr = (GF_SampleTableBox *)s; if (ptr == NULL) return; if (ptr->sub_samples) gf_list_del(ptr->sub_samples); if (ptr->sampleGroups) gf_list_del(ptr->sampleGroups); if (ptr->sampleGroupsDescription) gf_list_del(ptr->sampleGroupsDescription); if (ptr->sai_sizes) gf_list_del(ptr->sai_sizes); if (ptr->sai_offsets) gf_list_del(ptr->sai_offsets); if (ptr->traf_map) { if (ptr->traf_map->frag_starts) { u32 i; for (i=0; i<ptr->traf_map->nb_entries; i++) { if (ptr->traf_map->frag_starts[i].moof_template) gf_free(ptr->traf_map->frag_starts[i].moof_template); } gf_free(ptr->traf_map->frag_starts); } gf_free(ptr->traf_map); } gf_free(ptr); } GF_Err stbl_on_child_box(GF_Box *s, GF_Box *a) { GF_SampleTableBox *ptr = (GF_SampleTableBox *)s; if (!a) return GF_OK; switch (a->type) { case GF_ISOM_BOX_TYPE_STTS: if (ptr->TimeToSample) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->TimeToSample = (GF_TimeToSampleBox *)a; break; case GF_ISOM_BOX_TYPE_CTTS: if (ptr->CompositionOffset) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->CompositionOffset = (GF_CompositionOffsetBox *)a; break; case GF_ISOM_BOX_TYPE_CSLG: if (ptr->CompositionToDecode) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->CompositionToDecode = (GF_CompositionToDecodeBox *)a; break; case GF_ISOM_BOX_TYPE_STSS: if (ptr->SyncSample) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->SyncSample = 
(GF_SyncSampleBox *)a; break; case GF_ISOM_BOX_TYPE_STSD: if (ptr->SampleDescription) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->SampleDescription =(GF_SampleDescriptionBox *)a; break; case GF_ISOM_BOX_TYPE_STZ2: case GF_ISOM_BOX_TYPE_STSZ: if (ptr->SampleSize) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->SampleSize = (GF_SampleSizeBox *)a; break; case GF_ISOM_BOX_TYPE_STSC: if (ptr->SampleToChunk) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->SampleToChunk = (GF_SampleToChunkBox *)a; break; case GF_ISOM_BOX_TYPE_PADB: if (ptr->PaddingBits) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->PaddingBits = (GF_PaddingBitsBox *) a; break; //WARNING: AS THIS MAY CHANGE DYNAMICALLY DURING EDIT, case GF_ISOM_BOX_TYPE_CO64: case GF_ISOM_BOX_TYPE_STCO: if (ptr->ChunkOffset) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->ChunkOffset = a; return GF_OK; case GF_ISOM_BOX_TYPE_STSH: if (ptr->ShadowSync) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->ShadowSync = (GF_ShadowSyncBox *)a; break; case GF_ISOM_BOX_TYPE_STDP: if (ptr->DegradationPriority) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->DegradationPriority = (GF_DegradationPriorityBox *)a; break; case GF_ISOM_BOX_TYPE_SDTP: if (ptr->SampleDep) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->SampleDep = (GF_SampleDependencyTypeBox *)a; break; case GF_ISOM_BOX_TYPE_SUBS: if (!ptr->sub_samples) ptr->sub_samples = gf_list_new(); gf_list_add(ptr->sub_samples, a); //check subsample box { GF_SubSampleInformationBox *subs = (GF_SubSampleInformationBox *)a; GF_SubSampleInfoEntry *ent = gf_list_get(subs->Samples, 0); if (!ent) { gf_list_rem(subs->Samples, 0); GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] first entry in SubSample in track SampleTable is invalid\n")); } else if (ent->sample_delta==0) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] first entry in SubSample in track SampleTable has sample_delta of 0, should be one. Fixing\n")); ent->sample_delta = 1; } } break; case GF_ISOM_BOX_TYPE_SBGP: if (!ptr->sampleGroups) ptr->sampleGroups = gf_list_new(); gf_list_add(ptr->sampleGroups, a); break; case GF_ISOM_BOX_TYPE_SGPD: if (!ptr->sampleGroupsDescription) ptr->sampleGroupsDescription = gf_list_new(); gf_list_add(ptr->sampleGroupsDescription, a); break; case GF_ISOM_BOX_TYPE_SAIZ: if (!ptr->sai_sizes) ptr->sai_sizes = gf_list_new(); gf_list_add(ptr->sai_sizes, a); break; case GF_ISOM_BOX_TYPE_SAIO: if (!ptr->sai_offsets) ptr->sai_offsets = gf_list_new(); gf_list_add(ptr->sai_offsets, a); break; } return GF_OK; } GF_Err stbl_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; //we need to parse DegPrior in a special way GF_SampleTableBox *ptr = (GF_SampleTableBox *)s; e = gf_isom_box_array_read(s, bs, stbl_on_child_box); if (e) return e; if (!ptr->SyncSample) ptr->no_sync_found = 1; ptr->nb_sgpd_in_stbl = gf_list_count(ptr->sampleGroupsDescription); ptr->nb_stbl_boxes = gf_list_count(ptr->child_boxes); if (gf_bs_get_cookie(bs) & GF_ISOM_BS_COOKIE_CLONE_TRACK) return GF_OK; // return GF_OK; //these boxes are mandatory ! 
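//(the time-to-sample, sample-to-chunk, sample-size and chunk-offset tables are all needed to locate and time samples, hence the check below)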
if (!ptr->SampleToChunk || !ptr->SampleSize || !ptr->ChunkOffset || !ptr->TimeToSample) return GF_ISOM_INVALID_FILE; //sanity check if (ptr->SampleSize->sampleCount) { if (!ptr->TimeToSample->nb_entries || !ptr->SampleToChunk->nb_entries) return GF_ISOM_INVALID_FILE; } return GF_OK; } GF_Box *stbl_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleTableBox, GF_ISOM_BOX_TYPE_STBL); //maxSamplePer chunk is 10 by default tmp->MaxSamplePerChunk = 10; tmp->groupID = 1; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stbl_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err stbl_box_size(GF_Box *s) { u32 pos=0; GF_SampleTableBox *ptr = (GF_SampleTableBox *)s; gf_isom_check_position(s, (GF_Box *)ptr->SampleDescription, &pos); gf_isom_check_position(s, (GF_Box *)ptr->TimeToSample, &pos); gf_isom_check_position(s, (GF_Box *)ptr->CompositionOffset, &pos); gf_isom_check_position(s, (GF_Box *)ptr->CompositionToDecode, &pos); gf_isom_check_position(s, (GF_Box *)ptr->SyncSample, &pos); gf_isom_check_position(s, (GF_Box *)ptr->ShadowSync, &pos); gf_isom_check_position(s, (GF_Box *)ptr->SampleToChunk, &pos); gf_isom_check_position(s, (GF_Box *)ptr->SampleSize, &pos); gf_isom_check_position(s, (GF_Box *)ptr->ChunkOffset, &pos); gf_isom_check_position(s, (GF_Box *)ptr->DegradationPriority, &pos); gf_isom_check_position(s, (GF_Box *)ptr->SampleDep, &pos); gf_isom_check_position(s, (GF_Box *)ptr->PaddingBits, &pos); if (ptr->sub_samples) { gf_isom_check_position_list(s, ptr->sub_samples, &pos); } if (ptr->sampleGroupsDescription) { gf_isom_check_position_list(s, ptr->sampleGroupsDescription, &pos); } if (ptr->sampleGroups) { gf_isom_check_position_list(s, ptr->sampleGroups, &pos); } if (ptr->sai_sizes) { gf_isom_check_position_list(s, ptr->sai_sizes, &pos); } if (ptr->sai_offsets) { gf_isom_check_position_list(s, ptr->sai_offsets, &pos); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stco_box_del(GF_Box *s) { GF_ChunkOffsetBox *ptr = (GF_ChunkOffsetBox *)s; if (ptr == NULL) return; if (ptr->offsets) gf_free(ptr->offsets); gf_free(ptr); } GF_Err stco_box_read(GF_Box *s, GF_BitStream *bs) { u32 entries; GF_ChunkOffsetBox *ptr = (GF_ChunkOffsetBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->nb_entries = gf_bs_read_u32(bs); if (ptr->nb_entries > ptr->size / 4) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stco\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } if (ptr->nb_entries) { ptr->offsets = (u32 *) gf_malloc(ptr->nb_entries * sizeof(u32) ); if (ptr->offsets == NULL) return GF_OUT_OF_MEM; ptr->alloc_size = ptr->nb_entries; for (entries = 0; entries < ptr->nb_entries; entries++) { ptr->offsets[entries] = gf_bs_read_u32(bs); } } return GF_OK; } GF_Box *stco_box_new() { ISOM_DECL_BOX_ALLOC(GF_ChunkOffsetBox, GF_ISOM_BOX_TYPE_STCO); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stco_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_ChunkOffsetBox *ptr = (GF_ChunkOffsetBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nb_entries); for (i = 0; i < ptr->nb_entries; i++) { gf_bs_write_u32(bs, ptr->offsets[i]); } return GF_OK; } GF_Err stco_box_size(GF_Box *s) { GF_ChunkOffsetBox *ptr = (GF_ChunkOffsetBox *)s; ptr->size += 4 + (4 * ptr->nb_entries); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stdp_box_del(GF_Box *s) { GF_DegradationPriorityBox *ptr = (GF_DegradationPriorityBox *)s; if (ptr == NULL ) return; if (ptr->priorities) gf_free(ptr->priorities); 
gf_free(ptr); } //this is called through stbl_read... GF_Err stdp_box_read(GF_Box *s, GF_BitStream *bs) { u32 entry; GF_DegradationPriorityBox *ptr = (GF_DegradationPriorityBox *)s; /*out-of-order stdp, assume no padding at the end and take the entire remaining data for entries*/ if (!ptr->nb_entries) ptr->nb_entries = (u32) ptr->size / 2; else if (ptr->nb_entries > ptr->size / 2) return GF_ISOM_INVALID_FILE; ptr->priorities = (u16 *) gf_malloc(ptr->nb_entries * sizeof(u16)); if (ptr->priorities == NULL) return GF_OUT_OF_MEM; for (entry = 0; entry < ptr->nb_entries; entry++) { ptr->priorities[entry] = gf_bs_read_u16(bs); } ISOM_DECREASE_SIZE(ptr, (2*ptr->nb_entries) ); return GF_OK; } GF_Box *stdp_box_new() { ISOM_DECL_BOX_ALLOC(GF_DegradationPriorityBox, GF_ISOM_BOX_TYPE_STDP); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stdp_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_DegradationPriorityBox *ptr = (GF_DegradationPriorityBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; for (i = 0; i < ptr->nb_entries; i++) { gf_bs_write_u16(bs, ptr->priorities[i]); } return GF_OK; } GF_Err stdp_box_size(GF_Box *s) { GF_DegradationPriorityBox *ptr = (GF_DegradationPriorityBox *)s; ptr->size += (2 * ptr->nb_entries); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stsc_box_del(GF_Box *s) { GF_SampleToChunkBox *ptr = (GF_SampleToChunkBox *)s; if (ptr == NULL) return; if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err stsc_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_SampleToChunkBox *ptr = (GF_SampleToChunkBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->nb_entries = gf_bs_read_u32(bs); if (ptr->nb_entries > ptr->size / 12) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stsc\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } ptr->alloc_size = ptr->nb_entries; ptr->entries = NULL; if (ptr->nb_entries) { ptr->entries = gf_malloc(sizeof(GF_StscEntry)*ptr->alloc_size); if (!ptr->entries) return GF_OUT_OF_MEM; } for (i = 0; i < ptr->nb_entries; i++) { ptr->entries[i].firstChunk = gf_bs_read_u32(bs); ptr->entries[i].samplesPerChunk = gf_bs_read_u32(bs); ptr->entries[i].sampleDescriptionIndex = gf_bs_read_u32(bs); ptr->entries[i].isEdited = 0; ptr->entries[i].nextChunk = 0; if (!ptr->entries[i].firstChunk) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] invalid first chunk 0 in stsc entry\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } //update the next chunk in the previous entry if (i) ptr->entries[i-1].nextChunk = ptr->entries[i].firstChunk; } ptr->currentIndex = 0; ptr->firstSampleInCurrentChunk = 0; ptr->currentChunk = 0; ptr->ghostNumber = 0; return GF_OK; } GF_Box *stsc_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleToChunkBox, GF_ISOM_BOX_TYPE_STSC); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stsc_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_SampleToChunkBox *ptr = (GF_SampleToChunkBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nb_entries); for (i=0; i<ptr->nb_entries; i++) { gf_bs_write_u32(bs, ptr->entries[i].firstChunk); gf_bs_write_u32(bs, ptr->entries[i].samplesPerChunk); gf_bs_write_u32(bs, ptr->entries[i].sampleDescriptionIndex); } return GF_OK; } GF_Err stsc_box_size(GF_Box *s) { GF_SampleToChunkBox *ptr = (GF_SampleToChunkBox *)s; ptr->size += 4 + (12 * ptr->nb_entries); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stsd_box_del(GF_Box *s) { GF_SampleDescriptionBox *ptr = (GF_SampleDescriptionBox *)s; 
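/*sample entries are kept in the generic child_boxes list (cf stsd_box_new / stsd_on_child_box) and are presumably released by the generic box destructor, so only the stsd shell is freed here*/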
if (ptr == NULL) return; gf_free(ptr); } GF_Err stsd_on_child_box(GF_Box *s, GF_Box *a) { GF_UnknownBox *def; if (!a) return GF_OK; if (gf_box_valid_in_parent(a, "stsd")) { return GF_OK; } switch (a->type) { //unknown sample description: we need a specific box to handle the data ref index //rather than a default box ... case GF_ISOM_BOX_TYPE_UNKNOWN: def = (GF_UnknownBox *)a; /*we need at least 8 bytes for unknown sample entries*/ if (def->dataSize < 8) { gf_isom_box_del_parent(&s->child_boxes, a); return GF_ISOM_INVALID_MEDIA; } return GF_OK; default: GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Cannot process box of type %s\n", gf_4cc_to_str(a->type))); return GF_ISOM_INVALID_FILE; } } GF_Err stsd_box_read(GF_Box *s, GF_BitStream *bs) { ISOM_DECREASE_SIZE(s, 4) gf_bs_read_u32(bs); return gf_isom_box_array_read_ex(s, bs, stsd_on_child_box, GF_ISOM_BOX_TYPE_STSD); } GF_Box *stsd_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleDescriptionBox, GF_ISOM_BOX_TYPE_STSD); tmp->child_boxes = gf_list_new(); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stsd_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 nb_entries; GF_SampleDescriptionBox *ptr = (GF_SampleDescriptionBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; nb_entries = gf_list_count(ptr->child_boxes); gf_bs_write_u32(bs, nb_entries); return GF_OK; } GF_Err stsd_box_size(GF_Box *s) { GF_SampleDescriptionBox *ptr = (GF_SampleDescriptionBox *)s; ptr->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stsh_box_del(GF_Box *s) { u32 i = 0; GF_StshEntry *ent; GF_ShadowSyncBox *ptr = (GF_ShadowSyncBox *)s; if (ptr == NULL) return; while ( (ent = (GF_StshEntry *)gf_list_enum(ptr->entries, &i)) ) { gf_free(ent); } gf_list_del(ptr->entries); gf_free(ptr); } GF_Err stsh_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 count, i; GF_ShadowSyncBox *ptr = (GF_ShadowSyncBox *)s; ISOM_DECREASE_SIZE(s, 4) count = gf_bs_read_u32(bs); if (ptr->size < count*8) return GF_ISOM_INVALID_FILE; for (i = 0; i < count; i++) { GF_StshEntry *ent = (GF_StshEntry *) gf_malloc(sizeof(GF_StshEntry)); if (!ent) return GF_OUT_OF_MEM; ent->shadowedSampleNumber = gf_bs_read_u32(bs); ent->syncSampleNumber = gf_bs_read_u32(bs); e = gf_list_add(ptr->entries, ent); if (e) return e; } return GF_OK; } GF_Box *stsh_box_new() { ISOM_DECL_BOX_ALLOC(GF_ShadowSyncBox, GF_ISOM_BOX_TYPE_STSH); tmp->entries = gf_list_new(); if (!tmp->entries) { gf_free(tmp); return NULL; } return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stsh_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_StshEntry *ent; GF_ShadowSyncBox *ptr = (GF_ShadowSyncBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, gf_list_count(ptr->entries)); i=0; while ((ent = (GF_StshEntry *)gf_list_enum(ptr->entries, &i))) { gf_bs_write_u32(bs, ent->shadowedSampleNumber); gf_bs_write_u32(bs, ent->syncSampleNumber); } return GF_OK; } GF_Err stsh_box_size(GF_Box *s) { GF_ShadowSyncBox *ptr = (GF_ShadowSyncBox *)s; ptr->size += 4 + (8 * gf_list_count(ptr->entries)); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stss_box_del(GF_Box *s) { GF_SyncSampleBox *ptr = (GF_SyncSampleBox *)s; if (ptr == NULL) return; if (ptr->sampleNumbers) gf_free(ptr->sampleNumbers); gf_free(ptr); } GF_Err stss_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_SyncSampleBox *ptr = (GF_SyncSampleBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->nb_entries = gf_bs_read_u32(bs); if (ptr->size < ptr->nb_entries * 4) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, 
("[iso file] Invalid number of entries %d in stss\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } ptr->alloc_size = ptr->nb_entries; ptr->sampleNumbers = (u32 *) gf_malloc( ptr->alloc_size * sizeof(u32)); if (ptr->sampleNumbers == NULL) return GF_OUT_OF_MEM; for (i = 0; i < ptr->nb_entries; i++) { ptr->sampleNumbers[i] = gf_bs_read_u32(bs); } return GF_OK; } GF_Box *stss_box_new() { ISOM_DECL_BOX_ALLOC(GF_SyncSampleBox, GF_ISOM_BOX_TYPE_STSS); return (GF_Box*)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stss_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_SyncSampleBox *ptr = (GF_SyncSampleBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nb_entries); for (i = 0; i < ptr->nb_entries; i++) { gf_bs_write_u32(bs, ptr->sampleNumbers[i]); } return GF_OK; } GF_Err stss_box_size(GF_Box *s) { GF_SyncSampleBox *ptr = (GF_SyncSampleBox *)s; ptr->size += 4 + (4 * ptr->nb_entries); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stsz_box_del(GF_Box *s) { GF_SampleSizeBox *ptr = (GF_SampleSizeBox *)s; if (ptr == NULL) return; if (ptr->sizes) gf_free(ptr->sizes); gf_free(ptr); } GF_Err stsz_box_read(GF_Box *s, GF_BitStream *bs) { u32 i, estSize; GF_SampleSizeBox *ptr = (GF_SampleSizeBox *)s; if (ptr == NULL) return GF_BAD_PARAM; //support for CompactSizes if (s->type == GF_ISOM_BOX_TYPE_STSZ) { ISOM_DECREASE_SIZE(ptr, 8); ptr->sampleSize = gf_bs_read_u32(bs); ptr->sampleCount = gf_bs_read_u32(bs); } else { //24-reserved ISOM_DECREASE_SIZE(ptr, 8); gf_bs_read_int(bs, 24); i = gf_bs_read_u8(bs); ptr->sampleCount = gf_bs_read_u32(bs); switch (i) { case 4: case 8: case 16: ptr->sampleSize = i; break; default: //try to fix the file //no samples, no parsing pb if (!ptr->sampleCount) { ptr->sampleSize = 16; return GF_OK; } estSize = (u32) (ptr->size) / ptr->sampleCount; if (!estSize && ((ptr->sampleCount+1)/2 == (ptr->size)) ) { ptr->sampleSize = 4; break; } else if (estSize == 1 || estSize == 2) { ptr->sampleSize = 8 * estSize; } else { return GF_ISOM_INVALID_FILE; } } } if (s->type == GF_ISOM_BOX_TYPE_STSZ) { if (! ptr->sampleSize && ptr->sampleCount) { if (ptr->sampleCount > ptr->size / 4) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stsz\n", ptr->sampleCount)); return GF_ISOM_INVALID_FILE; } ptr->sizes = (u32 *) gf_malloc(ptr->sampleCount * sizeof(u32)); if (! ptr->sizes) return GF_OUT_OF_MEM; ptr->alloc_size = ptr->sampleCount; for (i = 0; i < ptr->sampleCount; i++) { ptr->sizes[i] = gf_bs_read_u32(bs); if (ptr->max_size < ptr->sizes[i]) ptr->max_size = ptr->sizes[i]; ptr->total_size += ptr->sizes[i]; ptr->total_samples++; } } } else { if (ptr->sampleSize==4) { if (ptr->sampleCount / 2 > ptr->size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stsz\n", ptr->sampleCount)); return GF_ISOM_INVALID_FILE; } } else { if (ptr->sampleCount > ptr->size / (ptr->sampleSize/8)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in stsz\n", ptr->sampleCount)); return GF_ISOM_INVALID_FILE; } } //note we could optimize the mem usage by keeping the table compact //in memory. But that would complicate both caching and editing //we therefore keep all sizes as u32 and uncompress the table ptr->sizes = (u32 *) gf_malloc(ptr->sampleCount * sizeof(u32)); if (! 
ptr->sizes) return GF_OUT_OF_MEM; ptr->alloc_size = ptr->sampleCount; for (i = 0; i < ptr->sampleCount; ) { switch (ptr->sampleSize) { case 4: ptr->sizes[i] = gf_bs_read_int(bs, 4); if (i+1 < ptr->sampleCount) { ptr->sizes[i+1] = gf_bs_read_int(bs, 4); } else { //0 padding in odd sample count gf_bs_read_int(bs, 4); } i += 2; break; default: ptr->sizes[i] = gf_bs_read_int(bs, ptr->sampleSize); i += 1; break; } if (ptr->max_size < ptr->sizes[i]) ptr->max_size = ptr->sizes[i]; ptr->total_size += ptr->sizes[i]; ptr->total_samples++; } } return GF_OK; } GF_Box *stsz_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleSizeBox, 0); //type is unknown here, can be regular or compact table return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stsz_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_SampleSizeBox *ptr = (GF_SampleSizeBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; //in both versions this is still valid if (ptr->type == GF_ISOM_BOX_TYPE_STSZ) { gf_bs_write_u32(bs, ptr->sampleSize); } else { gf_bs_write_u24(bs, 0); gf_bs_write_u8(bs, ptr->sampleSize); } gf_bs_write_u32(bs, ptr->sampleCount); if (ptr->type == GF_ISOM_BOX_TYPE_STSZ) { if (! ptr->sampleSize) { for (i = 0; i < ptr->sampleCount; i++) { gf_bs_write_u32(bs, ptr->sizes ? ptr->sizes[i] : 0); } } } else { for (i = 0; i < ptr->sampleCount; ) { switch (ptr->sampleSize) { case 4: gf_bs_write_int(bs, ptr->sizes[i], 4); if (i+1 < ptr->sampleCount) { gf_bs_write_int(bs, ptr->sizes[i+1], 4); } else { //0 padding in odd sample count gf_bs_write_int(bs, 0, 4); } i += 2; break; default: gf_bs_write_int(bs, ptr->sizes[i], ptr->sampleSize); i += 1; break; } } } return GF_OK; } GF_Err stsz_box_size(GF_Box *s) { u32 i, fieldSize, size; GF_SampleSizeBox *ptr = (GF_SampleSizeBox *)s; ptr->size += 8; if (!ptr->sampleCount) return GF_OK; //regular table if (ptr->type == GF_ISOM_BOX_TYPE_STSZ) { if (ptr->sampleSize) return GF_OK; ptr->size += (4 * ptr->sampleCount); return GF_OK; } fieldSize = 4; size = ptr->sizes[0]; for (i=0; i < ptr->sampleCount; i++) { if (ptr->sizes[i] <= 0xF) continue; //switch to 8-bit table else if (ptr->sizes[i] <= 0xFF) { fieldSize = 8; } //switch to 16-bit table else if (ptr->sizes[i] <= 0xFFFF) { fieldSize = 16; } //switch to 32-bit table else { fieldSize = 32; } //check the size if (size != ptr->sizes[i]) size = 0; } //if all samples are of the same size, switch to regular (more compact) if (size) { ptr->type = GF_ISOM_BOX_TYPE_STSZ; ptr->sampleSize = size; gf_free(ptr->sizes); ptr->sizes = NULL; } if (fieldSize == 32) { //oops, doesn't fit in a compact table ptr->type = GF_ISOM_BOX_TYPE_STSZ; ptr->size += (4 * ptr->sampleCount); return GF_OK; } //make sure we are a compact table (no need to change the mem representation) ptr->type = GF_ISOM_BOX_TYPE_STZ2; ptr->sampleSize = fieldSize; if (fieldSize == 4) { //do not forget the 0 padding field for odd count ptr->size += (ptr->sampleCount + 1) / 2; } else { ptr->size += (ptr->sampleCount) * (fieldSize/8); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stts_box_del(GF_Box *s) { GF_TimeToSampleBox *ptr = (GF_TimeToSampleBox *)s; if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err stts_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_TimeToSampleBox *ptr = (GF_TimeToSampleBox *)s; #ifndef GPAC_DISABLE_ISOM_WRITE ptr->w_LastDTS = 0; #endif ISOM_DECREASE_SIZE(ptr, 4); ptr->nb_entries = gf_bs_read_u32(bs); if (ptr->size < ptr->nb_entries * 8) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in 
stts\n", ptr->nb_entries)); return GF_ISOM_INVALID_FILE; } ptr->alloc_size = ptr->nb_entries; ptr->entries = gf_malloc(sizeof(GF_SttsEntry)*ptr->alloc_size); if (!ptr->entries) return GF_OUT_OF_MEM; for (i=0; i<ptr->nb_entries; i++) { ptr->entries[i].sampleCount = gf_bs_read_u32(bs); ptr->entries[i].sampleDelta = gf_bs_read_u32(bs); #ifndef GPAC_DISABLE_ISOM_WRITE ptr->w_currentSampleNum += ptr->entries[i].sampleCount; ptr->w_LastDTS += (u64)ptr->entries[i].sampleCount * ptr->entries[i].sampleDelta; #endif if (ptr->max_ts_delta<ptr->entries[i].sampleDelta) ptr->max_ts_delta = ptr->entries[i].sampleDelta; if (!ptr->entries[i].sampleDelta) { if ((i+1<ptr->nb_entries) ) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Found stts entry with sample_delta=0 - forbidden ! Fixing to 1\n" )); ptr->entries[i].sampleDelta = 1; } else if (ptr->entries[i].sampleCount>1) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] more than one stts entry at the end of the track with sample_delta=0 - forbidden ! Fixing to 1\n" )); ptr->entries[i].sampleDelta = 1; } } else if ((s32) ptr->entries[i].sampleDelta < 0) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] stts entry %d has negative duration %d - forbidden ! Fixing to 1, sync may get lost (consider reimport raw media)\n", i, (s32) ptr->entries[i].sampleDelta )); ptr->entries[i].sampleDelta = 1; } } if (ptr->size<(ptr->nb_entries*8)) return GF_ISOM_INVALID_FILE; ISOM_DECREASE_SIZE(ptr, ptr->nb_entries*8); //remove the last sample delta. #ifndef GPAC_DISABLE_ISOM_WRITE if (ptr->nb_entries) ptr->w_LastDTS -= ptr->entries[ptr->nb_entries-1].sampleDelta; #endif return GF_OK; } GF_Box *stts_box_new() { ISOM_DECL_BOX_ALLOC(GF_TimeToSampleBox, GF_ISOM_BOX_TYPE_STTS); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stts_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_TimeToSampleBox *ptr = (GF_TimeToSampleBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->nb_entries); for (i=0; i<ptr->nb_entries; i++) { gf_bs_write_u32(bs, ptr->entries[i].sampleCount); gf_bs_write_u32(bs, ptr->entries[i].sampleDelta); } return GF_OK; } GF_Err stts_box_size(GF_Box *s) { GF_TimeToSampleBox *ptr = (GF_TimeToSampleBox *)s; ptr->size += 4 + (8 * ptr->nb_entries); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void tfhd_box_del(GF_Box *s) { GF_TrackFragmentHeaderBox *ptr = (GF_TrackFragmentHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err tfhd_box_read(GF_Box *s, GF_BitStream *bs) { GF_TrackFragmentHeaderBox *ptr = (GF_TrackFragmentHeaderBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->trackID = gf_bs_read_u32(bs); //The rest depends on the flags if (ptr->flags & GF_ISOM_TRAF_BASE_OFFSET) { ISOM_DECREASE_SIZE(ptr, 8); ptr->base_data_offset = gf_bs_read_u64(bs); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DESC) { ISOM_DECREASE_SIZE(ptr, 4); ptr->sample_desc_index = gf_bs_read_u32(bs); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DUR) { ISOM_DECREASE_SIZE(ptr, 4); ptr->def_sample_duration = gf_bs_read_u32(bs); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_SIZE) { ISOM_DECREASE_SIZE(ptr, 4); ptr->def_sample_size = gf_bs_read_u32(bs); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_FLAGS) { ISOM_DECREASE_SIZE(ptr, 4); ptr->def_sample_flags = gf_bs_read_u32(bs); } return GF_OK; } GF_Box *tfhd_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackFragmentHeaderBox, GF_ISOM_BOX_TYPE_TFHD); //NO FLAGS SET BY DEFAULT return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tfhd_box_write(GF_Box *s, 
GF_BitStream *bs) { GF_Err e; GF_TrackFragmentHeaderBox *ptr = (GF_TrackFragmentHeaderBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->trackID); //The rest depends on the flags if (ptr->flags & GF_ISOM_TRAF_BASE_OFFSET) { gf_bs_write_u64(bs, ptr->base_data_offset); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DESC) { gf_bs_write_u32(bs, ptr->sample_desc_index); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DUR) { gf_bs_write_u32(bs, ptr->def_sample_duration); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_SIZE) { gf_bs_write_u32(bs, ptr->def_sample_size); } if (ptr->flags & GF_ISOM_TRAF_SAMPLE_FLAGS) { gf_bs_write_u32(bs, ptr->def_sample_flags); } return GF_OK; } GF_Err tfhd_box_size(GF_Box *s) { GF_TrackFragmentHeaderBox *ptr = (GF_TrackFragmentHeaderBox *)s; ptr->size += 4; //The rest depends on the flags if (ptr->flags & GF_ISOM_TRAF_BASE_OFFSET) ptr->size += 8; if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DESC) ptr->size += 4; if (ptr->flags & GF_ISOM_TRAF_SAMPLE_DUR) ptr->size += 4; if (ptr->flags & GF_ISOM_TRAF_SAMPLE_SIZE) ptr->size += 4; if (ptr->flags & GF_ISOM_TRAF_SAMPLE_FLAGS) ptr->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ void tims_box_del(GF_Box *s) { GF_TSHintEntryBox *tims = (GF_TSHintEntryBox *)s; gf_free(tims); } GF_Err tims_box_read(GF_Box *s, GF_BitStream *bs) { GF_TSHintEntryBox *ptr = (GF_TSHintEntryBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->timeScale = gf_bs_read_u32(bs); return GF_OK; } GF_Box *tims_box_new() { ISOM_DECL_BOX_ALLOC(GF_TSHintEntryBox, GF_ISOM_BOX_TYPE_TIMS); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tims_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TSHintEntryBox *ptr = (GF_TSHintEntryBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->timeScale); return GF_OK; } GF_Err tims_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void tkhd_box_del(GF_Box *s) { GF_TrackHeaderBox *ptr = (GF_TrackHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); return; } GF_Err tkhd_box_read(GF_Box *s, GF_BitStream *bs) { GF_TrackHeaderBox *ptr = (GF_TrackHeaderBox *)s; if (ptr->version == 1) { ISOM_DECREASE_SIZE(ptr, 32); ptr->creationTime = gf_bs_read_u64(bs); ptr->modificationTime = gf_bs_read_u64(bs); ptr->trackID = gf_bs_read_u32(bs); ptr->reserved1 = gf_bs_read_u32(bs); ptr->duration = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 20); ptr->creationTime = gf_bs_read_u32(bs); ptr->modificationTime = gf_bs_read_u32(bs); ptr->trackID = gf_bs_read_u32(bs); ptr->reserved1 = gf_bs_read_u32(bs); ptr->duration = gf_bs_read_u32(bs); } ptr->initial_duration = ptr->duration; ISOM_DECREASE_SIZE(ptr, 60); ptr->reserved2[0] = gf_bs_read_u32(bs); ptr->reserved2[1] = gf_bs_read_u32(bs); ptr->layer = gf_bs_read_u16(bs); ptr->alternate_group = gf_bs_read_u16(bs); ptr->volume = gf_bs_read_u16(bs); ptr->reserved3 = gf_bs_read_u16(bs); ptr->matrix[0] = gf_bs_read_u32(bs); ptr->matrix[1] = gf_bs_read_u32(bs); ptr->matrix[2] = gf_bs_read_u32(bs); ptr->matrix[3] = gf_bs_read_u32(bs); ptr->matrix[4] = gf_bs_read_u32(bs); ptr->matrix[5] = gf_bs_read_u32(bs); ptr->matrix[6] = gf_bs_read_u32(bs); ptr->matrix[7] = gf_bs_read_u32(bs); ptr->matrix[8] = gf_bs_read_u32(bs); ptr->width = gf_bs_read_u32(bs); ptr->height = gf_bs_read_u32(bs); return GF_OK; } GF_Box *tkhd_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackHeaderBox, GF_ISOM_BOX_TYPE_TKHD); tmp->matrix[0] = 
0x00010000; tmp->matrix[4] = 0x00010000; tmp->matrix[8] = 0x40000000; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tkhd_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TrackHeaderBox *ptr = (GF_TrackHeaderBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->version == 1) { gf_bs_write_u64(bs, ptr->creationTime); gf_bs_write_u64(bs, ptr->modificationTime); gf_bs_write_u32(bs, ptr->trackID); gf_bs_write_u32(bs, ptr->reserved1); gf_bs_write_u64(bs, ptr->duration); } else { gf_bs_write_u32(bs, (u32) ptr->creationTime); gf_bs_write_u32(bs, (u32) ptr->modificationTime); gf_bs_write_u32(bs, ptr->trackID); gf_bs_write_u32(bs, ptr->reserved1); gf_bs_write_u32(bs, (u32) ptr->duration); } gf_bs_write_u32(bs, ptr->reserved2[0]); gf_bs_write_u32(bs, ptr->reserved2[1]); gf_bs_write_u16(bs, ptr->layer); gf_bs_write_u16(bs, ptr->alternate_group); gf_bs_write_u16(bs, ptr->volume); gf_bs_write_u16(bs, ptr->reserved3); gf_bs_write_u32(bs, ptr->matrix[0]); gf_bs_write_u32(bs, ptr->matrix[1]); gf_bs_write_u32(bs, ptr->matrix[2]); gf_bs_write_u32(bs, ptr->matrix[3]); gf_bs_write_u32(bs, ptr->matrix[4]); gf_bs_write_u32(bs, ptr->matrix[5]); gf_bs_write_u32(bs, ptr->matrix[6]); gf_bs_write_u32(bs, ptr->matrix[7]); gf_bs_write_u32(bs, ptr->matrix[8]); gf_bs_write_u32(bs, ptr->width); gf_bs_write_u32(bs, ptr->height); return GF_OK; } GF_Err tkhd_box_size(GF_Box *s) { GF_TrackHeaderBox *ptr = (GF_TrackHeaderBox *)s; if (ptr->duration==(u64) -1) ptr->version = 0; else ptr->version = (ptr->duration>0xFFFFFFFF) ? 1 : 0; ptr->size += (ptr->version == 1) ? 32 : 20; ptr->size += 60; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void traf_box_del(GF_Box *s) { GF_TrackFragmentBox *ptr = (GF_TrackFragmentBox *)s; if (ptr == NULL) return; if (ptr->sub_samples) gf_list_del(ptr->sub_samples); gf_list_del(ptr->TrackRuns); if (ptr->sampleGroups) gf_list_del(ptr->sampleGroups); if (ptr->sampleGroupsDescription) gf_list_del(ptr->sampleGroupsDescription); if (ptr->sai_sizes) gf_list_del(ptr->sai_sizes); if (ptr->sai_offsets) gf_list_del(ptr->sai_offsets); gf_free(ptr); } GF_Err traf_on_child_box(GF_Box *s, GF_Box *a) { GF_TrackFragmentBox *ptr = (GF_TrackFragmentBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_TFHD: if (ptr->tfhd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->tfhd = (GF_TrackFragmentHeaderBox *) a; return GF_OK; case GF_ISOM_BOX_TYPE_TRUN: return gf_list_add(ptr->TrackRuns, a); case GF_ISOM_BOX_TYPE_SDTP: if (ptr->sdtp) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->sdtp = (GF_SampleDependencyTypeBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_TFDT: if (ptr->tfdt) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->tfdt = (GF_TFBaseMediaDecodeTimeBox*) a; return GF_OK; case GF_ISOM_BOX_TYPE_SUBS: if (!ptr->sub_samples) ptr->sub_samples = gf_list_new(); return gf_list_add(ptr->sub_samples, a); case GF_ISOM_BOX_TYPE_SBGP: if (!ptr->sampleGroups) ptr->sampleGroups = gf_list_new(); gf_list_add(ptr->sampleGroups, a); return GF_OK; case GF_ISOM_BOX_TYPE_SGPD: if (!ptr->sampleGroupsDescription) ptr->sampleGroupsDescription = gf_list_new(); gf_list_add(ptr->sampleGroupsDescription, a); return GF_OK; case GF_ISOM_BOX_TYPE_SAIZ: if (!ptr->sai_sizes) ptr->sai_sizes = gf_list_new(); gf_list_add(ptr->sai_sizes, a); return GF_OK; case GF_ISOM_BOX_TYPE_SAIO: if (!ptr->sai_offsets) ptr->sai_offsets = gf_list_new(); gf_list_add(ptr->sai_offsets, a); return GF_OK; //we will throw an error if both PIFF_PSEC and SENC are found. 
Not such files seen yet case GF_ISOM_BOX_TYPE_UUID: if ( ((GF_UUIDBox *)a)->internal_4cc==GF_ISOM_BOX_UUID_PSEC) { if (ptr->sample_encryption) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->sample_encryption = (GF_SampleEncryptionBox *)a; ptr->sample_encryption->traf = ptr; return GF_OK; } else { return GF_OK; } case GF_ISOM_BOX_TYPE_SENC: if (ptr->sample_encryption) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->sample_encryption = (GF_SampleEncryptionBox *)a; ptr->sample_encryption->traf = ptr; return GF_OK; } return GF_OK; } GF_Err traf_box_read(GF_Box *s, GF_BitStream *bs) { GF_TrackFragmentBox *ptr = (GF_TrackFragmentBox *)s; GF_Err e = gf_isom_box_array_read(s, bs, traf_on_child_box); if (e) return e; if (!ptr->tfhd) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing TrackFragmentHeaderBox \n")); return GF_ISOM_INVALID_FILE; } return GF_OK; } GF_Box *traf_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackFragmentBox, GF_ISOM_BOX_TYPE_TRAF); tmp->TrackRuns = gf_list_new(); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Box *tfxd_box_new() { ISOM_DECL_BOX_ALLOC(GF_MSSTimeExtBox, GF_ISOM_BOX_TYPE_UUID); tmp->internal_4cc = GF_ISOM_BOX_UUID_TFXD; return (GF_Box *)tmp; } void tfxd_box_del(GF_Box *s) { gf_free(s); } GF_Err tfxd_box_read(GF_Box *s, GF_BitStream *bs) { GF_MSSTimeExtBox *ptr = (GF_MSSTimeExtBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->version = gf_bs_read_u8(bs); ptr->flags = gf_bs_read_u24(bs); if (ptr->version == 0x01) { ISOM_DECREASE_SIZE(ptr, 16); ptr->absolute_time_in_track_timescale = gf_bs_read_u64(bs); ptr->fragment_duration_in_track_timescale = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 8); ptr->absolute_time_in_track_timescale = gf_bs_read_u32(bs); ptr->fragment_duration_in_track_timescale = gf_bs_read_u32(bs); } return GF_OK; } GF_Err tfxd_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MSSTimeExtBox *uuid = (GF_MSSTimeExtBox*)s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u8(bs, 1); gf_bs_write_u24(bs, 0); gf_bs_write_u64(bs, uuid->absolute_time_in_track_timescale); gf_bs_write_u64(bs, uuid->fragment_duration_in_track_timescale); return GF_OK; } GF_Err tfxd_box_size(GF_Box *s) { s->size += 20; return GF_OK; } GF_Err traf_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err traf_box_size(GF_Box *s) { u32 pos=0; GF_TrackFragmentBox *ptr = (GF_TrackFragmentBox *) s; //Header first gf_isom_check_position(s, (GF_Box *)ptr->tfhd, &pos); gf_isom_check_position_list(s, ptr->sub_samples, &pos); gf_isom_check_position(s, (GF_Box *)ptr->tfdt, &pos); gf_isom_check_position_list(s, ptr->sampleGroupsDescription, &pos); gf_isom_check_position_list(s, ptr->sampleGroups, &pos); gf_isom_check_position_list(s, ptr->sai_sizes, &pos); gf_isom_check_position_list(s, ptr->sai_offsets, &pos); gf_isom_check_position(s, (GF_Box *)ptr->sample_encryption, &pos); gf_isom_check_position_list(s, ptr->TrackRuns, &pos); //when sdtp is present (smooth-like) write it after the trun box gf_isom_check_position(s, (GF_Box *)ptr->sdtp, &pos); //tfxd should be last ... 
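/* tfxd (see tfxd_box_new/tfxd_box_read above) is the Smooth Streaming time-extension carried as a UUID box (GF_ISOM_BOX_UUID_TFXD); it holds the fragment's absolute time and duration in the track timescale, which is presumably why it is kept as the last child of the traf. */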
if (ptr->tfxd) gf_isom_check_position(s, (GF_Box *)ptr->tfxd, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ void trak_box_del(GF_Box *s) { #ifndef GPAC_DISABLE_ISOM_WRITE GF_TrackBox *ptr = (GF_TrackBox *)s; if (ptr->chunk_cache) gf_bs_del(ptr->chunk_cache); #endif gf_free(s); } static void gf_isom_check_sample_desc(GF_TrackBox *trak) { GF_BitStream *bs; GF_UnknownBox *a; u32 i; GF_Err e; GF_SampleTableBox *stbl; if (!trak->Media || !trak->Media->information) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Track with no media box !\n" )); return; } if (!trak->Media->information->sampleTable) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Track with no sample table !\n" )); trak->Media->information->sampleTable = (GF_SampleTableBox *) gf_isom_box_new_parent(&trak->Media->information->child_boxes, GF_ISOM_BOX_TYPE_STBL); } stbl = trak->Media->information->sampleTable; if (!stbl->SampleDescription) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Track with no sample description box !\n" )); stbl->SampleDescription = (GF_SampleDescriptionBox *) gf_isom_box_new_parent(&stbl->child_boxes, GF_ISOM_BOX_TYPE_STSD); return; } i=0; while ((a = (GF_UnknownBox*)gf_list_enum(trak->Media->information->sampleTable->SampleDescription->child_boxes, &i))) { switch (a->type) { case GF_ISOM_BOX_TYPE_MP4S: case GF_ISOM_BOX_TYPE_ENCS: case GF_ISOM_BOX_TYPE_MP4A: case GF_ISOM_BOX_TYPE_ENCA: case GF_ISOM_BOX_TYPE_MP4V: case GF_ISOM_BOX_TYPE_ENCV: case GF_ISOM_BOX_TYPE_RESV: case GF_ISOM_SUBTYPE_3GP_AMR: case GF_ISOM_SUBTYPE_3GP_AMR_WB: case GF_ISOM_SUBTYPE_3GP_EVRC: case GF_ISOM_SUBTYPE_3GP_QCELP: case GF_ISOM_SUBTYPE_3GP_SMV: case GF_ISOM_SUBTYPE_3GP_H263: case GF_ISOM_BOX_TYPE_GHNT: case GF_ISOM_BOX_TYPE_RTP_STSD: case GF_ISOM_BOX_TYPE_SRTP_STSD: case GF_ISOM_BOX_TYPE_FDP_STSD: case GF_ISOM_BOX_TYPE_RRTP_STSD: case GF_ISOM_BOX_TYPE_RTCP_STSD: case GF_ISOM_BOX_TYPE_METX: case GF_ISOM_BOX_TYPE_METT: case GF_ISOM_BOX_TYPE_STXT: case GF_ISOM_BOX_TYPE_AVC1: case GF_ISOM_BOX_TYPE_AVC2: case GF_ISOM_BOX_TYPE_AVC3: case GF_ISOM_BOX_TYPE_AVC4: case GF_ISOM_BOX_TYPE_SVC1: case GF_ISOM_BOX_TYPE_MVC1: case GF_ISOM_BOX_TYPE_HVC1: case GF_ISOM_BOX_TYPE_HEV1: case GF_ISOM_BOX_TYPE_HVC2: case GF_ISOM_BOX_TYPE_HEV2: case GF_ISOM_BOX_TYPE_HVT1: case GF_ISOM_BOX_TYPE_LHV1: case GF_ISOM_BOX_TYPE_LHE1: case GF_ISOM_BOX_TYPE_AV01: case GF_ISOM_BOX_TYPE_VP08: case GF_ISOM_BOX_TYPE_VP09: case GF_ISOM_BOX_TYPE_AV1C: case GF_ISOM_BOX_TYPE_TX3G: case GF_ISOM_BOX_TYPE_TEXT: case GF_ISOM_BOX_TYPE_ENCT: case GF_ISOM_BOX_TYPE_DIMS: case GF_ISOM_BOX_TYPE_OPUS: case GF_ISOM_BOX_TYPE_AC3: case GF_ISOM_BOX_TYPE_EC3: case GF_ISOM_BOX_TYPE_LSR1: case GF_ISOM_BOX_TYPE_WVTT: case GF_ISOM_BOX_TYPE_STPP: case GF_ISOM_BOX_TYPE_SBTT: case GF_ISOM_BOX_TYPE_MP3: case GF_ISOM_BOX_TYPE_JPEG: case GF_ISOM_BOX_TYPE_PNG: case GF_ISOM_BOX_TYPE_JP2K: case GF_ISOM_BOX_TYPE_MHA1: case GF_ISOM_BOX_TYPE_MHA2: case GF_ISOM_BOX_TYPE_MHM1: case GF_ISOM_BOX_TYPE_MHM2: case GF_ISOM_BOX_TYPE_MJP2: case GF_QT_SUBTYPE_RAW_AUD: case GF_QT_SUBTYPE_TWOS: case GF_QT_SUBTYPE_SOWT: case GF_QT_SUBTYPE_FL32: case GF_QT_SUBTYPE_FL64: case GF_QT_SUBTYPE_IN24: case GF_QT_SUBTYPE_IN32: case GF_QT_SUBTYPE_ULAW: case GF_QT_SUBTYPE_ALAW: case GF_QT_SUBTYPE_ADPCM: case GF_QT_SUBTYPE_IMA_ADPCM: case GF_QT_SUBTYPE_DVCA: case GF_QT_SUBTYPE_QDMC: case GF_QT_SUBTYPE_QDMC2: case GF_QT_SUBTYPE_QCELP: case GF_QT_SUBTYPE_kMP3: case GF_QT_SUBTYPE_RAW_VID: case GF_QT_SUBTYPE_APCH: case GF_QT_SUBTYPE_APCO: case 
GF_QT_SUBTYPE_APCN: case GF_QT_SUBTYPE_APCS: case GF_QT_SUBTYPE_AP4X: case GF_QT_SUBTYPE_AP4H: case GF_QT_SUBTYPE_YUV422: case GF_QT_SUBTYPE_YUV444: case GF_QT_SUBTYPE_YUV422_10: case GF_QT_SUBTYPE_YUV444_10: case GF_ISOM_BOX_TYPE_IPCM: case GF_ISOM_BOX_TYPE_FPCM: continue; case GF_ISOM_BOX_TYPE_UNKNOWN: break; default: if (gf_box_valid_in_parent((GF_Box *) a, "stsd")) { continue; } GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Unexpected box %s in stsd!\n", gf_4cc_to_str(a->type))); continue; } //we are sure to have an unknown box here assert(a->type==GF_ISOM_BOX_TYPE_UNKNOWN); if (!a->data || (a->dataSize<8) ) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] Sample description %s does not have at least 8 bytes!\n", gf_4cc_to_str(a->original_4cc) )); continue; } else if (a->dataSize > a->size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Sample description %s has wrong data size %d!\n", gf_4cc_to_str(a->original_4cc), a->dataSize)); continue; } #define STSD_SWITCH_BOX(_box) \ if (gf_bs_available(bs)) { \ u64 pos = gf_bs_get_position(bs); \ u32 count_subb = 0; \ gf_bs_set_cookie(bs, GF_ISOM_BS_COOKIE_NO_LOGS);\ e = gf_isom_box_array_read((GF_Box *) _box, bs, NULL); \ count_subb = _box->child_boxes ? gf_list_count(_box->child_boxes) : 0; \ if (!count_subb || e) { \ gf_bs_seek(bs, pos); \ _box->data_size = (u32) gf_bs_available(bs); \ if (_box->data_size) { \ _box->data = a->data; \ a->data = NULL; \ memmove(_box->data, _box->data + pos, _box->data_size); \ } \ } else { \ _box->data_size = 0; \ } \ } \ gf_bs_del(bs); \ if (!_box->data_size && _box->data) { \ gf_free(_box->data); \ _box->data = NULL; \ } \ _box->size = 0; \ _box->EntryType = a->original_4cc; \ gf_list_rem(trak->Media->information->sampleTable->SampleDescription->child_boxes, i-1); \ gf_isom_box_del((GF_Box *)a); \ gf_list_insert(trak->Media->information->sampleTable->SampleDescription->child_boxes, _box, i-1); \ /*only process visual or audio note: no need for new_box_parent here since we always store sample descriptions in child_boxes*/ switch (trak->Media->handler->handlerType) { case GF_ISOM_MEDIA_VISUAL: case GF_ISOM_MEDIA_AUXV: case GF_ISOM_MEDIA_PICT: { GF_GenericVisualSampleEntryBox *genv = (GF_GenericVisualSampleEntryBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_GNRV); bs = gf_bs_new(a->data, a->dataSize, GF_BITSTREAM_READ); genv->size = a->size-8; gf_isom_video_sample_entry_read((GF_VisualSampleEntryBox *) genv, bs); STSD_SWITCH_BOX(genv) } break; case GF_ISOM_MEDIA_AUDIO: { GF_GenericAudioSampleEntryBox *gena = (GF_GenericAudioSampleEntryBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_GNRA); gena->size = a->size-8; bs = gf_bs_new(a->data, a->dataSize, GF_BITSTREAM_READ); gf_isom_audio_sample_entry_read((GF_AudioSampleEntryBox *) gena, bs); STSD_SWITCH_BOX(gena) } break; default: { GF_GenericSampleEntryBox *genm = (GF_GenericSampleEntryBox *) gf_isom_box_new(GF_ISOM_BOX_TYPE_GNRM); genm->size = a->size-8; bs = gf_bs_new(a->data, a->dataSize, GF_BITSTREAM_READ); e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)genm, bs); if (e) return; STSD_SWITCH_BOX(genm) } break; } } } GF_Err trak_on_child_box(GF_Box *s, GF_Box *a) { GF_TrackBox *ptr = (GF_TrackBox *)s; if (!a) return GF_OK; switch(a->type) { case GF_ISOM_BOX_TYPE_TKHD: if (ptr->Header) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->Header = (GF_TrackHeaderBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_EDTS: if (ptr->editBox) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->editBox = (GF_EditBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_UDTA: if (ptr->udta) 
ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->udta = (GF_UserDataBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_META: if (ptr->meta) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->meta = (GF_MetaBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_TREF: if (ptr->References) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->References = (GF_TrackReferenceBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_MDIA: if (ptr->Media) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->Media = (GF_MediaBox *)a; ((GF_MediaBox *)a)->mediaTrack = ptr; return GF_OK; case GF_ISOM_BOX_TYPE_TRGR: if (ptr->groups) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->groups = (GF_TrackGroupBox *)a; return GF_OK; case GF_QT_BOX_TYPE_TAPT: if (ptr->Aperture) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->Aperture = (GF_Box *)a; return GF_OK; case GF_ISOM_BOX_TYPE_SENC: ptr->sample_encryption = (GF_SampleEncryptionBox*)a; return GF_OK; case GF_ISOM_BOX_TYPE_UUID: if (((GF_UnknownUUIDBox *)a)->internal_4cc == GF_ISOM_BOX_UUID_PSEC) { ptr->sample_encryption = (GF_SampleEncryptionBox*) a; return GF_OK; } } return GF_OK; } GF_Err trak_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_TrackBox *ptr = (GF_TrackBox *)s; e = gf_isom_box_array_read(s, bs, trak_on_child_box); if (e) return e; gf_isom_check_sample_desc(ptr); if (!ptr->Header) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing TrackHeaderBox\n")); return GF_ISOM_INVALID_FILE; } if (!ptr->Media) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing MediaBox\n")); return GF_ISOM_INVALID_FILE; } if (!ptr->Media->information || !ptr->Media->information->sampleTable) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid MediaBox\n")); return GF_ISOM_INVALID_FILE; } if (!ptr->Media->information->sampleTable->SampleSize || (ptr->Media->information->sampleTable->SampleSize->sampleCount==0)) { if (ptr->Header->initial_duration) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[iso file] Track with no samples but duration defined, ignoring duration\n")); ptr->Header->initial_duration = 0; } } for (i=0; i<gf_list_count(ptr->Media->information->sampleTable->child_boxes); i++) { GF_Box *a = gf_list_get(ptr->Media->information->sampleTable->child_boxes, i); if ((a->type ==GF_ISOM_BOX_TYPE_UUID) && (((GF_UUIDBox *)a)->internal_4cc == GF_ISOM_BOX_UUID_PSEC)) { ptr->sample_encryption = (struct __sample_encryption_box *) a; break; } else if (a->type == GF_ISOM_BOX_TYPE_SENC) { ptr->sample_encryption = (struct __sample_encryption_box *)a; break; } } return e; } GF_Box *trak_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackBox, GF_ISOM_BOX_TYPE_TRAK); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err trak_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err trak_box_size(GF_Box *s) { u32 pos=0; GF_TrackBox *ptr = (GF_TrackBox *)s; if (ptr->sample_encryption && ptr->sample_encryption->load_needed) { GF_Err e = senc_Parse(ptr->moov->mov->movieFileMap->bs, ptr, NULL, ptr->sample_encryption); if (e) return e; } gf_isom_check_position(s, (GF_Box *)ptr->Header, &pos); gf_isom_check_position(s, (GF_Box *)ptr->Aperture, &pos); gf_isom_check_position(s, (GF_Box *)ptr->References, &pos); gf_isom_check_position(s, (GF_Box *)ptr->editBox, &pos); gf_isom_check_position(s, (GF_Box *)ptr->Media, &pos); gf_isom_check_position(s, (GF_Box *)ptr->meta, &pos); gf_isom_check_position(s, (GF_Box *)ptr->groups, &pos); gf_isom_check_position(s, (GF_Box *)ptr->udta, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stri_box_del(GF_Box *s) { GF_SubTrackInformationBox *ptr = (GF_SubTrackInformationBox *)s; 
if (ptr == NULL) return; if (ptr->attribute_list) gf_free(ptr->attribute_list); gf_free(ptr); } GF_Err stri_box_read(GF_Box *s, GF_BitStream *bs) { size_t i; GF_SubTrackInformationBox *ptr = (GF_SubTrackInformationBox *)s; ISOM_DECREASE_SIZE(ptr, 8) ptr->switch_group = gf_bs_read_u16(bs); ptr->alternate_group = gf_bs_read_u16(bs); ptr->sub_track_id = gf_bs_read_u32(bs); ptr->attribute_count = ptr->size / 4; GF_SAFE_ALLOC_N(ptr->attribute_list, (size_t)ptr->attribute_count, u32); if (!ptr->attribute_list) return GF_OUT_OF_MEM; for (i = 0; i < ptr->attribute_count; i++) { ISOM_DECREASE_SIZE(ptr, 4) ptr->attribute_list[i] = gf_bs_read_u32(bs); } return GF_OK; } GF_Box *stri_box_new() { ISOM_DECL_BOX_ALLOC(GF_SubTrackInformationBox, GF_ISOM_BOX_TYPE_STRI); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stri_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_SubTrackInformationBox *ptr = (GF_SubTrackInformationBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u16(bs, ptr->switch_group); gf_bs_write_u16(bs, ptr->alternate_group); gf_bs_write_u32(bs, ptr->sub_track_id); for (i = 0; i < ptr->attribute_count; i++) { gf_bs_write_u32(bs, ptr->attribute_list[i]); } return GF_OK; } GF_Err stri_box_size(GF_Box *s) { GF_SubTrackInformationBox *ptr = (GF_SubTrackInformationBox *)s; ptr->size += 8 + 4 * ptr->attribute_count; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void stsg_box_del(GF_Box *s) { GF_SubTrackSampleGroupBox *ptr = (GF_SubTrackSampleGroupBox *)s; if (ptr == NULL) return; if (ptr->group_description_index) gf_free(ptr->group_description_index); gf_free(ptr); } GF_Err stsg_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_SubTrackSampleGroupBox *ptr = (GF_SubTrackSampleGroupBox *)s; ISOM_DECREASE_SIZE(s, 6); ptr->grouping_type = gf_bs_read_u32(bs); ptr->nb_groups = gf_bs_read_u16(bs); ISOM_DECREASE_SIZE(s, ptr->nb_groups*4); GF_SAFE_ALLOC_N(ptr->group_description_index, ptr->nb_groups, u32); if (!ptr->group_description_index) return GF_OUT_OF_MEM; for (i = 0; i < ptr->nb_groups; i++) { ptr->group_description_index[i] = gf_bs_read_u32(bs); } return GF_OK; } GF_Box *stsg_box_new() { ISOM_DECL_BOX_ALLOC(GF_SubTrackSampleGroupBox, GF_ISOM_BOX_TYPE_STSG); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stsg_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_SubTrackSampleGroupBox *ptr = (GF_SubTrackSampleGroupBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->grouping_type); gf_bs_write_u16(bs, ptr->nb_groups); for (i = 0; i < ptr->nb_groups; i++) { gf_bs_write_u32(bs, ptr->group_description_index[i]); } return GF_OK; } GF_Err stsg_box_size(GF_Box *s) { GF_SubTrackSampleGroupBox *ptr = (GF_SubTrackSampleGroupBox *)s; ptr->size += 6 + 4 * ptr->nb_groups; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void strk_box_del(GF_Box *s) { gf_free(s); } GF_Err strk_on_child_box(GF_Box *s, GF_Box *a) { GF_SubTrackBox *ptr = (GF_SubTrackBox *)s; if (!a) return GF_OK; switch (a->type) { case GF_ISOM_BOX_TYPE_STRI: if (ptr->info) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->info = (GF_SubTrackInformationBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_STRD: if (ptr->strd) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->strd = a; return GF_OK; } return GF_OK; } GF_Err strk_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SubTrackBox *ptr = (GF_SubTrackBox *)s; e = gf_isom_box_array_read(s, bs, strk_on_child_box); if (e) return e; if (!ptr->info) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Missing 
SubTrackInformationBox\n")); return GF_ISOM_INVALID_FILE; } return GF_OK; } GF_Box *strk_box_new() { ISOM_DECL_BOX_ALLOC(GF_SubTrackBox, GF_ISOM_BOX_TYPE_STRK); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err strk_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err strk_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void tref_box_del(GF_Box *s) { GF_TrackReferenceBox *ptr = (GF_TrackReferenceBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err tref_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read_ex(s, bs, NULL, s->type); } GF_Box *tref_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackReferenceBox, GF_ISOM_BOX_TYPE_TREF); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tref_box_write(GF_Box *s, GF_BitStream *bs) { // GF_TrackReferenceBox *ptr = (GF_TrackReferenceBox *)s; return gf_isom_box_write_header(s, bs); } GF_Err tref_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void reftype_box_del(GF_Box *s) { GF_TrackReferenceTypeBox *ptr = (GF_TrackReferenceTypeBox *)s; if (!ptr) return; if (ptr->trackIDs) gf_free(ptr->trackIDs); gf_free(ptr); } GF_Err reftype_box_read(GF_Box *s, GF_BitStream *bs) { u32 bytesToRead; u32 i; GF_TrackReferenceTypeBox *ptr = (GF_TrackReferenceTypeBox *)s; bytesToRead = (u32) (ptr->size); if (!bytesToRead) return GF_OK; ptr->trackIDCount = (u32) (bytesToRead) / sizeof(u32); ptr->trackIDs = (GF_ISOTrackID *) gf_malloc(ptr->trackIDCount * sizeof(GF_ISOTrackID)); if (!ptr->trackIDs) return GF_OUT_OF_MEM; for (i = 0; i < ptr->trackIDCount; i++) { ptr->trackIDs[i] = gf_bs_read_u32(bs); } return GF_OK; } GF_Box *reftype_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackReferenceTypeBox, GF_ISOM_BOX_TYPE_REFT); return (GF_Box *)tmp; } GF_Err reftype_AddRefTrack(GF_TrackReferenceTypeBox *ref, GF_ISOTrackID trackID, u16 *outRefIndex) { u32 i; if (!ref || !trackID) return GF_BAD_PARAM; if (outRefIndex) *outRefIndex = 0; //don't add a dep if already here !! 
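/* A minimal usage sketch (hypothetical caller, not part of this file): adding the same trackID twice is a no-op that simply reports the existing 1-based index:
       u16 idx;
       reftype_AddRefTrack(ref, some_track_id, &idx);   // appends, idx == trackIDCount
       reftype_AddRefTrack(ref, some_track_id, &idx);   // duplicate: same idx, no realloc
   The loop below performs that duplicate check before growing trackIDs. */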
for (i = 0; i < ref->trackIDCount; i++) { if (ref->trackIDs[i] == trackID) { if (outRefIndex) *outRefIndex = i+1; return GF_OK; } } ref->trackIDs = (GF_ISOTrackID *) gf_realloc(ref->trackIDs, (ref->trackIDCount + 1) * sizeof(GF_ISOTrackID) ); if (!ref->trackIDs) return GF_OUT_OF_MEM; ref->trackIDs[ref->trackIDCount] = trackID; ref->trackIDCount++; if (outRefIndex) *outRefIndex = ref->trackIDCount; return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err reftype_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_TrackReferenceTypeBox *ptr = (GF_TrackReferenceTypeBox *)s; ptr->type = ptr->reference_type; e = gf_isom_box_write_header(s, bs); ptr->type = GF_ISOM_BOX_TYPE_REFT; if (e) return e; for (i = 0; i < ptr->trackIDCount; i++) { gf_bs_write_u32(bs, ptr->trackIDs[i]); } return GF_OK; } GF_Err reftype_box_size(GF_Box *s) { GF_TrackReferenceTypeBox *ptr = (GF_TrackReferenceTypeBox *)s; if (ptr->trackIDCount) ptr->size += (ptr->trackIDCount * sizeof(u32)); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void trex_box_del(GF_Box *s) { GF_TrackExtendsBox *ptr = (GF_TrackExtendsBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err trex_box_read(GF_Box *s, GF_BitStream *bs) { GF_TrackExtendsBox *ptr = (GF_TrackExtendsBox *)s; ISOM_DECREASE_SIZE(ptr, 20); ptr->trackID = gf_bs_read_u32(bs); ptr->def_sample_desc_index = gf_bs_read_u32(bs); ptr->def_sample_duration = gf_bs_read_u32(bs); ptr->def_sample_size = gf_bs_read_u32(bs); ptr->def_sample_flags = gf_bs_read_u32(bs); return GF_OK; } GF_Box *trex_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackExtendsBox, GF_ISOM_BOX_TYPE_TREX); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err trex_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TrackExtendsBox *ptr = (GF_TrackExtendsBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->trackID); //we always write 1 in trex default sample desc as using 0 breaks chrome/opera/... gf_bs_write_u32(bs, ptr->def_sample_desc_index ? 
ptr->def_sample_desc_index : 1); gf_bs_write_u32(bs, ptr->def_sample_duration); gf_bs_write_u32(bs, ptr->def_sample_size); gf_bs_write_u32(bs, ptr->def_sample_flags); return GF_OK; } GF_Err trex_box_size(GF_Box *s) { GF_TrackExtendsBox *ptr = (GF_TrackExtendsBox *)s; ptr->size += 20; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void trep_box_del(GF_Box *s) { GF_TrackExtensionPropertiesBox *ptr = (GF_TrackExtensionPropertiesBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err trep_box_read(GF_Box *s, GF_BitStream *bs) { GF_TrackExtensionPropertiesBox *ptr = (GF_TrackExtensionPropertiesBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->trackID = gf_bs_read_u32(bs); return gf_isom_box_array_read(s, bs, NULL); } GF_Box *trep_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackExtensionPropertiesBox, GF_ISOM_BOX_TYPE_TREP); tmp->child_boxes = gf_list_new(); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err trep_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TrackExtensionPropertiesBox *ptr = (GF_TrackExtensionPropertiesBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->trackID); return GF_OK; } GF_Err trep_box_size(GF_Box *s) { GF_TrackExtensionPropertiesBox *ptr = (GF_TrackExtensionPropertiesBox *)s; ptr->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ #ifndef GPAC_DISABLE_ISOM_FRAGMENTS void trun_box_del(GF_Box *s) { GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *)s; if (ptr == NULL) return; if (ptr->samples) gf_free(ptr->samples); if (ptr->cache) gf_bs_del(ptr->cache); if (ptr->sample_order) gf_free(ptr->sample_order); gf_free(ptr); } #ifdef GF_ENABLE_CTRN static u32 ctrn_field_size(u32 field_idx) { if (field_idx==3) return 4; return field_idx; } u32 gf_isom_ctrn_field_size_bits(u32 field_idx) { if (field_idx==3) return 32; return field_idx*8; } static u32 ctrn_read_flags(GF_BitStream *bs, u32 nbbits) { u32 val = gf_bs_read_int(bs, nbbits); if (nbbits==16) val <<= 16; else if (nbbits==8) val <<= 24; return val; } static GF_Err ctrn_box_read(GF_Box *s, GF_BitStream *bs) { u32 i, count, flags, first_idx=0; Bool inherit_dur, inherit_size, inherit_flags, inherit_ctso; GF_TrunEntry *ent; GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *)s; flags = ptr->flags; ptr->ctrn_flags = flags; ptr->flags = 0; ptr->sample_count = gf_bs_read_u16(bs); ISOM_DECREASE_SIZE(ptr, 2); if (flags & GF_ISOM_TRUN_DATA_OFFSET) { if (flags & GF_ISOM_CTRN_DATAOFFSET_16) { ptr->data_offset = gf_bs_read_u16(bs); ISOM_DECREASE_SIZE(ptr, 2); } else { ptr->data_offset = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, 4); } ptr->flags |= GF_ISOM_TRUN_DATA_OFFSET; } if (flags & GF_ISOM_CTRN_CTSO_MULTIPLIER) { ptr->ctso_multiplier = gf_bs_read_u16(bs); ISOM_DECREASE_SIZE(ptr, 2); } /*no sample dur/sample_flag/size/ctso for first or following, create a pack sample */ if (! 
(flags & 0x00FFFF00)) { GF_SAFEALLOC(ent, GF_TrunEntry); if (!ent) return GF_OUT_OF_MEM; ent->nb_pack = ptr->sample_count; gf_list_add(ptr->entries, ent); return GF_OK; } /*allocate all entries*/ for (i=0; i<ptr->sample_count; i++) { GF_SAFEALLOC(ent, GF_TrunEntry); if (!ent) return GF_OUT_OF_MEM; gf_list_add(ptr->entries, ent); } //unpack flags ptr->ctrn_first_dur = (flags>>22) & 0x3; ptr->ctrn_first_size = (flags>>20) & 0x3; ptr->ctrn_first_sample_flags = (flags>>18) & 0x3; ptr->ctrn_first_ctts = (flags>>16) & 0x3; ptr->ctrn_dur = (flags>>14) & 0x3; ptr->ctrn_size = (flags>>12) & 0x3; ptr->ctrn_sample_flags = (flags>>10) & 0x3; ptr->ctrn_ctts = (flags>>8) & 0x3; inherit_dur = flags & GF_ISOM_CTRN_INHERIT_DUR; inherit_size = flags & GF_ISOM_CTRN_INHERIT_SIZE; inherit_flags = flags & GF_ISOM_CTRN_INHERIT_FLAGS; inherit_ctso = flags & GF_ISOM_CTRN_INHERIT_CTSO; if (flags & GF_ISOM_CTRN_FIRST_SAMPLE) { ent = gf_list_get(ptr->entries, 0); first_idx = 1; if (!inherit_dur && ptr->ctrn_first_dur) { ent->Duration = gf_bs_read_int(bs, gf_isom_ctrn_field_size_bits(ptr->ctrn_first_dur) ); ISOM_DECREASE_SIZE(ptr, ctrn_field_size(ptr->ctrn_first_dur) ); } if (!inherit_size && ptr->ctrn_first_size) { ent->size = gf_bs_read_int(bs, gf_isom_ctrn_field_size_bits(ptr->ctrn_first_size) ); ISOM_DECREASE_SIZE(ptr, ctrn_field_size(ptr->ctrn_first_size) ); } if (!inherit_flags && ptr->ctrn_first_sample_flags) { ent->flags = ctrn_read_flags(bs, gf_isom_ctrn_field_size_bits(ptr->ctrn_first_sample_flags) ); ISOM_DECREASE_SIZE(ptr, ctrn_field_size(ptr->ctrn_first_sample_flags) ); } if (!inherit_ctso && ptr->ctrn_first_ctts) { ent->CTS_Offset = gf_bs_read_int(bs, gf_isom_ctrn_field_size_bits(ptr->ctrn_first_ctts) ); ISOM_DECREASE_SIZE(ptr, ctrn_field_size(ptr->ctrn_first_ctts) ); if (ptr->ctso_multiplier) ent->CTS_Offset *= (s32) ptr->ctso_multiplier; } } count = ptr->sample_count - first_idx; if (!inherit_dur && ptr->ctrn_dur) { u32 nbbits = gf_isom_ctrn_field_size_bits(ptr->ctrn_dur); ISOM_DECREASE_SIZE(ptr, count * nbbits / 8); for (i=first_idx; i<ptr->sample_count; i++) { ent = gf_list_get(ptr->entries, i); ent->Duration = gf_bs_read_int(bs, nbbits); } } if (!inherit_size && ptr->ctrn_size) { u32 nbbits = gf_isom_ctrn_field_size_bits(ptr->ctrn_size); ISOM_DECREASE_SIZE(ptr, count * nbbits / 8); for (i=first_idx; i<ptr->sample_count; i++) { ent = gf_list_get(ptr->entries, i); ent->size = gf_bs_read_int(bs, nbbits); } } if (!inherit_flags && ptr->ctrn_sample_flags) { u32 nbbits = gf_isom_ctrn_field_size_bits(ptr->ctrn_sample_flags); ISOM_DECREASE_SIZE(ptr, count * nbbits / 8); for (i=first_idx; i<ptr->sample_count; i++) { ent = gf_list_get(ptr->entries, i); ent->flags = ctrn_read_flags(bs, nbbits); } } if (!inherit_ctso && ptr->ctrn_ctts) { u32 nbbits = gf_isom_ctrn_field_size_bits(ptr->ctrn_ctts); ISOM_DECREASE_SIZE(ptr, count * nbbits / 8); for (i=first_idx; i<ptr->sample_count; i++) { ent = gf_list_get(ptr->entries, i); ent->CTS_Offset = gf_bs_read_int(bs, nbbits); if (ptr->ctso_multiplier) ent->CTS_Offset *= (s32) ptr->ctso_multiplier; } } return GF_OK; } #endif GF_Err trun_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_TrunEntry *p; GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *)s; #ifdef GF_ENABLE_CTRN if (ptr->type == GF_ISOM_BOX_TYPE_CTRN) { ptr->type = GF_ISOM_BOX_TYPE_TRUN; ptr->use_ctrn = GF_TRUE; return ctrn_box_read(s, bs); } #endif //check this is a good file if ((ptr->flags & GF_ISOM_TRUN_FIRST_FLAG) && (ptr->flags & GF_ISOM_TRUN_FLAGS)) return GF_ISOM_INVALID_FILE; 
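/* Layout handled below: a 32-bit sample_count, an optional 32-bit data_offset (GF_ISOM_TRUN_DATA_OFFSET) and an optional 32-bit first_sample_flags (GF_ISOM_TRUN_FIRST_FLAG), then one entry per sample carrying only the 4-byte fields enabled by GF_ISOM_TRUN_DURATION / _SIZE / _FLAGS / _CTS_OFFSET; when none of the per-sample flags is set, a single packed entry (nb_pack) stands for all samples. */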
ISOM_DECREASE_SIZE(ptr, 4); ptr->sample_count = gf_bs_read_u32(bs); //The rest depends on the flags if (ptr->flags & GF_ISOM_TRUN_DATA_OFFSET) { ISOM_DECREASE_SIZE(ptr, 4); ptr->data_offset = gf_bs_read_u32(bs); } if (ptr->flags & GF_ISOM_TRUN_FIRST_FLAG) { ISOM_DECREASE_SIZE(ptr, 4); ptr->first_sample_flags = gf_bs_read_u32(bs); } if (! (ptr->flags & (GF_ISOM_TRUN_DURATION | GF_ISOM_TRUN_SIZE | GF_ISOM_TRUN_FLAGS | GF_ISOM_TRUN_CTS_OFFSET) ) ) { ptr->samples = gf_malloc(sizeof(GF_TrunEntry)); if (!ptr->samples) return GF_OUT_OF_MEM; ptr->sample_alloc = ptr->nb_samples = 1; ptr->samples[0].nb_pack = ptr->sample_count; } else { //if we get here, at least one flag (so at least 4 bytes) is set, check size if (ptr->sample_count * 4 > ptr->size) { ISOM_DECREASE_SIZE(ptr, ptr->sample_count*4); } ptr->samples = gf_malloc(sizeof(GF_TrunEntry) * ptr->sample_count); if (!ptr->samples) return GF_OUT_OF_MEM; ptr->sample_alloc = ptr->nb_samples = ptr->sample_count; //read each entry (even though nothing may be written) for (i=0; i<ptr->sample_count; i++) { u32 trun_size = 0; p = &ptr->samples[i]; memset(p, 0, sizeof(GF_TrunEntry)); if (ptr->flags & GF_ISOM_TRUN_DURATION) { p->Duration = gf_bs_read_u32(bs); trun_size += 4; } if (ptr->flags & GF_ISOM_TRUN_SIZE) { p->size = gf_bs_read_u32(bs); trun_size += 4; } //SHOULDN'T BE USED IF GF_ISOM_TRUN_FIRST_FLAG IS DEFINED if (ptr->flags & GF_ISOM_TRUN_FLAGS) { p->flags = gf_bs_read_u32(bs); trun_size += 4; } if (ptr->flags & GF_ISOM_TRUN_CTS_OFFSET) { if (ptr->version==0) { p->CTS_Offset = (u32) gf_bs_read_u32(bs); } else { p->CTS_Offset = (s32) gf_bs_read_u32(bs); } trun_size += 4; } ISOM_DECREASE_SIZE(ptr, trun_size); } } /*todo parse sample reorder*/ if (ptr->size) { gf_bs_skip_bytes(bs, ptr->size); ptr->size = 0; } return GF_OK; } GF_Box *trun_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackFragmentRunBox, GF_ISOM_BOX_TYPE_TRUN); //NO FLAGS SET BY DEFAULT return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE #ifdef GF_ENABLE_CTRN static void ctrn_write_sample_flags(GF_BitStream *bs, u32 flags, u32 field_size) { if (!field_size) return; if (field_size==8) flags = flags>>24; else if (field_size==16) flags = flags>>16; gf_bs_write_int(bs, flags, field_size); } static void ctrn_write_ctso(GF_TrackFragmentRunBox *ctrn, GF_BitStream *bs, u32 ctso, u32 field_size) { if (!field_size) return; if (ctrn->ctso_multiplier) { gf_bs_write_int(bs, ctso / ctrn->ctso_multiplier, field_size); } else { gf_bs_write_int(bs, ctso, field_size); } } GF_Err ctrn_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i, count, flags; GF_TrunEntry *ent; GF_TrackFragmentRunBox *ctrn = (GF_TrackFragmentRunBox *) s; if (!s) return GF_BAD_PARAM; flags = ctrn->flags; ctrn->flags = ctrn->ctrn_flags; ctrn->type = GF_ISOM_BOX_TYPE_CTRN; e = gf_isom_full_box_write(s, bs); if (e) return e; ctrn->flags = flags; ctrn->type = GF_ISOM_BOX_TYPE_TRUN; gf_bs_write_u16(bs, ctrn->sample_count); if (ctrn->flags & GF_ISOM_TRUN_DATA_OFFSET) { if (ctrn->ctrn_flags & GF_ISOM_CTRN_DATAOFFSET_16) { gf_bs_write_u16(bs, ctrn->data_offset); } else { gf_bs_write_u32(bs, ctrn->data_offset); } } if (ctrn->ctso_multiplier) { gf_bs_write_u16(bs, ctrn->ctso_multiplier); } /*we always write first sample using first flags*/ ent = gf_list_get(ctrn->entries, 0); gf_bs_write_int(bs, ent->Duration, gf_isom_ctrn_field_size_bits(ctrn->ctrn_first_dur) ); gf_bs_write_int(bs, ent->size, gf_isom_ctrn_field_size_bits(ctrn->ctrn_first_size) ); ctrn_write_sample_flags(bs, ent->flags, 
gf_isom_ctrn_field_size_bits(ctrn->ctrn_first_sample_flags) ); ctrn_write_ctso(ctrn,bs, ent->CTS_Offset, gf_isom_ctrn_field_size_bits(ctrn->ctrn_first_ctts) ); count = gf_list_count(ctrn->entries); if (ctrn->ctrn_dur) { u32 nbbits = gf_isom_ctrn_field_size_bits(ctrn->ctrn_dur); for (i=1; i<count; i++) { GF_TrunEntry *a_ent = gf_list_get(ctrn->entries, i); gf_bs_write_int(bs, a_ent->Duration, nbbits); } } if (ctrn->ctrn_size) { u32 nbbits = gf_isom_ctrn_field_size_bits(ctrn->ctrn_size); for (i=1; i<count; i++) { GF_TrunEntry *a_ent = gf_list_get(ctrn->entries, i); gf_bs_write_int(bs, a_ent->size, nbbits); } } if (ctrn->ctrn_sample_flags) { u32 nbbits = gf_isom_ctrn_field_size_bits(ctrn->ctrn_sample_flags); for (i=1; i<count; i++) { GF_TrunEntry *a_ent = gf_list_get(ctrn->entries, i); ctrn_write_sample_flags(bs, a_ent->flags, nbbits); } } if (ctrn->ctrn_ctts) { u32 nbbits = gf_isom_ctrn_field_size_bits(ctrn->ctrn_ctts); for (i=1; i<count; i++) { GF_TrunEntry *a_ent = gf_list_get(ctrn->entries, i); ctrn_write_ctso(ctrn, bs, a_ent->CTS_Offset, nbbits); } } return GF_OK; } #endif GF_Err trun_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *) s; if (!s) return GF_BAD_PARAM; #ifdef GF_ENABLE_CTRN if (ptr->use_ctrn) return ctrn_box_write(s, bs); #endif e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->sample_count); //The rest depends on the flags if (ptr->flags & GF_ISOM_TRUN_DATA_OFFSET) { gf_bs_write_u32(bs, ptr->data_offset); } if (ptr->flags & GF_ISOM_TRUN_FIRST_FLAG) { gf_bs_write_u32(bs, ptr->first_sample_flags); } if (ptr->flags & (GF_ISOM_TRUN_DURATION | GF_ISOM_TRUN_SIZE | GF_ISOM_TRUN_FLAGS | GF_ISOM_TRUN_CTS_OFFSET) ) { for (i=0; i<ptr->nb_samples; i++) { GF_TrunEntry *p = &ptr->samples[i]; if (ptr->flags & GF_ISOM_TRUN_DURATION) { gf_bs_write_u32(bs, p->Duration); } if (ptr->flags & GF_ISOM_TRUN_SIZE) { gf_bs_write_u32(bs, p->size); } //SHOULDN'T BE USED IF GF_ISOM_TRUN_FIRST_FLAG IS DEFINED if (ptr->flags & GF_ISOM_TRUN_FLAGS) { gf_bs_write_u32(bs, p->flags); } if (ptr->flags & GF_ISOM_TRUN_CTS_OFFSET) { if (ptr->version==0) { gf_bs_write_u32(bs, p->CTS_Offset); } else { gf_bs_write_u32(bs, (u32) p->CTS_Offset); } } } } if (ptr->sample_order) { u32 nb_bits = 8; if (ptr->sample_count>0xFFFFFF) nb_bits = 32; else if (ptr->sample_count>0xFFFF) nb_bits = 24; else if (ptr->sample_count>0xFF) nb_bits = 16; for (i=0; i<ptr->sample_count; i++) { gf_bs_write_int(bs, ptr->sample_order[i], nb_bits); } } return GF_OK; } #ifdef GF_ENABLE_CTRN static u32 ctrn_sample_flags_to_index(u32 val) { if (!val) return 0; if (val & 0x0000FFFF) return 3; if (val & 0x00FF0000) return 2; return 1; } static u32 ctrn_u32_to_index(u32 val) { if (!val) return 0; if (val<=255) return 1; if (val<=65535) return 2; return 3; } static u32 ctrn_s32_to_index(s32 val) { if (!val) return 0; if (ABS(val)<=127) return 1; if (ABS(val)<=32767) return 2; return 3; } static u32 ctrn_ctts_to_index(GF_TrackFragmentRunBox *ctrn, s32 ctts) { if (!(ctrn->flags & GF_ISOM_TRUN_CTS_OFFSET)) return 0; if (!ctts) return 0; if (ctrn->version) { if (ctrn->ctso_multiplier) return ctrn_s32_to_index(ctts / ctrn->ctso_multiplier); return ctrn_s32_to_index(ctts); } assert(ctts>0); if (ctrn->ctso_multiplier) return ctrn_u32_to_index((u32)ctts / ctrn->ctso_multiplier); return ctrn_s32_to_index((u32)ctts); } static GF_Err ctrn_box_size(GF_TrackFragmentRunBox *ctrn) { Bool use_ctso_multi = GF_TRUE; u32 i, count; GF_TrunEntry *ent; ctrn->ctrn_flags = 0; 
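/* Compact-run sizing, as implemented below: for each of duration/size/flags/cts the smallest encoding index (0 = absent, 1 = 8-bit, 2 = 16-bit, 3 = 32-bit, cf. ctrn_field_size) that fits every sample value is selected, the first sample gets its own field sizes, and the chosen indices are packed into ctrn_flags for ctrn_box_write. */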
ctrn->ctrn_first_dur = ctrn->ctrn_first_size = ctrn->ctrn_first_sample_flags = ctrn->ctrn_first_ctts = 0; ctrn->ctrn_dur = ctrn->ctrn_size = ctrn->ctrn_sample_flags = ctrn->ctrn_ctts = 0; ctrn->size += 2; //16 bits for sample count if (ctrn->flags & GF_ISOM_TRUN_DATA_OFFSET) { ctrn->ctrn_flags |= GF_ISOM_TRUN_DATA_OFFSET; if (ABS(ctrn->data_offset) < 32767) { ctrn->size += 2; ctrn->ctrn_flags |= GF_ISOM_CTRN_DATAOFFSET_16; } else ctrn->size += 4; } count = gf_list_count(ctrn->entries); if (ctrn->ctso_multiplier && (ctrn->flags & GF_ISOM_TRUN_CTS_OFFSET) && (ctrn->ctso_multiplier<=0xFFFF) ) { for (i=0; i<count; i++) { GF_TrunEntry *a_ent = gf_list_get(ctrn->entries, i); if (a_ent->CTS_Offset % ctrn->ctso_multiplier) { use_ctso_multi = GF_FALSE; break; } } } else { use_ctso_multi = GF_FALSE; } if (ctrn->use_inherit) { use_ctso_multi = GF_FALSE; ctrn->ctrn_flags |= 0xB0; //duration=1,size=0,flags=1,cts=1 << 4 } if (use_ctso_multi) { ctrn->size += 2; ctrn->ctrn_flags |= GF_ISOM_CTRN_CTSO_MULTIPLIER; } else { ctrn->ctso_multiplier = 0; } /*we always write first sample using first flags*/ ent = gf_list_get(ctrn->entries, 0); ctrn->ctrn_flags |= GF_ISOM_CTRN_FIRST_SAMPLE; if (!ctrn->use_inherit && (ctrn->flags & GF_ISOM_TRUN_DURATION)) { ctrn->ctrn_first_dur = ctrn_u32_to_index(ent->Duration); if (ctrn->ctrn_first_dur) { ctrn->size += ctrn_field_size(ctrn->ctrn_first_dur); ctrn->ctrn_flags |= ctrn->ctrn_first_dur<<22; } } if (ctrn->flags & GF_ISOM_TRUN_SIZE) { ctrn->ctrn_first_size = ctrn_u32_to_index(ent->size); if (ctrn->ctrn_first_size) { ctrn->size += ctrn_field_size(ctrn->ctrn_first_size); ctrn->ctrn_flags |= ctrn->ctrn_first_size<<20; } } if (!ctrn->use_inherit && (ctrn->flags & GF_ISOM_TRUN_FLAGS)) { ctrn->ctrn_first_sample_flags = ctrn_sample_flags_to_index(ent->flags); if (ctrn->ctrn_first_sample_flags) { ctrn->size += ctrn_field_size(ctrn->ctrn_first_sample_flags); ctrn->ctrn_flags |= ctrn->ctrn_first_sample_flags<<18; } } if (!ctrn->use_inherit && (ctrn->flags & GF_ISOM_TRUN_CTS_OFFSET)) { ctrn->ctrn_first_ctts = ctrn_ctts_to_index(ctrn, ent->CTS_Offset); if (ctrn->ctrn_first_ctts) { ctrn->size += ctrn_field_size(ctrn->ctrn_first_ctts); ctrn->ctrn_flags |= ctrn->ctrn_first_ctts<<16; } } for (i=1; i<count; i++) { u8 field_idx; GF_TrunEntry *a_ent = gf_list_get(ctrn->entries, i); if (!ctrn->use_inherit && (ctrn->flags & GF_ISOM_TRUN_DURATION)) { field_idx = ctrn_u32_to_index(a_ent->Duration); if (ctrn->ctrn_dur < field_idx) ctrn->ctrn_dur = field_idx; } if (ctrn->flags & GF_ISOM_TRUN_SIZE) { field_idx = ctrn_u32_to_index(a_ent->size); if (ctrn->ctrn_size < field_idx) ctrn->ctrn_size = field_idx; } if (!ctrn->use_inherit && (ctrn->flags & GF_ISOM_TRUN_FLAGS)) { field_idx = ctrn_sample_flags_to_index(a_ent->flags); if (ctrn->ctrn_sample_flags < field_idx) ctrn->ctrn_sample_flags = field_idx; } if (!ctrn->use_inherit) { field_idx = ctrn_ctts_to_index(ctrn, a_ent->CTS_Offset); if (ctrn->ctrn_ctts < field_idx) ctrn->ctrn_ctts = field_idx; } } count-=1; if (ctrn->ctrn_dur) { ctrn->size += count * ctrn_field_size(ctrn->ctrn_dur); ctrn->ctrn_flags |= ctrn->ctrn_dur<<14; } if (ctrn->ctrn_size) { ctrn->size += count * ctrn_field_size(ctrn->ctrn_size); ctrn->ctrn_flags |= ctrn->ctrn_size<<12; } if (ctrn->ctrn_sample_flags) { ctrn->size += count * ctrn_field_size(ctrn->ctrn_sample_flags); ctrn->ctrn_flags |= ctrn->ctrn_sample_flags<<10; } if (ctrn->ctrn_ctts) { ctrn->size += count * ctrn_field_size(ctrn->ctrn_ctts); ctrn->ctrn_flags |= ctrn->ctrn_ctts<<8; } return GF_OK; } #endif GF_Err 
trun_box_size(GF_Box *s) { GF_TrackFragmentRunBox *ptr = (GF_TrackFragmentRunBox *)s; #ifdef GF_ENABLE_CTRN if (ptr->use_ctrn) return ctrn_box_size(ptr); #endif ptr->size += 4; //The rest depends on the flags if (ptr->flags & GF_ISOM_TRUN_DATA_OFFSET) ptr->size += 4; if (ptr->flags & GF_ISOM_TRUN_FIRST_FLAG) ptr->size += 4; if (ptr->sample_order) { u32 nb_bytes = 1; if (ptr->sample_count>0xFFFFFF) nb_bytes = 4; else if (ptr->sample_count>0xFFFF) nb_bytes = 3; else if (ptr->sample_count>0xFF) nb_bytes = 2; ptr->size += ptr->sample_count*nb_bytes; } if (! (ptr->flags & (GF_ISOM_TRUN_DURATION | GF_ISOM_TRUN_SIZE | GF_ISOM_TRUN_FLAGS | GF_ISOM_TRUN_CTS_OFFSET) ) ) { return GF_OK; } //if nothing to do, this will be skipped automatically if (ptr->flags & GF_ISOM_TRUN_DURATION) ptr->size += 4*ptr->nb_samples; if (ptr->flags & GF_ISOM_TRUN_SIZE) ptr->size += 4*ptr->nb_samples; //SHOULDN'T BE USED IF GF_ISOM_TRUN_FIRST_FLAG IS DEFINED if (ptr->flags & GF_ISOM_TRUN_FLAGS) ptr->size += 4*ptr->nb_samples; if (ptr->flags & GF_ISOM_TRUN_CTS_OFFSET) ptr->size += 4*ptr->nb_samples; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ void tsro_box_del(GF_Box *s) { GF_TimeOffHintEntryBox *tsro = (GF_TimeOffHintEntryBox *)s; gf_free(tsro); } GF_Err tsro_box_read(GF_Box *s, GF_BitStream *bs) { GF_TimeOffHintEntryBox *ptr = (GF_TimeOffHintEntryBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->TimeOffset = gf_bs_read_u32(bs); return GF_OK; } GF_Box *tsro_box_new() { ISOM_DECL_BOX_ALLOC(GF_TimeOffHintEntryBox, GF_ISOM_BOX_TYPE_TSRO); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tsro_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TimeOffHintEntryBox *ptr = (GF_TimeOffHintEntryBox *)s; if (ptr == NULL) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->TimeOffset); return GF_OK; } GF_Err tsro_box_size(GF_Box *s) { s->size += 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void udta_box_del(GF_Box *s) { u32 i; GF_UserDataMap *map; GF_UserDataBox *ptr = (GF_UserDataBox *)s; if (ptr == NULL) return; i=0; while ((map = (GF_UserDataMap *)gf_list_enum(ptr->recordList, &i))) { gf_isom_box_array_del(map->boxes); gf_free(map); } gf_list_del(ptr->recordList); gf_free(ptr); } GF_UserDataMap *udta_getEntry(GF_UserDataBox *ptr, u32 box_type, bin128 *uuid) { u32 i; GF_UserDataMap *map; if (ptr == NULL) return NULL; i=0; while ((map = (GF_UserDataMap *)gf_list_enum(ptr->recordList, &i))) { if (map->boxType == box_type) { if ((box_type != GF_ISOM_BOX_TYPE_UUID) || !uuid) return map; if (!memcmp(map->uuid, *uuid, 16)) return map; } } return NULL; } GF_Err udta_on_child_box(GF_Box *s, GF_Box *a) { GF_Err e; u32 box_type; GF_UserDataMap *map; GF_UserDataBox *ptr = (GF_UserDataBox *)s; if (!ptr) return GF_BAD_PARAM; if (!a) return GF_OK; //detach from parent list if any gf_list_del_item(ptr->child_boxes, a); /* for unknown udta boxes, we reference them by their original box type */ box_type = a->type; if (box_type == GF_ISOM_BOX_TYPE_UNKNOWN) { GF_UnknownBox* unkn = (GF_UnknownBox *)a; box_type = unkn->original_4cc; } map = udta_getEntry(ptr, box_type, (a->type==GF_ISOM_BOX_TYPE_UUID) ? 
& ((GF_UUIDBox *)a)->uuid : NULL); if (map == NULL) { map = (GF_UserDataMap *) gf_malloc(sizeof(GF_UserDataMap)); if (map == NULL) return GF_OUT_OF_MEM; memset(map, 0, sizeof(GF_UserDataMap)); map->boxType = box_type; if (a->type == GF_ISOM_BOX_TYPE_UUID) memcpy(map->uuid, ((GF_UUIDBox *)a)->uuid, 16); map->boxes = gf_list_new(); if (!map->boxes) { gf_free(map); return GF_OUT_OF_MEM; } e = gf_list_add(ptr->recordList, map); if (e) return e; } return gf_list_add(map->boxes, a); } GF_Err udta_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e = gf_isom_box_array_read(s, bs, udta_on_child_box); if (e) return e; if (s->size==4) { u32 val = gf_bs_read_u32(bs); s->size = 0; if (val) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] udta has 4 remaining bytes set to %08X but they should be 0\n", val)); } } return GF_OK; } GF_Box *udta_box_new() { ISOM_DECL_BOX_ALLOC(GF_UserDataBox, GF_ISOM_BOX_TYPE_UDTA); tmp->recordList = gf_list_new(); if (!tmp->recordList) { gf_free(tmp); return NULL; } return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err udta_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_UserDataMap *map; GF_UserDataBox *ptr = (GF_UserDataBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; i=0; while ((map = (GF_UserDataMap *)gf_list_enum(ptr->recordList, &i))) { //warning: here we are not passing the actual "parent" of the list //but the UDTA box. The parent itself is not an box, we don't care about it e = gf_isom_box_array_write(s, map->boxes, bs); if (e) return e; } return GF_OK; } GF_Err udta_box_size(GF_Box *s) { GF_Err e; u32 i; GF_UserDataMap *map; GF_UserDataBox *ptr = (GF_UserDataBox *)s; i=0; while ((map = (GF_UserDataMap *)gf_list_enum(ptr->recordList, &i))) { //warning: here we are not passing the actual "parent" of the list //but the UDTA box. 
The parent itself is not an box, we don't care about it e = gf_isom_box_array_size(s, map->boxes); if (e) return e; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void vmhd_box_del(GF_Box *s) { GF_VideoMediaHeaderBox *ptr = (GF_VideoMediaHeaderBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err vmhd_box_read(GF_Box *s, GF_BitStream *bs) { GF_VideoMediaHeaderBox *ptr = (GF_VideoMediaHeaderBox *)s; ISOM_DECREASE_SIZE(ptr, 8); ptr->reserved = gf_bs_read_u64(bs); return GF_OK; } GF_Box *vmhd_box_new() { ISOM_DECL_BOX_ALLOC(GF_VideoMediaHeaderBox, GF_ISOM_BOX_TYPE_VMHD); tmp->flags = 1; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err vmhd_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_VideoMediaHeaderBox *ptr = (GF_VideoMediaHeaderBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u64(bs, ptr->reserved); return GF_OK; } GF_Err vmhd_box_size(GF_Box *s) { GF_VideoMediaHeaderBox *ptr = (GF_VideoMediaHeaderBox *)s; ptr->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void void_box_del(GF_Box *s) { gf_free(s); } GF_Err void_box_read(GF_Box *s, GF_BitStream *bs) { if (s->size) return GF_ISOM_INVALID_FILE; return GF_OK; } GF_Box *void_box_new() { ISOM_DECL_BOX_ALLOC(GF_Box, GF_ISOM_BOX_TYPE_VOID); return tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err void_box_write(GF_Box *s, GF_BitStream *bs) { gf_bs_write_u32(bs, 0); return GF_OK; } GF_Err void_box_size(GF_Box *s) { s->size = 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *pdin_box_new() { ISOM_DECL_BOX_ALLOC(GF_ProgressiveDownloadBox, GF_ISOM_BOX_TYPE_PDIN); return (GF_Box *)tmp; } void pdin_box_del(GF_Box *s) { GF_ProgressiveDownloadBox *ptr = (GF_ProgressiveDownloadBox*)s; if (ptr == NULL) return; if (ptr->rates) gf_free(ptr->rates); if (ptr->times) gf_free(ptr->times); gf_free(ptr); } GF_Err pdin_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_ProgressiveDownloadBox *ptr = (GF_ProgressiveDownloadBox*)s; ptr->count = (u32) (ptr->size) / 8; ptr->rates = (u32*)gf_malloc(sizeof(u32)*ptr->count); if (!ptr->rates) return GF_OUT_OF_MEM; ptr->times = (u32*)gf_malloc(sizeof(u32)*ptr->count); if (!ptr->times) return GF_OUT_OF_MEM; for (i=0; i<ptr->count; i++) { ptr->rates[i] = gf_bs_read_u32(bs); ptr->times[i] = gf_bs_read_u32(bs); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err pdin_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_ProgressiveDownloadBox *ptr = (GF_ProgressiveDownloadBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; for (i=0; i<ptr->count; i++) { gf_bs_write_u32(bs, ptr->rates[i]); gf_bs_write_u32(bs, ptr->times[i]); } return GF_OK; } GF_Err pdin_box_size(GF_Box *s) { GF_ProgressiveDownloadBox *ptr = (GF_ProgressiveDownloadBox *)s; ptr->size += 8*ptr->count; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *sdtp_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleDependencyTypeBox, GF_ISOM_BOX_TYPE_SDTP); return (GF_Box *)tmp; } void sdtp_box_del(GF_Box *s) { GF_SampleDependencyTypeBox *ptr = (GF_SampleDependencyTypeBox*)s; if (ptr == NULL) return; if (ptr->sample_info) gf_free(ptr->sample_info); gf_free(ptr); } GF_Err sdtp_box_read(GF_Box *s, GF_BitStream *bs) { GF_SampleDependencyTypeBox *ptr = (GF_SampleDependencyTypeBox*)s; /*out-of-order sdtp, assume no padding at the end*/ if (!ptr->sampleCount) ptr->sampleCount = (u32) ptr->size; else if (ptr->sampleCount > (u32) ptr->size) return GF_ISOM_INVALID_FILE; ptr->sample_info = (u8 *) gf_malloc(sizeof(u8)*ptr->sampleCount); if (!ptr->sample_info) return GF_OUT_OF_MEM; 
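/* remember the allocated entry count, then read one dependency byte per sample and charge it against the remaining box size */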
ptr->sample_alloc = ptr->sampleCount; gf_bs_read_data(bs, (char*)ptr->sample_info, ptr->sampleCount); ISOM_DECREASE_SIZE(ptr, ptr->sampleCount); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err sdtp_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SampleDependencyTypeBox *ptr = (GF_SampleDependencyTypeBox *)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_data(bs, (char*)ptr->sample_info, ptr->sampleCount); return GF_OK; } GF_Err sdtp_box_size(GF_Box *s) { GF_SampleDependencyTypeBox *ptr = (GF_SampleDependencyTypeBox *)s; ptr->size += ptr->sampleCount; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *pasp_box_new() { ISOM_DECL_BOX_ALLOC(GF_PixelAspectRatioBox, GF_ISOM_BOX_TYPE_PASP); return (GF_Box *)tmp; } void pasp_box_del(GF_Box *s) { GF_PixelAspectRatioBox *ptr = (GF_PixelAspectRatioBox*)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err pasp_box_read(GF_Box *s, GF_BitStream *bs) { GF_PixelAspectRatioBox *ptr = (GF_PixelAspectRatioBox*)s; ISOM_DECREASE_SIZE(ptr, 8); ptr->hSpacing = gf_bs_read_u32(bs); ptr->vSpacing = gf_bs_read_u32(bs); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err pasp_box_write(GF_Box *s, GF_BitStream *bs) { GF_PixelAspectRatioBox *ptr = (GF_PixelAspectRatioBox *)s; GF_Err e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->hSpacing); gf_bs_write_u32(bs, ptr->vSpacing); return GF_OK; } GF_Err pasp_box_size(GF_Box *s) { s->size += 8; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *clap_box_new() { ISOM_DECL_BOX_ALLOC(GF_CleanApertureBox, GF_ISOM_BOX_TYPE_CLAP); return (GF_Box *)tmp; } void clap_box_del(GF_Box *s) { GF_CleanApertureBox *ptr = (GF_CleanApertureBox*)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err clap_box_read(GF_Box *s, GF_BitStream *bs) { GF_CleanApertureBox *ptr = (GF_CleanApertureBox*)s; ISOM_DECREASE_SIZE(ptr, 32); ptr->cleanApertureWidthN = gf_bs_read_u32(bs); ptr->cleanApertureWidthD = gf_bs_read_u32(bs); ptr->cleanApertureHeightN = gf_bs_read_u32(bs); ptr->cleanApertureHeightD = gf_bs_read_u32(bs); ptr->horizOffN = gf_bs_read_u32(bs); ptr->horizOffD = gf_bs_read_u32(bs); ptr->vertOffN = gf_bs_read_u32(bs); ptr->vertOffD = gf_bs_read_u32(bs); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err clap_box_write(GF_Box *s, GF_BitStream *bs) { GF_CleanApertureBox *ptr = (GF_CleanApertureBox *)s; GF_Err e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->cleanApertureWidthN); gf_bs_write_u32(bs, ptr->cleanApertureWidthD); gf_bs_write_u32(bs, ptr->cleanApertureHeightN); gf_bs_write_u32(bs, ptr->cleanApertureHeightD); gf_bs_write_u32(bs, ptr->horizOffN); gf_bs_write_u32(bs, ptr->horizOffD); gf_bs_write_u32(bs, ptr->vertOffN); gf_bs_write_u32(bs, ptr->vertOffD); return GF_OK; } GF_Err clap_box_size(GF_Box *s) { s->size += 32; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *metx_box_new() { //type is overridden by the box constructor ISOM_DECL_BOX_ALLOC(GF_MetaDataSampleEntryBox, GF_ISOM_BOX_TYPE_METX); gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp); return (GF_Box *)tmp; } void metx_box_del(GF_Box *s) { GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox*)s; if (ptr == NULL) return; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s); if (ptr->content_encoding) gf_free(ptr->content_encoding); if (ptr->xml_namespace) gf_free(ptr->xml_namespace); if (ptr->xml_schema_loc) gf_free(ptr->xml_schema_loc); if (ptr->mime_type) gf_free(ptr->mime_type); gf_free(ptr); } GF_Err metx_on_child_box(GF_Box *s, GF_Box *a) { 
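/* children of the metadata sample entry: sinf stays in the generic child list, txtc is kept as the entry's text config (duplicates rejected), other boxes are ignored here */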
GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_SINF: return GF_OK; case GF_ISOM_BOX_TYPE_TXTC: //we allow the config box on metx if (ptr->config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->config = (GF_TextConfigBox *)a; break; } return GF_OK; } GF_Err metx_box_read(GF_Box *s, GF_BitStream *bs) { u32 size, i; GF_Err e; char *str; GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox*)s; e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)ptr, bs); if (e) return e; ISOM_DECREASE_SIZE(ptr, 8); size = (u32) ptr->size; str = gf_malloc(sizeof(char)*size); if (!str) return GF_OUT_OF_MEM; i=0; while (size) { str[i] = gf_bs_read_u8(bs); size--; if (!str[i]) { i++; break; } i++; } if (!size && i>1 && str[i-1]) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] metx read invalid string\n")); gf_free(str); return GF_ISOM_INVALID_FILE; } if (i>1) { if (ptr->type==GF_ISOM_BOX_TYPE_STPP) { ptr->xml_namespace = gf_strdup(str); } else { ptr->content_encoding = gf_strdup(str); } } i=0; while (size) { str[i] = gf_bs_read_u8(bs); size--; if (!str[i]) { i++; break; } i++; } if (!size && i>1 && str[i-1]) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] metx read invalid string\n")); gf_free(str); return GF_ISOM_INVALID_FILE; } if ((ptr->type==GF_ISOM_BOX_TYPE_METX) || (ptr->type==GF_ISOM_BOX_TYPE_STPP)) { if (i>1) { if (ptr->type==GF_ISOM_BOX_TYPE_STPP) { ptr->xml_schema_loc = gf_strdup(str); } else { ptr->xml_namespace = gf_strdup(str); } } i=0; while (size) { str[i] = gf_bs_read_u8(bs); size--; if (!str[i]) { i++; break; } i++; } if (!size && i>1 && str[i-1]) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] metx read invalid string\n")); gf_free(str); return GF_ISOM_INVALID_FILE; } if (i>1) { if (ptr->type==GF_ISOM_BOX_TYPE_STPP) { ptr->mime_type = gf_strdup(str); } else { ptr->xml_schema_loc = gf_strdup(str); } } } //mett, sbtt, stxt, stpp else { if (i>1) ptr->mime_type = gf_strdup(str); } ptr->size = size; gf_free(str); return gf_isom_box_array_read(s, bs, metx_on_child_box); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err metx_box_write(GF_Box *s, GF_BitStream *bs) { GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox *)s; GF_Err e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_data(bs, ptr->reserved, 6); gf_bs_write_u16(bs, ptr->dataReferenceIndex); if (ptr->type!=GF_ISOM_BOX_TYPE_STPP) { if (ptr->content_encoding) gf_bs_write_data(bs, ptr->content_encoding, (u32) strlen(ptr->content_encoding)); gf_bs_write_u8(bs, 0); } if ((ptr->type==GF_ISOM_BOX_TYPE_METX) || (ptr->type==GF_ISOM_BOX_TYPE_STPP)) { if (ptr->xml_namespace) gf_bs_write_data(bs, ptr->xml_namespace, (u32) strlen(ptr->xml_namespace)); gf_bs_write_u8(bs, 0); if (ptr->xml_schema_loc) gf_bs_write_data(bs, ptr->xml_schema_loc, (u32) strlen(ptr->xml_schema_loc)); gf_bs_write_u8(bs, 0); if (ptr->type==GF_ISOM_BOX_TYPE_STPP) { if (ptr->mime_type) gf_bs_write_data(bs, ptr->mime_type, (u32) strlen(ptr->mime_type)); gf_bs_write_u8(bs, 0); } } //mett, sbtt, stxt else { if (ptr->mime_type) gf_bs_write_data(bs, ptr->mime_type, (u32) strlen(ptr->mime_type)); gf_bs_write_u8(bs, 0); } return GF_OK; } GF_Err metx_box_size(GF_Box *s) { GF_MetaDataSampleEntryBox *ptr = (GF_MetaDataSampleEntryBox *)s; ptr->size += 8; if (ptr->type!=GF_ISOM_BOX_TYPE_STPP) { if (ptr->content_encoding) ptr->size += strlen(ptr->content_encoding); ptr->size++; } if ((ptr->type==GF_ISOM_BOX_TYPE_METX) || (ptr->type==GF_ISOM_BOX_TYPE_STPP)) { if (ptr->xml_namespace) ptr->size += 
strlen(ptr->xml_namespace); ptr->size++; if (ptr->xml_schema_loc) ptr->size += strlen(ptr->xml_schema_loc); ptr->size++; if (ptr->type==GF_ISOM_BOX_TYPE_STPP) { if (ptr->mime_type) ptr->size += strlen(ptr->mime_type); ptr->size++; } } //mett, sbtt, stxt else { if (ptr->mime_type) ptr->size += strlen(ptr->mime_type); ptr->size++; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* SimpleTextSampleEntry */ GF_Box *txtc_box_new() { ISOM_DECL_BOX_ALLOC(GF_TextConfigBox, GF_ISOM_BOX_TYPE_TXTC); return (GF_Box *)tmp; } void txtc_box_del(GF_Box *s) { GF_TextConfigBox *ptr = (GF_TextConfigBox*)s; if (ptr == NULL) return; if (ptr->config) gf_free(ptr->config); gf_free(ptr); } GF_Err txtc_box_read(GF_Box *s, GF_BitStream *bs) { GF_TextConfigBox *ptr = (GF_TextConfigBox*)s; ptr->config = (char *)gf_malloc(sizeof(char)*((u32) ptr->size+1)); if (!ptr->config) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->config, (u32) ptr->size); ptr->config[ptr->size] = 0; return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err txtc_box_write(GF_Box *s, GF_BitStream *bs) { GF_TextConfigBox *ptr = (GF_TextConfigBox *)s; GF_Err e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->config) gf_bs_write_data(bs, ptr->config, (u32) strlen(ptr->config)); gf_bs_write_u8(bs, 0); return GF_OK; } GF_Err txtc_box_size(GF_Box *s) { GF_TextConfigBox *ptr = (GF_TextConfigBox *)s; if (ptr->config) ptr->size += strlen(ptr->config); ptr->size++; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *dac3_box_new() { ISOM_DECL_BOX_ALLOC(GF_AC3ConfigBox, GF_ISOM_BOX_TYPE_DAC3); return (GF_Box *)tmp; } GF_Box *dec3_box_new() { ISOM_DECL_BOX_ALLOC(GF_AC3ConfigBox, GF_ISOM_BOX_TYPE_DAC3); tmp->cfg.is_ec3 = 1; return (GF_Box *)tmp; } void dac3_box_del(GF_Box *s) { GF_AC3ConfigBox *ptr = (GF_AC3ConfigBox *)s; gf_free(ptr); } GF_Err gf_isom_ac3_config_parse_bs(GF_BitStream *bs, Bool is_ec3, GF_AC3Config *cfg); GF_Err dac3_box_read(GF_Box *s, GF_BitStream *bs) { GF_AC3ConfigBox *ptr = (GF_AC3ConfigBox *)s; if (ptr == NULL) return GF_BAD_PARAM; return gf_isom_ac3_config_parse_bs(bs, ptr->cfg.is_ec3, &ptr->cfg); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dac3_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_AC3ConfigBox *ptr = (GF_AC3ConfigBox *)s; if (ptr->cfg.is_ec3) s->type = GF_ISOM_BOX_TYPE_DEC3; e = gf_isom_box_write_header(s, bs); if (ptr->cfg.is_ec3) s->type = GF_ISOM_BOX_TYPE_DAC3; if (e) return e; if (ptr->cfg.is_ec3) { u32 i; gf_bs_write_int(bs, ptr->cfg.brcode, 13); gf_bs_write_int(bs, ptr->cfg.nb_streams - 1, 3); for (i=0; i<ptr->cfg.nb_streams; i++) { gf_bs_write_int(bs, ptr->cfg.streams[i].fscod, 2); gf_bs_write_int(bs, ptr->cfg.streams[i].bsid, 5); gf_bs_write_int(bs, ptr->cfg.streams[i].bsmod, 5); gf_bs_write_int(bs, ptr->cfg.streams[i].acmod, 3); gf_bs_write_int(bs, ptr->cfg.streams[i].lfon, 1); gf_bs_write_int(bs, 0, 3); gf_bs_write_int(bs, ptr->cfg.streams[i].nb_dep_sub, 4); if (ptr->cfg.streams[i].nb_dep_sub) { gf_bs_write_int(bs, ptr->cfg.streams[i].chan_loc, 9); } else { gf_bs_write_int(bs, 0, 1); } } } else { gf_bs_write_int(bs, ptr->cfg.streams[0].fscod, 2); gf_bs_write_int(bs, ptr->cfg.streams[0].bsid, 5); gf_bs_write_int(bs, ptr->cfg.streams[0].bsmod, 3); gf_bs_write_int(bs, ptr->cfg.streams[0].acmod, 3); gf_bs_write_int(bs, ptr->cfg.streams[0].lfon, 1); gf_bs_write_int(bs, ptr->cfg.brcode, 5); gf_bs_write_int(bs, 0, 5); } return GF_OK; } GF_Err dac3_box_size(GF_Box *s) { GF_AC3ConfigBox *ptr = (GF_AC3ConfigBox *)s; if (ptr->cfg.is_ec3) { u32 i; s->size += 2; for (i=0; i<ptr->cfg.nb_streams; i++) { 
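/* one independent substream: fscod, bsid, bsmod, acmod, lfon, 3 reserved bits, then the dependent substream count followed by chan_loc or a single reserved bit */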
s->size += 3; if (ptr->cfg.streams[i].nb_dep_sub) s->size += 1; } } else { s->size += 3; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void lsrc_box_del(GF_Box *s) { GF_LASERConfigurationBox *ptr = (GF_LASERConfigurationBox *)s; if (ptr == NULL) return; if (ptr->hdr) gf_free(ptr->hdr); gf_free(ptr); } GF_Err lsrc_box_read(GF_Box *s, GF_BitStream *bs) { GF_LASERConfigurationBox *ptr = (GF_LASERConfigurationBox *)s; ptr->hdr_size = (u32) ptr->size; ptr->hdr = gf_malloc(sizeof(char)*ptr->hdr_size); if (!ptr->hdr) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->hdr, ptr->hdr_size); return GF_OK; } GF_Box *lsrc_box_new() { ISOM_DECL_BOX_ALLOC(GF_LASERConfigurationBox, GF_ISOM_BOX_TYPE_LSRC); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err lsrc_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_LASERConfigurationBox *ptr = (GF_LASERConfigurationBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_data(bs, ptr->hdr, ptr->hdr_size); return GF_OK; } GF_Err lsrc_box_size(GF_Box *s) { GF_LASERConfigurationBox *ptr = (GF_LASERConfigurationBox *)s; ptr->size += ptr->hdr_size; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void lsr1_box_del(GF_Box *s) { GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox *)s; if (ptr == NULL) return; gf_isom_sample_entry_predestroy((GF_SampleEntryBox *)s); if (ptr->slc) gf_odf_desc_del((GF_Descriptor *)ptr->slc); gf_free(ptr); } GF_Err lsr1_on_child_box(GF_Box *s, GF_Box *a) { GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox *)s; switch (a->type) { case GF_ISOM_BOX_TYPE_LSRC: if (ptr->lsr_config) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->lsr_config = (GF_LASERConfigurationBox *)a; break; case GF_ISOM_BOX_TYPE_M4DS: if (ptr->descr) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->descr = (GF_MPEG4ExtensionDescriptorsBox *)a; break; } return GF_OK; } GF_Err lsr1_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox*)s; e = gf_isom_base_sample_entry_read((GF_SampleEntryBox *)ptr, bs); if (e) return e; ISOM_DECREASE_SIZE(ptr, 8); return gf_isom_box_array_read(s, bs, lsr1_on_child_box); } GF_Box *lsr1_box_new() { ISOM_DECL_BOX_ALLOC(GF_LASeRSampleEntryBox, GF_ISOM_BOX_TYPE_LSR1); gf_isom_sample_entry_init((GF_SampleEntryBox*)tmp); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err lsr1_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox *)s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_data(bs, ptr->reserved, 6); gf_bs_write_u16(bs, ptr->dataReferenceIndex); return GF_OK; } GF_Err lsr1_box_size(GF_Box *s) { u32 pos=0; GF_LASeRSampleEntryBox *ptr = (GF_LASeRSampleEntryBox *)s; s->size += 8; gf_isom_check_position(s, (GF_Box *)ptr->lsr_config, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void sidx_box_del(GF_Box *s) { GF_SegmentIndexBox *ptr = (GF_SegmentIndexBox *) s; if (ptr == NULL) return; if (ptr->refs) gf_free(ptr->refs); gf_free(ptr); } GF_Err sidx_box_read(GF_Box *s,GF_BitStream *bs) { u32 i; GF_SegmentIndexBox *ptr = (GF_SegmentIndexBox*) s; ISOM_DECREASE_SIZE(ptr, 8); ptr->reference_ID = gf_bs_read_u32(bs); ptr->timescale = gf_bs_read_u32(bs); if (ptr->version==0) { ISOM_DECREASE_SIZE(ptr, 8); ptr->earliest_presentation_time = gf_bs_read_u32(bs); ptr->first_offset = gf_bs_read_u32(bs); } else { ISOM_DECREASE_SIZE(ptr, 16); ptr->earliest_presentation_time = gf_bs_read_u64(bs); ptr->first_offset = gf_bs_read_u64(bs); } ISOM_DECREASE_SIZE(ptr, 4); gf_bs_read_u16(bs); /* reserved */ 
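/* reference_count follows, then one 12-byte entry per reference; the per-entry ISOM_DECREASE_SIZE in the loop below aborts on truncated boxes */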
ptr->nb_refs = gf_bs_read_u16(bs); ptr->refs = gf_malloc(sizeof(GF_SIDXReference)*ptr->nb_refs); if (!ptr->refs) return GF_OUT_OF_MEM; for (i=0; i<ptr->nb_refs; i++) { ptr->refs[i].reference_type = gf_bs_read_int(bs, 1); ptr->refs[i].reference_size = gf_bs_read_int(bs, 31); ptr->refs[i].subsegment_duration = gf_bs_read_u32(bs); ptr->refs[i].starts_with_SAP = gf_bs_read_int(bs, 1); ptr->refs[i].SAP_type = gf_bs_read_int(bs, 3); ptr->refs[i].SAP_delta_time = gf_bs_read_int(bs, 28); ISOM_DECREASE_SIZE(ptr, 12); } return GF_OK; } GF_Box *sidx_box_new() { ISOM_DECL_BOX_ALLOC(GF_SegmentIndexBox, GF_ISOM_BOX_TYPE_SIDX); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err sidx_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_SegmentIndexBox *ptr = (GF_SegmentIndexBox*) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->reference_ID); gf_bs_write_u32(bs, ptr->timescale); if (ptr->version==0) { gf_bs_write_u32(bs, (u32) ptr->earliest_presentation_time); gf_bs_write_u32(bs, (u32) ptr->first_offset); } else { gf_bs_write_u64(bs, ptr->earliest_presentation_time); gf_bs_write_u64(bs, ptr->first_offset); } gf_bs_write_u16(bs, 0); gf_bs_write_u16(bs, ptr->nb_refs); for (i=0; i<ptr->nb_refs; i++ ) { gf_bs_write_int(bs, ptr->refs[i].reference_type, 1); gf_bs_write_int(bs, ptr->refs[i].reference_size, 31); gf_bs_write_u32(bs, ptr->refs[i].subsegment_duration); gf_bs_write_int(bs, ptr->refs[i].starts_with_SAP, 1); gf_bs_write_int(bs, ptr->refs[i].SAP_type, 3); gf_bs_write_int(bs, ptr->refs[i].SAP_delta_time, 28); } return GF_OK; } GF_Err sidx_box_size(GF_Box *s) { GF_SegmentIndexBox *ptr = (GF_SegmentIndexBox*) s; ptr->size += 12; if (ptr->version==0) { ptr->size += 8; } else { ptr->size += 16; } ptr->size += ptr->nb_refs * 12; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void ssix_box_del(GF_Box *s) { u32 i; GF_SubsegmentIndexBox *ptr = (GF_SubsegmentIndexBox *)s; if (ptr == NULL) return; if (ptr->subsegments) { for (i = 0; i < ptr->subsegment_alloc; i++) { GF_SubsegmentInfo *subsegment = &ptr->subsegments[i]; if (subsegment->ranges) gf_free(subsegment->ranges); } gf_free(ptr->subsegments); } gf_free(ptr); } GF_Err ssix_box_read(GF_Box *s, GF_BitStream *bs) { u32 i,j; GF_SubsegmentIndexBox *ptr = (GF_SubsegmentIndexBox*)s; ISOM_DECREASE_SIZE(ptr, 4) ptr->subsegment_count = gf_bs_read_u32(bs); //each subseg has at least one range_count (4 bytes), abort if not enough bytes (broken box) if (ptr->size < ptr->subsegment_count*4) return GF_ISOM_INVALID_FILE; GF_SAFE_ALLOC_N(ptr->subsegments, ptr->subsegment_count, GF_SubsegmentInfo); if (!ptr->subsegments) return GF_OUT_OF_MEM; for (i = 0; i < ptr->subsegment_count; i++) { GF_SubsegmentInfo *subseg = &ptr->subsegments[i]; ISOM_DECREASE_SIZE(ptr, 4) subseg->range_count = gf_bs_read_u32(bs); //each range is 4 bytes, abort if not enough bytes if (ptr->size < subseg->range_count*4) return GF_ISOM_INVALID_FILE; subseg->ranges = (GF_SubsegmentRangeInfo*) gf_malloc(sizeof(GF_SubsegmentRangeInfo) * subseg->range_count); if (!subseg->ranges) return GF_OUT_OF_MEM; for (j = 0; j < subseg->range_count; j++) { ISOM_DECREASE_SIZE(ptr, 4) subseg->ranges[j].level = gf_bs_read_u8(bs); subseg->ranges[j].range_size = gf_bs_read_u24(bs); } } return GF_OK; } GF_Box *ssix_box_new() { ISOM_DECL_BOX_ALLOC(GF_SubsegmentIndexBox, GF_ISOM_BOX_TYPE_SSIX); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err ssix_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i, j; GF_SubsegmentIndexBox *ptr = 
(GF_SubsegmentIndexBox*)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->subsegment_count); for (i = 0; i<ptr->subsegment_count; i++) { gf_bs_write_u32(bs, ptr->subsegments[i].range_count); for (j = 0; j < ptr->subsegments[i].range_count; j++) { gf_bs_write_u8(bs, ptr->subsegments[i].ranges[j].level); gf_bs_write_u24(bs, ptr->subsegments[i].ranges[j].range_size); } } return GF_OK; } GF_Err ssix_box_size(GF_Box *s) { u32 i; GF_SubsegmentIndexBox *ptr = (GF_SubsegmentIndexBox*)s; ptr->size += 4; for (i = 0; i < ptr->subsegment_count; i++) { ptr->size += 4 + 4 * ptr->subsegments[i].range_count; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void leva_box_del(GF_Box *s) { GF_LevelAssignmentBox *ptr = (GF_LevelAssignmentBox *)s; if (ptr == NULL) return; if (ptr->levels) gf_free(ptr->levels); gf_free(ptr); } GF_Err leva_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_LevelAssignmentBox *ptr = (GF_LevelAssignmentBox*)s; ISOM_DECREASE_SIZE(ptr, 1) ptr->level_count = gf_bs_read_u8(bs); //each level is at least 5 bytes if (ptr->size < ptr->level_count * 5) return GF_ISOM_INVALID_FILE; GF_SAFE_ALLOC_N(ptr->levels, ptr->level_count, GF_LevelAssignment); if (!ptr->levels) return GF_OUT_OF_MEM; for (i = 0; i < ptr->level_count; i++) { GF_LevelAssignment *level = &ptr->levels[i]; u8 tmp; if (!level || ptr->size < 5) return GF_BAD_PARAM; ISOM_DECREASE_SIZE(ptr, 5) level->track_id = gf_bs_read_u32(bs); tmp = gf_bs_read_u8(bs); level->padding_flag = tmp >> 7; level->type = tmp & 0x7F; if (level->type == 0) { ISOM_DECREASE_SIZE(ptr, 4) level->grouping_type = gf_bs_read_u32(bs); } else if (level->type == 1) { ISOM_DECREASE_SIZE(ptr, 8) level->grouping_type = gf_bs_read_u32(bs); level->grouping_type_parameter = gf_bs_read_u32(bs); } else if (level->type == 4) { ISOM_DECREASE_SIZE(ptr, 4) level->sub_track_id = gf_bs_read_u32(bs); } } return GF_OK; } GF_Box *leva_box_new() { ISOM_DECL_BOX_ALLOC(GF_LevelAssignmentBox, GF_ISOM_BOX_TYPE_LEVA); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err leva_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_LevelAssignmentBox *ptr = (GF_LevelAssignmentBox*)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->level_count); for (i = 0; i<ptr->level_count; i++) { gf_bs_write_u32(bs, ptr->levels[i].track_id); gf_bs_write_u8(bs, ptr->levels[i].padding_flag << 7 | (ptr->levels[i].type & 0x7F)); if (ptr->levels[i].type == 0) { gf_bs_write_u32(bs, ptr->levels[i].grouping_type); } else if (ptr->levels[i].type == 1) { gf_bs_write_u32(bs, ptr->levels[i].grouping_type); gf_bs_write_u32(bs, ptr->levels[i].grouping_type_parameter); } else if (ptr->levels[i].type == 4) { gf_bs_write_u32(bs, ptr->levels[i].sub_track_id); } } return GF_OK; } GF_Err leva_box_size(GF_Box *s) { u32 i; GF_LevelAssignmentBox *ptr = (GF_LevelAssignmentBox*)s; ptr->size += 1; for (i = 0; i < ptr->level_count; i++) { ptr->size += 5; if (ptr->levels[i].type == 0 || ptr->levels[i].type == 4) { ptr->size += 4; } else if (ptr->levels[i].type == 1) { ptr->size += 8; } } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *pcrb_box_new() { ISOM_DECL_BOX_ALLOC(GF_PcrInfoBox, GF_ISOM_BOX_TYPE_PCRB); return (GF_Box *)tmp; } void pcrb_box_del(GF_Box *s) { GF_PcrInfoBox *ptr = (GF_PcrInfoBox *) s; if (ptr == NULL) return; if (ptr->pcr_values) gf_free(ptr->pcr_values); gf_free(ptr); } GF_Err pcrb_box_read(GF_Box *s,GF_BitStream *bs) { u32 i; GF_PcrInfoBox *ptr = (GF_PcrInfoBox*) s; ISOM_DECREASE_SIZE(ptr, 4); ptr->subsegment_count = 
gf_bs_read_u32(bs); ptr->pcr_values = gf_malloc(sizeof(u64)*ptr->subsegment_count); if (!ptr->pcr_values) return GF_OUT_OF_MEM; for (i=0; i<ptr->subsegment_count; i++) { u64 data1 = gf_bs_read_u32(bs); u64 data2 = gf_bs_read_u16(bs); ISOM_DECREASE_SIZE(ptr, 6); ptr->pcr_values[i] = (data1 << 10) | (data2 >> 6); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err pcrb_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_PcrInfoBox *ptr = (GF_PcrInfoBox*) s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->subsegment_count); for (i=0; i<ptr->subsegment_count; i++ ) { u32 data1 = (u32) (ptr->pcr_values[i] >> 10); u16 data2 = (u16) (ptr->pcr_values[i] << 6); gf_bs_write_u32(bs, data1); gf_bs_write_u16(bs, data2); } return GF_OK; } GF_Err pcrb_box_size(GF_Box *s) { GF_PcrInfoBox *ptr = (GF_PcrInfoBox*) s; ptr->size += 4; ptr->size += ptr->subsegment_count * 6; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *subs_box_new() { ISOM_DECL_BOX_ALLOC(GF_SubSampleInformationBox, GF_ISOM_BOX_TYPE_SUBS); tmp->Samples = gf_list_new(); return (GF_Box *)tmp; } void subs_box_del(GF_Box *s) { GF_SubSampleInformationBox *ptr = (GF_SubSampleInformationBox *)s; if (ptr == NULL) return; while (gf_list_count(ptr->Samples)) { GF_SubSampleInfoEntry *pSamp; pSamp = (GF_SubSampleInfoEntry*)gf_list_get(ptr->Samples, 0); while (gf_list_count(pSamp->SubSamples)) { GF_SubSampleEntry *pSubSamp; pSubSamp = (GF_SubSampleEntry*) gf_list_get(pSamp->SubSamples, 0); gf_free(pSubSamp); gf_list_rem(pSamp->SubSamples, 0); } gf_list_del(pSamp->SubSamples); gf_free(pSamp); gf_list_rem(ptr->Samples, 0); } gf_list_del(ptr->Samples); gf_free(ptr); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err subs_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i, j, entry_count; u16 subsample_count; GF_SubSampleEntry *pSubSamp; GF_SubSampleInformationBox *ptr = (GF_SubSampleInformationBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; entry_count = gf_list_count(ptr->Samples); gf_bs_write_u32(bs, entry_count); for (i=0; i<entry_count; i++) { GF_SubSampleInfoEntry *pSamp = (GF_SubSampleInfoEntry*) gf_list_get(ptr->Samples, i); subsample_count = gf_list_count(pSamp->SubSamples); gf_bs_write_u32(bs, pSamp->sample_delta); gf_bs_write_u16(bs, subsample_count); for (j=0; j<subsample_count; j++) { pSubSamp = (GF_SubSampleEntry*) gf_list_get(pSamp->SubSamples, j); if (ptr->version == 1) { gf_bs_write_u32(bs, pSubSamp->subsample_size); } else { gf_bs_write_u16(bs, pSubSamp->subsample_size); } gf_bs_write_u8(bs, pSubSamp->subsample_priority); gf_bs_write_u8(bs, pSubSamp->discardable); gf_bs_write_u32(bs, pSubSamp->reserved); } } return e; } GF_Err subs_box_size(GF_Box *s) { GF_SubSampleInformationBox *ptr = (GF_SubSampleInformationBox *) s; u32 entry_count, i; u16 subsample_count; // add 4 byte for entry_count ptr->size += 4; entry_count = gf_list_count(ptr->Samples); for (i=0; i<entry_count; i++) { GF_SubSampleInfoEntry *pSamp = (GF_SubSampleInfoEntry*) gf_list_get(ptr->Samples, i); subsample_count = gf_list_count(pSamp->SubSamples); // 4 byte for sample_delta, 2 byte for subsample_count // and 6 + (4 or 2) bytes for each subsample ptr->size += 4 + 2 + subsample_count * (6 + (ptr->version==1 ? 
4 : 2)); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Err subs_box_read(GF_Box *s, GF_BitStream *bs) { GF_SubSampleInformationBox *ptr = (GF_SubSampleInformationBox *)s; u32 entry_count, i, j; u16 subsample_count; ISOM_DECREASE_SIZE(ptr, 4); entry_count = gf_bs_read_u32(bs); for (i=0; i<entry_count; i++) { u32 subs_size=0; GF_SubSampleInfoEntry *pSamp = (GF_SubSampleInfoEntry*) gf_malloc(sizeof(GF_SubSampleInfoEntry)); if (!pSamp) return GF_OUT_OF_MEM; memset(pSamp, 0, sizeof(GF_SubSampleInfoEntry)); pSamp->SubSamples = gf_list_new(); pSamp->sample_delta = gf_bs_read_u32(bs); subsample_count = gf_bs_read_u16(bs); subs_size=6; for (j=0; j<subsample_count; j++) { GF_SubSampleEntry *pSubSamp = (GF_SubSampleEntry*) gf_malloc(sizeof(GF_SubSampleEntry)); if (!pSubSamp) return GF_OUT_OF_MEM; memset(pSubSamp, 0, sizeof(GF_SubSampleEntry)); if (ptr->version==1) { pSubSamp->subsample_size = gf_bs_read_u32(bs); subs_size+=4; } else { pSubSamp->subsample_size = gf_bs_read_u16(bs); subs_size+=2; } pSubSamp->subsample_priority = gf_bs_read_u8(bs); pSubSamp->discardable = gf_bs_read_u8(bs); pSubSamp->reserved = gf_bs_read_u32(bs); subs_size+=6; gf_list_add(pSamp->SubSamples, pSubSamp); } gf_list_add(ptr->Samples, pSamp); ISOM_DECREASE_SIZE(ptr, subs_size); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_FRAGMENTS GF_Box *tfdt_box_new() { ISOM_DECL_BOX_ALLOC(GF_TFBaseMediaDecodeTimeBox, GF_ISOM_BOX_TYPE_TFDT); return (GF_Box *)tmp; } void tfdt_box_del(GF_Box *s) { gf_free(s); } /*this is using chpl format according to some NeroRecode samples*/ GF_Err tfdt_box_read(GF_Box *s,GF_BitStream *bs) { GF_TFBaseMediaDecodeTimeBox *ptr = (GF_TFBaseMediaDecodeTimeBox *)s; if (ptr->version==1) { ISOM_DECREASE_SIZE(ptr, 8); ptr->baseMediaDecodeTime = gf_bs_read_u64(bs); } else { ISOM_DECREASE_SIZE(ptr, 4); ptr->baseMediaDecodeTime = (u32) gf_bs_read_u32(bs); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err tfdt_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TFBaseMediaDecodeTimeBox *ptr = (GF_TFBaseMediaDecodeTimeBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->version==1) { gf_bs_write_u64(bs, ptr->baseMediaDecodeTime); } else { gf_bs_write_u32(bs, (u32) ptr->baseMediaDecodeTime); } return GF_OK; } GF_Err tfdt_box_size(GF_Box *s) { GF_TFBaseMediaDecodeTimeBox *ptr = (GF_TFBaseMediaDecodeTimeBox *)s; if (ptr->baseMediaDecodeTime<=0xFFFFFFFF) { ptr->version = 0; ptr->size += 4; } else { ptr->version = 1; ptr->size += 8; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_FRAGMENTS*/ GF_Box *rvcc_box_new() { ISOM_DECL_BOX_ALLOC(GF_RVCConfigurationBox, GF_ISOM_BOX_TYPE_RVCC); return (GF_Box *)tmp; } void rvcc_box_del(GF_Box *s) { gf_free(s); } GF_Err rvcc_box_read(GF_Box *s,GF_BitStream *bs) { GF_RVCConfigurationBox *ptr = (GF_RVCConfigurationBox*)s; ISOM_DECREASE_SIZE(ptr, 2); ptr->predefined_rvc_config = gf_bs_read_u16(bs); if (!ptr->predefined_rvc_config) { ISOM_DECREASE_SIZE(ptr, 2); ptr->rvc_meta_idx = gf_bs_read_u16(bs); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err rvcc_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_RVCConfigurationBox *ptr = (GF_RVCConfigurationBox*) s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u16(bs, ptr->predefined_rvc_config); if (!ptr->predefined_rvc_config) { gf_bs_write_u16(bs, ptr->rvc_meta_idx); } return GF_OK; } GF_Err rvcc_box_size(GF_Box *s) { GF_RVCConfigurationBox *ptr = (GF_RVCConfigurationBox *)s; ptr->size += 2; if (! 
ptr->predefined_rvc_config) ptr->size += 2; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *sbgp_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleGroupBox, GF_ISOM_BOX_TYPE_SBGP); return (GF_Box *)tmp; } void sbgp_box_del(GF_Box *a) { GF_SampleGroupBox *p = (GF_SampleGroupBox *)a; if (p->sample_entries) gf_free(p->sample_entries); gf_free(p); } GF_Err sbgp_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_SampleGroupBox *ptr = (GF_SampleGroupBox *)s; ISOM_DECREASE_SIZE(ptr, 8); ptr->grouping_type = gf_bs_read_u32(bs); if (ptr->version==1) { ISOM_DECREASE_SIZE(ptr, 4); ptr->grouping_type_parameter = gf_bs_read_u32(bs); } ptr->entry_count = gf_bs_read_u32(bs); if (ptr->size < sizeof(GF_SampleGroupEntry)*ptr->entry_count) return GF_ISOM_INVALID_FILE; ptr->sample_entries = gf_malloc(sizeof(GF_SampleGroupEntry)*ptr->entry_count); if (!ptr->sample_entries) return GF_OUT_OF_MEM; for (i=0; i<ptr->entry_count; i++) { ISOM_DECREASE_SIZE(ptr, 8); ptr->sample_entries[i].sample_count = gf_bs_read_u32(bs); ptr->sample_entries[i].group_description_index = gf_bs_read_u32(bs); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err sbgp_box_write(GF_Box *s, GF_BitStream *bs) { u32 i; GF_Err e; GF_SampleGroupBox *p = (GF_SampleGroupBox*)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, p->grouping_type); if (p->version==1) gf_bs_write_u32(bs, p->grouping_type_parameter); gf_bs_write_u32(bs, p->entry_count); for (i = 0; i<p->entry_count; i++ ) { gf_bs_write_u32(bs, p->sample_entries[i].sample_count); gf_bs_write_u32(bs, p->sample_entries[i].group_description_index); } return GF_OK; } GF_Err sbgp_box_size(GF_Box *s) { GF_SampleGroupBox *p = (GF_SampleGroupBox*)s; p->size += 8; if (p->grouping_type_parameter) p->version=1; if (p->version==1) p->size += 4; p->size += 8*p->entry_count; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ static void *sgpd_parse_entry(u32 grouping_type, GF_BitStream *bs, u32 entry_size, u32 *total_bytes) { Bool null_size_ok = GF_FALSE; GF_DefaultSampleGroupDescriptionEntry *def_ptr; switch (grouping_type) { case GF_ISOM_SAMPLE_GROUP_ROLL: case GF_ISOM_SAMPLE_GROUP_PROL: { GF_RollRecoveryEntry *ptr; GF_SAFEALLOC(ptr, GF_RollRecoveryEntry); if (!ptr) return NULL; ptr->roll_distance = gf_bs_read_int(bs, 16); *total_bytes = 2; return ptr; } case GF_ISOM_SAMPLE_GROUP_RAP: { GF_VisualRandomAccessEntry *ptr; GF_SAFEALLOC(ptr, GF_VisualRandomAccessEntry); if (!ptr) return NULL; ptr->num_leading_samples_known = gf_bs_read_int(bs, 1); ptr->num_leading_samples = gf_bs_read_int(bs, 7); *total_bytes = 1; return ptr; } case GF_ISOM_SAMPLE_GROUP_SAP: { GF_SAPEntry *ptr; GF_SAFEALLOC(ptr, GF_SAPEntry); if (!ptr) return NULL; ptr->dependent_flag = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 3); ptr->SAP_type = gf_bs_read_int(bs, 4); *total_bytes = 1; return ptr; } case GF_ISOM_SAMPLE_GROUP_SYNC: { GF_SYNCEntry *ptr; GF_SAFEALLOC(ptr, GF_SYNCEntry); if (!ptr) return NULL; gf_bs_read_int(bs, 2); ptr->NALU_type = gf_bs_read_int(bs, 6); *total_bytes = 1; return ptr; } case GF_ISOM_SAMPLE_GROUP_TELE: { GF_TemporalLevelEntry *ptr; GF_SAFEALLOC(ptr, GF_TemporalLevelEntry); if (!ptr) return NULL; ptr->level_independently_decodable = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 7); *total_bytes = 1; return ptr; } case GF_ISOM_SAMPLE_GROUP_SEIG: { GF_CENCSampleEncryptionGroupEntry *ptr; GF_SAFEALLOC(ptr, GF_CENCSampleEncryptionGroupEntry); if (!ptr) return NULL; gf_bs_read_u8(bs); //reserved ptr->crypt_byte_block = gf_bs_read_int(bs, 4); ptr->skip_byte_block = gf_bs_read_int(bs, 4); 
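/* remaining seig fields: protection flag, per-sample IV size and 16-byte KID; a constant IV is read only when the entry is protected with a zero per-sample IV size */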
ptr->IsProtected = gf_bs_read_u8(bs); ptr->Per_Sample_IV_size = gf_bs_read_u8(bs); gf_bs_read_data(bs, (char *)ptr->KID, 16); *total_bytes = 20; if ((ptr->IsProtected == 1) && !ptr->Per_Sample_IV_size) { ptr->constant_IV_size = gf_bs_read_u8(bs); if ((ptr->constant_IV_size != 8) && (ptr->constant_IV_size != 16)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] seig sample group have invalid constant_IV size\n")); gf_free(ptr); return NULL; } gf_bs_read_data(bs, (char *)ptr->constant_IV, ptr->constant_IV_size); *total_bytes += 1 + ptr->constant_IV_size; } if (!entry_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] seig sample group does not indicate entry size, deprecated in spec\n")); } return ptr; } case GF_ISOM_SAMPLE_GROUP_OINF: { GF_OperatingPointsInformation *ptr = gf_isom_oinf_new_entry(); u32 s = (u32) gf_bs_get_position(bs); gf_isom_oinf_read_entry(ptr, bs); *total_bytes = (u32) gf_bs_get_position(bs) - s; if (!entry_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] oinf sample group does not indicate entry size, deprecated in spec\n")); } return ptr; } case GF_ISOM_SAMPLE_GROUP_LINF: { GF_LHVCLayerInformation *ptr = gf_isom_linf_new_entry(); u32 s = (u32) gf_bs_get_position(bs); gf_isom_linf_read_entry(ptr, bs); *total_bytes = (u32) gf_bs_get_position(bs) - s; if (!entry_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] linf sample group does not indicate entry size, deprecated in spec\n")); } return ptr; } case GF_ISOM_SAMPLE_GROUP_TRIF: if (! entry_size) { u32 flags = gf_bs_peek_bits(bs, 24, 0); if (flags & 0x10000) entry_size=3; else { if (flags & 0x80000) entry_size=7; else entry_size=11; //have dependency list if (flags & 0x200000) { u32 nb_entries = gf_bs_peek_bits(bs, 16, entry_size); entry_size += 2 + 2*nb_entries; } } GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] trif sample group does not indicate entry size, deprecated in spec\n")); } break; case GF_ISOM_SAMPLE_GROUP_NALM: if (! entry_size) { u64 start = gf_bs_get_position(bs); Bool rle, large_size; u32 entry_count; gf_bs_read_int(bs, 6); large_size = gf_bs_read_int(bs, 1); rle = gf_bs_read_int(bs, 1); entry_count = gf_bs_read_int(bs, large_size ? 16 : 8); gf_bs_seek(bs, start); entry_size = 1 + (large_size ? 2 : 1); entry_size += entry_count * 2; if (rle) entry_size += entry_count * (large_size ? 2 : 1); GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] nalm sample group does not indicate entry size, deprecated in spec\n")); } break; case GF_ISOM_SAMPLE_GROUP_TSAS: case GF_ISOM_SAMPLE_GROUP_STSA: null_size_ok = GF_TRUE; break; //TODO, add support for these ones ? 
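/* tscl (20 bytes) and lbli (2 bytes) use known fixed entry sizes and are stored below as opaque default entries rather than parsed */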
case GF_ISOM_SAMPLE_GROUP_TSCL: entry_size = 20; break; case GF_ISOM_SAMPLE_GROUP_LBLI: entry_size = 2; break; default: break; } if (!entry_size && !null_size_ok) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] %s sample group does not indicate entry size and is not implemented, cannot parse!\n", gf_4cc_to_str( grouping_type) )); return NULL; } GF_SAFEALLOC(def_ptr, GF_DefaultSampleGroupDescriptionEntry); if (!def_ptr) return NULL; if (entry_size) { def_ptr->length = entry_size; def_ptr->data = (u8 *) gf_malloc(sizeof(u8)*def_ptr->length); if (!def_ptr->data) { gf_free(def_ptr); return NULL; } gf_bs_read_data(bs, (char *) def_ptr->data, def_ptr->length); *total_bytes = entry_size; } return def_ptr; } static void sgpd_del_entry(u32 grouping_type, void *entry) { switch (grouping_type) { case GF_ISOM_SAMPLE_GROUP_SYNC: case GF_ISOM_SAMPLE_GROUP_ROLL: case GF_ISOM_SAMPLE_GROUP_PROL: case GF_ISOM_SAMPLE_GROUP_RAP: case GF_ISOM_SAMPLE_GROUP_SEIG: case GF_ISOM_SAMPLE_GROUP_TELE: case GF_ISOM_SAMPLE_GROUP_SAP: gf_free(entry); return; case GF_ISOM_SAMPLE_GROUP_OINF: gf_isom_oinf_del_entry(entry); return; case GF_ISOM_SAMPLE_GROUP_LINF: gf_isom_linf_del_entry(entry); return; default: { GF_DefaultSampleGroupDescriptionEntry *ptr = (GF_DefaultSampleGroupDescriptionEntry *)entry; if (ptr->data) gf_free(ptr->data); gf_free(ptr); } } } void sgpd_write_entry(u32 grouping_type, void *entry, GF_BitStream *bs) { switch (grouping_type) { case GF_ISOM_SAMPLE_GROUP_ROLL: case GF_ISOM_SAMPLE_GROUP_PROL: gf_bs_write_int(bs, ((GF_RollRecoveryEntry*)entry)->roll_distance, 16); return; case GF_ISOM_SAMPLE_GROUP_RAP: gf_bs_write_int(bs, ((GF_VisualRandomAccessEntry*)entry)->num_leading_samples_known, 1); gf_bs_write_int(bs, ((GF_VisualRandomAccessEntry*)entry)->num_leading_samples, 7); return; case GF_ISOM_SAMPLE_GROUP_SAP: gf_bs_write_int(bs, ((GF_SAPEntry*)entry)->dependent_flag, 1); gf_bs_write_int(bs, 0, 3); gf_bs_write_int(bs, ((GF_SAPEntry*)entry)->SAP_type, 4); return; case GF_ISOM_SAMPLE_GROUP_SYNC: gf_bs_write_int(bs, 0, 2); gf_bs_write_int(bs, ((GF_SYNCEntry*)entry)->NALU_type, 6); return; case GF_ISOM_SAMPLE_GROUP_TELE: gf_bs_write_int(bs, ((GF_TemporalLevelEntry*)entry)->level_independently_decodable, 1); gf_bs_write_int(bs, 0, 7); return; case GF_ISOM_SAMPLE_GROUP_SEIG: gf_bs_write_u8(bs, 0x0); gf_bs_write_int(bs, ((GF_CENCSampleEncryptionGroupEntry*)entry)->crypt_byte_block, 4); gf_bs_write_int(bs, ((GF_CENCSampleEncryptionGroupEntry*)entry)->skip_byte_block, 4); gf_bs_write_u8(bs, ((GF_CENCSampleEncryptionGroupEntry *)entry)->IsProtected); gf_bs_write_u8(bs, ((GF_CENCSampleEncryptionGroupEntry *)entry)->Per_Sample_IV_size); gf_bs_write_data(bs, (char *)((GF_CENCSampleEncryptionGroupEntry *)entry)->KID, 16); if ((((GF_CENCSampleEncryptionGroupEntry *)entry)->IsProtected == 1) && !((GF_CENCSampleEncryptionGroupEntry *)entry)->Per_Sample_IV_size) { gf_bs_write_u8(bs, ((GF_CENCSampleEncryptionGroupEntry *)entry)->constant_IV_size); gf_bs_write_data(bs, (char *)((GF_CENCSampleEncryptionGroupEntry *)entry)->constant_IV, ((GF_CENCSampleEncryptionGroupEntry *)entry)->constant_IV_size); } return; case GF_ISOM_SAMPLE_GROUP_OINF: gf_isom_oinf_write_entry(entry, bs); return; case GF_ISOM_SAMPLE_GROUP_LINF: gf_isom_linf_write_entry(entry, bs); return; default: { GF_DefaultSampleGroupDescriptionEntry *ptr = (GF_DefaultSampleGroupDescriptionEntry *)entry; if (ptr->length) gf_bs_write_data(bs, (char *) ptr->data, ptr->length); } } } #ifndef GPAC_DISABLE_ISOM_WRITE static u32 sgpd_size_entry(u32 grouping_type, 
void *entry) { switch (grouping_type) { case GF_ISOM_SAMPLE_GROUP_ROLL: case GF_ISOM_SAMPLE_GROUP_PROL: return 2; case GF_ISOM_SAMPLE_GROUP_TELE: case GF_ISOM_SAMPLE_GROUP_RAP: case GF_ISOM_SAMPLE_GROUP_SAP: case GF_ISOM_SAMPLE_GROUP_SYNC: return 1; case GF_ISOM_SAMPLE_GROUP_TSCL: return 20; case GF_ISOM_SAMPLE_GROUP_LBLI: return 2; case GF_ISOM_SAMPLE_GROUP_TSAS: case GF_ISOM_SAMPLE_GROUP_STSA: return 0; case GF_ISOM_SAMPLE_GROUP_SEIG: return ((((GF_CENCSampleEncryptionGroupEntry *)entry)->IsProtected == 1) && !((GF_CENCSampleEncryptionGroupEntry *)entry)->Per_Sample_IV_size) ? 21 + ((GF_CENCSampleEncryptionGroupEntry *)entry)->constant_IV_size : 20; case GF_ISOM_SAMPLE_GROUP_OINF: return gf_isom_oinf_size_entry(entry); case GF_ISOM_SAMPLE_GROUP_LINF: return gf_isom_linf_size_entry(entry); default: return ((GF_DefaultSampleGroupDescriptionEntry *)entry)->length; } } #endif GF_Box *sgpd_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleGroupDescriptionBox, GF_ISOM_BOX_TYPE_SGPD); /*version 0 is deprecated, use v1 by default*/ tmp->version = 1; tmp->group_descriptions = gf_list_new(); return (GF_Box *)tmp; } void sgpd_box_del(GF_Box *a) { GF_SampleGroupDescriptionBox *p = (GF_SampleGroupDescriptionBox *)a; while (gf_list_count(p->group_descriptions)) { void *ptr = gf_list_last(p->group_descriptions); sgpd_del_entry(p->grouping_type, ptr); gf_list_rem_last(p->group_descriptions); } gf_list_del(p->group_descriptions); gf_free(p); } GF_Err sgpd_box_read(GF_Box *s, GF_BitStream *bs) { u32 entry_count; GF_SampleGroupDescriptionBox *p = (GF_SampleGroupDescriptionBox *)s; ISOM_DECREASE_SIZE(p, 8); p->grouping_type = gf_bs_read_u32(bs); if (p->version>=1) { ISOM_DECREASE_SIZE(p, 4); p->default_length = gf_bs_read_u32(bs); } if (p->version>=2) { ISOM_DECREASE_SIZE(p, 4); p->default_description_index = gf_bs_read_u32(bs); } entry_count = gf_bs_read_u32(bs); if (entry_count>p->size) return GF_ISOM_INVALID_FILE; while (entry_count) { void *ptr; u32 parsed_bytes=0; u32 size = p->default_length; if ((p->version>=1) && !size) { size = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(p, 4); } ptr = sgpd_parse_entry(p->grouping_type, bs, size, &parsed_bytes); //don't return an error, just stop parsing so that we skip over the sgpd box if (!ptr) return GF_OK; ISOM_DECREASE_SIZE(p, parsed_bytes); gf_list_add(p->group_descriptions, ptr); entry_count--; } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err sgpd_box_write(GF_Box *s, GF_BitStream *bs) { u32 i; GF_SampleGroupDescriptionBox *p = (GF_SampleGroupDescriptionBox *)s; GF_Err e; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, p->grouping_type); if (p->version>=1) gf_bs_write_u32(bs, p->default_length); if (p->version>=2) gf_bs_write_u32(bs, p->default_description_index); gf_bs_write_u32(bs, gf_list_count(p->group_descriptions) ); for (i=0; i<gf_list_count(p->group_descriptions); i++) { void *ptr = gf_list_get(p->group_descriptions, i); if ((p->version >= 1) && !p->default_length) { u32 size = sgpd_size_entry(p->grouping_type, ptr); gf_bs_write_u32(bs, size); } sgpd_write_entry(p->grouping_type, ptr, bs); } return GF_OK; } GF_Err sgpd_box_size(GF_Box *s) { u32 i; GF_SampleGroupDescriptionBox *p = (GF_SampleGroupDescriptionBox *)s; p->size += 8; //we force all sample groups to version 1, v0 being deprecated p->version=1; p->size += 4; if (p->version>=2) p->size += 4; p->default_length = 0; for (i=0; i<gf_list_count(p->group_descriptions); i++) { void *ptr = gf_list_get(p->group_descriptions, i); u32 size = sgpd_size_entry(p->grouping_type, ptr); 
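/* accumulate each entry size; default_length stays non-zero only if all entries share the same size, otherwise a per-entry length field is written for version>=1 */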
p->size += size; if (!p->default_length) { p->default_length = size; } else if (p->default_length != size) { p->default_length = 0; } } if (p->version>=1) { if (!p->default_length) p->size += gf_list_count(p->group_descriptions)*4; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void saiz_box_del(GF_Box *s) { GF_SampleAuxiliaryInfoSizeBox*ptr = (GF_SampleAuxiliaryInfoSizeBox*)s; if (ptr == NULL) return; if (ptr->sample_info_size) gf_free(ptr->sample_info_size); gf_free(ptr); } GF_Err saiz_box_read(GF_Box *s, GF_BitStream *bs) { GF_SampleAuxiliaryInfoSizeBox*ptr = (GF_SampleAuxiliaryInfoSizeBox*)s; if (ptr->flags & 1) { ISOM_DECREASE_SIZE(ptr, 8); ptr->aux_info_type = gf_bs_read_u32(bs); ptr->aux_info_type_parameter = gf_bs_read_u32(bs); } ISOM_DECREASE_SIZE(ptr, 5); ptr->default_sample_info_size = gf_bs_read_u8(bs); ptr->sample_count = gf_bs_read_u32(bs); if (ptr->default_sample_info_size == 0) { if (ptr->size < sizeof(u8)*ptr->sample_count) return GF_ISOM_INVALID_FILE; ptr->sample_info_size = gf_malloc(sizeof(u8)*ptr->sample_count); ptr->sample_alloc = ptr->sample_count; if (!ptr->sample_info_size) return GF_OUT_OF_MEM; ISOM_DECREASE_SIZE(ptr, ptr->sample_count); gf_bs_read_data(bs, (char *) ptr->sample_info_size, ptr->sample_count); } return GF_OK; } GF_Box *saiz_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleAuxiliaryInfoSizeBox, GF_ISOM_BOX_TYPE_SAIZ); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err saiz_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SampleAuxiliaryInfoSizeBox*ptr = (GF_SampleAuxiliaryInfoSizeBox*) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->flags & 1) { gf_bs_write_u32(bs, ptr->aux_info_type); gf_bs_write_u32(bs, ptr->aux_info_type_parameter); } gf_bs_write_u8(bs, ptr->default_sample_info_size); gf_bs_write_u32(bs, ptr->sample_count); if (!ptr->default_sample_info_size) { if (!ptr->sample_info_size) gf_bs_write_u8(bs, 0); else gf_bs_write_data(bs, (char *) ptr->sample_info_size, ptr->sample_count); } return GF_OK; } GF_Err saiz_box_size(GF_Box *s) { GF_SampleAuxiliaryInfoSizeBox *ptr = (GF_SampleAuxiliaryInfoSizeBox*)s; if (ptr->aux_info_type || ptr->aux_info_type_parameter) { ptr->flags |= 1; } if (ptr->flags & 1) ptr->size += 8; ptr->size += 5; if (ptr->default_sample_info_size==0) ptr->size += ptr->sample_count; return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE void saio_box_del(GF_Box *s) { GF_SampleAuxiliaryInfoOffsetBox *ptr = (GF_SampleAuxiliaryInfoOffsetBox*)s; if (ptr == NULL) return; if (ptr->offsets) gf_free(ptr->offsets); if (ptr->cached_data) gf_free(ptr->cached_data); gf_free(ptr); } GF_Err saio_box_read(GF_Box *s, GF_BitStream *bs) { GF_SampleAuxiliaryInfoOffsetBox *ptr = (GF_SampleAuxiliaryInfoOffsetBox *)s; if (ptr->flags & 1) { ISOM_DECREASE_SIZE(ptr, 8); ptr->aux_info_type = gf_bs_read_u32(bs); ptr->aux_info_type_parameter = gf_bs_read_u32(bs); } ISOM_DECREASE_SIZE(ptr, 4); ptr->entry_count = gf_bs_read_u32(bs); if (ptr->entry_count) { u32 i; if (ptr->size < (ptr->version == 0 ? 
4 : 8) * ptr->entry_count) return GF_ISOM_INVALID_FILE; ptr->offsets = gf_malloc(sizeof(u64)*ptr->entry_count); if (!ptr->offsets) return GF_OUT_OF_MEM; ptr->entry_alloc = ptr->entry_count; if (ptr->version==0) { ISOM_DECREASE_SIZE(ptr, 4*ptr->entry_count); for (i=0; i<ptr->entry_count; i++) ptr->offsets[i] = gf_bs_read_u32(bs); } else { ISOM_DECREASE_SIZE(ptr, 8*ptr->entry_count); for (i=0; i<ptr->entry_count; i++) ptr->offsets[i] = gf_bs_read_u64(bs); } } return GF_OK; } GF_Box *saio_box_new() { ISOM_DECL_BOX_ALLOC(GF_SampleAuxiliaryInfoOffsetBox, GF_ISOM_BOX_TYPE_SAIO); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err saio_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SampleAuxiliaryInfoOffsetBox *ptr = (GF_SampleAuxiliaryInfoOffsetBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->flags & 1) { gf_bs_write_u32(bs, ptr->aux_info_type); gf_bs_write_u32(bs, ptr->aux_info_type_parameter); } gf_bs_write_u32(bs, ptr->entry_count); if (ptr->entry_count) { u32 i; //store position in bitstream before writing data - offsets can be NULL if a single offset is rewritten later on (cf senc_box_write) ptr->offset_first_offset_field = gf_bs_get_position(bs); if (ptr->version==0) { if (!ptr->offsets) { gf_bs_write_u32(bs, 0); } else { for (i=0; i<ptr->entry_count; i++) gf_bs_write_u32(bs, (u32) ptr->offsets[i]); } } else { if (!ptr->offsets) { gf_bs_write_u64(bs, 0); } else { for (i=0; i<ptr->entry_count; i++) gf_bs_write_u64(bs, ptr->offsets[i]); } } } return GF_OK; } GF_Err saio_box_size(GF_Box *s) { GF_SampleAuxiliaryInfoOffsetBox *ptr = (GF_SampleAuxiliaryInfoOffsetBox*)s; if (ptr->aux_info_type || ptr->aux_info_type_parameter) { ptr->flags |= 1; } if (ptr->flags & 1) ptr->size += 8; ptr->size += 4; //a little optim here: in cenc, the saio always points to a single data block, only one entry is needed switch (ptr->aux_info_type) { case GF_ISOM_CENC_SCHEME: case GF_ISOM_CBC_SCHEME: case GF_ISOM_CENS_SCHEME: case GF_ISOM_CBCS_SCHEME: if (ptr->offsets) gf_free(ptr->offsets); ptr->offsets = NULL; ptr->entry_alloc = 0; ptr->entry_count = 1; break; } ptr->size += ((ptr->version==1) ? 8 : 4) * ptr->entry_count; return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE void prft_box_del(GF_Box *s) { gf_free(s); } GF_Err prft_box_read(GF_Box *s,GF_BitStream *bs) { GF_ProducerReferenceTimeBox *ptr = (GF_ProducerReferenceTimeBox *) s; ISOM_DECREASE_SIZE(ptr, 12); ptr->refTrackID = gf_bs_read_u32(bs); ptr->ntp = gf_bs_read_u64(bs); if (ptr->version==0) { ISOM_DECREASE_SIZE(ptr, 4); ptr->timestamp = gf_bs_read_u32(bs); } else { ISOM_DECREASE_SIZE(ptr, 8); ptr->timestamp = gf_bs_read_u64(bs); } return GF_OK; } GF_Box *prft_box_new() { ISOM_DECL_BOX_ALLOC(GF_ProducerReferenceTimeBox, GF_ISOM_BOX_TYPE_PRFT); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err prft_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ProducerReferenceTimeBox *ptr = (GF_ProducerReferenceTimeBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->refTrackID); gf_bs_write_u64(bs, ptr->ntp); if (ptr->version==0) { gf_bs_write_u32(bs, (u32) ptr->timestamp); } else { gf_bs_write_u64(bs, ptr->timestamp); } return GF_OK; } GF_Err prft_box_size(GF_Box *s) { GF_ProducerReferenceTimeBox *ptr = (GF_ProducerReferenceTimeBox*)s; ptr->size += 4+8+ (ptr->version ? 
8 : 4); return GF_OK; } #endif //GPAC_DISABLE_ISOM_WRITE GF_Box *trgr_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackGroupBox, GF_ISOM_BOX_TYPE_TRGR); tmp->groups = gf_list_new(); if (!tmp->groups) { gf_free(tmp); return NULL; } return (GF_Box *)tmp; } void trgr_box_del(GF_Box *s) { GF_TrackGroupBox *ptr = (GF_TrackGroupBox *)s; if (ptr == NULL) return; gf_list_del(ptr->groups); gf_free(ptr); } GF_Err trgr_on_child_box(GF_Box *s, GF_Box *a) { GF_TrackGroupBox *ptr = (GF_TrackGroupBox *)s; return gf_list_add(ptr->groups, a); } GF_Err trgr_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read_ex(s, bs, trgr_on_child_box, s->type); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err trgr_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err trgr_box_size(GF_Box *s) { u32 pos=0; GF_TrackGroupBox *ptr = (GF_TrackGroupBox *) s; gf_isom_check_position_list(s, ptr->groups, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *trgt_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrackGroupTypeBox, GF_ISOM_BOX_TYPE_TRGT); return (GF_Box *)tmp; } void trgt_box_del(GF_Box *s) { GF_TrackGroupTypeBox *ptr = (GF_TrackGroupTypeBox *)s; if (ptr == NULL) return; gf_free(ptr); } GF_Err trgt_box_read(GF_Box *s, GF_BitStream *bs) { GF_TrackGroupTypeBox *ptr = (GF_TrackGroupTypeBox *)s; ISOM_DECREASE_SIZE(ptr, 4); ptr->track_group_id = gf_bs_read_u32(bs); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err trgt_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_TrackGroupTypeBox *ptr = (GF_TrackGroupTypeBox *) s; if (!s) return GF_BAD_PARAM; s->type = ptr->group_type; e = gf_isom_full_box_write(s, bs); s->type = GF_ISOM_BOX_TYPE_TRGT; if (e) return e; gf_bs_write_u32(bs, ptr->track_group_id); return GF_OK; } GF_Err trgt_box_size(GF_Box *s) { GF_TrackGroupBox *ptr = (GF_TrackGroupBox *)s; ptr->size+= 4; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *stvi_box_new() { ISOM_DECL_BOX_ALLOC(GF_StereoVideoBox, GF_ISOM_BOX_TYPE_STVI); return (GF_Box *)tmp; } void stvi_box_del(GF_Box *s) { GF_StereoVideoBox *ptr = (GF_StereoVideoBox *)s; if (ptr == NULL) return; if (ptr->stereo_indication_type) gf_free(ptr->stereo_indication_type); gf_free(ptr); } GF_Err stvi_box_read(GF_Box *s, GF_BitStream *bs) { GF_StereoVideoBox *ptr = (GF_StereoVideoBox *)s; ISOM_DECREASE_SIZE(ptr, 12); gf_bs_read_int(bs, 30); ptr->single_view_allowed = gf_bs_read_int(bs, 2); ptr->stereo_scheme = gf_bs_read_u32(bs); ptr->sit_len = gf_bs_read_u32(bs); ISOM_DECREASE_SIZE(ptr, ptr->sit_len); ptr->stereo_indication_type = gf_malloc(sizeof(char)*ptr->sit_len); if (!ptr->stereo_indication_type) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->stereo_indication_type, ptr->sit_len); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err stvi_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_StereoVideoBox *ptr = (GF_StereoVideoBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, 0, 30); gf_bs_write_int(bs, ptr->single_view_allowed, 2); gf_bs_write_u32(bs, ptr->stereo_scheme); gf_bs_write_u32(bs, ptr->sit_len); gf_bs_write_data(bs, ptr->stereo_indication_type, ptr->sit_len); return GF_OK; } GF_Err stvi_box_size(GF_Box *s) { GF_StereoVideoBox *ptr = (GF_StereoVideoBox *)s; ptr->size+= 12 + ptr->sit_len; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *fiin_box_new() { ISOM_DECL_BOX_ALLOC(FDItemInformationBox, GF_ISOM_BOX_TYPE_FIIN); return (GF_Box *)tmp; } void fiin_box_del(GF_Box *s) { FDItemInformationBox *ptr = (FDItemInformationBox 
*)s; if (ptr == NULL) return; if (ptr->partition_entries) gf_list_del(ptr->partition_entries); gf_free(ptr); } GF_Err fiin_on_child_box(GF_Box *s, GF_Box *a) { FDItemInformationBox *ptr = (FDItemInformationBox *)s; switch(a->type) { case GF_ISOM_BOX_TYPE_PAEN: if (!ptr->partition_entries) ptr->partition_entries = gf_list_new(); return gf_list_add(ptr->partition_entries, a); case GF_ISOM_BOX_TYPE_SEGR: if (ptr->session_info) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->session_info = (FDSessionGroupBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_GITN: if (ptr->group_id_to_name) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->group_id_to_name = (GroupIdToNameBox *)a; return GF_OK; } return GF_OK; } GF_Err fiin_box_read(GF_Box *s, GF_BitStream *bs) { FDItemInformationBox *ptr = (FDItemInformationBox *)s; ISOM_DECREASE_SIZE(ptr, 2); gf_bs_read_u16(bs); return gf_isom_box_array_read(s, bs, fiin_on_child_box); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err fiin_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; FDItemInformationBox *ptr = (FDItemInformationBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u16(bs, gf_list_count(ptr->partition_entries) ); return GF_OK; } GF_Err fiin_box_size(GF_Box *s) { u32 pos=0; FDItemInformationBox *ptr = (FDItemInformationBox *) s; s->size+= 2; gf_isom_check_position_list(s, ptr->partition_entries, &pos); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *paen_box_new() { ISOM_DECL_BOX_ALLOC(FDPartitionEntryBox, GF_ISOM_BOX_TYPE_PAEN); return (GF_Box *)tmp; } void paen_box_del(GF_Box *s) { gf_free(s); } GF_Err paen_on_child_box(GF_Box *s, GF_Box *a) { FDPartitionEntryBox *ptr = (FDPartitionEntryBox *)s; switch(a->type) { case GF_ISOM_BOX_TYPE_FPAR: if (ptr->blocks_and_symbols) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->blocks_and_symbols = (FilePartitionBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_FECR: if (ptr->FEC_symbol_locations) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->FEC_symbol_locations = (FECReservoirBox *)a; return GF_OK; case GF_ISOM_BOX_TYPE_FIRE: if (ptr->File_symbol_locations) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->File_symbol_locations = (FileReservoirBox *)a; return GF_OK; } return GF_OK; } GF_Err paen_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, fiin_on_child_box); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err paen_box_write(GF_Box *s, GF_BitStream *bs) { if (!s) return GF_BAD_PARAM; return gf_isom_box_write_header(s, bs); } GF_Err paen_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *fpar_box_new() { ISOM_DECL_BOX_ALLOC(FilePartitionBox, GF_ISOM_BOX_TYPE_FPAR); return (GF_Box *)tmp; } void fpar_box_del(GF_Box *s) { FilePartitionBox *ptr = (FilePartitionBox *)s; if (ptr == NULL) return; if (ptr->scheme_specific_info) gf_free(ptr->scheme_specific_info); if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err gf_isom_read_null_terminated_string(GF_Box *s, GF_BitStream *bs, u64 size, char **out_str) { u32 len=10; u32 i=0; *out_str = gf_malloc(sizeof(char)*len); if (! 
*out_str) return GF_OUT_OF_MEM; while (1) { ISOM_DECREASE_SIZE(s, 1 ); (*out_str)[i] = gf_bs_read_u8(bs); if (!(*out_str)[i]) break; i++; if (i==len) { len += 10; *out_str = gf_realloc(*out_str, sizeof(char)*len); } if (gf_bs_available(bs) == 0) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] missing null character in null terminated string\n")); (*out_str)[i] = 0; return GF_OK; } if (i >= size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] string bigger than container, probably missing null character\n")); (*out_str)[i] = 0; return GF_OK; } } return GF_OK; } GF_Err fpar_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_Err e; FilePartitionBox *ptr = (FilePartitionBox *)s; ISOM_DECREASE_SIZE(ptr, ((ptr->version ? 4 : 2) + 12) ); ptr->itemID = gf_bs_read_int(bs, ptr->version ? 32 : 16); ptr->packet_payload_size = gf_bs_read_u16(bs); gf_bs_read_u8(bs); ptr->FEC_encoding_ID = gf_bs_read_u8(bs); ptr->FEC_instance_ID = gf_bs_read_u16(bs); ptr->max_source_block_length = gf_bs_read_u16(bs); ptr->encoding_symbol_length = gf_bs_read_u16(bs); ptr->max_number_of_encoding_symbols = gf_bs_read_u16(bs); e = gf_isom_read_null_terminated_string(s, bs, ptr->size, &ptr->scheme_specific_info); if (e) return e; ISOM_DECREASE_SIZE(ptr, (ptr->version ? 4 : 2) ); ptr->nb_entries = gf_bs_read_int(bs, ptr->version ? 32 : 16); if (ptr->nb_entries > UINT_MAX / 6) return GF_ISOM_INVALID_FILE; ISOM_DECREASE_SIZE(ptr, ptr->nb_entries * 6 ); GF_SAFE_ALLOC_N(ptr->entries, ptr->nb_entries, FilePartitionEntry); if (!ptr->entries) return GF_OUT_OF_MEM; for (i=0;i < ptr->nb_entries; i++) { ptr->entries[i].block_count = gf_bs_read_u16(bs); ptr->entries[i].block_size = gf_bs_read_u32(bs); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err fpar_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; FilePartitionBox *ptr = (FilePartitionBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, ptr->itemID, ptr->version ? 32 : 16); gf_bs_write_u16(bs, ptr->packet_payload_size); gf_bs_write_u8(bs, 0); gf_bs_write_u8(bs, ptr->FEC_encoding_ID); gf_bs_write_u16(bs, ptr->FEC_instance_ID); gf_bs_write_u16(bs, ptr->max_source_block_length); gf_bs_write_u16(bs, ptr->encoding_symbol_length); gf_bs_write_u16(bs, ptr->max_number_of_encoding_symbols); if (ptr->scheme_specific_info) { gf_bs_write_data(bs, ptr->scheme_specific_info, (u32)strlen(ptr->scheme_specific_info) ); } //null terminated string gf_bs_write_u8(bs, 0); gf_bs_write_int(bs, ptr->nb_entries, ptr->version ? 32 : 16); for (i=0;i < ptr->nb_entries; i++) { gf_bs_write_u16(bs, ptr->entries[i].block_count); gf_bs_write_u32(bs, ptr->entries[i].block_size); } return GF_OK; } GF_Err fpar_box_size(GF_Box *s) { FilePartitionBox *ptr = (FilePartitionBox *)s; ptr->size += 13 + (ptr->version ? 8 : 4); if (ptr->scheme_specific_info) ptr->size += strlen(ptr->scheme_specific_info); ptr->size+= ptr->nb_entries * 6; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *fecr_box_new() { ISOM_DECL_BOX_ALLOC(FECReservoirBox, GF_ISOM_BOX_TYPE_FECR); return (GF_Box *)tmp; } void fecr_box_del(GF_Box *s) { FECReservoirBox *ptr = (FECReservoirBox *)s; if (ptr == NULL) return; if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err fecr_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; FECReservoirBox *ptr = (FECReservoirBox *)s; ISOM_DECREASE_SIZE(ptr, (ptr->version ? 4 : 2) ); ptr->nb_entries = gf_bs_read_int(bs, ptr->version ? 32 : 16); ISOM_DECREASE_SIZE(ptr, ptr->nb_entries * (ptr->version ? 
8 : 6) ); GF_SAFE_ALLOC_N(ptr->entries, ptr->nb_entries, FECReservoirEntry); if (!ptr->entries) return GF_OUT_OF_MEM; for (i=0; i<ptr->nb_entries; i++) { ptr->entries[i].item_id = gf_bs_read_int(bs, ptr->version ? 32 : 16); ptr->entries[i].symbol_count = gf_bs_read_u32(bs); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err fecr_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; FECReservoirBox *ptr = (FECReservoirBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, ptr->nb_entries, ptr->version ? 32 : 16); for (i=0; i<ptr->nb_entries; i++) { gf_bs_write_int(bs, ptr->entries[i].item_id, ptr->version ? 32 : 16); gf_bs_write_u32(bs, ptr->entries[i].symbol_count); } return GF_OK; } GF_Err fecr_box_size(GF_Box *s) { FECReservoirBox *ptr = (FECReservoirBox *)s; ptr->size += (ptr->version ? 4 : 2) + ptr->nb_entries * (ptr->version ? 8 : 6); return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *segr_box_new() { ISOM_DECL_BOX_ALLOC(FDSessionGroupBox, GF_ISOM_BOX_TYPE_SEGR); return (GF_Box *)tmp; } void segr_box_del(GF_Box *s) { u32 i; FDSessionGroupBox *ptr = (FDSessionGroupBox *)s; if (ptr == NULL) return; for (i=0; i<ptr->num_session_groups; i++) { if (ptr->session_groups[i].group_ids) gf_free(ptr->session_groups[i].group_ids); if (ptr->session_groups[i].channels) gf_free(ptr->session_groups[i].channels); } if (ptr->session_groups) gf_free(ptr->session_groups); gf_free(ptr); } GF_Err segr_box_read(GF_Box *s, GF_BitStream *bs) { u32 i, k; FDSessionGroupBox *ptr = (FDSessionGroupBox *)s; ISOM_DECREASE_SIZE(ptr, 2); ptr->num_session_groups = gf_bs_read_u16(bs); if (ptr->size < ptr->num_session_groups) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in segr\n", ptr->num_session_groups)); ptr->num_session_groups = 0; return GF_ISOM_INVALID_FILE; } GF_SAFE_ALLOC_N(ptr->session_groups, ptr->num_session_groups, SessionGroupEntry); if (!ptr->session_groups) return GF_OUT_OF_MEM; for (i=0; i<ptr->num_session_groups; i++) { ptr->session_groups[i].nb_groups = gf_bs_read_u8(bs); ISOM_DECREASE_SIZE(ptr, 1); ISOM_DECREASE_SIZE(ptr, ptr->session_groups[i].nb_groups*4); GF_SAFE_ALLOC_N(ptr->session_groups[i].group_ids, ptr->session_groups[i].nb_groups, u32); if (!ptr->session_groups[i].group_ids) return GF_OUT_OF_MEM; for (k=0; k<ptr->session_groups[i].nb_groups; k++) { ptr->session_groups[i].group_ids[k] = gf_bs_read_u32(bs); } ptr->session_groups[i].nb_channels = gf_bs_read_u16(bs); ISOM_DECREASE_SIZE(ptr, ptr->session_groups[i].nb_channels*4); GF_SAFE_ALLOC_N(ptr->session_groups[i].channels, ptr->session_groups[i].nb_channels, u32); if (!ptr->session_groups[i].channels) return GF_OUT_OF_MEM; for (k=0; k<ptr->session_groups[i].nb_channels; k++) { ptr->session_groups[i].channels[k] = gf_bs_read_u32(bs); } } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err segr_box_write(GF_Box *s, GF_BitStream *bs) { u32 i, k; GF_Err e; FDSessionGroupBox *ptr = (FDSessionGroupBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u16(bs, ptr->num_session_groups); for (i=0; i<ptr->num_session_groups; i++) { gf_bs_write_u8(bs, ptr->session_groups[i].nb_groups); for (k=0; k<ptr->session_groups[i].nb_groups; k++) { gf_bs_write_u32(bs, ptr->session_groups[i].group_ids[k]); } gf_bs_write_u16(bs, ptr->session_groups[i].nb_channels); for (k=0; k<ptr->session_groups[i].nb_channels; k++) { gf_bs_write_u32(bs, ptr->session_groups[i].channels[k]); } } return GF_OK; } GF_Err 
segr_box_size(GF_Box *s) { u32 i; FDSessionGroupBox *ptr = (FDSessionGroupBox *)s; ptr->size += 2; for (i=0; i<ptr->num_session_groups; i++) { ptr->size += 1 + 4*ptr->session_groups[i].nb_groups; ptr->size += 2 + 4*ptr->session_groups[i].nb_channels; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *gitn_box_new() { ISOM_DECL_BOX_ALLOC(GroupIdToNameBox, GF_ISOM_BOX_TYPE_GITN); return (GF_Box *)tmp; } void gitn_box_del(GF_Box *s) { u32 i; GroupIdToNameBox *ptr = (GroupIdToNameBox *)s; if (ptr == NULL) return; for (i=0; i<ptr->nb_entries; i++) { if (ptr->entries[i].name) gf_free(ptr->entries[i].name); } if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err gitn_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_Err e; GroupIdToNameBox *ptr = (GroupIdToNameBox *)s; ISOM_DECREASE_SIZE(ptr, 2); ptr->nb_entries = gf_bs_read_u16(bs); if (ptr->size < ptr->nb_entries*4) return GF_ISOM_INVALID_FILE; GF_SAFE_ALLOC_N(ptr->entries, ptr->nb_entries, GroupIdNameEntry); if (!ptr->entries) return GF_OUT_OF_MEM; for (i=0; i<ptr->nb_entries; i++) { ISOM_DECREASE_SIZE(ptr, 4); ptr->entries[i].group_id = gf_bs_read_u32(bs); e = gf_isom_read_null_terminated_string(s, bs, ptr->size, &ptr->entries[i].name); if (e) return e; } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err gitn_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GroupIdToNameBox *ptr = (GroupIdToNameBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u16(bs, ptr->nb_entries); for (i=0; i<ptr->nb_entries; i++) { gf_bs_write_u32(bs, ptr->entries[i].group_id); if (ptr->entries[i].name) gf_bs_write_data(bs, ptr->entries[i].name, (u32)strlen(ptr->entries[i].name) ); gf_bs_write_u8(bs, 0); } return GF_OK; } GF_Err gitn_box_size(GF_Box *s) { u32 i; GroupIdToNameBox *ptr = (GroupIdToNameBox *)s; ptr->size += 2; for (i=0; i<ptr->nb_entries; i++) { ptr->size += 5; if (ptr->entries[i].name) ptr->size += strlen(ptr->entries[i].name); } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #ifndef GPAC_DISABLE_ISOM_HINTING GF_Box *fdpa_box_new() { ISOM_DECL_BOX_ALLOC(GF_FDpacketBox, GF_ISOM_BOX_TYPE_FDPA); return (GF_Box *)tmp; } void fdpa_box_del(GF_Box *s) { u32 i; GF_FDpacketBox *ptr = (GF_FDpacketBox *)s; if (ptr == NULL) return; if (ptr->headers) { for (i=0; i<ptr->header_ext_count; i++) { if (ptr->headers[i].data) gf_free(ptr->headers[i].data); } gf_free(ptr->headers); } gf_free(ptr); } GF_Err fdpa_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; GF_FDpacketBox *ptr = (GF_FDpacketBox *)s; ISOM_DECREASE_SIZE(ptr, 3); ptr->info.sender_current_time_present = gf_bs_read_int(bs, 1); ptr->info.expected_residual_time_present = gf_bs_read_int(bs, 1); ptr->info.session_close_bit = gf_bs_read_int(bs, 1); ptr->info.object_close_bit = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 4); ptr->info.transport_object_identifier = gf_bs_read_u16(bs); ISOM_DECREASE_SIZE(ptr, 2); ptr->header_ext_count = gf_bs_read_u16(bs); if (ptr->size < ptr->header_ext_count*2) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid number of entries %d in fdpa\n", ptr->header_ext_count)); return GF_ISOM_INVALID_FILE; } GF_SAFE_ALLOC_N(ptr->headers, ptr->header_ext_count, GF_LCTheaderExtension); if (!ptr->headers) return GF_OUT_OF_MEM; for (i=0; i<ptr->header_ext_count; i++) { ptr->headers[i].header_extension_type = gf_bs_read_u8(bs); ISOM_DECREASE_SIZE(ptr, 1); if (ptr->headers[i].header_extension_type > 127) { ISOM_DECREASE_SIZE(ptr, 3); gf_bs_read_data(bs, (char *) ptr->headers[i].content, 3); } else { 
ISOM_DECREASE_SIZE(ptr, 1); ptr->headers[i].data_length = gf_bs_read_u8(bs); if (ptr->headers[i].data_length) { ptr->headers[i].data_length = 4*ptr->headers[i].data_length - 2; if (ptr->size < sizeof(char) * ptr->headers[i].data_length) return GF_ISOM_INVALID_FILE; ptr->headers[i].data = gf_malloc(sizeof(char) * ptr->headers[i].data_length); if (!ptr->headers[i].data) return GF_OUT_OF_MEM; ISOM_DECREASE_SIZE(ptr, ptr->headers[i].data_length); gf_bs_read_data(bs, ptr->headers[i].data, ptr->headers[i].data_length); } } } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err fdpa_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_FDpacketBox *ptr = (GF_FDpacketBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_int(bs, ptr->info.sender_current_time_present, 1); gf_bs_write_int(bs, ptr->info.expected_residual_time_present, 1); gf_bs_write_int(bs, ptr->info.session_close_bit, 1); gf_bs_write_int(bs, ptr->info.object_close_bit, 1); gf_bs_write_int(bs, 0, 4); gf_bs_write_u16(bs, ptr->info.transport_object_identifier); gf_bs_write_u16(bs, ptr->header_ext_count); for (i=0; i<ptr->header_ext_count; i++) { gf_bs_write_u8(bs, ptr->headers[i].header_extension_type); if (ptr->headers[i].header_extension_type > 127) { gf_bs_write_data(bs, (const char *) ptr->headers[i].content, 3); } else { gf_bs_write_u8(bs, ptr->headers[i].data_length ? (ptr->headers[i].data_length+2)/4 : 0); if (ptr->headers[i].data_length) { gf_bs_write_data(bs, ptr->headers[i].data, ptr->headers[i].data_length); } } } return GF_OK; } GF_Err fdpa_box_size(GF_Box *s) { u32 i; GF_FDpacketBox *ptr = (GF_FDpacketBox *)s; ptr->size += 5; for (i=0; i<ptr->header_ext_count; i++) { ptr->size += 1; if (ptr->headers[i].header_extension_type > 127) { ptr->size += 3; } else { ptr->size += 1 + ptr->headers[i].data_length; } } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *extr_box_new() { ISOM_DECL_BOX_ALLOC(GF_ExtraDataBox, GF_ISOM_BOX_TYPE_EXTR); return (GF_Box *)tmp; } void extr_box_del(GF_Box *s) { GF_ExtraDataBox *ptr = (GF_ExtraDataBox *)s; if (ptr == NULL) return; if (ptr->feci) gf_isom_box_del((GF_Box*)ptr->feci); if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Err extr_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ExtraDataBox *ptr = (GF_ExtraDataBox *)s; e = gf_isom_box_parse((GF_Box**) &ptr->feci, bs); if (e) return e; if (!ptr->feci || ptr->feci->size > ptr->size) return GF_ISOM_INVALID_MEDIA; ptr->data_length = (u32) (ptr->size - ptr->feci->size); ptr->data = gf_malloc(sizeof(char)*ptr->data_length); if (!ptr->data) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->data, ptr->data_length); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err extr_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ExtraDataBox *ptr = (GF_ExtraDataBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; if (ptr->feci) { e = gf_isom_box_write((GF_Box *)ptr->feci, bs); if (e) return e; } gf_bs_write_data(bs, ptr->data, ptr->data_length); return GF_OK; } GF_Err extr_box_size(GF_Box *s) { GF_ExtraDataBox *ptr = (GF_ExtraDataBox *) s; ptr->size += ptr->data_length; if (ptr->feci) { GF_Err e = gf_isom_box_size((GF_Box*)ptr->feci); if (e) return e; ptr->size += ptr->feci->size; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *fdsa_box_new() { ISOM_DECL_BOX_ALLOC(GF_HintSample, GF_ISOM_BOX_TYPE_FDSA); if (!tmp) return NULL; tmp->packetTable = gf_list_new(); tmp->hint_subtype = GF_ISOM_BOX_TYPE_FDP_STSD; return (GF_Box*)tmp; 
} void fdsa_box_del(GF_Box *s) { GF_HintSample *ptr = (GF_HintSample *)s; gf_list_del(ptr->packetTable); gf_free(ptr); } GF_Err fdsa_on_child_box(GF_Box *s, GF_Box *a) { GF_HintSample *ptr = (GF_HintSample *)s; switch(a->type) { case GF_ISOM_BOX_TYPE_FDPA: gf_list_add(ptr->packetTable, a); break; case GF_ISOM_BOX_TYPE_EXTR: if (ptr->extra_data) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->extra_data = (GF_ExtraDataBox*)a; break; } return GF_OK; } GF_Err fdsa_box_read(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_array_read(s, bs, fdsa_on_child_box); } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err fdsa_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_HintSample *ptr = (GF_HintSample *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; e = gf_isom_box_array_write(s, ptr->packetTable, bs); if (e) return e; if (ptr->extra_data) { e = gf_isom_box_write((GF_Box *)ptr->extra_data, bs); if (e) return e; } return GF_OK; } GF_Err fdsa_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ #endif /*GPAC_DISABLE_ISOM_HINTING*/ void trik_box_del(GF_Box *s) { GF_TrickPlayBox *ptr = (GF_TrickPlayBox *) s; if (ptr == NULL) return; if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err trik_box_read(GF_Box *s,GF_BitStream *bs) { u32 i; GF_TrickPlayBox *ptr = (GF_TrickPlayBox *) s; ptr->entry_count = (u32) ptr->size; ptr->entries = (GF_TrickPlayBoxEntry *) gf_malloc(ptr->entry_count * sizeof(GF_TrickPlayBoxEntry) ); if (!ptr->entries) return GF_OUT_OF_MEM; for (i=0; i< ptr->entry_count; i++) { ptr->entries[i].pic_type = gf_bs_read_int(bs, 2); ptr->entries[i].dependency_level = gf_bs_read_int(bs, 6); } return GF_OK; } GF_Box *trik_box_new() { ISOM_DECL_BOX_ALLOC(GF_TrickPlayBox, GF_ISOM_BOX_TYPE_TRIK); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err trik_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_TrickPlayBox *ptr = (GF_TrickPlayBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; for (i=0; i < ptr->entry_count; i++ ) { gf_bs_write_int(bs, ptr->entries[i].pic_type, 2); gf_bs_write_int(bs, ptr->entries[i].dependency_level, 6); } return GF_OK; } GF_Err trik_box_size(GF_Box *s) { GF_TrickPlayBox *ptr = (GF_TrickPlayBox *) s; ptr->size += 8 * ptr->entry_count; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void bloc_box_del(GF_Box *s) { gf_free(s); } GF_Err bloc_box_read(GF_Box *s,GF_BitStream *bs) { GF_BaseLocationBox *ptr = (GF_BaseLocationBox *) s; ISOM_DECREASE_SIZE(s, 256) gf_bs_read_data(bs, (char *) ptr->baseLocation, 256); ISOM_DECREASE_SIZE(s, 256) gf_bs_read_data(bs, (char *) ptr->basePurlLocation, 256); ISOM_DECREASE_SIZE(s, 512) gf_bs_skip_bytes(bs, 512); return GF_OK; } GF_Box *bloc_box_new() { ISOM_DECL_BOX_ALLOC(GF_BaseLocationBox, GF_ISOM_BOX_TYPE_TRIK); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err bloc_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_BaseLocationBox *ptr = (GF_BaseLocationBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_data(bs, (const char *) ptr->baseLocation, 256); gf_bs_write_data(bs, (const char *) ptr->basePurlLocation, 256); for (i=0; i < 64; i++ ) { gf_bs_write_u64(bs, 0); } return GF_OK; } GF_Err bloc_box_size(GF_Box *s) { s->size += 1024; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void ainf_box_del(GF_Box *s) { GF_AssetInformationBox *ptr = (GF_AssetInformationBox *) s; if (ptr->APID) gf_free(ptr->APID); gf_free(s); } GF_Err ainf_box_read(GF_Box *s,GF_BitStream *bs) { GF_AssetInformationBox *ptr = 
(GF_AssetInformationBox *) s; ISOM_DECREASE_SIZE(s, 4) ptr->profile_version = gf_bs_read_u32(bs); return gf_isom_read_null_terminated_string(s, bs, s->size, &ptr->APID); } GF_Box *ainf_box_new() { ISOM_DECL_BOX_ALLOC(GF_AssetInformationBox, GF_ISOM_BOX_TYPE_AINF); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err ainf_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_AssetInformationBox *ptr = (GF_AssetInformationBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->profile_version); if (ptr->APID) gf_bs_write_data(bs, ptr->APID, (u32) strlen(ptr->APID) ); gf_bs_write_u8(bs, 0); return GF_OK; } GF_Err ainf_box_size(GF_Box *s) { GF_AssetInformationBox *ptr = (GF_AssetInformationBox *) s; s->size += 4 + (ptr->APID ? strlen(ptr->APID) : 0 ) + 1; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mhac_box_del(GF_Box *s) { GF_MHAConfigBox *ptr = (GF_MHAConfigBox *) s; if (ptr->mha_config) gf_free(ptr->mha_config); gf_free(s); } GF_Err mhac_box_read(GF_Box *s,GF_BitStream *bs) { GF_MHAConfigBox *ptr = (GF_MHAConfigBox *) s; ISOM_DECREASE_SIZE(s, 5) ptr->configuration_version = gf_bs_read_u8(bs); ptr->mha_pl_indication = gf_bs_read_u8(bs); ptr->reference_channel_layout = gf_bs_read_u8(bs); ptr->mha_config_size = gf_bs_read_u16(bs); if (ptr->mha_config_size) { ISOM_DECREASE_SIZE(s, ptr->mha_config_size) ptr->mha_config = gf_malloc(sizeof(char)*ptr->mha_config_size); if (!ptr->mha_config) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->mha_config, ptr->mha_config_size); } return GF_OK; } GF_Box *mhac_box_new() { ISOM_DECL_BOX_ALLOC(GF_MHAConfigBox, GF_ISOM_BOX_TYPE_MHAC); tmp->configuration_version = 1; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mhac_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_MHAConfigBox *ptr = (GF_MHAConfigBox *) s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->configuration_version); gf_bs_write_u8(bs, ptr->mha_pl_indication); gf_bs_write_u8(bs, ptr->reference_channel_layout); gf_bs_write_u16(bs, ptr->mha_config ? 
ptr->mha_config_size : 0); if (ptr->mha_config && ptr->mha_config_size) gf_bs_write_data(bs, ptr->mha_config, ptr->mha_config_size); return GF_OK; } GF_Err mhac_box_size(GF_Box *s) { GF_MHAConfigBox *ptr = (GF_MHAConfigBox *) s; s->size += 5; if (ptr->mha_config_size && ptr->mha_config) s->size += ptr->mha_config_size; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void jp2h_box_del(GF_Box *s) { gf_free(s); } GF_Err jp2h_on_child_box(GF_Box *s, GF_Box *a) { GF_J2KHeaderBox *ptr = (GF_J2KHeaderBox *)s; switch(a->type) { case GF_ISOM_BOX_TYPE_IHDR: if (ptr->ihdr) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->ihdr = (GF_J2KImageHeaderBox*)a; return GF_OK; case GF_ISOM_BOX_TYPE_COLR: if (ptr->colr) ERROR_ON_DUPLICATED_BOX(a, ptr) ptr->colr = (GF_ColourInformationBox*)a; return GF_OK; } return GF_OK; } GF_Err jp2h_box_read(GF_Box *s,GF_BitStream *bs) { return gf_isom_box_array_read_ex(s, bs, jp2h_on_child_box, s->type); } GF_Box *jp2h_box_new() { ISOM_DECL_BOX_ALLOC(GF_J2KHeaderBox, GF_ISOM_BOX_TYPE_JP2H); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err jp2h_box_write(GF_Box *s, GF_BitStream *bs) { return gf_isom_box_write_header(s, bs); } GF_Err jp2h_box_size(GF_Box *s) { return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void ihdr_box_del(GF_Box *s) { gf_free(s); } GF_Err ihdr_box_read(GF_Box *s,GF_BitStream *bs) { GF_J2KImageHeaderBox *ptr = (GF_J2KImageHeaderBox *) s; ISOM_DECREASE_SIZE(s, 14) ptr->height = gf_bs_read_u32(bs); ptr->width = gf_bs_read_u32(bs); ptr->nb_comp = gf_bs_read_u16(bs); ptr->bpc = gf_bs_read_u8(bs); ptr->Comp = gf_bs_read_u8(bs); ptr->UnkC = gf_bs_read_u8(bs); ptr->IPR = gf_bs_read_u8(bs); return GF_OK; } GF_Box *ihdr_box_new() { ISOM_DECL_BOX_ALLOC(GF_J2KImageHeaderBox, GF_ISOM_BOX_TYPE_IHDR); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err ihdr_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_J2KImageHeaderBox *ptr = (GF_J2KImageHeaderBox *) s; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->height); gf_bs_write_u32(bs, ptr->width); gf_bs_write_u16(bs, ptr->nb_comp); gf_bs_write_u8(bs, ptr->bpc); gf_bs_write_u8(bs, ptr->Comp); gf_bs_write_u8(bs, ptr->UnkC); gf_bs_write_u8(bs, ptr->IPR); return GF_OK; } GF_Err ihdr_box_size(GF_Box *s) { s->size += 14; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ /* Dolby Vision */ GF_Box *dvcC_box_new() { GF_DOVIConfigurationBox *tmp = (GF_DOVIConfigurationBox *)gf_malloc(sizeof(GF_DOVIConfigurationBox)); if (tmp == NULL) return NULL; memset(tmp, 0, sizeof(GF_DOVIConfigurationBox)); tmp->type = GF_ISOM_BOX_TYPE_DVCC; return (GF_Box *)tmp; } void dvcC_box_del(GF_Box *s) { GF_DOVIConfigurationBox *ptr = (GF_DOVIConfigurationBox*)s; gf_free(ptr); } GF_Err dvcC_box_read(GF_Box *s, GF_BitStream *bs) { u32 i; u32 data[5]; GF_DOVIConfigurationBox *ptr = (GF_DOVIConfigurationBox *)s; //GF_DOVIDecoderConfigurationRecord ISOM_DECREASE_SIZE(ptr, 24) ptr->DOVIConfig.dv_version_major = gf_bs_read_u8(bs); ptr->DOVIConfig.dv_version_minor = gf_bs_read_u8(bs); ptr->DOVIConfig.dv_profile = gf_bs_read_int(bs, 7); ptr->DOVIConfig.dv_level = gf_bs_read_int(bs, 6); ptr->DOVIConfig.rpu_present_flag = gf_bs_read_int(bs, 1); ptr->DOVIConfig.el_present_flag = gf_bs_read_int(bs, 1); ptr->DOVIConfig.bl_present_flag = gf_bs_read_int(bs, 1); memset(data, 0, sizeof(u32)*5); gf_bs_read_data(bs, (char*)data, 20); for (i = 0; i < 5; ++i) { if (data[i] != 0) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[iso file] dvcC reserved bytes are not zero\n")); //return GF_ISOM_INVALID_FILE; } } return GF_OK; 
} #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dvcC_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_DOVIConfigurationBox *ptr = (GF_DOVIConfigurationBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; //GF_DOVIDecoderConfigurationRecord gf_bs_write_u8(bs, ptr->DOVIConfig.dv_version_major); gf_bs_write_u8(bs, ptr->DOVIConfig.dv_version_minor); gf_bs_write_int(bs, ptr->DOVIConfig.dv_profile, 7); gf_bs_write_int(bs, ptr->DOVIConfig.dv_level, 6); gf_bs_write_int(bs, ptr->DOVIConfig.rpu_present_flag, 1); gf_bs_write_int(bs, ptr->DOVIConfig.el_present_flag, 1); gf_bs_write_int(bs, ptr->DOVIConfig.bl_present_flag, 1); gf_bs_write_u32(bs, 0); gf_bs_write_u32(bs, 0); gf_bs_write_u32(bs, 0); gf_bs_write_u32(bs, 0); gf_bs_write_u32(bs, 0); return GF_OK; } GF_Err dvcC_box_size(GF_Box *s) { GF_DOVIConfigurationBox *ptr = (GF_DOVIConfigurationBox *)s; ptr->size += 24; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *dOps_box_new() { ISOM_DECL_BOX_ALLOC(GF_OpusSpecificBox, GF_ISOM_BOX_TYPE_DOPS); return (GF_Box *)tmp; } void dOps_box_del(GF_Box *s) { GF_OpusSpecificBox *ptr = (GF_OpusSpecificBox *)s; if (ptr) gf_free(ptr); } GF_Err dOps_box_read(GF_Box *s, GF_BitStream *bs) { GF_OpusSpecificBox *ptr = (GF_OpusSpecificBox *)s; ptr->version = gf_bs_read_u8(bs); ptr->OutputChannelCount = gf_bs_read_u8(bs); ptr->PreSkip = gf_bs_read_u16(bs); ptr->InputSampleRate = gf_bs_read_u32(bs); ptr->OutputGain = gf_bs_read_u16(bs); ptr->ChannelMappingFamily = gf_bs_read_u8(bs); ISOM_DECREASE_SIZE(ptr, 11) if (ptr->size) { ISOM_DECREASE_SIZE(ptr, 2+ptr->OutputChannelCount); ptr->StreamCount = gf_bs_read_u8(bs); ptr->CoupledCount = gf_bs_read_u8(bs); gf_bs_read_data(bs, (char *) ptr->ChannelMapping, ptr->OutputChannelCount); } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dOps_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_OpusSpecificBox *ptr = (GF_OpusSpecificBox *)s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->version); gf_bs_write_u8(bs, ptr->OutputChannelCount); gf_bs_write_u16(bs, ptr->PreSkip); gf_bs_write_u32(bs, ptr->InputSampleRate); gf_bs_write_u16(bs, ptr->OutputGain); gf_bs_write_u8(bs, ptr->ChannelMappingFamily); if (ptr->ChannelMappingFamily) { gf_bs_write_u8(bs, ptr->StreamCount); gf_bs_write_u8(bs, ptr->CoupledCount); gf_bs_write_data(bs, (char *) ptr->ChannelMapping, ptr->OutputChannelCount); } return GF_OK; } GF_Err dOps_box_size(GF_Box *s) { GF_OpusSpecificBox *ptr = (GF_OpusSpecificBox *)s; ptr->size += 11; if (ptr->ChannelMappingFamily) ptr->size += 2 + ptr->OutputChannelCount; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void dfla_box_del(GF_Box *s) { GF_FLACConfigBox *ptr = (GF_FLACConfigBox *) s; if (ptr->data) gf_free(ptr->data); gf_free(ptr); } GF_Err dfla_box_read(GF_Box *s,GF_BitStream *bs) { GF_FLACConfigBox *ptr = (GF_FLACConfigBox *) s; ptr->dataSize = (u32) ptr->size; ptr->size=0; ptr->data = gf_malloc(ptr->dataSize); gf_bs_read_data(bs, ptr->data, ptr->dataSize); return GF_OK; } GF_Box *dfla_box_new() { ISOM_DECL_BOX_ALLOC(GF_FLACConfigBox, GF_ISOM_BOX_TYPE_DFLA); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err dfla_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_FLACConfigBox *ptr = (GF_FLACConfigBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_data(bs, ptr->data, ptr->dataSize); return GF_OK; } GF_Err dfla_box_size(GF_Box *s) { GF_FLACConfigBox *ptr = (GF_FLACConfigBox *) s; ptr->size += ptr->dataSize; 
return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void mvcg_box_del(GF_Box *s) { GF_MultiviewGroupBox *ptr = (GF_MultiviewGroupBox *) s; if (ptr->entries) gf_free(ptr->entries); gf_free(ptr); } GF_Err mvcg_box_read(GF_Box *s,GF_BitStream *bs) { u32 i; GF_MultiviewGroupBox *ptr = (GF_MultiviewGroupBox *) s; ISOM_DECREASE_SIZE(s, 7) ptr->multiview_group_id = gf_bs_read_u32(bs); ptr->num_entries = gf_bs_read_u16(bs); gf_bs_read_u8(bs); ptr->entries = gf_malloc(ptr->num_entries * sizeof(MVCIEntry)); memset(ptr->entries, 0, ptr->num_entries * sizeof(MVCIEntry)); for (i=0; i<ptr->num_entries; i++) { ISOM_DECREASE_SIZE(s, 1) ptr->entries[i].entry_type = gf_bs_read_u8(bs); switch (ptr->entries[i].entry_type) { case 0: ISOM_DECREASE_SIZE(s, 4) ptr->entries[i].trackID = gf_bs_read_u32(bs); break; case 1: ISOM_DECREASE_SIZE(s, 6) ptr->entries[i].trackID = gf_bs_read_u32(bs); ptr->entries[i].tierID = gf_bs_read_u16(bs); break; case 2: ISOM_DECREASE_SIZE(s, 2) gf_bs_read_int(bs, 6); ptr->entries[i].output_view_id = gf_bs_read_int(bs, 10); break; case 3: ISOM_DECREASE_SIZE(s, 4) gf_bs_read_int(bs, 6) ; ptr->entries[i].start_view_id = gf_bs_read_int(bs, 10); ptr->entries[i].view_count = gf_bs_read_u16(bs); break; } } return gf_isom_box_array_read(s, bs, NULL); } GF_Box *mvcg_box_new() { ISOM_DECL_BOX_ALLOC(GF_MultiviewGroupBox, GF_ISOM_BOX_TYPE_MVCG); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err mvcg_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i; GF_MultiviewGroupBox *ptr = (GF_MultiviewGroupBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->multiview_group_id); gf_bs_write_u16(bs, ptr->num_entries); gf_bs_write_u8(bs, 0); for (i=0; i<ptr->num_entries; i++) { gf_bs_write_u8(bs, ptr->entries[i].entry_type); switch (ptr->entries[i].entry_type) { case 0: gf_bs_write_u32(bs, ptr->entries[i].trackID); break; case 1: gf_bs_write_u32(bs, ptr->entries[i].trackID); gf_bs_write_u16(bs, ptr->entries[i].tierID); break; case 2: gf_bs_write_int(bs, 0, 6); gf_bs_write_int(bs, ptr->entries[i].output_view_id, 10); break; case 3: gf_bs_write_int(bs, 0, 6) ; gf_bs_write_int(bs, ptr->entries[i].start_view_id, 10); gf_bs_write_u16(bs, ptr->entries[i].view_count); break; } } return GF_OK; } GF_Err mvcg_box_size(GF_Box *s) { u32 i; GF_MultiviewGroupBox *ptr = (GF_MultiviewGroupBox *) s; ptr->size += 7; for (i=0; i<ptr->num_entries; i++) { switch (ptr->entries[i].entry_type) { case 0: ptr->size += 1 + 4; break; case 1: ptr->size += 1 + 6; break; case 2: ptr->size += 1 + 2; break; case 3: ptr->size += 1 + 4; break; } } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void vwid_box_del(GF_Box *s) { u32 i; GF_ViewIdentifierBox *ptr = (GF_ViewIdentifierBox *) s; if (ptr->views) { for (i=0; i<ptr->num_views; i++) { if (ptr->views[i].view_refs) gf_free(ptr->views[i].view_refs); } gf_free(ptr->views); } gf_free(ptr); } GF_Err vwid_box_read(GF_Box *s,GF_BitStream *bs) { u32 i; GF_ViewIdentifierBox *ptr = (GF_ViewIdentifierBox *) s; ISOM_DECREASE_SIZE(s, 3) gf_bs_read_int(bs, 2); ptr->min_temporal_id = gf_bs_read_int(bs, 3); ptr->max_temporal_id = gf_bs_read_int(bs, 3); ptr->num_views = gf_bs_read_u16(bs); if (6 * ptr->num_views > ptr->size) return GF_ISOM_INVALID_FILE; ptr->views = gf_malloc(sizeof(ViewIDEntry)*ptr->num_views); for (i=0; i<ptr->num_views; i++) { u32 j; ISOM_DECREASE_SIZE(s, 6) gf_bs_read_int(bs, 6); ptr->views[i].view_id = gf_bs_read_int(bs, 10); gf_bs_read_int(bs, 6); ptr->views[i].view_order_index = gf_bs_read_int(bs, 10); 
ptr->views[i].texture_in_stream = gf_bs_read_int(bs, 1); ptr->views[i].texture_in_track = gf_bs_read_int(bs, 1); ptr->views[i].depth_in_stream = gf_bs_read_int(bs, 1); ptr->views[i].depth_in_track = gf_bs_read_int(bs, 1); ptr->views[i].base_view_type = gf_bs_read_int(bs, 2); ptr->views[i].num_ref_views = gf_bs_read_int(bs, 10); if (2 * ptr->views[i].num_ref_views > ptr->size) return GF_ISOM_INVALID_FILE; ptr->views[i].view_refs = gf_malloc(sizeof(ViewIDRefViewEntry)*ptr->views[i].num_ref_views); for (j=0; j<ptr->views[i].num_ref_views; j++) { ISOM_DECREASE_SIZE(s, 2) gf_bs_read_int(bs, 4); ptr->views[i].view_refs[j].dep_comp_idc = gf_bs_read_int(bs, 2); ptr->views[i].view_refs[j].ref_view_id = gf_bs_read_int(bs, 10); } } return GF_OK; } GF_Box *vwid_box_new() { ISOM_DECL_BOX_ALLOC(GF_ViewIdentifierBox, GF_ISOM_BOX_TYPE_VWID); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err vwid_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 i, j; GF_ViewIdentifierBox *ptr = (GF_ViewIdentifierBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_int(bs, 0, 2); gf_bs_write_int(bs, ptr->min_temporal_id, 3); gf_bs_write_int(bs, ptr->max_temporal_id, 3); gf_bs_write_u16(bs, ptr->num_views); for (i=0; i<ptr->num_views; i++) { gf_bs_write_int(bs, 0, 6); gf_bs_write_int(bs, ptr->views[i].view_id, 10); gf_bs_write_int(bs, 0, 6); gf_bs_write_int(bs, ptr->views[i].view_order_index, 10); gf_bs_write_int(bs, ptr->views[i].texture_in_stream, 1); gf_bs_write_int(bs, ptr->views[i].texture_in_track, 1); gf_bs_write_int(bs, ptr->views[i].depth_in_stream, 1); gf_bs_write_int(bs, ptr->views[i].depth_in_track, 1); gf_bs_write_int(bs, ptr->views[i].base_view_type, 2); gf_bs_write_int(bs, ptr->views[i].num_ref_views, 10); for (j=0; j<ptr->views[i].num_ref_views; j++) { gf_bs_write_int(bs, 0, 4); gf_bs_write_int(bs, ptr->views[i].view_refs[j].dep_comp_idc, 2); gf_bs_write_int(bs, ptr->views[i].view_refs[j].ref_view_id, 10); } } return GF_OK; } GF_Err vwid_box_size(GF_Box *s) { u32 i; GF_ViewIdentifierBox *ptr = (GF_ViewIdentifierBox *) s; ptr->size += 3; for (i=0; i<ptr->num_views; i++) { ptr->size += 6 + 2 * ptr->views[i].num_ref_views; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void pcmC_box_del(GF_Box *s) { gf_free(s); } GF_Err pcmC_box_read(GF_Box *s,GF_BitStream *bs) { GF_PCMConfigBox *ptr = (GF_PCMConfigBox *) s; ISOM_DECREASE_SIZE(s, 2) ptr->format_flags = gf_bs_read_u8(bs); ptr->PCM_sample_size = gf_bs_read_u8(bs); return GF_OK; } GF_Box *pcmC_box_new() { ISOM_DECL_BOX_ALLOC(GF_PCMConfigBox, GF_ISOM_BOX_TYPE_PCMC); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err pcmC_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_PCMConfigBox *ptr = (GF_PCMConfigBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->format_flags); gf_bs_write_u8(bs, ptr->PCM_sample_size); return GF_OK; } GF_Err pcmC_box_size(GF_Box *s) { s->size += 2; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void chnl_box_del(GF_Box *s) { gf_free(s); } GF_Err chnl_box_read(GF_Box *s,GF_BitStream *bs) { GF_ChannelLayoutBox *ptr = (GF_ChannelLayoutBox *) s; ISOM_DECREASE_SIZE(s, 1) ptr->layout.stream_structure = gf_bs_read_u8(bs); if (ptr->layout.stream_structure & 1) { ISOM_DECREASE_SIZE(s, 1) ptr->layout.definedLayout = gf_bs_read_u8(bs); if (ptr->layout.definedLayout) { u32 remain = (u32) ptr->size; if (ptr->layout.stream_structure & 2) remain--; ptr->layout.channels_count = 0; while (remain) { ISOM_DECREASE_SIZE(s, 1) 
ptr->layout.layouts[ptr->layout.channels_count].position = gf_bs_read_u8(bs); remain--; if (ptr->layout.layouts[ptr->layout.channels_count].position == 126) { ISOM_DECREASE_SIZE(s, 3) ptr->layout.layouts[ptr->layout.channels_count].azimuth = gf_bs_read_int(bs, 16); ptr->layout.layouts[ptr->layout.channels_count].elevation = gf_bs_read_int(bs, 8); remain-=3; } } } else { ISOM_DECREASE_SIZE(s, 8) ptr->layout.omittedChannelsMap = gf_bs_read_u64(bs); } } if (ptr->layout.stream_structure & 2) { ISOM_DECREASE_SIZE(s, 1) ptr->layout.object_count = gf_bs_read_u8(bs); } return GF_OK; } GF_Box *chnl_box_new() { ISOM_DECL_BOX_ALLOC(GF_ChannelLayoutBox, GF_ISOM_BOX_TYPE_CHNL); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err chnl_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_ChannelLayoutBox *ptr = (GF_ChannelLayoutBox *) s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->layout.stream_structure); if (ptr->layout.stream_structure & 1) { gf_bs_write_u8(bs, ptr->layout.definedLayout); if (ptr->layout.definedLayout==0) { u32 i; for (i=0; i<ptr->layout.channels_count; i++) { gf_bs_write_u8(bs, ptr->layout.layouts[i].position); if (ptr->layout.layouts[i].position==126) { gf_bs_write_int(bs, ptr->layout.layouts[i].azimuth, 16); gf_bs_write_int(bs, ptr->layout.layouts[i].elevation, 8); } } } else { gf_bs_write_u64(bs, ptr->layout.omittedChannelsMap); } } if (ptr->layout.stream_structure & 2) { gf_bs_write_u8(bs, ptr->layout.object_count); } return GF_OK; } GF_Err chnl_box_size(GF_Box *s) { GF_ChannelLayoutBox *ptr = (GF_ChannelLayoutBox *) s; s->size += 1; if (ptr->layout.stream_structure & 1) { s->size += 1; if (ptr->layout.definedLayout==0) { u32 i; for (i=0; i<ptr->layout.channels_count; i++) { s->size+=1; if (ptr->layout.layouts[i].position==126) s->size+=3; } } else { s->size += 8; } } if (ptr->layout.stream_structure & 2) { s->size += 1; } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *emsg_box_new() { ISOM_DECL_BOX_ALLOC(GF_EventMessageBox, GF_ISOM_BOX_TYPE_EMSG); return (GF_Box *)tmp; } void emsg_box_del(GF_Box *s) { GF_EventMessageBox *ptr = (GF_EventMessageBox *) s; if (ptr == NULL) return; if (ptr->scheme_id_uri) gf_free(ptr->scheme_id_uri); if (ptr->value) gf_free(ptr->value); if (ptr->message_data) gf_free(ptr->message_data); gf_free(ptr); } GF_Err emsg_box_read(GF_Box *s,GF_BitStream *bs) { GF_Err e; GF_EventMessageBox *ptr = (GF_EventMessageBox*) s; if (ptr->version==0) { e = gf_isom_read_null_terminated_string(s, bs, ptr->size, &ptr->scheme_id_uri); if (e) return e; e = gf_isom_read_null_terminated_string(s, bs, ptr->size, &ptr->value); if (e) return e; ISOM_DECREASE_SIZE(ptr, 16); ptr->timescale = gf_bs_read_u32(bs); ptr->presentation_time_delta = gf_bs_read_u32(bs); ptr->event_duration = gf_bs_read_u32(bs); ptr->event_id = gf_bs_read_u32(bs); } else if (ptr->version==1) { ISOM_DECREASE_SIZE(ptr, 20); ptr->timescale = gf_bs_read_u32(bs); ptr->presentation_time_delta = gf_bs_read_u64(bs); ptr->event_duration = gf_bs_read_u32(bs); ptr->event_id = gf_bs_read_u32(bs); e = gf_isom_read_null_terminated_string(s, bs, ptr->size, &ptr->scheme_id_uri); if (e) return e; e = gf_isom_read_null_terminated_string(s, bs, ptr->size, &ptr->value); if (e) return e; } else { return GF_OK; } if (ptr->size) { if (ptr->size>0xFFFFFFFUL) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[IsoMedia] emsg message data size too big ("LLU") to be loaded\n", ptr->size)); return GF_OUT_OF_MEM; } ptr->message_data_size = (u32) ptr->size; ptr->message_data = 
gf_malloc(ptr->message_data_size); if (!ptr->message_data) return GF_OUT_OF_MEM; gf_bs_read_data(bs, ptr->message_data, ptr->message_data_size); ptr->size = 0; } return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err emsg_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u32 len; GF_EventMessageBox *ptr = (GF_EventMessageBox*) s; e = gf_isom_full_box_write(s, bs); if (e) return e; if (ptr->version==1) { gf_bs_write_u32(bs, ptr->timescale); gf_bs_write_u64(bs, ptr->presentation_time_delta); gf_bs_write_u32(bs, ptr->event_duration); gf_bs_write_u32(bs, ptr->event_id); } len = ptr->scheme_id_uri ? (u32) strlen(ptr->scheme_id_uri) : 0; if (len) gf_bs_write_data(bs, ptr->scheme_id_uri, len); gf_bs_write_u8(bs, 0); len = ptr->value ? (u32) strlen(ptr->value) : 0; if (len) gf_bs_write_data(bs, ptr->value, len); gf_bs_write_u8(bs, 0); if (ptr->version==0) { gf_bs_write_u32(bs, ptr->timescale); gf_bs_write_u32(bs, (u32) ptr->presentation_time_delta); gf_bs_write_u32(bs, ptr->event_duration); gf_bs_write_u32(bs, ptr->event_id); } if (ptr->message_data) gf_bs_write_data(bs, ptr->message_data, ptr->message_data_size); return GF_OK; } GF_Err emsg_box_size(GF_Box *s) { GF_EventMessageBox *ptr = (GF_EventMessageBox*) s; ptr->size += 4; if (ptr->version) { ptr->size += 20; } else { ptr->size += 16; } ptr->size+=2; //1 NULL-terminated strings if (ptr->scheme_id_uri) ptr->size += strlen(ptr->scheme_id_uri); if (ptr->value) ptr->size += strlen(ptr->value); if (ptr->message_data) ptr->size += ptr->message_data_size; return GF_OK; } #endif // GPAC_DISABLE_ISOM_WRITE #endif /*GPAC_DISABLE_ISOM*/
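The box readers in the listing above share one defensive idiom: every fixed-size field read is preceded by ISOM_DECREASE_SIZE(), which fails the parse if fewer bytes remain in the box than the read needs and otherwise subtracts them, and count-prefixed tables (as in fecr_box_read() and segr_box_read()) additionally check count * entry_size against the remaining payload before allocating. The following is a minimal, self-contained sketch of that pattern only; box_reader_t, box_decrease_size(), box_read_u32() and read_entry_table() are invented names for illustration and are not the GPAC GF_BitStream/GF_Box API.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the bitstream/box state. */
typedef struct {
    const uint8_t *buf;  /* box payload          */
    uint64_t pos;        /* read cursor          */
    uint64_t size;       /* unread payload bytes */
} box_reader_t;

/* Same role as ISOM_DECREASE_SIZE(): refuse the read if fewer than n bytes
 * remain, otherwise account for them before they are consumed. */
static int box_decrease_size(box_reader_t *r, uint64_t n)
{
    if (r->size < n) return -1;
    r->size -= n;
    return 0;
}

static uint32_t box_read_u32(box_reader_t *r)
{
    const uint8_t *p = r->buf + r->pos;
    r->pos += 4;
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8)  | p[3];
}

/* Count-prefixed entry table with the same guards the box readers apply:
 * validate the count, then validate count * entry_size against the payload
 * still available before touching any entry. */
static int read_entry_table(box_reader_t *r, uint32_t *out, uint32_t max_entries)
{
    uint32_t i, count;
    if (box_decrease_size(r, 4)) return -1;
    count = box_read_u32(r);
    if (count > max_entries) return -1;                      /* cap allocation   */
    if (box_decrease_size(r, (uint64_t)count * 4)) return -1; /* cap total bytes */
    for (i = 0; i < count; i++)
        out[i] = box_read_u32(r);
    return (int)count;
}

int main(void)
{
    /* count = 2, entries = {7, 9}: payload is exactly 12 bytes */
    const uint8_t payload[] = { 0,0,0,2, 0,0,0,7, 0,0,0,9 };
    uint32_t entries[4];
    box_reader_t r = { payload, 0, sizeof(payload) };
    printf("read %d entries\n", read_entry_table(&r, entries, 4)); /* read 2 entries */
    return 0;
}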
null
272
CWE-787
CVE-2021-32272
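CWE-787 labels an out-of-bounds write: a length or index derived from untrusted input is used to write past the end (or before the start) of a buffer. As a generic illustration of the guard that prevents this class of bug when copying a length-prefixed field into a fixed destination, here is a minimal sketch; it is not taken from either code listing in this row, and copy_field(), FIELD_MAX and the sample input are all invented names.

#include <stdint.h>
#include <string.h>

#define FIELD_MAX 64

/* Copy a length-prefixed field from src into a fixed-size destination.
 * Rejecting lengths larger than the destination, and larger than what the
 * input actually provides, is what keeps the memcpy in bounds. */
static int copy_field(uint8_t dst[FIELD_MAX], const uint8_t *src, size_t src_len)
{
    size_t field_len;
    if (src_len < 1) return -1;
    field_len = src[0];                     /* untrusted length byte */
    if (field_len > FIELD_MAX) return -1;   /* would overflow dst    */
    if (field_len > src_len - 1) return -1; /* would overread src    */
    memcpy(dst, src + 1, field_len);
    return (int)field_len;
}

int main(void)
{
    const uint8_t msg[] = { 3, 'a', 'b', 'c' };  /* length byte + 3 payload bytes */
    uint8_t out[FIELD_MAX];
    return copy_field(out, msg, sizeof(msg)) == 3 ? 0 : 1;
}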
/**************************************************************************** MP4 input module Copyright (C) 2017 Krzysztof Nikiel This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ****************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <stdint.h> #include <string.h> #include <time.h> #include <limits.h> #include "unicode_support.h" #include "mp4read.h" enum ATOM_TYPE { ATOM_STOP = 0 /* end of atoms */ , ATOM_NAME /* plain atom */ , ATOM_DESCENT, /* starts group of children */ ATOM_ASCENT, /* ends group */ ATOM_DATA, }; typedef struct { uint16_t opcode; void *data; } creator_t; mp4config_t mp4config = { 0 }; static FILE *g_fin = NULL; static inline uint32_t bswap32(const uint32_t u32) { #ifndef WORDS_BIGENDIAN #if defined (__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3))) return __builtin_bswap32(u32); #elif defined (_MSC_VER) return _byteswap_ulong(u32); #else return (u32 << 24) | ((u32 << 8) & 0xFF0000) | ((u32 >> 8) & 0xFF00) | (u32 >> 24); #endif #else return u32; #endif } static inline uint16_t bswap16(const uint16_t u16) { #ifndef WORDS_BIGENDIAN #if defined (__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 8))) return __builtin_bswap16(u16); #elif defined (_MSC_VER) return _byteswap_ushort(u16); #else return (u16 << 8) | (u16 >> 8); #endif #else return u16; #endif } enum {ERR_OK = 0, ERR_FAIL = -1, ERR_UNSUPPORTED = -2}; static int datain(void *data, int size) { if (fread(data, 1, size, g_fin) != size) return ERR_FAIL; return size; } static int stringin(char *txt, int sizemax) { int size; for (size = 0; size < sizemax; size++) { if (fread(txt + size, 1, 1, g_fin) != 1) return ERR_FAIL; if (!txt[size]) break; } txt[sizemax-1] = '\0'; return size; } static uint32_t u32in(void) { uint32_t u32; datain(&u32, 4); u32 = bswap32(u32); return u32; } static uint16_t u16in(void) { uint16_t u16; datain(&u16, 2); u16 = bswap16(u16); return u16; } static int u8in(void) { uint8_t u8; datain(&u8, 1); return u8; } static int ftypin(int size) { enum {BUFSIZE = 40}; char buf[BUFSIZE]; uint32_t u32; buf[4] = 0; datain(buf, 4); u32 = u32in(); if (mp4config.verbose.header) fprintf(stderr, "Brand:\t\t\t%s(version %d)\n", buf, u32); stringin(buf, BUFSIZE); if (mp4config.verbose.header) fprintf(stderr, "Compatible brands:\t%s\n", buf); return size; } enum { SECSINDAY = 24 * 60 * 60 }; static char *mp4time(time_t t) { int y; // subtract some seconds from the start of 1904 to the start of 1970 for (y = 1904; y < 1970; y++) { t -= 365 * SECSINDAY; if (!(y & 3)) t -= SECSINDAY; } return ctime(&t); } static int mdhdin(int size) { // version/flags u32in(); // Creation time mp4config.ctime = u32in(); // Modification time mp4config.mtime = u32in(); // Time scale mp4config.samplerate = u32in(); // Duration mp4config.samples = u32in(); // Language u16in(); // pre_defined u16in(); return size; }; static int hdlr1in(int size) { uint8_t buf[5]; buf[4] = 0; 
// version/flags u32in(); // pre_defined u32in(); // Component subtype datain(buf, 4); if (mp4config.verbose.header) fprintf(stderr, "*track media type: '%s': ", buf); if (memcmp("soun", buf, 4)) { if (mp4config.verbose.header) fprintf(stderr, "unsupported, skipping\n"); return ERR_UNSUPPORTED; } else { if (mp4config.verbose.header) fprintf(stderr, "OK\n"); } // reserved u32in(); u32in(); u32in(); // name // null terminate u8in(); return size; }; static int stsdin(int size) { // version/flags u32in(); // Number of entries(one 'mp4a') if (u32in() != 1) //fixme: error handling return ERR_FAIL; return size; }; static int mp4ain(int size) { // Reserved (6 bytes) u32in(); u16in(); // Data reference index u16in(); // Version u16in(); // Revision level u16in(); // Vendor u32in(); // Number of channels mp4config.channels = u16in(); // Sample size (bits) mp4config.bits = u16in(); // Compression ID u16in(); // Packet size u16in(); // Sample rate (16.16) // fractional framerate, probably not for audio // rate integer part u16in(); // rate reminder part u16in(); return size; } static uint32_t getsize(void) { int cnt; uint32_t size = 0; for (cnt = 0; cnt < 4; cnt++) { int tmp = u8in(); size <<= 7; size |= (tmp & 0x7f); if (!(tmp & 0x80)) break; } return size; } static int esdsin(int size) { // descriptor tree: // MP4ES_Descriptor // MP4DecoderConfigDescriptor // MP4DecSpecificInfoDescriptor // MP4SLConfigDescriptor enum { TAG_ES = 3, TAG_DC = 4, TAG_DSI = 5, TAG_SLC = 6 }; // version/flags u32in(); if (u8in() != TAG_ES) return ERR_FAIL; getsize(); // ESID u16in(); // flags(url(bit 6); ocr(5); streamPriority (0-4)): u8in(); if (u8in() != TAG_DC) return ERR_FAIL; getsize(); if (u8in() != 0x40) /* not MPEG-4 audio */ return ERR_FAIL; // flags u8in(); // buffer size (24 bits) mp4config.buffersize = u16in() << 8; mp4config.buffersize |= u8in(); // bitrate mp4config.bitratemax = u32in(); mp4config.bitrateavg = u32in(); if (u8in() != TAG_DSI) return ERR_FAIL; mp4config.asc.size = getsize(); if (mp4config.asc.size > sizeof(mp4config.asc.buf)) return ERR_FAIL; // get AudioSpecificConfig datain(mp4config.asc.buf, mp4config.asc.size); if (u8in() != TAG_SLC) return ERR_FAIL; getsize(); // "predefined" (no idea) u8in(); return size; } static int sttsin(int size) { if (size < 16) //min stts size return ERR_FAIL; return size; } static int stszin(int size) { int cnt; uint32_t ofs; // version/flags u32in(); // Sample size u32in(); // Number of entries mp4config.frame.ents = u32in(); // fixme: check atom size mp4config.frame.data = malloc(sizeof(*mp4config.frame.data) * (mp4config.frame.ents + 1)); if (!mp4config.frame.data) return ERR_FAIL; ofs = 0; mp4config.frame.data[0] = ofs; for (cnt = 0; cnt < mp4config.frame.ents; cnt++) { uint32_t fsize = u32in(); ofs += fsize; if (mp4config.frame.maxsize < fsize) mp4config.frame.maxsize = fsize; mp4config.frame.data[cnt + 1] = ofs; if (ofs < mp4config.frame.data[cnt]) return ERR_FAIL; } return size; } static int stcoin(int size) { // version/flags u32in(); // Number of entries if (u32in() < 1) return ERR_FAIL; // first chunk offset mp4config.mdatofs = u32in(); // ignore the rest return size; } #if 0 static int tagtxt(char *tagname, const char *tagtxt) { //int txtsize = strlen(tagtxt); int size = 0; //int datasize = txtsize + 16; #if 0 size += u32out(datasize + 8); size += dataout(tagname, 4); size += u32out(datasize); size += dataout("data", 4); size += u32out(1); size += u32out(0); size += dataout(tagtxt, txtsize); #endif return size; } static int tagu32(char *tagname, int n 
/*number of stored fields*/) { //int numsize = n * 4; int size = 0; //int datasize = numsize + 16; #if 0 size += u32out(datasize + 8); size += dataout(tagname, 4); size += u32out(datasize); size += dataout("data", 4); size += u32out(0); size += u32out(0); #endif return size; } #endif static int metain(int size) { // version/flags u32in(); return ERR_OK; }; static int hdlr2in(int size) { uint8_t buf[4]; // version/flags u32in(); // Predefined u32in(); // Handler type datain(buf, 4); if (memcmp(buf, "mdir", 4)) return ERR_FAIL; datain(buf, 4); if (memcmp(buf, "appl", 4)) return ERR_FAIL; // Reserved u32in(); u32in(); // null terminator u8in(); return size; }; static int ilstin(int size) { enum {NUMSET = 1, GENRE, EXTAG}; int read = 0; static struct { char *name; char *id; int flag; } tags[] = { {"Album ", "\xa9" "alb"}, {"Album Artist", "aART"}, {"Artist ", "\xa9" "ART"}, {"Comment ", "\xa9" "cmt"}, {"Cover image ", "covr"}, {"Compilation ", "cpil"}, {"Copyright ", "cprt"}, {"Date ", "\xa9" "day"}, {"Disc# ", "disk", NUMSET}, {"Genre ", "gnre", GENRE}, {"Grouping ", "\xa9" "grp"}, {"Lyrics ", "\xa9" "lyr"}, {"Title ", "\xa9" "nam"}, {"Rating ", "rtng"}, {"BPM ", "tmpo"}, {"Encoder ", "\xa9" "too"}, {"Track ", "trkn", NUMSET}, {"Composer ", "\xa9" "wrt"}, {0, "----", EXTAG}, {0}, }; static const char *genres[] = { "Blues", "Classic Rock", "Country", "Dance", "Disco", "Funk", "Grunge", "Hip-Hop", "Jazz", "Metal", "New Age", "Oldies", "Other", "Pop", "R&B", "Rap", "Reggae", "Rock", "Techno", "Industrial", "Alternative", "Ska", "Death Metal", "Pranks", "Soundtrack", "Euro-Techno", "Ambient", "Trip-Hop", "Vocal", "Jazz+Funk", "Fusion", "Trance", "Classical", "Instrumental", "Acid", "House", "Game", "Sound Clip", "Gospel", "Noise", "Alternative Rock", "Bass", "Soul", "Punk", "Space", "Meditative", "Instrumental Pop", "Instrumental Rock", "Ethnic", "Gothic", "Darkwave", "Techno-Industrial", "Electronic", "Pop-Folk", "Eurodance", "Dream", "Southern Rock", "Comedy", "Cult", "Gangsta", "Top 40", "Christian Rap", "Pop/Funk", "Jungle", "Native US", "Cabaret", "New Wave", "Psychadelic", "Rave", "Showtunes", "Trailer", "Lo-Fi", "Tribal", "Acid Punk", "Acid Jazz", "Polka", "Retro", "Musical", "Rock & Roll", "Hard Rock", "Folk", "Folk-Rock", "National Folk", "Swing", "Fast Fusion", "Bebob", "Latin", "Revival", "Celtic", "Bluegrass", "Avantgarde", "Gothic Rock", "Progressive Rock", "Psychedelic Rock", "Symphonic Rock", "Slow Rock", "Big Band", "Chorus", "Easy Listening", "Acoustic", "Humour", "Speech", "Chanson", "Opera", "Chamber Music", "Sonata", "Symphony", "Booty Bass", "Primus", "Porn Groove", "Satire", "Slow Jam", "Club", "Tango", "Samba", "Folklore", "Ballad", "Power Ballad", "Rhythmic Soul", "Freestyle", "Duet", "Punk Rock", "Drum Solo", "Acapella", "Euro-House", "Dance Hall", "Goa", "Drum & Bass", "Club - House", "Hardcore", "Terror", "Indie", "BritPop", "Negerpunk", "Polsk Punk", "Beat", "Christian Gangsta Rap", "Heavy Metal", "Black Metal", "Crossover", "Contemporary Christian", "Christian Rock", "Merengue", "Salsa", "Thrash Metal", "Anime", "JPop", "Synthpop", "Unknown", }; fprintf(stderr, "----------tag list-------------\n"); while(read < size) { int asize, dsize; uint8_t id[5]; int cnt; uint32_t type; id[4] = 0; asize = u32in(); read += asize; asize -= 4; if (datain(id, 4) < 4) return ERR_FAIL; asize -= 4; for (cnt = 0; tags[cnt].id; cnt++) { if (!memcmp(id, tags[cnt].id, 4)) break; } if (tags[cnt].name) fprintf(stderr, "%s : ", tags[cnt].name); else { if (tags[cnt].flag != EXTAG) 
fprintf(stderr, "'%s' : ", id); } dsize = u32in(); asize -= 4; if (datain(id, 4) < 4) return ERR_FAIL; asize -= 4; if (tags[cnt].flag != EXTAG) { if (memcmp(id, "data", 4)) return ERR_FAIL; } else { int spc; if (memcmp(id, "mean", 4)) goto skip; dsize -= 8; while (dsize > 0) { u8in(); asize--; dsize--; } if (asize >= 8) { dsize = u32in() - 8; asize -= 4; if (datain(id, 4) < 4) return ERR_FAIL; asize -= 4; if (memcmp(id, "name", 4)) goto skip; u32in(); asize -= 4; dsize -= 4; } spc = 13 - dsize; if (spc < 0) spc = 0; while (dsize > 0) { fprintf(stderr, "%c",u8in()); asize--; dsize--; } while (spc--) fprintf(stderr, " "); fprintf(stderr, ": "); if (asize >= 8) { dsize = u32in() - 8; asize -= 4; if (datain(id, 4) < 4) return ERR_FAIL; asize -= 4; if (memcmp(id, "data", 4)) goto skip; u32in(); asize -= 4; dsize -= 4; } while (dsize > 0) { fprintf(stderr, "%c",u8in()); asize--; dsize--; } fprintf(stderr, "\n"); goto skip; } type = u32in(); asize -= 4; u32in(); asize -= 4; switch(type) { case 1: while (asize > 0) { fprintf(stderr, "%c",u8in()); asize--; } break; case 0: switch(tags[cnt].flag) { case NUMSET: u16in(); asize -= 2; fprintf(stderr, "%d", u16in()); asize -= 2; fprintf(stderr, "/%d", u16in()); asize -= 2; break; case GENRE: { uint8_t gnum = u16in(); asize -= 2; if (!gnum) goto skip; gnum--; if (gnum >= 147) gnum = 147; fprintf(stderr, "%s", genres[gnum]); } break; default: while(asize > 0) { fprintf(stderr, "%d/", u16in()); asize-=2; } } break; case 0x15: //fprintf(stderr, "(8bit data)"); while(asize > 0) { fprintf(stderr, "%d", u8in()); asize--; if (asize) fprintf(stderr, "/"); } break; case 0xd: fprintf(stderr, "(image data)"); break; default: fprintf(stderr, "(unknown data type)"); break; } fprintf(stderr, "\n"); skip: // skip to the end of atom while (asize > 0) { u8in(); asize--; } } fprintf(stderr, "-------------------------------\n"); return size; }; static creator_t *g_atom = 0; static int parse(uint32_t *sizemax) { long apos = 0; long aposmax = ftell(g_fin) + *sizemax; uint32_t size; if (g_atom->opcode != ATOM_NAME) { fprintf(stderr, "parse error: root is not a 'name' opcode\n"); return ERR_FAIL; } //fprintf(stderr, "looking for '%s'\n", (char *)g_atom->data); // search for atom in the file while (1) { char name[4]; uint32_t tmp; apos = ftell(g_fin); if (apos >= (aposmax - 8)) { fprintf(stderr, "parse error: atom '%s' not found\n", (char *)g_atom->data); return ERR_FAIL; } if ((tmp = u32in()) < 8) { fprintf(stderr, "invalid atom size %x @%lx\n", tmp, ftell(g_fin)); return ERR_FAIL; } size = tmp; if (datain(name, 4) != 4) { // EOF fprintf(stderr, "can't read atom name @%lx\n", ftell(g_fin)); return ERR_FAIL; } //fprintf(stderr, "atom: '%c%c%c%c'(%x)", name[0],name[1],name[2],name[3], size); if (!memcmp(name, g_atom->data, 4)) { //fprintf(stderr, "OK\n"); break; } //fprintf(stderr, "\n"); fseek(g_fin, apos + size, SEEK_SET); } *sizemax = size; g_atom++; if (g_atom->opcode == ATOM_DATA) { int err = ((int (*)(int)) g_atom->data)(size - 8); if (err < ERR_OK) { fseek(g_fin, apos + size, SEEK_SET); return err; } g_atom++; } if (g_atom->opcode == ATOM_DESCENT) { long apos = ftell(g_fin);; //fprintf(stderr, "descent\n"); g_atom++; while (g_atom->opcode != ATOM_STOP) { uint32_t subsize = size - 8; int ret; if (g_atom->opcode == ATOM_ASCENT) { g_atom++; break; } fseek(g_fin, apos, SEEK_SET); if ((ret = parse(&subsize)) < 0) return ret; } //fprintf(stderr, "ascent\n"); } fseek(g_fin, apos + size, SEEK_SET); return ERR_OK; } static int moovin(int sizemax) { long apos = ftell(g_fin); 
uint32_t atomsize; creator_t *old_atom = g_atom; int err, ret = sizemax; static creator_t mvhd[] = { {ATOM_NAME, "mvhd"}, {0} }; static creator_t trak[] = { {ATOM_NAME, "trak"}, {ATOM_DESCENT}, {ATOM_NAME, "tkhd"}, {ATOM_NAME, "mdia"}, {ATOM_DESCENT}, {ATOM_NAME, "mdhd"}, {ATOM_DATA, mdhdin}, {ATOM_NAME, "hdlr"}, {ATOM_DATA, hdlr1in}, {ATOM_NAME, "minf"}, {ATOM_DESCENT}, {ATOM_NAME, "smhd"}, {ATOM_NAME, "dinf"}, {ATOM_NAME, "stbl"}, {ATOM_DESCENT}, {ATOM_NAME, "stsd"}, {ATOM_DATA, stsdin}, {ATOM_DESCENT}, {ATOM_NAME, "mp4a"}, {ATOM_DATA, mp4ain}, {ATOM_DESCENT}, {ATOM_NAME, "esds"}, {ATOM_DATA, esdsin}, {ATOM_ASCENT}, {ATOM_ASCENT}, {ATOM_NAME, "stts"}, {ATOM_DATA, sttsin}, {ATOM_NAME, "stsc"}, {ATOM_NAME, "stsz"}, {ATOM_DATA, stszin}, {ATOM_NAME, "stco"}, {ATOM_DATA, stcoin}, {0} }; g_atom = mvhd; atomsize = sizemax + apos - ftell(g_fin); if (parse(&atomsize) < 0) { g_atom = old_atom; return ERR_FAIL; } fseek(g_fin, apos, SEEK_SET); while (1) { //fprintf(stderr, "TRAK\n"); g_atom = trak; atomsize = sizemax + apos - ftell(g_fin); if (atomsize < 8) break; //fprintf(stderr, "PARSE(%x)\n", atomsize); err = parse(&atomsize); //fprintf(stderr, "SIZE: %x/%x\n", atomsize, sizemax); if (err >= 0) break; if (err != ERR_UNSUPPORTED) { ret = err; break; } //fprintf(stderr, "UNSUPP\n"); } g_atom = old_atom; return ret; } static creator_t g_head[] = { {ATOM_NAME, "ftyp"}, {ATOM_DATA, ftypin}, {0} }; static creator_t g_moov[] = { {ATOM_NAME, "moov"}, {ATOM_DATA, moovin}, //{ATOM_DESCENT}, //{ATOM_NAME, "mvhd"}, {0} }; static creator_t g_meta1[] = { {ATOM_NAME, "moov"}, {ATOM_DESCENT}, {ATOM_NAME, "udta"}, {ATOM_DESCENT}, {ATOM_NAME, "meta"}, {ATOM_DATA, metain}, {ATOM_DESCENT}, {ATOM_NAME, "hdlr"}, {ATOM_DATA, hdlr2in}, {ATOM_NAME, "ilst"}, {ATOM_DATA, ilstin}, {0} }; static creator_t g_meta2[] = { {ATOM_NAME, "meta"}, {ATOM_DATA, metain}, {ATOM_DESCENT}, {ATOM_NAME, "hdlr"}, {ATOM_DATA, hdlr2in}, {ATOM_NAME, "ilst"}, {ATOM_DATA, ilstin}, {0} }; int mp4read_frame(void) { if (mp4config.frame.current >= mp4config.frame.ents) return ERR_FAIL; mp4config.bitbuf.size = mp4config.frame.data[mp4config.frame.current + 1] - mp4config.frame.data[mp4config.frame.current]; if (fread(mp4config.bitbuf.data, 1, mp4config.bitbuf.size, g_fin) != mp4config.bitbuf.size) { fprintf(stderr, "can't read frame data(frame %d@0x%x)\n", mp4config.frame.current, mp4config.frame.data[mp4config.frame.current]); return ERR_FAIL; } mp4config.frame.current++; return ERR_OK; } int mp4read_seek(int framenum) { if (framenum > mp4config.frame.ents) return ERR_FAIL; if (fseek(g_fin, mp4config.mdatofs + mp4config.frame.data[framenum], SEEK_SET)) return ERR_FAIL; mp4config.frame.current = framenum; return ERR_OK; } static void mp4info(void) { fprintf(stderr, "Modification Time:\t\t%s\n", mp4time(mp4config.mtime)); fprintf(stderr, "Samplerate:\t\t%d\n", mp4config.samplerate); fprintf(stderr, "Total samples:\t\t%d\n", mp4config.samples); fprintf(stderr, "Total channels:\t\t%d\n", mp4config.channels); fprintf(stderr, "Bits per sample:\t%d\n", mp4config.bits); fprintf(stderr, "Buffer size:\t\t%d\n", mp4config.buffersize); fprintf(stderr, "Max bitrate:\t\t%d\n", mp4config.bitratemax); fprintf(stderr, "Average bitrate:\t%d\n", mp4config.bitrateavg); fprintf(stderr, "Samples per frame:\t%d\n", mp4config.framesamples); fprintf(stderr, "Frames:\t\t\t%d\n", mp4config.frame.ents); fprintf(stderr, "ASC size:\t\t%d\n", mp4config.asc.size); fprintf(stderr, "Duration:\t\t%.1f sec\n", (float)mp4config.samples/mp4config.samplerate); fprintf(stderr, "Data 
offset/size:\t%x/%x\n", mp4config.mdatofs, mp4config.mdatsize); } int mp4read_close(void) { #define FREE(x) if(x){free(x);x=0;} FREE(mp4config.frame.data); FREE(mp4config.bitbuf.data); return ERR_OK; } int mp4read_open(char *name) { uint32_t atomsize; int ret; mp4read_close(); g_fin = faad_fopen(name, "rb"); if (!g_fin) return ERR_FAIL; if (mp4config.verbose.header) fprintf(stderr, "**** MP4 header ****\n"); g_atom = g_head; atomsize = INT_MAX; if (parse(&atomsize) < 0) goto err; g_atom = g_moov; atomsize = INT_MAX; rewind(g_fin); if ((ret = parse(&atomsize)) < 0) { fprintf(stderr, "parse:%d\n", ret); goto err; } // alloc frame buffer mp4config.bitbuf.data = malloc(mp4config.frame.maxsize); if (!mp4config.bitbuf.data) goto err; if (mp4config.verbose.header) { mp4info(); fprintf(stderr, "********************\n"); } if (mp4config.verbose.tags) { rewind(g_fin); g_atom = g_meta1; atomsize = INT_MAX; ret = parse(&atomsize); if (ret < 0) { rewind(g_fin); g_atom = g_meta2; atomsize = INT_MAX; ret = parse(&atomsize); } } return ERR_OK; err: mp4read_close(); return ERR_FAIL; }
null
/**************************************************************************** MP4 input module Copyright (C) 2017 Krzysztof Nikiel This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ****************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <stdint.h> #include <string.h> #include <time.h> #include <limits.h> #include "unicode_support.h" #include "mp4read.h" enum ATOM_TYPE { ATOM_STOP = 0 /* end of atoms */ , ATOM_NAME /* plain atom */ , ATOM_DESCENT, /* starts group of children */ ATOM_ASCENT, /* ends group */ ATOM_DATA, }; typedef struct { uint16_t opcode; void *data; } creator_t; mp4config_t mp4config = { 0 }; static FILE *g_fin = NULL; static inline uint32_t bswap32(const uint32_t u32) { #ifndef WORDS_BIGENDIAN #if defined (__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3))) return __builtin_bswap32(u32); #elif defined (_MSC_VER) return _byteswap_ulong(u32); #else return (u32 << 24) | ((u32 << 8) & 0xFF0000) | ((u32 >> 8) & 0xFF00) | (u32 >> 24); #endif #else return u32; #endif } static inline uint16_t bswap16(const uint16_t u16) { #ifndef WORDS_BIGENDIAN #if defined (__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 8))) return __builtin_bswap16(u16); #elif defined (_MSC_VER) return _byteswap_ushort(u16); #else return (u16 << 8) | (u16 >> 8); #endif #else return u16; #endif } enum {ERR_OK = 0, ERR_FAIL = -1, ERR_UNSUPPORTED = -2}; static int datain(void *data, int size) { if (fread(data, 1, size, g_fin) != size) return ERR_FAIL; return size; } static int stringin(char *txt, int sizemax) { int size; for (size = 0; size < sizemax; size++) { if (fread(txt + size, 1, 1, g_fin) != 1) return ERR_FAIL; if (!txt[size]) break; } txt[sizemax-1] = '\0'; return size; } static uint32_t u32in(void) { uint32_t u32; datain(&u32, 4); u32 = bswap32(u32); return u32; } static uint16_t u16in(void) { uint16_t u16; datain(&u16, 2); u16 = bswap16(u16); return u16; } static int u8in(void) { uint8_t u8; datain(&u8, 1); return u8; } static int ftypin(int size) { enum {BUFSIZE = 40}; char buf[BUFSIZE]; uint32_t u32; buf[4] = 0; datain(buf, 4); u32 = u32in(); if (mp4config.verbose.header) fprintf(stderr, "Brand:\t\t\t%s(version %d)\n", buf, u32); stringin(buf, BUFSIZE); if (mp4config.verbose.header) fprintf(stderr, "Compatible brands:\t%s\n", buf); return size; } enum { SECSINDAY = 24 * 60 * 60 }; static char *mp4time(time_t t) { int y; // subtract some seconds from the start of 1904 to the start of 1970 for (y = 1904; y < 1970; y++) { t -= 365 * SECSINDAY; if (!(y & 3)) t -= SECSINDAY; } return ctime(&t); } static int mdhdin(int size) { // version/flags u32in(); // Creation time mp4config.ctime = u32in(); // Modification time mp4config.mtime = u32in(); // Time scale mp4config.samplerate = u32in(); // Duration mp4config.samples = u32in(); // Language u16in(); // pre_defined u16in(); return size; }; static int hdlr1in(int size) { uint8_t buf[5]; buf[4] = 0; 
// version/flags u32in(); // pre_defined u32in(); // Component subtype datain(buf, 4); if (mp4config.verbose.header) fprintf(stderr, "*track media type: '%s': ", buf); if (memcmp("soun", buf, 4)) { if (mp4config.verbose.header) fprintf(stderr, "unsupported, skipping\n"); return ERR_UNSUPPORTED; } else { if (mp4config.verbose.header) fprintf(stderr, "OK\n"); } // reserved u32in(); u32in(); u32in(); // name // null terminate u8in(); return size; }; static int stsdin(int size) { // version/flags u32in(); // Number of entries(one 'mp4a') if (u32in() != 1) //fixme: error handling return ERR_FAIL; return size; }; static int mp4ain(int size) { // Reserved (6 bytes) u32in(); u16in(); // Data reference index u16in(); // Version u16in(); // Revision level u16in(); // Vendor u32in(); // Number of channels mp4config.channels = u16in(); // Sample size (bits) mp4config.bits = u16in(); // Compression ID u16in(); // Packet size u16in(); // Sample rate (16.16) // fractional framerate, probably not for audio // rate integer part u16in(); // rate reminder part u16in(); return size; } static uint32_t getsize(void) { int cnt; uint32_t size = 0; for (cnt = 0; cnt < 4; cnt++) { int tmp = u8in(); size <<= 7; size |= (tmp & 0x7f); if (!(tmp & 0x80)) break; } return size; } static int esdsin(int size) { // descriptor tree: // MP4ES_Descriptor // MP4DecoderConfigDescriptor // MP4DecSpecificInfoDescriptor // MP4SLConfigDescriptor enum { TAG_ES = 3, TAG_DC = 4, TAG_DSI = 5, TAG_SLC = 6 }; // version/flags u32in(); if (u8in() != TAG_ES) return ERR_FAIL; getsize(); // ESID u16in(); // flags(url(bit 6); ocr(5); streamPriority (0-4)): u8in(); if (u8in() != TAG_DC) return ERR_FAIL; getsize(); if (u8in() != 0x40) /* not MPEG-4 audio */ return ERR_FAIL; // flags u8in(); // buffer size (24 bits) mp4config.buffersize = u16in() << 8; mp4config.buffersize |= u8in(); // bitrate mp4config.bitratemax = u32in(); mp4config.bitrateavg = u32in(); if (u8in() != TAG_DSI) return ERR_FAIL; mp4config.asc.size = getsize(); if (mp4config.asc.size > sizeof(mp4config.asc.buf)) return ERR_FAIL; // get AudioSpecificConfig datain(mp4config.asc.buf, mp4config.asc.size); if (u8in() != TAG_SLC) return ERR_FAIL; getsize(); // "predefined" (no idea) u8in(); return size; } static int sttsin(int size) { if (size < 16) //min stts size return ERR_FAIL; return size; } static int stszin(int size) { int cnt; uint32_t ofs; // version/flags u32in(); // Sample size u32in(); // Number of entries mp4config.frame.ents = u32in(); if (!(mp4config.frame.ents + 1)) return ERR_FAIL; mp4config.frame.data = malloc(sizeof(*mp4config.frame.data) * (mp4config.frame.ents + 1)); if (!mp4config.frame.data) return ERR_FAIL; ofs = 0; mp4config.frame.data[0] = ofs; for (cnt = 0; cnt < mp4config.frame.ents; cnt++) { uint32_t fsize = u32in(); ofs += fsize; if (mp4config.frame.maxsize < fsize) mp4config.frame.maxsize = fsize; mp4config.frame.data[cnt + 1] = ofs; if (ofs < mp4config.frame.data[cnt]) return ERR_FAIL; } return size; } static int stcoin(int size) { // version/flags u32in(); // Number of entries if (u32in() < 1) return ERR_FAIL; // first chunk offset mp4config.mdatofs = u32in(); // ignore the rest return size; } #if 0 static int tagtxt(char *tagname, const char *tagtxt) { //int txtsize = strlen(tagtxt); int size = 0; //int datasize = txtsize + 16; #if 0 size += u32out(datasize + 8); size += dataout(tagname, 4); size += u32out(datasize); size += dataout("data", 4); size += u32out(1); size += u32out(0); size += dataout(tagtxt, txtsize); #endif return size; } static int 
tagu32(char *tagname, int n /*number of stored fields*/) { //int numsize = n * 4; int size = 0; //int datasize = numsize + 16; #if 0 size += u32out(datasize + 8); size += dataout(tagname, 4); size += u32out(datasize); size += dataout("data", 4); size += u32out(0); size += u32out(0); #endif return size; } #endif static int metain(int size) { // version/flags u32in(); return ERR_OK; }; static int hdlr2in(int size) { uint8_t buf[4]; // version/flags u32in(); // Predefined u32in(); // Handler type datain(buf, 4); if (memcmp(buf, "mdir", 4)) return ERR_FAIL; datain(buf, 4); if (memcmp(buf, "appl", 4)) return ERR_FAIL; // Reserved u32in(); u32in(); // null terminator u8in(); return size; }; static int ilstin(int size) { enum {NUMSET = 1, GENRE, EXTAG}; int read = 0; static struct { char *name; char *id; int flag; } tags[] = { {"Album ", "\xa9" "alb"}, {"Album Artist", "aART"}, {"Artist ", "\xa9" "ART"}, {"Comment ", "\xa9" "cmt"}, {"Cover image ", "covr"}, {"Compilation ", "cpil"}, {"Copyright ", "cprt"}, {"Date ", "\xa9" "day"}, {"Disc# ", "disk", NUMSET}, {"Genre ", "gnre", GENRE}, {"Grouping ", "\xa9" "grp"}, {"Lyrics ", "\xa9" "lyr"}, {"Title ", "\xa9" "nam"}, {"Rating ", "rtng"}, {"BPM ", "tmpo"}, {"Encoder ", "\xa9" "too"}, {"Track ", "trkn", NUMSET}, {"Composer ", "\xa9" "wrt"}, {0, "----", EXTAG}, {0}, }; static const char *genres[] = { "Blues", "Classic Rock", "Country", "Dance", "Disco", "Funk", "Grunge", "Hip-Hop", "Jazz", "Metal", "New Age", "Oldies", "Other", "Pop", "R&B", "Rap", "Reggae", "Rock", "Techno", "Industrial", "Alternative", "Ska", "Death Metal", "Pranks", "Soundtrack", "Euro-Techno", "Ambient", "Trip-Hop", "Vocal", "Jazz+Funk", "Fusion", "Trance", "Classical", "Instrumental", "Acid", "House", "Game", "Sound Clip", "Gospel", "Noise", "Alternative Rock", "Bass", "Soul", "Punk", "Space", "Meditative", "Instrumental Pop", "Instrumental Rock", "Ethnic", "Gothic", "Darkwave", "Techno-Industrial", "Electronic", "Pop-Folk", "Eurodance", "Dream", "Southern Rock", "Comedy", "Cult", "Gangsta", "Top 40", "Christian Rap", "Pop/Funk", "Jungle", "Native US", "Cabaret", "New Wave", "Psychadelic", "Rave", "Showtunes", "Trailer", "Lo-Fi", "Tribal", "Acid Punk", "Acid Jazz", "Polka", "Retro", "Musical", "Rock & Roll", "Hard Rock", "Folk", "Folk-Rock", "National Folk", "Swing", "Fast Fusion", "Bebob", "Latin", "Revival", "Celtic", "Bluegrass", "Avantgarde", "Gothic Rock", "Progressive Rock", "Psychedelic Rock", "Symphonic Rock", "Slow Rock", "Big Band", "Chorus", "Easy Listening", "Acoustic", "Humour", "Speech", "Chanson", "Opera", "Chamber Music", "Sonata", "Symphony", "Booty Bass", "Primus", "Porn Groove", "Satire", "Slow Jam", "Club", "Tango", "Samba", "Folklore", "Ballad", "Power Ballad", "Rhythmic Soul", "Freestyle", "Duet", "Punk Rock", "Drum Solo", "Acapella", "Euro-House", "Dance Hall", "Goa", "Drum & Bass", "Club - House", "Hardcore", "Terror", "Indie", "BritPop", "Negerpunk", "Polsk Punk", "Beat", "Christian Gangsta Rap", "Heavy Metal", "Black Metal", "Crossover", "Contemporary Christian", "Christian Rock", "Merengue", "Salsa", "Thrash Metal", "Anime", "JPop", "Synthpop", "Unknown", }; fprintf(stderr, "----------tag list-------------\n"); while(read < size) { int asize, dsize; uint8_t id[5]; int cnt; uint32_t type; id[4] = 0; asize = u32in(); read += asize; asize -= 4; if (datain(id, 4) < 4) return ERR_FAIL; asize -= 4; for (cnt = 0; tags[cnt].id; cnt++) { if (!memcmp(id, tags[cnt].id, 4)) break; } if (tags[cnt].name) fprintf(stderr, "%s : ", tags[cnt].name); else { if 
(tags[cnt].flag != EXTAG) fprintf(stderr, "'%s' : ", id); } dsize = u32in(); asize -= 4; if (datain(id, 4) < 4) return ERR_FAIL; asize -= 4; if (tags[cnt].flag != EXTAG) { if (memcmp(id, "data", 4)) return ERR_FAIL; } else { int spc; if (memcmp(id, "mean", 4)) goto skip; dsize -= 8; while (dsize > 0) { u8in(); asize--; dsize--; } if (asize >= 8) { dsize = u32in() - 8; asize -= 4; if (datain(id, 4) < 4) return ERR_FAIL; asize -= 4; if (memcmp(id, "name", 4)) goto skip; u32in(); asize -= 4; dsize -= 4; } spc = 13 - dsize; if (spc < 0) spc = 0; while (dsize > 0) { fprintf(stderr, "%c",u8in()); asize--; dsize--; } while (spc--) fprintf(stderr, " "); fprintf(stderr, ": "); if (asize >= 8) { dsize = u32in() - 8; asize -= 4; if (datain(id, 4) < 4) return ERR_FAIL; asize -= 4; if (memcmp(id, "data", 4)) goto skip; u32in(); asize -= 4; dsize -= 4; } while (dsize > 0) { fprintf(stderr, "%c",u8in()); asize--; dsize--; } fprintf(stderr, "\n"); goto skip; } type = u32in(); asize -= 4; u32in(); asize -= 4; switch(type) { case 1: while (asize > 0) { fprintf(stderr, "%c",u8in()); asize--; } break; case 0: switch(tags[cnt].flag) { case NUMSET: u16in(); asize -= 2; fprintf(stderr, "%d", u16in()); asize -= 2; fprintf(stderr, "/%d", u16in()); asize -= 2; break; case GENRE: { uint8_t gnum = u16in(); asize -= 2; if (!gnum) goto skip; gnum--; if (gnum >= 147) gnum = 147; fprintf(stderr, "%s", genres[gnum]); } break; default: while(asize > 0) { fprintf(stderr, "%d/", u16in()); asize-=2; } } break; case 0x15: //fprintf(stderr, "(8bit data)"); while(asize > 0) { fprintf(stderr, "%d", u8in()); asize--; if (asize) fprintf(stderr, "/"); } break; case 0xd: fprintf(stderr, "(image data)"); break; default: fprintf(stderr, "(unknown data type)"); break; } fprintf(stderr, "\n"); skip: // skip to the end of atom while (asize > 0) { u8in(); asize--; } } fprintf(stderr, "-------------------------------\n"); return size; }; static creator_t *g_atom = 0; static int parse(uint32_t *sizemax) { long apos = 0; long aposmax = ftell(g_fin) + *sizemax; uint32_t size; if (g_atom->opcode != ATOM_NAME) { fprintf(stderr, "parse error: root is not a 'name' opcode\n"); return ERR_FAIL; } //fprintf(stderr, "looking for '%s'\n", (char *)g_atom->data); // search for atom in the file while (1) { char name[4]; uint32_t tmp; apos = ftell(g_fin); if (apos >= (aposmax - 8)) { fprintf(stderr, "parse error: atom '%s' not found\n", (char *)g_atom->data); return ERR_FAIL; } if ((tmp = u32in()) < 8) { fprintf(stderr, "invalid atom size %x @%lx\n", tmp, ftell(g_fin)); return ERR_FAIL; } size = tmp; if (datain(name, 4) != 4) { // EOF fprintf(stderr, "can't read atom name @%lx\n", ftell(g_fin)); return ERR_FAIL; } //fprintf(stderr, "atom: '%c%c%c%c'(%x)", name[0],name[1],name[2],name[3], size); if (!memcmp(name, g_atom->data, 4)) { //fprintf(stderr, "OK\n"); break; } //fprintf(stderr, "\n"); fseek(g_fin, apos + size, SEEK_SET); } *sizemax = size; g_atom++; if (g_atom->opcode == ATOM_DATA) { int err = ((int (*)(int)) g_atom->data)(size - 8); if (err < ERR_OK) { fseek(g_fin, apos + size, SEEK_SET); return err; } g_atom++; } if (g_atom->opcode == ATOM_DESCENT) { long apos = ftell(g_fin);; //fprintf(stderr, "descent\n"); g_atom++; while (g_atom->opcode != ATOM_STOP) { uint32_t subsize = size - 8; int ret; if (g_atom->opcode == ATOM_ASCENT) { g_atom++; break; } fseek(g_fin, apos, SEEK_SET); if ((ret = parse(&subsize)) < 0) return ret; } //fprintf(stderr, "ascent\n"); } fseek(g_fin, apos + size, SEEK_SET); return ERR_OK; } static int moovin(int sizemax) { long 
apos = ftell(g_fin); uint32_t atomsize; creator_t *old_atom = g_atom; int err, ret = sizemax; static creator_t mvhd[] = { {ATOM_NAME, "mvhd"}, {0} }; static creator_t trak[] = { {ATOM_NAME, "trak"}, {ATOM_DESCENT}, {ATOM_NAME, "tkhd"}, {ATOM_NAME, "mdia"}, {ATOM_DESCENT}, {ATOM_NAME, "mdhd"}, {ATOM_DATA, mdhdin}, {ATOM_NAME, "hdlr"}, {ATOM_DATA, hdlr1in}, {ATOM_NAME, "minf"}, {ATOM_DESCENT}, {ATOM_NAME, "smhd"}, {ATOM_NAME, "dinf"}, {ATOM_NAME, "stbl"}, {ATOM_DESCENT}, {ATOM_NAME, "stsd"}, {ATOM_DATA, stsdin}, {ATOM_DESCENT}, {ATOM_NAME, "mp4a"}, {ATOM_DATA, mp4ain}, {ATOM_DESCENT}, {ATOM_NAME, "esds"}, {ATOM_DATA, esdsin}, {ATOM_ASCENT}, {ATOM_ASCENT}, {ATOM_NAME, "stts"}, {ATOM_DATA, sttsin}, {ATOM_NAME, "stsc"}, {ATOM_NAME, "stsz"}, {ATOM_DATA, stszin}, {ATOM_NAME, "stco"}, {ATOM_DATA, stcoin}, {0} }; g_atom = mvhd; atomsize = sizemax + apos - ftell(g_fin); if (parse(&atomsize) < 0) { g_atom = old_atom; return ERR_FAIL; } fseek(g_fin, apos, SEEK_SET); while (1) { //fprintf(stderr, "TRAK\n"); g_atom = trak; atomsize = sizemax + apos - ftell(g_fin); if (atomsize < 8) break; //fprintf(stderr, "PARSE(%x)\n", atomsize); err = parse(&atomsize); //fprintf(stderr, "SIZE: %x/%x\n", atomsize, sizemax); if (err >= 0) break; if (err != ERR_UNSUPPORTED) { ret = err; break; } //fprintf(stderr, "UNSUPP\n"); } g_atom = old_atom; return ret; } static creator_t g_head[] = { {ATOM_NAME, "ftyp"}, {ATOM_DATA, ftypin}, {0} }; static creator_t g_moov[] = { {ATOM_NAME, "moov"}, {ATOM_DATA, moovin}, //{ATOM_DESCENT}, //{ATOM_NAME, "mvhd"}, {0} }; static creator_t g_meta1[] = { {ATOM_NAME, "moov"}, {ATOM_DESCENT}, {ATOM_NAME, "udta"}, {ATOM_DESCENT}, {ATOM_NAME, "meta"}, {ATOM_DATA, metain}, {ATOM_DESCENT}, {ATOM_NAME, "hdlr"}, {ATOM_DATA, hdlr2in}, {ATOM_NAME, "ilst"}, {ATOM_DATA, ilstin}, {0} }; static creator_t g_meta2[] = { {ATOM_NAME, "meta"}, {ATOM_DATA, metain}, {ATOM_DESCENT}, {ATOM_NAME, "hdlr"}, {ATOM_DATA, hdlr2in}, {ATOM_NAME, "ilst"}, {ATOM_DATA, ilstin}, {0} }; int mp4read_frame(void) { if (mp4config.frame.current >= mp4config.frame.ents) return ERR_FAIL; mp4config.bitbuf.size = mp4config.frame.data[mp4config.frame.current + 1] - mp4config.frame.data[mp4config.frame.current]; if (fread(mp4config.bitbuf.data, 1, mp4config.bitbuf.size, g_fin) != mp4config.bitbuf.size) { fprintf(stderr, "can't read frame data(frame %d@0x%x)\n", mp4config.frame.current, mp4config.frame.data[mp4config.frame.current]); return ERR_FAIL; } mp4config.frame.current++; return ERR_OK; } int mp4read_seek(int framenum) { if (framenum > mp4config.frame.ents) return ERR_FAIL; if (fseek(g_fin, mp4config.mdatofs + mp4config.frame.data[framenum], SEEK_SET)) return ERR_FAIL; mp4config.frame.current = framenum; return ERR_OK; } static void mp4info(void) { fprintf(stderr, "Modification Time:\t\t%s\n", mp4time(mp4config.mtime)); fprintf(stderr, "Samplerate:\t\t%d\n", mp4config.samplerate); fprintf(stderr, "Total samples:\t\t%d\n", mp4config.samples); fprintf(stderr, "Total channels:\t\t%d\n", mp4config.channels); fprintf(stderr, "Bits per sample:\t%d\n", mp4config.bits); fprintf(stderr, "Buffer size:\t\t%d\n", mp4config.buffersize); fprintf(stderr, "Max bitrate:\t\t%d\n", mp4config.bitratemax); fprintf(stderr, "Average bitrate:\t%d\n", mp4config.bitrateavg); fprintf(stderr, "Samples per frame:\t%d\n", mp4config.framesamples); fprintf(stderr, "Frames:\t\t\t%d\n", mp4config.frame.ents); fprintf(stderr, "ASC size:\t\t%d\n", mp4config.asc.size); fprintf(stderr, "Duration:\t\t%.1f sec\n", (float)mp4config.samples/mp4config.samplerate); 
fprintf(stderr, "Data offset/size:\t%x/%x\n", mp4config.mdatofs, mp4config.mdatsize); } int mp4read_close(void) { #define FREE(x) if(x){free(x);x=0;} FREE(mp4config.frame.data); FREE(mp4config.bitbuf.data); return ERR_OK; } int mp4read_open(char *name) { uint32_t atomsize; int ret; mp4read_close(); g_fin = faad_fopen(name, "rb"); if (!g_fin) return ERR_FAIL; if (mp4config.verbose.header) fprintf(stderr, "**** MP4 header ****\n"); g_atom = g_head; atomsize = INT_MAX; if (parse(&atomsize) < 0) goto err; g_atom = g_moov; atomsize = INT_MAX; rewind(g_fin); if ((ret = parse(&atomsize)) < 0) { fprintf(stderr, "parse:%d\n", ret); goto err; } // alloc frame buffer mp4config.bitbuf.data = malloc(mp4config.frame.maxsize); if (!mp4config.bitbuf.data) goto err; if (mp4config.verbose.header) { mp4info(); fprintf(stderr, "********************\n"); } if (mp4config.verbose.tags) { rewind(g_fin); g_atom = g_meta1; atomsize = INT_MAX; ret = parse(&atomsize); if (ret < 0) { rewind(g_fin); g_atom = g_meta2; atomsize = INT_MAX; ret = parse(&atomsize); } } return ERR_OK; err: mp4read_close(); return ERR_FAIL; }
null
273
CWE-787
CVE-2021-32435
/* * Generic ABC parser. * * This file is part of abcm2ps. * * Copyright (C) 1998-2020 Jean-François Moine (http://moinejf.free.fr) * Adapted from abc2ps, Copyright (C) 1996-1998 Michael Methfessel * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. */ #include "config.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include "abcm2ps.h" /* global values */ int severity; /* error severity */ static int ulen; /* unit note length set by M: or L: */ static short meter; /* upper value of time sig for n-plets */ static unsigned char microscale; /* current microtone scale */ static signed char vover; /* voice overlay (1: single bar, -1: multi-bar */ static char lyric_started; /* lyric started */ static char *gchord; /* guitar chord */ static struct decos dc; /* decorations */ static struct SYMBOL *deco_start; /* 1st note of the line for d: / s: */ static struct SYMBOL *deco_cont; /* current symbol when d: / s: continuation */ static int g_abc_vers, g_ulen, g_microscale; static char g_char_tb[128]; static char *g_deco_tb[128]; /* global decoration names */ static unsigned short g_micro_tb[MAXMICRO]; /* global microtone values */ static char *abc_fn; /* current source file name */ static int linenum; /* current source line number */ static int colnum; /* current source column number */ static char *abc_line; /* line being parsed */ static struct SYMBOL *last_sym; /* last symbol for errors */ static short nvoice; /* number of voices (0..n-1) */ struct VOICE_S *curvoice; /* current voice while parsing */ struct parse parse; /* char table for note line parsing */ #define CHAR_BAD 0 #define CHAR_IGN 1 #define CHAR_NOTE 2 #define CHAR_GR_ST 3 #define CHAR_DECO 4 #define CHAR_GCHORD 5 #define CHAR_BSLASH 6 #define CHAR_OBRA 7 #define CHAR_BAR 8 #define CHAR_OPAR 9 #define CHAR_VOV 10 #define CHAR_SPAC 11 #define CHAR_MINUS 12 #define CHAR_CPAR 13 #define CHAR_BRHY 14 #define CHAR_DECOS 15 #define CHAR_SLASH 16 #define CHAR_GR_EN 17 #define CHAR_LINEBREAK 18 static char char_tb[256] = { 0, 0, 0, 0, 0, 0, 0, 0, /* 00 - 07 */ 0, CHAR_SPAC, CHAR_LINEBREAK, 0, 0, 0, 0, 0, /* 08 - 0f */ 0, 0, 0, 0, 0, 0, 0, 0, /* 10 - 17 */ 0, 0, 0, 0, 0, 0, 0, 0, /* 18 - 1f */ CHAR_SPAC, CHAR_DECOS, CHAR_GCHORD, CHAR_BAD, /* (sp) ! " # */ CHAR_BAD, CHAR_BAD, CHAR_VOV, CHAR_BAD, /* $ % & ' */ CHAR_OPAR, CHAR_CPAR, CHAR_BAD, CHAR_DECOS, /* ( ) * + */ CHAR_BAD, CHAR_MINUS, CHAR_DECO, CHAR_SLASH, /* , - . / */ CHAR_BAD, CHAR_BAD, CHAR_BAD, CHAR_BAD, /* 0 1 2 3 */ CHAR_BAD, CHAR_BAD, CHAR_BAD, CHAR_BAD, /* 4 5 6 7 */ CHAR_BAD, CHAR_BAD, CHAR_BAR, CHAR_BAD, /* 8 9 : ; */ CHAR_BRHY, CHAR_NOTE, CHAR_BRHY, CHAR_BAD, /* < = > ? 
*/ CHAR_BAD, CHAR_NOTE, CHAR_NOTE, CHAR_NOTE, /* @ A B C */ CHAR_NOTE, CHAR_NOTE, CHAR_NOTE, CHAR_NOTE, /* D E F G */ CHAR_DECO, CHAR_DECO, CHAR_DECO, CHAR_DECO, /* H I J K */ CHAR_DECO, CHAR_DECO, CHAR_DECO, CHAR_DECO, /* L M N O */ CHAR_DECO, CHAR_DECO, CHAR_DECO, CHAR_DECO, /* P Q R S */ CHAR_DECO, CHAR_DECO, CHAR_DECO, CHAR_DECO, /* T U V W */ CHAR_NOTE, CHAR_DECO, CHAR_NOTE, CHAR_OBRA, /* X Y Z [ */ CHAR_BSLASH, CHAR_BAR, CHAR_NOTE, CHAR_NOTE, /* \ ] ^ _ */ CHAR_IGN, CHAR_NOTE, CHAR_NOTE, CHAR_NOTE, /* ` a b c */ CHAR_NOTE, CHAR_NOTE, CHAR_NOTE, CHAR_NOTE, /* d e f g */ CHAR_DECO, CHAR_DECO, CHAR_DECO, CHAR_DECO, /* h i j k */ CHAR_DECO, CHAR_DECO, CHAR_DECO, CHAR_DECO, /* l m n o */ CHAR_DECO, CHAR_DECO, CHAR_DECO, CHAR_DECO, /* p q r s */ CHAR_DECO, CHAR_DECO, CHAR_DECO, CHAR_DECO, /* t u v w */ CHAR_NOTE, CHAR_NOTE, CHAR_NOTE, CHAR_GR_ST, /* x y z { */ CHAR_BAR, CHAR_GR_EN, CHAR_DECO, CHAR_BAD, /* | } ~ (del) */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 80 - 8f */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 90 - 9f */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* a0 - af */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* b0 - bf */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* c0 - cf */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* d0 - df */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* e0 - ef */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* f0 - ff */ }; static const char all_notes[] = "CDEFGABcdefgab"; static int parse_info(char *p); static char *parse_gchord(char *p); static int parse_line(char *p); static char *parse_note(char *p, int flags); static void syntax(char *msg, char *q); static void vover_new(void); /* -- abcMIDI like errors -- */ static void print_error(char *s, int col) { if (col >= 0) fprintf(stderr, "%s:%d:%d: error: %s\n", abc_fn, linenum, col, s); else fprintf(stderr, "%s:%d: error: %s\n", abc_fn, linenum, s); } /* -- new symbol -- */ static struct SYMBOL *abc_new(int type, char *text) { struct SYMBOL *s; s = getarena(sizeof(struct SYMBOL)); memset(s, 0, sizeof(struct SYMBOL)); if (text) { s->text = getarena(strlen(text) + 1); strcpy(s->text, text); } if (!parse.last_sym) { parse.first_sym = s; } else { if ((s->abc_next = parse.last_sym->abc_next) != NULL) s->abc_next->abc_prev = s; parse.last_sym->abc_next = s; s->abc_prev = parse.last_sym; } last_sym = parse.last_sym = s; s->abc_type = type; s->state = parse.abc_state; s->fn = abc_fn; s->linenum = linenum; s->colnum = colnum; return s; } /* -- parse an ABC line -- */ void abc_parse(char *p, char *fname, int ln) { abc_fn = fname; linenum = ln; abc_line = p; /* parse the music line */ switch (parse_line(p)) { case 2: /* start of tune (X:) */ g_abc_vers = parse.abc_vers; g_ulen = ulen; g_microscale = microscale; meter = 2; memcpy(g_char_tb, char_tb, sizeof g_char_tb); memcpy(g_deco_tb, parse.deco_tb, sizeof g_deco_tb); memcpy(g_micro_tb, parse.micro_tb, sizeof g_micro_tb); break; case 1: /* end of tune */ if (parse.first_sym) { do_tune(); parse.first_sym = parse.last_sym = NULL; } parse.abc_state = ABC_S_GLOBAL; parse.abc_vers = g_abc_vers; ulen = g_ulen; microscale = g_microscale; memcpy(char_tb, g_char_tb, sizeof g_char_tb); memcpy(parse.deco_tb, g_deco_tb, sizeof parse.deco_tb); memcpy(parse.micro_tb, g_micro_tb, sizeof parse.micro_tb); lvlarena(0); if (dc.n > 0) syntax("Decoration without symbol", 0); dc.n = 0; break; } } /* treat the end of file */ void abc_eof(void) { // if (parse.abc_state == ABC_S_HEAD) // severity = 1; do_tune(); parse.first_sym = 
parse.last_sym = NULL; if (parse.abc_state != ABC_S_GLOBAL) { parse.abc_vers = g_abc_vers; ulen = g_ulen; microscale = g_microscale; memcpy(char_tb, g_char_tb, sizeof g_char_tb); } } /* -- treat the broken rhythm '>' and '<' -- */ static void broken_rhythm(struct SYMBOL *s, int num) /* >0: do dot, <0: do half */ { struct notes *notes = &s->u.note; int l, m, n; num *= 2; if (num > 0) { if (num == 6) num = 8; n = num * 2 - 1; for (m = 0; m <= s->nhd; m++) notes->notes[m].len = (notes->notes[m].len * n) / num; } else { n = -num; if (n == 6) n = 8; for (m = 0; m <= s->nhd; m++) notes->notes[m].len /= n; } l = notes->notes[0].len; for (m = 1; m <= s->nhd; m++) if (notes->notes[m].len < l) l = notes->notes[m].len; } /* -- check for the '!' as end of line (ABC2Win) -- */ static int check_nl(char *p) { while (*p != '\0') { switch (*p++) { case '!': return 0; case '|': case '[': case ':': case ']': case ' ': case '\t': return 1; } } return 1; } /* -- parse extra K: or V: definitions (clef, octave and microscale -- */ static char *parse_extra(char *p, char **p_name, char **p_middle, char **p_stlines, char **p_scale, char **p_octave, char **p_cue, char **p_map) { for (;;) { if (strncmp(p, "clef=", 5) == 0 || strncmp(p, "bass", 4) == 0 || strncmp(p, "treble", 6) == 0 || strncmp(p, "alto", 4) == 0 || strncmp(p, "tenor", 5) == 0 || strncmp(p, "perc", 4) == 0) { if (*p_name) syntax("Double clef name", p); *p_name = p; } else if (strncmp(p, "microscale=", 11) == 0 || strncmp(p, "uscale=", 7) == 0) { int i; p += p[0] == 'm' ? 11 : 7; i = atoi(p); if (i < 4 || i >= 256) syntax("Invalid value in microscale=", p); else microscale = i; } else if (strncmp(p, "middle=", 7) == 0 || strncmp(p, "m=", 2) == 0) { if (*p_middle) syntax("Double clef middle", p); *p_middle = p + (p[1] == '=' ? 
2 : 7); } else if (strncmp(p, "octave=", 7) == 0) { if (*p_octave) syntax("Double octave=", p); *p_octave = p + 7; } else if (strncmp(p, "stafflines=", 11) == 0) { int l; char *q; if (*p_stlines) syntax("Double stafflines", p); p += 11; if (isdigit((unsigned char) *p)) { switch (atoi(p)) { case 0: *p_stlines = "..."; break; case 1: *p_stlines = "..|"; break; case 2: *p_stlines = ".||"; break; case 3: *p_stlines = ".|||"; break; case 4: *p_stlines = "||||"; break; case 5: *p_stlines = "|||||"; break; case 6: *p_stlines = "||||||"; break; case 7: *p_stlines = "|||||||"; break; case 8: *p_stlines = "||||||||"; break; default: syntax("Bad number of lines", p); break; } } else { q = p; while (!isspace((unsigned char) *p) && *p != '\0') p++; l = p - q; *p_stlines = getarena(l + 1); strncpy(*p_stlines, q, l); (*p_stlines)[l] = '\0'; } } else if (strncmp(p, "staffscale=", 11) == 0) { if (*p_scale) syntax("Double staffscale", p); *p_scale = p + 11; } else if (strncmp(p, "cue=", 4) == 0) { if (*p_cue) syntax("Double cue", p); *p_cue = p + 4; } else if (strncmp(p, "map=", 4) == 0) { if (*p_map) syntax("Double map", p); *p_map = p + 4; // } else if (strncmp(p, "transpose=", 10) == 0 // || strncmp(p, "t=", 2) == 0) { // ; /* ignored - abcMIDI */ } else { break; } while (!isspace((unsigned char) *p) && *p != '\0') p++; while (isspace((unsigned char) *p)) p++; if (*p == '\0') break; } return p; } /* -- parse a decoration 'xxx<decosep>' -- */ static char *get_deco(char *p, unsigned char *p_dc) { char *q, sep, **t; unsigned i, l; *p_dc = 0; q = p; sep = q[-1]; if (char_tb[(unsigned char) sep] == CHAR_DECOS) { if (sep == '+') { if (*p == '+' && p[1] == '+') p++; /* special case "+++" */ } } else { sep = '\0'; /* Barfly U: */ } while (*p != sep) { if (*p == '\0') { syntax("Decoration not terminated", q); return p; } p++; } l = p - q; if (*p == sep) p++; for (i = 1, t = &parse.deco_tb[1]; *t && i < DC_NAME_SZ; i++, t++) { if (strlen(*t) == l && strncmp(*t, q, l) == 0) { *p_dc = i + 128; return p; } } /* new decoration */ if (i < DC_NAME_SZ) { // if (parse.abc_state != ABC_S_GLOBAL) // lvlarena(0); *t = getarena(l + 1); // if (parse.abc_state != ABC_S_GLOBAL) // lvlarena(1); memcpy(*t, q, l); (*t)[l] = '\0'; *p_dc = i + 128; } else { syntax("Too many decoration types", q); } return p; } /* -- parse a list of accidentals (K:) -- */ static char *parse_acc(char *p, struct SYMBOL *s) { int pit = 0, acc; unsigned nacc; nacc = 0; for (;;) { if (nacc >= sizeof s->u.key.pits) { syntax("Too many accidentals", p); break; } p = parse_acc_pit(p, &pit, &acc); if (acc < 0) break; s->u.key.pits[nacc] = pit; s->u.key.accs[nacc++] = acc; while (isspace((unsigned char) *p)) p++; if (*p == '\0') break; if (*p != '^' && *p != '_' && *p != '=') break; } s->u.key.microscale = microscale; if (s->u.key.empty != 2) s->u.key.nacc = nacc; return p; } /* -- parse a clef (K: or V:) -- */ static void parse_clef(struct SYMBOL *s, char *name, char *middle) { int clef = -1; int transpose = 0; int clef_line = 2; char *warn = NULL; char str[80]; str[0] = '\0'; if (name && strncmp(name, "clef=", 5) == 0) { name += 5; switch (*name) { case '\"': name = get_str(str, name, sizeof str); s->u.clef.name = getarena(strlen(str) + 1); strcpy(s->u.clef.name, str); clef = TREBLE; break; case 'g': warn = name; transpose = -7; case 'G': clef = TREBLE; break; case 'f': warn = name; transpose = -14; clef = BASS; clef_line = 4; break; case 'F': if (name[1] == ',') /* abc2.1.1 clef=F == clef=F, */ transpose = -7; clef = BASS; clef_line = 4; break; case 'c': warn 
= name; transpose = -7; case 'C': clef = ALTO; clef_line = 3; break; case 'P': clef = PERC; clef_line = 3; break; } if (clef >= 0) { name++; if (*name == ',' || *name== '\'') warn = name; while (*name == ',') { transpose += 7; name++; } while (*name == '\'') { transpose -= 7; name++; } } } if (name && clef < 0) { if (!strncmp(name, "bass", 4)) { clef = BASS; clef_line = 4; s->u.clef.check_pitch = 1; name += 4; } else if (!strncmp(name, "treble", 6)) { clef = TREBLE; name += 6; } else if (!strncmp(name, "alto", 4) || !strncmp(name, "tenor", 5)) { clef = ALTO; clef_line = *name == 'a' ? 3 : 4; s->u.clef.check_pitch = 1; if (*name == 'a') name += 4; else name += 5; } else if (!strncmp(name, "perc", 4)) { clef = PERC; clef_line = 3; name += 4; } else if (!strncmp(name, "auto", 4)) { clef = AUTOCLEF; name += 4; } else if (strncmp(name, "none", 4) == 0) { clef = TREBLE; s->u.clef.invis = 1; s->flags |= ABC_F_INVIS; name += 4; } else { syntax("Unknown clef", name); clef = TREBLE; } } if (clef >= 0) { if (isdigit((unsigned char) *name)) clef_line = *name++ - '0'; if (name[1] == '8') { switch (*name) { case '^': transpose -= 7; case '+': s->u.clef.octave = 1; break; case '_': transpose += 7; case '-': s->u.clef.octave = -1; break; } } } if (middle) { int pit = 0, acc, l; static const char line_tb[7] = {ALTO, TREBLE, ALTO, BASS, ALTO, BASS, ALTO}; warn = middle; /* 'middle=<note pitch>' */ parse_acc_pit(middle, &pit, &acc); if (acc < 0) // if error pit = 22; if (clef < 0) clef = line_tb[(pit + 7) % 7]; switch (clef) { default: l = 20 + 4; break; case ALTO: l = 16 + 4; break; case BASS: l = 12 + 4; break; } clef_line = (l - pit + 28) % 7; if (clef_line & 1) { syntax("Bad 'middle' value for the clef", middle); pit++; } clef_line = clef_line / 2 + 1; transpose = l - (clef_line - 1) * 2 - pit; s->u.clef.check_pitch = 0; } s->u.clef.type = clef; s->u.clef.line = clef_line; s->u.clef.transpose = transpose; if (warn) { int sev_sav; sev_sav = severity; syntax("Warning: Deprecated or non-standard item", warn); severity = sev_sav; } } /* get the octave= value */ static int parse_octave(char *p) { int oct; if (p) { oct = 1; if (*p == '-') { oct = -1; p++; } if (*p >= '0' && *p <= '4') return oct * (*p - '0'); syntax("Bad octave value", p); } return NO_OCTAVE; } /* -- parse a 'K:' -- */ static void parse_key(char *p, struct SYMBOL *s) { int sf, empty, instr; // int mode; char *clef_name, *clef_middle, *clef_stlines, *clef_scale; char *p_octave, *p_cue, *p_map; // set important default values // s->u.key.stafflines = "|||||"; s->u.key.octave = NO_OCTAVE; if (*p == '\0') { s->u.key.empty = 1; return; } sf = 0; // mode = 0; empty = 0; instr = 0; switch (*p++) { case 'F': sf = -1; break; case 'B': sf++; case 'E': sf++; case 'A': sf++; case 'D': sf++; case 'G': sf++; case 'C': break; case 'H': if (*p == 'P') { instr = K_HP; p++; } else if (*p == 'p') { instr = K_Hp; sf = 2; p++; } else { syntax("Unknown bagpipe-like key", p); } break; case 'P': instr = K_DRUM; p++; break; case 'n': if (strncmp(p, "one", 3) == 0) { // none empty = 2; p += 3; while (isspace((unsigned char) *p)) p++; if (*p == '\0') { s->u.key.empty = empty; return; } break; } // fall thru default: p--; empty = 1; break; } s->u.key.empty = empty; if (!empty) { if (*p == '#') { sf += 7; p++; } else if (*p == 'b') { sf -= 7; p++; } while (isspace((unsigned char) *p)) p++; switch (*p) { case 'a': case 'A': if (strncasecmp(p, "aeo", 3) == 0) { sf -= 3; // mode = 5; break; } goto unk; case 'd': case 'D': if (strncasecmp(p, "dor", 3) == 0) { sf -= 2; // mode 
= 1; break; } goto unk; case 'i': case 'I': if (strncasecmp(p, "ion", 3) == 0) { // mode = 0; break; } goto unk; case 'l': case 'L': if (strncasecmp(p, "loc", 3) == 0) { sf -= 5; // mode = 6; break; } if (strncasecmp(p, "lyd", 3) == 0) { sf += 1; // mode = 3; break; } goto unk; case 'm': case 'M': if (strncasecmp(p, "maj", 3) == 0) break; if (strncasecmp(p, "mix", 3) == 0) { sf -= 1; // mode = 4; break; } if (strncasecmp(p, "min", 3) == 0 || !isalpha((unsigned char) p[1])) { /* 'm' alone */ sf -= 3; // mode = 5; break; } goto unk; case 'p': case 'P': if (strncasecmp(p, "phr", 3) == 0) { sf -= 4; // mode = 2; break; } goto unk; default: unk: empty = 1; // (local value) break; } if (!empty) { while (isalpha((unsigned char) *p)) p++; while (isspace((unsigned char) *p)) p++; } // [exp] accidentals if (strncmp(p, "exp ", 4) == 0) { p += 4; while (isspace((unsigned char) *p)) p++; if (*p == '\0') syntax("no accidental after 'exp'", p); s->u.key.exp = 1; } if (s->u.key.exp && strncmp(p, "none", 4) == 0) { sf = 0; p += 4; while (isspace((unsigned char) *p)) p++; } else switch (*p) { case '^': case '_': case '=': p = parse_acc(p, s); /* accidentals */ break; } } if (sf > 7 || sf < -7) { syntax("Too many sharps/flats", p); if (sf > 0) sf -= 12; else sf += 12; } // extra parameters clef_name = clef_middle = clef_stlines = clef_scale = NULL; p_octave = p_cue = p_map = NULL; parse_extra(p, &clef_name, &clef_middle, &clef_stlines, &clef_scale, &p_octave, &p_cue, &p_map); s->u.key.sf = sf; // s->u.key.mode = mode; s->u.key.instr = instr; s->u.key.octave = parse_octave(p_octave); if (p_cue) { if (strncmp(p_cue, "on", 2) == 0) s->u.key.cue = 1; else s->u.key.cue = -1; } if (clef_stlines) s->u.key.stafflines = clef_stlines; if (clef_scale) { float sc; sc = atof(clef_scale); if (sc >= 0.5 && sc <= 3) s->u.key.staffscale = sc; else syntax("Bad value of staffscale", clef_scale); } if (clef_name || clef_middle) { s = abc_new(ABC_T_CLEF, NULL); parse_clef(s, clef_name, clef_middle); } if (p_map) { strcpy(tex_buf, "%%voicemap "); get_str(&tex_buf[11], p_map, TEX_BUF_SZ - 12); abc_new(ABC_T_PSCOM, tex_buf); } } /* -- set default length from 'L:' -- */ static char *get_len(char *p, struct SYMBOL *s) { int l1, l2, d; char *error_txt = NULL; if (strcmp(p, "auto") == 0) { /* L:auto */ ulen = 15120; // 2*2*2*2*3*3*3*5*7 s->u.length.base_length = -1; return error_txt; } l1 = 0; l2 = 1; if (sscanf(p, "%d /%d ", &l1, &l2) != 2 || l1 == 0) { s->u.length.base_length = ulen ? 
ulen : BASE_LEN / 8; return "Bad unit note length: unchanged"; } if (l2 == 0) { error_txt = "Bad length divisor, set to 4"; l2 = 4; } d = BASE_LEN / l2; if (d * l2 != BASE_LEN) { error_txt = "Length incompatible with BASE, using 1/8"; d = BASE_LEN / 8; } else { d *= l1; if (l1 != 1 || (l2 & (l2 - 1))) { error_txt = "Incorrect unit note length, using 1/8"; d = BASE_LEN / 8; } } s->u.length.base_length = d; return error_txt; } /* -- parse a 'M:' -- */ static char *parse_meter(char *p, struct SYMBOL *s) { int m1, m2, d, wmeasure, nm, in_parenth; unsigned i; char *q; static char top_err[] = "Cannot identify meter top"; if (*p == '\0') return "Empty meter string"; nm = 0; in_parenth = 0; m1 = 0; if (strncmp(p, "none", 4) == 0) { p += 4; /* no meter */ wmeasure = 1; /* simplify measure numbering and MREST conversion */ } else { wmeasure = 0; while (*p != '\0') { if (*p == '=') break; if (nm >= MAX_MEASURE) return "Too many values in M:"; switch (*p) { case 'C': s->u.meter.meter[nm].top[0] = *p++; if (*p == '|') s->u.meter.meter[nm].top[1] = *p++; m1 = 4; m2 = 4; break; case 'c': case 'o': if (*p == 'c') m1 = 4; else m1 = 3; m2 = 4; s->u.meter.meter[nm].top[0] = *p++; if (*p == '.') s->u.meter.meter[nm].top[1] = *p++; break; case '(': if (p[1] == '(') { /* "M:5/4 ((2+3)/4)" */ in_parenth = 1; s->u.meter.meter[nm++].top[0] = *p++; } q = p + 1; while (*q != '\0') { if (*q == ')' || *q == '/') break; q++; } if (*q == ')' && q[1] == '/') { /* "M:5/4 (2+3)/4" */ p++; /* remove the parenthesis */ continue; } /* "M:5 (2+3)" */ /* fall thru */ case ')': in_parenth = *p == '('; s->u.meter.meter[nm++].top[0] = *p++; continue; default: if (sscanf(p, "%d", &m1) != 1 || m1 <= 0) return top_err; i = 0; m2 = 2; /* default when no bottom value */ for (;;) { while (isdigit((unsigned char) *p) && i < sizeof s->u.meter.meter[0].top) s->u.meter.meter[nm].top[i++] = *p++; if (*p == ')') { if (p[1] != '/') break; p++; } if (*p == '/') { p++; if (sscanf(p, "%d", &m2) != 1 || m2 <= 0) return "Cannot identify meter bottom"; i = 0; while (isdigit((unsigned char) *p) && i < sizeof s->u.meter.meter[0].bot) s->u.meter.meter[nm].bot[i++] = *p++; break; } if (*p != ' ' && *p != '+') break; if (*p == '\0' || p[1] == '(') /* "M:5 (2/4+3/4)" */ break; if (i < sizeof s->u.meter.meter[0].top) s->u.meter.meter[nm].top[i++] = *p++; if (sscanf(p, "%d", &d) != 1 || d <= 0) return top_err; if (p[-1] == ' ') { if (d > m1) m1 = d; } else { m1 += d; } } break; } if (!in_parenth) wmeasure += m1 * BASE_LEN / m2; nm++; if (*p == ' ') p++; else if (*p == '+') s->u.meter.meter[nm++].top[0] = *p++; } } meter = m1; if (*p == '=') { if (sscanf(++p, "%d/%d", &m1, &m2) != 2 || m1 <= 0 || m2 <= 0) return "Cannot identify meter explicit duration"; wmeasure = m1 * BASE_LEN / m2; s->u.meter.expdur = 1; } s->u.meter.wmeasure = wmeasure; s->u.meter.nmeter = nm; /* in the tune header, change the unit note length */ if (parse.abc_state == ABC_S_HEAD && ulen == 0) { if (wmeasure >= BASE_LEN * 3 / 4 || wmeasure <= 1) ulen = BASE_LEN / 8; else ulen = BASE_LEN / 16; } return 0; } /* -- get a possibly quoted string -- */ char *get_str(char *d, /* destination */ char *s, /* source */ int maxlen) /* max length */ { char c; maxlen--; /* have place for the EOS */ while (isspace((unsigned char) *s)) s++; if (*s == '"') { s++; while ((c = *s) != '\0') { if (c == '"') { s++; break; } if (c == '\\') { if (--maxlen > 0) *d++ = c; c = *++s; } if (--maxlen > 0) *d++ = c; s++; } } else { while ((c = *s) != '\0') { if (isspace((unsigned char) c)) break; if (--maxlen > 0) 
*d++ = c; s++; } } *d = '\0'; while (isspace((unsigned char) *s)) s++; return s; } /* -- parse a tempo (Q:) -- */ static char *parse_tempo(char *p, struct SYMBOL *s) { char c, str[80]; int i, l, n, top, bot; /* string before */ if (*p == '"') { p = get_str(str, p, sizeof str); s->u.tempo.str1 = getarena(strlen(str) + 1); strcpy(s->u.tempo.str1, str); } /* beat */ if (*p == 'C' || *p == 'c' || *p == 'L' || *p == 'l') { s->u.tempo.beats[0] = ulen; if (parse.abc_vers >= (2 << 16)) syntax("Deprecated Q: value", p); p++; while (isspace((unsigned char) *p)) p++; if (*p != '=') goto inval; c = '='; p--; } else if (isdigit((unsigned char) *p)) { if (strchr(p, '/') != NULL) { i = 0; while (isdigit((unsigned char) *p)) { if (sscanf(p, "%d/%d%n", &top, &bot, &n) != 2 || bot <= 0) goto inval; l = (BASE_LEN * top) / bot; if (l <= 0 || i >= sizeof s->u.tempo.beats / sizeof s->u.tempo.beats[0]) goto inval; s->u.tempo.beats[i++] = l; p += n; while (isspace((unsigned char) *p)) p++; } c = *p; if (c != '=') goto inval; } else { s->u.tempo.beats[0] = ulen; if (parse.abc_vers >= (2 << 16)) syntax("Deprecated Q: value", p); c = '='; p--; } } else { c = '\0'; } /* tempo value */ if (c == '=') { p++; if (strncmp(p, "ca. ", 4) == 0) { s->u.tempo.circa = 1; p += 4; } if (sscanf(p, "%d/%d%n", &top, &bot, &n) == 2) { if (bot <= 0) goto inval; l = (BASE_LEN * top) / bot; if (l <= 0) goto inval; s->u.tempo.new_beat = l; } else { if (sscanf(p, "%d%n", &top, &n) != 1) goto inval; s->u.tempo.tempo = top; } p += n; while (isspace((unsigned char) *p)) p++; } /* string after */ if (*p == '"') { p = get_str(str, p, sizeof str); s->u.tempo.str2 = getarena(strlen(str) + 1); strcpy(s->u.tempo.str2, str); } return 0; inval: return "Invalid tempo"; } /* -- get a user defined symbol (U:) -- */ static char *get_user(char *p, struct SYMBOL *s) { unsigned char c; char *value; c = (unsigned char) *p++; if (c == '\\') { c = (unsigned char) *p++; switch (c) { case 'n': c = '\n'; break; case 't': c = '\t'; break; } } switch (char_tb[c]) { default: return "Bad decoration character"; case CHAR_DECO: break; case CHAR_BAD: case CHAR_IGN: case CHAR_SPAC: case CHAR_DECOS: case CHAR_LINEBREAK: char_tb[c] = CHAR_DECO; break; } s->u.user.symbol = c; /* skip '=' */ while (isspace((unsigned char) *p) || *p == '=') p++; if (char_tb[(unsigned char) *p] == CHAR_DECOS) p++; /*fixme: 'U: <char> = "text"' is not treated */ get_deco(p, &s->u.user.value); if (!s->u.user.value) return 0; /* treat special pseudo decorations */ value = parse.deco_tb[s->u.user.value - 128]; if (strcmp(value, "beambreak") == 0) char_tb[c] = CHAR_SPAC; else if (strcmp(value, "ignore") == 0) char_tb[c] = CHAR_IGN; else if (strcmp(value, "nil") == 0 || strcmp(value, "none") == 0) char_tb[c] = CHAR_BAD; else return 0; s->u.user.value = 0; /* not a decoration */ return 0; } /* -- parse the voice parameters (V:) -- */ static char *parse_voice(char *p, struct SYMBOL *s) { int voice; char *error_txt = NULL; char *clef_name, *clef_middle, *clef_stlines, *clef_scale; char *p_octave, *p_cue, *p_map; signed char *p_stem; static struct kw_s { char *name; short len; short index; } kw_tb[] = { {"name=", 5, 0}, {"nm=", 3, 0}, {"subname=", 8, 1}, {"sname=", 6, 1}, {"snm=", 4, 1}, {"merge", 5, 2}, {"up", 2, 3}, {"down", 4, 4}, {"stem=", 5, 5}, {"gstem=", 6, 6}, {"auto", 4, 7}, {"dyn=", 4, 8}, {"lyrics=", 7, 9}, {"scale=", 6, 10}, {"gchord=", 7, 11}, {0} }; struct kw_s *kw; /* save the parameters of the previous voice */ curvoice->ulen = ulen; curvoice->microscale = microscale; if 
(voice_tb[0].id[0] == '\0') { switch (s->abc_prev->abc_type) { case ABC_T_EOLN: case ABC_T_NOTE: case ABC_T_REST: case ABC_T_BAR: /* the previous voice was implicit (after K:) */ voice_tb[0].id[0] = '1'; break; } } { char *id, sep; id = p; while (isalnum((unsigned char) *p) || *p == '_') p++; sep = *p; *p = '\0'; if (voice_tb[0].id[0] == '\0') { voice = 0; /* first voice */ } else { for (voice = 0; voice <= nvoice; voice++) { if (strcmp(id, voice_tb[voice].id) == 0) goto found; } if (voice >= MAXVOICE) { syntax("Too many voices", id); voice--; } } nvoice = voice; strncpy(voice_tb[voice].id, id, sizeof voice_tb[voice].id - 1); voice_tb[voice].mvoice = voice; found: strcpy(s->u.voice.id, voice_tb[voice].id); *p = sep; } curvoice = &voice_tb[voice]; s->u.voice.voice = voice; /* if in tune, set the voice parameters */ if (parse.abc_state == ABC_S_TUNE) { ulen = curvoice->ulen; microscale = curvoice->microscale; } /* parse the other parameters */ clef_name = clef_middle = clef_stlines = clef_scale = NULL; p_octave = p_cue = p_map = NULL; p_stem = &s->u.voice.stem; for (;;) { while (isspace((unsigned char) *p)) p++; if (*p == '\0') break; p = parse_extra(p, &clef_name, &clef_middle, &clef_stlines, &clef_scale, &p_octave, &p_cue, &p_map); if (*p == '\0') break; for (kw = kw_tb; kw->name; kw++) { if (strncmp(p, kw->name, kw->len) == 0) break; } if (!kw->name) { while (!isspace((unsigned char) *p) && *p != '\0') p++; /* ignore unknown keywords */ continue; } p += kw->len; switch (kw->index) { case 0: /* name */ p = get_str(tex_buf, p, TEX_BUF_SZ); s->u.voice.fname = getarena(strlen(tex_buf) + 1); strcpy(s->u.voice.fname, tex_buf); break; case 1: /* subname */ p = get_str(tex_buf, p, TEX_BUF_SZ); s->u.voice.nname = getarena(strlen(tex_buf) + 1); strcpy(s->u.voice.nname, tex_buf); break; case 2: /* merge */ s->u.voice.merge = 1; break; case 3: /* up */ *p_stem = 1; break; case 4: /* down */ *p_stem = -1; break; case 5: /* stem= */ p_stem = &s->u.voice.stem; break; case 6: /* gstem= */ p_stem = &s->u.voice.gstem; break; case 7: /* auto */ *p_stem = 2; break; case 8: /* dyn= */ p_stem = &s->u.voice.dyn; break; case 9: /* lyrics= */ p_stem = &s->u.voice.lyrics; break; case 10: { /* scale= */ float sc; sc = atof(p); if (sc >= 0.5 && sc <= 2) s->u.voice.scale = sc; else error_txt = "Bad value for voice scale"; while (!isspace((unsigned char) *p) && *p != '\0') p++; break; } case 11: /* gchord= */ p_stem = &s->u.voice.gchord; break; } } s->u.voice.octave = parse_octave(p_octave); if (p_cue) { if (strncmp(p_cue, "on", 2) == 0) s->u.voice.cue = 1; else s->u.voice.cue = -1; } if (clef_stlines) s->u.voice.stafflines = clef_stlines; // else // s->u.voice.stafflines = "|||||"; if (clef_scale) { float sc; sc = atof(clef_scale); if (sc >= 0.5 && sc <= 3) s->u.voice.staffscale = sc; else syntax("Bad value of staffscale", clef_scale); } if (clef_name || clef_middle) { s = abc_new(ABC_T_CLEF, NULL); parse_clef(s, clef_name, clef_middle); } if (p_map) { strcpy(tex_buf, "%%voicemap "); get_str(&tex_buf[11], p_map, TEX_BUF_SZ - 12); abc_new(ABC_T_PSCOM, tex_buf); } return error_txt; } /* -- parse a bar -- */ static char *parse_bar(char *p) { struct SYMBOL *s; char *q; int bar_type, i; char repeat_value[32]; q = --p; // keep the first char bar_type = 0; for (;;) { switch (*p++) { case '|': bar_type <<= 4; bar_type |= B_BAR; continue; case '[': bar_type <<= 4; bar_type |= B_OBRA; continue; case ']': bar_type <<= 4; bar_type |= B_CBRA; continue; case ':': bar_type <<= 4; bar_type |= B_COL; continue; default: break; } 
break; } p--; /* if the last element is '[', it may start * a chord, an embedded header or an other bar */ if ((bar_type & 0x0f) == B_OBRA && bar_type != B_OBRA && *p != ' ') { bar_type >>= 4; p--; } if (bar_type == (B_OBRA << 8) + (B_BAR << 4) + B_CBRA) /* [|] */ bar_type = (B_OBRA << 4) + B_CBRA; /* [] */ /* curvoice->last_note = NULL; */ if (vover > 0) { curvoice = &voice_tb[curvoice->mvoice]; vover = 0; } s = abc_new(ABC_T_BAR, gchord); if (gchord) gchord = NULL; /* handle the repeat sequences */ if (bar_type == B_COL) { bar_type = B_BAR; s->u.bar.dotted = 1; } else { if (*q == ']') { /* repeat bar stop */ i = p - q - 1; if (i > 0) /* remove the starting ']' */ s->u.bar.type &= (1 << (i * 4)) - 1; s->flags |= ABC_F_RBSTOP; s->sflags |= S_RBSTOP; } else if ((bar_type & 0x0f) == B_COL /* left or */ || *q == ':') { /* right repeat bar */ s->flags |= ABC_F_RBSTOP; s->sflags |= S_RBSTOP; if (*q == ':') /* right repeat bar */ s->sflags |= S_RRBAR; } } s->u.bar.type = bar_type; if (dc.n > 0) { memcpy(&s->u.bar.dc, &dc, sizeof s->u.bar.dc); dc.n = 0; } if (!lyric_started) { lyric_started = 1; s->flags |= ABC_F_LYRIC_START; } if (!isdigit((unsigned char) *p) /* if not a repeat bar */ && (*p != '"' || p[-1] != '[')) /* ('["' only) */ return p; if (*p == '"') { p = get_str(repeat_value, p, sizeof repeat_value); } else { char *q; q = repeat_value; while (isdigit((unsigned char) *p) || *p == ',' || *p == '-' || (*p == '.' && isdigit((unsigned char) p[1]))) { if (q < &repeat_value[sizeof repeat_value - 1]) *q++ = *p++; else p++; } *q = '\0'; } if (bar_type != B_OBRA || s->text) { s = abc_new(ABC_T_BAR, repeat_value); s->u.bar.type = B_OBRA; } else { s->text = getarena(strlen(repeat_value) + 1); strcpy(s->text, repeat_value); } s->u.bar.repeat_bar = 1; s->flags |= ABC_F_RBSTART | ABC_F_RBSTOP; s->sflags |= S_RBSTART | S_RBSTOP; return p; } // parse the note accidental and pitch char *parse_acc_pit(char *p, int *pit, int *acc) { /* look for accidental sign */ switch (*p) { case '^': p++; if (*p == '^') { p++; *acc = A_DS; } else { *acc = A_SH; } break; case '=': p++; *acc = A_NT; break; case '_': p++; if (*p == '_') { p++; *acc = A_DF; } else { *acc = A_FT; } break; default: *acc = 0; } /* look for microtone value */ if (*acc != 0 && (isdigit((unsigned char) *p) || (*p == '/' && microscale == 0))) { int n, d; char *q; n = d = 1; if (*p != '/') { n = strtol(p, &q, 10); p = q; } if (*p == '/') { p++; if (!isdigit((unsigned char) *p)) { d = 2; } else { d = strtol(p, &q, 10); p = q; } } if (microscale == 0) { d--; d += (n - 1) << 8; /* short [ (n-1) | (d-1) ] */ if (d == 0) { n = MAXMICRO - 1; } else { for (n = 1; n < MAXMICRO; n++) { if (parse.micro_tb[n] == d) break; if (parse.micro_tb[n] == 0) { parse.micro_tb[n] = d; break; } } } if (n == MAXMICRO) { syntax("Too many microtone accidentals", p); n = 0; } } *acc += (n << 3); } /* get the pitch */ { char *p_n; p_n = strchr(all_notes, *p); if (!p_n || *p == '\0') { syntax(*acc ? 
"Missing note after accidental" : "Not a note", p); *acc = -1; if (*p == '\0') p--; } else { *pit = p_n - all_notes + 16; } p++; } while (*p == '\'') { /* eat up following ' chars */ *pit += 7; p++; } while (*p == ',') { /* eat up following , chars */ *pit -= 7; p++; } return p; } /* -- parse the decorations of notes and bars -- */ static char *parse_deco(char *p, struct decos *deco, int m) /* note index / -1 */ { int n; unsigned char t; n = deco->n; for (;;) { t = (unsigned char) *p++; if (char_tb[t] != CHAR_DECO && char_tb[t] != CHAR_DECOS) break; if (char_tb[t] == CHAR_DECOS) p = get_deco(p, &t); if (n >= MAXDC) { syntax("Too many decorations for the note", p); } else if (t != 0) { deco->tm[n].t = t; deco->tm[n++].m = m; } } deco->n = n; return p - 1; } /* -- parse a decoration line (d: or s:) -- */ static char *parse_decoline(char *p) { struct SYMBOL *is; unsigned char t; int n; if ((is = deco_cont) == NULL) is = deco_start; else deco_cont = NULL; /* scan the decoration line */ while (*p != '\0') { while (isspace((unsigned char) *p)) p++; if (*p == '\0') break; switch (*p) { case '|': while (is && (is->abc_type != ABC_T_BAR || is->u.bar.type == B_OBRA)) is = is->abc_next; if (!is) { syntax("Not enough bar lines for deco line", p); return NULL; } is = is->abc_next; p++; continue; case '*': while (is && is->abc_type != ABC_T_NOTE) is = is->abc_next; if (!is) { syntax("Not enough notes for deco line", p); return NULL; } is = is->abc_next; p++; continue; case '\\': if (p[1] == '\0') { if (!is) return "Not enough notes for deco line"; deco_cont = is; return NULL; } syntax("'\\' ignored", p); p++; continue; case '"': p = parse_gchord(p + 1); break; default: if (char_tb[(unsigned char) *p] == CHAR_DECOS) p = get_deco(p + 1, &t); else t = (unsigned char) *p++; break; } /* store the decoration and gchord/annotation in the next note */ while (is && (is->abc_type != ABC_T_NOTE || (is->flags & ABC_F_GRACE))) is = is->abc_next; if (!is) return "Not enough notes for deco line"; if (gchord) { if (is->text) { char *gch; n = strlen(is->text); gch = getarena(n + strlen(gchord) + 2); strcpy(gch, is->text); gch[n] = '\n'; strcpy(gch + n + 1, gchord); gchord = gch; } is->text = gchord; gchord = NULL; } else { n = is->u.note.dc.n; if (n >= MAXDC) { syntax("Too many decorations for the note", p); } else if (t != 0) { is->u.note.dc.tm[n].t = t; is->u.note.dc.tm[n].m = -1; is->u.note.dc.n = ++n; } } is = is->abc_next; } return NULL; } /* -- parse a guitar chord / annotation -- */ static char *parse_gchord(char *p) { char *q; int l, l2; q = p; while (*p != '"') { if (*p == '\\') p++; if (*p == '\0') { syntax("No end of guitar chord", p); break; } p++; } l = p - q; if (gchord) { char *gch; /* many guitar chords: concatenate with '\n' */ l2 = strlen(gchord); gch = getarena(l2 + 1 + l + 1); strcpy(gch, gchord); gch[l2++] = '\n'; strncpy(&gch[l2], q, l); gch[l2 + l] = '\0'; gchord = gch; } else { gchord = getarena(l + 1); strncpy(gchord, q, l); gchord[l] = '\0'; } if (*p != '\0') p++; return p; } /* -- parse a note length -- */ static char *parse_len(char *p, int dur_u, int *p_len) { int len, fac; int err = 0; char *q; len = dur_u; if (isdigit((unsigned char) *p)) { len *= strtol(p, &q, 10); if (len <= 0 || len > 10000) { syntax("Bad length", p); len = dur_u; } p = q; } if (*p != '/') { *p_len = len; return p; } if (isdigit((unsigned char) p[1])) { fac = strtol(p + 1, &q, 10); p = q; if (fac == 0 || (fac & (fac - 1))) err = 1; else len /= fac; } else { while (*p == '/') { if (len & 1) err = 1; len /= 2; p++; } } if 
(err || !len) { syntax("Bad length divisor", p - 1); len = dur_u; } *p_len = len; return p; } /* -- parse a ABC line -- */ /* return 1 on end of tune, and 2 on start of new tune */ static int parse_line(char *p) { struct SYMBOL *s; char *q, c; char *dot = NULL; struct SYMBOL *last_note_sav = NULL; struct decos dc_sav; int i, flags, flags_sav = 0, slur; static char qtb[10] = {0, 1, 3, 2, 3, 0, 2, 0, 3, 0}; colnum = 0; switch (*p) { case '\0': /* blank line */ switch (parse.abc_state) { case ABC_S_GLOBAL: if (parse.last_sym && parse.last_sym->abc_type != ABC_T_NULL) abc_new(ABC_T_NULL, NULL); case ABC_S_HEAD: /*fixme: may have blank lines in headers?*/ return 0; } return 1; case '%': if (p[1] == '%') { s = abc_new(ABC_T_PSCOM, p); p += 2; /* skip '%%' */ if (strncasecmp(p, "decoration ", 11) == 0) { p += 11; while (isspace((unsigned char) *p)) p++; switch (*p) { case '!': char_tb['!'] = CHAR_DECOS; char_tb['+'] = CHAR_BAD; break; case '+': char_tb['+'] = CHAR_DECOS; char_tb['!'] = CHAR_BAD; break; } return 0; } if (strncasecmp(p, "linebreak ", 10) == 0) { for (i = 0; i < sizeof char_tb; i++) { if (char_tb[i] == CHAR_LINEBREAK) char_tb[i] = i != '!' ? CHAR_BAD : CHAR_DECOS; } p += 10; for (;;) { while (isspace((unsigned char) *p)) p++; if (*p == '\0') break; switch (*p) { case '!': case '$': case '*': case ';': case '?': case '@': char_tb[(unsigned char) *p++] = CHAR_LINEBREAK; break; case '<': if (strncmp(p, "<none>", 6) == 0) return 0; if (strncmp(p, "<EOL>", 5) == 0) { char_tb['\n'] = CHAR_LINEBREAK; p += 5; break; } /* fall thru */ default: if (strcmp(p, "lock") != 0) syntax("Invalid character in %%%%linebreak", p); return 0; } } return 0; } if (strncasecmp(p, "microscale ", 11) == 0) { int v; p += 11; while (isspace((unsigned char) *p)) p++; sscanf(p, "%d", &v); if (v < 4 || v >= 256 || v & 1) syntax("Invalid value in %%microscale", p); else microscale = v; return 0; } if (strncasecmp(p, "user ", 5) == 0) { p += 5; while (isspace((unsigned char) *p)) p++; get_user(p, s); return 0; } return 0; } /* fall thru */ case '\\': /* abc2mtex specific lines */ return 0; /* skip */ } /* header fields */ if (p[1] == ':' && *p != '|' && *p != ':') { /* not '|:' nor '::' */ int new_tune; new_tune = parse_info(p); /* handle BarFly voice definition */ /* 'V:n <note line ending with a bar>' */ if (*p != 'V' || parse.abc_state != ABC_S_TUNE) return new_tune; /* (normal return) */ c = p[strlen(p) - 1]; if (c != '|' && c != ']') return new_tune; while (!isspace((unsigned char) *p) && *p != '\0') p++; while (isspace((unsigned char) *p)) p++; } if (parse.abc_state != ABC_S_TUNE) return 0; /* music */ flags = 0; if (parse.abc_vers <= (2 << 16)) lyric_started = 0; deco_start = deco_cont = NULL; slur = 0; while (*p != '\0') { colnum = p - abc_line; switch (char_tb[(unsigned char) *p++]) { case CHAR_GCHORD: /* " */ if (flags & ABC_F_GRACE) goto bad_char; p = parse_gchord(p); break; case CHAR_GR_ST: /* '{' */ if (flags & ABC_F_GRACE) goto bad_char; last_note_sav = curvoice->last_note; curvoice->last_note = NULL; memcpy(&dc_sav, &dc, sizeof dc); dc.n = 0; flags_sav = flags; flags = ABC_F_GRACE; if (*p == '/') { flags |= ABC_F_SAPPO; p++; } break; case CHAR_GR_EN: /* '}' */ if (!(flags & ABC_F_GRACE)) goto bad_char; parse.last_sym->flags |= ABC_F_GR_END; if (dc.n != 0) syntax("Decoration ignored", p); curvoice->last_note = last_note_sav; memcpy(&dc, &dc_sav, sizeof dc); flags = flags_sav; break; case CHAR_DECOS: if (p[-1] == '!' 
&& char_tb['\n'] == CHAR_LINEBREAK && check_nl(p)) { s = abc_new(ABC_T_EOLN, NULL); /* abc2win EOL */ s->u.eoln.type = 2; break; } /* fall thru */ case CHAR_DECO: if (p[-1] == '.') { if (*p == '(' || *p == '-') { dot = p; break; } // if (*p == '|') { // p = parse_bar(p + 1); // parse.last_sym->u.bar.dotted = 1; // break; // } } p = parse_deco(p - 1, &dc, -1); break; case CHAR_LINEBREAK: s = abc_new(ABC_T_EOLN, NULL); // s->u.eoln.type = 0; break; case CHAR_NOTE: p = parse_note(p - 1, flags); flags &= ABC_F_GRACE; parse.last_sym->u.note.slur_st = slur; slur = 0; if (parse.last_sym->u.note.notes[0].len > 0) /* if not space */ curvoice->last_note = parse.last_sym; break; case CHAR_SLASH: /* '/' */ if (flags & ABC_F_GRACE) goto bad_char; if (char_tb[(unsigned char) p[-1]] != CHAR_BAR) goto bad_char; q = p; while (*q == '/') q++; if (char_tb[(unsigned char) *q] != CHAR_BAR) goto bad_char; s = abc_new(ABC_T_MREP, NULL); s->u.bar.type = 0; s->u.bar.len = q - p + 1; syntax("Non standard measure repeat syntax", p - 1); p = q; break; case CHAR_BSLASH: /* '\\' */ if (*p == '\0') break; syntax("'\\' ignored", p - 1); break; case CHAR_OBRA: /* '[' */ if (*p == '|' || *p == ']' || *p == ':' || isdigit((unsigned char) *p) || *p == '"' || *p == ' ') { if (flags & ABC_F_GRACE) goto bad_char; p = parse_bar(p); break; } if (p[1] != ':') { p = parse_note(p - 1, flags); /* chord */ flags &= ABC_F_GRACE; parse.last_sym->u.note.slur_st = slur; slur = 0; curvoice->last_note = parse.last_sym; break; } /* embedded information field */ #if 0 /*fixme:OK for [I:staff n], ?? for other headers*/ if (flags & ABC_F_GRACE) goto bad_char; #endif while (p[2] == ' ') { /* remove the spaces */ p[2] = ':'; p[1] = *p; p++; } c = ']'; q = p; while (*p != '\0' && *p != c) p++; if (*p == '\0') { syntax("Escape sequence [..] not closed", q); c = '\0'; } else { *p = '\0'; } parse_info(q); *p = c; if (c != '\0') p++; break; case CHAR_BAR: /* '|', ':' or ']' */ if (flags & ABC_F_GRACE) goto bad_char; p = parse_bar(p); break; case CHAR_OPAR: /* '(' */ if (*p > '0' && *p <= '9') { int pplet, qplet, rplet; pplet = strtol(p, &q, 10); p = q; if ((unsigned) pplet < sizeof qtb / sizeof qtb[0]) qplet = qtb[pplet]; else qplet = qtb[0]; rplet = pplet; if (*p == ':') { p++; if (isdigit((unsigned char) *p)) { qplet = strtol(p, &q, 10); p = q; } if (*p == ':') { p++; if (isdigit((unsigned char) *p)) { rplet = strtol(p, &q, 10); p = q; } } } if (rplet < 1) { syntax("Invalid 'r' in tuplet", p); break; } if (pplet >= 128 || qplet >= 128 || rplet >= 128) { syntax("Invalid 'p:q:r' in tuplet", p); break; } if (qplet == 0) qplet = meter % 3 == 0 ? 
3 : 2; s = abc_new(ABC_T_TUPLET, NULL); s->u.tuplet.p_plet = pplet; s->u.tuplet.q_plet = qplet; s->u.tuplet.r_plet = rplet; s->flags |= flags; break; } if (*p == '&') { if (flags & ABC_F_GRACE) goto bad_char; p++; if (vover != 0) { syntax("Nested voice overlay", p - 1); break; } s = abc_new(ABC_T_V_OVER, NULL); s->u.v_over.type = V_OVER_S; s->u.v_over.voice = curvoice - voice_tb; vover = -1; /* multi-bars */ break; } slur <<= 4; if (p == dot + 1 && dc.n == 0) slur |= SL_DOTTED; switch (*p) { case '\'': slur += SL_ABOVE; p++; break; case ',': slur += SL_BELOW; p++; break; default: slur += SL_AUTO; break; } break; case CHAR_CPAR: /* ')' */ switch (parse.last_sym->abc_type) { case ABC_T_NOTE: case ABC_T_REST: break; default: goto bad_char; } parse.last_sym->u.note.slur_end++; break; case CHAR_VOV: /* '&' */ if (flags & ABC_F_GRACE) goto bad_char; if (*p != ')' || vover == 0) { /*??*/ if (!curvoice->last_note) { syntax("Bad start of voice overlay", p); break; } s = abc_new(ABC_T_V_OVER, NULL); /*s->u.v_over.type = V_OVER_V; */ vover_new(); s->u.v_over.voice = curvoice - voice_tb; if (vover == 0) vover = 1; /* single bar */ break; } p++; vover = 0; s = abc_new(ABC_T_V_OVER, NULL); s->u.v_over.type = V_OVER_E; s->u.v_over.voice = curvoice->mvoice; curvoice->last_note = NULL; /* ?? */ curvoice = &voice_tb[curvoice->mvoice]; break; case CHAR_SPAC: /* ' ' and '\t' */ flags |= ABC_F_SPACE; break; case CHAR_MINUS: { /* '-' */ int tie_pos; if (!curvoice->last_note || curvoice->last_note->abc_type != ABC_T_NOTE) goto bad_char; if (p == dot + 1 && dc.n == 0) tie_pos = SL_DOTTED; else tie_pos = 0; switch (*p) { case '\'': tie_pos += SL_ABOVE; p++; break; case ',': tie_pos += SL_BELOW; p++; break; default: tie_pos += SL_AUTO; break; } for (i = 0; i <= curvoice->last_note->nhd; i++) { if (curvoice->last_note->u.note.notes[i].ti1 == 0) curvoice->last_note->u.note.notes[i].ti1 = tie_pos; else if (curvoice->last_note->nhd == 0) syntax("Too many ties", p); } break; } case CHAR_BRHY: /* '>' and '<' */ if (!curvoice->last_note) goto bad_char; i = 1; while (*p == p[-1]) { i++; p++; } if (i > 3) { syntax("Bad broken rhythm", p - 1); i = 3; } if (p[-1] == '<') i = -i; broken_rhythm(curvoice->last_note, i); curvoice->last_note->u.note.brhythm = i; break; case CHAR_IGN: /* '*' & '`' */ break; default: bad_char: syntax((flags & ABC_F_GRACE) ? 
"Bad character in grace note sequence" : "Bad character", p - 1); break; } } /*fixme: may we have grace notes across lines?*/ if (flags & ABC_F_GRACE) { syntax("EOLN in grace note sequence", p - 1); if (curvoice->last_note) curvoice->last_note->flags |= ABC_F_GR_END; curvoice->last_note = last_note_sav; memcpy(&dc, &dc_sav, sizeof dc); } /* add eoln */ s = abc_new(ABC_T_EOLN, NULL); if (flags & ABC_F_SPACE) s->flags |= ABC_F_SPACE; if (p[-1] == '\\' || char_tb['\n'] != CHAR_LINEBREAK) s->u.eoln.type = 1; /* no break */ return 0; } /* -- parse a note or a rest -- */ static char *parse_note(char *p, int flags) { struct SYMBOL *s; char *q; int pit = 0, len, acc, nostem, chord, j, m, n; if (flags & ABC_F_GRACE) { /* in a grace note sequence */ s = abc_new(ABC_T_NOTE, NULL); } else { s = abc_new(ABC_T_NOTE, gchord); if (gchord) gchord = NULL; } s->flags |= flags; s->u.note.notes[0].color = -1; if (!lyric_started) { lyric_started = 1; s->flags |= ABC_F_LYRIC_START; } if (*p != 'X' && *p != 'Z' && !(flags & ABC_F_GRACE)) { if (!deco_start) deco_start = s; } chord = 0; /* rest */ switch (*p) { case 'X': s->flags |= ABC_F_INVIS; case 'Z': /* multi-rest */ s->abc_type = ABC_T_MREST; p++; len = 1; if (isdigit((unsigned char) *p)) { len = strtol(p, &q, 10); if (len == 0 || len > 100) { syntax("Bad number of measures", p); len = 1; } p = q; } s->u.bar.type = 0; s->u.bar.len = len; goto add_deco; case 'y': /* space (BarFly) */ s->abc_type = ABC_T_REST; s->flags |= ABC_F_INVIS; p++; if (isdigit((unsigned char) *p) /* number of points */ || *p == '-') { /* accept negative offset... */ s->u.note.notes[0].shhd = strtol(p, &q, 10); p = q; } else { s->u.note.notes[0].shhd = 10; // default } goto add_deco; case 'x': /* invisible rest */ s->flags |= ABC_F_INVIS; /* fall thru */ case 'z': s->abc_type = ABC_T_REST; p = parse_len(p + 1, ulen, &len); s->u.note.notes[0].len = len; goto do_brhythm; case '[': /* '[..]' = chord */ chord = 1; p++; break; } q = p; /* get pitch, length and possible accidental */ m = 0; nostem = 0; for (;;) { if (chord) { if (m >= MAXHD) { syntax("Too many notes in chord", p); m--; } n = 0; if (*p == '.') { n = SL_DOTTED; p++; } if (*p == '(') { p++; switch (*p) { case '\'': n += SL_ABOVE; p++; break; case ',': n += SL_BELOW; p++; break; default: n += SL_AUTO; break; } s->u.note.notes[m].sl1 = (s->u.note.notes[m].sl1 << 3) + n; } } p = parse_deco(p, &dc, m); /* note head decorations */ p = parse_acc_pit(p, &pit, &acc); if (*p == '0') { nostem = 1; p++; } p = parse_len(p, (flags & ABC_F_GRACE) ? 
BASE_LEN / 8 : // for grace note alone ulen, &len); s->u.note.notes[m].pit = pit; s->pits[m] = pit; s->u.note.notes[m].len = len; s->u.note.notes[m].acc = acc; s->u.note.notes[m].color = -1; if (chord) { for (;;) { if (*p == '.') { if (p[1] != '-') break; p++; } if (*p == '-') { switch (p[1]) { case '\'': s->u.note.notes[m].ti1 = SL_ABOVE; p++; break; case ',': s->u.note.notes[m].ti1 = SL_BELOW; p++; break; default: s->u.note.notes[m].ti1 = SL_AUTO; break; } } else if (*p == ')') { s->u.note.notes[m].sl2++; } else { break; } p++; } } if (acc >= 0) /* if no error */ m++; /* normal case */ if (!chord) break; if (*p == ']') { p++; if (*p == '0') { nostem = 1; p++; } if (*p == '/' || isdigit((unsigned char) *p)) { p = parse_len(p, ulen, &len); for (j = 0; j < m; j++) { s->u.note.notes[j].len = len * s->u.note.notes[j].len / ulen; } } break; } if (*p == '\0') { syntax("Chord not closed", q); break; } } if (nostem) s->flags |= ABC_F_STEMLESS; if (m == 0) /* if no note (or error) */ goto err; s->u.note.microscale = microscale; s->nhd = m - 1; do_brhythm: if (curvoice->last_note && curvoice->last_note->u.note.brhythm != 0) broken_rhythm(s, -curvoice->last_note->u.note.brhythm); add_deco: if (dc.n > 0) { memcpy(s->abc_type != ABC_T_MREST ? &s->u.note.dc : &s->u.bar.dc, &dc, sizeof dc); dc.n = 0; } /* forbid rests in grace note sequences */ if (s->abc_type != ABC_T_NOTE && (flags & ABC_F_GRACE)) { syntax("Not a note in grace note sequence", p); goto err; } return p; err: if ((parse.last_sym = s->abc_prev) == NULL) { parse.first_sym = NULL; } else { s->abc_prev->abc_next = NULL; s->abc_prev->flags |= (s->flags & ABC_F_ERROR); } return p; } /* -- parse an information field -- */ /* return 2 on start of new tune */ static int parse_info(char *p) { struct SYMBOL *s; char info_type = *p; char *error_txt = NULL; s = abc_new(ABC_T_INFO, p); p += 2; switch (info_type) { case 'd': case 's': if (parse.abc_state == ABC_S_GLOBAL) break; if (!deco_start) { error_txt = "Erroneous 'd:'/'s:'"; break; } error_txt = parse_decoline(p); break; case 'K': if (parse.abc_state == ABC_S_GLOBAL) break; parse_key(p, s); if (parse.abc_state == ABC_S_HEAD) { int i; parse.abc_state = ABC_S_TUNE; if (ulen == 0) ulen = BASE_LEN / 8; for (i = MAXVOICE; --i >= 0; ) voice_tb[i].ulen = ulen; lyric_started = 0; } break; case 'L': error_txt = get_len(p, s); if (s->u.length.base_length > 0) ulen = s->u.length.base_length; break; case 'M': error_txt = parse_meter(p, s); break; case 'Q': error_txt = parse_tempo(p, s); break; case 'U': error_txt = get_user(p, s); break; case 'V': if (parse.abc_state == ABC_S_GLOBAL) break; error_txt = parse_voice(p, s); break; case 'X': memset(voice_tb, 0, sizeof voice_tb); nvoice = 0; curvoice = voice_tb; parse.abc_state = ABC_S_HEAD; lvlarena(1); return 2; } if (error_txt) syntax(error_txt, p); return 0; } /* -- print a syntax error message -- */ static void syntax(char *msg, char *q) { int n, len, m1, m2, pp; int maxcol = 73; severity = 1; n = q - abc_line; len = strlen(abc_line); if ((unsigned) n > (unsigned) len) n = -1; print_error(msg, n); if (n < 0) { if (q && *q != '\0') fprintf(stderr, " (near '%s')\n", q); return; } m1 = 0; m2 = len; if (m2 > maxcol) { if (n < maxcol) { m2 = maxcol; } else { m1 = n - 20; m2 = m1 + maxcol; if (m2 > len) m2 = len; } } fprintf(stderr, "%4d ", linenum); pp = 6; if (m1 > 0) { fprintf(stderr, "..."); pp += 3; } fprintf(stderr, "%.*s", m2 - m1, &abc_line[m1]); if (m2 < len) fprintf(stderr, "..."); fprintf(stderr, "\n"); if ((unsigned) n < 200) fprintf(stderr, "%*s\n", n 
+ pp - m1, "^"); if (last_sym) last_sym->flags |= ABC_F_ERROR; } /* -- switch to a new voice overlay -- */ static void vover_new(void) { int voice, mvoice; mvoice = curvoice->mvoice; for (voice = curvoice - voice_tb + 1; voice <= nvoice; voice++) if (voice_tb[voice].mvoice == mvoice) break; if (voice > nvoice) { if (nvoice >= MAXVOICE) { syntax("Too many voices", 0); return; } nvoice = voice; voice_tb[voice].id[0] = '&'; voice_tb[voice].mvoice = mvoice; } voice_tb[voice].ulen = curvoice->ulen; voice_tb[voice].microscale = curvoice->microscale; curvoice = &voice_tb[voice]; }
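The first listing ends here. Its parse_len() encodes a small grammar for ABC note lengths: an optional integer multiplier, then either an explicit divisor ("/4", which must be a power of two) or a run of bare '/' characters that each halve the length. The standalone sketch below restates that arithmetic outside the parser; demo_parse_len() and the unit value 384 are hypothetical, and error handling is reduced to falling back to the unit length.

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical re-statement of the length arithmetic in parse_len();
 * not part of the parser, error reporting simplified. */
static const char *demo_parse_len(const char *p, int unit, int *out)
{
	int len = unit;
	char *q;

	if (isdigit((unsigned char) *p)) {		/* integer multiplier, e.g. "A3" */
		len *= (int) strtol(p, &q, 10);
		p = q;
	}
	if (*p == '/') {
		if (isdigit((unsigned char) p[1])) {	/* explicit divisor, e.g. "A/4" */
			int fac = (int) strtol(p + 1, &q, 10);

			p = q;
			if (fac > 0 && !(fac & (fac - 1)))	/* power of two only */
				len /= fac;
		} else {
			while (*p == '/') {		/* each bare '/' halves, e.g. "A//" */
				len /= 2;
				p++;
			}
		}
	}
	*out = len > 0 ? len : unit;			/* fall back to the unit on error */
	return p;
}

int main(void)
{
	int len;

	demo_parse_len("3", 384, &len);
	printf("x3  -> %d\n", len);		/* 1152: three units */
	demo_parse_len("/4", 384, &len);
	printf("x/4 -> %d\n", len);		/* 96: one quarter unit */
	demo_parse_len("//", 384, &len);
	printf("x// -> %d\n", len);		/* 96: halved twice */
	return 0;
}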
null
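The second listing below repeats the parser with revised note/slur bookkeeping; the pitch arithmetic of parse_acc_pit() is common to both versions. As a quick reference, this hypothetical helper (demo_pitch(), not part of either listing) reproduces it: the note letter indexes "CDEFGABcdefgab" with an offset of 16, and each trailing ' or , shifts the result by one octave (7 diatonic steps). Accidentals and microtones are ignored here.

#include <stdio.h>
#include <string.h>

/* Hypothetical helper mirroring the pitch arithmetic of parse_acc_pit(). */
static int demo_pitch(const char *p)
{
	static const char all_notes[] = "CDEFGABcdefgab";
	const char *n;
	int pit;

	if (*p == '\0' || (n = strchr(all_notes, *p)) == NULL)
		return -1;			/* not a note letter */
	pit = (int) (n - all_notes) + 16;	/* C = 16 ... b = 29 */
	for (p++; *p != '\0'; p++) {
		if (*p == '\'')
			pit += 7;		/* one octave up */
		else if (*p == ',')
			pit -= 7;		/* one octave down */
		else
			break;
	}
	return pit;
}

int main(void)
{
	printf("C   -> %d\n", demo_pitch("C"));		/* 16 */
	printf("c'  -> %d\n", demo_pitch("c'"));	/* 23 + 7 = 30 */
	printf("G,, -> %d\n", demo_pitch("G,,"));	/* 20 - 14 = 6 */
	return 0;
}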
/* * Generic ABC parser. * * This file is part of abcm2ps. * * Copyright (C) 1998-2020 Jean-François Moine (http://moinejf.free.fr) * Adapted from abc2ps, Copyright (C) 1996-1998 Michael Methfessel * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. */ #include "config.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include "abcm2ps.h" /* global values */ int severity; /* error severity */ static int ulen; /* unit note length set by M: or L: */ static short meter; /* upper value of time sig for n-plets */ static unsigned char microscale; /* current microtone scale */ static signed char vover; /* voice overlay (1: single bar, -1: multi-bar */ static char lyric_started; /* lyric started */ static char *gchord; /* guitar chord */ static struct decos dc; /* decorations */ static struct SYMBOL *deco_start; /* 1st note of the line for d: / s: */ static struct SYMBOL *deco_cont; /* current symbol when d: / s: continuation */ static int g_abc_vers, g_ulen, g_microscale; static char g_char_tb[128]; static char *g_deco_tb[128]; /* global decoration names */ static unsigned short g_micro_tb[MAXMICRO]; /* global microtone values */ static char *abc_fn; /* current source file name */ static int linenum; /* current source line number */ static int colnum; /* current source column number */ static char *abc_line; /* line being parsed */ static struct SYMBOL *last_sym; /* last symbol for errors */ static short nvoice; /* number of voices (0..n-1) */ struct VOICE_S *curvoice; /* current voice while parsing */ struct parse parse; /* char table for note line parsing */ #define CHAR_BAD 0 #define CHAR_IGN 1 #define CHAR_NOTE 2 #define CHAR_GR_ST 3 #define CHAR_DECO 4 #define CHAR_GCHORD 5 #define CHAR_BSLASH 6 #define CHAR_OBRA 7 #define CHAR_BAR 8 #define CHAR_OPAR 9 #define CHAR_VOV 10 #define CHAR_SPAC 11 #define CHAR_MINUS 12 #define CHAR_CPAR 13 #define CHAR_BRHY 14 #define CHAR_DECOS 15 #define CHAR_SLASH 16 #define CHAR_GR_EN 17 #define CHAR_LINEBREAK 18 static char char_tb[256] = { 0, 0, 0, 0, 0, 0, 0, 0, /* 00 - 07 */ 0, CHAR_SPAC, CHAR_LINEBREAK, 0, 0, 0, 0, 0, /* 08 - 0f */ 0, 0, 0, 0, 0, 0, 0, 0, /* 10 - 17 */ 0, 0, 0, 0, 0, 0, 0, 0, /* 18 - 1f */ CHAR_SPAC, CHAR_DECOS, CHAR_GCHORD, CHAR_BAD, /* (sp) ! " # */ CHAR_BAD, CHAR_BAD, CHAR_VOV, CHAR_BAD, /* $ % & ' */ CHAR_OPAR, CHAR_CPAR, CHAR_BAD, CHAR_DECOS, /* ( ) * + */ CHAR_BAD, CHAR_MINUS, CHAR_DECO, CHAR_SLASH, /* , - . / */ CHAR_BAD, CHAR_BAD, CHAR_BAD, CHAR_BAD, /* 0 1 2 3 */ CHAR_BAD, CHAR_BAD, CHAR_BAD, CHAR_BAD, /* 4 5 6 7 */ CHAR_BAD, CHAR_BAD, CHAR_BAR, CHAR_BAD, /* 8 9 : ; */ CHAR_BRHY, CHAR_NOTE, CHAR_BRHY, CHAR_BAD, /* < = > ? 
*/ CHAR_BAD, CHAR_NOTE, CHAR_NOTE, CHAR_NOTE, /* @ A B C */ CHAR_NOTE, CHAR_NOTE, CHAR_NOTE, CHAR_NOTE, /* D E F G */ CHAR_DECO, CHAR_DECO, CHAR_DECO, CHAR_DECO, /* H I J K */ CHAR_DECO, CHAR_DECO, CHAR_DECO, CHAR_DECO, /* L M N O */ CHAR_DECO, CHAR_DECO, CHAR_DECO, CHAR_DECO, /* P Q R S */ CHAR_DECO, CHAR_DECO, CHAR_DECO, CHAR_DECO, /* T U V W */ CHAR_NOTE, CHAR_DECO, CHAR_NOTE, CHAR_OBRA, /* X Y Z [ */ CHAR_BSLASH, CHAR_BAR, CHAR_NOTE, CHAR_NOTE, /* \ ] ^ _ */ CHAR_IGN, CHAR_NOTE, CHAR_NOTE, CHAR_NOTE, /* ` a b c */ CHAR_NOTE, CHAR_NOTE, CHAR_NOTE, CHAR_NOTE, /* d e f g */ CHAR_DECO, CHAR_DECO, CHAR_DECO, CHAR_DECO, /* h i j k */ CHAR_DECO, CHAR_DECO, CHAR_DECO, CHAR_DECO, /* l m n o */ CHAR_DECO, CHAR_DECO, CHAR_DECO, CHAR_DECO, /* p q r s */ CHAR_DECO, CHAR_DECO, CHAR_DECO, CHAR_DECO, /* t u v w */ CHAR_NOTE, CHAR_NOTE, CHAR_NOTE, CHAR_GR_ST, /* x y z { */ CHAR_BAR, CHAR_GR_EN, CHAR_DECO, CHAR_BAD, /* | } ~ (del) */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 80 - 8f */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 90 - 9f */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* a0 - af */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* b0 - bf */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* c0 - cf */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* d0 - df */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* e0 - ef */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* f0 - ff */ }; static const char all_notes[] = "CDEFGABcdefgab"; static int parse_info(char *p); static char *parse_gchord(char *p); static int parse_line(char *p); static char *parse_note(char *p, int flags); static void syntax(char *msg, char *q); static void vover_new(void); /* -- abcMIDI like errors -- */ static void print_error(char *s, int col) { if (col >= 0) fprintf(stderr, "%s:%d:%d: error: %s\n", abc_fn, linenum, col, s); else fprintf(stderr, "%s:%d: error: %s\n", abc_fn, linenum, s); } /* -- new symbol -- */ static struct SYMBOL *abc_new(int type, char *text) { struct SYMBOL *s; s = getarena(sizeof(struct SYMBOL)); memset(s, 0, sizeof(struct SYMBOL)); if (text) { s->text = getarena(strlen(text) + 1); strcpy(s->text, text); } if (!parse.last_sym) { parse.first_sym = s; } else { if ((s->abc_next = parse.last_sym->abc_next) != NULL) s->abc_next->abc_prev = s; parse.last_sym->abc_next = s; s->abc_prev = parse.last_sym; } last_sym = parse.last_sym = s; s->abc_type = type; s->state = parse.abc_state; s->fn = abc_fn; s->linenum = linenum; s->colnum = colnum; return s; } /* -- parse an ABC line -- */ void abc_parse(char *p, char *fname, int ln) { abc_fn = fname; linenum = ln; abc_line = p; /* parse the music line */ switch (parse_line(p)) { case 2: /* start of tune (X:) */ g_abc_vers = parse.abc_vers; g_ulen = ulen; g_microscale = microscale; meter = 2; memcpy(g_char_tb, char_tb, sizeof g_char_tb); memcpy(g_deco_tb, parse.deco_tb, sizeof g_deco_tb); memcpy(g_micro_tb, parse.micro_tb, sizeof g_micro_tb); break; case 1: /* end of tune */ if (parse.first_sym) { do_tune(); parse.first_sym = parse.last_sym = NULL; } parse.abc_state = ABC_S_GLOBAL; parse.abc_vers = g_abc_vers; ulen = g_ulen; microscale = g_microscale; memcpy(char_tb, g_char_tb, sizeof g_char_tb); memcpy(parse.deco_tb, g_deco_tb, sizeof parse.deco_tb); memcpy(parse.micro_tb, g_micro_tb, sizeof parse.micro_tb); lvlarena(0); if (dc.n > 0) syntax("Decoration without symbol", 0); dc.n = 0; break; } } /* treat the end of file */ void abc_eof(void) { // if (parse.abc_state == ABC_S_HEAD) // severity = 1; do_tune(); parse.first_sym = 
parse.last_sym = NULL; if (parse.abc_state != ABC_S_GLOBAL) { parse.abc_vers = g_abc_vers; ulen = g_ulen; microscale = g_microscale; memcpy(char_tb, g_char_tb, sizeof g_char_tb); } } /* -- treat the broken rhythm '>' and '<' -- */ static void broken_rhythm(struct SYMBOL *s, int num) /* >0: do dot, <0: do half */ { struct notes *notes = &s->u.note; int l, m, n; num *= 2; if (num > 0) { if (num == 6) num = 8; n = num * 2 - 1; for (m = 0; m <= s->nhd; m++) notes->notes[m].len = (notes->notes[m].len * n) / num; } else { n = -num; if (n == 6) n = 8; for (m = 0; m <= s->nhd; m++) notes->notes[m].len /= n; } l = notes->notes[0].len; for (m = 1; m <= s->nhd; m++) if (notes->notes[m].len < l) l = notes->notes[m].len; } /* -- check for the '!' as end of line (ABC2Win) -- */ static int check_nl(char *p) { while (*p != '\0') { switch (*p++) { case '!': return 0; case '|': case '[': case ':': case ']': case ' ': case '\t': return 1; } } return 1; } /* -- parse extra K: or V: definitions (clef, octave and microscale -- */ static char *parse_extra(char *p, char **p_name, char **p_middle, char **p_stlines, char **p_scale, char **p_octave, char **p_cue, char **p_map) { for (;;) { if (strncmp(p, "clef=", 5) == 0 || strncmp(p, "bass", 4) == 0 || strncmp(p, "treble", 6) == 0 || strncmp(p, "alto", 4) == 0 || strncmp(p, "tenor", 5) == 0 || strncmp(p, "perc", 4) == 0) { if (*p_name) syntax("Double clef name", p); *p_name = p; } else if (strncmp(p, "microscale=", 11) == 0 || strncmp(p, "uscale=", 7) == 0) { int i; p += p[0] == 'm' ? 11 : 7; i = atoi(p); if (i < 4 || i >= 256) syntax("Invalid value in microscale=", p); else microscale = i; } else if (strncmp(p, "middle=", 7) == 0 || strncmp(p, "m=", 2) == 0) { if (*p_middle) syntax("Double clef middle", p); *p_middle = p + (p[1] == '=' ? 
2 : 7); } else if (strncmp(p, "octave=", 7) == 0) { if (*p_octave) syntax("Double octave=", p); *p_octave = p + 7; } else if (strncmp(p, "stafflines=", 11) == 0) { int l; char *q; if (*p_stlines) syntax("Double stafflines", p); p += 11; if (isdigit((unsigned char) *p)) { switch (atoi(p)) { case 0: *p_stlines = "..."; break; case 1: *p_stlines = "..|"; break; case 2: *p_stlines = ".||"; break; case 3: *p_stlines = ".|||"; break; case 4: *p_stlines = "||||"; break; case 5: *p_stlines = "|||||"; break; case 6: *p_stlines = "||||||"; break; case 7: *p_stlines = "|||||||"; break; case 8: *p_stlines = "||||||||"; break; default: syntax("Bad number of lines", p); break; } } else { q = p; while (!isspace((unsigned char) *p) && *p != '\0') p++; l = p - q; *p_stlines = getarena(l + 1); strncpy(*p_stlines, q, l); (*p_stlines)[l] = '\0'; } } else if (strncmp(p, "staffscale=", 11) == 0) { if (*p_scale) syntax("Double staffscale", p); *p_scale = p + 11; } else if (strncmp(p, "cue=", 4) == 0) { if (*p_cue) syntax("Double cue", p); *p_cue = p + 4; } else if (strncmp(p, "map=", 4) == 0) { if (*p_map) syntax("Double map", p); *p_map = p + 4; // } else if (strncmp(p, "transpose=", 10) == 0 // || strncmp(p, "t=", 2) == 0) { // ; /* ignored - abcMIDI */ } else { break; } while (!isspace((unsigned char) *p) && *p != '\0') p++; while (isspace((unsigned char) *p)) p++; if (*p == '\0') break; } return p; } /* -- parse a decoration 'xxx<decosep>' -- */ static char *get_deco(char *p, unsigned char *p_dc) { char *q, sep, **t; unsigned i, l; *p_dc = 0; q = p; sep = q[-1]; if (char_tb[(unsigned char) sep] == CHAR_DECOS) { if (sep == '+') { if (*p == '+' && p[1] == '+') p++; /* special case "+++" */ } } else { sep = '\0'; /* Barfly U: */ } while (*p != sep) { if (*p == '\0') { syntax("Decoration not terminated", q); return p; } p++; } l = p - q; if (*p == sep) p++; for (i = 1, t = &parse.deco_tb[1]; *t && i < DC_NAME_SZ; i++, t++) { if (strlen(*t) == l && strncmp(*t, q, l) == 0) { *p_dc = i + 128; return p; } } /* new decoration */ if (i < DC_NAME_SZ) { // if (parse.abc_state != ABC_S_GLOBAL) // lvlarena(0); *t = getarena(l + 1); // if (parse.abc_state != ABC_S_GLOBAL) // lvlarena(1); memcpy(*t, q, l); (*t)[l] = '\0'; *p_dc = i + 128; } else { syntax("Too many decoration types", q); } return p; } /* -- parse a list of accidentals (K:) -- */ static char *parse_acc(char *p, struct SYMBOL *s) { int pit = 0, acc; unsigned nacc; nacc = 0; for (;;) { if (nacc >= sizeof s->u.key.pits) { syntax("Too many accidentals", p); break; } p = parse_acc_pit(p, &pit, &acc); if (acc < 0) break; s->u.key.pits[nacc] = pit; s->u.key.accs[nacc++] = acc; while (isspace((unsigned char) *p)) p++; if (*p == '\0') break; if (*p != '^' && *p != '_' && *p != '=') break; } s->u.key.microscale = microscale; if (s->u.key.empty != 2) s->u.key.nacc = nacc; return p; } /* -- parse a clef (K: or V:) -- */ static void parse_clef(struct SYMBOL *s, char *name, char *middle) { int clef = -1; int transpose = 0; int clef_line = 2; char *warn = NULL; char str[80]; str[0] = '\0'; if (name && strncmp(name, "clef=", 5) == 0) { name += 5; switch (*name) { case '\"': name = get_str(str, name, sizeof str); s->u.clef.name = getarena(strlen(str) + 1); strcpy(s->u.clef.name, str); clef = TREBLE; break; case 'g': warn = name; transpose = -7; case 'G': clef = TREBLE; break; case 'f': warn = name; transpose = -14; clef = BASS; clef_line = 4; break; case 'F': if (name[1] == ',') /* abc2.1.1 clef=F == clef=F, */ transpose = -7; clef = BASS; clef_line = 4; break; case 'c': warn 
= name; transpose = -7; case 'C': clef = ALTO; clef_line = 3; break; case 'P': clef = PERC; clef_line = 3; break; } if (clef >= 0) { name++; if (*name == ',' || *name== '\'') warn = name; while (*name == ',') { transpose += 7; name++; } while (*name == '\'') { transpose -= 7; name++; } } } if (name && clef < 0) { if (!strncmp(name, "bass", 4)) { clef = BASS; clef_line = 4; s->u.clef.check_pitch = 1; name += 4; } else if (!strncmp(name, "treble", 6)) { clef = TREBLE; name += 6; } else if (!strncmp(name, "alto", 4) || !strncmp(name, "tenor", 5)) { clef = ALTO; clef_line = *name == 'a' ? 3 : 4; s->u.clef.check_pitch = 1; if (*name == 'a') name += 4; else name += 5; } else if (!strncmp(name, "perc", 4)) { clef = PERC; clef_line = 3; name += 4; } else if (!strncmp(name, "auto", 4)) { clef = AUTOCLEF; name += 4; } else if (strncmp(name, "none", 4) == 0) { clef = TREBLE; s->u.clef.invis = 1; s->flags |= ABC_F_INVIS; name += 4; } else { syntax("Unknown clef", name); clef = TREBLE; } } if (clef >= 0) { if (isdigit((unsigned char) *name)) clef_line = *name++ - '0'; if (name[1] == '8') { switch (*name) { case '^': transpose -= 7; case '+': s->u.clef.octave = 1; break; case '_': transpose += 7; case '-': s->u.clef.octave = -1; break; } } } if (middle) { int pit = 0, acc, l; static const char line_tb[7] = {ALTO, TREBLE, ALTO, BASS, ALTO, BASS, ALTO}; warn = middle; /* 'middle=<note pitch>' */ parse_acc_pit(middle, &pit, &acc); if (acc < 0) // if error pit = 22; if (clef < 0) clef = line_tb[(pit + 7) % 7]; switch (clef) { default: l = 20 + 4; break; case ALTO: l = 16 + 4; break; case BASS: l = 12 + 4; break; } clef_line = (l - pit + 28) % 7; if (clef_line & 1) { syntax("Bad 'middle' value for the clef", middle); pit++; } clef_line = clef_line / 2 + 1; transpose = l - (clef_line - 1) * 2 - pit; s->u.clef.check_pitch = 0; } s->u.clef.type = clef; s->u.clef.line = clef_line; s->u.clef.transpose = transpose; if (warn) { int sev_sav; sev_sav = severity; syntax("Warning: Deprecated or non-standard item", warn); severity = sev_sav; } } /* get the octave= value */ static int parse_octave(char *p) { int oct; if (p) { oct = 1; if (*p == '-') { oct = -1; p++; } if (*p >= '0' && *p <= '4') return oct * (*p - '0'); syntax("Bad octave value", p); } return NO_OCTAVE; } /* -- parse a 'K:' -- */ static void parse_key(char *p, struct SYMBOL *s) { int sf, empty, instr; // int mode; char *clef_name, *clef_middle, *clef_stlines, *clef_scale; char *p_octave, *p_cue, *p_map; // set important default values // s->u.key.stafflines = "|||||"; s->u.key.octave = NO_OCTAVE; if (*p == '\0') { s->u.key.empty = 1; return; } sf = 0; // mode = 0; empty = 0; instr = 0; switch (*p++) { case 'F': sf = -1; break; case 'B': sf++; case 'E': sf++; case 'A': sf++; case 'D': sf++; case 'G': sf++; case 'C': break; case 'H': if (*p == 'P') { instr = K_HP; p++; } else if (*p == 'p') { instr = K_Hp; sf = 2; p++; } else { syntax("Unknown bagpipe-like key", p); } break; case 'P': instr = K_DRUM; p++; break; case 'n': if (strncmp(p, "one", 3) == 0) { // none empty = 2; p += 3; while (isspace((unsigned char) *p)) p++; if (*p == '\0') { s->u.key.empty = empty; return; } break; } // fall thru default: p--; empty = 1; break; } s->u.key.empty = empty; if (!empty) { if (*p == '#') { sf += 7; p++; } else if (*p == 'b') { sf -= 7; p++; } while (isspace((unsigned char) *p)) p++; switch (*p) { case 'a': case 'A': if (strncasecmp(p, "aeo", 3) == 0) { sf -= 3; // mode = 5; break; } goto unk; case 'd': case 'D': if (strncasecmp(p, "dor", 3) == 0) { sf -= 2; // mode 
= 1; break; } goto unk; case 'i': case 'I': if (strncasecmp(p, "ion", 3) == 0) { // mode = 0; break; } goto unk; case 'l': case 'L': if (strncasecmp(p, "loc", 3) == 0) { sf -= 5; // mode = 6; break; } if (strncasecmp(p, "lyd", 3) == 0) { sf += 1; // mode = 3; break; } goto unk; case 'm': case 'M': if (strncasecmp(p, "maj", 3) == 0) break; if (strncasecmp(p, "mix", 3) == 0) { sf -= 1; // mode = 4; break; } if (strncasecmp(p, "min", 3) == 0 || !isalpha((unsigned char) p[1])) { /* 'm' alone */ sf -= 3; // mode = 5; break; } goto unk; case 'p': case 'P': if (strncasecmp(p, "phr", 3) == 0) { sf -= 4; // mode = 2; break; } goto unk; default: unk: empty = 1; // (local value) break; } if (!empty) { while (isalpha((unsigned char) *p)) p++; while (isspace((unsigned char) *p)) p++; } // [exp] accidentals if (strncmp(p, "exp ", 4) == 0) { p += 4; while (isspace((unsigned char) *p)) p++; if (*p == '\0') syntax("no accidental after 'exp'", p); s->u.key.exp = 1; } if (s->u.key.exp && strncmp(p, "none", 4) == 0) { sf = 0; p += 4; while (isspace((unsigned char) *p)) p++; } else switch (*p) { case '^': case '_': case '=': p = parse_acc(p, s); /* accidentals */ break; } } if (sf > 7 || sf < -7) { syntax("Too many sharps/flats", p); if (sf > 0) sf -= 12; else sf += 12; } // extra parameters clef_name = clef_middle = clef_stlines = clef_scale = NULL; p_octave = p_cue = p_map = NULL; parse_extra(p, &clef_name, &clef_middle, &clef_stlines, &clef_scale, &p_octave, &p_cue, &p_map); s->u.key.sf = sf; // s->u.key.mode = mode; s->u.key.instr = instr; s->u.key.octave = parse_octave(p_octave); if (p_cue) { if (strncmp(p_cue, "on", 2) == 0) s->u.key.cue = 1; else s->u.key.cue = -1; } if (clef_stlines) s->u.key.stafflines = clef_stlines; if (clef_scale) { float sc; sc = atof(clef_scale); if (sc >= 0.5 && sc <= 3) s->u.key.staffscale = sc; else syntax("Bad value of staffscale", clef_scale); } if (clef_name || clef_middle) { s = abc_new(ABC_T_CLEF, NULL); parse_clef(s, clef_name, clef_middle); } if (p_map) { strcpy(tex_buf, "%%voicemap "); get_str(&tex_buf[11], p_map, TEX_BUF_SZ - 12); abc_new(ABC_T_PSCOM, tex_buf); } } /* -- set default length from 'L:' -- */ static char *get_len(char *p, struct SYMBOL *s) { int l1, l2, d; char *error_txt = NULL; if (strcmp(p, "auto") == 0) { /* L:auto */ ulen = 15120; // 2*2*2*2*3*3*3*5*7 s->u.length.base_length = -1; return error_txt; } l1 = 0; l2 = 1; if (sscanf(p, "%d /%d ", &l1, &l2) != 2 || l1 == 0) { s->u.length.base_length = ulen ? 
ulen : BASE_LEN / 8; return "Bad unit note length: unchanged"; } if (l2 == 0) { error_txt = "Bad length divisor, set to 4"; l2 = 4; } d = BASE_LEN / l2; if (d * l2 != BASE_LEN) { error_txt = "Length incompatible with BASE, using 1/8"; d = BASE_LEN / 8; } else { d *= l1; if (l1 != 1 || (l2 & (l2 - 1))) { error_txt = "Incorrect unit note length, using 1/8"; d = BASE_LEN / 8; } } s->u.length.base_length = d; return error_txt; } /* -- parse a 'M:' -- */ static char *parse_meter(char *p, struct SYMBOL *s) { int m1, m2, d, wmeasure, nm, in_parenth; unsigned i; char *q; static char top_err[] = "Cannot identify meter top"; if (*p == '\0') return "Empty meter string"; nm = 0; in_parenth = 0; m1 = 0; if (strncmp(p, "none", 4) == 0) { p += 4; /* no meter */ wmeasure = 1; /* simplify measure numbering and MREST conversion */ } else { wmeasure = 0; while (*p != '\0') { if (*p == '=') break; if (nm >= MAX_MEASURE) return "Too many values in M:"; switch (*p) { case 'C': s->u.meter.meter[nm].top[0] = *p++; if (*p == '|') s->u.meter.meter[nm].top[1] = *p++; m1 = 4; m2 = 4; break; case 'c': case 'o': if (*p == 'c') m1 = 4; else m1 = 3; m2 = 4; s->u.meter.meter[nm].top[0] = *p++; if (*p == '.') s->u.meter.meter[nm].top[1] = *p++; break; case '(': if (p[1] == '(') { /* "M:5/4 ((2+3)/4)" */ in_parenth = 1; s->u.meter.meter[nm++].top[0] = *p++; } q = p + 1; while (*q != '\0') { if (*q == ')' || *q == '/') break; q++; } if (*q == ')' && q[1] == '/') { /* "M:5/4 (2+3)/4" */ p++; /* remove the parenthesis */ continue; } /* "M:5 (2+3)" */ /* fall thru */ case ')': in_parenth = *p == '('; s->u.meter.meter[nm++].top[0] = *p++; continue; default: if (sscanf(p, "%d", &m1) != 1 || m1 <= 0) return top_err; i = 0; m2 = 2; /* default when no bottom value */ for (;;) { while (isdigit((unsigned char) *p) && i < sizeof s->u.meter.meter[0].top) s->u.meter.meter[nm].top[i++] = *p++; if (*p == ')') { if (p[1] != '/') break; p++; } if (*p == '/') { p++; if (sscanf(p, "%d", &m2) != 1 || m2 <= 0) return "Cannot identify meter bottom"; i = 0; while (isdigit((unsigned char) *p) && i < sizeof s->u.meter.meter[0].bot) s->u.meter.meter[nm].bot[i++] = *p++; break; } if (*p != ' ' && *p != '+') break; if (*p == '\0' || p[1] == '(') /* "M:5 (2/4+3/4)" */ break; if (i < sizeof s->u.meter.meter[0].top) s->u.meter.meter[nm].top[i++] = *p++; if (sscanf(p, "%d", &d) != 1 || d <= 0) return top_err; if (p[-1] == ' ') { if (d > m1) m1 = d; } else { m1 += d; } } break; } if (!in_parenth) wmeasure += m1 * BASE_LEN / m2; nm++; if (*p == ' ') p++; else if (*p == '+') s->u.meter.meter[nm++].top[0] = *p++; } } meter = m1; if (*p == '=') { if (sscanf(++p, "%d/%d", &m1, &m2) != 2 || m1 <= 0 || m2 <= 0) return "Cannot identify meter explicit duration"; wmeasure = m1 * BASE_LEN / m2; s->u.meter.expdur = 1; } s->u.meter.wmeasure = wmeasure; s->u.meter.nmeter = nm; /* in the tune header, change the unit note length */ if (parse.abc_state == ABC_S_HEAD && ulen == 0) { if (wmeasure >= BASE_LEN * 3 / 4 || wmeasure <= 1) ulen = BASE_LEN / 8; else ulen = BASE_LEN / 16; } return 0; } /* -- get a possibly quoted string -- */ char *get_str(char *d, /* destination */ char *s, /* source */ int maxlen) /* max length */ { char c; maxlen--; /* have place for the EOS */ while (isspace((unsigned char) *s)) s++; if (*s == '"') { s++; while ((c = *s) != '\0') { if (c == '"') { s++; break; } if (c == '\\') { if (--maxlen > 0) *d++ = c; c = *++s; } if (--maxlen > 0) *d++ = c; s++; } } else { while ((c = *s) != '\0') { if (isspace((unsigned char) c)) break; if (--maxlen > 0) 
*d++ = c; s++; } } *d = '\0'; while (isspace((unsigned char) *s)) s++; return s; } /* -- parse a tempo (Q:) -- */ static char *parse_tempo(char *p, struct SYMBOL *s) { char c, str[80]; int i, l, n, top, bot; /* string before */ if (*p == '"') { p = get_str(str, p, sizeof str); s->u.tempo.str1 = getarena(strlen(str) + 1); strcpy(s->u.tempo.str1, str); } /* beat */ if (*p == 'C' || *p == 'c' || *p == 'L' || *p == 'l') { s->u.tempo.beats[0] = ulen; if (parse.abc_vers >= (2 << 16)) syntax("Deprecated Q: value", p); p++; while (isspace((unsigned char) *p)) p++; if (*p != '=') goto inval; c = '='; p--; } else if (isdigit((unsigned char) *p)) { if (strchr(p, '/') != NULL) { i = 0; while (isdigit((unsigned char) *p)) { if (sscanf(p, "%d/%d%n", &top, &bot, &n) != 2 || bot <= 0) goto inval; l = (BASE_LEN * top) / bot; if (l <= 0 || i >= sizeof s->u.tempo.beats / sizeof s->u.tempo.beats[0]) goto inval; s->u.tempo.beats[i++] = l; p += n; while (isspace((unsigned char) *p)) p++; } c = *p; if (c != '=') goto inval; } else { s->u.tempo.beats[0] = ulen; if (parse.abc_vers >= (2 << 16)) syntax("Deprecated Q: value", p); c = '='; p--; } } else { c = '\0'; } /* tempo value */ if (c == '=') { p++; if (strncmp(p, "ca. ", 4) == 0) { s->u.tempo.circa = 1; p += 4; } if (sscanf(p, "%d/%d%n", &top, &bot, &n) == 2) { if (bot <= 0) goto inval; l = (BASE_LEN * top) / bot; if (l <= 0) goto inval; s->u.tempo.new_beat = l; } else { if (sscanf(p, "%d%n", &top, &n) != 1) goto inval; s->u.tempo.tempo = top; } p += n; while (isspace((unsigned char) *p)) p++; } /* string after */ if (*p == '"') { p = get_str(str, p, sizeof str); s->u.tempo.str2 = getarena(strlen(str) + 1); strcpy(s->u.tempo.str2, str); } return 0; inval: return "Invalid tempo"; } /* -- get a user defined symbol (U:) -- */ static char *get_user(char *p, struct SYMBOL *s) { unsigned char c; char *value; c = (unsigned char) *p++; if (c == '\\') { c = (unsigned char) *p++; switch (c) { case 'n': c = '\n'; break; case 't': c = '\t'; break; } } switch (char_tb[c]) { default: return "Bad decoration character"; case CHAR_DECO: break; case CHAR_BAD: case CHAR_IGN: case CHAR_SPAC: case CHAR_DECOS: case CHAR_LINEBREAK: char_tb[c] = CHAR_DECO; break; } s->u.user.symbol = c; /* skip '=' */ while (isspace((unsigned char) *p) || *p == '=') p++; if (char_tb[(unsigned char) *p] == CHAR_DECOS) p++; /*fixme: 'U: <char> = "text"' is not treated */ get_deco(p, &s->u.user.value); if (!s->u.user.value) return 0; /* treat special pseudo decorations */ value = parse.deco_tb[s->u.user.value - 128]; if (strcmp(value, "beambreak") == 0) char_tb[c] = CHAR_SPAC; else if (strcmp(value, "ignore") == 0) char_tb[c] = CHAR_IGN; else if (strcmp(value, "nil") == 0 || strcmp(value, "none") == 0) char_tb[c] = CHAR_BAD; else return 0; s->u.user.value = 0; /* not a decoration */ return 0; } /* -- parse the voice parameters (V:) -- */ static char *parse_voice(char *p, struct SYMBOL *s) { int voice; char *error_txt = NULL; char *clef_name, *clef_middle, *clef_stlines, *clef_scale; char *p_octave, *p_cue, *p_map; signed char *p_stem; static struct kw_s { char *name; short len; short index; } kw_tb[] = { {"name=", 5, 0}, {"nm=", 3, 0}, {"subname=", 8, 1}, {"sname=", 6, 1}, {"snm=", 4, 1}, {"merge", 5, 2}, {"up", 2, 3}, {"down", 4, 4}, {"stem=", 5, 5}, {"gstem=", 6, 6}, {"auto", 4, 7}, {"dyn=", 4, 8}, {"lyrics=", 7, 9}, {"scale=", 6, 10}, {"gchord=", 7, 11}, {0} }; struct kw_s *kw; /* save the parameters of the previous voice */ curvoice->ulen = ulen; curvoice->microscale = microscale; if 
(voice_tb[0].id[0] == '\0') { switch (s->abc_prev->abc_type) { case ABC_T_EOLN: case ABC_T_NOTE: case ABC_T_REST: case ABC_T_BAR: /* the previous voice was implicit (after K:) */ voice_tb[0].id[0] = '1'; break; } } { char *id, sep; id = p; while (isalnum((unsigned char) *p) || *p == '_') p++; sep = *p; *p = '\0'; if (voice_tb[0].id[0] == '\0') { voice = 0; /* first voice */ } else { for (voice = 0; voice <= nvoice; voice++) { if (strcmp(id, voice_tb[voice].id) == 0) goto found; } if (voice >= MAXVOICE) { syntax("Too many voices", id); voice--; } } nvoice = voice; strncpy(voice_tb[voice].id, id, sizeof voice_tb[voice].id - 1); voice_tb[voice].mvoice = voice; found: strcpy(s->u.voice.id, voice_tb[voice].id); *p = sep; } curvoice = &voice_tb[voice]; s->u.voice.voice = voice; /* if in tune, set the voice parameters */ if (parse.abc_state == ABC_S_TUNE) { ulen = curvoice->ulen; microscale = curvoice->microscale; } /* parse the other parameters */ clef_name = clef_middle = clef_stlines = clef_scale = NULL; p_octave = p_cue = p_map = NULL; p_stem = &s->u.voice.stem; for (;;) { while (isspace((unsigned char) *p)) p++; if (*p == '\0') break; p = parse_extra(p, &clef_name, &clef_middle, &clef_stlines, &clef_scale, &p_octave, &p_cue, &p_map); if (*p == '\0') break; for (kw = kw_tb; kw->name; kw++) { if (strncmp(p, kw->name, kw->len) == 0) break; } if (!kw->name) { while (!isspace((unsigned char) *p) && *p != '\0') p++; /* ignore unknown keywords */ continue; } p += kw->len; switch (kw->index) { case 0: /* name */ p = get_str(tex_buf, p, TEX_BUF_SZ); s->u.voice.fname = getarena(strlen(tex_buf) + 1); strcpy(s->u.voice.fname, tex_buf); break; case 1: /* subname */ p = get_str(tex_buf, p, TEX_BUF_SZ); s->u.voice.nname = getarena(strlen(tex_buf) + 1); strcpy(s->u.voice.nname, tex_buf); break; case 2: /* merge */ s->u.voice.merge = 1; break; case 3: /* up */ *p_stem = 1; break; case 4: /* down */ *p_stem = -1; break; case 5: /* stem= */ p_stem = &s->u.voice.stem; break; case 6: /* gstem= */ p_stem = &s->u.voice.gstem; break; case 7: /* auto */ *p_stem = 2; break; case 8: /* dyn= */ p_stem = &s->u.voice.dyn; break; case 9: /* lyrics= */ p_stem = &s->u.voice.lyrics; break; case 10: { /* scale= */ float sc; sc = atof(p); if (sc >= 0.5 && sc <= 2) s->u.voice.scale = sc; else error_txt = "Bad value for voice scale"; while (!isspace((unsigned char) *p) && *p != '\0') p++; break; } case 11: /* gchord= */ p_stem = &s->u.voice.gchord; break; } } s->u.voice.octave = parse_octave(p_octave); if (p_cue) { if (strncmp(p_cue, "on", 2) == 0) s->u.voice.cue = 1; else s->u.voice.cue = -1; } if (clef_stlines) s->u.voice.stafflines = clef_stlines; // else // s->u.voice.stafflines = "|||||"; if (clef_scale) { float sc; sc = atof(clef_scale); if (sc >= 0.5 && sc <= 3) s->u.voice.staffscale = sc; else syntax("Bad value of staffscale", clef_scale); } if (clef_name || clef_middle) { s = abc_new(ABC_T_CLEF, NULL); parse_clef(s, clef_name, clef_middle); } if (p_map) { strcpy(tex_buf, "%%voicemap "); get_str(&tex_buf[11], p_map, TEX_BUF_SZ - 12); abc_new(ABC_T_PSCOM, tex_buf); } return error_txt; } /* -- parse a bar -- */ static char *parse_bar(char *p) { struct SYMBOL *s; char *q; int bar_type, i; char repeat_value[32]; q = --p; // keep the first char bar_type = 0; for (;;) { switch (*p++) { case '|': bar_type <<= 4; bar_type |= B_BAR; continue; case '[': bar_type <<= 4; bar_type |= B_OBRA; continue; case ']': bar_type <<= 4; bar_type |= B_CBRA; continue; case ':': bar_type <<= 4; bar_type |= B_COL; continue; default: break; } 
break; } p--; /* if the last element is '[', it may start * a chord, an embedded header or an other bar */ if ((bar_type & 0x0f) == B_OBRA && bar_type != B_OBRA && *p != ' ') { bar_type >>= 4; p--; } if (bar_type == (B_OBRA << 8) + (B_BAR << 4) + B_CBRA) /* [|] */ bar_type = (B_OBRA << 4) + B_CBRA; /* [] */ /* curvoice->last_note = NULL; */ if (vover > 0) { curvoice = &voice_tb[curvoice->mvoice]; vover = 0; } s = abc_new(ABC_T_BAR, gchord); if (gchord) gchord = NULL; /* handle the repeat sequences */ if (bar_type == B_COL) { bar_type = B_BAR; s->u.bar.dotted = 1; } else { if (*q == ']') { /* repeat bar stop */ i = p - q - 1; if (i > 0) /* remove the starting ']' */ s->u.bar.type &= (1 << (i * 4)) - 1; s->flags |= ABC_F_RBSTOP; s->sflags |= S_RBSTOP; } else if ((bar_type & 0x0f) == B_COL /* left or */ || *q == ':') { /* right repeat bar */ s->flags |= ABC_F_RBSTOP; s->sflags |= S_RBSTOP; if (*q == ':') /* right repeat bar */ s->sflags |= S_RRBAR; } } s->u.bar.type = bar_type; if (dc.n > 0) { memcpy(&s->u.bar.dc, &dc, sizeof s->u.bar.dc); dc.n = 0; } if (!lyric_started) { lyric_started = 1; s->flags |= ABC_F_LYRIC_START; } if (!isdigit((unsigned char) *p) /* if not a repeat bar */ && (*p != '"' || p[-1] != '[')) /* ('["' only) */ return p; if (*p == '"') { p = get_str(repeat_value, p, sizeof repeat_value); } else { char *q; q = repeat_value; while (isdigit((unsigned char) *p) || *p == ',' || *p == '-' || (*p == '.' && isdigit((unsigned char) p[1]))) { if (q < &repeat_value[sizeof repeat_value - 1]) *q++ = *p++; else p++; } *q = '\0'; } if (bar_type != B_OBRA || s->text) { s = abc_new(ABC_T_BAR, repeat_value); s->u.bar.type = B_OBRA; } else { s->text = getarena(strlen(repeat_value) + 1); strcpy(s->text, repeat_value); } s->u.bar.repeat_bar = 1; s->flags |= ABC_F_RBSTART | ABC_F_RBSTOP; s->sflags |= S_RBSTART | S_RBSTOP; return p; } // parse the note accidental and pitch char *parse_acc_pit(char *p, int *pit, int *acc) { /* look for accidental sign */ switch (*p) { case '^': p++; if (*p == '^') { p++; *acc = A_DS; } else { *acc = A_SH; } break; case '=': p++; *acc = A_NT; break; case '_': p++; if (*p == '_') { p++; *acc = A_DF; } else { *acc = A_FT; } break; default: *acc = 0; } /* look for microtone value */ if (*acc != 0 && (isdigit((unsigned char) *p) || (*p == '/' && microscale == 0))) { int n, d; char *q; n = d = 1; if (*p != '/') { n = strtol(p, &q, 10); p = q; } if (*p == '/') { p++; if (!isdigit((unsigned char) *p)) { d = 2; } else { d = strtol(p, &q, 10); p = q; } } if (microscale == 0) { d--; d += (n - 1) << 8; /* short [ (n-1) | (d-1) ] */ if (d == 0) { n = MAXMICRO - 1; } else { for (n = 1; n < MAXMICRO; n++) { if (parse.micro_tb[n] == d) break; if (parse.micro_tb[n] == 0) { parse.micro_tb[n] = d; break; } } } if (n == MAXMICRO) { syntax("Too many microtone accidentals", p); n = 0; } } *acc += (n << 3); } /* get the pitch */ { char *p_n; p_n = strchr(all_notes, *p); if (!p_n || *p == '\0') { syntax(*acc ? 
"Missing note after accidental" : "Not a note", p); *acc = -1; if (*p == '\0') p--; } else { *pit = p_n - all_notes + 16; } p++; } while (*p == '\'') { /* eat up following ' chars */ *pit += 7; p++; } while (*p == ',') { /* eat up following , chars */ *pit -= 7; p++; } return p; } /* -- parse the decorations of notes and bars -- */ static char *parse_deco(char *p, struct decos *deco, int m) /* note index / -1 */ { int n; unsigned char t; n = deco->n; for (;;) { t = (unsigned char) *p++; if (char_tb[t] != CHAR_DECO && char_tb[t] != CHAR_DECOS) break; if (char_tb[t] == CHAR_DECOS) p = get_deco(p, &t); if (n >= MAXDC) { syntax("Too many decorations for the note", p); } else if (t != 0) { deco->tm[n].t = t; deco->tm[n++].m = m; } } deco->n = n; return p - 1; } /* -- parse a decoration line (d: or s:) -- */ static char *parse_decoline(char *p) { struct SYMBOL *is; unsigned char t; int n; if ((is = deco_cont) == NULL) is = deco_start; else deco_cont = NULL; /* scan the decoration line */ while (*p != '\0') { while (isspace((unsigned char) *p)) p++; if (*p == '\0') break; switch (*p) { case '|': while (is && (is->abc_type != ABC_T_BAR || is->u.bar.type == B_OBRA)) is = is->abc_next; if (!is) { syntax("Not enough bar lines for deco line", p); return NULL; } is = is->abc_next; p++; continue; case '*': while (is && is->abc_type != ABC_T_NOTE) is = is->abc_next; if (!is) { syntax("Not enough notes for deco line", p); return NULL; } is = is->abc_next; p++; continue; case '\\': if (p[1] == '\0') { if (!is) return "Not enough notes for deco line"; deco_cont = is; return NULL; } syntax("'\\' ignored", p); p++; continue; case '"': p = parse_gchord(p + 1); break; default: if (char_tb[(unsigned char) *p] == CHAR_DECOS) p = get_deco(p + 1, &t); else t = (unsigned char) *p++; break; } /* store the decoration and gchord/annotation in the next note */ while (is && (is->abc_type != ABC_T_NOTE || (is->flags & ABC_F_GRACE))) is = is->abc_next; if (!is) return "Not enough notes for deco line"; if (gchord) { if (is->text) { char *gch; n = strlen(is->text); gch = getarena(n + strlen(gchord) + 2); strcpy(gch, is->text); gch[n] = '\n'; strcpy(gch + n + 1, gchord); gchord = gch; } is->text = gchord; gchord = NULL; } else { n = is->u.note.dc.n; if (n >= MAXDC) { syntax("Too many decorations for the note", p); } else if (t != 0) { is->u.note.dc.tm[n].t = t; is->u.note.dc.tm[n].m = -1; is->u.note.dc.n = ++n; } } is = is->abc_next; } return NULL; } /* -- parse a guitar chord / annotation -- */ static char *parse_gchord(char *p) { char *q; int l, l2; q = p; while (*p != '"') { if (*p == '\\') p++; if (*p == '\0') { syntax("No end of guitar chord", p); break; } p++; } l = p - q; if (gchord) { char *gch; /* many guitar chords: concatenate with '\n' */ l2 = strlen(gchord); gch = getarena(l2 + 1 + l + 1); strcpy(gch, gchord); gch[l2++] = '\n'; strncpy(&gch[l2], q, l); gch[l2 + l] = '\0'; gchord = gch; } else { gchord = getarena(l + 1); strncpy(gchord, q, l); gchord[l] = '\0'; } if (*p != '\0') p++; return p; } /* -- parse a note length -- */ static char *parse_len(char *p, int dur_u, int *p_len) { int len, fac; int err = 0; char *q; len = dur_u; if (isdigit((unsigned char) *p)) { len *= strtol(p, &q, 10); if (len <= 0 || len > 10000) { syntax("Bad length", p); len = dur_u; } p = q; } if (*p != '/') { *p_len = len; return p; } if (isdigit((unsigned char) p[1])) { fac = strtol(p + 1, &q, 10); p = q; if (fac == 0 || (fac & (fac - 1))) err = 1; else len /= fac; } else { while (*p == '/') { if (len & 1) err = 1; len /= 2; p++; } } if 
(err || !len) { syntax("Bad length divisor", p - 1); len = dur_u; } *p_len = len; return p; } /* -- parse a ABC line -- */ /* return 1 on end of tune, and 2 on start of new tune */ static int parse_line(char *p) { struct SYMBOL *s; char *q, c; char *dot = NULL; struct SYMBOL *last_note_sav = NULL; struct decos dc_sav; int i, flags, flags_sav = 0, slur; static char qtb[10] = {0, 1, 3, 2, 3, 0, 2, 0, 3, 0}; colnum = 0; switch (*p) { case '\0': /* blank line */ switch (parse.abc_state) { case ABC_S_GLOBAL: if (parse.last_sym && parse.last_sym->abc_type != ABC_T_NULL) abc_new(ABC_T_NULL, NULL); case ABC_S_HEAD: /*fixme: may have blank lines in headers?*/ return 0; } return 1; case '%': if (p[1] == '%') { s = abc_new(ABC_T_PSCOM, p); p += 2; /* skip '%%' */ if (strncasecmp(p, "decoration ", 11) == 0) { p += 11; while (isspace((unsigned char) *p)) p++; switch (*p) { case '!': char_tb['!'] = CHAR_DECOS; char_tb['+'] = CHAR_BAD; break; case '+': char_tb['+'] = CHAR_DECOS; char_tb['!'] = CHAR_BAD; break; } return 0; } if (strncasecmp(p, "linebreak ", 10) == 0) { for (i = 0; i < sizeof char_tb; i++) { if (char_tb[i] == CHAR_LINEBREAK) char_tb[i] = i != '!' ? CHAR_BAD : CHAR_DECOS; } p += 10; for (;;) { while (isspace((unsigned char) *p)) p++; if (*p == '\0') break; switch (*p) { case '!': case '$': case '*': case ';': case '?': case '@': char_tb[(unsigned char) *p++] = CHAR_LINEBREAK; break; case '<': if (strncmp(p, "<none>", 6) == 0) return 0; if (strncmp(p, "<EOL>", 5) == 0) { char_tb['\n'] = CHAR_LINEBREAK; p += 5; break; } /* fall thru */ default: if (strcmp(p, "lock") != 0) syntax("Invalid character in %%%%linebreak", p); return 0; } } return 0; } if (strncasecmp(p, "microscale ", 11) == 0) { int v; p += 11; while (isspace((unsigned char) *p)) p++; sscanf(p, "%d", &v); if (v < 4 || v >= 256 || v & 1) syntax("Invalid value in %%microscale", p); else microscale = v; return 0; } if (strncasecmp(p, "user ", 5) == 0) { p += 5; while (isspace((unsigned char) *p)) p++; get_user(p, s); return 0; } return 0; } /* fall thru */ case '\\': /* abc2mtex specific lines */ return 0; /* skip */ } /* header fields */ if (p[1] == ':' && *p != '|' && *p != ':') { /* not '|:' nor '::' */ int new_tune; new_tune = parse_info(p); /* handle BarFly voice definition */ /* 'V:n <note line ending with a bar>' */ if (*p != 'V' || parse.abc_state != ABC_S_TUNE) return new_tune; /* (normal return) */ c = p[strlen(p) - 1]; if (c != '|' && c != ']') return new_tune; while (!isspace((unsigned char) *p) && *p != '\0') p++; while (isspace((unsigned char) *p)) p++; } if (parse.abc_state != ABC_S_TUNE) return 0; /* music */ flags = 0; if (parse.abc_vers <= (2 << 16)) lyric_started = 0; deco_start = deco_cont = NULL; slur = 0; while (*p != '\0') { colnum = p - abc_line; switch (char_tb[(unsigned char) *p++]) { case CHAR_GCHORD: /* " */ if (flags & ABC_F_GRACE) goto bad_char; p = parse_gchord(p); break; case CHAR_GR_ST: /* '{' */ if (flags & ABC_F_GRACE) goto bad_char; last_note_sav = curvoice->last_note; curvoice->last_note = NULL; memcpy(&dc_sav, &dc, sizeof dc); dc.n = 0; flags_sav = flags; flags = ABC_F_GRACE; if (*p == '/') { flags |= ABC_F_SAPPO; p++; } break; case CHAR_GR_EN: /* '}' */ if (!(flags & ABC_F_GRACE)) goto bad_char; parse.last_sym->flags |= ABC_F_GR_END; if (dc.n != 0) syntax("Decoration ignored", p); curvoice->last_note = last_note_sav; memcpy(&dc, &dc_sav, sizeof dc); flags = flags_sav; break; case CHAR_DECOS: if (p[-1] == '!' 
&& char_tb['\n'] == CHAR_LINEBREAK && check_nl(p)) { s = abc_new(ABC_T_EOLN, NULL); /* abc2win EOL */ s->u.eoln.type = 2; break; } /* fall thru */ case CHAR_DECO: if (p[-1] == '.') { if (*p == '(' || *p == '-') { dot = p; break; } // if (*p == '|') { // p = parse_bar(p + 1); // parse.last_sym->u.bar.dotted = 1; // break; // } } p = parse_deco(p - 1, &dc, -1); break; case CHAR_LINEBREAK: s = abc_new(ABC_T_EOLN, NULL); // s->u.eoln.type = 0; break; case CHAR_NOTE: p = parse_note(p - 1, flags); flags &= ABC_F_GRACE; if (slur && parse.last_sym->u.note.notes[0].len) { parse.last_sym->u.note.slur_st = slur; slur = 0; } break; case CHAR_SLASH: /* '/' */ if (flags & ABC_F_GRACE) goto bad_char; if (char_tb[(unsigned char) p[-1]] != CHAR_BAR) goto bad_char; q = p; while (*q == '/') q++; if (char_tb[(unsigned char) *q] != CHAR_BAR) goto bad_char; s = abc_new(ABC_T_MREP, NULL); s->u.bar.type = 0; s->u.bar.len = q - p + 1; syntax("Non standard measure repeat syntax", p - 1); p = q; break; case CHAR_BSLASH: /* '\\' */ if (*p == '\0') break; syntax("'\\' ignored", p - 1); break; case CHAR_OBRA: /* '[' */ if (*p == '|' || *p == ']' || *p == ':' || isdigit((unsigned char) *p) || *p == '"' || *p == ' ') { if (flags & ABC_F_GRACE) goto bad_char; p = parse_bar(p); break; } if (p[1] != ':') { p = parse_note(p - 1, flags); /* chord */ flags &= ABC_F_GRACE; if (slur && parse.last_sym->u.note.notes[0].len) { parse.last_sym->u.note.slur_st = slur; slur = 0; } break; } /* embedded information field */ #if 0 /*fixme:OK for [I:staff n], ?? for other headers*/ if (flags & ABC_F_GRACE) goto bad_char; #endif while (p[2] == ' ') { /* remove the spaces */ p[2] = ':'; p[1] = *p; p++; } c = ']'; q = p; while (*p != '\0' && *p != c) p++; if (*p == '\0') { syntax("Escape sequence [..] not closed", q); c = '\0'; } else { *p = '\0'; } parse_info(q); *p = c; if (c != '\0') p++; break; case CHAR_BAR: /* '|', ':' or ']' */ if (flags & ABC_F_GRACE) goto bad_char; p = parse_bar(p); break; case CHAR_OPAR: /* '(' */ if (*p > '0' && *p <= '9') { int pplet, qplet, rplet; pplet = strtol(p, &q, 10); p = q; if ((unsigned) pplet < sizeof qtb / sizeof qtb[0]) qplet = qtb[pplet]; else qplet = qtb[0]; rplet = pplet; if (*p == ':') { p++; if (isdigit((unsigned char) *p)) { qplet = strtol(p, &q, 10); p = q; } if (*p == ':') { p++; if (isdigit((unsigned char) *p)) { rplet = strtol(p, &q, 10); p = q; } } } if (rplet < 1) { syntax("Invalid 'r' in tuplet", p); break; } if (pplet >= 128 || qplet >= 128 || rplet >= 128) { syntax("Invalid 'p:q:r' in tuplet", p); break; } if (qplet == 0) qplet = meter % 3 == 0 ? 
3 : 2; s = abc_new(ABC_T_TUPLET, NULL); s->u.tuplet.p_plet = pplet; s->u.tuplet.q_plet = qplet; s->u.tuplet.r_plet = rplet; s->flags |= flags; break; } if (*p == '&') { if (flags & ABC_F_GRACE) goto bad_char; p++; if (vover != 0) { syntax("Nested voice overlay", p - 1); break; } s = abc_new(ABC_T_V_OVER, NULL); s->u.v_over.type = V_OVER_S; s->u.v_over.voice = curvoice - voice_tb; vover = -1; /* multi-bars */ break; } slur <<= 4; if (p == dot + 1 && dc.n == 0) slur |= SL_DOTTED; switch (*p) { case '\'': slur += SL_ABOVE; p++; break; case ',': slur += SL_BELOW; p++; break; default: slur += SL_AUTO; break; } break; case CHAR_CPAR: /* ')' */ switch (parse.last_sym->abc_type) { case ABC_T_NOTE: case ABC_T_REST: break; default: goto bad_char; } parse.last_sym->u.note.slur_end++; break; case CHAR_VOV: /* '&' */ if (flags & ABC_F_GRACE) goto bad_char; if (*p != ')' || vover == 0) { /*??*/ if (!curvoice->last_note) { syntax("Bad start of voice overlay", p); break; } s = abc_new(ABC_T_V_OVER, NULL); /*s->u.v_over.type = V_OVER_V; */ vover_new(); s->u.v_over.voice = curvoice - voice_tb; if (vover == 0) vover = 1; /* single bar */ break; } p++; vover = 0; s = abc_new(ABC_T_V_OVER, NULL); s->u.v_over.type = V_OVER_E; s->u.v_over.voice = curvoice->mvoice; curvoice->last_note = NULL; /* ?? */ curvoice = &voice_tb[curvoice->mvoice]; break; case CHAR_SPAC: /* ' ' and '\t' */ flags |= ABC_F_SPACE; break; case CHAR_MINUS: { /* '-' */ int tie_pos; if (!curvoice->last_note || curvoice->last_note->abc_type != ABC_T_NOTE) goto bad_char; if (p == dot + 1 && dc.n == 0) tie_pos = SL_DOTTED; else tie_pos = 0; switch (*p) { case '\'': tie_pos += SL_ABOVE; p++; break; case ',': tie_pos += SL_BELOW; p++; break; default: tie_pos += SL_AUTO; break; } for (i = 0; i <= curvoice->last_note->nhd; i++) { if (curvoice->last_note->u.note.notes[i].ti1 == 0) curvoice->last_note->u.note.notes[i].ti1 = tie_pos; else if (curvoice->last_note->nhd == 0) syntax("Too many ties", p); } break; } case CHAR_BRHY: /* '>' and '<' */ if (!curvoice->last_note) goto bad_char; i = 1; while (*p == p[-1]) { i++; p++; } if (i > 3) { syntax("Bad broken rhythm", p - 1); i = 3; } if (p[-1] == '<') i = -i; broken_rhythm(curvoice->last_note, i); curvoice->last_note->u.note.brhythm = i; break; case CHAR_IGN: /* '*' & '`' */ break; default: bad_char: syntax((flags & ABC_F_GRACE) ? 
"Bad character in grace note sequence" : "Bad character", p - 1); break; } } /*fixme: may we have grace notes across lines?*/ if (flags & ABC_F_GRACE) { syntax("EOLN in grace note sequence", p - 1); if (curvoice->last_note) curvoice->last_note->flags |= ABC_F_GR_END; curvoice->last_note = last_note_sav; memcpy(&dc, &dc_sav, sizeof dc); } /* add eoln */ s = abc_new(ABC_T_EOLN, NULL); if (flags & ABC_F_SPACE) s->flags |= ABC_F_SPACE; if (p[-1] == '\\' || char_tb['\n'] != CHAR_LINEBREAK) s->u.eoln.type = 1; /* no break */ return 0; } /* -- parse a note or a rest -- */ static char *parse_note(char *p, int flags) { struct SYMBOL *s; char *q; int pit = 0, len, acc, nostem, chord, j, m, n; if (flags & ABC_F_GRACE) { /* in a grace note sequence */ s = abc_new(ABC_T_NOTE, NULL); } else { s = abc_new(ABC_T_NOTE, gchord); if (gchord) gchord = NULL; } s->flags |= flags; s->u.note.notes[0].color = -1; if (!lyric_started) { lyric_started = 1; s->flags |= ABC_F_LYRIC_START; } if (*p != 'X' && *p != 'Z' && !(flags & ABC_F_GRACE)) { if (!deco_start) deco_start = s; } chord = 0; /* rest */ switch (*p) { case 'X': s->flags |= ABC_F_INVIS; case 'Z': /* multi-rest */ s->abc_type = ABC_T_MREST; p++; len = 1; if (isdigit((unsigned char) *p)) { len = strtol(p, &q, 10); if (len == 0 || len > 100) { syntax("Bad number of measures", p); len = 1; } p = q; } s->u.bar.type = 0; s->u.bar.len = len; goto add_deco; case 'y': /* space (BarFly) */ s->abc_type = ABC_T_REST; s->flags |= ABC_F_INVIS; p++; if (isdigit((unsigned char) *p) /* number of points */ || *p == '-') { /* accept negative offset... */ s->u.note.notes[0].shhd = strtol(p, &q, 10); p = q; } else { s->u.note.notes[0].shhd = 10; // default } goto add_deco; case 'x': /* invisible rest */ s->flags |= ABC_F_INVIS; /* fall thru */ case 'z': s->abc_type = ABC_T_REST; p = parse_len(p + 1, ulen, &len); s->u.note.notes[0].len = len; goto do_brhythm; case '[': /* '[..]' = chord */ chord = 1; p++; break; } q = p; /* get pitch, length and possible accidental */ m = 0; nostem = 0; for (;;) { if (chord) { if (m >= MAXHD) { syntax("Too many notes in chord", p); m--; } n = 0; if (*p == '.') { n = SL_DOTTED; p++; } if (*p == '(') { p++; switch (*p) { case '\'': n += SL_ABOVE; p++; break; case ',': n += SL_BELOW; p++; break; default: n += SL_AUTO; break; } s->u.note.notes[m].sl1 = (s->u.note.notes[m].sl1 << 3) + n; } } p = parse_deco(p, &dc, m); /* note head decorations */ p = parse_acc_pit(p, &pit, &acc); if (*p == '0') { nostem = 1; p++; } p = parse_len(p, (flags & ABC_F_GRACE) ? 
BASE_LEN / 8 : // for grace note alone ulen, &len); s->u.note.notes[m].pit = pit; s->pits[m] = pit; s->u.note.notes[m].len = len; s->u.note.notes[m].acc = acc; s->u.note.notes[m].color = -1; if (chord) { for (;;) { if (*p == '.') { if (p[1] != '-') break; p++; } if (*p == '-') { switch (p[1]) { case '\'': s->u.note.notes[m].ti1 = SL_ABOVE; p++; break; case ',': s->u.note.notes[m].ti1 = SL_BELOW; p++; break; default: s->u.note.notes[m].ti1 = SL_AUTO; break; } } else if (*p == ')') { s->u.note.notes[m].sl2++; } else { break; } p++; } } if (acc >= 0) /* if no error */ m++; /* normal case */ if (!chord) break; if (*p == ']') { p++; if (*p == '0') { nostem = 1; p++; } if (*p == '/' || isdigit((unsigned char) *p)) { p = parse_len(p, ulen, &len); for (j = 0; j < m; j++) { s->u.note.notes[j].len = len * s->u.note.notes[j].len / ulen; } } break; } if (*p == '\0') { syntax("Chord not closed", q); break; } } if (nostem) s->flags |= ABC_F_STEMLESS; if (m == 0) /* if no note (or error) */ goto err; s->u.note.microscale = microscale; s->nhd = m - 1; do_brhythm: if (curvoice->last_note && curvoice->last_note->u.note.brhythm != 0) broken_rhythm(s, -curvoice->last_note->u.note.brhythm); add_deco: if (dc.n > 0) { memcpy(s->abc_type != ABC_T_MREST ? &s->u.note.dc : &s->u.bar.dc, &dc, sizeof dc); dc.n = 0; } /* forbid rests in grace note sequences */ if (s->abc_type != ABC_T_NOTE && (flags & ABC_F_GRACE)) { syntax("Not a note in grace note sequence", p); goto err; } if (s->u.note.notes[0].len > 0) /* if not space */ curvoice->last_note = s; return p; err: if ((parse.last_sym = s->abc_prev) == NULL) { parse.first_sym = NULL; } else { s->abc_prev->abc_next = NULL; s->abc_prev->flags |= (s->flags & ABC_F_ERROR); } return p; } /* -- parse an information field -- */ /* return 2 on start of new tune */ static int parse_info(char *p) { struct SYMBOL *s; char info_type = *p; char *error_txt = NULL; s = abc_new(ABC_T_INFO, p); p += 2; switch (info_type) { case 'd': case 's': if (parse.abc_state == ABC_S_GLOBAL) break; if (!deco_start) { error_txt = "Erroneous 'd:'/'s:'"; break; } error_txt = parse_decoline(p); break; case 'K': if (parse.abc_state == ABC_S_GLOBAL) break; parse_key(p, s); if (parse.abc_state == ABC_S_HEAD) { int i; parse.abc_state = ABC_S_TUNE; if (ulen == 0) ulen = BASE_LEN / 8; for (i = MAXVOICE; --i >= 0; ) voice_tb[i].ulen = ulen; lyric_started = 0; } break; case 'L': error_txt = get_len(p, s); if (s->u.length.base_length > 0) ulen = s->u.length.base_length; break; case 'M': error_txt = parse_meter(p, s); break; case 'Q': error_txt = parse_tempo(p, s); break; case 'U': error_txt = get_user(p, s); break; case 'V': if (parse.abc_state == ABC_S_GLOBAL) break; error_txt = parse_voice(p, s); break; case 'X': memset(voice_tb, 0, sizeof voice_tb); nvoice = 0; curvoice = voice_tb; parse.abc_state = ABC_S_HEAD; lvlarena(1); return 2; } if (error_txt) syntax(error_txt, p); return 0; } /* -- print a syntax error message -- */ static void syntax(char *msg, char *q) { int n, len, m1, m2, pp; int maxcol = 73; severity = 1; n = q - abc_line; len = strlen(abc_line); if ((unsigned) n > (unsigned) len) n = -1; print_error(msg, n); if (n < 0) { if (q && *q != '\0') fprintf(stderr, " (near '%s')\n", q); return; } m1 = 0; m2 = len; if (m2 > maxcol) { if (n < maxcol) { m2 = maxcol; } else { m1 = n - 20; m2 = m1 + maxcol; if (m2 > len) m2 = len; } } fprintf(stderr, "%4d ", linenum); pp = 6; if (m1 > 0) { fprintf(stderr, "..."); pp += 3; } fprintf(stderr, "%.*s", m2 - m1, &abc_line[m1]); if (m2 < len) fprintf(stderr, 
"..."); fprintf(stderr, "\n"); if ((unsigned) n < 200) fprintf(stderr, "%*s\n", n + pp - m1, "^"); if (last_sym) last_sym->flags |= ABC_F_ERROR; } /* -- switch to a new voice overlay -- */ static void vover_new(void) { int voice, mvoice; mvoice = curvoice->mvoice; for (voice = curvoice - voice_tb + 1; voice <= nvoice; voice++) if (voice_tb[voice].mvoice == mvoice) break; if (voice > nvoice) { if (nvoice >= MAXVOICE) { syntax("Too many voices", 0); return; } nvoice = voice; voice_tb[voice].id[0] = '&'; voice_tb[voice].mvoice = mvoice; } voice_tb[voice].ulen = curvoice->ulen; voice_tb[voice].microscale = curvoice->microscale; curvoice = &voice_tb[voice]; }
null
274
CWE-787
CVE-2021-33362
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre, Romain Bouqueau, Cyril Concolato * Copyright (c) Telecom ParisTech 2000-2021 * All rights reserved * * This file is part of GPAC / Media Tools sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/internal/media_dev.h> #include <gpac/constants.h> #include <gpac/mpeg4_odf.h> #include <gpac/maths.h> #include <gpac/avparse.h> #ifndef GPAC_DISABLE_OGG #include <gpac/internal/ogg.h> #endif //uncomment/define globally to remove all bitstream parsing logging from code (this will break inspect mode ananlyze=bs) //#define GPAC_DISABLE_AVPARSE_LOGS #ifndef GPAC_DISABLE_AVPARSE_LOGS void gf_bs_log_idx(GF_BitStream *bs, u32 nBits, const char *fname, s64 val, s32 idx1, s32 idx2, s32 idx3); #define gf_bs_log(_bs, _nBits, _fname, _val) gf_bs_log_idx(_bs, _nBits, _fname, _val, -1, -1, -1) u32 gf_bs_read_int_log_idx3(GF_BitStream *bs, u32 nBits, const char *fname, s32 idx1, s32 idx2, s32 idx3) { u32 val = gf_bs_read_int(bs, nBits); gf_bs_log_idx(bs, nBits, fname, val, idx1, idx2, idx3); return val; } #define gf_bs_read_int_log(_bs, _nBits, _fname) gf_bs_read_int_log_idx3(_bs, _nBits, _fname, -1, -1, -1) #define gf_bs_read_int_log_idx(_bs, _nBits, _fname, _idx) gf_bs_read_int_log_idx3(_bs, _nBits, _fname, _idx, -1, -1) #define gf_bs_read_int_log_idx2(_bs, _nBits, _fname, _idx1, _idx2) gf_bs_read_int_log_idx3(_bs, _nBits, _fname, (s32) _idx1, (s32) _idx2, -1) #else #define gf_bs_log(_bs, _nBits, _fname, _val) #define gf_bs_log_idx(_bs, _nBits, _fname, _val, _idx1, _idx2, _idx3) #define gf_bs_read_int_log(_bs, _nbb, _f) gf_bs_read_int(_bs, _nbb) #define gf_bs_read_int_log_idx(_bs, _nbb, _f, _idx) gf_bs_read_int(_bs, _nbb) #define gf_bs_read_int_log_idx2(_bs, _nbb, _f, _idx1, _idx2) gf_bs_read_int(_bs, _nbb) #define gf_bs_read_int_log_idx3(_bs, _nbb, _f, _idx1, _idx2, _idx3) gf_bs_read_int(_bs, _nbb) #endif static const struct { u32 w, h; } std_par[] = { { 4, 3}, {3, 2}, {16, 9}, {5, 3}, {5, 4}, {8, 5}, {2, 1}, {1, 1}, {0, 0}, }; GF_EXPORT void gf_media_reduce_aspect_ratio(u32 *width, u32 *height) { u32 i = 0; u32 w = *width; u32 h = *height; while (std_par[i].w) { if (std_par[i].w * h == std_par[i].h * w) { *width = std_par[i].w; *height = std_par[i].h; return; } i++; } //not standard one, reduce by power of 2 i = 2; while (1) { if (w <= i) return; if (h <= i) return; if (w % i) return; if (h % i) return; *width = w / i; *height = h / i; i *= 2; } } GF_EXPORT void gf_media_get_reduced_frame_rate(u32 *timescale, u32 *sample_dur) { u32 res; if (!*sample_dur) return; res = *timescale / *sample_dur; if (res * (*sample_dur) == *timescale) { *timescale = res; *sample_dur = 1; } else if ((double)(*timescale * 1001 - (res + 1) * *sample_dur * 1000) / ((res + 1) * *sample_dur * 1000) < 0.001) { *timescale = (res + 1) * 1000; *sample_dur = 1001; } } struct __m4v_profile { u32 value; const char 
*name; } M4VProfiles[] = { {0x00, "Reserved (0x00) Profile"}, {0x01, "Simple Profile @ Level 1"}, {0x02, "Simple Profile @ Level 2"}, {0x03, "Simple Profile @ Level 3"}, {0x08, "Simple Profile @ Level 0"}, {0x10, "Simple Scalable Profile @ Level 0"}, {0x11, "Simple Scalable Profile @ Level 1"}, {0x12, "Simple Scalable Profile @ Level 2"}, {0x21, "Core Profile @ Level 1"}, {0x22, "Core Profile @ Level 2"}, {0x32, "Main Profile @ Level 2"}, {0x33, "Main Profile @ Level 3"}, {0x34, "Main Profile @ Level 4"}, {0x42, "N-bit Profile @ Level 2"}, {0x51, "Scalable Texture Profile @ Level 1"}, {0x61, "Simple Face Animation Profile @ Level 1"}, {0x62, "Simple Face Animation Profile @ Level 2"}, {0x63, "Simple FBA Profile @ Level 1"}, {0x64, "Simple FBA Profile @ Level 2"}, {0x71, "Basic Animated Texture Profile @ Level 1"}, {0x72, "Basic Animated Texture Profile @ Level 2"}, {0x7F, "AVC/H264 Profile"}, {0x81, "Hybrid Profile @ Level 1"}, {0x82, "Hybrid Profile @ Level 2"}, {0x91, "Advanced Real Time Simple Profile @ Level 1"}, {0x92, "Advanced Real Time Simple Profile @ Level 2"}, {0x93, "Advanced Real Time Simple Profile @ Level 3"}, {0x94, "Advanced Real Time Simple Profile @ Level 4"}, {0xA1, "Core Scalable Profile @ Level1"}, {0xA2, "Core Scalable Profile @ Level2"}, {0xA3, "Core Scalable Profile @ Level3"}, {0xB1, "Advanced Coding Efficiency Profile @ Level 1"}, {0xB2, "Advanced Coding Efficiency Profile @ Level 2"}, {0xB3, "Advanced Coding Efficiency Profile @ Level 3"}, {0xB4, "Advanced Coding Efficiency Profile @ Level 4"}, {0xC1, "Advanced Core Profile @ Level 1"}, {0xC2, "Advanced Core Profile @ Level 2"}, {0xD1, "Advanced Scalable Texture @ Level1"}, {0xD2, "Advanced Scalable Texture @ Level2"}, {0xE1, "Simple Studio Profile @ Level 1"}, {0xE2, "Simple Studio Profile @ Level 2"}, {0xE3, "Simple Studio Profile @ Level 3"}, {0xE4, "Simple Studio Profile @ Level 4"}, {0xE5, "Core Studio Profile @ Level 1"}, {0xE6, "Core Studio Profile @ Level 2"}, {0xE7, "Core Studio Profile @ Level 3"}, {0xE8, "Core Studio Profile @ Level 4"}, {0xF0, "Advanced Simple Profile @ Level 0"}, {0xF1, "Advanced Simple Profile @ Level 1"}, {0xF2, "Advanced Simple Profile @ Level 2"}, {0xF3, "Advanced Simple Profile @ Level 3"}, {0xF4, "Advanced Simple Profile @ Level 4"}, {0xF5, "Advanced Simple Profile @ Level 5"}, {0xF7, "Advanced Simple Profile @ Level 3b"}, {0xF8, "Fine Granularity Scalable Profile @ Level 0"}, {0xF9, "Fine Granularity Scalable Profile @ Level 1"}, {0xFA, "Fine Granularity Scalable Profile @ Level 2"}, {0xFB, "Fine Granularity Scalable Profile @ Level 3"}, {0xFC, "Fine Granularity Scalable Profile @ Level 4"}, {0xFD, "Fine Granularity Scalable Profile @ Level 5"}, {0xFE, "Not part of MPEG-4 Visual profiles"}, {0xFF, "No visual capability required"} }; GF_EXPORT const char *gf_m4v_get_profile_name(u8 video_pl) { u32 i, count = GF_ARRAY_LENGTH(M4VProfiles); for (i=0; i<count; i++) { if ((u32)video_pl == M4VProfiles[i].value) return M4VProfiles[i].name; } return "ISO Reserved Profile"; } #ifndef GPAC_DISABLE_AV_PARSERS #define MPEG12_START_CODE_PREFIX 0x000001 #define MPEG12_PICTURE_START_CODE 0x00000100 #define MPEG12_SLICE_MIN_START 0x00000101 #define MPEG12_SLICE_MAX_START 0x000001af #define MPEG12_USER_DATA_START_CODE 0x000001b2 #define MPEG12_SEQUENCE_START_CODE 0x000001b3 #define MPEG12_SEQUENCE_ERR_START_CODE 0x000001b4 #define MPEG12_EXT_START_CODE 0x000001b5 #define MPEG12_SEQUENCE_END_START_CODE 0x000001b7 #define MPEG12_GOP_START_CODE 0x000001b8 s32 gf_mv12_next_start_code(unsigned char 
*pbuffer, u32 buflen, u32 *optr, u32 *scode) { u32 value; u32 offset; if (buflen < 4) return -1; for (offset = 0; offset < buflen - 3; offset++, pbuffer++) { #ifdef GPAC_BIG_ENDIAN value = *(u32 *)pbuffer >> 8; #else value = (pbuffer[0] << 16) | (pbuffer[1] << 8) | (pbuffer[2] << 0); #endif if (value == MPEG12_START_CODE_PREFIX) { *optr = offset; *scode = (value << 8) | pbuffer[3]; return 0; } } return -1; } s32 gf_mv12_next_slice_start(unsigned char *pbuffer, u32 startoffset, u32 buflen, u32 *slice_offset) { u32 slicestart, code; while (gf_mv12_next_start_code(pbuffer + startoffset, buflen - startoffset, &slicestart, &code) >= 0) { if ((code >= MPEG12_SLICE_MIN_START) && (code <= MPEG12_SLICE_MAX_START)) { *slice_offset = slicestart + startoffset; return 0; } startoffset += slicestart + 4; } return -1; } /* MPEG-4 video (14496-2) */ struct __tag_m4v_parser { GF_BitStream *bs; Bool mpeg12, step_mode; u32 current_object_type; u32 force_next_obj_type; u64 current_object_start; u32 tc_dec, prev_tc_dec, tc_disp, prev_tc_disp; }; GF_EXPORT GF_M4VParser *gf_m4v_parser_new(u8 *data, u64 data_size, Bool mpeg12video) { GF_M4VParser *tmp; if (!data || !data_size) return NULL; GF_SAFEALLOC(tmp, GF_M4VParser); if (!tmp) return NULL; tmp->bs = gf_bs_new(data, data_size, GF_BITSTREAM_READ); tmp->mpeg12 = mpeg12video; return tmp; } GF_M4VParser *gf_m4v_parser_bs_new(GF_BitStream *bs, Bool mpeg12video) { GF_M4VParser *tmp; GF_SAFEALLOC(tmp, GF_M4VParser); if (!tmp) return NULL; tmp->bs = bs; tmp->mpeg12 = mpeg12video; return tmp; } GF_EXPORT void gf_m4v_parser_del(GF_M4VParser *m4v) { gf_bs_del(m4v->bs); gf_free(m4v); } GF_EXPORT void gf_m4v_parser_del_no_bs(GF_M4VParser *m4v) { gf_free(m4v); } GF_EXPORT void gf_m4v_parser_set_inspect(GF_M4VParser *m4v) { if (m4v) m4v->step_mode = 1; } GF_EXPORT u32 gf_m4v_parser_get_obj_type(GF_M4VParser *m4v) { if (m4v) return m4v->current_object_type; return 0; } #define M4V_CACHE_SIZE 4096 s32 M4V_LoadObject(GF_M4VParser *m4v) { u32 v, bpos, found; char m4v_cache[M4V_CACHE_SIZE]; u64 end, cache_start, load_size; if (!m4v) return 0; if (m4v->force_next_obj_type) { m4v->current_object_type = m4v->force_next_obj_type - 1; m4v->force_next_obj_type = 0; return (s32)m4v->current_object_type; } bpos = 0; found = 0; load_size = 0; end = 0; cache_start = 0; v = 0xffffffff; while (!end) { /*refill cache*/ if (bpos == (u32)load_size) { if (!gf_bs_available(m4v->bs)) break; load_size = gf_bs_available(m4v->bs); if (load_size > M4V_CACHE_SIZE) load_size = M4V_CACHE_SIZE; bpos = 0; cache_start = gf_bs_get_position(m4v->bs); gf_bs_read_data(m4v->bs, m4v_cache, (u32)load_size); } v = ((v << 8) & 0xFFFFFF00) | ((u8)m4v_cache[bpos]); bpos++; if ((v & 0xFFFFFF00) == 0x00000100) { end = cache_start + bpos - 4; found = 1; break; } } if (!found) return -1; m4v->current_object_start = end; gf_bs_seek(m4v->bs, end + 3); m4v->current_object_type = gf_bs_read_u8(m4v->bs); return (s32)m4v->current_object_type; } GF_EXPORT void gf_m4v_rewrite_pl(u8 **o_data, u32 *o_dataLen, u8 PL) { u32 pos = 0; unsigned char *data = (unsigned char *)*o_data; u32 dataLen = *o_dataLen; while (pos + 4 < dataLen) { if (!data[pos] && !data[pos + 1] && (data[pos + 2] == 0x01) && (data[pos + 3] == M4V_VOS_START_CODE)) { data[pos + 4] = PL; return; } pos++; } /*emulate VOS at beggining*/ (*o_data) = (char *)gf_malloc(sizeof(char)*(dataLen + 5)); (*o_data)[0] = 0; (*o_data)[1] = 0; (*o_data)[2] = 1; (*o_data)[3] = (char)M4V_VOS_START_CODE; (*o_data)[4] = PL; memcpy((*o_data + 5), data, sizeof(char)*dataLen); 
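	/* No VOS start code was found in the original config, so a 5-byte header
	 * (00 00 01, VOS start code, PL byte) has been synthesized in the new buffer
	 * and the original bytes copied after it; release the old buffer and report
	 * the grown size to the caller. */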
gf_free(data); (*o_dataLen) = dataLen + 5; } static GF_Err M4V_Reset(GF_M4VParser *m4v, u64 start) { gf_bs_seek(m4v->bs, start); assert(start < (u64)1<<31); m4v->current_object_start = (u32)start; m4v->current_object_type = 0; return GF_OK; } void gf_m4v_parser_reset(GF_M4VParser *m4v, u8 sc_type) { m4v->current_object_start = 0; m4v->current_object_type = 0; m4v->force_next_obj_type = sc_type; } static GF_Err gf_m4v_parse_config_mpeg12(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { unsigned char p[4]; u32 ext_type; s32 o_type; u8 go, par; if (!m4v || !dsi) return GF_BAD_PARAM; memset(dsi, 0, sizeof(GF_M4VDecSpecInfo)); dsi->VideoPL = 0; go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { case M2V_SEQ_START_CODE: dsi->RAP_stream = 1; gf_bs_read_data(m4v->bs, (char *)p, 4); dsi->width = (p[0] << 4) | ((p[1] >> 4) & 0xf); dsi->height = ((p[1] & 0xf) << 8) | p[2]; dsi->VideoPL = GF_CODECID_MPEG1; par = (p[3] >> 4) & 0xf; switch (par) { case 2: dsi->par_num = dsi->height / 3; dsi->par_den = dsi->width / 4; break; case 3: dsi->par_num = dsi->height / 9; dsi->par_den = dsi->width / 16; break; case 4: dsi->par_num = dsi->height / 2; dsi->par_den = dsi->width / 21; break; default: dsi->par_den = dsi->par_num = 0; break; } switch (p[3] & 0xf) { case 0: break; case 1: dsi->fps = 24000.0 / 1001.0; break; case 2: dsi->fps = 24.0; break; case 3: dsi->fps = 25.0; break; case 4: dsi->fps = 30000.0 / 1001.0; break; case 5: dsi->fps = 30.0; break; case 6: dsi->fps = 50.0; break; case 7: dsi->fps = ((60.0*1000.0) / 1001.0); break; case 8: dsi->fps = 60.0; break; case 9: dsi->fps = 1; break; case 10: dsi->fps = 5; break; case 11: dsi->fps = 10; break; case 12: dsi->fps = 12; break; case 13: dsi->fps = 15; break; } break; case M2V_EXT_START_CODE: gf_bs_read_data(m4v->bs, (char *)p, 4); ext_type = ((p[0] >> 4) & 0xf); if (ext_type == 1) { dsi->VideoPL = 0x65; dsi->height = ((p[1] & 0x1) << 13) | ((p[2] & 0x80) << 5) | (dsi->height & 0x0fff); dsi->width = (((p[2] >> 5) & 0x3) << 12) | (dsi->width & 0x0fff); } break; case M2V_PIC_START_CODE: if (dsi->width) go = 0; break; default: break; /*EOS*/ case -1: go = 0; m4v->current_object_start = gf_bs_get_position(m4v->bs); break; } } M4V_Reset(m4v, 0); return GF_OK; } static const struct { u32 w, h; } m4v_sar[6] = { { 0, 0 }, { 1, 1 }, { 12, 11 }, { 10, 11 }, { 16, 11 }, { 40, 33 } }; static u8 m4v_get_sar_idx(u32 w, u32 h) { u32 i; for (i = 0; i < 6; i++) { if ((m4v_sar[i].w == w) && (m4v_sar[i].h == h)) return i; } return 0xF; } static void gf_m4v_parse_vol(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { u8 verid, par; s32 clock_rate; u8 vpl = dsi->VideoPL; memset(dsi, 0, sizeof(GF_M4VDecSpecInfo)); dsi->VideoPL = vpl; verid = 0; dsi->RAP_stream = gf_bs_read_int(m4v->bs, 1); dsi->objectType = gf_bs_read_int(m4v->bs, 8); if (gf_bs_read_int(m4v->bs, 1)) { verid = gf_bs_read_int(m4v->bs, 4); gf_bs_read_int(m4v->bs, 3); } par = gf_bs_read_int(m4v->bs, 4); if (par == 0xF) { dsi->par_num = gf_bs_read_int(m4v->bs, 8); dsi->par_den = gf_bs_read_int(m4v->bs, 8); } else if (par<6) { dsi->par_num = m4v_sar[par].w; dsi->par_den = m4v_sar[par].h; } if (gf_bs_read_int(m4v->bs, 1)) { gf_bs_read_int(m4v->bs, 3); if (gf_bs_read_int(m4v->bs, 1)) gf_bs_read_int(m4v->bs, 79); } dsi->has_shape = gf_bs_read_int(m4v->bs, 2); if (dsi->has_shape && (verid!=1) ) gf_bs_read_int(m4v->bs, 4); gf_bs_read_int(m4v->bs, 1); /*clock rate*/ dsi->clock_rate = gf_bs_read_int(m4v->bs, 16); /*marker*/ gf_bs_read_int(m4v->bs, 1); clock_rate = dsi->clock_rate-1; if (clock_rate >= 65536) 
clock_rate = 65535; if (clock_rate > 0) { for (dsi->NumBitsTimeIncrement = 1; dsi->NumBitsTimeIncrement < 16; dsi->NumBitsTimeIncrement++) { if (clock_rate == 1) break; clock_rate = (clock_rate >> 1); } } else { /*fix from vivien for divX*/ dsi->NumBitsTimeIncrement = 1; } /*fixed FPS stream*/ dsi->time_increment = 0; if (gf_bs_read_int(m4v->bs, 1)) { dsi->time_increment = gf_bs_read_int(m4v->bs, dsi->NumBitsTimeIncrement); } if (!dsi->has_shape) { gf_bs_read_int(m4v->bs, 1); dsi->width = gf_bs_read_int(m4v->bs, 13); gf_bs_read_int(m4v->bs, 1); dsi->height = gf_bs_read_int(m4v->bs, 13); } else { dsi->width = dsi->height = 0; } gf_bs_align(m4v->bs); } static GF_Err gf_m4v_parse_config_mpeg4(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { s32 o_type; u8 go; if (!m4v || !dsi) return GF_BAD_PARAM; memset(dsi, 0, sizeof(GF_M4VDecSpecInfo)); go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { /*vosh*/ case M4V_VOS_START_CODE: dsi->VideoPL = (u8)gf_bs_read_u8(m4v->bs); break; case M4V_VOL_START_CODE: gf_m4v_parse_vol(m4v, dsi); /*shape will be done later*/ gf_bs_align(m4v->bs); break; case M4V_VOP_START_CODE: case M4V_GOV_START_CODE: go = 0; break; /*EOS*/ case -1: m4v->current_object_start = gf_bs_get_position(m4v->bs); return GF_EOS; /*don't interest us*/ case M4V_UDTA_START_CODE: default: break; } } return GF_OK; } GF_EXPORT GF_Err gf_m4v_parse_config(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { if (m4v->mpeg12) { return gf_m4v_parse_config_mpeg12(m4v, dsi); } else { return gf_m4v_parse_config_mpeg4(m4v, dsi); } } static GF_Err gf_m4v_parse_frame_mpeg12(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi, u8 *frame_type, u32 *time_inc, u64 *size, u64 *start, Bool *is_coded) { u8 go, hasVOP, firstObj, val; s32 o_type; if (!m4v || !size || !start || !frame_type) return GF_BAD_PARAM; *size = 0; firstObj = 1; hasVOP = 0; *is_coded = GF_FALSE; m4v->current_object_type = (u32)-1; *frame_type = 0; if (!m4v->step_mode) M4V_Reset(m4v, m4v->current_object_start); go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { case M2V_PIC_START_CODE: /*done*/ if (hasVOP) { go = 0; break; } if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } hasVOP = 1; *is_coded = 1; /*val = */gf_bs_read_u8(m4v->bs); val = gf_bs_read_u8(m4v->bs); *frame_type = ((val >> 3) & 0x7) - 1; break; case M2V_GOP_START_CODE: if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } if (hasVOP) go = 0; break; case M2V_SEQ_START_CODE: if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } if (hasVOP) { go = 0; break; } /**/ break; default: break; case -1: *size = gf_bs_get_position(m4v->bs) - *start; return GF_EOS; } if (m4v->step_mode) return GF_OK; } *size = m4v->current_object_start - *start; return GF_OK; } static GF_Err gf_m4v_parse_frame_mpeg4(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi, u8 *frame_type, u32 *time_inc, u64 *size, u64 *start, Bool *is_coded) { u8 go, hasVOP, firstObj, secs; s32 o_type; u32 vop_inc = 0; if (!m4v || !size || !start || !frame_type) return GF_BAD_PARAM; *size = 0; firstObj = 1; hasVOP = 0; *is_coded = 0; m4v->current_object_type = (u32)-1; *frame_type = 0; *start = 0; if (!m4v->step_mode) M4V_Reset(m4v, m4v->current_object_start); go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { case M4V_VOP_START_CODE: /*done*/ if (hasVOP) { go = 0; break; } if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } hasVOP = 1; /*coding type*/ *frame_type = gf_bs_read_int(m4v->bs, 2); /*modulo time base*/ secs = 0; while 
(gf_bs_read_int(m4v->bs, 1) != 0) secs++; /*no support for B frames in parsing*/ secs += (dsi->enh_layer || *frame_type!=2) ? m4v->tc_dec : m4v->tc_disp; /*marker*/ gf_bs_read_int(m4v->bs, 1); /*vop_time_inc*/ if (dsi->NumBitsTimeIncrement) vop_inc = gf_bs_read_int(m4v->bs, dsi->NumBitsTimeIncrement); m4v->prev_tc_dec = m4v->tc_dec; m4v->prev_tc_disp = m4v->tc_disp; if (dsi->enh_layer || *frame_type!=2) { m4v->tc_disp = m4v->tc_dec; m4v->tc_dec = secs; } *time_inc = secs * dsi->clock_rate + vop_inc; /*marker*/ gf_bs_read_int(m4v->bs, 1); /*coded*/ *is_coded = gf_bs_read_int(m4v->bs, 1); gf_bs_align(m4v->bs); break; case M4V_GOV_START_CODE: if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } if (hasVOP) go = 0; break; case M4V_VOL_START_CODE: if (m4v->step_mode) gf_m4v_parse_vol(m4v, dsi); case M4V_VOS_START_CODE: if (hasVOP) { go = 0; } else if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } break; case M4V_VO_START_CODE: default: break; case -1: *size = gf_bs_get_position(m4v->bs) - *start; return GF_EOS; } if (m4v->step_mode) return GF_OK; } assert(m4v->current_object_start >= *start); *size = m4v->current_object_start - *start; return GF_OK; } GF_EXPORT GF_Err gf_m4v_parse_frame(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi, u8 *frame_type, u32 *time_inc, u64 *size, u64 *start, Bool *is_coded) { if (m4v->mpeg12) { return gf_m4v_parse_frame_mpeg12(m4v, dsi, frame_type, time_inc, size, start, is_coded); } else { return gf_m4v_parse_frame_mpeg4(m4v, dsi, frame_type, time_inc, size, start, is_coded); } } GF_Err gf_m4v_rewrite_par(u8 **o_data, u32 *o_dataLen, s32 par_n, s32 par_d) { u64 start, end, size; GF_BitStream *mod; GF_M4VParser *m4v; Bool go = 1; m4v = gf_m4v_parser_new(*o_data, *o_dataLen, 0); mod = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); start = 0; while (go) { u32 type = M4V_LoadObject(m4v); end = gf_bs_get_position(m4v->bs) - 4; size = end - start; /*store previous object*/ if (size) { assert (size < (u64)1<<31); gf_bs_write_data(mod, *o_data + start, (u32)size); start = end; } switch (type) { case M4V_VOL_START_CODE: gf_bs_write_int(mod, 0, 8); gf_bs_write_int(mod, 0, 8); gf_bs_write_int(mod, 1, 8); gf_bs_write_int(mod, M4V_VOL_START_CODE, 8); gf_bs_write_int(mod, gf_bs_read_int(m4v->bs, 1), 1); gf_bs_write_int(mod, gf_bs_read_int(m4v->bs, 8), 8); start = gf_bs_read_int(m4v->bs, 1); gf_bs_write_int(mod, (u32)start, 1); if (start) { gf_bs_write_int(mod, gf_bs_read_int(m4v->bs, 7), 7); } start = gf_bs_read_int(m4v->bs, 4); if (start == 0xF) { gf_bs_read_int(m4v->bs, 8); gf_bs_read_int(m4v->bs, 8); } if ((par_n >= 0) && (par_d >= 0)) { u8 par = m4v_get_sar_idx(par_n, par_d); gf_bs_write_int(mod, par, 4); if (par == 0xF) { gf_bs_write_int(mod, par_n, 8); gf_bs_write_int(mod, par_d, 8); } } else { gf_bs_write_int(mod, 0x0, 4); } case -1: go = 0; break; default: break; } } while (gf_bs_bits_available(m4v->bs)) { u32 b = gf_bs_read_int(m4v->bs, 1); gf_bs_write_int(mod, b, 1); } gf_m4v_parser_del(m4v); gf_free(*o_data); gf_bs_get_content(mod, o_data, o_dataLen); gf_bs_del(mod); return GF_OK; } GF_EXPORT u64 gf_m4v_get_object_start(GF_M4VParser *m4v) { return m4v->current_object_start; } #if 0 //unused Bool gf_m4v_is_valid_object_type(GF_M4VParser *m4v) { return ((s32)m4v->current_object_type == -1) ? 
0 : 1; } #endif GF_EXPORT GF_Err gf_m4v_get_config(u8 *rawdsi, u32 rawdsi_size, GF_M4VDecSpecInfo *dsi) { GF_Err e; GF_M4VParser *vparse; if (!rawdsi || !rawdsi_size) return GF_NON_COMPLIANT_BITSTREAM; vparse = gf_m4v_parser_new(rawdsi, rawdsi_size, 0); e = gf_m4v_parse_config(vparse, dsi); dsi->next_object_start = (u32)vparse->current_object_start; gf_m4v_parser_del(vparse); return e < 0 ? e : GF_OK; } GF_EXPORT GF_Err gf_mpegv12_get_config(u8 *rawdsi, u32 rawdsi_size, GF_M4VDecSpecInfo *dsi) { GF_Err e; GF_M4VParser *vparse; if (!rawdsi || !rawdsi_size) return GF_NON_COMPLIANT_BITSTREAM; vparse = gf_m4v_parser_new(rawdsi, rawdsi_size, GF_TRUE); e = gf_m4v_parse_config(vparse, dsi); dsi->next_object_start = (u32)vparse->current_object_start; gf_m4v_parser_del(vparse); return e; } #endif /* AAC parser */ struct __m4a_oti { u32 type; const char *name; } M4AObjectTypes[] = { {0, "MPEG-4 Audio Reserved"}, {1, "MPEG-4 Audio AAC Main"}, {2, "MPEG-4 Audio AAC LC"}, {3, "MPEG-4 Audio AAC SSR"}, {4, "MPEG-4 Audio AAC LTP"}, {5, "MPEG-4 Audio SBR"}, {6, "MPEG-4 Audio AAC Scalable"}, {7, "MPEG-4 Audio TwinVQ"}, {8, "MPEG-4 Audio CELP"}, {9, "MPEG-4 Audio HVXC"}, {10, "MPEG-4 Audio Reserved"}, {11, "MPEG-4 Audio Reserved"}, {12, "MPEG-4 Audio TTSI"}, {13, "MPEG-4 Audio Main synthetic"}, {14, "MPEG-4 Audio Wavetable synthesis"}, {15, "MPEG-4 Audio General MIDI"}, {16, "MPEG-4 Audio Algorithmic Synthesis and Audio FX"}, {17, "MPEG-4 Audio ER AAC LC"}, {18, "MPEG-4 Audio Reserved"}, {19, "MPEG-4 Audio ER AAC LTP"}, {20, "MPEG-4 Audio ER AAC scalable"}, {21, "MPEG-4 Audio ER TwinVQ"}, {22, "MPEG-4 Audio ER BSAC"}, {23, "MPEG-4 Audio ER AAC LD"}, {24, "MPEG-4 Audio ER CELP"}, {25, "MPEG-4 Audio ER HVXC"}, {26, "MPEG-4 Audio ER HILN"}, {27, "MPEG-4 Audio ER Parametric"}, {28, "MPEG-4 Audio SSC"}, {29, "MPEG-4 Audio ParametricStereo"}, {30, "MPEG-4 Audio Reserved"}, {31, "MPEG-4 Audio Reserved"}, {32, "MPEG-1 Audio Layer-1"}, {33, "MPEG-1 Audio Layer-2"}, {34, "MPEG-1 Audio Layer-3"}, {35, "MPEG-4 Audio DST"}, {36, "MPEG-4 Audio ALS"}, {37, "MPEG-4 Audio SLS"}, {42, "MPEG Audio xHE-AAC"}, }; GF_EXPORT const char *gf_m4a_object_type_name(u32 objectType) { u32 i, count = GF_ARRAY_LENGTH(M4AObjectTypes); for (i=0; i<count; i++) { if (objectType==M4AObjectTypes[i].type) return M4AObjectTypes[i].name; } return "MPEG-4 Audio Unknown"; } struct __m4a_profile { u32 value; const char *name; } M4AProfiles[] = { {0x00, "ISO Reserved (0x00)"}, {0x01, "Main Audio Profile @ Level 1"}, {0x02, "Main Audio Profile @ Level 2"}, {0x03, "Main Audio Profile @ Level 3"}, {0x04, "Main Audio Profile @ Level 4"}, {0x05, "Scalable Audio Profile @ Level 1"}, {0x06, "Scalable Audio Profile @ Level 2"}, {0x07, "Scalable Audio Profile @ Level 3"}, {0x08, "Scalable Audio Profile @ Level 4"}, {0x09, "Speech Audio Profile @ Level 1"}, {0x0A, "Speech Audio Profile @ Level 2"}, {0x0B, "Synthetic Audio Profile @ Level 1"}, {0x0C, "Synthetic Audio Profile @ Level 2"}, {0x0D, "Synthetic Audio Profile @ Level 3"}, {0x0E, "High Quality Audio Profile @ Level 1"}, {0x0F, "High Quality Audio Profile @ Level 2"}, {0x10, "High Quality Audio Profile @ Level 3"}, {0x11, "High Quality Audio Profile @ Level 4"}, {0x12, "High Quality Audio Profile @ Level 5"}, {0x13, "High Quality Audio Profile @ Level 6"}, {0x14, "High Quality Audio Profile @ Level 7"}, {0x15, "High Quality Audio Profile @ Level 8"}, {0x16, "Low Delay Audio Profile @ Level 1"}, {0x17, "Low Delay Audio Profile @ Level 2"}, {0x18, "Low Delay Audio Profile @ Level 3"}, {0x19, "Low Delay 
Audio Profile @ Level 4"}, {0x1A, "Low Delay Audio Profile @ Level 5"}, {0x1B, "Low Delay Audio Profile @ Level 6"}, {0x1C, "Low Delay Audio Profile @ Level 7"}, {0x1D, "Low Delay Audio Profile @ Level 8"}, {0x1E, "Natural Audio Profile @ Level 1"}, {0x1F, "Natural Audio Profile @ Level 2"}, {0x20, "Natural Audio Profile @ Level 3"}, {0x21, "Natural Audio Profile @ Level 4"}, {0x22, "Mobile Audio Internetworking Profile @ Level 1"}, {0x23, "Mobile Audio Internetworking Profile @ Level 2"}, {0x24, "Mobile Audio Internetworking Profile @ Level 3"}, {0x25, "Mobile Audio Internetworking Profile @ Level 4"}, {0x26, "Mobile Audio Internetworking Profile @ Level 5"}, {0x27, "Mobile Audio Internetworking Profile @ Level 6"}, {0x28, "AAC Profile @ Level 1"}, {0x29, "AAC Profile @ Level 2"}, {0x2A, "AAC Profile @ Level 4"}, {0x2B, "AAC Profile @ Level 5"}, {0x2C, "High Efficiency AAC Profile @ Level 2"}, {0x2D, "High Efficiency AAC Profile @ Level 3"}, {0x2E, "High Efficiency AAC Profile @ Level 4"}, {0x2F, "High Efficiency AAC Profile @ Level 5"}, {0x30, "High Efficiency AAC v2 Profile @ Level 2"}, {0x31, "High Efficiency AAC v2 Profile @ Level 3"}, {0x32, "High Efficiency AAC v2 Profile @ Level 4"}, {0x33, "High Efficiency AAC v2 Profile @ Level 5"}, {0x34, "Low Delay AAC Profile"}, {0x35, "Baseline MPEG Surround Profile @ Level 1"}, {0x36, "Baseline MPEG Surround Profile @ Level 2"}, {0x37, "Baseline MPEG Surround Profile @ Level 3"}, {0x38, "Baseline MPEG Surround Profile @ Level 4"}, {0x39, "Baseline MPEG Surround Profile @ Level 5"}, {0x3A, "Baseline MPEG Surround Profile @ Level 6"}, {0x3B, "High Definition AAC Profile @ Level 1"}, {0x3C, "ALS Simple Profile @ Level 1"}, {0x50, "AAC Profile @ Level 6"}, {0x51, "AAC Profile @ Level 7"}, {0x52, "High Efficiency AAC Profile @ Level 6"}, {0x53, "High Efficiency AAC Profile @ Level 7"}, {0x54, "High Efficiency AAC v2 Profile @ Level 6"}, {0x55, "High Efficiency AAC v2 Profile @ Level 7"}, {0x56, "Extended High Efficiency AAC Profile @ Level 6"}, {0x57, "Extended High Efficiency AAC Profile @ Level 7"}, {0xFE, "Not part of MPEG-4 audio profiles"}, {0xFF, "No audio capability required"} }; GF_EXPORT const char *gf_m4a_get_profile_name(u8 audio_pl) { u32 i, count = GF_ARRAY_LENGTH(M4AProfiles); for (i=0; i<count; i++) { if ((u32) audio_pl==M4AProfiles[i].value) return M4AProfiles[i].name; } return "ISO Reserved / User Private"; } #ifndef GPAC_DISABLE_AV_PARSERS GF_EXPORT u32 gf_m4a_get_profile(GF_M4ADecSpecInfo *cfg) { switch (cfg->base_object_type) { case 2: /*AAC LC*/ if (cfg->nb_chan <= 2) return (cfg->base_sr <= 24000) ? 0x28 : 0x29; /*LC@L1 or LC@L2*/ if (cfg->nb_chan <= 5) return (cfg->base_sr <= 48000) ? 0x2A : 0x2B; /*LC@L4 or LC@L5*/ return (cfg->base_sr <= 48000) ? 0x50 : 0x51; /*LC@L4 or LC@L5*/ case 5: /*HE-AAC - SBR*/ if (cfg->nb_chan <= 2) return (cfg->base_sr <= 24000) ? 0x2C : 0x2D; /*HE@L2 or HE@L3*/ if (cfg->nb_chan <= 5) return (cfg->base_sr <= 48000) ? 0x2E : 0x2F; /*HE@L4 or HE@L5*/ return (cfg->base_sr <= 48000) ? 0x52 : 0x53; /*HE@L6 or HE@L7*/ case 29: /*HE-AACv2 - SBR+PS*/ if (cfg->nb_chan <= 2) return (cfg->base_sr <= 24000) ? 0x30 : 0x31; /*HE-AACv2@L2 or HE-AACv2@L3*/ if (cfg->nb_chan <= 5) return (cfg->base_sr <= 48000) ? 0x32 : 0x33; /*HE-AACv2@L4 or HE-AACv2@L5*/ return (cfg->base_sr <= 48000) ? 0x54 : 0x55; /*HE-AACv2@L6 or HE-AACv2@L7*/ /*default to HQ*/ default: if (cfg->nb_chan <= 2) return (cfg->base_sr < 24000) ? 
0x0E : 0x0F; /*HQ@L1 or HQ@L2*/ return 0x10; /*HQ@L3*/ } } GF_EXPORT GF_Err gf_m4a_parse_program_config_element(GF_BitStream *bs, GF_M4ADecSpecInfo *cfg) { u32 i; cfg->program_config_element_present = 1; cfg->cpe_channels = 0; cfg->element_instance_tag = gf_bs_read_int_log(bs, 4, "element_instance_tag"); cfg->object_type = gf_bs_read_int_log(bs, 2, "object_type"); cfg->sampling_frequency_index = gf_bs_read_int_log(bs, 4, "sampling_frequency_index"); cfg->num_front_channel_elements = gf_bs_read_int_log(bs, 4, "num_front_channel_elements"); cfg->num_side_channel_elements = gf_bs_read_int_log(bs, 4, "num_side_channel_elements"); cfg->num_back_channel_elements = gf_bs_read_int_log(bs, 4, "num_back_channel_elements"); cfg->num_lfe_channel_elements = gf_bs_read_int_log(bs, 2, "num_lfe_channel_elements"); cfg->num_assoc_data_elements = gf_bs_read_int_log(bs, 3, "num_assoc_data_elements"); cfg->num_valid_cc_elements = gf_bs_read_int_log(bs, 4, "num_valid_cc_elements"); cfg->mono_mixdown_present = (Bool)gf_bs_read_int_log(bs, 1, "mono_mixdown_present"); if (cfg->mono_mixdown_present) { cfg->mono_mixdown_element_number = gf_bs_read_int_log(bs, 4, "mono_mixdown_element_number"); } cfg->stereo_mixdown_present = gf_bs_read_int_log(bs, 1, "stereo_mixdown_present"); if (cfg->stereo_mixdown_present) { cfg->stereo_mixdown_element_number = gf_bs_read_int_log(bs, 4, "stereo_mixdown_element_number"); } cfg->matrix_mixdown_idx_present = gf_bs_read_int_log(bs, 1, "matrix_mixdown_idx_present"); if (cfg->matrix_mixdown_idx_present) { cfg->matrix_mixdown_idx = gf_bs_read_int_log(bs, 2, "matrix_mixdown_idx"); cfg->pseudo_surround_enable = gf_bs_read_int_log(bs, 1, "pseudo_surround_enable"); } for (i = 0; i < cfg->num_front_channel_elements; i++) { cfg->front_element_is_cpe[i] = gf_bs_read_int_log_idx(bs, 1, "front_element_is_cpe", i); cfg->front_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "front_element_tag_select", i); if (cfg->front_element_is_cpe[i]) cfg->cpe_channels++; } for (i = 0; i < cfg->num_side_channel_elements; i++) { cfg->side_element_is_cpe[i] = gf_bs_read_int_log_idx(bs, 1, "side_element_is_cpe", i); cfg->side_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "side_element_tag_select", i); if (cfg->side_element_is_cpe[i]) cfg->cpe_channels++; } for (i = 0; i < cfg->num_back_channel_elements; i++) { cfg->back_element_is_cpe[i] = gf_bs_read_int_log_idx(bs, 1, "back_element_is_cpe", i); cfg->back_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "back_element_tag_select", i); if (cfg->back_element_is_cpe[i]) cfg->cpe_channels++; } for (i = 0; i < cfg->num_lfe_channel_elements; i++) { cfg->lfe_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "lfe_element_tag_select", i); } for (i = 0; i < cfg->num_assoc_data_elements; i++) { cfg->assoc_data_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "assoc_data_element_tag_select", i); } for (i = 0; i < cfg->num_valid_cc_elements; i++) { cfg->cc_element_is_ind_sw[i] = gf_bs_read_int_log_idx(bs, 1, "cc_element_is_ind_sw", i); cfg->valid_cc_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "valid_cc_element_tag_select", i); } gf_bs_align(bs); cfg->comment_field_bytes = gf_bs_read_int_log(bs, 8, "comment_field_bytes"); gf_bs_read_data(bs, (char *)cfg->comments, cfg->comment_field_bytes); cfg->nb_chan = cfg->num_front_channel_elements + cfg->num_back_channel_elements + cfg->num_side_channel_elements + cfg->num_lfe_channel_elements; cfg->nb_chan += cfg->cpe_channels; return GF_OK; } GF_EXPORT GF_Err gf_m4a_parse_config(GF_BitStream *bs, 
GF_M4ADecSpecInfo *cfg, Bool size_known) { u32 audio_obj_type; memset(cfg, 0, sizeof(GF_M4ADecSpecInfo)); cfg->base_object_type = gf_bs_read_int_log(bs, 5, "base_object_type"); /*extended object type*/ if (cfg->base_object_type == 31) { cfg->base_object_type = 32 + gf_bs_read_int_log(bs, 6, "extended_base_object_type"); } cfg->base_sr_index = gf_bs_read_int_log(bs, 4, "base_samplerate_index"); if (cfg->base_sr_index == 0x0F) { cfg->base_sr = gf_bs_read_int_log(bs, 24, "base_samplerate"); } else { cfg->base_sr = GF_M4ASampleRates[cfg->base_sr_index]; } cfg->chan_cfg = gf_bs_read_int_log(bs, 4, "channel_configuration"); if (cfg->chan_cfg) { cfg->nb_chan = GF_M4ANumChannels[cfg->chan_cfg - 1]; } audio_obj_type = cfg->base_object_type; if (cfg->base_object_type == 5 || cfg->base_object_type == 29) { if (cfg->base_object_type == 29) { cfg->has_ps = 1; cfg->nb_chan = 1; } cfg->has_sbr = GF_TRUE; cfg->sbr_sr_index = gf_bs_read_int_log(bs, 4, "sbr_samplerate_index"); if (cfg->sbr_sr_index == 0x0F) { cfg->sbr_sr = gf_bs_read_int_log(bs, 24, "sbr_samplerate"); } else { cfg->sbr_sr = GF_M4ASampleRates[cfg->sbr_sr_index]; } cfg->sbr_object_type = gf_bs_read_int_log(bs, 5, "sbr_object_type"); if (cfg->sbr_object_type==31) cfg->sbr_object_type = 32 + gf_bs_read_int_log(bs, 6, "audioObjectTypeExt"); audio_obj_type = cfg->sbr_object_type; if (cfg->sbr_object_type==22) { /*ext_chan_cfg = */gf_bs_read_int_log(bs, 4, "channel_configuration"); } } /*object cfg*/ switch (audio_obj_type) { case 1: case 2: case 3: case 4: case 6: case 7: case 17: case 19: case 20: case 21: case 22: case 23: case 42: { Bool ext_flag; gf_bs_read_int_log(bs, 1, "frame_length_flag"); if (gf_bs_read_int_log(bs, 1, "depends_on_core_coder")) gf_bs_read_int_log(bs, 14, "delay"); ext_flag = gf_bs_read_int_log(bs, 1, "extension_flag"); if (!cfg->chan_cfg) { gf_m4a_parse_program_config_element(bs, cfg); } if ((cfg->base_object_type == 6) || (cfg->base_object_type == 20)) { gf_bs_read_int_log(bs, 3, "layerN"); } if (ext_flag) { if (cfg->base_object_type == 22) { gf_bs_read_int_log(bs, 5, "numOfSubFrame"); gf_bs_read_int_log(bs, 11, "layer_length"); } if ((cfg->base_object_type == 17) || (cfg->base_object_type == 19) || (cfg->base_object_type == 20) || (cfg->base_object_type == 23) ) { gf_bs_read_int_log(bs, 1, "aacSectionDataResilienceFlag"); gf_bs_read_int_log(bs, 1, "aacScalefactorDataResilienceFlag"); gf_bs_read_int_log(bs, 1, "aacSpectralDataResilienceFlag"); } gf_bs_read_int_log(bs, 1, "extensionFlag3"); } } break; } /*ER cfg*/ switch (audio_obj_type) { case 17: case 19: case 20: case 21: case 22: case 23: case 24: case 25: case 26: case 27: { u32 epConfig = gf_bs_read_int_log(bs, 2, "epConfig"); if ((epConfig == 2) || (epConfig == 3)) { } if (epConfig == 3) { gf_bs_read_int_log(bs, 1, "directMapping"); } } break; } if (size_known && (cfg->base_object_type != 5) && (cfg->base_object_type != 29)) { while (gf_bs_available(bs) >= 2) { u32 sync = gf_bs_peek_bits(bs, 11, 0); if (sync == 0x2b7) { gf_bs_read_int_log(bs, 11, "syncExtensionType"); cfg->sbr_object_type = gf_bs_read_int_log(bs, 5, "extensionAudioObjectType "); cfg->has_sbr = gf_bs_read_int_log(bs, 1, "sbrPresentFlag"); if (cfg->has_sbr) { cfg->sbr_sr_index = gf_bs_read_int_log(bs, 4, "extensionSamplingFrequencyIndex"); if (cfg->sbr_sr_index == 0x0F) { cfg->sbr_sr = gf_bs_read_int_log(bs, 24, "extensionSamplingFrequency"); } else { cfg->sbr_sr = GF_M4ASampleRates[cfg->sbr_sr_index]; } } } else if (sync == 0x548) { gf_bs_read_int_log(bs, 11, "syncExtensionType"); cfg->has_ps = 
gf_bs_read_int_log(bs, 1, "hasParametricStereo"); if (cfg->has_ps) cfg->nb_chan = 1; } else { break; } } } cfg->audioPL = gf_m4a_get_profile(cfg); return GF_OK; } GF_EXPORT GF_Err gf_m4a_get_config(u8 *dsi, u32 dsi_size, GF_M4ADecSpecInfo *cfg) { GF_BitStream *bs; if (!dsi || !dsi_size || (dsi_size < 2)) return GF_NON_COMPLIANT_BITSTREAM; bs = gf_bs_new(dsi, dsi_size, GF_BITSTREAM_READ); gf_m4a_parse_config(bs, cfg, GF_TRUE); gf_bs_del(bs); return GF_OK; } u32 gf_latm_get_value(GF_BitStream *bs) { u32 i, tmp, value = 0; u32 bytesForValue = gf_bs_read_int(bs, 2); for (i = 0; i <= bytesForValue; i++) { value <<= 8; tmp = gf_bs_read_int(bs, 8); value += tmp; } return value; } GF_EXPORT u32 gf_m4a_get_channel_cfg(u32 nb_chan) { u32 i, count = sizeof(GF_M4ANumChannels) / sizeof(u32); for (i = 0; i < count; i++) { if (GF_M4ANumChannels[i] == nb_chan) return i + 1; } return 0; } GF_EXPORT GF_Err gf_m4a_write_program_config_element_bs(GF_BitStream *bs, GF_M4ADecSpecInfo *cfg) { u32 i; gf_bs_write_int(bs, cfg->element_instance_tag, 4); gf_bs_write_int(bs, cfg->object_type, 2); gf_bs_write_int(bs, cfg->sampling_frequency_index, 4); gf_bs_write_int(bs, cfg->num_front_channel_elements, 4); gf_bs_write_int(bs, cfg->num_side_channel_elements, 4); gf_bs_write_int(bs, cfg->num_back_channel_elements, 4); gf_bs_write_int(bs, cfg->num_lfe_channel_elements, 2); gf_bs_write_int(bs, cfg->num_assoc_data_elements, 3); gf_bs_write_int(bs, cfg->num_valid_cc_elements, 4); gf_bs_write_int(bs, cfg->mono_mixdown_present, 1); if (cfg->mono_mixdown_present) { gf_bs_write_int(bs, cfg->mono_mixdown_element_number, 4); } gf_bs_write_int(bs, cfg->stereo_mixdown_present, 1); if (cfg->stereo_mixdown_present) { gf_bs_write_int(bs, cfg->stereo_mixdown_element_number, 4); } gf_bs_write_int(bs, cfg->matrix_mixdown_idx_present, 1); if (cfg->matrix_mixdown_idx_present) { gf_bs_write_int(bs, cfg->matrix_mixdown_idx, 2); gf_bs_write_int(bs, cfg->pseudo_surround_enable, 1); } for (i = 0; i < cfg->num_front_channel_elements; i++) { gf_bs_write_int(bs, cfg->front_element_is_cpe[i], 1); gf_bs_write_int(bs, cfg->front_element_tag_select[i], 4); } for (i = 0; i < cfg->num_side_channel_elements; i++) { gf_bs_write_int(bs, cfg->side_element_is_cpe[i], 1); gf_bs_write_int(bs, cfg->side_element_tag_select[i], 4); } for (i = 0; i < cfg->num_back_channel_elements; i++) { gf_bs_write_int(bs, cfg->back_element_is_cpe[i], 1); gf_bs_write_int(bs, cfg->back_element_tag_select[i], 4); } for (i = 0; i < cfg->num_lfe_channel_elements; i++) { gf_bs_write_int(bs, cfg->lfe_element_tag_select[i], 4); } for (i = 0; i < cfg->num_assoc_data_elements; i++) { gf_bs_write_int(bs, cfg->assoc_data_element_tag_select[i], 4); } for (i = 0; i < cfg->num_valid_cc_elements; i++) { gf_bs_write_int(bs, cfg->cc_element_is_ind_sw[i], 1); gf_bs_write_int(bs, cfg->valid_cc_element_tag_select[i], 4); } gf_bs_align(bs); gf_bs_write_int(bs, cfg->comment_field_bytes, 8); gf_bs_write_data(bs, (char *)cfg->comments, cfg->comment_field_bytes); return GF_OK; } GF_EXPORT GF_Err gf_m4a_write_config_bs(GF_BitStream *bs, GF_M4ADecSpecInfo *cfg) { if (!cfg->base_sr_index) { if (!cfg->base_sr) return GF_BAD_PARAM; while (GF_M4ASampleRates[cfg->base_sr_index]) { if (GF_M4ASampleRates[cfg->base_sr_index] == cfg->base_sr) break; cfg->base_sr_index++; } } if (cfg->sbr_sr && !cfg->sbr_sr_index) { while (GF_M4ASampleRates[cfg->sbr_sr_index]) { if (GF_M4ASampleRates[cfg->sbr_sr_index] == cfg->sbr_sr) break; cfg->sbr_sr_index++; } } /*extended object type*/ if (cfg->base_object_type >= 32) { 
gf_bs_write_int(bs, 31, 5); gf_bs_write_int(bs, cfg->base_object_type - 32, 6); } else { gf_bs_write_int(bs, cfg->base_object_type, 5); } gf_bs_write_int(bs, cfg->base_sr_index, 4); if (cfg->base_sr_index == 0x0F) { gf_bs_write_int(bs, cfg->base_sr, 24); } if (cfg->program_config_element_present) { gf_bs_write_int(bs, 0, 4); } else { cfg->chan_cfg = gf_m4a_get_channel_cfg(cfg->nb_chan); if (!cfg->chan_cfg) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AAC] Cannot write decoder config, ProgramConfigElement is missing and channel configuration is not a predefined one !\n")); return GF_BAD_PARAM; } gf_bs_write_int(bs, cfg->chan_cfg, 4); } if (cfg->base_object_type == 5 || cfg->base_object_type == 29) { if (cfg->base_object_type == 29) { cfg->has_ps = 1; cfg->nb_chan = 1; } cfg->has_sbr = 1; gf_bs_write_int(bs, cfg->sbr_sr_index, 4); if (cfg->sbr_sr_index == 0x0F) { gf_bs_write_int(bs, cfg->sbr_sr, 24); } gf_bs_write_int(bs, cfg->sbr_object_type, 5); } /*object cfg*/ switch (cfg->base_object_type) { case 1: case 2: case 3: case 4: case 6: case 7: case 17: case 19: case 20: case 21: case 22: case 23: case 42: { /*frame length flag*/ gf_bs_write_int(bs, 0, 1); /*depends on core coder*/ gf_bs_write_int(bs, 0, 1); /*ext flag*/ gf_bs_write_int(bs, 0, 1); if (cfg->program_config_element_present) { gf_m4a_write_program_config_element_bs(bs, cfg); } if ((cfg->base_object_type == 6) || (cfg->base_object_type == 20)) { gf_bs_write_int(bs, 0, 3); } } break; } /*ER cfg - not supported*/ /*implicit sbr/ps signaling not written here, cf reframe_adts*/ return GF_OK; } GF_EXPORT GF_Err gf_m4a_write_config(GF_M4ADecSpecInfo *cfg, u8 **dsi, u32 *dsi_size) { GF_BitStream *bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_m4a_write_config_bs(bs, cfg); gf_bs_get_content(bs, dsi, dsi_size); gf_bs_del(bs); return GF_OK; } /*AV1 parsing*/ static u32 av1_read_ns(GF_BitStream *bs, u32 n, const char *fname) { u32 v, res; Bool extra_bit; int w = (u32)(log(n) / log(2)) + 1; u32 m = (1 << w) - n; assert(w < 32); v = gf_bs_read_int(bs, w - 1); if (v < m) { if (fname) { gf_bs_log(bs, w-1, fname, v); } return v; } extra_bit = gf_bs_read_int(bs, 1); res = (v << 1) - m + extra_bit; if (fname) { gf_bs_log(bs, w, fname, res); } return res; } static void av1_color_config(GF_BitStream *bs, AV1State *state) { state->config->high_bitdepth = gf_bs_read_int_log(bs, 1, "high_bitdepth"); state->bit_depth = 8; if (state->config->seq_profile == 2 && state->config->high_bitdepth) { state->config->twelve_bit = gf_bs_read_int_log(bs, 1, "twelve_bit"); state->bit_depth = state->config->twelve_bit ? 12 : 10; } else if (state->config->seq_profile <= 2) { state->bit_depth = state->config->high_bitdepth ? 10 : 8; } state->config->monochrome = GF_FALSE; if (state->config->seq_profile == 1) { state->config->monochrome = GF_FALSE; } else { state->config->monochrome = gf_bs_read_int_log(bs, 1, "monochrome"); } /*NumPlanes = mono_chrome ? 
1 : 3;*/ state->color_description_present_flag = gf_bs_read_int_log(bs, 1, "color_description_present_flag"); if (state->color_description_present_flag) { state->color_primaries = gf_bs_read_int_log(bs, 8, "color_primaries"); state->transfer_characteristics = gf_bs_read_int_log(bs, 8, "transfer_characteristics"); state->matrix_coefficients = gf_bs_read_int_log(bs, 8, "matrix_coefficients"); } else { state->color_primaries = 2/*CP_UNSPECIFIED*/; state->transfer_characteristics = 2/*TC_UNSPECIFIED*/; state->matrix_coefficients = 2/*MC_UNSPECIFIED*/; } if (state->config->monochrome) { state->color_range = gf_bs_read_int_log(bs, 1, "color_range"); state->config->chroma_subsampling_x = GF_TRUE; state->config->chroma_subsampling_y = GF_TRUE; state->config->chroma_sample_position = 0/*CSP_UNKNOWN*/; state->separate_uv_delta_q = 0; return; } else if (state->color_primaries == 0/*CP_BT_709*/ && state->transfer_characteristics == 13/*TC_SRGB*/ && state->matrix_coefficients == 0/*MC_IDENTITY*/) { state->color_range = GF_TRUE; state->config->chroma_subsampling_x = GF_FALSE; state->config->chroma_subsampling_y = GF_FALSE; } else { state->config->chroma_subsampling_x = GF_FALSE; state->config->chroma_subsampling_y = GF_FALSE; state->color_range = gf_bs_read_int_log(bs, 1, "color_range"); if (state->config->seq_profile == 0) { state->config->chroma_subsampling_x = GF_TRUE; state->config->chroma_subsampling_y = GF_TRUE; } else if (state->config->seq_profile == 1) { state->config->chroma_subsampling_x = GF_FALSE; state->config->chroma_subsampling_y = GF_FALSE; } else { if (state->bit_depth == 12) { state->config->chroma_subsampling_x = gf_bs_read_int_log(bs, 1, "chroma_subsampling_x"); if (state->config->chroma_subsampling_x) state->config->chroma_subsampling_y = gf_bs_read_int_log(bs, 1, "chroma_subsampling_y"); else state->config->chroma_subsampling_y = GF_FALSE; } else { state->config->chroma_subsampling_x = GF_TRUE; state->config->chroma_subsampling_y = GF_FALSE; } } if (state->config->chroma_subsampling_x && state->config->chroma_subsampling_y) { state->config->chroma_sample_position = gf_bs_read_int_log(bs, 2, "chroma_sample_position"); } } state->separate_uv_delta_q = gf_bs_read_int_log(bs, 1, "separate_uv_delta_q"); } static u32 av1_uvlc(GF_BitStream *bs, const char *fname) { u32 res; u8 leadingZeros = 0; while (1) { Bool done = gf_bs_read_int(bs, 1); if (done) break; leadingZeros++; } if (leadingZeros >= 32) { return 0xFFFFFFFF; } res = gf_bs_read_int(bs, leadingZeros) + (1 << leadingZeros) - 1; gf_bs_log(bs, 2*leadingZeros, fname, res); return res; } static void timing_info(GF_BitStream *bs, AV1State *state) { u32 time_scale = 0; u32 num_units_in_display_tick = gf_bs_read_int_log(bs, 32, "num_units_in_display_tick"); if (num_units_in_display_tick == 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] num_units_in_display_tick must be greater than 0.\n")); } time_scale = gf_bs_read_int_log(bs, 32, "time_scale"); if (time_scale == 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] time_scale must be greater than 0.\n")); } state->equal_picture_interval = gf_bs_read_int_log(bs, 1, "equal_picture_interval"); if (state->equal_picture_interval) { u32 num_ticks_per_picture_minus_1 = av1_uvlc(bs, "num_ticks_per_picture_minus_1"); state->tb_num = time_scale; state->tb_den = (num_ticks_per_picture_minus_1 + 1)*num_units_in_display_tick; } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] VFR not supported.\n")); //TODO: upload num_units_in_display_tick (eq. 
to the POC in H264), compute delta between frames, set it as dts_inc in gf_import_aom_av1() } } static void decoder_model_info(AV1State *state, GF_BitStream *bs) { state->buffer_delay_length = 1 + gf_bs_read_int_log(bs, 5, "buffer_delay_length_minus1"); gf_bs_read_int_log(bs, 32, "num_units_in_decoding_tick"); state->buffer_removal_time_length = gf_bs_read_int_log(bs, 5, "buffer_removal_time_length"); state->frame_presentation_time_length = 1 + gf_bs_read_int_log(bs, 5, "frame_presentation_time_length_minus1"); } static void operating_parameters_info(GF_BitStream *bs, const u8 idx, const u8 buffer_delay_length_minus_1) { const u8 n = buffer_delay_length_minus_1 + 1; gf_bs_read_int_log(bs, n, "decoder_buffer_delay"); gf_bs_read_int_log(bs, n, "encoder_buffer_delay"); gf_bs_read_int_log(bs, 1, "low_delay_mode_flag"); } static void av1_parse_sequence_header_obu(GF_BitStream *bs, AV1State *state) { u8 buffer_delay_length_minus_1 = 0; state->frame_state.seen_seq_header = GF_TRUE; state->config->seq_profile = gf_bs_read_int_log(bs, 3, "seq_profile"); state->still_picture = gf_bs_read_int_log(bs, 1, "still_picture"); state->reduced_still_picture_header = gf_bs_read_int_log(bs, 1, "reduced_still_picture_header"); if (state->reduced_still_picture_header) { //timing_info_present_flag = GF_FALSE; //initial_display_delay_present_flag = GF_FALSE; state->operating_points_count = 1; state->config->seq_level_idx_0 = gf_bs_read_int_log(bs, 5, "seq_level_idx_0"); } else { u8 i = 0; Bool initial_display_delay_present_flag; Bool timing_info_present_flag = gf_bs_read_int_log(bs, 1, "timing_info_present_flag"); if (timing_info_present_flag) { timing_info(bs, state); state->decoder_model_info_present_flag = gf_bs_read_int_log(bs, 1, "decoder_model_info_present_flag"); if (state->decoder_model_info_present_flag) { decoder_model_info(state, bs); } } else { state->decoder_model_info_present_flag = GF_FALSE; } initial_display_delay_present_flag = gf_bs_read_int_log(bs, 1, "initial_display_delay_present_flag"); state->operating_points_count = 1 + gf_bs_read_int_log(bs, 5, "operating_points_count_minus1"); for (i = 0; i < state->operating_points_count; i++) { u8 seq_level_idx_i, seq_tier = 0; state->operating_point_idc[i] = gf_bs_read_int_log_idx(bs, 12, "operating_point_idc", i); seq_level_idx_i = gf_bs_read_int_log_idx(bs, 5, "seq_level_idx", i); if (i == 0) state->config->seq_level_idx_0 = seq_level_idx_i; if (seq_level_idx_i > 7) { seq_tier = gf_bs_read_int_log_idx(bs, 1, "seq_tier", i); } if (i == 0) state->config->seq_tier_0 = seq_tier; if (state->decoder_model_info_present_flag) { state->decoder_model_present_for_this_op[i] = gf_bs_read_int_log_idx(bs, 1, "decoder_model_present_for_this_op", i); if (state->decoder_model_present_for_this_op[i]) { operating_parameters_info(bs, i, buffer_delay_length_minus_1); } } else { state->decoder_model_present_for_this_op[i] = 0; } if (initial_display_delay_present_flag) { if (gf_bs_read_int_log_idx(bs, 1, "initial_display_delay_present_for_this_op", i) ) { gf_bs_read_int_log_idx(bs, 4, "initial_display_delay_minus1", i); } } } } //operatingPoint = av1_choose_operating_point(bs); state->OperatingPointIdc = 0;//TODO: operating_point_idc[operatingPoint]; state->frame_width_bits_minus_1 = gf_bs_read_int_log(bs, 4, "frame_width_bits_minus1"); state->frame_height_bits_minus_1 = gf_bs_read_int_log(bs, 4, "frame_height_bits_minus1"); state->width = gf_bs_read_int_log(bs, state->frame_width_bits_minus_1 + 1, "width_minus1") + 1; state->height = gf_bs_read_int_log(bs, 
state->frame_height_bits_minus_1 + 1, "height_minus1") + 1; state->sequence_width = state->width; state->sequence_height = state->height; state->frame_id_numbers_present_flag = GF_FALSE; if (!state->reduced_still_picture_header) { state->frame_id_numbers_present_flag = gf_bs_read_int_log(bs, 1, "frame_id_numbers_present_flag"); } if (state->frame_id_numbers_present_flag) { state->delta_frame_id_length_minus_2 = gf_bs_read_int_log(bs, 4, "delta_frame_id_length_minus2"); state->additional_frame_id_length_minus_1 = gf_bs_read_int_log(bs, 3, "additional_frame_id_length_minus1"); } state->use_128x128_superblock = gf_bs_read_int_log(bs, 1, "use_128x128_superblock"); gf_bs_read_int_log(bs, 1, "enable_filter_intra"); gf_bs_read_int_log(bs, 1, "enable_intra_edge_filter"); if (state->reduced_still_picture_header) { /*enable_interintra_compound = 0; enable_masked_compound = 0; enable_dual_filter = 0; enable_jnt_comp = 0; enable_ref_frame_mvs = 0;*/ state->enable_warped_motion = 0; state->enable_order_hint = GF_FALSE; state->OrderHintBits = 0; state->seq_force_integer_mv = 2/*SELECT_INTEGER_MV*/; state->seq_force_screen_content_tools = 2/*SELECT_SCREEN_CONTENT_TOOLS*/; } else { Bool seq_choose_screen_content_tools; gf_bs_read_int_log(bs, 1, "enable_interintra_compound"); gf_bs_read_int_log(bs, 1, "enable_masked_compound"); state->enable_warped_motion = gf_bs_read_int_log(bs, 1, "enable_warped_motion"); gf_bs_read_int_log(bs, 1, "enable_dual_filter"); state->enable_order_hint = gf_bs_read_int_log(bs, 1, "enable_order_hint"); if (state->enable_order_hint) { gf_bs_read_int_log(bs, 1, "enable_jnt_comp"); state->enable_ref_frame_mvs = gf_bs_read_int_log(bs, 1, "enable_ref_frame_mvs"); } else { /*enable_jnt_comp = 0*/; /*enable_ref_frame_mvs = 0*/; } seq_choose_screen_content_tools = gf_bs_read_int_log(bs, 1, "seq_choose_screen_content_tools"); state->seq_force_screen_content_tools = 0; if (seq_choose_screen_content_tools) { state->seq_force_screen_content_tools = 2/*SELECT_SCREEN_CONTENT_TOOLS*/; } else { state->seq_force_screen_content_tools = gf_bs_read_int_log(bs, 1, "seq_force_screen_content_tools"); } state->seq_force_integer_mv = 0; if (state->seq_force_screen_content_tools > 0) { const Bool seq_choose_integer_mv = gf_bs_read_int_log(bs, 1, "seq_choose_integer_mv"); if (seq_choose_integer_mv) { state->seq_force_integer_mv = 2/*SELECT_INTEGER_MV*/; } else { state->seq_force_integer_mv = gf_bs_read_int_log(bs, 1, "seq_force_integer_mv"); } } else { state->seq_force_integer_mv = 2/*SELECT_INTEGER_MV*/; } if (state->enable_order_hint) { u8 order_hint_bits_minus_1 = gf_bs_read_int_log(bs, 3, "order_hint_bits_minus1"); state->OrderHintBits = order_hint_bits_minus_1 + 1; } else { state->OrderHintBits = 0; } } state->enable_superres = gf_bs_read_int_log(bs, 1, "enable_superres"); state->enable_cdef = gf_bs_read_int_log(bs, 1, "enable_cdef"); state->enable_restoration = gf_bs_read_int_log(bs, 1, "enable_restoration"); av1_color_config(bs, state); state->film_grain_params_present = gf_bs_read_int_log(bs, 1, "film_grain_params_present"); } #define IVF_FILE_HEADER_SIZE 32 Bool gf_media_probe_ivf(GF_BitStream *bs) { u32 dw = 0; if (gf_bs_available(bs) < IVF_FILE_HEADER_SIZE) return GF_FALSE; dw = gf_bs_peek_bits(bs, 32, 0); if (dw != GF_4CC('D', 'K', 'I', 'F')) { return GF_FALSE; } return GF_TRUE; } GF_Err gf_media_parse_ivf_file_header(GF_BitStream *bs, u32 *width, u32 *height, u32 *codec_fourcc, u32 *timebase_num, u32 *timebase_den, u32 *num_frames) { u32 dw = 0; if (!width || !height || !codec_fourcc || 
!timebase_den || !timebase_num || !num_frames) { assert(0); return GF_BAD_PARAM; } if (gf_bs_available(bs) < IVF_FILE_HEADER_SIZE) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Not enough bytes available ("LLU").\n", gf_bs_available(bs))); return GF_NON_COMPLIANT_BITSTREAM; } dw = gf_bs_read_u32(bs); if (dw != GF_4CC('D', 'K', 'I', 'F')) { GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[IVF] Invalid signature\n")); return GF_NON_COMPLIANT_BITSTREAM; } dw = gf_bs_read_u16_le(bs); if (dw != 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Wrong IVF version. 0 expected, got %u\n", dw)); return GF_NON_COMPLIANT_BITSTREAM; } dw = gf_bs_read_u16_le(bs); //length of header in bytes if (dw != IVF_FILE_HEADER_SIZE) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Wrong IVF header length. Expected 32 bytes, got %u\n", dw)); return GF_NON_COMPLIANT_BITSTREAM; } *codec_fourcc = gf_bs_read_u32(bs); *width = gf_bs_read_u16_le(bs); *height = gf_bs_read_u16_le(bs); *timebase_num = gf_bs_read_u32_le(bs); *timebase_den = gf_bs_read_u32_le(bs); *num_frames = gf_bs_read_u32_le(bs); gf_bs_read_u32_le(bs); //skip unused return GF_OK; } GF_Err gf_media_parse_ivf_frame_header(GF_BitStream *bs, u64 *frame_size, u64 *pts) { if (!frame_size) return GF_BAD_PARAM; if (gf_bs_available(bs) < 12) return GF_BUFFER_TOO_SMALL; *frame_size = gf_bs_read_u32_le(bs); if (*frame_size > 256 * 1024 * 1024) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Wrong frame size %u\n", *frame_size)); *frame_size = 0; return GF_NON_COMPLIANT_BITSTREAM; } *pts = gf_bs_read_u64_le(bs); return GF_OK; } GF_Err gf_media_vp9_parse_superframe(GF_BitStream *bs, u64 ivf_frame_size, u32 *num_frames_in_superframe, u32 frame_sizes[VP9_MAX_FRAMES_IN_SUPERFRAME], u32 *superframe_index_size) { u32 byte, bytes_per_framesize; u64 pos = gf_bs_get_position(bs), i = 0; GF_Err e; assert(bs && num_frames_in_superframe); /*initialize like there is no superframe*/ memset(frame_sizes, 0, VP9_MAX_FRAMES_IN_SUPERFRAME * sizeof(frame_sizes[0])); *num_frames_in_superframe = 1; frame_sizes[0] = (u32)ivf_frame_size; *superframe_index_size = 0; e = gf_bs_seek(bs, pos + ivf_frame_size - 1); if (e) return e; byte = gf_bs_read_u8(bs); if ((byte & 0xe0) != 0xc0) goto exit; /*no superframe*/ bytes_per_framesize = 1 + ((byte & 0x18) >> 3); *num_frames_in_superframe = (u32)(1 + (byte & 0x7)); /*superframe_index()*/ *superframe_index_size = 2 + bytes_per_framesize * *num_frames_in_superframe; gf_bs_seek(bs, pos + ivf_frame_size - *superframe_index_size); byte = gf_bs_read_u8(bs); if ((byte & 0xe0) != 0xc0) goto exit; /*no superframe*/ frame_sizes[0] = 0; for (i = 0; i < *num_frames_in_superframe; ++i) { gf_bs_read_data(bs, (char*)(frame_sizes + i), bytes_per_framesize); } exit: gf_bs_seek(bs, pos); return e; } static Bool vp9_frame_sync_code(GF_BitStream *bs) { u8 val = gf_bs_read_int_log(bs, 8, "syncbyte1"); if (val != 0x49) return GF_FALSE; val = gf_bs_read_int_log(bs, 8, "syncbyte2"); if (val != 0x83) return GF_FALSE; val = gf_bs_read_int_log(bs, 8, "syncbyte3"); if (val != 0x42) return GF_FALSE; return GF_TRUE; } typedef enum { CS_UNKNOWN = 0, CS_BT_601 = 1, CS_BT_709 = 2, CS_SMPTE_170 = 3, CS_SMPTE_240 = 4, CS_BT_2020 = 5, CS_RESERVED = 6, CS_RGB = 7, } VP9_color_space; static const int VP9_CS_to_23001_8_colour_primaries[] = { -1/*undefined*/, 5, 1, 6, 7, 9, -1/*reserved*/, 1 }; static const int VP9_CS_to_23001_8_transfer_characteristics[] = { -1/*undefined*/, 5, 1, 6, 7, 9, -1/*reserved*/, 13 }; static const int VP9_CS_to_23001_8_matrix_coefficients[] = { -1/*undefined*/, 6, 1, -1, -1, 9, 
-1/*reserved*/, 0 }; static GF_Err vp9_color_config(GF_BitStream *bs, GF_VPConfig *vp9_cfg) { VP9_color_space color_space; if (vp9_cfg->profile >= 2) { Bool ten_or_twelve_bit = gf_bs_read_int_log(bs, 1, "ten_or_twelve_bit"); vp9_cfg->bit_depth = ten_or_twelve_bit ? 12 : 10; } else { vp9_cfg->bit_depth = 8; } color_space = gf_bs_read_int_log(bs, 3, "color_space"); vp9_cfg->colour_primaries = VP9_CS_to_23001_8_colour_primaries[color_space]; vp9_cfg->transfer_characteristics = VP9_CS_to_23001_8_transfer_characteristics[color_space]; vp9_cfg->matrix_coefficients = VP9_CS_to_23001_8_matrix_coefficients[color_space]; if (color_space != CS_RGB) { vp9_cfg->video_fullRange_flag = gf_bs_read_int_log(bs, 1, "video_fullRange_flag"); if (vp9_cfg->profile == 1 || vp9_cfg->profile == 3) { u8 subsampling_x, subsampling_y, subsampling_xy_to_chroma_subsampling[2][2] = { {3, 0}, {2, 0} }; subsampling_x = gf_bs_read_int_log(bs, 1, "subsampling_x"); subsampling_y = gf_bs_read_int_log(bs, 1, "subsampling_x"); vp9_cfg->chroma_subsampling = subsampling_xy_to_chroma_subsampling[subsampling_x][subsampling_y]; Bool reserved_zero = gf_bs_read_int_log(bs, 1, "reserved_zero"); if (reserved_zero) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VP9] color config reserved zero (1) is not zero.\n")); return GF_NON_COMPLIANT_BITSTREAM; } } else { vp9_cfg->chroma_subsampling = 0; } } else { vp9_cfg->video_fullRange_flag = GF_TRUE; if (vp9_cfg->profile == 1 || vp9_cfg->profile == 3) { vp9_cfg->chroma_subsampling = 3; Bool reserved_zero = gf_bs_read_int_log(bs, 1, "reserved_zero"); if (reserved_zero) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VP9] color config reserved zero (2) is not zero.\n")); return GF_NON_COMPLIANT_BITSTREAM; } } } return GF_OK; } static void vp9_compute_image_size(int FrameWidth, int FrameHeight, int *Sb64Cols, int *Sb64Rows) { int MiCols = (FrameWidth + 7) >> 3; int MiRows = (FrameHeight + 7) >> 3; *Sb64Cols = (MiCols + 7) >> 3; *Sb64Rows = (MiRows + 7) >> 3; } static void vp9_frame_size(GF_BitStream *bs, int *FrameWidth, int *FrameHeight, int *Sb64Cols, int *Sb64Rows) { int frame_width_minus_1 = gf_bs_read_int_log(bs, 16, "frame_width_minus_1"); int frame_height_minus_1 = gf_bs_read_int_log(bs, 16, "frame_height_minus_1"); if (frame_width_minus_1 + 1 != *FrameWidth || frame_height_minus_1 + 1 != *FrameHeight) { if (*FrameWidth || *FrameHeight) GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[VP9] inconsistent frame dimensions: previous was %dx%d, new one is %dx%d.\n", *FrameWidth, *FrameHeight, frame_width_minus_1 + 1, frame_height_minus_1 + 1)); } *FrameWidth = frame_width_minus_1 + 1; *FrameHeight = frame_height_minus_1 + 1; vp9_compute_image_size(*FrameWidth, *FrameHeight, Sb64Cols, Sb64Rows); } static void vp9_render_size(GF_BitStream *bs, int FrameWidth, int FrameHeight, int *renderWidth, int *renderHeight) { Bool render_and_frame_size_different = gf_bs_read_int_log(bs, 1, "render_and_frame_size_different"); if (render_and_frame_size_different == 1) { int render_width_minus_1 = gf_bs_read_int_log(bs, 16, "render_width_minus_1"); int render_height_minus_1 = gf_bs_read_int_log(bs, 16, "render_height_minus_1"); *renderWidth = render_width_minus_1 + 1; *renderHeight = render_height_minus_1 + 1; } else { *renderWidth = FrameWidth; *renderHeight = FrameHeight; } } static s64 vp9_s(GF_BitStream *bs, int n, const char *fname, u32 idx) { s64 value = gf_bs_read_int(bs, n); Bool sign = gf_bs_read_int(bs, 1); if (sign) value = -value; gf_bs_log_idx(bs, n+1, fname, value, idx, -1, -1); return value; } static void 
vp9_loop_filter_params(GF_BitStream *bs) { /*loop_filter_level = */gf_bs_read_int_log(bs, 6, "loop_filter_level"); /*loop_filter_sharpness = */gf_bs_read_int_log(bs, 3, "loop_filter_sharpness"); Bool loop_filter_delta_enabled = gf_bs_read_int_log(bs, 1, "loop_filter_delta_enabled"); if (loop_filter_delta_enabled == 1) { Bool loop_filter_delta_update = gf_bs_read_int_log(bs, 1, "loop_filter_delta_update"); if (loop_filter_delta_update == GF_TRUE) { int i; for (i = 0; i < 4; i++) { Bool update_ref_delta = gf_bs_read_int_log_idx(bs, 1, "update_ref_delta", i); if (update_ref_delta == GF_TRUE) vp9_s(bs, 6, "loop_filter_ref_deltas", i); } for (i = 0; i < 2; i++) { Bool update_mode_delta = gf_bs_read_int_log_idx(bs, 1, "update_mode_delta", i); if (update_mode_delta == GF_TRUE) vp9_s(bs, 6, "loop_filter_mode_deltas", i); } } } } static void vp9_quantization_params(GF_BitStream *bs) { /*base_q_idx = */gf_bs_read_int_log(bs, 8, "base_q_idx"); } #define VP9_MAX_SEGMENTS 8 #define VP9_SEG_LVL_MAX 4 static const int segmentation_feature_bits[VP9_SEG_LVL_MAX] = { 8, 6, 2, 0 }; static const int segmentation_feature_signed[VP9_SEG_LVL_MAX] = { 1, 1, 0, 0 }; #define VP9_MIN_TILE_WIDTH_B64 4 #define VP9_MAX_TILE_WIDTH_B64 64 static void vp9_segmentation_params(GF_BitStream *bs) { Bool segmentation_enabled = gf_bs_read_int_log(bs, 1, "segmentation_enabled"); if (segmentation_enabled == 1) { int i; Bool segmentation_update_map = gf_bs_read_int_log(bs, 1, "segmentation_update_map"); if (segmentation_update_map) { for (i = 0; i < 7; i++) /*segmentation_tree_probs[i] = read_prob()*/ /*segmentation_temporal_update = */gf_bs_read_int_log(bs, 1, "segmentation_temporal_update"); /*for (i = 0; i < 3; i++) segmentation_pred_prob[i] = segmentation_temporal_update ? read_prob() : 255*/ } Bool segmentation_update_data = gf_bs_read_int_log(bs, 1, "segmentation_update_data"); if (segmentation_update_data == 1) { /*segmentation_abs_or_delta_update =*/ gf_bs_read_int_log(bs, 1, "segmentation_abs_or_delta_update"); for (i = 0; i < VP9_MAX_SEGMENTS; i++) { int j; for (j = 0; j < VP9_SEG_LVL_MAX; j++) { /*feature_value = 0*/ Bool feature_enabled = gf_bs_read_int_log(bs, 1, "feature_enabled"); /*FeatureEnabled[i][j] = feature_enabled*/ if (feature_enabled) { int bits_to_read = segmentation_feature_bits[j]; /*feature_value =*/ gf_bs_read_int_log(bs, bits_to_read, "feature_value"); if (segmentation_feature_signed[j] == 1) { /*Bool feature_sign = */gf_bs_read_int_log(bs, 1, "feature_sign"); /*if (feature_sign == 1) feature_value *= -1*/ } } /*FeatureData[i][j] = feature_value*/ } } } } } static int calc_min_log2_tile_cols(int Sb64Cols) { int minLog2 = 0; while ((VP9_MAX_TILE_WIDTH_B64 << minLog2) < Sb64Cols) minLog2++; return minLog2; } static int calc_max_log2_tile_cols(int Sb64Cols) { int maxLog2 = 1; while ((Sb64Cols >> maxLog2) >= VP9_MIN_TILE_WIDTH_B64) maxLog2++; return maxLog2 - 1; } static void vp9_tile_info(GF_BitStream *bs, int Sb64Cols) { Bool tile_rows_log2; int minLog2TileCols = calc_min_log2_tile_cols(Sb64Cols); int maxLog2TileCols = calc_max_log2_tile_cols(Sb64Cols); int tile_cols_log2 = minLog2TileCols; while (tile_cols_log2 < maxLog2TileCols) { Bool increment_tile_cols_log2 = gf_bs_read_int_log(bs, 1, "increment_tile_cols_log2"); if (increment_tile_cols_log2) tile_cols_log2++; else break; } tile_rows_log2 = gf_bs_read_int_log(bs, 1, "tile_rows_log2"); if (tile_rows_log2) { /*Bool increment_tile_rows_log2 = */gf_bs_read_int_log(bs, 1, "increment_tile_rows_log2"); //tile_rows_log2 += increment_tile_rows_log2; } } 
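/* Illustrative sketch, not part of the original GPAC code: shows how the helpers
 * above map a frame width to the tile_cols_log2 range that vp9_tile_info() will
 * accept. The AV_PARSERS_EXAMPLES guard is hypothetical and only keeps this
 * sketch out of regular builds. */
#ifdef AV_PARSERS_EXAMPLES
static void vp9_example_tile_cols_log2_range(void)
{
	int Sb64Cols = 0, Sb64Rows = 0;
	int minLog2, maxLog2;

	/* 1920x1080: MiCols = (1920+7)>>3 = 240, Sb64Cols = (240+7)>>3 = 30 */
	vp9_compute_image_size(1920, 1080, &Sb64Cols, &Sb64Rows);

	/* one tile of max width (64 SBs) already covers 30 columns, so the minimum is 0 */
	minLog2 = calc_min_log2_tile_cols(Sb64Cols);
	/* 30>>1 and 30>>2 are still >= VP9_MIN_TILE_WIDTH_B64 (4), 30>>3 is not, so the maximum is 2 */
	maxLog2 = calc_max_log2_tile_cols(Sb64Cols);

	/* vp9_tile_info() starts at minLog2 and reads increment_tile_cols_log2 bits
	 * until the encoder stops incrementing or maxLog2 is reached */
	(void)minLog2;
	(void)maxLog2;
	(void)Sb64Rows;
}
#endif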
static void vp9_frame_size_with_refs(GF_BitStream *bs, u8 refresh_frame_flags, u8 * ref_frame_idx, int * RefFrameWidth, int *RefFrameHeight, int *FrameWidth, int *FrameHeight, int *RenderWidth, int *RenderHeight, int *Sb64Cols, int *Sb64Rows) { Bool found_ref; int i; for (i = 0; i < 3; i++) { found_ref = gf_bs_read_int_log(bs, 1, "found_ref"); if (found_ref) { *FrameWidth = RefFrameWidth [ref_frame_idx[i]]; *FrameHeight = RefFrameHeight[ref_frame_idx[i]]; break; } } if (found_ref == 0) { vp9_frame_size(bs, FrameWidth, FrameHeight, Sb64Cols, Sb64Rows); } else { vp9_compute_image_size(*FrameWidth, *FrameHeight, Sb64Cols, Sb64Rows); } vp9_render_size(bs, *FrameWidth, *FrameHeight, RenderWidth, RenderHeight); } static void vp9_read_interpolation_filter(GF_BitStream *bs) { Bool is_filter_switchable = gf_bs_read_int_log(bs, 1, "is_filter_switchable"); if (!is_filter_switchable) { /*raw_interpolation_filter = */gf_bs_read_int_log(bs, 2, "raw_interpolation_filter"); } } #define VP9_KEY_FRAME 0 GF_Err gf_media_vp9_parse_sample(GF_BitStream *bs, GF_VPConfig *vp9_cfg, Bool *key_frame, u32 *FrameWidth, u32 *FrameHeight, u32 *renderWidth, u32 *renderHeight) { Bool FrameIsIntra = GF_FALSE, profile_low_bit, profile_high_bit, show_existing_frame = GF_FALSE, frame_type = GF_FALSE, show_frame = GF_FALSE, error_resilient_mode = GF_FALSE; /*u8 frame_context_idx = 0, reset_frame_context = 0, frame_marker = 0*/; int Sb64Cols = 0, Sb64Rows = 0, i; u8 refresh_frame_flags = 0; assert(bs && key_frame); /*uncompressed header*/ /*frame_marker = */gf_bs_read_int_log(bs, 2, "frame_marker"); profile_low_bit = gf_bs_read_int_log(bs, 1, "profile_low_bit"); profile_high_bit = gf_bs_read_int_log(bs, 1, "profile_high_bit"); vp9_cfg->profile = (profile_high_bit << 1) + profile_low_bit; if (vp9_cfg->profile == 3) { Bool reserved_zero = gf_bs_read_int_log(bs, 1, "reserved_zero"); if (reserved_zero) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VP9] uncompressed header reserved zero is not zero.\n")); return GF_NON_COMPLIANT_BITSTREAM; } } show_existing_frame = gf_bs_read_int_log(bs, 1, "show_existing_frame"); if (show_existing_frame == GF_TRUE) { /*frame_to_show_map_idx = */gf_bs_read_int_log(bs, 3, "frame_to_show_map_idx"); return GF_OK; } frame_type = gf_bs_read_int_log(bs, 1, "frame_type"); show_frame = gf_bs_read_int_log(bs, 1, "show_frame"); error_resilient_mode = gf_bs_read_int_log(bs, 1, "error_resilient_mode"); if (frame_type == VP9_KEY_FRAME) { if (!vp9_frame_sync_code(bs)) return GF_NON_COMPLIANT_BITSTREAM; if (vp9_color_config(bs, vp9_cfg) != GF_OK) return GF_NON_COMPLIANT_BITSTREAM; vp9_frame_size(bs, FrameWidth, FrameHeight, &Sb64Cols, &Sb64Rows); vp9_render_size(bs, *FrameWidth, *FrameHeight, renderWidth, renderHeight); refresh_frame_flags = 0xFF; *key_frame = GF_TRUE; FrameIsIntra = GF_TRUE; } else { Bool intra_only = GF_FALSE; *key_frame = GF_FALSE; if (show_frame == GF_FALSE) { intra_only = gf_bs_read_int_log(bs, 1, "intra_only"); } FrameIsIntra = intra_only; if (error_resilient_mode == GF_FALSE) { /*reset_frame_context = */gf_bs_read_int_log(bs, 2, "reset_frame_context"); } if (intra_only == GF_TRUE) { if (!vp9_frame_sync_code(bs)) return GF_NON_COMPLIANT_BITSTREAM; if (vp9_cfg->profile > 0) { if (vp9_color_config(bs, vp9_cfg) != GF_OK) return GF_NON_COMPLIANT_BITSTREAM; } else { u8 color_space = CS_BT_601; vp9_cfg->colour_primaries = VP9_CS_to_23001_8_colour_primaries[color_space]; vp9_cfg->transfer_characteristics = VP9_CS_to_23001_8_transfer_characteristics[color_space]; vp9_cfg->matrix_coefficients = 
VP9_CS_to_23001_8_matrix_coefficients[color_space]; vp9_cfg->chroma_subsampling = 0; vp9_cfg->bit_depth = 8; } refresh_frame_flags = gf_bs_read_int_log(bs, 8, "refresh_frame_flags"); vp9_frame_size(bs, FrameWidth, FrameHeight, &Sb64Cols, &Sb64Rows); vp9_render_size(bs, *FrameWidth, *FrameHeight, renderWidth, renderHeight); } else { refresh_frame_flags = gf_bs_read_int_log(bs, 8, "refresh_frame_flags"); u8 ref_frame_idx[3]; for (i = 0; i < 3; i++) { ref_frame_idx[i] = gf_bs_read_int_log_idx(bs, 3, "ref_frame_idx", i); /*ref_frame_sign_bias[LAST_FRAME + i] = */gf_bs_read_int_log_idx(bs, 1, "ref_frame_sign_bias", i); } vp9_frame_size_with_refs(bs, refresh_frame_flags, ref_frame_idx, vp9_cfg->RefFrameWidth, vp9_cfg->RefFrameHeight, FrameWidth, FrameHeight, renderWidth, renderHeight, &Sb64Cols, &Sb64Rows); /*allow_high_precision_mv = */gf_bs_read_int_log(bs, 1, "allow_high_precision_mv"); vp9_read_interpolation_filter(bs); } } if (error_resilient_mode == 0) { /*refresh_frame_context = */gf_bs_read_int_log(bs, 1, "refresh_frame_context"); /*frame_parallel_decoding_mode = */gf_bs_read_int_log(bs, 1, "frame_parallel_decoding_mode"); } /*frame_context_idx = */gf_bs_read_int_log(bs, 2, "frame_context_idx"); if (FrameIsIntra || error_resilient_mode) { /*setup_past_independence + save_probs ...*/ //frame_context_idx = 0; } vp9_loop_filter_params(bs); vp9_quantization_params(bs); vp9_segmentation_params(bs); vp9_tile_info(bs, Sb64Cols); /*header_size_in_bytes = */gf_bs_read_int_log(bs, 16, "header_size_in_bytes"); /*Reference frame update process (8.10 - partial)*/ for (i = 0; i < VP9_NUM_REF_FRAMES; i++) { if ((refresh_frame_flags >> i) & 1) { vp9_cfg->RefFrameWidth[i] = *FrameWidth; vp9_cfg->RefFrameHeight[i] = *FrameHeight; } } return GF_OK; } GF_Err gf_av1_parse_obu_header(GF_BitStream *bs, ObuType *obu_type, Bool *obu_extension_flag, Bool *obu_has_size_field, u8 *temporal_id, u8 *spatial_id) { Bool forbidden = gf_bs_read_int(bs, 1); if (forbidden) { return GF_NON_COMPLIANT_BITSTREAM; } *obu_type = gf_bs_read_int(bs, 4); *obu_extension_flag = gf_bs_read_int(bs, 1); *obu_has_size_field = gf_bs_read_int(bs, 1); if (gf_bs_read_int(bs, 1) /*obu_reserved_1bit*/) { return GF_NON_COMPLIANT_BITSTREAM; } if (*obu_extension_flag) { *temporal_id = gf_bs_read_int(bs, 3); *spatial_id = gf_bs_read_int(bs, 2); /*extension_header_reserved_3bits = */gf_bs_read_int(bs, 3); } return GF_OK; } #endif // GPAC_DISABLE_AV_PARSERS GF_EXPORT const char *gf_av1_get_obu_name(ObuType obu_type) { switch (obu_type) { case OBU_SEQUENCE_HEADER: return "seq_header"; case OBU_TEMPORAL_DELIMITER: return "delimiter"; case OBU_FRAME_HEADER: return "frame_header"; case OBU_TILE_GROUP: return "tile_group"; case OBU_METADATA: return "metadata"; case OBU_FRAME: return "frame"; case OBU_REDUNDANT_FRAME_HEADER: return "redundant_frame_header"; case OBU_TILE_LIST: return "tile_list"; case OBU_PADDING: return "padding"; case OBU_RESERVED_0: case OBU_RESERVED_9: case OBU_RESERVED_10: case OBU_RESERVED_11: case OBU_RESERVED_12: case OBU_RESERVED_13: case OBU_RESERVED_14: return "reserved"; default: return "unknown"; } } Bool av1_is_obu_header(ObuType obu_type) { switch (obu_type) { case OBU_SEQUENCE_HEADER: case OBU_METADATA: // TODO add check based on the metadata type return GF_TRUE; default: return GF_FALSE; } } #ifndef GPAC_DISABLE_AV_PARSERS static Bool av1_is_obu_frame(AV1State *state, ObuType obu_type) { switch (obu_type) { case OBU_PADDING: case OBU_REDUNDANT_FRAME_HEADER: return GF_FALSE; case OBU_TEMPORAL_DELIMITER: return 
state->keep_temporal_delim ? GF_TRUE : GF_FALSE; default: return GF_TRUE; } } u64 gf_av1_leb128_read(GF_BitStream *bs, u8 *opt_Leb128Bytes) { u64 value = 0; u8 Leb128Bytes = 0, i = 0; for (i = 0; i < 8; i++) { u8 leb128_byte = gf_bs_read_u8(bs); value |= ( ((u64) (leb128_byte & 0x7f)) << (i * 7)); Leb128Bytes += 1; if (!(leb128_byte & 0x80)) { break; } } if (opt_Leb128Bytes) { *opt_Leb128Bytes = Leb128Bytes; } return value; } u32 gf_av1_leb128_size(u64 value) { u32 gf_av1_leb128_size = 0; do { ++gf_av1_leb128_size; } while ((value >>= 7) != 0); return gf_av1_leb128_size; } u64 gf_av1_leb128_write(GF_BitStream *bs, u64 value) { u32 i, leb_size = gf_av1_leb128_size(value); for (i = 0; i < leb_size; ++i) { u8 byte = value & 0x7f; value >>= 7; if (value != 0) byte |= 0x80; //more bytes follow gf_bs_write_u8(bs, byte); } return leb_size; } #define OBU_BLOCK_SIZE 4096 static void av1_add_obu_internal(GF_BitStream *bs, u64 pos, u64 obu_length, ObuType obu_type, GF_List **obu_list, AV1State *state) { char block[OBU_BLOCK_SIZE]; Bool has_size_field = 0, obu_extension_flag = 0; u8 temporal_id, spatial_id; GF_AV1_OBUArrayEntry *a = NULL; if (state && state->mem_mode) { if (!state->bs) state->bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); else gf_bs_reassign_buffer(state->bs, state->frame_obus, state->frame_obus_alloc); } else { GF_SAFEALLOC(a, GF_AV1_OBUArrayEntry); if (!a) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[AV1] Failed to allocate OBU\n")); return; } } gf_bs_seek(bs, pos); gf_av1_parse_obu_header(bs, &obu_type, &obu_extension_flag, &has_size_field, &temporal_id, &spatial_id); gf_bs_seek(bs, pos); if (has_size_field) { if (a) { a->obu = gf_malloc((size_t)obu_length); gf_bs_read_data(bs, a->obu, (u32)obu_length); a->obu_length = obu_length; } else { u32 remain = (u32)obu_length; while (remain) { u32 block_size = OBU_BLOCK_SIZE; if (block_size > remain) block_size = remain; gf_bs_read_data(bs, block, block_size); gf_bs_write_data(state->bs, block, block_size); remain -= block_size; } return; } } else { u8 i, hdr_size = obu_extension_flag ? 
2 : 1; const u32 leb_size = (u32)gf_av1_leb128_size(obu_length); const u64 obu_size = obu_length - hdr_size; if (a) { a->obu = gf_malloc((size_t)obu_length + leb_size); a->obu_length = obu_length + leb_size; for (i = 0; i < hdr_size; ++i) { a->obu[i] = gf_bs_read_u8(bs); /*add size field flag*/ if (i == 0) a->obu[0] |= 0x02; } { u32 out_size = 0; u8 *output = NULL; GF_BitStream *bsLeb128 = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); /*write size field*/ gf_av1_leb128_write(bsLeb128, obu_size); assert(gf_bs_get_position(bsLeb128) == leb_size); gf_bs_get_content(bsLeb128, &output, &out_size); gf_bs_del(bsLeb128); memcpy(a->obu + hdr_size, output, out_size); gf_free(output); } gf_bs_read_data(bs, a->obu + hdr_size + leb_size, (u32)(obu_size)); assert(gf_bs_get_position(bs) == pos + obu_length); } else { u32 remain; for (i = 0; i < hdr_size; ++i) { u8 hdr_b = gf_bs_read_u8(bs); if (i == 0) hdr_b |= 0x02; /*add size field flag*/ gf_bs_write_u8(state->bs, hdr_b); } /*add size field */ gf_av1_leb128_write(state->bs, obu_size); remain = (u32)obu_length - hdr_size; while (remain) { u32 block_size = OBU_BLOCK_SIZE; if (block_size > remain) block_size = remain; gf_bs_read_data(bs, block, block_size); gf_bs_write_data(state->bs, block, block_size); remain -= block_size; } assert(gf_bs_get_position(bs) == pos + obu_length); return; } } if (!obu_list) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[AV1] internal error, no OBU list cannot add\n")); gf_free(a->obu); gf_free(a); return; } a->obu_type = obu_type; if (! *obu_list) *obu_list = gf_list_new(); gf_list_add(*obu_list, a); } static void av1_populate_state_from_obu(GF_BitStream *bs, u64 pos, u64 obu_length, ObuType obu_type, AV1State *state) { if (av1_is_obu_header(obu_type)) { av1_add_obu_internal(bs, pos, obu_length, obu_type, &state->frame_state.header_obus, NULL); } if (!state->skip_frames && av1_is_obu_frame(state, obu_type)) { if (!state->mem_mode) { av1_add_obu_internal(bs, pos, obu_length, obu_type, &state->frame_state.frame_obus, NULL); } else { av1_add_obu_internal(bs, pos, obu_length, obu_type, NULL, state); } } } GF_Err aom_av1_parse_temporal_unit_from_section5(GF_BitStream *bs, AV1State *state) { if (!state) return GF_BAD_PARAM; state->obu_type = -1; while (state->obu_type != OBU_TEMPORAL_DELIMITER) { GF_Err e; if (!gf_bs_available(bs)) return state->unframed ? 
GF_BUFFER_TOO_SMALL : GF_OK; u64 pos = gf_bs_get_position(bs), obu_length = 0; e = gf_av1_parse_obu(bs, &state->obu_type, &obu_length, NULL, state); if (e) return e; if (obu_length != gf_bs_get_position(bs) - pos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] OBU (Section 5) frame size "LLU" different from consumed bytes "LLU".\n", obu_length, gf_bs_get_position(bs) - pos)); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Section5 OBU detected (size "LLU")\n", obu_length)); av1_populate_state_from_obu(bs, pos, obu_length, state->obu_type, state); } return GF_OK; } Bool gf_media_aom_probe_annexb(GF_BitStream *bs) { Bool res = GF_TRUE; u64 init_pos = gf_bs_get_position(bs); u64 sz = gf_av1_leb128_read(bs, NULL); if (!sz) res = GF_FALSE; while (sz > 0) { u8 Leb128Bytes = 0; u64 frame_unit_size = gf_av1_leb128_read(bs, &Leb128Bytes); if (!frame_unit_size) { res = GF_FALSE; break; } if (sz < Leb128Bytes + frame_unit_size) { res = GF_FALSE; break; } sz -= Leb128Bytes + frame_unit_size; while (frame_unit_size > 0) { ObuType obu_type; u64 pos, obu_length = gf_av1_leb128_read(bs, &Leb128Bytes); if (frame_unit_size < Leb128Bytes + obu_length) { res = GF_FALSE; break; } pos = gf_bs_get_position(bs); frame_unit_size -= Leb128Bytes; u8 tid, sid; Bool extflag, has_size; GF_Err e = gf_av1_parse_obu_header(bs, &obu_type, &extflag, &has_size, &tid, &sid); if (e) { res = GF_FALSE; break; } if (has_size) { obu_length = (u32)gf_av1_leb128_read(bs, NULL); } else { if (obu_length >= 1 + extflag) { obu_length = obu_length - 1 - extflag; } else { res = GF_FALSE; break; } } u32 hdr_size = (u32)(gf_bs_get_position(bs) - pos); obu_length += hdr_size; if (frame_unit_size < obu_length) { res = GF_FALSE; break; } frame_unit_size -= obu_length; gf_bs_skip_bytes(bs, obu_length - hdr_size); } if (!res) break; } gf_bs_seek(bs, init_pos); return res; } GF_Err aom_av1_parse_temporal_unit_from_annexb(GF_BitStream *bs, AV1State *state) { GF_Err e; u64 tupos; u64 tusize, sz; if (!bs || !state) return GF_BAD_PARAM; state->bs_overread = GF_FALSE; tusize = sz = gf_av1_leb128_read(bs, NULL); tupos = gf_bs_get_position(bs); if (!sz) { GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[AV1] temporal unit size is 0, likely not annex B\n")); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Annex B temporal unit detected (size "LLU") ***** \n", sz)); while (sz > 0) { u8 Leb128Bytes = 0; u64 frame_unit_size = gf_av1_leb128_read(bs, &Leb128Bytes); if (state->bs_overread) { return GF_BUFFER_TOO_SMALL; } if (sz < Leb128Bytes + frame_unit_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Annex B sz("LLU") < Leb128Bytes("LLU") + frame_unit_size("LLU")\n", sz, Leb128Bytes, frame_unit_size)); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Annex B frame unit detected (size "LLU")\n", frame_unit_size)); sz -= Leb128Bytes + frame_unit_size; while (frame_unit_size > 0) { u64 pos, obu_length = gf_av1_leb128_read(bs, &Leb128Bytes); if (state->bs_overread) { return GF_BUFFER_TOO_SMALL; } if (frame_unit_size < Leb128Bytes + obu_length) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Annex B frame_unit_size("LLU") < Leb128Bytes("LLU") + obu_length("LLU")\n", frame_unit_size, Leb128Bytes, obu_length)); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Annex B OBU detected (size "LLU")\n", obu_length)); pos = gf_bs_get_position(bs); frame_unit_size -= Leb128Bytes; e = gf_av1_parse_obu(bs, &state->obu_type, &obu_length, 
NULL, state); if (e) return e; if (obu_length != gf_bs_get_position(bs) - pos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] Annex B frame size "LLU" different from consumed bytes "LLU".\n", obu_length, gf_bs_get_position(bs) - pos)); return GF_NON_COMPLIANT_BITSTREAM; } av1_populate_state_from_obu(bs, pos, obu_length, state->obu_type, state); if (frame_unit_size < obu_length) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Annex B frame_unit_size("LLU") < OBU size ("LLU")\n", frame_unit_size, obu_length)); return GF_NON_COMPLIANT_BITSTREAM; } frame_unit_size -= obu_length; } } assert(sz == 0); if (tusize != gf_bs_get_position(bs) - tupos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] Annex B TU size "LLU" different from consumed bytes "LLU".\n", tusize, gf_bs_get_position(bs) - tupos)); return GF_NON_COMPLIANT_BITSTREAM; } return GF_OK; } GF_Err aom_av1_parse_temporal_unit_from_ivf(GF_BitStream *bs, AV1State *state) { u64 frame_size, pts_ignored; GF_Err e; if (gf_bs_available(bs)<12) return GF_EOS; e = gf_media_parse_ivf_frame_header(bs, &frame_size, &pts_ignored); if (e) return e; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] IVF frame detected (size "LLU")\n", frame_size)); if (gf_bs_available(bs) < frame_size) return GF_EOS; while (frame_size > 0) { u64 obu_size = 0, pos = gf_bs_get_position(bs); e = gf_av1_parse_obu(bs, &state->obu_type, &obu_size, NULL, state); if (e != GF_OK) return e; if (obu_size != gf_bs_get_position(bs) - pos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] IVF frame size "LLU" different from consumed bytes "LLU".\n", obu_size, gf_bs_get_position(bs) - pos)); return GF_NON_COMPLIANT_BITSTREAM; } av1_populate_state_from_obu(bs, pos, obu_size, state->obu_type, state); frame_size -= obu_size; } return GF_OK; } #define AV1_NUM_REF_FRAMES 8 #define AV1_ALL_FRAMES ((1 << AV1_NUM_REF_FRAMES) - 1) #define AV1_SUPERRES_DENOM_MIN 9 #define AV1_SUPERRES_DENOM_BITS 3 #define AV1_SUPERRES_NUM 8 #define AV1_REFS_PER_FRAME 7 #define AV1_PRIMARY_REF_NONE 7 #define MAX_TILE_WIDTH 4096 #define MAX_TILE_AREA (4096 * 2304) static u32 aom_av1_tile_log2(u32 blkSize, u32 target) { u32 k; for (k = 0; (blkSize << k) < target; k++) { } return k; } static u64 aom_av1_le(GF_BitStream *bs, u32 n, const char *name) { u32 i = 0; u64 t = 0; for (i = 0; i < n; i++) { u8 byte = gf_bs_read_int(bs, 8); t += (byte << (i * 8)); } gf_bs_log(bs, n*8, name, t); return t; } static void av1_parse_tile_info(GF_BitStream *bs, AV1State *state) { u32 i; u32 MiCols = 2 * ((state->width + 7) >> 3); u32 MiRows = 2 * ((state->height + 7) >> 3); u32 sbCols = state->use_128x128_superblock ? ((MiCols + 31) >> 5) : ((MiCols + 15) >> 4); u32 sbRows = state->use_128x128_superblock ? ((MiRows + 31) >> 5) : ((MiRows + 15) >> 4); u32 sbShift = state->use_128x128_superblock ? 
5 : 4; u32 sbSize = sbShift + 2; u32 maxTileWidthSb = MAX_TILE_WIDTH >> sbSize; u32 maxTileAreaSb = MAX_TILE_AREA >> (2 * sbSize); u32 minLog2tileCols = aom_av1_tile_log2(maxTileWidthSb, sbCols); u32 maxLog2tileCols = aom_av1_tile_log2(1, MIN(sbCols, AV1_MAX_TILE_COLS)); u32 maxLog2tileRows = aom_av1_tile_log2(1, MIN(sbRows, AV1_MAX_TILE_ROWS)); u32 minLog2Tiles = MAX(minLog2tileCols, aom_av1_tile_log2(maxTileAreaSb, sbRows * sbCols)); Bool uniform_tile_spacing_flag = gf_bs_read_int_log(bs, 1, "uniform_tile_spacing_flag"); if (uniform_tile_spacing_flag) { u32 startSb, tileWidthSb, tileHeightSb, minLog2tileRows; state->tileColsLog2 = minLog2tileCols; while (state->tileColsLog2 < maxLog2tileCols) { Bool increment_tile_cols_log2 = gf_bs_read_int_log(bs, 1, "increment_tile_cols_log2"); if (increment_tile_cols_log2 == 1) state->tileColsLog2++; else break; } tileWidthSb = (sbCols + (1 << state->tileColsLog2) - 1) >> state->tileColsLog2; i = 0; for (startSb = 0; startSb < sbCols; startSb += tileWidthSb) { i += 1; } state->tileCols = i; minLog2tileRows = MAX((int)(minLog2Tiles - state->tileColsLog2), 0); state->tileRowsLog2 = minLog2tileRows; while (state->tileRowsLog2 < maxLog2tileRows) { Bool increment_tile_rows_log2 = gf_bs_read_int_log(bs, 1, "increment_tile_rows_log2"); if (increment_tile_rows_log2 == 1) state->tileRowsLog2++; else break; } tileHeightSb = (sbRows + (1 << state->tileRowsLog2) - 1) >> state->tileRowsLog2; i = 0; for (startSb = 0; startSb < sbRows; startSb += tileHeightSb) { i += 1; } state->tileRows = i; } else { u32 startSb, maxTileHeightSb, widestTileSb; widestTileSb = 0; startSb = 0; for (i = 0; startSb < sbCols; i++) { u32 maxWidth = MIN((int)(sbCols - startSb), maxTileWidthSb); u32 width_in_sbs_minus_1 = av1_read_ns(bs, maxWidth, "width_in_sbs_minus_1"); u32 sizeSb = width_in_sbs_minus_1 + 1; widestTileSb = MAX(sizeSb, widestTileSb); startSb += sizeSb; } if (!widestTileSb) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] widest tile is 0, broken bitstream\n")); return; } state->tileCols = i; state->tileColsLog2 = aom_av1_tile_log2(1, state->tileCols); if (minLog2Tiles > 0) maxTileAreaSb = (sbRows * sbCols) >> (minLog2Tiles + 1); else maxTileAreaSb = sbRows * sbCols; maxTileHeightSb = MAX(maxTileAreaSb / widestTileSb, 1); startSb = 0; for (i = 0; startSb < sbRows; i++) { u32 maxHeight = MIN((int)(sbRows - startSb), maxTileHeightSb); u32 height_in_sbs_minus_1 = av1_read_ns(bs, maxHeight, "height_in_sbs_minus_1"); u32 sizeSb = height_in_sbs_minus_1 + 1; startSb += sizeSb; } state->tileRows = i; state->tileRowsLog2 = aom_av1_tile_log2(1, state->tileRows); } if (state->tileColsLog2 > 0 || state->tileRowsLog2 > 0) { gf_bs_read_int_log(bs, state->tileRowsLog2 + state->tileColsLog2, "context_update_tile_id"); state->tile_size_bytes = gf_bs_read_int_log(bs, 2, "tile_size_bytes_minus1") + 1; } } static void superres_params(GF_BitStream *bs, AV1State *state) { u32 SuperresDenom; Bool use_superres; if (state->enable_superres) { use_superres = gf_bs_read_int_log(bs, 1, "use_superres"); } else { use_superres = GF_FALSE; } if (use_superres) { u8 coded_denom = gf_bs_read_int_log(bs, AV1_SUPERRES_DENOM_BITS, "coded_denom"); SuperresDenom = coded_denom + AV1_SUPERRES_DENOM_MIN; } else { SuperresDenom = AV1_SUPERRES_NUM; } state->UpscaledWidth = state->width; state->width = (state->UpscaledWidth * AV1_SUPERRES_NUM + (SuperresDenom / 2)) / SuperresDenom; } static void av1_frame_size(GF_BitStream *bs, AV1State *state, Bool frame_size_override_flag) { if (frame_size_override_flag) { u32 
frame_width_minus_1, frame_height_minus_1; u8 n = state->frame_width_bits_minus_1 + 1; frame_width_minus_1 = gf_bs_read_int_log(bs, n, "frame_width_minus_1"); n = state->frame_height_bits_minus_1 + 1; frame_height_minus_1 = gf_bs_read_int_log(bs, n, "frame_height_minus_1"); state->width = frame_width_minus_1 + 1; state->height = frame_height_minus_1 + 1; } else { state->width = state->sequence_width; state->height = state->sequence_height; } superres_params(bs, state); //compute_image_size(); //no bits } static void av1_render_size(GF_BitStream *bs) { Bool render_and_frame_size_different = gf_bs_read_int_log(bs, 1, "render_and_frame_size_different_flag"); if (render_and_frame_size_different == GF_TRUE) { gf_bs_read_int_log(bs, 16, "render_width_minus_1"); gf_bs_read_int_log(bs, 16, "render_height_minus_1"); //RenderWidth = render_width_minus_1 + 1; //RenderHeight = render_height_minus_1 + 1; } else { //RenderWidth = UpscaledWidth; //RenderHeight = FrameHeight; } } static void read_interpolation_filter(GF_BitStream *bs) { Bool is_filter_switchable = gf_bs_read_int_log(bs, 1, "is_filter_switchable"); if (!is_filter_switchable) { /*interpolation_filter =*/ gf_bs_read_int_log(bs, 2, "interpolation_filter"); } } static void frame_size_with_refs(GF_BitStream *bs, AV1State *state, Bool frame_size_override_flag, s8 *ref_frame_idx) { Bool found_ref = GF_FALSE; u32 i = 0; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { found_ref = gf_bs_read_int_log_idx(bs, 1, "found_ref", i); if (found_ref == 1) { state->UpscaledWidth = state->RefUpscaledWidth[ref_frame_idx[i]]; state->width = state->UpscaledWidth; state->height = state->RefFrameHeight[ref_frame_idx[i]]; break; } } if (found_ref == 0) { av1_frame_size(bs, state, frame_size_override_flag); av1_render_size(bs); } else { superres_params(bs, state); //compute_image_size(); } } static s32 av1_delta_q(GF_BitStream *bs, const char *name_flag, const char *name) { Bool delta_coded = gf_bs_read_int_log(bs, 1, name_flag); s32 delta_q = 0; if (delta_coded) { u32 signMask = 1 << (7 - 1); delta_q = gf_bs_read_int_log(bs, 7, name); if (delta_q & signMask) delta_q = delta_q - 2 * signMask; } return delta_q; } static u8 Segmentation_Feature_Bits[] = { 8,6,6,6,6,3,0,0 }; static u8 Segmentation_Feature_Signed[] = { 1, 1, 1, 1, 1, 0, 0, 0 }; static u8 av1_get_qindex(Bool ignoreDeltaQ, u32 segmentId, u32 base_q_idx, u32 delta_q_present, u32 CurrentQIndex, Bool segmentation_enabled, u8 *features_SEG_LVL_ALT_Q_enabled, s32 *features_SEG_LVL_ALT_Q) { //If seg_feature_active_idx( segmentId, SEG_LVL_ALT_Q ) is equal to 1 the following ordered steps apply: if (segmentation_enabled && features_SEG_LVL_ALT_Q_enabled[segmentId]) { //Set the variable data equal to FeatureData[ segmentId ][ SEG_LVL_ALT_Q ]. s32 data = features_SEG_LVL_ALT_Q[segmentId]; s32 qindex = base_q_idx + data; //If ignoreDeltaQ is equal to 0 and delta_q_present is equal to 1, set qindex equal to CurrentQIndex + data. if ((ignoreDeltaQ == 0) && (delta_q_present == 1)) qindex = CurrentQIndex + data; //Return Clip3( 0, 255, qindex ). if (qindex < 0) return 0; else if (qindex > 255) return 255; else return (u8)qindex; } //Otherwise, if ignoreDeltaQ is equal to 0 and delta_q_present is equal to 1, return CurrentQIndex. 
if ((ignoreDeltaQ == 0) && (delta_q_present == 1)) return CurrentQIndex; //otherwise return base_q_idx; } enum { AV1_RESTORE_NONE = 0, AV1_RESTORE_SWITCHABLE, AV1_RESTORE_WIENER, AV1_RESTORE_SGRPROJ }; #define AV1_GMC_IDENTITY 0 #define AV1_GMC_TRANSLATION 1 #define AV1_GMC_ROTZOOM 2 #define AV1_GMC_AFFINE 3 #define AV1_LAST_FRAME 1 #define AV1_LAST2_FRAME 2 #define AV1_LAST3_FRAME 3 #define AV1_GOLDEN_FRAME 4 #define AV1_BWDREF_FRAME 5 #define AV1_ALTREF2_FRAME 6 #define AV1_ALTREF_FRAME 7 #define GM_ABS_ALPHA_BITS 12 #define GM_ALPHA_PREC_BITS 15 #define GM_ABS_TRANS_ONLY_BITS 9 #define GM_TRANS_ONLY_PREC_BITS 3 #define GM_ABS_TRANS_BITS 12 #define GM_TRANS_PREC_BITS 6 #define WARPEDMODEL_PREC_BITS 16 static u32 av1_decode_subexp(GF_BitStream *bs, s32 numSyms) { s32 i = 0; s32 mk = 0; s32 k = 3; while (1) { s32 b2 = i ? k + i - 1 : k; s32 a = 1 << b2; if (numSyms <= mk + 3 * a) { s32 subexp_final_bits = av1_read_ns(bs, numSyms - mk, NULL); return subexp_final_bits + mk; } else { s32 subexp_more_bits = gf_bs_read_int(bs, 1); if (subexp_more_bits) { i++; mk += a; } else { s32 subexp_bits = gf_bs_read_int(bs, b2); return subexp_bits + mk; } } } } static GFINLINE s32 inverse_recenter(s32 r, u32 v) { if ((s64)v > (s64)(2 * r)) return v; else if (v & 1) return r - ((v + 1) >> 1); else return r + (v >> 1); } static s32 av1_decode_unsigned_subexp_with_ref(GF_BitStream *bs, s32 mx, s32 r) { u32 v = av1_decode_subexp(bs, mx); if ((r < 0) && (-(-r << 1) <= mx)) { return inverse_recenter(r, v); } else if ((r << 1) <= mx) { return inverse_recenter(r, v); } else { return mx - 1 - inverse_recenter(mx - 1 - r, v); } } static s16 av1_decode_signed_subexp_with_ref(GF_BitStream *bs, s32 low, s32 high, s32 r) { s16 x = av1_decode_unsigned_subexp_with_ref(bs, high - low, r - low); return x + low; } static void av1_read_global_param(AV1State *state, GF_BitStream *bs, u8 type, u8 ref, u8 idx) { u8 absBits = GM_ABS_ALPHA_BITS; u8 precBits = GM_ALPHA_PREC_BITS; if (idx < 2) { if (type == AV1_GMC_TRANSLATION) { absBits = GM_ABS_TRANS_ONLY_BITS - (!state->frame_state.allow_high_precision_mv ? 1 : 0); precBits = GM_TRANS_ONLY_PREC_BITS - (!state->frame_state.allow_high_precision_mv ? 1 : 0); } else { absBits = GM_ABS_TRANS_BITS; precBits = GM_TRANS_PREC_BITS; } } s32 precDiff = WARPEDMODEL_PREC_BITS - precBits; s32 round = (idx % 3) == 2 ? (1 << WARPEDMODEL_PREC_BITS) : 0; s32 sub = (idx % 3) == 2 ? (1 << precBits) : 0; s32 mx = (1 << absBits); s32 r = (state->PrevGmParams.coefs[ref][idx] >> precDiff) - sub; s32 val = av1_decode_signed_subexp_with_ref(bs, -mx, mx + 1, r); if (val < 0) { val = -val; state->GmParams.coefs[ref][idx] = (-(val << precDiff) + round); } else { state->GmParams.coefs[ref][idx] = (val << precDiff) + round; } } static s32 av1_get_relative_dist(s32 a, s32 b, AV1State *state) { if (!state->enable_order_hint) return 0; s32 diff = a - b; s32 m = 1 << (state->OrderHintBits - 1); diff = (diff & (m - 1)) - (diff & m); return diff; } static void av1_setup_past_independence(AV1State *state) { u32 ref, i; for (ref = AV1_LAST_FRAME; ref <= AV1_ALTREF_FRAME; ref++) { for (i = 0; i <= 5; i++) { state->PrevGmParams.coefs[ref][i] = ((i % 3 == 2) ? 
1 << WARPEDMODEL_PREC_BITS : 0); } } } static void av1_load_previous(AV1State *state, u8 primary_ref_frame, s8 *ref_frame_idx) { s8 prevFrame = ref_frame_idx[primary_ref_frame]; if (prevFrame < 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] load_previous: prevFrame reference index %d is invalid\n", prevFrame)); } else { state->PrevGmParams = state->SavedGmParams[prevFrame]; // load_loop_filter_params( prevFrame ) // load_segmentation_params( prevFrame ) } } static void av1_decode_frame_wrapup(AV1State *state) { u32 i; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { if ((state->frame_state.refresh_frame_flags >> i) & 1) { state->RefOrderHint[i] = state->frame_state.order_hint; state->SavedGmParams[i] = state->GmParams; state->RefFrameType[i] = state->frame_state.frame_type; state->RefUpscaledWidth[i] = state->UpscaledWidth; state->RefFrameHeight[i] = state->height; } } state->frame_state.seen_frame_header = GF_FALSE; //Otherwise (show_existing_frame is equal to 1), if frame_type is equal to KEY_FRAME, the reference frame loading process as specified in section 7.21 is invoked if ((state->frame_state.show_existing_frame) && (state->frame_state.frame_type == AV1_KEY_FRAME)) { state->frame_state.order_hint = state->RefOrderHint[state->frame_state.frame_to_show_map_idx]; //OrderHints[ j + LAST_FRAME ] is set equal to SavedOrderHints[state->frame_to_show_map_idx ][ j + LAST_FRAME ] for j = 0..REFS_PER_FRAME-1. //gm_params[ ref ][ j ] is set equal to SavedGmParams[ frame_to_show_map_idx ][ ref ][ j ] for ref = LAST_FRAME..ALTREF_FRAME, for j = 0..5. state->GmParams = state->SavedGmParams[state->frame_state.frame_to_show_map_idx]; } } static s32 find_latest_forward(u32 curFrameHint, u8 *shiftedOrderHints, u8 *usedFrame) { u32 i; s32 ref = -1; s32 latestOrderHint = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (!usedFrame[i] && ((u32)hint < curFrameHint) && (ref < 0 || hint >= latestOrderHint)) { ref = i; latestOrderHint = hint; } } return ref; } //see 7.8 of AV1 spec static void av1_set_frame_refs(AV1State *state, u8 last_frame_idx, u8 gold_frame_idx, s8 *ref_frame_idx) { u32 i; u8 usedFrame[AV1_NUM_REF_FRAMES]; u8 shiftedOrderHints[AV1_NUM_REF_FRAMES]; for (i = 0; i < AV1_REFS_PER_FRAME; i++) ref_frame_idx[i] = -1; ref_frame_idx[AV1_LAST_FRAME - AV1_LAST_FRAME] = last_frame_idx; ref_frame_idx[AV1_GOLDEN_FRAME - AV1_LAST_FRAME] = gold_frame_idx; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { usedFrame[i] = 0; } usedFrame[last_frame_idx] = 1; usedFrame[gold_frame_idx] = 1; u32 curFrameHint = 1 << (state->OrderHintBits - 1); for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { shiftedOrderHints[i] = curFrameHint + av1_get_relative_dist(state->RefOrderHint[i], state->frame_state.order_hint, state); } u8 lastOrderHint = shiftedOrderHints[last_frame_idx]; u8 goldOrderHint = shiftedOrderHints[gold_frame_idx]; //It is a requirement of bitstream conformance that lastOrderHint is strictly less than curFrameHint. if (lastOrderHint >= curFrameHint) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] non conformant bitstream detected while setting up frame refs: lastOrderHint(%d) shall be stricly less than curFrameHint(%d)\n", lastOrderHint, curFrameHint)); } //It is a requirement of bitstream conformance that goldOrderHint is strictly less than curFrameHint. 
if (goldOrderHint >= curFrameHint) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] non conformant bitstream detected while setting up frame refs: goldOrderHint(%d) shall be stricly less than curFrameHint(%d)\n", lastOrderHint, curFrameHint)); } //find_latest_backward() { s32 ref = -1; s32 latestOrderHint = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (!usedFrame[i] && ((u32)hint >= curFrameHint) && (ref < 0 || hint >= latestOrderHint)) { ref = i; latestOrderHint = hint; } } if (ref >= 0) { ref_frame_idx[AV1_ALTREF_FRAME - AV1_LAST_FRAME] = ref; usedFrame[ref] = 1; } //find_earliest_backward() for BWDREF_FRAME ref = -1; s32 earliestOrderHint = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (!usedFrame[i] && ((u32)hint >= curFrameHint) && (ref < 0 || hint < earliestOrderHint)) { ref = i; earliestOrderHint = hint; } } if (ref >= 0) { ref_frame_idx[AV1_BWDREF_FRAME - AV1_LAST_FRAME] = ref; usedFrame[ref] = 1; } //find_earliest_backward() for ALTREF2_FRAME ref = -1; earliestOrderHint = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (!usedFrame[i] && ((u32)hint >= curFrameHint) && (ref < 0 || hint < earliestOrderHint)) { ref = i; earliestOrderHint = hint; } } if (ref >= 0) { ref_frame_idx[AV1_ALTREF2_FRAME - AV1_LAST_FRAME] = ref; usedFrame[ref] = 1; } //The remaining references are set to be forward references in anti-chronological order as follows: const u8 Ref_Frame_List[AV1_REFS_PER_FRAME - 2] = { AV1_LAST2_FRAME, AV1_LAST3_FRAME, AV1_BWDREF_FRAME, AV1_ALTREF2_FRAME, AV1_ALTREF_FRAME }; for (i = 0; i < AV1_REFS_PER_FRAME - 2; i++) { u8 refFrame = Ref_Frame_List[i]; if (ref_frame_idx[refFrame - AV1_LAST_FRAME] < 0) { s32 last_ref = find_latest_forward(curFrameHint, shiftedOrderHints, usedFrame); if (last_ref >= 0) { ref_frame_idx[refFrame - AV1_LAST_FRAME] = last_ref; usedFrame[last_ref] = 1; } } } //Finally, any remaining references are set to the reference frame with smallest output order as follows: ref = -1; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (ref < 0 || hint < earliestOrderHint) { ref = i; earliestOrderHint = hint; } } for (i = 0; i < AV1_REFS_PER_FRAME; i++) { if (ref_frame_idx[i] < 0) { ref_frame_idx[i] = ref; } } } static void av1_parse_uncompressed_header(GF_BitStream *bs, AV1State *state) { Bool error_resilient_mode = GF_FALSE, allow_screen_content_tools = GF_FALSE, force_integer_mv = GF_FALSE; Bool /*use_ref_frame_mvs = GF_FALSE,*/ FrameIsIntra = GF_FALSE, frame_size_override_flag = GF_FALSE; Bool disable_cdf_update = GF_FALSE; u8 showable_frame; u8 primary_ref_frame; u16 idLen = 0; u32 idx; s8 ref_frame_idx[AV1_REFS_PER_FRAME]; AV1StateFrame *frame_state = &state->frame_state; if (state->frame_id_numbers_present_flag) { idLen = (state->additional_frame_id_length_minus_1 + state->delta_frame_id_length_minus_2 + 3); } frame_state->refresh_frame_flags = 0; showable_frame = 0; if (state->reduced_still_picture_header) { frame_state->key_frame = GF_TRUE; FrameIsIntra = GF_TRUE; frame_state->frame_type = AV1_KEY_FRAME; frame_state->show_frame = GF_TRUE; frame_state->show_existing_frame = 0; } else { frame_state->show_existing_frame = gf_bs_read_int_log(bs, 1, "show_existing_frame"); if (frame_state->show_existing_frame == GF_TRUE) { frame_state->frame_to_show_map_idx = gf_bs_read_int_log(bs, 3, "frame_to_show_map_idx"); frame_state->frame_type = state->RefFrameType[frame_state->frame_to_show_map_idx]; if 
(state->decoder_model_info_present_flag && !state->equal_picture_interval) { gf_bs_read_int_log(bs, state->frame_presentation_time_length, "frame_presentation_time"); } frame_state->refresh_frame_flags = 0; if (state->frame_id_numbers_present_flag) { gf_bs_read_int_log(bs, idLen, "display_frame_id"); } if (frame_state->frame_type == AV1_KEY_FRAME) { frame_state->refresh_frame_flags = AV1_ALL_FRAMES; } /* if (film_grain_params_present) { load_grain_params(frame_to_show_map_idx) }*/ return; } frame_state->frame_type = gf_bs_read_int_log(bs, 2, "frame_type"); FrameIsIntra = (frame_state->frame_type == AV1_INTRA_ONLY_FRAME || frame_state->frame_type == AV1_KEY_FRAME); frame_state->show_frame = gf_bs_read_int_log(bs, 1, "show_frame"); if (frame_state->is_first_frame) { frame_state->key_frame = frame_state->seen_seq_header && frame_state->show_frame && frame_state->frame_type == AV1_KEY_FRAME && frame_state->seen_frame_header; } if (frame_state->show_frame && state->decoder_model_info_present_flag && !state->equal_picture_interval) { gf_bs_read_int_log(bs, state->frame_presentation_time_length, "frame_presentation_time"); } if (frame_state->show_frame) { showable_frame = frame_state->frame_type != AV1_KEY_FRAME; } else { showable_frame = gf_bs_read_int_log(bs, 1, "showable_frame"); } if (frame_state->frame_type == AV1_SWITCH_FRAME || (frame_state->frame_type == AV1_KEY_FRAME && frame_state->show_frame)) error_resilient_mode = GF_TRUE; else error_resilient_mode = gf_bs_read_int_log(bs, 1, "error_resilient_mode"); } if ((frame_state->frame_type == AV1_KEY_FRAME) && frame_state->show_frame) { u32 i; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { state->RefValid[i] = 0; state->RefOrderHint[i] = 0; } for (i = 0; i < AV1_REFS_PER_FRAME; i++) { state->OrderHints[AV1_LAST_FRAME + i] = 0; } } disable_cdf_update = gf_bs_read_int_log(bs, 1, "disable_cdf_update"); if (state->seq_force_screen_content_tools == 2/*SELECT_SCREEN_CONTENT_TOOLS*/) { allow_screen_content_tools = gf_bs_read_int_log(bs, 1, "allow_screen_content_tools"); } else { allow_screen_content_tools = state->seq_force_screen_content_tools; } if (allow_screen_content_tools) { if (state->seq_force_integer_mv == 2/*SELECT_INTEGER_MV*/) { force_integer_mv = gf_bs_read_int_log(bs, 1, "force_integer_mv"); } else { force_integer_mv = state->seq_force_integer_mv; } } else { force_integer_mv = 0; } if (FrameIsIntra) { force_integer_mv = 1; } if (state->frame_id_numbers_present_flag) { gf_bs_read_int_log(bs, idLen, "current_frame_id"); } if (frame_state->frame_type == AV1_SWITCH_FRAME) frame_size_override_flag = GF_TRUE; else if (state->reduced_still_picture_header) frame_size_override_flag = GF_FALSE; else frame_size_override_flag = gf_bs_read_int_log(bs, 1, "frame_size_override_flag"); frame_state->order_hint = gf_bs_read_int_log(bs, state->OrderHintBits, "order_hint"); if (FrameIsIntra || error_resilient_mode) { primary_ref_frame = AV1_PRIMARY_REF_NONE; } else { primary_ref_frame = gf_bs_read_int_log(bs, 3, "primary_ref_frame"); } if (state->decoder_model_info_present_flag) { u8 buffer_removal_time_present_flag = gf_bs_read_int_log(bs, 1, "buffer_removal_time_present_flag"); if (buffer_removal_time_present_flag) { u32 opNum; for (opNum = 0; opNum < state->operating_points_count; opNum++) { if (state->decoder_model_present_for_this_op[opNum]) { u8 opPtIdc = state->operating_point_idc[opNum]; u8 inTemporalLayer = (opPtIdc >> state->temporal_id) & 1; u8 inSpatialLayer = (opPtIdc >> (state->spatial_id + 8)) & 1; if (opPtIdc == 0 || (inTemporalLayer && 
inSpatialLayer)) { gf_bs_read_int_log_idx(bs, state->buffer_removal_time_length, "buffer_removal_time", opNum); } } } } } if (frame_state->frame_type == AV1_SWITCH_FRAME || (frame_state->frame_type == AV1_KEY_FRAME && frame_state->show_frame)) { frame_state->refresh_frame_flags = AV1_ALL_FRAMES; } else { frame_state->refresh_frame_flags = gf_bs_read_int_log(bs, 8, "refresh_frame_flags"); } if (!FrameIsIntra || frame_state->refresh_frame_flags != AV1_ALL_FRAMES) { if (error_resilient_mode && state->enable_order_hint) { u32 i = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { u8 ref_order_hint = gf_bs_read_int_log_idx(bs, state->OrderHintBits, "ref_order_hint", i); if (ref_order_hint != state->RefOrderHint[i]) { state->RefValid[i] = 0; } state->RefOrderHint[i] = ref_order_hint; } } } u8 allow_intrabc = 0; if (frame_state->frame_type == AV1_KEY_FRAME) { av1_frame_size(bs, state, frame_size_override_flag); av1_render_size(bs); if (allow_screen_content_tools && state->UpscaledWidth == state->width) { allow_intrabc = gf_bs_read_int_log(bs, 1, "allow_intrabc"); } } else { if (frame_state->frame_type == AV1_INTRA_ONLY_FRAME) { av1_frame_size(bs, state, frame_size_override_flag); av1_render_size(bs); if (allow_screen_content_tools && state->UpscaledWidth == state->width) { allow_intrabc = gf_bs_read_int_log(bs, 1, "allow_intrabc"); } } else { u32 i = 0; Bool frame_refs_short_signaling = GF_FALSE; if (state->enable_order_hint) { frame_refs_short_signaling = gf_bs_read_int_log(bs, 1, "frame_refs_short_signaling"); if (frame_refs_short_signaling) { u8 last_frame_idx = gf_bs_read_int_log(bs, 3, "last_frame_idx"); u8 gold_frame_idx = gf_bs_read_int_log(bs, 3, "gold_frame_idx"); av1_set_frame_refs(state, last_frame_idx, gold_frame_idx, ref_frame_idx); } } for (i = 0; i < AV1_REFS_PER_FRAME; i++) { if (!frame_refs_short_signaling) ref_frame_idx[i] = gf_bs_read_int_log_idx(bs, 3, "ref_frame_idx", i); if (state->frame_id_numbers_present_flag) { u32 n = state->delta_frame_id_length_minus_2 + 2; /*delta_frame_id_minus_1 =*/ gf_bs_read_int_log_idx(bs, n, "delta_frame_id_minus1", i); //DeltaFrameId = delta_frame_id_minus_1 + 1; //expectedFrameId[i] = ((current_frame_id + (1 << idLen) - DeltaFrameId) % (1 << idLen)); } } if (frame_size_override_flag && !error_resilient_mode) { frame_size_with_refs(bs, state, frame_size_override_flag, ref_frame_idx); } else { av1_frame_size(bs, state, frame_size_override_flag); av1_render_size(bs); } frame_state->allow_high_precision_mv = 0; if (!force_integer_mv) { frame_state->allow_high_precision_mv = gf_bs_read_int_log(bs, 1, "allow_high_precision_mv"); } read_interpolation_filter(bs); gf_bs_read_int_log(bs, 1, "is_motion_mode_switchable"); if (!(error_resilient_mode || !state->enable_ref_frame_mvs)) { gf_bs_read_int_log(bs, 1, "use_ref_frame_mvs"); } } } if (!FrameIsIntra) { u32 i; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { u8 refFrame = AV1_LAST_FRAME + i; u8 ridx = ref_frame_idx[i]; if (ridx >= 0) { u8 hint = state->RefOrderHint[ridx]; state->OrderHints[refFrame] = hint; /* if ( !enable_order_hint ) { RefFrameSignBias[ refFrame ] = 0; } else { RefFrameSignBias[ refFrame ] = get_relative_dist( hint, OrderHint) > 0; } */ } } } if (!(state->reduced_still_picture_header || disable_cdf_update)) gf_bs_read_int_log(bs, 1, "disable_frame_end_update_cdf"); if (primary_ref_frame == AV1_PRIMARY_REF_NONE) { //init_non_coeff_cdfs(); av1_setup_past_independence(state); } else { //load_cdfs(ref_frame_idx[primary_ref_frame]); av1_load_previous(state, primary_ref_frame, ref_frame_idx); } 
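	/* The remainder of the uncompressed header is consumed below: tile info,
	 * quantization_params() (base_q_idx plus optional per-plane DC/AC deltas),
	 * segmentation_params(), delta_q/delta_lf params, loop filter, CDEF,
	 * loop restoration, TX mode, reference/skip mode, global motion and film
	 * grain. Apart from the tile layout, the global motion coefficients and the
	 * values needed to derive CodedLossless, these fields are read mostly to
	 * keep the parser bit-aligned with the stream. */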
av1_parse_tile_info(bs, state); //quantization_params( ): u8 base_q_idx = gf_bs_read_int_log(bs, 8, "base_q_idx"); s32 DeltaQUDc = 0; s32 DeltaQUAc = 0; s32 DeltaQVDc = 0; s32 DeltaQVAc = 0; s32 DeltaQYDc = av1_delta_q(bs, "DeltaQYDc_coded", "DeltaQYDc"); if (!state->config->monochrome) { u8 diff_uv_delta = 0; if (state->separate_uv_delta_q) diff_uv_delta = gf_bs_read_int_log(bs, 1, "diff_uv_delta"); DeltaQUDc = av1_delta_q(bs, "DeltaQUDc_coded", "DeltaQUDc"); DeltaQUAc = av1_delta_q(bs, "DeltaQUAc_coded", "DeltaQUAc"); if (diff_uv_delta) { DeltaQVDc = av1_delta_q(bs, "DeltaQVDc_coded", "DeltaQVDc"); DeltaQVAc = av1_delta_q(bs, "DeltaQVAc_coded", "DeltaQVAc"); } } if (gf_bs_read_int_log(bs, 1, "using_qmatrix")) { gf_bs_read_int_log(bs, 4, "qm_y"); gf_bs_read_int_log(bs, 4, "qm_u"); if (!state->separate_uv_delta_q) { gf_bs_read_int_log(bs, 4, "qm_v"); } } u8 seg_features_SEG_LVL_ALT_Q_enabled[8] = { 0,0,0,0,0,0,0,0 }; s32 seg_features_SEG_LVL_ALT_Q[8] = { 0,0,0,0,0,0,0,0 }; //segmentation_params( ): u8 segmentation_enabled = gf_bs_read_int_log(bs, 1, "segmentation_enabled"); if (segmentation_enabled) { /*u8 segmentation_temporal_update = 0;*/ u8 segmentation_update_data = 1; if (primary_ref_frame != AV1_PRIMARY_REF_NONE) { u8 segmentation_update_map = gf_bs_read_int_log(bs, 1, "segmentation_update_map"); if (segmentation_update_map == 1) gf_bs_read_int_log(bs, 1, "segmentation_temporal_update"); segmentation_update_data = gf_bs_read_int_log(bs, 1, "segmentation_update_data"); } if (segmentation_update_data == 1) { u32 i, j; for (i = 0; i < 8/*=MAX_SEGMENTS*/; i++) { for (j = 0; j < 8 /*=SEG_LVL_MAX*/; j++) { if (/*feature_enabled = */gf_bs_read_int_log_idx2(bs, 1, "feature_enabled", i, j) == 1) { s32 val; u32 bitsToRead = Segmentation_Feature_Bits[j]; //this is SEG_LVL_ALT_Q if (!j) seg_features_SEG_LVL_ALT_Q_enabled[i] = 1; if (Segmentation_Feature_Signed[j] == 1) { val = gf_bs_read_int_log_idx2(bs, 1 + bitsToRead, "signed_feature_value", i, j); } else { val = gf_bs_read_int_log_idx2(bs, bitsToRead, "feature_value", i, j); } if (!j) seg_features_SEG_LVL_ALT_Q[i] = val; } } } //ignore all init steps } } //delta_q_params(): /*u8 delta_q_res = 0;*/ u8 delta_q_present = 0; if (base_q_idx > 0) { delta_q_present = gf_bs_read_int_log(bs, 1, "delta_q_present"); } if (delta_q_present) { gf_bs_read_int_log(bs, 2, "delta_q_res"); } //delta_lf_params(): u8 delta_lf_present = 0; /*u8 delta_lf_res = 0; u8 delta_lf_multi = 0;*/ if (delta_q_present) { if (!allow_intrabc) { delta_lf_present = gf_bs_read_int_log(bs, 1, "delta_lf_present"); } if (delta_lf_present) { gf_bs_read_int_log(bs, 2, "delta_lf_res"); gf_bs_read_int_log(bs, 1, "delta_lf_multi"); } } //init lossless stuff! 
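	/* Per the AV1 spec's lossless derivation: a segment is lossless only when its
	 * effective qindex is 0 and every DeltaQ{Y,U,V}{Dc,Ac} is 0. CodedLossless is
	 * set only if that holds for all 8 segments; AllLossless additionally requires
	 * that no superres scaling is in use (width == UpscaledWidth). */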
u8 CodedLossless = 1; for (idx = 0; idx < 8; idx++) { u8 qindex = av1_get_qindex(GF_TRUE, idx, base_q_idx, delta_q_present, 0/*CurrentQIndex always ignored at this level of parsin*/, segmentation_enabled, seg_features_SEG_LVL_ALT_Q_enabled, seg_features_SEG_LVL_ALT_Q); Bool LosslessArray = (qindex == 0) && (DeltaQYDc == 0) && (DeltaQUAc == 0) && (DeltaQUDc == 0) && (DeltaQVAc == 0) && (DeltaQVDc == 0); if (!LosslessArray) CodedLossless = 0; } Bool AllLossless = CodedLossless && (state->width == state->UpscaledWidth); //loop_filter_params(): if (!CodedLossless && !allow_intrabc) { u8 loop_filter_level_0 = gf_bs_read_int_log(bs, 6, "loop_filter_level_0"); u8 loop_filter_level_1 = gf_bs_read_int_log(bs, 6, "loop_filter_level_1"); if (!state->config->monochrome) { if (loop_filter_level_0 || loop_filter_level_1) { gf_bs_read_int_log(bs, 6, "loop_filter_level_2"); gf_bs_read_int_log(bs, 6, "loop_filter_level_3"); } } gf_bs_read_int_log(bs, 3, "loop_filter_sharpness"); u8 loop_filter_delta_enabled = gf_bs_read_int_log(bs, 1, "loop_filter_delta_enabled"); if (loop_filter_delta_enabled == 1) { u8 loop_filter_delta_update = gf_bs_read_int_log(bs, 1, "loop_filter_delta_update"); if (loop_filter_delta_update) { u32 i; for (i = 0; i < 8/*TOTAL_REFS_PER_FRAME*/; i++) { u8 update_ref_delta = gf_bs_read_int_log_idx(bs, 1, "update_ref_delta", i); if (update_ref_delta == 1) { gf_bs_read_int_log_idx(bs, 1 + 6, "loop_filter_ref_deltas", i); } } for (i = 0; i < 2; i++) { u8 update_mode_delta = gf_bs_read_int_log_idx(bs, 1, "update_mode_delta", i); if (update_mode_delta) { gf_bs_read_int_log_idx(bs, 1 + 6, "loop_filter_mode_deltas", i); } } } } } //cdef_params( ): if (!CodedLossless && !allow_intrabc && state->enable_cdef) { gf_bs_read_int_log(bs, 2, "cdef_damping_minus_3"); u8 cdef_bits = gf_bs_read_int_log(bs, 2, "cdef_bits"); u32 i, num_cd = 1 << cdef_bits; for (i = 0; i < num_cd; i++) { gf_bs_read_int_log_idx(bs, 4, "cdef_y_pri_strength", i); gf_bs_read_int_log_idx(bs, 2, "cdef_y_sec_strength", i); if (!state->config->monochrome) { gf_bs_read_int_log_idx(bs, 4, "cdef_uv_pri_strength", i); gf_bs_read_int_log_idx(bs, 2, "cdef_uv_sec_strength", i); } } } //lr_params( ) : if (!AllLossless && !allow_intrabc && state->enable_restoration) { u32 i, nb_planes = state->config->monochrome ? 
1 : 3; u8 UsesLr = 0; u8 usesChromaLr = 0; for (i = 0; i < nb_planes; i++) { u8 lr_type = gf_bs_read_int_log_idx(bs, 2, "lr_type", i); //FrameRestorationType[i] = Remap_Lr_Type[lr_type] if (lr_type != AV1_RESTORE_NONE) { UsesLr = 1; if (i > 0) { usesChromaLr = 1; } } } if (UsesLr) { if (state->use_128x128_superblock) { gf_bs_read_int_log(bs, 1, "lr_unit_shift_minus_1"); } else { u8 lr_unit_shift = gf_bs_read_int_log(bs, 1, "lr_unit_shift"); if (lr_unit_shift) { gf_bs_read_int_log(bs, 1, "lr_unit_extra_shift"); //lr_unit_shift += lr_unit_extra_shift; } } if (state->config->chroma_subsampling_x && state->config->chroma_subsampling_y && usesChromaLr) { gf_bs_read_int_log(bs, 1, "lr_uv_shift"); } } } //read_tx_mode(): if (CodedLossless == 1) { } else { gf_bs_read_int_log(bs, 1, "tx_mode_select"); } //frame_reference_mode( ): u8 reference_select = 0; if (FrameIsIntra) { } else { reference_select = gf_bs_read_int_log(bs, 1, "reference_select"); } //skip_mode_params( ): u8 skipModeAllowed = 0; if (FrameIsIntra || !reference_select || !state->enable_order_hint) { } else { u32 i; s32 forwardIdx = -1; s32 backwardIdx = -1; s32 forwardHint = 0; s32 backwardHint = 0; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { u8 refHint = state->RefOrderHint[ref_frame_idx[i]]; if (av1_get_relative_dist(refHint, frame_state->order_hint, state) < 0) { if (forwardIdx < 0 || av1_get_relative_dist(refHint, forwardHint, state) > 0) { forwardIdx = i; forwardHint = refHint; } } else if (av1_get_relative_dist(refHint, frame_state->order_hint, state) > 0) { if (backwardIdx < 0 || av1_get_relative_dist(refHint, backwardHint, state) < 0) { backwardIdx = i; backwardHint = refHint; } } } if (forwardIdx < 0) { skipModeAllowed = 0; } else if (backwardIdx >= 0) { skipModeAllowed = 1; //SkipModeFrame[0] = AV1_LAST_FRAME + MIN(forwardIdx, backwardIdx); //SkipModeFrame[1] = AV1_LAST_FRAME + MAX(forwardIdx, backwardIdx); } else { s32 secondForwardIdx = -1; s32 secondForwardHint = 0; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { u8 refHint = state->RefOrderHint[ref_frame_idx[i]]; if (av1_get_relative_dist(refHint, forwardHint, state) < 0) { if (secondForwardIdx < 0 || av1_get_relative_dist(refHint, secondForwardHint, state) > 0) { secondForwardIdx = i; secondForwardHint = refHint; } } } if (secondForwardIdx < 0) { skipModeAllowed = 0; } else { skipModeAllowed = 1; //SkipModeFrame[ 0 ] = LAST_FRAME + Min(forwardIdx, secondForwardIdx) //SkipModeFrame[ 1 ] = LAST_FRAME + Max(forwardIdx, secondForwardIdx) } } } if (skipModeAllowed) { gf_bs_read_int_log(bs, 1, "skip_mode_present"); } if (FrameIsIntra || error_resilient_mode || !state->enable_warped_motion) { } else { gf_bs_read_int_log(bs, 1, "allow_warped_motion"); } gf_bs_read_int_log(bs, 1, "reduced_tx"); //global_motion_params( ) u32 ref; for (ref = AV1_LAST_FRAME; ref <= AV1_ALTREF_FRAME; ref++) { u32 i; for (i = 0; i < 6; i++) { state->GmParams.coefs[ref][i] = ((i % 3 == 2) ? 1 << WARPEDMODEL_PREC_BITS : 0); } } if (!FrameIsIntra) { u32 refs; for (refs = AV1_LAST_FRAME; refs <= AV1_ALTREF_FRAME; refs++) { u8 type = AV1_GMC_IDENTITY; Bool is_global = gf_bs_read_int_log_idx(bs, 1, "is_global", refs); if (is_global) { Bool is_rot_zoom = gf_bs_read_int_log_idx(bs, 1, "is_rot_zoom", refs); if (is_rot_zoom) { type = AV1_GMC_ROTZOOM; } else { Bool is_trans = gf_bs_read_int_log_idx(bs, 1, "is_translation", refs); type = is_trans ? 
AV1_GMC_TRANSLATION : AV1_GMC_AFFINE; } } if (type >= AV1_GMC_ROTZOOM) { av1_read_global_param(state, bs, type, refs, 2); av1_read_global_param(state, bs, type, refs, 3); if (type == AV1_GMC_AFFINE) { av1_read_global_param(state, bs, type, refs, 4); av1_read_global_param(state, bs, type, refs, 5); } else { state->GmParams.coefs[refs][4] = -state->GmParams.coefs[refs][3]; state->GmParams.coefs[refs][5] = state->GmParams.coefs[refs][2]; } } if (type >= AV1_GMC_TRANSLATION) { av1_read_global_param(state, bs, type, refs, 0); av1_read_global_param(state, bs, type, refs, 1); } } } //film_grain_params() if (!state->film_grain_params_present || (!state->frame_state.show_frame && !showable_frame)) { } else { u8 apply_grain = gf_bs_read_int_log(bs, 1, "apply_grain"); if (apply_grain) { gf_bs_read_int_log(bs, 16, "grain_seed"); u8 update_grain = 1; if (state->frame_state.frame_type == AV1_INTER_FRAME) { update_grain = gf_bs_read_int_log(bs, 1, "update_grain"); } if (!update_grain) { gf_bs_read_int_log(bs, 3, "film_grain_params_ref_idx"); } else { u32 i, num_y_points = gf_bs_read_int_log(bs, 4, "num_y_points"); for (i = 0; i < num_y_points; i++) { gf_bs_read_int_log_idx(bs, 8, "point_y_value", i); gf_bs_read_int_log_idx(bs, 8, "point_y_scaling", i); } u8 chroma_scaling_from_luma = 0; if (!state->config->monochrome) chroma_scaling_from_luma = gf_bs_read_int_log(bs, 1, "chroma_scaling_from_luma"); u8 num_cb_points = 0; u8 num_cr_points = 0; if (state->config->monochrome || chroma_scaling_from_luma || ((state->config->chroma_subsampling_x == 1) && (state->config->chroma_subsampling_y == 1) && (num_y_points == 0)) ) { } else { num_cb_points = gf_bs_read_int_log(bs, 4, "num_cb_points"); for (i = 0; i < num_cb_points; i++) { gf_bs_read_int_log_idx(bs, 8, "point_cb_value", i); gf_bs_read_int_log_idx(bs, 8, "point_cb_scaling", i); } num_cr_points = gf_bs_read_int_log(bs, 4, "num_cr_points"); for (i = 0; i < num_cr_points; i++) { gf_bs_read_int_log_idx(bs, 8, "point_cr_value", i); gf_bs_read_int_log_idx(bs, 8, "point_cr_scaling", i); } } gf_bs_read_int_log(bs, 2, "grain_scaling_minus_8"); u8 ar_coeff_lag = gf_bs_read_int_log(bs, 2, "ar_coeff_lag"); u16 numPosLuma = 2 * ar_coeff_lag * (ar_coeff_lag + 1); u16 numPosChroma = numPosLuma; if (num_y_points) { numPosChroma = numPosLuma + 1; for (i = 0; i < numPosLuma; i++) { gf_bs_read_int_log_idx(bs, 8, "ar_coeffs_y_plus_128", i); } } if (chroma_scaling_from_luma || num_cb_points) { for (i = 0; i < numPosChroma; i++) { gf_bs_read_int_log_idx(bs, 8, "ar_coeffs_cb_plus_128", i); } } if (chroma_scaling_from_luma || num_cr_points) { for (i = 0; i < numPosChroma; i++) { gf_bs_read_int_log_idx(bs, 8, "ar_coeffs_cr_plus_128", i); } } gf_bs_read_int_log(bs, 2, "ar_coeff_shift_minus_6"); gf_bs_read_int_log(bs, 2, "grain_scale_shift"); if (num_cb_points) { gf_bs_read_int_log(bs, 8, "cb_mult"); gf_bs_read_int_log(bs, 8, "cb_luma_mult"); gf_bs_read_int_log(bs, 9, "cb_offset"); } if (num_cr_points) { gf_bs_read_int_log(bs, 8, "cr_mult"); gf_bs_read_int_log(bs, 8, "cr_luma_mult"); gf_bs_read_int_log(bs, 9, "cr_offset"); } gf_bs_read_int_log(bs, 1, "overlap_flag"); gf_bs_read_int_log(bs, 1, "clip_to_restricted_range"); } } } //end of uncompressed header !! 
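	/* the uncompressed header ends here without consuming the trailing byte alignment:
	   av1_parse_frame_header() below records uncompressed_header_bytes from the position delta,
	   and av1_parse_frame() calls gf_bs_align() before handing the stream to av1_parse_tile_group() */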
} GF_EXPORT void gf_av1_init_state(AV1State *state) { if (!state) return; memset(state, 0, sizeof(AV1State)); state->color_primaries = 2; state->transfer_characteristics = 2; state->matrix_coefficients = 2; } GF_EXPORT void gf_av1_reset_state(AV1State *state, Bool is_destroy) { GF_List *l1, *l2; if (state->frame_state.header_obus) { while (gf_list_count(state->frame_state.header_obus)) { GF_AV1_OBUArrayEntry *a = (GF_AV1_OBUArrayEntry*)gf_list_pop_back(state->frame_state.header_obus); if (a->obu) gf_free(a->obu); gf_free(a); } } if (state->frame_state.frame_obus) { while (gf_list_count(state->frame_state.frame_obus)) { GF_AV1_OBUArrayEntry *a = (GF_AV1_OBUArrayEntry*)gf_list_pop_back(state->frame_state.frame_obus); if (a->obu) gf_free(a->obu); gf_free(a); } } l1 = state->frame_state.frame_obus; l2 = state->frame_state.header_obus; memset(&state->frame_state, 0, sizeof(AV1StateFrame)); state->frame_state.is_first_frame = GF_TRUE; if (is_destroy) { gf_list_del(l1); gf_list_del(l2); if (state->bs) { if (gf_bs_get_position(state->bs)) { u32 size; gf_bs_get_content_no_truncate(state->bs, &state->frame_obus, &size, &state->frame_obus_alloc); } gf_bs_del(state->bs); } state->bs = NULL; } else { state->frame_state.frame_obus = l1; state->frame_state.header_obus = l2; if (state->bs) gf_bs_seek(state->bs, 0); } } static GF_Err av1_parse_tile_group(GF_BitStream *bs, AV1State *state, u64 obu_start, u64 obu_size) { u32 TileNum, tg_start = 0, tg_end = 0; Bool numTiles = state->tileCols * state->tileRows; Bool tile_start_and_end_present_flag = GF_FALSE; GF_Err e = GF_OK; if (numTiles > 1) tile_start_and_end_present_flag = gf_bs_read_int_log(bs, 1, "tile_start_and_end_present_flag"); if (numTiles == 1 || !tile_start_and_end_present_flag) { tg_start = 0; tg_end = numTiles - 1; /*state->frame_state.tg[0].start_idx = 0; state->frame_state.tg[0].end_idx = numTiles - 1;*/ } else { u32 tileBits = state->tileColsLog2 + state->tileRowsLog2; /*state->frame_state.tg[state->frame_state.tg_idx].start_idx*/ tg_start = gf_bs_read_int_log(bs, tileBits, "tg_start"); /*state->frame_state.tg[state->frame_state.tg_idx].end_idx*/ tg_end = gf_bs_read_int_log(bs, tileBits, "tg_end"); } /*state->frame_state.tg_idx++;*/ gf_bs_align(bs); if (tg_end >= GF_ARRAY_LENGTH(state->frame_state.tiles)) return GF_NON_COMPLIANT_BITSTREAM; state->frame_state.nb_tiles_in_obu = 0; for (TileNum = tg_start; TileNum <= tg_end; TileNum++) { u32 tile_start_offset, tile_size; /*u32 tileRow = TileNum / state->tileCols; u32 tileCol = TileNum % state->tileCols;*/ Bool lastTile = TileNum == tg_end; u64 pos = gf_bs_get_position(bs); if (lastTile) { tile_start_offset = (u32)(pos - obu_start); tile_size = (u32)(obu_size - (pos - obu_start)); } else { u64 tile_size_minus_1 = aom_av1_le(bs, state->tile_size_bytes, "tile_size_minus_1"); pos = gf_bs_get_position(bs); tile_start_offset = (u32)(pos - obu_start); tile_size = (u32)(tile_size_minus_1 + 1/* + state->tile_size_bytes*/); } if (tile_start_offset + tile_size > obu_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Error parsing tile group, tile %d start %d + size %d exceeds OBU length %d\n", TileNum, tile_start_offset, tile_size, obu_size)); e = GF_NON_COMPLIANT_BITSTREAM; break; } state->frame_state.tiles[state->frame_state.nb_tiles_in_obu].obu_start_offset = tile_start_offset; state->frame_state.tiles[state->frame_state.nb_tiles_in_obu].size = tile_size; gf_bs_skip_bytes(bs, tile_size); state->frame_state.nb_tiles_in_obu++; } if (tg_end == numTiles - 1) { av1_decode_frame_wrapup(state); } return e; } 
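/* A minimal usage sketch, not part of the original source (names prefixed "example_" are
   hypothetical): a driver loop feeding a complete buffer to gf_av1_parse_obu(), defined
   further below. On success *obu_size includes the OBU header bytes, so the bitstream is
   left at the start of the next OBU; when too few bytes are available the parser rewinds
   and returns GF_BUFFER_TOO_SMALL. Guarded with #if 0, the same pattern this file already
   uses for inactive code, so it has no effect on compilation. */
#if 0 //illustrative sketch
static GF_Err example_parse_temporal_unit(const u8 *data, u32 size)
{
	AV1State state;
	GF_Err e = GF_OK;
	GF_BitStream *bs = gf_bs_new(data, size, GF_BITSTREAM_READ);
	if (!bs) return GF_OUT_OF_MEM;

	gf_av1_init_state(&state);
	while (gf_bs_available(bs)) {
		ObuType obu_type;
		u64 obu_size = 0;
		u32 obu_hdr_size = 0;
		e = gf_av1_parse_obu(bs, &obu_type, &obu_size, &obu_hdr_size, &state);
		if (e == GF_BUFFER_TOO_SMALL) break; /* position was restored, wait for more data */
		if (e) break;
		/* obu_type, state->temporal_id and state->spatial_id are now valid for this OBU */
	}
	gf_bs_del(bs);
	gf_av1_reset_state(&state, GF_TRUE);
	return e;
}
#endif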
static void av1_parse_frame_header(GF_BitStream *bs, AV1State *state) { AV1StateFrame *frame_state = &state->frame_state; if (frame_state->seen_frame_header == GF_FALSE) { u64 pos = gf_bs_get_position(bs); state->frame_state.show_existing_frame = GF_FALSE; frame_state->seen_frame_header = GF_TRUE; av1_parse_uncompressed_header(bs, state); state->frame_state.is_first_frame = GF_FALSE; state->frame_state.uncompressed_header_bytes = (u32) (gf_bs_get_position(bs) - pos); if (state->frame_state.show_existing_frame) { av1_decode_frame_wrapup(state); frame_state->seen_frame_header = GF_FALSE; } else { //TileNum = 0; frame_state->seen_frame_header = GF_TRUE; } } } static GF_Err av1_parse_frame(GF_BitStream *bs, AV1State *state, u64 obu_start, u64 obu_size) { av1_parse_frame_header(bs, state); //byte alignment gf_bs_align(bs); return av1_parse_tile_group(bs, state, obu_start, obu_size); } static void on_aom_av1_eos(void *_state) { AV1State *state = (AV1State *)_state; state->bs_overread = GF_TRUE; } GF_EXPORT GF_Err gf_av1_parse_obu(GF_BitStream *bs, ObuType *obu_type, u64 *obu_size, u32 *obu_hdr_size, AV1State *state) { GF_Err e = GF_OK; u32 hdr_size; u64 pos = gf_bs_get_position(bs); if (!bs || !obu_type || !state) return GF_BAD_PARAM; state->bs_overread = GF_FALSE; gf_bs_set_eos_callback(bs, on_aom_av1_eos, state); state->obu_extension_flag = state->obu_has_size_field = 0; state->temporal_id = state->spatial_id = 0; state->frame_state.uncompressed_header_bytes = 0; e = gf_av1_parse_obu_header(bs, obu_type, &state->obu_extension_flag, &state->obu_has_size_field, &state->temporal_id, &state->spatial_id); if (e) return e; if (state->obu_has_size_field) { *obu_size = (u32)gf_av1_leb128_read(bs, NULL); } else { if (*obu_size >= 1 + state->obu_extension_flag) { *obu_size = *obu_size - 1 - state->obu_extension_flag; } else { GF_LOG(state->config ? GF_LOG_WARNING : GF_LOG_DEBUG, GF_LOG_CODING, ("[AV1] computed OBU size "LLD" (input value = "LLU"). 
Skipping.\n", *obu_size - 1 - state->obu_extension_flag, *obu_size)); return GF_NON_COMPLIANT_BITSTREAM; } } hdr_size = (u32)(gf_bs_get_position(bs) - pos); if ((gf_bs_available(bs) < *obu_size) || state->bs_overread) { gf_bs_seek(bs, pos); return GF_BUFFER_TOO_SMALL; } *obu_size += hdr_size; if (obu_hdr_size) *obu_hdr_size = hdr_size; if (*obu_type != OBU_SEQUENCE_HEADER && *obu_type != OBU_TEMPORAL_DELIMITER && state->OperatingPointIdc != 0 && state->obu_extension_flag == 1) { u32 inTemporalLayer = (state->OperatingPointIdc >> state->temporal_id) & 1; u32 inSpatialLayer = (state->OperatingPointIdc >> (state->spatial_id + 8)) & 1; if (!inTemporalLayer || !inSpatialLayer) { *obu_type = -1; gf_bs_seek(bs, pos + *obu_size); return GF_OK; } } e = GF_OK; switch (*obu_type) { case OBU_SEQUENCE_HEADER: av1_parse_sequence_header_obu(bs, state); if (gf_bs_get_position(bs) > pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Sequence header parsing consumed too many bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } gf_bs_seek(bs, pos + *obu_size); break; case OBU_METADATA: #if 0 //TODO + sample groups const ObuMetadataType metadata_type = (u32)read_leb128(bs, NULL); we should check for 16 bits limit(AV1MetadataSampleGroupEntry) for ISOBMFF bindings, see https ://github.com/AOMediaCodec/av1-isobmff/pull/86#issuecomment-416659538 if (metadata_type == OBU_METADATA_TYPE_ITUT_T35) { } else if (metadata_type == OBU_METADATA_TYPE_HDR_CLL) { } else if (metadata_type == OBU_METADATA_TYPE_HDR_MDCV) { } else if (metadata_type == OBU_METADATA_TYPE_SCALABILITY) { } else if (metadata_type == METADATA_TYPE_TIMECODE) { } #endif GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[AV1] parsing for metadata is not implemented. Forwarding.\n")); if (gf_bs_get_position(bs) > pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Metadata parsing consumed too many bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } gf_bs_seek(bs, pos + *obu_size); break; case OBU_FRAME_HEADER: case OBU_REDUNDANT_FRAME_HEADER: if (state->config) { av1_parse_frame_header(bs, state); if (gf_bs_get_position(bs) > pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Frame header parsing consumed too many bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } } gf_bs_seek(bs, pos + *obu_size); break; case OBU_FRAME: e = av1_parse_frame(bs, state, pos, *obu_size); if (gf_bs_get_position(bs) != pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Frame parsing did not consume the right number of bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } gf_bs_seek(bs, pos + *obu_size); break; case OBU_TILE_GROUP: if (state->config) { e = av1_parse_tile_group(bs, state, pos, *obu_size); if (gf_bs_get_position(bs) != pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Tile group parsing did not consume the right number of bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } } gf_bs_seek(bs, pos + *obu_size); break; case OBU_TEMPORAL_DELIMITER: state->frame_state.seen_frame_header = GF_FALSE; case OBU_PADDING: gf_bs_seek(bs, pos + *obu_size); break; default: GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] unknown OBU type %u (size "LLU"). 
Skipping.\n", *obu_type, *obu_size)); gf_bs_seek(bs, pos + *obu_size); break; } return e; } GF_EXPORT GF_Err gf_media_prores_parse_bs(GF_BitStream *bs, GF_ProResFrameInfo *prores_frame) { u32 i, j; u64 start, pos; memset(prores_frame, 0, sizeof(GF_ProResFrameInfo)); start = gf_bs_get_position(bs); if (gf_bs_available(bs) < 10) return GF_BUFFER_TOO_SMALL; prores_frame->frame_size = gf_bs_read_u32(bs); prores_frame->frame_identifier = gf_bs_read_u32(bs); if (prores_frame->frame_identifier != GF_4CC('i','c','p','f')) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[ProRes] Invalid frame identifier, expected \"icpf\" got \"%s\"\n", gf_4cc_to_str(prores_frame->frame_identifier) )); gf_bs_seek(bs, start); return GF_NON_COMPLIANT_BITSTREAM; } /*parse frame header*/ pos = gf_bs_get_position(bs); prores_frame->frame_hdr_size = gf_bs_read_u16(bs); if (gf_bs_available(bs) + 2 < prores_frame->frame_hdr_size) { gf_bs_seek(bs, start); return GF_BUFFER_TOO_SMALL; } gf_bs_read_u8(bs); prores_frame->version = gf_bs_read_u8(bs); prores_frame->encoder_id = gf_bs_read_u32(bs); prores_frame->width = gf_bs_read_u16(bs); prores_frame->height = gf_bs_read_u16(bs); prores_frame->chroma_format = gf_bs_read_int(bs, 2); gf_bs_read_int(bs, 2); prores_frame->interlaced_mode = gf_bs_read_int(bs, 2); gf_bs_read_int(bs, 2); prores_frame->aspect_ratio_information = gf_bs_read_int(bs, 4); prores_frame->framerate_code = gf_bs_read_int(bs, 4); prores_frame->color_primaries = gf_bs_read_u8(bs); prores_frame->transfer_characteristics = gf_bs_read_u8(bs); prores_frame->matrix_coefficients = gf_bs_read_u8(bs); gf_bs_read_int(bs, 4); prores_frame->alpha_channel_type = gf_bs_read_int(bs, 4); gf_bs_read_int(bs, 14); prores_frame->load_luma_quant_matrix = gf_bs_read_int(bs, 1); prores_frame->load_chroma_quant_matrix = gf_bs_read_int(bs, 1); if (prores_frame->load_luma_quant_matrix) { for (i=0; i<8; i++) { for (j=0; j<8; j++) { prores_frame->luma_quant_matrix[i][j] = gf_bs_read_u8(bs); } } } if (prores_frame->load_chroma_quant_matrix) { for (i=0; i<8; i++) { for (j=0; j<8; j++) { prores_frame->chroma_quant_matrix[i][j] = gf_bs_read_u8(bs); } } } pos = gf_bs_get_position(bs) - pos; if (pos != prores_frame->frame_hdr_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[ProRes] Invalid frame header size, expected %d got %d\n", prores_frame->frame_hdr_size, (u32) pos)); gf_bs_seek(bs, start); return GF_NON_COMPLIANT_BITSTREAM; } prores_frame->nb_pic = ((prores_frame->interlaced_mode==1) || (prores_frame->interlaced_mode==2)) ? 
2 : 1; gf_bs_seek(bs, start); return GF_OK; } #endif /*GPAC_DISABLE_AV_PARSERS*/ GF_EXPORT u8 gf_mp3_version(u32 hdr) { return ((hdr >> 19) & 0x3); } GF_EXPORT const char *gf_mp3_version_name(u32 hdr) { u32 v = gf_mp3_version(hdr); switch (v) { case 0: return "MPEG-2.5"; case 1: return "Reserved"; case 2: return "MPEG-2"; case 3: return "MPEG-1"; default: return "Unknown"; } } #ifndef GPAC_DISABLE_AV_PARSERS GF_EXPORT u8 gf_mp3_layer(u32 hdr) { return 4 - (((hdr >> 17) & 0x3)); } GF_EXPORT u8 gf_mp3_num_channels(u32 hdr) { if (((hdr >> 6) & 0x3) == 3) return 1; return 2; } GF_EXPORT u16 gf_mp3_sampling_rate(u32 hdr) { u16 res; /* extract the necessary fields from the MP3 header */ u8 version = gf_mp3_version(hdr); u8 sampleRateIndex = (hdr >> 10) & 0x3; switch (sampleRateIndex) { case 0: res = 44100; break; case 1: res = 48000; break; case 2: res = 32000; break; default: GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[MPEG-1/2 Audio] Samplerate index not valid\n")); return 0; } /*reserved or MPEG-1*/ if (version & 1) return res; /*MPEG-2*/ res /= 2; /*MPEG-2.5*/ if (version == 0) res /= 2; return res; } GF_EXPORT u16 gf_mp3_window_size(u32 hdr) { u8 version = gf_mp3_version(hdr); u8 layer = gf_mp3_layer(hdr); if (layer == 3) { if (version == 3) return 1152; return 576; } if (layer == 2) return 1152; return 384; } GF_EXPORT u8 gf_mp3_object_type_indication(u32 hdr) { switch (gf_mp3_version(hdr)) { case 3: return GF_CODECID_MPEG_AUDIO; case 2: case 0: return GF_CODECID_MPEG2_PART3; default: return 0x00; } } /*aligned bitrate parsing with libMAD*/ static u32 const bitrate_table[5][15] = { /* MPEG-1 */ { 0, 32000, 64000, 96000, 128000, 160000, 192000, 224000, /* Layer I */ 256000, 288000, 320000, 352000, 384000, 416000, 448000 }, { 0, 32000, 48000, 56000, 64000, 80000, 96000, 112000, /* Layer II */ 128000, 160000, 192000, 224000, 256000, 320000, 384000 }, { 0, 32000, 40000, 48000, 56000, 64000, 80000, 96000, /* Layer III */ 112000, 128000, 160000, 192000, 224000, 256000, 320000 }, /* MPEG-2 LSF */ { 0, 32000, 48000, 56000, 64000, 80000, 96000, 112000, /* Layer I */ 128000, 144000, 160000, 176000, 192000, 224000, 256000 }, { 0, 8000, 16000, 24000, 32000, 40000, 48000, 56000, /* Layers */ 64000, 80000, 96000, 112000, 128000, 144000, 160000 } /* II & III */ }; u32 gf_mp3_bit_rate(u32 hdr) { u8 version = gf_mp3_version(hdr); u8 layer = gf_mp3_layer(hdr); u8 bitRateIndex = (hdr >> 12) & 0xF; u32 lidx; /*MPEG-1*/ if (version & 1) { if (!layer) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[MPEG-1/2 Audio] layer index not valid\n")); return 0; } lidx = layer - 1; } /*MPEG-2/2.5*/ else { lidx = 3 + (layer >> 1); } if (lidx>4) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[MPEG-1/2 Audio] layer index not valid\n")); return 0; } return bitrate_table[lidx][bitRateIndex]; } GF_EXPORT u16 gf_mp3_frame_size(u32 hdr) { u8 version = gf_mp3_version(hdr); u8 layer = gf_mp3_layer(hdr); u32 pad = ((hdr >> 9) & 0x1) ? 
1 : 0; u32 bitrate = gf_mp3_bit_rate(hdr); u32 samplerate = gf_mp3_sampling_rate(hdr); u32 frameSize = 0; if (!samplerate || !bitrate) return 0; if (layer == 1) { frameSize = ((12 * bitrate / samplerate) + pad) * 4; } else { u32 slots_per_frame = 144; if ((layer == 3) && !(version & 1)) slots_per_frame = 72; frameSize = (slots_per_frame * bitrate / samplerate) + pad; } return (u16)frameSize; } GF_EXPORT u32 gf_mp3_get_next_header(FILE* in) { u8 b, state = 0; u32 dropped = 0; unsigned char bytes[4]; bytes[0] = bytes[1] = bytes[2] = bytes[3] = 0; while (1) { if (gf_fread(&b, 1, in) == 0) return 0; if (state == 3) { bytes[state] = b; return GF_4CC((u32)bytes[0], bytes[1], bytes[2], bytes[3]); } if (state == 2) { if (((b & 0xF0) == 0) || ((b & 0xF0) == 0xF0) || ((b & 0x0C) == 0x0C)) { if (bytes[1] == 0xFF) state = 1; else state = 0; } else { bytes[state] = b; state = 3; } } if (state == 1) { if (((b & 0xE0) == 0xE0) && ((b & 0x18) != 0x08) && ((b & 0x06) != 0)) { bytes[state] = b; state = 2; } else { state = 0; } } if (state == 0) { if (b == 0xFF) { bytes[state] = b; state = 1; } else { if ((dropped == 0) && ((b & 0xE0) == 0xE0) && ((b & 0x18) != 0x08) && ((b & 0x06) != 0)) { bytes[0] = (u8)0xFF; bytes[1] = b; state = 2; } else { dropped++; } } } } return 0; } GF_EXPORT u32 gf_mp3_get_next_header_mem(const u8 *buffer, u32 size, u32 *pos) { u32 cur; u8 b, state = 0; u32 dropped = 0; unsigned char bytes[4]; bytes[0] = bytes[1] = bytes[2] = bytes[3] = 0; cur = 0; *pos = 0; while (cur < size) { b = (u8)buffer[cur]; cur++; if (state == 3) { u32 val; bytes[state] = b; val = GF_4CC((u32)bytes[0], bytes[1], bytes[2], bytes[3]); if (gf_mp3_frame_size(val)) { *pos = dropped; return val; } state = 0; dropped = cur; } if (state == 2) { if (((b & 0xF0) == 0) || ((b & 0xF0) == 0xF0) || ((b & 0x0C) == 0x0C)) { if (bytes[1] == 0xFF) { state = 1; dropped += 1; } else { state = 0; dropped = cur; } } else { bytes[state] = b; state = 3; } } if (state == 1) { if (((b & 0xE0) == 0xE0) && ((b & 0x18) != 0x08) && ((b & 0x06) != 0)) { bytes[state] = b; state = 2; } else { state = 0; dropped = cur; } } if (state == 0) { if (b == 0xFF) { bytes[state] = b; state = 1; } else { dropped++; } } } return 0; } #endif /*GPAC_DISABLE_AV_PARSERS*/ GF_EXPORT Bool gf_avc_is_rext_profile(u8 profile_idc) { switch (profile_idc) { case 100: case 110: case 122: case 244: case 44: case 83: case 86: case 118: case 128: case 138: case 139: case 134: case 135: return GF_TRUE; default: return GF_FALSE; } } GF_EXPORT const char *gf_avc_get_profile_name(u8 video_prof) { switch (video_prof) { case 0x42: return "Baseline"; case 0x4D: return "Main"; case 0x53: return "Scalable Baseline"; case 0x56: return "Scalable High"; case 0x58: return "Extended"; case 0x64: return "High"; case 0x6E: return "High 10"; case 0x7A: return "High 4:2:2"; case 0x90: case 0xF4: return "High 4:4:4"; default: return "Unknown"; } } GF_EXPORT const char *gf_hevc_get_profile_name(u8 video_prof) { switch (video_prof) { case 0x01: return "Main"; case 0x02: return "Main 10"; case 0x03: return "Main Still Picture"; default: return "Unknown"; } } GF_EXPORT const char *gf_avc_hevc_get_chroma_format_name(u8 chroma_format) { switch (chroma_format) { case 1: return "YUV 4:2:0"; case 2: return "YUV 4:2:2"; case 3: return "YUV 4:4:4"; default: return "Unknown"; } } #ifndef GPAC_DISABLE_AV_PARSERS u32 gf_bs_read_ue_log_idx3(GF_BitStream *bs, const char *fname, s32 idx1, s32 idx2, s32 idx3) { u32 val=0, code; s32 nb_lead = -1; u32 bits = 0; for (code=0; !code; nb_lead++) { if 
(nb_lead>=32) { //gf_bs_read_int keeps returning 0 on EOS, so if no more bits available, rbsp was truncated otherwise code is broken in rbsp) //we only test once nb_lead>=32 to avoid testing at each bit read if (!gf_bs_available(bs)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Core] exp-golomb read failed, not enough bits in bitstream !\n")); } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Core] corrupted exp-golomb code, %d leading zeros, max 31 allowed !\n", nb_lead)); } return 0; } code = gf_bs_read_int(bs, 1); bits++; } if (nb_lead) { val = gf_bs_read_int(bs, nb_lead); val += (1 << nb_lead) - 1; bits += nb_lead; } if (fname) { gf_bs_log_idx(bs, bits, fname, val, idx1, idx2, idx3); } return val; } #define gf_bs_read_ue_log_idx2(_bs, _fname, _idx1, _idx2) gf_bs_read_ue_log_idx3(_bs, _fname, (s32) _idx1, (s32) _idx2, -1) #define gf_bs_read_ue_log_idx(_bs, _fname, _idx) gf_bs_read_ue_log_idx3(_bs, _fname, (s32) _idx, -1, -1) #define gf_bs_read_ue_log(_bs, _fname) gf_bs_read_ue_log_idx3(_bs, _fname, -1, -1, -1) u32 gf_bs_read_ue(GF_BitStream *bs) { return gf_bs_read_ue_log(bs, NULL); } s32 gf_bs_read_se(GF_BitStream *bs) { u32 v = gf_bs_read_ue(bs); if ((v & 0x1) == 0) return (s32)(0 - (v >> 1)); return (v + 1) >> 1; } s32 gf_bs_read_se_log_idx2(GF_BitStream *bs, const char *fname, s32 idx1, s32 idx2) { s32 res = gf_bs_read_se(bs); if (fname) gf_bs_log_idx(bs, -1, fname, res, idx1, idx2, -1); return res; } #define gf_bs_read_se_log_idx(_bs, _fname, _idx) gf_bs_read_se_log_idx2(_bs, _fname, (s32) _idx, -1) #define gf_bs_read_se_log(_bs, _fname) gf_bs_read_se_log_idx2(_bs, _fname, -1, -1) void gf_bs_write_ue(GF_BitStream *bs, u32 num) { s32 length = 1; s32 temp = ++num; while (temp != 1) { temp >>= 1; length += 2; } gf_bs_write_int(bs, 0, length >> 1); gf_bs_write_int(bs, num, (length + 1) >> 1); } void gf_bs_write_se(GF_BitStream *bs, s32 num) { u32 v; if (num <= 0) v = (-1 * num) << 1; else v = (num << 1) - 1; gf_bs_write_ue(bs, v); } u32 gf_media_nalu_is_start_code(GF_BitStream *bs) { u8 s1, s2, s3, s4; Bool is_sc = 0; u64 pos = gf_bs_get_position(bs); s1 = gf_bs_read_int(bs, 8); s2 = gf_bs_read_int(bs, 8); if (!s1 && !s2) { s3 = gf_bs_read_int(bs, 8); if (s3 == 0x01) is_sc = 3; else if (!s3) { s4 = gf_bs_read_int(bs, 8); if (s4 == 0x01) is_sc = 4; } } gf_bs_seek(bs, pos + is_sc); return is_sc; } /*read that amount of data at each IO access rather than fetching byte by byte...*/ #define AVC_CACHE_SIZE 4096 static u32 gf_media_nalu_locate_start_code_bs(GF_BitStream *bs, Bool locate_trailing) { u32 v, bpos, nb_cons_zeros = 0; char avc_cache[AVC_CACHE_SIZE]; u64 end, cache_start, load_size; u64 start = gf_bs_get_position(bs); if (start < 3) return 0; load_size = 0; bpos = 0; cache_start = 0; end = 0; v = 0xffffffff; while (!end) { /*refill cache*/ if (bpos == (u32)load_size) { if (!gf_bs_available(bs)) break; load_size = gf_bs_available(bs); if (load_size > AVC_CACHE_SIZE) load_size = AVC_CACHE_SIZE; bpos = 0; cache_start = gf_bs_get_position(bs); gf_bs_read_data(bs, avc_cache, (u32)load_size); } v = ( (v<<8) & 0xFFFFFF00) | ((u32) avc_cache[bpos]); bpos++; if (locate_trailing) { if ((v & 0x000000FF) == 0) nb_cons_zeros++; else nb_cons_zeros = 0; } if (v == 0x00000001) end = cache_start + bpos - 4; else if ((v & 0x00FFFFFF) == 0x00000001) end = cache_start + bpos - 3; } gf_bs_seek(bs, start); if (!end) end = gf_bs_get_size(bs); if (locate_trailing) { if (nb_cons_zeros >= 3) return (u32)(end - start - nb_cons_zeros); } return (u32)(end - start); } GF_EXPORT u32 
gf_media_nalu_next_start_code_bs(GF_BitStream *bs) { return gf_media_nalu_locate_start_code_bs(bs, 0); } GF_EXPORT u32 gf_media_nalu_next_start_code(const u8 *data, u32 data_len, u32 *sc_size) { u32 avail = data_len; const u8 *cur = data; while (cur) { u32 v, bpos; u8 *next_zero = memchr(cur, 0, avail); if (!next_zero) return data_len; v = 0xffffff00; bpos = (u32)(next_zero - data) + 1; while (1) { u8 cval; if (bpos == (u32)data_len) return data_len; cval = data[bpos]; v = ((v << 8) & 0xFFFFFF00) | ((u32)cval); bpos++; if (v == 0x00000001) { *sc_size = 4; return bpos - 4; } else if ((v & 0x00FFFFFF) == 0x00000001) { *sc_size = 3; return bpos - 3; } if (cval) break; } if (bpos >= data_len) break; cur = data + bpos; avail = data_len - bpos; } return data_len; } Bool gf_media_avc_slice_is_intra(AVCState *avc) { switch (avc->s_info.slice_type) { case GF_AVC_TYPE_I: case GF_AVC_TYPE2_I: case GF_AVC_TYPE_SI: case GF_AVC_TYPE2_SI: return 1; default: return 0; } } #if 0 //unused Bool gf_media_avc_slice_is_IDR(AVCState *avc) { if (avc->sei.recovery_point.valid) { avc->sei.recovery_point.valid = 0; return 1; } if (avc->s_info.nal_unit_type != GF_AVC_NALU_IDR_SLICE) return 0; return gf_media_avc_slice_is_intra(avc); } #endif static const struct { u32 w, h; } avc_hevc_sar[] = { { 0, 0 }, { 1, 1 }, { 12, 11 }, { 10, 11 }, { 16, 11 }, { 40, 33 }, { 24, 11 }, { 20, 11 }, { 32, 11 }, { 80, 33 }, { 18, 11 }, { 15, 11 }, { 64, 33 }, { 160,99 }, { 4, 3 }, { 3, 2 }, { 2, 1 } }; /*ISO 14496-10 (N11084) E.1.2*/ static void avc_parse_hrd_parameters(GF_BitStream *bs, AVC_HRD *hrd) { int i, cpb_cnt_minus1; cpb_cnt_minus1 = gf_bs_read_ue_log(bs, "cpb_cnt_minus1"); if (cpb_cnt_minus1 > 31) GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] invalid cpb_cnt_minus1 value: %d (expected in [0;31])\n", cpb_cnt_minus1)); gf_bs_read_int_log(bs, 4, "bit_rate_scale"); gf_bs_read_int_log(bs, 4, "cpb_size_scale"); /*for( SchedSelIdx = 0; SchedSelIdx <= cpb_cnt_minus1; SchedSelIdx++ ) {*/ for (i = 0; i <= cpb_cnt_minus1; i++) { gf_bs_read_ue_log_idx(bs, "bit_rate_value_minus1", i); gf_bs_read_ue_log_idx(bs, "cpb_size_value_minus1", i); gf_bs_read_int_log_idx(bs, 1, "cbr_flag", i); } gf_bs_read_int_log(bs, 5, "initial_cpb_removal_delay_length_minus1"); hrd->cpb_removal_delay_length_minus1 = gf_bs_read_int_log(bs, 5, "cpb_removal_delay_length_minus1"); hrd->dpb_output_delay_length_minus1 = gf_bs_read_int_log(bs, 5, "dpb_output_delay_length_minus1"); hrd->time_offset_length = gf_bs_read_int_log(bs, 5, "time_offset_length"); return; } /*returns the nal_size without emulation prevention bytes*/ u32 gf_media_nalu_emulation_bytes_add_count(u8 *buffer, u32 nal_size) { u32 i = 0, emulation_bytes_count = 0; u8 num_zero = 0; while (i < nal_size) { /*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003 other than the following sequences shall not occur at any byte-aligned position: \96 0x00000300 \96 0x00000301 \96 0x00000302 \96 0x00000303" */ if (num_zero == 2 && (u8)buffer[i] < 0x04) { /*emulation code found*/ num_zero = 0; emulation_bytes_count++; if (!buffer[i]) num_zero = 1; } else { if (!buffer[i]) num_zero++; else num_zero = 0; } i++; } return emulation_bytes_count; } u32 gf_media_nalu_add_emulation_bytes(const u8 *buffer_src, u8 *buffer_dst, u32 nal_size) { u32 i = 0, emulation_bytes_count = 0; u8 num_zero = 0; while (i < nal_size) { /*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003 other than the following sequences shall not occur at any byte-aligned position: 
0x00000300 0x00000301 0x00000302 0x00000303" */ if (num_zero == 2 && (u8)buffer_src[i] < 0x04) { /*add emulation code*/ num_zero = 0; buffer_dst[i + emulation_bytes_count] = 0x03; emulation_bytes_count++; if (!buffer_src[i]) num_zero = 1; } else { if (!buffer_src[i]) num_zero++; else num_zero = 0; } buffer_dst[i + emulation_bytes_count] = buffer_src[i]; i++; } return nal_size + emulation_bytes_count; } /*returns the nal_size without emulation prevention bytes*/ u32 gf_media_nalu_emulation_bytes_remove_count(const u8 *buffer, u32 nal_size) { u32 i = 0, emulation_bytes_count = 0; u8 num_zero = 0; if (!buffer || !nal_size) return 0; while (i < nal_size) { /*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003 other than the following sequences shall not occur at any byte-aligned position: \96 0x00000300 \96 0x00000301 \96 0x00000302 \96 0x00000303" */ if (num_zero == 2 && buffer[i] == 0x03 && i + 1 < nal_size /*next byte is readable*/ && (u8)buffer[i + 1] < 0x04) { /*emulation code found*/ num_zero = 0; emulation_bytes_count++; i++; } if (!buffer[i]) num_zero++; else num_zero = 0; i++; } return emulation_bytes_count; } /*nal_size is updated to allow better error detection*/ GF_EXPORT u32 gf_media_nalu_remove_emulation_bytes(const u8 *buffer_src, u8 *buffer_dst, u32 nal_size) { u32 i = 0, emulation_bytes_count = 0; u8 num_zero = 0; while (i < nal_size) { /*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003 other than the following sequences shall not occur at any byte-aligned position: 0x00000300 0x00000301 0x00000302 0x00000303" */ if (num_zero == 2 && buffer_src[i] == 0x03 && i + 1 < nal_size /*next byte is readable*/ && (u8)buffer_src[i + 1] < 0x04) { /*emulation code found*/ num_zero = 0; emulation_bytes_count++; i++; } buffer_dst[i - emulation_bytes_count] = buffer_src[i]; if (!buffer_src[i]) num_zero++; else num_zero = 0; i++; } return nal_size - emulation_bytes_count; } static s32 gf_avc_read_sps_bs_internal(GF_BitStream *bs, AVCState *avc, u32 subseq_sps, u32 *vui_flag_pos, u32 nal_hdr) { AVC_SPS *sps; s32 mb_width, mb_height, sps_id = -1; u32 profile_idc, level_idc, pcomp, i, chroma_format_idc, cl = 0, cr = 0, ct = 0, cb = 0, luma_bd, chroma_bd; u8 separate_colour_plane_flag = 0; if (!vui_flag_pos) { gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); } if (!bs) { return -1; } if (!nal_hdr) { gf_bs_read_int_log(bs, 1, "forbidden_zero_bit"); gf_bs_read_int_log(bs, 2, "nal_ref_idc"); gf_bs_read_int_log(bs, 5, "nal_unit_type"); } profile_idc = gf_bs_read_int_log(bs, 8, "profile_idc"); pcomp = gf_bs_read_int_log(bs, 8, "profile_compatibility"); /*sanity checks*/ if (pcomp & 0x3) return -1; level_idc = gf_bs_read_int_log(bs, 8, "level_idc"); /*SubsetSps is used to be sure that AVC SPS are not going to be scratched by subset SPS. According to the SVC standard, subset SPS can have the same sps_id than its base layer, but it does not refer to the same SPS. */ sps_id = gf_bs_read_ue_log(bs, "sps_id") + GF_SVC_SSPS_ID_SHIFT * subseq_sps; if ((sps_id < 0) || (sps_id >= 32)) { return -1; } luma_bd = chroma_bd = 0; sps = &avc->sps[sps_id]; chroma_format_idc = sps->ChromaArrayType = 1; sps->state |= subseq_sps ? 
AVC_SUBSPS_PARSED : AVC_SPS_PARSED; /*High Profile and SVC*/ switch (profile_idc) { case 100: case 110: case 122: case 244: case 44: /*sanity checks: note1 from 7.4.2.1.1 of iso/iec 14496-10-N11084*/ if (pcomp & 0xE0) return -1; case 83: case 86: case 118: case 128: chroma_format_idc = gf_bs_read_ue_log(bs, "chroma_format_idc"); sps->ChromaArrayType = chroma_format_idc; if (chroma_format_idc == 3) { separate_colour_plane_flag = gf_bs_read_int_log(bs, 1, "separate_colour_plane_flag"); /* Depending on the value of separate_colour_plane_flag, the value of the variable ChromaArrayType is assigned as follows. \96 If separate_colour_plane_flag is equal to 0, ChromaArrayType is set equal to chroma_format_idc. \96 Otherwise (separate_colour_plane_flag is equal to 1), ChromaArrayType is set equal to 0. */ if (separate_colour_plane_flag) sps->ChromaArrayType = 0; } luma_bd = gf_bs_read_ue_log(bs, "luma_bit_depth"); chroma_bd = gf_bs_read_ue_log(bs, "chroma_bit_depth"); /*qpprime_y_zero_transform_bypass_flag = */ gf_bs_read_int_log(bs, 1, "qpprime_y_zero_transform_bypass_flag"); /*seq_scaling_matrix_present_flag*/ if (gf_bs_read_int_log(bs, 1, "seq_scaling_matrix_present_flag")) { u32 k; for (k = 0; k < 8; k++) { if (gf_bs_read_int_log_idx(bs, 1, "seq_scaling_list_present_flag", k)) { u32 z, last = 8, next = 8; u32 sl = k < 6 ? 16 : 64; for (z = 0; z < sl; z++) { if (next) { s32 delta = gf_bs_read_se(bs); next = (last + delta + 256) % 256; } last = next ? next : last; } } } } break; } sps->profile_idc = profile_idc; sps->level_idc = level_idc; sps->prof_compat = pcomp; sps->log2_max_frame_num = gf_bs_read_ue_log(bs, "log2_max_frame_num") + 4; sps->poc_type = gf_bs_read_ue_log(bs, "poc_type"); sps->chroma_format = chroma_format_idc; sps->luma_bit_depth_m8 = luma_bd; sps->chroma_bit_depth_m8 = chroma_bd; if (sps->poc_type == 0) { sps->log2_max_poc_lsb = gf_bs_read_ue_log(bs, "log2_max_poc_lsb") + 4; } else if (sps->poc_type == 1) { sps->delta_pic_order_always_zero_flag = gf_bs_read_int_log(bs, 1, "delta_pic_order_always_zero_flag"); sps->offset_for_non_ref_pic = gf_bs_read_se_log(bs, "offset_for_non_ref_pic"); sps->offset_for_top_to_bottom_field = gf_bs_read_se_log(bs, "offset_for_top_to_bottom_field"); sps->poc_cycle_length = gf_bs_read_ue_log(bs, "poc_cycle_length"); if (sps->poc_cycle_length > GF_ARRAY_LENGTH(sps->offset_for_ref_frame)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[avc-h264] offset_for_ref_frame overflow from poc_cycle_length\n")); return -1; } for (i = 0; i < sps->poc_cycle_length; i++) sps->offset_for_ref_frame[i] = gf_bs_read_se_log_idx(bs, "offset_for_ref_frame", i); } if (sps->poc_type > 2) { return -1; } sps->max_num_ref_frames = gf_bs_read_ue_log(bs, "max_num_ref_frames"); sps->gaps_in_frame_num_value_allowed_flag = gf_bs_read_int_log(bs, 1, "gaps_in_frame_num_value_allowed_flag"); mb_width = gf_bs_read_ue_log(bs, "pic_width_in_mbs_minus1") + 1; mb_height = gf_bs_read_ue_log(bs, "pic_height_in_map_units_minus1") + 1; sps->frame_mbs_only_flag = gf_bs_read_int_log(bs, 1, "frame_mbs_only_flag"); sps->width = mb_width * 16; sps->height = (2 - sps->frame_mbs_only_flag) * mb_height * 16; if (!sps->frame_mbs_only_flag) sps->mb_adaptive_frame_field_flag = gf_bs_read_int_log(bs, 1, "mb_adaptive_frame_field_flag"); gf_bs_read_int_log(bs, 1, "direct_8x8_inference_flag"); if (gf_bs_read_int_log(bs, 1, "frame_cropping_flag")) { int CropUnitX, CropUnitY, SubWidthC = -1, SubHeightC = -1; if (chroma_format_idc == 1) { SubWidthC = 2; SubHeightC = 2; } else if (chroma_format_idc == 2) { SubWidthC = 
2; SubHeightC = 1; } else if ((chroma_format_idc == 3) && (separate_colour_plane_flag == 0)) { SubWidthC = 1; SubHeightC = 1; } if (sps->ChromaArrayType == 0) { assert(SubWidthC == -1); CropUnitX = 1; CropUnitY = 2 - sps->frame_mbs_only_flag; } else { CropUnitX = SubWidthC; CropUnitY = SubHeightC * (2 - sps->frame_mbs_only_flag); } cl = gf_bs_read_ue_log(bs, "frame_crop_left_offset"); cr = gf_bs_read_ue_log(bs, "frame_crop_right_offset"); ct = gf_bs_read_ue_log(bs, "frame_crop_top_offset"); cb = gf_bs_read_ue_log(bs, "frame_crop_bottom_offset"); sps->width -= CropUnitX * (cl + cr); sps->height -= CropUnitY * (ct + cb); cl *= CropUnitX; cr *= CropUnitX; ct *= CropUnitY; cb *= CropUnitY; } sps->crop.left = cl; sps->crop.right = cr; sps->crop.top = ct; sps->crop.bottom = cb; if (vui_flag_pos) { *vui_flag_pos = (u32)gf_bs_get_bit_offset(bs); } /*vui_parameters_present_flag*/ sps->vui_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_parameters_present_flag"); if (sps->vui_parameters_present_flag) { sps->vui.aspect_ratio_info_present_flag = gf_bs_read_int_log(bs, 1, "aspect_ratio_info_present_flag"); if (sps->vui.aspect_ratio_info_present_flag) { s32 aspect_ratio_idc = gf_bs_read_int_log(bs, 8, "aspect_ratio_idc"); if (aspect_ratio_idc == 255) { sps->vui.par_num = gf_bs_read_int_log(bs, 16, "aspect_ratio_num"); sps->vui.par_den = gf_bs_read_int_log(bs, 16, "aspect_ratio_den"); } else if (aspect_ratio_idc < GF_ARRAY_LENGTH(avc_hevc_sar) ) { sps->vui.par_num = avc_hevc_sar[aspect_ratio_idc].w; sps->vui.par_den = avc_hevc_sar[aspect_ratio_idc].h; } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] Unknown aspect_ratio_idc: your video may have a wrong aspect ratio. Contact the GPAC team!\n")); } } sps->vui.overscan_info_present_flag = gf_bs_read_int_log(bs, 1, "overscan_info_present_flag"); if (sps->vui.overscan_info_present_flag) gf_bs_read_int_log(bs, 1, "overscan_appropriate_flag"); /* default values */ sps->vui.video_format = 5; sps->vui.colour_primaries = 2; sps->vui.transfer_characteristics = 2; sps->vui.matrix_coefficients = 2; /* now read values if possible */ sps->vui.video_signal_type_present_flag = gf_bs_read_int_log(bs, 1, "video_signal_type_present_flag"); if (sps->vui.video_signal_type_present_flag) { sps->vui.video_format = gf_bs_read_int_log(bs, 3, "video_format"); sps->vui.video_full_range_flag = gf_bs_read_int_log(bs, 1, "video_full_range_flag"); sps->vui.colour_description_present_flag = gf_bs_read_int_log(bs, 1, "colour_description_present_flag"); if (sps->vui.colour_description_present_flag) { sps->vui.colour_primaries = gf_bs_read_int_log(bs, 8, "colour_primaries"); sps->vui.transfer_characteristics = gf_bs_read_int_log(bs, 8, "transfer_characteristics"); sps->vui.matrix_coefficients = gf_bs_read_int_log(bs, 8, "matrix_coefficients"); } } if (gf_bs_read_int_log(bs, 1, "chroma_location_info_present_flag")) { gf_bs_read_ue_log(bs, "chroma_sample_location_type_top_field"); gf_bs_read_ue_log(bs, "chroma_sample_location_type_bottom_field"); } sps->vui.timing_info_present_flag = gf_bs_read_int_log(bs, 1, "timing_info_present_flag"); if (sps->vui.timing_info_present_flag) { sps->vui.num_units_in_tick = gf_bs_read_int_log(bs, 32, "num_units_in_tick"); sps->vui.time_scale = gf_bs_read_int_log(bs, 32, "time_scale"); sps->vui.fixed_frame_rate_flag = gf_bs_read_int_log(bs, 1, "fixed_frame_rate_flag"); } sps->vui.nal_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "nal_hrd_parameters_present_flag"); if (sps->vui.nal_hrd_parameters_present_flag) 
avc_parse_hrd_parameters(bs, &sps->vui.hrd); sps->vui.vcl_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vcl_hrd_parameters_present_flag"); if (sps->vui.vcl_hrd_parameters_present_flag) avc_parse_hrd_parameters(bs, &sps->vui.hrd); if (sps->vui.nal_hrd_parameters_present_flag || sps->vui.vcl_hrd_parameters_present_flag) sps->vui.low_delay_hrd_flag = gf_bs_read_int_log(bs, 1, "low_delay_hrd_flag"); sps->vui.pic_struct_present_flag = gf_bs_read_int_log(bs, 1, "pic_struct_present_flag"); } /*end of seq_parameter_set_data*/ if (subseq_sps) { if ((profile_idc == 83) || (profile_idc == 86)) { u8 extended_spatial_scalability_idc; /*parsing seq_parameter_set_svc_extension*/ gf_bs_read_int_log(bs, 1, "inter_layer_deblocking_filter_control_present_flag"); extended_spatial_scalability_idc = gf_bs_read_int_log(bs, 2, "extended_spatial_scalability_idc"); if (sps->ChromaArrayType == 1 || sps->ChromaArrayType == 2) { gf_bs_read_int_log(bs, 1, "chroma_phase_x_plus1_flag"); } if (sps->ChromaArrayType == 1) { gf_bs_read_int_log(bs, 2, "chroma_phase_y_plus1"); } if (extended_spatial_scalability_idc == 1) { if (sps->ChromaArrayType > 0) { gf_bs_read_int_log(bs, 1, "seq_ref_layer_chroma_phase_x_plus1_flag"); gf_bs_read_int_log(bs, 2, "seq_ref_layer_chroma_phase_y_plus1"); } gf_bs_read_se_log(bs, "seq_scaled_ref_layer_left_offset"); gf_bs_read_se_log(bs, "seq_scaled_ref_layer_top_offset"); gf_bs_read_se_log(bs, "seq_scaled_ref_layer_right_offset"); gf_bs_read_se_log(bs, "seq_scaled_ref_layer_bottom_offset"); } if (gf_bs_read_int_log(bs, 1, "seq_tcoeff_level_prediction_flag")) { gf_bs_read_int_log(bs, 1, "adaptive_tcoeff_level_prediction_flag"); } gf_bs_read_int_log(bs, 1, "slice_header_restriction_flag"); if (gf_bs_read_int_log(bs, 1, "svc_vui_parameters_present")) { u32 vui_ext_num_entries_minus1 = gf_bs_read_ue_log(bs, "vui_ext_num_entries_minus1"); for (i = 0; i <= vui_ext_num_entries_minus1; i++) { u8 vui_ext_nal_hrd_parameters_present_flag, vui_ext_vcl_hrd_parameters_present_flag, vui_ext_timing_info_present_flag; gf_bs_read_int_log(bs, 3, "vui_ext_dependency_id"); gf_bs_read_int_log(bs, 4, "vui_ext_quality_id"); gf_bs_read_int_log(bs, 3, "vui_ext_temporal_id"); vui_ext_timing_info_present_flag = gf_bs_read_int_log(bs, 1, "vui_ext_timing_info_present_flag"); if (vui_ext_timing_info_present_flag) { gf_bs_read_int_log(bs, 32, "vui_ext_num_units_in_tick"); gf_bs_read_int_log(bs, 32, "vui_ext_time_scale"); gf_bs_read_int_log(bs, 1, "vui_ext_fixed_frame_rate_flag"); } vui_ext_nal_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_ext_nal_hrd_parameters_present_flag"); if (vui_ext_nal_hrd_parameters_present_flag) { //hrd_parameters( ) } vui_ext_vcl_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_ext_vcl_hrd_parameters_present_flag"); if (vui_ext_vcl_hrd_parameters_present_flag) { //hrd_parameters( ) } if (vui_ext_nal_hrd_parameters_present_flag || vui_ext_vcl_hrd_parameters_present_flag) { gf_bs_read_int_log(bs, 1, "vui_ext_low_delay_hrd_flag"); } gf_bs_read_int_log(bs, 1, "vui_ext_pic_struct_present_flag"); } } } else if ((profile_idc == 118) || (profile_idc == 128)) { GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[avc-h264] MVC parsing not implemented - skipping parsing end of Subset SPS\n")); return sps_id; } if (gf_bs_read_int_log(bs, 1, "additional_extension2")) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] skipping parsing end of Subset SPS (additional_extension2)\n")); return sps_id; } } return sps_id; } GF_EXPORT s32 gf_avc_read_sps_bs(GF_BitStream *bs, AVCState *avc, u32 
subseq_sps, u32 *vui_flag_pos) { return gf_avc_read_sps_bs_internal(bs, avc, subseq_sps, vui_flag_pos, 0); } GF_EXPORT s32 gf_avc_read_sps(const u8 *sps_data, u32 sps_size, AVCState *avc, u32 subseq_sps, u32 *vui_flag_pos) { s32 sps_id = -1; GF_BitStream *bs; char *sps_data_without_emulation_bytes = NULL; u32 sps_data_without_emulation_bytes_size = 0; if (vui_flag_pos) { /*SPS still contains emulation bytes*/ sps_data_without_emulation_bytes = gf_malloc(sps_size * sizeof(char)); sps_data_without_emulation_bytes_size = gf_media_nalu_remove_emulation_bytes(sps_data, sps_data_without_emulation_bytes, sps_size); bs = gf_bs_new(sps_data_without_emulation_bytes, sps_data_without_emulation_bytes_size, GF_BITSTREAM_READ); *vui_flag_pos = 0; } else { bs = gf_bs_new(sps_data, sps_size, GF_BITSTREAM_READ); } if (!bs) { sps_id = -1; goto exit; } sps_id = gf_avc_read_sps_bs(bs, avc, subseq_sps, vui_flag_pos); exit: gf_bs_del(bs); if (sps_data_without_emulation_bytes) gf_free(sps_data_without_emulation_bytes); return sps_id; } static s32 gf_avc_read_pps_bs_internal(GF_BitStream *bs, AVCState *avc, u32 nal_hdr) { s32 pps_id; AVC_PPS *pps; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!nal_hdr) { gf_bs_read_int_log(bs, 1, "forbidden_zero_bit"); gf_bs_read_int_log(bs, 2, "nal_ref_idc"); gf_bs_read_int_log(bs, 5, "nal_unit_type"); } pps_id = gf_bs_read_ue_log(bs, "pps_id"); if ((pps_id<0) || (pps_id >= 255)) { return -1; } pps = &avc->pps[pps_id]; pps->id = pps_id; if (!pps->status) pps->status = 1; pps->sps_id = gf_bs_read_ue_log(bs, "sps_id"); if ((pps->sps_id<0) || (pps->sps_id >= 32)) { pps->sps_id = 0; return -1; } /*sps_id may be refer to regular SPS or subseq sps, depending on the coded slice referring to the pps*/ if (!avc->sps[pps->sps_id].state && !avc->sps[pps->sps_id + GF_SVC_SSPS_ID_SHIFT].state) { return -1; } avc->pps_active_idx = pps->id; /*set active sps*/ avc->sps_active_idx = pps->sps_id; /*set active sps*/ pps->entropy_coding_mode_flag = gf_bs_read_int_log(bs, 1, "entropy_coding_mode_flag"); pps->pic_order_present = gf_bs_read_int_log(bs, 1, "pic_order_present"); pps->slice_group_count = gf_bs_read_ue_log(bs, "slice_group_count_minus1") + 1; if (pps->slice_group_count > 1) { u32 iGroup; pps->mb_slice_group_map_type = gf_bs_read_ue_log(bs, "mb_slice_group_map_type"); if (pps->mb_slice_group_map_type == 0) { for (iGroup = 0; iGroup <= pps->slice_group_count - 1; iGroup++) gf_bs_read_ue_log_idx(bs, "run_length_minus1", iGroup); } else if (pps->mb_slice_group_map_type == 2) { for (iGroup = 0; iGroup < pps->slice_group_count - 1; iGroup++) { gf_bs_read_ue_log_idx(bs, "top_left", iGroup); gf_bs_read_ue_log_idx(bs, "bottom_right", iGroup); } } else if (pps->mb_slice_group_map_type == 3 || pps->mb_slice_group_map_type == 4 || pps->mb_slice_group_map_type == 5) { gf_bs_read_int_log(bs, 1, "slice_group_change_direction_flag"); gf_bs_read_ue_log(bs, "slice_group_change_rate_minus1"); } else if (pps->mb_slice_group_map_type == 6) { u32 i; pps->pic_size_in_map_units_minus1 = gf_bs_read_ue_log(bs, "pic_size_in_map_units_minus1"); for (i = 0; i <= pps->pic_size_in_map_units_minus1; i++) { gf_bs_read_int_log_idx(bs, (u32)ceil(log(pps->slice_group_count) / log(2)), "slice_group_id", i); } } } pps->num_ref_idx_l0_default_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l0_default_active_minus1"); pps->num_ref_idx_l1_default_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l1_default_active_minus1"); /* if ((pps->ref_count[0] > 32) || (pps->ref_count[1] > 32)) goto exit; */ 
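	/* the two default ref-idx counts read above are what avc_parse_slice() falls back to
	   when the slice header does not set num_ref_idx_active_override_flag */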
pps->weighted_pred_flag = gf_bs_read_int_log(bs, 1, "weighted_pred_flag"); gf_bs_read_int_log(bs, 2, "weighted_bipred_idc"); gf_bs_read_se_log(bs, "init_qp_minus26"); gf_bs_read_se_log(bs, "init_qs_minus26"); gf_bs_read_se_log(bs, "chroma_qp_index_offset"); pps->deblocking_filter_control_present_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_control_present_flag"); gf_bs_read_int_log(bs, 1, "constrained_intra_pred"); pps->redundant_pic_cnt_present = gf_bs_read_int_log(bs, 1, "redundant_pic_cnt_present"); return pps_id; } GF_EXPORT s32 gf_avc_read_pps_bs(GF_BitStream *bs, AVCState *avc) { return gf_avc_read_pps_bs_internal(bs, avc, 0); } GF_EXPORT s32 gf_avc_read_pps(const u8 *pps_data, u32 pps_size, AVCState *avc) { GF_BitStream *bs; s32 pps_id; /*PPS still contains emulation bytes*/ bs = gf_bs_new(pps_data, pps_size, GF_BITSTREAM_READ); if (!bs) { return -1; } gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); pps_id = gf_avc_read_pps_bs(bs, avc); gf_bs_del(bs); return pps_id; } #if 0 //unused s32 gf_avc_read_sps_ext(const char *spse_data, u32 spse_size) { GF_BitStream *bs; s32 sps_id; bs = gf_bs_new(spse_data, spse_size, GF_BITSTREAM_READ); sps_id = gf_avc_read_sps_ext_bs(bs); gf_bs_del(bs); return sps_id; } #endif static s32 SVC_ReadNal_header_extension(GF_BitStream *bs, SVC_NALUHeader *NalHeader) { gf_bs_read_int_log(bs, 1, "reserved_one_bit"); NalHeader->idr_pic_flag = gf_bs_read_int_log(bs, 1, "idr_flag"); NalHeader->priority_id = gf_bs_read_int_log(bs, 6, "priority_id"); gf_bs_read_int_log(bs, 1, "no_inter_layer_pred_flag"); NalHeader->dependency_id = gf_bs_read_int_log(bs, 3, "DependencyId"); NalHeader->quality_id = gf_bs_read_int_log(bs, 4, "quality_id"); NalHeader->temporal_id = gf_bs_read_int_log(bs, 3, "temporal_id"); gf_bs_read_int_log(bs, 1, "use_ref_base_pic_flag"); gf_bs_read_int_log(bs, 1, "discardable_flag"); gf_bs_read_int_log(bs, 1, "output_flag"); gf_bs_read_int_log(bs, 2, "reserved_three_2bits"); return 1; } static void ref_pic_list_modification(GF_BitStream *bs, u32 slice_type) { if (slice_type % 5 != 2 && slice_type % 5 != 4) { if (gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l0")) { u32 idx=0, modification_of_pic_nums_idc; do { modification_of_pic_nums_idc = gf_bs_read_ue_log_idx(bs, "modification_of_pic_nums_idc", idx); if (modification_of_pic_nums_idc == 0 || modification_of_pic_nums_idc == 1) { gf_bs_read_ue_log_idx(bs, "abs_diff_pic_num_minus1", idx); } else if (modification_of_pic_nums_idc == 2) { gf_bs_read_ue_log_idx(bs, "long_term_pic_num", idx); } idx++; } while ((modification_of_pic_nums_idc != 3) && gf_bs_available(bs)); } } if (slice_type % 5 == 1) { if (gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l1")) { u32 idx=0, modification_of_pic_nums_idc; do { modification_of_pic_nums_idc = gf_bs_read_ue_log_idx(bs, "modification_of_pic_nums_idc", idx); if (modification_of_pic_nums_idc == 0 || modification_of_pic_nums_idc == 1) { gf_bs_read_ue_log_idx(bs, "abs_diff_pic_num_minus1", idx); } else if (modification_of_pic_nums_idc == 2) { gf_bs_read_ue_log_idx(bs, "long_term_pic_num", idx); } idx++; } while ((modification_of_pic_nums_idc != 3) && gf_bs_available(bs)); } } } static void pred_weight_table(GF_BitStream *bs, u32 slice_type, u32 ChromaArrayType, u32 num_ref_idx_l0_active_minus1, u32 num_ref_idx_l1_active_minus1) { u32 i, j; gf_bs_read_ue_log(bs, "luma_log2_weight_denom"); if (ChromaArrayType != 0) { gf_bs_read_ue_log(bs, "chroma_log2_weight_denom"); } for (i = 0; i <= num_ref_idx_l0_active_minus1; i++) { if 
(gf_bs_read_int_log_idx(bs, 1, "luma_weight_l0_flag", i)) { gf_bs_read_se_log_idx(bs, "luma_weight_l0", i); gf_bs_read_se_log_idx(bs, "luma_offset_l0", i); } if (ChromaArrayType != 0) { if (gf_bs_read_int_log_idx(bs, 1, "chroma_weight_l0_flag", i)) for (j = 0; j < 2; j++) { gf_bs_read_se_log_idx2(bs, "chroma_weight_l0", i, j); gf_bs_read_se_log_idx2(bs, "chroma_offset_l0", i, j); } } } if (slice_type % 5 == 1) { for (i = 0; i <= num_ref_idx_l1_active_minus1; i++) { if (gf_bs_read_int_log_idx(bs, 1, "luma_weight_l1_flag", i)) { gf_bs_read_se_log_idx(bs, "luma_weight_l1", i); gf_bs_read_se_log_idx(bs, "luma_offset_l1", i); } if (ChromaArrayType != 0) { if (gf_bs_read_int_log_idx(bs, 1, "chroma_weight_l1_flag", i)) { for (j = 0; j < 2; j++) { gf_bs_read_se_log_idx2(bs, "chroma_weight_l1", i, j); gf_bs_read_se_log_idx2(bs, "chroma_offset_l1", i, j); } } } } } } static void dec_ref_pic_marking(GF_BitStream *bs, Bool IdrPicFlag) { if (IdrPicFlag) { gf_bs_read_int_log(bs, 1, "no_output_of_prior_pics_flag"); gf_bs_read_int_log(bs, 1, "long_term_reference_flag"); } else { if (gf_bs_read_int_log(bs, 1, "adaptive_ref_pic_marking_mode_flag")) { u32 idx=0, memory_management_control_operation; do { memory_management_control_operation = gf_bs_read_ue_log_idx(bs, "memory_management_control_operation", idx); if (memory_management_control_operation == 1 || memory_management_control_operation == 3) gf_bs_read_ue_log_idx(bs, "difference_of_pic_nums_minus1", idx); if (memory_management_control_operation == 2) gf_bs_read_ue_log_idx(bs, "long_term_pic_num", idx); if (memory_management_control_operation == 3 || memory_management_control_operation == 6) gf_bs_read_ue_log_idx(bs, "long_term_frame_idx", idx); if (memory_management_control_operation == 4) gf_bs_read_ue_log_idx(bs, "max_long_term_frame_idx_plus1", idx); idx++; } while (memory_management_control_operation != 0); } } } static s32 avc_parse_slice(GF_BitStream *bs, AVCState *avc, Bool svc_idr_flag, AVCSliceInfo *si) { s32 pps_id, num_ref_idx_l0_active_minus1 = 0, num_ref_idx_l1_active_minus1 = 0; /*s->current_picture.reference= h->nal_ref_idc != 0;*/ gf_bs_read_ue_log(bs, "first_mb_in_slice"); si->slice_type = gf_bs_read_ue_log(bs, "slice_type"); if (si->slice_type > 9) return -1; pps_id = gf_bs_read_ue_log(bs, "pps_id"); if (pps_id > 255) return -1; si->pps = &avc->pps[pps_id]; if (!si->pps->slice_group_count) return -2; si->sps = &avc->sps[si->pps->sps_id]; if (!si->sps->log2_max_frame_num) return -2; avc->sps_active_idx = si->pps->sps_id; avc->pps_active_idx = pps_id; si->frame_num = gf_bs_read_int_log(bs, si->sps->log2_max_frame_num, "frame_num"); si->field_pic_flag = 0; si->bottom_field_flag = 0; if (!si->sps->frame_mbs_only_flag) { si->field_pic_flag = gf_bs_read_int_log(bs, 1, "field_pic_flag"); if (si->field_pic_flag) si->bottom_field_flag = gf_bs_read_int_log(bs, 1, "bottom_field_flag"); } if ((si->nal_unit_type == GF_AVC_NALU_IDR_SLICE) || svc_idr_flag) si->idr_pic_id = gf_bs_read_ue_log(bs, "idr_pic_id"); if (si->sps->poc_type == 0) { si->poc_lsb = gf_bs_read_int_log(bs, si->sps->log2_max_poc_lsb, "poc_lsb"); if (si->pps->pic_order_present && !si->field_pic_flag) { si->delta_poc_bottom = gf_bs_read_se_log(bs, "poc_lsb"); } } else if ((si->sps->poc_type == 1) && !si->sps->delta_pic_order_always_zero_flag) { si->delta_poc[0] = gf_bs_read_se_log(bs, "delta_poc0"); if ((si->pps->pic_order_present == 1) && !si->field_pic_flag) si->delta_poc[1] = gf_bs_read_se_log(bs, "delta_poc1"); } if (si->pps->redundant_pic_cnt_present) { si->redundant_pic_cnt = 
gf_bs_read_ue_log(bs, "redundant_pic_cnt"); } if (si->slice_type % 5 == GF_AVC_TYPE_B) { gf_bs_read_int_log(bs, 1, "direct_spatial_mv_pred_flag"); } num_ref_idx_l0_active_minus1 = si->pps->num_ref_idx_l0_default_active_minus1; num_ref_idx_l1_active_minus1 = si->pps->num_ref_idx_l1_default_active_minus1; if (si->slice_type % 5 == GF_AVC_TYPE_P || si->slice_type % 5 == GF_AVC_TYPE_SP || si->slice_type % 5 == GF_AVC_TYPE_B) { Bool num_ref_idx_active_override_flag = gf_bs_read_int_log(bs, 1, "num_ref_idx_active_override_flag"); if (num_ref_idx_active_override_flag) { num_ref_idx_l0_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l0_active_minus1"); if (si->slice_type % 5 == GF_AVC_TYPE_B) { num_ref_idx_l1_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l1_active_minus1"); } } } if (si->nal_unit_type == 20 || si->nal_unit_type == 21) { //ref_pic_list_mvc_modification(); /* specified in Annex H */ GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[avc-h264] unimplemented ref_pic_list_mvc_modification() in slide header\n")); assert(0); return -1; } else { ref_pic_list_modification(bs, si->slice_type); } if ((si->pps->weighted_pred_flag && (si->slice_type % 5 == GF_AVC_TYPE_P || si->slice_type % 5 == GF_AVC_TYPE_SP)) || (si->pps->weighted_bipred_idc == 1 && si->slice_type % 5 == GF_AVC_TYPE_B)) { pred_weight_table(bs, si->slice_type, si->sps->ChromaArrayType, num_ref_idx_l0_active_minus1, num_ref_idx_l1_active_minus1); } if (si->nal_ref_idc != 0) { dec_ref_pic_marking(bs, (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE)); } if (si->pps->entropy_coding_mode_flag && si->slice_type % 5 != GF_AVC_TYPE_I && si->slice_type % 5 != GF_AVC_TYPE_SI) { gf_bs_read_ue_log(bs, "cabac_init_idc"); } /*slice_qp_delta = */gf_bs_read_se(bs); if (si->slice_type % 5 == GF_AVC_TYPE_SP || si->slice_type % 5 == GF_AVC_TYPE_SI) { if (si->slice_type % 5 == GF_AVC_TYPE_SP) { gf_bs_read_int_log(bs, 1, "sp_for_switch_flag"); } gf_bs_read_se_log(bs, "slice_qs_delta"); } if (si->pps->deblocking_filter_control_present_flag) { if (gf_bs_read_ue_log(bs, "disable_deblocking_filter_idc") != 1) { gf_bs_read_se_log(bs, "slice_alpha_c0_offset_div2"); gf_bs_read_se_log(bs, "slice_beta_offset_div2"); } } if (si->pps->slice_group_count > 1 && si->pps->mb_slice_group_map_type >= 3 && si->pps->mb_slice_group_map_type <= 5) { gf_bs_read_int_log(bs, (u32)ceil(log1p((si->pps->pic_size_in_map_units_minus1 + 1) / (si->pps->slice_group_change_rate_minus1 + 1) ) / log(2)), "slice_group_change_cycle"); } return 0; } static s32 svc_parse_slice(GF_BitStream *bs, AVCState *avc, AVCSliceInfo *si) { s32 pps_id; /*s->current_picture.reference= h->nal_ref_idc != 0;*/ gf_bs_read_ue_log(bs, "first_mb_in_slice"); si->slice_type = gf_bs_read_ue_log(bs, "slice_type"); if (si->slice_type > 9) return -1; pps_id = gf_bs_read_ue_log(bs, "pps_id"); if (pps_id > 255) return -1; si->pps = &avc->pps[pps_id]; si->pps->id = pps_id; if (!si->pps->slice_group_count) return -2; si->sps = &avc->sps[si->pps->sps_id + GF_SVC_SSPS_ID_SHIFT]; if (!si->sps->log2_max_frame_num) return -2; si->frame_num = gf_bs_read_int_log(bs, si->sps->log2_max_frame_num, "frame_num"); si->field_pic_flag = 0; if (si->sps->frame_mbs_only_flag) { /*s->picture_structure= PICT_FRAME;*/ } else { si->field_pic_flag = gf_bs_read_int_log(bs, 1, "field_pic_flag"); if (si->field_pic_flag) si->bottom_field_flag = gf_bs_read_int_log(bs, 1, "bottom_field_flag"); } if (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE || si->NalHeader.idr_pic_flag) si->idr_pic_id = gf_bs_read_ue_log(bs, "idr_pic_id"); if (si->sps->poc_type == 
0) { si->poc_lsb = gf_bs_read_int_log(bs, si->sps->log2_max_poc_lsb, "poc_lsb"); if (si->pps->pic_order_present && !si->field_pic_flag) { si->delta_poc_bottom = gf_bs_read_se_log(bs, "delta_poc_bottom"); } } else if ((si->sps->poc_type == 1) && !si->sps->delta_pic_order_always_zero_flag) { si->delta_poc[0] = gf_bs_read_se_log(bs, "delta_poc0"); if ((si->pps->pic_order_present == 1) && !si->field_pic_flag) si->delta_poc[1] = gf_bs_read_se_log(bs, "delta_poc1"); } if (si->pps->redundant_pic_cnt_present) { si->redundant_pic_cnt = gf_bs_read_ue_log(bs, "redundant_pic_cnt"); } return 0; } static s32 avc_parse_recovery_point_sei(GF_BitStream *bs, AVCState *avc) { AVCSeiRecoveryPoint *rp = &avc->sei.recovery_point; rp->frame_cnt = gf_bs_read_ue_log(bs, "frame_cnt"); rp->exact_match_flag = gf_bs_read_int_log(bs, 1, "exact_match_flag"); rp->broken_link_flag = gf_bs_read_int_log(bs, 1, "broken_link_flag"); rp->changing_slice_group_idc = gf_bs_read_int_log(bs, 2, "changing_slice_group_idc"); rp->valid = 1; return 0; } /*for interpretation see ISO 14496-10 N.11084, table D-1*/ static s32 avc_parse_pic_timing_sei(GF_BitStream *bs, AVCState *avc) { int sps_id = avc->sps_active_idx; const char NumClockTS[] = { 1, 1, 1, 2, 2, 3, 3, 2, 3 }; AVCSeiPicTiming *pt = &avc->sei.pic_timing; if (sps_id < 0) { /*sps_active_idx equals -1 when no sps has been detected. In this case SEI should not be decoded.*/ assert(0); return 1; } if (avc->sps[sps_id].vui.nal_hrd_parameters_present_flag || avc->sps[sps_id].vui.vcl_hrd_parameters_present_flag) { /*CpbDpbDelaysPresentFlag, see 14496-10(2003) E.11*/ gf_bs_read_int_log(bs, 1 + avc->sps[sps_id].vui.hrd.cpb_removal_delay_length_minus1, "cpb_removal_delay_minus1"); gf_bs_read_int_log(bs, 1 + avc->sps[sps_id].vui.hrd.dpb_output_delay_length_minus1, "dpb_output_delay_minus1"); } /*ISO 14496-10 (2003), D.8.2: we need to get pic_struct in order to know if we display top field first or bottom field first*/ if (avc->sps[sps_id].vui.pic_struct_present_flag) { int i; pt->pic_struct = gf_bs_read_int_log(bs, 4, "pic_struct"); if (pt->pic_struct > 8) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[avc-h264] invalid pic_struct value %d\n", pt->pic_struct)); return 1; } for (i = 0; i < NumClockTS[pt->pic_struct]; i++) { if (gf_bs_read_int_log_idx(bs, 1, "clock_timestamp_flag", i)) { Bool full_timestamp_flag; gf_bs_read_int_log_idx(bs, 2, "ct_type", i); gf_bs_read_int_log_idx(bs, 1, "nuit_field_based_flag", i); gf_bs_read_int_log_idx(bs, 5, "counting_type", i); full_timestamp_flag = gf_bs_read_int_log_idx(bs, 1, "full_timestamp_flag", i); gf_bs_read_int_log_idx(bs, 1, "discontinuity_flag", i); gf_bs_read_int_log_idx(bs, 1, "cnt_dropped_flag", i); gf_bs_read_int_log_idx(bs, 8, "n_frames", i); if (full_timestamp_flag) { gf_bs_read_int_log_idx(bs, 6, "seconds_value", i); gf_bs_read_int_log_idx(bs, 6, "minutes_value", i); gf_bs_read_int_log_idx(bs, 5, "hours_value", i); } else { if (gf_bs_read_int_log_idx(bs, 1, "seconds_flag", i)) { gf_bs_read_int_log_idx(bs, 6, "seconds_value", i); if (gf_bs_read_int_log_idx(bs, 1, "minutes_flag", i)) { gf_bs_read_int_log_idx(bs, 6, "minutes_value", i); if (gf_bs_read_int_log_idx(bs, 1, "hours_flag", i)) { gf_bs_read_int_log_idx(bs, 5, "hours_value", i); } } } if (avc->sps[sps_id].vui.hrd.time_offset_length > 0) gf_bs_read_int_log_idx(bs, avc->sps[sps_id].vui.hrd.time_offset_length, "time_offset", i); } } } } return 0; } #if !defined(GPAC_DISABLE_HEVC) static void avc_parse_itu_t_t35_sei(GF_BitStream* bs, AVCSeiItuTT35DolbyVision *dovi) { u8 
itu_t_t35_country_code = gf_bs_read_u8(bs); u16 terminal_provider_code = gf_bs_read_u16(bs); u32 user_id = gf_bs_read_u32(bs); u8 data_type_code = gf_bs_read_u8(bs); if (itu_t_t35_country_code == 0xB5 && terminal_provider_code == 0x31 && user_id == 0x47413934 && (data_type_code == 0x8 || data_type_code == 0x9)) { dovi->rpu_flag = GF_TRUE; } } #endif static void avc_compute_poc(AVCSliceInfo *si) { enum { AVC_PIC_FRAME, AVC_PIC_FIELD_TOP, AVC_PIC_FIELD_BOTTOM, } pic_type; s32 field_poc[2] = { 0,0 }; s32 max_frame_num; if (!si->sps) return; max_frame_num = 1 << (si->sps->log2_max_frame_num); /* picture type */ if (si->sps->frame_mbs_only_flag || !si->field_pic_flag) pic_type = AVC_PIC_FRAME; else if (si->bottom_field_flag) pic_type = AVC_PIC_FIELD_BOTTOM; else pic_type = AVC_PIC_FIELD_TOP; /* frame_num_offset */ if (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE) { si->poc_lsb_prev = 0; si->poc_msb_prev = 0; si->frame_num_offset = 0; } else { if (si->frame_num < si->frame_num_prev) si->frame_num_offset = si->frame_num_offset_prev + max_frame_num; else si->frame_num_offset = si->frame_num_offset_prev; } /*ISO 14496-10 N.11084 8.2.1.1*/ if (si->sps->poc_type == 0) { const u32 max_poc_lsb = 1 << (si->sps->log2_max_poc_lsb); /*ISO 14496-10 N.11084 eq (8-3)*/ if ((si->poc_lsb < si->poc_lsb_prev) && (si->poc_lsb_prev - si->poc_lsb >= max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev + max_poc_lsb; else if ((si->poc_lsb > si->poc_lsb_prev) && (si->poc_lsb - si->poc_lsb_prev > max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev - max_poc_lsb; else si->poc_msb = si->poc_msb_prev; /*ISO 14496-10 N.11084 eq (8-4)*/ if (pic_type != AVC_PIC_FIELD_BOTTOM) field_poc[0] = si->poc_msb + si->poc_lsb; /*ISO 14496-10 N.11084 eq (8-5)*/ if (pic_type != AVC_PIC_FIELD_TOP) { if (!si->field_pic_flag) field_poc[1] = field_poc[0] + si->delta_poc_bottom; else field_poc[1] = si->poc_msb + si->poc_lsb; } } /*ISO 14496-10 N.11084 8.2.1.2*/ else if (si->sps->poc_type == 1) { u32 i; s32 abs_frame_num, expected_delta_per_poc_cycle, expected_poc; if (si->sps->poc_cycle_length) abs_frame_num = si->frame_num_offset + si->frame_num; else abs_frame_num = 0; if (!si->nal_ref_idc && (abs_frame_num > 0)) abs_frame_num--; expected_delta_per_poc_cycle = 0; for (i = 0; i < si->sps->poc_cycle_length; i++) expected_delta_per_poc_cycle += si->sps->offset_for_ref_frame[i]; if (abs_frame_num > 0) { const u32 poc_cycle_cnt = (abs_frame_num - 1) / si->sps->poc_cycle_length; const u32 frame_num_in_poc_cycle = (abs_frame_num - 1) % si->sps->poc_cycle_length; expected_poc = poc_cycle_cnt * expected_delta_per_poc_cycle; for (i = 0; i <= frame_num_in_poc_cycle; i++) expected_poc += si->sps->offset_for_ref_frame[i]; } else { expected_poc = 0; } if (!si->nal_ref_idc) expected_poc += si->sps->offset_for_non_ref_pic; field_poc[0] = expected_poc + si->delta_poc[0]; field_poc[1] = field_poc[0] + si->sps->offset_for_top_to_bottom_field; if (pic_type == AVC_PIC_FRAME) field_poc[1] += si->delta_poc[1]; } /*ISO 14496-10 N.11084 8.2.1.3*/ else if (si->sps->poc_type == 2) { int poc; if (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE) { poc = 0; } else { const int abs_frame_num = si->frame_num_offset + si->frame_num; poc = 2 * abs_frame_num; if (!si->nal_ref_idc) poc -= 1; } field_poc[0] = poc; field_poc[1] = poc; } /*ISO 14496-10 N.11084 eq (8-1)*/ if (pic_type == AVC_PIC_FRAME) si->poc = MIN(field_poc[0], field_poc[1]); else if (pic_type == AVC_PIC_FIELD_TOP) si->poc = field_poc[0]; else si->poc = field_poc[1]; } GF_EXPORT s32 gf_avc_parse_nalu(GF_BitStream *bs, 
AVCState *avc) { u8 idr_flag; s32 slice, ret; u32 nal_hdr; AVCSliceInfo n_state; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); nal_hdr = gf_bs_read_u8(bs); slice = 0; memcpy(&n_state, &avc->s_info, sizeof(AVCSliceInfo)); avc->last_nal_type_parsed = n_state.nal_unit_type = nal_hdr & 0x1F; n_state.nal_ref_idc = (nal_hdr >> 5) & 0x3; idr_flag = 0; switch (n_state.nal_unit_type) { case GF_AVC_NALU_ACCESS_UNIT: case GF_AVC_NALU_END_OF_SEQ: case GF_AVC_NALU_END_OF_STREAM: ret = 1; break; case GF_AVC_NALU_SVC_SLICE: SVC_ReadNal_header_extension(bs, &n_state.NalHeader); // slice buffer - read the info and compare. /*ret = */svc_parse_slice(bs, avc, &n_state); if (avc->s_info.nal_ref_idc) { n_state.poc_lsb_prev = avc->s_info.poc_lsb; n_state.poc_msb_prev = avc->s_info.poc_msb; } avc_compute_poc(&n_state); if (avc->s_info.poc != n_state.poc) { memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return 1; } memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return 0; case GF_AVC_NALU_SVC_PREFIX_NALU: SVC_ReadNal_header_extension(bs, &n_state.NalHeader); return 0; case GF_AVC_NALU_IDR_SLICE: case GF_AVC_NALU_NON_IDR_SLICE: case GF_AVC_NALU_DP_A_SLICE: case GF_AVC_NALU_DP_B_SLICE: case GF_AVC_NALU_DP_C_SLICE: slice = 1; /* slice buffer - read the info and compare.*/ ret = avc_parse_slice(bs, avc, idr_flag, &n_state); if (ret < 0) return ret; ret = 0; if ( ((avc->s_info.nal_unit_type > GF_AVC_NALU_IDR_SLICE) || (avc->s_info.nal_unit_type < GF_AVC_NALU_NON_IDR_SLICE)) && (avc->s_info.nal_unit_type != GF_AVC_NALU_SVC_SLICE) ) { break; } if (avc->s_info.frame_num != n_state.frame_num) { ret = 1; break; } if (avc->s_info.field_pic_flag != n_state.field_pic_flag) { ret = 1; break; } if ((avc->s_info.nal_ref_idc != n_state.nal_ref_idc) && (!avc->s_info.nal_ref_idc || !n_state.nal_ref_idc)) { ret = 1; break; } assert(avc->s_info.sps); if (avc->s_info.sps->poc_type == n_state.sps->poc_type) { if (!avc->s_info.sps->poc_type) { if (!n_state.bottom_field_flag && (avc->s_info.poc_lsb != n_state.poc_lsb)) { ret = 1; break; } if (avc->s_info.delta_poc_bottom != n_state.delta_poc_bottom) { ret = 1; break; } } else if (avc->s_info.sps->poc_type == 1) { if (avc->s_info.delta_poc[0] != n_state.delta_poc[0]) { ret = 1; break; } if (avc->s_info.delta_poc[1] != n_state.delta_poc[1]) { ret = 1; break; } } } if (n_state.nal_unit_type == GF_AVC_NALU_IDR_SLICE) { if (avc->s_info.nal_unit_type != GF_AVC_NALU_IDR_SLICE) { /*IdrPicFlag differs in value*/ ret = 1; break; } else if (avc->s_info.idr_pic_id != n_state.idr_pic_id) { /*both IDR and idr_pic_id differs*/ ret = 1; break; } } break; case GF_AVC_NALU_SEQ_PARAM: avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 0, NULL, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_PIC_PARAM: avc->last_ps_idx = gf_avc_read_pps_bs_internal(bs, avc, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SVC_SUBSEQ_PARAM: avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 1, NULL, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SEQ_PARAM_EXT: avc->last_ps_idx = (s32) gf_bs_read_ue(bs); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SEI: case GF_AVC_NALU_FILLER_DATA: return 0; default: if (avc->s_info.nal_unit_type <= GF_AVC_NALU_IDR_SLICE) ret = 1; //To detect change of AU when multiple sps and pps in stream else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEI && avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE) ret = 1; else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEQ_PARAM && 
avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE) ret = 1; else ret = 0; break; } /* save _prev values */ if (ret && avc->s_info.sps) { n_state.frame_num_offset_prev = avc->s_info.frame_num_offset; if ((avc->s_info.sps->poc_type != 2) || (avc->s_info.nal_ref_idc != 0)) n_state.frame_num_prev = avc->s_info.frame_num; if (avc->s_info.nal_ref_idc) { n_state.poc_lsb_prev = avc->s_info.poc_lsb; n_state.poc_msb_prev = avc->s_info.poc_msb; } } if (slice) avc_compute_poc(&n_state); memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return ret; } u32 gf_media_avc_reformat_sei(u8 *buffer, u32 nal_size, Bool isobmf_rewrite, AVCState *avc) { u32 ptype, psize, hdr, var; u32 start; GF_BitStream *bs; GF_BitStream *bs_dest = NULL; u8 nhdr; Bool sei_removed = GF_FALSE; char store; hdr = buffer[0]; if ((hdr & 0x1F) != GF_AVC_NALU_SEI) return 0; if (isobmf_rewrite) bs_dest = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); bs = gf_bs_new(buffer, nal_size, GF_BITSTREAM_READ); gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); nhdr = gf_bs_read_int(bs, 8); if (bs_dest) gf_bs_write_int(bs_dest, nhdr, 8); /*parse SEI*/ while (gf_bs_available(bs)) { Bool do_copy; ptype = 0; while (1) { u8 v = gf_bs_read_int(bs, 8); ptype += v; if (v != 0xFF) break; } psize = 0; while (1) { u8 v = gf_bs_read_int(bs, 8); psize += v; if (v != 0xFF) break; } start = (u32)gf_bs_get_position(bs); do_copy = 1; if (start + psize >= nal_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] SEI user message type %d size error (%d but %d remain), keeping full SEI untouched\n", ptype, psize, nal_size - start)); if (bs_dest) gf_bs_del(bs_dest); return nal_size; } switch (ptype) { /*remove SEI messages forbidden in MP4*/ case 3: /*filler data*/ case 10: /*sub_seq info*/ case 11: /*sub_seq_layer char*/ case 12: /*sub_seq char*/ do_copy = 0; sei_removed = GF_TRUE; break; case 5: /*user unregistered */ store = buffer[start + psize]; buffer[start + psize] = 0; GF_LOG(GF_LOG_DEBUG, GF_LOG_CODING, ("[avc-h264] SEI user message %s\n", buffer + start + 16)); buffer[start + psize] = store; break; case 6: /*recovery point*/ avc_parse_recovery_point_sei(bs, avc); break; case 1: /*pic_timing*/ avc_parse_pic_timing_sei(bs, avc); break; case 0: /*buffering period*/ case 2: /*pan scan rect*/ case 4: /*user registered ITU t35*/ case 7: /*def_rec_pic_marking_repetition*/ case 8: /*spare_pic*/ case 9: /*scene info*/ case 13: /*full frame freeze*/ case 14: /*full frame freeze release*/ case 15: /*full frame snapshot*/ case 16: /*progressive refinement segment start*/ case 17: /*progressive refinement segment end*/ case 18: /*motion constrained slice group*/ default: /*add all unknown SEIs*/ break; } if (do_copy && bs_dest) { var = ptype; while (var >= 255) { gf_bs_write_int(bs_dest, 0xFF, 8); var -= 255; } gf_bs_write_int(bs_dest, var, 8); var = psize; while (var >= 255) { gf_bs_write_int(bs_dest, 0xFF, 8); var -= 255; } gf_bs_write_int(bs_dest, var, 8); gf_bs_seek(bs, start); //bs_read_data does not skip EPB, read byte per byte var = psize; while (var) { gf_bs_write_u8(bs_dest, gf_bs_read_u8(bs)); var--; } } else { gf_bs_seek(bs, start); //bs_skip_bytes does not skip EPB, skip byte per byte while (psize) { gf_bs_read_u8(bs); psize--; } } if (gf_bs_available(bs) <= 2) { var = gf_bs_read_int(bs, 8); if (var != 0x80) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] SEI user message has less than 2 bytes remaining but no end of sei found\n")); } if (bs_dest) gf_bs_write_int(bs_dest, 0x80, 8); break; } } gf_bs_del(bs); //we cannot compare final size and original 
//size since original may have EPB and final does not yet have them
	if (bs_dest && sei_removed) {
		u8 *dst_no_epb = NULL;
		u32 dst_no_epb_size = 0;
		gf_bs_get_content(bs_dest, &dst_no_epb, &dst_no_epb_size);
		//re-add emulation prevention bytes while rewriting the stripped SEI into the caller's buffer (source first, destination second, as elsewhere in this file)
		nal_size = gf_media_nalu_add_emulation_bytes(dst_no_epb, buffer, dst_no_epb_size);
		gf_free(dst_no_epb);
	}
	if (bs_dest) gf_bs_del(bs_dest);
	return nal_size;
}

static u8 avc_hevc_get_sar_idx(u32 w, u32 h)
{
	u32 i, count = GF_ARRAY_LENGTH(avc_hevc_sar);
	for (i = 0; i < count; i++) {
		if ((avc_hevc_sar[i].w == w) && (avc_hevc_sar[i].h == h))
			return i;
	}
	return 0xFF;
}

static void avc_hevc_rewrite_vui(GF_VUIInfo *vui_info, GF_BitStream *orig, GF_BitStream *mod)
{
	/* VUI present flag*/
	Bool vui_present_flag = gf_bs_read_int(orig, 1);

	/*setup default values*/
	Bool aspect_ratio_info_present_flag = 0;
	s32 aspect_ratio_idc = -1;
	u32 ar_n=0, ar_d=0;
	Bool overscan_info_present_flag = 0;
	u32 overscan_info=0;
	u32 video_signal_type_present_flag=0;
	u32 video_format = 5;
	u32 video_full_range_flag = 0;
	u32 colour_description_present_flag = 0;
	u32 colour_primaries = 2;
	u32 transfer_characteristics = 2;
	u32 matrix_coefficients = 2;

	//if VUI is present, read all SAR and overscan values
	if (vui_present_flag) { /* VUI found in input bitstream */
		aspect_ratio_info_present_flag = gf_bs_read_int(orig, 1);
		if (aspect_ratio_info_present_flag) {
			aspect_ratio_idc = gf_bs_read_int(orig, 8); /*aspect_ratio_idc*/
			if (aspect_ratio_idc == 255) {
				ar_n = gf_bs_read_int(orig, 16); /*sar_width*/
				ar_d = gf_bs_read_int(orig, 16); /*sar_height*/
			}
		}

		/*overscan_info_present_flag */
		overscan_info_present_flag = gf_bs_read_int(orig, 1);
		if(overscan_info_present_flag) {
			overscan_info = gf_bs_read_int(orig, 1);
		}

		/* read all video signal related flags first */
		video_signal_type_present_flag = gf_bs_read_int(orig, 1);
		if(video_signal_type_present_flag) {
			video_format = gf_bs_read_int(orig, 3);
			video_full_range_flag = gf_bs_read_int(orig, 1);
			colour_description_present_flag = gf_bs_read_int(orig, 1);
			if(colour_description_present_flag) {
				colour_primaries = gf_bs_read_int(orig, 8);
				transfer_characteristics = gf_bs_read_int(orig, 8);
				matrix_coefficients = gf_bs_read_int(orig, 8);
			}
		}
	}

	//recompute values
	//no change
	if ((vui_info->ar_num<0) || (vui_info->ar_den<0)) {
	}
	//remove par
	else if ((vui_info->ar_num==0) || (vui_info->ar_den==0)) {
		aspect_ratio_info_present_flag = 0;
	}
	//set par
	else {
		aspect_ratio_info_present_flag = 1;
		ar_n = vui_info->ar_num;
		ar_d = vui_info->ar_den;
		aspect_ratio_idc = avc_hevc_get_sar_idx((u32) ar_n, (u32) ar_d);
	}

	if (vui_info->remove_video_info) {
		video_signal_type_present_flag = 0;
	}
	/* correct the values of each flags */
	else if ((vui_info->fullrange==0) && (vui_info->video_format==5) && (vui_info->color_prim==2) && (vui_info->color_tfc==2) && (vui_info->color_matrix==2)) {
		video_signal_type_present_flag = 0; /* all default, nothing to write*/
	}
	else {
		video_signal_type_present_flag = 1;
		video_format = (vui_info->video_format < 0) ? video_format : vui_info->video_format;
		video_full_range_flag = (vui_info->fullrange < 0) ? video_full_range_flag : vui_info->fullrange;
		if ((vui_info->color_prim==2) && (vui_info->color_tfc==2) && (vui_info->color_matrix==2)) {
			colour_description_present_flag = 0;
		} else {
			colour_description_present_flag = 1;
			colour_primaries = (vui_info->color_prim < 0) ? colour_primaries : vui_info->color_prim;
			transfer_characteristics = (vui_info->color_tfc < 0) ? transfer_characteristics : vui_info->color_tfc;
			matrix_coefficients = (vui_info->color_matrix < 0) ?
matrix_coefficients : vui_info->color_matrix; } if ((colour_primaries==2) && (transfer_characteristics==2) && (matrix_coefficients==2)) { colour_description_present_flag = 0; if ((video_format==5) && (video_full_range_flag==0)) video_signal_type_present_flag = 0; } } //always rewrite VUI gf_bs_write_int(mod, 1, 1); gf_bs_write_int(mod, aspect_ratio_info_present_flag, 1); if (aspect_ratio_info_present_flag) { gf_bs_write_int(mod, aspect_ratio_idc, 8); if (aspect_ratio_idc == 255) { gf_bs_write_int(mod, ar_n, 16); gf_bs_write_int(mod, ar_d, 16); } if (vui_info->update) { vui_info->ar_num = ar_n; vui_info->ar_den = ar_d; } } gf_bs_write_int(mod, overscan_info_present_flag, 1); if (overscan_info_present_flag) { gf_bs_write_int(mod, overscan_info, 1); } gf_bs_write_int(mod, video_signal_type_present_flag, 1); if (video_signal_type_present_flag) { gf_bs_write_int(mod, video_format, 3); gf_bs_write_int(mod, video_full_range_flag, 1); gf_bs_write_int(mod, colour_description_present_flag, 1); if (colour_description_present_flag) { gf_bs_write_int(mod, colour_primaries, 8); gf_bs_write_int(mod, transfer_characteristics, 8); gf_bs_write_int(mod, matrix_coefficients, 8); } if (vui_info->update) { vui_info->video_format = video_format; vui_info->fullrange = video_full_range_flag; if (colour_description_present_flag) { vui_info->color_prim = colour_primaries; vui_info->color_tfc = transfer_characteristics; vui_info->color_matrix = matrix_coefficients; } } } /*no VUI in input bitstream but we just inserted one, set all remaining vui flags to 0*/ if (!vui_present_flag) { gf_bs_write_int(mod, 0, 1); /*chroma_location_info_present_flag */ gf_bs_write_int(mod, 0, 1); /*timing_info_present_flag*/ gf_bs_write_int(mod, 0, 1); /*nal_hrd_parameters_present*/ gf_bs_write_int(mod, 0, 1); /*vcl_hrd_parameters_present*/ gf_bs_write_int(mod, 0, 1); /*pic_struct_present*/ gf_bs_write_int(mod, 0, 1); /*bitstream_restriction*/ } /*otherwise we copy over th bits from the input bitrate*/ } GF_Err gf_avc_change_vui(GF_AVCConfig *avcc, GF_VUIInfo *vui_info) { GF_BitStream *orig, *mod; AVCState avc; u32 i, bit_offset, flag; s32 idx; GF_AVCConfigSlot *slc; orig = NULL; memset(&avc, 0, sizeof(AVCState)); avc.sps_active_idx = -1; i=0; while ((slc = (GF_AVCConfigSlot *)gf_list_enum(avcc->sequenceParameterSets, &i))) { u8 *no_emulation_buf = NULL; u32 no_emulation_buf_size = 0, emulation_bytes = 0; idx = gf_avc_read_sps(slc->data, slc->size, &avc, 0, &bit_offset); if (idx<0) { if ( orig ) gf_bs_del(orig); continue; } /*SPS still contains emulation bytes*/ no_emulation_buf = gf_malloc((slc->size - 1) * sizeof(char)); no_emulation_buf_size = gf_media_nalu_remove_emulation_bytes(slc->data + 1, no_emulation_buf, slc->size - 1); orig = gf_bs_new(no_emulation_buf, no_emulation_buf_size, GF_BITSTREAM_READ); gf_bs_read_data(orig, no_emulation_buf, no_emulation_buf_size); gf_bs_seek(orig, 0); mod = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); /*copy over till vui flag*/ assert(bit_offset >= 8); while (bit_offset - 8/*bit_offset doesn't take care of the first byte (NALU type)*/) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); bit_offset--; } avc_hevc_rewrite_vui(vui_info, orig, mod); /*finally copy over remaining*/ while (gf_bs_bits_available(orig)) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); } gf_bs_del(orig); orig = NULL; gf_free(no_emulation_buf); /*set anti-emulation*/ gf_bs_get_content(mod, &no_emulation_buf, &flag); emulation_bytes = gf_media_nalu_emulation_bytes_add_count(no_emulation_buf, flag); if 
(flag+emulation_bytes+1>slc->size) slc->data = (char*)gf_realloc(slc->data, flag+emulation_bytes+1); slc->size = gf_media_nalu_add_emulation_bytes(no_emulation_buf, slc->data + 1, flag) + 1; gf_bs_del(mod); gf_free(no_emulation_buf); } return GF_OK; } GF_EXPORT GF_Err gf_media_avc_change_par(GF_AVCConfig *avcc, s32 ar_n, s32 ar_d) { GF_VUIInfo vuii; memset(&vuii, 0, sizeof(GF_VUIInfo)); vuii.ar_num = ar_n; vuii.ar_den = ar_d; vuii.fullrange = -1; vuii.video_format = -1; vuii.color_prim = -1; vuii.color_tfc = -1; vuii.color_matrix = -1; return gf_avc_change_vui(avcc, &vuii); } GF_EXPORT GF_Err gf_media_avc_change_color(GF_AVCConfig *avcc, s32 fullrange, s32 vidformat, s32 colorprim, s32 transfer, s32 colmatrix) { GF_VUIInfo vuii; memset(&vuii, 0, sizeof(GF_VUIInfo)); vuii.ar_num = -1; vuii.ar_den = -1; vuii.fullrange = fullrange; vuii.video_format = vidformat; vuii.color_prim = colorprim; vuii.color_tfc = transfer; vuii.color_matrix = colmatrix; return gf_avc_change_vui(avcc, &vuii); } GF_EXPORT GF_Err gf_avc_get_sps_info(u8 *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d) { AVCState avc; s32 idx; memset(&avc, 0, sizeof(AVCState)); avc.sps_active_idx = -1; idx = gf_avc_read_sps(sps_data, sps_size, &avc, 0, NULL); if (idx < 0) { return GF_NON_COMPLIANT_BITSTREAM; } if (sps_id) *sps_id = idx; if (width) *width = avc.sps[idx].width; if (height) *height = avc.sps[idx].height; if (par_n) *par_n = avc.sps[idx].vui.par_num ? avc.sps[idx].vui.par_num : (u32)-1; if (par_d) *par_d = avc.sps[idx].vui.par_den ? avc.sps[idx].vui.par_den : (u32)-1; return GF_OK; } GF_EXPORT GF_Err gf_avc_get_pps_info(u8 *pps_data, u32 pps_size, u32 *pps_id, u32 *sps_id) { GF_BitStream *bs; GF_Err e = GF_OK; bs = gf_bs_new(pps_data, pps_size, GF_BITSTREAM_READ); if (!bs) { e = GF_NON_COMPLIANT_BITSTREAM; goto exit; } gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); /*nal hdr*/ gf_bs_read_int(bs, 8); *pps_id = gf_bs_read_ue(bs); *sps_id = gf_bs_read_ue(bs); exit: gf_bs_del(bs); return e; } #ifndef GPAC_DISABLE_HEVC /********** HEVC parsing **********/ Bool gf_hevc_slice_is_intra(HEVCState *hevc) { switch (hevc->s_info.nal_unit_type) { case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: case GF_HEVC_NALU_SLICE_CRA: return GF_TRUE; default: return GF_FALSE; } } Bool gf_hevc_slice_is_IDR(HEVCState *hevc) { if (hevc->sei.recovery_point.valid) { hevc->sei.recovery_point.valid = 0; return GF_TRUE; } switch (hevc->s_info.nal_unit_type) { case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: return GF_TRUE; default: return GF_FALSE; } } static Bool hevc_parse_short_term_ref_pic_set(GF_BitStream *bs, HEVC_SPS *sps, u32 idx_rps) { u32 i; Bool inter_ref_pic_set_prediction_flag = 0; if (idx_rps != 0) inter_ref_pic_set_prediction_flag = gf_bs_read_int_log_idx(bs, 1, "inter_ref_pic_set_prediction_flag", idx_rps); if (inter_ref_pic_set_prediction_flag) { HEVC_ReferencePictureSets *ref_ps, *rps; u32 delta_idx_minus1 = 0; u32 ref_idx; u32 delta_rps_sign; u32 abs_delta_rps_minus1, nb_ref_pics; s32 deltaRPS; u32 k = 0, k0 = 0, k1 = 0; if (idx_rps == sps->num_short_term_ref_pic_sets) delta_idx_minus1 = gf_bs_read_ue_log_idx(bs, "delta_idx_minus1", idx_rps); assert(delta_idx_minus1 <= idx_rps - 1); ref_idx = idx_rps - 1 - delta_idx_minus1; delta_rps_sign = gf_bs_read_int_log_idx(bs, 1, "delta_rps_sign", idx_rps); abs_delta_rps_minus1 = gf_bs_read_ue_log_idx(bs, 
"abs_delta_rps_minus1", idx_rps); deltaRPS = (1 - (delta_rps_sign << 1)) * (abs_delta_rps_minus1 + 1); rps = &sps->rps[idx_rps]; ref_ps = &sps->rps[ref_idx]; nb_ref_pics = ref_ps->num_negative_pics + ref_ps->num_positive_pics; for (i = 0; i <= nb_ref_pics; i++) { s32 ref_idc; s32 used_by_curr_pic_flag = gf_bs_read_int_log_idx2(bs, 1, "used_by_curr_pic_flag", idx_rps, i); ref_idc = used_by_curr_pic_flag ? 1 : 0; if (!used_by_curr_pic_flag) { used_by_curr_pic_flag = gf_bs_read_int_log_idx2(bs, 1, "used_by_curr_pic_flag", idx_rps, i); ref_idc = used_by_curr_pic_flag << 1; } if ((ref_idc == 1) || (ref_idc == 2)) { s32 deltaPOC = deltaRPS; if (i < nb_ref_pics) deltaPOC += ref_ps->delta_poc[i]; rps->delta_poc[k] = deltaPOC; if (deltaPOC < 0) k0++; else k1++; k++; } } rps->num_negative_pics = k0; rps->num_positive_pics = k1; } else { s32 prev = 0, poc; sps->rps[idx_rps].num_negative_pics = gf_bs_read_ue_log_idx(bs, "num_negative_pics", idx_rps); sps->rps[idx_rps].num_positive_pics = gf_bs_read_ue_log_idx(bs, "num_positive_pics", idx_rps); if (sps->rps[idx_rps].num_negative_pics > 16) return GF_FALSE; if (sps->rps[idx_rps].num_positive_pics > 16) return GF_FALSE; for (i = 0; i < sps->rps[idx_rps].num_negative_pics; i++) { u32 delta_poc_s0_minus1 = gf_bs_read_ue_log_idx2(bs, "delta_poc_s0_minus1", idx_rps, i); poc = prev - delta_poc_s0_minus1 - 1; prev = poc; sps->rps[idx_rps].delta_poc[i] = poc; gf_bs_read_int_log_idx2(bs, 1, "delta_poc_s0_minus1", idx_rps, i); } for (i = 0; i < sps->rps[idx_rps].num_positive_pics; i++) { u32 delta_poc_s1_minus1 = gf_bs_read_ue_log_idx2(bs, "delta_poc_s1_minus1" , idx_rps, i); poc = prev + delta_poc_s1_minus1 + 1; prev = poc; sps->rps[idx_rps].delta_poc[i] = poc; gf_bs_read_int_log_idx2(bs, 1, "used_by_curr_pic_s1_flag", idx_rps, i); } } return GF_TRUE; } void hevc_pred_weight_table(GF_BitStream *bs, HEVCState *hevc, HEVCSliceInfo *si, HEVC_PPS *pps, HEVC_SPS *sps, u32 num_ref_idx_l0_active, u32 num_ref_idx_l1_active) { u32 i, num_ref_idx; Bool first_pass = GF_TRUE; u8 luma_weights[20], chroma_weights[20]; u32 ChromaArrayType = sps->separate_colour_plane_flag ? 
0 : sps->chroma_format_idc; num_ref_idx = num_ref_idx_l0_active; gf_bs_read_ue_log(bs, "luma_log2_weight_denom"); if (ChromaArrayType != 0) gf_bs_read_se_log(bs, "delta_chroma_log2_weight_denom"); parse_weights: for (i = 0; i < num_ref_idx; i++) { luma_weights[i] = gf_bs_read_int_log_idx(bs, 1, "luma_weights", i); //infered to be 0 if not present chroma_weights[i] = 0; } if (ChromaArrayType != 0) { for (i = 0; i < num_ref_idx; i++) { chroma_weights[i] = gf_bs_read_int_log_idx(bs, 1, "chroma_weights", i); } } for (i = 0; i < num_ref_idx; i++) { if (luma_weights[i]) { gf_bs_read_se_log_idx(bs, "delta_luma_weight_l0", i); gf_bs_read_se_log_idx(bs, "luma_offset_l0", i); } if (chroma_weights[i]) { gf_bs_read_se_log_idx(bs, "delta_chroma_weight_l0_0", i); gf_bs_read_se_log_idx(bs, "delta_chroma_offset_l0_0", i); gf_bs_read_se_log_idx(bs, "delta_chroma_weight_l0_1", i); gf_bs_read_se_log_idx(bs, "delta_chroma_offset_l0_1", i); } } if (si->slice_type == GF_HEVC_SLICE_TYPE_B) { if (!first_pass) return; first_pass = GF_FALSE; num_ref_idx = num_ref_idx_l1_active; goto parse_weights; } } static Bool ref_pic_lists_modification(GF_BitStream *bs, u32 slice_type, u32 num_ref_idx_l0_active, u32 num_ref_idx_l1_active) { //u32 i; Bool ref_pic_list_modification_flag_l0 = gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l0"); if (ref_pic_list_modification_flag_l0) { /*for (i=0; i<num_ref_idx_l0_active; i++) { list_entry_l0[i] = *//*gf_bs_read_int(bs, (u32)ceil(log(getNumPicTotalCurr())/log(2))); }*/ return GF_FALSE; } if (slice_type == GF_HEVC_SLICE_TYPE_B) { Bool ref_pic_list_modification_flag_l1 = gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l1"); if (ref_pic_list_modification_flag_l1) { /*for (i=0; i<num_ref_idx_l1_active; i++) { list_entry_l1[i] = *//*gf_bs_read_int(bs, (u32)ceil(log(getNumPicTotalCurr()) / log(2))); }*/ return GF_FALSE; } } return GF_TRUE; } static s32 hevc_parse_slice_segment(GF_BitStream *bs, HEVCState *hevc, HEVCSliceInfo *si) { u32 i, j; u32 num_ref_idx_l0_active = 0, num_ref_idx_l1_active = 0; HEVC_PPS *pps; HEVC_SPS *sps; s32 pps_id; Bool RapPicFlag = GF_FALSE; Bool IDRPicFlag = GF_FALSE; si->first_slice_segment_in_pic_flag = gf_bs_read_int_log(bs, 1, "first_slice_segment_in_pic_flag"); switch (si->nal_unit_type) { case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: IDRPicFlag = GF_TRUE; RapPicFlag = GF_TRUE; break; case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: case GF_HEVC_NALU_SLICE_CRA: RapPicFlag = GF_TRUE; break; } if (RapPicFlag) { gf_bs_read_int_log(bs, 1, "no_output_of_prior_pics_flag"); } pps_id = gf_bs_read_ue_log(bs, "pps_id"); if ((pps_id<0) || (pps_id >= 64)) return -1; pps = &hevc->pps[pps_id]; sps = &hevc->sps[pps->sps_id]; si->sps = sps; si->pps = pps; if (!si->first_slice_segment_in_pic_flag && pps->dependent_slice_segments_enabled_flag) { si->dependent_slice_segment_flag = gf_bs_read_int_log(bs, 1, "dependent_slice_segment_flag"); } else { si->dependent_slice_segment_flag = GF_FALSE; } if (!si->first_slice_segment_in_pic_flag) { si->slice_segment_address = gf_bs_read_int_log(bs, sps->bitsSliceSegmentAddress, "slice_segment_address"); } else { si->slice_segment_address = 0; } if (!si->dependent_slice_segment_flag) { Bool deblocking_filter_override_flag = 0; Bool slice_temporal_mvp_enabled_flag = 0; Bool slice_sao_luma_flag = 0; Bool slice_sao_chroma_flag = 0; Bool slice_deblocking_filter_disabled_flag = 0; //"slice_reserved_undetermined_flag[]" gf_bs_read_int_log(bs, 
pps->num_extra_slice_header_bits, "slice_reserved_undetermined_flag"); si->slice_type = gf_bs_read_ue_log(bs, "slice_type"); if (pps->output_flag_present_flag) gf_bs_read_int_log(bs, 1, "pic_output_flag"); if (sps->separate_colour_plane_flag == 1) gf_bs_read_int_log(bs, 2, "colour_plane_id"); if (IDRPicFlag) { si->poc_lsb = 0; //if not asked to parse full header, abort since we know the poc if (!hevc->full_slice_header_parse) return 0; } else { si->poc_lsb = gf_bs_read_int_log(bs, sps->log2_max_pic_order_cnt_lsb, "poc_lsb"); //if not asked to parse full header, abort once we have the poc if (!hevc->full_slice_header_parse) return 0; if (gf_bs_read_int_log(bs, 1, "short_term_ref_pic_set_sps_flag") == 0) { Bool ret = hevc_parse_short_term_ref_pic_set(bs, sps, sps->num_short_term_ref_pic_sets); if (!ret) return -1; } else if (sps->num_short_term_ref_pic_sets > 1) { u32 numbits = 0; while ((u32)(1 << numbits) < sps->num_short_term_ref_pic_sets) numbits++; if (numbits > 0) gf_bs_read_int_log(bs, numbits, "short_term_ref_pic_set_idx"); /*else short_term_ref_pic_set_idx = 0;*/ } if (sps->long_term_ref_pics_present_flag) { u8 DeltaPocMsbCycleLt[32]; u32 num_long_term_sps = 0; u32 num_long_term_pics = 0; memset(DeltaPocMsbCycleLt, 0, sizeof(u8) * 32); if (sps->num_long_term_ref_pic_sps > 0) { num_long_term_sps = gf_bs_read_ue_log(bs, "num_long_term_sps"); } num_long_term_pics = gf_bs_read_ue_log(bs, "num_long_term_pics"); for (i = 0; i < num_long_term_sps + num_long_term_pics; i++) { if (i < num_long_term_sps) { if (sps->num_long_term_ref_pic_sps > 1) gf_bs_read_int_log_idx(bs, gf_get_bit_size(sps->num_long_term_ref_pic_sps), "lt_idx_sps", i); } else { gf_bs_read_int_log_idx(bs, sps->log2_max_pic_order_cnt_lsb, "PocLsbLt", i); gf_bs_read_int_log_idx(bs, 1, "UsedByCurrPicLt", i); } if (gf_bs_read_int_log_idx(bs, 1, "delta_poc_msb_present_flag", i)) { if (i == 0 || i == num_long_term_sps) DeltaPocMsbCycleLt[i] = gf_bs_read_ue_log_idx(bs, "DeltaPocMsbCycleLt", i); else DeltaPocMsbCycleLt[i] = gf_bs_read_ue_log_idx(bs, "DeltaPocMsbCycleLt", i) + DeltaPocMsbCycleLt[i - 1]; } } } if (sps->temporal_mvp_enable_flag) slice_temporal_mvp_enabled_flag = gf_bs_read_int_log(bs, 1, "slice_temporal_mvp_enabled_flag"); } if (sps->sample_adaptive_offset_enabled_flag) { u32 ChromaArrayType = sps->separate_colour_plane_flag ? 
0 : sps->chroma_format_idc; slice_sao_luma_flag = gf_bs_read_int_log(bs, 1, "slice_sao_luma_flag"); if (ChromaArrayType != 0) slice_sao_chroma_flag = gf_bs_read_int_log(bs, 1, "slice_sao_chroma_flag"); } if (si->slice_type == GF_HEVC_SLICE_TYPE_P || si->slice_type == GF_HEVC_SLICE_TYPE_B) { //u32 NumPocTotalCurr; num_ref_idx_l0_active = pps->num_ref_idx_l0_default_active; num_ref_idx_l1_active = 0; if (si->slice_type == GF_HEVC_SLICE_TYPE_B) num_ref_idx_l1_active = pps->num_ref_idx_l1_default_active; if (gf_bs_read_int_log(bs, 1, "num_ref_idx_active_override_flag")) { num_ref_idx_l0_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l0_active"); if (si->slice_type == GF_HEVC_SLICE_TYPE_B) num_ref_idx_l1_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l1_active"); } if (pps->lists_modification_present_flag /*TODO: && NumPicTotalCurr > 1*/) { if (!ref_pic_lists_modification(bs, si->slice_type, num_ref_idx_l0_active, num_ref_idx_l1_active)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[hevc] ref_pic_lists_modification( ) not implemented\n")); return -1; } } if (si->slice_type == GF_HEVC_SLICE_TYPE_B) gf_bs_read_int_log(bs, 1, "mvd_l1_zero_flag"); if (pps->cabac_init_present_flag) gf_bs_read_int_log(bs, 1, "cabac_init_flag"); if (slice_temporal_mvp_enabled_flag) { // When collocated_from_l0_flag is not present, it is inferred to be equal to 1. Bool collocated_from_l0_flag = 1; if (si->slice_type == GF_HEVC_SLICE_TYPE_B) collocated_from_l0_flag = gf_bs_read_int_log(bs, 1, "collocated_from_l0_flag"); if ((collocated_from_l0_flag && (num_ref_idx_l0_active > 1)) || (!collocated_from_l0_flag && (num_ref_idx_l1_active > 1)) ) { gf_bs_read_ue_log(bs, "collocated_ref_idx"); } } if ((pps->weighted_pred_flag && si->slice_type == GF_HEVC_SLICE_TYPE_P) || (pps->weighted_bipred_flag && si->slice_type == GF_HEVC_SLICE_TYPE_B) ) { hevc_pred_weight_table(bs, hevc, si, pps, sps, num_ref_idx_l0_active, num_ref_idx_l1_active); } gf_bs_read_ue_log(bs, "five_minus_max_num_merge_cand"); } si->slice_qp_delta_start_bits = (s32) (gf_bs_get_position(bs) - 1) * 8 + gf_bs_get_bit_position(bs); si->slice_qp_delta = gf_bs_read_se_log(bs, "slice_qp_delta"); if (pps->slice_chroma_qp_offsets_present_flag) { gf_bs_read_se_log(bs, "slice_cb_qp_offset"); gf_bs_read_se_log(bs, "slice_cr_qp_offset"); } if (pps->deblocking_filter_override_enabled_flag) { deblocking_filter_override_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_override_flag"); } if (deblocking_filter_override_flag) { slice_deblocking_filter_disabled_flag = gf_bs_read_int_log(bs, 1, "slice_deblocking_filter_disabled_flag"); if (!slice_deblocking_filter_disabled_flag) { gf_bs_read_se_log(bs, "slice_beta_offset_div2"); gf_bs_read_se_log(bs, "slice_tc_offset_div2"); } } if (pps->loop_filter_across_slices_enabled_flag && (slice_sao_luma_flag || slice_sao_chroma_flag || !slice_deblocking_filter_disabled_flag) ) { gf_bs_read_int_log(bs, 1, "slice_loop_filter_across_slices_enabled_flag"); } } //dependent slice segment else { //if not asked to parse full header, abort if (!hevc->full_slice_header_parse) return 0; } si->entry_point_start_bits = ((u32)gf_bs_get_position(bs) - 1) * 8 + gf_bs_get_bit_position(bs); if (pps->tiles_enabled_flag || pps->entropy_coding_sync_enabled_flag) { u32 num_entry_point_offsets = gf_bs_read_ue_log(bs, "num_entry_point_offsets"); if (num_entry_point_offsets > 0) { u32 offset = gf_bs_read_ue_log(bs, "offset") + 1; u32 segments = offset >> 4; s32 remain = (offset & 15); for (i = 0; i < num_entry_point_offsets; i++) { //u32 res = 0; for (j = 0; j < 
segments; j++) { //res <<= 16; /*res +=*/ gf_bs_read_int(bs, 16); } if (remain) { //res <<= remain; /* res += */ gf_bs_read_int(bs, remain); } // entry_point_offset = val + 1; // +1; // +1 to get the size } } } if (pps->slice_segment_header_extension_present_flag) { u32 size_ext = gf_bs_read_ue_log(bs, "size_ext"); while (size_ext) { gf_bs_read_int(bs, 8); size_ext--; } } si->header_size_bits = (gf_bs_get_position(bs) - 1) * 8 + gf_bs_get_bit_position(bs); // av_parser.c modified on 16 jan. 2019 if (gf_bs_read_int_log(bs, 1, "byte_align") == 0) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("Error parsing slice header: byte_align not found at end of header !\n")); } gf_bs_align(bs); si->payload_start_offset = (s32)gf_bs_get_position(bs); return 0; } static void gf_hevc_vvc_parse_sei(char *buffer, u32 nal_size, HEVCState *hevc, VVCState *vvc) { u32 ptype, psize, hdr; u64 start; GF_BitStream *bs; hdr = buffer[0]; if (((hdr & 0x7e) >> 1) != GF_HEVC_NALU_SEI_PREFIX) return; bs = gf_bs_new(buffer, nal_size, GF_BITSTREAM_READ); gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); gf_bs_read_int(bs, 16); /*parse SEI*/ while (gf_bs_available(bs)) { u32 consumed; ptype = 0; while (gf_bs_peek_bits(bs, 8, 0)==0xFF) { gf_bs_read_int(bs, 8); ptype += 255; } ptype += gf_bs_read_int(bs, 8); psize = 0; while (gf_bs_peek_bits(bs, 8, 0)==0xFF) { gf_bs_read_int(bs, 8); psize += 255; } psize += gf_bs_read_int(bs, 8); start = gf_bs_get_position(bs); if (start+psize >= nal_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[%s] SEI user message type %d size error (%d but %d remain), skipping SEI message\n", hevc ? "HEVC" : "VVC", ptype, psize, nal_size-start)); break; } switch (ptype) { case 4: /*user registered ITU-T T35*/ if (hevc) { avc_parse_itu_t_t35_sei(bs, &hevc->sei.dovi); } break; default: break; } gf_bs_align(bs); consumed = (u32) (gf_bs_get_position(bs) - start); psize-=consumed; gf_bs_skip_bytes(bs, psize); if (gf_bs_available(bs) <= 2) break; } gf_bs_del(bs); } void gf_hevc_parse_sei(char *buffer, u32 nal_size, HEVCState *hevc) { gf_hevc_vvc_parse_sei(buffer, nal_size, hevc, NULL); } static void hevc_compute_poc(HEVCSliceInfo *si) { u32 max_poc_lsb = 1 << (si->sps->log2_max_pic_order_cnt_lsb); /*POC reset for IDR frames, NOT for CRA*/ switch (si->nal_unit_type) { case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: si->poc_lsb_prev = 0; si->poc_msb_prev = 0; break; } if ((si->poc_lsb < si->poc_lsb_prev) && (si->poc_lsb_prev - si->poc_lsb >= max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev + max_poc_lsb; else if ((si->poc_lsb > si->poc_lsb_prev) && (si->poc_lsb - si->poc_lsb_prev > max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev - max_poc_lsb; else si->poc_msb = si->poc_msb_prev; switch (si->nal_unit_type) { case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: si->poc_msb = 0; break; } si->poc = si->poc_msb + si->poc_lsb; } static Bool hevc_parse_nal_header(GF_BitStream *bs, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { u32 val; val = gf_bs_read_int_log(bs, 1, "forbidden_zero"); if (val) return GF_FALSE; val = gf_bs_read_int_log(bs, 6, "nuh_type"); if (nal_unit_type) *nal_unit_type = val; val = gf_bs_read_int_log(bs, 6, "layerID"); if (layer_id) *layer_id = val; val = gf_bs_read_int_log(bs, 3, "temporalID"); if (!val) return GF_FALSE; val -= 1; if (temporal_id) *temporal_id = val; return GF_TRUE; } void hevc_profile_tier_level(GF_BitStream *bs, Bool ProfilePresentFlag, u8 MaxNumSubLayersMinus1, HEVC_ProfileTierLevel *ptl, u32 idx) { u32 i; 
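	/* profile_tier_level() layout parsed below: when ProfilePresentFlag is set, 11 bytes of
	   general PTL are read (2-bit profile_space, 1-bit tier_flag, 5-bit profile_idc, 32
	   compatibility flags, 4 constraint flags, 44 reserved bits); the 8-bit general level_idc
	   always follows. Then one profile_present/level_present flag pair is read per sub-layer,
	   reserved_zero_2bits pad the remaining slots up to 8 when MaxNumSubLayersMinus1 > 0, and
	   only the flagged sub-layer PTL/level data is present after that. */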
if (ProfilePresentFlag) { ptl->profile_space = gf_bs_read_int_log_idx(bs, 2, "profile_space", idx); ptl->tier_flag = gf_bs_read_int_log_idx(bs, 1, "tier_flag", idx); ptl->profile_idc = gf_bs_read_int_log_idx(bs, 5, "profile_idc", idx); ptl->profile_compatibility_flag = gf_bs_read_int_log_idx(bs, 32, "profile_compatibility_flag", idx); ptl->general_progressive_source_flag = gf_bs_read_int_log_idx(bs, 1, "general_progressive_source_flag", idx); ptl->general_interlaced_source_flag = gf_bs_read_int_log_idx(bs, 1, "general_interlaced_source_flag", idx); ptl->general_non_packed_constraint_flag = gf_bs_read_int_log_idx(bs, 1, "general_non_packed_constraint_flag", idx); ptl->general_frame_only_constraint_flag = gf_bs_read_int_log_idx(bs, 1, "general_frame_only_constraint_flag", idx); ptl->general_reserved_44bits = gf_bs_read_long_int(bs, 44); } ptl->level_idc = gf_bs_read_int_log(bs, 8, "level_idc"); for (i = 0; i < MaxNumSubLayersMinus1; i++) { ptl->sub_ptl[i].profile_present_flag = gf_bs_read_int_log_idx2(bs, 1, "profile_present_flag", idx, i); ptl->sub_ptl[i].level_present_flag = gf_bs_read_int_log_idx2(bs, 1, "level_present_flag", idx, i); } if (MaxNumSubLayersMinus1 > 0) { for (i = MaxNumSubLayersMinus1; i < 8; i++) { /*reserved_zero_2bits*/gf_bs_read_int(bs, 2); } } for (i = 0; i < MaxNumSubLayersMinus1; i++) { if (ptl->sub_ptl[i].profile_present_flag) { ptl->sub_ptl[i].profile_space = gf_bs_read_int_log_idx2(bs, 2, "sublayer_profile_space", idx, i); ptl->sub_ptl[i].tier_flag = gf_bs_read_int_log_idx2(bs, 1, "sublayer_tier_flag", idx, i); ptl->sub_ptl[i].profile_idc = gf_bs_read_int_log_idx2(bs, 5, "sublayer_profile_idc", idx, i); ptl->sub_ptl[i].profile_compatibility_flag = gf_bs_read_int_log_idx2(bs, 32, "sublayer_profile_compatibility_flag", idx, i); /*ptl->sub_ptl[i].progressive_source_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_progressive_source_flag", idx, i); /*ptl->sub_ptl[i].interlaced_source_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_interlaced_source_flag", idx, i); /*ptl->sub_ptl[i].non_packed_constraint_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_non_packed_constraint_flag", idx, i); /*ptl->sub_ptl[i].frame_only_constraint_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_frame_only_constraint_flag", idx, i); /*ptl->sub_ptl[i].reserved_44bits =*/ gf_bs_read_long_int(bs, 44); } if (ptl->sub_ptl[i].level_present_flag) ptl->sub_ptl[i].level_idc = gf_bs_read_int_log_idx2(bs, 8, "sublayer_level_idc", idx, i); } } static u32 scalability_type_to_idx(HEVC_VPS *vps, u32 scalability_type) { u32 idx = 0, type; for (type = 0; type < scalability_type; type++) { idx += (vps->scalability_mask[type] ? 
1 : 0); } return idx; } #define LHVC_VIEW_ORDER_INDEX 1 #define LHVC_SCALABILITY_INDEX 2 static u32 lhvc_get_scalability_id(HEVC_VPS *vps, u32 layer_id_in_vps, u32 scalability_type) { u32 idx; if (!vps->scalability_mask[scalability_type]) return 0; idx = scalability_type_to_idx(vps, scalability_type); return vps->dimension_id[layer_id_in_vps][idx]; } static u32 lhvc_get_view_index(HEVC_VPS *vps, u32 id) { return lhvc_get_scalability_id(vps, vps->layer_id_in_vps[id], LHVC_VIEW_ORDER_INDEX); } static u32 lhvc_get_num_views(HEVC_VPS *vps) { u32 numViews = 1, i; for (i = 0; i < vps->max_layers; i++) { u32 layer_id = vps->layer_id_in_nuh[i]; if (i > 0 && (lhvc_get_view_index(vps, layer_id) != lhvc_get_scalability_id(vps, i - 1, LHVC_VIEW_ORDER_INDEX))) { numViews++; } } return numViews; } static void lhvc_parse_rep_format(HEVC_RepFormat *fmt, GF_BitStream *bs, u32 idx) { u8 chroma_bitdepth_present_flag; fmt->pic_width_luma_samples = gf_bs_read_int_log_idx(bs, 16, "pic_width_luma_samples", idx); fmt->pic_height_luma_samples = gf_bs_read_int_log_idx(bs, 16, "pic_height_luma_samples", idx); chroma_bitdepth_present_flag = gf_bs_read_int_log_idx(bs, 1, "chroma_bitdepth_present_flag", idx); if (chroma_bitdepth_present_flag) { fmt->chroma_format_idc = gf_bs_read_int_log_idx(bs, 2, "chroma_format_idc", idx); if (fmt->chroma_format_idc == 3) fmt->separate_colour_plane_flag = gf_bs_read_int_log_idx(bs, 1, "separate_colour_plane_flag", idx); fmt->bit_depth_luma = 8 + gf_bs_read_int_log_idx(bs, 4, "bit_depth_luma_minus8", idx); fmt->bit_depth_chroma = 8 + gf_bs_read_int_log_idx(bs, 4, "bit_depth_chroma_minus8", idx); } if (gf_bs_read_int_log_idx(bs, 1, "conformance_window_vps_flag", idx)) { gf_bs_read_ue_log_idx(bs, "conf_win_vps_left_offset", idx); gf_bs_read_ue_log_idx(bs, "conf_win_vps_right_offset", idx); gf_bs_read_ue_log_idx(bs, "conf_win_vps_top_offset", idx); gf_bs_read_ue_log_idx(bs, "conf_win_vps_bottom_offset", idx); } } static Bool hevc_parse_vps_extension(HEVC_VPS *vps, GF_BitStream *bs) { u8 splitting_flag, vps_nuh_layer_id_present_flag, view_id_len; u32 i, j, num_scalability_types, num_add_olss, num_add_layer_set, num_indepentdent_layers, nb_bits, default_output_layer_idc = 0; u8 dimension_id_len[16], dim_bit_offset[16]; u8 /*avc_base_layer_flag, */NumLayerSets, /*default_one_target_output_layer_flag, */rep_format_idx_present_flag, ols_ids_to_ls_idx; u8 layer_set_idx_for_ols_minus1[MAX_LHVC_LAYERS]; u8 nb_output_layers_in_output_layer_set[MAX_LHVC_LAYERS + 1]; u8 ols_highest_output_layer_id[MAX_LHVC_LAYERS + 1]; u32 k, d, r, p, iNuhLId, jNuhLId; u8 num_direct_ref_layers[64], num_pred_layers[64], num_layers_in_tree_partition[MAX_LHVC_LAYERS]; u8 dependency_flag[MAX_LHVC_LAYERS][MAX_LHVC_LAYERS], id_pred_layers[64][MAX_LHVC_LAYERS]; // u8 num_ref_layers[64]; // u8 tree_partition_layer_id[MAX_LHVC_LAYERS][MAX_LHVC_LAYERS]; // u8 id_ref_layers[64][MAX_LHVC_LAYERS]; // u8 id_direct_ref_layers[64][MAX_LHVC_LAYERS]; u8 layer_id_in_list_flag[64]; Bool OutputLayerFlag[MAX_LHVC_LAYERS][MAX_LHVC_LAYERS]; vps->vps_extension_found = 1; if ((vps->max_layers > 1) && vps->base_layer_internal_flag) hevc_profile_tier_level(bs, 0, vps->max_sub_layers - 1, &vps->ext_ptl[0], 0); splitting_flag = gf_bs_read_int_log(bs, 1, "splitting_flag"); num_scalability_types = 0; for (i = 0; i < 16; i++) { vps->scalability_mask[i] = gf_bs_read_int_log_idx(bs, 1, "scalability_mask", i); num_scalability_types += vps->scalability_mask[i]; } if (num_scalability_types >= 16) { num_scalability_types = 16; } dimension_id_len[0] = 0; 
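	/* When splitting_flag is 1, the per-layer dimension_id values are not coded: they are
	   recovered below from bit fields of nuh_layer_id using dim_bit_offset[]. Illustrative
	   example (values chosen only for illustration): with two scalability types of
	   dimension_id_len 2 and 4, dim_bit_offset[] = {0, 2, 6}, so for a layer with nuh_layer_id L:
	       dimension_id[i][0] = (L & ((1 << 2) - 1)) >> 0;   // low 2 bits
	       dimension_id[i][1] = (L & ((1 << 6) - 1)) >> 2;   // next 4 bits
	   When splitting_flag is 0, each dimension_id is read explicitly with dimension_id_len[j] bits. */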
for (i = 0; i < (num_scalability_types - splitting_flag); i++) { dimension_id_len[i] = 1 + gf_bs_read_int_log_idx(bs, 3, "dimension_id_len_minus1", i); } if (splitting_flag) { for (i = 0; i < num_scalability_types; i++) { dim_bit_offset[i] = 0; for (j = 0; j < i; j++) dim_bit_offset[i] += dimension_id_len[j]; } dimension_id_len[num_scalability_types - 1] = 1 + (5 - dim_bit_offset[num_scalability_types - 1]); dim_bit_offset[num_scalability_types] = 6; } vps_nuh_layer_id_present_flag = gf_bs_read_int_log(bs, 1, "vps_nuh_layer_id_present_flag"); vps->layer_id_in_nuh[0] = 0; vps->layer_id_in_vps[0] = 0; for (i = 1; i < vps->max_layers; i++) { if (vps_nuh_layer_id_present_flag) { vps->layer_id_in_nuh[i] = gf_bs_read_int_log_idx(bs, 6, "layer_id_in_nuh", i); } else { vps->layer_id_in_nuh[i] = i; } vps->layer_id_in_vps[vps->layer_id_in_nuh[i]] = i; if (!splitting_flag) { for (j = 0; j < num_scalability_types; j++) { vps->dimension_id[i][j] = gf_bs_read_int_log_idx2(bs, dimension_id_len[j], "dimension_id", i, j); } } } if (splitting_flag) { for (i = 0; i < vps->max_layers; i++) for (j = 0; j < num_scalability_types; j++) vps->dimension_id[i][j] = ((vps->layer_id_in_nuh[i] & ((1 << dim_bit_offset[j + 1]) - 1)) >> dim_bit_offset[j]); } else { for (j = 0; j < num_scalability_types; j++) vps->dimension_id[0][j] = 0; } view_id_len = gf_bs_read_int_log(bs, 4, "view_id_len"); if (view_id_len > 0) { for (i = 0; i < lhvc_get_num_views(vps); i++) { gf_bs_read_int_log_idx(bs, view_id_len, "view_id_val", i); } } for (i = 1; i < vps->max_layers; i++) { for (j = 0; j < i; j++) { vps->direct_dependency_flag[i][j] = gf_bs_read_int_log_idx(bs, 1, "direct_dependency_flag", i); } } //we do the test on MAX_LHVC_LAYERS and break in the loop to avoid a wrong GCC 4.8 warning on array bounds for (i = 0; i < MAX_LHVC_LAYERS; i++) { if (i >= vps->max_layers) break; for (j = 0; j < vps->max_layers; j++) { dependency_flag[i][j] = vps->direct_dependency_flag[i][j]; for (k = 0; k < i; k++) if (vps->direct_dependency_flag[i][k] && vps->direct_dependency_flag[k][j]) dependency_flag[i][j] = 1; } } for (i = 0; i < vps->max_layers; i++) { iNuhLId = vps->layer_id_in_nuh[i]; d = r = p = 0; for (j = 0; j < vps->max_layers; j++) { jNuhLId = vps->layer_id_in_nuh[j]; if (vps->direct_dependency_flag[i][j]) { // id_direct_ref_layers[iNuhLId][d] = jNuhLId; d++; } if (dependency_flag[i][j]) { // id_ref_layers[iNuhLId][r] = jNuhLId; r++; } if (dependency_flag[j][i]) id_pred_layers[iNuhLId][p++] = jNuhLId; } num_direct_ref_layers[iNuhLId] = d; // num_ref_layers[iNuhLId] = r; num_pred_layers[iNuhLId] = p; } memset(layer_id_in_list_flag, 0, 64 * sizeof(u8)); k = 0; //num_indepentdent_layers for (i = 0; i < vps->max_layers; i++) { iNuhLId = vps->layer_id_in_nuh[i]; if (!num_direct_ref_layers[iNuhLId]) { u32 h = 1; //tree_partition_layer_id[k][0] = iNuhLId; for (j = 0; j < num_pred_layers[iNuhLId]; j++) { u32 predLId = id_pred_layers[iNuhLId][j]; if (!layer_id_in_list_flag[predLId]) { //tree_partition_layer_id[k][h++] = predLId; layer_id_in_list_flag[predLId] = 1; } } num_layers_in_tree_partition[k++] = h; } } num_indepentdent_layers = k; num_add_layer_set = 0; if (num_indepentdent_layers > 1) num_add_layer_set = gf_bs_read_ue_log(bs, "num_add_layer_set"); for (i = 0; i < num_add_layer_set; i++) for (j = 1; j < num_indepentdent_layers; j++) { nb_bits = 1; while ((1 << nb_bits) < (num_layers_in_tree_partition[j] + 1)) nb_bits++; gf_bs_read_int_log_idx2(bs, nb_bits, "highest_layer_idx_plus1", i, j); } if (gf_bs_read_int_log(bs, 1, 
"vps_sub_layers_max_minus1_present_flag")) { for (i = 0; i < vps->max_layers; i++) { gf_bs_read_int_log_idx(bs, 3, "sub_layers_vps_max_minus1", i); } } if (gf_bs_read_int_log(bs, 1, "max_tid_ref_present_flag")) { for (i = 0; i < (vps->max_layers - 1); i++) { for (j = i + 1; j < vps->max_layers; j++) { if (vps->direct_dependency_flag[j][i]) gf_bs_read_int_log_idx2(bs, 3, "max_tid_il_ref_pics_plus1", i, j); } } } gf_bs_read_int_log(bs, 1, "default_ref_layers_active_flag"); vps->num_profile_tier_level = 1 + gf_bs_read_ue_log(bs, "num_profile_tier_level"); if (vps->num_profile_tier_level > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Wrong number of PTLs in VPS %d\n", vps->num_profile_tier_level)); vps->num_profile_tier_level = 1; return GF_FALSE; } for (i = vps->base_layer_internal_flag ? 2 : 1; i < vps->num_profile_tier_level; i++) { Bool vps_profile_present_flag = gf_bs_read_int_log_idx(bs, 1, "vps_profile_present_flag", i); hevc_profile_tier_level(bs, vps_profile_present_flag, vps->max_sub_layers - 1, &vps->ext_ptl[i - 1], i-1); } NumLayerSets = vps->num_layer_sets + num_add_layer_set; num_add_olss = 0; if (NumLayerSets > 1) { num_add_olss = gf_bs_read_ue_log(bs, "num_add_olss"); default_output_layer_idc = gf_bs_read_int_log(bs, 2, "default_output_layer_idc"); default_output_layer_idc = default_output_layer_idc < 2 ? default_output_layer_idc : 2; } vps->num_output_layer_sets = num_add_olss + NumLayerSets; layer_set_idx_for_ols_minus1[0] = 1; vps->output_layer_flag[0][0] = 1; for (i = 0; i < vps->num_output_layer_sets; i++) { if ((NumLayerSets > 2) && (i >= NumLayerSets)) { nb_bits = 1; while ((1 << nb_bits) < (NumLayerSets - 1)) nb_bits++; layer_set_idx_for_ols_minus1[i] = gf_bs_read_int_log_idx(bs, nb_bits, "layer_set_idx_for_ols_minus1", i); } else layer_set_idx_for_ols_minus1[i] = 0; ols_ids_to_ls_idx = i < NumLayerSets ? 
i : layer_set_idx_for_ols_minus1[i] + 1; if ((i > (vps->num_layer_sets - 1)) || (default_output_layer_idc == 2)) { for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) vps->output_layer_flag[i][j] = gf_bs_read_int_log_idx2(bs, 1, "output_layer_flag", i, j); } if ((default_output_layer_idc == 0) || (default_output_layer_idc == 1)) { for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { if ((default_output_layer_idc == 0) || (vps->LayerSetLayerIdList[i][j] == vps->LayerSetLayerIdListMax[i])) OutputLayerFlag[i][j] = GF_TRUE; else OutputLayerFlag[i][j] = GF_FALSE; } } for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { if (OutputLayerFlag[i][j]) { u32 curLayerID; vps->necessary_layers_flag[i][j] = GF_TRUE; curLayerID = vps->LayerSetLayerIdList[i][j]; for (k = 0; k < j; k++) { u32 refLayerId = vps->LayerSetLayerIdList[i][k]; if (dependency_flag[vps->layer_id_in_vps[curLayerID]][vps->layer_id_in_vps[refLayerId]]) vps->necessary_layers_flag[i][k] = GF_TRUE; } } } vps->num_necessary_layers[i] = 0; for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { if (vps->necessary_layers_flag[i][j]) vps->num_necessary_layers[i] += 1; } if (i == 0) { if (vps->base_layer_internal_flag) { if (vps->max_layers > 1) vps->profile_tier_level_idx[0][0] = 1; else vps->profile_tier_level_idx[0][0] = 0; } continue; } nb_bits = 1; while ((u32)(1 << nb_bits) < vps->num_profile_tier_level) nb_bits++; for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) if (vps->necessary_layers_flag[i][j] && vps->num_profile_tier_level) vps->profile_tier_level_idx[i][j] = gf_bs_read_int_log_idx2(bs, nb_bits, "profile_tier_level_idx", i, j); else vps->profile_tier_level_idx[i][j] = 0; nb_output_layers_in_output_layer_set[i] = 0; for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { nb_output_layers_in_output_layer_set[i] += OutputLayerFlag[i][j]; if (OutputLayerFlag[i][j]) { ols_highest_output_layer_id[i] = vps->LayerSetLayerIdList[ols_ids_to_ls_idx][j]; } } if (nb_output_layers_in_output_layer_set[i] == 1 && ols_highest_output_layer_id[i] > 0) vps->alt_output_layer_flag[i] = gf_bs_read_int_log_idx(bs, 1, "alt_output_layer_flag", i); } vps->num_rep_formats = 1 + gf_bs_read_ue_log(bs, "num_rep_formats_minus1"); if (vps->num_rep_formats > 16) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Wrong number of rep formats in VPS %d\n", vps->num_rep_formats)); vps->num_rep_formats = 0; return GF_FALSE; } for (i = 0; i < vps->num_rep_formats; i++) { lhvc_parse_rep_format(&vps->rep_formats[i], bs, i); } if (vps->num_rep_formats > 1) rep_format_idx_present_flag = gf_bs_read_int_log(bs, 1, "rep_format_idx_present_flag"); else rep_format_idx_present_flag = 0; vps->rep_format_idx[0] = 0; nb_bits = 1; while ((u32)(1 << nb_bits) < vps->num_rep_formats) nb_bits++; for (i = vps->base_layer_internal_flag ? 1 : 0; i < vps->max_layers; i++) { if (rep_format_idx_present_flag) { vps->rep_format_idx[i] = gf_bs_read_int_log_idx(bs, nb_bits, "rep_format_idx", i); } else { vps->rep_format_idx[i] = i < vps->num_rep_formats - 1 ? i : vps->num_rep_formats - 1; } } //TODO - we don't use the rest ... 
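	/* The remaining vps_extension() fields (e.g. direct dependency types, DPB sizes, VPS VUI)
	   are not needed by this parser; only the layer topology, PTLs and rep_formats read above
	   are kept, so parsing of the extension stops here. */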
return GF_TRUE; } static void sub_layer_hrd_parameters(GF_BitStream *bs, int subLayerId, u32 cpb_cnt, Bool sub_pic_hrd_params_present_flag, u32 idx1, u32 idx2) { u32 i; if (!gf_bs_available(bs)) return; for (i = 0; i <= cpb_cnt; i++) { gf_bs_read_ue_log_idx3(bs, "bit_rate_value_minus1", idx1, idx2, i); gf_bs_read_ue_log_idx3(bs, "cpb_size_value_minus1", idx1, idx2, i); if (sub_pic_hrd_params_present_flag) { gf_bs_read_ue_log_idx3(bs, "cpb_size_du_value_minus1", idx1, idx2, i); gf_bs_read_ue_log_idx3(bs, "bit_rate_du_value_minus1", idx1, idx2, i); } gf_bs_read_int_log_idx3(bs, 1, "cbr_flag", idx1, idx2, i); } } static void hevc_parse_hrd_parameters(GF_BitStream *bs, Bool commonInfPresentFlag, int maxNumSubLayersMinus1, u32 idx) { int i; Bool nal_hrd_parameters_present_flag = GF_FALSE; Bool vcl_hrd_parameters_present_flag = GF_FALSE; Bool sub_pic_hrd_params_present_flag = GF_FALSE; if (commonInfPresentFlag) { nal_hrd_parameters_present_flag = gf_bs_read_int_log_idx(bs, 1, "nal_hrd_parameters_present_flag", idx); vcl_hrd_parameters_present_flag = gf_bs_read_int_log_idx(bs, 1, "vcl_hrd_parameters_present_flag", idx); if (nal_hrd_parameters_present_flag || vcl_hrd_parameters_present_flag) { sub_pic_hrd_params_present_flag = gf_bs_read_int_log_idx(bs, 1, "sub_pic_hrd_params_present_flag", idx); if (sub_pic_hrd_params_present_flag) { gf_bs_read_int_log_idx(bs, 8, "tick_divisor_minus2", idx); gf_bs_read_int_log_idx(bs, 5, "du_cpb_removal_delay_increment_length_minus1", idx); gf_bs_read_int_log_idx(bs, 1, "sub_pic_cpb_params_in_pic_timing_sei_flag", idx); gf_bs_read_int_log_idx(bs, 5, "dpb_output_delay_du_length_minus1", idx); } gf_bs_read_int_log_idx(bs, 4, "bit_rate_scale", idx); gf_bs_read_int_log_idx(bs, 4, "cpb_size_scale", idx); if (sub_pic_hrd_params_present_flag) { gf_bs_read_int_log_idx(bs, 4, "cpb_size_du_scale", idx); } gf_bs_read_int_log_idx(bs, 5, "initial_cpb_removal_delay_length_minus1", idx); gf_bs_read_int_log_idx(bs, 5, "au_cpb_removal_delay_length_minus1", idx); gf_bs_read_int_log_idx(bs, 5, "dpb_output_delay_length_minus1", idx); } } for (i = 0; i <= maxNumSubLayersMinus1; i++) { Bool fixed_pic_rate_general_flag_i = gf_bs_read_int_log_idx(bs, 1, "fixed_pic_rate_general_flag", idx); Bool fixed_pic_rate_within_cvs_flag_i = GF_TRUE; Bool low_delay_hrd_flag_i = GF_FALSE; u32 cpb_cnt_minus1_i = 0; if (!fixed_pic_rate_general_flag_i) { fixed_pic_rate_within_cvs_flag_i = gf_bs_read_int_log_idx(bs, 1, "fixed_pic_rate_within_cvs_flag", idx); } if (fixed_pic_rate_within_cvs_flag_i) gf_bs_read_ue_log_idx(bs, "elemental_duration_in_tc_minus1", idx); else low_delay_hrd_flag_i = gf_bs_read_int_log_idx(bs, 1, "low_delay_hrd_flag", idx); if (!low_delay_hrd_flag_i) { cpb_cnt_minus1_i = gf_bs_read_ue_log_idx(bs, "cpb_cnt_minus1", idx); } if (nal_hrd_parameters_present_flag) { sub_layer_hrd_parameters(bs, i, cpb_cnt_minus1_i, sub_pic_hrd_params_present_flag, idx, i); } if (vcl_hrd_parameters_present_flag) { sub_layer_hrd_parameters(bs, i, cpb_cnt_minus1_i, sub_pic_hrd_params_present_flag, idx, i); } } } static s32 gf_hevc_read_vps_bs_internal(GF_BitStream *bs, HEVCState *hevc, Bool stop_at_vps_ext) { u8 vps_sub_layer_ordering_info_present_flag, vps_extension_flag; u32 i, j; s32 vps_id; HEVC_VPS *vps; u8 layer_id_included_flag[MAX_LHVC_LAYERS][64]; //nalu header already parsed vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if ((vps_id<0) || (vps_id >= 16)) return -1; vps = &hevc->vps[vps_id]; vps->bit_pos_vps_extensions = -1; if (!vps->state) { vps->id = vps_id; vps->state = 1; } 
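	/* One VPS slot is used per vps_id (0..15); the state flag only records that the slot has
	   been seen, so a repeated VPS NALU with the same id simply overwrites the fields parsed below. */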
vps->base_layer_internal_flag = gf_bs_read_int_log(bs, 1, "base_layer_internal_flag"); vps->base_layer_available_flag = gf_bs_read_int_log(bs, 1, "base_layer_available_flag"); vps->max_layers = 1 + gf_bs_read_int_log(bs, 6, "max_layers_minus1"); if (vps->max_layers > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] sorry, %d layers in VPS but only %d supported\n", vps->max_layers, MAX_LHVC_LAYERS)); return -1; } vps->max_sub_layers = gf_bs_read_int_log(bs, 3, "max_sub_layers_minus1") + 1; vps->temporal_id_nesting = gf_bs_read_int_log(bs, 1, "temporal_id_nesting"); gf_bs_read_int_log(bs, 16, "vps_reserved_ffff_16bits"); hevc_profile_tier_level(bs, 1, vps->max_sub_layers - 1, &vps->ptl, 0); vps_sub_layer_ordering_info_present_flag = gf_bs_read_int_log(bs, 1, "vps_sub_layer_ordering_info_present_flag"); for (i = (vps_sub_layer_ordering_info_present_flag ? 0 : vps->max_sub_layers - 1); i < vps->max_sub_layers; i++) { gf_bs_read_ue_log_idx(bs, "vps_max_dec_pic_buffering_minus1", i); gf_bs_read_ue_log_idx(bs, "vps_max_num_reorder_pics", i); gf_bs_read_ue_log_idx(bs, "vps_max_latency_increase_plus1", i); } vps->max_layer_id = gf_bs_read_int_log(bs, 6, "max_layer_id"); if (vps->max_layer_id > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] VPS max layer ID %u but GPAC only supports %u\n", vps->max_layer_id, MAX_LHVC_LAYERS)); return -1; } vps->num_layer_sets = gf_bs_read_ue_log(bs, "num_layer_sets_minus1") + 1; if (vps->num_layer_sets > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Wrong number of layer sets in VPS %d\n", vps->num_layer_sets)); return -1; } for (i = 1; i < vps->num_layer_sets; i++) { for (j = 0; j <= vps->max_layer_id; j++) { layer_id_included_flag[i][j] = gf_bs_read_int_log_idx2(bs, 1, "layer_id_included_flag", i, j); } } vps->num_layers_in_id_list[0] = 1; for (i = 1; i < vps->num_layer_sets; i++) { u32 n, m; n = 0; for (m = 0; m <= vps->max_layer_id; m++) { if (layer_id_included_flag[i][m]) { vps->LayerSetLayerIdList[i][n++] = m; if (vps->LayerSetLayerIdListMax[i] < m) vps->LayerSetLayerIdListMax[i] = m; } } vps->num_layers_in_id_list[i] = n; } if (gf_bs_read_int_log(bs, 1, "vps_timing_info_present_flag")) { u32 vps_num_hrd_parameters; gf_bs_read_int_log(bs, 32, "vps_num_units_in_tick"); gf_bs_read_int_log(bs, 32, "vps_time_scale"); if (gf_bs_read_int_log(bs, 1, "vps_poc_proportional_to_timing_flag")) { gf_bs_read_ue_log(bs, "vps_num_ticks_poc_diff_one_minus1"); } vps_num_hrd_parameters = gf_bs_read_ue_log(bs, "vps_num_hrd_parameters"); for (i = 0; i < vps_num_hrd_parameters; i++) { Bool cprms_present_flag = GF_TRUE; gf_bs_read_ue_log_idx(bs, "hrd_layer_set_idx", i); if (i > 0) cprms_present_flag = gf_bs_read_int_log(bs, 1, "cprms_present_flag"); hevc_parse_hrd_parameters(bs, cprms_present_flag, vps->max_sub_layers - 1, i); } } if (stop_at_vps_ext) { return vps_id; } vps_extension_flag = gf_bs_read_int_log(bs, 1, "vps_extension_flag"); if (vps_extension_flag) { Bool res; gf_bs_align(bs); res = hevc_parse_vps_extension(vps, bs); if (res != GF_TRUE) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Failed to parse VPS extensions\n")); return -1; } if (gf_bs_read_int_log(bs, 1, "vps_extension2_flag")) { #if 0 while (gf_bs_available(bs)) { /*vps_extension_data_flag */ gf_bs_read_int(bs, 1); } #endif } } return vps_id; } GF_EXPORT s32 gf_hevc_read_vps_ex(u8 *data, u32 *size, HEVCState *hevc, Bool remove_extensions) { GF_BitStream *bs; char *data_without_emulation_bytes = NULL; u32 data_without_emulation_bytes_size = 0; s32 vps_id = -1; 
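	/* Usage sketch (illustrative only, not part of the original file): parse a raw VPS NALU
	   (NAL header first, no start code) and optionally strip its extensions in place:

	       HEVCState hevc;
	       memset(&hevc, 0, sizeof(HEVCState));
	       u32 nal_size = vps_nalu_size;   // vps_nalu / vps_nalu_size assumed provided by the caller
	       s32 id = gf_hevc_read_vps_ex(vps_nalu, &nal_size, &hevc, GF_TRUE);
	       if (id >= 0) {
	           // hevc.vps[id] is populated; nal_size may have shrunk if the extension was removed
	       }
	*/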
	/*still contains emulation bytes*/
	data_without_emulation_bytes_size = remove_extensions ? gf_media_nalu_emulation_bytes_remove_count(data, (*size)) : 0;
	if (!data_without_emulation_bytes_size) {
		bs = gf_bs_new(data, (*size), GF_BITSTREAM_READ);
		gf_bs_enable_emulation_byte_removal(bs, GF_TRUE);
	}
	//when removing VPS ext, we have to get the full buffer without emulation prevention bytes because we do a bit-by-bit copy of the vps
	else {
		data_without_emulation_bytes = gf_malloc((*size) * sizeof(char));
		data_without_emulation_bytes_size = gf_media_nalu_remove_emulation_bytes(data, data_without_emulation_bytes, (*size));
		bs = gf_bs_new(data_without_emulation_bytes, data_without_emulation_bytes_size, GF_BITSTREAM_READ);
	}
	if (!bs) goto exit;

	if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) goto exit;

	vps_id = gf_hevc_read_vps_bs_internal(bs, hevc, remove_extensions);
	if (vps_id < 0) goto exit;

	if (remove_extensions) {
		u8 *new_vps;
		u32 new_vps_size, emulation_bytes;
		u32 bit_pos = gf_bs_get_bit_offset(bs);
		GF_BitStream *w_bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
		gf_bs_seek(bs, 0);
		gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) );
		gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) );
		gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) );
		gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) );
		gf_bs_write_u16(w_bs, gf_bs_read_u16(bs) );
		bit_pos -= 48;
		while (bit_pos) {
			u32 v = gf_bs_read_int(bs, 1);
			gf_bs_write_int(w_bs, v, 1);
			bit_pos--;
		}
		/*vps extension flag*/
		gf_bs_write_int(w_bs, 0, 1);
		new_vps = NULL;
		gf_bs_get_content(w_bs, &new_vps, &new_vps_size);
		gf_bs_del(w_bs);

		emulation_bytes = gf_media_nalu_emulation_bytes_add_count(new_vps, new_vps_size);
		if (emulation_bytes + new_vps_size > *size) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("Buffer too small to rewrite VPS - skipping rewrite\n"));
		} else {
			*size = gf_media_nalu_add_emulation_bytes(new_vps, data, new_vps_size);
		}
		if (new_vps) gf_free(new_vps);
	}

exit:
	if (bs) gf_bs_del(bs);
	if (data_without_emulation_bytes) gf_free(data_without_emulation_bytes);
	return vps_id;
}

GF_EXPORT
s32 gf_hevc_read_vps(u8 *data, u32 size, HEVCState *hevc)
{
	return gf_hevc_read_vps_ex(data, &size, hevc, GF_FALSE);
}

GF_EXPORT
s32 gf_hevc_read_vps_bs(GF_BitStream *bs, HEVCState *hevc)
{
	if (!bs || !hevc) return -1;
	gf_bs_enable_emulation_byte_removal(bs, GF_TRUE);
	if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) return -1;
	return gf_hevc_read_vps_bs_internal(bs, hevc, GF_FALSE);
}

static void hevc_scaling_list_data(GF_BitStream *bs)
{
	u32 i, sizeId, matrixId;
	for (sizeId = 0; sizeId < 4; sizeId++) {
		for (matrixId = 0; matrixId < 6; matrixId += (sizeId == 3) ?
3 : 1) { u32 idx = sizeId*100 + 10*matrixId; u32 scaling_list_pred_mode_flag_sizeId_matrixId = gf_bs_read_int_log_idx(bs, 1, "scaling_list_pred_mode_flag_sizeId_matrixId", idx); if (!scaling_list_pred_mode_flag_sizeId_matrixId) { gf_bs_read_ue_log_idx(bs, "scaling_list_pred_matrix_id_delta", idx); } else { //u32 nextCoef = 8; u32 coefNum = MIN(64, (1 << (4 + (sizeId << 1)))); if (sizeId > 1) { gf_bs_read_se_log_idx(bs, "scaling_list_dc_coef_minus8", idx); } for (i = 0; i < coefNum; i++) { gf_bs_read_se_log_idx2(bs, "scaling_list_delta_coef", idx, i); } } } } } static const struct { u32 w, h; } hevc_sar[17] = { { 0, 0 }, { 1, 1 }, { 12, 11 }, { 10, 11 }, { 16, 11 }, { 40, 33 }, { 24, 11 }, { 20, 11 }, { 32, 11 }, { 80, 33 }, { 18, 11 }, { 15, 11 }, { 64, 33 }, { 160,99 }, { 4,3}, { 3,2}, { 2,1} }; static s32 gf_hevc_read_sps_bs_internal(GF_BitStream *bs, HEVCState *hevc, u8 layer_id, u32 *vui_flag_pos) { s32 vps_id, sps_id = -1; u32 i, nb_CTUs, depth; HEVC_SPS *sps; HEVC_VPS *vps; HEVC_ProfileTierLevel ptl; Bool multiLayerExtSpsFlag; u8 sps_ext_or_max_sub_layers_minus1, max_sub_layers_minus1; if (vui_flag_pos) *vui_flag_pos = 0; //nalu header already parsed vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if ((vps_id<0) || (vps_id >= 16)) { return -1; } memset(&ptl, 0, sizeof(ptl)); max_sub_layers_minus1 = 0; sps_ext_or_max_sub_layers_minus1 = 0; if (layer_id == 0) max_sub_layers_minus1 = gf_bs_read_int_log(bs, 3, "max_sub_layers_minus1"); else sps_ext_or_max_sub_layers_minus1 = gf_bs_read_int_log(bs, 3, "sps_ext_or_max_sub_layers_minus1"); multiLayerExtSpsFlag = (layer_id != 0) && (sps_ext_or_max_sub_layers_minus1 == 7); if (!multiLayerExtSpsFlag) { gf_bs_read_int_log(bs, 1, "temporal_id_nesting_flag"); hevc_profile_tier_level(bs, 1, max_sub_layers_minus1, &ptl, 0); } sps_id = gf_bs_read_ue_log(bs, "sps_id"); if ((sps_id < 0) || (sps_id >= 16)) { return -1; } sps = &hevc->sps[sps_id]; if (!sps->state) { sps->state = 1; sps->id = sps_id; sps->vps_id = vps_id; } sps->ptl = ptl; vps = &hevc->vps[vps_id]; sps->max_sub_layers_minus1 = 0; sps->sps_ext_or_max_sub_layers_minus1 = 0; /* default values */ sps->colour_primaries = 2; sps->transfer_characteristic = 2; sps->matrix_coeffs = 2; //sps_rep_format_idx = 0; if (multiLayerExtSpsFlag) { sps->update_rep_format_flag = gf_bs_read_int_log(bs, 1, "update_rep_format_flag"); if (sps->update_rep_format_flag) { sps->rep_format_idx = gf_bs_read_int_log(bs, 8, "rep_format_idx"); } else { sps->rep_format_idx = vps->rep_format_idx[layer_id]; } sps->width = vps->rep_formats[sps->rep_format_idx].pic_width_luma_samples; sps->height = vps->rep_formats[sps->rep_format_idx].pic_height_luma_samples; sps->chroma_format_idc = vps->rep_formats[sps->rep_format_idx].chroma_format_idc; sps->bit_depth_luma = vps->rep_formats[sps->rep_format_idx].bit_depth_luma; sps->bit_depth_chroma = vps->rep_formats[sps->rep_format_idx].bit_depth_chroma; sps->separate_colour_plane_flag = vps->rep_formats[sps->rep_format_idx].separate_colour_plane_flag; //TODO this is crude ... 
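		/* For a multi-layer extension SPS, width/height/chroma format/bit depths are inherited from
		   the VPS rep_format entry selected above; copying the first VPS extension PTL below is only
		   an approximation of this layer's actual profile/tier/level (see the TODO above). */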
sps->ptl = vps->ext_ptl[0]; } else { sps->chroma_format_idc = gf_bs_read_ue_log(bs, "chroma_format_idc"); if (sps->chroma_format_idc == 3) sps->separate_colour_plane_flag = gf_bs_read_int_log(bs, 1, "separate_colour_plane_flag"); sps->width = gf_bs_read_ue_log(bs, "width"); sps->height = gf_bs_read_ue_log(bs, "height"); if ((sps->cw_flag = gf_bs_read_int_log(bs, 1, "conformance_window_flag"))) { u32 SubWidthC, SubHeightC; if (sps->chroma_format_idc == 1) { SubWidthC = SubHeightC = 2; } else if (sps->chroma_format_idc == 2) { SubWidthC = 2; SubHeightC = 1; } else { SubWidthC = SubHeightC = 1; } sps->cw_left = gf_bs_read_ue_log(bs, "conformance_window_left"); sps->cw_right = gf_bs_read_ue_log(bs, "conformance_window_right"); sps->cw_top = gf_bs_read_ue_log(bs, "conformance_window_top"); sps->cw_bottom = gf_bs_read_ue_log(bs, "conformance_window_bottom"); sps->width -= SubWidthC * (sps->cw_left + sps->cw_right); sps->height -= SubHeightC * (sps->cw_top + sps->cw_bottom); } sps->bit_depth_luma = 8 + gf_bs_read_ue_log(bs, "bit_depth_luma_minus8"); sps->bit_depth_chroma = 8 + gf_bs_read_ue_log(bs, "bit_depth_chroma_minus8"); } sps->log2_max_pic_order_cnt_lsb = 4 + gf_bs_read_ue_log(bs, "log2_max_pic_order_cnt_lsb_minus4"); if (!multiLayerExtSpsFlag) { sps->sub_layer_ordering_info_present_flag = gf_bs_read_int_log(bs, 1, "sub_layer_ordering_info_present_flag"); for (i = sps->sub_layer_ordering_info_present_flag ? 0 : sps->max_sub_layers_minus1; i <= sps->max_sub_layers_minus1; i++) { gf_bs_read_ue_log_idx(bs, "max_dec_pic_buffering", i); gf_bs_read_ue_log_idx(bs, "num_reorder_pics", i); gf_bs_read_ue_log_idx(bs, "max_latency_increase", i); } } sps->log2_min_luma_coding_block_size = 3 + gf_bs_read_ue_log(bs, "log2_min_luma_coding_block_size_minus3"); sps->log2_diff_max_min_luma_coding_block_size = gf_bs_read_ue_log(bs, "log2_diff_max_min_luma_coding_block_size"); sps->max_CU_width = (1 << (sps->log2_min_luma_coding_block_size + sps->log2_diff_max_min_luma_coding_block_size)); sps->max_CU_height = (1 << (sps->log2_min_luma_coding_block_size + sps->log2_diff_max_min_luma_coding_block_size)); sps->log2_min_transform_block_size = 2 + gf_bs_read_ue_log(bs, "log2_min_transform_block_size_minus2"); sps->log2_max_transform_block_size = sps->log2_min_transform_block_size + gf_bs_read_ue_log(bs, "log2_max_transform_block_size"); depth = 0; sps->max_transform_hierarchy_depth_inter = gf_bs_read_ue_log(bs, "max_transform_hierarchy_depth_inter"); sps->max_transform_hierarchy_depth_intra = gf_bs_read_ue_log(bs, "max_transform_hierarchy_depth_intra"); while ((u32)(sps->max_CU_width >> sps->log2_diff_max_min_luma_coding_block_size) > (u32)(1 << (sps->log2_min_transform_block_size + depth))) { depth++; } sps->max_CU_depth = sps->log2_diff_max_min_luma_coding_block_size + depth; nb_CTUs = ((sps->width + sps->max_CU_width - 1) / sps->max_CU_width) * ((sps->height + sps->max_CU_height - 1) / sps->max_CU_height); sps->bitsSliceSegmentAddress = 0; while (nb_CTUs > (u32)(1 << sps->bitsSliceSegmentAddress)) { sps->bitsSliceSegmentAddress++; } sps->scaling_list_enable_flag = gf_bs_read_int_log(bs, 1, "scaling_list_enable_flag"); if (sps->scaling_list_enable_flag) { sps->infer_scaling_list_flag = 0; sps->scaling_list_ref_layer_id = 0; if (multiLayerExtSpsFlag) { sps->infer_scaling_list_flag = gf_bs_read_int_log(bs, 1, "infer_scaling_list_flag"); } if (sps->infer_scaling_list_flag) { sps->scaling_list_ref_layer_id = gf_bs_read_int_log(bs, 6, "scaling_list_ref_layer_id"); } else { sps->scaling_list_data_present_flag = 
gf_bs_read_int_log(bs, 1, "scaling_list_data_present_flag"); if (sps->scaling_list_data_present_flag) { hevc_scaling_list_data(bs); } } } sps->asymmetric_motion_partitions_enabled_flag = gf_bs_read_int_log(bs, 1, "asymmetric_motion_partitions_enabled_flag"); sps->sample_adaptive_offset_enabled_flag = gf_bs_read_int_log(bs, 1, "sample_adaptive_offset_enabled_flag"); if ( (sps->pcm_enabled_flag = gf_bs_read_int_log(bs, 1, "pcm_enabled_flag")) ) { sps->pcm_sample_bit_depth_luma_minus1 = gf_bs_read_int_log(bs, 4, "pcm_sample_bit_depth_luma_minus1"); sps->pcm_sample_bit_depth_chroma_minus1 = gf_bs_read_int_log(bs, 4, "pcm_sample_bit_depth_chroma_minus1"); sps->log2_min_pcm_luma_coding_block_size_minus3 = gf_bs_read_ue_log(bs, "log2_min_pcm_luma_coding_block_size_minus3"); sps->log2_diff_max_min_pcm_luma_coding_block_size = gf_bs_read_ue_log(bs, "log2_diff_max_min_pcm_luma_coding_block_size"); sps->pcm_loop_filter_disable_flag = gf_bs_read_int_log(bs, 1, "pcm_loop_filter_disable_flag"); } sps->num_short_term_ref_pic_sets = gf_bs_read_ue_log(bs, "num_short_term_ref_pic_sets"); if (sps->num_short_term_ref_pic_sets > 64) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Invalid number of short term reference picture sets %d\n", sps->num_short_term_ref_pic_sets)); return -1; } for (i = 0; i < sps->num_short_term_ref_pic_sets; i++) { Bool ret = hevc_parse_short_term_ref_pic_set(bs, sps, i); /*cannot parse short_term_ref_pic_set, skip VUI parsing*/ if (!ret) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Invalid short_term_ref_pic_set\n")); return -1; } } sps->long_term_ref_pics_present_flag = gf_bs_read_int_log(bs, 1, "long_term_ref_pics_present_flag"); if (sps->long_term_ref_pics_present_flag) { sps->num_long_term_ref_pic_sps = gf_bs_read_ue_log(bs, "num_long_term_ref_pic_sps"); for (i = 0; i < sps->num_long_term_ref_pic_sps; i++) { gf_bs_read_int_log_idx(bs, sps->log2_max_pic_order_cnt_lsb, "lt_ref_pic_poc_lsb_sps", i); gf_bs_read_int_log_idx(bs, 1, "used_by_curr_pic_lt_sps_flag", i); } } sps->temporal_mvp_enable_flag = gf_bs_read_int_log(bs, 1, "temporal_mvp_enable_flag"); sps->strong_intra_smoothing_enable_flag = gf_bs_read_int_log(bs, 1, "strong_intra_smoothing_enable_flag"); if (vui_flag_pos) *vui_flag_pos = (u32)gf_bs_get_bit_offset(bs); if ((sps->vui_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_parameters_present_flag")) ) { sps->aspect_ratio_info_present_flag = gf_bs_read_int_log(bs, 1, "aspect_ratio_info_present_flag"); if (sps->aspect_ratio_info_present_flag) { sps->sar_idc = gf_bs_read_int_log(bs, 8, "aspect_ratio_idc"); if (sps->sar_idc == 255) { sps->sar_width = gf_bs_read_int_log(bs, 16, "aspect_ratio_width"); sps->sar_height = gf_bs_read_int_log(bs, 16, "aspect_ratio_height"); } else if (sps->sar_idc < 17) { sps->sar_width = hevc_sar[sps->sar_idc].w; sps->sar_height = hevc_sar[sps->sar_idc].h; } } if ((sps->overscan_info_present = gf_bs_read_int_log(bs, 1, "overscan_info_present"))) sps->overscan_appropriate = gf_bs_read_int_log(bs, 1, "overscan_appropriate"); sps->video_signal_type_present_flag = gf_bs_read_int_log(bs, 1, "video_signal_type_present_flag"); if (sps->video_signal_type_present_flag) { sps->video_format = gf_bs_read_int_log(bs, 3, "video_format"); sps->video_full_range_flag = gf_bs_read_int_log(bs, 1, "video_full_range_flag"); if ((sps->colour_description_present_flag = gf_bs_read_int_log(bs, 1, "colour_description_present_flag"))) { sps->colour_primaries = gf_bs_read_int_log(bs, 8, "colour_primaries"); sps->transfer_characteristic = gf_bs_read_int_log(bs, 8, 
"transfer_characteristic"); sps->matrix_coeffs = gf_bs_read_int_log(bs, 8, "matrix_coefficients"); } } if ((sps->chroma_loc_info_present_flag = gf_bs_read_int_log(bs, 1, "chroma_loc_info_present_flag"))) { sps->chroma_sample_loc_type_top_field = gf_bs_read_ue_log(bs, "chroma_sample_loc_type_top_field"); sps->chroma_sample_loc_type_bottom_field = gf_bs_read_ue_log(bs, "chroma_sample_loc_type_bottom_field"); } sps->neutra_chroma_indication_flag = gf_bs_read_int_log(bs, 1, "neutra_chroma_indication_flag"); sps->field_seq_flag = gf_bs_read_int_log(bs, 1, "field_seq_flag"); sps->frame_field_info_present_flag = gf_bs_read_int_log(bs, 1, "frame_field_info_present_flag"); if ((sps->default_display_window_flag = gf_bs_read_int_log(bs, 1, "default_display_window_flag"))) { sps->left_offset = gf_bs_read_ue_log(bs, "display_window_left_offset"); sps->right_offset = gf_bs_read_ue_log(bs, "display_window_right_offset"); sps->top_offset = gf_bs_read_ue_log(bs, "display_window_top_offset"); sps->bottom_offset = gf_bs_read_ue_log(bs, "display_window_bottom_offset"); } sps->has_timing_info = gf_bs_read_int_log(bs, 1, "has_timing_info"); if (sps->has_timing_info) { sps->num_units_in_tick = gf_bs_read_int_log(bs, 32, "num_units_in_tick"); sps->time_scale = gf_bs_read_int_log(bs, 32, "time_scale"); sps->poc_proportional_to_timing_flag = gf_bs_read_int_log(bs, 1, "poc_proportional_to_timing_flag"); if (sps->poc_proportional_to_timing_flag) sps->num_ticks_poc_diff_one_minus1 = gf_bs_read_ue_log(bs, "num_ticks_poc_diff_one_minus1"); if ((sps->hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "hrd_parameters_present_flag"))) { // GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[HEVC] HRD param parsing not implemented\n")); return sps_id; } } if (gf_bs_read_int_log(bs, 1, "bitstream_restriction_flag")) { gf_bs_read_int_log(bs, 1, "tiles_fixed_structure_flag"); gf_bs_read_int_log(bs, 1, "motion_vectors_over_pic_boundaries_flag"); gf_bs_read_int_log(bs, 1, "restricted_ref_pic_lists_flag"); gf_bs_read_ue_log(bs, "min_spatial_segmentation_idc"); gf_bs_read_ue_log(bs, "max_bytes_per_pic_denom"); gf_bs_read_ue_log(bs, "max_bits_per_min_cu_denom"); gf_bs_read_ue_log(bs, "log2_max_mv_length_horizontal"); gf_bs_read_ue_log(bs, "log2_max_mv_length_vertical"); } } if (gf_bs_read_int_log(bs, 1, "sps_extension_flag")) { #if 0 while (gf_bs_available(bs)) { /*sps_extension_data_flag */ gf_bs_read_int(bs, 1); } #endif } return sps_id; } GF_EXPORT s32 gf_hevc_read_sps_ex(char *data, u32 size, HEVCState *hevc, u32 *vui_flag_pos) { GF_BitStream *bs; s32 sps_id = -1; u8 layer_id; if (vui_flag_pos) *vui_flag_pos = 0; bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) goto exit; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, &layer_id)) goto exit; sps_id = gf_hevc_read_sps_bs_internal(bs, hevc, layer_id, vui_flag_pos); exit: if (bs) gf_bs_del(bs); return sps_id; } GF_EXPORT s32 gf_hevc_read_sps(u8 *data, u32 size, HEVCState *hevc) { return gf_hevc_read_sps_ex(data, size, hevc, NULL); } GF_EXPORT s32 gf_hevc_read_sps_bs(GF_BitStream *bs, HEVCState *hevc) { u8 layer_id; if (!bs || !hevc) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, &layer_id)) return -1; return gf_hevc_read_sps_bs_internal(bs, hevc, layer_id, NULL); } static s32 gf_hevc_read_pps_bs_internal(GF_BitStream *bs, HEVCState *hevc) { u32 i; s32 pps_id; HEVC_PPS *pps; //NAL header already read pps_id = gf_bs_read_ue_log(bs, "pps_id"); if ((pps_id < 0) || (pps_id >= 64)) { 
GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] wrong PPS ID %d in PPS\n", pps_id)); return -1; } pps = &hevc->pps[pps_id]; if (!pps->state) { pps->id = pps_id; pps->state = 1; } pps->sps_id = gf_bs_read_ue_log(bs, "sps_id"); if (((s32)pps->sps_id<0) || (pps->sps_id >= 16)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] wrong SPS ID %d in PPS\n", pps->sps_id)); pps->sps_id=0; return -1; } hevc->sps_active_idx = pps->sps_id; /*set active sps*/ pps->dependent_slice_segments_enabled_flag = gf_bs_read_int_log(bs, 1, "dependent_slice_segments_enabled_flag"); pps->output_flag_present_flag = gf_bs_read_int_log(bs, 1, "output_flag_present_flag"); pps->num_extra_slice_header_bits = gf_bs_read_int_log(bs, 3, "num_extra_slice_header_bits"); pps->sign_data_hiding_flag = gf_bs_read_int_log(bs, 1, "sign_data_hiding_flag"); pps->cabac_init_present_flag = gf_bs_read_int_log(bs, 1, "cabac_init_present_flag"); pps->num_ref_idx_l0_default_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l0_default_active"); pps->num_ref_idx_l1_default_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l1_default_active"); pps->pic_init_qp_minus26 = gf_bs_read_se_log(bs, "pic_init_qp_minus26"); pps->constrained_intra_pred_flag = gf_bs_read_int_log(bs, 1, "constrained_intra_pred_flag"); pps->transform_skip_enabled_flag = gf_bs_read_int_log(bs, 1, "transform_skip_enabled_flag"); if ((pps->cu_qp_delta_enabled_flag = gf_bs_read_int_log(bs, 1, "cu_qp_delta_enabled_flag"))) pps->diff_cu_qp_delta_depth = gf_bs_read_ue_log(bs, "diff_cu_qp_delta_depth"); pps->pic_cb_qp_offset = gf_bs_read_se_log(bs, "pic_cb_qp_offset"); pps->pic_cr_qp_offset = gf_bs_read_se_log(bs, "pic_cr_qp_offset"); pps->slice_chroma_qp_offsets_present_flag = gf_bs_read_int_log(bs, 1, "slice_chroma_qp_offsets_present_flag"); pps->weighted_pred_flag = gf_bs_read_int_log(bs, 1, "weighted_pred_flag"); pps->weighted_bipred_flag = gf_bs_read_int_log(bs, 1, "weighted_bipred_flag"); pps->transquant_bypass_enable_flag = gf_bs_read_int_log(bs, 1, "transquant_bypass_enable_flag"); pps->tiles_enabled_flag = gf_bs_read_int_log(bs, 1, "tiles_enabled_flag"); pps->entropy_coding_sync_enabled_flag = gf_bs_read_int_log(bs, 1, "entropy_coding_sync_enabled_flag"); if (pps->tiles_enabled_flag) { pps->num_tile_columns = 1 + gf_bs_read_ue_log(bs, "num_tile_columns_minus1"); pps->num_tile_rows = 1 + gf_bs_read_ue_log(bs, "num_tile_rows_minus1"); pps->uniform_spacing_flag = gf_bs_read_int_log(bs, 1, "uniform_spacing_flag"); if (!pps->uniform_spacing_flag) { for (i = 0; i < pps->num_tile_columns - 1; i++) { pps->column_width[i] = 1 + gf_bs_read_ue_log_idx(bs, "column_width_minus1", i); } for (i = 0; i < pps->num_tile_rows - 1; i++) { pps->row_height[i] = 1 + gf_bs_read_ue_log_idx(bs, "row_height_minus1", i); } } pps->loop_filter_across_tiles_enabled_flag = gf_bs_read_int_log(bs, 1, "loop_filter_across_tiles_enabled_flag"); } pps->loop_filter_across_slices_enabled_flag = gf_bs_read_int_log(bs, 1, "loop_filter_across_slices_enabled_flag"); if ((pps->deblocking_filter_control_present_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_control_present_flag"))) { pps->deblocking_filter_override_enabled_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_override_enabled_flag"); if (! 
(pps->pic_disable_deblocking_filter_flag = gf_bs_read_int_log(bs, 1, "pic_disable_deblocking_filter_flag"))) { pps->beta_offset_div2 = gf_bs_read_se_log(bs, "beta_offset_div2"); pps->tc_offset_div2 = gf_bs_read_se_log(bs, "tc_offset_div2"); } } if ((pps->pic_scaling_list_data_present_flag = gf_bs_read_int_log(bs, 1, "pic_scaling_list_data_present_flag"))) { hevc_scaling_list_data(bs); } pps->lists_modification_present_flag = gf_bs_read_int_log(bs, 1, "lists_modification_present_flag"); pps->log2_parallel_merge_level_minus2 = gf_bs_read_ue_log(bs, "log2_parallel_merge_level_minus2"); pps->slice_segment_header_extension_present_flag = gf_bs_read_int_log(bs, 1, "slice_segment_header_extension_present_flag"); if (gf_bs_read_int_log(bs, 1, "pps_extension_flag")) { #if 0 while (gf_bs_available(bs)) { /*pps_extension_data_flag */ gf_bs_read_int(bs, 1); } #endif } return pps_id; } GF_EXPORT s32 gf_hevc_read_pps(u8 *data, u32 size, HEVCState *hevc) { GF_BitStream *bs; s32 pps_id = -1; bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) goto exit; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) goto exit; pps_id = gf_hevc_read_pps_bs_internal(bs, hevc); exit: if (bs) gf_bs_del(bs); return pps_id; } GF_EXPORT s32 gf_hevc_read_pps_bs(GF_BitStream *bs, HEVCState *hevc) { if (!bs || !hevc) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) return -1; return gf_hevc_read_pps_bs_internal(bs, hevc); } GF_EXPORT s32 gf_hevc_parse_nalu_bs(GF_BitStream *bs, HEVCState *hevc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { Bool is_slice = GF_FALSE; s32 ret = -1; HEVCSliceInfo n_state; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); memcpy(&n_state, &hevc->s_info, sizeof(HEVCSliceInfo)); if (!hevc_parse_nal_header(bs, nal_unit_type, temporal_id, layer_id)) return -1; n_state.nal_unit_type = *nal_unit_type; switch (n_state.nal_unit_type) { case GF_HEVC_NALU_ACCESS_UNIT: case GF_HEVC_NALU_END_OF_SEQ: case GF_HEVC_NALU_END_OF_STREAM: ret = 1; break; /*slice_segment_layer_rbsp*/ case GF_HEVC_NALU_SLICE_TRAIL_N: case GF_HEVC_NALU_SLICE_TRAIL_R: case GF_HEVC_NALU_SLICE_TSA_N: case GF_HEVC_NALU_SLICE_TSA_R: case GF_HEVC_NALU_SLICE_STSA_N: case GF_HEVC_NALU_SLICE_STSA_R: case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: case GF_HEVC_NALU_SLICE_CRA: case GF_HEVC_NALU_SLICE_RADL_N: case GF_HEVC_NALU_SLICE_RADL_R: case GF_HEVC_NALU_SLICE_RASL_N: case GF_HEVC_NALU_SLICE_RASL_R: is_slice = GF_TRUE; /* slice - read the info and compare.*/ ret = hevc_parse_slice_segment(bs, hevc, &n_state); if (ret < 0) return ret; hevc_compute_poc(&n_state); ret = 0; if (hevc->s_info.poc != n_state.poc) { ret = 1; break; } if (n_state.first_slice_segment_in_pic_flag) { if (!(*layer_id) || (n_state.prev_layer_id_plus1 && ((*layer_id) <= n_state.prev_layer_id_plus1 - 1))) { ret = 1; break; } } break; case GF_HEVC_NALU_SEQ_PARAM: hevc->last_parsed_sps_id = gf_hevc_read_sps_bs_internal(bs, hevc, *layer_id, NULL); ret = (hevc->last_parsed_sps_id>=0) ? 0 : -1; break; case GF_HEVC_NALU_PIC_PARAM: hevc->last_parsed_pps_id = gf_hevc_read_pps_bs_internal(bs, hevc); ret = (hevc->last_parsed_pps_id>=0) ? 0 : -1; break; case GF_HEVC_NALU_VID_PARAM: hevc->last_parsed_vps_id = gf_hevc_read_vps_bs_internal(bs, hevc, GF_FALSE); ret = (hevc->last_parsed_vps_id>=0) ? 
0 : -1; break; default: ret = 0; break; } /* save _prev values */ if ((ret>0) && hevc->s_info.sps) { n_state.frame_num_offset_prev = hevc->s_info.frame_num_offset; n_state.frame_num_prev = hevc->s_info.frame_num; n_state.poc_lsb_prev = hevc->s_info.poc_lsb; n_state.poc_msb_prev = hevc->s_info.poc_msb; if (is_slice) n_state.prev_layer_id_plus1 = *layer_id + 1; } if (is_slice) hevc_compute_poc(&n_state); memcpy(&hevc->s_info, &n_state, sizeof(HEVCSliceInfo)); return ret; } GF_EXPORT s32 gf_hevc_parse_nalu(u8 *data, u32 size, HEVCState *hevc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { GF_BitStream *bs = NULL; s32 ret = -1; if (!hevc) { if (nal_unit_type) (*nal_unit_type) = (data[0] & 0x7E) >> 1; if (layer_id) { u8 id = data[0] & 1; id <<= 5; id |= (data[1] >> 3) & 0x1F; (*layer_id) = id; } if (temporal_id) (*temporal_id) = (data[1] & 0x7); return -1; } bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); ret = gf_hevc_parse_nalu_bs(bs, hevc, nal_unit_type, temporal_id, layer_id); gf_bs_del(bs); return ret; } GF_EXPORT GF_Err gf_hevc_change_vui(GF_HEVCConfig *hvcc, GF_VUIInfo *vui_info) { GF_BitStream *orig, *mod; HEVCState hevc; u32 i, bit_offset, flag; s32 idx; GF_NALUFFParamArray *spss; GF_NALUFFParam *slc; orig = NULL; memset(&hevc, 0, sizeof(HEVCState)); hevc.sps_active_idx = -1; i = 0; spss = NULL; while ((spss = (GF_NALUFFParamArray *)gf_list_enum(hvcc->param_array, &i))) { if (spss->type == GF_HEVC_NALU_SEQ_PARAM) break; spss = NULL; } if (!spss) return GF_NON_COMPLIANT_BITSTREAM; i = 0; while ((slc = (GF_NALUFFParam *)gf_list_enum(spss->nalus, &i))) { u8 *no_emulation_buf; u32 no_emulation_buf_size, emulation_bytes; /*SPS may still contains emulation bytes*/ no_emulation_buf = gf_malloc((slc->size) * sizeof(char)); no_emulation_buf_size = gf_media_nalu_remove_emulation_bytes(slc->data, no_emulation_buf, slc->size); idx = gf_hevc_read_sps_ex(no_emulation_buf, no_emulation_buf_size, &hevc, &bit_offset); if (idx < 0) { if (orig) gf_bs_del(orig); gf_free(no_emulation_buf); continue; } orig = gf_bs_new(no_emulation_buf, no_emulation_buf_size, GF_BITSTREAM_READ); mod = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); /*copy over till vui flag*/ assert(bit_offset >= 0); while (bit_offset) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); bit_offset--; } avc_hevc_rewrite_vui(vui_info, orig, mod); /*finally copy over remaining*/ while (gf_bs_bits_available(orig)) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); } gf_bs_del(orig); orig = NULL; gf_free(no_emulation_buf); /*set anti-emulation*/ gf_bs_get_content(mod, &no_emulation_buf, &no_emulation_buf_size); emulation_bytes = gf_media_nalu_emulation_bytes_add_count(no_emulation_buf, no_emulation_buf_size); if (no_emulation_buf_size + emulation_bytes > slc->size) slc->data = (char*)gf_realloc(slc->data, no_emulation_buf_size + emulation_bytes); slc->size = gf_media_nalu_add_emulation_bytes(no_emulation_buf, slc->data, no_emulation_buf_size); gf_bs_del(mod); gf_free(no_emulation_buf); } return GF_OK; } GF_EXPORT GF_Err gf_hevc_change_par(GF_HEVCConfig *hvcc, s32 ar_n, s32 ar_d) { GF_VUIInfo vuii; memset(&vuii, 0, sizeof(GF_VUIInfo)); vuii.ar_num = ar_n; vuii.ar_den = ar_d; vuii.fullrange = -1; vuii.video_format = -1; vuii.color_prim = -1; vuii.color_tfc = -1; vuii.color_matrix = -1; return gf_hevc_change_vui(hvcc, &vuii); } GF_EXPORT GF_Err gf_hevc_change_color(GF_HEVCConfig *hvcc, s32 fullrange, s32 vidformat, s32 colorprim, s32 transfer, s32 
colmatrix) { GF_VUIInfo vuii; memset(&vuii, 0, sizeof(GF_VUIInfo)); vuii.ar_num = -1; vuii.ar_den = -1; vuii.fullrange = fullrange; vuii.video_format = vidformat; vuii.color_prim = colorprim; vuii.color_tfc = transfer; vuii.color_matrix = colmatrix; return gf_hevc_change_vui(hvcc, &vuii); } GF_EXPORT GF_Err gf_hevc_get_sps_info_with_state(HEVCState *hevc, u8 *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d) { s32 idx; idx = gf_hevc_read_sps(sps_data, sps_size, hevc); if (idx < 0) { return GF_NON_COMPLIANT_BITSTREAM; } if (sps_id) *sps_id = idx; if (width) *width = hevc->sps[idx].width; if (height) *height = hevc->sps[idx].height; if (par_n) *par_n = hevc->sps[idx].aspect_ratio_info_present_flag ? hevc->sps[idx].sar_width : (u32)-1; if (par_d) *par_d = hevc->sps[idx].aspect_ratio_info_present_flag ? hevc->sps[idx].sar_height : (u32)-1; return GF_OK; } GF_EXPORT GF_Err gf_hevc_get_sps_info(u8 *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d) { HEVCState hevc; memset(&hevc, 0, sizeof(HEVCState)); hevc.sps_active_idx = -1; return gf_hevc_get_sps_info_with_state(&hevc, sps_data, sps_size, sps_id, width, height, par_n, par_d); } #endif //GPAC_DISABLE_HEVC static u32 AC3_FindSyncCode(u8 *buf, u32 buflen) { u32 end = buflen - 6; u32 offset = 0; while (offset <= end) { if (buf[offset] == 0x0b && buf[offset + 1] == 0x77) { return offset; } offset++; } return buflen; } static Bool AC3_FindSyncCodeBS(GF_BitStream *bs) { u8 b1; u64 pos = gf_bs_get_position(bs); u64 end = gf_bs_get_size(bs); pos += 1; b1 = gf_bs_read_u8(bs); while (pos + 1 <= end) { u8 b2 = gf_bs_read_u8(bs); if ((b1 == 0x0b) && (b2 == 0x77)) { gf_bs_seek(bs, pos - 1); return GF_TRUE; } pos++; b1 = b2; } return GF_FALSE; } static const u32 ac3_sizecod_to_bitrate[] = { 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000, 192000, 224000, 256000, 320000, 384000, 448000, 512000, 576000, 640000 }; static const u32 ac3_sizecod2_to_framesize[] = { 96, 120, 144, 168, 192, 240, 288, 336, 384, 480, 576, 672, 768, 960, 1152, 1344, 1536, 1728, 1920 }; static const u32 ac3_sizecod1_to_framesize[] = { 69, 87, 104, 121, 139, 174, 208, 243, 278, 348, 417, 487, 557, 696, 835, 975, 1114, 1253, 1393 }; static const u32 ac3_sizecod0_to_framesize[] = { 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 448, 512, 640, 768, 896, 1024, 1152, 1280 }; static const u32 ac3_mod_to_chans[] = { 2, 1, 2, 3, 3, 4, 4, 5 }; GF_EXPORT u32 gf_ac3_get_channels(u32 acmod) { u32 nb_ch; nb_ch = ac3_mod_to_chans[acmod]; return nb_ch; } GF_EXPORT u32 gf_ac3_get_bitrate(u32 brcode) { return ac3_sizecod_to_bitrate[brcode]; } Bool gf_ac3_parser(u8 *buf, u32 buflen, u32 *pos, GF_AC3Config *hdr, Bool full_parse) { GF_BitStream *bs; Bool ret; if (buflen < 6) return GF_FALSE; (*pos) = AC3_FindSyncCode(buf, buflen); if (*pos >= buflen) return GF_FALSE; bs = gf_bs_new((const char*)(buf + *pos), buflen, GF_BITSTREAM_READ); ret = gf_ac3_parser_bs(bs, hdr, full_parse); gf_bs_del(bs); return ret; } GF_EXPORT Bool gf_ac3_parser_bs(GF_BitStream *bs, GF_AC3Config *hdr, Bool full_parse) { u32 fscod, frmsizecod, bsid, ac3_mod, freq, framesize, bsmod, syncword; u64 pos; if (!hdr || (gf_bs_available(bs) < 6)) return GF_FALSE; if (!AC3_FindSyncCodeBS(bs)) return GF_FALSE; pos = gf_bs_get_position(bs); syncword = gf_bs_read_u16(bs); if (syncword != 0x0B77) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AC3] Wrong sync word detected (0x%X - expecting 0x0B77).\n", syncword)); return GF_FALSE; } 
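	/* Fixed AC-3 header (ATSC A/52 syncinfo + start of BSI): crc1(16), fscod(2), frmsizecod(6),
	   bsid(5), bsmod(3), acmod(3); sample rate and frame size are then derived from fscod and
	   frmsizecod using the lookup tables above. */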
gf_bs_read_int_log(bs, 16, "crc1"); fscod = gf_bs_read_int_log(bs, 2, "fscod"); frmsizecod = gf_bs_read_int_log(bs, 6, "frmsizecod"); bsid = gf_bs_read_int_log(bs, 5, "bsid"); bsmod = gf_bs_read_int_log(bs, 3, "bsmod"); ac3_mod = gf_bs_read_int_log(bs, 3, "ac3_mod"); if (frmsizecod >= 2 * sizeof(ac3_sizecod_to_bitrate) / sizeof(u32)) return GF_FALSE; hdr->bitrate = ac3_sizecod_to_bitrate[frmsizecod / 2]; if (bsid > 8) hdr->bitrate = hdr->bitrate >> (bsid - 8); switch (fscod) { case 0: if (frmsizecod >= 2 * sizeof(ac3_sizecod0_to_framesize) / sizeof(u32)) return GF_FALSE; freq = 48000; framesize = ac3_sizecod0_to_framesize[frmsizecod / 2] * 2; break; case 1: if (frmsizecod >= 2 * sizeof(ac3_sizecod1_to_framesize) / sizeof(u32)) return GF_FALSE; freq = 44100; framesize = (ac3_sizecod1_to_framesize[frmsizecod / 2] + (frmsizecod & 0x1)) * 2; break; case 2: if (frmsizecod >= 2 * sizeof(ac3_sizecod2_to_framesize) / sizeof(u32)) return GF_FALSE; freq = 32000; framesize = ac3_sizecod2_to_framesize[frmsizecod / 2] * 2; break; default: return GF_FALSE; } hdr->sample_rate = freq; hdr->framesize = framesize; if (full_parse) { hdr->streams[0].bsid = bsid; hdr->streams[0].bsmod = bsmod; hdr->streams[0].acmod = ac3_mod; hdr->streams[0].lfon = 0; hdr->streams[0].fscod = fscod; hdr->brcode = frmsizecod / 2; } if (ac3_mod >= 2 * sizeof(ac3_mod_to_chans) / sizeof(u32)) return GF_FALSE; hdr->channels = ac3_mod_to_chans[ac3_mod]; if ((ac3_mod & 0x1) && (ac3_mod != 1)) gf_bs_read_int_log(bs, 2, "cmixlev"); if (ac3_mod & 0x4) gf_bs_read_int_log(bs, 2, "surmixlev"); if (ac3_mod == 0x2) gf_bs_read_int_log(bs, 2, "dsurmod"); if (gf_bs_read_int_log(bs, 1, "lfeon")) { hdr->channels += 1; hdr->streams[0].lfon = 1; } gf_bs_seek(bs, pos); return GF_TRUE; } GF_EXPORT Bool gf_eac3_parser_bs(GF_BitStream *bs, GF_AC3Config *hdr, Bool full_parse) { u32 fscod, bsid, ac3_mod, freq, framesize, syncword, substreamid, lfon, channels, numblkscod, strmtyp, frmsiz; u64 pos; u16 chanmap; static u32 numblks[4] = {1, 2, 3, 6}; if (!hdr || (gf_bs_available(bs) < 6)) return GF_FALSE; if (!AC3_FindSyncCodeBS(bs)) return GF_FALSE; pos = gf_bs_get_position(bs); framesize = 0; numblkscod = 0; memset(hdr, 0, sizeof(GF_AC3Config)); block: syncword = gf_bs_read_u16(bs); if (syncword != 0x0B77) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[E-AC3] Wrong sync word detected (0x%X - expecting 0x0B77).\n", syncword)); return GF_FALSE; } strmtyp = gf_bs_read_int_log(bs, 2, "strmtyp"); substreamid = gf_bs_read_int_log(bs, 3, "substreamid"); //next main (independent) AU, done with this frame if ((strmtyp!=0x1) && ((hdr->substreams >> substreamid) & 0x1)) { hdr->framesize = framesize; gf_bs_seek(bs, pos); return GF_TRUE; } frmsiz = gf_bs_read_int_log(bs, 11, "frmsiz"); framesize += 2 * (1 + frmsiz); fscod = gf_bs_read_int_log(bs, 2, "fscod"); if (fscod == 0x3) { fscod = gf_bs_read_int_log(bs, 2, "fscod2"); numblkscod += 6; } else { numblkscod += gf_bs_read_int_log(bs, 2, "numblkscod"); } assert(numblkscod <= 9); if ((hdr->substreams >> substreamid) & 0x1) { //we still have sync frames following if (substreamid) { if (gf_bs_seek(bs, pos + framesize) != GF_OK) { gf_bs_seek(bs, pos); return GF_FALSE; } if ((gf_bs_available(bs) < 6) || !AC3_FindSyncCodeBS(bs)) { gf_bs_seek(bs, pos); return GF_FALSE; } goto block; } } hdr->substreams |= (1 << substreamid); switch (fscod) { case 0: freq = 48000; break; case 1: freq = 44100; break; case 2: freq = 32000; break; default: return GF_FALSE; } ac3_mod = gf_bs_read_int_log(bs, 3, "ac3_mod"); lfon = gf_bs_read_int_log(bs, 
1, "lfon"); bsid = gf_bs_read_int_log(bs, 5, "bsid"); if (!substreamid && (bsid != 16/*E-AC3*/)) return GF_FALSE; gf_bs_read_int_log(bs, 5, "dialnorm"); if (gf_bs_read_int_log(bs, 1, "compre")) { gf_bs_read_int_log(bs, 8, "compr"); } if (ac3_mod==0) { gf_bs_read_int_log(bs, 5, "dialnorm2"); if (gf_bs_read_int_log(bs, 1, "compr2e")) { gf_bs_read_int_log(bs, 8, "compr2"); } } chanmap = 0; if (strmtyp==0x1) { if (gf_bs_read_int_log(bs, 1, "chanmape")) { chanmap = gf_bs_read_int_log(bs, 16, "chanmap"); } } channels = ac3_mod_to_chans[ac3_mod]; if (lfon) channels += 1; hdr->bitrate = 0; hdr->sample_rate = freq; hdr->framesize = framesize; if (strmtyp != 1) { hdr->channels = channels; hdr->streams[substreamid].lfon = lfon; if (full_parse) { hdr->streams[substreamid].bsid = bsid; hdr->streams[substreamid].bsmod = 0; hdr->streams[substreamid].acmod = ac3_mod; hdr->streams[substreamid].fscod = fscod; hdr->brcode = 0; } hdr->nb_streams++; //not clear if this is only for the independent streams hdr->brcode += ((frmsiz+1) * freq) / (numblks[numblkscod]*16) / 1000; if (lfon) hdr->channels += 1; } else { hdr->streams[substreamid].nb_dep_sub = substreamid; hdr->streams[substreamid].chan_loc |= chanmap; } if (numblkscod < 6) { //we need 6 blocks to make a sample if (gf_bs_seek(bs, pos + framesize) != GF_OK) { gf_bs_seek(bs, pos); return GF_FALSE; } if ((gf_bs_available(bs) < 6) || !AC3_FindSyncCodeBS(bs)) return GF_FALSE; goto block; } gf_bs_seek(bs, pos); return GF_TRUE; } #endif /*GPAC_DISABLE_AV_PARSERS*/ u32 gf_id3_read_size(GF_BitStream *bs) { u32 size = 0; gf_bs_read_int(bs, 1); size |= gf_bs_read_int(bs, 7); size<<=7; gf_bs_read_int(bs, 1); size |= gf_bs_read_int(bs, 7); size<<=7; gf_bs_read_int(bs, 1); size |= gf_bs_read_int(bs, 7); size<<=7; gf_bs_read_int(bs, 1); size |= gf_bs_read_int(bs, 7); return size; } #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined (GPAC_DISABLE_OGG) /* Vorbis parser */ static u32 vorbis_book_maptype1_quantvals(u32 entries, u32 dim) { u32 vals = (u32)floor(pow(entries, 1.0 / dim)); while (1) { u32 acc = 1; u32 acc1 = 1; u32 i; for (i = 0; i < dim; i++) { acc *= vals; acc1 *= vals + 1; } if (acc <= entries && acc1 > entries) return (vals); else { if (acc > entries) vals--; else vals++; } } } static u32 ilog(u32 v, Bool dec) { u32 ret = 0; if (dec && v) --v; while (v) { ret++; v >>= 1; } return (ret); } static u32 icount(u32 v) { u32 ret = 0; while (v) { ret += v & 1; v >>= 1; } return(ret); } GF_EXPORT Bool gf_vorbis_parse_header(GF_VorbisParser *vp, u8 *data, u32 data_len) { u32 pack_type, i, j, k, times, nb_part, nb_books, nb_modes; u32 l; char szNAME[8]; oggpack_buffer opb; oggpack_readinit(&opb, (u8*)data, data_len); pack_type = oggpack_read(&opb, 8); i = 0; while (i < 6) { szNAME[i] = oggpack_read(&opb, 8); i++; } szNAME[i] = 0; if (strcmp(szNAME, "vorbis")) { return GF_FALSE; } switch (pack_type) { case 0x01: vp->version = oggpack_read(&opb, 32); if (vp->version != 0) { return GF_FALSE; } vp->channels = oggpack_read(&opb, 8); vp->sample_rate = oggpack_read(&opb, 32); vp->max_r = oggpack_read(&opb, 32); vp->avg_r = oggpack_read(&opb, 32); vp->low_r = oggpack_read(&opb, 32); vp->min_block = 1<<oggpack_read(&opb, 4); vp->max_block = 1<<oggpack_read(&opb, 4); if (vp->sample_rate < 1 || vp->channels < 1 || vp->min_block < 8 || vp->max_block < vp->min_block || oggpack_read(&opb, 1) != 1) { return GF_FALSE; } vp->nb_init=1; return GF_TRUE; case 0x03: /*trash comments*/ vp->nb_init++; return GF_TRUE; case 0x05: /*need at least bitstream header to make sure we're parsing 
the right thing*/ if (!vp->nb_init) return GF_FALSE; break; default: return GF_FALSE; } /*OK parse codebook*/ nb_books = oggpack_read(&opb, 8) + 1; /*skip vorbis static books*/ for (i = 0; i < nb_books; i++) { u32 map_type, qb, qq; u32 entries, dim; oggpack_read(&opb, 24); dim = oggpack_read(&opb, 16); entries = oggpack_read(&opb, 24); if ((s32)entries < 0) entries = 0; if (oggpack_read(&opb, 1) == 0) { if (oggpack_read(&opb, 1)) { for (j = 0; j < entries; j++) { if (oggpack_read(&opb, 1)) { oggpack_read(&opb, 5); } } } else { for (j = 0; j < entries; j++) oggpack_read(&opb, 5); } } else { oggpack_read(&opb, 5); for (j = 0; j < entries;) { u32 num = oggpack_read(&opb, ilog(entries - j, GF_FALSE)); for (k = 0; k < num && j < entries; k++, j++) { } } } switch ((map_type = oggpack_read(&opb, 4))) { case 0: break; case 1: case 2: oggpack_read(&opb, 32); oggpack_read(&opb, 32); qq = oggpack_read(&opb, 4) + 1; oggpack_read(&opb, 1); if (map_type == 1) qb = vorbis_book_maptype1_quantvals(entries, dim); else if (map_type == 2) qb = entries * dim; else qb = 0; for (j = 0; j < qb; j++) oggpack_read(&opb, qq); break; } } times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) oggpack_read(&opb, 16); times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) { u32 type = oggpack_read(&opb, 16); if (type) { u32 *parts, *class_dims, count, rangebits; u32 max_class = 0; nb_part = oggpack_read(&opb, 5); parts = (u32*)gf_malloc(sizeof(u32) * nb_part); for (j = 0; j < nb_part; j++) { parts[j] = oggpack_read(&opb, 4); if (max_class < parts[j]) max_class = parts[j]; } class_dims = (u32*)gf_malloc(sizeof(u32) * (max_class + 1)); for (j = 0; j < max_class + 1; j++) { u32 class_sub; class_dims[j] = oggpack_read(&opb, 3) + 1; class_sub = oggpack_read(&opb, 2); if (class_sub) oggpack_read(&opb, 8); for (k = 0; k < (u32)(1 << class_sub); k++) oggpack_read(&opb, 8); } oggpack_read(&opb, 2); rangebits = oggpack_read(&opb, 4); count = 0; for (j = 0, k = 0; j < nb_part; j++) { count += class_dims[parts[j]]; for (; k < count; k++) oggpack_read(&opb, rangebits); } gf_free(parts); gf_free(class_dims); } else { oggpack_read(&opb, 8 + 16 + 16 + 6 + 8); nb_books = oggpack_read(&opb, 4) + 1; for (j = 0; j < nb_books; j++) oggpack_read(&opb, 8); } } times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) { u32 acc = 0; oggpack_read(&opb, 16);/*type*/ oggpack_read(&opb, 24); oggpack_read(&opb, 24); oggpack_read(&opb, 24); nb_part = oggpack_read(&opb, 6) + 1; oggpack_read(&opb, 8); for (j = 0; j < nb_part; j++) { u32 cascade = oggpack_read(&opb, 3); if (oggpack_read(&opb, 1)) cascade |= (oggpack_read(&opb, 5) << 3); acc += icount(cascade); } for (j = 0; j < acc; j++) oggpack_read(&opb, 8); } times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) { u32 sub_maps = 1; oggpack_read(&opb, 16); if (oggpack_read(&opb, 1)) sub_maps = oggpack_read(&opb, 4) + 1; if (oggpack_read(&opb, 1)) { u32 nb_steps = oggpack_read(&opb, 8) + 1; for (j = 0; j < nb_steps; j++) { oggpack_read(&opb, ilog(vp->channels, GF_TRUE)); oggpack_read(&opb, ilog(vp->channels, GF_TRUE)); } } oggpack_read(&opb, 2); if (sub_maps>1) { for(l=0; l<vp->channels; l++) oggpack_read(&opb, 4); } for (j = 0; j < sub_maps; j++) { oggpack_read(&opb, 8); oggpack_read(&opb, 8); oggpack_read(&opb, 8); } } nb_modes = oggpack_read(&opb, 6) + 1; for (i = 0; i < nb_modes; i++) { vp->mode_flag[i] = oggpack_read(&opb, 1); oggpack_read(&opb, 16); oggpack_read(&opb, 16); oggpack_read(&opb, 8); } vp->modebits = 0; j = nb_modes; while (j > 1) { vp->modebits++; j >>= 
1; } return GF_TRUE; } GF_EXPORT u32 gf_vorbis_check_frame(GF_VorbisParser *vp, u8 *data, u32 data_length) { s32 block_size; oggpack_buffer opb; if (!vp) return 0; oggpack_readinit(&opb, (unsigned char*)data, data_length); /*not audio*/ if (oggpack_read(&opb, 1) != 0) return 0; block_size = oggpack_read(&opb, vp->modebits); if (block_size == -1) return 0; return ((vp->mode_flag[block_size]) ? vp->max_block : vp->min_block) / (2); } /*call with vorbis header packets - initializes the parser on success, leave it to NULL otherwise returns 1 if success, 0 if error.*/ Bool gf_opus_parse_header(GF_OpusParser *opus, u8 *data, u32 data_len) { char tag[9]; GF_BitStream *bs = gf_bs_new(data, data_len, GF_BITSTREAM_READ); gf_bs_read_data(bs, tag, 8); tag[8]=0; if (memcmp(data, "OpusHead", sizeof(char)*8)) { gf_bs_del(bs); return GF_FALSE; } /*Identification Header*/ opus->version = gf_bs_read_u8(bs); /*version*/ if (opus->version != 1) { gf_bs_del(bs); GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Opus] Unsupported version %d\n", opus->version)); return GF_FALSE; } opus->OutputChannelCount = gf_bs_read_u8(bs); opus->PreSkip = gf_bs_read_u16_le(bs); opus->InputSampleRate = gf_bs_read_u32_le(bs); opus->OutputGain = gf_bs_read_u16_le(bs); opus->ChannelMappingFamily = gf_bs_read_u8(bs); if (opus->ChannelMappingFamily != 0) { opus->StreamCount = gf_bs_read_u8(bs); opus->CoupledCount = gf_bs_read_u8(bs); gf_bs_read_data(bs, (char *) opus->ChannelMapping, opus->OutputChannelCount); } gf_bs_del(bs); return GF_TRUE; } /*returns 0 if init error or not a vorbis frame, otherwise returns the number of audio samples in this frame*/ u32 gf_opus_check_frame(GF_OpusParser *op, u8 *data, u32 data_length) { u32 block_size; if (!memcmp(data, "OpusHead", sizeof(char)*8)) return 0; if (!memcmp(data, "OpusTags", sizeof(char)*8)) return 0; /*consider the whole packet as Ogg packets and ISOBMFF samples for Opus are framed similarly*/ static const int OpusFrameDurIn48k[] = { 480, 960, 1920, 2880, 480, 960, 1920, 2880, 480, 960, 1920, 2880, 480, 960, 480, 960, 120, 240, 480, 960, 120, 240, 480, 960, 120, 240, 480, 960, 120, 240, 480, 960, }; int TOC_config = (data[0] & 0xf8) >> 3; //int s = (data[0] & 0x04) >> 2; block_size = OpusFrameDurIn48k[TOC_config]; int c = data[0] & 0x03; if (c == 1 || c == 2) { block_size *= 2; } else if (c == 3) { /*unknown number of frames*/ int num_frames = data[1] & 0x3f; block_size *= num_frames; } return block_size; } #endif /*!defined(GPAC_DISABLE_AV_PARSERS) && !defined (GPAC_DISABLE_OGG)*/ u64 gf_mpegh_escaped_value(GF_BitStream *bs, u32 nBits1, u32 nBits2, u32 nBits3) { u64 value = gf_bs_read_int(bs, nBits1); if (value == (1<<nBits1)-1) { u32 vadd = gf_bs_read_int(bs, nBits2); value += vadd; if (vadd == (1<<nBits2)-1) { vadd = gf_bs_read_int(bs, nBits3); value += vadd; } } return value; } GF_EXPORT s32 gf_mpegh_get_mhas_pl(u8 *ptr, u32 size, u64 *ch_layout) { s32 PL = -1; GF_BitStream *bs; u32 i; s32 sync_pos=-1; for (i=0; i<size-3; i++) { if ((ptr[i]==0xC0) && (ptr[i+1]== 0x01) && (ptr[i+2]==0xA5)) { sync_pos = i; break; } } if (sync_pos<0) return 0; if (ch_layout) *ch_layout = 0; bs = gf_bs_new(ptr, size, GF_BITSTREAM_READ); gf_bs_skip_bytes(bs, sync_pos); while (gf_bs_available(bs)) { u32 type = (u32) gf_mpegh_escaped_value(bs, 3, 8, 8); /*u64 label = */gf_mpegh_escaped_value(bs, 2, 8, 32); u64 mh_size = gf_mpegh_escaped_value(bs, 11, 24, 24); if (mh_size > gf_bs_available(bs)) break; //MHAS config if (type==1) { PL = gf_bs_read_int(bs, 8); if (ch_layout) { u32 idx = gf_bs_read_int(bs, 5); if 
(idx==0x1f) gf_bs_read_int(bs, 24);
				/*idx = */gf_bs_read_int(bs, 3);
				gf_bs_read_int(bs, 1);
				gf_bs_read_int(bs, 1);
				//speaker config
				idx = gf_bs_read_int(bs, 2);
				if (idx == 0) {
					*ch_layout = gf_audio_fmt_get_layout_from_cicp( gf_bs_read_int(bs, 6) );
				}
			}
			break;
		}
		gf_bs_skip_bytes(bs, mh_size);
	}
	gf_bs_del(bs);
	return PL;
}

GF_EXPORT
void gf_media_vvc_parse_sei(char *buffer, u32 nal_size, VVCState *vvc)
{
	gf_hevc_vvc_parse_sei(buffer, nal_size, NULL, vvc);
}

static Bool vvc_parse_nal_header(GF_BitStream *bs, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id)
{
	u32 val;
	val = gf_bs_read_int_log(bs, 1, "forbidden_zero");
	if (val) return GF_FALSE;
	val = gf_bs_read_int_log(bs, 1, "reserved0");
	if (val) return GF_FALSE;

	val = gf_bs_read_int_log(bs, 6, "layerID");
	if (layer_id) *layer_id = val;

	val = gf_bs_read_int_log(bs, 5, "nuh_type");
	if (nal_unit_type) *nal_unit_type = val;

	val = gf_bs_read_int_log(bs, 3, "temporalID");
	if (!val) return GF_FALSE;
	val -= 1;
	if (temporal_id) *temporal_id = val;
	return GF_TRUE;
}

static void vvc_profile_tier_level(GF_BitStream *bs, VVC_ProfileTierLevel *ptl, u32 idx)
{
	u32 i;
	if (ptl->pt_present) {
		ptl->general_profile_idc = gf_bs_read_int_log_idx(bs, 7, "general_profile_idc", idx);
		ptl->general_tier_flag = gf_bs_read_int_log_idx(bs, 1, "general_tier_flag", idx);
	}
	ptl->general_level_idc = gf_bs_read_int_log_idx(bs, 8, "general_level_idc", idx);
	ptl->frame_only_constraint = gf_bs_read_int_log_idx(bs, 1, "frame_only_constraint", idx);
	ptl->multilayer_enabled = gf_bs_read_int_log_idx(bs, 1, "multilayer_enabled", idx);

	//general constraints info - max size is 1 + 81 + 8 + 255
	if (ptl->pt_present) {
		// general_constraints_info
		ptl->gci_present = gf_bs_read_int_log_idx(bs, 1, "gci_present", idx);
		if (ptl->gci_present) {
			u8 res;
			ptl->gci[0] = 0x80;
			ptl->gci[0] |= gf_bs_read_int(bs, 7);
			//81-7 = 74 bits till reserved
			gf_bs_read_data(bs, ptl->gci+1, 9);
			ptl->gci[10] = gf_bs_read_int(bs, 2)<<6;
			//skip extensions
			ptl->gci[11] = 0;
			res = gf_bs_read_int(bs, 8);
			gf_bs_read_int(bs, res);
		}
		gf_bs_align(bs);
	}
	for (i=ptl->ptl_max_tid; i>0; i--) {
		ptl->sub_ptl[i-1].level_present_flag = gf_bs_read_int_log_idx2(bs, 1, "level_present_flag", idx, i);
	}
	gf_bs_align(bs);
	for (i=ptl->ptl_max_tid; i>0; i--) {
		if (ptl->sub_ptl[i-1].level_present_flag)
			ptl->sub_ptl[i-1].sublayer_level_idc = gf_bs_read_int_log_idx2(bs, 8, "sublayer_level_idc", idx, i);
	}
	if (ptl->pt_present) {
		ptl->num_sub_profiles = gf_bs_read_int_log_idx(bs, 8, "num_sub_profiles", idx);
		for (i=0; i<ptl->num_sub_profiles; i++) {
			ptl->sub_profile_idc[i] = gf_bs_read_int_log_idx2(bs, 32, "sub_profile_idc", idx, i);
		}
	}
}

static s32 gf_media_vvc_read_vps_bs_internal(GF_BitStream *bs, VVCState *vvc, Bool stop_at_vps_ext)
{
	u32 i, j;
	s32 vps_id;
	VVC_VPS *vps;
	Bool vps_default_ptl_dpb_hrd_max_tid_flag=0;

	//nalu header already parsed
	vps_id = gf_bs_read_int_log(bs, 4, "vps_id");
	if ((vps_id<0) || (vps_id >= 16)) return -1;
	if (!vps_id) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] VPS ID 0 is forbidden\n"));
		return -1;
	}
	vps = &vvc->vps[vps_id];
	if (!vps->state) {
		vps->id = vps_id;
		vps->state = 1;
	}
	vps->max_layers = 1 + gf_bs_read_int_log(bs, 6, "max_layers");
	if (vps->max_layers > MAX_LHVC_LAYERS) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] sorry, %d layers in VPS but only %d supported\n", vps->max_layers, MAX_LHVC_LAYERS));
		return -1;
	}
	vps->max_sub_layers = gf_bs_read_int_log(bs, 3, "max_sub_layers_minus1") + 1;

	if ((vps->max_layers>1) && (vps->max_sub_layers>1))
		vps_default_ptl_dpb_hrd_max_tid_flag = gf_bs_read_int_log(bs,
1, "vps_default_ptl_dpb_hrd_max_tid_flag"); if (vps->max_layers>1) vps->all_layers_independent = gf_bs_read_int_log(bs, 1, "all_layers_independent"); for (i=0; i<vps->max_layers; i++) { u32 layer_id = gf_bs_read_int_log_idx(bs, 6, "layer_id", i); if (layer_id>vps->max_layer_id) vps->max_layer_id = layer_id; if (i && !vps->all_layers_independent) { Bool layer_indep = gf_bs_read_int_log_idx(bs, 1, "layer_independent", i); if (!layer_indep) { Bool vps_max_tid_ref_present_flag = gf_bs_read_int_log_idx(bs, 1, "vps_max_tid_ref_present_flag", i); for (j=0; j<i; j++) { Bool vps_direct_ref_layer_flag = gf_bs_read_int_log_idx2(bs, 1, "vps_direct_ref_layer_flag", i, j); if (vps_max_tid_ref_present_flag && vps_direct_ref_layer_flag) { gf_bs_read_int_log_idx2(bs, 3, "vps_max_tid_il_ref_pics_plus1", i, j); } } } } } vps->num_ptl = 1; if (vps->max_layers > 1) { if (vps->all_layers_independent) { vps->each_layer_is_ols = gf_bs_read_int_log(bs, 1, "each_layer_is_ols"); } if (!vps->each_layer_is_ols) { u32 vps_ols_mode_idc = 2; if (!vps->all_layers_independent) { vps_ols_mode_idc = gf_bs_read_int_log(bs, 2, "vps_ols_mode_idc"); } if (vps_ols_mode_idc==2) { u8 vps_num_output_layer_sets = 2 + gf_bs_read_int_log(bs, 8, "vps_num_output_layer_sets_minus2"); for (i=0; i<vps_num_output_layer_sets; i++) { for (j=0; j<vps->max_layers; j++) { gf_bs_read_int_log_idx2(bs, 1, "vps_ols_output_layer_flag", i, j); } } } } vps->num_ptl = 1 + gf_bs_read_int_log(bs, 8, "num_ptl_minus1"); } vps->ptl[0].pt_present = 1; for (i=0; i<vps->num_ptl; i++) { if (i) vps->ptl[i].pt_present = gf_bs_read_int_log_idx(bs, 1, "pt_present", i); if (!vps_default_ptl_dpb_hrd_max_tid_flag) vps->ptl[i].ptl_max_tid = gf_bs_read_int_log_idx(bs, 3, "ptl_max_tid", i); else vps->ptl[i].ptl_max_tid = vps->max_sub_layers - 1;; } //align gf_bs_align(bs); for (i=0; i<vps->num_ptl; i++) { vvc_profile_tier_level(bs, &vps->ptl[i], i); } //TODO, parse multilayer stuff return vps_id; } static s32 gf_media_vvc_read_sps_bs_internal(GF_BitStream *bs, VVCState *vvc, u8 layer_id, u32 *vui_flag_pos) { s32 vps_id, sps_id; u32 i, CtbSizeY; VVC_SPS *sps; u8 sps_ptl_dpb_hrd_params_present_flag; if (vui_flag_pos) *vui_flag_pos = 0; sps_id = gf_bs_read_int_log(bs, 4, "sps_id"); if ((sps_id<0) || (sps_id >= 16)) { return -1; } vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if ((vps_id<0) || (vps_id >= 16)) { return -1; } if (!vps_id && !vvc->vps[0].state) { vvc->vps[0].state = 1; vvc->vps[0].num_ptl = 1; vvc->vps[0].max_layers = 1; vvc->vps[0].all_layers_independent = 1; } sps = &vvc->sps[sps_id]; if (!sps->state) { sps->state = 1; sps->id = sps_id; sps->vps_id = vps_id; } sps->max_sublayers = 1 + gf_bs_read_int_log(bs, 3, "max_sublayers_minus1"); sps->chroma_format_idc = gf_bs_read_int_log(bs, 2, "chroma_format_idc"); sps->log2_ctu_size = 5 + gf_bs_read_int_log(bs, 2, "log2_ctu_size_minus5"); CtbSizeY = 1<<sps->log2_ctu_size; sps_ptl_dpb_hrd_params_present_flag = gf_bs_read_int_log(bs, 1, "sps_ptl_dpb_hrd_params_present_flag"); if (sps_ptl_dpb_hrd_params_present_flag) { VVC_ProfileTierLevel ptl, *p_ptl; if (sps->vps_id) { p_ptl = &ptl; } else { p_ptl = &vvc->vps[0].ptl[0]; } memset(p_ptl, 0, sizeof(VVC_ProfileTierLevel)); p_ptl->pt_present = 1; p_ptl->ptl_max_tid = sps->max_sublayers-1; vvc_profile_tier_level(bs, p_ptl, 0); } sps->gdr_enabled = gf_bs_read_int_log(bs, 1, "gdr_enabled"); sps->ref_pic_resampling = gf_bs_read_int_log(bs, 1, "ref_pic_resampling"); if (sps->ref_pic_resampling) sps->res_change_in_clvs = gf_bs_read_int_log(bs, 1, "res_change_in_clvs"); sps->width 
= gf_bs_read_ue_log(bs, "width"); sps->height = gf_bs_read_ue_log(bs, "height"); sps->conf_window = gf_bs_read_int_log(bs, 1, "conformance_window_present_flag"); if (sps->conf_window) { sps->cw_left = gf_bs_read_ue_log(bs, "conformance_window_left"); sps->cw_right = gf_bs_read_ue_log(bs, "conformance_window_right"); sps->cw_top = gf_bs_read_ue_log(bs, "conformance_window_top"); sps->cw_bottom = gf_bs_read_ue_log(bs, "conformance_window_bottom"); } sps->subpic_info_present = gf_bs_read_int_log(bs, 1, "subpic_info_present"); if (sps->subpic_info_present) { sps->nb_subpics = 1 + gf_bs_read_ue_log(bs, "nb_subpics_minus1"); if (sps->nb_subpics>1) { u32 tmpWidthVal, tmpHeightVal; sps->independent_subpic_flags = gf_bs_read_int_log(bs, 1, "independent_subpic_flags"); sps->subpic_same_size = gf_bs_read_int_log(bs, 1, "subpic_same_size"); tmpWidthVal = (sps->width + CtbSizeY-1) / CtbSizeY; tmpWidthVal = gf_get_bit_size(tmpWidthVal); tmpHeightVal = (sps->height + CtbSizeY-1) / CtbSizeY; tmpHeightVal = gf_get_bit_size(tmpHeightVal); for (i=0; i<sps->nb_subpics; i++) { if( !sps->subpic_same_size || !i) { if (i && (sps->width > CtbSizeY)) gf_bs_read_int_log(bs, tmpWidthVal, "subpic_ctu_top_left_x"); if (i && (sps->height > CtbSizeY)) gf_bs_read_int_log(bs, tmpHeightVal, "subpic_ctu_top_left_y"); if ((i+1 < sps->nb_subpics) && (sps->width > CtbSizeY)) gf_bs_read_int_log(bs, tmpWidthVal, "subpic_width_minus1"); if ((i+1 < sps->nb_subpics) && (sps->height > CtbSizeY)) gf_bs_read_int_log(bs, tmpHeightVal, "subpic_height_minus1"); } if (!sps->independent_subpic_flags) { gf_bs_read_int_log(bs, 1, "subpic_treated_as_pic_flag"); gf_bs_read_int_log(bs, 1, "loop_filter_across_subpic_enabled_flag"); } } sps->subpicid_len = gf_bs_read_ue_log(bs, "subpic_id_len_minus1") + 1; sps->subpicid_mapping_explicit = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_explicitly_signalled_flag"); if (sps->subpicid_mapping_explicit) { sps->subpicid_mapping_present = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_present_flag"); if (sps->subpicid_mapping_present) { for (i=0; i<sps->nb_subpics; i++) { gf_bs_read_ue_log(bs, "subpic_id"); } } } } } sps->bitdepth = gf_bs_read_ue_log(bs, "bitdepth_minus8") + 8; gf_bs_read_int_log(bs, 1, "entropy_coding_sync_enabled_flag"); gf_bs_read_int_log(bs, 1, "entry_point_offsets_present_flag"); sps->log2_max_poc_lsb = 4 + gf_bs_read_int_log(bs, 4, "log2_max_poc_lsb_minus4"); if ((sps->poc_msb_cycle_flag = gf_bs_read_int_log(bs, 1, "poc_msb_cycle_flag"))) sps->poc_msb_cycle_len = 1 + gf_bs_read_ue_log(bs, "poc_msb_cycle_len_minus1"); u8 sps_num_extra_ph_bits = 8 * gf_bs_read_int_log(bs, 2, "sps_num_extra_ph_bytes"); for (i=0; i<sps_num_extra_ph_bits; i++) { if (gf_bs_read_int_log_idx(bs, 1, "extra_ph_bit_present_flag", 1)) sps->ph_num_extra_bits++; } u8 sps_num_extra_sh_bits = 8 * gf_bs_read_int_log(bs, 2, "num_extra_sh_bytes"); for (i=0; i<sps_num_extra_sh_bits; i++) { if (gf_bs_read_int_log_idx(bs, 1, "extra_sh_bit_present_flag", i)) sps->sh_num_extra_bits++; } if (sps_ptl_dpb_hrd_params_present_flag) { u8 sps_sublayer_dpb_params_flag = 0; if (sps->max_sublayers>1) { sps_sublayer_dpb_params_flag = gf_bs_read_int_log(bs, 1, "sps_sublayer_dpb_params_flag"); } for (i=(sps_sublayer_dpb_params_flag ? 
0 : sps->max_sublayers-1); i < sps->max_sublayers; i++ ) {
			gf_bs_read_ue_log_idx(bs, "dpb_max_dec_pic_buffering_minus1", i);
			gf_bs_read_ue_log_idx(bs, "dpb_max_num_reorder_pics", i);
			gf_bs_read_ue_log_idx(bs, "dpb_max_latency_increase_plus1", i);
		}
	}
	gf_bs_read_ue_log(bs, "sps_log2_min_luma_coding_block_size_minus2");
	gf_bs_read_int_log(bs, 1, "sps_partition_constraints_override_enabled_flag");
	gf_bs_read_ue_log(bs, "sps_log2_diff_min_qt_min_cb_intra_slice_luma");
	u8 sps_max_mtt_hierarchy_depth_intra_slice_luma = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_intra_slice_luma");
	if (sps_max_mtt_hierarchy_depth_intra_slice_luma != 0) {
		gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_intra_slice_luma");
		gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_intra_slice_luma");
	}
	u8 sps_qtbtt_dual_tree_intra_flag = 0;
	if (sps->chroma_format_idc) {
		sps_qtbtt_dual_tree_intra_flag = gf_bs_read_int_log(bs, 1, "sps_qtbtt_dual_tree_intra_flag");
	}
	if (sps_qtbtt_dual_tree_intra_flag) {
		gf_bs_read_ue_log(bs, "sps_log2_diff_min_qt_min_cb_intra_slice_chroma");
		u8 sps_max_mtt_hierarchy_depth_intra_slice_chroma = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_intra_slice_chroma");
		if( sps_max_mtt_hierarchy_depth_intra_slice_chroma != 0) {
			gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_intra_slice_chroma");
			gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_intra_slice_chroma");
		}
	}
	gf_bs_read_ue_log(bs, "sps_log2_diff_min_qt_min_cb_inter_slice");
	u8 sps_max_mtt_hierarchy_depth_inter_slice = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_inter_slice");
	if (sps_max_mtt_hierarchy_depth_inter_slice != 0) {
		gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_inter_slice");
		gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_inter_slice");
	}
	//u8 sps_max_luma_transform_size_64_flag = 0;
	if (CtbSizeY > 32) {
		/*sps_max_luma_transform_size_64_flag = */gf_bs_read_int_log(bs, 1, "sps_max_luma_transform_size_64_flag");
	}
	u8 sps_transform_skip_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_transform_skip_enabled_flag");
	if (sps_transform_skip_enabled_flag) {
		gf_bs_read_ue_log(bs, "sps_log2_transform_skip_max_size_minus2");
		gf_bs_read_int_log(bs, 1, "sps_bdpcm_enabled_flag");
	}
	if (gf_bs_read_int_log(bs, 1, "sps_mts_enabled_flag")) {
		gf_bs_read_int_log(bs, 1, "sps_explicit_mts_intra_enabled_flag");
		gf_bs_read_int_log(bs, 1, "sps_explicit_mts_inter_enabled_flag");
	}
	gf_bs_read_int_log(bs, 1, "sps_lfnst_enabled_flag");
	if (sps->chroma_format_idc) {
		u8 sps_joint_cbcr_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_joint_cbcr_enabled_flag");
		u8 sps_same_qp_table_for_chroma_flag = gf_bs_read_int_log(bs, 1, "sps_same_qp_table_for_chroma_flag");
		u32 numQpTables = sps_same_qp_table_for_chroma_flag ? 1 : (sps_joint_cbcr_enabled_flag ? 3 : 2);
		for (i=0; i<numQpTables; i++) {
			gf_bs_read_se_log_idx(bs, "sps_qp_table_start_minus26", i);
			u32 j, sps_num_points_in_qp_table = 1 + gf_bs_read_ue_log_idx(bs, "sps_num_points_in_qp_table_minus1", i);
			for (j=0; j<sps_num_points_in_qp_table; j++) {
				gf_bs_read_ue_log_idx2(bs, "sps_delta_qp_in_val_minus1", i, j);
				gf_bs_read_ue_log_idx2(bs, "sps_delta_qp_diff_val", i, j);
			}
		}
	}
	gf_bs_read_int_log(bs, 1, "sps_sao_enabled_flag");
	sps->alf_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_alf_enabled_flag");
	if (sps->alf_enabled_flag && sps->chroma_format_idc) {
		gf_bs_read_int_log(bs, 1, "sps_ccalf_enabled_flag");
	}
	/*!
TODO parse the rest !*/ return sps_id; } static s32 gf_media_vvc_read_pps_bs_internal(GF_BitStream *bs, VVCState *vvc) { u32 i; s32 pps_id; VVC_PPS *pps; //NAL header already read pps_id = gf_bs_read_int_log(bs, 6, "pps_id"); if ((pps_id < 0) || (pps_id >= 64)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] wrong PPS ID %d in PPS\n", pps_id)); return -1; } pps = &vvc->pps[pps_id]; if (!pps->state) { pps->id = pps_id; pps->state = 1; } pps->sps_id = gf_bs_read_int_log(bs, 4, "sps_id"); if (((s32)pps->sps_id<0) || (pps->sps_id >= 16)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] wrong SPS ID %d in PPS\n", pps->sps_id)); pps->sps_id=0; return -1; } vvc->sps_active_idx = pps->sps_id; /*set active sps*/ pps->mixed_nal_types = gf_bs_read_int_log(bs, 1, "mixed_nal_types"); pps->width = gf_bs_read_ue_log(bs, "width"); pps->height = gf_bs_read_ue_log(bs, "height"); pps->conf_window = gf_bs_read_int_log(bs, 1, "conformance_window_flag"); if (pps->conf_window) { pps->cw_left = gf_bs_read_ue_log(bs, "conf_win_left_offset"); pps->cw_right = gf_bs_read_ue_log(bs, "conf_win_right_offset"); pps->cw_top = gf_bs_read_ue_log(bs, "conf_win_top_offset"); pps->cw_bottom = gf_bs_read_ue_log(bs, "conf_win_bottom_offset"); } //scaling window if (gf_bs_read_int_log(bs, 1, "scaling_window_explicit_signaling_flag")) { gf_bs_read_se_log(bs, "scaling_win_left_offset"); gf_bs_read_se_log(bs, "scaling_win_right_offset"); gf_bs_read_se_log(bs, "scaling_win_top_offset"); gf_bs_read_se_log(bs, "scaling_win_bottom_offset"); } pps->output_flag_present_flag = gf_bs_read_int_log(bs, 1, "output_flag_present_flag"); pps->no_pic_partition_flag = gf_bs_read_int_log(bs, 1, "no_pic_partition_flag"); pps->subpic_id_mapping_present_flag = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_present_flag"); if (pps->subpic_id_mapping_present_flag) { u32 pps_subpic_id_len, pps_num_subpics=0; if (!pps->no_pic_partition_flag) { pps_num_subpics = 1+gf_bs_read_ue_log(bs, "pps_num_subpics_minus1"); } pps_subpic_id_len = 1 + gf_bs_read_ue(bs); for (i=0; i<pps_num_subpics; i++) { gf_bs_read_int_log_idx(bs, pps_subpic_id_len, "subpic_id", i); } } if (!pps->no_pic_partition_flag) { gf_bs_read_int_log(bs, 2, "pps_log2_ctu_size_minus5"); u32 num_exp_tile_columns = 1 + gf_bs_read_ue_log(bs, "num_exp_tile_columns_minus1"); u32 num_exp_tile_rows = 1 + gf_bs_read_ue_log(bs, "num_exp_tile_rows_minus1"); for (i=0; i<num_exp_tile_columns; i++) gf_bs_read_ue_log_idx(bs, "tile_column_width_minus1", i); for (i=0; i<num_exp_tile_rows; i++) gf_bs_read_ue_log_idx(bs, "tile_row_height_minus1", i); //todo parse the rest return pps_id; } //todo parse the rest return pps_id; } static s32 vvc_parse_picture_header(GF_BitStream *bs, VVCState *vvc, VVCSliceInfo *si) { s32 pps_id; si->irap_or_gdr_pic = gf_bs_read_int_log(bs, 1, "irap_or_gdr_pic"); si->non_ref_pic = gf_bs_read_int_log(bs, 1, "non_ref_pic"); if (si->irap_or_gdr_pic) si->gdr_pic = gf_bs_read_int_log(bs, 1, "gdr_pic"); if ((si->inter_slice_allowed_flag = gf_bs_read_int_log(bs, 1, "inter_slice_allowed_flag"))) si->intra_slice_allowed_flag = gf_bs_read_int_log(bs, 1, "intra_slice_allowed_flag"); pps_id = gf_bs_read_ue_log(bs, "pps_id"); if ((pps_id<0) || (pps_id >= 64)) return -1; si->pps = &vvc->pps[pps_id]; si->sps = &vvc->sps[si->pps->sps_id]; si->poc_lsb = gf_bs_read_int_log(bs, si->sps->log2_max_poc_lsb, "poc_lsb"); si->recovery_point_valid = 0; si->gdr_recovery_count = 0; if (si->gdr_pic) { si->recovery_point_valid = 1; si->gdr_recovery_count = gf_bs_read_ue_log(bs, "gdr_recovery_count"); } 
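/* remaining picture header fields parsed below: skip the extra PH bits signalled in the SPS,
   then read the optional POC MSB cycle consumed by vvc_compute_poc() */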
gf_bs_read_int_log(bs, si->sps->ph_num_extra_bits, "ph_extra_bits"); if (si->sps->poc_msb_cycle_flag) { if ( (si->poc_msb_cycle_present_flag = gf_bs_read_int_log(bs, 1, "poc_msb_cycle_present_flag"))) { si->poc_msb_cycle = gf_bs_read_int_log(bs, si->sps->poc_msb_cycle_len, "poc_msb_cycle"); } } return 0; } static s32 vvc_parse_slice(GF_BitStream *bs, VVCState *vvc, VVCSliceInfo *si) { // u32 CurrSubpicIdx = 0; si->picture_header_in_slice_header_flag = gf_bs_read_int_log(bs, 1, "picture_header_in_slice_header_flag"); if (si->picture_header_in_slice_header_flag) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CODING, ("[VVC] Picture header in slice header incomplete support, cannot guess slice type\n")); si->slice_type = GF_VVC_SLICE_TYPE_UNKNOWN; return vvc_parse_picture_header(bs, vvc, si); } if (!si->sps) return -1; si->slice_type = GF_VVC_SLICE_TYPE_I; if (gf_bs_read_int_log(bs, 1, "sps_subpic_info_present_flag")) { gf_bs_read_int_log(bs, si->sps->subpicid_len, "subpic_id"); //todo update CurrSubpicIdx } if (si->pps->rect_slice_flag ) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[VVC] tiling parsing not supported - patch welcome\n")); return 0; } gf_bs_read_int_log(bs, si->sps->sh_num_extra_bits, "num_extra_bits"); /* if( !pps_rect_slice_flag && NumTilesInPic − sh_slice_address > 1 ) sh_num_tiles_in_slice_minus1 */ if (si->inter_slice_allowed_flag ) si->slice_type = gf_bs_read_int_log(bs, 2, "slice_type"); return 0; } /*this needs further tests !*/ static void vvc_compute_poc(VVCSliceInfo *si) { u32 max_poc_lsb = 1 << (si->sps->log2_max_poc_lsb); /*POC reset for IDR frames, NOT for CRA*/ if (si->irap_or_gdr_pic && !si->gdr_pic) { si->poc_lsb_prev = 0; si->poc_msb_prev = 0; } if (si->poc_msb_cycle_present_flag) { si->poc_msb = si->poc_msb_cycle; } else { if ((si->poc_lsb < si->poc_lsb_prev) && (si->poc_lsb_prev - si->poc_lsb >= max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev + max_poc_lsb; else if ((si->poc_lsb > si->poc_lsb_prev) && (si->poc_lsb - si->poc_lsb_prev > max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev - max_poc_lsb; else si->poc_msb = si->poc_msb_prev; } si->poc = si->poc_msb + si->poc_lsb; } GF_EXPORT s32 gf_media_vvc_parse_nalu_bs(GF_BitStream *bs, VVCState *vvc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { Bool is_slice = GF_FALSE; s32 ret = -1; VVCSliceInfo n_state; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); memcpy(&n_state, &vvc->s_info, sizeof(VVCSliceInfo)); if (!vvc_parse_nal_header(bs, nal_unit_type, temporal_id, layer_id)) return -1; n_state.nal_unit_type = *nal_unit_type; switch (n_state.nal_unit_type) { case GF_VVC_NALU_ACCESS_UNIT: case GF_VVC_NALU_END_OF_SEQ: case GF_VVC_NALU_END_OF_STREAM: ret = 1; break; case GF_VVC_NALU_SLICE_TRAIL: case GF_VVC_NALU_SLICE_STSA: case GF_VVC_NALU_SLICE_RADL: case GF_VVC_NALU_SLICE_RASL: case GF_VVC_NALU_SLICE_IDR_W_RADL: case GF_VVC_NALU_SLICE_IDR_N_LP: case GF_VVC_NALU_SLICE_CRA: case GF_VVC_NALU_SLICE_GDR: /* slice - read the info and compare.*/ ret = vvc_parse_slice(bs, vvc, &n_state); if (ret < 0) return ret; ret = 0; if (n_state.picture_header_in_slice_header_flag) { is_slice = GF_TRUE; vvc_compute_poc(&n_state); if (vvc->s_info.poc != n_state.poc) { ret = 1; break; } if (!(*layer_id) || (n_state.prev_layer_id_plus1 && ((*layer_id) <= n_state.prev_layer_id_plus1 - 1))) { ret = 1; break; } } break; case GF_VVC_NALU_PIC_HEADER: if (vvc_parse_picture_header(bs, vvc, &n_state)<0) { ret = -1; break; } is_slice = GF_TRUE; vvc_compute_poc(&n_state); if (!(*layer_id) || (n_state.prev_layer_id_plus1 && ((*layer_id) <= 
n_state.prev_layer_id_plus1 - 1))) { ret = 1; } break; case GF_VVC_NALU_SEQ_PARAM: vvc->last_parsed_sps_id = gf_media_vvc_read_sps_bs_internal(bs, vvc, *layer_id, NULL); ret = (vvc->last_parsed_sps_id>=0) ? 0 : -1; break; case GF_VVC_NALU_PIC_PARAM: vvc->last_parsed_pps_id = gf_media_vvc_read_pps_bs_internal(bs, vvc); ret = (vvc->last_parsed_pps_id>=0) ? 0 : -1; break; case GF_VVC_NALU_VID_PARAM: vvc->last_parsed_vps_id = gf_media_vvc_read_vps_bs_internal(bs, vvc, GF_FALSE); ret = (vvc->last_parsed_vps_id>=0) ? 0 : -1; break; case GF_VVC_NALU_DEC_PARAM: ret = 0; break; case GF_VVC_NALU_APS_PREFIX: //we use the mix aps type + aps id (first 8 bits) as unique identifier vvc->last_parsed_aps_id = gf_bs_read_int_log(bs, 8, "aps_id"); ret = 0; break; default: ret = 0; break; } /* save _prev values */ if ((ret>0) && vvc->s_info.sps) { // n_state.frame_num_offset_prev = vvc->s_info.frame_num_offset; // n_state.frame_num_prev = vvc->s_info.frame_num; n_state.poc_lsb_prev = vvc->s_info.poc_lsb; n_state.poc_msb_prev = vvc->s_info.poc_msb; if (is_slice) n_state.prev_layer_id_plus1 = *layer_id + 1; } if (is_slice) vvc_compute_poc(&n_state); memcpy(&vvc->s_info, &n_state, sizeof(VVCSliceInfo)); return ret; } GF_EXPORT s32 gf_media_vvc_parse_nalu(u8 *data, u32 size, VVCState *vvc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { GF_BitStream *bs = NULL; s32 ret; if (!vvc) { if (nal_unit_type) (*nal_unit_type) = data[1] >> 3; if (layer_id) (*layer_id) = data[0] & 0x3f; if (temporal_id) (*temporal_id) = (data[1] & 0x7); return -1; } bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); ret = gf_media_vvc_parse_nalu_bs(bs, vvc, nal_unit_type, temporal_id, layer_id); gf_bs_del(bs); return ret; } Bool gf_media_vvc_slice_is_ref(VVCState *vvc) { if (!vvc->s_info.irap_or_gdr_pic) { return GF_FALSE; } if (vvc->s_info.gdr_pic) { if (vvc->s_info.recovery_point_valid) { vvc->s_info.recovery_point_valid = 0; return GF_TRUE; } return GF_FALSE; } return GF_TRUE; }
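/* Illustrative usage sketch (nal_payload / nal_size are placeholder names, not part of this file):
 * a complete NAL unit (start code stripped, 2-byte NAL header included) is handed to
 * gf_media_vvc_parse_nalu(), and the return value is used to spot access-unit boundaries:
 *
 *   VVCState vvc;
 *   u8 nal_unit_type, temporal_id, layer_id;
 *   memset(&vvc, 0, sizeof(vvc));
 *   s32 res = gf_media_vvc_parse_nalu(nal_payload, nal_size, &vvc, &nal_unit_type, &temporal_id, &layer_id);
 *   if (res < 0) { }      // parsing error
 *   else if (res > 0) { } // this NAL starts a new picture / access unit
 *   else { }              // belongs to the same access unit as the previous slice
 */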
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre, Romain Bouqueau, Cyril Concolato * Copyright (c) Telecom ParisTech 2000-2021 * All rights reserved * * This file is part of GPAC / Media Tools sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/internal/media_dev.h> #include <gpac/constants.h> #include <gpac/mpeg4_odf.h> #include <gpac/maths.h> #include <gpac/avparse.h> #ifndef GPAC_DISABLE_OGG #include <gpac/internal/ogg.h> #endif //uncomment/define globally to remove all bitstream parsing logging from code (this will break inspect mode ananlyze=bs) //#define GPAC_DISABLE_AVPARSE_LOGS #ifndef GPAC_DISABLE_AVPARSE_LOGS void gf_bs_log_idx(GF_BitStream *bs, u32 nBits, const char *fname, s64 val, s32 idx1, s32 idx2, s32 idx3); #define gf_bs_log(_bs, _nBits, _fname, _val) gf_bs_log_idx(_bs, _nBits, _fname, _val, -1, -1, -1) u32 gf_bs_read_int_log_idx3(GF_BitStream *bs, u32 nBits, const char *fname, s32 idx1, s32 idx2, s32 idx3) { u32 val = gf_bs_read_int(bs, nBits); gf_bs_log_idx(bs, nBits, fname, val, idx1, idx2, idx3); return val; } #define gf_bs_read_int_log(_bs, _nBits, _fname) gf_bs_read_int_log_idx3(_bs, _nBits, _fname, -1, -1, -1) #define gf_bs_read_int_log_idx(_bs, _nBits, _fname, _idx) gf_bs_read_int_log_idx3(_bs, _nBits, _fname, _idx, -1, -1) #define gf_bs_read_int_log_idx2(_bs, _nBits, _fname, _idx1, _idx2) gf_bs_read_int_log_idx3(_bs, _nBits, _fname, (s32) _idx1, (s32) _idx2, -1) #else #define gf_bs_log(_bs, _nBits, _fname, _val) #define gf_bs_log_idx(_bs, _nBits, _fname, _val, _idx1, _idx2, _idx3) #define gf_bs_read_int_log(_bs, _nbb, _f) gf_bs_read_int(_bs, _nbb) #define gf_bs_read_int_log_idx(_bs, _nbb, _f, _idx) gf_bs_read_int(_bs, _nbb) #define gf_bs_read_int_log_idx2(_bs, _nbb, _f, _idx1, _idx2) gf_bs_read_int(_bs, _nbb) #define gf_bs_read_int_log_idx3(_bs, _nbb, _f, _idx1, _idx2, _idx3) gf_bs_read_int(_bs, _nbb) #endif static const struct { u32 w, h; } std_par[] = { { 4, 3}, {3, 2}, {16, 9}, {5, 3}, {5, 4}, {8, 5}, {2, 1}, {1, 1}, {0, 0}, }; GF_EXPORT void gf_media_reduce_aspect_ratio(u32 *width, u32 *height) { u32 i = 0; u32 w = *width; u32 h = *height; while (std_par[i].w) { if (std_par[i].w * h == std_par[i].h * w) { *width = std_par[i].w; *height = std_par[i].h; return; } i++; } //not standard one, reduce by power of 2 i = 2; while (1) { if (w <= i) return; if (h <= i) return; if (w % i) return; if (h % i) return; *width = w / i; *height = h / i; i *= 2; } } GF_EXPORT void gf_media_get_reduced_frame_rate(u32 *timescale, u32 *sample_dur) { u32 res; if (!*sample_dur) return; res = *timescale / *sample_dur; if (res * (*sample_dur) == *timescale) { *timescale = res; *sample_dur = 1; } else if ((double)(*timescale * 1001 - (res + 1) * *sample_dur * 1000) / ((res + 1) * *sample_dur * 1000) < 0.001) { *timescale = (res + 1) * 1000; *sample_dur = 1001; } } struct __m4v_profile { u32 value; const char 
*name; } M4VProfiles[] = { {0x00, "Reserved (0x00) Profile"}, {0x01, "Simple Profile @ Level 1"}, {0x02, "Simple Profile @ Level 2"}, {0x03, "Simple Profile @ Level 3"}, {0x08, "Simple Profile @ Level 0"}, {0x10, "Simple Scalable Profile @ Level 0"}, {0x11, "Simple Scalable Profile @ Level 1"}, {0x12, "Simple Scalable Profile @ Level 2"}, {0x21, "Core Profile @ Level 1"}, {0x22, "Core Profile @ Level 2"}, {0x32, "Main Profile @ Level 2"}, {0x33, "Main Profile @ Level 3"}, {0x34, "Main Profile @ Level 4"}, {0x42, "N-bit Profile @ Level 2"}, {0x51, "Scalable Texture Profile @ Level 1"}, {0x61, "Simple Face Animation Profile @ Level 1"}, {0x62, "Simple Face Animation Profile @ Level 2"}, {0x63, "Simple FBA Profile @ Level 1"}, {0x64, "Simple FBA Profile @ Level 2"}, {0x71, "Basic Animated Texture Profile @ Level 1"}, {0x72, "Basic Animated Texture Profile @ Level 2"}, {0x7F, "AVC/H264 Profile"}, {0x81, "Hybrid Profile @ Level 1"}, {0x82, "Hybrid Profile @ Level 2"}, {0x91, "Advanced Real Time Simple Profile @ Level 1"}, {0x92, "Advanced Real Time Simple Profile @ Level 2"}, {0x93, "Advanced Real Time Simple Profile @ Level 3"}, {0x94, "Advanced Real Time Simple Profile @ Level 4"}, {0xA1, "Core Scalable Profile @ Level1"}, {0xA2, "Core Scalable Profile @ Level2"}, {0xA3, "Core Scalable Profile @ Level3"}, {0xB1, "Advanced Coding Efficiency Profile @ Level 1"}, {0xB2, "Advanced Coding Efficiency Profile @ Level 2"}, {0xB3, "Advanced Coding Efficiency Profile @ Level 3"}, {0xB4, "Advanced Coding Efficiency Profile @ Level 4"}, {0xC1, "Advanced Core Profile @ Level 1"}, {0xC2, "Advanced Core Profile @ Level 2"}, {0xD1, "Advanced Scalable Texture @ Level1"}, {0xD2, "Advanced Scalable Texture @ Level2"}, {0xE1, "Simple Studio Profile @ Level 1"}, {0xE2, "Simple Studio Profile @ Level 2"}, {0xE3, "Simple Studio Profile @ Level 3"}, {0xE4, "Simple Studio Profile @ Level 4"}, {0xE5, "Core Studio Profile @ Level 1"}, {0xE6, "Core Studio Profile @ Level 2"}, {0xE7, "Core Studio Profile @ Level 3"}, {0xE8, "Core Studio Profile @ Level 4"}, {0xF0, "Advanced Simple Profile @ Level 0"}, {0xF1, "Advanced Simple Profile @ Level 1"}, {0xF2, "Advanced Simple Profile @ Level 2"}, {0xF3, "Advanced Simple Profile @ Level 3"}, {0xF4, "Advanced Simple Profile @ Level 4"}, {0xF5, "Advanced Simple Profile @ Level 5"}, {0xF7, "Advanced Simple Profile @ Level 3b"}, {0xF8, "Fine Granularity Scalable Profile @ Level 0"}, {0xF9, "Fine Granularity Scalable Profile @ Level 1"}, {0xFA, "Fine Granularity Scalable Profile @ Level 2"}, {0xFB, "Fine Granularity Scalable Profile @ Level 3"}, {0xFC, "Fine Granularity Scalable Profile @ Level 4"}, {0xFD, "Fine Granularity Scalable Profile @ Level 5"}, {0xFE, "Not part of MPEG-4 Visual profiles"}, {0xFF, "No visual capability required"} }; GF_EXPORT const char *gf_m4v_get_profile_name(u8 video_pl) { u32 i, count = GF_ARRAY_LENGTH(M4VProfiles); for (i=0; i<count; i++) { if ((u32)video_pl == M4VProfiles[i].value) return M4VProfiles[i].name; } return "ISO Reserved Profile"; } #ifndef GPAC_DISABLE_AV_PARSERS #define MPEG12_START_CODE_PREFIX 0x000001 #define MPEG12_PICTURE_START_CODE 0x00000100 #define MPEG12_SLICE_MIN_START 0x00000101 #define MPEG12_SLICE_MAX_START 0x000001af #define MPEG12_USER_DATA_START_CODE 0x000001b2 #define MPEG12_SEQUENCE_START_CODE 0x000001b3 #define MPEG12_SEQUENCE_ERR_START_CODE 0x000001b4 #define MPEG12_EXT_START_CODE 0x000001b5 #define MPEG12_SEQUENCE_END_START_CODE 0x000001b7 #define MPEG12_GOP_START_CODE 0x000001b8 s32 gf_mv12_next_start_code(unsigned char 
*pbuffer, u32 buflen, u32 *optr, u32 *scode) { u32 value; u32 offset; if (buflen < 4) return -1; for (offset = 0; offset < buflen - 3; offset++, pbuffer++) { #ifdef GPAC_BIG_ENDIAN value = *(u32 *)pbuffer >> 8; #else value = (pbuffer[0] << 16) | (pbuffer[1] << 8) | (pbuffer[2] << 0); #endif if (value == MPEG12_START_CODE_PREFIX) { *optr = offset; *scode = (value << 8) | pbuffer[3]; return 0; } } return -1; } s32 gf_mv12_next_slice_start(unsigned char *pbuffer, u32 startoffset, u32 buflen, u32 *slice_offset) { u32 slicestart, code; while (gf_mv12_next_start_code(pbuffer + startoffset, buflen - startoffset, &slicestart, &code) >= 0) { if ((code >= MPEG12_SLICE_MIN_START) && (code <= MPEG12_SLICE_MAX_START)) { *slice_offset = slicestart + startoffset; return 0; } startoffset += slicestart + 4; } return -1; } /* MPEG-4 video (14496-2) */ struct __tag_m4v_parser { GF_BitStream *bs; Bool mpeg12, step_mode; u32 current_object_type; u32 force_next_obj_type; u64 current_object_start; u32 tc_dec, prev_tc_dec, tc_disp, prev_tc_disp; }; GF_EXPORT GF_M4VParser *gf_m4v_parser_new(u8 *data, u64 data_size, Bool mpeg12video) { GF_M4VParser *tmp; if (!data || !data_size) return NULL; GF_SAFEALLOC(tmp, GF_M4VParser); if (!tmp) return NULL; tmp->bs = gf_bs_new(data, data_size, GF_BITSTREAM_READ); tmp->mpeg12 = mpeg12video; return tmp; } GF_M4VParser *gf_m4v_parser_bs_new(GF_BitStream *bs, Bool mpeg12video) { GF_M4VParser *tmp; GF_SAFEALLOC(tmp, GF_M4VParser); if (!tmp) return NULL; tmp->bs = bs; tmp->mpeg12 = mpeg12video; return tmp; } GF_EXPORT void gf_m4v_parser_del(GF_M4VParser *m4v) { gf_bs_del(m4v->bs); gf_free(m4v); } GF_EXPORT void gf_m4v_parser_del_no_bs(GF_M4VParser *m4v) { gf_free(m4v); } GF_EXPORT void gf_m4v_parser_set_inspect(GF_M4VParser *m4v) { if (m4v) m4v->step_mode = 1; } GF_EXPORT u32 gf_m4v_parser_get_obj_type(GF_M4VParser *m4v) { if (m4v) return m4v->current_object_type; return 0; } #define M4V_CACHE_SIZE 4096 s32 M4V_LoadObject(GF_M4VParser *m4v) { u32 v, bpos, found; char m4v_cache[M4V_CACHE_SIZE]; u64 end, cache_start, load_size; if (!m4v) return 0; if (m4v->force_next_obj_type) { m4v->current_object_type = m4v->force_next_obj_type - 1; m4v->force_next_obj_type = 0; return (s32)m4v->current_object_type; } bpos = 0; found = 0; load_size = 0; end = 0; cache_start = 0; v = 0xffffffff; while (!end) { /*refill cache*/ if (bpos == (u32)load_size) { if (!gf_bs_available(m4v->bs)) break; load_size = gf_bs_available(m4v->bs); if (load_size > M4V_CACHE_SIZE) load_size = M4V_CACHE_SIZE; bpos = 0; cache_start = gf_bs_get_position(m4v->bs); gf_bs_read_data(m4v->bs, m4v_cache, (u32)load_size); } v = ((v << 8) & 0xFFFFFF00) | ((u8)m4v_cache[bpos]); bpos++; if ((v & 0xFFFFFF00) == 0x00000100) { end = cache_start + bpos - 4; found = 1; break; } } if (!found) return -1; m4v->current_object_start = end; gf_bs_seek(m4v->bs, end + 3); m4v->current_object_type = gf_bs_read_u8(m4v->bs); return (s32)m4v->current_object_type; } GF_EXPORT void gf_m4v_rewrite_pl(u8 **o_data, u32 *o_dataLen, u8 PL) { u32 pos = 0; unsigned char *data = (unsigned char *)*o_data; u32 dataLen = *o_dataLen; while (pos + 4 < dataLen) { if (!data[pos] && !data[pos + 1] && (data[pos + 2] == 0x01) && (data[pos + 3] == M4V_VOS_START_CODE)) { data[pos + 4] = PL; return; } pos++; } /*emulate VOS at beggining*/ (*o_data) = (char *)gf_malloc(sizeof(char)*(dataLen + 5)); (*o_data)[0] = 0; (*o_data)[1] = 0; (*o_data)[2] = 1; (*o_data)[3] = (char)M4V_VOS_START_CODE; (*o_data)[4] = PL; memcpy((*o_data + 5), data, sizeof(char)*dataLen); 
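/* no VOS start code was found in the stream: a 5-byte VOS header carrying the new PL has been
   prepended in a freshly allocated buffer; the old buffer is released and the size updated below */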
gf_free(data); (*o_dataLen) = dataLen + 5; } static GF_Err M4V_Reset(GF_M4VParser *m4v, u64 start) { gf_bs_seek(m4v->bs, start); assert(start < (u64)1<<31); m4v->current_object_start = (u32)start; m4v->current_object_type = 0; return GF_OK; } void gf_m4v_parser_reset(GF_M4VParser *m4v, u8 sc_type) { m4v->current_object_start = 0; m4v->current_object_type = 0; m4v->force_next_obj_type = sc_type; } static GF_Err gf_m4v_parse_config_mpeg12(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { unsigned char p[4]; u32 ext_type; s32 o_type; u8 go, par; if (!m4v || !dsi) return GF_BAD_PARAM; memset(dsi, 0, sizeof(GF_M4VDecSpecInfo)); dsi->VideoPL = 0; go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { case M2V_SEQ_START_CODE: dsi->RAP_stream = 1; gf_bs_read_data(m4v->bs, (char *)p, 4); dsi->width = (p[0] << 4) | ((p[1] >> 4) & 0xf); dsi->height = ((p[1] & 0xf) << 8) | p[2]; dsi->VideoPL = GF_CODECID_MPEG1; par = (p[3] >> 4) & 0xf; switch (par) { case 2: dsi->par_num = dsi->height / 3; dsi->par_den = dsi->width / 4; break; case 3: dsi->par_num = dsi->height / 9; dsi->par_den = dsi->width / 16; break; case 4: dsi->par_num = dsi->height / 2; dsi->par_den = dsi->width / 21; break; default: dsi->par_den = dsi->par_num = 0; break; } switch (p[3] & 0xf) { case 0: break; case 1: dsi->fps = 24000.0 / 1001.0; break; case 2: dsi->fps = 24.0; break; case 3: dsi->fps = 25.0; break; case 4: dsi->fps = 30000.0 / 1001.0; break; case 5: dsi->fps = 30.0; break; case 6: dsi->fps = 50.0; break; case 7: dsi->fps = ((60.0*1000.0) / 1001.0); break; case 8: dsi->fps = 60.0; break; case 9: dsi->fps = 1; break; case 10: dsi->fps = 5; break; case 11: dsi->fps = 10; break; case 12: dsi->fps = 12; break; case 13: dsi->fps = 15; break; } break; case M2V_EXT_START_CODE: gf_bs_read_data(m4v->bs, (char *)p, 4); ext_type = ((p[0] >> 4) & 0xf); if (ext_type == 1) { dsi->VideoPL = 0x65; dsi->height = ((p[1] & 0x1) << 13) | ((p[2] & 0x80) << 5) | (dsi->height & 0x0fff); dsi->width = (((p[2] >> 5) & 0x3) << 12) | (dsi->width & 0x0fff); } break; case M2V_PIC_START_CODE: if (dsi->width) go = 0; break; default: break; /*EOS*/ case -1: go = 0; m4v->current_object_start = gf_bs_get_position(m4v->bs); break; } } M4V_Reset(m4v, 0); return GF_OK; } static const struct { u32 w, h; } m4v_sar[6] = { { 0, 0 }, { 1, 1 }, { 12, 11 }, { 10, 11 }, { 16, 11 }, { 40, 33 } }; static u8 m4v_get_sar_idx(u32 w, u32 h) { u32 i; for (i = 0; i < 6; i++) { if ((m4v_sar[i].w == w) && (m4v_sar[i].h == h)) return i; } return 0xF; } static void gf_m4v_parse_vol(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { u8 verid, par; s32 clock_rate; u8 vpl = dsi->VideoPL; memset(dsi, 0, sizeof(GF_M4VDecSpecInfo)); dsi->VideoPL = vpl; verid = 0; dsi->RAP_stream = gf_bs_read_int(m4v->bs, 1); dsi->objectType = gf_bs_read_int(m4v->bs, 8); if (gf_bs_read_int(m4v->bs, 1)) { verid = gf_bs_read_int(m4v->bs, 4); gf_bs_read_int(m4v->bs, 3); } par = gf_bs_read_int(m4v->bs, 4); if (par == 0xF) { dsi->par_num = gf_bs_read_int(m4v->bs, 8); dsi->par_den = gf_bs_read_int(m4v->bs, 8); } else if (par<6) { dsi->par_num = m4v_sar[par].w; dsi->par_den = m4v_sar[par].h; } if (gf_bs_read_int(m4v->bs, 1)) { gf_bs_read_int(m4v->bs, 3); if (gf_bs_read_int(m4v->bs, 1)) gf_bs_read_int(m4v->bs, 79); } dsi->has_shape = gf_bs_read_int(m4v->bs, 2); if (dsi->has_shape && (verid!=1) ) gf_bs_read_int(m4v->bs, 4); gf_bs_read_int(m4v->bs, 1); /*clock rate*/ dsi->clock_rate = gf_bs_read_int(m4v->bs, 16); /*marker*/ gf_bs_read_int(m4v->bs, 1); clock_rate = dsi->clock_rate-1; if (clock_rate >= 65536) 
clock_rate = 65535; if (clock_rate > 0) { for (dsi->NumBitsTimeIncrement = 1; dsi->NumBitsTimeIncrement < 16; dsi->NumBitsTimeIncrement++) { if (clock_rate == 1) break; clock_rate = (clock_rate >> 1); } } else { /*fix from vivien for divX*/ dsi->NumBitsTimeIncrement = 1; } /*fixed FPS stream*/ dsi->time_increment = 0; if (gf_bs_read_int(m4v->bs, 1)) { dsi->time_increment = gf_bs_read_int(m4v->bs, dsi->NumBitsTimeIncrement); } if (!dsi->has_shape) { gf_bs_read_int(m4v->bs, 1); dsi->width = gf_bs_read_int(m4v->bs, 13); gf_bs_read_int(m4v->bs, 1); dsi->height = gf_bs_read_int(m4v->bs, 13); } else { dsi->width = dsi->height = 0; } gf_bs_align(m4v->bs); } static GF_Err gf_m4v_parse_config_mpeg4(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { s32 o_type; u8 go; if (!m4v || !dsi) return GF_BAD_PARAM; memset(dsi, 0, sizeof(GF_M4VDecSpecInfo)); go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { /*vosh*/ case M4V_VOS_START_CODE: dsi->VideoPL = (u8)gf_bs_read_u8(m4v->bs); break; case M4V_VOL_START_CODE: gf_m4v_parse_vol(m4v, dsi); /*shape will be done later*/ gf_bs_align(m4v->bs); break; case M4V_VOP_START_CODE: case M4V_GOV_START_CODE: go = 0; break; /*EOS*/ case -1: m4v->current_object_start = gf_bs_get_position(m4v->bs); return GF_EOS; /*don't interest us*/ case M4V_UDTA_START_CODE: default: break; } } return GF_OK; } GF_EXPORT GF_Err gf_m4v_parse_config(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { if (m4v->mpeg12) { return gf_m4v_parse_config_mpeg12(m4v, dsi); } else { return gf_m4v_parse_config_mpeg4(m4v, dsi); } } static GF_Err gf_m4v_parse_frame_mpeg12(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi, u8 *frame_type, u32 *time_inc, u64 *size, u64 *start, Bool *is_coded) { u8 go, hasVOP, firstObj, val; s32 o_type; if (!m4v || !size || !start || !frame_type) return GF_BAD_PARAM; *size = 0; firstObj = 1; hasVOP = 0; *is_coded = GF_FALSE; m4v->current_object_type = (u32)-1; *frame_type = 0; if (!m4v->step_mode) M4V_Reset(m4v, m4v->current_object_start); go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { case M2V_PIC_START_CODE: /*done*/ if (hasVOP) { go = 0; break; } if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } hasVOP = 1; *is_coded = 1; /*val = */gf_bs_read_u8(m4v->bs); val = gf_bs_read_u8(m4v->bs); *frame_type = ((val >> 3) & 0x7) - 1; break; case M2V_GOP_START_CODE: if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } if (hasVOP) go = 0; break; case M2V_SEQ_START_CODE: if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } if (hasVOP) { go = 0; break; } /**/ break; default: break; case -1: *size = gf_bs_get_position(m4v->bs) - *start; return GF_EOS; } if (m4v->step_mode) return GF_OK; } *size = m4v->current_object_start - *start; return GF_OK; } static GF_Err gf_m4v_parse_frame_mpeg4(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi, u8 *frame_type, u32 *time_inc, u64 *size, u64 *start, Bool *is_coded) { u8 go, hasVOP, firstObj, secs; s32 o_type; u32 vop_inc = 0; if (!m4v || !size || !start || !frame_type) return GF_BAD_PARAM; *size = 0; firstObj = 1; hasVOP = 0; *is_coded = 0; m4v->current_object_type = (u32)-1; *frame_type = 0; *start = 0; if (!m4v->step_mode) M4V_Reset(m4v, m4v->current_object_start); go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { case M4V_VOP_START_CODE: /*done*/ if (hasVOP) { go = 0; break; } if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } hasVOP = 1; /*coding type*/ *frame_type = gf_bs_read_int(m4v->bs, 2); /*modulo time base*/ secs = 0; while 
(gf_bs_read_int(m4v->bs, 1) != 0) secs++; /*no support for B frames in parsing*/ secs += (dsi->enh_layer || *frame_type!=2) ? m4v->tc_dec : m4v->tc_disp; /*marker*/ gf_bs_read_int(m4v->bs, 1); /*vop_time_inc*/ if (dsi->NumBitsTimeIncrement) vop_inc = gf_bs_read_int(m4v->bs, dsi->NumBitsTimeIncrement); m4v->prev_tc_dec = m4v->tc_dec; m4v->prev_tc_disp = m4v->tc_disp; if (dsi->enh_layer || *frame_type!=2) { m4v->tc_disp = m4v->tc_dec; m4v->tc_dec = secs; } *time_inc = secs * dsi->clock_rate + vop_inc; /*marker*/ gf_bs_read_int(m4v->bs, 1); /*coded*/ *is_coded = gf_bs_read_int(m4v->bs, 1); gf_bs_align(m4v->bs); break; case M4V_GOV_START_CODE: if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } if (hasVOP) go = 0; break; case M4V_VOL_START_CODE: if (m4v->step_mode) gf_m4v_parse_vol(m4v, dsi); case M4V_VOS_START_CODE: if (hasVOP) { go = 0; } else if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } break; case M4V_VO_START_CODE: default: break; case -1: *size = gf_bs_get_position(m4v->bs) - *start; return GF_EOS; } if (m4v->step_mode) return GF_OK; } assert(m4v->current_object_start >= *start); *size = m4v->current_object_start - *start; return GF_OK; } GF_EXPORT GF_Err gf_m4v_parse_frame(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi, u8 *frame_type, u32 *time_inc, u64 *size, u64 *start, Bool *is_coded) { if (m4v->mpeg12) { return gf_m4v_parse_frame_mpeg12(m4v, dsi, frame_type, time_inc, size, start, is_coded); } else { return gf_m4v_parse_frame_mpeg4(m4v, dsi, frame_type, time_inc, size, start, is_coded); } } GF_Err gf_m4v_rewrite_par(u8 **o_data, u32 *o_dataLen, s32 par_n, s32 par_d) { u64 start, end, size; GF_BitStream *mod; GF_M4VParser *m4v; Bool go = 1; m4v = gf_m4v_parser_new(*o_data, *o_dataLen, 0); mod = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); start = 0; while (go) { u32 type = M4V_LoadObject(m4v); end = gf_bs_get_position(m4v->bs) - 4; size = end - start; /*store previous object*/ if (size) { assert (size < (u64)1<<31); gf_bs_write_data(mod, *o_data + start, (u32)size); start = end; } switch (type) { case M4V_VOL_START_CODE: gf_bs_write_int(mod, 0, 8); gf_bs_write_int(mod, 0, 8); gf_bs_write_int(mod, 1, 8); gf_bs_write_int(mod, M4V_VOL_START_CODE, 8); gf_bs_write_int(mod, gf_bs_read_int(m4v->bs, 1), 1); gf_bs_write_int(mod, gf_bs_read_int(m4v->bs, 8), 8); start = gf_bs_read_int(m4v->bs, 1); gf_bs_write_int(mod, (u32)start, 1); if (start) { gf_bs_write_int(mod, gf_bs_read_int(m4v->bs, 7), 7); } start = gf_bs_read_int(m4v->bs, 4); if (start == 0xF) { gf_bs_read_int(m4v->bs, 8); gf_bs_read_int(m4v->bs, 8); } if ((par_n >= 0) && (par_d >= 0)) { u8 par = m4v_get_sar_idx(par_n, par_d); gf_bs_write_int(mod, par, 4); if (par == 0xF) { gf_bs_write_int(mod, par_n, 8); gf_bs_write_int(mod, par_d, 8); } } else { gf_bs_write_int(mod, 0x0, 4); } case -1: go = 0; break; default: break; } } while (gf_bs_bits_available(m4v->bs)) { u32 b = gf_bs_read_int(m4v->bs, 1); gf_bs_write_int(mod, b, 1); } gf_m4v_parser_del(m4v); gf_free(*o_data); gf_bs_get_content(mod, o_data, o_dataLen); gf_bs_del(mod); return GF_OK; } GF_EXPORT u64 gf_m4v_get_object_start(GF_M4VParser *m4v) { return m4v->current_object_start; } #if 0 //unused Bool gf_m4v_is_valid_object_type(GF_M4VParser *m4v) { return ((s32)m4v->current_object_type == -1) ? 
0 : 1; } #endif GF_EXPORT GF_Err gf_m4v_get_config(u8 *rawdsi, u32 rawdsi_size, GF_M4VDecSpecInfo *dsi) { GF_Err e; GF_M4VParser *vparse; if (!rawdsi || !rawdsi_size) return GF_NON_COMPLIANT_BITSTREAM; vparse = gf_m4v_parser_new(rawdsi, rawdsi_size, 0); e = gf_m4v_parse_config(vparse, dsi); dsi->next_object_start = (u32)vparse->current_object_start; gf_m4v_parser_del(vparse); return e < 0 ? e : GF_OK; } GF_EXPORT GF_Err gf_mpegv12_get_config(u8 *rawdsi, u32 rawdsi_size, GF_M4VDecSpecInfo *dsi) { GF_Err e; GF_M4VParser *vparse; if (!rawdsi || !rawdsi_size) return GF_NON_COMPLIANT_BITSTREAM; vparse = gf_m4v_parser_new(rawdsi, rawdsi_size, GF_TRUE); e = gf_m4v_parse_config(vparse, dsi); dsi->next_object_start = (u32)vparse->current_object_start; gf_m4v_parser_del(vparse); return e; } #endif /* AAC parser */ struct __m4a_oti { u32 type; const char *name; } M4AObjectTypes[] = { {0, "MPEG-4 Audio Reserved"}, {1, "MPEG-4 Audio AAC Main"}, {2, "MPEG-4 Audio AAC LC"}, {3, "MPEG-4 Audio AAC SSR"}, {4, "MPEG-4 Audio AAC LTP"}, {5, "MPEG-4 Audio SBR"}, {6, "MPEG-4 Audio AAC Scalable"}, {7, "MPEG-4 Audio TwinVQ"}, {8, "MPEG-4 Audio CELP"}, {9, "MPEG-4 Audio HVXC"}, {10, "MPEG-4 Audio Reserved"}, {11, "MPEG-4 Audio Reserved"}, {12, "MPEG-4 Audio TTSI"}, {13, "MPEG-4 Audio Main synthetic"}, {14, "MPEG-4 Audio Wavetable synthesis"}, {15, "MPEG-4 Audio General MIDI"}, {16, "MPEG-4 Audio Algorithmic Synthesis and Audio FX"}, {17, "MPEG-4 Audio ER AAC LC"}, {18, "MPEG-4 Audio Reserved"}, {19, "MPEG-4 Audio ER AAC LTP"}, {20, "MPEG-4 Audio ER AAC scalable"}, {21, "MPEG-4 Audio ER TwinVQ"}, {22, "MPEG-4 Audio ER BSAC"}, {23, "MPEG-4 Audio ER AAC LD"}, {24, "MPEG-4 Audio ER CELP"}, {25, "MPEG-4 Audio ER HVXC"}, {26, "MPEG-4 Audio ER HILN"}, {27, "MPEG-4 Audio ER Parametric"}, {28, "MPEG-4 Audio SSC"}, {29, "MPEG-4 Audio ParametricStereo"}, {30, "MPEG-4 Audio Reserved"}, {31, "MPEG-4 Audio Reserved"}, {32, "MPEG-1 Audio Layer-1"}, {33, "MPEG-1 Audio Layer-2"}, {34, "MPEG-1 Audio Layer-3"}, {35, "MPEG-4 Audio DST"}, {36, "MPEG-4 Audio ALS"}, {37, "MPEG-4 Audio SLS"}, {42, "MPEG Audio xHE-AAC"}, }; GF_EXPORT const char *gf_m4a_object_type_name(u32 objectType) { u32 i, count = GF_ARRAY_LENGTH(M4AObjectTypes); for (i=0; i<count; i++) { if (objectType==M4AObjectTypes[i].type) return M4AObjectTypes[i].name; } return "MPEG-4 Audio Unknown"; } struct __m4a_profile { u32 value; const char *name; } M4AProfiles[] = { {0x00, "ISO Reserved (0x00)"}, {0x01, "Main Audio Profile @ Level 1"}, {0x02, "Main Audio Profile @ Level 2"}, {0x03, "Main Audio Profile @ Level 3"}, {0x04, "Main Audio Profile @ Level 4"}, {0x05, "Scalable Audio Profile @ Level 1"}, {0x06, "Scalable Audio Profile @ Level 2"}, {0x07, "Scalable Audio Profile @ Level 3"}, {0x08, "Scalable Audio Profile @ Level 4"}, {0x09, "Speech Audio Profile @ Level 1"}, {0x0A, "Speech Audio Profile @ Level 2"}, {0x0B, "Synthetic Audio Profile @ Level 1"}, {0x0C, "Synthetic Audio Profile @ Level 2"}, {0x0D, "Synthetic Audio Profile @ Level 3"}, {0x0E, "High Quality Audio Profile @ Level 1"}, {0x0F, "High Quality Audio Profile @ Level 2"}, {0x10, "High Quality Audio Profile @ Level 3"}, {0x11, "High Quality Audio Profile @ Level 4"}, {0x12, "High Quality Audio Profile @ Level 5"}, {0x13, "High Quality Audio Profile @ Level 6"}, {0x14, "High Quality Audio Profile @ Level 7"}, {0x15, "High Quality Audio Profile @ Level 8"}, {0x16, "Low Delay Audio Profile @ Level 1"}, {0x17, "Low Delay Audio Profile @ Level 2"}, {0x18, "Low Delay Audio Profile @ Level 3"}, {0x19, "Low Delay 
Audio Profile @ Level 4"}, {0x1A, "Low Delay Audio Profile @ Level 5"}, {0x1B, "Low Delay Audio Profile @ Level 6"}, {0x1C, "Low Delay Audio Profile @ Level 7"}, {0x1D, "Low Delay Audio Profile @ Level 8"}, {0x1E, "Natural Audio Profile @ Level 1"}, {0x1F, "Natural Audio Profile @ Level 2"}, {0x20, "Natural Audio Profile @ Level 3"}, {0x21, "Natural Audio Profile @ Level 4"}, {0x22, "Mobile Audio Internetworking Profile @ Level 1"}, {0x23, "Mobile Audio Internetworking Profile @ Level 2"}, {0x24, "Mobile Audio Internetworking Profile @ Level 3"}, {0x25, "Mobile Audio Internetworking Profile @ Level 4"}, {0x26, "Mobile Audio Internetworking Profile @ Level 5"}, {0x27, "Mobile Audio Internetworking Profile @ Level 6"}, {0x28, "AAC Profile @ Level 1"}, {0x29, "AAC Profile @ Level 2"}, {0x2A, "AAC Profile @ Level 4"}, {0x2B, "AAC Profile @ Level 5"}, {0x2C, "High Efficiency AAC Profile @ Level 2"}, {0x2D, "High Efficiency AAC Profile @ Level 3"}, {0x2E, "High Efficiency AAC Profile @ Level 4"}, {0x2F, "High Efficiency AAC Profile @ Level 5"}, {0x30, "High Efficiency AAC v2 Profile @ Level 2"}, {0x31, "High Efficiency AAC v2 Profile @ Level 3"}, {0x32, "High Efficiency AAC v2 Profile @ Level 4"}, {0x33, "High Efficiency AAC v2 Profile @ Level 5"}, {0x34, "Low Delay AAC Profile"}, {0x35, "Baseline MPEG Surround Profile @ Level 1"}, {0x36, "Baseline MPEG Surround Profile @ Level 2"}, {0x37, "Baseline MPEG Surround Profile @ Level 3"}, {0x38, "Baseline MPEG Surround Profile @ Level 4"}, {0x39, "Baseline MPEG Surround Profile @ Level 5"}, {0x3A, "Baseline MPEG Surround Profile @ Level 6"}, {0x3B, "High Definition AAC Profile @ Level 1"}, {0x3C, "ALS Simple Profile @ Level 1"}, {0x50, "AAC Profile @ Level 6"}, {0x51, "AAC Profile @ Level 7"}, {0x52, "High Efficiency AAC Profile @ Level 6"}, {0x53, "High Efficiency AAC Profile @ Level 7"}, {0x54, "High Efficiency AAC v2 Profile @ Level 6"}, {0x55, "High Efficiency AAC v2 Profile @ Level 7"}, {0x56, "Extended High Efficiency AAC Profile @ Level 6"}, {0x57, "Extended High Efficiency AAC Profile @ Level 7"}, {0xFE, "Not part of MPEG-4 audio profiles"}, {0xFF, "No audio capability required"} }; GF_EXPORT const char *gf_m4a_get_profile_name(u8 audio_pl) { u32 i, count = GF_ARRAY_LENGTH(M4AProfiles); for (i=0; i<count; i++) { if ((u32) audio_pl==M4AProfiles[i].value) return M4AProfiles[i].name; } return "ISO Reserved / User Private"; } #ifndef GPAC_DISABLE_AV_PARSERS GF_EXPORT u32 gf_m4a_get_profile(GF_M4ADecSpecInfo *cfg) { switch (cfg->base_object_type) { case 2: /*AAC LC*/ if (cfg->nb_chan <= 2) return (cfg->base_sr <= 24000) ? 0x28 : 0x29; /*LC@L1 or LC@L2*/ if (cfg->nb_chan <= 5) return (cfg->base_sr <= 48000) ? 0x2A : 0x2B; /*LC@L4 or LC@L5*/ return (cfg->base_sr <= 48000) ? 0x50 : 0x51; /*LC@L4 or LC@L5*/ case 5: /*HE-AAC - SBR*/ if (cfg->nb_chan <= 2) return (cfg->base_sr <= 24000) ? 0x2C : 0x2D; /*HE@L2 or HE@L3*/ if (cfg->nb_chan <= 5) return (cfg->base_sr <= 48000) ? 0x2E : 0x2F; /*HE@L4 or HE@L5*/ return (cfg->base_sr <= 48000) ? 0x52 : 0x53; /*HE@L6 or HE@L7*/ case 29: /*HE-AACv2 - SBR+PS*/ if (cfg->nb_chan <= 2) return (cfg->base_sr <= 24000) ? 0x30 : 0x31; /*HE-AACv2@L2 or HE-AACv2@L3*/ if (cfg->nb_chan <= 5) return (cfg->base_sr <= 48000) ? 0x32 : 0x33; /*HE-AACv2@L4 or HE-AACv2@L5*/ return (cfg->base_sr <= 48000) ? 0x54 : 0x55; /*HE-AACv2@L6 or HE-AACv2@L7*/ /*default to HQ*/ default: if (cfg->nb_chan <= 2) return (cfg->base_sr < 24000) ? 
0x0E : 0x0F; /*HQ@L1 or HQ@L2*/ return 0x10; /*HQ@L3*/ } } GF_EXPORT GF_Err gf_m4a_parse_program_config_element(GF_BitStream *bs, GF_M4ADecSpecInfo *cfg) { u32 i; cfg->program_config_element_present = 1; cfg->cpe_channels = 0; cfg->element_instance_tag = gf_bs_read_int_log(bs, 4, "element_instance_tag"); cfg->object_type = gf_bs_read_int_log(bs, 2, "object_type"); cfg->sampling_frequency_index = gf_bs_read_int_log(bs, 4, "sampling_frequency_index"); cfg->num_front_channel_elements = gf_bs_read_int_log(bs, 4, "num_front_channel_elements"); cfg->num_side_channel_elements = gf_bs_read_int_log(bs, 4, "num_side_channel_elements"); cfg->num_back_channel_elements = gf_bs_read_int_log(bs, 4, "num_back_channel_elements"); cfg->num_lfe_channel_elements = gf_bs_read_int_log(bs, 2, "num_lfe_channel_elements"); cfg->num_assoc_data_elements = gf_bs_read_int_log(bs, 3, "num_assoc_data_elements"); cfg->num_valid_cc_elements = gf_bs_read_int_log(bs, 4, "num_valid_cc_elements"); cfg->mono_mixdown_present = (Bool)gf_bs_read_int_log(bs, 1, "mono_mixdown_present"); if (cfg->mono_mixdown_present) { cfg->mono_mixdown_element_number = gf_bs_read_int_log(bs, 4, "mono_mixdown_element_number"); } cfg->stereo_mixdown_present = gf_bs_read_int_log(bs, 1, "stereo_mixdown_present"); if (cfg->stereo_mixdown_present) { cfg->stereo_mixdown_element_number = gf_bs_read_int_log(bs, 4, "stereo_mixdown_element_number"); } cfg->matrix_mixdown_idx_present = gf_bs_read_int_log(bs, 1, "matrix_mixdown_idx_present"); if (cfg->matrix_mixdown_idx_present) { cfg->matrix_mixdown_idx = gf_bs_read_int_log(bs, 2, "matrix_mixdown_idx"); cfg->pseudo_surround_enable = gf_bs_read_int_log(bs, 1, "pseudo_surround_enable"); } for (i = 0; i < cfg->num_front_channel_elements; i++) { cfg->front_element_is_cpe[i] = gf_bs_read_int_log_idx(bs, 1, "front_element_is_cpe", i); cfg->front_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "front_element_tag_select", i); if (cfg->front_element_is_cpe[i]) cfg->cpe_channels++; } for (i = 0; i < cfg->num_side_channel_elements; i++) { cfg->side_element_is_cpe[i] = gf_bs_read_int_log_idx(bs, 1, "side_element_is_cpe", i); cfg->side_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "side_element_tag_select", i); if (cfg->side_element_is_cpe[i]) cfg->cpe_channels++; } for (i = 0; i < cfg->num_back_channel_elements; i++) { cfg->back_element_is_cpe[i] = gf_bs_read_int_log_idx(bs, 1, "back_element_is_cpe", i); cfg->back_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "back_element_tag_select", i); if (cfg->back_element_is_cpe[i]) cfg->cpe_channels++; } for (i = 0; i < cfg->num_lfe_channel_elements; i++) { cfg->lfe_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "lfe_element_tag_select", i); } for (i = 0; i < cfg->num_assoc_data_elements; i++) { cfg->assoc_data_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "assoc_data_element_tag_select", i); } for (i = 0; i < cfg->num_valid_cc_elements; i++) { cfg->cc_element_is_ind_sw[i] = gf_bs_read_int_log_idx(bs, 1, "cc_element_is_ind_sw", i); cfg->valid_cc_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "valid_cc_element_tag_select", i); } gf_bs_align(bs); cfg->comment_field_bytes = gf_bs_read_int_log(bs, 8, "comment_field_bytes"); gf_bs_read_data(bs, (char *)cfg->comments, cfg->comment_field_bytes); cfg->nb_chan = cfg->num_front_channel_elements + cfg->num_back_channel_elements + cfg->num_side_channel_elements + cfg->num_lfe_channel_elements; cfg->nb_chan += cfg->cpe_channels; return GF_OK; } GF_EXPORT GF_Err gf_m4a_parse_config(GF_BitStream *bs, 
GF_M4ADecSpecInfo *cfg, Bool size_known) { u32 audio_obj_type; memset(cfg, 0, sizeof(GF_M4ADecSpecInfo)); cfg->base_object_type = gf_bs_read_int_log(bs, 5, "base_object_type"); /*extended object type*/ if (cfg->base_object_type == 31) { cfg->base_object_type = 32 + gf_bs_read_int_log(bs, 6, "extended_base_object_type"); } cfg->base_sr_index = gf_bs_read_int_log(bs, 4, "base_samplerate_index"); if (cfg->base_sr_index == 0x0F) { cfg->base_sr = gf_bs_read_int_log(bs, 24, "base_samplerate"); } else { cfg->base_sr = GF_M4ASampleRates[cfg->base_sr_index]; } cfg->chan_cfg = gf_bs_read_int_log(bs, 4, "channel_configuration"); if (cfg->chan_cfg) { cfg->nb_chan = GF_M4ANumChannels[cfg->chan_cfg - 1]; } audio_obj_type = cfg->base_object_type; if (cfg->base_object_type == 5 || cfg->base_object_type == 29) { if (cfg->base_object_type == 29) { cfg->has_ps = 1; cfg->nb_chan = 1; } cfg->has_sbr = GF_TRUE; cfg->sbr_sr_index = gf_bs_read_int_log(bs, 4, "sbr_samplerate_index"); if (cfg->sbr_sr_index == 0x0F) { cfg->sbr_sr = gf_bs_read_int_log(bs, 24, "sbr_samplerate"); } else { cfg->sbr_sr = GF_M4ASampleRates[cfg->sbr_sr_index]; } cfg->sbr_object_type = gf_bs_read_int_log(bs, 5, "sbr_object_type"); if (cfg->sbr_object_type==31) cfg->sbr_object_type = 32 + gf_bs_read_int_log(bs, 6, "audioObjectTypeExt"); audio_obj_type = cfg->sbr_object_type; if (cfg->sbr_object_type==22) { /*ext_chan_cfg = */gf_bs_read_int_log(bs, 4, "channel_configuration"); } } /*object cfg*/ switch (audio_obj_type) { case 1: case 2: case 3: case 4: case 6: case 7: case 17: case 19: case 20: case 21: case 22: case 23: case 42: { Bool ext_flag; gf_bs_read_int_log(bs, 1, "frame_length_flag"); if (gf_bs_read_int_log(bs, 1, "depends_on_core_coder")) gf_bs_read_int_log(bs, 14, "delay"); ext_flag = gf_bs_read_int_log(bs, 1, "extension_flag"); if (!cfg->chan_cfg) { gf_m4a_parse_program_config_element(bs, cfg); } if ((cfg->base_object_type == 6) || (cfg->base_object_type == 20)) { gf_bs_read_int_log(bs, 3, "layerN"); } if (ext_flag) { if (cfg->base_object_type == 22) { gf_bs_read_int_log(bs, 5, "numOfSubFrame"); gf_bs_read_int_log(bs, 11, "layer_length"); } if ((cfg->base_object_type == 17) || (cfg->base_object_type == 19) || (cfg->base_object_type == 20) || (cfg->base_object_type == 23) ) { gf_bs_read_int_log(bs, 1, "aacSectionDataResilienceFlag"); gf_bs_read_int_log(bs, 1, "aacScalefactorDataResilienceFlag"); gf_bs_read_int_log(bs, 1, "aacSpectralDataResilienceFlag"); } gf_bs_read_int_log(bs, 1, "extensionFlag3"); } } break; } /*ER cfg*/ switch (audio_obj_type) { case 17: case 19: case 20: case 21: case 22: case 23: case 24: case 25: case 26: case 27: { u32 epConfig = gf_bs_read_int_log(bs, 2, "epConfig"); if ((epConfig == 2) || (epConfig == 3)) { } if (epConfig == 3) { gf_bs_read_int_log(bs, 1, "directMapping"); } } break; } if (size_known && (cfg->base_object_type != 5) && (cfg->base_object_type != 29)) { while (gf_bs_available(bs) >= 2) { u32 sync = gf_bs_peek_bits(bs, 11, 0); if (sync == 0x2b7) { gf_bs_read_int_log(bs, 11, "syncExtensionType"); cfg->sbr_object_type = gf_bs_read_int_log(bs, 5, "extensionAudioObjectType "); cfg->has_sbr = gf_bs_read_int_log(bs, 1, "sbrPresentFlag"); if (cfg->has_sbr) { cfg->sbr_sr_index = gf_bs_read_int_log(bs, 4, "extensionSamplingFrequencyIndex"); if (cfg->sbr_sr_index == 0x0F) { cfg->sbr_sr = gf_bs_read_int_log(bs, 24, "extensionSamplingFrequency"); } else { cfg->sbr_sr = GF_M4ASampleRates[cfg->sbr_sr_index]; } } } else if (sync == 0x548) { gf_bs_read_int_log(bs, 11, "syncExtensionType"); cfg->has_ps = 
gf_bs_read_int_log(bs, 1, "hasParametricStereo"); if (cfg->has_ps) cfg->nb_chan = 1; } else { break; } } } cfg->audioPL = gf_m4a_get_profile(cfg); return GF_OK; } GF_EXPORT GF_Err gf_m4a_get_config(u8 *dsi, u32 dsi_size, GF_M4ADecSpecInfo *cfg) { GF_BitStream *bs; if (!dsi || !dsi_size || (dsi_size < 2)) return GF_NON_COMPLIANT_BITSTREAM; bs = gf_bs_new(dsi, dsi_size, GF_BITSTREAM_READ); gf_m4a_parse_config(bs, cfg, GF_TRUE); gf_bs_del(bs); return GF_OK; } u32 gf_latm_get_value(GF_BitStream *bs) { u32 i, tmp, value = 0; u32 bytesForValue = gf_bs_read_int(bs, 2); for (i = 0; i <= bytesForValue; i++) { value <<= 8; tmp = gf_bs_read_int(bs, 8); value += tmp; } return value; } GF_EXPORT u32 gf_m4a_get_channel_cfg(u32 nb_chan) { u32 i, count = sizeof(GF_M4ANumChannels) / sizeof(u32); for (i = 0; i < count; i++) { if (GF_M4ANumChannels[i] == nb_chan) return i + 1; } return 0; } GF_EXPORT GF_Err gf_m4a_write_program_config_element_bs(GF_BitStream *bs, GF_M4ADecSpecInfo *cfg) { u32 i; gf_bs_write_int(bs, cfg->element_instance_tag, 4); gf_bs_write_int(bs, cfg->object_type, 2); gf_bs_write_int(bs, cfg->sampling_frequency_index, 4); gf_bs_write_int(bs, cfg->num_front_channel_elements, 4); gf_bs_write_int(bs, cfg->num_side_channel_elements, 4); gf_bs_write_int(bs, cfg->num_back_channel_elements, 4); gf_bs_write_int(bs, cfg->num_lfe_channel_elements, 2); gf_bs_write_int(bs, cfg->num_assoc_data_elements, 3); gf_bs_write_int(bs, cfg->num_valid_cc_elements, 4); gf_bs_write_int(bs, cfg->mono_mixdown_present, 1); if (cfg->mono_mixdown_present) { gf_bs_write_int(bs, cfg->mono_mixdown_element_number, 4); } gf_bs_write_int(bs, cfg->stereo_mixdown_present, 1); if (cfg->stereo_mixdown_present) { gf_bs_write_int(bs, cfg->stereo_mixdown_element_number, 4); } gf_bs_write_int(bs, cfg->matrix_mixdown_idx_present, 1); if (cfg->matrix_mixdown_idx_present) { gf_bs_write_int(bs, cfg->matrix_mixdown_idx, 2); gf_bs_write_int(bs, cfg->pseudo_surround_enable, 1); } for (i = 0; i < cfg->num_front_channel_elements; i++) { gf_bs_write_int(bs, cfg->front_element_is_cpe[i], 1); gf_bs_write_int(bs, cfg->front_element_tag_select[i], 4); } for (i = 0; i < cfg->num_side_channel_elements; i++) { gf_bs_write_int(bs, cfg->side_element_is_cpe[i], 1); gf_bs_write_int(bs, cfg->side_element_tag_select[i], 4); } for (i = 0; i < cfg->num_back_channel_elements; i++) { gf_bs_write_int(bs, cfg->back_element_is_cpe[i], 1); gf_bs_write_int(bs, cfg->back_element_tag_select[i], 4); } for (i = 0; i < cfg->num_lfe_channel_elements; i++) { gf_bs_write_int(bs, cfg->lfe_element_tag_select[i], 4); } for (i = 0; i < cfg->num_assoc_data_elements; i++) { gf_bs_write_int(bs, cfg->assoc_data_element_tag_select[i], 4); } for (i = 0; i < cfg->num_valid_cc_elements; i++) { gf_bs_write_int(bs, cfg->cc_element_is_ind_sw[i], 1); gf_bs_write_int(bs, cfg->valid_cc_element_tag_select[i], 4); } gf_bs_align(bs); gf_bs_write_int(bs, cfg->comment_field_bytes, 8); gf_bs_write_data(bs, (char *)cfg->comments, cfg->comment_field_bytes); return GF_OK; } GF_EXPORT GF_Err gf_m4a_write_config_bs(GF_BitStream *bs, GF_M4ADecSpecInfo *cfg) { if (!cfg->base_sr_index) { if (!cfg->base_sr) return GF_BAD_PARAM; while (GF_M4ASampleRates[cfg->base_sr_index]) { if (GF_M4ASampleRates[cfg->base_sr_index] == cfg->base_sr) break; cfg->base_sr_index++; } } if (cfg->sbr_sr && !cfg->sbr_sr_index) { while (GF_M4ASampleRates[cfg->sbr_sr_index]) { if (GF_M4ASampleRates[cfg->sbr_sr_index] == cfg->sbr_sr) break; cfg->sbr_sr_index++; } } /*extended object type*/ if (cfg->base_object_type >= 32) { 
gf_bs_write_int(bs, 31, 5); gf_bs_write_int(bs, cfg->base_object_type - 32, 6); } else { gf_bs_write_int(bs, cfg->base_object_type, 5); } gf_bs_write_int(bs, cfg->base_sr_index, 4); if (cfg->base_sr_index == 0x0F) { gf_bs_write_int(bs, cfg->base_sr, 24); } if (cfg->program_config_element_present) { gf_bs_write_int(bs, 0, 4); } else { cfg->chan_cfg = gf_m4a_get_channel_cfg(cfg->nb_chan); if (!cfg->chan_cfg) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AAC] Cannot write decoder config, ProgramConfigElement is missing and channel configuration is not a predefined one !\n")); return GF_BAD_PARAM; } gf_bs_write_int(bs, cfg->chan_cfg, 4); } if (cfg->base_object_type == 5 || cfg->base_object_type == 29) { if (cfg->base_object_type == 29) { cfg->has_ps = 1; cfg->nb_chan = 1; } cfg->has_sbr = 1; gf_bs_write_int(bs, cfg->sbr_sr_index, 4); if (cfg->sbr_sr_index == 0x0F) { gf_bs_write_int(bs, cfg->sbr_sr, 24); } gf_bs_write_int(bs, cfg->sbr_object_type, 5); } /*object cfg*/ switch (cfg->base_object_type) { case 1: case 2: case 3: case 4: case 6: case 7: case 17: case 19: case 20: case 21: case 22: case 23: case 42: { /*frame length flag*/ gf_bs_write_int(bs, 0, 1); /*depends on core coder*/ gf_bs_write_int(bs, 0, 1); /*ext flag*/ gf_bs_write_int(bs, 0, 1); if (cfg->program_config_element_present) { gf_m4a_write_program_config_element_bs(bs, cfg); } if ((cfg->base_object_type == 6) || (cfg->base_object_type == 20)) { gf_bs_write_int(bs, 0, 3); } } break; } /*ER cfg - not supported*/ /*implicit sbr/ps signaling not written here, cf reframe_adts*/ return GF_OK; } GF_EXPORT GF_Err gf_m4a_write_config(GF_M4ADecSpecInfo *cfg, u8 **dsi, u32 *dsi_size) { GF_BitStream *bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_m4a_write_config_bs(bs, cfg); gf_bs_get_content(bs, dsi, dsi_size); gf_bs_del(bs); return GF_OK; } /*AV1 parsing*/ static u32 av1_read_ns(GF_BitStream *bs, u32 n, const char *fname) { u32 v, res; Bool extra_bit; int w = (u32)(log(n) / log(2)) + 1; u32 m = (1 << w) - n; assert(w < 32); v = gf_bs_read_int(bs, w - 1); if (v < m) { if (fname) { gf_bs_log(bs, w-1, fname, v); } return v; } extra_bit = gf_bs_read_int(bs, 1); res = (v << 1) - m + extra_bit; if (fname) { gf_bs_log(bs, w, fname, res); } return res; } static void av1_color_config(GF_BitStream *bs, AV1State *state) { state->config->high_bitdepth = gf_bs_read_int_log(bs, 1, "high_bitdepth"); state->bit_depth = 8; if (state->config->seq_profile == 2 && state->config->high_bitdepth) { state->config->twelve_bit = gf_bs_read_int_log(bs, 1, "twelve_bit"); state->bit_depth = state->config->twelve_bit ? 12 : 10; } else if (state->config->seq_profile <= 2) { state->bit_depth = state->config->high_bitdepth ? 10 : 8; } state->config->monochrome = GF_FALSE; if (state->config->seq_profile == 1) { state->config->monochrome = GF_FALSE; } else { state->config->monochrome = gf_bs_read_int_log(bs, 1, "monochrome"); } /*NumPlanes = mono_chrome ? 
1 : 3;*/ state->color_description_present_flag = gf_bs_read_int_log(bs, 1, "color_description_present_flag"); if (state->color_description_present_flag) { state->color_primaries = gf_bs_read_int_log(bs, 8, "color_primaries"); state->transfer_characteristics = gf_bs_read_int_log(bs, 8, "transfer_characteristics"); state->matrix_coefficients = gf_bs_read_int_log(bs, 8, "matrix_coefficients"); } else { state->color_primaries = 2/*CP_UNSPECIFIED*/; state->transfer_characteristics = 2/*TC_UNSPECIFIED*/; state->matrix_coefficients = 2/*MC_UNSPECIFIED*/; } if (state->config->monochrome) { state->color_range = gf_bs_read_int_log(bs, 1, "color_range"); state->config->chroma_subsampling_x = GF_TRUE; state->config->chroma_subsampling_y = GF_TRUE; state->config->chroma_sample_position = 0/*CSP_UNKNOWN*/; state->separate_uv_delta_q = 0; return; } else if (state->color_primaries == 0/*CP_BT_709*/ && state->transfer_characteristics == 13/*TC_SRGB*/ && state->matrix_coefficients == 0/*MC_IDENTITY*/) { state->color_range = GF_TRUE; state->config->chroma_subsampling_x = GF_FALSE; state->config->chroma_subsampling_y = GF_FALSE; } else { state->config->chroma_subsampling_x = GF_FALSE; state->config->chroma_subsampling_y = GF_FALSE; state->color_range = gf_bs_read_int_log(bs, 1, "color_range"); if (state->config->seq_profile == 0) { state->config->chroma_subsampling_x = GF_TRUE; state->config->chroma_subsampling_y = GF_TRUE; } else if (state->config->seq_profile == 1) { state->config->chroma_subsampling_x = GF_FALSE; state->config->chroma_subsampling_y = GF_FALSE; } else { if (state->bit_depth == 12) { state->config->chroma_subsampling_x = gf_bs_read_int_log(bs, 1, "chroma_subsampling_x"); if (state->config->chroma_subsampling_x) state->config->chroma_subsampling_y = gf_bs_read_int_log(bs, 1, "chroma_subsampling_y"); else state->config->chroma_subsampling_y = GF_FALSE; } else { state->config->chroma_subsampling_x = GF_TRUE; state->config->chroma_subsampling_y = GF_FALSE; } } if (state->config->chroma_subsampling_x && state->config->chroma_subsampling_y) { state->config->chroma_sample_position = gf_bs_read_int_log(bs, 2, "chroma_sample_position"); } } state->separate_uv_delta_q = gf_bs_read_int_log(bs, 1, "separate_uv_delta_q"); } static u32 av1_uvlc(GF_BitStream *bs, const char *fname) { u32 res; u8 leadingZeros = 0; while (1) { Bool done = gf_bs_read_int(bs, 1); if (done) break; leadingZeros++; } if (leadingZeros >= 32) { return 0xFFFFFFFF; } res = gf_bs_read_int(bs, leadingZeros) + (1 << leadingZeros) - 1; gf_bs_log(bs, 2*leadingZeros, fname, res); return res; } static void timing_info(GF_BitStream *bs, AV1State *state) { u32 time_scale = 0; u32 num_units_in_display_tick = gf_bs_read_int_log(bs, 32, "num_units_in_display_tick"); if (num_units_in_display_tick == 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] num_units_in_display_tick must be greater than 0.\n")); } time_scale = gf_bs_read_int_log(bs, 32, "time_scale"); if (time_scale == 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] time_scale must be greater than 0.\n")); } state->equal_picture_interval = gf_bs_read_int_log(bs, 1, "equal_picture_interval"); if (state->equal_picture_interval) { u32 num_ticks_per_picture_minus_1 = av1_uvlc(bs, "num_ticks_per_picture_minus_1"); state->tb_num = time_scale; state->tb_den = (num_ticks_per_picture_minus_1 + 1)*num_units_in_display_tick; } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] VFR not supported.\n")); //TODO: upload num_units_in_display_tick (eq. 
to the POC in H264), compute delta between frames, set it as dts_inc in gf_import_aom_av1() } } static void decoder_model_info(AV1State *state, GF_BitStream *bs) { state->buffer_delay_length = 1 + gf_bs_read_int_log(bs, 5, "buffer_delay_length_minus1"); gf_bs_read_int_log(bs, 32, "num_units_in_decoding_tick"); state->buffer_removal_time_length = gf_bs_read_int_log(bs, 5, "buffer_removal_time_length"); state->frame_presentation_time_length = 1 + gf_bs_read_int_log(bs, 5, "frame_presentation_time_length_minus1"); } static void operating_parameters_info(GF_BitStream *bs, const u8 idx, const u8 buffer_delay_length_minus_1) { const u8 n = buffer_delay_length_minus_1 + 1; gf_bs_read_int_log(bs, n, "decoder_buffer_delay"); gf_bs_read_int_log(bs, n, "encoder_buffer_delay"); gf_bs_read_int_log(bs, 1, "low_delay_mode_flag"); } static void av1_parse_sequence_header_obu(GF_BitStream *bs, AV1State *state) { u8 buffer_delay_length_minus_1 = 0; state->frame_state.seen_seq_header = GF_TRUE; state->config->seq_profile = gf_bs_read_int_log(bs, 3, "seq_profile"); state->still_picture = gf_bs_read_int_log(bs, 1, "still_picture"); state->reduced_still_picture_header = gf_bs_read_int_log(bs, 1, "reduced_still_picture_header"); if (state->reduced_still_picture_header) { //timing_info_present_flag = GF_FALSE; //initial_display_delay_present_flag = GF_FALSE; state->operating_points_count = 1; state->config->seq_level_idx_0 = gf_bs_read_int_log(bs, 5, "seq_level_idx_0"); } else { u8 i = 0; Bool initial_display_delay_present_flag; Bool timing_info_present_flag = gf_bs_read_int_log(bs, 1, "timing_info_present_flag"); if (timing_info_present_flag) { timing_info(bs, state); state->decoder_model_info_present_flag = gf_bs_read_int_log(bs, 1, "decoder_model_info_present_flag"); if (state->decoder_model_info_present_flag) { decoder_model_info(state, bs); } } else { state->decoder_model_info_present_flag = GF_FALSE; } initial_display_delay_present_flag = gf_bs_read_int_log(bs, 1, "initial_display_delay_present_flag"); state->operating_points_count = 1 + gf_bs_read_int_log(bs, 5, "operating_points_count_minus1"); for (i = 0; i < state->operating_points_count; i++) { u8 seq_level_idx_i, seq_tier = 0; state->operating_point_idc[i] = gf_bs_read_int_log_idx(bs, 12, "operating_point_idc", i); seq_level_idx_i = gf_bs_read_int_log_idx(bs, 5, "seq_level_idx", i); if (i == 0) state->config->seq_level_idx_0 = seq_level_idx_i; if (seq_level_idx_i > 7) { seq_tier = gf_bs_read_int_log_idx(bs, 1, "seq_tier", i); } if (i == 0) state->config->seq_tier_0 = seq_tier; if (state->decoder_model_info_present_flag) { state->decoder_model_present_for_this_op[i] = gf_bs_read_int_log_idx(bs, 1, "decoder_model_present_for_this_op", i); if (state->decoder_model_present_for_this_op[i]) { operating_parameters_info(bs, i, buffer_delay_length_minus_1); } } else { state->decoder_model_present_for_this_op[i] = 0; } if (initial_display_delay_present_flag) { if (gf_bs_read_int_log_idx(bs, 1, "initial_display_delay_present_for_this_op", i) ) { gf_bs_read_int_log_idx(bs, 4, "initial_display_delay_minus1", i); } } } } //operatingPoint = av1_choose_operating_point(bs); state->OperatingPointIdc = 0;//TODO: operating_point_idc[operatingPoint]; state->frame_width_bits_minus_1 = gf_bs_read_int_log(bs, 4, "frame_width_bits_minus1"); state->frame_height_bits_minus_1 = gf_bs_read_int_log(bs, 4, "frame_height_bits_minus1"); state->width = gf_bs_read_int_log(bs, state->frame_width_bits_minus_1 + 1, "width_minus1") + 1; state->height = gf_bs_read_int_log(bs, 
state->frame_height_bits_minus_1 + 1, "height_minus1") + 1; state->sequence_width = state->width; state->sequence_height = state->height; state->frame_id_numbers_present_flag = GF_FALSE; if (!state->reduced_still_picture_header) { state->frame_id_numbers_present_flag = gf_bs_read_int_log(bs, 1, "frame_id_numbers_present_flag"); } if (state->frame_id_numbers_present_flag) { state->delta_frame_id_length_minus_2 = gf_bs_read_int_log(bs, 4, "delta_frame_id_length_minus2"); state->additional_frame_id_length_minus_1 = gf_bs_read_int_log(bs, 3, "additional_frame_id_length_minus1"); } state->use_128x128_superblock = gf_bs_read_int_log(bs, 1, "use_128x128_superblock"); gf_bs_read_int_log(bs, 1, "enable_filter_intra"); gf_bs_read_int_log(bs, 1, "enable_intra_edge_filter"); if (state->reduced_still_picture_header) { /*enable_interintra_compound = 0; enable_masked_compound = 0; enable_dual_filter = 0; enable_jnt_comp = 0; enable_ref_frame_mvs = 0;*/ state->enable_warped_motion = 0; state->enable_order_hint = GF_FALSE; state->OrderHintBits = 0; state->seq_force_integer_mv = 2/*SELECT_INTEGER_MV*/; state->seq_force_screen_content_tools = 2/*SELECT_SCREEN_CONTENT_TOOLS*/; } else { Bool seq_choose_screen_content_tools; gf_bs_read_int_log(bs, 1, "enable_interintra_compound"); gf_bs_read_int_log(bs, 1, "enable_masked_compound"); state->enable_warped_motion = gf_bs_read_int_log(bs, 1, "enable_warped_motion"); gf_bs_read_int_log(bs, 1, "enable_dual_filter"); state->enable_order_hint = gf_bs_read_int_log(bs, 1, "enable_order_hint"); if (state->enable_order_hint) { gf_bs_read_int_log(bs, 1, "enable_jnt_comp"); state->enable_ref_frame_mvs = gf_bs_read_int_log(bs, 1, "enable_ref_frame_mvs"); } else { /*enable_jnt_comp = 0*/; /*enable_ref_frame_mvs = 0*/; } seq_choose_screen_content_tools = gf_bs_read_int_log(bs, 1, "seq_choose_screen_content_tools"); state->seq_force_screen_content_tools = 0; if (seq_choose_screen_content_tools) { state->seq_force_screen_content_tools = 2/*SELECT_SCREEN_CONTENT_TOOLS*/; } else { state->seq_force_screen_content_tools = gf_bs_read_int_log(bs, 1, "seq_force_screen_content_tools"); } state->seq_force_integer_mv = 0; if (state->seq_force_screen_content_tools > 0) { const Bool seq_choose_integer_mv = gf_bs_read_int_log(bs, 1, "seq_choose_integer_mv"); if (seq_choose_integer_mv) { state->seq_force_integer_mv = 2/*SELECT_INTEGER_MV*/; } else { state->seq_force_integer_mv = gf_bs_read_int_log(bs, 1, "seq_force_integer_mv"); } } else { state->seq_force_integer_mv = 2/*SELECT_INTEGER_MV*/; } if (state->enable_order_hint) { u8 order_hint_bits_minus_1 = gf_bs_read_int_log(bs, 3, "order_hint_bits_minus1"); state->OrderHintBits = order_hint_bits_minus_1 + 1; } else { state->OrderHintBits = 0; } } state->enable_superres = gf_bs_read_int_log(bs, 1, "enable_superres"); state->enable_cdef = gf_bs_read_int_log(bs, 1, "enable_cdef"); state->enable_restoration = gf_bs_read_int_log(bs, 1, "enable_restoration"); av1_color_config(bs, state); state->film_grain_params_present = gf_bs_read_int_log(bs, 1, "film_grain_params_present"); } #define IVF_FILE_HEADER_SIZE 32 Bool gf_media_probe_ivf(GF_BitStream *bs) { u32 dw = 0; if (gf_bs_available(bs) < IVF_FILE_HEADER_SIZE) return GF_FALSE; dw = gf_bs_peek_bits(bs, 32, 0); if (dw != GF_4CC('D', 'K', 'I', 'F')) { return GF_FALSE; } return GF_TRUE; } GF_Err gf_media_parse_ivf_file_header(GF_BitStream *bs, u32 *width, u32 *height, u32 *codec_fourcc, u32 *timebase_num, u32 *timebase_den, u32 *num_frames) { u32 dw = 0; if (!width || !height || !codec_fourcc || 
!timebase_den || !timebase_num || !num_frames) { assert(0); return GF_BAD_PARAM; } if (gf_bs_available(bs) < IVF_FILE_HEADER_SIZE) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Not enough bytes available ("LLU").\n", gf_bs_available(bs))); return GF_NON_COMPLIANT_BITSTREAM; } dw = gf_bs_read_u32(bs); if (dw != GF_4CC('D', 'K', 'I', 'F')) { GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[IVF] Invalid signature\n")); return GF_NON_COMPLIANT_BITSTREAM; } dw = gf_bs_read_u16_le(bs); if (dw != 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Wrong IVF version. 0 expected, got %u\n", dw)); return GF_NON_COMPLIANT_BITSTREAM; } dw = gf_bs_read_u16_le(bs); //length of header in bytes if (dw != IVF_FILE_HEADER_SIZE) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Wrong IVF header length. Expected 32 bytes, got %u\n", dw)); return GF_NON_COMPLIANT_BITSTREAM; } *codec_fourcc = gf_bs_read_u32(bs); *width = gf_bs_read_u16_le(bs); *height = gf_bs_read_u16_le(bs); *timebase_num = gf_bs_read_u32_le(bs); *timebase_den = gf_bs_read_u32_le(bs); *num_frames = gf_bs_read_u32_le(bs); gf_bs_read_u32_le(bs); //skip unused return GF_OK; } GF_Err gf_media_parse_ivf_frame_header(GF_BitStream *bs, u64 *frame_size, u64 *pts) { if (!frame_size) return GF_BAD_PARAM; if (gf_bs_available(bs) < 12) return GF_BUFFER_TOO_SMALL; *frame_size = gf_bs_read_u32_le(bs); if (*frame_size > 256 * 1024 * 1024) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Wrong frame size %u\n", *frame_size)); *frame_size = 0; return GF_NON_COMPLIANT_BITSTREAM; } *pts = gf_bs_read_u64_le(bs); return GF_OK; } GF_Err gf_media_vp9_parse_superframe(GF_BitStream *bs, u64 ivf_frame_size, u32 *num_frames_in_superframe, u32 frame_sizes[VP9_MAX_FRAMES_IN_SUPERFRAME], u32 *superframe_index_size) { u32 byte, bytes_per_framesize; u64 pos = gf_bs_get_position(bs), i = 0; GF_Err e; assert(bs && num_frames_in_superframe); /*initialize like there is no superframe*/ memset(frame_sizes, 0, VP9_MAX_FRAMES_IN_SUPERFRAME * sizeof(frame_sizes[0])); *num_frames_in_superframe = 1; frame_sizes[0] = (u32)ivf_frame_size; *superframe_index_size = 0; e = gf_bs_seek(bs, pos + ivf_frame_size - 1); if (e) return e; byte = gf_bs_read_u8(bs); if ((byte & 0xe0) != 0xc0) goto exit; /*no superframe*/ bytes_per_framesize = 1 + ((byte & 0x18) >> 3); *num_frames_in_superframe = (u32)(1 + (byte & 0x7)); /*superframe_index()*/ *superframe_index_size = 2 + bytes_per_framesize * *num_frames_in_superframe; gf_bs_seek(bs, pos + ivf_frame_size - *superframe_index_size); byte = gf_bs_read_u8(bs); if ((byte & 0xe0) != 0xc0) goto exit; /*no superframe*/ frame_sizes[0] = 0; for (i = 0; i < *num_frames_in_superframe; ++i) { gf_bs_read_data(bs, (char*)(frame_sizes + i), bytes_per_framesize); } exit: gf_bs_seek(bs, pos); return e; } static Bool vp9_frame_sync_code(GF_BitStream *bs) { u8 val = gf_bs_read_int_log(bs, 8, "syncbyte1"); if (val != 0x49) return GF_FALSE; val = gf_bs_read_int_log(bs, 8, "syncbyte2"); if (val != 0x83) return GF_FALSE; val = gf_bs_read_int_log(bs, 8, "syncbyte3"); if (val != 0x42) return GF_FALSE; return GF_TRUE; } typedef enum { CS_UNKNOWN = 0, CS_BT_601 = 1, CS_BT_709 = 2, CS_SMPTE_170 = 3, CS_SMPTE_240 = 4, CS_BT_2020 = 5, CS_RESERVED = 6, CS_RGB = 7, } VP9_color_space; static const int VP9_CS_to_23001_8_colour_primaries[] = { -1/*undefined*/, 5, 1, 6, 7, 9, -1/*reserved*/, 1 }; static const int VP9_CS_to_23001_8_transfer_characteristics[] = { -1/*undefined*/, 5, 1, 6, 7, 9, -1/*reserved*/, 13 }; static const int VP9_CS_to_23001_8_matrix_coefficients[] = { -1/*undefined*/, 6, 1, -1, -1, 9, 
-1/*reserved*/, 0 }; static GF_Err vp9_color_config(GF_BitStream *bs, GF_VPConfig *vp9_cfg) { VP9_color_space color_space; if (vp9_cfg->profile >= 2) { Bool ten_or_twelve_bit = gf_bs_read_int_log(bs, 1, "ten_or_twelve_bit"); vp9_cfg->bit_depth = ten_or_twelve_bit ? 12 : 10; } else { vp9_cfg->bit_depth = 8; } color_space = gf_bs_read_int_log(bs, 3, "color_space"); vp9_cfg->colour_primaries = VP9_CS_to_23001_8_colour_primaries[color_space]; vp9_cfg->transfer_characteristics = VP9_CS_to_23001_8_transfer_characteristics[color_space]; vp9_cfg->matrix_coefficients = VP9_CS_to_23001_8_matrix_coefficients[color_space]; if (color_space != CS_RGB) { vp9_cfg->video_fullRange_flag = gf_bs_read_int_log(bs, 1, "video_fullRange_flag"); if (vp9_cfg->profile == 1 || vp9_cfg->profile == 3) { u8 subsampling_x, subsampling_y, subsampling_xy_to_chroma_subsampling[2][2] = { {3, 0}, {2, 0} }; subsampling_x = gf_bs_read_int_log(bs, 1, "subsampling_x"); subsampling_y = gf_bs_read_int_log(bs, 1, "subsampling_x"); vp9_cfg->chroma_subsampling = subsampling_xy_to_chroma_subsampling[subsampling_x][subsampling_y]; Bool reserved_zero = gf_bs_read_int_log(bs, 1, "reserved_zero"); if (reserved_zero) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VP9] color config reserved zero (1) is not zero.\n")); return GF_NON_COMPLIANT_BITSTREAM; } } else { vp9_cfg->chroma_subsampling = 0; } } else { vp9_cfg->video_fullRange_flag = GF_TRUE; if (vp9_cfg->profile == 1 || vp9_cfg->profile == 3) { vp9_cfg->chroma_subsampling = 3; Bool reserved_zero = gf_bs_read_int_log(bs, 1, "reserved_zero"); if (reserved_zero) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VP9] color config reserved zero (2) is not zero.\n")); return GF_NON_COMPLIANT_BITSTREAM; } } } return GF_OK; } static void vp9_compute_image_size(int FrameWidth, int FrameHeight, int *Sb64Cols, int *Sb64Rows) { int MiCols = (FrameWidth + 7) >> 3; int MiRows = (FrameHeight + 7) >> 3; *Sb64Cols = (MiCols + 7) >> 3; *Sb64Rows = (MiRows + 7) >> 3; } static void vp9_frame_size(GF_BitStream *bs, int *FrameWidth, int *FrameHeight, int *Sb64Cols, int *Sb64Rows) { int frame_width_minus_1 = gf_bs_read_int_log(bs, 16, "frame_width_minus_1"); int frame_height_minus_1 = gf_bs_read_int_log(bs, 16, "frame_height_minus_1"); if (frame_width_minus_1 + 1 != *FrameWidth || frame_height_minus_1 + 1 != *FrameHeight) { if (*FrameWidth || *FrameHeight) GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[VP9] inconsistent frame dimensions: previous was %dx%d, new one is %dx%d.\n", *FrameWidth, *FrameHeight, frame_width_minus_1 + 1, frame_height_minus_1 + 1)); } *FrameWidth = frame_width_minus_1 + 1; *FrameHeight = frame_height_minus_1 + 1; vp9_compute_image_size(*FrameWidth, *FrameHeight, Sb64Cols, Sb64Rows); } static void vp9_render_size(GF_BitStream *bs, int FrameWidth, int FrameHeight, int *renderWidth, int *renderHeight) { Bool render_and_frame_size_different = gf_bs_read_int_log(bs, 1, "render_and_frame_size_different"); if (render_and_frame_size_different == 1) { int render_width_minus_1 = gf_bs_read_int_log(bs, 16, "render_width_minus_1"); int render_height_minus_1 = gf_bs_read_int_log(bs, 16, "render_height_minus_1"); *renderWidth = render_width_minus_1 + 1; *renderHeight = render_height_minus_1 + 1; } else { *renderWidth = FrameWidth; *renderHeight = FrameHeight; } } static s64 vp9_s(GF_BitStream *bs, int n, const char *fname, u32 idx) { s64 value = gf_bs_read_int(bs, n); Bool sign = gf_bs_read_int(bs, 1); if (sign) value = -value; gf_bs_log_idx(bs, n+1, fname, value, idx, -1, -1); return value; } static void 
vp9_loop_filter_params(GF_BitStream *bs) { /*loop_filter_level = */gf_bs_read_int_log(bs, 6, "loop_filter_level"); /*loop_filter_sharpness = */gf_bs_read_int_log(bs, 3, "loop_filter_sharpness"); Bool loop_filter_delta_enabled = gf_bs_read_int_log(bs, 1, "loop_filter_delta_enabled"); if (loop_filter_delta_enabled == 1) { Bool loop_filter_delta_update = gf_bs_read_int_log(bs, 1, "loop_filter_delta_update"); if (loop_filter_delta_update == GF_TRUE) { int i; for (i = 0; i < 4; i++) { Bool update_ref_delta = gf_bs_read_int_log_idx(bs, 1, "update_ref_delta", i); if (update_ref_delta == GF_TRUE) vp9_s(bs, 6, "loop_filter_ref_deltas", i); } for (i = 0; i < 2; i++) { Bool update_mode_delta = gf_bs_read_int_log_idx(bs, 1, "update_mode_delta", i); if (update_mode_delta == GF_TRUE) vp9_s(bs, 6, "loop_filter_mode_deltas", i); } } } } static void vp9_quantization_params(GF_BitStream *bs) { /*base_q_idx = */gf_bs_read_int_log(bs, 8, "base_q_idx"); } #define VP9_MAX_SEGMENTS 8 #define VP9_SEG_LVL_MAX 4 static const int segmentation_feature_bits[VP9_SEG_LVL_MAX] = { 8, 6, 2, 0 }; static const int segmentation_feature_signed[VP9_SEG_LVL_MAX] = { 1, 1, 0, 0 }; #define VP9_MIN_TILE_WIDTH_B64 4 #define VP9_MAX_TILE_WIDTH_B64 64 static void vp9_segmentation_params(GF_BitStream *bs) { Bool segmentation_enabled = gf_bs_read_int_log(bs, 1, "segmentation_enabled"); if (segmentation_enabled == 1) { int i; Bool segmentation_update_map = gf_bs_read_int_log(bs, 1, "segmentation_update_map"); if (segmentation_update_map) { for (i = 0; i < 7; i++) /*segmentation_tree_probs[i] = read_prob()*/ /*segmentation_temporal_update = */gf_bs_read_int_log(bs, 1, "segmentation_temporal_update"); /*for (i = 0; i < 3; i++) segmentation_pred_prob[i] = segmentation_temporal_update ? read_prob() : 255*/ } Bool segmentation_update_data = gf_bs_read_int_log(bs, 1, "segmentation_update_data"); if (segmentation_update_data == 1) { /*segmentation_abs_or_delta_update =*/ gf_bs_read_int_log(bs, 1, "segmentation_abs_or_delta_update"); for (i = 0; i < VP9_MAX_SEGMENTS; i++) { int j; for (j = 0; j < VP9_SEG_LVL_MAX; j++) { /*feature_value = 0*/ Bool feature_enabled = gf_bs_read_int_log(bs, 1, "feature_enabled"); /*FeatureEnabled[i][j] = feature_enabled*/ if (feature_enabled) { int bits_to_read = segmentation_feature_bits[j]; /*feature_value =*/ gf_bs_read_int_log(bs, bits_to_read, "feature_value"); if (segmentation_feature_signed[j] == 1) { /*Bool feature_sign = */gf_bs_read_int_log(bs, 1, "feature_sign"); /*if (feature_sign == 1) feature_value *= -1*/ } } /*FeatureData[i][j] = feature_value*/ } } } } } static int calc_min_log2_tile_cols(int Sb64Cols) { int minLog2 = 0; while ((VP9_MAX_TILE_WIDTH_B64 << minLog2) < Sb64Cols) minLog2++; return minLog2; } static int calc_max_log2_tile_cols(int Sb64Cols) { int maxLog2 = 1; while ((Sb64Cols >> maxLog2) >= VP9_MIN_TILE_WIDTH_B64) maxLog2++; return maxLog2 - 1; } static void vp9_tile_info(GF_BitStream *bs, int Sb64Cols) { Bool tile_rows_log2; int minLog2TileCols = calc_min_log2_tile_cols(Sb64Cols); int maxLog2TileCols = calc_max_log2_tile_cols(Sb64Cols); int tile_cols_log2 = minLog2TileCols; while (tile_cols_log2 < maxLog2TileCols) { Bool increment_tile_cols_log2 = gf_bs_read_int_log(bs, 1, "increment_tile_cols_log2"); if (increment_tile_cols_log2) tile_cols_log2++; else break; } tile_rows_log2 = gf_bs_read_int_log(bs, 1, "tile_rows_log2"); if (tile_rows_log2) { /*Bool increment_tile_rows_log2 = */gf_bs_read_int_log(bs, 1, "increment_tile_rows_log2"); //tile_rows_log2 += increment_tile_rows_log2; } } 
static void vp9_frame_size_with_refs(GF_BitStream *bs, u8 refresh_frame_flags, u8 * ref_frame_idx, int * RefFrameWidth, int *RefFrameHeight, int *FrameWidth, int *FrameHeight, int *RenderWidth, int *RenderHeight, int *Sb64Cols, int *Sb64Rows) { Bool found_ref; int i; for (i = 0; i < 3; i++) { found_ref = gf_bs_read_int_log(bs, 1, "found_ref"); if (found_ref) { *FrameWidth = RefFrameWidth [ref_frame_idx[i]]; *FrameHeight = RefFrameHeight[ref_frame_idx[i]]; break; } } if (found_ref == 0) { vp9_frame_size(bs, FrameWidth, FrameHeight, Sb64Cols, Sb64Rows); } else { vp9_compute_image_size(*FrameWidth, *FrameHeight, Sb64Cols, Sb64Rows); } vp9_render_size(bs, *FrameWidth, *FrameHeight, RenderWidth, RenderHeight); } static void vp9_read_interpolation_filter(GF_BitStream *bs) { Bool is_filter_switchable = gf_bs_read_int_log(bs, 1, "is_filter_switchable"); if (!is_filter_switchable) { /*raw_interpolation_filter = */gf_bs_read_int_log(bs, 2, "raw_interpolation_filter"); } } #define VP9_KEY_FRAME 0 GF_Err gf_media_vp9_parse_sample(GF_BitStream *bs, GF_VPConfig *vp9_cfg, Bool *key_frame, u32 *FrameWidth, u32 *FrameHeight, u32 *renderWidth, u32 *renderHeight) { Bool FrameIsIntra = GF_FALSE, profile_low_bit, profile_high_bit, show_existing_frame = GF_FALSE, frame_type = GF_FALSE, show_frame = GF_FALSE, error_resilient_mode = GF_FALSE; /*u8 frame_context_idx = 0, reset_frame_context = 0, frame_marker = 0*/; int Sb64Cols = 0, Sb64Rows = 0, i; u8 refresh_frame_flags = 0; assert(bs && key_frame); /*uncompressed header*/ /*frame_marker = */gf_bs_read_int_log(bs, 2, "frame_marker"); profile_low_bit = gf_bs_read_int_log(bs, 1, "profile_low_bit"); profile_high_bit = gf_bs_read_int_log(bs, 1, "profile_high_bit"); vp9_cfg->profile = (profile_high_bit << 1) + profile_low_bit; if (vp9_cfg->profile == 3) { Bool reserved_zero = gf_bs_read_int_log(bs, 1, "reserved_zero"); if (reserved_zero) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VP9] uncompressed header reserved zero is not zero.\n")); return GF_NON_COMPLIANT_BITSTREAM; } } show_existing_frame = gf_bs_read_int_log(bs, 1, "show_existing_frame"); if (show_existing_frame == GF_TRUE) { /*frame_to_show_map_idx = */gf_bs_read_int_log(bs, 3, "frame_to_show_map_idx"); return GF_OK; } frame_type = gf_bs_read_int_log(bs, 1, "frame_type"); show_frame = gf_bs_read_int_log(bs, 1, "show_frame"); error_resilient_mode = gf_bs_read_int_log(bs, 1, "error_resilient_mode"); if (frame_type == VP9_KEY_FRAME) { if (!vp9_frame_sync_code(bs)) return GF_NON_COMPLIANT_BITSTREAM; if (vp9_color_config(bs, vp9_cfg) != GF_OK) return GF_NON_COMPLIANT_BITSTREAM; vp9_frame_size(bs, FrameWidth, FrameHeight, &Sb64Cols, &Sb64Rows); vp9_render_size(bs, *FrameWidth, *FrameHeight, renderWidth, renderHeight); refresh_frame_flags = 0xFF; *key_frame = GF_TRUE; FrameIsIntra = GF_TRUE; } else { Bool intra_only = GF_FALSE; *key_frame = GF_FALSE; if (show_frame == GF_FALSE) { intra_only = gf_bs_read_int_log(bs, 1, "intra_only"); } FrameIsIntra = intra_only; if (error_resilient_mode == GF_FALSE) { /*reset_frame_context = */gf_bs_read_int_log(bs, 2, "reset_frame_context"); } if (intra_only == GF_TRUE) { if (!vp9_frame_sync_code(bs)) return GF_NON_COMPLIANT_BITSTREAM; if (vp9_cfg->profile > 0) { if (vp9_color_config(bs, vp9_cfg) != GF_OK) return GF_NON_COMPLIANT_BITSTREAM; } else { u8 color_space = CS_BT_601; vp9_cfg->colour_primaries = VP9_CS_to_23001_8_colour_primaries[color_space]; vp9_cfg->transfer_characteristics = VP9_CS_to_23001_8_transfer_characteristics[color_space]; vp9_cfg->matrix_coefficients = 
VP9_CS_to_23001_8_matrix_coefficients[color_space]; vp9_cfg->chroma_subsampling = 0; vp9_cfg->bit_depth = 8; } refresh_frame_flags = gf_bs_read_int_log(bs, 8, "refresh_frame_flags"); vp9_frame_size(bs, FrameWidth, FrameHeight, &Sb64Cols, &Sb64Rows); vp9_render_size(bs, *FrameWidth, *FrameHeight, renderWidth, renderHeight); } else { refresh_frame_flags = gf_bs_read_int_log(bs, 8, "refresh_frame_flags"); u8 ref_frame_idx[3]; for (i = 0; i < 3; i++) { ref_frame_idx[i] = gf_bs_read_int_log_idx(bs, 3, "ref_frame_idx", i); /*ref_frame_sign_bias[LAST_FRAME + i] = */gf_bs_read_int_log_idx(bs, 1, "ref_frame_sign_bias", i); } vp9_frame_size_with_refs(bs, refresh_frame_flags, ref_frame_idx, vp9_cfg->RefFrameWidth, vp9_cfg->RefFrameHeight, FrameWidth, FrameHeight, renderWidth, renderHeight, &Sb64Cols, &Sb64Rows); /*allow_high_precision_mv = */gf_bs_read_int_log(bs, 1, "allow_high_precision_mv"); vp9_read_interpolation_filter(bs); } } if (error_resilient_mode == 0) { /*refresh_frame_context = */gf_bs_read_int_log(bs, 1, "refresh_frame_context"); /*frame_parallel_decoding_mode = */gf_bs_read_int_log(bs, 1, "frame_parallel_decoding_mode"); } /*frame_context_idx = */gf_bs_read_int_log(bs, 2, "frame_context_idx"); if (FrameIsIntra || error_resilient_mode) { /*setup_past_independence + save_probs ...*/ //frame_context_idx = 0; } vp9_loop_filter_params(bs); vp9_quantization_params(bs); vp9_segmentation_params(bs); vp9_tile_info(bs, Sb64Cols); /*header_size_in_bytes = */gf_bs_read_int_log(bs, 16, "header_size_in_bytes"); /*Reference frame update process (8.10 - partial)*/ for (i = 0; i < VP9_NUM_REF_FRAMES; i++) { if ((refresh_frame_flags >> i) & 1) { vp9_cfg->RefFrameWidth[i] = *FrameWidth; vp9_cfg->RefFrameHeight[i] = *FrameHeight; } } return GF_OK; } GF_Err gf_av1_parse_obu_header(GF_BitStream *bs, ObuType *obu_type, Bool *obu_extension_flag, Bool *obu_has_size_field, u8 *temporal_id, u8 *spatial_id) { Bool forbidden = gf_bs_read_int(bs, 1); if (forbidden) { return GF_NON_COMPLIANT_BITSTREAM; } *obu_type = gf_bs_read_int(bs, 4); *obu_extension_flag = gf_bs_read_int(bs, 1); *obu_has_size_field = gf_bs_read_int(bs, 1); if (gf_bs_read_int(bs, 1) /*obu_reserved_1bit*/) { return GF_NON_COMPLIANT_BITSTREAM; } if (*obu_extension_flag) { *temporal_id = gf_bs_read_int(bs, 3); *spatial_id = gf_bs_read_int(bs, 2); /*extension_header_reserved_3bits = */gf_bs_read_int(bs, 3); } return GF_OK; } #endif // GPAC_DISABLE_AV_PARSERS GF_EXPORT const char *gf_av1_get_obu_name(ObuType obu_type) { switch (obu_type) { case OBU_SEQUENCE_HEADER: return "seq_header"; case OBU_TEMPORAL_DELIMITER: return "delimiter"; case OBU_FRAME_HEADER: return "frame_header"; case OBU_TILE_GROUP: return "tile_group"; case OBU_METADATA: return "metadata"; case OBU_FRAME: return "frame"; case OBU_REDUNDANT_FRAME_HEADER: return "redundant_frame_header"; case OBU_TILE_LIST: return "tile_list"; case OBU_PADDING: return "padding"; case OBU_RESERVED_0: case OBU_RESERVED_9: case OBU_RESERVED_10: case OBU_RESERVED_11: case OBU_RESERVED_12: case OBU_RESERVED_13: case OBU_RESERVED_14: return "reserved"; default: return "unknown"; } } Bool av1_is_obu_header(ObuType obu_type) { switch (obu_type) { case OBU_SEQUENCE_HEADER: case OBU_METADATA: // TODO add check based on the metadata type return GF_TRUE; default: return GF_FALSE; } } #ifndef GPAC_DISABLE_AV_PARSERS static Bool av1_is_obu_frame(AV1State *state, ObuType obu_type) { switch (obu_type) { case OBU_PADDING: case OBU_REDUNDANT_FRAME_HEADER: return GF_FALSE; case OBU_TEMPORAL_DELIMITER: return 
state->keep_temporal_delim ? GF_TRUE : GF_FALSE; default: return GF_TRUE; } } u64 gf_av1_leb128_read(GF_BitStream *bs, u8 *opt_Leb128Bytes) { u64 value = 0; u8 Leb128Bytes = 0, i = 0; for (i = 0; i < 8; i++) { u8 leb128_byte = gf_bs_read_u8(bs); value |= ( ((u64) (leb128_byte & 0x7f)) << (i * 7)); Leb128Bytes += 1; if (!(leb128_byte & 0x80)) { break; } } if (opt_Leb128Bytes) { *opt_Leb128Bytes = Leb128Bytes; } return value; } u32 gf_av1_leb128_size(u64 value) { u32 gf_av1_leb128_size = 0; do { ++gf_av1_leb128_size; } while ((value >>= 7) != 0); return gf_av1_leb128_size; } u64 gf_av1_leb128_write(GF_BitStream *bs, u64 value) { u32 i, leb_size = gf_av1_leb128_size(value); for (i = 0; i < leb_size; ++i) { u8 byte = value & 0x7f; value >>= 7; if (value != 0) byte |= 0x80; //more bytes follow gf_bs_write_u8(bs, byte); } return leb_size; } #define OBU_BLOCK_SIZE 4096 static void av1_add_obu_internal(GF_BitStream *bs, u64 pos, u64 obu_length, ObuType obu_type, GF_List **obu_list, AV1State *state) { char block[OBU_BLOCK_SIZE]; Bool has_size_field = 0, obu_extension_flag = 0; u8 temporal_id, spatial_id; GF_AV1_OBUArrayEntry *a = NULL; if (state && state->mem_mode) { if (!state->bs) state->bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); else gf_bs_reassign_buffer(state->bs, state->frame_obus, state->frame_obus_alloc); } else { GF_SAFEALLOC(a, GF_AV1_OBUArrayEntry); if (!a) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[AV1] Failed to allocate OBU\n")); return; } } gf_bs_seek(bs, pos); gf_av1_parse_obu_header(bs, &obu_type, &obu_extension_flag, &has_size_field, &temporal_id, &spatial_id); gf_bs_seek(bs, pos); if (has_size_field) { if (a) { a->obu = gf_malloc((size_t)obu_length); gf_bs_read_data(bs, a->obu, (u32)obu_length); a->obu_length = obu_length; } else { u32 remain = (u32)obu_length; while (remain) { u32 block_size = OBU_BLOCK_SIZE; if (block_size > remain) block_size = remain; gf_bs_read_data(bs, block, block_size); gf_bs_write_data(state->bs, block, block_size); remain -= block_size; } return; } } else { u8 i, hdr_size = obu_extension_flag ? 
2 : 1; const u32 leb_size = (u32)gf_av1_leb128_size(obu_length); const u64 obu_size = obu_length - hdr_size; if (a) { a->obu = gf_malloc((size_t)obu_length + leb_size); a->obu_length = obu_length + leb_size; for (i = 0; i < hdr_size; ++i) { a->obu[i] = gf_bs_read_u8(bs); /*add size field flag*/ if (i == 0) a->obu[0] |= 0x02; } { u32 out_size = 0; u8 *output = NULL; GF_BitStream *bsLeb128 = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); /*write size field*/ gf_av1_leb128_write(bsLeb128, obu_size); assert(gf_bs_get_position(bsLeb128) == leb_size); gf_bs_get_content(bsLeb128, &output, &out_size); gf_bs_del(bsLeb128); memcpy(a->obu + hdr_size, output, out_size); gf_free(output); } gf_bs_read_data(bs, a->obu + hdr_size + leb_size, (u32)(obu_size)); assert(gf_bs_get_position(bs) == pos + obu_length); } else { u32 remain; for (i = 0; i < hdr_size; ++i) { u8 hdr_b = gf_bs_read_u8(bs); if (i == 0) hdr_b |= 0x02; /*add size field flag*/ gf_bs_write_u8(state->bs, hdr_b); } /*add size field */ gf_av1_leb128_write(state->bs, obu_size); remain = (u32)obu_length - hdr_size; while (remain) { u32 block_size = OBU_BLOCK_SIZE; if (block_size > remain) block_size = remain; gf_bs_read_data(bs, block, block_size); gf_bs_write_data(state->bs, block, block_size); remain -= block_size; } assert(gf_bs_get_position(bs) == pos + obu_length); return; } } if (!obu_list) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[AV1] internal error, no OBU list cannot add\n")); gf_free(a->obu); gf_free(a); return; } a->obu_type = obu_type; if (! *obu_list) *obu_list = gf_list_new(); gf_list_add(*obu_list, a); } static void av1_populate_state_from_obu(GF_BitStream *bs, u64 pos, u64 obu_length, ObuType obu_type, AV1State *state) { if (av1_is_obu_header(obu_type)) { av1_add_obu_internal(bs, pos, obu_length, obu_type, &state->frame_state.header_obus, NULL); } if (!state->skip_frames && av1_is_obu_frame(state, obu_type)) { if (!state->mem_mode) { av1_add_obu_internal(bs, pos, obu_length, obu_type, &state->frame_state.frame_obus, NULL); } else { av1_add_obu_internal(bs, pos, obu_length, obu_type, NULL, state); } } } GF_Err aom_av1_parse_temporal_unit_from_section5(GF_BitStream *bs, AV1State *state) { if (!state) return GF_BAD_PARAM; state->obu_type = -1; while (state->obu_type != OBU_TEMPORAL_DELIMITER) { GF_Err e; if (!gf_bs_available(bs)) return state->unframed ? 
GF_BUFFER_TOO_SMALL : GF_OK; u64 pos = gf_bs_get_position(bs), obu_length = 0; e = gf_av1_parse_obu(bs, &state->obu_type, &obu_length, NULL, state); if (e) return e; if (obu_length != gf_bs_get_position(bs) - pos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] OBU (Section 5) frame size "LLU" different from consumed bytes "LLU".\n", obu_length, gf_bs_get_position(bs) - pos)); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Section5 OBU detected (size "LLU")\n", obu_length)); av1_populate_state_from_obu(bs, pos, obu_length, state->obu_type, state); } return GF_OK; } Bool gf_media_aom_probe_annexb(GF_BitStream *bs) { Bool res = GF_TRUE; u64 init_pos = gf_bs_get_position(bs); u64 sz = gf_av1_leb128_read(bs, NULL); if (!sz) res = GF_FALSE; while (sz > 0) { u8 Leb128Bytes = 0; u64 frame_unit_size = gf_av1_leb128_read(bs, &Leb128Bytes); if (!frame_unit_size) { res = GF_FALSE; break; } if (sz < Leb128Bytes + frame_unit_size) { res = GF_FALSE; break; } sz -= Leb128Bytes + frame_unit_size; while (frame_unit_size > 0) { ObuType obu_type; u64 pos, obu_length = gf_av1_leb128_read(bs, &Leb128Bytes); if (frame_unit_size < Leb128Bytes + obu_length) { res = GF_FALSE; break; } pos = gf_bs_get_position(bs); frame_unit_size -= Leb128Bytes; u8 tid, sid; Bool extflag, has_size; GF_Err e = gf_av1_parse_obu_header(bs, &obu_type, &extflag, &has_size, &tid, &sid); if (e) { res = GF_FALSE; break; } if (has_size) { obu_length = (u32)gf_av1_leb128_read(bs, NULL); } else { if (obu_length >= 1 + extflag) { obu_length = obu_length - 1 - extflag; } else { res = GF_FALSE; break; } } u32 hdr_size = (u32)(gf_bs_get_position(bs) - pos); obu_length += hdr_size; if (frame_unit_size < obu_length) { res = GF_FALSE; break; } frame_unit_size -= obu_length; gf_bs_skip_bytes(bs, obu_length - hdr_size); } if (!res) break; } gf_bs_seek(bs, init_pos); return res; } GF_Err aom_av1_parse_temporal_unit_from_annexb(GF_BitStream *bs, AV1State *state) { GF_Err e; u64 tupos; u64 tusize, sz; if (!bs || !state) return GF_BAD_PARAM; state->bs_overread = GF_FALSE; tusize = sz = gf_av1_leb128_read(bs, NULL); tupos = gf_bs_get_position(bs); if (!sz) { GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[AV1] temporal unit size is 0, likely not annex B\n")); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Annex B temporal unit detected (size "LLU") ***** \n", sz)); while (sz > 0) { u8 Leb128Bytes = 0; u64 frame_unit_size = gf_av1_leb128_read(bs, &Leb128Bytes); if (state->bs_overread) { return GF_BUFFER_TOO_SMALL; } if (sz < Leb128Bytes + frame_unit_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Annex B sz("LLU") < Leb128Bytes("LLU") + frame_unit_size("LLU")\n", sz, Leb128Bytes, frame_unit_size)); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Annex B frame unit detected (size "LLU")\n", frame_unit_size)); sz -= Leb128Bytes + frame_unit_size; while (frame_unit_size > 0) { u64 pos, obu_length = gf_av1_leb128_read(bs, &Leb128Bytes); if (state->bs_overread) { return GF_BUFFER_TOO_SMALL; } if (frame_unit_size < Leb128Bytes + obu_length) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Annex B frame_unit_size("LLU") < Leb128Bytes("LLU") + obu_length("LLU")\n", frame_unit_size, Leb128Bytes, obu_length)); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Annex B OBU detected (size "LLU")\n", obu_length)); pos = gf_bs_get_position(bs); frame_unit_size -= Leb128Bytes; e = gf_av1_parse_obu(bs, &state->obu_type, &obu_length, 
NULL, state); if (e) return e; if (obu_length != gf_bs_get_position(bs) - pos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] Annex B frame size "LLU" different from consumed bytes "LLU".\n", obu_length, gf_bs_get_position(bs) - pos)); return GF_NON_COMPLIANT_BITSTREAM; } av1_populate_state_from_obu(bs, pos, obu_length, state->obu_type, state); if (frame_unit_size < obu_length) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Annex B frame_unit_size("LLU") < OBU size ("LLU")\n", frame_unit_size, obu_length)); return GF_NON_COMPLIANT_BITSTREAM; } frame_unit_size -= obu_length; } } assert(sz == 0); if (tusize != gf_bs_get_position(bs) - tupos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] Annex B TU size "LLU" different from consumed bytes "LLU".\n", tusize, gf_bs_get_position(bs) - tupos)); return GF_NON_COMPLIANT_BITSTREAM; } return GF_OK; } GF_Err aom_av1_parse_temporal_unit_from_ivf(GF_BitStream *bs, AV1State *state) { u64 frame_size, pts_ignored; GF_Err e; if (gf_bs_available(bs)<12) return GF_EOS; e = gf_media_parse_ivf_frame_header(bs, &frame_size, &pts_ignored); if (e) return e; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] IVF frame detected (size "LLU")\n", frame_size)); if (gf_bs_available(bs) < frame_size) return GF_EOS; while (frame_size > 0) { u64 obu_size = 0, pos = gf_bs_get_position(bs); e = gf_av1_parse_obu(bs, &state->obu_type, &obu_size, NULL, state); if (e != GF_OK) return e; if (obu_size != gf_bs_get_position(bs) - pos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] IVF frame size "LLU" different from consumed bytes "LLU".\n", obu_size, gf_bs_get_position(bs) - pos)); return GF_NON_COMPLIANT_BITSTREAM; } av1_populate_state_from_obu(bs, pos, obu_size, state->obu_type, state); frame_size -= obu_size; } return GF_OK; } #define AV1_NUM_REF_FRAMES 8 #define AV1_ALL_FRAMES ((1 << AV1_NUM_REF_FRAMES) - 1) #define AV1_SUPERRES_DENOM_MIN 9 #define AV1_SUPERRES_DENOM_BITS 3 #define AV1_SUPERRES_NUM 8 #define AV1_REFS_PER_FRAME 7 #define AV1_PRIMARY_REF_NONE 7 #define MAX_TILE_WIDTH 4096 #define MAX_TILE_AREA (4096 * 2304) static u32 aom_av1_tile_log2(u32 blkSize, u32 target) { u32 k; for (k = 0; (blkSize << k) < target; k++) { } return k; } static u64 aom_av1_le(GF_BitStream *bs, u32 n, const char *name) { u32 i = 0; u64 t = 0; for (i = 0; i < n; i++) { u8 byte = gf_bs_read_int(bs, 8); t += (byte << (i * 8)); } gf_bs_log(bs, n*8, name, t); return t; } static void av1_parse_tile_info(GF_BitStream *bs, AV1State *state) { u32 i; u32 MiCols = 2 * ((state->width + 7) >> 3); u32 MiRows = 2 * ((state->height + 7) >> 3); u32 sbCols = state->use_128x128_superblock ? ((MiCols + 31) >> 5) : ((MiCols + 15) >> 4); u32 sbRows = state->use_128x128_superblock ? ((MiRows + 31) >> 5) : ((MiRows + 15) >> 4); u32 sbShift = state->use_128x128_superblock ? 
5 : 4; u32 sbSize = sbShift + 2; u32 maxTileWidthSb = MAX_TILE_WIDTH >> sbSize; u32 maxTileAreaSb = MAX_TILE_AREA >> (2 * sbSize); u32 minLog2tileCols = aom_av1_tile_log2(maxTileWidthSb, sbCols); u32 maxLog2tileCols = aom_av1_tile_log2(1, MIN(sbCols, AV1_MAX_TILE_COLS)); u32 maxLog2tileRows = aom_av1_tile_log2(1, MIN(sbRows, AV1_MAX_TILE_ROWS)); u32 minLog2Tiles = MAX(minLog2tileCols, aom_av1_tile_log2(maxTileAreaSb, sbRows * sbCols)); Bool uniform_tile_spacing_flag = gf_bs_read_int_log(bs, 1, "uniform_tile_spacing_flag"); if (uniform_tile_spacing_flag) { u32 startSb, tileWidthSb, tileHeightSb, minLog2tileRows; state->tileColsLog2 = minLog2tileCols; while (state->tileColsLog2 < maxLog2tileCols) { Bool increment_tile_cols_log2 = gf_bs_read_int_log(bs, 1, "increment_tile_cols_log2"); if (increment_tile_cols_log2 == 1) state->tileColsLog2++; else break; } tileWidthSb = (sbCols + (1 << state->tileColsLog2) - 1) >> state->tileColsLog2; i = 0; for (startSb = 0; startSb < sbCols; startSb += tileWidthSb) { i += 1; } state->tileCols = i; minLog2tileRows = MAX((int)(minLog2Tiles - state->tileColsLog2), 0); state->tileRowsLog2 = minLog2tileRows; while (state->tileRowsLog2 < maxLog2tileRows) { Bool increment_tile_rows_log2 = gf_bs_read_int_log(bs, 1, "increment_tile_rows_log2"); if (increment_tile_rows_log2 == 1) state->tileRowsLog2++; else break; } tileHeightSb = (sbRows + (1 << state->tileRowsLog2) - 1) >> state->tileRowsLog2; i = 0; for (startSb = 0; startSb < sbRows; startSb += tileHeightSb) { i += 1; } state->tileRows = i; } else { u32 startSb, maxTileHeightSb, widestTileSb; widestTileSb = 0; startSb = 0; for (i = 0; startSb < sbCols; i++) { u32 maxWidth = MIN((int)(sbCols - startSb), maxTileWidthSb); u32 width_in_sbs_minus_1 = av1_read_ns(bs, maxWidth, "width_in_sbs_minus_1"); u32 sizeSb = width_in_sbs_minus_1 + 1; widestTileSb = MAX(sizeSb, widestTileSb); startSb += sizeSb; } if (!widestTileSb) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] widest tile is 0, broken bitstream\n")); return; } state->tileCols = i; state->tileColsLog2 = aom_av1_tile_log2(1, state->tileCols); if (minLog2Tiles > 0) maxTileAreaSb = (sbRows * sbCols) >> (minLog2Tiles + 1); else maxTileAreaSb = sbRows * sbCols; maxTileHeightSb = MAX(maxTileAreaSb / widestTileSb, 1); startSb = 0; for (i = 0; startSb < sbRows; i++) { u32 maxHeight = MIN((int)(sbRows - startSb), maxTileHeightSb); u32 height_in_sbs_minus_1 = av1_read_ns(bs, maxHeight, "height_in_sbs_minus_1"); u32 sizeSb = height_in_sbs_minus_1 + 1; startSb += sizeSb; } state->tileRows = i; state->tileRowsLog2 = aom_av1_tile_log2(1, state->tileRows); } if (state->tileColsLog2 > 0 || state->tileRowsLog2 > 0) { gf_bs_read_int_log(bs, state->tileRowsLog2 + state->tileColsLog2, "context_update_tile_id"); state->tile_size_bytes = gf_bs_read_int_log(bs, 2, "tile_size_bytes_minus1") + 1; } } static void superres_params(GF_BitStream *bs, AV1State *state) { u32 SuperresDenom; Bool use_superres; if (state->enable_superres) { use_superres = gf_bs_read_int_log(bs, 1, "use_superres"); } else { use_superres = GF_FALSE; } if (use_superres) { u8 coded_denom = gf_bs_read_int_log(bs, AV1_SUPERRES_DENOM_BITS, "coded_denom"); SuperresDenom = coded_denom + AV1_SUPERRES_DENOM_MIN; } else { SuperresDenom = AV1_SUPERRES_NUM; } state->UpscaledWidth = state->width; state->width = (state->UpscaledWidth * AV1_SUPERRES_NUM + (SuperresDenom / 2)) / SuperresDenom; } static void av1_frame_size(GF_BitStream *bs, AV1State *state, Bool frame_size_override_flag) { if (frame_size_override_flag) { u32 
frame_width_minus_1, frame_height_minus_1; u8 n = state->frame_width_bits_minus_1 + 1; frame_width_minus_1 = gf_bs_read_int_log(bs, n, "frame_width_minus_1"); n = state->frame_height_bits_minus_1 + 1; frame_height_minus_1 = gf_bs_read_int_log(bs, n, "frame_height_minus_1"); state->width = frame_width_minus_1 + 1; state->height = frame_height_minus_1 + 1; } else { state->width = state->sequence_width; state->height = state->sequence_height; } superres_params(bs, state); //compute_image_size(); //no bits } static void av1_render_size(GF_BitStream *bs) { Bool render_and_frame_size_different = gf_bs_read_int_log(bs, 1, "render_and_frame_size_different_flag"); if (render_and_frame_size_different == GF_TRUE) { gf_bs_read_int_log(bs, 16, "render_width_minus_1"); gf_bs_read_int_log(bs, 16, "render_height_minus_1"); //RenderWidth = render_width_minus_1 + 1; //RenderHeight = render_height_minus_1 + 1; } else { //RenderWidth = UpscaledWidth; //RenderHeight = FrameHeight; } } static void read_interpolation_filter(GF_BitStream *bs) { Bool is_filter_switchable = gf_bs_read_int_log(bs, 1, "is_filter_switchable"); if (!is_filter_switchable) { /*interpolation_filter =*/ gf_bs_read_int_log(bs, 2, "interpolation_filter"); } } static void frame_size_with_refs(GF_BitStream *bs, AV1State *state, Bool frame_size_override_flag, s8 *ref_frame_idx) { Bool found_ref = GF_FALSE; u32 i = 0; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { found_ref = gf_bs_read_int_log_idx(bs, 1, "found_ref", i); if (found_ref == 1) { state->UpscaledWidth = state->RefUpscaledWidth[ref_frame_idx[i]]; state->width = state->UpscaledWidth; state->height = state->RefFrameHeight[ref_frame_idx[i]]; break; } } if (found_ref == 0) { av1_frame_size(bs, state, frame_size_override_flag); av1_render_size(bs); } else { superres_params(bs, state); //compute_image_size(); } } static s32 av1_delta_q(GF_BitStream *bs, const char *name_flag, const char *name) { Bool delta_coded = gf_bs_read_int_log(bs, 1, name_flag); s32 delta_q = 0; if (delta_coded) { u32 signMask = 1 << (7 - 1); delta_q = gf_bs_read_int_log(bs, 7, name); if (delta_q & signMask) delta_q = delta_q - 2 * signMask; } return delta_q; } static u8 Segmentation_Feature_Bits[] = { 8,6,6,6,6,3,0,0 }; static u8 Segmentation_Feature_Signed[] = { 1, 1, 1, 1, 1, 0, 0, 0 }; static u8 av1_get_qindex(Bool ignoreDeltaQ, u32 segmentId, u32 base_q_idx, u32 delta_q_present, u32 CurrentQIndex, Bool segmentation_enabled, u8 *features_SEG_LVL_ALT_Q_enabled, s32 *features_SEG_LVL_ALT_Q) { //If seg_feature_active_idx( segmentId, SEG_LVL_ALT_Q ) is equal to 1 the following ordered steps apply: if (segmentation_enabled && features_SEG_LVL_ALT_Q_enabled[segmentId]) { //Set the variable data equal to FeatureData[ segmentId ][ SEG_LVL_ALT_Q ]. s32 data = features_SEG_LVL_ALT_Q[segmentId]; s32 qindex = base_q_idx + data; //If ignoreDeltaQ is equal to 0 and delta_q_present is equal to 1, set qindex equal to CurrentQIndex + data. if ((ignoreDeltaQ == 0) && (delta_q_present == 1)) qindex = CurrentQIndex + data; //Return Clip3( 0, 255, qindex ). if (qindex < 0) return 0; else if (qindex > 255) return 255; else return (u8)qindex; } //Otherwise, if ignoreDeltaQ is equal to 0 and delta_q_present is equal to 1, return CurrentQIndex. 
if ((ignoreDeltaQ == 0) && (delta_q_present == 1)) return CurrentQIndex; //otherwise return base_q_idx; } enum { AV1_RESTORE_NONE = 0, AV1_RESTORE_SWITCHABLE, AV1_RESTORE_WIENER, AV1_RESTORE_SGRPROJ }; #define AV1_GMC_IDENTITY 0 #define AV1_GMC_TRANSLATION 1 #define AV1_GMC_ROTZOOM 2 #define AV1_GMC_AFFINE 3 #define AV1_LAST_FRAME 1 #define AV1_LAST2_FRAME 2 #define AV1_LAST3_FRAME 3 #define AV1_GOLDEN_FRAME 4 #define AV1_BWDREF_FRAME 5 #define AV1_ALTREF2_FRAME 6 #define AV1_ALTREF_FRAME 7 #define GM_ABS_ALPHA_BITS 12 #define GM_ALPHA_PREC_BITS 15 #define GM_ABS_TRANS_ONLY_BITS 9 #define GM_TRANS_ONLY_PREC_BITS 3 #define GM_ABS_TRANS_BITS 12 #define GM_TRANS_PREC_BITS 6 #define WARPEDMODEL_PREC_BITS 16 static u32 av1_decode_subexp(GF_BitStream *bs, s32 numSyms) { s32 i = 0; s32 mk = 0; s32 k = 3; while (1) { s32 b2 = i ? k + i - 1 : k; s32 a = 1 << b2; if (numSyms <= mk + 3 * a) { s32 subexp_final_bits = av1_read_ns(bs, numSyms - mk, NULL); return subexp_final_bits + mk; } else { s32 subexp_more_bits = gf_bs_read_int(bs, 1); if (subexp_more_bits) { i++; mk += a; } else { s32 subexp_bits = gf_bs_read_int(bs, b2); return subexp_bits + mk; } } } } static GFINLINE s32 inverse_recenter(s32 r, u32 v) { if ((s64)v > (s64)(2 * r)) return v; else if (v & 1) return r - ((v + 1) >> 1); else return r + (v >> 1); } static s32 av1_decode_unsigned_subexp_with_ref(GF_BitStream *bs, s32 mx, s32 r) { u32 v = av1_decode_subexp(bs, mx); if ((r < 0) && (-(-r << 1) <= mx)) { return inverse_recenter(r, v); } else if ((r << 1) <= mx) { return inverse_recenter(r, v); } else { return mx - 1 - inverse_recenter(mx - 1 - r, v); } } static s16 av1_decode_signed_subexp_with_ref(GF_BitStream *bs, s32 low, s32 high, s32 r) { s16 x = av1_decode_unsigned_subexp_with_ref(bs, high - low, r - low); return x + low; } static void av1_read_global_param(AV1State *state, GF_BitStream *bs, u8 type, u8 ref, u8 idx) { u8 absBits = GM_ABS_ALPHA_BITS; u8 precBits = GM_ALPHA_PREC_BITS; if (idx < 2) { if (type == AV1_GMC_TRANSLATION) { absBits = GM_ABS_TRANS_ONLY_BITS - (!state->frame_state.allow_high_precision_mv ? 1 : 0); precBits = GM_TRANS_ONLY_PREC_BITS - (!state->frame_state.allow_high_precision_mv ? 1 : 0); } else { absBits = GM_ABS_TRANS_BITS; precBits = GM_TRANS_PREC_BITS; } } s32 precDiff = WARPEDMODEL_PREC_BITS - precBits; s32 round = (idx % 3) == 2 ? (1 << WARPEDMODEL_PREC_BITS) : 0; s32 sub = (idx % 3) == 2 ? (1 << precBits) : 0; s32 mx = (1 << absBits); s32 r = (state->PrevGmParams.coefs[ref][idx] >> precDiff) - sub; s32 val = av1_decode_signed_subexp_with_ref(bs, -mx, mx + 1, r); if (val < 0) { val = -val; state->GmParams.coefs[ref][idx] = (-(val << precDiff) + round); } else { state->GmParams.coefs[ref][idx] = (val << precDiff) + round; } } static s32 av1_get_relative_dist(s32 a, s32 b, AV1State *state) { if (!state->enable_order_hint) return 0; s32 diff = a - b; s32 m = 1 << (state->OrderHintBits - 1); diff = (diff & (m - 1)) - (diff & m); return diff; } static void av1_setup_past_independence(AV1State *state) { u32 ref, i; for (ref = AV1_LAST_FRAME; ref <= AV1_ALTREF_FRAME; ref++) { for (i = 0; i <= 5; i++) { state->PrevGmParams.coefs[ref][i] = ((i % 3 == 2) ? 
1 << WARPEDMODEL_PREC_BITS : 0); } } } static void av1_load_previous(AV1State *state, u8 primary_ref_frame, s8 *ref_frame_idx) { s8 prevFrame = ref_frame_idx[primary_ref_frame]; if (prevFrame < 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] load_previous: prevFrame reference index %d is invalid\n", prevFrame)); } else { state->PrevGmParams = state->SavedGmParams[prevFrame]; // load_loop_filter_params( prevFrame ) // load_segmentation_params( prevFrame ) } } static void av1_decode_frame_wrapup(AV1State *state) { u32 i; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { if ((state->frame_state.refresh_frame_flags >> i) & 1) { state->RefOrderHint[i] = state->frame_state.order_hint; state->SavedGmParams[i] = state->GmParams; state->RefFrameType[i] = state->frame_state.frame_type; state->RefUpscaledWidth[i] = state->UpscaledWidth; state->RefFrameHeight[i] = state->height; } } state->frame_state.seen_frame_header = GF_FALSE; //Otherwise (show_existing_frame is equal to 1), if frame_type is equal to KEY_FRAME, the reference frame loading process as specified in section 7.21 is invoked if ((state->frame_state.show_existing_frame) && (state->frame_state.frame_type == AV1_KEY_FRAME)) { state->frame_state.order_hint = state->RefOrderHint[state->frame_state.frame_to_show_map_idx]; //OrderHints[ j + LAST_FRAME ] is set equal to SavedOrderHints[state->frame_to_show_map_idx ][ j + LAST_FRAME ] for j = 0..REFS_PER_FRAME-1. //gm_params[ ref ][ j ] is set equal to SavedGmParams[ frame_to_show_map_idx ][ ref ][ j ] for ref = LAST_FRAME..ALTREF_FRAME, for j = 0..5. state->GmParams = state->SavedGmParams[state->frame_state.frame_to_show_map_idx]; } } static s32 find_latest_forward(u32 curFrameHint, u8 *shiftedOrderHints, u8 *usedFrame) { u32 i; s32 ref = -1; s32 latestOrderHint = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (!usedFrame[i] && ((u32)hint < curFrameHint) && (ref < 0 || hint >= latestOrderHint)) { ref = i; latestOrderHint = hint; } } return ref; } //see 7.8 of AV1 spec static void av1_set_frame_refs(AV1State *state, u8 last_frame_idx, u8 gold_frame_idx, s8 *ref_frame_idx) { u32 i; u8 usedFrame[AV1_NUM_REF_FRAMES]; u8 shiftedOrderHints[AV1_NUM_REF_FRAMES]; for (i = 0; i < AV1_REFS_PER_FRAME; i++) ref_frame_idx[i] = -1; ref_frame_idx[AV1_LAST_FRAME - AV1_LAST_FRAME] = last_frame_idx; ref_frame_idx[AV1_GOLDEN_FRAME - AV1_LAST_FRAME] = gold_frame_idx; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { usedFrame[i] = 0; } usedFrame[last_frame_idx] = 1; usedFrame[gold_frame_idx] = 1; u32 curFrameHint = 1 << (state->OrderHintBits - 1); for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { shiftedOrderHints[i] = curFrameHint + av1_get_relative_dist(state->RefOrderHint[i], state->frame_state.order_hint, state); } u8 lastOrderHint = shiftedOrderHints[last_frame_idx]; u8 goldOrderHint = shiftedOrderHints[gold_frame_idx]; //It is a requirement of bitstream conformance that lastOrderHint is strictly less than curFrameHint. if (lastOrderHint >= curFrameHint) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] non conformant bitstream detected while setting up frame refs: lastOrderHint(%d) shall be stricly less than curFrameHint(%d)\n", lastOrderHint, curFrameHint)); } //It is a requirement of bitstream conformance that goldOrderHint is strictly less than curFrameHint. 
if (goldOrderHint >= curFrameHint) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] non conformant bitstream detected while setting up frame refs: goldOrderHint(%d) shall be stricly less than curFrameHint(%d)\n", lastOrderHint, curFrameHint)); } //find_latest_backward() { s32 ref = -1; s32 latestOrderHint = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (!usedFrame[i] && ((u32)hint >= curFrameHint) && (ref < 0 || hint >= latestOrderHint)) { ref = i; latestOrderHint = hint; } } if (ref >= 0) { ref_frame_idx[AV1_ALTREF_FRAME - AV1_LAST_FRAME] = ref; usedFrame[ref] = 1; } //find_earliest_backward() for BWDREF_FRAME ref = -1; s32 earliestOrderHint = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (!usedFrame[i] && ((u32)hint >= curFrameHint) && (ref < 0 || hint < earliestOrderHint)) { ref = i; earliestOrderHint = hint; } } if (ref >= 0) { ref_frame_idx[AV1_BWDREF_FRAME - AV1_LAST_FRAME] = ref; usedFrame[ref] = 1; } //find_earliest_backward() for ALTREF2_FRAME ref = -1; earliestOrderHint = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (!usedFrame[i] && ((u32)hint >= curFrameHint) && (ref < 0 || hint < earliestOrderHint)) { ref = i; earliestOrderHint = hint; } } if (ref >= 0) { ref_frame_idx[AV1_ALTREF2_FRAME - AV1_LAST_FRAME] = ref; usedFrame[ref] = 1; } //The remaining references are set to be forward references in anti-chronological order as follows: const u8 Ref_Frame_List[AV1_REFS_PER_FRAME - 2] = { AV1_LAST2_FRAME, AV1_LAST3_FRAME, AV1_BWDREF_FRAME, AV1_ALTREF2_FRAME, AV1_ALTREF_FRAME }; for (i = 0; i < AV1_REFS_PER_FRAME - 2; i++) { u8 refFrame = Ref_Frame_List[i]; if (ref_frame_idx[refFrame - AV1_LAST_FRAME] < 0) { s32 last_ref = find_latest_forward(curFrameHint, shiftedOrderHints, usedFrame); if (last_ref >= 0) { ref_frame_idx[refFrame - AV1_LAST_FRAME] = last_ref; usedFrame[last_ref] = 1; } } } //Finally, any remaining references are set to the reference frame with smallest output order as follows: ref = -1; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (ref < 0 || hint < earliestOrderHint) { ref = i; earliestOrderHint = hint; } } for (i = 0; i < AV1_REFS_PER_FRAME; i++) { if (ref_frame_idx[i] < 0) { ref_frame_idx[i] = ref; } } } static void av1_parse_uncompressed_header(GF_BitStream *bs, AV1State *state) { Bool error_resilient_mode = GF_FALSE, allow_screen_content_tools = GF_FALSE, force_integer_mv = GF_FALSE; Bool /*use_ref_frame_mvs = GF_FALSE,*/ FrameIsIntra = GF_FALSE, frame_size_override_flag = GF_FALSE; Bool disable_cdf_update = GF_FALSE; u8 showable_frame; u8 primary_ref_frame; u16 idLen = 0; u32 idx; s8 ref_frame_idx[AV1_REFS_PER_FRAME]; AV1StateFrame *frame_state = &state->frame_state; if (state->frame_id_numbers_present_flag) { idLen = (state->additional_frame_id_length_minus_1 + state->delta_frame_id_length_minus_2 + 3); } frame_state->refresh_frame_flags = 0; showable_frame = 0; if (state->reduced_still_picture_header) { frame_state->key_frame = GF_TRUE; FrameIsIntra = GF_TRUE; frame_state->frame_type = AV1_KEY_FRAME; frame_state->show_frame = GF_TRUE; frame_state->show_existing_frame = 0; } else { frame_state->show_existing_frame = gf_bs_read_int_log(bs, 1, "show_existing_frame"); if (frame_state->show_existing_frame == GF_TRUE) { frame_state->frame_to_show_map_idx = gf_bs_read_int_log(bs, 3, "frame_to_show_map_idx"); frame_state->frame_type = state->RefFrameType[frame_state->frame_to_show_map_idx]; if 
(state->decoder_model_info_present_flag && !state->equal_picture_interval) { gf_bs_read_int_log(bs, state->frame_presentation_time_length, "frame_presentation_time"); } frame_state->refresh_frame_flags = 0; if (state->frame_id_numbers_present_flag) { gf_bs_read_int_log(bs, idLen, "display_frame_id"); } if (frame_state->frame_type == AV1_KEY_FRAME) { frame_state->refresh_frame_flags = AV1_ALL_FRAMES; } /* if (film_grain_params_present) { load_grain_params(frame_to_show_map_idx) }*/ return; } frame_state->frame_type = gf_bs_read_int_log(bs, 2, "frame_type"); FrameIsIntra = (frame_state->frame_type == AV1_INTRA_ONLY_FRAME || frame_state->frame_type == AV1_KEY_FRAME); frame_state->show_frame = gf_bs_read_int_log(bs, 1, "show_frame"); if (frame_state->is_first_frame) { frame_state->key_frame = frame_state->seen_seq_header && frame_state->show_frame && frame_state->frame_type == AV1_KEY_FRAME && frame_state->seen_frame_header; } if (frame_state->show_frame && state->decoder_model_info_present_flag && !state->equal_picture_interval) { gf_bs_read_int_log(bs, state->frame_presentation_time_length, "frame_presentation_time"); } if (frame_state->show_frame) { showable_frame = frame_state->frame_type != AV1_KEY_FRAME; } else { showable_frame = gf_bs_read_int_log(bs, 1, "showable_frame"); } if (frame_state->frame_type == AV1_SWITCH_FRAME || (frame_state->frame_type == AV1_KEY_FRAME && frame_state->show_frame)) error_resilient_mode = GF_TRUE; else error_resilient_mode = gf_bs_read_int_log(bs, 1, "error_resilient_mode"); } if ((frame_state->frame_type == AV1_KEY_FRAME) && frame_state->show_frame) { u32 i; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { state->RefValid[i] = 0; state->RefOrderHint[i] = 0; } for (i = 0; i < AV1_REFS_PER_FRAME; i++) { state->OrderHints[AV1_LAST_FRAME + i] = 0; } } disable_cdf_update = gf_bs_read_int_log(bs, 1, "disable_cdf_update"); if (state->seq_force_screen_content_tools == 2/*SELECT_SCREEN_CONTENT_TOOLS*/) { allow_screen_content_tools = gf_bs_read_int_log(bs, 1, "allow_screen_content_tools"); } else { allow_screen_content_tools = state->seq_force_screen_content_tools; } if (allow_screen_content_tools) { if (state->seq_force_integer_mv == 2/*SELECT_INTEGER_MV*/) { force_integer_mv = gf_bs_read_int_log(bs, 1, "force_integer_mv"); } else { force_integer_mv = state->seq_force_integer_mv; } } else { force_integer_mv = 0; } if (FrameIsIntra) { force_integer_mv = 1; } if (state->frame_id_numbers_present_flag) { gf_bs_read_int_log(bs, idLen, "current_frame_id"); } if (frame_state->frame_type == AV1_SWITCH_FRAME) frame_size_override_flag = GF_TRUE; else if (state->reduced_still_picture_header) frame_size_override_flag = GF_FALSE; else frame_size_override_flag = gf_bs_read_int_log(bs, 1, "frame_size_override_flag"); frame_state->order_hint = gf_bs_read_int_log(bs, state->OrderHintBits, "order_hint"); if (FrameIsIntra || error_resilient_mode) { primary_ref_frame = AV1_PRIMARY_REF_NONE; } else { primary_ref_frame = gf_bs_read_int_log(bs, 3, "primary_ref_frame"); } if (state->decoder_model_info_present_flag) { u8 buffer_removal_time_present_flag = gf_bs_read_int_log(bs, 1, "buffer_removal_time_present_flag"); if (buffer_removal_time_present_flag) { u32 opNum; for (opNum = 0; opNum < state->operating_points_count; opNum++) { if (state->decoder_model_present_for_this_op[opNum]) { u8 opPtIdc = state->operating_point_idc[opNum]; u8 inTemporalLayer = (opPtIdc >> state->temporal_id) & 1; u8 inSpatialLayer = (opPtIdc >> (state->spatial_id + 8)) & 1; if (opPtIdc == 0 || (inTemporalLayer && 
inSpatialLayer)) { gf_bs_read_int_log_idx(bs, state->buffer_removal_time_length, "buffer_removal_time", opNum); } } } } } if (frame_state->frame_type == AV1_SWITCH_FRAME || (frame_state->frame_type == AV1_KEY_FRAME && frame_state->show_frame)) { frame_state->refresh_frame_flags = AV1_ALL_FRAMES; } else { frame_state->refresh_frame_flags = gf_bs_read_int_log(bs, 8, "refresh_frame_flags"); } if (!FrameIsIntra || frame_state->refresh_frame_flags != AV1_ALL_FRAMES) { if (error_resilient_mode && state->enable_order_hint) { u32 i = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { u8 ref_order_hint = gf_bs_read_int_log_idx(bs, state->OrderHintBits, "ref_order_hint", i); if (ref_order_hint != state->RefOrderHint[i]) { state->RefValid[i] = 0; } state->RefOrderHint[i] = ref_order_hint; } } } u8 allow_intrabc = 0; if (frame_state->frame_type == AV1_KEY_FRAME) { av1_frame_size(bs, state, frame_size_override_flag); av1_render_size(bs); if (allow_screen_content_tools && state->UpscaledWidth == state->width) { allow_intrabc = gf_bs_read_int_log(bs, 1, "allow_intrabc"); } } else { if (frame_state->frame_type == AV1_INTRA_ONLY_FRAME) { av1_frame_size(bs, state, frame_size_override_flag); av1_render_size(bs); if (allow_screen_content_tools && state->UpscaledWidth == state->width) { allow_intrabc = gf_bs_read_int_log(bs, 1, "allow_intrabc"); } } else { u32 i = 0; Bool frame_refs_short_signaling = GF_FALSE; if (state->enable_order_hint) { frame_refs_short_signaling = gf_bs_read_int_log(bs, 1, "frame_refs_short_signaling"); if (frame_refs_short_signaling) { u8 last_frame_idx = gf_bs_read_int_log(bs, 3, "last_frame_idx"); u8 gold_frame_idx = gf_bs_read_int_log(bs, 3, "gold_frame_idx"); av1_set_frame_refs(state, last_frame_idx, gold_frame_idx, ref_frame_idx); } } for (i = 0; i < AV1_REFS_PER_FRAME; i++) { if (!frame_refs_short_signaling) ref_frame_idx[i] = gf_bs_read_int_log_idx(bs, 3, "ref_frame_idx", i); if (state->frame_id_numbers_present_flag) { u32 n = state->delta_frame_id_length_minus_2 + 2; /*delta_frame_id_minus_1 =*/ gf_bs_read_int_log_idx(bs, n, "delta_frame_id_minus1", i); //DeltaFrameId = delta_frame_id_minus_1 + 1; //expectedFrameId[i] = ((current_frame_id + (1 << idLen) - DeltaFrameId) % (1 << idLen)); } } if (frame_size_override_flag && !error_resilient_mode) { frame_size_with_refs(bs, state, frame_size_override_flag, ref_frame_idx); } else { av1_frame_size(bs, state, frame_size_override_flag); av1_render_size(bs); } frame_state->allow_high_precision_mv = 0; if (!force_integer_mv) { frame_state->allow_high_precision_mv = gf_bs_read_int_log(bs, 1, "allow_high_precision_mv"); } read_interpolation_filter(bs); gf_bs_read_int_log(bs, 1, "is_motion_mode_switchable"); if (!(error_resilient_mode || !state->enable_ref_frame_mvs)) { gf_bs_read_int_log(bs, 1, "use_ref_frame_mvs"); } } } if (!FrameIsIntra) { u32 i; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { u8 refFrame = AV1_LAST_FRAME + i; u8 ridx = ref_frame_idx[i]; if (ridx >= 0) { u8 hint = state->RefOrderHint[ridx]; state->OrderHints[refFrame] = hint; /* if ( !enable_order_hint ) { RefFrameSignBias[ refFrame ] = 0; } else { RefFrameSignBias[ refFrame ] = get_relative_dist( hint, OrderHint) > 0; } */ } } } if (!(state->reduced_still_picture_header || disable_cdf_update)) gf_bs_read_int_log(bs, 1, "disable_frame_end_update_cdf"); if (primary_ref_frame == AV1_PRIMARY_REF_NONE) { //init_non_coeff_cdfs(); av1_setup_past_independence(state); } else { //load_cdfs(ref_frame_idx[primary_ref_frame]); av1_load_previous(state, primary_ref_frame, ref_frame_idx); } 
av1_parse_tile_info(bs, state); //quantization_params( ): u8 base_q_idx = gf_bs_read_int_log(bs, 8, "base_q_idx"); s32 DeltaQUDc = 0; s32 DeltaQUAc = 0; s32 DeltaQVDc = 0; s32 DeltaQVAc = 0; s32 DeltaQYDc = av1_delta_q(bs, "DeltaQYDc_coded", "DeltaQYDc"); if (!state->config->monochrome) { u8 diff_uv_delta = 0; if (state->separate_uv_delta_q) diff_uv_delta = gf_bs_read_int_log(bs, 1, "diff_uv_delta"); DeltaQUDc = av1_delta_q(bs, "DeltaQUDc_coded", "DeltaQUDc"); DeltaQUAc = av1_delta_q(bs, "DeltaQUAc_coded", "DeltaQUAc"); if (diff_uv_delta) { DeltaQVDc = av1_delta_q(bs, "DeltaQVDc_coded", "DeltaQVDc"); DeltaQVAc = av1_delta_q(bs, "DeltaQVAc_coded", "DeltaQVAc"); } } if (gf_bs_read_int_log(bs, 1, "using_qmatrix")) { gf_bs_read_int_log(bs, 4, "qm_y"); gf_bs_read_int_log(bs, 4, "qm_u"); if (!state->separate_uv_delta_q) { gf_bs_read_int_log(bs, 4, "qm_v"); } } u8 seg_features_SEG_LVL_ALT_Q_enabled[8] = { 0,0,0,0,0,0,0,0 }; s32 seg_features_SEG_LVL_ALT_Q[8] = { 0,0,0,0,0,0,0,0 }; //segmentation_params( ): u8 segmentation_enabled = gf_bs_read_int_log(bs, 1, "segmentation_enabled"); if (segmentation_enabled) { /*u8 segmentation_temporal_update = 0;*/ u8 segmentation_update_data = 1; if (primary_ref_frame != AV1_PRIMARY_REF_NONE) { u8 segmentation_update_map = gf_bs_read_int_log(bs, 1, "segmentation_update_map"); if (segmentation_update_map == 1) gf_bs_read_int_log(bs, 1, "segmentation_temporal_update"); segmentation_update_data = gf_bs_read_int_log(bs, 1, "segmentation_update_data"); } if (segmentation_update_data == 1) { u32 i, j; for (i = 0; i < 8/*=MAX_SEGMENTS*/; i++) { for (j = 0; j < 8 /*=SEG_LVL_MAX*/; j++) { if (/*feature_enabled = */gf_bs_read_int_log_idx2(bs, 1, "feature_enabled", i, j) == 1) { s32 val; u32 bitsToRead = Segmentation_Feature_Bits[j]; //this is SEG_LVL_ALT_Q if (!j) seg_features_SEG_LVL_ALT_Q_enabled[i] = 1; if (Segmentation_Feature_Signed[j] == 1) { val = gf_bs_read_int_log_idx2(bs, 1 + bitsToRead, "signed_feature_value", i, j); } else { val = gf_bs_read_int_log_idx2(bs, bitsToRead, "feature_value", i, j); } if (!j) seg_features_SEG_LVL_ALT_Q[i] = val; } } } //ignore all init steps } } //delta_q_params(): /*u8 delta_q_res = 0;*/ u8 delta_q_present = 0; if (base_q_idx > 0) { delta_q_present = gf_bs_read_int_log(bs, 1, "delta_q_present"); } if (delta_q_present) { gf_bs_read_int_log(bs, 2, "delta_q_res"); } //delta_lf_params(): u8 delta_lf_present = 0; /*u8 delta_lf_res = 0; u8 delta_lf_multi = 0;*/ if (delta_q_present) { if (!allow_intrabc) { delta_lf_present = gf_bs_read_int_log(bs, 1, "delta_lf_present"); } if (delta_lf_present) { gf_bs_read_int_log(bs, 2, "delta_lf_res"); gf_bs_read_int_log(bs, 1, "delta_lf_multi"); } } //init lossless stuff! 
u8 CodedLossless = 1; for (idx = 0; idx < 8; idx++) { u8 qindex = av1_get_qindex(GF_TRUE, idx, base_q_idx, delta_q_present, 0/*CurrentQIndex always ignored at this level of parsin*/, segmentation_enabled, seg_features_SEG_LVL_ALT_Q_enabled, seg_features_SEG_LVL_ALT_Q); Bool LosslessArray = (qindex == 0) && (DeltaQYDc == 0) && (DeltaQUAc == 0) && (DeltaQUDc == 0) && (DeltaQVAc == 0) && (DeltaQVDc == 0); if (!LosslessArray) CodedLossless = 0; } Bool AllLossless = CodedLossless && (state->width == state->UpscaledWidth); //loop_filter_params(): if (!CodedLossless && !allow_intrabc) { u8 loop_filter_level_0 = gf_bs_read_int_log(bs, 6, "loop_filter_level_0"); u8 loop_filter_level_1 = gf_bs_read_int_log(bs, 6, "loop_filter_level_1"); if (!state->config->monochrome) { if (loop_filter_level_0 || loop_filter_level_1) { gf_bs_read_int_log(bs, 6, "loop_filter_level_2"); gf_bs_read_int_log(bs, 6, "loop_filter_level_3"); } } gf_bs_read_int_log(bs, 3, "loop_filter_sharpness"); u8 loop_filter_delta_enabled = gf_bs_read_int_log(bs, 1, "loop_filter_delta_enabled"); if (loop_filter_delta_enabled == 1) { u8 loop_filter_delta_update = gf_bs_read_int_log(bs, 1, "loop_filter_delta_update"); if (loop_filter_delta_update) { u32 i; for (i = 0; i < 8/*TOTAL_REFS_PER_FRAME*/; i++) { u8 update_ref_delta = gf_bs_read_int_log_idx(bs, 1, "update_ref_delta", i); if (update_ref_delta == 1) { gf_bs_read_int_log_idx(bs, 1 + 6, "loop_filter_ref_deltas", i); } } for (i = 0; i < 2; i++) { u8 update_mode_delta = gf_bs_read_int_log_idx(bs, 1, "update_mode_delta", i); if (update_mode_delta) { gf_bs_read_int_log_idx(bs, 1 + 6, "loop_filter_mode_deltas", i); } } } } } //cdef_params( ): if (!CodedLossless && !allow_intrabc && state->enable_cdef) { gf_bs_read_int_log(bs, 2, "cdef_damping_minus_3"); u8 cdef_bits = gf_bs_read_int_log(bs, 2, "cdef_bits"); u32 i, num_cd = 1 << cdef_bits; for (i = 0; i < num_cd; i++) { gf_bs_read_int_log_idx(bs, 4, "cdef_y_pri_strength", i); gf_bs_read_int_log_idx(bs, 2, "cdef_y_sec_strength", i); if (!state->config->monochrome) { gf_bs_read_int_log_idx(bs, 4, "cdef_uv_pri_strength", i); gf_bs_read_int_log_idx(bs, 2, "cdef_uv_sec_strength", i); } } } //lr_params( ) : if (!AllLossless && !allow_intrabc && state->enable_restoration) { u32 i, nb_planes = state->config->monochrome ? 
1 : 3; u8 UsesLr = 0; u8 usesChromaLr = 0; for (i = 0; i < nb_planes; i++) { u8 lr_type = gf_bs_read_int_log_idx(bs, 2, "lr_type", i); //FrameRestorationType[i] = Remap_Lr_Type[lr_type] if (lr_type != AV1_RESTORE_NONE) { UsesLr = 1; if (i > 0) { usesChromaLr = 1; } } } if (UsesLr) { if (state->use_128x128_superblock) { gf_bs_read_int_log(bs, 1, "lr_unit_shift_minus_1"); } else { u8 lr_unit_shift = gf_bs_read_int_log(bs, 1, "lr_unit_shift"); if (lr_unit_shift) { gf_bs_read_int_log(bs, 1, "lr_unit_extra_shift"); //lr_unit_shift += lr_unit_extra_shift; } } if (state->config->chroma_subsampling_x && state->config->chroma_subsampling_y && usesChromaLr) { gf_bs_read_int_log(bs, 1, "lr_uv_shift"); } } } //read_tx_mode(): if (CodedLossless == 1) { } else { gf_bs_read_int_log(bs, 1, "tx_mode_select"); } //frame_reference_mode( ): u8 reference_select = 0; if (FrameIsIntra) { } else { reference_select = gf_bs_read_int_log(bs, 1, "reference_select"); } //skip_mode_params( ): u8 skipModeAllowed = 0; if (FrameIsIntra || !reference_select || !state->enable_order_hint) { } else { u32 i; s32 forwardIdx = -1; s32 backwardIdx = -1; s32 forwardHint = 0; s32 backwardHint = 0; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { u8 refHint = state->RefOrderHint[ref_frame_idx[i]]; if (av1_get_relative_dist(refHint, frame_state->order_hint, state) < 0) { if (forwardIdx < 0 || av1_get_relative_dist(refHint, forwardHint, state) > 0) { forwardIdx = i; forwardHint = refHint; } } else if (av1_get_relative_dist(refHint, frame_state->order_hint, state) > 0) { if (backwardIdx < 0 || av1_get_relative_dist(refHint, backwardHint, state) < 0) { backwardIdx = i; backwardHint = refHint; } } } if (forwardIdx < 0) { skipModeAllowed = 0; } else if (backwardIdx >= 0) { skipModeAllowed = 1; //SkipModeFrame[0] = AV1_LAST_FRAME + MIN(forwardIdx, backwardIdx); //SkipModeFrame[1] = AV1_LAST_FRAME + MAX(forwardIdx, backwardIdx); } else { s32 secondForwardIdx = -1; s32 secondForwardHint = 0; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { u8 refHint = state->RefOrderHint[ref_frame_idx[i]]; if (av1_get_relative_dist(refHint, forwardHint, state) < 0) { if (secondForwardIdx < 0 || av1_get_relative_dist(refHint, secondForwardHint, state) > 0) { secondForwardIdx = i; secondForwardHint = refHint; } } } if (secondForwardIdx < 0) { skipModeAllowed = 0; } else { skipModeAllowed = 1; //SkipModeFrame[ 0 ] = LAST_FRAME + Min(forwardIdx, secondForwardIdx) //SkipModeFrame[ 1 ] = LAST_FRAME + Max(forwardIdx, secondForwardIdx) } } } if (skipModeAllowed) { gf_bs_read_int_log(bs, 1, "skip_mode_present"); } if (FrameIsIntra || error_resilient_mode || !state->enable_warped_motion) { } else { gf_bs_read_int_log(bs, 1, "allow_warped_motion"); } gf_bs_read_int_log(bs, 1, "reduced_tx"); //global_motion_params( ) u32 ref; for (ref = AV1_LAST_FRAME; ref <= AV1_ALTREF_FRAME; ref++) { u32 i; for (i = 0; i < 6; i++) { state->GmParams.coefs[ref][i] = ((i % 3 == 2) ? 1 << WARPEDMODEL_PREC_BITS : 0); } } if (!FrameIsIntra) { u32 refs; for (refs = AV1_LAST_FRAME; refs <= AV1_ALTREF_FRAME; refs++) { u8 type = AV1_GMC_IDENTITY; Bool is_global = gf_bs_read_int_log_idx(bs, 1, "is_global", refs); if (is_global) { Bool is_rot_zoom = gf_bs_read_int_log_idx(bs, 1, "is_rot_zoom", refs); if (is_rot_zoom) { type = AV1_GMC_ROTZOOM; } else { Bool is_trans = gf_bs_read_int_log_idx(bs, 1, "is_translation", refs); type = is_trans ? 
AV1_GMC_TRANSLATION : AV1_GMC_AFFINE; } } if (type >= AV1_GMC_ROTZOOM) { av1_read_global_param(state, bs, type, refs, 2); av1_read_global_param(state, bs, type, refs, 3); if (type == AV1_GMC_AFFINE) { av1_read_global_param(state, bs, type, refs, 4); av1_read_global_param(state, bs, type, refs, 5); } else { state->GmParams.coefs[refs][4] = -state->GmParams.coefs[refs][3]; state->GmParams.coefs[refs][5] = state->GmParams.coefs[refs][2]; } } if (type >= AV1_GMC_TRANSLATION) { av1_read_global_param(state, bs, type, refs, 0); av1_read_global_param(state, bs, type, refs, 1); } } } //film_grain_params() if (!state->film_grain_params_present || (!state->frame_state.show_frame && !showable_frame)) { } else { u8 apply_grain = gf_bs_read_int_log(bs, 1, "apply_grain"); if (apply_grain) { gf_bs_read_int_log(bs, 16, "grain_seed"); u8 update_grain = 1; if (state->frame_state.frame_type == AV1_INTER_FRAME) { update_grain = gf_bs_read_int_log(bs, 1, "update_grain"); } if (!update_grain) { gf_bs_read_int_log(bs, 3, "film_grain_params_ref_idx"); } else { u32 i, num_y_points = gf_bs_read_int_log(bs, 4, "num_y_points"); for (i = 0; i < num_y_points; i++) { gf_bs_read_int_log_idx(bs, 8, "point_y_value", i); gf_bs_read_int_log_idx(bs, 8, "point_y_scaling", i); } u8 chroma_scaling_from_luma = 0; if (!state->config->monochrome) chroma_scaling_from_luma = gf_bs_read_int_log(bs, 1, "chroma_scaling_from_luma"); u8 num_cb_points = 0; u8 num_cr_points = 0; if (state->config->monochrome || chroma_scaling_from_luma || ((state->config->chroma_subsampling_x == 1) && (state->config->chroma_subsampling_y == 1) && (num_y_points == 0)) ) { } else { num_cb_points = gf_bs_read_int_log(bs, 4, "num_cb_points"); for (i = 0; i < num_cb_points; i++) { gf_bs_read_int_log_idx(bs, 8, "point_cb_value", i); gf_bs_read_int_log_idx(bs, 8, "point_cb_scaling", i); } num_cr_points = gf_bs_read_int_log(bs, 4, "num_cr_points"); for (i = 0; i < num_cr_points; i++) { gf_bs_read_int_log_idx(bs, 8, "point_cr_value", i); gf_bs_read_int_log_idx(bs, 8, "point_cr_scaling", i); } } gf_bs_read_int_log(bs, 2, "grain_scaling_minus_8"); u8 ar_coeff_lag = gf_bs_read_int_log(bs, 2, "ar_coeff_lag"); u16 numPosLuma = 2 * ar_coeff_lag * (ar_coeff_lag + 1); u16 numPosChroma = numPosLuma; if (num_y_points) { numPosChroma = numPosLuma + 1; for (i = 0; i < numPosLuma; i++) { gf_bs_read_int_log_idx(bs, 8, "ar_coeffs_y_plus_128", i); } } if (chroma_scaling_from_luma || num_cb_points) { for (i = 0; i < numPosChroma; i++) { gf_bs_read_int_log_idx(bs, 8, "ar_coeffs_cb_plus_128", i); } } if (chroma_scaling_from_luma || num_cr_points) { for (i = 0; i < numPosChroma; i++) { gf_bs_read_int_log_idx(bs, 8, "ar_coeffs_cr_plus_128", i); } } gf_bs_read_int_log(bs, 2, "ar_coeff_shift_minus_6"); gf_bs_read_int_log(bs, 2, "grain_scale_shift"); if (num_cb_points) { gf_bs_read_int_log(bs, 8, "cb_mult"); gf_bs_read_int_log(bs, 8, "cb_luma_mult"); gf_bs_read_int_log(bs, 9, "cb_offset"); } if (num_cr_points) { gf_bs_read_int_log(bs, 8, "cr_mult"); gf_bs_read_int_log(bs, 8, "cr_luma_mult"); gf_bs_read_int_log(bs, 9, "cr_offset"); } gf_bs_read_int_log(bs, 1, "overlap_flag"); gf_bs_read_int_log(bs, 1, "clip_to_restricted_range"); } } } //end of uncompressed header !! 
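/* All fields of the uncompressed frame header have now been consumed; the
   caller (av1_parse_frame_header) records how many bytes this took in
   state->frame_state.uncompressed_header_bytes. */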
} GF_EXPORT void gf_av1_init_state(AV1State *state) { if (!state) return; memset(state, 0, sizeof(AV1State)); state->color_primaries = 2; state->transfer_characteristics = 2; state->matrix_coefficients = 2; } GF_EXPORT void gf_av1_reset_state(AV1State *state, Bool is_destroy) { GF_List *l1, *l2; if (state->frame_state.header_obus) { while (gf_list_count(state->frame_state.header_obus)) { GF_AV1_OBUArrayEntry *a = (GF_AV1_OBUArrayEntry*)gf_list_pop_back(state->frame_state.header_obus); if (a->obu) gf_free(a->obu); gf_free(a); } } if (state->frame_state.frame_obus) { while (gf_list_count(state->frame_state.frame_obus)) { GF_AV1_OBUArrayEntry *a = (GF_AV1_OBUArrayEntry*)gf_list_pop_back(state->frame_state.frame_obus); if (a->obu) gf_free(a->obu); gf_free(a); } } l1 = state->frame_state.frame_obus; l2 = state->frame_state.header_obus; memset(&state->frame_state, 0, sizeof(AV1StateFrame)); state->frame_state.is_first_frame = GF_TRUE; if (is_destroy) { gf_list_del(l1); gf_list_del(l2); if (state->bs) { if (gf_bs_get_position(state->bs)) { u32 size; gf_bs_get_content_no_truncate(state->bs, &state->frame_obus, &size, &state->frame_obus_alloc); } gf_bs_del(state->bs); } state->bs = NULL; } else { state->frame_state.frame_obus = l1; state->frame_state.header_obus = l2; if (state->bs) gf_bs_seek(state->bs, 0); } } static GF_Err av1_parse_tile_group(GF_BitStream *bs, AV1State *state, u64 obu_start, u64 obu_size) { u32 TileNum, tg_start = 0, tg_end = 0; Bool numTiles = state->tileCols * state->tileRows; Bool tile_start_and_end_present_flag = GF_FALSE; GF_Err e = GF_OK; if (numTiles > 1) tile_start_and_end_present_flag = gf_bs_read_int_log(bs, 1, "tile_start_and_end_present_flag"); if (numTiles == 1 || !tile_start_and_end_present_flag) { tg_start = 0; tg_end = numTiles - 1; /*state->frame_state.tg[0].start_idx = 0; state->frame_state.tg[0].end_idx = numTiles - 1;*/ } else { u32 tileBits = state->tileColsLog2 + state->tileRowsLog2; /*state->frame_state.tg[state->frame_state.tg_idx].start_idx*/ tg_start = gf_bs_read_int_log(bs, tileBits, "tg_start"); /*state->frame_state.tg[state->frame_state.tg_idx].end_idx*/ tg_end = gf_bs_read_int_log(bs, tileBits, "tg_end"); } /*state->frame_state.tg_idx++;*/ gf_bs_align(bs); if (tg_end >= GF_ARRAY_LENGTH(state->frame_state.tiles)) return GF_NON_COMPLIANT_BITSTREAM; state->frame_state.nb_tiles_in_obu = 0; for (TileNum = tg_start; TileNum <= tg_end; TileNum++) { u32 tile_start_offset, tile_size; /*u32 tileRow = TileNum / state->tileCols; u32 tileCol = TileNum % state->tileCols;*/ Bool lastTile = TileNum == tg_end; u64 pos = gf_bs_get_position(bs); if (lastTile) { tile_start_offset = (u32)(pos - obu_start); tile_size = (u32)(obu_size - (pos - obu_start)); } else { u64 tile_size_minus_1 = aom_av1_le(bs, state->tile_size_bytes, "tile_size_minus_1"); pos = gf_bs_get_position(bs); tile_start_offset = (u32)(pos - obu_start); tile_size = (u32)(tile_size_minus_1 + 1/* + state->tile_size_bytes*/); } if (tile_start_offset + tile_size > obu_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Error parsing tile group, tile %d start %d + size %d exceeds OBU length %d\n", TileNum, tile_start_offset, tile_size, obu_size)); e = GF_NON_COMPLIANT_BITSTREAM; break; } state->frame_state.tiles[state->frame_state.nb_tiles_in_obu].obu_start_offset = tile_start_offset; state->frame_state.tiles[state->frame_state.nb_tiles_in_obu].size = tile_size; gf_bs_skip_bytes(bs, tile_size); state->frame_state.nb_tiles_in_obu++; } if (tg_end == numTiles - 1) { av1_decode_frame_wrapup(state); } return e; } 
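/* Minimal usage sketch (not part of the original source, never compiled):
   how a caller might walk a size-field-delimited AV1 OBU stream with
   gf_av1_parse_obu(). The function name and the flat data/size buffer are
   assumptions made for this illustration; error handling is reduced to the
   bare minimum. */
#if 0 //illustration only
static GF_Err sketch_parse_av1_stream(const u8 *data, u32 size)
{
	AV1State state;
	GF_BitStream *bs;
	GF_Err e = GF_OK;

	gf_av1_init_state(&state);
	/* frame-level parsing expects a caller-provided config */
	state.config = gf_odf_av1_cfg_new();
	bs = gf_bs_new(data, size, GF_BITSTREAM_READ);
	if (!bs) e = GF_OUT_OF_MEM;

	while (!e && gf_bs_available(bs)) {
		ObuType obu_type;
		u64 obu_size = 0;
		u32 obu_hdr_size = 0;

		/* each OBU is expected to carry obu_has_size_field; otherwise the
		   caller must preset obu_size to the full OBU size before the call */
		e = gf_av1_parse_obu(bs, &obu_type, &obu_size, &obu_hdr_size, &state);
		if (e == GF_BUFFER_TOO_SMALL) { e = GF_OK; break; } /* incomplete OBU, wait for more data */

		if (!e && (obu_type == OBU_TEMPORAL_DELIMITER)) {
			/* a new temporal unit starts here */
		}
	}
	if (bs) gf_bs_del(bs);
	gf_av1_reset_state(&state, GF_TRUE);
	if (state.config) gf_odf_av1_cfg_del(state.config);
	return e;
}
#endif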
static void av1_parse_frame_header(GF_BitStream *bs, AV1State *state) { AV1StateFrame *frame_state = &state->frame_state; if (frame_state->seen_frame_header == GF_FALSE) { u64 pos = gf_bs_get_position(bs); state->frame_state.show_existing_frame = GF_FALSE; frame_state->seen_frame_header = GF_TRUE; av1_parse_uncompressed_header(bs, state); state->frame_state.is_first_frame = GF_FALSE; state->frame_state.uncompressed_header_bytes = (u32) (gf_bs_get_position(bs) - pos); if (state->frame_state.show_existing_frame) { av1_decode_frame_wrapup(state); frame_state->seen_frame_header = GF_FALSE; } else { //TileNum = 0; frame_state->seen_frame_header = GF_TRUE; } } } static GF_Err av1_parse_frame(GF_BitStream *bs, AV1State *state, u64 obu_start, u64 obu_size) { av1_parse_frame_header(bs, state); //byte alignment gf_bs_align(bs); return av1_parse_tile_group(bs, state, obu_start, obu_size); } static void on_aom_av1_eos(void *_state) { AV1State *state = (AV1State *)_state; state->bs_overread = GF_TRUE; } GF_EXPORT GF_Err gf_av1_parse_obu(GF_BitStream *bs, ObuType *obu_type, u64 *obu_size, u32 *obu_hdr_size, AV1State *state) { GF_Err e = GF_OK; u32 hdr_size; u64 pos = gf_bs_get_position(bs); if (!bs || !obu_type || !state) return GF_BAD_PARAM; state->bs_overread = GF_FALSE; gf_bs_set_eos_callback(bs, on_aom_av1_eos, state); state->obu_extension_flag = state->obu_has_size_field = 0; state->temporal_id = state->spatial_id = 0; state->frame_state.uncompressed_header_bytes = 0; e = gf_av1_parse_obu_header(bs, obu_type, &state->obu_extension_flag, &state->obu_has_size_field, &state->temporal_id, &state->spatial_id); if (e) return e; if (state->obu_has_size_field) { *obu_size = (u32)gf_av1_leb128_read(bs, NULL); } else { if (*obu_size >= 1 + state->obu_extension_flag) { *obu_size = *obu_size - 1 - state->obu_extension_flag; } else { GF_LOG(state->config ? GF_LOG_WARNING : GF_LOG_DEBUG, GF_LOG_CODING, ("[AV1] computed OBU size "LLD" (input value = "LLU"). 
Skipping.\n", *obu_size - 1 - state->obu_extension_flag, *obu_size)); return GF_NON_COMPLIANT_BITSTREAM; } } hdr_size = (u32)(gf_bs_get_position(bs) - pos); if ((gf_bs_available(bs) < *obu_size) || state->bs_overread) { gf_bs_seek(bs, pos); return GF_BUFFER_TOO_SMALL; } *obu_size += hdr_size; if (obu_hdr_size) *obu_hdr_size = hdr_size; if (*obu_type != OBU_SEQUENCE_HEADER && *obu_type != OBU_TEMPORAL_DELIMITER && state->OperatingPointIdc != 0 && state->obu_extension_flag == 1) { u32 inTemporalLayer = (state->OperatingPointIdc >> state->temporal_id) & 1; u32 inSpatialLayer = (state->OperatingPointIdc >> (state->spatial_id + 8)) & 1; if (!inTemporalLayer || !inSpatialLayer) { *obu_type = -1; gf_bs_seek(bs, pos + *obu_size); return GF_OK; } } e = GF_OK; switch (*obu_type) { case OBU_SEQUENCE_HEADER: av1_parse_sequence_header_obu(bs, state); if (gf_bs_get_position(bs) > pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Sequence header parsing consumed too many bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } gf_bs_seek(bs, pos + *obu_size); break; case OBU_METADATA: #if 0 //TODO + sample groups const ObuMetadataType metadata_type = (u32)read_leb128(bs, NULL); we should check for 16 bits limit(AV1MetadataSampleGroupEntry) for ISOBMFF bindings, see https ://github.com/AOMediaCodec/av1-isobmff/pull/86#issuecomment-416659538 if (metadata_type == OBU_METADATA_TYPE_ITUT_T35) { } else if (metadata_type == OBU_METADATA_TYPE_HDR_CLL) { } else if (metadata_type == OBU_METADATA_TYPE_HDR_MDCV) { } else if (metadata_type == OBU_METADATA_TYPE_SCALABILITY) { } else if (metadata_type == METADATA_TYPE_TIMECODE) { } #endif GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[AV1] parsing for metadata is not implemented. Forwarding.\n")); if (gf_bs_get_position(bs) > pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Metadata parsing consumed too many bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } gf_bs_seek(bs, pos + *obu_size); break; case OBU_FRAME_HEADER: case OBU_REDUNDANT_FRAME_HEADER: if (state->config) { av1_parse_frame_header(bs, state); if (gf_bs_get_position(bs) > pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Frame header parsing consumed too many bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } } gf_bs_seek(bs, pos + *obu_size); break; case OBU_FRAME: e = av1_parse_frame(bs, state, pos, *obu_size); if (gf_bs_get_position(bs) != pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Frame parsing did not consume the right number of bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } gf_bs_seek(bs, pos + *obu_size); break; case OBU_TILE_GROUP: if (state->config) { e = av1_parse_tile_group(bs, state, pos, *obu_size); if (gf_bs_get_position(bs) != pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Tile group parsing did not consume the right number of bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } } gf_bs_seek(bs, pos + *obu_size); break; case OBU_TEMPORAL_DELIMITER: state->frame_state.seen_frame_header = GF_FALSE; case OBU_PADDING: gf_bs_seek(bs, pos + *obu_size); break; default: GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] unknown OBU type %u (size "LLU"). 
Skipping.\n", *obu_type, *obu_size)); gf_bs_seek(bs, pos + *obu_size); break; } return e; } GF_EXPORT GF_Err gf_media_prores_parse_bs(GF_BitStream *bs, GF_ProResFrameInfo *prores_frame) { u32 i, j; u64 start, pos; memset(prores_frame, 0, sizeof(GF_ProResFrameInfo)); start = gf_bs_get_position(bs); if (gf_bs_available(bs) < 10) return GF_BUFFER_TOO_SMALL; prores_frame->frame_size = gf_bs_read_u32(bs); prores_frame->frame_identifier = gf_bs_read_u32(bs); if (prores_frame->frame_identifier != GF_4CC('i','c','p','f')) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[ProRes] Invalid frame identifier, expected \"icpf\" got \"%s\"\n", gf_4cc_to_str(prores_frame->frame_identifier) )); gf_bs_seek(bs, start); return GF_NON_COMPLIANT_BITSTREAM; } /*parse frame header*/ pos = gf_bs_get_position(bs); prores_frame->frame_hdr_size = gf_bs_read_u16(bs); if (gf_bs_available(bs) + 2 < prores_frame->frame_hdr_size) { gf_bs_seek(bs, start); return GF_BUFFER_TOO_SMALL; } gf_bs_read_u8(bs); prores_frame->version = gf_bs_read_u8(bs); prores_frame->encoder_id = gf_bs_read_u32(bs); prores_frame->width = gf_bs_read_u16(bs); prores_frame->height = gf_bs_read_u16(bs); prores_frame->chroma_format = gf_bs_read_int(bs, 2); gf_bs_read_int(bs, 2); prores_frame->interlaced_mode = gf_bs_read_int(bs, 2); gf_bs_read_int(bs, 2); prores_frame->aspect_ratio_information = gf_bs_read_int(bs, 4); prores_frame->framerate_code = gf_bs_read_int(bs, 4); prores_frame->color_primaries = gf_bs_read_u8(bs); prores_frame->transfer_characteristics = gf_bs_read_u8(bs); prores_frame->matrix_coefficients = gf_bs_read_u8(bs); gf_bs_read_int(bs, 4); prores_frame->alpha_channel_type = gf_bs_read_int(bs, 4); gf_bs_read_int(bs, 14); prores_frame->load_luma_quant_matrix = gf_bs_read_int(bs, 1); prores_frame->load_chroma_quant_matrix = gf_bs_read_int(bs, 1); if (prores_frame->load_luma_quant_matrix) { for (i=0; i<8; i++) { for (j=0; j<8; j++) { prores_frame->luma_quant_matrix[i][j] = gf_bs_read_u8(bs); } } } if (prores_frame->load_chroma_quant_matrix) { for (i=0; i<8; i++) { for (j=0; j<8; j++) { prores_frame->chroma_quant_matrix[i][j] = gf_bs_read_u8(bs); } } } pos = gf_bs_get_position(bs) - pos; if (pos != prores_frame->frame_hdr_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[ProRes] Invalid frame header size, expected %d got %d\n", prores_frame->frame_hdr_size, (u32) pos)); gf_bs_seek(bs, start); return GF_NON_COMPLIANT_BITSTREAM; } prores_frame->nb_pic = ((prores_frame->interlaced_mode==1) || (prores_frame->interlaced_mode==2)) ? 
2 : 1; gf_bs_seek(bs, start); return GF_OK; } #endif /*GPAC_DISABLE_AV_PARSERS*/ GF_EXPORT u8 gf_mp3_version(u32 hdr) { return ((hdr >> 19) & 0x3); } GF_EXPORT const char *gf_mp3_version_name(u32 hdr) { u32 v = gf_mp3_version(hdr); switch (v) { case 0: return "MPEG-2.5"; case 1: return "Reserved"; case 2: return "MPEG-2"; case 3: return "MPEG-1"; default: return "Unknown"; } } #ifndef GPAC_DISABLE_AV_PARSERS GF_EXPORT u8 gf_mp3_layer(u32 hdr) { return 4 - (((hdr >> 17) & 0x3)); } GF_EXPORT u8 gf_mp3_num_channels(u32 hdr) { if (((hdr >> 6) & 0x3) == 3) return 1; return 2; } GF_EXPORT u16 gf_mp3_sampling_rate(u32 hdr) { u16 res; /* extract the necessary fields from the MP3 header */ u8 version = gf_mp3_version(hdr); u8 sampleRateIndex = (hdr >> 10) & 0x3; switch (sampleRateIndex) { case 0: res = 44100; break; case 1: res = 48000; break; case 2: res = 32000; break; default: GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[MPEG-1/2 Audio] Samplerate index not valid\n")); return 0; } /*reserved or MPEG-1*/ if (version & 1) return res; /*MPEG-2*/ res /= 2; /*MPEG-2.5*/ if (version == 0) res /= 2; return res; } GF_EXPORT u16 gf_mp3_window_size(u32 hdr) { u8 version = gf_mp3_version(hdr); u8 layer = gf_mp3_layer(hdr); if (layer == 3) { if (version == 3) return 1152; return 576; } if (layer == 2) return 1152; return 384; } GF_EXPORT u8 gf_mp3_object_type_indication(u32 hdr) { switch (gf_mp3_version(hdr)) { case 3: return GF_CODECID_MPEG_AUDIO; case 2: case 0: return GF_CODECID_MPEG2_PART3; default: return 0x00; } } /*aligned bitrate parsing with libMAD*/ static u32 const bitrate_table[5][15] = { /* MPEG-1 */ { 0, 32000, 64000, 96000, 128000, 160000, 192000, 224000, /* Layer I */ 256000, 288000, 320000, 352000, 384000, 416000, 448000 }, { 0, 32000, 48000, 56000, 64000, 80000, 96000, 112000, /* Layer II */ 128000, 160000, 192000, 224000, 256000, 320000, 384000 }, { 0, 32000, 40000, 48000, 56000, 64000, 80000, 96000, /* Layer III */ 112000, 128000, 160000, 192000, 224000, 256000, 320000 }, /* MPEG-2 LSF */ { 0, 32000, 48000, 56000, 64000, 80000, 96000, 112000, /* Layer I */ 128000, 144000, 160000, 176000, 192000, 224000, 256000 }, { 0, 8000, 16000, 24000, 32000, 40000, 48000, 56000, /* Layers */ 64000, 80000, 96000, 112000, 128000, 144000, 160000 } /* II & III */ }; u32 gf_mp3_bit_rate(u32 hdr) { u8 version = gf_mp3_version(hdr); u8 layer = gf_mp3_layer(hdr); u8 bitRateIndex = (hdr >> 12) & 0xF; u32 lidx; /*MPEG-1*/ if (version & 1) { if (!layer) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[MPEG-1/2 Audio] layer index not valid\n")); return 0; } lidx = layer - 1; } /*MPEG-2/2.5*/ else { lidx = 3 + (layer >> 1); } if (lidx>4) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[MPEG-1/2 Audio] layer index not valid\n")); return 0; } return bitrate_table[lidx][bitRateIndex]; } GF_EXPORT u16 gf_mp3_frame_size(u32 hdr) { u8 version = gf_mp3_version(hdr); u8 layer = gf_mp3_layer(hdr); u32 pad = ((hdr >> 9) & 0x1) ? 
1 : 0; u32 bitrate = gf_mp3_bit_rate(hdr); u32 samplerate = gf_mp3_sampling_rate(hdr); u32 frameSize = 0; if (!samplerate || !bitrate) return 0; if (layer == 1) { frameSize = ((12 * bitrate / samplerate) + pad) * 4; } else { u32 slots_per_frame = 144; if ((layer == 3) && !(version & 1)) slots_per_frame = 72; frameSize = (slots_per_frame * bitrate / samplerate) + pad; } return (u16)frameSize; } GF_EXPORT u32 gf_mp3_get_next_header(FILE* in) { u8 b, state = 0; u32 dropped = 0; unsigned char bytes[4]; bytes[0] = bytes[1] = bytes[2] = bytes[3] = 0; while (1) { if (gf_fread(&b, 1, in) == 0) return 0; if (state == 3) { bytes[state] = b; return GF_4CC((u32)bytes[0], bytes[1], bytes[2], bytes[3]); } if (state == 2) { if (((b & 0xF0) == 0) || ((b & 0xF0) == 0xF0) || ((b & 0x0C) == 0x0C)) { if (bytes[1] == 0xFF) state = 1; else state = 0; } else { bytes[state] = b; state = 3; } } if (state == 1) { if (((b & 0xE0) == 0xE0) && ((b & 0x18) != 0x08) && ((b & 0x06) != 0)) { bytes[state] = b; state = 2; } else { state = 0; } } if (state == 0) { if (b == 0xFF) { bytes[state] = b; state = 1; } else { if ((dropped == 0) && ((b & 0xE0) == 0xE0) && ((b & 0x18) != 0x08) && ((b & 0x06) != 0)) { bytes[0] = (u8)0xFF; bytes[1] = b; state = 2; } else { dropped++; } } } } return 0; } GF_EXPORT u32 gf_mp3_get_next_header_mem(const u8 *buffer, u32 size, u32 *pos) { u32 cur; u8 b, state = 0; u32 dropped = 0; unsigned char bytes[4]; bytes[0] = bytes[1] = bytes[2] = bytes[3] = 0; cur = 0; *pos = 0; while (cur < size) { b = (u8)buffer[cur]; cur++; if (state == 3) { u32 val; bytes[state] = b; val = GF_4CC((u32)bytes[0], bytes[1], bytes[2], bytes[3]); if (gf_mp3_frame_size(val)) { *pos = dropped; return val; } state = 0; dropped = cur; } if (state == 2) { if (((b & 0xF0) == 0) || ((b & 0xF0) == 0xF0) || ((b & 0x0C) == 0x0C)) { if (bytes[1] == 0xFF) { state = 1; dropped += 1; } else { state = 0; dropped = cur; } } else { bytes[state] = b; state = 3; } } if (state == 1) { if (((b & 0xE0) == 0xE0) && ((b & 0x18) != 0x08) && ((b & 0x06) != 0)) { bytes[state] = b; state = 2; } else { state = 0; dropped = cur; } } if (state == 0) { if (b == 0xFF) { bytes[state] = b; state = 1; } else { dropped++; } } } return 0; } #endif /*GPAC_DISABLE_AV_PARSERS*/ GF_EXPORT Bool gf_avc_is_rext_profile(u8 profile_idc) { switch (profile_idc) { case 100: case 110: case 122: case 244: case 44: case 83: case 86: case 118: case 128: case 138: case 139: case 134: case 135: return GF_TRUE; default: return GF_FALSE; } } GF_EXPORT const char *gf_avc_get_profile_name(u8 video_prof) { switch (video_prof) { case 0x42: return "Baseline"; case 0x4D: return "Main"; case 0x53: return "Scalable Baseline"; case 0x56: return "Scalable High"; case 0x58: return "Extended"; case 0x64: return "High"; case 0x6E: return "High 10"; case 0x7A: return "High 4:2:2"; case 0x90: case 0xF4: return "High 4:4:4"; default: return "Unknown"; } } GF_EXPORT const char *gf_hevc_get_profile_name(u8 video_prof) { switch (video_prof) { case 0x01: return "Main"; case 0x02: return "Main 10"; case 0x03: return "Main Still Picture"; default: return "Unknown"; } } GF_EXPORT const char *gf_avc_hevc_get_chroma_format_name(u8 chroma_format) { switch (chroma_format) { case 1: return "YUV 4:2:0"; case 2: return "YUV 4:2:2"; case 3: return "YUV 4:4:4"; default: return "Unknown"; } } #ifndef GPAC_DISABLE_AV_PARSERS u32 gf_bs_read_ue_log_idx3(GF_BitStream *bs, const char *fname, s32 idx1, s32 idx2, s32 idx3) { u32 val=0, code; s32 nb_lead = -1; u32 bits = 0; for (code=0; !code; nb_lead++) { if 
(nb_lead>=32) { //gf_bs_read_int keeps returning 0 on EOS, so if no more bits available, rbsp was truncated otherwise code is broken in rbsp) //we only test once nb_lead>=32 to avoid testing at each bit read if (!gf_bs_available(bs)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Core] exp-golomb read failed, not enough bits in bitstream !\n")); } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Core] corrupted exp-golomb code, %d leading zeros, max 31 allowed !\n", nb_lead)); } return 0; } code = gf_bs_read_int(bs, 1); bits++; } if (nb_lead) { val = gf_bs_read_int(bs, nb_lead); val += (1 << nb_lead) - 1; bits += nb_lead; } if (fname) { gf_bs_log_idx(bs, bits, fname, val, idx1, idx2, idx3); } return val; } #define gf_bs_read_ue_log_idx2(_bs, _fname, _idx1, _idx2) gf_bs_read_ue_log_idx3(_bs, _fname, (s32) _idx1, (s32) _idx2, -1) #define gf_bs_read_ue_log_idx(_bs, _fname, _idx) gf_bs_read_ue_log_idx3(_bs, _fname, (s32) _idx, -1, -1) #define gf_bs_read_ue_log(_bs, _fname) gf_bs_read_ue_log_idx3(_bs, _fname, -1, -1, -1) u32 gf_bs_read_ue(GF_BitStream *bs) { return gf_bs_read_ue_log(bs, NULL); } s32 gf_bs_read_se(GF_BitStream *bs) { u32 v = gf_bs_read_ue(bs); if ((v & 0x1) == 0) return (s32)(0 - (v >> 1)); return (v + 1) >> 1; } s32 gf_bs_read_se_log_idx2(GF_BitStream *bs, const char *fname, s32 idx1, s32 idx2) { s32 res = gf_bs_read_se(bs); if (fname) gf_bs_log_idx(bs, -1, fname, res, idx1, idx2, -1); return res; } #define gf_bs_read_se_log_idx(_bs, _fname, _idx) gf_bs_read_se_log_idx2(_bs, _fname, (s32) _idx, -1) #define gf_bs_read_se_log(_bs, _fname) gf_bs_read_se_log_idx2(_bs, _fname, -1, -1) void gf_bs_write_ue(GF_BitStream *bs, u32 num) { s32 length = 1; s32 temp = ++num; while (temp != 1) { temp >>= 1; length += 2; } gf_bs_write_int(bs, 0, length >> 1); gf_bs_write_int(bs, num, (length + 1) >> 1); } void gf_bs_write_se(GF_BitStream *bs, s32 num) { u32 v; if (num <= 0) v = (-1 * num) << 1; else v = (num << 1) - 1; gf_bs_write_ue(bs, v); } u32 gf_media_nalu_is_start_code(GF_BitStream *bs) { u8 s1, s2, s3, s4; Bool is_sc = 0; u64 pos = gf_bs_get_position(bs); s1 = gf_bs_read_int(bs, 8); s2 = gf_bs_read_int(bs, 8); if (!s1 && !s2) { s3 = gf_bs_read_int(bs, 8); if (s3 == 0x01) is_sc = 3; else if (!s3) { s4 = gf_bs_read_int(bs, 8); if (s4 == 0x01) is_sc = 4; } } gf_bs_seek(bs, pos + is_sc); return is_sc; } /*read that amount of data at each IO access rather than fetching byte by byte...*/ #define AVC_CACHE_SIZE 4096 static u32 gf_media_nalu_locate_start_code_bs(GF_BitStream *bs, Bool locate_trailing) { u32 v, bpos, nb_cons_zeros = 0; char avc_cache[AVC_CACHE_SIZE]; u64 end, cache_start, load_size; u64 start = gf_bs_get_position(bs); if (start < 3) return 0; load_size = 0; bpos = 0; cache_start = 0; end = 0; v = 0xffffffff; while (!end) { /*refill cache*/ if (bpos == (u32)load_size) { if (!gf_bs_available(bs)) break; load_size = gf_bs_available(bs); if (load_size > AVC_CACHE_SIZE) load_size = AVC_CACHE_SIZE; bpos = 0; cache_start = gf_bs_get_position(bs); gf_bs_read_data(bs, avc_cache, (u32)load_size); } v = ( (v<<8) & 0xFFFFFF00) | ((u32) avc_cache[bpos]); bpos++; if (locate_trailing) { if ((v & 0x000000FF) == 0) nb_cons_zeros++; else nb_cons_zeros = 0; } if (v == 0x00000001) end = cache_start + bpos - 4; else if ((v & 0x00FFFFFF) == 0x00000001) end = cache_start + bpos - 3; } gf_bs_seek(bs, start); if (!end) end = gf_bs_get_size(bs); if (locate_trailing) { if (nb_cons_zeros >= 3) return (u32)(end - start - nb_cons_zeros); } return (u32)(end - start); } GF_EXPORT u32 
gf_media_nalu_next_start_code_bs(GF_BitStream *bs) { return gf_media_nalu_locate_start_code_bs(bs, 0); } GF_EXPORT u32 gf_media_nalu_next_start_code(const u8 *data, u32 data_len, u32 *sc_size) { u32 avail = data_len; const u8 *cur = data; while (cur) { u32 v, bpos; u8 *next_zero = memchr(cur, 0, avail); if (!next_zero) return data_len; v = 0xffffff00; bpos = (u32)(next_zero - data) + 1; while (1) { u8 cval; if (bpos == (u32)data_len) return data_len; cval = data[bpos]; v = ((v << 8) & 0xFFFFFF00) | ((u32)cval); bpos++; if (v == 0x00000001) { *sc_size = 4; return bpos - 4; } else if ((v & 0x00FFFFFF) == 0x00000001) { *sc_size = 3; return bpos - 3; } if (cval) break; } if (bpos >= data_len) break; cur = data + bpos; avail = data_len - bpos; } return data_len; } Bool gf_media_avc_slice_is_intra(AVCState *avc) { switch (avc->s_info.slice_type) { case GF_AVC_TYPE_I: case GF_AVC_TYPE2_I: case GF_AVC_TYPE_SI: case GF_AVC_TYPE2_SI: return 1; default: return 0; } } #if 0 //unused Bool gf_media_avc_slice_is_IDR(AVCState *avc) { if (avc->sei.recovery_point.valid) { avc->sei.recovery_point.valid = 0; return 1; } if (avc->s_info.nal_unit_type != GF_AVC_NALU_IDR_SLICE) return 0; return gf_media_avc_slice_is_intra(avc); } #endif static const struct { u32 w, h; } avc_hevc_sar[] = { { 0, 0 }, { 1, 1 }, { 12, 11 }, { 10, 11 }, { 16, 11 }, { 40, 33 }, { 24, 11 }, { 20, 11 }, { 32, 11 }, { 80, 33 }, { 18, 11 }, { 15, 11 }, { 64, 33 }, { 160,99 }, { 4, 3 }, { 3, 2 }, { 2, 1 } }; /*ISO 14496-10 (N11084) E.1.2*/ static void avc_parse_hrd_parameters(GF_BitStream *bs, AVC_HRD *hrd) { int i, cpb_cnt_minus1; cpb_cnt_minus1 = gf_bs_read_ue_log(bs, "cpb_cnt_minus1"); if (cpb_cnt_minus1 > 31) GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] invalid cpb_cnt_minus1 value: %d (expected in [0;31])\n", cpb_cnt_minus1)); gf_bs_read_int_log(bs, 4, "bit_rate_scale"); gf_bs_read_int_log(bs, 4, "cpb_size_scale"); /*for( SchedSelIdx = 0; SchedSelIdx <= cpb_cnt_minus1; SchedSelIdx++ ) {*/ for (i = 0; i <= cpb_cnt_minus1; i++) { gf_bs_read_ue_log_idx(bs, "bit_rate_value_minus1", i); gf_bs_read_ue_log_idx(bs, "cpb_size_value_minus1", i); gf_bs_read_int_log_idx(bs, 1, "cbr_flag", i); } gf_bs_read_int_log(bs, 5, "initial_cpb_removal_delay_length_minus1"); hrd->cpb_removal_delay_length_minus1 = gf_bs_read_int_log(bs, 5, "cpb_removal_delay_length_minus1"); hrd->dpb_output_delay_length_minus1 = gf_bs_read_int_log(bs, 5, "dpb_output_delay_length_minus1"); hrd->time_offset_length = gf_bs_read_int_log(bs, 5, "time_offset_length"); return; } /*returns the nal_size without emulation prevention bytes*/ u32 gf_media_nalu_emulation_bytes_add_count(u8 *buffer, u32 nal_size) { u32 i = 0, emulation_bytes_count = 0; u8 num_zero = 0; while (i < nal_size) { /*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003 other than the following sequences shall not occur at any byte-aligned position: \96 0x00000300 \96 0x00000301 \96 0x00000302 \96 0x00000303" */ if (num_zero == 2 && (u8)buffer[i] < 0x04) { /*emulation code found*/ num_zero = 0; emulation_bytes_count++; if (!buffer[i]) num_zero = 1; } else { if (!buffer[i]) num_zero++; else num_zero = 0; } i++; } return emulation_bytes_count; } u32 gf_media_nalu_add_emulation_bytes(const u8 *buffer_src, u8 *buffer_dst, u32 nal_size) { u32 i = 0, emulation_bytes_count = 0; u8 num_zero = 0; while (i < nal_size) { /*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003 other than the following sequences shall not occur at any byte-aligned position: 
0x00000300 0x00000301 0x00000302 0x00000303" */ if (num_zero == 2 && (u8)buffer_src[i] < 0x04) { /*add emulation code*/ num_zero = 0; buffer_dst[i + emulation_bytes_count] = 0x03; emulation_bytes_count++; if (!buffer_src[i]) num_zero = 1; } else { if (!buffer_src[i]) num_zero++; else num_zero = 0; } buffer_dst[i + emulation_bytes_count] = buffer_src[i]; i++; } return nal_size + emulation_bytes_count; } /*returns the nal_size without emulation prevention bytes*/ u32 gf_media_nalu_emulation_bytes_remove_count(const u8 *buffer, u32 nal_size) { u32 i = 0, emulation_bytes_count = 0; u8 num_zero = 0; if (!buffer || !nal_size) return 0; while (i < nal_size) { /*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003 other than the following sequences shall not occur at any byte-aligned position: \96 0x00000300 \96 0x00000301 \96 0x00000302 \96 0x00000303" */ if (num_zero == 2 && buffer[i] == 0x03 && i + 1 < nal_size /*next byte is readable*/ && (u8)buffer[i + 1] < 0x04) { /*emulation code found*/ num_zero = 0; emulation_bytes_count++; i++; } if (!buffer[i]) num_zero++; else num_zero = 0; i++; } return emulation_bytes_count; } /*nal_size is updated to allow better error detection*/ GF_EXPORT u32 gf_media_nalu_remove_emulation_bytes(const u8 *buffer_src, u8 *buffer_dst, u32 nal_size) { u32 i = 0, emulation_bytes_count = 0; u8 num_zero = 0; while (i < nal_size) { /*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003 other than the following sequences shall not occur at any byte-aligned position: 0x00000300 0x00000301 0x00000302 0x00000303" */ if (num_zero == 2 && buffer_src[i] == 0x03 && i + 1 < nal_size /*next byte is readable*/ && (u8)buffer_src[i + 1] < 0x04) { /*emulation code found*/ num_zero = 0; emulation_bytes_count++; i++; } buffer_dst[i - emulation_bytes_count] = buffer_src[i]; if (!buffer_src[i]) num_zero++; else num_zero = 0; i++; } return nal_size - emulation_bytes_count; } static s32 gf_avc_read_sps_bs_internal(GF_BitStream *bs, AVCState *avc, u32 subseq_sps, u32 *vui_flag_pos, u32 nal_hdr) { AVC_SPS *sps; s32 mb_width, mb_height, sps_id = -1; u32 profile_idc, level_idc, pcomp, i, chroma_format_idc, cl = 0, cr = 0, ct = 0, cb = 0, luma_bd, chroma_bd; u8 separate_colour_plane_flag = 0; if (!vui_flag_pos) { gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); } if (!bs) { return -1; } if (!nal_hdr) { gf_bs_read_int_log(bs, 1, "forbidden_zero_bit"); gf_bs_read_int_log(bs, 2, "nal_ref_idc"); gf_bs_read_int_log(bs, 5, "nal_unit_type"); } profile_idc = gf_bs_read_int_log(bs, 8, "profile_idc"); pcomp = gf_bs_read_int_log(bs, 8, "profile_compatibility"); /*sanity checks*/ if (pcomp & 0x3) return -1; level_idc = gf_bs_read_int_log(bs, 8, "level_idc"); /*SubsetSps is used to be sure that AVC SPS are not going to be scratched by subset SPS. According to the SVC standard, subset SPS can have the same sps_id than its base layer, but it does not refer to the same SPS. */ sps_id = gf_bs_read_ue_log(bs, "sps_id") + GF_SVC_SSPS_ID_SHIFT * subseq_sps; if ((sps_id < 0) || (sps_id >= 32)) { return -1; } luma_bd = chroma_bd = 0; sps = &avc->sps[sps_id]; chroma_format_idc = sps->ChromaArrayType = 1; sps->state |= subseq_sps ? 
AVC_SUBSPS_PARSED : AVC_SPS_PARSED; /*High Profile and SVC*/ switch (profile_idc) { case 100: case 110: case 122: case 244: case 44: /*sanity checks: note1 from 7.4.2.1.1 of iso/iec 14496-10-N11084*/ if (pcomp & 0xE0) return -1; case 83: case 86: case 118: case 128: chroma_format_idc = gf_bs_read_ue_log(bs, "chroma_format_idc"); sps->ChromaArrayType = chroma_format_idc; if (chroma_format_idc == 3) { separate_colour_plane_flag = gf_bs_read_int_log(bs, 1, "separate_colour_plane_flag"); /* Depending on the value of separate_colour_plane_flag, the value of the variable ChromaArrayType is assigned as follows. \96 If separate_colour_plane_flag is equal to 0, ChromaArrayType is set equal to chroma_format_idc. \96 Otherwise (separate_colour_plane_flag is equal to 1), ChromaArrayType is set equal to 0. */ if (separate_colour_plane_flag) sps->ChromaArrayType = 0; } luma_bd = gf_bs_read_ue_log(bs, "luma_bit_depth"); chroma_bd = gf_bs_read_ue_log(bs, "chroma_bit_depth"); /*qpprime_y_zero_transform_bypass_flag = */ gf_bs_read_int_log(bs, 1, "qpprime_y_zero_transform_bypass_flag"); /*seq_scaling_matrix_present_flag*/ if (gf_bs_read_int_log(bs, 1, "seq_scaling_matrix_present_flag")) { u32 k; for (k = 0; k < 8; k++) { if (gf_bs_read_int_log_idx(bs, 1, "seq_scaling_list_present_flag", k)) { u32 z, last = 8, next = 8; u32 sl = k < 6 ? 16 : 64; for (z = 0; z < sl; z++) { if (next) { s32 delta = gf_bs_read_se(bs); next = (last + delta + 256) % 256; } last = next ? next : last; } } } } break; } sps->profile_idc = profile_idc; sps->level_idc = level_idc; sps->prof_compat = pcomp; sps->log2_max_frame_num = gf_bs_read_ue_log(bs, "log2_max_frame_num") + 4; sps->poc_type = gf_bs_read_ue_log(bs, "poc_type"); sps->chroma_format = chroma_format_idc; sps->luma_bit_depth_m8 = luma_bd; sps->chroma_bit_depth_m8 = chroma_bd; if (sps->poc_type == 0) { sps->log2_max_poc_lsb = gf_bs_read_ue_log(bs, "log2_max_poc_lsb") + 4; } else if (sps->poc_type == 1) { sps->delta_pic_order_always_zero_flag = gf_bs_read_int_log(bs, 1, "delta_pic_order_always_zero_flag"); sps->offset_for_non_ref_pic = gf_bs_read_se_log(bs, "offset_for_non_ref_pic"); sps->offset_for_top_to_bottom_field = gf_bs_read_se_log(bs, "offset_for_top_to_bottom_field"); sps->poc_cycle_length = gf_bs_read_ue_log(bs, "poc_cycle_length"); if (sps->poc_cycle_length > GF_ARRAY_LENGTH(sps->offset_for_ref_frame)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[avc-h264] offset_for_ref_frame overflow from poc_cycle_length\n")); return -1; } for (i = 0; i < sps->poc_cycle_length; i++) sps->offset_for_ref_frame[i] = gf_bs_read_se_log_idx(bs, "offset_for_ref_frame", i); } if (sps->poc_type > 2) { return -1; } sps->max_num_ref_frames = gf_bs_read_ue_log(bs, "max_num_ref_frames"); sps->gaps_in_frame_num_value_allowed_flag = gf_bs_read_int_log(bs, 1, "gaps_in_frame_num_value_allowed_flag"); mb_width = gf_bs_read_ue_log(bs, "pic_width_in_mbs_minus1") + 1; mb_height = gf_bs_read_ue_log(bs, "pic_height_in_map_units_minus1") + 1; sps->frame_mbs_only_flag = gf_bs_read_int_log(bs, 1, "frame_mbs_only_flag"); sps->width = mb_width * 16; sps->height = (2 - sps->frame_mbs_only_flag) * mb_height * 16; if (!sps->frame_mbs_only_flag) sps->mb_adaptive_frame_field_flag = gf_bs_read_int_log(bs, 1, "mb_adaptive_frame_field_flag"); gf_bs_read_int_log(bs, 1, "direct_8x8_inference_flag"); if (gf_bs_read_int_log(bs, 1, "frame_cropping_flag")) { int CropUnitX, CropUnitY, SubWidthC = -1, SubHeightC = -1; if (chroma_format_idc == 1) { SubWidthC = 2; SubHeightC = 2; } else if (chroma_format_idc == 2) { SubWidthC = 
2; SubHeightC = 1; } else if ((chroma_format_idc == 3) && (separate_colour_plane_flag == 0)) { SubWidthC = 1; SubHeightC = 1; } if (sps->ChromaArrayType == 0) { assert(SubWidthC == -1); CropUnitX = 1; CropUnitY = 2 - sps->frame_mbs_only_flag; } else { CropUnitX = SubWidthC; CropUnitY = SubHeightC * (2 - sps->frame_mbs_only_flag); } cl = gf_bs_read_ue_log(bs, "frame_crop_left_offset"); cr = gf_bs_read_ue_log(bs, "frame_crop_right_offset"); ct = gf_bs_read_ue_log(bs, "frame_crop_top_offset"); cb = gf_bs_read_ue_log(bs, "frame_crop_bottom_offset"); sps->width -= CropUnitX * (cl + cr); sps->height -= CropUnitY * (ct + cb); cl *= CropUnitX; cr *= CropUnitX; ct *= CropUnitY; cb *= CropUnitY; } sps->crop.left = cl; sps->crop.right = cr; sps->crop.top = ct; sps->crop.bottom = cb; if (vui_flag_pos) { *vui_flag_pos = (u32)gf_bs_get_bit_offset(bs); } /*vui_parameters_present_flag*/ sps->vui_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_parameters_present_flag"); if (sps->vui_parameters_present_flag) { sps->vui.aspect_ratio_info_present_flag = gf_bs_read_int_log(bs, 1, "aspect_ratio_info_present_flag"); if (sps->vui.aspect_ratio_info_present_flag) { s32 aspect_ratio_idc = gf_bs_read_int_log(bs, 8, "aspect_ratio_idc"); if (aspect_ratio_idc == 255) { sps->vui.par_num = gf_bs_read_int_log(bs, 16, "aspect_ratio_num"); sps->vui.par_den = gf_bs_read_int_log(bs, 16, "aspect_ratio_den"); } else if (aspect_ratio_idc < GF_ARRAY_LENGTH(avc_hevc_sar) ) { sps->vui.par_num = avc_hevc_sar[aspect_ratio_idc].w; sps->vui.par_den = avc_hevc_sar[aspect_ratio_idc].h; } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] Unknown aspect_ratio_idc: your video may have a wrong aspect ratio. Contact the GPAC team!\n")); } } sps->vui.overscan_info_present_flag = gf_bs_read_int_log(bs, 1, "overscan_info_present_flag"); if (sps->vui.overscan_info_present_flag) gf_bs_read_int_log(bs, 1, "overscan_appropriate_flag"); /* default values */ sps->vui.video_format = 5; sps->vui.colour_primaries = 2; sps->vui.transfer_characteristics = 2; sps->vui.matrix_coefficients = 2; /* now read values if possible */ sps->vui.video_signal_type_present_flag = gf_bs_read_int_log(bs, 1, "video_signal_type_present_flag"); if (sps->vui.video_signal_type_present_flag) { sps->vui.video_format = gf_bs_read_int_log(bs, 3, "video_format"); sps->vui.video_full_range_flag = gf_bs_read_int_log(bs, 1, "video_full_range_flag"); sps->vui.colour_description_present_flag = gf_bs_read_int_log(bs, 1, "colour_description_present_flag"); if (sps->vui.colour_description_present_flag) { sps->vui.colour_primaries = gf_bs_read_int_log(bs, 8, "colour_primaries"); sps->vui.transfer_characteristics = gf_bs_read_int_log(bs, 8, "transfer_characteristics"); sps->vui.matrix_coefficients = gf_bs_read_int_log(bs, 8, "matrix_coefficients"); } } if (gf_bs_read_int_log(bs, 1, "chroma_location_info_present_flag")) { gf_bs_read_ue_log(bs, "chroma_sample_location_type_top_field"); gf_bs_read_ue_log(bs, "chroma_sample_location_type_bottom_field"); } sps->vui.timing_info_present_flag = gf_bs_read_int_log(bs, 1, "timing_info_present_flag"); if (sps->vui.timing_info_present_flag) { sps->vui.num_units_in_tick = gf_bs_read_int_log(bs, 32, "num_units_in_tick"); sps->vui.time_scale = gf_bs_read_int_log(bs, 32, "time_scale"); sps->vui.fixed_frame_rate_flag = gf_bs_read_int_log(bs, 1, "fixed_frame_rate_flag"); } sps->vui.nal_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "nal_hrd_parameters_present_flag"); if (sps->vui.nal_hrd_parameters_present_flag) 
avc_parse_hrd_parameters(bs, &sps->vui.hrd); sps->vui.vcl_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vcl_hrd_parameters_present_flag"); if (sps->vui.vcl_hrd_parameters_present_flag) avc_parse_hrd_parameters(bs, &sps->vui.hrd); if (sps->vui.nal_hrd_parameters_present_flag || sps->vui.vcl_hrd_parameters_present_flag) sps->vui.low_delay_hrd_flag = gf_bs_read_int_log(bs, 1, "low_delay_hrd_flag"); sps->vui.pic_struct_present_flag = gf_bs_read_int_log(bs, 1, "pic_struct_present_flag"); } /*end of seq_parameter_set_data*/ if (subseq_sps) { if ((profile_idc == 83) || (profile_idc == 86)) { u8 extended_spatial_scalability_idc; /*parsing seq_parameter_set_svc_extension*/ gf_bs_read_int_log(bs, 1, "inter_layer_deblocking_filter_control_present_flag"); extended_spatial_scalability_idc = gf_bs_read_int_log(bs, 2, "extended_spatial_scalability_idc"); if (sps->ChromaArrayType == 1 || sps->ChromaArrayType == 2) { gf_bs_read_int_log(bs, 1, "chroma_phase_x_plus1_flag"); } if (sps->ChromaArrayType == 1) { gf_bs_read_int_log(bs, 2, "chroma_phase_y_plus1"); } if (extended_spatial_scalability_idc == 1) { if (sps->ChromaArrayType > 0) { gf_bs_read_int_log(bs, 1, "seq_ref_layer_chroma_phase_x_plus1_flag"); gf_bs_read_int_log(bs, 2, "seq_ref_layer_chroma_phase_y_plus1"); } gf_bs_read_se_log(bs, "seq_scaled_ref_layer_left_offset"); gf_bs_read_se_log(bs, "seq_scaled_ref_layer_top_offset"); gf_bs_read_se_log(bs, "seq_scaled_ref_layer_right_offset"); gf_bs_read_se_log(bs, "seq_scaled_ref_layer_bottom_offset"); } if (gf_bs_read_int_log(bs, 1, "seq_tcoeff_level_prediction_flag")) { gf_bs_read_int_log(bs, 1, "adaptive_tcoeff_level_prediction_flag"); } gf_bs_read_int_log(bs, 1, "slice_header_restriction_flag"); if (gf_bs_read_int_log(bs, 1, "svc_vui_parameters_present")) { u32 vui_ext_num_entries_minus1 = gf_bs_read_ue_log(bs, "vui_ext_num_entries_minus1"); for (i = 0; i <= vui_ext_num_entries_minus1; i++) { u8 vui_ext_nal_hrd_parameters_present_flag, vui_ext_vcl_hrd_parameters_present_flag, vui_ext_timing_info_present_flag; gf_bs_read_int_log(bs, 3, "vui_ext_dependency_id"); gf_bs_read_int_log(bs, 4, "vui_ext_quality_id"); gf_bs_read_int_log(bs, 3, "vui_ext_temporal_id"); vui_ext_timing_info_present_flag = gf_bs_read_int_log(bs, 1, "vui_ext_timing_info_present_flag"); if (vui_ext_timing_info_present_flag) { gf_bs_read_int_log(bs, 32, "vui_ext_num_units_in_tick"); gf_bs_read_int_log(bs, 32, "vui_ext_time_scale"); gf_bs_read_int_log(bs, 1, "vui_ext_fixed_frame_rate_flag"); } vui_ext_nal_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_ext_nal_hrd_parameters_present_flag"); if (vui_ext_nal_hrd_parameters_present_flag) { //hrd_parameters( ) } vui_ext_vcl_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_ext_vcl_hrd_parameters_present_flag"); if (vui_ext_vcl_hrd_parameters_present_flag) { //hrd_parameters( ) } if (vui_ext_nal_hrd_parameters_present_flag || vui_ext_vcl_hrd_parameters_present_flag) { gf_bs_read_int_log(bs, 1, "vui_ext_low_delay_hrd_flag"); } gf_bs_read_int_log(bs, 1, "vui_ext_pic_struct_present_flag"); } } } else if ((profile_idc == 118) || (profile_idc == 128)) { GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[avc-h264] MVC parsing not implemented - skipping parsing end of Subset SPS\n")); return sps_id; } if (gf_bs_read_int_log(bs, 1, "additional_extension2")) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] skipping parsing end of Subset SPS (additional_extension2)\n")); return sps_id; } } return sps_id; } GF_EXPORT s32 gf_avc_read_sps_bs(GF_BitStream *bs, AVCState *avc, u32 
subseq_sps, u32 *vui_flag_pos) { return gf_avc_read_sps_bs_internal(bs, avc, subseq_sps, vui_flag_pos, 0); } GF_EXPORT s32 gf_avc_read_sps(const u8 *sps_data, u32 sps_size, AVCState *avc, u32 subseq_sps, u32 *vui_flag_pos) { s32 sps_id = -1; GF_BitStream *bs; char *sps_data_without_emulation_bytes = NULL; u32 sps_data_without_emulation_bytes_size = 0; if (vui_flag_pos) { /*SPS still contains emulation bytes*/ sps_data_without_emulation_bytes = gf_malloc(sps_size * sizeof(char)); sps_data_without_emulation_bytes_size = gf_media_nalu_remove_emulation_bytes(sps_data, sps_data_without_emulation_bytes, sps_size); bs = gf_bs_new(sps_data_without_emulation_bytes, sps_data_without_emulation_bytes_size, GF_BITSTREAM_READ); *vui_flag_pos = 0; } else { bs = gf_bs_new(sps_data, sps_size, GF_BITSTREAM_READ); } if (!bs) { sps_id = -1; goto exit; } sps_id = gf_avc_read_sps_bs(bs, avc, subseq_sps, vui_flag_pos); exit: gf_bs_del(bs); if (sps_data_without_emulation_bytes) gf_free(sps_data_without_emulation_bytes); return sps_id; } static s32 gf_avc_read_pps_bs_internal(GF_BitStream *bs, AVCState *avc, u32 nal_hdr) { s32 pps_id; AVC_PPS *pps; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!nal_hdr) { gf_bs_read_int_log(bs, 1, "forbidden_zero_bit"); gf_bs_read_int_log(bs, 2, "nal_ref_idc"); gf_bs_read_int_log(bs, 5, "nal_unit_type"); } pps_id = gf_bs_read_ue_log(bs, "pps_id"); if ((pps_id<0) || (pps_id >= 255)) { return -1; } pps = &avc->pps[pps_id]; pps->id = pps_id; if (!pps->status) pps->status = 1; pps->sps_id = gf_bs_read_ue_log(bs, "sps_id"); if ((pps->sps_id<0) || (pps->sps_id >= 32)) { pps->sps_id = 0; return -1; } /*sps_id may be refer to regular SPS or subseq sps, depending on the coded slice referring to the pps*/ if (!avc->sps[pps->sps_id].state && !avc->sps[pps->sps_id + GF_SVC_SSPS_ID_SHIFT].state) { return -1; } avc->pps_active_idx = pps->id; /*set active sps*/ avc->sps_active_idx = pps->sps_id; /*set active sps*/ pps->entropy_coding_mode_flag = gf_bs_read_int_log(bs, 1, "entropy_coding_mode_flag"); pps->pic_order_present = gf_bs_read_int_log(bs, 1, "pic_order_present"); pps->slice_group_count = gf_bs_read_ue_log(bs, "slice_group_count_minus1") + 1; if (pps->slice_group_count > 1) { u32 iGroup; pps->mb_slice_group_map_type = gf_bs_read_ue_log(bs, "mb_slice_group_map_type"); if (pps->mb_slice_group_map_type == 0) { for (iGroup = 0; iGroup <= pps->slice_group_count - 1; iGroup++) gf_bs_read_ue_log_idx(bs, "run_length_minus1", iGroup); } else if (pps->mb_slice_group_map_type == 2) { for (iGroup = 0; iGroup < pps->slice_group_count - 1; iGroup++) { gf_bs_read_ue_log_idx(bs, "top_left", iGroup); gf_bs_read_ue_log_idx(bs, "bottom_right", iGroup); } } else if (pps->mb_slice_group_map_type == 3 || pps->mb_slice_group_map_type == 4 || pps->mb_slice_group_map_type == 5) { gf_bs_read_int_log(bs, 1, "slice_group_change_direction_flag"); gf_bs_read_ue_log(bs, "slice_group_change_rate_minus1"); } else if (pps->mb_slice_group_map_type == 6) { u32 i; pps->pic_size_in_map_units_minus1 = gf_bs_read_ue_log(bs, "pic_size_in_map_units_minus1"); for (i = 0; i <= pps->pic_size_in_map_units_minus1; i++) { gf_bs_read_int_log_idx(bs, (u32)ceil(log(pps->slice_group_count) / log(2)), "slice_group_id", i); } } } pps->num_ref_idx_l0_default_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l0_default_active_minus1"); pps->num_ref_idx_l1_default_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l1_default_active_minus1"); /* if ((pps->ref_count[0] > 32) || (pps->ref_count[1] > 32)) goto exit; */ 
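/* The remaining PPS fields follow: weighted prediction, QP/QS offsets,
   deblocking control and the redundant-picture flag. Only weighted_pred_flag,
   deblocking_filter_control_present_flag and redundant_pic_cnt_present are
   stored for later slice-header parsing; the other fields are read and
   discarded. */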
pps->weighted_pred_flag = gf_bs_read_int_log(bs, 1, "weighted_pred_flag"); gf_bs_read_int_log(bs, 2, "weighted_bipred_idc"); gf_bs_read_se_log(bs, "init_qp_minus26"); gf_bs_read_se_log(bs, "init_qs_minus26"); gf_bs_read_se_log(bs, "chroma_qp_index_offset"); pps->deblocking_filter_control_present_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_control_present_flag"); gf_bs_read_int_log(bs, 1, "constrained_intra_pred"); pps->redundant_pic_cnt_present = gf_bs_read_int_log(bs, 1, "redundant_pic_cnt_present"); return pps_id; } GF_EXPORT s32 gf_avc_read_pps_bs(GF_BitStream *bs, AVCState *avc) { return gf_avc_read_pps_bs_internal(bs, avc, 0); } GF_EXPORT s32 gf_avc_read_pps(const u8 *pps_data, u32 pps_size, AVCState *avc) { GF_BitStream *bs; s32 pps_id; /*PPS still contains emulation bytes*/ bs = gf_bs_new(pps_data, pps_size, GF_BITSTREAM_READ); if (!bs) { return -1; } gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); pps_id = gf_avc_read_pps_bs(bs, avc); gf_bs_del(bs); return pps_id; } #if 0 //unused s32 gf_avc_read_sps_ext(const char *spse_data, u32 spse_size) { GF_BitStream *bs; s32 sps_id; bs = gf_bs_new(spse_data, spse_size, GF_BITSTREAM_READ); sps_id = gf_avc_read_sps_ext_bs(bs); gf_bs_del(bs); return sps_id; } #endif static s32 SVC_ReadNal_header_extension(GF_BitStream *bs, SVC_NALUHeader *NalHeader) { gf_bs_read_int_log(bs, 1, "reserved_one_bit"); NalHeader->idr_pic_flag = gf_bs_read_int_log(bs, 1, "idr_flag"); NalHeader->priority_id = gf_bs_read_int_log(bs, 6, "priority_id"); gf_bs_read_int_log(bs, 1, "no_inter_layer_pred_flag"); NalHeader->dependency_id = gf_bs_read_int_log(bs, 3, "DependencyId"); NalHeader->quality_id = gf_bs_read_int_log(bs, 4, "quality_id"); NalHeader->temporal_id = gf_bs_read_int_log(bs, 3, "temporal_id"); gf_bs_read_int_log(bs, 1, "use_ref_base_pic_flag"); gf_bs_read_int_log(bs, 1, "discardable_flag"); gf_bs_read_int_log(bs, 1, "output_flag"); gf_bs_read_int_log(bs, 2, "reserved_three_2bits"); return 1; } static void ref_pic_list_modification(GF_BitStream *bs, u32 slice_type) { if (slice_type % 5 != 2 && slice_type % 5 != 4) { if (gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l0")) { u32 idx=0, modification_of_pic_nums_idc; do { modification_of_pic_nums_idc = gf_bs_read_ue_log_idx(bs, "modification_of_pic_nums_idc", idx); if (modification_of_pic_nums_idc == 0 || modification_of_pic_nums_idc == 1) { gf_bs_read_ue_log_idx(bs, "abs_diff_pic_num_minus1", idx); } else if (modification_of_pic_nums_idc == 2) { gf_bs_read_ue_log_idx(bs, "long_term_pic_num", idx); } idx++; } while ((modification_of_pic_nums_idc != 3) && gf_bs_available(bs)); } } if (slice_type % 5 == 1) { if (gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l1")) { u32 idx=0, modification_of_pic_nums_idc; do { modification_of_pic_nums_idc = gf_bs_read_ue_log_idx(bs, "modification_of_pic_nums_idc", idx); if (modification_of_pic_nums_idc == 0 || modification_of_pic_nums_idc == 1) { gf_bs_read_ue_log_idx(bs, "abs_diff_pic_num_minus1", idx); } else if (modification_of_pic_nums_idc == 2) { gf_bs_read_ue_log_idx(bs, "long_term_pic_num", idx); } idx++; } while ((modification_of_pic_nums_idc != 3) && gf_bs_available(bs)); } } } static void pred_weight_table(GF_BitStream *bs, u32 slice_type, u32 ChromaArrayType, u32 num_ref_idx_l0_active_minus1, u32 num_ref_idx_l1_active_minus1) { u32 i, j; gf_bs_read_ue_log(bs, "luma_log2_weight_denom"); if (ChromaArrayType != 0) { gf_bs_read_ue_log(bs, "chroma_log2_weight_denom"); } for (i = 0; i <= num_ref_idx_l0_active_minus1; i++) { if 
(gf_bs_read_int_log_idx(bs, 1, "luma_weight_l0_flag", i)) { gf_bs_read_se_log_idx(bs, "luma_weight_l0", i); gf_bs_read_se_log_idx(bs, "luma_offset_l0", i); } if (ChromaArrayType != 0) { if (gf_bs_read_int_log_idx(bs, 1, "chroma_weight_l0_flag", i)) for (j = 0; j < 2; j++) { gf_bs_read_se_log_idx2(bs, "chroma_weight_l0", i, j); gf_bs_read_se_log_idx2(bs, "chroma_offset_l0", i, j); } } } if (slice_type % 5 == 1) { for (i = 0; i <= num_ref_idx_l1_active_minus1; i++) { if (gf_bs_read_int_log_idx(bs, 1, "luma_weight_l1_flag", i)) { gf_bs_read_se_log_idx(bs, "luma_weight_l1", i); gf_bs_read_se_log_idx(bs, "luma_offset_l1", i); } if (ChromaArrayType != 0) { if (gf_bs_read_int_log_idx(bs, 1, "chroma_weight_l1_flag", i)) { for (j = 0; j < 2; j++) { gf_bs_read_se_log_idx2(bs, "chroma_weight_l1", i, j); gf_bs_read_se_log_idx2(bs, "chroma_offset_l1", i, j); } } } } } } static void dec_ref_pic_marking(GF_BitStream *bs, Bool IdrPicFlag) { if (IdrPicFlag) { gf_bs_read_int_log(bs, 1, "no_output_of_prior_pics_flag"); gf_bs_read_int_log(bs, 1, "long_term_reference_flag"); } else { if (gf_bs_read_int_log(bs, 1, "adaptive_ref_pic_marking_mode_flag")) { u32 idx=0, memory_management_control_operation; do { memory_management_control_operation = gf_bs_read_ue_log_idx(bs, "memory_management_control_operation", idx); if (memory_management_control_operation == 1 || memory_management_control_operation == 3) gf_bs_read_ue_log_idx(bs, "difference_of_pic_nums_minus1", idx); if (memory_management_control_operation == 2) gf_bs_read_ue_log_idx(bs, "long_term_pic_num", idx); if (memory_management_control_operation == 3 || memory_management_control_operation == 6) gf_bs_read_ue_log_idx(bs, "long_term_frame_idx", idx); if (memory_management_control_operation == 4) gf_bs_read_ue_log_idx(bs, "max_long_term_frame_idx_plus1", idx); idx++; } while (memory_management_control_operation != 0); } } } static s32 avc_parse_slice(GF_BitStream *bs, AVCState *avc, Bool svc_idr_flag, AVCSliceInfo *si) { s32 pps_id, num_ref_idx_l0_active_minus1 = 0, num_ref_idx_l1_active_minus1 = 0; /*s->current_picture.reference= h->nal_ref_idc != 0;*/ gf_bs_read_ue_log(bs, "first_mb_in_slice"); si->slice_type = gf_bs_read_ue_log(bs, "slice_type"); if (si->slice_type > 9) return -1; pps_id = gf_bs_read_ue_log(bs, "pps_id"); if (pps_id > 255) return -1; si->pps = &avc->pps[pps_id]; if (!si->pps->slice_group_count) return -2; si->sps = &avc->sps[si->pps->sps_id]; if (!si->sps->log2_max_frame_num) return -2; avc->sps_active_idx = si->pps->sps_id; avc->pps_active_idx = pps_id; si->frame_num = gf_bs_read_int_log(bs, si->sps->log2_max_frame_num, "frame_num"); si->field_pic_flag = 0; si->bottom_field_flag = 0; if (!si->sps->frame_mbs_only_flag) { si->field_pic_flag = gf_bs_read_int_log(bs, 1, "field_pic_flag"); if (si->field_pic_flag) si->bottom_field_flag = gf_bs_read_int_log(bs, 1, "bottom_field_flag"); } if ((si->nal_unit_type == GF_AVC_NALU_IDR_SLICE) || svc_idr_flag) si->idr_pic_id = gf_bs_read_ue_log(bs, "idr_pic_id"); if (si->sps->poc_type == 0) { si->poc_lsb = gf_bs_read_int_log(bs, si->sps->log2_max_poc_lsb, "poc_lsb"); if (si->pps->pic_order_present && !si->field_pic_flag) { si->delta_poc_bottom = gf_bs_read_se_log(bs, "poc_lsb"); } } else if ((si->sps->poc_type == 1) && !si->sps->delta_pic_order_always_zero_flag) { si->delta_poc[0] = gf_bs_read_se_log(bs, "delta_poc0"); if ((si->pps->pic_order_present == 1) && !si->field_pic_flag) si->delta_poc[1] = gf_bs_read_se_log(bs, "delta_poc1"); } if (si->pps->redundant_pic_cnt_present) { si->redundant_pic_cnt = 
gf_bs_read_ue_log(bs, "redundant_pic_cnt"); } if (si->slice_type % 5 == GF_AVC_TYPE_B) { gf_bs_read_int_log(bs, 1, "direct_spatial_mv_pred_flag"); } num_ref_idx_l0_active_minus1 = si->pps->num_ref_idx_l0_default_active_minus1; num_ref_idx_l1_active_minus1 = si->pps->num_ref_idx_l1_default_active_minus1; if (si->slice_type % 5 == GF_AVC_TYPE_P || si->slice_type % 5 == GF_AVC_TYPE_SP || si->slice_type % 5 == GF_AVC_TYPE_B) { Bool num_ref_idx_active_override_flag = gf_bs_read_int_log(bs, 1, "num_ref_idx_active_override_flag"); if (num_ref_idx_active_override_flag) { num_ref_idx_l0_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l0_active_minus1"); if (si->slice_type % 5 == GF_AVC_TYPE_B) { num_ref_idx_l1_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l1_active_minus1"); } } } if (si->nal_unit_type == 20 || si->nal_unit_type == 21) { //ref_pic_list_mvc_modification(); /* specified in Annex H */ GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[avc-h264] unimplemented ref_pic_list_mvc_modification() in slide header\n")); assert(0); return -1; } else { ref_pic_list_modification(bs, si->slice_type); } if ((si->pps->weighted_pred_flag && (si->slice_type % 5 == GF_AVC_TYPE_P || si->slice_type % 5 == GF_AVC_TYPE_SP)) || (si->pps->weighted_bipred_idc == 1 && si->slice_type % 5 == GF_AVC_TYPE_B)) { pred_weight_table(bs, si->slice_type, si->sps->ChromaArrayType, num_ref_idx_l0_active_minus1, num_ref_idx_l1_active_minus1); } if (si->nal_ref_idc != 0) { dec_ref_pic_marking(bs, (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE)); } if (si->pps->entropy_coding_mode_flag && si->slice_type % 5 != GF_AVC_TYPE_I && si->slice_type % 5 != GF_AVC_TYPE_SI) { gf_bs_read_ue_log(bs, "cabac_init_idc"); } /*slice_qp_delta = */gf_bs_read_se(bs); if (si->slice_type % 5 == GF_AVC_TYPE_SP || si->slice_type % 5 == GF_AVC_TYPE_SI) { if (si->slice_type % 5 == GF_AVC_TYPE_SP) { gf_bs_read_int_log(bs, 1, "sp_for_switch_flag"); } gf_bs_read_se_log(bs, "slice_qs_delta"); } if (si->pps->deblocking_filter_control_present_flag) { if (gf_bs_read_ue_log(bs, "disable_deblocking_filter_idc") != 1) { gf_bs_read_se_log(bs, "slice_alpha_c0_offset_div2"); gf_bs_read_se_log(bs, "slice_beta_offset_div2"); } } if (si->pps->slice_group_count > 1 && si->pps->mb_slice_group_map_type >= 3 && si->pps->mb_slice_group_map_type <= 5) { gf_bs_read_int_log(bs, (u32)ceil(log1p((si->pps->pic_size_in_map_units_minus1 + 1) / (si->pps->slice_group_change_rate_minus1 + 1) ) / log(2)), "slice_group_change_cycle"); } return 0; } static s32 svc_parse_slice(GF_BitStream *bs, AVCState *avc, AVCSliceInfo *si) { s32 pps_id; /*s->current_picture.reference= h->nal_ref_idc != 0;*/ gf_bs_read_ue_log(bs, "first_mb_in_slice"); si->slice_type = gf_bs_read_ue_log(bs, "slice_type"); if (si->slice_type > 9) return -1; pps_id = gf_bs_read_ue_log(bs, "pps_id"); if (pps_id > 255) return -1; si->pps = &avc->pps[pps_id]; si->pps->id = pps_id; if (!si->pps->slice_group_count) return -2; si->sps = &avc->sps[si->pps->sps_id + GF_SVC_SSPS_ID_SHIFT]; if (!si->sps->log2_max_frame_num) return -2; si->frame_num = gf_bs_read_int_log(bs, si->sps->log2_max_frame_num, "frame_num"); si->field_pic_flag = 0; if (si->sps->frame_mbs_only_flag) { /*s->picture_structure= PICT_FRAME;*/ } else { si->field_pic_flag = gf_bs_read_int_log(bs, 1, "field_pic_flag"); if (si->field_pic_flag) si->bottom_field_flag = gf_bs_read_int_log(bs, 1, "bottom_field_flag"); } if (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE || si->NalHeader.idr_pic_flag) si->idr_pic_id = gf_bs_read_ue_log(bs, "idr_pic_id"); if (si->sps->poc_type == 
0) { si->poc_lsb = gf_bs_read_int_log(bs, si->sps->log2_max_poc_lsb, "poc_lsb"); if (si->pps->pic_order_present && !si->field_pic_flag) { si->delta_poc_bottom = gf_bs_read_se_log(bs, "delta_poc_bottom"); } } else if ((si->sps->poc_type == 1) && !si->sps->delta_pic_order_always_zero_flag) { si->delta_poc[0] = gf_bs_read_se_log(bs, "delta_poc0"); if ((si->pps->pic_order_present == 1) && !si->field_pic_flag) si->delta_poc[1] = gf_bs_read_se_log(bs, "delta_poc1"); } if (si->pps->redundant_pic_cnt_present) { si->redundant_pic_cnt = gf_bs_read_ue_log(bs, "redundant_pic_cnt"); } return 0; } static s32 avc_parse_recovery_point_sei(GF_BitStream *bs, AVCState *avc) { AVCSeiRecoveryPoint *rp = &avc->sei.recovery_point; rp->frame_cnt = gf_bs_read_ue_log(bs, "frame_cnt"); rp->exact_match_flag = gf_bs_read_int_log(bs, 1, "exact_match_flag"); rp->broken_link_flag = gf_bs_read_int_log(bs, 1, "broken_link_flag"); rp->changing_slice_group_idc = gf_bs_read_int_log(bs, 2, "changing_slice_group_idc"); rp->valid = 1; return 0; } /*for interpretation see ISO 14496-10 N.11084, table D-1*/ static s32 avc_parse_pic_timing_sei(GF_BitStream *bs, AVCState *avc) { int sps_id = avc->sps_active_idx; const char NumClockTS[] = { 1, 1, 1, 2, 2, 3, 3, 2, 3 }; AVCSeiPicTiming *pt = &avc->sei.pic_timing; if (sps_id < 0) { /*sps_active_idx equals -1 when no sps has been detected. In this case SEI should not be decoded.*/ assert(0); return 1; } if (avc->sps[sps_id].vui.nal_hrd_parameters_present_flag || avc->sps[sps_id].vui.vcl_hrd_parameters_present_flag) { /*CpbDpbDelaysPresentFlag, see 14496-10(2003) E.11*/ gf_bs_read_int_log(bs, 1 + avc->sps[sps_id].vui.hrd.cpb_removal_delay_length_minus1, "cpb_removal_delay_minus1"); gf_bs_read_int_log(bs, 1 + avc->sps[sps_id].vui.hrd.dpb_output_delay_length_minus1, "dpb_output_delay_minus1"); } /*ISO 14496-10 (2003), D.8.2: we need to get pic_struct in order to know if we display top field first or bottom field first*/ if (avc->sps[sps_id].vui.pic_struct_present_flag) { int i; pt->pic_struct = gf_bs_read_int_log(bs, 4, "pic_struct"); if (pt->pic_struct > 8) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[avc-h264] invalid pic_struct value %d\n", pt->pic_struct)); return 1; } for (i = 0; i < NumClockTS[pt->pic_struct]; i++) { if (gf_bs_read_int_log_idx(bs, 1, "clock_timestamp_flag", i)) { Bool full_timestamp_flag; gf_bs_read_int_log_idx(bs, 2, "ct_type", i); gf_bs_read_int_log_idx(bs, 1, "nuit_field_based_flag", i); gf_bs_read_int_log_idx(bs, 5, "counting_type", i); full_timestamp_flag = gf_bs_read_int_log_idx(bs, 1, "full_timestamp_flag", i); gf_bs_read_int_log_idx(bs, 1, "discontinuity_flag", i); gf_bs_read_int_log_idx(bs, 1, "cnt_dropped_flag", i); gf_bs_read_int_log_idx(bs, 8, "n_frames", i); if (full_timestamp_flag) { gf_bs_read_int_log_idx(bs, 6, "seconds_value", i); gf_bs_read_int_log_idx(bs, 6, "minutes_value", i); gf_bs_read_int_log_idx(bs, 5, "hours_value", i); } else { if (gf_bs_read_int_log_idx(bs, 1, "seconds_flag", i)) { gf_bs_read_int_log_idx(bs, 6, "seconds_value", i); if (gf_bs_read_int_log_idx(bs, 1, "minutes_flag", i)) { gf_bs_read_int_log_idx(bs, 6, "minutes_value", i); if (gf_bs_read_int_log_idx(bs, 1, "hours_flag", i)) { gf_bs_read_int_log_idx(bs, 5, "hours_value", i); } } } if (avc->sps[sps_id].vui.hrd.time_offset_length > 0) gf_bs_read_int_log_idx(bs, avc->sps[sps_id].vui.hrd.time_offset_length, "time_offset", i); } } } } return 0; } #if !defined(GPAC_DISABLE_HEVC) static void avc_parse_itu_t_t35_sei(GF_BitStream* bs, AVCSeiItuTT35DolbyVision *dovi) { u8 
itu_t_t35_country_code = gf_bs_read_u8(bs); u16 terminal_provider_code = gf_bs_read_u16(bs); u32 user_id = gf_bs_read_u32(bs); u8 data_type_code = gf_bs_read_u8(bs); if (itu_t_t35_country_code == 0xB5 && terminal_provider_code == 0x31 && user_id == 0x47413934 && (data_type_code == 0x8 || data_type_code == 0x9)) { dovi->rpu_flag = GF_TRUE; } } #endif static void avc_compute_poc(AVCSliceInfo *si) { enum { AVC_PIC_FRAME, AVC_PIC_FIELD_TOP, AVC_PIC_FIELD_BOTTOM, } pic_type; s32 field_poc[2] = { 0,0 }; s32 max_frame_num; if (!si->sps) return; max_frame_num = 1 << (si->sps->log2_max_frame_num); /* picture type */ if (si->sps->frame_mbs_only_flag || !si->field_pic_flag) pic_type = AVC_PIC_FRAME; else if (si->bottom_field_flag) pic_type = AVC_PIC_FIELD_BOTTOM; else pic_type = AVC_PIC_FIELD_TOP; /* frame_num_offset */ if (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE) { si->poc_lsb_prev = 0; si->poc_msb_prev = 0; si->frame_num_offset = 0; } else { if (si->frame_num < si->frame_num_prev) si->frame_num_offset = si->frame_num_offset_prev + max_frame_num; else si->frame_num_offset = si->frame_num_offset_prev; } /*ISO 14496-10 N.11084 8.2.1.1*/ if (si->sps->poc_type == 0) { const u32 max_poc_lsb = 1 << (si->sps->log2_max_poc_lsb); /*ISO 14496-10 N.11084 eq (8-3)*/ if ((si->poc_lsb < si->poc_lsb_prev) && (si->poc_lsb_prev - si->poc_lsb >= max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev + max_poc_lsb; else if ((si->poc_lsb > si->poc_lsb_prev) && (si->poc_lsb - si->poc_lsb_prev > max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev - max_poc_lsb; else si->poc_msb = si->poc_msb_prev; /*ISO 14496-10 N.11084 eq (8-4)*/ if (pic_type != AVC_PIC_FIELD_BOTTOM) field_poc[0] = si->poc_msb + si->poc_lsb; /*ISO 14496-10 N.11084 eq (8-5)*/ if (pic_type != AVC_PIC_FIELD_TOP) { if (!si->field_pic_flag) field_poc[1] = field_poc[0] + si->delta_poc_bottom; else field_poc[1] = si->poc_msb + si->poc_lsb; } } /*ISO 14496-10 N.11084 8.2.1.2*/ else if (si->sps->poc_type == 1) { u32 i; s32 abs_frame_num, expected_delta_per_poc_cycle, expected_poc; if (si->sps->poc_cycle_length) abs_frame_num = si->frame_num_offset + si->frame_num; else abs_frame_num = 0; if (!si->nal_ref_idc && (abs_frame_num > 0)) abs_frame_num--; expected_delta_per_poc_cycle = 0; for (i = 0; i < si->sps->poc_cycle_length; i++) expected_delta_per_poc_cycle += si->sps->offset_for_ref_frame[i]; if (abs_frame_num > 0) { const u32 poc_cycle_cnt = (abs_frame_num - 1) / si->sps->poc_cycle_length; const u32 frame_num_in_poc_cycle = (abs_frame_num - 1) % si->sps->poc_cycle_length; expected_poc = poc_cycle_cnt * expected_delta_per_poc_cycle; for (i = 0; i <= frame_num_in_poc_cycle; i++) expected_poc += si->sps->offset_for_ref_frame[i]; } else { expected_poc = 0; } if (!si->nal_ref_idc) expected_poc += si->sps->offset_for_non_ref_pic; field_poc[0] = expected_poc + si->delta_poc[0]; field_poc[1] = field_poc[0] + si->sps->offset_for_top_to_bottom_field; if (pic_type == AVC_PIC_FRAME) field_poc[1] += si->delta_poc[1]; } /*ISO 14496-10 N.11084 8.2.1.3*/ else if (si->sps->poc_type == 2) { int poc; if (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE) { poc = 0; } else { const int abs_frame_num = si->frame_num_offset + si->frame_num; poc = 2 * abs_frame_num; if (!si->nal_ref_idc) poc -= 1; } field_poc[0] = poc; field_poc[1] = poc; } /*ISO 14496-10 N.11084 eq (8-1)*/ if (pic_type == AVC_PIC_FRAME) si->poc = MIN(field_poc[0], field_poc[1]); else if (pic_type == AVC_PIC_FIELD_TOP) si->poc = field_poc[0]; else si->poc = field_poc[1]; } GF_EXPORT s32 gf_avc_parse_nalu(GF_BitStream *bs, 
AVCState *avc) { u8 idr_flag; s32 slice, ret; u32 nal_hdr; AVCSliceInfo n_state; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); nal_hdr = gf_bs_read_u8(bs); slice = 0; memcpy(&n_state, &avc->s_info, sizeof(AVCSliceInfo)); avc->last_nal_type_parsed = n_state.nal_unit_type = nal_hdr & 0x1F; n_state.nal_ref_idc = (nal_hdr >> 5) & 0x3; idr_flag = 0; switch (n_state.nal_unit_type) { case GF_AVC_NALU_ACCESS_UNIT: case GF_AVC_NALU_END_OF_SEQ: case GF_AVC_NALU_END_OF_STREAM: ret = 1; break; case GF_AVC_NALU_SVC_SLICE: SVC_ReadNal_header_extension(bs, &n_state.NalHeader); // slice buffer - read the info and compare. /*ret = */svc_parse_slice(bs, avc, &n_state); if (avc->s_info.nal_ref_idc) { n_state.poc_lsb_prev = avc->s_info.poc_lsb; n_state.poc_msb_prev = avc->s_info.poc_msb; } avc_compute_poc(&n_state); if (avc->s_info.poc != n_state.poc) { memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return 1; } memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return 0; case GF_AVC_NALU_SVC_PREFIX_NALU: SVC_ReadNal_header_extension(bs, &n_state.NalHeader); return 0; case GF_AVC_NALU_IDR_SLICE: case GF_AVC_NALU_NON_IDR_SLICE: case GF_AVC_NALU_DP_A_SLICE: case GF_AVC_NALU_DP_B_SLICE: case GF_AVC_NALU_DP_C_SLICE: slice = 1; /* slice buffer - read the info and compare.*/ ret = avc_parse_slice(bs, avc, idr_flag, &n_state); if (ret < 0) return ret; ret = 0; if ( ((avc->s_info.nal_unit_type > GF_AVC_NALU_IDR_SLICE) || (avc->s_info.nal_unit_type < GF_AVC_NALU_NON_IDR_SLICE)) && (avc->s_info.nal_unit_type != GF_AVC_NALU_SVC_SLICE) ) { break; } if (avc->s_info.frame_num != n_state.frame_num) { ret = 1; break; } if (avc->s_info.field_pic_flag != n_state.field_pic_flag) { ret = 1; break; } if ((avc->s_info.nal_ref_idc != n_state.nal_ref_idc) && (!avc->s_info.nal_ref_idc || !n_state.nal_ref_idc)) { ret = 1; break; } assert(avc->s_info.sps); if (avc->s_info.sps->poc_type == n_state.sps->poc_type) { if (!avc->s_info.sps->poc_type) { if (!n_state.bottom_field_flag && (avc->s_info.poc_lsb != n_state.poc_lsb)) { ret = 1; break; } if (avc->s_info.delta_poc_bottom != n_state.delta_poc_bottom) { ret = 1; break; } } else if (avc->s_info.sps->poc_type == 1) { if (avc->s_info.delta_poc[0] != n_state.delta_poc[0]) { ret = 1; break; } if (avc->s_info.delta_poc[1] != n_state.delta_poc[1]) { ret = 1; break; } } } if (n_state.nal_unit_type == GF_AVC_NALU_IDR_SLICE) { if (avc->s_info.nal_unit_type != GF_AVC_NALU_IDR_SLICE) { /*IdrPicFlag differs in value*/ ret = 1; break; } else if (avc->s_info.idr_pic_id != n_state.idr_pic_id) { /*both IDR and idr_pic_id differs*/ ret = 1; break; } } break; case GF_AVC_NALU_SEQ_PARAM: avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 0, NULL, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_PIC_PARAM: avc->last_ps_idx = gf_avc_read_pps_bs_internal(bs, avc, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SVC_SUBSEQ_PARAM: avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 1, NULL, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SEQ_PARAM_EXT: avc->last_ps_idx = (s32) gf_bs_read_ue(bs); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SEI: case GF_AVC_NALU_FILLER_DATA: return 0; default: if (avc->s_info.nal_unit_type <= GF_AVC_NALU_IDR_SLICE) ret = 1; //To detect change of AU when multiple sps and pps in stream else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEI && avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE) ret = 1; else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEQ_PARAM && 
avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE) ret = 1; else ret = 0; break; } /* save _prev values */ if (ret && avc->s_info.sps) { n_state.frame_num_offset_prev = avc->s_info.frame_num_offset; if ((avc->s_info.sps->poc_type != 2) || (avc->s_info.nal_ref_idc != 0)) n_state.frame_num_prev = avc->s_info.frame_num; if (avc->s_info.nal_ref_idc) { n_state.poc_lsb_prev = avc->s_info.poc_lsb; n_state.poc_msb_prev = avc->s_info.poc_msb; } } if (slice) avc_compute_poc(&n_state); memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return ret; } u32 gf_media_avc_reformat_sei(u8 *buffer, u32 nal_size, Bool isobmf_rewrite, AVCState *avc) { u32 ptype, psize, hdr, var; u32 start; GF_BitStream *bs; GF_BitStream *bs_dest = NULL; u8 nhdr; Bool sei_removed = GF_FALSE; char store; hdr = buffer[0]; if ((hdr & 0x1F) != GF_AVC_NALU_SEI) return 0; if (isobmf_rewrite) bs_dest = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); bs = gf_bs_new(buffer, nal_size, GF_BITSTREAM_READ); gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); nhdr = gf_bs_read_int(bs, 8); if (bs_dest) gf_bs_write_int(bs_dest, nhdr, 8); /*parse SEI*/ while (gf_bs_available(bs)) { Bool do_copy; ptype = 0; while (1) { u8 v = gf_bs_read_int(bs, 8); ptype += v; if (v != 0xFF) break; } psize = 0; while (1) { u8 v = gf_bs_read_int(bs, 8); psize += v; if (v != 0xFF) break; } start = (u32)gf_bs_get_position(bs); do_copy = 1; if (start + psize >= nal_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] SEI user message type %d size error (%d but %d remain), keeping full SEI untouched\n", ptype, psize, nal_size - start)); if (bs_dest) gf_bs_del(bs_dest); return nal_size; } switch (ptype) { /*remove SEI messages forbidden in MP4*/ case 3: /*filler data*/ case 10: /*sub_seq info*/ case 11: /*sub_seq_layer char*/ case 12: /*sub_seq char*/ do_copy = 0; sei_removed = GF_TRUE; break; case 5: /*user unregistered */ store = buffer[start + psize]; buffer[start + psize] = 0; GF_LOG(GF_LOG_DEBUG, GF_LOG_CODING, ("[avc-h264] SEI user message %s\n", buffer + start + 16)); buffer[start + psize] = store; break; case 6: /*recovery point*/ avc_parse_recovery_point_sei(bs, avc); break; case 1: /*pic_timing*/ avc_parse_pic_timing_sei(bs, avc); break; case 0: /*buffering period*/ case 2: /*pan scan rect*/ case 4: /*user registered ITU t35*/ case 7: /*def_rec_pic_marking_repetition*/ case 8: /*spare_pic*/ case 9: /*scene info*/ case 13: /*full frame freeze*/ case 14: /*full frame freeze release*/ case 15: /*full frame snapshot*/ case 16: /*progressive refinement segment start*/ case 17: /*progressive refinement segment end*/ case 18: /*motion constrained slice group*/ default: /*add all unknown SEIs*/ break; } if (do_copy && bs_dest) { var = ptype; while (var >= 255) { gf_bs_write_int(bs_dest, 0xFF, 8); var -= 255; } gf_bs_write_int(bs_dest, var, 8); var = psize; while (var >= 255) { gf_bs_write_int(bs_dest, 0xFF, 8); var -= 255; } gf_bs_write_int(bs_dest, var, 8); gf_bs_seek(bs, start); //bs_read_data does not skip EPB, read byte per byte var = psize; while (var) { gf_bs_write_u8(bs_dest, gf_bs_read_u8(bs)); var--; } } else { gf_bs_seek(bs, start); //bs_skip_bytes does not skip EPB, skip byte per byte while (psize) { gf_bs_read_u8(bs); psize--; } } if (gf_bs_available(bs) <= 2) { var = gf_bs_read_int(bs, 8); if (var != 0x80) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] SEI user message has less than 2 bytes remaining but no end of sei found\n")); } if (bs_dest) gf_bs_write_int(bs_dest, 0x80, 8); break; } } gf_bs_del(bs); //we cannot compare final size and original 
size since original may have EPB and final does not yet have them if (bs_dest && sei_removed) { u8 *dst_no_epb = NULL; u32 dst_no_epb_size = 0; gf_bs_get_content(bs_dest, &dst_no_epb, &dst_no_epb_size); nal_size = gf_media_nalu_add_emulation_bytes(buffer, dst_no_epb, dst_no_epb_size); } if (bs_dest) gf_bs_del(bs_dest); return nal_size; } static u8 avc_hevc_get_sar_idx(u32 w, u32 h) { u32 i, count = GF_ARRAY_LENGTH(avc_hevc_sar); for (i = 0; i < count; i++) { if ((avc_hevc_sar[i].w == w) && (avc_hevc_sar[i].h == h)) return i; } return 0xFF; } static void avc_hevc_rewrite_vui(GF_VUIInfo *vui_info, GF_BitStream *orig, GF_BitStream *mod) { /* VUI present flag*/ Bool vui_present_flag = gf_bs_read_int(orig, 1); /*setup default values*/ Bool aspect_ratio_info_present_flag = 0; s32 aspect_ratio_idc = -1; u32 ar_n=0, ar_d=0; Bool overscan_info_present_flag = 0; u32 overscan_info=0; u32 video_signal_type_present_flag=0; u32 video_format = 5; u32 video_full_range_flag = 0; u32 colour_description_present_flag = 0; u32 colour_primaries = 2; u32 transfer_characteristics = 2; u32 matrix_coefficients = 2; //if VUI is present, read all SAR and overscan values if (vui_present_flag) { /* VUI found in input bitstream */ aspect_ratio_info_present_flag = gf_bs_read_int(orig, 1); if (aspect_ratio_info_present_flag) { aspect_ratio_idc = gf_bs_read_int(orig, 8); /*aspect_ratio_idc*/ if (aspect_ratio_idc == 255) { ar_n = gf_bs_read_int(orig, 16); /*sar_width*/ ar_d = gf_bs_read_int(orig, 16); /*sar_height*/ } } /*overscan_info_present_flag */ overscan_info_present_flag = gf_bs_read_int(orig, 1); if(overscan_info_present_flag) { overscan_info = gf_bs_read_int(orig, 1); } /* read all video signal related flags first */ video_signal_type_present_flag = gf_bs_read_int(orig, 1); if(video_signal_type_present_flag) { video_format = gf_bs_read_int(orig, 3); video_full_range_flag = gf_bs_read_int(orig, 1); colour_description_present_flag = gf_bs_read_int(orig, 1); if(colour_description_present_flag) { colour_primaries = gf_bs_read_int(orig, 8); transfer_characteristics = gf_bs_read_int(orig, 8); matrix_coefficients = gf_bs_read_int(orig, 8); } } } //recompute values //no change if ((vui_info->ar_num<0) || (vui_info->ar_den<0)) { } //remove par else if ((vui_info->ar_num==0) || (vui_info->ar_den==0)) { aspect_ratio_info_present_flag = 0; } //set par else { aspect_ratio_info_present_flag = 1; ar_n = vui_info->ar_num; ar_d = vui_info->ar_den; aspect_ratio_idc = avc_hevc_get_sar_idx((u32) ar_n, (u32) ar_d); } if (vui_info->remove_video_info) { video_signal_type_present_flag = 0; } /* correct the values of each flags */ else if ((vui_info->fullrange==0) && (vui_info->video_format==5) && (vui_info->color_prim==2) && (vui_info->color_tfc==2) && (vui_info->color_matrix==2)) { video_signal_type_present_flag = 0; /* all default, nothing to write*/ } else { video_signal_type_present_flag = 1; video_format = (vui_info->video_format < 0) ? video_format : vui_info->video_format; video_full_range_flag = (vui_info->fullrange < 0) ? video_full_range_flag : vui_info->fullrange; if ((vui_info->color_prim==2) && (vui_info->color_tfc==2) && (vui_info->color_matrix==2)) { colour_description_present_flag = 0; } else { colour_description_present_flag = 1; colour_primaries = (vui_info->color_prim < 0) ? colour_primaries : vui_info->color_prim; transfer_characteristics = (vui_info->color_tfc < 0) ? transfer_characteristics : vui_info->color_tfc; matrix_coefficients = (vui_info->color_matrix < 0) ? 
matrix_coefficients : vui_info->color_matrix; } if ((colour_primaries==2) && (transfer_characteristics==2) && (matrix_coefficients==2)) { colour_description_present_flag = 0; if ((video_format==5) && (video_full_range_flag==0)) video_signal_type_present_flag = 0; } } //always rewrite VUI gf_bs_write_int(mod, 1, 1); gf_bs_write_int(mod, aspect_ratio_info_present_flag, 1); if (aspect_ratio_info_present_flag) { gf_bs_write_int(mod, aspect_ratio_idc, 8); if (aspect_ratio_idc == 255) { gf_bs_write_int(mod, ar_n, 16); gf_bs_write_int(mod, ar_d, 16); } if (vui_info->update) { vui_info->ar_num = ar_n; vui_info->ar_den = ar_d; } } gf_bs_write_int(mod, overscan_info_present_flag, 1); if (overscan_info_present_flag) { gf_bs_write_int(mod, overscan_info, 1); } gf_bs_write_int(mod, video_signal_type_present_flag, 1); if (video_signal_type_present_flag) { gf_bs_write_int(mod, video_format, 3); gf_bs_write_int(mod, video_full_range_flag, 1); gf_bs_write_int(mod, colour_description_present_flag, 1); if (colour_description_present_flag) { gf_bs_write_int(mod, colour_primaries, 8); gf_bs_write_int(mod, transfer_characteristics, 8); gf_bs_write_int(mod, matrix_coefficients, 8); } if (vui_info->update) { vui_info->video_format = video_format; vui_info->fullrange = video_full_range_flag; if (colour_description_present_flag) { vui_info->color_prim = colour_primaries; vui_info->color_tfc = transfer_characteristics; vui_info->color_matrix = matrix_coefficients; } } } /*no VUI in input bitstream but we just inserted one, set all remaining vui flags to 0*/ if (!vui_present_flag) { gf_bs_write_int(mod, 0, 1); /*chroma_location_info_present_flag */ gf_bs_write_int(mod, 0, 1); /*timing_info_present_flag*/ gf_bs_write_int(mod, 0, 1); /*nal_hrd_parameters_present*/ gf_bs_write_int(mod, 0, 1); /*vcl_hrd_parameters_present*/ gf_bs_write_int(mod, 0, 1); /*pic_struct_present*/ gf_bs_write_int(mod, 0, 1); /*bitstream_restriction*/ } /*otherwise we copy over th bits from the input bitrate*/ } GF_Err gf_avc_change_vui(GF_AVCConfig *avcc, GF_VUIInfo *vui_info) { GF_BitStream *orig, *mod; AVCState avc; u32 i, bit_offset, flag; s32 idx; GF_AVCConfigSlot *slc; orig = NULL; memset(&avc, 0, sizeof(AVCState)); avc.sps_active_idx = -1; i=0; while ((slc = (GF_AVCConfigSlot *)gf_list_enum(avcc->sequenceParameterSets, &i))) { u8 *no_emulation_buf = NULL; u32 no_emulation_buf_size = 0, emulation_bytes = 0; idx = gf_avc_read_sps(slc->data, slc->size, &avc, 0, &bit_offset); if (idx<0) { if ( orig ) gf_bs_del(orig); continue; } /*SPS still contains emulation bytes*/ no_emulation_buf = gf_malloc((slc->size - 1) * sizeof(char)); no_emulation_buf_size = gf_media_nalu_remove_emulation_bytes(slc->data + 1, no_emulation_buf, slc->size - 1); orig = gf_bs_new(no_emulation_buf, no_emulation_buf_size, GF_BITSTREAM_READ); gf_bs_read_data(orig, no_emulation_buf, no_emulation_buf_size); gf_bs_seek(orig, 0); mod = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); /*copy over till vui flag*/ assert(bit_offset >= 8); while (bit_offset - 8/*bit_offset doesn't take care of the first byte (NALU type)*/) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); bit_offset--; } avc_hevc_rewrite_vui(vui_info, orig, mod); /*finally copy over remaining*/ while (gf_bs_bits_available(orig)) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); } gf_bs_del(orig); orig = NULL; gf_free(no_emulation_buf); /*set anti-emulation*/ gf_bs_get_content(mod, &no_emulation_buf, &flag); emulation_bytes = gf_media_nalu_emulation_bytes_add_count(no_emulation_buf, flag); if 
(flag+emulation_bytes+1>slc->size) slc->data = (char*)gf_realloc(slc->data, flag+emulation_bytes+1); slc->size = gf_media_nalu_add_emulation_bytes(no_emulation_buf, slc->data + 1, flag) + 1; gf_bs_del(mod); gf_free(no_emulation_buf); } return GF_OK; } GF_EXPORT GF_Err gf_media_avc_change_par(GF_AVCConfig *avcc, s32 ar_n, s32 ar_d) { GF_VUIInfo vuii; memset(&vuii, 0, sizeof(GF_VUIInfo)); vuii.ar_num = ar_n; vuii.ar_den = ar_d; vuii.fullrange = -1; vuii.video_format = -1; vuii.color_prim = -1; vuii.color_tfc = -1; vuii.color_matrix = -1; return gf_avc_change_vui(avcc, &vuii); } GF_EXPORT GF_Err gf_media_avc_change_color(GF_AVCConfig *avcc, s32 fullrange, s32 vidformat, s32 colorprim, s32 transfer, s32 colmatrix) { GF_VUIInfo vuii; memset(&vuii, 0, sizeof(GF_VUIInfo)); vuii.ar_num = -1; vuii.ar_den = -1; vuii.fullrange = fullrange; vuii.video_format = vidformat; vuii.color_prim = colorprim; vuii.color_tfc = transfer; vuii.color_matrix = colmatrix; return gf_avc_change_vui(avcc, &vuii); } GF_EXPORT GF_Err gf_avc_get_sps_info(u8 *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d) { AVCState avc; s32 idx; memset(&avc, 0, sizeof(AVCState)); avc.sps_active_idx = -1; idx = gf_avc_read_sps(sps_data, sps_size, &avc, 0, NULL); if (idx < 0) { return GF_NON_COMPLIANT_BITSTREAM; } if (sps_id) *sps_id = idx; if (width) *width = avc.sps[idx].width; if (height) *height = avc.sps[idx].height; if (par_n) *par_n = avc.sps[idx].vui.par_num ? avc.sps[idx].vui.par_num : (u32)-1; if (par_d) *par_d = avc.sps[idx].vui.par_den ? avc.sps[idx].vui.par_den : (u32)-1; return GF_OK; } GF_EXPORT GF_Err gf_avc_get_pps_info(u8 *pps_data, u32 pps_size, u32 *pps_id, u32 *sps_id) { GF_BitStream *bs; GF_Err e = GF_OK; bs = gf_bs_new(pps_data, pps_size, GF_BITSTREAM_READ); if (!bs) { e = GF_NON_COMPLIANT_BITSTREAM; goto exit; } gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); /*nal hdr*/ gf_bs_read_int(bs, 8); *pps_id = gf_bs_read_ue(bs); *sps_id = gf_bs_read_ue(bs); exit: gf_bs_del(bs); return e; } #ifndef GPAC_DISABLE_HEVC /********** HEVC parsing **********/ Bool gf_hevc_slice_is_intra(HEVCState *hevc) { switch (hevc->s_info.nal_unit_type) { case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: case GF_HEVC_NALU_SLICE_CRA: return GF_TRUE; default: return GF_FALSE; } } Bool gf_hevc_slice_is_IDR(HEVCState *hevc) { if (hevc->sei.recovery_point.valid) { hevc->sei.recovery_point.valid = 0; return GF_TRUE; } switch (hevc->s_info.nal_unit_type) { case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: return GF_TRUE; default: return GF_FALSE; } } static Bool hevc_parse_short_term_ref_pic_set(GF_BitStream *bs, HEVC_SPS *sps, u32 idx_rps) { u32 i; Bool inter_ref_pic_set_prediction_flag = 0; if (idx_rps != 0) inter_ref_pic_set_prediction_flag = gf_bs_read_int_log_idx(bs, 1, "inter_ref_pic_set_prediction_flag", idx_rps); if (inter_ref_pic_set_prediction_flag) { HEVC_ReferencePictureSets *ref_ps, *rps; u32 delta_idx_minus1 = 0; u32 ref_idx; u32 delta_rps_sign; u32 abs_delta_rps_minus1, nb_ref_pics; s32 deltaRPS; u32 k = 0, k0 = 0, k1 = 0; if (idx_rps == sps->num_short_term_ref_pic_sets) delta_idx_minus1 = gf_bs_read_ue_log_idx(bs, "delta_idx_minus1", idx_rps); assert(delta_idx_minus1 <= idx_rps - 1); ref_idx = idx_rps - 1 - delta_idx_minus1; delta_rps_sign = gf_bs_read_int_log_idx(bs, 1, "delta_rps_sign", idx_rps); abs_delta_rps_minus1 = gf_bs_read_ue_log_idx(bs, 
"abs_delta_rps_minus1", idx_rps); deltaRPS = (1 - (delta_rps_sign << 1)) * (abs_delta_rps_minus1 + 1); rps = &sps->rps[idx_rps]; ref_ps = &sps->rps[ref_idx]; nb_ref_pics = ref_ps->num_negative_pics + ref_ps->num_positive_pics; for (i = 0; i <= nb_ref_pics; i++) { s32 ref_idc; s32 used_by_curr_pic_flag = gf_bs_read_int_log_idx2(bs, 1, "used_by_curr_pic_flag", idx_rps, i); ref_idc = used_by_curr_pic_flag ? 1 : 0; if (!used_by_curr_pic_flag) { used_by_curr_pic_flag = gf_bs_read_int_log_idx2(bs, 1, "used_by_curr_pic_flag", idx_rps, i); ref_idc = used_by_curr_pic_flag << 1; } if ((ref_idc == 1) || (ref_idc == 2)) { s32 deltaPOC = deltaRPS; if (i < nb_ref_pics) deltaPOC += ref_ps->delta_poc[i]; rps->delta_poc[k] = deltaPOC; if (deltaPOC < 0) k0++; else k1++; k++; } } rps->num_negative_pics = k0; rps->num_positive_pics = k1; } else { s32 prev = 0, poc; sps->rps[idx_rps].num_negative_pics = gf_bs_read_ue_log_idx(bs, "num_negative_pics", idx_rps); sps->rps[idx_rps].num_positive_pics = gf_bs_read_ue_log_idx(bs, "num_positive_pics", idx_rps); if (sps->rps[idx_rps].num_negative_pics > 16) return GF_FALSE; if (sps->rps[idx_rps].num_positive_pics > 16) return GF_FALSE; for (i = 0; i < sps->rps[idx_rps].num_negative_pics; i++) { u32 delta_poc_s0_minus1 = gf_bs_read_ue_log_idx2(bs, "delta_poc_s0_minus1", idx_rps, i); poc = prev - delta_poc_s0_minus1 - 1; prev = poc; sps->rps[idx_rps].delta_poc[i] = poc; gf_bs_read_int_log_idx2(bs, 1, "delta_poc_s0_minus1", idx_rps, i); } for (i = 0; i < sps->rps[idx_rps].num_positive_pics; i++) { u32 delta_poc_s1_minus1 = gf_bs_read_ue_log_idx2(bs, "delta_poc_s1_minus1" , idx_rps, i); poc = prev + delta_poc_s1_minus1 + 1; prev = poc; sps->rps[idx_rps].delta_poc[i] = poc; gf_bs_read_int_log_idx2(bs, 1, "used_by_curr_pic_s1_flag", idx_rps, i); } } return GF_TRUE; } void hevc_pred_weight_table(GF_BitStream *bs, HEVCState *hevc, HEVCSliceInfo *si, HEVC_PPS *pps, HEVC_SPS *sps, u32 num_ref_idx_l0_active, u32 num_ref_idx_l1_active) { u32 i, num_ref_idx; Bool first_pass = GF_TRUE; u8 luma_weights[20], chroma_weights[20]; u32 ChromaArrayType = sps->separate_colour_plane_flag ? 
0 : sps->chroma_format_idc; num_ref_idx = num_ref_idx_l0_active; gf_bs_read_ue_log(bs, "luma_log2_weight_denom"); if (ChromaArrayType != 0) gf_bs_read_se_log(bs, "delta_chroma_log2_weight_denom"); parse_weights: for (i = 0; i < num_ref_idx; i++) { luma_weights[i] = gf_bs_read_int_log_idx(bs, 1, "luma_weights", i); //infered to be 0 if not present chroma_weights[i] = 0; } if (ChromaArrayType != 0) { for (i = 0; i < num_ref_idx; i++) { chroma_weights[i] = gf_bs_read_int_log_idx(bs, 1, "chroma_weights", i); } } for (i = 0; i < num_ref_idx; i++) { if (luma_weights[i]) { gf_bs_read_se_log_idx(bs, "delta_luma_weight_l0", i); gf_bs_read_se_log_idx(bs, "luma_offset_l0", i); } if (chroma_weights[i]) { gf_bs_read_se_log_idx(bs, "delta_chroma_weight_l0_0", i); gf_bs_read_se_log_idx(bs, "delta_chroma_offset_l0_0", i); gf_bs_read_se_log_idx(bs, "delta_chroma_weight_l0_1", i); gf_bs_read_se_log_idx(bs, "delta_chroma_offset_l0_1", i); } } if (si->slice_type == GF_HEVC_SLICE_TYPE_B) { if (!first_pass) return; first_pass = GF_FALSE; num_ref_idx = num_ref_idx_l1_active; goto parse_weights; } } static Bool ref_pic_lists_modification(GF_BitStream *bs, u32 slice_type, u32 num_ref_idx_l0_active, u32 num_ref_idx_l1_active) { //u32 i; Bool ref_pic_list_modification_flag_l0 = gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l0"); if (ref_pic_list_modification_flag_l0) { /*for (i=0; i<num_ref_idx_l0_active; i++) { list_entry_l0[i] = *//*gf_bs_read_int(bs, (u32)ceil(log(getNumPicTotalCurr())/log(2))); }*/ return GF_FALSE; } if (slice_type == GF_HEVC_SLICE_TYPE_B) { Bool ref_pic_list_modification_flag_l1 = gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l1"); if (ref_pic_list_modification_flag_l1) { /*for (i=0; i<num_ref_idx_l1_active; i++) { list_entry_l1[i] = *//*gf_bs_read_int(bs, (u32)ceil(log(getNumPicTotalCurr()) / log(2))); }*/ return GF_FALSE; } } return GF_TRUE; } static s32 hevc_parse_slice_segment(GF_BitStream *bs, HEVCState *hevc, HEVCSliceInfo *si) { u32 i, j; u32 num_ref_idx_l0_active = 0, num_ref_idx_l1_active = 0; HEVC_PPS *pps; HEVC_SPS *sps; s32 pps_id; Bool RapPicFlag = GF_FALSE; Bool IDRPicFlag = GF_FALSE; si->first_slice_segment_in_pic_flag = gf_bs_read_int_log(bs, 1, "first_slice_segment_in_pic_flag"); switch (si->nal_unit_type) { case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: IDRPicFlag = GF_TRUE; RapPicFlag = GF_TRUE; break; case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: case GF_HEVC_NALU_SLICE_CRA: RapPicFlag = GF_TRUE; break; } if (RapPicFlag) { gf_bs_read_int_log(bs, 1, "no_output_of_prior_pics_flag"); } pps_id = gf_bs_read_ue_log(bs, "pps_id"); if ((pps_id<0) || (pps_id >= 64)) return -1; pps = &hevc->pps[pps_id]; sps = &hevc->sps[pps->sps_id]; si->sps = sps; si->pps = pps; if (!si->first_slice_segment_in_pic_flag && pps->dependent_slice_segments_enabled_flag) { si->dependent_slice_segment_flag = gf_bs_read_int_log(bs, 1, "dependent_slice_segment_flag"); } else { si->dependent_slice_segment_flag = GF_FALSE; } if (!si->first_slice_segment_in_pic_flag) { si->slice_segment_address = gf_bs_read_int_log(bs, sps->bitsSliceSegmentAddress, "slice_segment_address"); } else { si->slice_segment_address = 0; } if (!si->dependent_slice_segment_flag) { Bool deblocking_filter_override_flag = 0; Bool slice_temporal_mvp_enabled_flag = 0; Bool slice_sao_luma_flag = 0; Bool slice_sao_chroma_flag = 0; Bool slice_deblocking_filter_disabled_flag = 0; //"slice_reserved_undetermined_flag[]" gf_bs_read_int_log(bs, 
pps->num_extra_slice_header_bits, "slice_reserved_undetermined_flag"); si->slice_type = gf_bs_read_ue_log(bs, "slice_type"); if (pps->output_flag_present_flag) gf_bs_read_int_log(bs, 1, "pic_output_flag"); if (sps->separate_colour_plane_flag == 1) gf_bs_read_int_log(bs, 2, "colour_plane_id"); if (IDRPicFlag) { si->poc_lsb = 0; //if not asked to parse full header, abort since we know the poc if (!hevc->full_slice_header_parse) return 0; } else { si->poc_lsb = gf_bs_read_int_log(bs, sps->log2_max_pic_order_cnt_lsb, "poc_lsb"); //if not asked to parse full header, abort once we have the poc if (!hevc->full_slice_header_parse) return 0; if (gf_bs_read_int_log(bs, 1, "short_term_ref_pic_set_sps_flag") == 0) { Bool ret = hevc_parse_short_term_ref_pic_set(bs, sps, sps->num_short_term_ref_pic_sets); if (!ret) return -1; } else if (sps->num_short_term_ref_pic_sets > 1) { u32 numbits = 0; while ((u32)(1 << numbits) < sps->num_short_term_ref_pic_sets) numbits++; if (numbits > 0) gf_bs_read_int_log(bs, numbits, "short_term_ref_pic_set_idx"); /*else short_term_ref_pic_set_idx = 0;*/ } if (sps->long_term_ref_pics_present_flag) { u8 DeltaPocMsbCycleLt[32]; u32 num_long_term_sps = 0; u32 num_long_term_pics = 0; memset(DeltaPocMsbCycleLt, 0, sizeof(u8) * 32); if (sps->num_long_term_ref_pic_sps > 0) { num_long_term_sps = gf_bs_read_ue_log(bs, "num_long_term_sps"); } num_long_term_pics = gf_bs_read_ue_log(bs, "num_long_term_pics"); for (i = 0; i < num_long_term_sps + num_long_term_pics; i++) { if (i < num_long_term_sps) { if (sps->num_long_term_ref_pic_sps > 1) gf_bs_read_int_log_idx(bs, gf_get_bit_size(sps->num_long_term_ref_pic_sps), "lt_idx_sps", i); } else { gf_bs_read_int_log_idx(bs, sps->log2_max_pic_order_cnt_lsb, "PocLsbLt", i); gf_bs_read_int_log_idx(bs, 1, "UsedByCurrPicLt", i); } if (gf_bs_read_int_log_idx(bs, 1, "delta_poc_msb_present_flag", i)) { if (i == 0 || i == num_long_term_sps) DeltaPocMsbCycleLt[i] = gf_bs_read_ue_log_idx(bs, "DeltaPocMsbCycleLt", i); else DeltaPocMsbCycleLt[i] = gf_bs_read_ue_log_idx(bs, "DeltaPocMsbCycleLt", i) + DeltaPocMsbCycleLt[i - 1]; } } } if (sps->temporal_mvp_enable_flag) slice_temporal_mvp_enabled_flag = gf_bs_read_int_log(bs, 1, "slice_temporal_mvp_enabled_flag"); } if (sps->sample_adaptive_offset_enabled_flag) { u32 ChromaArrayType = sps->separate_colour_plane_flag ? 
0 : sps->chroma_format_idc; slice_sao_luma_flag = gf_bs_read_int_log(bs, 1, "slice_sao_luma_flag"); if (ChromaArrayType != 0) slice_sao_chroma_flag = gf_bs_read_int_log(bs, 1, "slice_sao_chroma_flag"); } if (si->slice_type == GF_HEVC_SLICE_TYPE_P || si->slice_type == GF_HEVC_SLICE_TYPE_B) { //u32 NumPocTotalCurr; num_ref_idx_l0_active = pps->num_ref_idx_l0_default_active; num_ref_idx_l1_active = 0; if (si->slice_type == GF_HEVC_SLICE_TYPE_B) num_ref_idx_l1_active = pps->num_ref_idx_l1_default_active; if (gf_bs_read_int_log(bs, 1, "num_ref_idx_active_override_flag")) { num_ref_idx_l0_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l0_active"); if (si->slice_type == GF_HEVC_SLICE_TYPE_B) num_ref_idx_l1_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l1_active"); } if (pps->lists_modification_present_flag /*TODO: && NumPicTotalCurr > 1*/) { if (!ref_pic_lists_modification(bs, si->slice_type, num_ref_idx_l0_active, num_ref_idx_l1_active)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[hevc] ref_pic_lists_modification( ) not implemented\n")); return -1; } } if (si->slice_type == GF_HEVC_SLICE_TYPE_B) gf_bs_read_int_log(bs, 1, "mvd_l1_zero_flag"); if (pps->cabac_init_present_flag) gf_bs_read_int_log(bs, 1, "cabac_init_flag"); if (slice_temporal_mvp_enabled_flag) { // When collocated_from_l0_flag is not present, it is inferred to be equal to 1. Bool collocated_from_l0_flag = 1; if (si->slice_type == GF_HEVC_SLICE_TYPE_B) collocated_from_l0_flag = gf_bs_read_int_log(bs, 1, "collocated_from_l0_flag"); if ((collocated_from_l0_flag && (num_ref_idx_l0_active > 1)) || (!collocated_from_l0_flag && (num_ref_idx_l1_active > 1)) ) { gf_bs_read_ue_log(bs, "collocated_ref_idx"); } } if ((pps->weighted_pred_flag && si->slice_type == GF_HEVC_SLICE_TYPE_P) || (pps->weighted_bipred_flag && si->slice_type == GF_HEVC_SLICE_TYPE_B) ) { hevc_pred_weight_table(bs, hevc, si, pps, sps, num_ref_idx_l0_active, num_ref_idx_l1_active); } gf_bs_read_ue_log(bs, "five_minus_max_num_merge_cand"); } si->slice_qp_delta_start_bits = (s32) (gf_bs_get_position(bs) - 1) * 8 + gf_bs_get_bit_position(bs); si->slice_qp_delta = gf_bs_read_se_log(bs, "slice_qp_delta"); if (pps->slice_chroma_qp_offsets_present_flag) { gf_bs_read_se_log(bs, "slice_cb_qp_offset"); gf_bs_read_se_log(bs, "slice_cr_qp_offset"); } if (pps->deblocking_filter_override_enabled_flag) { deblocking_filter_override_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_override_flag"); } if (deblocking_filter_override_flag) { slice_deblocking_filter_disabled_flag = gf_bs_read_int_log(bs, 1, "slice_deblocking_filter_disabled_flag"); if (!slice_deblocking_filter_disabled_flag) { gf_bs_read_se_log(bs, "slice_beta_offset_div2"); gf_bs_read_se_log(bs, "slice_tc_offset_div2"); } } if (pps->loop_filter_across_slices_enabled_flag && (slice_sao_luma_flag || slice_sao_chroma_flag || !slice_deblocking_filter_disabled_flag) ) { gf_bs_read_int_log(bs, 1, "slice_loop_filter_across_slices_enabled_flag"); } } //dependent slice segment else { //if not asked to parse full header, abort if (!hevc->full_slice_header_parse) return 0; } si->entry_point_start_bits = ((u32)gf_bs_get_position(bs) - 1) * 8 + gf_bs_get_bit_position(bs); if (pps->tiles_enabled_flag || pps->entropy_coding_sync_enabled_flag) { u32 num_entry_point_offsets = gf_bs_read_ue_log(bs, "num_entry_point_offsets"); if (num_entry_point_offsets > 0) { u32 offset = gf_bs_read_ue_log(bs, "offset") + 1; u32 segments = offset >> 4; s32 remain = (offset & 15); for (i = 0; i < num_entry_point_offsets; i++) { //u32 res = 0; for (j = 0; j < 
segments; j++) { //res <<= 16; /*res +=*/ gf_bs_read_int(bs, 16); } if (remain) { //res <<= remain; /* res += */ gf_bs_read_int(bs, remain); } // entry_point_offset = val + 1; // +1; // +1 to get the size } } } if (pps->slice_segment_header_extension_present_flag) { u32 size_ext = gf_bs_read_ue_log(bs, "size_ext"); while (size_ext) { gf_bs_read_int(bs, 8); size_ext--; } } si->header_size_bits = (gf_bs_get_position(bs) - 1) * 8 + gf_bs_get_bit_position(bs); // av_parser.c modified on 16 jan. 2019 if (gf_bs_read_int_log(bs, 1, "byte_align") == 0) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("Error parsing slice header: byte_align not found at end of header !\n")); } gf_bs_align(bs); si->payload_start_offset = (s32)gf_bs_get_position(bs); return 0; } static void gf_hevc_vvc_parse_sei(char *buffer, u32 nal_size, HEVCState *hevc, VVCState *vvc) { u32 ptype, psize, hdr; u64 start; GF_BitStream *bs; hdr = buffer[0]; if (((hdr & 0x7e) >> 1) != GF_HEVC_NALU_SEI_PREFIX) return; bs = gf_bs_new(buffer, nal_size, GF_BITSTREAM_READ); gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); gf_bs_read_int(bs, 16); /*parse SEI*/ while (gf_bs_available(bs)) { u32 consumed; ptype = 0; while (gf_bs_peek_bits(bs, 8, 0)==0xFF) { gf_bs_read_int(bs, 8); ptype += 255; } ptype += gf_bs_read_int(bs, 8); psize = 0; while (gf_bs_peek_bits(bs, 8, 0)==0xFF) { gf_bs_read_int(bs, 8); psize += 255; } psize += gf_bs_read_int(bs, 8); start = gf_bs_get_position(bs); if (start+psize >= nal_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[%s] SEI user message type %d size error (%d but %d remain), skipping SEI message\n", hevc ? "HEVC" : "VVC", ptype, psize, nal_size-start)); break; } switch (ptype) { case 4: /*user registered ITU-T T35*/ if (hevc) { avc_parse_itu_t_t35_sei(bs, &hevc->sei.dovi); } break; default: break; } gf_bs_align(bs); consumed = (u32) (gf_bs_get_position(bs) - start); psize-=consumed; gf_bs_skip_bytes(bs, psize); if (gf_bs_available(bs) <= 2) break; } gf_bs_del(bs); } void gf_hevc_parse_sei(char *buffer, u32 nal_size, HEVCState *hevc) { gf_hevc_vvc_parse_sei(buffer, nal_size, hevc, NULL); } static void hevc_compute_poc(HEVCSliceInfo *si) { u32 max_poc_lsb = 1 << (si->sps->log2_max_pic_order_cnt_lsb); /*POC reset for IDR frames, NOT for CRA*/ switch (si->nal_unit_type) { case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: si->poc_lsb_prev = 0; si->poc_msb_prev = 0; break; } if ((si->poc_lsb < si->poc_lsb_prev) && (si->poc_lsb_prev - si->poc_lsb >= max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev + max_poc_lsb; else if ((si->poc_lsb > si->poc_lsb_prev) && (si->poc_lsb - si->poc_lsb_prev > max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev - max_poc_lsb; else si->poc_msb = si->poc_msb_prev; switch (si->nal_unit_type) { case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: si->poc_msb = 0; break; } si->poc = si->poc_msb + si->poc_lsb; } static Bool hevc_parse_nal_header(GF_BitStream *bs, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { u32 val; val = gf_bs_read_int_log(bs, 1, "forbidden_zero"); if (val) return GF_FALSE; val = gf_bs_read_int_log(bs, 6, "nuh_type"); if (nal_unit_type) *nal_unit_type = val; val = gf_bs_read_int_log(bs, 6, "layerID"); if (layer_id) *layer_id = val; val = gf_bs_read_int_log(bs, 3, "temporalID"); if (!val) return GF_FALSE; val -= 1; if (temporal_id) *temporal_id = val; return GF_TRUE; } void hevc_profile_tier_level(GF_BitStream *bs, Bool ProfilePresentFlag, u8 MaxNumSubLayersMinus1, HEVC_ProfileTierLevel *ptl, u32 idx) { u32 i; 
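	/* Illustrative sketch, not part of the original source: the general
	 * profile_tier_level() payload read just below starts with one byte that
	 * packs profile_space (2 bits), tier_flag (1 bit) and profile_idc (5 bits),
	 * followed by the 32-bit compatibility flags, the constraint flags and the
	 * 8-bit level_idc. Assuming a hypothetical raw first byte `sk_ptl_byte0`,
	 * the three fields unpack as shown (disabled, for illustration only): */
#if 0
	u8 sk_ptl_byte0      = 0x01;                         /* hypothetical value        */
	u8 sk_profile_space  = (sk_ptl_byte0 >> 6) & 0x3;    /* 2 bits -> 0               */
	u8 sk_tier_flag      = (sk_ptl_byte0 >> 5) & 0x1;    /* 1 bit  -> 0 (Main tier)   */
	u8 sk_profile_idc    =  sk_ptl_byte0 & 0x1F;         /* 5 bits -> 1 (Main profile)*/
#endif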
if (ProfilePresentFlag) { ptl->profile_space = gf_bs_read_int_log_idx(bs, 2, "profile_space", idx); ptl->tier_flag = gf_bs_read_int_log_idx(bs, 1, "tier_flag", idx); ptl->profile_idc = gf_bs_read_int_log_idx(bs, 5, "profile_idc", idx); ptl->profile_compatibility_flag = gf_bs_read_int_log_idx(bs, 32, "profile_compatibility_flag", idx); ptl->general_progressive_source_flag = gf_bs_read_int_log_idx(bs, 1, "general_progressive_source_flag", idx); ptl->general_interlaced_source_flag = gf_bs_read_int_log_idx(bs, 1, "general_interlaced_source_flag", idx); ptl->general_non_packed_constraint_flag = gf_bs_read_int_log_idx(bs, 1, "general_non_packed_constraint_flag", idx); ptl->general_frame_only_constraint_flag = gf_bs_read_int_log_idx(bs, 1, "general_frame_only_constraint_flag", idx); ptl->general_reserved_44bits = gf_bs_read_long_int(bs, 44); } ptl->level_idc = gf_bs_read_int_log(bs, 8, "level_idc"); for (i = 0; i < MaxNumSubLayersMinus1; i++) { ptl->sub_ptl[i].profile_present_flag = gf_bs_read_int_log_idx2(bs, 1, "profile_present_flag", idx, i); ptl->sub_ptl[i].level_present_flag = gf_bs_read_int_log_idx2(bs, 1, "level_present_flag", idx, i); } if (MaxNumSubLayersMinus1 > 0) { for (i = MaxNumSubLayersMinus1; i < 8; i++) { /*reserved_zero_2bits*/gf_bs_read_int(bs, 2); } } for (i = 0; i < MaxNumSubLayersMinus1; i++) { if (ptl->sub_ptl[i].profile_present_flag) { ptl->sub_ptl[i].profile_space = gf_bs_read_int_log_idx2(bs, 2, "sublayer_profile_space", idx, i); ptl->sub_ptl[i].tier_flag = gf_bs_read_int_log_idx2(bs, 1, "sublayer_tier_flag", idx, i); ptl->sub_ptl[i].profile_idc = gf_bs_read_int_log_idx2(bs, 5, "sublayer_profile_idc", idx, i); ptl->sub_ptl[i].profile_compatibility_flag = gf_bs_read_int_log_idx2(bs, 32, "sublayer_profile_compatibility_flag", idx, i); /*ptl->sub_ptl[i].progressive_source_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_progressive_source_flag", idx, i); /*ptl->sub_ptl[i].interlaced_source_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_interlaced_source_flag", idx, i); /*ptl->sub_ptl[i].non_packed_constraint_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_non_packed_constraint_flag", idx, i); /*ptl->sub_ptl[i].frame_only_constraint_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_frame_only_constraint_flag", idx, i); /*ptl->sub_ptl[i].reserved_44bits =*/ gf_bs_read_long_int(bs, 44); } if (ptl->sub_ptl[i].level_present_flag) ptl->sub_ptl[i].level_idc = gf_bs_read_int_log_idx2(bs, 8, "sublayer_level_idc", idx, i); } } static u32 scalability_type_to_idx(HEVC_VPS *vps, u32 scalability_type) { u32 idx = 0, type; for (type = 0; type < scalability_type; type++) { idx += (vps->scalability_mask[type] ? 
1 : 0); } return idx; } #define LHVC_VIEW_ORDER_INDEX 1 #define LHVC_SCALABILITY_INDEX 2 static u32 lhvc_get_scalability_id(HEVC_VPS *vps, u32 layer_id_in_vps, u32 scalability_type) { u32 idx; if (!vps->scalability_mask[scalability_type]) return 0; idx = scalability_type_to_idx(vps, scalability_type); return vps->dimension_id[layer_id_in_vps][idx]; } static u32 lhvc_get_view_index(HEVC_VPS *vps, u32 id) { return lhvc_get_scalability_id(vps, vps->layer_id_in_vps[id], LHVC_VIEW_ORDER_INDEX); } static u32 lhvc_get_num_views(HEVC_VPS *vps) { u32 numViews = 1, i; for (i = 0; i < vps->max_layers; i++) { u32 layer_id = vps->layer_id_in_nuh[i]; if (i > 0 && (lhvc_get_view_index(vps, layer_id) != lhvc_get_scalability_id(vps, i - 1, LHVC_VIEW_ORDER_INDEX))) { numViews++; } } return numViews; } static void lhvc_parse_rep_format(HEVC_RepFormat *fmt, GF_BitStream *bs, u32 idx) { u8 chroma_bitdepth_present_flag; fmt->pic_width_luma_samples = gf_bs_read_int_log_idx(bs, 16, "pic_width_luma_samples", idx); fmt->pic_height_luma_samples = gf_bs_read_int_log_idx(bs, 16, "pic_height_luma_samples", idx); chroma_bitdepth_present_flag = gf_bs_read_int_log_idx(bs, 1, "chroma_bitdepth_present_flag", idx); if (chroma_bitdepth_present_flag) { fmt->chroma_format_idc = gf_bs_read_int_log_idx(bs, 2, "chroma_format_idc", idx); if (fmt->chroma_format_idc == 3) fmt->separate_colour_plane_flag = gf_bs_read_int_log_idx(bs, 1, "separate_colour_plane_flag", idx); fmt->bit_depth_luma = 8 + gf_bs_read_int_log_idx(bs, 4, "bit_depth_luma_minus8", idx); fmt->bit_depth_chroma = 8 + gf_bs_read_int_log_idx(bs, 4, "bit_depth_chroma_minus8", idx); } if (gf_bs_read_int_log_idx(bs, 1, "conformance_window_vps_flag", idx)) { gf_bs_read_ue_log_idx(bs, "conf_win_vps_left_offset", idx); gf_bs_read_ue_log_idx(bs, "conf_win_vps_right_offset", idx); gf_bs_read_ue_log_idx(bs, "conf_win_vps_top_offset", idx); gf_bs_read_ue_log_idx(bs, "conf_win_vps_bottom_offset", idx); } } static Bool hevc_parse_vps_extension(HEVC_VPS *vps, GF_BitStream *bs) { u8 splitting_flag, vps_nuh_layer_id_present_flag, view_id_len; u32 i, j, num_scalability_types, num_add_olss, num_add_layer_set, num_indepentdent_layers, nb_bits, default_output_layer_idc = 0; u8 dimension_id_len[16], dim_bit_offset[16]; u8 /*avc_base_layer_flag, */NumLayerSets, /*default_one_target_output_layer_flag, */rep_format_idx_present_flag, ols_ids_to_ls_idx; u8 layer_set_idx_for_ols_minus1[MAX_LHVC_LAYERS]; u8 nb_output_layers_in_output_layer_set[MAX_LHVC_LAYERS + 1]; u8 ols_highest_output_layer_id[MAX_LHVC_LAYERS + 1]; u32 k, d, r, p, iNuhLId, jNuhLId; u8 num_direct_ref_layers[64], num_pred_layers[64], num_layers_in_tree_partition[MAX_LHVC_LAYERS]; u8 dependency_flag[MAX_LHVC_LAYERS][MAX_LHVC_LAYERS], id_pred_layers[64][MAX_LHVC_LAYERS]; // u8 num_ref_layers[64]; // u8 tree_partition_layer_id[MAX_LHVC_LAYERS][MAX_LHVC_LAYERS]; // u8 id_ref_layers[64][MAX_LHVC_LAYERS]; // u8 id_direct_ref_layers[64][MAX_LHVC_LAYERS]; u8 layer_id_in_list_flag[64]; Bool OutputLayerFlag[MAX_LHVC_LAYERS][MAX_LHVC_LAYERS]; vps->vps_extension_found = 1; if ((vps->max_layers > 1) && vps->base_layer_internal_flag) hevc_profile_tier_level(bs, 0, vps->max_sub_layers - 1, &vps->ext_ptl[0], 0); splitting_flag = gf_bs_read_int_log(bs, 1, "splitting_flag"); num_scalability_types = 0; for (i = 0; i < 16; i++) { vps->scalability_mask[i] = gf_bs_read_int_log_idx(bs, 1, "scalability_mask", i); num_scalability_types += vps->scalability_mask[i]; } if (num_scalability_types >= 16) { num_scalability_types = 16; } dimension_id_len[0] = 0; 
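	/* Illustrative sketch, not part of the original source: when splitting_flag
	 * is set, the per-layer dimension_id values are not coded explicitly but are
	 * carved out of the 6-bit nuh_layer_id using the cumulative dim_bit_offset[]
	 * values computed below, i.e.
	 *   dimension_id[i][j] = (layer_id_in_nuh[i] & ((1 << dim_bit_offset[j+1]) - 1)) >> dim_bit_offset[j]
	 * With hypothetical offsets 2 and 5 and a layer id of 0x2A (101010b) this
	 * extracts bits 4..2 (disabled, for illustration only): */
#if 0
	u8 sk_layer_id = 0x2A;
	u8 sk_off_lo = 2, sk_off_hi = 5;
	u8 sk_dim_id = (u8)((sk_layer_id & ((1 << sk_off_hi) - 1)) >> sk_off_lo);  /* -> 0x2 */
#endif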
for (i = 0; i < (num_scalability_types - splitting_flag); i++) { dimension_id_len[i] = 1 + gf_bs_read_int_log_idx(bs, 3, "dimension_id_len_minus1", i); } if (splitting_flag) { for (i = 0; i < num_scalability_types; i++) { dim_bit_offset[i] = 0; for (j = 0; j < i; j++) dim_bit_offset[i] += dimension_id_len[j]; } dimension_id_len[num_scalability_types - 1] = 1 + (5 - dim_bit_offset[num_scalability_types - 1]); dim_bit_offset[num_scalability_types] = 6; } vps_nuh_layer_id_present_flag = gf_bs_read_int_log(bs, 1, "vps_nuh_layer_id_present_flag"); vps->layer_id_in_nuh[0] = 0; vps->layer_id_in_vps[0] = 0; for (i = 1; i < vps->max_layers; i++) { if (vps_nuh_layer_id_present_flag) { vps->layer_id_in_nuh[i] = gf_bs_read_int_log_idx(bs, 6, "layer_id_in_nuh", i); } else { vps->layer_id_in_nuh[i] = i; } vps->layer_id_in_vps[vps->layer_id_in_nuh[i]] = i; if (!splitting_flag) { for (j = 0; j < num_scalability_types; j++) { vps->dimension_id[i][j] = gf_bs_read_int_log_idx2(bs, dimension_id_len[j], "dimension_id", i, j); } } } if (splitting_flag) { for (i = 0; i < vps->max_layers; i++) for (j = 0; j < num_scalability_types; j++) vps->dimension_id[i][j] = ((vps->layer_id_in_nuh[i] & ((1 << dim_bit_offset[j + 1]) - 1)) >> dim_bit_offset[j]); } else { for (j = 0; j < num_scalability_types; j++) vps->dimension_id[0][j] = 0; } view_id_len = gf_bs_read_int_log(bs, 4, "view_id_len"); if (view_id_len > 0) { for (i = 0; i < lhvc_get_num_views(vps); i++) { gf_bs_read_int_log_idx(bs, view_id_len, "view_id_val", i); } } for (i = 1; i < vps->max_layers; i++) { for (j = 0; j < i; j++) { vps->direct_dependency_flag[i][j] = gf_bs_read_int_log_idx(bs, 1, "direct_dependency_flag", i); } } //we do the test on MAX_LHVC_LAYERS and break in the loop to avoid a wrong GCC 4.8 warning on array bounds for (i = 0; i < MAX_LHVC_LAYERS; i++) { if (i >= vps->max_layers) break; for (j = 0; j < vps->max_layers; j++) { dependency_flag[i][j] = vps->direct_dependency_flag[i][j]; for (k = 0; k < i; k++) if (vps->direct_dependency_flag[i][k] && vps->direct_dependency_flag[k][j]) dependency_flag[i][j] = 1; } } for (i = 0; i < vps->max_layers; i++) { iNuhLId = vps->layer_id_in_nuh[i]; d = r = p = 0; for (j = 0; j < vps->max_layers; j++) { jNuhLId = vps->layer_id_in_nuh[j]; if (vps->direct_dependency_flag[i][j]) { // id_direct_ref_layers[iNuhLId][d] = jNuhLId; d++; } if (dependency_flag[i][j]) { // id_ref_layers[iNuhLId][r] = jNuhLId; r++; } if (dependency_flag[j][i]) id_pred_layers[iNuhLId][p++] = jNuhLId; } num_direct_ref_layers[iNuhLId] = d; // num_ref_layers[iNuhLId] = r; num_pred_layers[iNuhLId] = p; } memset(layer_id_in_list_flag, 0, 64 * sizeof(u8)); k = 0; //num_indepentdent_layers for (i = 0; i < vps->max_layers; i++) { iNuhLId = vps->layer_id_in_nuh[i]; if (!num_direct_ref_layers[iNuhLId]) { u32 h = 1; //tree_partition_layer_id[k][0] = iNuhLId; for (j = 0; j < num_pred_layers[iNuhLId]; j++) { u32 predLId = id_pred_layers[iNuhLId][j]; if (!layer_id_in_list_flag[predLId]) { //tree_partition_layer_id[k][h++] = predLId; layer_id_in_list_flag[predLId] = 1; } } num_layers_in_tree_partition[k++] = h; } } num_indepentdent_layers = k; num_add_layer_set = 0; if (num_indepentdent_layers > 1) num_add_layer_set = gf_bs_read_ue_log(bs, "num_add_layer_set"); for (i = 0; i < num_add_layer_set; i++) for (j = 1; j < num_indepentdent_layers; j++) { nb_bits = 1; while ((1 << nb_bits) < (num_layers_in_tree_partition[j] + 1)) nb_bits++; gf_bs_read_int_log_idx2(bs, nb_bits, "highest_layer_idx_plus1", i, j); } if (gf_bs_read_int_log(bs, 1, 
"vps_sub_layers_max_minus1_present_flag")) { for (i = 0; i < vps->max_layers; i++) { gf_bs_read_int_log_idx(bs, 3, "sub_layers_vps_max_minus1", i); } } if (gf_bs_read_int_log(bs, 1, "max_tid_ref_present_flag")) { for (i = 0; i < (vps->max_layers - 1); i++) { for (j = i + 1; j < vps->max_layers; j++) { if (vps->direct_dependency_flag[j][i]) gf_bs_read_int_log_idx2(bs, 3, "max_tid_il_ref_pics_plus1", i, j); } } } gf_bs_read_int_log(bs, 1, "default_ref_layers_active_flag"); vps->num_profile_tier_level = 1 + gf_bs_read_ue_log(bs, "num_profile_tier_level"); if (vps->num_profile_tier_level > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Wrong number of PTLs in VPS %d\n", vps->num_profile_tier_level)); vps->num_profile_tier_level = 1; return GF_FALSE; } for (i = vps->base_layer_internal_flag ? 2 : 1; i < vps->num_profile_tier_level; i++) { Bool vps_profile_present_flag = gf_bs_read_int_log_idx(bs, 1, "vps_profile_present_flag", i); hevc_profile_tier_level(bs, vps_profile_present_flag, vps->max_sub_layers - 1, &vps->ext_ptl[i - 1], i-1); } NumLayerSets = vps->num_layer_sets + num_add_layer_set; num_add_olss = 0; if (NumLayerSets > 1) { num_add_olss = gf_bs_read_ue_log(bs, "num_add_olss"); default_output_layer_idc = gf_bs_read_int_log(bs, 2, "default_output_layer_idc"); default_output_layer_idc = default_output_layer_idc < 2 ? default_output_layer_idc : 2; } vps->num_output_layer_sets = num_add_olss + NumLayerSets; if (vps->num_output_layer_sets > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Wrong number of output layer sets in VPS %d, max %d supported\n", vps->num_output_layer_sets, MAX_LHVC_LAYERS)); vps->num_output_layer_sets = 1; return GF_FALSE; } layer_set_idx_for_ols_minus1[0] = 1; vps->output_layer_flag[0][0] = 1; for (i = 0; i < vps->num_output_layer_sets; i++) { if ((NumLayerSets > 2) && (i >= NumLayerSets)) { nb_bits = 1; while ((1 << nb_bits) < (NumLayerSets - 1)) nb_bits++; layer_set_idx_for_ols_minus1[i] = gf_bs_read_int_log_idx(bs, nb_bits, "layer_set_idx_for_ols_minus1", i); } else layer_set_idx_for_ols_minus1[i] = 0; ols_ids_to_ls_idx = i < NumLayerSets ? 
i : layer_set_idx_for_ols_minus1[i] + 1; if ((i > (vps->num_layer_sets - 1)) || (default_output_layer_idc == 2)) { for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) vps->output_layer_flag[i][j] = gf_bs_read_int_log_idx2(bs, 1, "output_layer_flag", i, j); } if ((default_output_layer_idc == 0) || (default_output_layer_idc == 1)) { for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { if ((default_output_layer_idc == 0) || (vps->LayerSetLayerIdList[i][j] == vps->LayerSetLayerIdListMax[i])) OutputLayerFlag[i][j] = GF_TRUE; else OutputLayerFlag[i][j] = GF_FALSE; } } for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { if (OutputLayerFlag[i][j]) { u32 curLayerID; vps->necessary_layers_flag[i][j] = GF_TRUE; curLayerID = vps->LayerSetLayerIdList[i][j]; for (k = 0; k < j; k++) { u32 refLayerId = vps->LayerSetLayerIdList[i][k]; if (dependency_flag[vps->layer_id_in_vps[curLayerID]][vps->layer_id_in_vps[refLayerId]]) vps->necessary_layers_flag[i][k] = GF_TRUE; } } } vps->num_necessary_layers[i] = 0; for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { if (vps->necessary_layers_flag[i][j]) vps->num_necessary_layers[i] += 1; } if (i == 0) { if (vps->base_layer_internal_flag) { if (vps->max_layers > 1) vps->profile_tier_level_idx[0][0] = 1; else vps->profile_tier_level_idx[0][0] = 0; } continue; } nb_bits = 1; while ((u32)(1 << nb_bits) < vps->num_profile_tier_level) nb_bits++; for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) if (vps->necessary_layers_flag[i][j] && vps->num_profile_tier_level) vps->profile_tier_level_idx[i][j] = gf_bs_read_int_log_idx2(bs, nb_bits, "profile_tier_level_idx", i, j); else vps->profile_tier_level_idx[i][j] = 0; nb_output_layers_in_output_layer_set[i] = 0; for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { nb_output_layers_in_output_layer_set[i] += OutputLayerFlag[i][j]; if (OutputLayerFlag[i][j]) { ols_highest_output_layer_id[i] = vps->LayerSetLayerIdList[ols_ids_to_ls_idx][j]; } } if (nb_output_layers_in_output_layer_set[i] == 1 && ols_highest_output_layer_id[i] > 0) vps->alt_output_layer_flag[i] = gf_bs_read_int_log_idx(bs, 1, "alt_output_layer_flag", i); } vps->num_rep_formats = 1 + gf_bs_read_ue_log(bs, "num_rep_formats_minus1"); if (vps->num_rep_formats > 16) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Wrong number of rep formats in VPS %d\n", vps->num_rep_formats)); vps->num_rep_formats = 0; return GF_FALSE; } for (i = 0; i < vps->num_rep_formats; i++) { lhvc_parse_rep_format(&vps->rep_formats[i], bs, i); } if (vps->num_rep_formats > 1) rep_format_idx_present_flag = gf_bs_read_int_log(bs, 1, "rep_format_idx_present_flag"); else rep_format_idx_present_flag = 0; vps->rep_format_idx[0] = 0; nb_bits = 1; while ((u32)(1 << nb_bits) < vps->num_rep_formats) nb_bits++; for (i = vps->base_layer_internal_flag ? 1 : 0; i < vps->max_layers; i++) { if (rep_format_idx_present_flag) { vps->rep_format_idx[i] = gf_bs_read_int_log_idx(bs, nb_bits, "rep_format_idx", i); } else { vps->rep_format_idx[i] = i < vps->num_rep_formats - 1 ? i : vps->num_rep_formats - 1; } } //TODO - we don't use the rest ... 
return GF_TRUE; } static void sub_layer_hrd_parameters(GF_BitStream *bs, int subLayerId, u32 cpb_cnt, Bool sub_pic_hrd_params_present_flag, u32 idx1, u32 idx2) { u32 i; if (!gf_bs_available(bs)) return; for (i = 0; i <= cpb_cnt; i++) { gf_bs_read_ue_log_idx3(bs, "bit_rate_value_minus1", idx1, idx2, i); gf_bs_read_ue_log_idx3(bs, "cpb_size_value_minus1", idx1, idx2, i); if (sub_pic_hrd_params_present_flag) { gf_bs_read_ue_log_idx3(bs, "cpb_size_du_value_minus1", idx1, idx2, i); gf_bs_read_ue_log_idx3(bs, "bit_rate_du_value_minus1", idx1, idx2, i); } gf_bs_read_int_log_idx3(bs, 1, "cbr_flag", idx1, idx2, i); } } static void hevc_parse_hrd_parameters(GF_BitStream *bs, Bool commonInfPresentFlag, int maxNumSubLayersMinus1, u32 idx) { int i; Bool nal_hrd_parameters_present_flag = GF_FALSE; Bool vcl_hrd_parameters_present_flag = GF_FALSE; Bool sub_pic_hrd_params_present_flag = GF_FALSE; if (commonInfPresentFlag) { nal_hrd_parameters_present_flag = gf_bs_read_int_log_idx(bs, 1, "nal_hrd_parameters_present_flag", idx); vcl_hrd_parameters_present_flag = gf_bs_read_int_log_idx(bs, 1, "vcl_hrd_parameters_present_flag", idx); if (nal_hrd_parameters_present_flag || vcl_hrd_parameters_present_flag) { sub_pic_hrd_params_present_flag = gf_bs_read_int_log_idx(bs, 1, "sub_pic_hrd_params_present_flag", idx); if (sub_pic_hrd_params_present_flag) { gf_bs_read_int_log_idx(bs, 8, "tick_divisor_minus2", idx); gf_bs_read_int_log_idx(bs, 5, "du_cpb_removal_delay_increment_length_minus1", idx); gf_bs_read_int_log_idx(bs, 1, "sub_pic_cpb_params_in_pic_timing_sei_flag", idx); gf_bs_read_int_log_idx(bs, 5, "dpb_output_delay_du_length_minus1", idx); } gf_bs_read_int_log_idx(bs, 4, "bit_rate_scale", idx); gf_bs_read_int_log_idx(bs, 4, "cpb_size_scale", idx); if (sub_pic_hrd_params_present_flag) { gf_bs_read_int_log_idx(bs, 4, "cpb_size_du_scale", idx); } gf_bs_read_int_log_idx(bs, 5, "initial_cpb_removal_delay_length_minus1", idx); gf_bs_read_int_log_idx(bs, 5, "au_cpb_removal_delay_length_minus1", idx); gf_bs_read_int_log_idx(bs, 5, "dpb_output_delay_length_minus1", idx); } } for (i = 0; i <= maxNumSubLayersMinus1; i++) { Bool fixed_pic_rate_general_flag_i = gf_bs_read_int_log_idx(bs, 1, "fixed_pic_rate_general_flag", idx); Bool fixed_pic_rate_within_cvs_flag_i = GF_TRUE; Bool low_delay_hrd_flag_i = GF_FALSE; u32 cpb_cnt_minus1_i = 0; if (!fixed_pic_rate_general_flag_i) { fixed_pic_rate_within_cvs_flag_i = gf_bs_read_int_log_idx(bs, 1, "fixed_pic_rate_within_cvs_flag", idx); } if (fixed_pic_rate_within_cvs_flag_i) gf_bs_read_ue_log_idx(bs, "elemental_duration_in_tc_minus1", idx); else low_delay_hrd_flag_i = gf_bs_read_int_log_idx(bs, 1, "low_delay_hrd_flag", idx); if (!low_delay_hrd_flag_i) { cpb_cnt_minus1_i = gf_bs_read_ue_log_idx(bs, "cpb_cnt_minus1", idx); } if (nal_hrd_parameters_present_flag) { sub_layer_hrd_parameters(bs, i, cpb_cnt_minus1_i, sub_pic_hrd_params_present_flag, idx, i); } if (vcl_hrd_parameters_present_flag) { sub_layer_hrd_parameters(bs, i, cpb_cnt_minus1_i, sub_pic_hrd_params_present_flag, idx, i); } } } static s32 gf_hevc_read_vps_bs_internal(GF_BitStream *bs, HEVCState *hevc, Bool stop_at_vps_ext) { u8 vps_sub_layer_ordering_info_present_flag, vps_extension_flag; u32 i, j; s32 vps_id; HEVC_VPS *vps; u8 layer_id_included_flag[MAX_LHVC_LAYERS][64]; //nalu header already parsed vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if ((vps_id<0) || (vps_id >= 16)) return -1; vps = &hevc->vps[vps_id]; vps->bit_pos_vps_extensions = -1; if (!vps->state) { vps->id = vps_id; vps->state = 1; } 
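/* the remainder of the VPS RBSP is parsed below in spec order: base-layer flags, layer/sub-layer counts, profile_tier_level, per-sub-layer DPB ordering info, layer sets, optional timing/HRD info, then the optional VPS extension (multi-layer data) */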
vps->base_layer_internal_flag = gf_bs_read_int_log(bs, 1, "base_layer_internal_flag"); vps->base_layer_available_flag = gf_bs_read_int_log(bs, 1, "base_layer_available_flag"); vps->max_layers = 1 + gf_bs_read_int_log(bs, 6, "max_layers_minus1"); if (vps->max_layers > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] sorry, %d layers in VPS but only %d supported\n", vps->max_layers, MAX_LHVC_LAYERS)); return -1; } vps->max_sub_layers = gf_bs_read_int_log(bs, 3, "max_sub_layers_minus1") + 1; vps->temporal_id_nesting = gf_bs_read_int_log(bs, 1, "temporal_id_nesting"); gf_bs_read_int_log(bs, 16, "vps_reserved_ffff_16bits"); hevc_profile_tier_level(bs, 1, vps->max_sub_layers - 1, &vps->ptl, 0); vps_sub_layer_ordering_info_present_flag = gf_bs_read_int_log(bs, 1, "vps_sub_layer_ordering_info_present_flag"); for (i = (vps_sub_layer_ordering_info_present_flag ? 0 : vps->max_sub_layers - 1); i < vps->max_sub_layers; i++) { gf_bs_read_ue_log_idx(bs, "vps_max_dec_pic_buffering_minus1", i); gf_bs_read_ue_log_idx(bs, "vps_max_num_reorder_pics", i); gf_bs_read_ue_log_idx(bs, "vps_max_latency_increase_plus1", i); } vps->max_layer_id = gf_bs_read_int_log(bs, 6, "max_layer_id"); if (vps->max_layer_id > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] VPS max layer ID %u but GPAC only supports %u\n", vps->max_layer_id, MAX_LHVC_LAYERS)); return -1; } vps->num_layer_sets = gf_bs_read_ue_log(bs, "num_layer_sets_minus1") + 1; if (vps->num_layer_sets > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Wrong number of layer sets in VPS %d\n", vps->num_layer_sets)); return -1; } for (i = 1; i < vps->num_layer_sets; i++) { for (j = 0; j <= vps->max_layer_id; j++) { layer_id_included_flag[i][j] = gf_bs_read_int_log_idx2(bs, 1, "layer_id_included_flag", i, j); } } vps->num_layers_in_id_list[0] = 1; for (i = 1; i < vps->num_layer_sets; i++) { u32 n, m; n = 0; for (m = 0; m <= vps->max_layer_id; m++) { if (layer_id_included_flag[i][m]) { vps->LayerSetLayerIdList[i][n++] = m; if (vps->LayerSetLayerIdListMax[i] < m) vps->LayerSetLayerIdListMax[i] = m; } } vps->num_layers_in_id_list[i] = n; } if (gf_bs_read_int_log(bs, 1, "vps_timing_info_present_flag")) { u32 vps_num_hrd_parameters; gf_bs_read_int_log(bs, 32, "vps_num_units_in_tick"); gf_bs_read_int_log(bs, 32, "vps_time_scale"); if (gf_bs_read_int_log(bs, 1, "vps_poc_proportional_to_timing_flag")) { gf_bs_read_ue_log(bs, "vps_num_ticks_poc_diff_one_minus1"); } vps_num_hrd_parameters = gf_bs_read_ue_log(bs, "vps_num_hrd_parameters"); for (i = 0; i < vps_num_hrd_parameters; i++) { Bool cprms_present_flag = GF_TRUE; gf_bs_read_ue_log_idx(bs, "hrd_layer_set_idx", i); if (i > 0) cprms_present_flag = gf_bs_read_int_log(bs, 1, "cprms_present_flag"); hevc_parse_hrd_parameters(bs, cprms_present_flag, vps->max_sub_layers - 1, i); } } if (stop_at_vps_ext) { return vps_id; } vps_extension_flag = gf_bs_read_int_log(bs, 1, "vps_extension_flag"); if (vps_extension_flag) { Bool res; gf_bs_align(bs); res = hevc_parse_vps_extension(vps, bs); if (res != GF_TRUE) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Failed to parse VPS extensions\n")); return -1; } if (gf_bs_read_int_log(bs, 1, "vps_extension2_flag")) { #if 0 while (gf_bs_available(bs)) { /*vps_extension_data_flag */ gf_bs_read_int(bs, 1); } #endif } } return vps_id; } GF_EXPORT s32 gf_hevc_read_vps_ex(u8 *data, u32 *size, HEVCState *hevc, Bool remove_extensions) { GF_BitStream *bs; char *data_without_emulation_bytes = NULL; u32 data_without_emulation_bytes_size = 0; s32 vps_id = -1; 
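/* two parsing paths: when extensions are kept, parse the NAL in place with on-the-fly emulation-prevention-byte removal; when remove_extensions is set, work on a de-emulated copy so the VPS can be re-serialized bit by bit below with vps_extension_flag cleared and emulation bytes re-inserted */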
/*still contains emulation bytes*/ data_without_emulation_bytes_size = remove_extensions ? gf_media_nalu_emulation_bytes_remove_count(data, (*size)) : 0; if (!data_without_emulation_bytes_size) { bs = gf_bs_new(data, (*size), GF_BITSTREAM_READ); gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); } //when removing VPS ext, we have to get the full buffer without emulation prevention bytes because we do a bit-by-bit copy of the vps else { data_without_emulation_bytes = gf_malloc((*size) * sizeof(char)); data_without_emulation_bytes_size = gf_media_nalu_remove_emulation_bytes(data, data_without_emulation_bytes, (*size)); bs = gf_bs_new(data_without_emulation_bytes, data_without_emulation_bytes_size, GF_BITSTREAM_READ); } if (!bs) goto exit; if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) goto exit; vps_id = gf_hevc_read_vps_bs_internal(bs, hevc, remove_extensions); if (vps_id < 0) goto exit; if (remove_extensions) { u8 *new_vps; u32 new_vps_size, emulation_bytes; u32 bit_pos = gf_bs_get_bit_offset(bs); GF_BitStream *w_bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_bs_seek(bs, 0); gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) ); gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) ); gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) ); gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) ); gf_bs_write_u16(w_bs, gf_bs_read_u16(bs) ); bit_pos -= 48; while (bit_pos) { u32 v = gf_bs_read_int(bs, 1); gf_bs_write_int(w_bs, v, 1); bit_pos--; } /*vps extension flag*/ gf_bs_write_int(w_bs, 0, 1); new_vps = NULL; gf_bs_get_content(w_bs, &new_vps, &new_vps_size); gf_bs_del(w_bs); emulation_bytes = gf_media_nalu_emulation_bytes_add_count(new_vps, new_vps_size); if (emulation_bytes + new_vps_size > *size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("Buffer too small to rewrite VPS - skipping rewrite\n")); } else { *size = gf_media_nalu_add_emulation_bytes(new_vps, data, new_vps_size); } if (new_vps) gf_free(new_vps); } exit: if (bs) gf_bs_del(bs); if (data_without_emulation_bytes) gf_free(data_without_emulation_bytes); return vps_id; } GF_EXPORT s32 gf_hevc_read_vps(u8 *data, u32 size, HEVCState *hevc) { return gf_hevc_read_vps_ex(data, &size, hevc, GF_FALSE); } GF_EXPORT s32 gf_hevc_read_vps_bs(GF_BitStream *bs, HEVCState *hevc) { if (!bs || !hevc) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) return -1; return gf_hevc_read_vps_bs_internal(bs, hevc, GF_FALSE); } static void hevc_scaling_list_data(GF_BitStream *bs) { u32 i, sizeId, matrixId; for (sizeId = 0; sizeId < 4; sizeId++) { for (matrixId = 0; matrixId < 6; matrixId += (sizeId == 3) ?
3 : 1) { u32 idx = sizeId*100 + 10*matrixId; u32 scaling_list_pred_mode_flag_sizeId_matrixId = gf_bs_read_int_log_idx(bs, 1, "scaling_list_pred_mode_flag_sizeId_matrixId", idx); if (!scaling_list_pred_mode_flag_sizeId_matrixId) { gf_bs_read_ue_log_idx(bs, "scaling_list_pred_matrix_id_delta", idx); } else { //u32 nextCoef = 8; u32 coefNum = MIN(64, (1 << (4 + (sizeId << 1)))); if (sizeId > 1) { gf_bs_read_se_log_idx(bs, "scaling_list_dc_coef_minus8", idx); } for (i = 0; i < coefNum; i++) { gf_bs_read_se_log_idx2(bs, "scaling_list_delta_coef", idx, i); } } } } } static const struct { u32 w, h; } hevc_sar[17] = { { 0, 0 }, { 1, 1 }, { 12, 11 }, { 10, 11 }, { 16, 11 }, { 40, 33 }, { 24, 11 }, { 20, 11 }, { 32, 11 }, { 80, 33 }, { 18, 11 }, { 15, 11 }, { 64, 33 }, { 160,99 }, { 4,3}, { 3,2}, { 2,1} }; static s32 gf_hevc_read_sps_bs_internal(GF_BitStream *bs, HEVCState *hevc, u8 layer_id, u32 *vui_flag_pos) { s32 vps_id, sps_id = -1; u32 i, nb_CTUs, depth; HEVC_SPS *sps; HEVC_VPS *vps; HEVC_ProfileTierLevel ptl; Bool multiLayerExtSpsFlag; u8 sps_ext_or_max_sub_layers_minus1, max_sub_layers_minus1; if (vui_flag_pos) *vui_flag_pos = 0; //nalu header already parsed vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if ((vps_id<0) || (vps_id >= 16)) { return -1; } memset(&ptl, 0, sizeof(ptl)); max_sub_layers_minus1 = 0; sps_ext_or_max_sub_layers_minus1 = 0; if (layer_id == 0) max_sub_layers_minus1 = gf_bs_read_int_log(bs, 3, "max_sub_layers_minus1"); else sps_ext_or_max_sub_layers_minus1 = gf_bs_read_int_log(bs, 3, "sps_ext_or_max_sub_layers_minus1"); multiLayerExtSpsFlag = (layer_id != 0) && (sps_ext_or_max_sub_layers_minus1 == 7); if (!multiLayerExtSpsFlag) { gf_bs_read_int_log(bs, 1, "temporal_id_nesting_flag"); hevc_profile_tier_level(bs, 1, max_sub_layers_minus1, &ptl, 0); } sps_id = gf_bs_read_ue_log(bs, "sps_id"); if ((sps_id < 0) || (sps_id >= 16)) { return -1; } sps = &hevc->sps[sps_id]; if (!sps->state) { sps->state = 1; sps->id = sps_id; sps->vps_id = vps_id; } sps->ptl = ptl; vps = &hevc->vps[vps_id]; sps->max_sub_layers_minus1 = 0; sps->sps_ext_or_max_sub_layers_minus1 = 0; /* default values */ sps->colour_primaries = 2; sps->transfer_characteristic = 2; sps->matrix_coeffs = 2; //sps_rep_format_idx = 0; if (multiLayerExtSpsFlag) { sps->update_rep_format_flag = gf_bs_read_int_log(bs, 1, "update_rep_format_flag"); if (sps->update_rep_format_flag) { sps->rep_format_idx = gf_bs_read_int_log(bs, 8, "rep_format_idx"); } else { sps->rep_format_idx = vps->rep_format_idx[layer_id]; } sps->width = vps->rep_formats[sps->rep_format_idx].pic_width_luma_samples; sps->height = vps->rep_formats[sps->rep_format_idx].pic_height_luma_samples; sps->chroma_format_idc = vps->rep_formats[sps->rep_format_idx].chroma_format_idc; sps->bit_depth_luma = vps->rep_formats[sps->rep_format_idx].bit_depth_luma; sps->bit_depth_chroma = vps->rep_formats[sps->rep_format_idx].bit_depth_chroma; sps->separate_colour_plane_flag = vps->rep_formats[sps->rep_format_idx].separate_colour_plane_flag; //TODO this is crude ... 
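/* for a multi-layer extension SPS the profile_tier_level is not parsed from the SPS itself; it is inherited from the first extension PTL signalled in the VPS */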
sps->ptl = vps->ext_ptl[0]; } else { sps->chroma_format_idc = gf_bs_read_ue_log(bs, "chroma_format_idc"); if (sps->chroma_format_idc == 3) sps->separate_colour_plane_flag = gf_bs_read_int_log(bs, 1, "separate_colour_plane_flag"); sps->width = gf_bs_read_ue_log(bs, "width"); sps->height = gf_bs_read_ue_log(bs, "height"); if ((sps->cw_flag = gf_bs_read_int_log(bs, 1, "conformance_window_flag"))) { u32 SubWidthC, SubHeightC; if (sps->chroma_format_idc == 1) { SubWidthC = SubHeightC = 2; } else if (sps->chroma_format_idc == 2) { SubWidthC = 2; SubHeightC = 1; } else { SubWidthC = SubHeightC = 1; } sps->cw_left = gf_bs_read_ue_log(bs, "conformance_window_left"); sps->cw_right = gf_bs_read_ue_log(bs, "conformance_window_right"); sps->cw_top = gf_bs_read_ue_log(bs, "conformance_window_top"); sps->cw_bottom = gf_bs_read_ue_log(bs, "conformance_window_bottom"); sps->width -= SubWidthC * (sps->cw_left + sps->cw_right); sps->height -= SubHeightC * (sps->cw_top + sps->cw_bottom); } sps->bit_depth_luma = 8 + gf_bs_read_ue_log(bs, "bit_depth_luma_minus8"); sps->bit_depth_chroma = 8 + gf_bs_read_ue_log(bs, "bit_depth_chroma_minus8"); } sps->log2_max_pic_order_cnt_lsb = 4 + gf_bs_read_ue_log(bs, "log2_max_pic_order_cnt_lsb_minus4"); if (!multiLayerExtSpsFlag) { sps->sub_layer_ordering_info_present_flag = gf_bs_read_int_log(bs, 1, "sub_layer_ordering_info_present_flag"); for (i = sps->sub_layer_ordering_info_present_flag ? 0 : sps->max_sub_layers_minus1; i <= sps->max_sub_layers_minus1; i++) { gf_bs_read_ue_log_idx(bs, "max_dec_pic_buffering", i); gf_bs_read_ue_log_idx(bs, "num_reorder_pics", i); gf_bs_read_ue_log_idx(bs, "max_latency_increase", i); } } sps->log2_min_luma_coding_block_size = 3 + gf_bs_read_ue_log(bs, "log2_min_luma_coding_block_size_minus3"); sps->log2_diff_max_min_luma_coding_block_size = gf_bs_read_ue_log(bs, "log2_diff_max_min_luma_coding_block_size"); sps->max_CU_width = (1 << (sps->log2_min_luma_coding_block_size + sps->log2_diff_max_min_luma_coding_block_size)); sps->max_CU_height = (1 << (sps->log2_min_luma_coding_block_size + sps->log2_diff_max_min_luma_coding_block_size)); sps->log2_min_transform_block_size = 2 + gf_bs_read_ue_log(bs, "log2_min_transform_block_size_minus2"); sps->log2_max_transform_block_size = sps->log2_min_transform_block_size + gf_bs_read_ue_log(bs, "log2_max_transform_block_size"); depth = 0; sps->max_transform_hierarchy_depth_inter = gf_bs_read_ue_log(bs, "max_transform_hierarchy_depth_inter"); sps->max_transform_hierarchy_depth_intra = gf_bs_read_ue_log(bs, "max_transform_hierarchy_depth_intra"); while ((u32)(sps->max_CU_width >> sps->log2_diff_max_min_luma_coding_block_size) > (u32)(1 << (sps->log2_min_transform_block_size + depth))) { depth++; } sps->max_CU_depth = sps->log2_diff_max_min_luma_coding_block_size + depth; nb_CTUs = ((sps->width + sps->max_CU_width - 1) / sps->max_CU_width) * ((sps->height + sps->max_CU_height - 1) / sps->max_CU_height); sps->bitsSliceSegmentAddress = 0; while (nb_CTUs > (u32)(1 << sps->bitsSliceSegmentAddress)) { sps->bitsSliceSegmentAddress++; } sps->scaling_list_enable_flag = gf_bs_read_int_log(bs, 1, "scaling_list_enable_flag"); if (sps->scaling_list_enable_flag) { sps->infer_scaling_list_flag = 0; sps->scaling_list_ref_layer_id = 0; if (multiLayerExtSpsFlag) { sps->infer_scaling_list_flag = gf_bs_read_int_log(bs, 1, "infer_scaling_list_flag"); } if (sps->infer_scaling_list_flag) { sps->scaling_list_ref_layer_id = gf_bs_read_int_log(bs, 6, "scaling_list_ref_layer_id"); } else { sps->scaling_list_data_present_flag = 
gf_bs_read_int_log(bs, 1, "scaling_list_data_present_flag"); if (sps->scaling_list_data_present_flag) { hevc_scaling_list_data(bs); } } } sps->asymmetric_motion_partitions_enabled_flag = gf_bs_read_int_log(bs, 1, "asymmetric_motion_partitions_enabled_flag"); sps->sample_adaptive_offset_enabled_flag = gf_bs_read_int_log(bs, 1, "sample_adaptive_offset_enabled_flag"); if ( (sps->pcm_enabled_flag = gf_bs_read_int_log(bs, 1, "pcm_enabled_flag")) ) { sps->pcm_sample_bit_depth_luma_minus1 = gf_bs_read_int_log(bs, 4, "pcm_sample_bit_depth_luma_minus1"); sps->pcm_sample_bit_depth_chroma_minus1 = gf_bs_read_int_log(bs, 4, "pcm_sample_bit_depth_chroma_minus1"); sps->log2_min_pcm_luma_coding_block_size_minus3 = gf_bs_read_ue_log(bs, "log2_min_pcm_luma_coding_block_size_minus3"); sps->log2_diff_max_min_pcm_luma_coding_block_size = gf_bs_read_ue_log(bs, "log2_diff_max_min_pcm_luma_coding_block_size"); sps->pcm_loop_filter_disable_flag = gf_bs_read_int_log(bs, 1, "pcm_loop_filter_disable_flag"); } sps->num_short_term_ref_pic_sets = gf_bs_read_ue_log(bs, "num_short_term_ref_pic_sets"); if (sps->num_short_term_ref_pic_sets > 64) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Invalid number of short term reference picture sets %d\n", sps->num_short_term_ref_pic_sets)); return -1; } for (i = 0; i < sps->num_short_term_ref_pic_sets; i++) { Bool ret = hevc_parse_short_term_ref_pic_set(bs, sps, i); /*cannot parse short_term_ref_pic_set, skip VUI parsing*/ if (!ret) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Invalid short_term_ref_pic_set\n")); return -1; } } sps->long_term_ref_pics_present_flag = gf_bs_read_int_log(bs, 1, "long_term_ref_pics_present_flag"); if (sps->long_term_ref_pics_present_flag) { sps->num_long_term_ref_pic_sps = gf_bs_read_ue_log(bs, "num_long_term_ref_pic_sps"); for (i = 0; i < sps->num_long_term_ref_pic_sps; i++) { gf_bs_read_int_log_idx(bs, sps->log2_max_pic_order_cnt_lsb, "lt_ref_pic_poc_lsb_sps", i); gf_bs_read_int_log_idx(bs, 1, "used_by_curr_pic_lt_sps_flag", i); } } sps->temporal_mvp_enable_flag = gf_bs_read_int_log(bs, 1, "temporal_mvp_enable_flag"); sps->strong_intra_smoothing_enable_flag = gf_bs_read_int_log(bs, 1, "strong_intra_smoothing_enable_flag"); if (vui_flag_pos) *vui_flag_pos = (u32)gf_bs_get_bit_offset(bs); if ((sps->vui_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_parameters_present_flag")) ) { sps->aspect_ratio_info_present_flag = gf_bs_read_int_log(bs, 1, "aspect_ratio_info_present_flag"); if (sps->aspect_ratio_info_present_flag) { sps->sar_idc = gf_bs_read_int_log(bs, 8, "aspect_ratio_idc"); if (sps->sar_idc == 255) { sps->sar_width = gf_bs_read_int_log(bs, 16, "aspect_ratio_width"); sps->sar_height = gf_bs_read_int_log(bs, 16, "aspect_ratio_height"); } else if (sps->sar_idc < 17) { sps->sar_width = hevc_sar[sps->sar_idc].w; sps->sar_height = hevc_sar[sps->sar_idc].h; } } if ((sps->overscan_info_present = gf_bs_read_int_log(bs, 1, "overscan_info_present"))) sps->overscan_appropriate = gf_bs_read_int_log(bs, 1, "overscan_appropriate"); sps->video_signal_type_present_flag = gf_bs_read_int_log(bs, 1, "video_signal_type_present_flag"); if (sps->video_signal_type_present_flag) { sps->video_format = gf_bs_read_int_log(bs, 3, "video_format"); sps->video_full_range_flag = gf_bs_read_int_log(bs, 1, "video_full_range_flag"); if ((sps->colour_description_present_flag = gf_bs_read_int_log(bs, 1, "colour_description_present_flag"))) { sps->colour_primaries = gf_bs_read_int_log(bs, 8, "colour_primaries"); sps->transfer_characteristic = gf_bs_read_int_log(bs, 8, 
"transfer_characteristic"); sps->matrix_coeffs = gf_bs_read_int_log(bs, 8, "matrix_coefficients"); } } if ((sps->chroma_loc_info_present_flag = gf_bs_read_int_log(bs, 1, "chroma_loc_info_present_flag"))) { sps->chroma_sample_loc_type_top_field = gf_bs_read_ue_log(bs, "chroma_sample_loc_type_top_field"); sps->chroma_sample_loc_type_bottom_field = gf_bs_read_ue_log(bs, "chroma_sample_loc_type_bottom_field"); } sps->neutra_chroma_indication_flag = gf_bs_read_int_log(bs, 1, "neutra_chroma_indication_flag"); sps->field_seq_flag = gf_bs_read_int_log(bs, 1, "field_seq_flag"); sps->frame_field_info_present_flag = gf_bs_read_int_log(bs, 1, "frame_field_info_present_flag"); if ((sps->default_display_window_flag = gf_bs_read_int_log(bs, 1, "default_display_window_flag"))) { sps->left_offset = gf_bs_read_ue_log(bs, "display_window_left_offset"); sps->right_offset = gf_bs_read_ue_log(bs, "display_window_right_offset"); sps->top_offset = gf_bs_read_ue_log(bs, "display_window_top_offset"); sps->bottom_offset = gf_bs_read_ue_log(bs, "display_window_bottom_offset"); } sps->has_timing_info = gf_bs_read_int_log(bs, 1, "has_timing_info"); if (sps->has_timing_info) { sps->num_units_in_tick = gf_bs_read_int_log(bs, 32, "num_units_in_tick"); sps->time_scale = gf_bs_read_int_log(bs, 32, "time_scale"); sps->poc_proportional_to_timing_flag = gf_bs_read_int_log(bs, 1, "poc_proportional_to_timing_flag"); if (sps->poc_proportional_to_timing_flag) sps->num_ticks_poc_diff_one_minus1 = gf_bs_read_ue_log(bs, "num_ticks_poc_diff_one_minus1"); if ((sps->hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "hrd_parameters_present_flag"))) { // GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[HEVC] HRD param parsing not implemented\n")); return sps_id; } } if (gf_bs_read_int_log(bs, 1, "bitstream_restriction_flag")) { gf_bs_read_int_log(bs, 1, "tiles_fixed_structure_flag"); gf_bs_read_int_log(bs, 1, "motion_vectors_over_pic_boundaries_flag"); gf_bs_read_int_log(bs, 1, "restricted_ref_pic_lists_flag"); gf_bs_read_ue_log(bs, "min_spatial_segmentation_idc"); gf_bs_read_ue_log(bs, "max_bytes_per_pic_denom"); gf_bs_read_ue_log(bs, "max_bits_per_min_cu_denom"); gf_bs_read_ue_log(bs, "log2_max_mv_length_horizontal"); gf_bs_read_ue_log(bs, "log2_max_mv_length_vertical"); } } if (gf_bs_read_int_log(bs, 1, "sps_extension_flag")) { #if 0 while (gf_bs_available(bs)) { /*sps_extension_data_flag */ gf_bs_read_int(bs, 1); } #endif } return sps_id; } GF_EXPORT s32 gf_hevc_read_sps_ex(char *data, u32 size, HEVCState *hevc, u32 *vui_flag_pos) { GF_BitStream *bs; s32 sps_id = -1; u8 layer_id; if (vui_flag_pos) *vui_flag_pos = 0; bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) goto exit; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, &layer_id)) goto exit; sps_id = gf_hevc_read_sps_bs_internal(bs, hevc, layer_id, vui_flag_pos); exit: if (bs) gf_bs_del(bs); return sps_id; } GF_EXPORT s32 gf_hevc_read_sps(u8 *data, u32 size, HEVCState *hevc) { return gf_hevc_read_sps_ex(data, size, hevc, NULL); } GF_EXPORT s32 gf_hevc_read_sps_bs(GF_BitStream *bs, HEVCState *hevc) { u8 layer_id; if (!bs || !hevc) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, &layer_id)) return -1; return gf_hevc_read_sps_bs_internal(bs, hevc, layer_id, NULL); } static s32 gf_hevc_read_pps_bs_internal(GF_BitStream *bs, HEVCState *hevc) { u32 i; s32 pps_id; HEVC_PPS *pps; //NAL header already read pps_id = gf_bs_read_ue_log(bs, "pps_id"); if ((pps_id < 0) || (pps_id >= 64)) { 
GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] wrong PPS ID %d in PPS\n", pps_id)); return -1; } pps = &hevc->pps[pps_id]; if (!pps->state) { pps->id = pps_id; pps->state = 1; } pps->sps_id = gf_bs_read_ue_log(bs, "sps_id"); if (((s32)pps->sps_id<0) || (pps->sps_id >= 16)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] wrong SPS ID %d in PPS\n", pps->sps_id)); pps->sps_id=0; return -1; } hevc->sps_active_idx = pps->sps_id; /*set active sps*/ pps->dependent_slice_segments_enabled_flag = gf_bs_read_int_log(bs, 1, "dependent_slice_segments_enabled_flag"); pps->output_flag_present_flag = gf_bs_read_int_log(bs, 1, "output_flag_present_flag"); pps->num_extra_slice_header_bits = gf_bs_read_int_log(bs, 3, "num_extra_slice_header_bits"); pps->sign_data_hiding_flag = gf_bs_read_int_log(bs, 1, "sign_data_hiding_flag"); pps->cabac_init_present_flag = gf_bs_read_int_log(bs, 1, "cabac_init_present_flag"); pps->num_ref_idx_l0_default_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l0_default_active"); pps->num_ref_idx_l1_default_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l1_default_active"); pps->pic_init_qp_minus26 = gf_bs_read_se_log(bs, "pic_init_qp_minus26"); pps->constrained_intra_pred_flag = gf_bs_read_int_log(bs, 1, "constrained_intra_pred_flag"); pps->transform_skip_enabled_flag = gf_bs_read_int_log(bs, 1, "transform_skip_enabled_flag"); if ((pps->cu_qp_delta_enabled_flag = gf_bs_read_int_log(bs, 1, "cu_qp_delta_enabled_flag"))) pps->diff_cu_qp_delta_depth = gf_bs_read_ue_log(bs, "diff_cu_qp_delta_depth"); pps->pic_cb_qp_offset = gf_bs_read_se_log(bs, "pic_cb_qp_offset"); pps->pic_cr_qp_offset = gf_bs_read_se_log(bs, "pic_cr_qp_offset"); pps->slice_chroma_qp_offsets_present_flag = gf_bs_read_int_log(bs, 1, "slice_chroma_qp_offsets_present_flag"); pps->weighted_pred_flag = gf_bs_read_int_log(bs, 1, "weighted_pred_flag"); pps->weighted_bipred_flag = gf_bs_read_int_log(bs, 1, "weighted_bipred_flag"); pps->transquant_bypass_enable_flag = gf_bs_read_int_log(bs, 1, "transquant_bypass_enable_flag"); pps->tiles_enabled_flag = gf_bs_read_int_log(bs, 1, "tiles_enabled_flag"); pps->entropy_coding_sync_enabled_flag = gf_bs_read_int_log(bs, 1, "entropy_coding_sync_enabled_flag"); if (pps->tiles_enabled_flag) { pps->num_tile_columns = 1 + gf_bs_read_ue_log(bs, "num_tile_columns_minus1"); pps->num_tile_rows = 1 + gf_bs_read_ue_log(bs, "num_tile_rows_minus1"); pps->uniform_spacing_flag = gf_bs_read_int_log(bs, 1, "uniform_spacing_flag"); if (!pps->uniform_spacing_flag) { for (i = 0; i < pps->num_tile_columns - 1; i++) { pps->column_width[i] = 1 + gf_bs_read_ue_log_idx(bs, "column_width_minus1", i); } for (i = 0; i < pps->num_tile_rows - 1; i++) { pps->row_height[i] = 1 + gf_bs_read_ue_log_idx(bs, "row_height_minus1", i); } } pps->loop_filter_across_tiles_enabled_flag = gf_bs_read_int_log(bs, 1, "loop_filter_across_tiles_enabled_flag"); } pps->loop_filter_across_slices_enabled_flag = gf_bs_read_int_log(bs, 1, "loop_filter_across_slices_enabled_flag"); if ((pps->deblocking_filter_control_present_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_control_present_flag"))) { pps->deblocking_filter_override_enabled_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_override_enabled_flag"); if (! 
(pps->pic_disable_deblocking_filter_flag = gf_bs_read_int_log(bs, 1, "pic_disable_deblocking_filter_flag"))) { pps->beta_offset_div2 = gf_bs_read_se_log(bs, "beta_offset_div2"); pps->tc_offset_div2 = gf_bs_read_se_log(bs, "tc_offset_div2"); } } if ((pps->pic_scaling_list_data_present_flag = gf_bs_read_int_log(bs, 1, "pic_scaling_list_data_present_flag"))) { hevc_scaling_list_data(bs); } pps->lists_modification_present_flag = gf_bs_read_int_log(bs, 1, "lists_modification_present_flag"); pps->log2_parallel_merge_level_minus2 = gf_bs_read_ue_log(bs, "log2_parallel_merge_level_minus2"); pps->slice_segment_header_extension_present_flag = gf_bs_read_int_log(bs, 1, "slice_segment_header_extension_present_flag"); if (gf_bs_read_int_log(bs, 1, "pps_extension_flag")) { #if 0 while (gf_bs_available(bs)) { /*pps_extension_data_flag */ gf_bs_read_int(bs, 1); } #endif } return pps_id; } GF_EXPORT s32 gf_hevc_read_pps(u8 *data, u32 size, HEVCState *hevc) { GF_BitStream *bs; s32 pps_id = -1; bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) goto exit; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) goto exit; pps_id = gf_hevc_read_pps_bs_internal(bs, hevc); exit: if (bs) gf_bs_del(bs); return pps_id; } GF_EXPORT s32 gf_hevc_read_pps_bs(GF_BitStream *bs, HEVCState *hevc) { if (!bs || !hevc) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) return -1; return gf_hevc_read_pps_bs_internal(bs, hevc); } GF_EXPORT s32 gf_hevc_parse_nalu_bs(GF_BitStream *bs, HEVCState *hevc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { Bool is_slice = GF_FALSE; s32 ret = -1; HEVCSliceInfo n_state; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); memcpy(&n_state, &hevc->s_info, sizeof(HEVCSliceInfo)); if (!hevc_parse_nal_header(bs, nal_unit_type, temporal_id, layer_id)) return -1; n_state.nal_unit_type = *nal_unit_type; switch (n_state.nal_unit_type) { case GF_HEVC_NALU_ACCESS_UNIT: case GF_HEVC_NALU_END_OF_SEQ: case GF_HEVC_NALU_END_OF_STREAM: ret = 1; break; /*slice_segment_layer_rbsp*/ case GF_HEVC_NALU_SLICE_TRAIL_N: case GF_HEVC_NALU_SLICE_TRAIL_R: case GF_HEVC_NALU_SLICE_TSA_N: case GF_HEVC_NALU_SLICE_TSA_R: case GF_HEVC_NALU_SLICE_STSA_N: case GF_HEVC_NALU_SLICE_STSA_R: case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: case GF_HEVC_NALU_SLICE_CRA: case GF_HEVC_NALU_SLICE_RADL_N: case GF_HEVC_NALU_SLICE_RADL_R: case GF_HEVC_NALU_SLICE_RASL_N: case GF_HEVC_NALU_SLICE_RASL_R: is_slice = GF_TRUE; /* slice - read the info and compare.*/ ret = hevc_parse_slice_segment(bs, hevc, &n_state); if (ret < 0) return ret; hevc_compute_poc(&n_state); ret = 0; if (hevc->s_info.poc != n_state.poc) { ret = 1; break; } if (n_state.first_slice_segment_in_pic_flag) { if (!(*layer_id) || (n_state.prev_layer_id_plus1 && ((*layer_id) <= n_state.prev_layer_id_plus1 - 1))) { ret = 1; break; } } break; case GF_HEVC_NALU_SEQ_PARAM: hevc->last_parsed_sps_id = gf_hevc_read_sps_bs_internal(bs, hevc, *layer_id, NULL); ret = (hevc->last_parsed_sps_id>=0) ? 0 : -1; break; case GF_HEVC_NALU_PIC_PARAM: hevc->last_parsed_pps_id = gf_hevc_read_pps_bs_internal(bs, hevc); ret = (hevc->last_parsed_pps_id>=0) ? 0 : -1; break; case GF_HEVC_NALU_VID_PARAM: hevc->last_parsed_vps_id = gf_hevc_read_vps_bs_internal(bs, hevc, GF_FALSE); ret = (hevc->last_parsed_vps_id>=0) ? 
0 : -1; break; default: ret = 0; break; } /* save _prev values */ if ((ret>0) && hevc->s_info.sps) { n_state.frame_num_offset_prev = hevc->s_info.frame_num_offset; n_state.frame_num_prev = hevc->s_info.frame_num; n_state.poc_lsb_prev = hevc->s_info.poc_lsb; n_state.poc_msb_prev = hevc->s_info.poc_msb; if (is_slice) n_state.prev_layer_id_plus1 = *layer_id + 1; } if (is_slice) hevc_compute_poc(&n_state); memcpy(&hevc->s_info, &n_state, sizeof(HEVCSliceInfo)); return ret; } GF_EXPORT s32 gf_hevc_parse_nalu(u8 *data, u32 size, HEVCState *hevc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { GF_BitStream *bs = NULL; s32 ret = -1; if (!hevc) { if (nal_unit_type) (*nal_unit_type) = (data[0] & 0x7E) >> 1; if (layer_id) { u8 id = data[0] & 1; id <<= 5; id |= (data[1] >> 3) & 0x1F; (*layer_id) = id; } if (temporal_id) (*temporal_id) = (data[1] & 0x7); return -1; } bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); ret = gf_hevc_parse_nalu_bs(bs, hevc, nal_unit_type, temporal_id, layer_id); gf_bs_del(bs); return ret; } GF_EXPORT GF_Err gf_hevc_change_vui(GF_HEVCConfig *hvcc, GF_VUIInfo *vui_info) { GF_BitStream *orig, *mod; HEVCState hevc; u32 i, bit_offset, flag; s32 idx; GF_NALUFFParamArray *spss; GF_NALUFFParam *slc; orig = NULL; memset(&hevc, 0, sizeof(HEVCState)); hevc.sps_active_idx = -1; i = 0; spss = NULL; while ((spss = (GF_NALUFFParamArray *)gf_list_enum(hvcc->param_array, &i))) { if (spss->type == GF_HEVC_NALU_SEQ_PARAM) break; spss = NULL; } if (!spss) return GF_NON_COMPLIANT_BITSTREAM; i = 0; while ((slc = (GF_NALUFFParam *)gf_list_enum(spss->nalus, &i))) { u8 *no_emulation_buf; u32 no_emulation_buf_size, emulation_bytes; /*SPS may still contains emulation bytes*/ no_emulation_buf = gf_malloc((slc->size) * sizeof(char)); no_emulation_buf_size = gf_media_nalu_remove_emulation_bytes(slc->data, no_emulation_buf, slc->size); idx = gf_hevc_read_sps_ex(no_emulation_buf, no_emulation_buf_size, &hevc, &bit_offset); if (idx < 0) { if (orig) gf_bs_del(orig); gf_free(no_emulation_buf); continue; } orig = gf_bs_new(no_emulation_buf, no_emulation_buf_size, GF_BITSTREAM_READ); mod = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); /*copy over till vui flag*/ assert(bit_offset >= 0); while (bit_offset) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); bit_offset--; } avc_hevc_rewrite_vui(vui_info, orig, mod); /*finally copy over remaining*/ while (gf_bs_bits_available(orig)) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); } gf_bs_del(orig); orig = NULL; gf_free(no_emulation_buf); /*set anti-emulation*/ gf_bs_get_content(mod, &no_emulation_buf, &no_emulation_buf_size); emulation_bytes = gf_media_nalu_emulation_bytes_add_count(no_emulation_buf, no_emulation_buf_size); if (no_emulation_buf_size + emulation_bytes > slc->size) slc->data = (char*)gf_realloc(slc->data, no_emulation_buf_size + emulation_bytes); slc->size = gf_media_nalu_add_emulation_bytes(no_emulation_buf, slc->data, no_emulation_buf_size); gf_bs_del(mod); gf_free(no_emulation_buf); } return GF_OK; } GF_EXPORT GF_Err gf_hevc_change_par(GF_HEVCConfig *hvcc, s32 ar_n, s32 ar_d) { GF_VUIInfo vuii; memset(&vuii, 0, sizeof(GF_VUIInfo)); vuii.ar_num = ar_n; vuii.ar_den = ar_d; vuii.fullrange = -1; vuii.video_format = -1; vuii.color_prim = -1; vuii.color_tfc = -1; vuii.color_matrix = -1; return gf_hevc_change_vui(hvcc, &vuii); } GF_EXPORT GF_Err gf_hevc_change_color(GF_HEVCConfig *hvcc, s32 fullrange, s32 vidformat, s32 colorprim, s32 transfer, s32 
colmatrix) { GF_VUIInfo vuii; memset(&vuii, 0, sizeof(GF_VUIInfo)); vuii.ar_num = -1; vuii.ar_den = -1; vuii.fullrange = fullrange; vuii.video_format = vidformat; vuii.color_prim = colorprim; vuii.color_tfc = transfer; vuii.color_matrix = colmatrix; return gf_hevc_change_vui(hvcc, &vuii); } GF_EXPORT GF_Err gf_hevc_get_sps_info_with_state(HEVCState *hevc, u8 *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d) { s32 idx; idx = gf_hevc_read_sps(sps_data, sps_size, hevc); if (idx < 0) { return GF_NON_COMPLIANT_BITSTREAM; } if (sps_id) *sps_id = idx; if (width) *width = hevc->sps[idx].width; if (height) *height = hevc->sps[idx].height; if (par_n) *par_n = hevc->sps[idx].aspect_ratio_info_present_flag ? hevc->sps[idx].sar_width : (u32)-1; if (par_d) *par_d = hevc->sps[idx].aspect_ratio_info_present_flag ? hevc->sps[idx].sar_height : (u32)-1; return GF_OK; } GF_EXPORT GF_Err gf_hevc_get_sps_info(u8 *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d) { HEVCState hevc; memset(&hevc, 0, sizeof(HEVCState)); hevc.sps_active_idx = -1; return gf_hevc_get_sps_info_with_state(&hevc, sps_data, sps_size, sps_id, width, height, par_n, par_d); } #endif //GPAC_DISABLE_HEVC static u32 AC3_FindSyncCode(u8 *buf, u32 buflen) { u32 end = buflen - 6; u32 offset = 0; while (offset <= end) { if (buf[offset] == 0x0b && buf[offset + 1] == 0x77) { return offset; } offset++; } return buflen; } static Bool AC3_FindSyncCodeBS(GF_BitStream *bs) { u8 b1; u64 pos = gf_bs_get_position(bs); u64 end = gf_bs_get_size(bs); pos += 1; b1 = gf_bs_read_u8(bs); while (pos + 1 <= end) { u8 b2 = gf_bs_read_u8(bs); if ((b1 == 0x0b) && (b2 == 0x77)) { gf_bs_seek(bs, pos - 1); return GF_TRUE; } pos++; b1 = b2; } return GF_FALSE; } static const u32 ac3_sizecod_to_bitrate[] = { 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000, 192000, 224000, 256000, 320000, 384000, 448000, 512000, 576000, 640000 }; static const u32 ac3_sizecod2_to_framesize[] = { 96, 120, 144, 168, 192, 240, 288, 336, 384, 480, 576, 672, 768, 960, 1152, 1344, 1536, 1728, 1920 }; static const u32 ac3_sizecod1_to_framesize[] = { 69, 87, 104, 121, 139, 174, 208, 243, 278, 348, 417, 487, 557, 696, 835, 975, 1114, 1253, 1393 }; static const u32 ac3_sizecod0_to_framesize[] = { 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 448, 512, 640, 768, 896, 1024, 1152, 1280 }; static const u32 ac3_mod_to_chans[] = { 2, 1, 2, 3, 3, 4, 4, 5 }; GF_EXPORT u32 gf_ac3_get_channels(u32 acmod) { u32 nb_ch; nb_ch = ac3_mod_to_chans[acmod]; return nb_ch; } GF_EXPORT u32 gf_ac3_get_bitrate(u32 brcode) { return ac3_sizecod_to_bitrate[brcode]; } Bool gf_ac3_parser(u8 *buf, u32 buflen, u32 *pos, GF_AC3Config *hdr, Bool full_parse) { GF_BitStream *bs; Bool ret; if (buflen < 6) return GF_FALSE; (*pos) = AC3_FindSyncCode(buf, buflen); if (*pos >= buflen) return GF_FALSE; bs = gf_bs_new((const char*)(buf + *pos), buflen, GF_BITSTREAM_READ); ret = gf_ac3_parser_bs(bs, hdr, full_parse); gf_bs_del(bs); return ret; } GF_EXPORT Bool gf_ac3_parser_bs(GF_BitStream *bs, GF_AC3Config *hdr, Bool full_parse) { u32 fscod, frmsizecod, bsid, ac3_mod, freq, framesize, bsmod, syncword; u64 pos; if (!hdr || (gf_bs_available(bs) < 6)) return GF_FALSE; if (!AC3_FindSyncCodeBS(bs)) return GF_FALSE; pos = gf_bs_get_position(bs); syncword = gf_bs_read_u16(bs); if (syncword != 0x0B77) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AC3] Wrong sync word detected (0x%X - expecting 0x0B77).\n", syncword)); return GF_FALSE; } 
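/* AC-3 syncframe header: after the 16-bit CRC come fscod (sample-rate code), frmsizecod, bsid, bsmod and acmod. frmsizecod/2 indexes the nominal bitrate table and, together with fscod, the frame-size tables below, which are expressed in 16-bit words (hence the *2 to get bytes). E.g. fscod=0 (48 kHz) with frmsizecod=20 gives 192 kbit/s and a 384-word (768-byte) frame. */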
gf_bs_read_int_log(bs, 16, "crc1"); fscod = gf_bs_read_int_log(bs, 2, "fscod"); frmsizecod = gf_bs_read_int_log(bs, 6, "frmsizecod"); bsid = gf_bs_read_int_log(bs, 5, "bsid"); bsmod = gf_bs_read_int_log(bs, 3, "bsmod"); ac3_mod = gf_bs_read_int_log(bs, 3, "ac3_mod"); if (frmsizecod >= 2 * sizeof(ac3_sizecod_to_bitrate) / sizeof(u32)) return GF_FALSE; hdr->bitrate = ac3_sizecod_to_bitrate[frmsizecod / 2]; if (bsid > 8) hdr->bitrate = hdr->bitrate >> (bsid - 8); switch (fscod) { case 0: if (frmsizecod >= 2 * sizeof(ac3_sizecod0_to_framesize) / sizeof(u32)) return GF_FALSE; freq = 48000; framesize = ac3_sizecod0_to_framesize[frmsizecod / 2] * 2; break; case 1: if (frmsizecod >= 2 * sizeof(ac3_sizecod1_to_framesize) / sizeof(u32)) return GF_FALSE; freq = 44100; framesize = (ac3_sizecod1_to_framesize[frmsizecod / 2] + (frmsizecod & 0x1)) * 2; break; case 2: if (frmsizecod >= 2 * sizeof(ac3_sizecod2_to_framesize) / sizeof(u32)) return GF_FALSE; freq = 32000; framesize = ac3_sizecod2_to_framesize[frmsizecod / 2] * 2; break; default: return GF_FALSE; } hdr->sample_rate = freq; hdr->framesize = framesize; if (full_parse) { hdr->streams[0].bsid = bsid; hdr->streams[0].bsmod = bsmod; hdr->streams[0].acmod = ac3_mod; hdr->streams[0].lfon = 0; hdr->streams[0].fscod = fscod; hdr->brcode = frmsizecod / 2; } if (ac3_mod >= 2 * sizeof(ac3_mod_to_chans) / sizeof(u32)) return GF_FALSE; hdr->channels = ac3_mod_to_chans[ac3_mod]; if ((ac3_mod & 0x1) && (ac3_mod != 1)) gf_bs_read_int_log(bs, 2, "cmixlev"); if (ac3_mod & 0x4) gf_bs_read_int_log(bs, 2, "surmixlev"); if (ac3_mod == 0x2) gf_bs_read_int_log(bs, 2, "dsurmod"); if (gf_bs_read_int_log(bs, 1, "lfeon")) { hdr->channels += 1; hdr->streams[0].lfon = 1; } gf_bs_seek(bs, pos); return GF_TRUE; } GF_EXPORT Bool gf_eac3_parser_bs(GF_BitStream *bs, GF_AC3Config *hdr, Bool full_parse) { u32 fscod, bsid, ac3_mod, freq, framesize, syncword, substreamid, lfon, channels, numblkscod, strmtyp, frmsiz; u64 pos; u16 chanmap; static u32 numblks[4] = {1, 2, 3, 6}; if (!hdr || (gf_bs_available(bs) < 6)) return GF_FALSE; if (!AC3_FindSyncCodeBS(bs)) return GF_FALSE; pos = gf_bs_get_position(bs); framesize = 0; numblkscod = 0; memset(hdr, 0, sizeof(GF_AC3Config)); block: syncword = gf_bs_read_u16(bs); if (syncword != 0x0B77) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[E-AC3] Wrong sync word detected (0x%X - expecting 0x0B77).\n", syncword)); return GF_FALSE; } strmtyp = gf_bs_read_int_log(bs, 2, "strmtyp"); substreamid = gf_bs_read_int_log(bs, 3, "substreamid"); //next main (independent) AU, done with this frame if ((strmtyp!=0x1) && ((hdr->substreams >> substreamid) & 0x1)) { hdr->framesize = framesize; gf_bs_seek(bs, pos); return GF_TRUE; } frmsiz = gf_bs_read_int_log(bs, 11, "frmsiz"); framesize += 2 * (1 + frmsiz); fscod = gf_bs_read_int_log(bs, 2, "fscod"); if (fscod == 0x3) { fscod = gf_bs_read_int_log(bs, 2, "fscod2"); numblkscod += 6; } else { numblkscod += gf_bs_read_int_log(bs, 2, "numblkscod"); } assert(numblkscod <= 9); if ((hdr->substreams >> substreamid) & 0x1) { //we still have sync frames following if (substreamid) { if (gf_bs_seek(bs, pos + framesize) != GF_OK) { gf_bs_seek(bs, pos); return GF_FALSE; } if ((gf_bs_available(bs) < 6) || !AC3_FindSyncCodeBS(bs)) { gf_bs_seek(bs, pos); return GF_FALSE; } goto block; } } hdr->substreams |= (1 << substreamid); switch (fscod) { case 0: freq = 48000; break; case 1: freq = 44100; break; case 2: freq = 32000; break; default: return GF_FALSE; } ac3_mod = gf_bs_read_int_log(bs, 3, "ac3_mod"); lfon = gf_bs_read_int_log(bs, 
1, "lfon"); bsid = gf_bs_read_int_log(bs, 5, "bsid"); if (!substreamid && (bsid != 16/*E-AC3*/)) return GF_FALSE; gf_bs_read_int_log(bs, 5, "dialnorm"); if (gf_bs_read_int_log(bs, 1, "compre")) { gf_bs_read_int_log(bs, 8, "compr"); } if (ac3_mod==0) { gf_bs_read_int_log(bs, 5, "dialnorm2"); if (gf_bs_read_int_log(bs, 1, "compr2e")) { gf_bs_read_int_log(bs, 8, "compr2"); } } chanmap = 0; if (strmtyp==0x1) { if (gf_bs_read_int_log(bs, 1, "chanmape")) { chanmap = gf_bs_read_int_log(bs, 16, "chanmap"); } } channels = ac3_mod_to_chans[ac3_mod]; if (lfon) channels += 1; hdr->bitrate = 0; hdr->sample_rate = freq; hdr->framesize = framesize; if (strmtyp != 1) { hdr->channels = channels; hdr->streams[substreamid].lfon = lfon; if (full_parse) { hdr->streams[substreamid].bsid = bsid; hdr->streams[substreamid].bsmod = 0; hdr->streams[substreamid].acmod = ac3_mod; hdr->streams[substreamid].fscod = fscod; hdr->brcode = 0; } hdr->nb_streams++; //not clear if this is only for the independent streams hdr->brcode += ((frmsiz+1) * freq) / (numblks[numblkscod]*16) / 1000; if (lfon) hdr->channels += 1; } else { hdr->streams[substreamid].nb_dep_sub = substreamid; hdr->streams[substreamid].chan_loc |= chanmap; } if (numblkscod < 6) { //we need 6 blocks to make a sample if (gf_bs_seek(bs, pos + framesize) != GF_OK) { gf_bs_seek(bs, pos); return GF_FALSE; } if ((gf_bs_available(bs) < 6) || !AC3_FindSyncCodeBS(bs)) return GF_FALSE; goto block; } gf_bs_seek(bs, pos); return GF_TRUE; } #endif /*GPAC_DISABLE_AV_PARSERS*/ u32 gf_id3_read_size(GF_BitStream *bs) { u32 size = 0; gf_bs_read_int(bs, 1); size |= gf_bs_read_int(bs, 7); size<<=7; gf_bs_read_int(bs, 1); size |= gf_bs_read_int(bs, 7); size<<=7; gf_bs_read_int(bs, 1); size |= gf_bs_read_int(bs, 7); size<<=7; gf_bs_read_int(bs, 1); size |= gf_bs_read_int(bs, 7); return size; } #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined (GPAC_DISABLE_OGG) /* Vorbis parser */ static u32 vorbis_book_maptype1_quantvals(u32 entries, u32 dim) { u32 vals = (u32)floor(pow(entries, 1.0 / dim)); while (1) { u32 acc = 1; u32 acc1 = 1; u32 i; for (i = 0; i < dim; i++) { acc *= vals; acc1 *= vals + 1; } if (acc <= entries && acc1 > entries) return (vals); else { if (acc > entries) vals--; else vals++; } } } static u32 ilog(u32 v, Bool dec) { u32 ret = 0; if (dec && v) --v; while (v) { ret++; v >>= 1; } return (ret); } static u32 icount(u32 v) { u32 ret = 0; while (v) { ret += v & 1; v >>= 1; } return(ret); } GF_EXPORT Bool gf_vorbis_parse_header(GF_VorbisParser *vp, u8 *data, u32 data_len) { u32 pack_type, i, j, k, times, nb_part, nb_books, nb_modes; u32 l; char szNAME[8]; oggpack_buffer opb; oggpack_readinit(&opb, (u8*)data, data_len); pack_type = oggpack_read(&opb, 8); i = 0; while (i < 6) { szNAME[i] = oggpack_read(&opb, 8); i++; } szNAME[i] = 0; if (strcmp(szNAME, "vorbis")) { return GF_FALSE; } switch (pack_type) { case 0x01: vp->version = oggpack_read(&opb, 32); if (vp->version != 0) { return GF_FALSE; } vp->channels = oggpack_read(&opb, 8); vp->sample_rate = oggpack_read(&opb, 32); vp->max_r = oggpack_read(&opb, 32); vp->avg_r = oggpack_read(&opb, 32); vp->low_r = oggpack_read(&opb, 32); vp->min_block = 1<<oggpack_read(&opb, 4); vp->max_block = 1<<oggpack_read(&opb, 4); if (vp->sample_rate < 1 || vp->channels < 1 || vp->min_block < 8 || vp->max_block < vp->min_block || oggpack_read(&opb, 1) != 1) { return GF_FALSE; } vp->nb_init=1; return GF_TRUE; case 0x03: /*trash comments*/ vp->nb_init++; return GF_TRUE; case 0x05: /*need at least bitstream header to make sure we're parsing 
the right thing*/ if (!vp->nb_init) return GF_FALSE; break; default: return GF_FALSE; } /*OK parse codebook*/ nb_books = oggpack_read(&opb, 8) + 1; /*skip vorbis static books*/ for (i = 0; i < nb_books; i++) { u32 map_type, qb, qq; u32 entries, dim; oggpack_read(&opb, 24); dim = oggpack_read(&opb, 16); entries = oggpack_read(&opb, 24); if ((s32)entries < 0) entries = 0; if (oggpack_read(&opb, 1) == 0) { if (oggpack_read(&opb, 1)) { for (j = 0; j < entries; j++) { if (oggpack_read(&opb, 1)) { oggpack_read(&opb, 5); } } } else { for (j = 0; j < entries; j++) oggpack_read(&opb, 5); } } else { oggpack_read(&opb, 5); for (j = 0; j < entries;) { u32 num = oggpack_read(&opb, ilog(entries - j, GF_FALSE)); for (k = 0; k < num && j < entries; k++, j++) { } } } switch ((map_type = oggpack_read(&opb, 4))) { case 0: break; case 1: case 2: oggpack_read(&opb, 32); oggpack_read(&opb, 32); qq = oggpack_read(&opb, 4) + 1; oggpack_read(&opb, 1); if (map_type == 1) qb = vorbis_book_maptype1_quantvals(entries, dim); else if (map_type == 2) qb = entries * dim; else qb = 0; for (j = 0; j < qb; j++) oggpack_read(&opb, qq); break; } } times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) oggpack_read(&opb, 16); times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) { u32 type = oggpack_read(&opb, 16); if (type) { u32 *parts, *class_dims, count, rangebits; u32 max_class = 0; nb_part = oggpack_read(&opb, 5); parts = (u32*)gf_malloc(sizeof(u32) * nb_part); for (j = 0; j < nb_part; j++) { parts[j] = oggpack_read(&opb, 4); if (max_class < parts[j]) max_class = parts[j]; } class_dims = (u32*)gf_malloc(sizeof(u32) * (max_class + 1)); for (j = 0; j < max_class + 1; j++) { u32 class_sub; class_dims[j] = oggpack_read(&opb, 3) + 1; class_sub = oggpack_read(&opb, 2); if (class_sub) oggpack_read(&opb, 8); for (k = 0; k < (u32)(1 << class_sub); k++) oggpack_read(&opb, 8); } oggpack_read(&opb, 2); rangebits = oggpack_read(&opb, 4); count = 0; for (j = 0, k = 0; j < nb_part; j++) { count += class_dims[parts[j]]; for (; k < count; k++) oggpack_read(&opb, rangebits); } gf_free(parts); gf_free(class_dims); } else { oggpack_read(&opb, 8 + 16 + 16 + 6 + 8); nb_books = oggpack_read(&opb, 4) + 1; for (j = 0; j < nb_books; j++) oggpack_read(&opb, 8); } } times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) { u32 acc = 0; oggpack_read(&opb, 16);/*type*/ oggpack_read(&opb, 24); oggpack_read(&opb, 24); oggpack_read(&opb, 24); nb_part = oggpack_read(&opb, 6) + 1; oggpack_read(&opb, 8); for (j = 0; j < nb_part; j++) { u32 cascade = oggpack_read(&opb, 3); if (oggpack_read(&opb, 1)) cascade |= (oggpack_read(&opb, 5) << 3); acc += icount(cascade); } for (j = 0; j < acc; j++) oggpack_read(&opb, 8); } times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) { u32 sub_maps = 1; oggpack_read(&opb, 16); if (oggpack_read(&opb, 1)) sub_maps = oggpack_read(&opb, 4) + 1; if (oggpack_read(&opb, 1)) { u32 nb_steps = oggpack_read(&opb, 8) + 1; for (j = 0; j < nb_steps; j++) { oggpack_read(&opb, ilog(vp->channels, GF_TRUE)); oggpack_read(&opb, ilog(vp->channels, GF_TRUE)); } } oggpack_read(&opb, 2); if (sub_maps>1) { for(l=0; l<vp->channels; l++) oggpack_read(&opb, 4); } for (j = 0; j < sub_maps; j++) { oggpack_read(&opb, 8); oggpack_read(&opb, 8); oggpack_read(&opb, 8); } } nb_modes = oggpack_read(&opb, 6) + 1; for (i = 0; i < nb_modes; i++) { vp->mode_flag[i] = oggpack_read(&opb, 1); oggpack_read(&opb, 16); oggpack_read(&opb, 16); oggpack_read(&opb, 8); } vp->modebits = 0; j = nb_modes; while (j > 1) { vp->modebits++; j >>= 
1; } return GF_TRUE; } GF_EXPORT u32 gf_vorbis_check_frame(GF_VorbisParser *vp, u8 *data, u32 data_length) { s32 block_size; oggpack_buffer opb; if (!vp) return 0; oggpack_readinit(&opb, (unsigned char*)data, data_length); /*not audio*/ if (oggpack_read(&opb, 1) != 0) return 0; block_size = oggpack_read(&opb, vp->modebits); if (block_size == -1) return 0; return ((vp->mode_flag[block_size]) ? vp->max_block : vp->min_block) / (2); } /*call with Opus header packets - initializes the parser on success, leave it to NULL otherwise returns 1 if success, 0 if error.*/ Bool gf_opus_parse_header(GF_OpusParser *opus, u8 *data, u32 data_len) { char tag[9]; GF_BitStream *bs = gf_bs_new(data, data_len, GF_BITSTREAM_READ); gf_bs_read_data(bs, tag, 8); tag[8]=0; if (memcmp(data, "OpusHead", sizeof(char)*8)) { gf_bs_del(bs); return GF_FALSE; } /*Identification Header*/ opus->version = gf_bs_read_u8(bs); /*version*/ if (opus->version != 1) { gf_bs_del(bs); GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Opus] Unsupported version %d\n", opus->version)); return GF_FALSE; } opus->OutputChannelCount = gf_bs_read_u8(bs); opus->PreSkip = gf_bs_read_u16_le(bs); opus->InputSampleRate = gf_bs_read_u32_le(bs); opus->OutputGain = gf_bs_read_u16_le(bs); opus->ChannelMappingFamily = gf_bs_read_u8(bs); if (opus->ChannelMappingFamily != 0) { opus->StreamCount = gf_bs_read_u8(bs); opus->CoupledCount = gf_bs_read_u8(bs); gf_bs_read_data(bs, (char *) opus->ChannelMapping, opus->OutputChannelCount); } gf_bs_del(bs); return GF_TRUE; } /*returns 0 if init error or not an Opus frame, otherwise returns the number of audio samples in this frame*/ u32 gf_opus_check_frame(GF_OpusParser *op, u8 *data, u32 data_length) { u32 block_size; if (!memcmp(data, "OpusHead", sizeof(char)*8)) return 0; if (!memcmp(data, "OpusTags", sizeof(char)*8)) return 0; /*consider the whole packet as one frame: Ogg packets and ISOBMFF samples for Opus are framed similarly*/ static const int OpusFrameDurIn48k[] = { 480, 960, 1920, 2880, 480, 960, 1920, 2880, 480, 960, 1920, 2880, 480, 960, 480, 960, 120, 240, 480, 960, 120, 240, 480, 960, 120, 240, 480, 960, 120, 240, 480, 960, }; int TOC_config = (data[0] & 0xf8) >> 3; //int s = (data[0] & 0x04) >> 2; block_size = OpusFrameDurIn48k[TOC_config]; int c = data[0] & 0x03; if (c == 1 || c == 2) { block_size *= 2; } else if (c == 3) { /*unknown number of frames*/ int num_frames = data[1] & 0x3f; block_size *= num_frames; } return block_size; } #endif /*!defined(GPAC_DISABLE_AV_PARSERS) && !defined (GPAC_DISABLE_OGG)*/ u64 gf_mpegh_escaped_value(GF_BitStream *bs, u32 nBits1, u32 nBits2, u32 nBits3) { u64 value = gf_bs_read_int(bs, nBits1); if (value == (1<<nBits1)-1) { u32 vadd = gf_bs_read_int(bs, nBits2); value += vadd; if (vadd == (1<<nBits2)-1) { vadd = gf_bs_read_int(bs, nBits3); value += vadd; } } return value; } GF_EXPORT s32 gf_mpegh_get_mhas_pl(u8 *ptr, u32 size, u64 *ch_layout) { s32 PL = -1; GF_BitStream *bs; u32 i; s32 sync_pos=-1; for (i=0; i<size-3; i++) { if ((ptr[i]==0xC0) && (ptr[i+1]== 0x01) && (ptr[i+2]==0xA5)) { sync_pos = i; break; } } if (sync_pos<0) return 0; if (ch_layout) *ch_layout = 0; bs = gf_bs_new(ptr, size, GF_BITSTREAM_READ); gf_bs_skip_bytes(bs, sync_pos); while (gf_bs_available(bs)) { u32 type = (u32) gf_mpegh_escaped_value(bs, 3, 8, 8); /*u64 label = */gf_mpegh_escaped_value(bs, 2, 8, 32); u64 mh_size = gf_mpegh_escaped_value(bs, 11, 24, 24); if (mh_size > gf_bs_available(bs)) break; //MHAS config if (type==1) { PL = gf_bs_read_int(bs, 8); if (ch_layout) { u32 idx = gf_bs_read_int(bs, 5); if
(idx==0x1f) gf_bs_read_int(bs, 24); /*idx = */gf_bs_read_int(bs, 3); gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 1); //speaker config idx = gf_bs_read_int(bs, 2); if (idx == 0) { *ch_layout = gf_audio_fmt_get_layout_from_cicp( gf_bs_read_int(bs, 6) ); } } break; } gf_bs_skip_bytes(bs, mh_size); } gf_bs_del(bs); return PL; } GF_EXPORT void gf_media_vvc_parse_sei(char *buffer, u32 nal_size, VVCState *vvc) { gf_hevc_vvc_parse_sei(buffer, nal_size, NULL, vvc); } static Bool vvc_parse_nal_header(GF_BitStream *bs, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { u32 val; val = gf_bs_read_int_log(bs, 1, "forbidden_zero"); if (val) return GF_FALSE; val = gf_bs_read_int_log(bs, 1, "reserved0"); if (val) return GF_FALSE; val = gf_bs_read_int_log(bs, 6, "layerID"); if (layer_id) *layer_id = val; val = gf_bs_read_int_log(bs, 5, "nuh_type"); if (nal_unit_type) *nal_unit_type = val; val = gf_bs_read_int_log(bs, 3, "temporalID"); if (!val) return GF_FALSE; val -= 1; if (temporal_id) *temporal_id = val; return GF_TRUE; } static void vvc_profile_tier_level(GF_BitStream *bs, VVC_ProfileTierLevel *ptl, u32 idx) { u32 i; if (ptl->pt_present) { ptl->general_profile_idc = gf_bs_read_int_log_idx(bs, 7, "general_profile_idc", idx); ptl->general_tier_flag = gf_bs_read_int_log_idx(bs, 1, "general_tier_flag", idx); } ptl->general_level_idc = gf_bs_read_int_log_idx(bs, 8, "general_level_idc", idx); ptl->frame_only_constraint = gf_bs_read_int_log_idx(bs, 1, "frame_only_constraint", idx); ptl->multilayer_enabled = gf_bs_read_int_log_idx(bs, 1, "multilayer_enabled", idx); //general constraints info - max size is 1 + 81 + 8 + 255 if (ptl->pt_present) { // general_constraints_info ptl->gci_present = gf_bs_read_int_log_idx(bs, 1, "gci_present", idx); if (ptl->gci_present) { u8 res; ptl->gci[0] = 0x80; ptl->gci[0] |= gf_bs_read_int(bs, 7); //81-7 = 74 bits till reserved gf_bs_read_data(bs, ptl->gci+1, 9); ptl->gci[10] = gf_bs_read_int(bs, 2)<<6; //skip extensions ptl->gci[11] = 0; res = gf_bs_read_int(bs, 8); gf_bs_read_int(bs, res); } gf_bs_align(bs); } for (i=ptl->ptl_max_tid; i>0; i--) { ptl->sub_ptl[i-1].level_present_flag = gf_bs_read_int_log_idx2(bs, 1, "level_present_flag", idx, i); } gf_bs_align(bs); for (i=ptl->ptl_max_tid; i>0; i--) { if (ptl->sub_ptl[i-1].level_present_flag) ptl->sub_ptl[i-1].sublayer_level_idc = gf_bs_read_int_log_idx2(bs, 8, "sublayer_level_idc", idx, i); } if (ptl->pt_present) { ptl->num_sub_profiles = gf_bs_read_int_log_idx(bs, 8, "num_sub_profiles", idx); for (i=0; i<ptl->num_sub_profiles; i++) { ptl->sub_profile_idc[i] = gf_bs_read_int_log_idx2(bs, 32, "sub_profile_idc", idx, i); } } } static s32 gf_media_vvc_read_vps_bs_internal(GF_BitStream *bs, VVCState *vvc, Bool stop_at_vps_ext) { u32 i, j; s32 vps_id; VVC_VPS *vps; Bool vps_default_ptl_dpb_hrd_max_tid_flag=0; //nalu header already parsed vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if ((vps_id<0) || (vps_id >= 16)) return -1; if (!vps_id) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] VPS ID 0 is forbidden\n")); return -1; } vps = &vvc->vps[vps_id]; if (!vps->state) { vps->id = vps_id; vps->state = 1; } vps->max_layers = 1 + gf_bs_read_int_log(bs, 6, "max_layers"); if (vps->max_layers > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] sorry, %d layers in VPS but only %d supported\n", vps->max_layers, MAX_LHVC_LAYERS)); return -1; } vps->max_sub_layers = gf_bs_read_int_log(bs, 3, "max_sub_layers_minus1") + 1; if ((vps->max_layers>1) && (vps->max_sub_layers>1)) vps_default_ptl_dpb_hrd_max_tid_flag = gf_bs_read_int_log(bs,
1, "vps_default_ptl_dpb_hrd_max_tid_flag"); if (vps->max_layers>1) vps->all_layers_independent = gf_bs_read_int_log(bs, 1, "all_layers_independent"); for (i=0; i<vps->max_layers; i++) { u32 layer_id = gf_bs_read_int_log_idx(bs, 6, "layer_id", i); if (layer_id>vps->max_layer_id) vps->max_layer_id = layer_id; if (i && !vps->all_layers_independent) { Bool layer_indep = gf_bs_read_int_log_idx(bs, 1, "layer_independent", i); if (!layer_indep) { Bool vps_max_tid_ref_present_flag = gf_bs_read_int_log_idx(bs, 1, "vps_max_tid_ref_present_flag", i); for (j=0; j<i; j++) { Bool vps_direct_ref_layer_flag = gf_bs_read_int_log_idx2(bs, 1, "vps_direct_ref_layer_flag", i, j); if (vps_max_tid_ref_present_flag && vps_direct_ref_layer_flag) { gf_bs_read_int_log_idx2(bs, 3, "vps_max_tid_il_ref_pics_plus1", i, j); } } } } } vps->num_ptl = 1; if (vps->max_layers > 1) { if (vps->all_layers_independent) { vps->each_layer_is_ols = gf_bs_read_int_log(bs, 1, "each_layer_is_ols"); } if (!vps->each_layer_is_ols) { u32 vps_ols_mode_idc = 2; if (!vps->all_layers_independent) { vps_ols_mode_idc = gf_bs_read_int_log(bs, 2, "vps_ols_mode_idc"); } if (vps_ols_mode_idc==2) { u8 vps_num_output_layer_sets = 2 + gf_bs_read_int_log(bs, 8, "vps_num_output_layer_sets_minus2"); for (i=0; i<vps_num_output_layer_sets; i++) { for (j=0; j<vps->max_layers; j++) { gf_bs_read_int_log_idx2(bs, 1, "vps_ols_output_layer_flag", i, j); } } } } vps->num_ptl = 1 + gf_bs_read_int_log(bs, 8, "num_ptl_minus1"); } vps->ptl[0].pt_present = 1; for (i=0; i<vps->num_ptl; i++) { if (i) vps->ptl[i].pt_present = gf_bs_read_int_log_idx(bs, 1, "pt_present", i); if (!vps_default_ptl_dpb_hrd_max_tid_flag) vps->ptl[i].ptl_max_tid = gf_bs_read_int_log_idx(bs, 3, "ptl_max_tid", i); else vps->ptl[i].ptl_max_tid = vps->max_sub_layers - 1;; } //align gf_bs_align(bs); for (i=0; i<vps->num_ptl; i++) { vvc_profile_tier_level(bs, &vps->ptl[i], i); } //TODO, parse multilayer stuff return vps_id; } static s32 gf_media_vvc_read_sps_bs_internal(GF_BitStream *bs, VVCState *vvc, u8 layer_id, u32 *vui_flag_pos) { s32 vps_id, sps_id; u32 i, CtbSizeY; VVC_SPS *sps; u8 sps_ptl_dpb_hrd_params_present_flag; if (vui_flag_pos) *vui_flag_pos = 0; sps_id = gf_bs_read_int_log(bs, 4, "sps_id"); if ((sps_id<0) || (sps_id >= 16)) { return -1; } vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if ((vps_id<0) || (vps_id >= 16)) { return -1; } if (!vps_id && !vvc->vps[0].state) { vvc->vps[0].state = 1; vvc->vps[0].num_ptl = 1; vvc->vps[0].max_layers = 1; vvc->vps[0].all_layers_independent = 1; } sps = &vvc->sps[sps_id]; if (!sps->state) { sps->state = 1; sps->id = sps_id; sps->vps_id = vps_id; } sps->max_sublayers = 1 + gf_bs_read_int_log(bs, 3, "max_sublayers_minus1"); sps->chroma_format_idc = gf_bs_read_int_log(bs, 2, "chroma_format_idc"); sps->log2_ctu_size = 5 + gf_bs_read_int_log(bs, 2, "log2_ctu_size_minus5"); CtbSizeY = 1<<sps->log2_ctu_size; sps_ptl_dpb_hrd_params_present_flag = gf_bs_read_int_log(bs, 1, "sps_ptl_dpb_hrd_params_present_flag"); if (sps_ptl_dpb_hrd_params_present_flag) { VVC_ProfileTierLevel ptl, *p_ptl; if (sps->vps_id) { p_ptl = &ptl; } else { p_ptl = &vvc->vps[0].ptl[0]; } memset(p_ptl, 0, sizeof(VVC_ProfileTierLevel)); p_ptl->pt_present = 1; p_ptl->ptl_max_tid = sps->max_sublayers-1; vvc_profile_tier_level(bs, p_ptl, 0); } sps->gdr_enabled = gf_bs_read_int_log(bs, 1, "gdr_enabled"); sps->ref_pic_resampling = gf_bs_read_int_log(bs, 1, "ref_pic_resampling"); if (sps->ref_pic_resampling) sps->res_change_in_clvs = gf_bs_read_int_log(bs, 1, "res_change_in_clvs"); sps->width 
= gf_bs_read_ue_log(bs, "width"); sps->height = gf_bs_read_ue_log(bs, "height"); sps->conf_window = gf_bs_read_int_log(bs, 1, "conformance_window_present_flag"); if (sps->conf_window) { sps->cw_left = gf_bs_read_ue_log(bs, "conformance_window_left"); sps->cw_right = gf_bs_read_ue_log(bs, "conformance_window_right"); sps->cw_top = gf_bs_read_ue_log(bs, "conformance_window_top"); sps->cw_bottom = gf_bs_read_ue_log(bs, "conformance_window_bottom"); } sps->subpic_info_present = gf_bs_read_int_log(bs, 1, "subpic_info_present"); if (sps->subpic_info_present) { sps->nb_subpics = 1 + gf_bs_read_ue_log(bs, "nb_subpics_minus1"); if (sps->nb_subpics>1) { u32 tmpWidthVal, tmpHeightVal; sps->independent_subpic_flags = gf_bs_read_int_log(bs, 1, "independent_subpic_flags"); sps->subpic_same_size = gf_bs_read_int_log(bs, 1, "subpic_same_size"); tmpWidthVal = (sps->width + CtbSizeY-1) / CtbSizeY; tmpWidthVal = gf_get_bit_size(tmpWidthVal); tmpHeightVal = (sps->height + CtbSizeY-1) / CtbSizeY; tmpHeightVal = gf_get_bit_size(tmpHeightVal); for (i=0; i<sps->nb_subpics; i++) { if( !sps->subpic_same_size || !i) { if (i && (sps->width > CtbSizeY)) gf_bs_read_int_log(bs, tmpWidthVal, "subpic_ctu_top_left_x"); if (i && (sps->height > CtbSizeY)) gf_bs_read_int_log(bs, tmpHeightVal, "subpic_ctu_top_left_y"); if ((i+1 < sps->nb_subpics) && (sps->width > CtbSizeY)) gf_bs_read_int_log(bs, tmpWidthVal, "subpic_width_minus1"); if ((i+1 < sps->nb_subpics) && (sps->height > CtbSizeY)) gf_bs_read_int_log(bs, tmpHeightVal, "subpic_height_minus1"); } if (!sps->independent_subpic_flags) { gf_bs_read_int_log(bs, 1, "subpic_treated_as_pic_flag"); gf_bs_read_int_log(bs, 1, "loop_filter_across_subpic_enabled_flag"); } } sps->subpicid_len = gf_bs_read_ue_log(bs, "subpic_id_len_minus1") + 1; sps->subpicid_mapping_explicit = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_explicitly_signalled_flag"); if (sps->subpicid_mapping_explicit) { sps->subpicid_mapping_present = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_present_flag"); if (sps->subpicid_mapping_present) { for (i=0; i<sps->nb_subpics; i++) { gf_bs_read_ue_log(bs, "subpic_id"); } } } } } sps->bitdepth = gf_bs_read_ue_log(bs, "bitdepth_minus8") + 8; gf_bs_read_int_log(bs, 1, "entropy_coding_sync_enabled_flag"); gf_bs_read_int_log(bs, 1, "entry_point_offsets_present_flag"); sps->log2_max_poc_lsb = 4 + gf_bs_read_int_log(bs, 4, "log2_max_poc_lsb_minus4"); if ((sps->poc_msb_cycle_flag = gf_bs_read_int_log(bs, 1, "poc_msb_cycle_flag"))) sps->poc_msb_cycle_len = 1 + gf_bs_read_ue_log(bs, "poc_msb_cycle_len_minus1"); u8 sps_num_extra_ph_bits = 8 * gf_bs_read_int_log(bs, 2, "sps_num_extra_ph_bytes"); for (i=0; i<sps_num_extra_ph_bits; i++) { if (gf_bs_read_int_log_idx(bs, 1, "extra_ph_bit_present_flag", 1)) sps->ph_num_extra_bits++; } u8 sps_num_extra_sh_bits = 8 * gf_bs_read_int_log(bs, 2, "num_extra_sh_bytes"); for (i=0; i<sps_num_extra_sh_bits; i++) { if (gf_bs_read_int_log_idx(bs, 1, "extra_sh_bit_present_flag", i)) sps->sh_num_extra_bits++; } if (sps_ptl_dpb_hrd_params_present_flag) { u8 sps_sublayer_dpb_params_flag = 0; if (sps->max_sublayers>1) { sps_sublayer_dpb_params_flag = gf_bs_read_int_log(bs, 1, "sps_sublayer_dpb_params_flag"); } for (i=(sps_sublayer_dpb_params_flag ? 
0 : sps->max_sublayers-1); i < sps->max_sublayers; i++ ) { gf_bs_read_ue_log_idx(bs, "dpb_max_dec_pic_buffering_minus1", i); gf_bs_read_ue_log_idx(bs, "dpb_max_num_reorder_pics", i); gf_bs_read_ue_log_idx(bs, "dpb_max_latency_increase_plus1", i); } } gf_bs_read_ue_log(bs, "sps_log2_min_luma_coding_block_size_minus2"); gf_bs_read_int_log(bs, 1, "sps_partition_constraints_override_enabled_flag"); gf_bs_read_ue_log(bs, "sps_log2_min_luma_coding_block_size_minus2"); u8 sps_max_mtt_hierarchy_depth_intra_slice_luma = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_intra_slice_luma"); if (sps_max_mtt_hierarchy_depth_intra_slice_luma != 0) { gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_intra_slice_luma"); gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_intra_slice_luma"); } u8 sps_qtbtt_dual_tree_intra_flag = 0; if (sps->chroma_format_idc) { sps_qtbtt_dual_tree_intra_flag = gf_bs_read_int_log(bs, 1, "sps_qtbtt_dual_tree_intra_flag"); } if (sps_qtbtt_dual_tree_intra_flag) { gf_bs_read_ue_log(bs, "sps_log2_diff_min_qt_min_cb_intra_slice_chroma"); u8 sps_max_mtt_hierarchy_depth_intra_slice_chroma = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_intra_slice_chroma"); if( sps_max_mtt_hierarchy_depth_intra_slice_chroma != 0) { gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_intra_slice_chroma"); gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_intra_slice_chroma"); } } gf_bs_read_ue_log(bs, "sps_log2_diff_min_qt_min_cb_inter_slice"); u8 sps_max_mtt_hierarchy_depth_inter_slice = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_inter_slice"); if (sps_max_mtt_hierarchy_depth_inter_slice != 0) { gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_inter_slice"); gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_inter_slice"); } //u8 sps_max_luma_transform_size_64_flag = 0; if (CtbSizeY > 32) { /*sps_max_luma_transform_size_64_flag = */gf_bs_read_int_log(bs, 1, "sps_max_luma_transform_size_64_flag"); } u8 sps_transform_skip_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_transform_skip_enabled_flag"); if (sps_transform_skip_enabled_flag) { gf_bs_read_ue_log(bs, "sps_log2_transform_skip_max_size_minus2"); gf_bs_read_int_log(bs, 1, "sps_bdpcm_enabled_flag"); } if (gf_bs_read_int_log(bs, 1, "sps_mts_enabled_flag")) { gf_bs_read_int_log(bs, 1, "sps_explicit_mts_intra_enabled_flag"); gf_bs_read_int_log(bs, 1, "sps_explicit_mts_inter_enabled_flag"); } gf_bs_read_int_log(bs, 1, "sps_lfnst_enabled_flag"); if (sps->chroma_format_idc) { u8 sps_joint_cbcr_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_joint_cbcr_enabled_flag"); u8 sps_same_qp_table_for_chroma_flag = gf_bs_read_int_log(bs, 1, "sps_same_qp_table_for_chroma_flag"); u32 numQpTables = sps_same_qp_table_for_chroma_flag ? 1 : (sps_joint_cbcr_enabled_flag ? 3 : 2); for (i=0; i<numQpTables; i++) { gf_bs_read_se_log_idx(bs, "sps_qp_table_start_minus26", i); u32 j, sps_num_points_in_qp_table = 1 + gf_bs_read_ue_log_idx(bs, "sps_num_points_in_qp_table_minus1", i); for (j=0; j<sps_num_points_in_qp_table; j++) { gf_bs_read_ue_log_idx2(bs, "sps_delta_qp_in_val_minus1", i, j); gf_bs_read_ue_log_idx2(bs, "sps_delta_qp_diff_val", i, j); } } } gf_bs_read_int_log(bs, 1, "sps_sao_enabled_flag"); sps->alf_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_alf_enabled_flag"); if (sps->alf_enabled_flag && sps->chroma_format_idc) { gf_bs_read_int_log(bs, 1, "sps_ccalf_enabled_flag"); } /*! 
TODO parse the rest !*/ return sps_id; } static s32 gf_media_vvc_read_pps_bs_internal(GF_BitStream *bs, VVCState *vvc) { u32 i; s32 pps_id; VVC_PPS *pps; //NAL header already read pps_id = gf_bs_read_int_log(bs, 6, "pps_id"); if ((pps_id < 0) || (pps_id >= 64)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] wrong PPS ID %d in PPS\n", pps_id)); return -1; } pps = &vvc->pps[pps_id]; if (!pps->state) { pps->id = pps_id; pps->state = 1; } pps->sps_id = gf_bs_read_int_log(bs, 4, "sps_id"); if (((s32)pps->sps_id<0) || (pps->sps_id >= 16)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] wrong SPS ID %d in PPS\n", pps->sps_id)); pps->sps_id=0; return -1; } vvc->sps_active_idx = pps->sps_id; /*set active sps*/ pps->mixed_nal_types = gf_bs_read_int_log(bs, 1, "mixed_nal_types"); pps->width = gf_bs_read_ue_log(bs, "width"); pps->height = gf_bs_read_ue_log(bs, "height"); pps->conf_window = gf_bs_read_int_log(bs, 1, "conformance_window_flag"); if (pps->conf_window) { pps->cw_left = gf_bs_read_ue_log(bs, "conf_win_left_offset"); pps->cw_right = gf_bs_read_ue_log(bs, "conf_win_right_offset"); pps->cw_top = gf_bs_read_ue_log(bs, "conf_win_top_offset"); pps->cw_bottom = gf_bs_read_ue_log(bs, "conf_win_bottom_offset"); } //scaling window if (gf_bs_read_int_log(bs, 1, "scaling_window_explicit_signaling_flag")) { gf_bs_read_se_log(bs, "scaling_win_left_offset"); gf_bs_read_se_log(bs, "scaling_win_right_offset"); gf_bs_read_se_log(bs, "scaling_win_top_offset"); gf_bs_read_se_log(bs, "scaling_win_bottom_offset"); } pps->output_flag_present_flag = gf_bs_read_int_log(bs, 1, "output_flag_present_flag"); pps->no_pic_partition_flag = gf_bs_read_int_log(bs, 1, "no_pic_partition_flag"); pps->subpic_id_mapping_present_flag = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_present_flag"); if (pps->subpic_id_mapping_present_flag) { u32 pps_subpic_id_len, pps_num_subpics=0; if (!pps->no_pic_partition_flag) { pps_num_subpics = 1+gf_bs_read_ue_log(bs, "pps_num_subpics_minus1"); } pps_subpic_id_len = 1 + gf_bs_read_ue(bs); for (i=0; i<pps_num_subpics; i++) { gf_bs_read_int_log_idx(bs, pps_subpic_id_len, "subpic_id", i); } } if (!pps->no_pic_partition_flag) { gf_bs_read_int_log(bs, 2, "pps_log2_ctu_size_minus5"); u32 num_exp_tile_columns = 1 + gf_bs_read_ue_log(bs, "num_exp_tile_columns_minus1"); u32 num_exp_tile_rows = 1 + gf_bs_read_ue_log(bs, "num_exp_tile_rows_minus1"); for (i=0; i<num_exp_tile_columns; i++) gf_bs_read_ue_log_idx(bs, "tile_column_width_minus1", i); for (i=0; i<num_exp_tile_rows; i++) gf_bs_read_ue_log_idx(bs, "tile_row_height_minus1", i); //todo parse the rest return pps_id; } //todo parse the rest return pps_id; } static s32 vvc_parse_picture_header(GF_BitStream *bs, VVCState *vvc, VVCSliceInfo *si) { s32 pps_id; si->irap_or_gdr_pic = gf_bs_read_int_log(bs, 1, "irap_or_gdr_pic"); si->non_ref_pic = gf_bs_read_int_log(bs, 1, "non_ref_pic"); if (si->irap_or_gdr_pic) si->gdr_pic = gf_bs_read_int_log(bs, 1, "gdr_pic"); if ((si->inter_slice_allowed_flag = gf_bs_read_int_log(bs, 1, "inter_slice_allowed_flag"))) si->intra_slice_allowed_flag = gf_bs_read_int_log(bs, 1, "intra_slice_allowed_flag"); pps_id = gf_bs_read_ue_log(bs, "pps_id"); if ((pps_id<0) || (pps_id >= 64)) return -1; si->pps = &vvc->pps[pps_id]; si->sps = &vvc->sps[si->pps->sps_id]; si->poc_lsb = gf_bs_read_int_log(bs, si->sps->log2_max_poc_lsb, "poc_lsb"); si->recovery_point_valid = 0; si->gdr_recovery_count = 0; if (si->gdr_pic) { si->recovery_point_valid = 1; si->gdr_recovery_count = gf_bs_read_ue_log(bs, "gdr_recovery_count"); } 
gf_bs_read_int_log(bs, si->sps->ph_num_extra_bits, "ph_extra_bits"); if (si->sps->poc_msb_cycle_flag) { if ( (si->poc_msb_cycle_present_flag = gf_bs_read_int_log(bs, 1, "poc_msb_cycle_present_flag"))) { si->poc_msb_cycle = gf_bs_read_int_log(bs, si->sps->poc_msb_cycle_len, "poc_msb_cycle"); } } return 0; } static s32 vvc_parse_slice(GF_BitStream *bs, VVCState *vvc, VVCSliceInfo *si) { // u32 CurrSubpicIdx = 0; si->picture_header_in_slice_header_flag = gf_bs_read_int_log(bs, 1, "picture_header_in_slice_header_flag"); if (si->picture_header_in_slice_header_flag) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CODING, ("[VVC] Picture header in slice header incomplete support, cannot guess slice type\n")); si->slice_type = GF_VVC_SLICE_TYPE_UNKNOWN; return vvc_parse_picture_header(bs, vvc, si); } if (!si->sps) return -1; si->slice_type = GF_VVC_SLICE_TYPE_I; if (gf_bs_read_int_log(bs, 1, "sps_subpic_info_present_flag")) { gf_bs_read_int_log(bs, si->sps->subpicid_len, "subpic_id"); //todo update CurrSubpicIdx } if (si->pps->rect_slice_flag ) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[VVC] tiling parsing not supported - patch welcome\n")); return 0; } gf_bs_read_int_log(bs, si->sps->sh_num_extra_bits, "num_extra_bits"); /* if( !pps_rect_slice_flag && NumTilesInPic − sh_slice_address > 1 ) sh_num_tiles_in_slice_minus1 */ if (si->inter_slice_allowed_flag ) si->slice_type = gf_bs_read_int_log(bs, 2, "slice_type"); return 0; } /*this needs further tests !*/ static void vvc_compute_poc(VVCSliceInfo *si) { u32 max_poc_lsb = 1 << (si->sps->log2_max_poc_lsb); /*POC reset for IDR frames, NOT for CRA*/ if (si->irap_or_gdr_pic && !si->gdr_pic) { si->poc_lsb_prev = 0; si->poc_msb_prev = 0; } if (si->poc_msb_cycle_present_flag) { si->poc_msb = si->poc_msb_cycle; } else { if ((si->poc_lsb < si->poc_lsb_prev) && (si->poc_lsb_prev - si->poc_lsb >= max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev + max_poc_lsb; else if ((si->poc_lsb > si->poc_lsb_prev) && (si->poc_lsb - si->poc_lsb_prev > max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev - max_poc_lsb; else si->poc_msb = si->poc_msb_prev; } si->poc = si->poc_msb + si->poc_lsb; } GF_EXPORT s32 gf_media_vvc_parse_nalu_bs(GF_BitStream *bs, VVCState *vvc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { Bool is_slice = GF_FALSE; s32 ret = -1; VVCSliceInfo n_state; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); memcpy(&n_state, &vvc->s_info, sizeof(VVCSliceInfo)); if (!vvc_parse_nal_header(bs, nal_unit_type, temporal_id, layer_id)) return -1; n_state.nal_unit_type = *nal_unit_type; switch (n_state.nal_unit_type) { case GF_VVC_NALU_ACCESS_UNIT: case GF_VVC_NALU_END_OF_SEQ: case GF_VVC_NALU_END_OF_STREAM: ret = 1; break; case GF_VVC_NALU_SLICE_TRAIL: case GF_VVC_NALU_SLICE_STSA: case GF_VVC_NALU_SLICE_RADL: case GF_VVC_NALU_SLICE_RASL: case GF_VVC_NALU_SLICE_IDR_W_RADL: case GF_VVC_NALU_SLICE_IDR_N_LP: case GF_VVC_NALU_SLICE_CRA: case GF_VVC_NALU_SLICE_GDR: /* slice - read the info and compare.*/ ret = vvc_parse_slice(bs, vvc, &n_state); if (ret < 0) return ret; ret = 0; if (n_state.picture_header_in_slice_header_flag) { is_slice = GF_TRUE; vvc_compute_poc(&n_state); if (vvc->s_info.poc != n_state.poc) { ret = 1; break; } if (!(*layer_id) || (n_state.prev_layer_id_plus1 && ((*layer_id) <= n_state.prev_layer_id_plus1 - 1))) { ret = 1; break; } } break; case GF_VVC_NALU_PIC_HEADER: if (vvc_parse_picture_header(bs, vvc, &n_state)<0) { ret = -1; break; } is_slice = GF_TRUE; vvc_compute_poc(&n_state); if (!(*layer_id) || (n_state.prev_layer_id_plus1 && ((*layer_id) <= 
n_state.prev_layer_id_plus1 - 1))) { ret = 1; } break; case GF_VVC_NALU_SEQ_PARAM: vvc->last_parsed_sps_id = gf_media_vvc_read_sps_bs_internal(bs, vvc, *layer_id, NULL); ret = (vvc->last_parsed_sps_id>=0) ? 0 : -1; break; case GF_VVC_NALU_PIC_PARAM: vvc->last_parsed_pps_id = gf_media_vvc_read_pps_bs_internal(bs, vvc); ret = (vvc->last_parsed_pps_id>=0) ? 0 : -1; break; case GF_VVC_NALU_VID_PARAM: vvc->last_parsed_vps_id = gf_media_vvc_read_vps_bs_internal(bs, vvc, GF_FALSE); ret = (vvc->last_parsed_vps_id>=0) ? 0 : -1; break; case GF_VVC_NALU_DEC_PARAM: ret = 0; break; case GF_VVC_NALU_APS_PREFIX: //we use the mix aps type + aps id (first 8 bits) as unique identifier vvc->last_parsed_aps_id = gf_bs_read_int_log(bs, 8, "aps_id"); ret = 0; break; default: ret = 0; break; } /* save _prev values */ if ((ret>0) && vvc->s_info.sps) { // n_state.frame_num_offset_prev = vvc->s_info.frame_num_offset; // n_state.frame_num_prev = vvc->s_info.frame_num; n_state.poc_lsb_prev = vvc->s_info.poc_lsb; n_state.poc_msb_prev = vvc->s_info.poc_msb; if (is_slice) n_state.prev_layer_id_plus1 = *layer_id + 1; } if (is_slice) vvc_compute_poc(&n_state); memcpy(&vvc->s_info, &n_state, sizeof(VVCSliceInfo)); return ret; } GF_EXPORT s32 gf_media_vvc_parse_nalu(u8 *data, u32 size, VVCState *vvc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { GF_BitStream *bs = NULL; s32 ret; if (!vvc) { if (nal_unit_type) (*nal_unit_type) = data[1] >> 3; if (layer_id) (*layer_id) = data[0] & 0x3f; if (temporal_id) (*temporal_id) = (data[1] & 0x7); return -1; } bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); ret = gf_media_vvc_parse_nalu_bs(bs, vvc, nal_unit_type, temporal_id, layer_id); gf_bs_del(bs); return ret; } Bool gf_media_vvc_slice_is_ref(VVCState *vvc) { if (!vvc->s_info.irap_or_gdr_pic) { return GF_FALSE; } if (vvc->s_info.gdr_pic) { if (vvc->s_info.recovery_point_valid) { vvc->s_info.recovery_point_valid = 0; return GF_TRUE; } return GF_FALSE; } return GF_TRUE; }
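For illustration, a minimal stand-alone sketch of the picture order count derivation used by vvc_compute_poc() in the parser code above: the MSB is carried forward from the previous picture and adjusted when the LSB wraps in either direction. compute_poc() below is a hypothetical helper written only to isolate that logic; it is not a GPAC API, and the caller is assumed to track log2_max_poc_lsb and the previous lsb/msb values the way VVCSliceInfo does.

#include <stdio.h>

/* Mirror of the lsb/msb wraparound handling in vvc_compute_poc(). */
static int compute_poc(unsigned log2_max_poc_lsb,
                       int poc_lsb, int poc_lsb_prev, int poc_msb_prev)
{
    int max_poc_lsb = 1 << log2_max_poc_lsb;
    int poc_msb;

    /* lsb wrapped forward: previous lsb was near the top of the range */
    if ((poc_lsb < poc_lsb_prev) && (poc_lsb_prev - poc_lsb >= max_poc_lsb / 2))
        poc_msb = poc_msb_prev + max_poc_lsb;
    /* lsb wrapped backward (reordering): previous lsb was near the bottom */
    else if ((poc_lsb > poc_lsb_prev) && (poc_lsb - poc_lsb_prev > max_poc_lsb / 2))
        poc_msb = poc_msb_prev - max_poc_lsb;
    else
        poc_msb = poc_msb_prev;

    return poc_msb + poc_lsb;
}

int main(void)
{
    /* With log2_max_poc_lsb = 4, the lsb wraps from 15 back to 0,
       so the msb advances by 16 and the resulting POC is 16. */
    printf("%d\n", compute_poc(4, 0, 15, 0));
    return 0;
}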
null
275
CWE-787
CVE-2021-33657
/* Simple DirectMedia Layer Copyright (C) 1997-2021 Sam Lantinga <slouken@libsdl.org> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include "../SDL_internal.h" /* General (mostly internal) pixel/color manipulation routines for SDL */ #include "SDL_endian.h" #include "SDL_video.h" #include "SDL_sysvideo.h" #include "SDL_blit.h" #include "SDL_pixels_c.h" #include "SDL_RLEaccel_c.h" /* Lookup tables to expand partial bytes to the full 0..255 range */ static Uint8 lookup_0[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255 }; static Uint8 lookup_1[] = { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 255 }; static Uint8 lookup_2[] = { 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129, 133, 137, 141, 145, 149, 153, 157, 161, 165, 170, 174, 178, 182, 186, 190, 194, 198, 202, 206, 210, 214, 218, 222, 226, 230, 234, 238, 242, 246, 250, 255 }; static Uint8 lookup_3[] = { 0, 8, 16, 24, 32, 41, 49, 57, 65, 74, 82, 90, 98, 106, 115, 123, 131, 139, 148, 156, 164, 172, 180, 189, 197, 205, 213, 222, 230, 238, 246, 255 }; static Uint8 lookup_4[] = { 
0, 17, 34, 51, 68, 85, 102, 119, 136, 153, 170, 187, 204, 221, 238, 255 }; static Uint8 lookup_5[] = { 0, 36, 72, 109, 145, 182, 218, 255 }; static Uint8 lookup_6[] = { 0, 85, 170, 255 }; static Uint8 lookup_7[] = { 0, 255 }; static Uint8 lookup_8[] = { 255 }; Uint8* SDL_expand_byte[9] = { lookup_0, lookup_1, lookup_2, lookup_3, lookup_4, lookup_5, lookup_6, lookup_7, lookup_8 }; /* Helper functions */ const char* SDL_GetPixelFormatName(Uint32 format) { switch (format) { #define CASE(X) case X: return #X; CASE(SDL_PIXELFORMAT_INDEX1LSB) CASE(SDL_PIXELFORMAT_INDEX1MSB) CASE(SDL_PIXELFORMAT_INDEX4LSB) CASE(SDL_PIXELFORMAT_INDEX4MSB) CASE(SDL_PIXELFORMAT_INDEX8) CASE(SDL_PIXELFORMAT_RGB332) CASE(SDL_PIXELFORMAT_RGB444) CASE(SDL_PIXELFORMAT_BGR444) CASE(SDL_PIXELFORMAT_RGB555) CASE(SDL_PIXELFORMAT_BGR555) CASE(SDL_PIXELFORMAT_ARGB4444) CASE(SDL_PIXELFORMAT_RGBA4444) CASE(SDL_PIXELFORMAT_ABGR4444) CASE(SDL_PIXELFORMAT_BGRA4444) CASE(SDL_PIXELFORMAT_ARGB1555) CASE(SDL_PIXELFORMAT_RGBA5551) CASE(SDL_PIXELFORMAT_ABGR1555) CASE(SDL_PIXELFORMAT_BGRA5551) CASE(SDL_PIXELFORMAT_RGB565) CASE(SDL_PIXELFORMAT_BGR565) CASE(SDL_PIXELFORMAT_RGB24) CASE(SDL_PIXELFORMAT_BGR24) CASE(SDL_PIXELFORMAT_RGB888) CASE(SDL_PIXELFORMAT_RGBX8888) CASE(SDL_PIXELFORMAT_BGR888) CASE(SDL_PIXELFORMAT_BGRX8888) CASE(SDL_PIXELFORMAT_ARGB8888) CASE(SDL_PIXELFORMAT_RGBA8888) CASE(SDL_PIXELFORMAT_ABGR8888) CASE(SDL_PIXELFORMAT_BGRA8888) CASE(SDL_PIXELFORMAT_ARGB2101010) CASE(SDL_PIXELFORMAT_YV12) CASE(SDL_PIXELFORMAT_IYUV) CASE(SDL_PIXELFORMAT_YUY2) CASE(SDL_PIXELFORMAT_UYVY) CASE(SDL_PIXELFORMAT_YVYU) CASE(SDL_PIXELFORMAT_NV12) CASE(SDL_PIXELFORMAT_NV21) #undef CASE default: return "SDL_PIXELFORMAT_UNKNOWN"; } } SDL_bool SDL_PixelFormatEnumToMasks(Uint32 format, int *bpp, Uint32 * Rmask, Uint32 * Gmask, Uint32 * Bmask, Uint32 * Amask) { Uint32 masks[4]; /* This function doesn't work with FourCC pixel formats */ if (SDL_ISPIXELFORMAT_FOURCC(format)) { SDL_SetError("FOURCC pixel formats are not supported"); return SDL_FALSE; } /* Initialize the values here */ if (SDL_BYTESPERPIXEL(format) <= 2) { *bpp = SDL_BITSPERPIXEL(format); } else { *bpp = SDL_BYTESPERPIXEL(format) * 8; } *Rmask = *Gmask = *Bmask = *Amask = 0; if (format == SDL_PIXELFORMAT_RGB24) { #if SDL_BYTEORDER == SDL_BIG_ENDIAN *Rmask = 0x00FF0000; *Gmask = 0x0000FF00; *Bmask = 0x000000FF; #else *Rmask = 0x000000FF; *Gmask = 0x0000FF00; *Bmask = 0x00FF0000; #endif return SDL_TRUE; } if (format == SDL_PIXELFORMAT_BGR24) { #if SDL_BYTEORDER == SDL_BIG_ENDIAN *Rmask = 0x000000FF; *Gmask = 0x0000FF00; *Bmask = 0x00FF0000; #else *Rmask = 0x00FF0000; *Gmask = 0x0000FF00; *Bmask = 0x000000FF; #endif return SDL_TRUE; } if (SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED8 && SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED16 && SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED32) { /* Not a format that uses masks */ return SDL_TRUE; } switch (SDL_PIXELLAYOUT(format)) { case SDL_PACKEDLAYOUT_332: masks[0] = 0x00000000; masks[1] = 0x000000E0; masks[2] = 0x0000001C; masks[3] = 0x00000003; break; case SDL_PACKEDLAYOUT_4444: masks[0] = 0x0000F000; masks[1] = 0x00000F00; masks[2] = 0x000000F0; masks[3] = 0x0000000F; break; case SDL_PACKEDLAYOUT_1555: masks[0] = 0x00008000; masks[1] = 0x00007C00; masks[2] = 0x000003E0; masks[3] = 0x0000001F; break; case SDL_PACKEDLAYOUT_5551: masks[0] = 0x0000F800; masks[1] = 0x000007C0; masks[2] = 0x0000003E; masks[3] = 0x00000001; break; case SDL_PACKEDLAYOUT_565: masks[0] = 0x00000000; masks[1] = 0x0000F800; masks[2] = 0x000007E0; masks[3] = 0x0000001F; 
break; case SDL_PACKEDLAYOUT_8888: masks[0] = 0xFF000000; masks[1] = 0x00FF0000; masks[2] = 0x0000FF00; masks[3] = 0x000000FF; break; case SDL_PACKEDLAYOUT_2101010: masks[0] = 0xC0000000; masks[1] = 0x3FF00000; masks[2] = 0x000FFC00; masks[3] = 0x000003FF; break; case SDL_PACKEDLAYOUT_1010102: masks[0] = 0xFFC00000; masks[1] = 0x003FF000; masks[2] = 0x00000FFC; masks[3] = 0x00000003; break; default: SDL_SetError("Unknown pixel format"); return SDL_FALSE; } switch (SDL_PIXELORDER(format)) { case SDL_PACKEDORDER_XRGB: *Rmask = masks[1]; *Gmask = masks[2]; *Bmask = masks[3]; break; case SDL_PACKEDORDER_RGBX: *Rmask = masks[0]; *Gmask = masks[1]; *Bmask = masks[2]; break; case SDL_PACKEDORDER_ARGB: *Amask = masks[0]; *Rmask = masks[1]; *Gmask = masks[2]; *Bmask = masks[3]; break; case SDL_PACKEDORDER_RGBA: *Rmask = masks[0]; *Gmask = masks[1]; *Bmask = masks[2]; *Amask = masks[3]; break; case SDL_PACKEDORDER_XBGR: *Bmask = masks[1]; *Gmask = masks[2]; *Rmask = masks[3]; break; case SDL_PACKEDORDER_BGRX: *Bmask = masks[0]; *Gmask = masks[1]; *Rmask = masks[2]; break; case SDL_PACKEDORDER_BGRA: *Bmask = masks[0]; *Gmask = masks[1]; *Rmask = masks[2]; *Amask = masks[3]; break; case SDL_PACKEDORDER_ABGR: *Amask = masks[0]; *Bmask = masks[1]; *Gmask = masks[2]; *Rmask = masks[3]; break; default: SDL_SetError("Unknown pixel format"); return SDL_FALSE; } return SDL_TRUE; } Uint32 SDL_MasksToPixelFormatEnum(int bpp, Uint32 Rmask, Uint32 Gmask, Uint32 Bmask, Uint32 Amask) { switch (bpp) { case 1: /* SDL defaults to MSB ordering */ return SDL_PIXELFORMAT_INDEX1MSB; case 4: /* SDL defaults to MSB ordering */ return SDL_PIXELFORMAT_INDEX4MSB; case 8: if (Rmask == 0) { return SDL_PIXELFORMAT_INDEX8; } if (Rmask == 0xE0 && Gmask == 0x1C && Bmask == 0x03 && Amask == 0x00) { return SDL_PIXELFORMAT_RGB332; } break; case 12: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB444; } if (Rmask == 0x0F00 && Gmask == 0x00F0 && Bmask == 0x000F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB444; } if (Rmask == 0x000F && Gmask == 0x00F0 && Bmask == 0x0F00 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR444; } break; case 15: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB555; } SDL_FALLTHROUGH; case 16: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB565; } if (Rmask == 0x7C00 && Gmask == 0x03E0 && Bmask == 0x001F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB555; } if (Rmask == 0x001F && Gmask == 0x03E0 && Bmask == 0x7C00 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR555; } if (Rmask == 0x0F00 && Gmask == 0x00F0 && Bmask == 0x000F && Amask == 0xF000) { return SDL_PIXELFORMAT_ARGB4444; } if (Rmask == 0xF000 && Gmask == 0x0F00 && Bmask == 0x00F0 && Amask == 0x000F) { return SDL_PIXELFORMAT_RGBA4444; } if (Rmask == 0x000F && Gmask == 0x00F0 && Bmask == 0x0F00 && Amask == 0xF000) { return SDL_PIXELFORMAT_ABGR4444; } if (Rmask == 0x00F0 && Gmask == 0x0F00 && Bmask == 0xF000 && Amask == 0x000F) { return SDL_PIXELFORMAT_BGRA4444; } if (Rmask == 0x7C00 && Gmask == 0x03E0 && Bmask == 0x001F && Amask == 0x8000) { return SDL_PIXELFORMAT_ARGB1555; } if (Rmask == 0xF800 && Gmask == 0x07C0 && Bmask == 0x003E && Amask == 0x0001) { return SDL_PIXELFORMAT_RGBA5551; } if (Rmask == 0x001F && Gmask == 0x03E0 && Bmask == 0x7C00 && Amask == 0x8000) { return SDL_PIXELFORMAT_ABGR1555; } if (Rmask == 0x003E && Gmask == 0x07C0 && Bmask == 0xF800 && Amask == 0x0001) { return SDL_PIXELFORMAT_BGRA5551; } if (Rmask == 0xF800 && Gmask == 0x07E0 && Bmask == 0x001F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB565; } if (Rmask == 0x001F && Gmask == 0x07E0 
&& Bmask == 0xF800 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR565; } if (Rmask == 0x003F && Gmask == 0x07C0 && Bmask == 0xF800 && Amask == 0x0000) { /* Technically this would be BGR556, but Witek says this works in bug 3158 */ return SDL_PIXELFORMAT_RGB565; } break; case 24: switch (Rmask) { case 0: case 0x00FF0000: #if SDL_BYTEORDER == SDL_BIG_ENDIAN return SDL_PIXELFORMAT_RGB24; #else return SDL_PIXELFORMAT_BGR24; #endif case 0x000000FF: #if SDL_BYTEORDER == SDL_BIG_ENDIAN return SDL_PIXELFORMAT_BGR24; #else return SDL_PIXELFORMAT_RGB24; #endif } case 32: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB888; } if (Rmask == 0x00FF0000 && Gmask == 0x0000FF00 && Bmask == 0x000000FF && Amask == 0x00000000) { return SDL_PIXELFORMAT_RGB888; } if (Rmask == 0xFF000000 && Gmask == 0x00FF0000 && Bmask == 0x0000FF00 && Amask == 0x00000000) { return SDL_PIXELFORMAT_RGBX8888; } if (Rmask == 0x000000FF && Gmask == 0x0000FF00 && Bmask == 0x00FF0000 && Amask == 0x00000000) { return SDL_PIXELFORMAT_BGR888; } if (Rmask == 0x0000FF00 && Gmask == 0x00FF0000 && Bmask == 0xFF000000 && Amask == 0x00000000) { return SDL_PIXELFORMAT_BGRX8888; } if (Rmask == 0x00FF0000 && Gmask == 0x0000FF00 && Bmask == 0x000000FF && Amask == 0xFF000000) { return SDL_PIXELFORMAT_ARGB8888; } if (Rmask == 0xFF000000 && Gmask == 0x00FF0000 && Bmask == 0x0000FF00 && Amask == 0x000000FF) { return SDL_PIXELFORMAT_RGBA8888; } if (Rmask == 0x000000FF && Gmask == 0x0000FF00 && Bmask == 0x00FF0000 && Amask == 0xFF000000) { return SDL_PIXELFORMAT_ABGR8888; } if (Rmask == 0x0000FF00 && Gmask == 0x00FF0000 && Bmask == 0xFF000000 && Amask == 0x000000FF) { return SDL_PIXELFORMAT_BGRA8888; } if (Rmask == 0x3FF00000 && Gmask == 0x000FFC00 && Bmask == 0x000003FF && Amask == 0xC0000000) { return SDL_PIXELFORMAT_ARGB2101010; } } return SDL_PIXELFORMAT_UNKNOWN; } static SDL_PixelFormat *formats; static SDL_SpinLock formats_lock = 0; SDL_PixelFormat * SDL_AllocFormat(Uint32 pixel_format) { SDL_PixelFormat *format; SDL_AtomicLock(&formats_lock); /* Look it up in our list of previously allocated formats */ for (format = formats; format; format = format->next) { if (pixel_format == format->format) { ++format->refcount; SDL_AtomicUnlock(&formats_lock); return format; } } /* Allocate an empty pixel format structure, and initialize it */ format = SDL_malloc(sizeof(*format)); if (format == NULL) { SDL_AtomicUnlock(&formats_lock); SDL_OutOfMemory(); return NULL; } if (SDL_InitFormat(format, pixel_format) < 0) { SDL_AtomicUnlock(&formats_lock); SDL_free(format); SDL_InvalidParamError("format"); return NULL; } if (!SDL_ISPIXELFORMAT_INDEXED(pixel_format)) { /* Cache the RGB formats */ format->next = formats; formats = format; } SDL_AtomicUnlock(&formats_lock); return format; } int SDL_InitFormat(SDL_PixelFormat * format, Uint32 pixel_format) { int bpp; Uint32 Rmask, Gmask, Bmask, Amask; Uint32 mask; if (!SDL_PixelFormatEnumToMasks(pixel_format, &bpp, &Rmask, &Gmask, &Bmask, &Amask)) { return -1; } /* Set up the format */ SDL_zerop(format); format->format = pixel_format; format->BitsPerPixel = bpp; format->BytesPerPixel = (bpp + 7) / 8; format->Rmask = Rmask; format->Rshift = 0; format->Rloss = 8; if (Rmask) { for (mask = Rmask; !(mask & 0x01); mask >>= 1) ++format->Rshift; for (; (mask & 0x01); mask >>= 1) --format->Rloss; } format->Gmask = Gmask; format->Gshift = 0; format->Gloss = 8; if (Gmask) { for (mask = Gmask; !(mask & 0x01); mask >>= 1) ++format->Gshift; for (; (mask & 0x01); mask >>= 1) --format->Gloss; } format->Bmask = Bmask; format->Bshift = 0; 
format->Bloss = 8; if (Bmask) { for (mask = Bmask; !(mask & 0x01); mask >>= 1) ++format->Bshift; for (; (mask & 0x01); mask >>= 1) --format->Bloss; } format->Amask = Amask; format->Ashift = 0; format->Aloss = 8; if (Amask) { for (mask = Amask; !(mask & 0x01); mask >>= 1) ++format->Ashift; for (; (mask & 0x01); mask >>= 1) --format->Aloss; } format->palette = NULL; format->refcount = 1; format->next = NULL; return 0; } void SDL_FreeFormat(SDL_PixelFormat *format) { SDL_PixelFormat *prev; if (!format) { SDL_InvalidParamError("format"); return; } SDL_AtomicLock(&formats_lock); if (--format->refcount > 0) { SDL_AtomicUnlock(&formats_lock); return; } /* Remove this format from our list */ if (format == formats) { formats = format->next; } else if (formats) { for (prev = formats; prev->next; prev = prev->next) { if (prev->next == format) { prev->next = format->next; break; } } } SDL_AtomicUnlock(&formats_lock); if (format->palette) { SDL_FreePalette(format->palette); } SDL_free(format); } SDL_Palette * SDL_AllocPalette(int ncolors) { SDL_Palette *palette; /* Input validation */ if (ncolors < 1) { SDL_InvalidParamError("ncolors"); return NULL; } palette = (SDL_Palette *) SDL_malloc(sizeof(*palette)); if (!palette) { SDL_OutOfMemory(); return NULL; } palette->colors = (SDL_Color *) SDL_malloc(ncolors * sizeof(*palette->colors)); if (!palette->colors) { SDL_free(palette); return NULL; } palette->ncolors = ncolors; palette->version = 1; palette->refcount = 1; SDL_memset(palette->colors, 0xFF, ncolors * sizeof(*palette->colors)); return palette; } int SDL_SetPixelFormatPalette(SDL_PixelFormat * format, SDL_Palette *palette) { if (!format) { return SDL_SetError("SDL_SetPixelFormatPalette() passed NULL format"); } if (palette && palette->ncolors > (1 << format->BitsPerPixel)) { return SDL_SetError("SDL_SetPixelFormatPalette() passed a palette that doesn't match the format"); } if (format->palette == palette) { return 0; } if (format->palette) { SDL_FreePalette(format->palette); } format->palette = palette; if (format->palette) { ++format->palette->refcount; } return 0; } int SDL_SetPaletteColors(SDL_Palette * palette, const SDL_Color * colors, int firstcolor, int ncolors) { int status = 0; /* Verify the parameters */ if (!palette) { return -1; } if (ncolors > (palette->ncolors - firstcolor)) { ncolors = (palette->ncolors - firstcolor); status = -1; } if (colors != (palette->colors + firstcolor)) { SDL_memcpy(palette->colors + firstcolor, colors, ncolors * sizeof(*colors)); } ++palette->version; if (!palette->version) { palette->version = 1; } return status; } void SDL_FreePalette(SDL_Palette * palette) { if (!palette) { SDL_InvalidParamError("palette"); return; } if (--palette->refcount > 0) { return; } SDL_free(palette->colors); SDL_free(palette); } /* * Calculate an 8-bit (3 red, 3 green, 2 blue) dithered palette of colors */ void SDL_DitherColors(SDL_Color * colors, int bpp) { int i; if (bpp != 8) return; /* only 8bpp supported right now */ for (i = 0; i < 256; i++) { int r, g, b; /* map each bit field to the full [0, 255] interval, so 0 is mapped to (0, 0, 0) and 255 to (255, 255, 255) */ r = i & 0xe0; r |= r >> 3 | r >> 6; colors[i].r = r; g = (i << 3) & 0xe0; g |= g >> 3 | g >> 6; colors[i].g = g; b = i & 0x3; b |= b << 2; b |= b << 4; colors[i].b = b; colors[i].a = SDL_ALPHA_OPAQUE; } } /* * Match an RGB value to a particular palette index */ Uint8 SDL_FindColor(SDL_Palette * pal, Uint8 r, Uint8 g, Uint8 b, Uint8 a) { /* Do colorspace distance matching */ unsigned int smallest; unsigned int 
distance; int rd, gd, bd, ad; int i; Uint8 pixel = 0; smallest = ~0; for (i = 0; i < pal->ncolors; ++i) { rd = pal->colors[i].r - r; gd = pal->colors[i].g - g; bd = pal->colors[i].b - b; ad = pal->colors[i].a - a; distance = (rd * rd) + (gd * gd) + (bd * bd) + (ad * ad); if (distance < smallest) { pixel = i; if (distance == 0) { /* Perfect match! */ break; } smallest = distance; } } return (pixel); } /* Tell whether palette is opaque, and if it has an alpha_channel */ void SDL_DetectPalette(SDL_Palette *pal, SDL_bool *is_opaque, SDL_bool *has_alpha_channel) { int i; { SDL_bool all_opaque = SDL_TRUE; for (i = 0; i < pal->ncolors; i++) { Uint8 alpha_value = pal->colors[i].a; if (alpha_value != SDL_ALPHA_OPAQUE) { all_opaque = SDL_FALSE; break; } } if (all_opaque) { /* Palette is opaque, with an alpha channel */ *is_opaque = SDL_TRUE; *has_alpha_channel = SDL_TRUE; return; } } { SDL_bool all_transparent = SDL_TRUE; for (i = 0; i < pal->ncolors; i++) { Uint8 alpha_value = pal->colors[i].a; if (alpha_value != SDL_ALPHA_TRANSPARENT) { all_transparent = SDL_FALSE; break; } } if (all_transparent) { /* Palette is opaque, without an alpha channel */ *is_opaque = SDL_TRUE; *has_alpha_channel = SDL_FALSE; return; } } /* Palette has alpha values */ *is_opaque = SDL_FALSE; *has_alpha_channel = SDL_TRUE; } /* Find the opaque pixel value corresponding to an RGB triple */ Uint32 SDL_MapRGB(const SDL_PixelFormat * format, Uint8 r, Uint8 g, Uint8 b) { if (format->palette == NULL) { return (r >> format->Rloss) << format->Rshift | (g >> format->Gloss) << format->Gshift | (b >> format->Bloss) << format->Bshift | format->Amask; } else { return SDL_FindColor(format->palette, r, g, b, SDL_ALPHA_OPAQUE); } } /* Find the pixel value corresponding to an RGBA quadruple */ Uint32 SDL_MapRGBA(const SDL_PixelFormat * format, Uint8 r, Uint8 g, Uint8 b, Uint8 a) { if (format->palette == NULL) { return (r >> format->Rloss) << format->Rshift | (g >> format->Gloss) << format->Gshift | (b >> format->Bloss) << format->Bshift | ((Uint32)(a >> format->Aloss) << format->Ashift & format->Amask); } else { return SDL_FindColor(format->palette, r, g, b, a); } } void SDL_GetRGB(Uint32 pixel, const SDL_PixelFormat * format, Uint8 * r, Uint8 * g, Uint8 * b) { if (format->palette == NULL) { unsigned v; v = (pixel & format->Rmask) >> format->Rshift; *r = SDL_expand_byte[format->Rloss][v]; v = (pixel & format->Gmask) >> format->Gshift; *g = SDL_expand_byte[format->Gloss][v]; v = (pixel & format->Bmask) >> format->Bshift; *b = SDL_expand_byte[format->Bloss][v]; } else { if (pixel < (unsigned)format->palette->ncolors) { *r = format->palette->colors[pixel].r; *g = format->palette->colors[pixel].g; *b = format->palette->colors[pixel].b; } else { *r = *g = *b = 0; } } } void SDL_GetRGBA(Uint32 pixel, const SDL_PixelFormat * format, Uint8 * r, Uint8 * g, Uint8 * b, Uint8 * a) { if (format->palette == NULL) { unsigned v; v = (pixel & format->Rmask) >> format->Rshift; *r = SDL_expand_byte[format->Rloss][v]; v = (pixel & format->Gmask) >> format->Gshift; *g = SDL_expand_byte[format->Gloss][v]; v = (pixel & format->Bmask) >> format->Bshift; *b = SDL_expand_byte[format->Bloss][v]; v = (pixel & format->Amask) >> format->Ashift; *a = SDL_expand_byte[format->Aloss][v]; } else { if (pixel < (unsigned)format->palette->ncolors) { *r = format->palette->colors[pixel].r; *g = format->palette->colors[pixel].g; *b = format->palette->colors[pixel].b; *a = format->palette->colors[pixel].a; } else { *r = *g = *b = *a = 0; } } } /* Map from Palette to Palette */ 
static Uint8 * Map1to1(SDL_Palette * src, SDL_Palette * dst, int *identical) { Uint8 *map; int i; if (identical) { if (src->ncolors <= dst->ncolors) { /* If an identical palette, no need to map */ if (src == dst || (SDL_memcmp (src->colors, dst->colors, src->ncolors * sizeof(SDL_Color)) == 0)) { *identical = 1; return (NULL); } } *identical = 0; } map = (Uint8 *) SDL_malloc(src->ncolors); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } for (i = 0; i < src->ncolors; ++i) { map[i] = SDL_FindColor(dst, src->colors[i].r, src->colors[i].g, src->colors[i].b, src->colors[i].a); } return (map); } /* Map from Palette to BitField */ static Uint8 * Map1toN(SDL_PixelFormat * src, Uint8 Rmod, Uint8 Gmod, Uint8 Bmod, Uint8 Amod, SDL_PixelFormat * dst) { Uint8 *map; int i; int bpp; SDL_Palette *pal = src->palette; bpp = ((dst->BytesPerPixel == 3) ? 4 : dst->BytesPerPixel); map = (Uint8 *) SDL_malloc(pal->ncolors * bpp); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } /* We memory copy to the pixel map so the endianness is preserved */ for (i = 0; i < pal->ncolors; ++i) { Uint8 R = (Uint8) ((pal->colors[i].r * Rmod) / 255); Uint8 G = (Uint8) ((pal->colors[i].g * Gmod) / 255); Uint8 B = (Uint8) ((pal->colors[i].b * Bmod) / 255); Uint8 A = (Uint8) ((pal->colors[i].a * Amod) / 255); ASSEMBLE_RGBA(&map[i * bpp], dst->BytesPerPixel, dst, (Uint32)R, (Uint32)G, (Uint32)B, (Uint32)A); } return (map); } /* Map from BitField to Dithered-Palette to Palette */ static Uint8 * MapNto1(SDL_PixelFormat * src, SDL_PixelFormat * dst, int *identical) { /* Generate a 256 color dither palette */ SDL_Palette dithered; SDL_Color colors[256]; SDL_Palette *pal = dst->palette; dithered.ncolors = 256; SDL_DitherColors(colors, 8); dithered.colors = colors; return (Map1to1(&dithered, pal, identical)); } SDL_BlitMap * SDL_AllocBlitMap(void) { SDL_BlitMap *map; /* Allocate the empty map */ map = (SDL_BlitMap *) SDL_calloc(1, sizeof(*map)); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } map->info.r = 0xFF; map->info.g = 0xFF; map->info.b = 0xFF; map->info.a = 0xFF; /* It's ready to go */ return (map); } typedef struct SDL_ListNode { void *entry; struct SDL_ListNode *next; } SDL_ListNode; void SDL_InvalidateAllBlitMap(SDL_Surface *surface) { SDL_ListNode *l = surface->list_blitmap; surface->list_blitmap = NULL; while (l) { SDL_ListNode *tmp = l; SDL_InvalidateMap((SDL_BlitMap *)l->entry); l = l->next; SDL_free(tmp); } } static void SDL_ListAdd(SDL_ListNode **head, void *ent); static void SDL_ListRemove(SDL_ListNode **head, void *ent); void SDL_ListAdd(SDL_ListNode **head, void *ent) { SDL_ListNode *node = SDL_malloc(sizeof (*node)); if (node == NULL) { SDL_OutOfMemory(); return; } node->entry = ent; node->next = *head; *head = node; } void SDL_ListRemove(SDL_ListNode **head, void *ent) { SDL_ListNode **ptr = head; while (*ptr) { if ((*ptr)->entry == ent) { SDL_ListNode *tmp = *ptr; *ptr = (*ptr)->next; SDL_free(tmp); return; } ptr = &(*ptr)->next; } } void SDL_InvalidateMap(SDL_BlitMap * map) { if (!map) { return; } if (map->dst) { /* Un-register from the destination surface */ SDL_ListRemove((SDL_ListNode **)&(map->dst->list_blitmap), map); } map->dst = NULL; map->src_palette_version = 0; map->dst_palette_version = 0; SDL_free(map->info.table); map->info.table = NULL; } int SDL_MapSurface(SDL_Surface * src, SDL_Surface * dst) { SDL_PixelFormat *srcfmt; SDL_PixelFormat *dstfmt; SDL_BlitMap *map; /* Clear out any previous mapping */ map = src->map; #if SDL_HAVE_RLE if ((src->flags & SDL_RLEACCEL) == SDL_RLEACCEL) { 
SDL_UnRLESurface(src, 1); } #endif SDL_InvalidateMap(map); /* Figure out what kind of mapping we're doing */ map->identity = 0; srcfmt = src->format; dstfmt = dst->format; if (SDL_ISPIXELFORMAT_INDEXED(srcfmt->format)) { if (SDL_ISPIXELFORMAT_INDEXED(dstfmt->format)) { /* Palette --> Palette */ map->info.table = Map1to1(srcfmt->palette, dstfmt->palette, &map->identity); if (!map->identity) { if (map->info.table == NULL) { return (-1); } } if (srcfmt->BitsPerPixel != dstfmt->BitsPerPixel) map->identity = 0; } else { /* Palette --> BitField */ map->info.table = Map1toN(srcfmt, src->map->info.r, src->map->info.g, src->map->info.b, src->map->info.a, dstfmt); if (map->info.table == NULL) { return (-1); } } } else { if (SDL_ISPIXELFORMAT_INDEXED(dstfmt->format)) { /* BitField --> Palette */ map->info.table = MapNto1(srcfmt, dstfmt, &map->identity); if (!map->identity) { if (map->info.table == NULL) { return (-1); } } map->identity = 0; /* Don't optimize to copy */ } else { /* BitField --> BitField */ if (srcfmt == dstfmt) { map->identity = 1; } } } map->dst = dst; if (map->dst) { /* Register BlitMap to the destination surface, to be invalidated when needed */ SDL_ListAdd((SDL_ListNode **)&(map->dst->list_blitmap), map); } if (dstfmt->palette) { map->dst_palette_version = dstfmt->palette->version; } else { map->dst_palette_version = 0; } if (srcfmt->palette) { map->src_palette_version = srcfmt->palette->version; } else { map->src_palette_version = 0; } /* Choose your blitters wisely */ return (SDL_CalculateBlit(src)); } void SDL_FreeBlitMap(SDL_BlitMap * map) { if (map) { SDL_InvalidateMap(map); SDL_free(map); } } void SDL_CalculateGammaRamp(float gamma, Uint16 * ramp) { int i; /* Input validation */ if (gamma < 0.0f ) { SDL_InvalidParamError("gamma"); return; } if (ramp == NULL) { SDL_InvalidParamError("ramp"); return; } /* 0.0 gamma is all black */ if (gamma == 0.0f) { SDL_memset(ramp, 0, 256 * sizeof(Uint16)); return; } else if (gamma == 1.0f) { /* 1.0 gamma is identity */ for (i = 0; i < 256; ++i) { ramp[i] = (i << 8) | i; } return; } else { /* Calculate a real gamma ramp */ int value; gamma = 1.0f / gamma; for (i = 0; i < 256; ++i) { value = (int) (SDL_pow((double) i / 256.0, gamma) * 65535.0 + 0.5); if (value > 65535) { value = 65535; } ramp[i] = (Uint16) value; } } } /* vi: set ts=4 sw=4 expandtab: */
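For illustration, a minimal stand-alone sketch of the mask-to-shift/loss derivation used by SDL_InitFormat() in the SDL_pixels.c code above, together with the channel packing done by SDL_MapRGB(): the shift is the number of trailing zero bits in the mask and the loss is 8 minus the mask width, so a channel is packed as (value >> loss) << shift. mask_to_shift_loss() below is a hypothetical local helper for illustration only, not an SDL API.

#include <stdio.h>
#include <stdint.h>

/* Derive shift (trailing zeros) and loss (8 - mask width) from a channel mask,
   the same way SDL_InitFormat() fills Rshift/Rloss and friends. */
static void mask_to_shift_loss(uint32_t mask, int *shift, int *loss)
{
    *shift = 0;
    *loss = 8;
    if (mask) {
        while (!(mask & 0x01)) { mask >>= 1; (*shift)++; } /* count trailing zeros */
        while (mask & 0x01)    { mask >>= 1; (*loss)--;  } /* subtract mask width from 8 */
    }
}

int main(void)
{
    int shift, loss;
    uint8_t r = 200;

    /* RGB565 red mask: 5 bits starting at bit 11 -> shift = 11, loss = 3 */
    mask_to_shift_loss(0xF800, &shift, &loss);

    /* Pack the red channel the way SDL_MapRGB() does for a mask format. */
    printf("shift=%d loss=%d packed=0x%04x\n",
           shift, loss, (unsigned)((r >> loss) << shift));
    return 0;
}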
null
/* Simple DirectMedia Layer Copyright (C) 1997-2021 Sam Lantinga <slouken@libsdl.org> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #include "../SDL_internal.h" /* General (mostly internal) pixel/color manipulation routines for SDL */ #include "SDL_endian.h" #include "SDL_video.h" #include "SDL_sysvideo.h" #include "SDL_blit.h" #include "SDL_pixels_c.h" #include "SDL_RLEaccel_c.h" /* Lookup tables to expand partial bytes to the full 0..255 range */ static Uint8 lookup_0[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255 }; static Uint8 lookup_1[] = { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 255 }; static Uint8 lookup_2[] = { 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129, 133, 137, 141, 145, 149, 153, 157, 161, 165, 170, 174, 178, 182, 186, 190, 194, 198, 202, 206, 210, 214, 218, 222, 226, 230, 234, 238, 242, 246, 250, 255 }; static Uint8 lookup_3[] = { 0, 8, 16, 24, 32, 41, 49, 57, 65, 74, 82, 90, 98, 106, 115, 123, 131, 139, 148, 156, 164, 172, 180, 189, 197, 205, 213, 222, 230, 238, 246, 255 }; static Uint8 lookup_4[] = { 
0, 17, 34, 51, 68, 85, 102, 119, 136, 153, 170, 187, 204, 221, 238, 255 }; static Uint8 lookup_5[] = { 0, 36, 72, 109, 145, 182, 218, 255 }; static Uint8 lookup_6[] = { 0, 85, 170, 255 }; static Uint8 lookup_7[] = { 0, 255 }; static Uint8 lookup_8[] = { 255 }; Uint8* SDL_expand_byte[9] = { lookup_0, lookup_1, lookup_2, lookup_3, lookup_4, lookup_5, lookup_6, lookup_7, lookup_8 }; /* Helper functions */ const char* SDL_GetPixelFormatName(Uint32 format) { switch (format) { #define CASE(X) case X: return #X; CASE(SDL_PIXELFORMAT_INDEX1LSB) CASE(SDL_PIXELFORMAT_INDEX1MSB) CASE(SDL_PIXELFORMAT_INDEX4LSB) CASE(SDL_PIXELFORMAT_INDEX4MSB) CASE(SDL_PIXELFORMAT_INDEX8) CASE(SDL_PIXELFORMAT_RGB332) CASE(SDL_PIXELFORMAT_RGB444) CASE(SDL_PIXELFORMAT_BGR444) CASE(SDL_PIXELFORMAT_RGB555) CASE(SDL_PIXELFORMAT_BGR555) CASE(SDL_PIXELFORMAT_ARGB4444) CASE(SDL_PIXELFORMAT_RGBA4444) CASE(SDL_PIXELFORMAT_ABGR4444) CASE(SDL_PIXELFORMAT_BGRA4444) CASE(SDL_PIXELFORMAT_ARGB1555) CASE(SDL_PIXELFORMAT_RGBA5551) CASE(SDL_PIXELFORMAT_ABGR1555) CASE(SDL_PIXELFORMAT_BGRA5551) CASE(SDL_PIXELFORMAT_RGB565) CASE(SDL_PIXELFORMAT_BGR565) CASE(SDL_PIXELFORMAT_RGB24) CASE(SDL_PIXELFORMAT_BGR24) CASE(SDL_PIXELFORMAT_RGB888) CASE(SDL_PIXELFORMAT_RGBX8888) CASE(SDL_PIXELFORMAT_BGR888) CASE(SDL_PIXELFORMAT_BGRX8888) CASE(SDL_PIXELFORMAT_ARGB8888) CASE(SDL_PIXELFORMAT_RGBA8888) CASE(SDL_PIXELFORMAT_ABGR8888) CASE(SDL_PIXELFORMAT_BGRA8888) CASE(SDL_PIXELFORMAT_ARGB2101010) CASE(SDL_PIXELFORMAT_YV12) CASE(SDL_PIXELFORMAT_IYUV) CASE(SDL_PIXELFORMAT_YUY2) CASE(SDL_PIXELFORMAT_UYVY) CASE(SDL_PIXELFORMAT_YVYU) CASE(SDL_PIXELFORMAT_NV12) CASE(SDL_PIXELFORMAT_NV21) #undef CASE default: return "SDL_PIXELFORMAT_UNKNOWN"; } } SDL_bool SDL_PixelFormatEnumToMasks(Uint32 format, int *bpp, Uint32 * Rmask, Uint32 * Gmask, Uint32 * Bmask, Uint32 * Amask) { Uint32 masks[4]; /* This function doesn't work with FourCC pixel formats */ if (SDL_ISPIXELFORMAT_FOURCC(format)) { SDL_SetError("FOURCC pixel formats are not supported"); return SDL_FALSE; } /* Initialize the values here */ if (SDL_BYTESPERPIXEL(format) <= 2) { *bpp = SDL_BITSPERPIXEL(format); } else { *bpp = SDL_BYTESPERPIXEL(format) * 8; } *Rmask = *Gmask = *Bmask = *Amask = 0; if (format == SDL_PIXELFORMAT_RGB24) { #if SDL_BYTEORDER == SDL_BIG_ENDIAN *Rmask = 0x00FF0000; *Gmask = 0x0000FF00; *Bmask = 0x000000FF; #else *Rmask = 0x000000FF; *Gmask = 0x0000FF00; *Bmask = 0x00FF0000; #endif return SDL_TRUE; } if (format == SDL_PIXELFORMAT_BGR24) { #if SDL_BYTEORDER == SDL_BIG_ENDIAN *Rmask = 0x000000FF; *Gmask = 0x0000FF00; *Bmask = 0x00FF0000; #else *Rmask = 0x00FF0000; *Gmask = 0x0000FF00; *Bmask = 0x000000FF; #endif return SDL_TRUE; } if (SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED8 && SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED16 && SDL_PIXELTYPE(format) != SDL_PIXELTYPE_PACKED32) { /* Not a format that uses masks */ return SDL_TRUE; } switch (SDL_PIXELLAYOUT(format)) { case SDL_PACKEDLAYOUT_332: masks[0] = 0x00000000; masks[1] = 0x000000E0; masks[2] = 0x0000001C; masks[3] = 0x00000003; break; case SDL_PACKEDLAYOUT_4444: masks[0] = 0x0000F000; masks[1] = 0x00000F00; masks[2] = 0x000000F0; masks[3] = 0x0000000F; break; case SDL_PACKEDLAYOUT_1555: masks[0] = 0x00008000; masks[1] = 0x00007C00; masks[2] = 0x000003E0; masks[3] = 0x0000001F; break; case SDL_PACKEDLAYOUT_5551: masks[0] = 0x0000F800; masks[1] = 0x000007C0; masks[2] = 0x0000003E; masks[3] = 0x00000001; break; case SDL_PACKEDLAYOUT_565: masks[0] = 0x00000000; masks[1] = 0x0000F800; masks[2] = 0x000007E0; masks[3] = 0x0000001F; 
break; case SDL_PACKEDLAYOUT_8888: masks[0] = 0xFF000000; masks[1] = 0x00FF0000; masks[2] = 0x0000FF00; masks[3] = 0x000000FF; break; case SDL_PACKEDLAYOUT_2101010: masks[0] = 0xC0000000; masks[1] = 0x3FF00000; masks[2] = 0x000FFC00; masks[3] = 0x000003FF; break; case SDL_PACKEDLAYOUT_1010102: masks[0] = 0xFFC00000; masks[1] = 0x003FF000; masks[2] = 0x00000FFC; masks[3] = 0x00000003; break; default: SDL_SetError("Unknown pixel format"); return SDL_FALSE; } switch (SDL_PIXELORDER(format)) { case SDL_PACKEDORDER_XRGB: *Rmask = masks[1]; *Gmask = masks[2]; *Bmask = masks[3]; break; case SDL_PACKEDORDER_RGBX: *Rmask = masks[0]; *Gmask = masks[1]; *Bmask = masks[2]; break; case SDL_PACKEDORDER_ARGB: *Amask = masks[0]; *Rmask = masks[1]; *Gmask = masks[2]; *Bmask = masks[3]; break; case SDL_PACKEDORDER_RGBA: *Rmask = masks[0]; *Gmask = masks[1]; *Bmask = masks[2]; *Amask = masks[3]; break; case SDL_PACKEDORDER_XBGR: *Bmask = masks[1]; *Gmask = masks[2]; *Rmask = masks[3]; break; case SDL_PACKEDORDER_BGRX: *Bmask = masks[0]; *Gmask = masks[1]; *Rmask = masks[2]; break; case SDL_PACKEDORDER_BGRA: *Bmask = masks[0]; *Gmask = masks[1]; *Rmask = masks[2]; *Amask = masks[3]; break; case SDL_PACKEDORDER_ABGR: *Amask = masks[0]; *Bmask = masks[1]; *Gmask = masks[2]; *Rmask = masks[3]; break; default: SDL_SetError("Unknown pixel format"); return SDL_FALSE; } return SDL_TRUE; } Uint32 SDL_MasksToPixelFormatEnum(int bpp, Uint32 Rmask, Uint32 Gmask, Uint32 Bmask, Uint32 Amask) { switch (bpp) { case 1: /* SDL defaults to MSB ordering */ return SDL_PIXELFORMAT_INDEX1MSB; case 4: /* SDL defaults to MSB ordering */ return SDL_PIXELFORMAT_INDEX4MSB; case 8: if (Rmask == 0) { return SDL_PIXELFORMAT_INDEX8; } if (Rmask == 0xE0 && Gmask == 0x1C && Bmask == 0x03 && Amask == 0x00) { return SDL_PIXELFORMAT_RGB332; } break; case 12: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB444; } if (Rmask == 0x0F00 && Gmask == 0x00F0 && Bmask == 0x000F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB444; } if (Rmask == 0x000F && Gmask == 0x00F0 && Bmask == 0x0F00 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR444; } break; case 15: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB555; } SDL_FALLTHROUGH; case 16: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB565; } if (Rmask == 0x7C00 && Gmask == 0x03E0 && Bmask == 0x001F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB555; } if (Rmask == 0x001F && Gmask == 0x03E0 && Bmask == 0x7C00 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR555; } if (Rmask == 0x0F00 && Gmask == 0x00F0 && Bmask == 0x000F && Amask == 0xF000) { return SDL_PIXELFORMAT_ARGB4444; } if (Rmask == 0xF000 && Gmask == 0x0F00 && Bmask == 0x00F0 && Amask == 0x000F) { return SDL_PIXELFORMAT_RGBA4444; } if (Rmask == 0x000F && Gmask == 0x00F0 && Bmask == 0x0F00 && Amask == 0xF000) { return SDL_PIXELFORMAT_ABGR4444; } if (Rmask == 0x00F0 && Gmask == 0x0F00 && Bmask == 0xF000 && Amask == 0x000F) { return SDL_PIXELFORMAT_BGRA4444; } if (Rmask == 0x7C00 && Gmask == 0x03E0 && Bmask == 0x001F && Amask == 0x8000) { return SDL_PIXELFORMAT_ARGB1555; } if (Rmask == 0xF800 && Gmask == 0x07C0 && Bmask == 0x003E && Amask == 0x0001) { return SDL_PIXELFORMAT_RGBA5551; } if (Rmask == 0x001F && Gmask == 0x03E0 && Bmask == 0x7C00 && Amask == 0x8000) { return SDL_PIXELFORMAT_ABGR1555; } if (Rmask == 0x003E && Gmask == 0x07C0 && Bmask == 0xF800 && Amask == 0x0001) { return SDL_PIXELFORMAT_BGRA5551; } if (Rmask == 0xF800 && Gmask == 0x07E0 && Bmask == 0x001F && Amask == 0x0000) { return SDL_PIXELFORMAT_RGB565; } if (Rmask == 0x001F && Gmask == 0x07E0 
&& Bmask == 0xF800 && Amask == 0x0000) { return SDL_PIXELFORMAT_BGR565; } if (Rmask == 0x003F && Gmask == 0x07C0 && Bmask == 0xF800 && Amask == 0x0000) { /* Technically this would be BGR556, but Witek says this works in bug 3158 */ return SDL_PIXELFORMAT_RGB565; } break; case 24: switch (Rmask) { case 0: case 0x00FF0000: #if SDL_BYTEORDER == SDL_BIG_ENDIAN return SDL_PIXELFORMAT_RGB24; #else return SDL_PIXELFORMAT_BGR24; #endif case 0x000000FF: #if SDL_BYTEORDER == SDL_BIG_ENDIAN return SDL_PIXELFORMAT_BGR24; #else return SDL_PIXELFORMAT_RGB24; #endif } case 32: if (Rmask == 0) { return SDL_PIXELFORMAT_RGB888; } if (Rmask == 0x00FF0000 && Gmask == 0x0000FF00 && Bmask == 0x000000FF && Amask == 0x00000000) { return SDL_PIXELFORMAT_RGB888; } if (Rmask == 0xFF000000 && Gmask == 0x00FF0000 && Bmask == 0x0000FF00 && Amask == 0x00000000) { return SDL_PIXELFORMAT_RGBX8888; } if (Rmask == 0x000000FF && Gmask == 0x0000FF00 && Bmask == 0x00FF0000 && Amask == 0x00000000) { return SDL_PIXELFORMAT_BGR888; } if (Rmask == 0x0000FF00 && Gmask == 0x00FF0000 && Bmask == 0xFF000000 && Amask == 0x00000000) { return SDL_PIXELFORMAT_BGRX8888; } if (Rmask == 0x00FF0000 && Gmask == 0x0000FF00 && Bmask == 0x000000FF && Amask == 0xFF000000) { return SDL_PIXELFORMAT_ARGB8888; } if (Rmask == 0xFF000000 && Gmask == 0x00FF0000 && Bmask == 0x0000FF00 && Amask == 0x000000FF) { return SDL_PIXELFORMAT_RGBA8888; } if (Rmask == 0x000000FF && Gmask == 0x0000FF00 && Bmask == 0x00FF0000 && Amask == 0xFF000000) { return SDL_PIXELFORMAT_ABGR8888; } if (Rmask == 0x0000FF00 && Gmask == 0x00FF0000 && Bmask == 0xFF000000 && Amask == 0x000000FF) { return SDL_PIXELFORMAT_BGRA8888; } if (Rmask == 0x3FF00000 && Gmask == 0x000FFC00 && Bmask == 0x000003FF && Amask == 0xC0000000) { return SDL_PIXELFORMAT_ARGB2101010; } } return SDL_PIXELFORMAT_UNKNOWN; } static SDL_PixelFormat *formats; static SDL_SpinLock formats_lock = 0; SDL_PixelFormat * SDL_AllocFormat(Uint32 pixel_format) { SDL_PixelFormat *format; SDL_AtomicLock(&formats_lock); /* Look it up in our list of previously allocated formats */ for (format = formats; format; format = format->next) { if (pixel_format == format->format) { ++format->refcount; SDL_AtomicUnlock(&formats_lock); return format; } } /* Allocate an empty pixel format structure, and initialize it */ format = SDL_malloc(sizeof(*format)); if (format == NULL) { SDL_AtomicUnlock(&formats_lock); SDL_OutOfMemory(); return NULL; } if (SDL_InitFormat(format, pixel_format) < 0) { SDL_AtomicUnlock(&formats_lock); SDL_free(format); SDL_InvalidParamError("format"); return NULL; } if (!SDL_ISPIXELFORMAT_INDEXED(pixel_format)) { /* Cache the RGB formats */ format->next = formats; formats = format; } SDL_AtomicUnlock(&formats_lock); return format; } int SDL_InitFormat(SDL_PixelFormat * format, Uint32 pixel_format) { int bpp; Uint32 Rmask, Gmask, Bmask, Amask; Uint32 mask; if (!SDL_PixelFormatEnumToMasks(pixel_format, &bpp, &Rmask, &Gmask, &Bmask, &Amask)) { return -1; } /* Set up the format */ SDL_zerop(format); format->format = pixel_format; format->BitsPerPixel = bpp; format->BytesPerPixel = (bpp + 7) / 8; format->Rmask = Rmask; format->Rshift = 0; format->Rloss = 8; if (Rmask) { for (mask = Rmask; !(mask & 0x01); mask >>= 1) ++format->Rshift; for (; (mask & 0x01); mask >>= 1) --format->Rloss; } format->Gmask = Gmask; format->Gshift = 0; format->Gloss = 8; if (Gmask) { for (mask = Gmask; !(mask & 0x01); mask >>= 1) ++format->Gshift; for (; (mask & 0x01); mask >>= 1) --format->Gloss; } format->Bmask = Bmask; format->Bshift = 0; 
format->Bloss = 8; if (Bmask) { for (mask = Bmask; !(mask & 0x01); mask >>= 1) ++format->Bshift; for (; (mask & 0x01); mask >>= 1) --format->Bloss; } format->Amask = Amask; format->Ashift = 0; format->Aloss = 8; if (Amask) { for (mask = Amask; !(mask & 0x01); mask >>= 1) ++format->Ashift; for (; (mask & 0x01); mask >>= 1) --format->Aloss; } format->palette = NULL; format->refcount = 1; format->next = NULL; return 0; } void SDL_FreeFormat(SDL_PixelFormat *format) { SDL_PixelFormat *prev; if (!format) { SDL_InvalidParamError("format"); return; } SDL_AtomicLock(&formats_lock); if (--format->refcount > 0) { SDL_AtomicUnlock(&formats_lock); return; } /* Remove this format from our list */ if (format == formats) { formats = format->next; } else if (formats) { for (prev = formats; prev->next; prev = prev->next) { if (prev->next == format) { prev->next = format->next; break; } } } SDL_AtomicUnlock(&formats_lock); if (format->palette) { SDL_FreePalette(format->palette); } SDL_free(format); } SDL_Palette * SDL_AllocPalette(int ncolors) { SDL_Palette *palette; /* Input validation */ if (ncolors < 1) { SDL_InvalidParamError("ncolors"); return NULL; } palette = (SDL_Palette *) SDL_malloc(sizeof(*palette)); if (!palette) { SDL_OutOfMemory(); return NULL; } palette->colors = (SDL_Color *) SDL_malloc(ncolors * sizeof(*palette->colors)); if (!palette->colors) { SDL_free(palette); return NULL; } palette->ncolors = ncolors; palette->version = 1; palette->refcount = 1; SDL_memset(palette->colors, 0xFF, ncolors * sizeof(*palette->colors)); return palette; } int SDL_SetPixelFormatPalette(SDL_PixelFormat * format, SDL_Palette *palette) { if (!format) { return SDL_SetError("SDL_SetPixelFormatPalette() passed NULL format"); } if (palette && palette->ncolors > (1 << format->BitsPerPixel)) { return SDL_SetError("SDL_SetPixelFormatPalette() passed a palette that doesn't match the format"); } if (format->palette == palette) { return 0; } if (format->palette) { SDL_FreePalette(format->palette); } format->palette = palette; if (format->palette) { ++format->palette->refcount; } return 0; } int SDL_SetPaletteColors(SDL_Palette * palette, const SDL_Color * colors, int firstcolor, int ncolors) { int status = 0; /* Verify the parameters */ if (!palette) { return -1; } if (ncolors > (palette->ncolors - firstcolor)) { ncolors = (palette->ncolors - firstcolor); status = -1; } if (colors != (palette->colors + firstcolor)) { SDL_memcpy(palette->colors + firstcolor, colors, ncolors * sizeof(*colors)); } ++palette->version; if (!palette->version) { palette->version = 1; } return status; } void SDL_FreePalette(SDL_Palette * palette) { if (!palette) { SDL_InvalidParamError("palette"); return; } if (--palette->refcount > 0) { return; } SDL_free(palette->colors); SDL_free(palette); } /* * Calculate an 8-bit (3 red, 3 green, 2 blue) dithered palette of colors */ void SDL_DitherColors(SDL_Color * colors, int bpp) { int i; if (bpp != 8) return; /* only 8bpp supported right now */ for (i = 0; i < 256; i++) { int r, g, b; /* map each bit field to the full [0, 255] interval, so 0 is mapped to (0, 0, 0) and 255 to (255, 255, 255) */ r = i & 0xe0; r |= r >> 3 | r >> 6; colors[i].r = r; g = (i << 3) & 0xe0; g |= g >> 3 | g >> 6; colors[i].g = g; b = i & 0x3; b |= b << 2; b |= b << 4; colors[i].b = b; colors[i].a = SDL_ALPHA_OPAQUE; } } /* * Match an RGB value to a particular palette index */ Uint8 SDL_FindColor(SDL_Palette * pal, Uint8 r, Uint8 g, Uint8 b, Uint8 a) { /* Do colorspace distance matching */ unsigned int smallest; unsigned int 
distance; int rd, gd, bd, ad; int i; Uint8 pixel = 0; smallest = ~0; for (i = 0; i < pal->ncolors; ++i) { rd = pal->colors[i].r - r; gd = pal->colors[i].g - g; bd = pal->colors[i].b - b; ad = pal->colors[i].a - a; distance = (rd * rd) + (gd * gd) + (bd * bd) + (ad * ad); if (distance < smallest) { pixel = i; if (distance == 0) { /* Perfect match! */ break; } smallest = distance; } } return (pixel); } /* Tell whether palette is opaque, and if it has an alpha_channel */ void SDL_DetectPalette(SDL_Palette *pal, SDL_bool *is_opaque, SDL_bool *has_alpha_channel) { int i; { SDL_bool all_opaque = SDL_TRUE; for (i = 0; i < pal->ncolors; i++) { Uint8 alpha_value = pal->colors[i].a; if (alpha_value != SDL_ALPHA_OPAQUE) { all_opaque = SDL_FALSE; break; } } if (all_opaque) { /* Palette is opaque, with an alpha channel */ *is_opaque = SDL_TRUE; *has_alpha_channel = SDL_TRUE; return; } } { SDL_bool all_transparent = SDL_TRUE; for (i = 0; i < pal->ncolors; i++) { Uint8 alpha_value = pal->colors[i].a; if (alpha_value != SDL_ALPHA_TRANSPARENT) { all_transparent = SDL_FALSE; break; } } if (all_transparent) { /* Palette is opaque, without an alpha channel */ *is_opaque = SDL_TRUE; *has_alpha_channel = SDL_FALSE; return; } } /* Palette has alpha values */ *is_opaque = SDL_FALSE; *has_alpha_channel = SDL_TRUE; } /* Find the opaque pixel value corresponding to an RGB triple */ Uint32 SDL_MapRGB(const SDL_PixelFormat * format, Uint8 r, Uint8 g, Uint8 b) { if (format->palette == NULL) { return (r >> format->Rloss) << format->Rshift | (g >> format->Gloss) << format->Gshift | (b >> format->Bloss) << format->Bshift | format->Amask; } else { return SDL_FindColor(format->palette, r, g, b, SDL_ALPHA_OPAQUE); } } /* Find the pixel value corresponding to an RGBA quadruple */ Uint32 SDL_MapRGBA(const SDL_PixelFormat * format, Uint8 r, Uint8 g, Uint8 b, Uint8 a) { if (format->palette == NULL) { return (r >> format->Rloss) << format->Rshift | (g >> format->Gloss) << format->Gshift | (b >> format->Bloss) << format->Bshift | ((Uint32)(a >> format->Aloss) << format->Ashift & format->Amask); } else { return SDL_FindColor(format->palette, r, g, b, a); } } void SDL_GetRGB(Uint32 pixel, const SDL_PixelFormat * format, Uint8 * r, Uint8 * g, Uint8 * b) { if (format->palette == NULL) { unsigned v; v = (pixel & format->Rmask) >> format->Rshift; *r = SDL_expand_byte[format->Rloss][v]; v = (pixel & format->Gmask) >> format->Gshift; *g = SDL_expand_byte[format->Gloss][v]; v = (pixel & format->Bmask) >> format->Bshift; *b = SDL_expand_byte[format->Bloss][v]; } else { if (pixel < (unsigned)format->palette->ncolors) { *r = format->palette->colors[pixel].r; *g = format->palette->colors[pixel].g; *b = format->palette->colors[pixel].b; } else { *r = *g = *b = 0; } } } void SDL_GetRGBA(Uint32 pixel, const SDL_PixelFormat * format, Uint8 * r, Uint8 * g, Uint8 * b, Uint8 * a) { if (format->palette == NULL) { unsigned v; v = (pixel & format->Rmask) >> format->Rshift; *r = SDL_expand_byte[format->Rloss][v]; v = (pixel & format->Gmask) >> format->Gshift; *g = SDL_expand_byte[format->Gloss][v]; v = (pixel & format->Bmask) >> format->Bshift; *b = SDL_expand_byte[format->Bloss][v]; v = (pixel & format->Amask) >> format->Ashift; *a = SDL_expand_byte[format->Aloss][v]; } else { if (pixel < (unsigned)format->palette->ncolors) { *r = format->palette->colors[pixel].r; *g = format->palette->colors[pixel].g; *b = format->palette->colors[pixel].b; *a = format->palette->colors[pixel].a; } else { *r = *g = *b = *a = 0; } } } /* Map from Palette to Palette */ 
static Uint8 * Map1to1(SDL_Palette * src, SDL_Palette * dst, int *identical) { Uint8 *map; int i; if (identical) { if (src->ncolors <= dst->ncolors) { /* If an identical palette, no need to map */ if (src == dst || (SDL_memcmp (src->colors, dst->colors, src->ncolors * sizeof(SDL_Color)) == 0)) { *identical = 1; return (NULL); } } *identical = 0; } map = (Uint8 *) SDL_calloc(256, sizeof(Uint8)); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } for (i = 0; i < src->ncolors; ++i) { map[i] = SDL_FindColor(dst, src->colors[i].r, src->colors[i].g, src->colors[i].b, src->colors[i].a); } return (map); } /* Map from Palette to BitField */ static Uint8 * Map1toN(SDL_PixelFormat * src, Uint8 Rmod, Uint8 Gmod, Uint8 Bmod, Uint8 Amod, SDL_PixelFormat * dst) { Uint8 *map; int i; int bpp; SDL_Palette *pal = src->palette; bpp = ((dst->BytesPerPixel == 3) ? 4 : dst->BytesPerPixel); map = (Uint8 *) SDL_calloc(256, bpp); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } /* We memory copy to the pixel map so the endianness is preserved */ for (i = 0; i < pal->ncolors; ++i) { Uint8 R = (Uint8) ((pal->colors[i].r * Rmod) / 255); Uint8 G = (Uint8) ((pal->colors[i].g * Gmod) / 255); Uint8 B = (Uint8) ((pal->colors[i].b * Bmod) / 255); Uint8 A = (Uint8) ((pal->colors[i].a * Amod) / 255); ASSEMBLE_RGBA(&map[i * bpp], dst->BytesPerPixel, dst, (Uint32)R, (Uint32)G, (Uint32)B, (Uint32)A); } return (map); } /* Map from BitField to Dithered-Palette to Palette */ static Uint8 * MapNto1(SDL_PixelFormat * src, SDL_PixelFormat * dst, int *identical) { /* Generate a 256 color dither palette */ SDL_Palette dithered; SDL_Color colors[256]; SDL_Palette *pal = dst->palette; dithered.ncolors = 256; SDL_DitherColors(colors, 8); dithered.colors = colors; return (Map1to1(&dithered, pal, identical)); } SDL_BlitMap * SDL_AllocBlitMap(void) { SDL_BlitMap *map; /* Allocate the empty map */ map = (SDL_BlitMap *) SDL_calloc(1, sizeof(*map)); if (map == NULL) { SDL_OutOfMemory(); return (NULL); } map->info.r = 0xFF; map->info.g = 0xFF; map->info.b = 0xFF; map->info.a = 0xFF; /* It's ready to go */ return (map); } typedef struct SDL_ListNode { void *entry; struct SDL_ListNode *next; } SDL_ListNode; void SDL_InvalidateAllBlitMap(SDL_Surface *surface) { SDL_ListNode *l = surface->list_blitmap; surface->list_blitmap = NULL; while (l) { SDL_ListNode *tmp = l; SDL_InvalidateMap((SDL_BlitMap *)l->entry); l = l->next; SDL_free(tmp); } } static void SDL_ListAdd(SDL_ListNode **head, void *ent); static void SDL_ListRemove(SDL_ListNode **head, void *ent); void SDL_ListAdd(SDL_ListNode **head, void *ent) { SDL_ListNode *node = SDL_malloc(sizeof (*node)); if (node == NULL) { SDL_OutOfMemory(); return; } node->entry = ent; node->next = *head; *head = node; } void SDL_ListRemove(SDL_ListNode **head, void *ent) { SDL_ListNode **ptr = head; while (*ptr) { if ((*ptr)->entry == ent) { SDL_ListNode *tmp = *ptr; *ptr = (*ptr)->next; SDL_free(tmp); return; } ptr = &(*ptr)->next; } } void SDL_InvalidateMap(SDL_BlitMap * map) { if (!map) { return; } if (map->dst) { /* Un-register from the destination surface */ SDL_ListRemove((SDL_ListNode **)&(map->dst->list_blitmap), map); } map->dst = NULL; map->src_palette_version = 0; map->dst_palette_version = 0; SDL_free(map->info.table); map->info.table = NULL; } int SDL_MapSurface(SDL_Surface * src, SDL_Surface * dst) { SDL_PixelFormat *srcfmt; SDL_PixelFormat *dstfmt; SDL_BlitMap *map; /* Clear out any previous mapping */ map = src->map; #if SDL_HAVE_RLE if ((src->flags & SDL_RLEACCEL) == SDL_RLEACCEL) { 
SDL_UnRLESurface(src, 1); } #endif SDL_InvalidateMap(map); /* Figure out what kind of mapping we're doing */ map->identity = 0; srcfmt = src->format; dstfmt = dst->format; if (SDL_ISPIXELFORMAT_INDEXED(srcfmt->format)) { if (SDL_ISPIXELFORMAT_INDEXED(dstfmt->format)) { /* Palette --> Palette */ map->info.table = Map1to1(srcfmt->palette, dstfmt->palette, &map->identity); if (!map->identity) { if (map->info.table == NULL) { return (-1); } } if (srcfmt->BitsPerPixel != dstfmt->BitsPerPixel) map->identity = 0; } else { /* Palette --> BitField */ map->info.table = Map1toN(srcfmt, src->map->info.r, src->map->info.g, src->map->info.b, src->map->info.a, dstfmt); if (map->info.table == NULL) { return (-1); } } } else { if (SDL_ISPIXELFORMAT_INDEXED(dstfmt->format)) { /* BitField --> Palette */ map->info.table = MapNto1(srcfmt, dstfmt, &map->identity); if (!map->identity) { if (map->info.table == NULL) { return (-1); } } map->identity = 0; /* Don't optimize to copy */ } else { /* BitField --> BitField */ if (srcfmt == dstfmt) { map->identity = 1; } } } map->dst = dst; if (map->dst) { /* Register BlitMap to the destination surface, to be invalidated when needed */ SDL_ListAdd((SDL_ListNode **)&(map->dst->list_blitmap), map); } if (dstfmt->palette) { map->dst_palette_version = dstfmt->palette->version; } else { map->dst_palette_version = 0; } if (srcfmt->palette) { map->src_palette_version = srcfmt->palette->version; } else { map->src_palette_version = 0; } /* Choose your blitters wisely */ return (SDL_CalculateBlit(src)); } void SDL_FreeBlitMap(SDL_BlitMap * map) { if (map) { SDL_InvalidateMap(map); SDL_free(map); } } void SDL_CalculateGammaRamp(float gamma, Uint16 * ramp) { int i; /* Input validation */ if (gamma < 0.0f ) { SDL_InvalidParamError("gamma"); return; } if (ramp == NULL) { SDL_InvalidParamError("ramp"); return; } /* 0.0 gamma is all black */ if (gamma == 0.0f) { SDL_memset(ramp, 0, 256 * sizeof(Uint16)); return; } else if (gamma == 1.0f) { /* 1.0 gamma is identity */ for (i = 0; i < 256; ++i) { ramp[i] = (i << 8) | i; } return; } else { /* Calculate a real gamma ramp */ int value; gamma = 1.0f / gamma; for (i = 0; i < 256; ++i) { value = (int) (SDL_pow((double) i / 256.0, gamma) * 65535.0 + 0.5); if (value > 65535) { value = 65535; } ramp[i] = (Uint16) value; } } } /* vi: set ts=4 sw=4 expandtab: */
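A minimal usage sketch of the pixel-format enum/mask API exercised in the SDL record above (SDL_PixelFormatEnumToMasks, SDL_MasksToPixelFormatEnum, SDL_AllocFormat, SDL_MapRGBA, SDL_GetRGBA). This is illustrative test code, not part of the dataset record; it assumes the public SDL2 headers and library are available.

/* Round-trip a format enum through its channel masks, then pack/unpack a pixel. */
#include <SDL2/SDL.h>
#include <stdio.h>

int main(void)
{
    int bpp;
    Uint32 rmask, gmask, bmask, amask;

    /* Expand a format enum into per-channel bit masks... */
    if (!SDL_PixelFormatEnumToMasks(SDL_PIXELFORMAT_ARGB8888, &bpp,
                                    &rmask, &gmask, &bmask, &amask)) {
        fprintf(stderr, "masks: %s\n", SDL_GetError());
        return 1;
    }

    /* ...and collapse the masks back to the enum. */
    Uint32 fmt = SDL_MasksToPixelFormatEnum(bpp, rmask, gmask, bmask, amask);
    printf("round-trip: %s\n", SDL_GetPixelFormatName(fmt));

    /* Pack and unpack one pixel with the cached SDL_PixelFormat. */
    SDL_PixelFormat *f = SDL_AllocFormat(fmt);
    if (f != NULL) {
        Uint8 r, g, b, a;
        Uint32 pixel = SDL_MapRGBA(f, 0x12, 0x34, 0x56, 0xFF);
        SDL_GetRGBA(pixel, f, &r, &g, &b, &a);
        printf("pixel 0x%08X -> %02X %02X %02X %02X\n",
               (unsigned)pixel, r, g, b, a);
        SDL_FreeFormat(f);
    }
    return 0;
}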
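An illustrative sketch (also outside the record) of the indexed/palette path: with an INDEX8 format and an attached palette, SDL_MapRGB() resolves through SDL_FindColor() and returns the index of the nearest palette entry instead of packing bit fields. The grayscale palette below is made up purely for illustration.

#include <SDL2/SDL.h>
#include <stdio.h>

int main(void)
{
    SDL_PixelFormat *fmt = SDL_AllocFormat(SDL_PIXELFORMAT_INDEX8);
    SDL_Palette *pal = SDL_AllocPalette(256);
    if (fmt == NULL || pal == NULL) {
        fprintf(stderr, "alloc: %s\n", SDL_GetError());
        return 1;
    }

    /* A made-up grayscale ramp, just to have something to match against. */
    SDL_Color colors[256];
    for (int i = 0; i < 256; i++) {
        colors[i].r = colors[i].g = colors[i].b = (Uint8)i;
        colors[i].a = SDL_ALPHA_OPAQUE;
    }
    SDL_SetPaletteColors(pal, colors, 0, 256);
    SDL_SetPixelFormatPalette(fmt, pal);

    /* Pure gray hits an exact entry; an off-gray maps to the closest one. */
    printf("map(128,128,128) -> index %u\n",
           (unsigned)SDL_MapRGB(fmt, 128, 128, 128));
    printf("map(120,130,125) -> index %u\n",
           (unsigned)SDL_MapRGB(fmt, 120, 130, 125));

    SDL_FreeFormat(fmt);   /* releases its palette reference */
    SDL_FreePalette(pal);  /* releases ours */
    return 0;
}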
null
276
CWE-787
CVE-2021-36082
/* * tls.c - TLS/TLS/DTLS dissector * * Copyright (C) 2016-21 - ntop.org * * nDPI is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * nDPI is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with nDPI. If not, see <http://www.gnu.org/licenses/>. * */ #include "ndpi_protocol_ids.h" #define NDPI_CURRENT_PROTO NDPI_PROTOCOL_TLS #include "ndpi_api.h" #include "ndpi_md5.h" #include "ndpi_sha1.h" #include "ndpi_encryption.h" extern char *strptime(const char *s, const char *format, struct tm *tm); extern int processClientServerHello(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, uint32_t quic_version); extern int http_process_user_agent(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, const u_int8_t *ua_ptr, u_int16_t ua_ptr_len); /* QUIC/GQUIC stuff */ extern int quic_len(const uint8_t *buf, uint64_t *value); extern int quic_len_buffer_still_required(uint8_t value); extern int is_version_with_var_int_transport_params(uint32_t version); // #define DEBUG_TLS_MEMORY 1 // #define DEBUG_TLS 1 // #define DEBUG_TLS_BLOCKS 1 // #define DEBUG_CERTIFICATE_HASH // #define DEBUG_JA3C 1 /* #define DEBUG_FINGERPRINT 1 */ /* #define DEBUG_ENCRYPTED_SNI 1 */ /* **************************************** */ /* https://engineering.salesforce.com/tls-fingerprinting-with-ja3-and-ja3s-247362855967 */ #define JA3_STR_LEN 1024 #define MAX_NUM_JA3 512 #define MAX_JA3_STRLEN 256 union ja3_info { struct { u_int16_t tls_handshake_version; u_int16_t num_cipher, cipher[MAX_NUM_JA3]; u_int16_t num_tls_extension, tls_extension[MAX_NUM_JA3]; u_int16_t num_elliptic_curve, elliptic_curve[MAX_NUM_JA3]; u_int16_t num_elliptic_curve_point_format, elliptic_curve_point_format[MAX_NUM_JA3]; char signature_algorithms[MAX_JA3_STRLEN], supported_versions[MAX_JA3_STRLEN], alpn[MAX_JA3_STRLEN]; } client; struct { u_int16_t tls_handshake_version; u_int16_t num_cipher, cipher[MAX_NUM_JA3]; u_int16_t num_tls_extension, tls_extension[MAX_NUM_JA3]; u_int16_t tls_supported_version; u_int16_t num_elliptic_curve_point_format, elliptic_curve_point_format[MAX_NUM_JA3]; char alpn[MAX_JA3_STRLEN]; } server; /* Used for JA3+ */ }; /* NOTE How to view the certificate fingerprint 1. Using wireshark save the certificate on certificate.bin file as explained in https://security.stackexchange.com/questions/123851/how-can-i-extract-the-certificate-from-this-pcap-file 2. openssl x509 -inform der -in certificate.bin -text > certificate.der 3. openssl x509 -noout -fingerprint -sha1 -inform pem -in certificate.der SHA1 Fingerprint=15:9A:76.... $ shasum -a 1 www.grc.com.bin 159a76..... 
*/ #define NDPI_MAX_TLS_REQUEST_SIZE 10000 /* skype.c */ extern u_int8_t is_skype_flow(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow); /* stun.c */ extern u_int32_t get_stun_lru_key(struct ndpi_flow_struct *flow, u_int8_t rev); static void ndpi_int_tls_add_connection(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, u_int32_t protocol); /* **************************************** */ static u_int32_t ndpi_tls_refine_master_protocol(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, u_int32_t protocol) { struct ndpi_packet_struct *packet = &flow->packet; // protocol = NDPI_PROTOCOL_TLS; if(packet->tcp != NULL) { switch(protocol) { case NDPI_PROTOCOL_TLS: { /* In case of TLS there are probably sub-protocols such as IMAPS that can be otherwise detected */ u_int16_t sport = ntohs(packet->tcp->source); u_int16_t dport = ntohs(packet->tcp->dest); if((sport == 465) || (dport == 465) || (sport == 587) || (dport == 587)) protocol = NDPI_PROTOCOL_MAIL_SMTPS; else if((sport == 993) || (dport == 993) || (flow->l4.tcp.mail_imap_starttls) ) protocol = NDPI_PROTOCOL_MAIL_IMAPS; else if((sport == 995) || (dport == 995)) protocol = NDPI_PROTOCOL_MAIL_POPS; } break; } } return(protocol); } /* **************************************** */ void ndpi_search_tls_tcp_memory(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; u_int avail_bytes; /* TCP */ #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Handling TCP/TLS flow [payload_len: %u][buffer_len: %u][direction: %u]\n", packet->payload_packet_len, flow->l4.tcp.tls.message.buffer_len, packet->packet_direction); #endif if(flow->l4.tcp.tls.message.buffer == NULL) { /* Allocate buffer */ flow->l4.tcp.tls.message.buffer_len = 2048, flow->l4.tcp.tls.message.buffer_used = 0; flow->l4.tcp.tls.message.buffer = (u_int8_t*)ndpi_malloc(flow->l4.tcp.tls.message.buffer_len); if(flow->l4.tcp.tls.message.buffer == NULL) return; #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Allocating %u buffer\n", flow->l4.tcp.tls.message.buffer_len); #endif } avail_bytes = flow->l4.tcp.tls.message.buffer_len - flow->l4.tcp.tls.message.buffer_used; if(avail_bytes < packet->payload_packet_len) { u_int new_len = flow->l4.tcp.tls.message.buffer_len + packet->payload_packet_len - avail_bytes + 1; void *newbuf = ndpi_realloc(flow->l4.tcp.tls.message.buffer, flow->l4.tcp.tls.message.buffer_len, new_len); if(!newbuf) return; #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Enlarging %u -> %u buffer\n", flow->l4.tcp.tls.message.buffer_len, new_len); #endif flow->l4.tcp.tls.message.buffer = (u_int8_t*)newbuf; flow->l4.tcp.tls.message.buffer_len = new_len; avail_bytes = flow->l4.tcp.tls.message.buffer_len - flow->l4.tcp.tls.message.buffer_used; } if(packet->payload_packet_len > 0 && avail_bytes >= packet->payload_packet_len) { u_int8_t ok = 0; if(flow->l4.tcp.tls.message.next_seq[packet->packet_direction] != 0) { if(ntohl(packet->tcp->seq) == flow->l4.tcp.tls.message.next_seq[packet->packet_direction]) ok = 1; } else ok = 1; if(ok) { memcpy(&flow->l4.tcp.tls.message.buffer[flow->l4.tcp.tls.message.buffer_used], packet->payload, packet->payload_packet_len); flow->l4.tcp.tls.message.buffer_used += packet->payload_packet_len; #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Copied data to buffer [%u/%u bytes][direction: %u][tcp_seq: %u][next: %u]\n", flow->l4.tcp.tls.message.buffer_used, flow->l4.tcp.tls.message.buffer_len, packet->packet_direction, 
ntohl(packet->tcp->seq), ntohl(packet->tcp->seq)+packet->payload_packet_len); #endif flow->l4.tcp.tls.message.next_seq[packet->packet_direction] = ntohl(packet->tcp->seq)+packet->payload_packet_len; } else { #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Skipping packet [%u bytes][direction: %u][tcp_seq: %u][expected next: %u]\n", flow->l4.tcp.tls.message.buffer_len, packet->packet_direction, ntohl(packet->tcp->seq), ntohl(packet->tcp->seq)+packet->payload_packet_len); #endif } } } /* **************************************** */ /* Can't call libc functions from kernel space, define some stub instead */ #define ndpi_isalpha(ch) (((ch) >= 'a' && (ch) <= 'z') || ((ch) >= 'A' && (ch) <= 'Z')) #define ndpi_isdigit(ch) ((ch) >= '0' && (ch) <= '9') #define ndpi_isspace(ch) (((ch) >= '\t' && (ch) <= '\r') || ((ch) == ' ')) #define ndpi_isprint(ch) ((ch) >= 0x20 && (ch) <= 0x7e) #define ndpi_ispunct(ch) (((ch) >= '!' && (ch) <= '/') || \ ((ch) >= ':' && (ch) <= '@') || \ ((ch) >= '[' && (ch) <= '`') || \ ((ch) >= '{' && (ch) <= '~')) /* **************************************** */ static void cleanupServerName(char *buffer, int buffer_len) { u_int i; /* Now all lowecase */ for(i=0; i<buffer_len; i++) buffer[i] = tolower(buffer[i]); } /* **************************************** */ /* Return code -1: error (buffer too short) 0: OK but buffer is not human readeable (so something went wrong) 1: OK */ static int extractRDNSequence(struct ndpi_packet_struct *packet, u_int offset, char *buffer, u_int buffer_len, char *rdnSeqBuf, u_int *rdnSeqBuf_offset, u_int rdnSeqBuf_len, const char *label) { u_int8_t str_len = packet->payload[offset+4], is_printable = 1; char *str; u_int len, j; if (*rdnSeqBuf_offset >= rdnSeqBuf_len) { #ifdef DEBUG_TLS printf("[TLS] %s() [buffer capacity reached][%u]\n", __FUNCTION__, rdnSeqBuf_len); #endif return -1; } // packet is truncated... further inspection is not needed if((offset+4+str_len) >= packet->payload_packet_len) return(-1); str = (char*)&packet->payload[offset+5]; len = (u_int)ndpi_min(str_len, buffer_len-1); strncpy(buffer, str, len); buffer[len] = '\0'; // check string is printable for(j = 0; j < len; j++) { if(!ndpi_isprint(buffer[j])) { is_printable = 0; break; } } if(is_printable) { int rc = snprintf(&rdnSeqBuf[*rdnSeqBuf_offset], rdnSeqBuf_len-(*rdnSeqBuf_offset), "%s%s=%s", (*rdnSeqBuf_offset > 0) ? 
", " : "", label, buffer); if(rc > 0) (*rdnSeqBuf_offset) += rc; } return(is_printable); } /* **************************************** */ static void checkTLSSubprotocol(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { if(flow->detected_protocol_stack[1] == NDPI_PROTOCOL_UNKNOWN) { /* Subprotocol not yet set */ if(ndpi_struct->tls_cert_cache && flow->packet.iph) { u_int32_t key = flow->packet.iph->daddr + flow->packet.tcp->dest; u_int16_t cached_proto; if(ndpi_lru_find_cache(ndpi_struct->tls_cert_cache, key, &cached_proto, 0 /* Don't remove it as it can be used for other connections */)) { ndpi_protocol ret = { NDPI_PROTOCOL_TLS, cached_proto, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED }; flow->detected_protocol_stack[0] = cached_proto, flow->detected_protocol_stack[1] = NDPI_PROTOCOL_TLS; flow->category = ndpi_get_proto_category(ndpi_struct, ret); ndpi_check_subprotocol_risk(flow, cached_proto); } } } } /* **************************************** */ /* See https://blog.catchpoint.com/2017/05/12/dissecting-tls-using-wireshark/ */ static void processCertificateElements(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, u_int16_t p_offset, u_int16_t certificate_len) { struct ndpi_packet_struct *packet = &flow->packet; u_int num_found = 0, i; char buffer[64] = { '\0' }, rdnSeqBuf[2048] = { '\0' }; u_int rdn_len = 0; #ifdef DEBUG_TLS printf("[TLS] %s() [offset: %u][certificate_len: %u]\n", __FUNCTION__, p_offset, certificate_len); #endif /* Check after handshake protocol header (5 bytes) and message header (4 bytes) */ for(i = p_offset; i < certificate_len; i++) { /* See https://www.ibm.com/support/knowledgecenter/SSFKSJ_7.5.0/com.ibm.mq.sec.doc/q009860_.htm for X.509 certificate labels */ if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x03)) { /* Common Name */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "CN"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Common Name", buffer); #endif } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x06)) { /* Country */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "C"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Country", buffer); #endif } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x07)) { /* Locality */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "L"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Locality", buffer); #endif } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x08)) { /* State or Province */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "ST"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? 
"Subject" : "Issuer", "State or Province", buffer); #endif } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x0a)) { /* Organization Name */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "O"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Organization Name", buffer); #endif } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x0b)) { /* Organization Unit */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "OU"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Organization Unit", buffer); #endif } else if((packet->payload[i] == 0x30) && (packet->payload[i+1] == 0x1e) && (packet->payload[i+2] == 0x17)) { /* Certificate Validity */ u_int8_t len = packet->payload[i+3]; u_int offset = i+4; if(num_found == 0) { num_found++; #ifdef DEBUG_TLS printf("[TLS] %s() IssuerDN [%s]\n", __FUNCTION__, rdnSeqBuf); #endif if(rdn_len && (flow->protos.tls_quic_stun.tls_quic.issuerDN == NULL)) flow->protos.tls_quic_stun.tls_quic.issuerDN = ndpi_strdup(rdnSeqBuf); rdn_len = 0; /* Reset buffer */ } if((offset+len) < packet->payload_packet_len) { char utcDate[32]; #ifdef DEBUG_TLS u_int j; printf("[CERTIFICATE] notBefore [len: %u][", len); for(j=0; j<len; j++) printf("%c", packet->payload[i+4+j]); printf("]\n"); #endif if(len < (sizeof(utcDate)-1)) { struct tm utc; utc.tm_isdst = -1; /* Not set by strptime */ strncpy(utcDate, (const char*)&packet->payload[i+4], len); utcDate[len] = '\0'; /* 141021000000Z */ if(strptime(utcDate, "%y%m%d%H%M%SZ", &utc) != NULL) { flow->protos.tls_quic_stun.tls_quic.notBefore = timegm(&utc); #ifdef DEBUG_TLS printf("[CERTIFICATE] notBefore %u [%s]\n", flow->protos.tls_quic_stun.tls_quic.notBefore, utcDate); #endif } } offset += len; if((offset+1) < packet->payload_packet_len) { len = packet->payload[offset+1]; offset += 2; if((offset+len) < packet->payload_packet_len) { u_int32_t time_sec = flow->packet.current_time_ms / 1000; #ifdef DEBUG_TLS u_int j; printf("[CERTIFICATE] notAfter [len: %u][", len); for(j=0; j<len; j++) printf("%c", packet->payload[offset+j]); printf("]\n"); #endif if(len < (sizeof(utcDate)-1)) { struct tm utc; utc.tm_isdst = -1; /* Not set by strptime */ strncpy(utcDate, (const char*)&packet->payload[offset], len); utcDate[len] = '\0'; /* 141021000000Z */ if(strptime(utcDate, "%y%m%d%H%M%SZ", &utc) != NULL) { flow->protos.tls_quic_stun.tls_quic.notAfter = timegm(&utc); #ifdef DEBUG_TLS printf("[CERTIFICATE] notAfter %u [%s]\n", flow->protos.tls_quic_stun.tls_quic.notAfter, utcDate); #endif } } if((time_sec < flow->protos.tls_quic_stun.tls_quic.notBefore) || (time_sec > flow->protos.tls_quic_stun.tls_quic.notAfter)) ndpi_set_risk(flow, NDPI_TLS_CERTIFICATE_EXPIRED); /* Certificate expired */ } } } } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x1d) && (packet->payload[i+2] == 0x11)) { /* Organization OID: 2.5.29.17 (subjectAltName) */ u_int8_t matched_name = 0; #ifdef DEBUG_TLS printf("******* [TLS] Found subjectAltName\n"); #endif i += 3 /* skip the initial patten 55 1D 11 */; i++; /* skip the first type, 0x04 == BIT STRING, and jump to it's length */ if(i < packet->payload_packet_len) { i += (packet->payload[i] & 0x80) ? 
(packet->payload[i] & 0x7F) : 0; /* skip BIT STRING length */ if(i < packet->payload_packet_len) { i += 2; /* skip the second type, 0x30 == SEQUENCE, and jump to it's length */ if(i < packet->payload_packet_len) { i += (packet->payload[i] & 0x80) ? (packet->payload[i] & 0x7F) : 0; /* skip SEQUENCE length */ i++; while(i < packet->payload_packet_len) { if(packet->payload[i] == 0x82) { if((i < (packet->payload_packet_len - 1)) && ((i + packet->payload[i + 1] + 2) < packet->payload_packet_len)) { u_int8_t len = packet->payload[i + 1]; char dNSName[256]; i += 2; /* The check "len > sizeof(dNSName) - 1" will be always false. If we add it, the compiler is smart enough to detect it and throws a warning */ if((len == 0 /* Looks something went wrong */) || ((i+len) > packet->payload_packet_len)) break; strncpy(dNSName, (const char*)&packet->payload[i], len); dNSName[len] = '\0'; cleanupServerName(dNSName, len); #if DEBUG_TLS printf("[TLS] dNSName %s [%s][len: %u][leftover: %d]\n", dNSName, flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, len, packet->payload_packet_len-i-len); #endif if(matched_name == 0) { if(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name[0] == '\0') matched_name = 1; /* No SNI */ else if (dNSName[0] == '*') { char * label = strstr(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, &dNSName[1]); if (label != NULL) { char * first_dot = strchr(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, '.'); if (first_dot == NULL || first_dot >= label) { matched_name = 1; } } } else if(strcmp(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, dNSName) == 0) matched_name = 1; } if(flow->protos.tls_quic_stun.tls_quic.server_names == NULL) flow->protos.tls_quic_stun.tls_quic.server_names = ndpi_strdup(dNSName), flow->protos.tls_quic_stun.tls_quic.server_names_len = strlen(dNSName); else { u_int16_t dNSName_len = strlen(dNSName); u_int16_t newstr_len = flow->protos.tls_quic_stun.tls_quic.server_names_len + dNSName_len + 1; char *newstr = (char*)ndpi_realloc(flow->protos.tls_quic_stun.tls_quic.server_names, flow->protos.tls_quic_stun.tls_quic.server_names_len+1, newstr_len+1); if(newstr) { flow->protos.tls_quic_stun.tls_quic.server_names = newstr; flow->protos.tls_quic_stun.tls_quic.server_names[flow->protos.tls_quic_stun.tls_quic.server_names_len] = ','; strncpy(&flow->protos.tls_quic_stun.tls_quic.server_names[flow->protos.tls_quic_stun.tls_quic.server_names_len+1], dNSName, dNSName_len+1); flow->protos.tls_quic_stun.tls_quic.server_names[newstr_len] = '\0'; flow->protos.tls_quic_stun.tls_quic.server_names_len = newstr_len; } } if(!flow->l4.tcp.tls.subprotocol_detected) if(ndpi_match_hostname_protocol(ndpi_struct, flow, NDPI_PROTOCOL_TLS, dNSName, len)) flow->l4.tcp.tls.subprotocol_detected = 1; i += len; } else { #if DEBUG_TLS printf("[TLS] Leftover %u bytes", packet->payload_packet_len - i); #endif break; } } else { break; } } /* while */ if(!matched_name) ndpi_set_risk(flow, NDPI_TLS_CERTIFICATE_MISMATCH); /* Certificate mismatch */ } } } } } if(rdn_len && (flow->protos.tls_quic_stun.tls_quic.subjectDN == NULL)) { flow->protos.tls_quic_stun.tls_quic.subjectDN = ndpi_strdup(rdnSeqBuf); if(flow->detected_protocol_stack[1] == NDPI_PROTOCOL_UNKNOWN) { /* No idea what is happening behind the scenes: let's check the certificate */ u_int32_t proto_id; int rc = ndpi_match_string_value(ndpi_struct->tls_cert_subject_automa.ac_automa, rdnSeqBuf, strlen(rdnSeqBuf),&proto_id); if(rc == 0) { /* Match found */ ndpi_protocol ret 
= { NDPI_PROTOCOL_TLS, proto_id, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED}; flow->detected_protocol_stack[0] = proto_id, flow->detected_protocol_stack[1] = NDPI_PROTOCOL_TLS; flow->category = ndpi_get_proto_category(ndpi_struct, ret); ndpi_check_subprotocol_risk(flow, proto_id); if(ndpi_struct->tls_cert_cache == NULL) ndpi_struct->tls_cert_cache = ndpi_lru_cache_init(1024); if(ndpi_struct->tls_cert_cache && flow->packet.iph) { u_int32_t key = flow->packet.iph->daddr + flow->packet.tcp->dest; ndpi_lru_add_to_cache(ndpi_struct->tls_cert_cache, key, proto_id); } } } } if(flow->protos.tls_quic_stun.tls_quic.subjectDN && flow->protos.tls_quic_stun.tls_quic.issuerDN && (!strcmp(flow->protos.tls_quic_stun.tls_quic.subjectDN, flow->protos.tls_quic_stun.tls_quic.issuerDN))) ndpi_set_risk(flow, NDPI_TLS_SELFSIGNED_CERTIFICATE); #if DEBUG_TLS printf("[TLS] %s() SubjectDN [%s]\n", __FUNCTION__, rdnSeqBuf); #endif } /* **************************************** */ /* See https://blog.catchpoint.com/2017/05/12/dissecting-tls-using-wireshark/ */ int processCertificate(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; int is_dtls = packet->udp ? 1 : 0; u_int32_t certificates_length, length = (packet->payload[1] << 16) + (packet->payload[2] << 8) + packet->payload[3]; u_int32_t certificates_offset = 7 + (is_dtls ? 8 : 0); u_int8_t num_certificates_found = 0; SHA1_CTX srv_cert_fingerprint_ctx ; #ifdef DEBUG_TLS printf("[TLS] %s() [payload_packet_len=%u][direction: %u][%02X %02X %02X %02X %02X %02X...]\n", __FUNCTION__, packet->payload_packet_len, packet->packet_direction, packet->payload[0], packet->payload[1], packet->payload[2], packet->payload[3], packet->payload[4], packet->payload[5]); #endif if((packet->payload_packet_len != (length + 4 + (is_dtls ? 8 : 0))) || (packet->payload[1] != 0x0)) { ndpi_set_risk(flow, NDPI_MALFORMED_PACKET); return(-1); /* Invalid length */ } certificates_length = (packet->payload[certificates_offset - 3] << 16) + (packet->payload[certificates_offset - 2] << 8) + packet->payload[certificates_offset - 1]; if((packet->payload[certificates_offset - 3] != 0x0) || ((certificates_length+3) != length)) { ndpi_set_risk(flow, NDPI_MALFORMED_PACKET); return(-2); /* Invalid length */ } /* Now let's process each individual certificates */ while(certificates_offset < certificates_length) { u_int32_t certificate_len = (packet->payload[certificates_offset] << 16) + (packet->payload[certificates_offset+1] << 8) + packet->payload[certificates_offset+2]; /* Invalid lenght */ if((certificate_len == 0) || (packet->payload[certificates_offset] != 0x0) || ((certificates_offset+certificate_len) > (4+certificates_length+(is_dtls ? 
8 : 0)))) { #ifdef DEBUG_TLS printf("[TLS] Invalid length [certificate_len: %u][certificates_offset: %u][%u vs %u]\n", certificate_len, certificates_offset, (certificates_offset+certificate_len), certificates_length); #endif break; } certificates_offset += 3; #ifdef DEBUG_TLS printf("[TLS] Processing %u bytes certificate [%02X %02X %02X]\n", certificate_len, packet->payload[certificates_offset], packet->payload[certificates_offset+1], packet->payload[certificates_offset+2]); #endif if(num_certificates_found++ == 0) /* Dissect only the first certificate that is the one we care */ { /* For SHA-1 we take into account only the first certificate and not all of them */ SHA1Init(&srv_cert_fingerprint_ctx); #ifdef DEBUG_CERTIFICATE_HASH { int i; for(i=0;i<certificate_len;i++) printf("%02X ", packet->payload[certificates_offset+i]); printf("\n"); } #endif SHA1Update(&srv_cert_fingerprint_ctx, &packet->payload[certificates_offset], certificate_len); SHA1Final(flow->protos.tls_quic_stun.tls_quic.sha1_certificate_fingerprint, &srv_cert_fingerprint_ctx); flow->l4.tcp.tls.fingerprint_set = 1; uint8_t * sha1 = flow->protos.tls_quic_stun.tls_quic.sha1_certificate_fingerprint; const size_t sha1_siz = sizeof(flow->protos.tls_quic_stun.tls_quic.sha1_certificate_fingerprint); char sha1_str[20 /* sha1_siz */ * 2 + 1]; static const char hexalnum[] = "0123456789ABCDEF"; for (size_t i = 0; i < sha1_siz; ++i) { u_int8_t lower = (sha1[i] & 0x0F); u_int8_t upper = (sha1[i] & 0xF0) >> 4; sha1_str[i*2] = hexalnum[upper]; sha1_str[i*2 + 1] = hexalnum[lower]; } sha1_str[sha1_siz * 2] = '\0'; #ifdef DEBUG_TLS printf("[TLS] SHA-1: %s\n", sha1_str); #endif if (ndpi_struct->malicious_sha1_automa.ac_automa != NULL) { u_int16_t rc1 = ndpi_match_string(ndpi_struct->malicious_sha1_automa.ac_automa, sha1_str); if(rc1 > 0) ndpi_set_risk(flow, NDPI_MALICIOUS_SHA1_CERTIFICATE); } processCertificateElements(ndpi_struct, flow, certificates_offset, certificate_len); } certificates_offset += certificate_len; } if((ndpi_struct->num_tls_blocks_to_follow != 0) && (flow->l4.tcp.tls.num_tls_blocks >= ndpi_struct->num_tls_blocks_to_follow)) { #ifdef DEBUG_TLS_BLOCKS printf("*** [TLS Block] Enough blocks dissected\n"); #endif flow->extra_packets_func = NULL; /* We're good now */ } return(1); } /* **************************************** */ static int processTLSBlock(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; int ret; switch(packet->payload[0] /* block type */) { case 0x01: /* Client Hello */ case 0x02: /* Server Hello */ processClientServerHello(ndpi_struct, flow, 0); flow->l4.tcp.tls.hello_processed = 1; ndpi_int_tls_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_TLS); #ifdef DEBUG_TLS printf("*** TLS [version: %02X][%s Hello]\n", flow->protos.tls_quic_stun.tls_quic.ssl_version, (packet->payload[0] == 0x01) ? 
"Client" : "Server"); #endif if((flow->protos.tls_quic_stun.tls_quic.ssl_version >= 0x0304 /* TLS 1.3 */) && (packet->payload[0] == 0x02 /* Server Hello */)) { flow->l4.tcp.tls.certificate_processed = 1; /* No Certificate with TLS 1.3+ */ } checkTLSSubprotocol(ndpi_struct, flow); break; case 0x0b: /* Certificate */ /* Important: populate the tls union fields only after * ndpi_int_tls_add_connection has been called */ if(flow->l4.tcp.tls.hello_processed) { ret = processCertificate(ndpi_struct, flow); if (ret != 1) { #ifdef DEBUG_TLS printf("[TLS] Error processing certificate: %d\n", ret); #endif } flow->l4.tcp.tls.certificate_processed = 1; } break; default: return(-1); } return(0); } /* **************************************** */ static void ndpi_looks_like_tls(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { // ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_TLS, NDPI_PROTOCOL_UNKNOWN); if(flow->guessed_protocol_id == NDPI_PROTOCOL_UNKNOWN) flow->guessed_protocol_id = NDPI_PROTOCOL_TLS; } /* **************************************** */ static int ndpi_search_tls_tcp(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; u_int8_t something_went_wrong = 0; #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] ndpi_search_tls_tcp() Processing new packet [payload_packet_len: %u]\n", packet->payload_packet_len); #endif if(packet->payload_packet_len == 0) return(1); /* Keep working */ ndpi_search_tls_tcp_memory(ndpi_struct, flow); while(!something_went_wrong) { u_int16_t len, p_len; const u_int8_t *p; u_int8_t content_type; if(flow->l4.tcp.tls.message.buffer_used < 5) return(1); /* Keep working */ len = (flow->l4.tcp.tls.message.buffer[3] << 8) + flow->l4.tcp.tls.message.buffer[4] + 5; if(len > flow->l4.tcp.tls.message.buffer_used) { #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Not enough TLS data [%u < %u][%02X %02X %02X %02X %02X]\n", len, flow->l4.tcp.tls.message.buffer_used, flow->l4.tcp.tls.message.buffer[0], flow->l4.tcp.tls.message.buffer[1], flow->l4.tcp.tls.message.buffer[2], flow->l4.tcp.tls.message.buffer[3], flow->l4.tcp.tls.message.buffer[4]); #endif break; } if(len == 0) { something_went_wrong = 1; break; } #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Processing %u bytes message\n", len); #endif content_type = flow->l4.tcp.tls.message.buffer[0]; /* Overwriting packet payload */ p = packet->payload; p_len = packet->payload_packet_len; /* Backup */ if(content_type == 0x14 /* Change Cipher Spec */) { if(ndpi_struct->skip_tls_blocks_until_change_cipher) { /* Ignore Application Data up until change cipher so in this case we reset the number of observed TLS blocks */ flow->l4.tcp.tls.num_tls_blocks = 0; } } if((len > 9) && (content_type != 0x17 /* Application Data */) && (!flow->l4.tcp.tls.certificate_processed)) { /* Split the element in blocks */ u_int16_t processed = 5; while((processed+4) <= len) { const u_int8_t *block = (const u_int8_t *)&flow->l4.tcp.tls.message.buffer[processed]; u_int32_t block_len = (block[1] << 16) + (block[2] << 8) + block[3]; if(/* (block_len == 0) || */ /* Note blocks can have zero lenght */ (block_len > len) || ((block[1] != 0x0))) { something_went_wrong = 1; break; } packet->payload = block; packet->payload_packet_len = ndpi_min(block_len+4, flow->l4.tcp.tls.message.buffer_used); if((processed+packet->payload_packet_len) > len) { something_went_wrong = 1; break; } processTLSBlock(ndpi_struct, flow); ndpi_looks_like_tls(ndpi_struct, flow); processed 
+= packet->payload_packet_len; } } else { /* Process element as a whole */ if(content_type == 0x17 /* Application Data */) { ndpi_looks_like_tls(ndpi_struct, flow); if(flow->l4.tcp.tls.certificate_processed) { if(flow->l4.tcp.tls.num_tls_blocks < ndpi_struct->num_tls_blocks_to_follow) flow->l4.tcp.tls.tls_application_blocks_len[flow->l4.tcp.tls.num_tls_blocks++] = (packet->packet_direction == 0) ? (len-5) : -(len-5); #ifdef DEBUG_TLS_BLOCKS printf("*** [TLS Block] [len: %u][num_tls_blocks: %u/%u]\n", len-5, flow->l4.tcp.tls.num_tls_blocks, ndpi_struct->num_tls_blocks_to_follow); #endif } } } packet->payload = p; packet->payload_packet_len = p_len; /* Restore */ flow->l4.tcp.tls.message.buffer_used -= len; if(flow->l4.tcp.tls.message.buffer_used > 0) memmove(flow->l4.tcp.tls.message.buffer, &flow->l4.tcp.tls.message.buffer[len], flow->l4.tcp.tls.message.buffer_used); else break; #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Left memory buffer %u bytes\n", flow->l4.tcp.tls.message.buffer_used); #endif } if(something_went_wrong || ((ndpi_struct->num_tls_blocks_to_follow > 0) && (flow->l4.tcp.tls.num_tls_blocks == ndpi_struct->num_tls_blocks_to_follow)) ) { #ifdef DEBUG_TLS_BLOCKS printf("*** [TLS Block] No more blocks\n"); #endif flow->check_extra_packets = 0; flow->extra_packets_func = NULL; return(0); /* That's all */ } else return(1); } /* **************************************** */ static int ndpi_search_tls_udp(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; u_int32_t handshake_len; u_int16_t p_len, processed; const u_int8_t *p; u_int8_t no_dtls = 0, change_cipher_found = 0; #ifdef DEBUG_TLS printf("[TLS] %s()\n", __FUNCTION__); #endif /* Overwriting packet payload */ p = packet->payload, p_len = packet->payload_packet_len; /* Backup */ /* Split the element in blocks */ processed = 0; while(processed + 13 < p_len) { u_int32_t block_len; const u_int8_t *block = (const u_int8_t *)&p[processed]; if((block[0] != 0x16 && block[0] != 0x14) || /* Handshake, change-cipher-spec */ (block[1] != 0xfe) || /* We ignore old DTLS versions */ ((block[2] != 0xff) && (block[2] != 0xfd))) { #ifdef DEBUG_TLS printf("[TLS] DTLS invalid block 0x%x or old version 0x%x-0x%x-0x%x\n", block[0], block[1], block[2], block[3]); #endif no_dtls = 1; break; } block_len = ntohs(*((u_int16_t*)&block[11])); #ifdef DEBUG_TLS printf("[TLS] DTLS block len: %d\n", block_len); #endif if (block_len == 0 || (processed + block_len + 12 >= p_len)) { #ifdef DEBUG_TLS printf("[TLS] DTLS invalid block len %d (processed %d, p_len %d)\n", block_len, processed, p_len); #endif no_dtls = 1; break; } /* We process only handshake msgs */ if(block[0] == 0x16) { if (processed + block_len + 13 > p_len) { #ifdef DEBUG_TLS printf("[TLS] DTLS invalid len %d %d %d\n", processed, block_len, p_len); #endif no_dtls = 1; break; } /* TODO: handle (certificate) fragments */ handshake_len = (block[14] << 16) + (block[15] << 8) + block[16]; if((handshake_len + 12) != block_len) { #ifdef DEBUG_TLS printf("[TLS] DTLS invalid handshake_len %d, %d)\n", handshake_len, block_len); #endif no_dtls = 1; break; } packet->payload = &block[13]; packet->payload_packet_len = block_len; processTLSBlock(ndpi_struct, flow); } else { /* Change-cipher-spec: any subsequent block might be encrypted */ #ifdef DEBUG_TLS printf("[TLS] Change-cipher-spec\n"); #endif change_cipher_found = 1; processed += block_len + 13; break; } processed += block_len + 13; } if(processed != p_len) { #ifdef DEBUG_TLS 
printf("[TLS] DTLS invalid processed len %d/%d (%d)\n", processed, p_len, change_cipher_found); #endif if(!change_cipher_found) no_dtls = 1; } packet->payload = p; packet->payload_packet_len = p_len; /* Restore */ if(no_dtls || change_cipher_found) { NDPI_EXCLUDE_PROTO(ndpi_struct, flow); return(0); /* That's all */ } else { return(1); /* Keep working */ } } /* **************************************** */ static void tlsInitExtraPacketProcessing(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { flow->check_extra_packets = 1; /* At most 12 packets should almost always be enough to find the server certificate if it's there */ flow->max_extra_packets_to_check = 12 + (ndpi_struct->num_tls_blocks_to_follow*4); flow->extra_packets_func = (flow->packet.udp != NULL) ? ndpi_search_tls_udp : ndpi_search_tls_tcp; } /* **************************************** */ static void tlsCheckUncommonALPN(struct ndpi_flow_struct *flow) { /* see: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml */ static char const * const common_alpns[] = { "http/0.9", "http/1.0", "http/1.1", "spdy/1", "spdy/2", "spdy/3", "spdy/3.1", "stun.turn", "stun.nat-discovery", "h2", "h2c", "h2-16", "h2-15", "h2-14", "webrtc", "c-webrtc", "ftp", "imap", "pop3", "managesieve", "coap", "xmpp-client", "xmpp-server", "acme-tls/1", "mqtt", "dot", "ntske/1", "sunrpc", "h3", "smb", "irc", /* QUIC ALPNs */ "h3-T051", "h3-T050", "h3-32", "h3-30", "h3-29", "h3-28", "h3-27", "h3-24", "h3-22", "hq-30", "hq-29", "hq-28", "hq-27", "h3-fb-05", "h1q-fb", "doq-i00" }; /* * If the ALPN list increases in size, iterating over all items for every incoming ALPN may * have a performance impact. A hash map could solve this issue. */ char * alpn_start = flow->protos.tls_quic_stun.tls_quic.alpn; char * comma_or_nul = alpn_start; do { comma_or_nul = strchr(comma_or_nul, ','); if (comma_or_nul == NULL) { comma_or_nul = alpn_start + strlen(alpn_start); } int alpn_found = 0; int alpn_len = comma_or_nul - alpn_start; char const * const alpn = alpn_start; for (size_t i = 0; i < sizeof(common_alpns)/sizeof(common_alpns[0]); ++i) { if (strlen(common_alpns[i]) == alpn_len && strncmp(alpn, common_alpns[i], alpn_len) == 0) { alpn_found = 1; break; } } if (alpn_found == 0) { #ifdef DEBUG_TLS printf("TLS uncommon ALPN found: %.*s\n", alpn_len, alpn); #endif ndpi_set_risk(flow, NDPI_TLS_UNCOMMON_ALPN); break; } alpn_start = comma_or_nul + 1; } while (*(comma_or_nul++) != '\0'); } /* **************************************** */ static void ndpi_int_tls_add_connection(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, u_int32_t protocol) { #if DEBUG_TLS printf("[TLS] %s()\n", __FUNCTION__); #endif if((flow->packet.udp != NULL) && (protocol == NDPI_PROTOCOL_TLS)) protocol = NDPI_PROTOCOL_DTLS; if((flow->detected_protocol_stack[0] == protocol) || (flow->detected_protocol_stack[1] == protocol)) { if(!flow->check_extra_packets) tlsInitExtraPacketProcessing(ndpi_struct, flow); return; } if(protocol != NDPI_PROTOCOL_TLS) ; else protocol = ndpi_tls_refine_master_protocol(ndpi_struct, flow, protocol); ndpi_set_detected_protocol(ndpi_struct, flow, protocol, protocol); tlsInitExtraPacketProcessing(ndpi_struct, flow); } /* **************************************** */ int processClientServerHello(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, uint32_t quic_version) { struct ndpi_packet_struct *packet = &flow->packet; union ja3_info ja3; u_int8_t invalid_ja3 = 
0; u_int16_t tls_version, ja3_str_len; char ja3_str[JA3_STR_LEN]; ndpi_MD5_CTX ctx; u_char md5_hash[16]; int i; u_int16_t total_len; u_int8_t handshake_type; char buffer[64] = { '\0' }; int is_quic = (quic_version != 0); int is_dtls = packet->udp && (!is_quic); #ifdef DEBUG_TLS printf("TLS %s() called\n", __FUNCTION__); #endif memset(&ja3, 0, sizeof(ja3)); handshake_type = packet->payload[0]; total_len = (packet->payload[1] << 16) + (packet->payload[2] << 8) + packet->payload[3]; if((total_len > packet->payload_packet_len) || (packet->payload[1] != 0x0)) return(0); /* Not found */ total_len = packet->payload_packet_len; /* At least "magic" 3 bytes, null for string end, otherwise no need to waste cpu cycles */ if(total_len > 4) { u_int16_t base_offset = (!is_dtls) ? 38 : 46; u_int16_t version_offset = (!is_dtls) ? 4 : 12; u_int16_t offset = (!is_dtls) ? 38 : 46, extension_len, j; u_int8_t session_id_len = 0; if((base_offset >= total_len) || (version_offset + 1) >= total_len) return 0; /* Not found */ session_id_len = packet->payload[base_offset]; #ifdef DEBUG_TLS printf("TLS [len: %u][handshake_type: %02X]\n", packet->payload_packet_len, handshake_type); #endif tls_version = ntohs(*((u_int16_t*)&packet->payload[version_offset])); if(handshake_type == 0x02 /* Server Hello */) { int i, rc; ja3.server.tls_handshake_version = tls_version; #ifdef DEBUG_TLS printf("TLS Server Hello [version: 0x%04X]\n", tls_version); #endif /* The server hello decides about the TLS version of this flow https://networkengineering.stackexchange.com/questions/55752/why-does-wireshark-show-version-tls-1-2-here-instead-of-tls-1-3 */ if(packet->udp) offset += session_id_len + 1; else { if(tls_version < 0x7F15 /* TLS 1.3 lacks of session id */) offset += session_id_len+1; } if((offset+3) > packet->payload_packet_len) return(0); /* Not found */ ja3.server.num_cipher = 1, ja3.server.cipher[0] = ntohs(*((u_int16_t*)&packet->payload[offset])); if((flow->protos.tls_quic_stun.tls_quic.server_unsafe_cipher = ndpi_is_safe_ssl_cipher(ja3.server.cipher[0])) == 1) ndpi_set_risk(flow, NDPI_TLS_WEAK_CIPHER); flow->protos.tls_quic_stun.tls_quic.server_cipher = ja3.server.cipher[0]; #ifdef DEBUG_TLS printf("TLS [server][session_id_len: %u][cipher: %04X]\n", session_id_len, ja3.server.cipher[0]); #endif offset += 2 + 1; if((offset + 1) < packet->payload_packet_len) /* +1 because we are goint to read 2 bytes */ extension_len = ntohs(*((u_int16_t*)&packet->payload[offset])); else extension_len = 0; #ifdef DEBUG_TLS printf("TLS [server][extension_len: %u]\n", extension_len); #endif offset += 2; for(i=0; i<extension_len; ) { u_int16_t extension_id, extension_len; if((offset+4) > packet->payload_packet_len) break; extension_id = ntohs(*((u_int16_t*)&packet->payload[offset])); extension_len = ntohs(*((u_int16_t*)&packet->payload[offset+2])); if(ja3.server.num_tls_extension < MAX_NUM_JA3) ja3.server.tls_extension[ja3.server.num_tls_extension++] = extension_id; #ifdef DEBUG_TLS printf("TLS [server][extension_id: %u/0x%04X][len: %u]\n", extension_id, extension_id, extension_len); #endif if(extension_id == 43 /* supported versions */) { if(extension_len >= 2) { u_int16_t tls_version = ntohs(*((u_int16_t*)&packet->payload[offset+4])); #ifdef DEBUG_TLS printf("TLS [server] [TLS version: 0x%04X]\n", tls_version); #endif flow->protos.tls_quic_stun.tls_quic.ssl_version = ja3.server.tls_supported_version = tls_version; } } else if(extension_id == 16 /* application_layer_protocol_negotiation (ALPN) */) { u_int16_t s_offset = offset+4; u_int16_t 
tot_alpn_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); char alpn_str[256]; u_int8_t alpn_str_len = 0, i; #ifdef DEBUG_TLS printf("Server TLS [ALPN: block_len=%u/len=%u]\n", extension_len, tot_alpn_len); #endif s_offset += 2; tot_alpn_len += s_offset; while(s_offset < tot_alpn_len && s_offset < total_len) { u_int8_t alpn_i, alpn_len = packet->payload[s_offset++]; if((s_offset + alpn_len) <= tot_alpn_len) { #ifdef DEBUG_TLS printf("Server TLS [ALPN: %u]\n", alpn_len); #endif if((alpn_str_len+alpn_len+1) < (sizeof(alpn_str)-1)) { if(alpn_str_len > 0) { alpn_str[alpn_str_len] = ','; alpn_str_len++; } for(alpn_i=0; alpn_i<alpn_len; alpn_i++) { alpn_str[alpn_str_len+alpn_i] = packet->payload[s_offset+alpn_i]; } s_offset += alpn_len, alpn_str_len += alpn_len;; } else { ndpi_set_risk(flow, NDPI_TLS_UNCOMMON_ALPN); break; } } else { ndpi_set_risk(flow, NDPI_TLS_UNCOMMON_ALPN); break; } } /* while */ alpn_str[alpn_str_len] = '\0'; #ifdef DEBUG_TLS printf("Server TLS [ALPN: %s][len: %u]\n", alpn_str, alpn_str_len); #endif if(flow->protos.tls_quic_stun.tls_quic.alpn == NULL) flow->protos.tls_quic_stun.tls_quic.alpn = ndpi_strdup(alpn_str); if(flow->protos.tls_quic_stun.tls_quic.alpn != NULL) tlsCheckUncommonALPN(flow); snprintf(ja3.server.alpn, sizeof(ja3.server.alpn), "%s", alpn_str); /* Replace , with - as in JA3 */ for(i=0; ja3.server.alpn[i] != '\0'; i++) if(ja3.server.alpn[i] == ',') ja3.server.alpn[i] = '-'; } else if(extension_id == 11 /* ec_point_formats groups */) { u_int16_t s_offset = offset+4 + 1; #ifdef DEBUG_TLS printf("Server TLS [EllipticCurveFormat: len=%u]\n", extension_len); #endif if((s_offset+extension_len-1) <= total_len) { for(i=0; i<extension_len-1; i++) { u_int8_t s_group = packet->payload[s_offset+i]; #ifdef DEBUG_TLS printf("Server TLS [EllipticCurveFormat: %u]\n", s_group); #endif if(ja3.server.num_elliptic_curve_point_format < MAX_NUM_JA3) ja3.server.elliptic_curve_point_format[ja3.server.num_elliptic_curve_point_format++] = s_group; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Server TLS Invalid num elliptic %u\n", ja3.server.num_elliptic_curve_point_format); #endif } } } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Server TLS Invalid len %u vs %u\n", s_offset+extension_len, total_len); #endif } } i += 4 + extension_len, offset += 4 + extension_len; } /* for */ ja3_str_len = snprintf(ja3_str, sizeof(ja3_str), "%u,", ja3.server.tls_handshake_version); for(i=0; i<ja3.server.num_cipher; i++) { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.server.cipher[i]); if(rc <= 0) break; else ja3_str_len += rc; } rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ","); if(rc > 0 && ja3_str_len + rc < JA3_STR_LEN) ja3_str_len += rc; /* ********** */ for(i=0; i<ja3.server.num_tls_extension; i++) { int rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.server.tls_extension[i]); if(rc <= 0) break; else ja3_str_len += rc; } if(ndpi_struct->enable_ja3_plus) { for(i=0; i<ja3.server.num_elliptic_curve_point_format; i++) { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? 
"-" : "", ja3.server.elliptic_curve_point_format[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } if(ja3.server.alpn[0] != '\0') { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ",%s", ja3.server.alpn); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; } #ifdef DEBUG_TLS printf("[JA3+] Server: %s \n", ja3_str); #endif } else { #ifdef DEBUG_TLS printf("[JA3] Server: %s \n", ja3_str); #endif } ndpi_MD5Init(&ctx); ndpi_MD5Update(&ctx, (const unsigned char *)ja3_str, strlen(ja3_str)); ndpi_MD5Final(md5_hash, &ctx); for(i=0, j=0; i<16; i++) { int rc = snprintf(&flow->protos.tls_quic_stun.tls_quic.ja3_server[j], sizeof(flow->protos.tls_quic_stun.tls_quic.ja3_server)-j, "%02x", md5_hash[i]); if(rc <= 0) break; else j += rc; } #ifdef DEBUG_TLS printf("[JA3] Server: %s \n", flow->protos.tls_quic_stun.tls_quic.ja3_server); #endif } else if(handshake_type == 0x01 /* Client Hello */) { u_int16_t cipher_len, cipher_offset; u_int8_t cookie_len = 0; flow->protos.tls_quic_stun.tls_quic.ssl_version = ja3.client.tls_handshake_version = tls_version; if(flow->protos.tls_quic_stun.tls_quic.ssl_version < 0x0302) /* TLSv1.1 */ ndpi_set_risk(flow, NDPI_TLS_OBSOLETE_VERSION); if((session_id_len+base_offset+3) > packet->payload_packet_len) return(0); /* Not found */ if(!is_dtls) { cipher_len = packet->payload[session_id_len+base_offset+2] + (packet->payload[session_id_len+base_offset+1] << 8); cipher_offset = base_offset + session_id_len + 3; } else { cookie_len = packet->payload[base_offset+session_id_len+1]; #ifdef DEBUG_TLS printf("[JA3] Client: DTLS cookie len %d\n", cookie_len); #endif if((session_id_len+base_offset+cookie_len+4) > packet->payload_packet_len) return(0); /* Not found */ cipher_len = ntohs(*((u_int16_t*)&packet->payload[base_offset+session_id_len+cookie_len+2])); cipher_offset = base_offset + session_id_len + cookie_len + 4; } #ifdef DEBUG_TLS printf("Client TLS [client cipher_len: %u][tls_version: 0x%04X]\n", cipher_len, tls_version); #endif if((cipher_offset+cipher_len) <= total_len) { u_int8_t safari_ciphers = 0, chrome_ciphers = 0; for(i=0; i<cipher_len;) { u_int16_t *id = (u_int16_t*)&packet->payload[cipher_offset+i]; #ifdef DEBUG_TLS printf("Client TLS [cipher suite: %u/0x%04X] [%d/%u]\n", ntohs(*id), ntohs(*id), i, cipher_len); #endif if((*id == 0) || (packet->payload[cipher_offset+i] != packet->payload[cipher_offset+i+1])) { u_int16_t cipher_id = ntohs(*id); /* Skip GREASE [https://tools.ietf.org/id/draft-ietf-tls-grease-01.html] https://engineering.salesforce.com/tls-fingerprinting-with-ja3-and-ja3s-247362855967 */ if(ja3.client.num_cipher < MAX_NUM_JA3) ja3.client.cipher[ja3.client.num_cipher++] = cipher_id; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid cipher %u\n", ja3.client.num_cipher); #endif } switch(cipher_id) { case TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: case TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: safari_ciphers++; break; case TLS_CIPHER_GREASE_RESERVED_0: case TLS_AES_128_GCM_SHA256: case TLS_AES_256_GCM_SHA384: case TLS_CHACHA20_POLY1305_SHA256: chrome_ciphers++; break; case TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256: case TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: case TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: case TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256: case TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: case TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: case TLS_RSA_WITH_AES_128_CBC_SHA: case TLS_RSA_WITH_AES_256_CBC_SHA: case TLS_RSA_WITH_AES_128_GCM_SHA256: case 
TLS_RSA_WITH_AES_256_GCM_SHA384: safari_ciphers++, chrome_ciphers++; break; } } i += 2; } /* for */ if(chrome_ciphers == 13) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_chrome_tls = 1; else if(safari_ciphers == 12) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_safari_tls = 1; } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid len %u vs %u\n", (cipher_offset+cipher_len), total_len); #endif } offset = base_offset + session_id_len + cookie_len + cipher_len + 2; offset += (!is_dtls) ? 1 : 2; if(offset < total_len) { u_int16_t compression_len; u_int16_t extensions_len; compression_len = packet->payload[offset]; offset++; #ifdef DEBUG_TLS printf("Client TLS [compression_len: %u]\n", compression_len); #endif // offset += compression_len + 3; offset += compression_len; if(offset+1 < total_len) { extensions_len = ntohs(*((u_int16_t*)&packet->payload[offset])); offset += 2; #ifdef DEBUG_TLS printf("Client TLS [extensions_len: %u]\n", extensions_len); #endif if((extensions_len+offset) <= total_len) { /* Move to the first extension Type is u_int to avoid possible overflow on extension_len addition */ u_int extension_offset = 0; u_int32_t j; while(extension_offset < extensions_len && offset+extension_offset+4 <= total_len) { u_int16_t extension_id, extension_len, extn_off = offset+extension_offset; extension_id = ntohs(*((u_int16_t*)&packet->payload[offset+extension_offset])); extension_offset += 2; extension_len = ntohs(*((u_int16_t*)&packet->payload[offset+extension_offset])); extension_offset += 2; #ifdef DEBUG_TLS printf("Client TLS [extension_id: %u][extension_len: %u]\n", extension_id, extension_len); #endif if((extension_id == 0) || (packet->payload[extn_off] != packet->payload[extn_off+1])) { /* Skip GREASE */ if(ja3.client.num_tls_extension < MAX_NUM_JA3) ja3.client.tls_extension[ja3.client.num_tls_extension++] = extension_id; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid extensions %u\n", ja3.client.num_tls_extension); #endif } } if(extension_id == 0 /* server name */) { u_int16_t len; #ifdef DEBUG_TLS printf("[TLS] Extensions: found server name\n"); #endif if((offset+extension_offset+4) < packet->payload_packet_len) { len = (packet->payload[offset+extension_offset+3] << 8) + packet->payload[offset+extension_offset+4]; len = (u_int)ndpi_min(len, sizeof(buffer)-1); if((offset+extension_offset+5+len) <= packet->payload_packet_len) { strncpy(buffer, (char*)&packet->payload[offset+extension_offset+5], len); buffer[len] = '\0'; cleanupServerName(buffer, sizeof(buffer)); snprintf(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, sizeof(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name), "%s", buffer); #ifdef DEBUG_TLS printf("[TLS] SNI: [%s]\n", buffer); #endif if(!is_quic) { if(ndpi_match_hostname_protocol(ndpi_struct, flow, NDPI_PROTOCOL_TLS, buffer, strlen(buffer))) flow->l4.tcp.tls.subprotocol_detected = 1; } else { if(ndpi_match_hostname_protocol(ndpi_struct, flow, NDPI_PROTOCOL_QUIC, buffer, strlen(buffer))) flow->l4.tcp.tls.subprotocol_detected = 1; } if(ndpi_check_dga_name(ndpi_struct, flow, flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, 1)) { char *sni = flow->protos.tls_quic_stun.tls_quic.client_requested_server_name; int len = strlen(sni); #ifdef DEBUG_TLS printf("[TLS] SNI: (DGA) [%s]\n", flow->protos.tls_quic_stun.tls_quic.client_requested_server_name); #endif if((len >= 4) /* Check if it ends in .com or .net */ && ((strcmp(&sni[len-4], ".com") == 0) || 
(strcmp(&sni[len-4], ".net") == 0)) && (strncmp(sni, "www.", 4) == 0)) /* Not starting with www.... */ ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_TOR, NDPI_PROTOCOL_TLS); } else { #ifdef DEBUG_TLS printf("[TLS] SNI: (NO DGA) [%s]\n", flow->protos.tls_quic_stun.tls_quic.client_requested_server_name); #endif } } else { #ifdef DEBUG_TLS printf("[TLS] Extensions server len too short: %u vs %u\n", offset+extension_offset+5+len, packet->payload_packet_len); #endif } } } else if(extension_id == 10 /* supported groups */) { u_int16_t s_offset = offset+extension_offset + 2; #ifdef DEBUG_TLS printf("Client TLS [EllipticCurveGroups: len=%u]\n", extension_len); #endif if((s_offset+extension_len-2) <= total_len) { for(i=0; i<extension_len-2;) { u_int16_t s_group = ntohs(*((u_int16_t*)&packet->payload[s_offset+i])); #ifdef DEBUG_TLS printf("Client TLS [EllipticCurve: %u/0x%04X]\n", s_group, s_group); #endif if((s_group == 0) || (packet->payload[s_offset+i] != packet->payload[s_offset+i+1])) { /* Skip GREASE */ if(ja3.client.num_elliptic_curve < MAX_NUM_JA3) ja3.client.elliptic_curve[ja3.client.num_elliptic_curve++] = s_group; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid num elliptic %u\n", ja3.client.num_elliptic_curve); #endif } } i += 2; } } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid len %u vs %u\n", (s_offset+extension_len-1), total_len); #endif } } else if(extension_id == 11 /* ec_point_formats groups */) { u_int16_t s_offset = offset+extension_offset + 1; #ifdef DEBUG_TLS printf("Client TLS [EllipticCurveFormat: len=%u]\n", extension_len); #endif if((s_offset+extension_len-1) <= total_len) { for(i=0; i<extension_len-1; i++) { u_int8_t s_group = packet->payload[s_offset+i]; #ifdef DEBUG_TLS printf("Client TLS [EllipticCurveFormat: %u]\n", s_group); #endif if(ja3.client.num_elliptic_curve_point_format < MAX_NUM_JA3) ja3.client.elliptic_curve_point_format[ja3.client.num_elliptic_curve_point_format++] = s_group; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid num elliptic %u\n", ja3.client.num_elliptic_curve_point_format); #endif } } } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid len %u vs %u\n", s_offset+extension_len, total_len); #endif } } else if(extension_id == 13 /* signature algorithms */) { u_int16_t s_offset = offset+extension_offset, safari_signature_algorithms = 0, chrome_signature_algorithms = 0; u_int16_t tot_signature_algorithms_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); #ifdef DEBUG_TLS printf("Client TLS [SIGNATURE_ALGORITHMS: block_len=%u/len=%u]\n", extension_len, tot_signature_algorithms_len); #endif s_offset += 2; tot_signature_algorithms_len = ndpi_min((sizeof(ja3.client.signature_algorithms) / 2) - 1, tot_signature_algorithms_len); #ifdef TLS_HANDLE_SIGNATURE_ALGORITMS flow->protos.tls_quic_stun.tls_quic.num_tls_signature_algorithms = ndpi_min(tot_signature_algorithms_len / 2, MAX_NUM_TLS_SIGNATURE_ALGORITHMS); memcpy(flow->protos.tls_quic_stun.tls_quic.client_signature_algorithms, &packet->payload[s_offset], 2 /* 16 bit */*flow->protos.tls_quic_stun.tls_quic.num_tls_signature_algorithms); #endif for(i=0; i<tot_signature_algorithms_len; i++) { int rc = snprintf(&ja3.client.signature_algorithms[i*2], sizeof(ja3.client.signature_algorithms)-i*2, "%02X", packet->payload[s_offset+i]); if(rc < 0) break; } for(i=0; i<tot_signature_algorithms_len; i+=2) { u_int16_t cipher_id = (u_int16_t)ntohs(*((u_int16_t*)&packet->payload[s_offset+i])); // printf("=>> %04X\n", 
cipher_id); switch(cipher_id) { case ECDSA_SECP521R1_SHA512: flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_firefox_tls = 1; break; case ECDSA_SECP256R1_SHA256: case ECDSA_SECP384R1_SHA384: case RSA_PKCS1_SHA256: case RSA_PKCS1_SHA384: case RSA_PKCS1_SHA512: case RSA_PSS_RSAE_SHA256: case RSA_PSS_RSAE_SHA384: case RSA_PSS_RSAE_SHA512: chrome_signature_algorithms++, safari_signature_algorithms++; break; } } if(flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_firefox_tls) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_safari_tls = 0, flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_chrome_tls = 0; if(safari_signature_algorithms != 8) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_safari_tls = 0; if(chrome_signature_algorithms != 8) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_chrome_tls = 0; ja3.client.signature_algorithms[i*2] = '\0'; #ifdef DEBUG_TLS printf("Client TLS [SIGNATURE_ALGORITHMS: %s]\n", ja3.client.signature_algorithms); #endif } else if(extension_id == 16 /* application_layer_protocol_negotiation */) { u_int16_t s_offset = offset+extension_offset; u_int16_t tot_alpn_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); char alpn_str[256]; u_int8_t alpn_str_len = 0, i; #ifdef DEBUG_TLS printf("Client TLS [ALPN: block_len=%u/len=%u]\n", extension_len, tot_alpn_len); #endif s_offset += 2; tot_alpn_len += s_offset; while(s_offset < tot_alpn_len && s_offset < total_len) { u_int8_t alpn_i, alpn_len = packet->payload[s_offset++]; if((s_offset + alpn_len) <= tot_alpn_len && (s_offset + alpn_len) <= total_len) { #ifdef DEBUG_TLS printf("Client TLS [ALPN: %u]\n", alpn_len); #endif if((alpn_str_len+alpn_len+1) < (sizeof(alpn_str)-1)) { if(alpn_str_len > 0) { alpn_str[alpn_str_len] = ','; alpn_str_len++; } for(alpn_i=0; alpn_i<alpn_len; alpn_i++) alpn_str[alpn_str_len+alpn_i] = packet->payload[s_offset+alpn_i]; s_offset += alpn_len, alpn_str_len += alpn_len;; } else break; } else break; } /* while */ alpn_str[alpn_str_len] = '\0'; #ifdef DEBUG_TLS printf("Client TLS [ALPN: %s][len: %u]\n", alpn_str, alpn_str_len); #endif if(flow->protos.tls_quic_stun.tls_quic.alpn == NULL) flow->protos.tls_quic_stun.tls_quic.alpn = ndpi_strdup(alpn_str); snprintf(ja3.client.alpn, sizeof(ja3.client.alpn), "%s", alpn_str); /* Replace , with - as in JA3 */ for(i=0; ja3.client.alpn[i] != '\0'; i++) if(ja3.client.alpn[i] == ',') ja3.client.alpn[i] = '-'; } else if(extension_id == 43 /* supported versions */) { u_int16_t s_offset = offset+extension_offset; u_int8_t version_len = packet->payload[s_offset]; char version_str[256]; u_int8_t version_str_len = 0; version_str[0] = 0; #ifdef DEBUG_TLS printf("Client TLS [TLS version len: %u]\n", version_len); #endif if(version_len == (extension_len-1)) { u_int8_t j; u_int16_t supported_versions_offset = 0; s_offset++; // careful not to overflow and loop forever with u_int8_t for(j=0; j+1<version_len; j += 2) { u_int16_t tls_version = ntohs(*((u_int16_t*)&packet->payload[s_offset+j])); u_int8_t unknown_tls_version; #ifdef DEBUG_TLS printf("Client TLS [TLS version: %s/0x%04X]\n", ndpi_ssl_version2str(flow, tls_version, &unknown_tls_version), tls_version); #endif if((version_str_len+8) < sizeof(version_str)) { int rc = snprintf(&version_str[version_str_len], sizeof(version_str) - version_str_len, "%s%s", (version_str_len > 0) ? 
"," : "", ndpi_ssl_version2str(flow, tls_version, &unknown_tls_version)); if(rc <= 0) break; else version_str_len += rc; rc = snprintf(&ja3.client.supported_versions[supported_versions_offset], sizeof(ja3.client.supported_versions)-supported_versions_offset, "%s%04X", (j > 0) ? "-" : "", tls_version); if(rc > 0) supported_versions_offset += rc; } } #ifdef DEBUG_TLS printf("Client TLS [SUPPORTED_VERSIONS: %s]\n", ja3.client.supported_versions); #endif if(flow->protos.tls_quic_stun.tls_quic.tls_supported_versions == NULL) flow->protos.tls_quic_stun.tls_quic.tls_supported_versions = ndpi_strdup(version_str); } } else if(extension_id == 65486 /* encrypted server name */) { /* - https://tools.ietf.org/html/draft-ietf-tls-esni-06 - https://blog.cloudflare.com/encrypted-sni/ */ u_int16_t e_offset = offset+extension_offset; u_int16_t initial_offset = e_offset; u_int16_t e_sni_len, cipher_suite = ntohs(*((u_int16_t*)&packet->payload[e_offset])); flow->protos.tls_quic_stun.tls_quic.encrypted_sni.cipher_suite = cipher_suite; e_offset += 2; /* Cipher suite len */ /* Key Share Entry */ e_offset += 2; /* Group */ e_offset += ntohs(*((u_int16_t*)&packet->payload[e_offset])) + 2; /* Lenght */ if((e_offset+4) < packet->payload_packet_len) { /* Record Digest */ e_offset += ntohs(*((u_int16_t*)&packet->payload[e_offset])) + 2; /* Lenght */ if((e_offset+4) < packet->payload_packet_len) { e_sni_len = ntohs(*((u_int16_t*)&packet->payload[e_offset])); e_offset += 2; if((e_offset+e_sni_len-extension_len-initial_offset) >= 0 && e_offset+e_sni_len < packet->payload_packet_len) { #ifdef DEBUG_ENCRYPTED_SNI printf("Client TLS [Encrypted Server Name len: %u]\n", e_sni_len); #endif if(flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni == NULL) { flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni = (char*)ndpi_malloc(e_sni_len*2+1); if(flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni) { u_int16_t i, off; for(i=e_offset, off=0; i<(e_offset+e_sni_len); i++) { int rc = sprintf(&flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni[off], "%02X", packet->payload[i] & 0XFF); if(rc <= 0) { flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni[off] = '\0'; break; } else off += rc; } } } } } } } else if(extension_id == 65445 || /* QUIC transport parameters (drafts version) */ extension_id == 57) { /* QUIC transport parameters (final version) */ u_int16_t s_offset = offset+extension_offset; uint16_t final_offset; int using_var_int = is_version_with_var_int_transport_params(quic_version); if(!using_var_int) { if(s_offset+1 >= total_len) { final_offset = 0; /* Force skipping extension */ } else { u_int16_t seq_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); s_offset += 2; final_offset = MIN(total_len, s_offset + seq_len); } } else { final_offset = MIN(total_len, s_offset + extension_len); } while(s_offset < final_offset) { u_int64_t param_type, param_len; if(!using_var_int) { if(s_offset+3 >= final_offset) break; param_type = ntohs(*((u_int16_t*)&packet->payload[s_offset])); param_len = ntohs(*((u_int16_t*)&packet->payload[s_offset + 2])); s_offset += 4; } else { if(s_offset >= final_offset || (s_offset + quic_len_buffer_still_required(packet->payload[s_offset])) >= final_offset) break; s_offset += quic_len(&packet->payload[s_offset], &param_type); if(s_offset >= final_offset || (s_offset + quic_len_buffer_still_required(packet->payload[s_offset])) >= final_offset) break; s_offset += quic_len(&packet->payload[s_offset], &param_len); } #ifdef DEBUG_TLS printf("Client TLS [QUIC TP: Param 0x%x Len %d]\n", 
(int)param_type, (int)param_len); #endif if(s_offset+param_len > final_offset) break; if(param_type==0x3129) { #ifdef DEBUG_TLS printf("UA [%.*s]\n", (int)param_len, &packet->payload[s_offset]); #endif http_process_user_agent(ndpi_struct, flow, &packet->payload[s_offset], param_len); break; } s_offset += param_len; } } extension_offset += extension_len; /* Move to the next extension */ #ifdef DEBUG_TLS printf("Client TLS [extension_offset/len: %u/%u]\n", extension_offset, extension_len); #endif } /* while */ if(!invalid_ja3) { int rc; compute_ja3c: ja3_str_len = snprintf(ja3_str, sizeof(ja3_str), "%u,", ja3.client.tls_handshake_version); for(i=0; i<ja3.client.num_cipher; i++) { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.client.cipher[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ","); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; /* ********** */ for(i=0; i<ja3.client.num_tls_extension; i++) { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.client.tls_extension[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ","); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; /* ********** */ for(i=0; i<ja3.client.num_elliptic_curve; i++) { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.client.elliptic_curve[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ","); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; for(i=0; i<ja3.client.num_elliptic_curve_point_format; i++) { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, "%s%u", (i > 0) ? 
"-" : "", ja3.client.elliptic_curve_point_format[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } if(ndpi_struct->enable_ja3_plus) { rc = snprintf(&ja3_str[ja3_str_len], sizeof(ja3_str)-ja3_str_len, ",%s,%s,%s", ja3.client.signature_algorithms, ja3.client.supported_versions, ja3.client.alpn); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; } #ifdef DEBUG_JA3C printf("[JA3+] Client: %s \n", ja3_str); #endif ndpi_MD5Init(&ctx); ndpi_MD5Update(&ctx, (const unsigned char *)ja3_str, strlen(ja3_str)); ndpi_MD5Final(md5_hash, &ctx); for(i=0, j=0; i<16; i++) { rc = snprintf(&flow->protos.tls_quic_stun.tls_quic.ja3_client[j], sizeof(flow->protos.tls_quic_stun.tls_quic.ja3_client)-j, "%02x", md5_hash[i]); if(rc > 0) j += rc; else break; } #ifdef DEBUG_JA3C printf("[JA3] Client: %s \n", flow->protos.tls_quic_stun.tls_quic.ja3_client); #endif if(ndpi_struct->malicious_ja3_automa.ac_automa != NULL) { u_int16_t rc1 = ndpi_match_string(ndpi_struct->malicious_ja3_automa.ac_automa, flow->protos.tls_quic_stun.tls_quic.ja3_client); if(rc1 > 0) ndpi_set_risk(flow, NDPI_MALICIOUS_JA3); } } /* Before returning to the caller we need to make a final check */ if((flow->protos.tls_quic_stun.tls_quic.ssl_version >= 0x0303) /* >= TLSv1.2 */ && (flow->protos.tls_quic_stun.tls_quic.alpn == NULL) /* No ALPN */) { ndpi_set_risk(flow, NDPI_TLS_NOT_CARRYING_HTTPS); } /* Suspicious Domain Fronting: https://github.com/SixGenInc/Noctilucent/blob/master/docs/ */ if(flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni && flow->protos.tls_quic_stun.tls_quic.client_requested_server_name[0] != '\0') { ndpi_set_risk(flow, NDPI_TLS_SUSPICIOUS_ESNI_USAGE); } /* Add check for missing SNI */ if((flow->protos.tls_quic_stun.tls_quic.client_requested_server_name[0] == 0) && (flow->protos.tls_quic_stun.tls_quic.ssl_version >= 0x0302) /* TLSv1.1 */ && (flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni == NULL) /* No ESNI */ ) { /* This is a bit suspicious */ ndpi_set_risk(flow, NDPI_TLS_MISSING_SNI); } return(2 /* Client Certificate */); } else { #ifdef DEBUG_TLS printf("[TLS] Client: too short [%u vs %u]\n", (extensions_len+offset), total_len); #endif } } else if(offset == total_len) { /* TLS does not have extensions etc */ goto compute_ja3c; } } else { #ifdef DEBUG_TLS printf("[JA3] Client: invalid length detected\n"); #endif } } } return(0); /* Not found */ } /* **************************************** */ static void ndpi_search_tls_wrapper(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; #ifdef DEBUG_TLS printf("==>> %s() %u [len: %u][version: %u]\n", __FUNCTION__, flow->guessed_host_protocol_id, packet->payload_packet_len, flow->protos.tls_quic_stun.tls_quic.ssl_version); #endif if(packet->udp != NULL) ndpi_search_tls_udp(ndpi_struct, flow); else ndpi_search_tls_tcp(ndpi_struct, flow); } /* **************************************** */ void init_tls_dissector(struct ndpi_detection_module_struct *ndpi_struct, u_int32_t *id, NDPI_PROTOCOL_BITMASK *detection_bitmask) { ndpi_set_bitmask_protocol_detection("TLS", ndpi_struct, detection_bitmask, *id, NDPI_PROTOCOL_TLS, ndpi_search_tls_wrapper, NDPI_SELECTION_BITMASK_PROTOCOL_V4_V6_TCP_WITH_PAYLOAD_WITHOUT_RETRANSMISSION, SAVE_DETECTION_BITMASK_AS_UNKNOWN, ADD_TO_DETECTION_BITMASK); *id += 1; /* *************************************************** */ ndpi_set_bitmask_protocol_detection("DTLS", ndpi_struct, detection_bitmask, *id, 
NDPI_PROTOCOL_DTLS, ndpi_search_tls_wrapper, NDPI_SELECTION_BITMASK_PROTOCOL_V4_V6_UDP_WITH_PAYLOAD, SAVE_DETECTION_BITMASK_AS_UNKNOWN, ADD_TO_DETECTION_BITMASK); *id += 1; }
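The Server Hello and Client Hello handlers above assemble the JA3 fingerprint by joining the handshake version, cipher suites, extension ids, elliptic curves and EC point formats into one comma/dash-separated string and MD5-hashing it, bounds-checking every snprintf() against the fixed JA3_STR_LEN buffer. Below is a minimal standalone sketch of that string-building step only; the sample field values and the helper names ja3_append_num()/ja3_append_comma() are invented for illustration and are not part of nDPI.

#include <stdio.h>
#include <string.h>

#define JA3_STR_LEN 1024

/* Append one numeric field, prefixed by `sep` unless it is the first element
 * of its group; every snprintf() result is bounds-checked so the buffer can
 * never overflow, which is the same guard pattern the dissector relies on. */
static size_t ja3_append_num(char *buf, size_t buflen, size_t used,
                             unsigned value, const char *sep, int is_first) {
  int rc;

  if (used >= buflen)
    return used;
  rc = snprintf(&buf[used], buflen - used, "%s%u", is_first ? "" : sep, value);
  if (rc > 0 && used + (size_t)rc < buflen)
    used += (size_t)rc;
  return used;
}

/* Append the ',' separating the five JA3 groups. */
static size_t ja3_append_comma(char *buf, size_t buflen, size_t used) {
  if (used + 1 < buflen) {
    buf[used++] = ',';
    buf[used] = '\0';
  }
  return used;
}

int main(void) {
  /* Hypothetical ClientHello fields, in JA3 order:
   * TLSVersion,Ciphers,Extensions,EllipticCurves,ECPointFormats */
  unsigned version = 0x0303;                      /* TLS 1.2 (771) */
  unsigned ciphers[] = { 4865, 4866, 4867 };
  unsigned extensions[] = { 0, 10, 11, 35, 43 };
  unsigned curves[] = { 29, 23, 24 };
  unsigned formats[] = { 0 };
  char ja3_str[JA3_STR_LEN];
  size_t used, i;
  int rc;

  rc = snprintf(ja3_str, sizeof(ja3_str), "%u,", version);
  used = (rc > 0) ? (size_t)rc : 0;

  for (i = 0; i < sizeof(ciphers) / sizeof(ciphers[0]); i++)
    used = ja3_append_num(ja3_str, sizeof(ja3_str), used, ciphers[i], "-", i == 0);
  used = ja3_append_comma(ja3_str, sizeof(ja3_str), used);

  for (i = 0; i < sizeof(extensions) / sizeof(extensions[0]); i++)
    used = ja3_append_num(ja3_str, sizeof(ja3_str), used, extensions[i], "-", i == 0);
  used = ja3_append_comma(ja3_str, sizeof(ja3_str), used);

  for (i = 0; i < sizeof(curves) / sizeof(curves[0]); i++)
    used = ja3_append_num(ja3_str, sizeof(ja3_str), used, curves[i], "-", i == 0);
  used = ja3_append_comma(ja3_str, sizeof(ja3_str), used);

  for (i = 0; i < sizeof(formats) / sizeof(formats[0]); i++)
    used = ja3_append_num(ja3_str, sizeof(ja3_str), used, formats[i], "-", i == 0);

  /* The real dissector then MD5-hashes ja3_str (ndpi_MD5Init/Update/Final)
   * and stores the lowercase hex digest as the flow's JA3 fingerprint. */
  printf("JA3 string: %s (%zu bytes)\n", ja3_str, used);
  return 0;
}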
null
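Both Hello handlers also walk the ALPN extension (id 16): a 2-byte list length followed by 1-byte length-prefixed protocol names, copied into a comma-separated string only while each entry fits both the declared list length and the actual packet length. A minimal standalone sketch of that parse follows; the parse_alpn() helper and its signature are invented for illustration and do not exist in nDPI.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Parse a TLS ALPN extension body (2-byte list length, then 1-byte
 * length-prefixed protocol names) into a comma-separated string.
 * Every step checks both the declared list length and the real buffer
 * length, mirroring the double bound used by the dissector.
 * Returns the number of names copied, or -1 on malformed input. */
static int parse_alpn(const uint8_t *payload, size_t total_len,
                      char *out, size_t out_len) {
  size_t off, out_used = 0, list_end;
  int names = 0;

  if (total_len < 2 || out_len == 0)
    return -1;
  list_end = 2 + (((size_t)payload[0] << 8) | payload[1]);
  if (list_end > total_len)
    list_end = total_len;                 /* never trust the declared length */
  off = 2;

  while (off < list_end) {
    uint8_t name_len = payload[off++];
    if (name_len == 0 || off + name_len > list_end)
      return -1;                          /* truncated or malformed entry */
    if (out_used + name_len + 2 >= out_len)
      break;                              /* output buffer full, stop cleanly */
    if (out_used > 0)
      out[out_used++] = ',';
    memcpy(&out[out_used], &payload[off], name_len);
    out_used += name_len;
    off += name_len;
    names++;
  }
  out[out_used] = '\0';
  return names;
}

int main(void) {
  /* Example extension body advertising "h2" and "http/1.1". */
  const uint8_t alpn[] = { 0x00, 0x0c, 0x02, 'h', '2',
                           0x08, 'h', 't', 't', 'p', '/', '1', '.', '1' };
  char buf[64];

  if (parse_alpn(alpn, sizeof(alpn), buf, sizeof(buf)) > 0)
    printf("ALPN: %s\n", buf);
  return 0;
}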
/* * tls.c - TLS/TLS/DTLS dissector * * Copyright (C) 2016-21 - ntop.org * * nDPI is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * nDPI is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with nDPI. If not, see <http://www.gnu.org/licenses/>. * */ #include "ndpi_protocol_ids.h" #define NDPI_CURRENT_PROTO NDPI_PROTOCOL_TLS #include "ndpi_api.h" #include "ndpi_md5.h" #include "ndpi_sha1.h" #include "ndpi_encryption.h" extern char *strptime(const char *s, const char *format, struct tm *tm); extern int processClientServerHello(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, uint32_t quic_version); extern int http_process_user_agent(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, const u_int8_t *ua_ptr, u_int16_t ua_ptr_len); /* QUIC/GQUIC stuff */ extern int quic_len(const uint8_t *buf, uint64_t *value); extern int quic_len_buffer_still_required(uint8_t value); extern int is_version_with_var_int_transport_params(uint32_t version); // #define DEBUG_TLS_MEMORY 1 // #define DEBUG_TLS 1 // #define DEBUG_TLS_BLOCKS 1 // #define DEBUG_CERTIFICATE_HASH // #define DEBUG_JA3C 1 /* #define DEBUG_FINGERPRINT 1 */ /* #define DEBUG_ENCRYPTED_SNI 1 */ /* **************************************** */ /* https://engineering.salesforce.com/tls-fingerprinting-with-ja3-and-ja3s-247362855967 */ #define JA3_STR_LEN 1024 #define MAX_NUM_JA3 512 #define MAX_JA3_STRLEN 256 union ja3_info { struct { u_int16_t tls_handshake_version; u_int16_t num_cipher, cipher[MAX_NUM_JA3]; u_int16_t num_tls_extension, tls_extension[MAX_NUM_JA3]; u_int16_t num_elliptic_curve, elliptic_curve[MAX_NUM_JA3]; u_int16_t num_elliptic_curve_point_format, elliptic_curve_point_format[MAX_NUM_JA3]; char signature_algorithms[MAX_JA3_STRLEN], supported_versions[MAX_JA3_STRLEN], alpn[MAX_JA3_STRLEN]; } client; struct { u_int16_t tls_handshake_version; u_int16_t num_cipher, cipher[MAX_NUM_JA3]; u_int16_t num_tls_extension, tls_extension[MAX_NUM_JA3]; u_int16_t tls_supported_version; u_int16_t num_elliptic_curve_point_format, elliptic_curve_point_format[MAX_NUM_JA3]; char alpn[MAX_JA3_STRLEN]; } server; /* Used for JA3+ */ }; /* NOTE How to view the certificate fingerprint 1. Using wireshark save the certificate on certificate.bin file as explained in https://security.stackexchange.com/questions/123851/how-can-i-extract-the-certificate-from-this-pcap-file 2. openssl x509 -inform der -in certificate.bin -text > certificate.der 3. openssl x509 -noout -fingerprint -sha1 -inform pem -in certificate.der SHA1 Fingerprint=15:9A:76.... $ shasum -a 1 www.grc.com.bin 159a76..... 
*/ #define NDPI_MAX_TLS_REQUEST_SIZE 10000 /* skype.c */ extern u_int8_t is_skype_flow(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow); /* stun.c */ extern u_int32_t get_stun_lru_key(struct ndpi_flow_struct *flow, u_int8_t rev); static void ndpi_int_tls_add_connection(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, u_int32_t protocol); /* **************************************** */ static u_int32_t ndpi_tls_refine_master_protocol(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, u_int32_t protocol) { struct ndpi_packet_struct *packet = &flow->packet; // protocol = NDPI_PROTOCOL_TLS; if(packet->tcp != NULL) { switch(protocol) { case NDPI_PROTOCOL_TLS: { /* In case of TLS there are probably sub-protocols such as IMAPS that can be otherwise detected */ u_int16_t sport = ntohs(packet->tcp->source); u_int16_t dport = ntohs(packet->tcp->dest); if((sport == 465) || (dport == 465) || (sport == 587) || (dport == 587)) protocol = NDPI_PROTOCOL_MAIL_SMTPS; else if((sport == 993) || (dport == 993) || (flow->l4.tcp.mail_imap_starttls) ) protocol = NDPI_PROTOCOL_MAIL_IMAPS; else if((sport == 995) || (dport == 995)) protocol = NDPI_PROTOCOL_MAIL_POPS; } break; } } return(protocol); } /* **************************************** */ void ndpi_search_tls_tcp_memory(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; u_int avail_bytes; /* TCP */ #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Handling TCP/TLS flow [payload_len: %u][buffer_len: %u][direction: %u]\n", packet->payload_packet_len, flow->l4.tcp.tls.message.buffer_len, packet->packet_direction); #endif if(flow->l4.tcp.tls.message.buffer == NULL) { /* Allocate buffer */ flow->l4.tcp.tls.message.buffer_len = 2048, flow->l4.tcp.tls.message.buffer_used = 0; flow->l4.tcp.tls.message.buffer = (u_int8_t*)ndpi_malloc(flow->l4.tcp.tls.message.buffer_len); if(flow->l4.tcp.tls.message.buffer == NULL) return; #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Allocating %u buffer\n", flow->l4.tcp.tls.message.buffer_len); #endif } avail_bytes = flow->l4.tcp.tls.message.buffer_len - flow->l4.tcp.tls.message.buffer_used; if(avail_bytes < packet->payload_packet_len) { u_int new_len = flow->l4.tcp.tls.message.buffer_len + packet->payload_packet_len - avail_bytes + 1; void *newbuf = ndpi_realloc(flow->l4.tcp.tls.message.buffer, flow->l4.tcp.tls.message.buffer_len, new_len); if(!newbuf) return; #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Enlarging %u -> %u buffer\n", flow->l4.tcp.tls.message.buffer_len, new_len); #endif flow->l4.tcp.tls.message.buffer = (u_int8_t*)newbuf; flow->l4.tcp.tls.message.buffer_len = new_len; avail_bytes = flow->l4.tcp.tls.message.buffer_len - flow->l4.tcp.tls.message.buffer_used; } if(packet->payload_packet_len > 0 && avail_bytes >= packet->payload_packet_len) { u_int8_t ok = 0; if(flow->l4.tcp.tls.message.next_seq[packet->packet_direction] != 0) { if(ntohl(packet->tcp->seq) == flow->l4.tcp.tls.message.next_seq[packet->packet_direction]) ok = 1; } else ok = 1; if(ok) { memcpy(&flow->l4.tcp.tls.message.buffer[flow->l4.tcp.tls.message.buffer_used], packet->payload, packet->payload_packet_len); flow->l4.tcp.tls.message.buffer_used += packet->payload_packet_len; #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Copied data to buffer [%u/%u bytes][direction: %u][tcp_seq: %u][next: %u]\n", flow->l4.tcp.tls.message.buffer_used, flow->l4.tcp.tls.message.buffer_len, packet->packet_direction, 
ntohl(packet->tcp->seq), ntohl(packet->tcp->seq)+packet->payload_packet_len); #endif flow->l4.tcp.tls.message.next_seq[packet->packet_direction] = ntohl(packet->tcp->seq)+packet->payload_packet_len; } else { #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Skipping packet [%u bytes][direction: %u][tcp_seq: %u][expected next: %u]\n", flow->l4.tcp.tls.message.buffer_len, packet->packet_direction, ntohl(packet->tcp->seq), ntohl(packet->tcp->seq)+packet->payload_packet_len); #endif } } } /* **************************************** */ /* Can't call libc functions from kernel space, define some stub instead */ #define ndpi_isalpha(ch) (((ch) >= 'a' && (ch) <= 'z') || ((ch) >= 'A' && (ch) <= 'Z')) #define ndpi_isdigit(ch) ((ch) >= '0' && (ch) <= '9') #define ndpi_isspace(ch) (((ch) >= '\t' && (ch) <= '\r') || ((ch) == ' ')) #define ndpi_isprint(ch) ((ch) >= 0x20 && (ch) <= 0x7e) #define ndpi_ispunct(ch) (((ch) >= '!' && (ch) <= '/') || \ ((ch) >= ':' && (ch) <= '@') || \ ((ch) >= '[' && (ch) <= '`') || \ ((ch) >= '{' && (ch) <= '~')) /* **************************************** */ static void cleanupServerName(char *buffer, int buffer_len) { u_int i; /* Now all lowecase */ for(i=0; i<buffer_len; i++) buffer[i] = tolower(buffer[i]); } /* **************************************** */ /* Return code -1: error (buffer too short) 0: OK but buffer is not human readeable (so something went wrong) 1: OK */ static int extractRDNSequence(struct ndpi_packet_struct *packet, u_int offset, char *buffer, u_int buffer_len, char *rdnSeqBuf, u_int *rdnSeqBuf_offset, u_int rdnSeqBuf_len, const char *label) { u_int8_t str_len = packet->payload[offset+4], is_printable = 1; char *str; u_int len, j; if (*rdnSeqBuf_offset >= rdnSeqBuf_len) { #ifdef DEBUG_TLS printf("[TLS] %s() [buffer capacity reached][%u]\n", __FUNCTION__, rdnSeqBuf_len); #endif return -1; } // packet is truncated... further inspection is not needed if((offset+4+str_len) >= packet->payload_packet_len) return(-1); str = (char*)&packet->payload[offset+5]; len = (u_int)ndpi_min(str_len, buffer_len-1); strncpy(buffer, str, len); buffer[len] = '\0'; // check string is printable for(j = 0; j < len; j++) { if(!ndpi_isprint(buffer[j])) { is_printable = 0; break; } } if(is_printable) { int rc = snprintf(&rdnSeqBuf[*rdnSeqBuf_offset], rdnSeqBuf_len-(*rdnSeqBuf_offset), "%s%s=%s", (*rdnSeqBuf_offset > 0) ? 
", " : "", label, buffer); if(rc > 0) (*rdnSeqBuf_offset) += rc; } return(is_printable); } /* **************************************** */ static void checkTLSSubprotocol(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { if(flow->detected_protocol_stack[1] == NDPI_PROTOCOL_UNKNOWN) { /* Subprotocol not yet set */ if(ndpi_struct->tls_cert_cache && flow->packet.iph) { u_int32_t key = flow->packet.iph->daddr + flow->packet.tcp->dest; u_int16_t cached_proto; if(ndpi_lru_find_cache(ndpi_struct->tls_cert_cache, key, &cached_proto, 0 /* Don't remove it as it can be used for other connections */)) { ndpi_protocol ret = { NDPI_PROTOCOL_TLS, cached_proto, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED }; flow->detected_protocol_stack[0] = cached_proto, flow->detected_protocol_stack[1] = NDPI_PROTOCOL_TLS; flow->category = ndpi_get_proto_category(ndpi_struct, ret); ndpi_check_subprotocol_risk(flow, cached_proto); } } } } /* **************************************** */ /* See https://blog.catchpoint.com/2017/05/12/dissecting-tls-using-wireshark/ */ static void processCertificateElements(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, u_int16_t p_offset, u_int16_t certificate_len) { struct ndpi_packet_struct *packet = &flow->packet; u_int num_found = 0, i; char buffer[64] = { '\0' }, rdnSeqBuf[2048] = { '\0' }; u_int rdn_len = 0; #ifdef DEBUG_TLS printf("[TLS] %s() [offset: %u][certificate_len: %u]\n", __FUNCTION__, p_offset, certificate_len); #endif /* Check after handshake protocol header (5 bytes) and message header (4 bytes) */ for(i = p_offset; i < certificate_len; i++) { /* See https://www.ibm.com/support/knowledgecenter/SSFKSJ_7.5.0/com.ibm.mq.sec.doc/q009860_.htm for X.509 certificate labels */ if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x03)) { /* Common Name */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "CN"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Common Name", buffer); #endif } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x06)) { /* Country */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "C"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Country", buffer); #endif } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x07)) { /* Locality */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "L"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Locality", buffer); #endif } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x08)) { /* State or Province */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "ST"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? 
"Subject" : "Issuer", "State or Province", buffer); #endif } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x0a)) { /* Organization Name */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "O"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Organization Name", buffer); #endif } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x04) && (packet->payload[i+2] == 0x0b)) { /* Organization Unit */ int rc = extractRDNSequence(packet, i, buffer, sizeof(buffer), rdnSeqBuf, &rdn_len, sizeof(rdnSeqBuf), "OU"); if(rc == -1) break; #ifdef DEBUG_TLS printf("[TLS] %s() [%s][%s: %s]\n", __FUNCTION__, (num_found == 0) ? "Subject" : "Issuer", "Organization Unit", buffer); #endif } else if((packet->payload[i] == 0x30) && (packet->payload[i+1] == 0x1e) && (packet->payload[i+2] == 0x17)) { /* Certificate Validity */ u_int8_t len = packet->payload[i+3]; u_int offset = i+4; if(num_found == 0) { num_found++; #ifdef DEBUG_TLS printf("[TLS] %s() IssuerDN [%s]\n", __FUNCTION__, rdnSeqBuf); #endif if(rdn_len && (flow->protos.tls_quic_stun.tls_quic.issuerDN == NULL)) flow->protos.tls_quic_stun.tls_quic.issuerDN = ndpi_strdup(rdnSeqBuf); rdn_len = 0; /* Reset buffer */ } if((offset+len) < packet->payload_packet_len) { char utcDate[32]; #ifdef DEBUG_TLS u_int j; printf("[CERTIFICATE] notBefore [len: %u][", len); for(j=0; j<len; j++) printf("%c", packet->payload[i+4+j]); printf("]\n"); #endif if(len < (sizeof(utcDate)-1)) { struct tm utc; utc.tm_isdst = -1; /* Not set by strptime */ strncpy(utcDate, (const char*)&packet->payload[i+4], len); utcDate[len] = '\0'; /* 141021000000Z */ if(strptime(utcDate, "%y%m%d%H%M%SZ", &utc) != NULL) { flow->protos.tls_quic_stun.tls_quic.notBefore = timegm(&utc); #ifdef DEBUG_TLS printf("[CERTIFICATE] notBefore %u [%s]\n", flow->protos.tls_quic_stun.tls_quic.notBefore, utcDate); #endif } } offset += len; if((offset+1) < packet->payload_packet_len) { len = packet->payload[offset+1]; offset += 2; if((offset+len) < packet->payload_packet_len) { u_int32_t time_sec = flow->packet.current_time_ms / 1000; #ifdef DEBUG_TLS u_int j; printf("[CERTIFICATE] notAfter [len: %u][", len); for(j=0; j<len; j++) printf("%c", packet->payload[offset+j]); printf("]\n"); #endif if(len < (sizeof(utcDate)-1)) { struct tm utc; utc.tm_isdst = -1; /* Not set by strptime */ strncpy(utcDate, (const char*)&packet->payload[offset], len); utcDate[len] = '\0'; /* 141021000000Z */ if(strptime(utcDate, "%y%m%d%H%M%SZ", &utc) != NULL) { flow->protos.tls_quic_stun.tls_quic.notAfter = timegm(&utc); #ifdef DEBUG_TLS printf("[CERTIFICATE] notAfter %u [%s]\n", flow->protos.tls_quic_stun.tls_quic.notAfter, utcDate); #endif } } if((time_sec < flow->protos.tls_quic_stun.tls_quic.notBefore) || (time_sec > flow->protos.tls_quic_stun.tls_quic.notAfter)) ndpi_set_risk(flow, NDPI_TLS_CERTIFICATE_EXPIRED); /* Certificate expired */ } } } } else if((packet->payload[i] == 0x55) && (packet->payload[i+1] == 0x1d) && (packet->payload[i+2] == 0x11)) { /* Organization OID: 2.5.29.17 (subjectAltName) */ u_int8_t matched_name = 0; #ifdef DEBUG_TLS printf("******* [TLS] Found subjectAltName\n"); #endif i += 3 /* skip the initial patten 55 1D 11 */; i++; /* skip the first type, 0x04 == BIT STRING, and jump to it's length */ if(i < packet->payload_packet_len) { i += (packet->payload[i] & 0x80) ? 
(packet->payload[i] & 0x7F) : 0; /* skip BIT STRING length */ if(i < packet->payload_packet_len) { i += 2; /* skip the second type, 0x30 == SEQUENCE, and jump to it's length */ if(i < packet->payload_packet_len) { i += (packet->payload[i] & 0x80) ? (packet->payload[i] & 0x7F) : 0; /* skip SEQUENCE length */ i++; while(i < packet->payload_packet_len) { if(packet->payload[i] == 0x82) { if((i < (packet->payload_packet_len - 1)) && ((i + packet->payload[i + 1] + 2) < packet->payload_packet_len)) { u_int8_t len = packet->payload[i + 1]; char dNSName[256]; i += 2; /* The check "len > sizeof(dNSName) - 1" will be always false. If we add it, the compiler is smart enough to detect it and throws a warning */ if((len == 0 /* Looks something went wrong */) || ((i+len) > packet->payload_packet_len)) break; strncpy(dNSName, (const char*)&packet->payload[i], len); dNSName[len] = '\0'; cleanupServerName(dNSName, len); #if DEBUG_TLS printf("[TLS] dNSName %s [%s][len: %u][leftover: %d]\n", dNSName, flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, len, packet->payload_packet_len-i-len); #endif if(matched_name == 0) { if(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name[0] == '\0') matched_name = 1; /* No SNI */ else if (dNSName[0] == '*') { char * label = strstr(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, &dNSName[1]); if (label != NULL) { char * first_dot = strchr(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, '.'); if (first_dot == NULL || first_dot >= label) { matched_name = 1; } } } else if(strcmp(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, dNSName) == 0) matched_name = 1; } if(flow->protos.tls_quic_stun.tls_quic.server_names == NULL) flow->protos.tls_quic_stun.tls_quic.server_names = ndpi_strdup(dNSName), flow->protos.tls_quic_stun.tls_quic.server_names_len = strlen(dNSName); else { u_int16_t dNSName_len = strlen(dNSName); u_int16_t newstr_len = flow->protos.tls_quic_stun.tls_quic.server_names_len + dNSName_len + 1; char *newstr = (char*)ndpi_realloc(flow->protos.tls_quic_stun.tls_quic.server_names, flow->protos.tls_quic_stun.tls_quic.server_names_len+1, newstr_len+1); if(newstr) { flow->protos.tls_quic_stun.tls_quic.server_names = newstr; flow->protos.tls_quic_stun.tls_quic.server_names[flow->protos.tls_quic_stun.tls_quic.server_names_len] = ','; strncpy(&flow->protos.tls_quic_stun.tls_quic.server_names[flow->protos.tls_quic_stun.tls_quic.server_names_len+1], dNSName, dNSName_len+1); flow->protos.tls_quic_stun.tls_quic.server_names[newstr_len] = '\0'; flow->protos.tls_quic_stun.tls_quic.server_names_len = newstr_len; } } if(!flow->l4.tcp.tls.subprotocol_detected) if(ndpi_match_hostname_protocol(ndpi_struct, flow, NDPI_PROTOCOL_TLS, dNSName, len)) flow->l4.tcp.tls.subprotocol_detected = 1; i += len; } else { #if DEBUG_TLS printf("[TLS] Leftover %u bytes", packet->payload_packet_len - i); #endif break; } } else { break; } } /* while */ if(!matched_name) ndpi_set_risk(flow, NDPI_TLS_CERTIFICATE_MISMATCH); /* Certificate mismatch */ } } } } } if(rdn_len && (flow->protos.tls_quic_stun.tls_quic.subjectDN == NULL)) { flow->protos.tls_quic_stun.tls_quic.subjectDN = ndpi_strdup(rdnSeqBuf); if(flow->detected_protocol_stack[1] == NDPI_PROTOCOL_UNKNOWN) { /* No idea what is happening behind the scenes: let's check the certificate */ u_int32_t proto_id; int rc = ndpi_match_string_value(ndpi_struct->tls_cert_subject_automa.ac_automa, rdnSeqBuf, strlen(rdnSeqBuf),&proto_id); if(rc == 0) { /* Match found */ ndpi_protocol ret 
= { NDPI_PROTOCOL_TLS, proto_id, NDPI_PROTOCOL_CATEGORY_UNSPECIFIED}; flow->detected_protocol_stack[0] = proto_id, flow->detected_protocol_stack[1] = NDPI_PROTOCOL_TLS; flow->category = ndpi_get_proto_category(ndpi_struct, ret); ndpi_check_subprotocol_risk(flow, proto_id); if(ndpi_struct->tls_cert_cache == NULL) ndpi_struct->tls_cert_cache = ndpi_lru_cache_init(1024); if(ndpi_struct->tls_cert_cache && flow->packet.iph) { u_int32_t key = flow->packet.iph->daddr + flow->packet.tcp->dest; ndpi_lru_add_to_cache(ndpi_struct->tls_cert_cache, key, proto_id); } } } } if(flow->protos.tls_quic_stun.tls_quic.subjectDN && flow->protos.tls_quic_stun.tls_quic.issuerDN && (!strcmp(flow->protos.tls_quic_stun.tls_quic.subjectDN, flow->protos.tls_quic_stun.tls_quic.issuerDN))) ndpi_set_risk(flow, NDPI_TLS_SELFSIGNED_CERTIFICATE); #if DEBUG_TLS printf("[TLS] %s() SubjectDN [%s]\n", __FUNCTION__, rdnSeqBuf); #endif } /* **************************************** */ /* See https://blog.catchpoint.com/2017/05/12/dissecting-tls-using-wireshark/ */ int processCertificate(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; int is_dtls = packet->udp ? 1 : 0; u_int32_t certificates_length, length = (packet->payload[1] << 16) + (packet->payload[2] << 8) + packet->payload[3]; u_int32_t certificates_offset = 7 + (is_dtls ? 8 : 0); u_int8_t num_certificates_found = 0; SHA1_CTX srv_cert_fingerprint_ctx ; #ifdef DEBUG_TLS printf("[TLS] %s() [payload_packet_len=%u][direction: %u][%02X %02X %02X %02X %02X %02X...]\n", __FUNCTION__, packet->payload_packet_len, packet->packet_direction, packet->payload[0], packet->payload[1], packet->payload[2], packet->payload[3], packet->payload[4], packet->payload[5]); #endif if((packet->payload_packet_len != (length + 4 + (is_dtls ? 8 : 0))) || (packet->payload[1] != 0x0)) { ndpi_set_risk(flow, NDPI_MALFORMED_PACKET); return(-1); /* Invalid length */ } certificates_length = (packet->payload[certificates_offset - 3] << 16) + (packet->payload[certificates_offset - 2] << 8) + packet->payload[certificates_offset - 1]; if((packet->payload[certificates_offset - 3] != 0x0) || ((certificates_length+3) != length)) { ndpi_set_risk(flow, NDPI_MALFORMED_PACKET); return(-2); /* Invalid length */ } /* Now let's process each individual certificates */ while(certificates_offset < certificates_length) { u_int32_t certificate_len = (packet->payload[certificates_offset] << 16) + (packet->payload[certificates_offset+1] << 8) + packet->payload[certificates_offset+2]; /* Invalid lenght */ if((certificate_len == 0) || (packet->payload[certificates_offset] != 0x0) || ((certificates_offset+certificate_len) > (4+certificates_length+(is_dtls ? 
8 : 0)))) { #ifdef DEBUG_TLS printf("[TLS] Invalid length [certificate_len: %u][certificates_offset: %u][%u vs %u]\n", certificate_len, certificates_offset, (certificates_offset+certificate_len), certificates_length); #endif break; } certificates_offset += 3; #ifdef DEBUG_TLS printf("[TLS] Processing %u bytes certificate [%02X %02X %02X]\n", certificate_len, packet->payload[certificates_offset], packet->payload[certificates_offset+1], packet->payload[certificates_offset+2]); #endif if(num_certificates_found++ == 0) /* Dissect only the first certificate that is the one we care */ { /* For SHA-1 we take into account only the first certificate and not all of them */ SHA1Init(&srv_cert_fingerprint_ctx); #ifdef DEBUG_CERTIFICATE_HASH { int i; for(i=0;i<certificate_len;i++) printf("%02X ", packet->payload[certificates_offset+i]); printf("\n"); } #endif SHA1Update(&srv_cert_fingerprint_ctx, &packet->payload[certificates_offset], certificate_len); SHA1Final(flow->protos.tls_quic_stun.tls_quic.sha1_certificate_fingerprint, &srv_cert_fingerprint_ctx); flow->l4.tcp.tls.fingerprint_set = 1; uint8_t * sha1 = flow->protos.tls_quic_stun.tls_quic.sha1_certificate_fingerprint; const size_t sha1_siz = sizeof(flow->protos.tls_quic_stun.tls_quic.sha1_certificate_fingerprint); char sha1_str[20 /* sha1_siz */ * 2 + 1]; static const char hexalnum[] = "0123456789ABCDEF"; for (size_t i = 0; i < sha1_siz; ++i) { u_int8_t lower = (sha1[i] & 0x0F); u_int8_t upper = (sha1[i] & 0xF0) >> 4; sha1_str[i*2] = hexalnum[upper]; sha1_str[i*2 + 1] = hexalnum[lower]; } sha1_str[sha1_siz * 2] = '\0'; #ifdef DEBUG_TLS printf("[TLS] SHA-1: %s\n", sha1_str); #endif if (ndpi_struct->malicious_sha1_automa.ac_automa != NULL) { u_int16_t rc1 = ndpi_match_string(ndpi_struct->malicious_sha1_automa.ac_automa, sha1_str); if(rc1 > 0) ndpi_set_risk(flow, NDPI_MALICIOUS_SHA1_CERTIFICATE); } processCertificateElements(ndpi_struct, flow, certificates_offset, certificate_len); } certificates_offset += certificate_len; } if((ndpi_struct->num_tls_blocks_to_follow != 0) && (flow->l4.tcp.tls.num_tls_blocks >= ndpi_struct->num_tls_blocks_to_follow)) { #ifdef DEBUG_TLS_BLOCKS printf("*** [TLS Block] Enough blocks dissected\n"); #endif flow->extra_packets_func = NULL; /* We're good now */ } return(1); } /* **************************************** */ static int processTLSBlock(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; int ret; switch(packet->payload[0] /* block type */) { case 0x01: /* Client Hello */ case 0x02: /* Server Hello */ processClientServerHello(ndpi_struct, flow, 0); flow->l4.tcp.tls.hello_processed = 1; ndpi_int_tls_add_connection(ndpi_struct, flow, NDPI_PROTOCOL_TLS); #ifdef DEBUG_TLS printf("*** TLS [version: %02X][%s Hello]\n", flow->protos.tls_quic_stun.tls_quic.ssl_version, (packet->payload[0] == 0x01) ? 
"Client" : "Server"); #endif if((flow->protos.tls_quic_stun.tls_quic.ssl_version >= 0x0304 /* TLS 1.3 */) && (packet->payload[0] == 0x02 /* Server Hello */)) { flow->l4.tcp.tls.certificate_processed = 1; /* No Certificate with TLS 1.3+ */ } checkTLSSubprotocol(ndpi_struct, flow); break; case 0x0b: /* Certificate */ /* Important: populate the tls union fields only after * ndpi_int_tls_add_connection has been called */ if(flow->l4.tcp.tls.hello_processed) { ret = processCertificate(ndpi_struct, flow); if (ret != 1) { #ifdef DEBUG_TLS printf("[TLS] Error processing certificate: %d\n", ret); #endif } flow->l4.tcp.tls.certificate_processed = 1; } break; default: return(-1); } return(0); } /* **************************************** */ static void ndpi_looks_like_tls(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { // ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_TLS, NDPI_PROTOCOL_UNKNOWN); if(flow->guessed_protocol_id == NDPI_PROTOCOL_UNKNOWN) flow->guessed_protocol_id = NDPI_PROTOCOL_TLS; } /* **************************************** */ static int ndpi_search_tls_tcp(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; u_int8_t something_went_wrong = 0; #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] ndpi_search_tls_tcp() Processing new packet [payload_packet_len: %u]\n", packet->payload_packet_len); #endif if(packet->payload_packet_len == 0) return(1); /* Keep working */ ndpi_search_tls_tcp_memory(ndpi_struct, flow); while(!something_went_wrong) { u_int16_t len, p_len; const u_int8_t *p; u_int8_t content_type; if(flow->l4.tcp.tls.message.buffer_used < 5) return(1); /* Keep working */ len = (flow->l4.tcp.tls.message.buffer[3] << 8) + flow->l4.tcp.tls.message.buffer[4] + 5; if(len > flow->l4.tcp.tls.message.buffer_used) { #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Not enough TLS data [%u < %u][%02X %02X %02X %02X %02X]\n", len, flow->l4.tcp.tls.message.buffer_used, flow->l4.tcp.tls.message.buffer[0], flow->l4.tcp.tls.message.buffer[1], flow->l4.tcp.tls.message.buffer[2], flow->l4.tcp.tls.message.buffer[3], flow->l4.tcp.tls.message.buffer[4]); #endif break; } if(len == 0) { something_went_wrong = 1; break; } #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Processing %u bytes message\n", len); #endif content_type = flow->l4.tcp.tls.message.buffer[0]; /* Overwriting packet payload */ p = packet->payload; p_len = packet->payload_packet_len; /* Backup */ if(content_type == 0x14 /* Change Cipher Spec */) { if(ndpi_struct->skip_tls_blocks_until_change_cipher) { /* Ignore Application Data up until change cipher so in this case we reset the number of observed TLS blocks */ flow->l4.tcp.tls.num_tls_blocks = 0; } } if((len > 9) && (content_type != 0x17 /* Application Data */) && (!flow->l4.tcp.tls.certificate_processed)) { /* Split the element in blocks */ u_int16_t processed = 5; while((processed+4) <= len) { const u_int8_t *block = (const u_int8_t *)&flow->l4.tcp.tls.message.buffer[processed]; u_int32_t block_len = (block[1] << 16) + (block[2] << 8) + block[3]; if(/* (block_len == 0) || */ /* Note blocks can have zero lenght */ (block_len > len) || ((block[1] != 0x0))) { something_went_wrong = 1; break; } packet->payload = block; packet->payload_packet_len = ndpi_min(block_len+4, flow->l4.tcp.tls.message.buffer_used); if((processed+packet->payload_packet_len) > len) { something_went_wrong = 1; break; } processTLSBlock(ndpi_struct, flow); ndpi_looks_like_tls(ndpi_struct, flow); processed 
+= packet->payload_packet_len; } } else { /* Process element as a whole */ if(content_type == 0x17 /* Application Data */) { ndpi_looks_like_tls(ndpi_struct, flow); if(flow->l4.tcp.tls.certificate_processed) { if(flow->l4.tcp.tls.num_tls_blocks < ndpi_struct->num_tls_blocks_to_follow) flow->l4.tcp.tls.tls_application_blocks_len[flow->l4.tcp.tls.num_tls_blocks++] = (packet->packet_direction == 0) ? (len-5) : -(len-5); #ifdef DEBUG_TLS_BLOCKS printf("*** [TLS Block] [len: %u][num_tls_blocks: %u/%u]\n", len-5, flow->l4.tcp.tls.num_tls_blocks, ndpi_struct->num_tls_blocks_to_follow); #endif } } } packet->payload = p; packet->payload_packet_len = p_len; /* Restore */ flow->l4.tcp.tls.message.buffer_used -= len; if(flow->l4.tcp.tls.message.buffer_used > 0) memmove(flow->l4.tcp.tls.message.buffer, &flow->l4.tcp.tls.message.buffer[len], flow->l4.tcp.tls.message.buffer_used); else break; #ifdef DEBUG_TLS_MEMORY printf("[TLS Mem] Left memory buffer %u bytes\n", flow->l4.tcp.tls.message.buffer_used); #endif } if(something_went_wrong || ((ndpi_struct->num_tls_blocks_to_follow > 0) && (flow->l4.tcp.tls.num_tls_blocks == ndpi_struct->num_tls_blocks_to_follow)) ) { #ifdef DEBUG_TLS_BLOCKS printf("*** [TLS Block] No more blocks\n"); #endif flow->check_extra_packets = 0; flow->extra_packets_func = NULL; return(0); /* That's all */ } else return(1); } /* **************************************** */ static int ndpi_search_tls_udp(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; u_int32_t handshake_len; u_int16_t p_len, processed; const u_int8_t *p; u_int8_t no_dtls = 0, change_cipher_found = 0; #ifdef DEBUG_TLS printf("[TLS] %s()\n", __FUNCTION__); #endif /* Overwriting packet payload */ p = packet->payload, p_len = packet->payload_packet_len; /* Backup */ /* Split the element in blocks */ processed = 0; while(processed + 13 < p_len) { u_int32_t block_len; const u_int8_t *block = (const u_int8_t *)&p[processed]; if((block[0] != 0x16 && block[0] != 0x14) || /* Handshake, change-cipher-spec */ (block[1] != 0xfe) || /* We ignore old DTLS versions */ ((block[2] != 0xff) && (block[2] != 0xfd))) { #ifdef DEBUG_TLS printf("[TLS] DTLS invalid block 0x%x or old version 0x%x-0x%x-0x%x\n", block[0], block[1], block[2], block[3]); #endif no_dtls = 1; break; } block_len = ntohs(*((u_int16_t*)&block[11])); #ifdef DEBUG_TLS printf("[TLS] DTLS block len: %d\n", block_len); #endif if (block_len == 0 || (processed + block_len + 12 >= p_len)) { #ifdef DEBUG_TLS printf("[TLS] DTLS invalid block len %d (processed %d, p_len %d)\n", block_len, processed, p_len); #endif no_dtls = 1; break; } /* We process only handshake msgs */ if(block[0] == 0x16) { if (processed + block_len + 13 > p_len) { #ifdef DEBUG_TLS printf("[TLS] DTLS invalid len %d %d %d\n", processed, block_len, p_len); #endif no_dtls = 1; break; } /* TODO: handle (certificate) fragments */ handshake_len = (block[14] << 16) + (block[15] << 8) + block[16]; if((handshake_len + 12) != block_len) { #ifdef DEBUG_TLS printf("[TLS] DTLS invalid handshake_len %d, %d)\n", handshake_len, block_len); #endif no_dtls = 1; break; } packet->payload = &block[13]; packet->payload_packet_len = block_len; processTLSBlock(ndpi_struct, flow); } else { /* Change-cipher-spec: any subsequent block might be encrypted */ #ifdef DEBUG_TLS printf("[TLS] Change-cipher-spec\n"); #endif change_cipher_found = 1; processed += block_len + 13; break; } processed += block_len + 13; } if(processed != p_len) { #ifdef DEBUG_TLS 
printf("[TLS] DTLS invalid processed len %d/%d (%d)\n", processed, p_len, change_cipher_found); #endif if(!change_cipher_found) no_dtls = 1; } packet->payload = p; packet->payload_packet_len = p_len; /* Restore */ if(no_dtls || change_cipher_found) { NDPI_EXCLUDE_PROTO(ndpi_struct, flow); return(0); /* That's all */ } else { return(1); /* Keep working */ } } /* **************************************** */ static void tlsInitExtraPacketProcessing(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { flow->check_extra_packets = 1; /* At most 12 packets should almost always be enough to find the server certificate if it's there */ flow->max_extra_packets_to_check = 12 + (ndpi_struct->num_tls_blocks_to_follow*4); flow->extra_packets_func = (flow->packet.udp != NULL) ? ndpi_search_tls_udp : ndpi_search_tls_tcp; } /* **************************************** */ static void tlsCheckUncommonALPN(struct ndpi_flow_struct *flow) { /* see: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml */ static char const * const common_alpns[] = { "http/0.9", "http/1.0", "http/1.1", "spdy/1", "spdy/2", "spdy/3", "spdy/3.1", "stun.turn", "stun.nat-discovery", "h2", "h2c", "h2-16", "h2-15", "h2-14", "webrtc", "c-webrtc", "ftp", "imap", "pop3", "managesieve", "coap", "xmpp-client", "xmpp-server", "acme-tls/1", "mqtt", "dot", "ntske/1", "sunrpc", "h3", "smb", "irc", /* QUIC ALPNs */ "h3-T051", "h3-T050", "h3-32", "h3-30", "h3-29", "h3-28", "h3-27", "h3-24", "h3-22", "hq-30", "hq-29", "hq-28", "hq-27", "h3-fb-05", "h1q-fb", "doq-i00" }; /* * If the ALPN list increases in size, iterating over all items for every incoming ALPN may * have a performance impact. A hash map could solve this issue. */ char * alpn_start = flow->protos.tls_quic_stun.tls_quic.alpn; char * comma_or_nul = alpn_start; do { comma_or_nul = strchr(comma_or_nul, ','); if (comma_or_nul == NULL) { comma_or_nul = alpn_start + strlen(alpn_start); } int alpn_found = 0; int alpn_len = comma_or_nul - alpn_start; char const * const alpn = alpn_start; for (size_t i = 0; i < sizeof(common_alpns)/sizeof(common_alpns[0]); ++i) { if (strlen(common_alpns[i]) == alpn_len && strncmp(alpn, common_alpns[i], alpn_len) == 0) { alpn_found = 1; break; } } if (alpn_found == 0) { #ifdef DEBUG_TLS printf("TLS uncommon ALPN found: %.*s\n", alpn_len, alpn); #endif ndpi_set_risk(flow, NDPI_TLS_UNCOMMON_ALPN); break; } alpn_start = comma_or_nul + 1; } while (*(comma_or_nul++) != '\0'); } /* **************************************** */ static void ndpi_int_tls_add_connection(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, u_int32_t protocol) { #if DEBUG_TLS printf("[TLS] %s()\n", __FUNCTION__); #endif if((flow->packet.udp != NULL) && (protocol == NDPI_PROTOCOL_TLS)) protocol = NDPI_PROTOCOL_DTLS; if((flow->detected_protocol_stack[0] == protocol) || (flow->detected_protocol_stack[1] == protocol)) { if(!flow->check_extra_packets) tlsInitExtraPacketProcessing(ndpi_struct, flow); return; } if(protocol != NDPI_PROTOCOL_TLS) ; else protocol = ndpi_tls_refine_master_protocol(ndpi_struct, flow, protocol); ndpi_set_detected_protocol(ndpi_struct, flow, protocol, protocol); tlsInitExtraPacketProcessing(ndpi_struct, flow); } /* **************************************** */ int processClientServerHello(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow, uint32_t quic_version) { struct ndpi_packet_struct *packet = &flow->packet; union ja3_info ja3; u_int8_t invalid_ja3 = 
0; u_int16_t tls_version, ja3_str_len; char ja3_str[JA3_STR_LEN]; ndpi_MD5_CTX ctx; u_char md5_hash[16]; int i; u_int16_t total_len; u_int8_t handshake_type; char buffer[64] = { '\0' }; int is_quic = (quic_version != 0); int is_dtls = packet->udp && (!is_quic); #ifdef DEBUG_TLS printf("TLS %s() called\n", __FUNCTION__); #endif memset(&ja3, 0, sizeof(ja3)); handshake_type = packet->payload[0]; total_len = (packet->payload[1] << 16) + (packet->payload[2] << 8) + packet->payload[3]; if((total_len > packet->payload_packet_len) || (packet->payload[1] != 0x0)) return(0); /* Not found */ total_len = packet->payload_packet_len; /* At least "magic" 3 bytes, null for string end, otherwise no need to waste cpu cycles */ if(total_len > 4) { u_int16_t base_offset = (!is_dtls) ? 38 : 46; u_int16_t version_offset = (!is_dtls) ? 4 : 12; u_int16_t offset = (!is_dtls) ? 38 : 46, extension_len, j; u_int8_t session_id_len = 0; if((base_offset >= total_len) || (version_offset + 1) >= total_len) return 0; /* Not found */ session_id_len = packet->payload[base_offset]; #ifdef DEBUG_TLS printf("TLS [len: %u][handshake_type: %02X]\n", packet->payload_packet_len, handshake_type); #endif tls_version = ntohs(*((u_int16_t*)&packet->payload[version_offset])); if(handshake_type == 0x02 /* Server Hello */) { int i, rc; ja3.server.tls_handshake_version = tls_version; #ifdef DEBUG_TLS printf("TLS Server Hello [version: 0x%04X]\n", tls_version); #endif /* The server hello decides about the TLS version of this flow https://networkengineering.stackexchange.com/questions/55752/why-does-wireshark-show-version-tls-1-2-here-instead-of-tls-1-3 */ if(packet->udp) offset += session_id_len + 1; else { if(tls_version < 0x7F15 /* TLS 1.3 lacks of session id */) offset += session_id_len+1; } if((offset+3) > packet->payload_packet_len) return(0); /* Not found */ ja3.server.num_cipher = 1, ja3.server.cipher[0] = ntohs(*((u_int16_t*)&packet->payload[offset])); if((flow->protos.tls_quic_stun.tls_quic.server_unsafe_cipher = ndpi_is_safe_ssl_cipher(ja3.server.cipher[0])) == 1) ndpi_set_risk(flow, NDPI_TLS_WEAK_CIPHER); flow->protos.tls_quic_stun.tls_quic.server_cipher = ja3.server.cipher[0]; #ifdef DEBUG_TLS printf("TLS [server][session_id_len: %u][cipher: %04X]\n", session_id_len, ja3.server.cipher[0]); #endif offset += 2 + 1; if((offset + 1) < packet->payload_packet_len) /* +1 because we are goint to read 2 bytes */ extension_len = ntohs(*((u_int16_t*)&packet->payload[offset])); else extension_len = 0; #ifdef DEBUG_TLS printf("TLS [server][extension_len: %u]\n", extension_len); #endif offset += 2; for(i=0; i<extension_len; ) { u_int16_t extension_id, extension_len; if((offset+4) > packet->payload_packet_len) break; extension_id = ntohs(*((u_int16_t*)&packet->payload[offset])); extension_len = ntohs(*((u_int16_t*)&packet->payload[offset+2])); if(ja3.server.num_tls_extension < MAX_NUM_JA3) ja3.server.tls_extension[ja3.server.num_tls_extension++] = extension_id; #ifdef DEBUG_TLS printf("TLS [server][extension_id: %u/0x%04X][len: %u]\n", extension_id, extension_id, extension_len); #endif if(extension_id == 43 /* supported versions */) { if(extension_len >= 2) { u_int16_t tls_version = ntohs(*((u_int16_t*)&packet->payload[offset+4])); #ifdef DEBUG_TLS printf("TLS [server] [TLS version: 0x%04X]\n", tls_version); #endif flow->protos.tls_quic_stun.tls_quic.ssl_version = ja3.server.tls_supported_version = tls_version; } } else if(extension_id == 16 /* application_layer_protocol_negotiation (ALPN) */) { u_int16_t s_offset = offset+4; u_int16_t 
tot_alpn_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); char alpn_str[256]; u_int8_t alpn_str_len = 0, i; #ifdef DEBUG_TLS printf("Server TLS [ALPN: block_len=%u/len=%u]\n", extension_len, tot_alpn_len); #endif s_offset += 2; tot_alpn_len += s_offset; while(s_offset < tot_alpn_len && s_offset < total_len) { u_int8_t alpn_i, alpn_len = packet->payload[s_offset++]; if((s_offset + alpn_len) <= tot_alpn_len) { #ifdef DEBUG_TLS printf("Server TLS [ALPN: %u]\n", alpn_len); #endif if((alpn_str_len+alpn_len+1) < (sizeof(alpn_str)-1)) { if(alpn_str_len > 0) { alpn_str[alpn_str_len] = ','; alpn_str_len++; } for(alpn_i=0; alpn_i<alpn_len; alpn_i++) { alpn_str[alpn_str_len+alpn_i] = packet->payload[s_offset+alpn_i]; } s_offset += alpn_len, alpn_str_len += alpn_len;; } else { ndpi_set_risk(flow, NDPI_TLS_UNCOMMON_ALPN); break; } } else { ndpi_set_risk(flow, NDPI_TLS_UNCOMMON_ALPN); break; } } /* while */ alpn_str[alpn_str_len] = '\0'; #ifdef DEBUG_TLS printf("Server TLS [ALPN: %s][len: %u]\n", alpn_str, alpn_str_len); #endif if(flow->protos.tls_quic_stun.tls_quic.alpn == NULL) flow->protos.tls_quic_stun.tls_quic.alpn = ndpi_strdup(alpn_str); if(flow->protos.tls_quic_stun.tls_quic.alpn != NULL) tlsCheckUncommonALPN(flow); snprintf(ja3.server.alpn, sizeof(ja3.server.alpn), "%s", alpn_str); /* Replace , with - as in JA3 */ for(i=0; ja3.server.alpn[i] != '\0'; i++) if(ja3.server.alpn[i] == ',') ja3.server.alpn[i] = '-'; } else if(extension_id == 11 /* ec_point_formats groups */) { u_int16_t s_offset = offset+4 + 1; #ifdef DEBUG_TLS printf("Server TLS [EllipticCurveFormat: len=%u]\n", extension_len); #endif if((s_offset+extension_len-1) <= total_len) { for(i=0; i<extension_len-1; i++) { u_int8_t s_group = packet->payload[s_offset+i]; #ifdef DEBUG_TLS printf("Server TLS [EllipticCurveFormat: %u]\n", s_group); #endif if(ja3.server.num_elliptic_curve_point_format < MAX_NUM_JA3) ja3.server.elliptic_curve_point_format[ja3.server.num_elliptic_curve_point_format++] = s_group; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Server TLS Invalid num elliptic %u\n", ja3.server.num_elliptic_curve_point_format); #endif } } } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Server TLS Invalid len %u vs %u\n", s_offset+extension_len, total_len); #endif } } i += 4 + extension_len, offset += 4 + extension_len; } /* for */ ja3_str_len = snprintf(ja3_str, JA3_STR_LEN, "%u,", ja3.server.tls_handshake_version); for(i=0; (i<ja3.server.num_cipher) && (JA3_STR_LEN > ja3_str_len); i++) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.server.cipher[i]); if(rc <= 0) break; else ja3_str_len += rc; } if(JA3_STR_LEN > ja3_str_len) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ","); if(rc > 0 && ja3_str_len + rc < JA3_STR_LEN) ja3_str_len += rc; } /* ********** */ for(i=0; (i<ja3.server.num_tls_extension) && (JA3_STR_LEN > ja3_str_len); i++) { int rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.server.tls_extension[i]); if(rc <= 0) break; else ja3_str_len += rc; } if(ndpi_struct->enable_ja3_plus) { for(i=0; (i<ja3.server.num_elliptic_curve_point_format) && (JA3_STR_LEN > ja3_str_len); i++) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? 
"-" : "", ja3.server.elliptic_curve_point_format[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } if((ja3.server.alpn[0] != '\0') && (JA3_STR_LEN > ja3_str_len)) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ",%s", ja3.server.alpn); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; } #ifdef DEBUG_TLS printf("[JA3+] Server: %s \n", ja3_str); #endif } else { #ifdef DEBUG_TLS printf("[JA3] Server: %s \n", ja3_str); #endif } ndpi_MD5Init(&ctx); ndpi_MD5Update(&ctx, (const unsigned char *)ja3_str, strlen(ja3_str)); ndpi_MD5Final(md5_hash, &ctx); for(i=0, j=0; i<16; i++) { int rc = snprintf(&flow->protos.tls_quic_stun.tls_quic.ja3_server[j], sizeof(flow->protos.tls_quic_stun.tls_quic.ja3_server)-j, "%02x", md5_hash[i]); if(rc <= 0) break; else j += rc; } #ifdef DEBUG_TLS printf("[JA3] Server: %s \n", flow->protos.tls_quic_stun.tls_quic.ja3_server); #endif } else if(handshake_type == 0x01 /* Client Hello */) { u_int16_t cipher_len, cipher_offset; u_int8_t cookie_len = 0; flow->protos.tls_quic_stun.tls_quic.ssl_version = ja3.client.tls_handshake_version = tls_version; if(flow->protos.tls_quic_stun.tls_quic.ssl_version < 0x0302) /* TLSv1.1 */ ndpi_set_risk(flow, NDPI_TLS_OBSOLETE_VERSION); if((session_id_len+base_offset+3) > packet->payload_packet_len) return(0); /* Not found */ if(!is_dtls) { cipher_len = packet->payload[session_id_len+base_offset+2] + (packet->payload[session_id_len+base_offset+1] << 8); cipher_offset = base_offset + session_id_len + 3; } else { cookie_len = packet->payload[base_offset+session_id_len+1]; #ifdef DEBUG_TLS printf("[JA3] Client: DTLS cookie len %d\n", cookie_len); #endif if((session_id_len+base_offset+cookie_len+4) > packet->payload_packet_len) return(0); /* Not found */ cipher_len = ntohs(*((u_int16_t*)&packet->payload[base_offset+session_id_len+cookie_len+2])); cipher_offset = base_offset + session_id_len + cookie_len + 4; } #ifdef DEBUG_TLS printf("Client TLS [client cipher_len: %u][tls_version: 0x%04X]\n", cipher_len, tls_version); #endif if((cipher_offset+cipher_len) <= total_len) { u_int8_t safari_ciphers = 0, chrome_ciphers = 0; for(i=0; i<cipher_len;) { u_int16_t *id = (u_int16_t*)&packet->payload[cipher_offset+i]; #ifdef DEBUG_TLS printf("Client TLS [cipher suite: %u/0x%04X] [%d/%u]\n", ntohs(*id), ntohs(*id), i, cipher_len); #endif if((*id == 0) || (packet->payload[cipher_offset+i] != packet->payload[cipher_offset+i+1])) { u_int16_t cipher_id = ntohs(*id); /* Skip GREASE [https://tools.ietf.org/id/draft-ietf-tls-grease-01.html] https://engineering.salesforce.com/tls-fingerprinting-with-ja3-and-ja3s-247362855967 */ if(ja3.client.num_cipher < MAX_NUM_JA3) ja3.client.cipher[ja3.client.num_cipher++] = cipher_id; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid cipher %u\n", ja3.client.num_cipher); #endif } switch(cipher_id) { case TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: case TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: safari_ciphers++; break; case TLS_CIPHER_GREASE_RESERVED_0: case TLS_AES_128_GCM_SHA256: case TLS_AES_256_GCM_SHA384: case TLS_CHACHA20_POLY1305_SHA256: chrome_ciphers++; break; case TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256: case TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: case TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: case TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256: case TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: case TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: case TLS_RSA_WITH_AES_128_CBC_SHA: case TLS_RSA_WITH_AES_256_CBC_SHA: case TLS_RSA_WITH_AES_128_GCM_SHA256: case 
TLS_RSA_WITH_AES_256_GCM_SHA384: safari_ciphers++, chrome_ciphers++; break; } } i += 2; } /* for */ if(chrome_ciphers == 13) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_chrome_tls = 1; else if(safari_ciphers == 12) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_safari_tls = 1; } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid len %u vs %u\n", (cipher_offset+cipher_len), total_len); #endif } offset = base_offset + session_id_len + cookie_len + cipher_len + 2; offset += (!is_dtls) ? 1 : 2; if(offset < total_len) { u_int16_t compression_len; u_int16_t extensions_len; compression_len = packet->payload[offset]; offset++; #ifdef DEBUG_TLS printf("Client TLS [compression_len: %u]\n", compression_len); #endif // offset += compression_len + 3; offset += compression_len; if(offset+1 < total_len) { extensions_len = ntohs(*((u_int16_t*)&packet->payload[offset])); offset += 2; #ifdef DEBUG_TLS printf("Client TLS [extensions_len: %u]\n", extensions_len); #endif if((extensions_len+offset) <= total_len) { /* Move to the first extension Type is u_int to avoid possible overflow on extension_len addition */ u_int extension_offset = 0; u_int32_t j; while(extension_offset < extensions_len && offset+extension_offset+4 <= total_len) { u_int16_t extension_id, extension_len, extn_off = offset+extension_offset; extension_id = ntohs(*((u_int16_t*)&packet->payload[offset+extension_offset])); extension_offset += 2; extension_len = ntohs(*((u_int16_t*)&packet->payload[offset+extension_offset])); extension_offset += 2; #ifdef DEBUG_TLS printf("Client TLS [extension_id: %u][extension_len: %u]\n", extension_id, extension_len); #endif if((extension_id == 0) || (packet->payload[extn_off] != packet->payload[extn_off+1])) { /* Skip GREASE */ if(ja3.client.num_tls_extension < MAX_NUM_JA3) ja3.client.tls_extension[ja3.client.num_tls_extension++] = extension_id; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid extensions %u\n", ja3.client.num_tls_extension); #endif } } if(extension_id == 0 /* server name */) { u_int16_t len; #ifdef DEBUG_TLS printf("[TLS] Extensions: found server name\n"); #endif if((offset+extension_offset+4) < packet->payload_packet_len) { len = (packet->payload[offset+extension_offset+3] << 8) + packet->payload[offset+extension_offset+4]; len = (u_int)ndpi_min(len, sizeof(buffer)-1); if((offset+extension_offset+5+len) <= packet->payload_packet_len) { strncpy(buffer, (char*)&packet->payload[offset+extension_offset+5], len); buffer[len] = '\0'; cleanupServerName(buffer, sizeof(buffer)); snprintf(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, sizeof(flow->protos.tls_quic_stun.tls_quic.client_requested_server_name), "%s", buffer); #ifdef DEBUG_TLS printf("[TLS] SNI: [%s]\n", buffer); #endif if(!is_quic) { if(ndpi_match_hostname_protocol(ndpi_struct, flow, NDPI_PROTOCOL_TLS, buffer, strlen(buffer))) flow->l4.tcp.tls.subprotocol_detected = 1; } else { if(ndpi_match_hostname_protocol(ndpi_struct, flow, NDPI_PROTOCOL_QUIC, buffer, strlen(buffer))) flow->l4.tcp.tls.subprotocol_detected = 1; } if(ndpi_check_dga_name(ndpi_struct, flow, flow->protos.tls_quic_stun.tls_quic.client_requested_server_name, 1)) { char *sni = flow->protos.tls_quic_stun.tls_quic.client_requested_server_name; int len = strlen(sni); #ifdef DEBUG_TLS printf("[TLS] SNI: (DGA) [%s]\n", flow->protos.tls_quic_stun.tls_quic.client_requested_server_name); #endif if((len >= 4) /* Check if it ends in .com or .net */ && ((strcmp(&sni[len-4], ".com") == 0) || 
(strcmp(&sni[len-4], ".net") == 0)) && (strncmp(sni, "www.", 4) == 0)) /* Not starting with www.... */ ndpi_set_detected_protocol(ndpi_struct, flow, NDPI_PROTOCOL_TOR, NDPI_PROTOCOL_TLS); } else { #ifdef DEBUG_TLS printf("[TLS] SNI: (NO DGA) [%s]\n", flow->protos.tls_quic_stun.tls_quic.client_requested_server_name); #endif } } else { #ifdef DEBUG_TLS printf("[TLS] Extensions server len too short: %u vs %u\n", offset+extension_offset+5+len, packet->payload_packet_len); #endif } } } else if(extension_id == 10 /* supported groups */) { u_int16_t s_offset = offset+extension_offset + 2; #ifdef DEBUG_TLS printf("Client TLS [EllipticCurveGroups: len=%u]\n", extension_len); #endif if((s_offset+extension_len-2) <= total_len) { for(i=0; i<extension_len-2;) { u_int16_t s_group = ntohs(*((u_int16_t*)&packet->payload[s_offset+i])); #ifdef DEBUG_TLS printf("Client TLS [EllipticCurve: %u/0x%04X]\n", s_group, s_group); #endif if((s_group == 0) || (packet->payload[s_offset+i] != packet->payload[s_offset+i+1])) { /* Skip GREASE */ if(ja3.client.num_elliptic_curve < MAX_NUM_JA3) ja3.client.elliptic_curve[ja3.client.num_elliptic_curve++] = s_group; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid num elliptic %u\n", ja3.client.num_elliptic_curve); #endif } } i += 2; } } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid len %u vs %u\n", (s_offset+extension_len-1), total_len); #endif } } else if(extension_id == 11 /* ec_point_formats groups */) { u_int16_t s_offset = offset+extension_offset + 1; #ifdef DEBUG_TLS printf("Client TLS [EllipticCurveFormat: len=%u]\n", extension_len); #endif if((s_offset+extension_len-1) <= total_len) { for(i=0; i<extension_len-1; i++) { u_int8_t s_group = packet->payload[s_offset+i]; #ifdef DEBUG_TLS printf("Client TLS [EllipticCurveFormat: %u]\n", s_group); #endif if(ja3.client.num_elliptic_curve_point_format < MAX_NUM_JA3) ja3.client.elliptic_curve_point_format[ja3.client.num_elliptic_curve_point_format++] = s_group; else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid num elliptic %u\n", ja3.client.num_elliptic_curve_point_format); #endif } } } else { invalid_ja3 = 1; #ifdef DEBUG_TLS printf("Client TLS Invalid len %u vs %u\n", s_offset+extension_len, total_len); #endif } } else if(extension_id == 13 /* signature algorithms */) { u_int16_t s_offset = offset+extension_offset, safari_signature_algorithms = 0, chrome_signature_algorithms = 0; u_int16_t tot_signature_algorithms_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); #ifdef DEBUG_TLS printf("Client TLS [SIGNATURE_ALGORITHMS: block_len=%u/len=%u]\n", extension_len, tot_signature_algorithms_len); #endif s_offset += 2; tot_signature_algorithms_len = ndpi_min((sizeof(ja3.client.signature_algorithms) / 2) - 1, tot_signature_algorithms_len); #ifdef TLS_HANDLE_SIGNATURE_ALGORITMS flow->protos.tls_quic_stun.tls_quic.num_tls_signature_algorithms = ndpi_min(tot_signature_algorithms_len / 2, MAX_NUM_TLS_SIGNATURE_ALGORITHMS); memcpy(flow->protos.tls_quic_stun.tls_quic.client_signature_algorithms, &packet->payload[s_offset], 2 /* 16 bit */*flow->protos.tls_quic_stun.tls_quic.num_tls_signature_algorithms); #endif for(i=0; i<tot_signature_algorithms_len; i++) { int rc = snprintf(&ja3.client.signature_algorithms[i*2], sizeof(ja3.client.signature_algorithms)-i*2, "%02X", packet->payload[s_offset+i]); if(rc < 0) break; } for(i=0; i<tot_signature_algorithms_len; i+=2) { u_int16_t cipher_id = (u_int16_t)ntohs(*((u_int16_t*)&packet->payload[s_offset+i])); // printf("=>> %04X\n", 
cipher_id); switch(cipher_id) { case ECDSA_SECP521R1_SHA512: flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_firefox_tls = 1; break; case ECDSA_SECP256R1_SHA256: case ECDSA_SECP384R1_SHA384: case RSA_PKCS1_SHA256: case RSA_PKCS1_SHA384: case RSA_PKCS1_SHA512: case RSA_PSS_RSAE_SHA256: case RSA_PSS_RSAE_SHA384: case RSA_PSS_RSAE_SHA512: chrome_signature_algorithms++, safari_signature_algorithms++; break; } } if(flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_firefox_tls) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_safari_tls = 0, flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_chrome_tls = 0; if(safari_signature_algorithms != 8) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_safari_tls = 0; if(chrome_signature_algorithms != 8) flow->protos.tls_quic_stun.tls_quic.browser_euristics.is_chrome_tls = 0; ja3.client.signature_algorithms[i*2] = '\0'; #ifdef DEBUG_TLS printf("Client TLS [SIGNATURE_ALGORITHMS: %s]\n", ja3.client.signature_algorithms); #endif } else if(extension_id == 16 /* application_layer_protocol_negotiation */) { u_int16_t s_offset = offset+extension_offset; u_int16_t tot_alpn_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); char alpn_str[256]; u_int8_t alpn_str_len = 0, i; #ifdef DEBUG_TLS printf("Client TLS [ALPN: block_len=%u/len=%u]\n", extension_len, tot_alpn_len); #endif s_offset += 2; tot_alpn_len += s_offset; while(s_offset < tot_alpn_len && s_offset < total_len) { u_int8_t alpn_i, alpn_len = packet->payload[s_offset++]; if((s_offset + alpn_len) <= tot_alpn_len && (s_offset + alpn_len) <= total_len) { #ifdef DEBUG_TLS printf("Client TLS [ALPN: %u]\n", alpn_len); #endif if((alpn_str_len+alpn_len+1) < (sizeof(alpn_str)-1)) { if(alpn_str_len > 0) { alpn_str[alpn_str_len] = ','; alpn_str_len++; } for(alpn_i=0; alpn_i<alpn_len; alpn_i++) alpn_str[alpn_str_len+alpn_i] = packet->payload[s_offset+alpn_i]; s_offset += alpn_len, alpn_str_len += alpn_len;; } else break; } else break; } /* while */ alpn_str[alpn_str_len] = '\0'; #ifdef DEBUG_TLS printf("Client TLS [ALPN: %s][len: %u]\n", alpn_str, alpn_str_len); #endif if(flow->protos.tls_quic_stun.tls_quic.alpn == NULL) flow->protos.tls_quic_stun.tls_quic.alpn = ndpi_strdup(alpn_str); snprintf(ja3.client.alpn, sizeof(ja3.client.alpn), "%s", alpn_str); /* Replace , with - as in JA3 */ for(i=0; ja3.client.alpn[i] != '\0'; i++) if(ja3.client.alpn[i] == ',') ja3.client.alpn[i] = '-'; } else if(extension_id == 43 /* supported versions */) { u_int16_t s_offset = offset+extension_offset; u_int8_t version_len = packet->payload[s_offset]; char version_str[256]; u_int8_t version_str_len = 0; version_str[0] = 0; #ifdef DEBUG_TLS printf("Client TLS [TLS version len: %u]\n", version_len); #endif if(version_len == (extension_len-1)) { u_int8_t j; u_int16_t supported_versions_offset = 0; s_offset++; // careful not to overflow and loop forever with u_int8_t for(j=0; j+1<version_len; j += 2) { u_int16_t tls_version = ntohs(*((u_int16_t*)&packet->payload[s_offset+j])); u_int8_t unknown_tls_version; #ifdef DEBUG_TLS printf("Client TLS [TLS version: %s/0x%04X]\n", ndpi_ssl_version2str(flow, tls_version, &unknown_tls_version), tls_version); #endif if((version_str_len+8) < sizeof(version_str)) { int rc = snprintf(&version_str[version_str_len], sizeof(version_str) - version_str_len, "%s%s", (version_str_len > 0) ? 
"," : "", ndpi_ssl_version2str(flow, tls_version, &unknown_tls_version)); if(rc <= 0) break; else version_str_len += rc; rc = snprintf(&ja3.client.supported_versions[supported_versions_offset], sizeof(ja3.client.supported_versions)-supported_versions_offset, "%s%04X", (j > 0) ? "-" : "", tls_version); if(rc > 0) supported_versions_offset += rc; } } #ifdef DEBUG_TLS printf("Client TLS [SUPPORTED_VERSIONS: %s]\n", ja3.client.supported_versions); #endif if(flow->protos.tls_quic_stun.tls_quic.tls_supported_versions == NULL) flow->protos.tls_quic_stun.tls_quic.tls_supported_versions = ndpi_strdup(version_str); } } else if(extension_id == 65486 /* encrypted server name */) { /* - https://tools.ietf.org/html/draft-ietf-tls-esni-06 - https://blog.cloudflare.com/encrypted-sni/ */ u_int16_t e_offset = offset+extension_offset; u_int16_t initial_offset = e_offset; u_int16_t e_sni_len, cipher_suite = ntohs(*((u_int16_t*)&packet->payload[e_offset])); flow->protos.tls_quic_stun.tls_quic.encrypted_sni.cipher_suite = cipher_suite; e_offset += 2; /* Cipher suite len */ /* Key Share Entry */ e_offset += 2; /* Group */ e_offset += ntohs(*((u_int16_t*)&packet->payload[e_offset])) + 2; /* Lenght */ if((e_offset+4) < packet->payload_packet_len) { /* Record Digest */ e_offset += ntohs(*((u_int16_t*)&packet->payload[e_offset])) + 2; /* Lenght */ if((e_offset+4) < packet->payload_packet_len) { e_sni_len = ntohs(*((u_int16_t*)&packet->payload[e_offset])); e_offset += 2; if((e_offset+e_sni_len-extension_len-initial_offset) >= 0 && e_offset+e_sni_len < packet->payload_packet_len) { #ifdef DEBUG_ENCRYPTED_SNI printf("Client TLS [Encrypted Server Name len: %u]\n", e_sni_len); #endif if(flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni == NULL) { flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni = (char*)ndpi_malloc(e_sni_len*2+1); if(flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni) { u_int16_t i, off; for(i=e_offset, off=0; i<(e_offset+e_sni_len); i++) { int rc = sprintf(&flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni[off], "%02X", packet->payload[i] & 0XFF); if(rc <= 0) { flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni[off] = '\0'; break; } else off += rc; } } } } } } } else if(extension_id == 65445 || /* QUIC transport parameters (drafts version) */ extension_id == 57) { /* QUIC transport parameters (final version) */ u_int16_t s_offset = offset+extension_offset; uint16_t final_offset; int using_var_int = is_version_with_var_int_transport_params(quic_version); if(!using_var_int) { if(s_offset+1 >= total_len) { final_offset = 0; /* Force skipping extension */ } else { u_int16_t seq_len = ntohs(*((u_int16_t*)&packet->payload[s_offset])); s_offset += 2; final_offset = MIN(total_len, s_offset + seq_len); } } else { final_offset = MIN(total_len, s_offset + extension_len); } while(s_offset < final_offset) { u_int64_t param_type, param_len; if(!using_var_int) { if(s_offset+3 >= final_offset) break; param_type = ntohs(*((u_int16_t*)&packet->payload[s_offset])); param_len = ntohs(*((u_int16_t*)&packet->payload[s_offset + 2])); s_offset += 4; } else { if(s_offset >= final_offset || (s_offset + quic_len_buffer_still_required(packet->payload[s_offset])) >= final_offset) break; s_offset += quic_len(&packet->payload[s_offset], &param_type); if(s_offset >= final_offset || (s_offset + quic_len_buffer_still_required(packet->payload[s_offset])) >= final_offset) break; s_offset += quic_len(&packet->payload[s_offset], &param_len); } #ifdef DEBUG_TLS printf("Client TLS [QUIC TP: Param 0x%x Len %d]\n", 
(int)param_type, (int)param_len); #endif if(s_offset+param_len > final_offset) break; if(param_type==0x3129) { #ifdef DEBUG_TLS printf("UA [%.*s]\n", (int)param_len, &packet->payload[s_offset]); #endif http_process_user_agent(ndpi_struct, flow, &packet->payload[s_offset], param_len); break; } s_offset += param_len; } } extension_offset += extension_len; /* Move to the next extension */ #ifdef DEBUG_TLS printf("Client TLS [extension_offset/len: %u/%u]\n", extension_offset, extension_len); #endif } /* while */ if(!invalid_ja3) { int rc; compute_ja3c: ja3_str_len = snprintf(ja3_str, JA3_STR_LEN, "%u,", ja3.client.tls_handshake_version); for(i=0; i<ja3.client.num_cipher; i++) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.client.cipher[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ","); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; /* ********** */ for(i=0; i<ja3.client.num_tls_extension; i++) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.client.tls_extension[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ","); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; /* ********** */ for(i=0; i<ja3.client.num_elliptic_curve; i++) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? "-" : "", ja3.client.elliptic_curve[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ","); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; for(i=0; i<ja3.client.num_elliptic_curve_point_format; i++) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, "%s%u", (i > 0) ? 
"-" : "", ja3.client.elliptic_curve_point_format[i]); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; else break; } if(ndpi_struct->enable_ja3_plus) { rc = snprintf(&ja3_str[ja3_str_len], JA3_STR_LEN-ja3_str_len, ",%s,%s,%s", ja3.client.signature_algorithms, ja3.client.supported_versions, ja3.client.alpn); if((rc > 0) && (ja3_str_len + rc < JA3_STR_LEN)) ja3_str_len += rc; } #ifdef DEBUG_JA3C printf("[JA3+] Client: %s \n", ja3_str); #endif ndpi_MD5Init(&ctx); ndpi_MD5Update(&ctx, (const unsigned char *)ja3_str, strlen(ja3_str)); ndpi_MD5Final(md5_hash, &ctx); for(i=0, j=0; i<16; i++) { rc = snprintf(&flow->protos.tls_quic_stun.tls_quic.ja3_client[j], sizeof(flow->protos.tls_quic_stun.tls_quic.ja3_client)-j, "%02x", md5_hash[i]); if(rc > 0) j += rc; else break; } #ifdef DEBUG_JA3C printf("[JA3] Client: %s \n", flow->protos.tls_quic_stun.tls_quic.ja3_client); #endif if(ndpi_struct->malicious_ja3_automa.ac_automa != NULL) { u_int16_t rc1 = ndpi_match_string(ndpi_struct->malicious_ja3_automa.ac_automa, flow->protos.tls_quic_stun.tls_quic.ja3_client); if(rc1 > 0) ndpi_set_risk(flow, NDPI_MALICIOUS_JA3); } } /* Before returning to the caller we need to make a final check */ if((flow->protos.tls_quic_stun.tls_quic.ssl_version >= 0x0303) /* >= TLSv1.2 */ && (flow->protos.tls_quic_stun.tls_quic.alpn == NULL) /* No ALPN */) { ndpi_set_risk(flow, NDPI_TLS_NOT_CARRYING_HTTPS); } /* Suspicious Domain Fronting: https://github.com/SixGenInc/Noctilucent/blob/master/docs/ */ if(flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni && flow->protos.tls_quic_stun.tls_quic.client_requested_server_name[0] != '\0') { ndpi_set_risk(flow, NDPI_TLS_SUSPICIOUS_ESNI_USAGE); } /* Add check for missing SNI */ if((flow->protos.tls_quic_stun.tls_quic.client_requested_server_name[0] == 0) && (flow->protos.tls_quic_stun.tls_quic.ssl_version >= 0x0302) /* TLSv1.1 */ && (flow->protos.tls_quic_stun.tls_quic.encrypted_sni.esni == NULL) /* No ESNI */ ) { /* This is a bit suspicious */ ndpi_set_risk(flow, NDPI_TLS_MISSING_SNI); } return(2 /* Client Certificate */); } else { #ifdef DEBUG_TLS printf("[TLS] Client: too short [%u vs %u]\n", (extensions_len+offset), total_len); #endif } } else if(offset == total_len) { /* TLS does not have extensions etc */ goto compute_ja3c; } } else { #ifdef DEBUG_TLS printf("[JA3] Client: invalid length detected\n"); #endif } } } return(0); /* Not found */ } /* **************************************** */ static void ndpi_search_tls_wrapper(struct ndpi_detection_module_struct *ndpi_struct, struct ndpi_flow_struct *flow) { struct ndpi_packet_struct *packet = &flow->packet; #ifdef DEBUG_TLS printf("==>> %s() %u [len: %u][version: %u]\n", __FUNCTION__, flow->guessed_host_protocol_id, packet->payload_packet_len, flow->protos.tls_quic_stun.tls_quic.ssl_version); #endif if(packet->udp != NULL) ndpi_search_tls_udp(ndpi_struct, flow); else ndpi_search_tls_tcp(ndpi_struct, flow); } /* **************************************** */ void init_tls_dissector(struct ndpi_detection_module_struct *ndpi_struct, u_int32_t *id, NDPI_PROTOCOL_BITMASK *detection_bitmask) { ndpi_set_bitmask_protocol_detection("TLS", ndpi_struct, detection_bitmask, *id, NDPI_PROTOCOL_TLS, ndpi_search_tls_wrapper, NDPI_SELECTION_BITMASK_PROTOCOL_V4_V6_TCP_WITH_PAYLOAD_WITHOUT_RETRANSMISSION, SAVE_DETECTION_BITMASK_AS_UNKNOWN, ADD_TO_DETECTION_BITMASK); *id += 1; /* *************************************************** */ ndpi_set_bitmask_protocol_detection("DTLS", ndpi_struct, detection_bitmask, *id, NDPI_PROTOCOL_DTLS, 
ndpi_search_tls_wrapper, NDPI_SELECTION_BITMASK_PROTOCOL_V4_V6_UDP_WITH_PAYLOAD, SAVE_DETECTION_BITMASK_AS_UNKNOWN, ADD_TO_DETECTION_BITMASK); *id += 1; }
null
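The nDPI TLS/DTLS dissector in the row above checks the remaining space before committing every snprintf() result while it assembles the JA3 fingerprint string, which is the idiom that keeps the fixed-size buffer from being overrun (the CWE-787 class this dataset row is labelled with). Below is a minimal, self-contained sketch of that bounded-append pattern; JA3_BUF_LEN, append_u16() and the cipher IDs are illustrative stand-ins, not names or values taken from the nDPI code.

/* Sketch of the guarded-append pattern: only advance the write offset when
 * snprintf() succeeded and its output actually fit in the remaining space. */
#include <stdio.h>
#include <string.h>

#define JA3_BUF_LEN 1024   /* assumption: stands in for JA3_STR_LEN */

static size_t append_u16(char *buf, size_t used, unsigned value, int first) {
    int rc = snprintf(&buf[used], JA3_BUF_LEN - used, "%s%u", first ? "" : "-", value);
    /* Commit only if the formatted value fit; otherwise keep the old length. */
    if (rc > 0 && used + (size_t)rc < JA3_BUF_LEN)
        return used + (size_t)rc;
    return used; /* truncated or failed: buffer stays consistent */
}

int main(void) {
    char ja3[JA3_BUF_LEN] = "771,";             /* e.g. TLS handshake version */
    size_t len = strlen(ja3);
    unsigned ciphers[] = { 4865, 4866, 4867 };  /* hypothetical cipher IDs */
    for (size_t i = 0; i < sizeof(ciphers) / sizeof(ciphers[0]); i++)
        len = append_u16(ja3, len, ciphers[i], i == 0);
    printf("%s\n", ja3);  /* prints: 771,4865-4866-4867 */
    return 0;
}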
277
CWE-787
CVE-2021-36218
/* Copyright (C) 2019-Present SKALE Labs This file is part of sgxwallet. sgxwallet is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. sgxwallet is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with sgxwallet. If not, see <https://www.gnu.org/licenses/>. @file testw.cpp @author Stan Kladko @date 2020 */ #include <libff/algebra/fields/fp.hpp> #include <dkg/dkg.h> #include <jsonrpccpp/server/connectors/httpserver.h> #include <libff/algebra/curves/alt_bn128/alt_bn128_pp.hpp> #include <libff/algebra/exponentiation/exponentiation.hpp> #include <libff/algebra/fields/fp.hpp> #include <dkg/dkg.h> #include "sgxwallet_common.h" #include "third_party/intel/create_enclave.h" #include "secure_enclave_u.h" #include "third_party/intel/sgx_detect.h" #include <gmp.h> #include <sgx_urts.h> #include <stdio.h> #include <jsonrpccpp/client/connectors/httpclient.h> #include <sgx_tcrypto.h> #include "BLSCrypto.h" #include "ServerInit.h" #include "DKGCrypto.h" #include "SGXException.h" #include "LevelDB.h" #include "SGXWalletServer.hpp" #define CATCH_CONFIG_MAIN #include "catch.hpp" #include "stubclient.h" #include "BLSSigShare.h" #include "BLSSigShareSet.h" #include "BLSPublicKeyShare.h" #include "BLSPublicKey.h" #include "SEKManager.h" #include <thread> #include "common.h" #include "SGXRegistrationServer.h" #include "SGXWalletServer.h" #include "sgxwallet.h" #include "TestUtils.h" #include "testw.h" #define PRINT_SRC_LINE cerr << "Executing line " << to_string(__LINE__) << endl; using namespace jsonrpc; using namespace std; class TestFixture { public: TestFixture() { TestUtils::resetDB(); setOptions(L_INFO, false, true); initAll(L_INFO, false, true); } ~TestFixture() { TestUtils::destroyEnclave(); } }; class TestFixtureHTTPS { public: TestFixtureHTTPS() { TestUtils::resetDB(); setOptions(L_INFO, true, true); initAll(L_INFO, false, true); } ~TestFixtureHTTPS() { TestUtils::destroyEnclave(); } }; class TestFixtureNoResetFromBackup { public: TestFixtureNoResetFromBackup() { setFullOptions(L_INFO, false, true, true); initAll(L_INFO, false, true); } ~TestFixtureNoResetFromBackup() { TestUtils::destroyEnclave(); } }; class TestFixtureNoReset { public: TestFixtureNoReset() { setOptions(L_INFO, false, true); initAll(L_INFO, false, true); } ~TestFixtureNoReset() { TestUtils::destroyEnclave(); } }; TEST_CASE_METHOD(TestFixture, "ECDSA AES keygen and signature test", "[ecdsa-aes-key-sig-gen]") { vector<char> errMsg(BUF_LEN, 0); int errStatus = 0; vector <uint8_t> encrPrivKey(BUF_LEN, 0); vector<char> pubKeyX(BUF_LEN, 0); vector<char> pubKeyY(BUF_LEN, 0); uint32_t encLen = 0; PRINT_SRC_LINE auto status = trustedGenerateEcdsaKeyAES(eid, &errStatus, errMsg.data(), encrPrivKey.data(), &encLen, pubKeyX.data(), pubKeyY.data()); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); string hex = SAMPLE_HEX_HASH; vector<char> signatureR(BUF_LEN, 0); vector<char> signatureS(BUF_LEN, 0); uint8_t signatureV = 0; for (int i = 0; i < 50; i++) { PRINT_SRC_LINE status = trustedEcdsaSignAES(eid, &errStatus, errMsg.data(), encrPrivKey.data(), encLen, hex.data(), signatureR.data(), 
signatureS.data(), &signatureV, 16); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); } } TEST_CASE_METHOD(TestFixture, "ECDSA AES key gen", "[ecdsa-aes-key-gen]") { vector<char> errMsg(BUF_LEN, 0); int errStatus = 0; vector <uint8_t> encrPrivKey(BUF_LEN, 0); vector<char> pubKeyX(BUF_LEN, 0); vector<char> pubKeyY(BUF_LEN, 0); uint32_t encLen = 0; PRINT_SRC_LINE auto status = trustedGenerateEcdsaKeyAES(eid, &errStatus, errMsg.data(), encrPrivKey.data(), &encLen, pubKeyX.data(), pubKeyY.data()); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); } TEST_CASE_METHOD(TestFixture, "ECDSA AES get public key", "[ecdsa-aes-get-pub-key]") { int errStatus = 0; vector<char> errMsg(BUF_LEN, 0); vector <uint8_t> encPrivKey(BUF_LEN, 0); vector<char> pubKeyX(BUF_LEN, 0); vector<char> pubKeyY(BUF_LEN, 0); uint32_t encLen = 0; PRINT_SRC_LINE auto status = trustedGenerateEcdsaKeyAES(eid, &errStatus, errMsg.data(), encPrivKey.data(), &encLen, pubKeyX.data(), pubKeyY.data()); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); vector<char> receivedPubKeyX(BUF_LEN, 0); vector<char> receivedPubKeyY(BUF_LEN, 0); PRINT_SRC_LINE status = trustedGetPublicEcdsaKeyAES(eid, &errStatus, errMsg.data(), encPrivKey.data(), encLen, receivedPubKeyX.data(), receivedPubKeyY.data()); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); } /* Do later TEST_CASE_METHOD("BLS key encrypt/decrypt", "[bls-key-encrypt-decrypt]") { resetDB(); setOptions(false, false, false, true); initAll(0, false, true); //init_enclave(); int errStatus = -1; vector<char> errMsg(BUF_LEN, 0); char *encryptedKey = TestUtils::encryptTestKey(); REQUIRE(encryptedKey != nullptr); char *plaintextKey = decryptBLSKeyShareFromHex(&errStatus, errMsg.data(), encryptedKey); free(encryptedKey); REQUIRE(errStatus == 0); REQUIRE(strcmp(plaintextKey, TEST_BLS_KEY_SHARE) == 0); printf("Decrypt key completed with status: %d %s \n", errStatus, errMsg.data()); printf("Decrypted key len %d\n", (int) strlen(plaintextKey)); printf("Decrypted key: %s\n", plaintextKey); free(plaintextKey); } */ string genECDSAKeyAPI(StubClient &_c) { Json::Value genKey = _c.generateECDSAKey(); CHECK_STATE(genKey["status"].asInt() == 0); auto keyName = genKey["keyName"].asString(); CHECK_STATE(keyName.size() == ECDSA_KEY_NAME_SIZE); return keyName; } TEST_CASE_METHOD(TestFixture, "ECDSA key gen API", "[ecdsa-key-gen-api]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, JSONRPC_CLIENT_V2); for (int i = 0; i <= 20; i++) { try { PRINT_SRC_LINE auto keyName = genECDSAKeyAPI(c); PRINT_SRC_LINE Json::Value sig = c.ecdsaSignMessageHash(16, keyName, SAMPLE_HASH); REQUIRE(sig["status"].asInt() == 0); Json::Value getPubKey = c.getPublicECDSAKey(keyName); REQUIRE(getPubKey["status"].asInt() == 0); } catch (JsonRpcException &e) { cerr << e.what() << endl; throw; } } auto keyName = genECDSAKeyAPI(c); Json::Value sig = c.ecdsaSignMessageHash(10, keyName, SAMPLE_HASH); for (int i = 0; i <= 20; i++) { try { PRINT_SRC_LINE auto keyName = genECDSAKeyAPI(c); PRINT_SRC_LINE Json::Value sig = c.ecdsaSignMessageHash(10, keyName, SAMPLE_HASH); REQUIRE(sig["status"].asInt() == 0); PRINT_SRC_LINE Json::Value getPubKey = c.getPublicECDSAKey(keyName); REQUIRE(getPubKey["status"].asInt() == 0); } catch (JsonRpcException &e) { cerr << e.what() << endl; throw; } } } TEST_CASE_METHOD(TestFixture, "BLS key encrypt", "[bls-key-encrypt]") { auto key = TestUtils::encryptTestKey(); REQUIRE(key != nullptr); } TEST_CASE_METHOD(TestFixture, "DKG AES gen test", 
"[dkg-aes-gen]") { vector <uint8_t> encryptedDKGSecret(BUF_LEN, 0); vector<char> errMsg(BUF_LEN, 0); int errStatus = 0; uint32_t encLen = 0; PRINT_SRC_LINE auto status = trustedGenDkgSecretAES(eid, &errStatus, errMsg.data(), encryptedDKGSecret.data(), &encLen, 32); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); vector<char> secret(BUF_LEN, 0); vector<char> errMsg1(BUF_LEN, 0); status = trustedDecryptDkgSecretAES(eid, &errStatus, errMsg1.data(), encryptedDKGSecret.data(), encLen, (uint8_t *) secret.data()); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); } TEST_CASE_METHOD(TestFixture, "DKG AES public shares test", "[dkg-aes-pub-shares]") { vector <uint8_t> encryptedDKGSecret(BUF_LEN, 0); vector<char> errMsg(BUF_LEN, 0); int errStatus = 0; uint32_t encLen = 0; unsigned t = 32, n = 32; PRINT_SRC_LINE auto status = trustedGenDkgSecretAES(eid, &errStatus, errMsg.data(), encryptedDKGSecret.data(), &encLen, n); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); vector<char> errMsg1(BUF_LEN, 0); char colon = ':'; vector<char> pubShares(10000, 0); PRINT_SRC_LINE status = trustedGetPublicSharesAES(eid, &errStatus, errMsg1.data(), encryptedDKGSecret.data(), encLen, pubShares.data(), t, n); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); vector <string> g2Strings = splitString(pubShares.data(), ','); vector <libff::alt_bn128_G2> pubSharesG2; for (u_int64_t i = 0; i < g2Strings.size(); i++) { vector <string> coeffStr = splitString(g2Strings.at(i).c_str(), ':'); pubSharesG2.push_back(TestUtils::vectStringToG2(coeffStr)); } vector<char> secret(BUF_LEN, 0); PRINT_SRC_LINE status = trustedDecryptDkgSecretAES(eid, &errStatus, errMsg1.data(), encryptedDKGSecret.data(), encLen, (uint8_t *) secret.data()); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); signatures::Dkg dkgObj(t, n); vector <libff::alt_bn128_Fr> poly = TestUtils::splitStringToFr(secret.data(), colon); vector <libff::alt_bn128_G2> pubSharesDkg = dkgObj.VerificationVector(poly); for (uint32_t i = 0; i < pubSharesDkg.size(); i++) { libff::alt_bn128_G2 el = pubSharesDkg.at(i); el.to_affine_coordinates(); } REQUIRE(pubSharesG2 == pubSharesDkg); } TEST_CASE_METHOD(TestFixture, "DKG AES encrypted secret shares test", "[dkg-aes-encr-sshares]") { vector<char> errMsg(BUF_LEN, 0); vector<char> result(BUF_LEN, 0); int errStatus = 0; uint32_t encLen = 0; vector <uint8_t> encryptedDKGSecret(BUF_LEN, 0); PRINT_SRC_LINE auto status = trustedGenDkgSecretAES(eid, &errStatus, errMsg.data(), encryptedDKGSecret.data(), &encLen, 2); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); uint64_t enc_len = encLen; PRINT_SRC_LINE status = trustedSetEncryptedDkgPolyAES(eid, &errStatus, errMsg.data(), encryptedDKGSecret.data(), enc_len); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); vector <uint8_t> encrPRDHKey(BUF_LEN, 0); string pub_keyB = SAMPLE_PUBLIC_KEY_B; vector<char> s_shareG2(BUF_LEN, 0); PRINT_SRC_LINE status = trustedGetEncryptedSecretShareAES(eid, &errStatus, errMsg.data(), encrPRDHKey.data(), &encLen, result.data(), s_shareG2.data(), (char *) pub_keyB.data(), 2, 2, 1); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); } /* * ( "verification test", "[verify]" ) { char* pubshares = 
"0d72c21fc5a43452ad5f36699822309149ce6ce2cdce50dafa896e873f1b8ddd12f65a2e9c39c617a1f695f076b33b236b47ed773901fc2762f8b6f63277f5e30d7080be8e98c97f913d1920357f345dc0916c1fcb002b7beb060aa8b6b473a011bfafe9f8a5d8ea4c643ca4101e5119adbef5ae64f8dfb39cd10f1e69e31c591858d7eaca25b4c412fe909ca87ca7aadbf6d97d32d9b984e93d436f13d43ec31f40432cc750a64ac239cad6b8f78c1f1dd37427e4ff8c1cc4fe1c950fcbcec10ebfd79e0c19d0587adafe6db4f3c63ea9a329724a8804b63a9422e6898c0923209e828facf3a073254ec31af4231d999ba04eb5b7d1e0056d742a65b766f2f3"; char *sec_share = "11592366544581417165283270001305852351194685098958224535357729125789505948557"; mpz_t sshare; mpz_init(sshare); mpz_set_str(sshare, "11592366544581417165283270001305852351194685098958224535357729125789505948557", 10); int result = Verification(pubshares, sshare, 2, 0); REQUIRE(result == 1); }*/ TEST_CASE_METHOD(TestFixture, "DKG_BLS test", "[dkg-bls]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, JSONRPC_CLIENT_V2); vector <string> ecdsaKeyNames; vector <string> blsKeyNames; int schainID = TestUtils::randGen(); int dkgID = TestUtils::randGen(); PRINT_SRC_LINE TestUtils::doDKG(c, 4, 1, ecdsaKeyNames, blsKeyNames, schainID, dkgID); REQUIRE(blsKeyNames.size() == 4); schainID = TestUtils::randGen(); dkgID = TestUtils::randGen(); TestUtils::doDKG(c, 16, 5, ecdsaKeyNames, blsKeyNames, schainID, dkgID); } TEST_CASE_METHOD(TestFixture, "Delete Bls Key", "[delete-bls-key]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, JSONRPC_CLIENT_V2); std::string name = "BLS_KEY:SCHAIN_ID:123456789:NODE_ID:0:DKG_ID:0"; libff::alt_bn128_Fr key = libff::alt_bn128_Fr( "6507625568967977077291849236396320012317305261598035438182864059942098934847"); std::string key_str = TestUtils::stringFromFr(key); PRINT_SRC_LINE c.importBLSKeyShare(key_str, name); PRINT_SRC_LINE REQUIRE(c.deleteBlsKey(name)["deleted"] == true); } TEST_CASE_METHOD(TestFixture, "Backup Key", "[backup-key]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, JSONRPC_CLIENT_V2); std::ifstream sek_file("sgx_data/sgxwallet_backup_key.txt"); REQUIRE(sek_file.good()); std::string sek; sek_file >> sek; REQUIRE(sek.size() == 32); } TEST_CASE_METHOD(TestFixture, "Get ServerStatus", "[get-server-status]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, JSONRPC_CLIENT_V2); REQUIRE(c.getServerStatus()["status"] == 0); } TEST_CASE_METHOD(TestFixture, "Get ServerVersion", "[get-server-version]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, JSONRPC_CLIENT_V2); REQUIRE(c.getServerVersion()["version"] == SGXWalletServer::getVersion()); } TEST_CASE_METHOD(TestFixtureHTTPS, "Cert request sign", "[cert-sign]") { PRINT_SRC_LINE REQUIRE_NOTHROW(SGXRegistrationServer::getServer()); PRINT_SRC_LINE string csrFile = "insecure-samples/yourdomain.csr"; ifstream infile(csrFile); infile.exceptions(std::ifstream::failbit | std::ifstream::badbit); ostringstream ss; ss << infile.rdbuf(); infile.close(); PRINT_SRC_LINE auto result = SGXRegistrationServer::getServer()->SignCertificate(ss.str()); REQUIRE(result["status"] == 0); PRINT_SRC_LINE result = SGXRegistrationServer::getServer()->SignCertificate("Haha"); REQUIRE(result["status"] != 0); } TEST_CASE_METHOD(TestFixture, "DKG API test", "[dkg-api]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, JSONRPC_CLIENT_V2); string polyName = SAMPLE_POLY_NAME; PRINT_SRC_LINE Json::Value genPoly = c.generateDKGPoly(polyName, 2); REQUIRE(genPoly["status"].asInt() == 0); Json::Value publicKeys; publicKeys.append(SAMPLE_DKG_PUB_KEY_1); 
publicKeys.append(SAMPLE_DKG_PUB_KEY_2); // wrongName Json::Value genPolyWrongName = c.generateDKGPoly("poly", 2); REQUIRE(genPolyWrongName["status"].asInt() != 0); Json::Value verifVectWrongName = c.getVerificationVector("poly", 2, 2); REQUIRE(verifVectWrongName["status"].asInt() != 0); Json::Value secretSharesWrongName = c.getSecretShare("poly", publicKeys, 2, 2); REQUIRE(secretSharesWrongName["status"].asInt() != 0); // wrong_t Json::Value genPolyWrong_t = c.generateDKGPoly(polyName, 33); REQUIRE(genPolyWrong_t["status"].asInt() != 0); Json::Value verifVectWrong_t = c.getVerificationVector(polyName, 1, 2); REQUIRE(verifVectWrong_t["status"].asInt() != 0); Json::Value secretSharesWrong_t = c.getSecretShare(polyName, publicKeys, 3, 3); REQUIRE(secretSharesWrong_t["status"].asInt() != 0); // wrong_n Json::Value verifVectWrong_n = c.getVerificationVector(polyName, 2, 1); REQUIRE(verifVectWrong_n["status"].asInt() != 0); Json::Value publicKeys1; publicKeys1.append(SAMPLE_DKG_PUB_KEY_1); Json::Value secretSharesWrong_n = c.getSecretShare(polyName, publicKeys1, 2, 1); REQUIRE(secretSharesWrong_n["status"].asInt() != 0); //wrong number of publicKeys Json::Value secretSharesWrongPkeys = c.getSecretShare(polyName, publicKeys, 2, 3); REQUIRE(secretSharesWrongPkeys["status"].asInt() != 0); //wrong verif Json::Value Skeys = c.getSecretShare(polyName, publicKeys, 2, 2); Json::Value verifVect = c.getVerificationVector(polyName, 2, 2); Json::Value verificationWrongSkeys = c.dkgVerification("", "", "", 2, 2, 1); REQUIRE(verificationWrongSkeys["status"].asInt() != 0); } TEST_CASE_METHOD(TestFixture, "PolyExists test", "[dkg-poly-exists]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, JSONRPC_CLIENT_V2); string polyName = SAMPLE_POLY_NAME; PRINT_SRC_LINE Json::Value genPoly = c.generateDKGPoly(polyName, 2); REQUIRE(genPoly["status"] == 0); PRINT_SRC_LINE Json::Value polyExists = c.isPolyExists(polyName); REQUIRE(polyExists["status"] == 0); REQUIRE(polyExists["IsExist"].asBool()); PRINT_SRC_LINE Json::Value polyDoesNotExist = c.isPolyExists("Vasya"); REQUIRE(!polyDoesNotExist["IsExist"].asBool()); } TEST_CASE_METHOD(TestFixture, "AES_DKG test", "[aes-dkg]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, JSONRPC_CLIENT_V2); int n = 2, t = 2; Json::Value ethKeys[n]; Json::Value verifVects[n]; Json::Value pubEthKeys; Json::Value secretShares[n]; Json::Value pubBLSKeys[n]; Json::Value blsSigShares[n]; vector <string> pubShares(n); vector <string> polyNames(n); int schainID = TestUtils::randGen(); int dkgID = TestUtils::randGen(); for (uint8_t i = 0; i < n; i++) { PRINT_SRC_LINE ethKeys[i] = c.generateECDSAKey(); REQUIRE(ethKeys[i]["status"] == 0); string polyName = "POLY:SCHAIN_ID:" + to_string(schainID) + ":NODE_ID:" + to_string(i) + ":DKG_ID:" + to_string(dkgID); REQUIRE(ethKeys[i]["status"] == 0); auto response = c.generateDKGPoly(polyName, t); REQUIRE(response["status"] == 0); polyNames[i] = polyName; PRINT_SRC_LINE verifVects[i] = c.getVerificationVector(polyName, t, n); REQUIRE(verifVects[i]["status"] == 0); pubEthKeys.append(ethKeys[i]["publicKey"]); } for (uint8_t i = 0; i < n; i++) { PRINT_SRC_LINE secretShares[i] = c.getSecretShare(polyNames[i], pubEthKeys, t, n); REQUIRE(secretShares[i]["status"] == 0); for (uint8_t k = 0; k < t; k++) for (uint8_t j = 0; j < 4; j++) { string pubShare = verifVects[i]["verificationVector"][k][j].asString(); pubShares[i] += TestUtils::convertDecToHex(pubShare); } } int k = 0; vector <string> secShares(n); for (int i = 0; i < n; i++) for (int j = 0; j 
< n; j++) { string secretShare = secretShares[i]["secretShare"].asString().substr(192 * j, 192); secShares[i] += secretShares[j]["secretShare"].asString().substr(192 * i, 192); PRINT_SRC_LINE Json::Value verif = c.dkgVerification(pubShares[i], ethKeys[j]["keyName"].asString(), secretShare, t, n, j); REQUIRE(verif["status"] == 0); bool res = verif["result"].asBool(); k++; REQUIRE(res); } Json::Value complaintResponse = c.complaintResponse(polyNames[1], 0); REQUIRE(complaintResponse["status"] == 0); BLSSigShareSet sigShareSet(t, n); string hash = SAMPLE_HASH; auto hash_arr = make_shared < array < uint8_t, 32 >> (); uint64_t binLen; if (!hex2carray(hash.c_str(), &binLen, hash_arr->data(), 32)) { throw SGXException(INVALID_HEX, "Invalid hash"); } map <size_t, shared_ptr<BLSPublicKeyShare>> coeffs_pkeys_map; for (int i = 0; i < t; i++) { string endName = polyNames[i].substr(4); string blsName = "BLS_KEY" + polyNames[i].substr(4); auto response = c.createBLSPrivateKey(blsName, ethKeys[i]["keyName"].asString(), polyNames[i], secShares[i], t, n); REQUIRE(response["status"] == 0); PRINT_SRC_LINE pubBLSKeys[i] = c.getBLSPublicKeyShare(blsName); REQUIRE(pubBLSKeys[i]["status"] == 0); string hash = SAMPLE_HASH; blsSigShares[i] = c.blsSignMessageHash(blsName, hash, t, n); REQUIRE(blsSigShares[i]["status"] == 0); shared_ptr <string> sig_share_ptr = make_shared<string>(blsSigShares[i]["signatureShare"].asString()); BLSSigShare sig(sig_share_ptr, i + 1, t, n); sigShareSet.addSigShare(make_shared<BLSSigShare>(sig)); vector <string> pubKey_vect; for (uint8_t j = 0; j < 4; j++) { pubKey_vect.push_back(pubBLSKeys[i]["blsPublicKeyShare"][j].asString()); } BLSPublicKeyShare pubKey(make_shared < vector < string >> (pubKey_vect), t, n); PRINT_SRC_LINE REQUIRE(pubKey.VerifySigWithHelper(hash_arr, make_shared<BLSSigShare>(sig), t, n)); coeffs_pkeys_map[i + 1] = make_shared<BLSPublicKeyShare>(pubKey); } shared_ptr <BLSSignature> commonSig = sigShareSet.merge(); BLSPublicKey common_public(make_shared < map < size_t, shared_ptr < BLSPublicKeyShare >>>(coeffs_pkeys_map), t, n); REQUIRE(common_public.VerifySigWithHelper(hash_arr, commonSig, t, n)); } TEST_CASE_METHOD(TestFixture, "AES encrypt/decrypt", "[aes-encrypt-decrypt]") { int errStatus = 0; vector<char> errMsg(BUF_LEN, 0); uint32_t encLen; string key = SAMPLE_AES_KEY; vector <uint8_t> encrypted_key(BUF_LEN, 0); PRINT_SRC_LINE auto status = trustedEncryptKeyAES(eid, &errStatus, errMsg.data(), key.c_str(), encrypted_key.data(), &encLen); REQUIRE(status == 0); REQUIRE(errStatus == 0); vector<char> decr_key(BUF_LEN, 0); PRINT_SRC_LINE status = trustedDecryptKeyAES(eid, &errStatus, errMsg.data(), encrypted_key.data(), encLen, decr_key.data()); REQUIRE(status == 0); REQUIRE(errStatus == 0); REQUIRE(key.compare(decr_key.data()) == 0); } TEST_CASE_METHOD(TestFixture, "Many threads ecdsa dkg bls", "[many-threads-crypto]") { vector <thread> threads; int num_threads = 4; for (int i = 0; i < num_threads; i++) { threads.push_back(thread(TestUtils::sendRPCRequest)); } for (auto &thread : threads) { thread.join(); } } TEST_CASE_METHOD(TestFixture, "First run", "[first-run]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, JSONRPC_CLIENT_V2); try { PRINT_SRC_LINE auto keyName = genECDSAKeyAPI(c); ofstream namefile("/tmp/keyname"); namefile << keyName; PRINT_SRC_LINE } catch (JsonRpcException & e) { cerr << e.what() << endl; throw; } } TEST_CASE_METHOD(TestFixtureNoReset, "Second run", "[second-run]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, 
JSONRPC_CLIENT_V2); try { PRINT_SRC_LINE string keyName; ifstream namefile("/tmp/keyname"); getline(namefile, keyName); Json::Value sig = c.ecdsaSignMessageHash(16, keyName, SAMPLE_HASH); REQUIRE(sig["status"].asInt() == 0); Json::Value getPubKey = c.getPublicECDSAKey(keyName); REQUIRE(getPubKey["status"].asInt() == 0); } catch (JsonRpcException &e) { cerr << e.what() << endl; throw; } } TEST_CASE_METHOD(TestFixtureNoResetFromBackup, "Backup restore", "[backup-restore]") { }
null
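Within this section, the visible difference between the source copy above and the ground-truth copy below is that the encLen variable handed to the trusted* enclave calls is widened from uint32_t to uint64_t, so the caller's storage matches the 64-bit length the callee writes back. The following hedged sketch illustrates why the narrower type is an out-of-bounds write of the CWE-787 class this row is labelled with; report_len() is a hypothetical stand-in, not sgxwallet's actual API.

// Sketch (not sgxwallet code): a callee that writes a 64-bit length must be
// given 8 bytes of caller storage; pointing it at a uint32_t writes 4 bytes
// past the variable, which is the out-of-bounds write the fix avoids.
#include <cstdint>
#include <iostream>

// Stand-in for an enclave call that reports an encrypted blob length.
void report_len(uint64_t* out_len) {
    *out_len = 0x1122334455667788ULL;  // writes a full 8 bytes
}

int main() {
    uint64_t encLen = 0;   // correct: 8 bytes of storage, as in the fixed copy
    report_len(&encLen);
    std::cout << std::hex << encLen << "\n";
    // uint32_t narrow = 0; report_len((uint64_t*)&narrow);
    // ^ would write 4 bytes beyond 'narrow' -- the defect class being fixed.
    return 0;
}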
/* Copyright (C) 2019-Present SKALE Labs This file is part of sgxwallet. sgxwallet is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. sgxwallet is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with sgxwallet. If not, see <https://www.gnu.org/licenses/>. @file testw.cpp @author Stan Kladko @date 2020 */ #include <libff/algebra/fields/fp.hpp> #include <dkg/dkg.h> #include <jsonrpccpp/server/connectors/httpserver.h> #include <libff/algebra/curves/alt_bn128/alt_bn128_pp.hpp> #include <libff/algebra/exponentiation/exponentiation.hpp> #include <libff/algebra/fields/fp.hpp> #include <dkg/dkg.h> #include "sgxwallet_common.h" #include "third_party/intel/create_enclave.h" #include "secure_enclave_u.h" #include "third_party/intel/sgx_detect.h" #include <gmp.h> #include <sgx_urts.h> #include <stdio.h> #include <jsonrpccpp/client/connectors/httpclient.h> #include <sgx_tcrypto.h> #include "BLSCrypto.h" #include "ServerInit.h" #include "DKGCrypto.h" #include "SGXException.h" #include "LevelDB.h" #include "SGXWalletServer.hpp" #define CATCH_CONFIG_MAIN #include "catch.hpp" #include "stubclient.h" #include "BLSSigShare.h" #include "BLSSigShareSet.h" #include "BLSPublicKeyShare.h" #include "BLSPublicKey.h" #include "SEKManager.h" #include <thread> #include "common.h" #include "SGXRegistrationServer.h" #include "SGXWalletServer.h" #include "sgxwallet.h" #include "TestUtils.h" #include "testw.h" #define PRINT_SRC_LINE cerr << "Executing line " << to_string(__LINE__) << endl; using namespace jsonrpc; using namespace std; class TestFixture { public: TestFixture() { TestUtils::resetDB(); setOptions(L_INFO, false, true); initAll(L_INFO, false, true); } ~TestFixture() { TestUtils::destroyEnclave(); } }; class TestFixtureHTTPS { public: TestFixtureHTTPS() { TestUtils::resetDB(); setOptions(L_INFO, true, true); initAll(L_INFO, false, true); } ~TestFixtureHTTPS() { TestUtils::destroyEnclave(); } }; class TestFixtureNoResetFromBackup { public: TestFixtureNoResetFromBackup() { setFullOptions(L_INFO, false, true, true); initAll(L_INFO, false, true); } ~TestFixtureNoResetFromBackup() { TestUtils::destroyEnclave(); } }; class TestFixtureNoReset { public: TestFixtureNoReset() { setOptions(L_INFO, false, true); initAll(L_INFO, false, true); } ~TestFixtureNoReset() { TestUtils::destroyEnclave(); } }; TEST_CASE_METHOD(TestFixture, "ECDSA AES keygen and signature test", "[ecdsa-aes-key-sig-gen]") { vector<char> errMsg(BUF_LEN, 0); int errStatus = 0; vector <uint8_t> encrPrivKey(BUF_LEN, 0); vector<char> pubKeyX(BUF_LEN, 0); vector<char> pubKeyY(BUF_LEN, 0); uint64_t encLen = 0; PRINT_SRC_LINE auto status = trustedGenerateEcdsaKeyAES(eid, &errStatus, errMsg.data(), encrPrivKey.data(), &encLen, pubKeyX.data(), pubKeyY.data()); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); string hex = SAMPLE_HEX_HASH; vector<char> signatureR(BUF_LEN, 0); vector<char> signatureS(BUF_LEN, 0); uint8_t signatureV = 0; for (int i = 0; i < 50; i++) { PRINT_SRC_LINE status = trustedEcdsaSignAES(eid, &errStatus, errMsg.data(), encrPrivKey.data(), encLen, hex.data(), signatureR.data(), 
signatureS.data(), &signatureV, 16); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); } } TEST_CASE_METHOD(TestFixture, "ECDSA AES key gen", "[ecdsa-aes-key-gen]") { vector<char> errMsg(BUF_LEN, 0); int errStatus = 0; vector <uint8_t> encrPrivKey(BUF_LEN, 0); vector<char> pubKeyX(BUF_LEN, 0); vector<char> pubKeyY(BUF_LEN, 0); uint64_t encLen = 0; PRINT_SRC_LINE auto status = trustedGenerateEcdsaKeyAES(eid, &errStatus, errMsg.data(), encrPrivKey.data(), &encLen, pubKeyX.data(), pubKeyY.data()); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); } TEST_CASE_METHOD(TestFixture, "ECDSA AES get public key", "[ecdsa-aes-get-pub-key]") { int errStatus = 0; vector<char> errMsg(BUF_LEN, 0); vector <uint8_t> encPrivKey(BUF_LEN, 0); vector<char> pubKeyX(BUF_LEN, 0); vector<char> pubKeyY(BUF_LEN, 0); uint64_t encLen = 0; PRINT_SRC_LINE auto status = trustedGenerateEcdsaKeyAES(eid, &errStatus, errMsg.data(), encPrivKey.data(), &encLen, pubKeyX.data(), pubKeyY.data()); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); vector<char> receivedPubKeyX(BUF_LEN, 0); vector<char> receivedPubKeyY(BUF_LEN, 0); PRINT_SRC_LINE status = trustedGetPublicEcdsaKeyAES(eid, &errStatus, errMsg.data(), encPrivKey.data(), encLen, receivedPubKeyX.data(), receivedPubKeyY.data()); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); } /* Do later TEST_CASE_METHOD("BLS key encrypt/decrypt", "[bls-key-encrypt-decrypt]") { resetDB(); setOptions(false, false, false, true); initAll(0, false, true); //init_enclave(); int errStatus = -1; vector<char> errMsg(BUF_LEN, 0); char *encryptedKey = TestUtils::encryptTestKey(); REQUIRE(encryptedKey != nullptr); char *plaintextKey = decryptBLSKeyShareFromHex(&errStatus, errMsg.data(), encryptedKey); free(encryptedKey); REQUIRE(errStatus == 0); REQUIRE(strcmp(plaintextKey, TEST_BLS_KEY_SHARE) == 0); printf("Decrypt key completed with status: %d %s \n", errStatus, errMsg.data()); printf("Decrypted key len %d\n", (int) strlen(plaintextKey)); printf("Decrypted key: %s\n", plaintextKey); free(plaintextKey); } */ string genECDSAKeyAPI(StubClient &_c) { Json::Value genKey = _c.generateECDSAKey(); CHECK_STATE(genKey["status"].asInt() == 0); auto keyName = genKey["keyName"].asString(); CHECK_STATE(keyName.size() == ECDSA_KEY_NAME_SIZE); return keyName; } TEST_CASE_METHOD(TestFixture, "ECDSA key gen API", "[ecdsa-key-gen-api]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, JSONRPC_CLIENT_V2); for (int i = 0; i <= 20; i++) { try { PRINT_SRC_LINE auto keyName = genECDSAKeyAPI(c); PRINT_SRC_LINE Json::Value sig = c.ecdsaSignMessageHash(16, keyName, SAMPLE_HASH); REQUIRE(sig["status"].asInt() == 0); Json::Value getPubKey = c.getPublicECDSAKey(keyName); REQUIRE(getPubKey["status"].asInt() == 0); } catch (JsonRpcException &e) { cerr << e.what() << endl; throw; } } auto keyName = genECDSAKeyAPI(c); Json::Value sig = c.ecdsaSignMessageHash(10, keyName, SAMPLE_HASH); for (int i = 0; i <= 20; i++) { try { PRINT_SRC_LINE auto keyName = genECDSAKeyAPI(c); PRINT_SRC_LINE Json::Value sig = c.ecdsaSignMessageHash(10, keyName, SAMPLE_HASH); REQUIRE(sig["status"].asInt() == 0); PRINT_SRC_LINE Json::Value getPubKey = c.getPublicECDSAKey(keyName); REQUIRE(getPubKey["status"].asInt() == 0); } catch (JsonRpcException &e) { cerr << e.what() << endl; throw; } } } TEST_CASE_METHOD(TestFixture, "BLS key encrypt", "[bls-key-encrypt]") { auto key = TestUtils::encryptTestKey(); REQUIRE(key != nullptr); } TEST_CASE_METHOD(TestFixture, "DKG AES gen test", 
"[dkg-aes-gen]") { vector <uint8_t> encryptedDKGSecret(BUF_LEN, 0); vector<char> errMsg(BUF_LEN, 0); int errStatus = 0; uint64_t encLen = 0; PRINT_SRC_LINE auto status = trustedGenDkgSecretAES(eid, &errStatus, errMsg.data(), encryptedDKGSecret.data(), &encLen, 32); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); vector<char> secret(BUF_LEN, 0); vector<char> errMsg1(BUF_LEN, 0); status = trustedDecryptDkgSecretAES(eid, &errStatus, errMsg1.data(), encryptedDKGSecret.data(), encLen, (uint8_t *) secret.data()); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); } TEST_CASE_METHOD(TestFixture, "DKG AES public shares test", "[dkg-aes-pub-shares]") { vector <uint8_t> encryptedDKGSecret(BUF_LEN, 0); vector<char> errMsg(BUF_LEN, 0); int errStatus = 0; uint64_t encLen = 0; unsigned t = 32, n = 32; PRINT_SRC_LINE auto status = trustedGenDkgSecretAES(eid, &errStatus, errMsg.data(), encryptedDKGSecret.data(), &encLen, n); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); vector<char> errMsg1(BUF_LEN, 0); char colon = ':'; vector<char> pubShares(10000, 0); PRINT_SRC_LINE status = trustedGetPublicSharesAES(eid, &errStatus, errMsg1.data(), encryptedDKGSecret.data(), encLen, pubShares.data(), t, n); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); vector <string> g2Strings = splitString(pubShares.data(), ','); vector <libff::alt_bn128_G2> pubSharesG2; for (u_int64_t i = 0; i < g2Strings.size(); i++) { vector <string> coeffStr = splitString(g2Strings.at(i).c_str(), ':'); pubSharesG2.push_back(TestUtils::vectStringToG2(coeffStr)); } vector<char> secret(BUF_LEN, 0); PRINT_SRC_LINE status = trustedDecryptDkgSecretAES(eid, &errStatus, errMsg1.data(), encryptedDKGSecret.data(), encLen, (uint8_t *) secret.data()); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); signatures::Dkg dkgObj(t, n); vector <libff::alt_bn128_Fr> poly = TestUtils::splitStringToFr(secret.data(), colon); vector <libff::alt_bn128_G2> pubSharesDkg = dkgObj.VerificationVector(poly); for (uint32_t i = 0; i < pubSharesDkg.size(); i++) { libff::alt_bn128_G2 el = pubSharesDkg.at(i); el.to_affine_coordinates(); } REQUIRE(pubSharesG2 == pubSharesDkg); } TEST_CASE_METHOD(TestFixture, "DKG AES encrypted secret shares test", "[dkg-aes-encr-sshares]") { vector<char> errMsg(BUF_LEN, 0); vector<char> result(BUF_LEN, 0); int errStatus = 0; uint64_t encLen = 0; vector <uint8_t> encryptedDKGSecret(BUF_LEN, 0); PRINT_SRC_LINE auto status = trustedGenDkgSecretAES(eid, &errStatus, errMsg.data(), encryptedDKGSecret.data(), &encLen, 2); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); uint64_t enc_len = encLen; PRINT_SRC_LINE status = trustedSetEncryptedDkgPolyAES(eid, &errStatus, errMsg.data(), encryptedDKGSecret.data(), enc_len); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); vector <uint8_t> encrPRDHKey(BUF_LEN, 0); string pub_keyB = SAMPLE_PUBLIC_KEY_B; vector<char> s_shareG2(BUF_LEN, 0); PRINT_SRC_LINE status = trustedGetEncryptedSecretShareAES(eid, &errStatus, errMsg.data(), encrPRDHKey.data(), &encLen, result.data(), s_shareG2.data(), (char *) pub_keyB.data(), 2, 2, 1); REQUIRE(status == SGX_SUCCESS); REQUIRE(errStatus == SGX_SUCCESS); } /* * ( "verification test", "[verify]" ) { char* pubshares = 
"0d72c21fc5a43452ad5f36699822309149ce6ce2cdce50dafa896e873f1b8ddd12f65a2e9c39c617a1f695f076b33b236b47ed773901fc2762f8b6f63277f5e30d7080be8e98c97f913d1920357f345dc0916c1fcb002b7beb060aa8b6b473a011bfafe9f8a5d8ea4c643ca4101e5119adbef5ae64f8dfb39cd10f1e69e31c591858d7eaca25b4c412fe909ca87ca7aadbf6d97d32d9b984e93d436f13d43ec31f40432cc750a64ac239cad6b8f78c1f1dd37427e4ff8c1cc4fe1c950fcbcec10ebfd79e0c19d0587adafe6db4f3c63ea9a329724a8804b63a9422e6898c0923209e828facf3a073254ec31af4231d999ba04eb5b7d1e0056d742a65b766f2f3"; char *sec_share = "11592366544581417165283270001305852351194685098958224535357729125789505948557"; mpz_t sshare; mpz_init(sshare); mpz_set_str(sshare, "11592366544581417165283270001305852351194685098958224535357729125789505948557", 10); int result = Verification(pubshares, sshare, 2, 0); REQUIRE(result == 1); }*/ TEST_CASE_METHOD(TestFixture, "DKG_BLS test", "[dkg-bls]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, JSONRPC_CLIENT_V2); vector <string> ecdsaKeyNames; vector <string> blsKeyNames; int schainID = TestUtils::randGen(); int dkgID = TestUtils::randGen(); PRINT_SRC_LINE TestUtils::doDKG(c, 4, 1, ecdsaKeyNames, blsKeyNames, schainID, dkgID); REQUIRE(blsKeyNames.size() == 4); schainID = TestUtils::randGen(); dkgID = TestUtils::randGen(); TestUtils::doDKG(c, 16, 5, ecdsaKeyNames, blsKeyNames, schainID, dkgID); } TEST_CASE_METHOD(TestFixture, "Delete Bls Key", "[delete-bls-key]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, JSONRPC_CLIENT_V2); std::string name = "BLS_KEY:SCHAIN_ID:123456789:NODE_ID:0:DKG_ID:0"; libff::alt_bn128_Fr key = libff::alt_bn128_Fr( "6507625568967977077291849236396320012317305261598035438182864059942098934847"); std::string key_str = TestUtils::stringFromFr(key); PRINT_SRC_LINE c.importBLSKeyShare(key_str, name); PRINT_SRC_LINE REQUIRE(c.deleteBlsKey(name)["deleted"] == true); } TEST_CASE_METHOD(TestFixture, "Backup Key", "[backup-key]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, JSONRPC_CLIENT_V2); std::ifstream sek_file("sgx_data/sgxwallet_backup_key.txt"); REQUIRE(sek_file.good()); std::string sek; sek_file >> sek; REQUIRE(sek.size() == 32); } TEST_CASE_METHOD(TestFixture, "Get ServerStatus", "[get-server-status]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, JSONRPC_CLIENT_V2); REQUIRE(c.getServerStatus()["status"] == 0); } TEST_CASE_METHOD(TestFixture, "Get ServerVersion", "[get-server-version]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, JSONRPC_CLIENT_V2); REQUIRE(c.getServerVersion()["version"] == SGXWalletServer::getVersion()); } TEST_CASE_METHOD(TestFixtureHTTPS, "Cert request sign", "[cert-sign]") { PRINT_SRC_LINE REQUIRE_NOTHROW(SGXRegistrationServer::getServer()); PRINT_SRC_LINE string csrFile = "insecure-samples/yourdomain.csr"; ifstream infile(csrFile); infile.exceptions(std::ifstream::failbit | std::ifstream::badbit); ostringstream ss; ss << infile.rdbuf(); infile.close(); PRINT_SRC_LINE auto result = SGXRegistrationServer::getServer()->SignCertificate(ss.str()); REQUIRE(result["status"] == 0); PRINT_SRC_LINE result = SGXRegistrationServer::getServer()->SignCertificate("Haha"); REQUIRE(result["status"] != 0); } TEST_CASE_METHOD(TestFixture, "DKG API test", "[dkg-api]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, JSONRPC_CLIENT_V2); string polyName = SAMPLE_POLY_NAME; PRINT_SRC_LINE Json::Value genPoly = c.generateDKGPoly(polyName, 2); REQUIRE(genPoly["status"].asInt() == 0); Json::Value publicKeys; publicKeys.append(SAMPLE_DKG_PUB_KEY_1); 
publicKeys.append(SAMPLE_DKG_PUB_KEY_2); // wrongName Json::Value genPolyWrongName = c.generateDKGPoly("poly", 2); REQUIRE(genPolyWrongName["status"].asInt() != 0); Json::Value verifVectWrongName = c.getVerificationVector("poly", 2, 2); REQUIRE(verifVectWrongName["status"].asInt() != 0); Json::Value secretSharesWrongName = c.getSecretShare("poly", publicKeys, 2, 2); REQUIRE(secretSharesWrongName["status"].asInt() != 0); // wrong_t Json::Value genPolyWrong_t = c.generateDKGPoly(polyName, 33); REQUIRE(genPolyWrong_t["status"].asInt() != 0); Json::Value verifVectWrong_t = c.getVerificationVector(polyName, 1, 2); REQUIRE(verifVectWrong_t["status"].asInt() != 0); Json::Value secretSharesWrong_t = c.getSecretShare(polyName, publicKeys, 3, 3); REQUIRE(secretSharesWrong_t["status"].asInt() != 0); // wrong_n Json::Value verifVectWrong_n = c.getVerificationVector(polyName, 2, 1); REQUIRE(verifVectWrong_n["status"].asInt() != 0); Json::Value publicKeys1; publicKeys1.append(SAMPLE_DKG_PUB_KEY_1); Json::Value secretSharesWrong_n = c.getSecretShare(polyName, publicKeys1, 2, 1); REQUIRE(secretSharesWrong_n["status"].asInt() != 0); //wrong number of publicKeys Json::Value secretSharesWrongPkeys = c.getSecretShare(polyName, publicKeys, 2, 3); REQUIRE(secretSharesWrongPkeys["status"].asInt() != 0); //wrong verif Json::Value Skeys = c.getSecretShare(polyName, publicKeys, 2, 2); Json::Value verifVect = c.getVerificationVector(polyName, 2, 2); Json::Value verificationWrongSkeys = c.dkgVerification("", "", "", 2, 2, 1); REQUIRE(verificationWrongSkeys["status"].asInt() != 0); } TEST_CASE_METHOD(TestFixture, "PolyExists test", "[dkg-poly-exists]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, JSONRPC_CLIENT_V2); string polyName = SAMPLE_POLY_NAME; PRINT_SRC_LINE Json::Value genPoly = c.generateDKGPoly(polyName, 2); REQUIRE(genPoly["status"] == 0); PRINT_SRC_LINE Json::Value polyExists = c.isPolyExists(polyName); REQUIRE(polyExists["status"] == 0); REQUIRE(polyExists["IsExist"].asBool()); PRINT_SRC_LINE Json::Value polyDoesNotExist = c.isPolyExists("Vasya"); REQUIRE(!polyDoesNotExist["IsExist"].asBool()); } TEST_CASE_METHOD(TestFixture, "AES_DKG test", "[aes-dkg]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, JSONRPC_CLIENT_V2); int n = 2, t = 2; Json::Value ethKeys[n]; Json::Value verifVects[n]; Json::Value pubEthKeys; Json::Value secretShares[n]; Json::Value pubBLSKeys[n]; Json::Value blsSigShares[n]; vector <string> pubShares(n); vector <string> polyNames(n); int schainID = TestUtils::randGen(); int dkgID = TestUtils::randGen(); for (uint8_t i = 0; i < n; i++) { PRINT_SRC_LINE ethKeys[i] = c.generateECDSAKey(); REQUIRE(ethKeys[i]["status"] == 0); string polyName = "POLY:SCHAIN_ID:" + to_string(schainID) + ":NODE_ID:" + to_string(i) + ":DKG_ID:" + to_string(dkgID); REQUIRE(ethKeys[i]["status"] == 0); auto response = c.generateDKGPoly(polyName, t); REQUIRE(response["status"] == 0); polyNames[i] = polyName; PRINT_SRC_LINE verifVects[i] = c.getVerificationVector(polyName, t, n); REQUIRE(verifVects[i]["status"] == 0); pubEthKeys.append(ethKeys[i]["publicKey"]); } for (uint8_t i = 0; i < n; i++) { PRINT_SRC_LINE secretShares[i] = c.getSecretShare(polyNames[i], pubEthKeys, t, n); REQUIRE(secretShares[i]["status"] == 0); for (uint8_t k = 0; k < t; k++) for (uint8_t j = 0; j < 4; j++) { string pubShare = verifVects[i]["verificationVector"][k][j].asString(); pubShares[i] += TestUtils::convertDecToHex(pubShare); } } int k = 0; vector <string> secShares(n); for (int i = 0; i < n; i++) for (int j = 0; j 
< n; j++) { string secretShare = secretShares[i]["secretShare"].asString().substr(192 * j, 192); secShares[i] += secretShares[j]["secretShare"].asString().substr(192 * i, 192); PRINT_SRC_LINE Json::Value verif = c.dkgVerification(pubShares[i], ethKeys[j]["keyName"].asString(), secretShare, t, n, j); REQUIRE(verif["status"] == 0); bool res = verif["result"].asBool(); k++; REQUIRE(res); } Json::Value complaintResponse = c.complaintResponse(polyNames[1], 0); REQUIRE(complaintResponse["status"] == 0); BLSSigShareSet sigShareSet(t, n); string hash = SAMPLE_HASH; auto hash_arr = make_shared < array < uint8_t, 32 >> (); uint64_t binLen; if (!hex2carray(hash.c_str(), &binLen, hash_arr->data(), 32)) { throw SGXException(INVALID_HEX, "Invalid hash"); } map <size_t, shared_ptr<BLSPublicKeyShare>> coeffs_pkeys_map; for (int i = 0; i < t; i++) { string endName = polyNames[i].substr(4); string blsName = "BLS_KEY" + polyNames[i].substr(4); auto response = c.createBLSPrivateKey(blsName, ethKeys[i]["keyName"].asString(), polyNames[i], secShares[i], t, n); REQUIRE(response["status"] == 0); PRINT_SRC_LINE pubBLSKeys[i] = c.getBLSPublicKeyShare(blsName); REQUIRE(pubBLSKeys[i]["status"] == 0); string hash = SAMPLE_HASH; blsSigShares[i] = c.blsSignMessageHash(blsName, hash, t, n); REQUIRE(blsSigShares[i]["status"] == 0); shared_ptr <string> sig_share_ptr = make_shared<string>(blsSigShares[i]["signatureShare"].asString()); BLSSigShare sig(sig_share_ptr, i + 1, t, n); sigShareSet.addSigShare(make_shared<BLSSigShare>(sig)); vector <string> pubKey_vect; for (uint8_t j = 0; j < 4; j++) { pubKey_vect.push_back(pubBLSKeys[i]["blsPublicKeyShare"][j].asString()); } BLSPublicKeyShare pubKey(make_shared < vector < string >> (pubKey_vect), t, n); PRINT_SRC_LINE REQUIRE(pubKey.VerifySigWithHelper(hash_arr, make_shared<BLSSigShare>(sig), t, n)); coeffs_pkeys_map[i + 1] = make_shared<BLSPublicKeyShare>(pubKey); } shared_ptr <BLSSignature> commonSig = sigShareSet.merge(); BLSPublicKey common_public(make_shared < map < size_t, shared_ptr < BLSPublicKeyShare >>>(coeffs_pkeys_map), t, n); REQUIRE(common_public.VerifySigWithHelper(hash_arr, commonSig, t, n)); } TEST_CASE_METHOD(TestFixture, "AES encrypt/decrypt", "[aes-encrypt-decrypt]") { int errStatus = 0; vector<char> errMsg(BUF_LEN, 0); uint64_t encLen; string key = SAMPLE_AES_KEY; vector <uint8_t> encrypted_key(BUF_LEN, 0); PRINT_SRC_LINE auto status = trustedEncryptKeyAES(eid, &errStatus, errMsg.data(), key.c_str(), encrypted_key.data(), &encLen); REQUIRE(status == 0); REQUIRE(errStatus == 0); vector<char> decr_key(BUF_LEN, 0); PRINT_SRC_LINE status = trustedDecryptKeyAES(eid, &errStatus, errMsg.data(), encrypted_key.data(), encLen, decr_key.data()); REQUIRE(status == 0); REQUIRE(errStatus == 0); REQUIRE(key.compare(decr_key.data()) == 0); } TEST_CASE_METHOD(TestFixture, "Many threads ecdsa dkg bls", "[many-threads-crypto]") { vector <thread> threads; int num_threads = 4; for (int i = 0; i < num_threads; i++) { threads.push_back(thread(TestUtils::sendRPCRequest)); } for (auto &thread : threads) { thread.join(); } } TEST_CASE_METHOD(TestFixture, "First run", "[first-run]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, JSONRPC_CLIENT_V2); try { PRINT_SRC_LINE auto keyName = genECDSAKeyAPI(c); ofstream namefile("/tmp/keyname"); namefile << keyName; PRINT_SRC_LINE } catch (JsonRpcException & e) { cerr << e.what() << endl; throw; } } TEST_CASE_METHOD(TestFixtureNoReset, "Second run", "[second-run]") { HttpClient client(RPC_ENDPOINT); StubClient c(client, 
JSONRPC_CLIENT_V2); try { PRINT_SRC_LINE string keyName; ifstream namefile("/tmp/keyname"); getline(namefile, keyName); Json::Value sig = c.ecdsaSignMessageHash(16, keyName, SAMPLE_HASH); REQUIRE(sig["status"].asInt() == 0); Json::Value getPubKey = c.getPublicECDSAKey(keyName); REQUIRE(getPubKey["status"].asInt() == 0); } catch (JsonRpcException &e) { cerr << e.what() << endl; throw; } } TEST_CASE_METHOD(TestFixtureNoResetFromBackup, "Backup restore", "[backup-restore]") { }
null
278
CWE-787
CVE-2021-36977
id: OSV-2021-440
summary: Heap-buffer-overflow in H5MM_memcpy
details: |
  OSS-Fuzz report: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=31265
  Crash type: Heap-buffer-overflow WRITE 4
  Crash state:
  H5MM_memcpy
  H5MM_malloc
  H5C_load_entry
modified: '2021-06-21T06:56:28.151210Z'
published: '2021-02-24T00:00:05.141282Z'
references:
- type: REPORT
  url: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=31265
affected:
- package:
    name: matio
    ecosystem: OSS-Fuzz
  ranges:
  - type: GIT
    repo: git://git.code.sf.net/p/matio/matio
    events:
    - introduced: 1ce8f2d1845ecdde19a35605cabdbb884776d52d
  versions:
  - v1.5.20
  - v1.5.21
  ecosystem_specific:
    severity: HIGH
null
id: OSV-2021-440
summary: Heap-buffer-overflow in H5MM_memcpy
details: |
  OSS-Fuzz report: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=31265
  Crash type: Heap-buffer-overflow WRITE 4
  Crash state:
  H5MM_memcpy
  H5MM_malloc
  H5C_load_entry
modified: '2021-06-21T06:56:28.151210Z'
published: '2021-02-24T00:00:05.141282Z'
references:
- type: REPORT
  url: https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=31265
affected:
- package:
    name: matio
    ecosystem: OSS-Fuzz
  ranges:
  - type: GIT
    repo: git://git.code.sf.net/p/matio/matio
    events:
    - introduced: 1ce8f2d1845ecdde19a35605cabdbb884776d52d
    - fixed: cddcdad17864c4b95ead23581047b41636f180a3
  versions:
  - v1.5.20
  - v1.5.21
  ecosystem_specific:
    severity: HIGH
null
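The two OSV records above (row 278) describe an out-of-bounds write (CWE-787) in H5MM_memcpy reached via H5C_load_entry in HDF5 code exercised through matio; the fixing commit is given only by its hash, so the patch itself is not part of this row. As a generic, hedged illustration of the invariant such a fix has to restore, the C++ sketch below shows a copy helper that validates destination capacity before writing; safe_memcpy and dst_capacity are made-up names for this sketch and are not matio or HDF5 APIs.

#include <cstddef>
#include <cstring>

// Illustrative only: a bounded copy that refuses to write past the end of
// the destination buffer, which is exactly the invariant a CWE-787
// heap-buffer-overflow WRITE violates. Not the actual matio/HDF5 fix.
static bool safe_memcpy(void *dst, std::size_t dst_capacity,
                        const void *src, std::size_t n)
{
    if (dst == nullptr || src == nullptr || n > dst_capacity)
        return false;            // reject the request instead of overflowing
    std::memcpy(dst, src, n);
    return true;
}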
279
CWE-787
CVE-2021-36978
#include <qpdf/Pl_ASCII85Decoder.hh> #include <qpdf/QTC.hh> #include <stdexcept> #include <string.h> Pl_ASCII85Decoder::Pl_ASCII85Decoder(char const* identifier, Pipeline* next) : Pipeline(identifier, next), pos(0), eod(0) { memset(this->inbuf, 117, 5); } Pl_ASCII85Decoder::~Pl_ASCII85Decoder() { } void Pl_ASCII85Decoder::write(unsigned char* buf, size_t len) { if (eod > 1) { return; } for (size_t i = 0; i < len; ++i) { if (eod > 1) { break; } else if (eod == 1) { if (buf[i] == '>') { flush(); eod = 2; } else { throw std::runtime_error( "broken end-of-data sequence in base 85 data"); } } else { switch (buf[i]) { case ' ': case '\f': case '\v': case '\t': case '\r': case '\n': QTC::TC("libtests", "Pl_ASCII85Decoder ignore space"); // ignore whitespace break; case '~': eod = 1; break; case 'z': if (pos != 0) { throw std::runtime_error( "unexpected z during base 85 decode"); } else { QTC::TC("libtests", "Pl_ASCII85Decoder read z"); unsigned char zeroes[4]; memset(zeroes, '\0', 4); getNext()->write(zeroes, 4); } break; default: if ((buf[i] < 33) || (buf[i] > 117)) { throw std::runtime_error( "character out of range during base 85 decode"); } else { this->inbuf[this->pos++] = buf[i]; if (pos == 5) { flush(); } } break; } } } } void Pl_ASCII85Decoder::flush() { if (this->pos == 0) { QTC::TC("libtests", "Pl_ASCII85Decoder no-op flush"); return; } unsigned long lval = 0; for (int i = 0; i < 5; ++i) { lval *= 85; lval += (this->inbuf[i] - 33U); } unsigned char outbuf[4]; memset(outbuf, 0, 4); for (int i = 3; i >= 0; --i) { outbuf[i] = lval & 0xff; lval >>= 8; } QTC::TC("libtests", "Pl_ASCII85Decoder partial flush", (this->pos == 5) ? 0 : 1); getNext()->write(outbuf, this->pos - 1); this->pos = 0; memset(this->inbuf, 117, 5); } void Pl_ASCII85Decoder::finish() { flush(); getNext()->finish(); }
null
#include <qpdf/Pl_ASCII85Decoder.hh> #include <qpdf/QTC.hh> #include <stdexcept> #include <string.h> Pl_ASCII85Decoder::Pl_ASCII85Decoder(char const* identifier, Pipeline* next) : Pipeline(identifier, next), pos(0), eod(0) { memset(this->inbuf, 117, 5); } Pl_ASCII85Decoder::~Pl_ASCII85Decoder() { } void Pl_ASCII85Decoder::write(unsigned char* buf, size_t len) { if (eod > 1) { return; } for (size_t i = 0; i < len; ++i) { if (eod > 1) { break; } else if (eod == 1) { if (buf[i] == '>') { flush(); eod = 2; } else { throw std::runtime_error( "broken end-of-data sequence in base 85 data"); } } else { switch (buf[i]) { case ' ': case '\f': case '\v': case '\t': case '\r': case '\n': QTC::TC("libtests", "Pl_ASCII85Decoder ignore space"); // ignore whitespace break; case '~': eod = 1; break; case 'z': if (pos != 0) { throw std::runtime_error( "unexpected z during base 85 decode"); } else { QTC::TC("libtests", "Pl_ASCII85Decoder read z"); unsigned char zeroes[4]; memset(zeroes, '\0', 4); getNext()->write(zeroes, 4); } break; default: if ((buf[i] < 33) || (buf[i] > 117)) { throw std::runtime_error( "character out of range during base 85 decode"); } else { this->inbuf[this->pos++] = buf[i]; if (pos == 5) { flush(); } } break; } } } } void Pl_ASCII85Decoder::flush() { if (this->pos == 0) { QTC::TC("libtests", "Pl_ASCII85Decoder no-op flush"); return; } unsigned long lval = 0; for (int i = 0; i < 5; ++i) { lval *= 85; lval += (this->inbuf[i] - 33U); } unsigned char outbuf[4]; memset(outbuf, 0, 4); for (int i = 3; i >= 0; --i) { outbuf[i] = lval & 0xff; lval >>= 8; } QTC::TC("libtests", "Pl_ASCII85Decoder partial flush", (this->pos == 5) ? 0 : 1); // Reset before calling getNext()->write in case that throws an // exception. auto t = this->pos - 1; this->pos = 0; memset(this->inbuf, 117, 5); getNext()->write(outbuf, t); } void Pl_ASCII85Decoder::finish() { flush(); getNext()->finish(); }
null
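Row 279 pairs the vulnerable and fixed Pl_ASCII85Decoder.cc from qpdf; the only functional change is in flush(), where the fixed version clears pos and inbuf before calling getNext()->write(), so an exception thrown downstream cannot leave the decoder holding stale state that is later reused out of bounds. A minimal C++ sketch of that ordering pattern follows; Decoder, Downstream, buf_, pos_ and downstream_ are illustrative names, not qpdf classes.

#include <cstddef>
#include <cstring>

// Sketch of the "reset member state before a call that may throw" pattern
// applied in the fixed Pl_ASCII85Decoder::flush(). All names are hypothetical.
struct Downstream {
    void write(const unsigned char *, std::size_t) { /* may throw in real code */ }
};

struct Decoder {
    unsigned char buf_[5];
    std::size_t pos_ = 0;
    Downstream *downstream_ = nullptr;

    void flush() {
        if (pos_ == 0)
            return;
        unsigned char out[4] = {0};
        // ... decode buf_ into out, as the real flush() does ...
        std::size_t n = pos_ - 1;
        pos_ = 0;                      // reset our own state first,
        std::memset(buf_, 117, 5);     // so a throwing write() below
        downstream_->write(out, n);    // cannot leave pos_/buf_ stale
    }
};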
280
CWE-787
CVE-2021-36979
/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh, 2015 */ /* Sample code to demonstrate how to emulate ARM code */ #include <unicorn/unicorn.h> #include <string.h> // code to be emulated #define ARM_CODE "\x37\x00\xa0\xe3\x03\x10\x42\xe0" // mov r0, #0x37; sub r1, r2, r3 #define THUMB_CODE "\x83\xb0" // sub sp, #0xc // memory address where emulation starts #define ADDRESS 0x10000 static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf(">>> Tracing basic block at 0x%"PRIx64 ", block size = 0x%x\n", address, size); } static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf(">>> Tracing instruction at 0x%"PRIx64 ", instruction size = 0x%x\n", address, size); } static void test_arm(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; int r0 = 0x1234; // R0 register int r2 = 0x6789; // R1 register int r3 = 0x3333; // R2 register int r1; // R1 register printf("Emulate ARM code\n"); // Initialize emulator in ARM mode err = uc_open(UC_ARCH_ARM, UC_MODE_ARM, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, ARM_CODE, sizeof(ARM_CODE) - 1); // initialize machine registers uc_reg_write(uc, UC_ARM_REG_R0, &r0); uc_reg_write(uc, UC_ARM_REG_R2, &r2); uc_reg_write(uc, UC_ARM_REG_R3, &r3); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing one instruction at ADDRESS with customized callback uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(ARM_CODE) -1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } // now print out some registers printf(">>> Emulation done. Below is the CPU context\n"); uc_reg_read(uc, UC_ARM_REG_R0, &r0); uc_reg_read(uc, UC_ARM_REG_R1, &r1); printf(">>> R0 = 0x%x\n", r0); printf(">>> R1 = 0x%x\n", r1); uc_close(uc); } static void test_thumb(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; int sp = 0x1234; // R0 register printf("Emulate THUMB code\n"); // Initialize emulator in ARM mode err = uc_open(UC_ARCH_ARM, UC_MODE_THUMB, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, THUMB_CODE, sizeof(THUMB_CODE) - 1); // initialize machine registers uc_reg_write(uc, UC_ARM_REG_SP, &sp); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing one instruction at ADDRESS with customized callback uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. // Note we start at ADDRESS | 1 to indicate THUMB mode. err = uc_emu_start(uc, ADDRESS | 1, ADDRESS + sizeof(THUMB_CODE) -1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } // now print out some registers printf(">>> Emulation done. 
Below is the CPU context\n"); uc_reg_read(uc, UC_ARM_REG_SP, &sp); printf(">>> SP = 0x%x\n", sp); uc_close(uc); } int main(int argc, char **argv, char **envp) { // dynamically load shared library #ifdef DYNLOAD if (!uc_dyn_load(NULL, 0)) { printf("Error dynamically loading shared library.\n"); printf("Please check that unicorn.dll/unicorn.so is available as well as\n"); printf("any other dependent dll/so files.\n"); printf("The easiest way is to place them in the same directory as this app.\n"); return 1; } #endif test_arm(); printf("==========================\n"); test_thumb(); // dynamically free shared library #ifdef DYNLOAD uc_dyn_free(); #endif return 0; }
null
/* Unicorn Emulator Engine */ /* By Nguyen Anh Quynh, 2015 */ /* Sample code to demonstrate how to emulate ARM code */ #include <unicorn/unicorn.h> #include <string.h> // code to be emulated #define ARM_CODE "\x37\x00\xa0\xe3\x03\x10\x42\xe0" // mov r0, #0x37; sub r1, r2, r3 #define THUMB_CODE "\x83\xb0" // sub sp, #0xc #define ARM_THUM_COND_CODE "\x9a\x42\x14\xbf\x68\x22\x4d\x22" // 'cmp r2, r3\nit ne\nmov r2, #0x68\nmov r2, #0x4d' // memory address where emulation starts #define ADDRESS 0x10000 static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf(">>> Tracing basic block at 0x%"PRIx64 ", block size = 0x%x\n", address, size); } static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data) { printf(">>> Tracing instruction at 0x%"PRIx64 ", instruction size = 0x%x\n", address, size); } static void test_arm(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; int r0 = 0x1234; // R0 register int r2 = 0x6789; // R1 register int r3 = 0x3333; // R2 register int r1; // R1 register printf("Emulate ARM code\n"); // Initialize emulator in ARM mode err = uc_open(UC_ARCH_ARM, UC_MODE_ARM, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, ARM_CODE, sizeof(ARM_CODE) - 1); // initialize machine registers uc_reg_write(uc, UC_ARM_REG_R0, &r0); uc_reg_write(uc, UC_ARM_REG_R2, &r2); uc_reg_write(uc, UC_ARM_REG_R3, &r3); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing one instruction at ADDRESS with customized callback uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(ARM_CODE) -1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } // now print out some registers printf(">>> Emulation done. Below is the CPU context\n"); uc_reg_read(uc, UC_ARM_REG_R0, &r0); uc_reg_read(uc, UC_ARM_REG_R1, &r1); printf(">>> R0 = 0x%x\n", r0); printf(">>> R1 = 0x%x\n", r1); uc_close(uc); } static void test_thumb(void) { uc_engine *uc; uc_err err; uc_hook trace1, trace2; int sp = 0x1234; // R0 register printf("Emulate THUMB code\n"); // Initialize emulator in ARM mode err = uc_open(UC_ARCH_ARM, UC_MODE_THUMB, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } // map 2MB memory for this emulation uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); // write machine code to be emulated to memory uc_mem_write(uc, ADDRESS, THUMB_CODE, sizeof(THUMB_CODE) - 1); // initialize machine registers uc_reg_write(uc, UC_ARM_REG_SP, &sp); // tracing all basic blocks with customized callback uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); // tracing one instruction at ADDRESS with customized callback uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, ADDRESS, ADDRESS); // emulate machine code in infinite time (last param = 0), or when // finishing all the code. // Note we start at ADDRESS | 1 to indicate THUMB mode. 
err = uc_emu_start(uc, ADDRESS | 1, ADDRESS + sizeof(THUMB_CODE) -1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } // now print out some registers printf(">>> Emulation done. Below is the CPU context\n"); uc_reg_read(uc, UC_ARM_REG_SP, &sp); printf(">>> SP = 0x%x\n", sp); uc_close(uc); } static void test_thumb_ite() { uc_engine *uc; uc_err err; uint32_t sp = 0x1234; uint32_t r2 = 0, r3 = 1; uint32_t step_r2, step_r3; int i, addr=ADDRESS; printf("Emulate a THUMB ITE block as a whole or per instruction.\n"); err = uc_open(UC_ARCH_ARM, UC_MODE_THUMB, &uc); if (err) { printf("Failed on uc_open() with error returned: %u (%s)\n", err, uc_strerror(err)); return; } uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); uc_mem_write(uc, ADDRESS, ARM_THUM_COND_CODE, sizeof(ARM_THUM_COND_CODE) - 1); uc_reg_write(uc, UC_ARM_REG_SP, &sp); uc_reg_write(uc, UC_ARM_REG_R2, &r2); uc_reg_write(uc, UC_ARM_REG_R3, &r3); // Run once. printf("Running the entire binary.\n"); err = uc_emu_start(uc, ADDRESS | 1, ADDRESS + sizeof(ARM_THUM_COND_CODE) - 1, 0, 0); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } uc_reg_read(uc, UC_ARM_REG_R2, &r2); uc_reg_read(uc, UC_ARM_REG_R3, &r3); printf(">>> R2: %d\n", r2); printf(">>> R3: %d\n\n", r3); // Step each instruction. printf("Running the binary one instruction at a time.\n"); for (i = 0; i < sizeof(ARM_THUM_COND_CODE) / 2; i++) { err = uc_emu_start(uc, addr | 1, ADDRESS + sizeof(ARM_THUM_COND_CODE) - 1, 0, 1); if (err) { printf("Failed on uc_emu_start() with error returned: %u\n", err); } uc_reg_read(uc, UC_ARM_REG_PC, &addr); } uc_reg_read(uc, UC_ARM_REG_R2, &step_r2); uc_reg_read(uc, UC_ARM_REG_R3, &step_r3); printf(">>> R2: %d\n", step_r2); printf(">>> R3: %d\n\n", step_r3); if (step_r2 != r2 || step_r3 != r3) { printf("Failed with ARM ITE blocks stepping!\n"); } uc_close(uc); } int main(int argc, char **argv, char **envp) { // dynamically load shared library #ifdef DYNLOAD if (!uc_dyn_load(NULL, 0)) { printf("Error dynamically loading shared library.\n"); printf("Please check that unicorn.dll/unicorn.so is available as well as\n"); printf("any other dependent dll/so files.\n"); printf("The easiest way is to place them in the same directory as this app.\n"); return 1; } #endif test_arm(); printf("==========================\n"); test_thumb(); printf("==========================\n"); test_thumb_ite(); // dynamically free shared library #ifdef DYNLOAD uc_dyn_free(); #endif return 0; }
null
281
CWE-787
CVE-2021-37232
null
null
#!/bin/bash
set -xe
./AtomicParsley ./tests/issue-32.mp4 -T 1 -t +
null
282
CWE-787
CVE-2021-3751
/** @file buffer.c * @brief Functions to read/write raw big endian data * * Copyright (c) 2014 Bartek Fabiszewski * http://www.fabiszewski.net * * This file is part of libmobi. * Licensed under LGPL, either version 3, or any later. * See <http://www.gnu.org/licenses/> */ #include <stdlib.h> #include <string.h> #include "buffer.h" #include "debug.h" /** @brief Initializer for MOBIBuffer structure It allocates memory for structure and for data. Memory should be freed with mobi_buffer_free(). @param[in] len Size of data to be allocated for the buffer @return MOBIBuffer on success, NULL otherwise */ MOBIBuffer * mobi_buffer_init(const size_t len) { unsigned char *data = malloc(len); if (data == NULL) { debug_print("%s", "Buffer data allocation failed\n"); return NULL; } MOBIBuffer *buf = mobi_buffer_init_null(data, len); if (buf == NULL) { free(data); } return buf; } /** @brief Initializer for MOBIBuffer structure It allocates memory for structure but, unlike mobi_buffer_init(), it does not allocate memory for data. Instead it works on external data. Memory should be freed with mobi_buffer_free_null() (buf->data will not be deallocated). @param[in,out] data Set data as buffer data @param[in] len Size of data held by the buffer @return MOBIBuffer on success, NULL otherwise */ MOBIBuffer * mobi_buffer_init_null(unsigned char *data, const size_t len) { MOBIBuffer *buf = malloc(sizeof(MOBIBuffer)); if (buf == NULL) { debug_print("%s", "Buffer allocation failed\n"); return NULL; } buf->data = data; buf->offset = 0; buf->maxlen = len; buf->error = MOBI_SUCCESS; return buf; } /** @brief Resize buffer Smaller size than offset will cause data truncation. @param[in,out] buf MOBIBuffer structure to be filled with data @param[in] newlen New buffer size */ void mobi_buffer_resize(MOBIBuffer *buf, const size_t newlen) { unsigned char *tmp = realloc(buf->data, newlen); if (tmp == NULL) { debug_print("%s", "Buffer allocation failed\n"); buf->error = MOBI_MALLOC_FAILED; return; } buf->data = tmp; buf->maxlen = newlen; if (buf->offset >= newlen) { buf->offset = newlen - 1; } debug_print("Buffer successfully resized to %zu\n", newlen); buf->error = MOBI_SUCCESS; } /** @brief Adds 8-bit value to MOBIBuffer @param[in,out] buf MOBIBuffer structure to be filled with data @param[in] data Integer to be put into the buffer */ void mobi_buffer_add8(MOBIBuffer *buf, const uint8_t data) { if (buf->offset + 1 > buf->maxlen) { debug_print("%s", "Buffer full\n"); buf->error = MOBI_BUFFER_END; return; } buf->data[buf->offset++] = data; } /** @brief Adds 16-bit value to MOBIBuffer @param[in,out] buf MOBIBuffer structure to be filled with data @param[in] data Integer to be put into the buffer */ void mobi_buffer_add16(MOBIBuffer *buf, const uint16_t data) { if (buf->offset + 2 > buf->maxlen) { debug_print("%s", "Buffer full\n"); buf->error = MOBI_BUFFER_END; return; } unsigned char *buftr = buf->data + buf->offset; *buftr++ = (uint8_t)((uint32_t)(data & 0xff00U) >> 8); *buftr = (uint8_t)((uint32_t)(data & 0xffU)); buf->offset += 2; } /** @brief Adds 32-bit value to MOBIBuffer @param[in,out] buf MOBIBuffer structure to be filled with data @param[in] data Integer to be put into the buffer */ void mobi_buffer_add32(MOBIBuffer *buf, const uint32_t data) { if (buf->offset + 4 > buf->maxlen) { debug_print("%s", "Buffer full\n"); buf->error = MOBI_BUFFER_END; return; } unsigned char *buftr = buf->data + buf->offset; *buftr++ = (uint8_t)((uint32_t)(data & 0xff000000U) >> 24); *buftr++ = (uint8_t)((uint32_t)(data & 0xff0000U) >> 16); 
*buftr++ = (uint8_t)((uint32_t)(data & 0xff00U) >> 8); *buftr = (uint8_t)((uint32_t)(data & 0xffU)); buf->offset += 4; } /** @brief Adds raw data to MOBIBuffer @param[in,out] buf MOBIBuffer structure to be filled with data @param[in] data Pointer to read data @param[in] len Size of the read data */ void mobi_buffer_addraw(MOBIBuffer *buf, const unsigned char* data, const size_t len) { if (buf->offset + len > buf->maxlen) { debug_print("%s", "Buffer full\n"); buf->error = MOBI_BUFFER_END; return; } memcpy(buf->data + buf->offset, data, len); buf->offset += len; } /** @brief Adds string to MOBIBuffer without null terminator @param[in,out] buf MOBIBuffer structure to be filled with data @param[in] str Pointer to string */ void mobi_buffer_addstring(MOBIBuffer *buf, const char *str) { const size_t len = strlen(str); mobi_buffer_addraw(buf, (const unsigned char *) str, len); } /** @brief Adds count of zeroes to MOBIBuffer @param[in,out] buf MOBIBuffer structure to be filled with data @param[in] count Number of zeroes to be put into the buffer */ void mobi_buffer_addzeros(MOBIBuffer *buf, const size_t count) { if (buf->offset + count > buf->maxlen) { debug_print("%s", "Buffer full\n"); buf->error = MOBI_BUFFER_END; return; } memset(buf->data + buf->offset, 0, count); buf->offset += count; } /** @brief Reads 8-bit value from MOBIBuffer @param[in] buf MOBIBuffer structure containing data @return Read value, 0 if end of buffer is encountered */ uint8_t mobi_buffer_get8(MOBIBuffer *buf) { if (buf->offset + 1 > buf->maxlen) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; return 0; } return buf->data[buf->offset++]; } /** @brief Reads 16-bit value from MOBIBuffer @param[in] buf MOBIBuffer structure containing data @return Read value, 0 if end of buffer is encountered */ uint16_t mobi_buffer_get16(MOBIBuffer *buf) { if (buf->offset + 2 > buf->maxlen) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; return 0; } uint16_t val; val = (uint16_t)((uint16_t) buf->data[buf->offset] << 8 | (uint16_t) buf->data[buf->offset + 1]); buf->offset += 2; return val; } /** @brief Reads 32-bit value from MOBIBuffer @param[in] buf MOBIBuffer structure containing data @return Read value, 0 if end of buffer is encountered */ uint32_t mobi_buffer_get32(MOBIBuffer *buf) { if (buf->offset + 4 > buf->maxlen) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; return 0; } uint32_t val; val = (uint32_t) buf->data[buf->offset] << 24 | (uint32_t) buf->data[buf->offset + 1] << 16 | (uint32_t) buf->data[buf->offset + 2] << 8 | (uint32_t) buf->data[buf->offset + 3]; buf->offset += 4; return val; } /** @brief Reads variable length value from MOBIBuffer Internal function for wrappers: mobi_buffer_get_varlen(); mobi_buffer_get_varlen_dec(); Reads maximum 4 bytes from the buffer. Stops when byte has bit 7 set. 
@param[in] buf MOBIBuffer structure containing data @param[out] len Value will be increased by number of bytes read @param[in] direction 1 - read buffer forward, -1 - read buffer backwards @return Read value, 0 if end of buffer is encountered */ static uint32_t _buffer_get_varlen(MOBIBuffer *buf, size_t *len, const int direction) { uint32_t val = 0; uint8_t byte_count = 0; uint8_t byte; const uint8_t stop_flag = 0x80; const uint8_t mask = 0x7f; uint32_t shift = 0; do { if (direction == 1) { if (buf->offset + 1 > buf->maxlen) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; return val; } byte = buf->data[buf->offset++]; val <<= 7; val |= (byte & mask); } else { if (buf->offset < 1) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; return val; } byte = buf->data[buf->offset--]; val = val | (uint32_t)(byte & mask) << shift; shift += 7; } (*len)++; byte_count++; } while (!(byte & stop_flag) && (byte_count < 4)); return val; } /** @brief Reads variable length value from MOBIBuffer Reads maximum 4 bytes from the buffer. Stops when byte has bit 7 set. @param[in] buf MOBIBuffer structure containing data @param[out] len Value will be increased by number of bytes read @return Read value, 0 if end of buffer is encountered */ uint32_t mobi_buffer_get_varlen(MOBIBuffer *buf, size_t *len) { return _buffer_get_varlen(buf, len, 1); } /** @brief Reads variable length value from MOBIBuffer going backwards Reads maximum 4 bytes from the buffer. Stops when byte has bit 7 set. @param[in] buf MOBIBuffer structure containing data @param[out] len Value will be increased by number of bytes read @return Read value, 0 if end of buffer is encountered */ uint32_t mobi_buffer_get_varlen_dec(MOBIBuffer *buf, size_t *len) { return _buffer_get_varlen(buf, len, -1); } /** @brief Reads raw data from MOBIBuffer and pads it with zero character @param[out] str Destination for string read from buffer. 
Length must be (len + 1) @param[in] buf MOBIBuffer structure containing data @param[in] len Length of the data to be read from buffer */ void mobi_buffer_getstring(char *str, MOBIBuffer *buf, const size_t len) { if (!str) { buf->error = MOBI_PARAM_ERR; return; } if (buf->offset + len > buf->maxlen) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; str[0] = '\0'; return; } memcpy(str, buf->data + buf->offset, len); str[len] = '\0'; buf->offset += len; } /** @brief Reads raw data from MOBIBuffer, appends it to a string and pads it with zero character @param[in,out] str A string to which data will be appended @param[in] buf MOBIBuffer structure containing data @param[in] len Length of the data to be read from buffer */ void mobi_buffer_appendstring(char *str, MOBIBuffer *buf, const size_t len) { if (!str) { buf->error = MOBI_PARAM_ERR; return; } if (buf->offset + len > buf->maxlen) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; return; } size_t str_len = strlen(str); memcpy(str + str_len, buf->data + buf->offset, len); str[str_len + len] = '\0'; buf->offset += len; } /** @brief Reads raw data from MOBIBuffer @param[out] data Destination to which data will be appended @param[in] buf MOBIBuffer structure containing data @param[in] len Length of the data to be read from buffer */ void mobi_buffer_getraw(void *data, MOBIBuffer *buf, const size_t len) { if (!data) { buf->error = MOBI_PARAM_ERR; return; } if (buf->offset + len > buf->maxlen) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; return; } memcpy(data, buf->data + buf->offset, len); buf->offset += len; } /** @brief Get pointer to MOBIBuffer data at offset @param[in] buf MOBIBuffer structure containing data @param[in] len Check if requested length is available in buffer @return Pointer to offset, or NULL on failure */ unsigned char * mobi_buffer_getpointer(MOBIBuffer *buf, const size_t len) { if (buf->offset + len > buf->maxlen) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; return NULL; } buf->offset += len; return buf->data + buf->offset - len; } /** @brief Read 8-bit value from MOBIBuffer into allocated memory Read 8-bit value from buffer into memory allocated by the function. Returns pointer to the value, which must be freed later. If the data is not accessible function will return null pointer. @param[out] val Pointer to value or null pointer on failure @param[in] buf MOBIBuffer structure containing data */ void mobi_buffer_dup8(uint8_t **val, MOBIBuffer *buf) { *val = NULL; if (buf->offset + 1 > buf->maxlen) { return; } *val = malloc(sizeof(uint8_t)); if (*val == NULL) { return; } **val = mobi_buffer_get8(buf); } /** @brief Read 16-bit value from MOBIBuffer into allocated memory Read 16-bit value from buffer into allocated memory. Returns pointer to the value, which must be freed later. If the data is not accessible function will return null pointer. @param[out] val Pointer to value or null pointer on failure @param[in] buf MOBIBuffer structure containing data */ void mobi_buffer_dup16(uint16_t **val, MOBIBuffer *buf) { *val = NULL; if (buf->offset + 2 > buf->maxlen) { return; } *val = malloc(sizeof(uint16_t)); if (*val == NULL) { return; } **val = mobi_buffer_get16(buf); } /** @brief Read 32-bit value from MOBIBuffer into allocated memory Read 32-bit value from buffer into allocated memory. Returns pointer to the value, which must be freed later. If the data is not accessible function will return null pointer. 
@param[out] val Pointer to value @param[in] buf MOBIBuffer structure containing data */ void mobi_buffer_dup32(uint32_t **val, MOBIBuffer *buf) { *val = NULL; if (buf->offset + 4 > buf->maxlen) { return; } *val = malloc(sizeof(uint32_t)); if (*val == NULL) { return; } **val = mobi_buffer_get32(buf); } /** @brief Copy 8-bit value from one MOBIBuffer into another @param[out] dest Destination buffer @param[in] source Source buffer */ void mobi_buffer_copy8(MOBIBuffer *dest, MOBIBuffer *source) { mobi_buffer_add8(dest, mobi_buffer_get8(source)); } /** @brief Copy raw value from one MOBIBuffer into another @param[out] dest Destination buffer @param[in] source Source buffer @param[in] len Number of bytes to copy */ void mobi_buffer_copy(MOBIBuffer *dest, MOBIBuffer *source, const size_t len) { if (source->offset + len > source->maxlen) { debug_print("%s", "End of buffer\n"); source->error = MOBI_BUFFER_END; return; } if (dest->offset + len > dest->maxlen) { debug_print("%s", "End of buffer\n"); dest->error = MOBI_BUFFER_END; return; } memcpy(dest->data + dest->offset, source->data + source->offset, len); dest->offset += len; source->offset += len; } /** @brief Copy raw value within one MOBIBuffer Memmove len bytes from offset (relative to current position) to current position in buffer and advance buffer position. Data may overlap. @param[out] buf Buffer @param[in] offset Offset to read from @param[in] len Number of bytes to copy */ void mobi_buffer_move(MOBIBuffer *buf, const int offset, const size_t len) { size_t aoffset = (size_t) abs(offset); unsigned char *source = buf->data + buf->offset; if (offset >= 0) { if (buf->offset + aoffset + len > buf->maxlen) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; return; } source += aoffset; } else { if (buf->offset < aoffset) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; return; } source -= aoffset; } memmove(buf->data + buf->offset, source, len); buf->offset += len; } /** @brief Check if buffer data header contains magic signature @param[in] buf MOBIBuffer buffer containing data @param[in] magic Magic signature @return boolean true on match, false otherwise */ bool mobi_buffer_match_magic(MOBIBuffer *buf, const char *magic) { const size_t magic_length = strlen(magic); if (buf->offset + magic_length > buf->maxlen) { return false; } if (memcmp(buf->data + buf->offset, magic, magic_length) == 0) { return true; } return false; } /** @brief Check if buffer contains magic signature at given offset @param[in] buf MOBIBuffer buffer containing data @param[in] magic Magic signature @param[in] offset Offset @return boolean true on match, false otherwise */ bool mobi_buffer_match_magic_offset(MOBIBuffer *buf, const char *magic, const size_t offset) { bool match = false; if (offset <= buf->maxlen) { const size_t save_offset = buf->offset; buf->offset = offset; match = mobi_buffer_match_magic(buf, magic); buf->offset = save_offset; } return match; } /** @brief Move current buffer offset by diff bytes @param[in,out] buf MOBIBuffer buffer containing data @param[in] diff Number of bytes by which the offset is adjusted */ void mobi_buffer_seek(MOBIBuffer *buf, const int diff) { size_t adiff = (size_t) abs(diff); if (diff >= 0) { if (buf->offset + adiff <= buf->maxlen) { buf->offset += adiff; return; } } else { if (buf->offset >= adiff) { buf->offset -= adiff; return; } } buf->error = MOBI_BUFFER_END; debug_print("%s", "End of buffer\n"); } /** @brief Set buffer offset to pos position @param[in,out] buf MOBIBuffer buffer 
containing data @param[in] pos New position */ void mobi_buffer_setpos(MOBIBuffer *buf, const size_t pos) { if (pos <= buf->maxlen) { buf->offset = pos; return; } buf->error = MOBI_BUFFER_END; debug_print("%s", "End of buffer\n"); } /** @brief Free pointer to MOBIBuffer structure and pointer to data Free data initialized with mobi_buffer_init(); @param[in] buf MOBIBuffer structure */ void mobi_buffer_free(MOBIBuffer *buf) { if (buf == NULL) { return; } if (buf->data != NULL) { free(buf->data); } free(buf); } /** @brief Free pointer to MOBIBuffer structure Free data initialized with mobi_buffer_init_null(); Unlike mobi_buffer_free() it will not free pointer to buf->data @param[in] buf MOBIBuffer structure */ void mobi_buffer_free_null(MOBIBuffer *buf) { if (buf == NULL) { return; } free(buf); }
null
/** @file buffer.c * @brief Functions to read/write raw big endian data * * Copyright (c) 2014 Bartek Fabiszewski * http://www.fabiszewski.net * * This file is part of libmobi. * Licensed under LGPL, either version 3, or any later. * See <http://www.gnu.org/licenses/> */ #include <stdlib.h> #include <string.h> #include "buffer.h" #include "debug.h" /** @brief Initializer for MOBIBuffer structure It allocates memory for structure and for data. Memory should be freed with mobi_buffer_free(). @param[in] len Size of data to be allocated for the buffer @return MOBIBuffer on success, NULL otherwise */ MOBIBuffer * mobi_buffer_init(const size_t len) { unsigned char *data = malloc(len); if (data == NULL) { debug_print("%s", "Buffer data allocation failed\n"); return NULL; } MOBIBuffer *buf = mobi_buffer_init_null(data, len); if (buf == NULL) { free(data); } return buf; } /** @brief Initializer for MOBIBuffer structure It allocates memory for structure but, unlike mobi_buffer_init(), it does not allocate memory for data. Instead it works on external data. Memory should be freed with mobi_buffer_free_null() (buf->data will not be deallocated). @param[in,out] data Set data as buffer data @param[in] len Size of data held by the buffer @return MOBIBuffer on success, NULL otherwise */ MOBIBuffer * mobi_buffer_init_null(unsigned char *data, const size_t len) { MOBIBuffer *buf = malloc(sizeof(MOBIBuffer)); if (buf == NULL) { debug_print("%s", "Buffer allocation failed\n"); return NULL; } buf->data = data; buf->offset = 0; buf->maxlen = len; buf->error = MOBI_SUCCESS; return buf; } /** @brief Resize buffer Smaller size than offset will cause data truncation. @param[in,out] buf MOBIBuffer structure to be filled with data @param[in] newlen New buffer size */ void mobi_buffer_resize(MOBIBuffer *buf, const size_t newlen) { unsigned char *tmp = realloc(buf->data, newlen); if (tmp == NULL) { debug_print("%s", "Buffer allocation failed\n"); buf->error = MOBI_MALLOC_FAILED; return; } buf->data = tmp; buf->maxlen = newlen; if (buf->offset >= newlen) { buf->offset = newlen - 1; } debug_print("Buffer successfully resized to %zu\n", newlen); buf->error = MOBI_SUCCESS; } /** @brief Adds 8-bit value to MOBIBuffer @param[in,out] buf MOBIBuffer structure to be filled with data @param[in] data Integer to be put into the buffer */ void mobi_buffer_add8(MOBIBuffer *buf, const uint8_t data) { if (buf->offset + 1 > buf->maxlen) { debug_print("%s", "Buffer full\n"); buf->error = MOBI_BUFFER_END; return; } buf->data[buf->offset++] = data; } /** @brief Adds 16-bit value to MOBIBuffer @param[in,out] buf MOBIBuffer structure to be filled with data @param[in] data Integer to be put into the buffer */ void mobi_buffer_add16(MOBIBuffer *buf, const uint16_t data) { if (buf->offset + 2 > buf->maxlen) { debug_print("%s", "Buffer full\n"); buf->error = MOBI_BUFFER_END; return; } unsigned char *buftr = buf->data + buf->offset; *buftr++ = (uint8_t)((uint32_t)(data & 0xff00U) >> 8); *buftr = (uint8_t)((uint32_t)(data & 0xffU)); buf->offset += 2; } /** @brief Adds 32-bit value to MOBIBuffer @param[in,out] buf MOBIBuffer structure to be filled with data @param[in] data Integer to be put into the buffer */ void mobi_buffer_add32(MOBIBuffer *buf, const uint32_t data) { if (buf->offset + 4 > buf->maxlen) { debug_print("%s", "Buffer full\n"); buf->error = MOBI_BUFFER_END; return; } unsigned char *buftr = buf->data + buf->offset; *buftr++ = (uint8_t)((uint32_t)(data & 0xff000000U) >> 24); *buftr++ = (uint8_t)((uint32_t)(data & 0xff0000U) >> 16); 
*buftr++ = (uint8_t)((uint32_t)(data & 0xff00U) >> 8); *buftr = (uint8_t)((uint32_t)(data & 0xffU)); buf->offset += 4; } /** @brief Adds raw data to MOBIBuffer @param[in,out] buf MOBIBuffer structure to be filled with data @param[in] data Pointer to read data @param[in] len Size of the read data */ void mobi_buffer_addraw(MOBIBuffer *buf, const unsigned char* data, const size_t len) { if (buf->offset + len > buf->maxlen) { debug_print("%s", "Buffer full\n"); buf->error = MOBI_BUFFER_END; return; } memcpy(buf->data + buf->offset, data, len); buf->offset += len; } /** @brief Adds string to MOBIBuffer without null terminator @param[in,out] buf MOBIBuffer structure to be filled with data @param[in] str Pointer to string */ void mobi_buffer_addstring(MOBIBuffer *buf, const char *str) { const size_t len = strlen(str); mobi_buffer_addraw(buf, (const unsigned char *) str, len); } /** @brief Adds count of zeroes to MOBIBuffer @param[in,out] buf MOBIBuffer structure to be filled with data @param[in] count Number of zeroes to be put into the buffer */ void mobi_buffer_addzeros(MOBIBuffer *buf, const size_t count) { if (buf->offset + count > buf->maxlen) { debug_print("%s", "Buffer full\n"); buf->error = MOBI_BUFFER_END; return; } memset(buf->data + buf->offset, 0, count); buf->offset += count; } /** @brief Reads 8-bit value from MOBIBuffer @param[in] buf MOBIBuffer structure containing data @return Read value, 0 if end of buffer is encountered */ uint8_t mobi_buffer_get8(MOBIBuffer *buf) { if (buf->offset + 1 > buf->maxlen) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; return 0; } return buf->data[buf->offset++]; } /** @brief Reads 16-bit value from MOBIBuffer @param[in] buf MOBIBuffer structure containing data @return Read value, 0 if end of buffer is encountered */ uint16_t mobi_buffer_get16(MOBIBuffer *buf) { if (buf->offset + 2 > buf->maxlen) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; return 0; } uint16_t val; val = (uint16_t)((uint16_t) buf->data[buf->offset] << 8 | (uint16_t) buf->data[buf->offset + 1]); buf->offset += 2; return val; } /** @brief Reads 32-bit value from MOBIBuffer @param[in] buf MOBIBuffer structure containing data @return Read value, 0 if end of buffer is encountered */ uint32_t mobi_buffer_get32(MOBIBuffer *buf) { if (buf->offset + 4 > buf->maxlen) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; return 0; } uint32_t val; val = (uint32_t) buf->data[buf->offset] << 24 | (uint32_t) buf->data[buf->offset + 1] << 16 | (uint32_t) buf->data[buf->offset + 2] << 8 | (uint32_t) buf->data[buf->offset + 3]; buf->offset += 4; return val; } /** @brief Reads variable length value from MOBIBuffer Internal function for wrappers: mobi_buffer_get_varlen(); mobi_buffer_get_varlen_dec(); Reads maximum 4 bytes from the buffer. Stops when byte has bit 7 set. 
@param[in] buf MOBIBuffer structure containing data @param[out] len Value will be increased by number of bytes read @param[in] direction 1 - read buffer forward, -1 - read buffer backwards @return Read value, 0 if end of buffer is encountered */ static uint32_t _buffer_get_varlen(MOBIBuffer *buf, size_t *len, const int direction) { uint32_t val = 0; uint8_t byte_count = 0; uint8_t byte; const uint8_t stop_flag = 0x80; const uint8_t mask = 0x7f; uint32_t shift = 0; do { if (direction == 1) { if (buf->offset + 1 > buf->maxlen) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; return val; } byte = buf->data[buf->offset++]; val <<= 7; val |= (byte & mask); } else { if (buf->offset < 1) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; return val; } byte = buf->data[buf->offset--]; val = val | (uint32_t)(byte & mask) << shift; shift += 7; } (*len)++; byte_count++; } while (!(byte & stop_flag) && (byte_count < 4)); return val; } /** @brief Reads variable length value from MOBIBuffer Reads maximum 4 bytes from the buffer. Stops when byte has bit 7 set. @param[in] buf MOBIBuffer structure containing data @param[out] len Value will be increased by number of bytes read @return Read value, 0 if end of buffer is encountered */ uint32_t mobi_buffer_get_varlen(MOBIBuffer *buf, size_t *len) { return _buffer_get_varlen(buf, len, 1); } /** @brief Reads variable length value from MOBIBuffer going backwards Reads maximum 4 bytes from the buffer. Stops when byte has bit 7 set. @param[in] buf MOBIBuffer structure containing data @param[out] len Value will be increased by number of bytes read @return Read value, 0 if end of buffer is encountered */ uint32_t mobi_buffer_get_varlen_dec(MOBIBuffer *buf, size_t *len) { return _buffer_get_varlen(buf, len, -1); } /** @brief Reads raw data from MOBIBuffer and pads it with zero character @param[out] str Destination for string read from buffer. 
Length must be (len + 1) @param[in] buf MOBIBuffer structure containing data @param[in] len Length of the data to be read from buffer */ void mobi_buffer_getstring(char *str, MOBIBuffer *buf, const size_t len) { if (!str) { buf->error = MOBI_PARAM_ERR; return; } if (buf->offset + len > buf->maxlen) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; str[0] = '\0'; return; } memcpy(str, buf->data + buf->offset, len); str[len] = '\0'; buf->offset += len; } /** @brief Reads raw data from MOBIBuffer, appends it to a string and pads it with zero character @param[in,out] str A string to which data will be appended @param[in] buf MOBIBuffer structure containing data @param[in] len Length of the data to be read from buffer */ void mobi_buffer_appendstring(char *str, MOBIBuffer *buf, const size_t len) { if (!str) { buf->error = MOBI_PARAM_ERR; return; } if (buf->offset + len > buf->maxlen) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; return; } size_t str_len = strlen(str); memcpy(str + str_len, buf->data + buf->offset, len); str[str_len + len] = '\0'; buf->offset += len; } /** @brief Reads raw data from MOBIBuffer @param[out] data Destination to which data will be appended @param[in] buf MOBIBuffer structure containing data @param[in] len Length of the data to be read from buffer */ void mobi_buffer_getraw(void *data, MOBIBuffer *buf, const size_t len) { if (!data) { buf->error = MOBI_PARAM_ERR; return; } if (buf->offset + len > buf->maxlen) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; return; } memcpy(data, buf->data + buf->offset, len); buf->offset += len; } /** @brief Get pointer to MOBIBuffer data at offset @param[in] buf MOBIBuffer structure containing data @param[in] len Check if requested length is available in buffer @return Pointer to offset, or NULL on failure */ unsigned char * mobi_buffer_getpointer(MOBIBuffer *buf, const size_t len) { if (buf->offset + len > buf->maxlen) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; return NULL; } buf->offset += len; return buf->data + buf->offset - len; } /** @brief Read 8-bit value from MOBIBuffer into allocated memory Read 8-bit value from buffer into memory allocated by the function. Returns pointer to the value, which must be freed later. If the data is not accessible function will return null pointer. @param[out] val Pointer to value or null pointer on failure @param[in] buf MOBIBuffer structure containing data */ void mobi_buffer_dup8(uint8_t **val, MOBIBuffer *buf) { *val = NULL; if (buf->offset + 1 > buf->maxlen) { return; } *val = malloc(sizeof(uint8_t)); if (*val == NULL) { return; } **val = mobi_buffer_get8(buf); } /** @brief Read 16-bit value from MOBIBuffer into allocated memory Read 16-bit value from buffer into allocated memory. Returns pointer to the value, which must be freed later. If the data is not accessible function will return null pointer. @param[out] val Pointer to value or null pointer on failure @param[in] buf MOBIBuffer structure containing data */ void mobi_buffer_dup16(uint16_t **val, MOBIBuffer *buf) { *val = NULL; if (buf->offset + 2 > buf->maxlen) { return; } *val = malloc(sizeof(uint16_t)); if (*val == NULL) { return; } **val = mobi_buffer_get16(buf); } /** @brief Read 32-bit value from MOBIBuffer into allocated memory Read 32-bit value from buffer into allocated memory. Returns pointer to the value, which must be freed later. If the data is not accessible function will return null pointer. 
@param[out] val Pointer to value @param[in] buf MOBIBuffer structure containing data */ void mobi_buffer_dup32(uint32_t **val, MOBIBuffer *buf) { *val = NULL; if (buf->offset + 4 > buf->maxlen) { return; } *val = malloc(sizeof(uint32_t)); if (*val == NULL) { return; } **val = mobi_buffer_get32(buf); } /** @brief Copy 8-bit value from one MOBIBuffer into another @param[out] dest Destination buffer @param[in] source Source buffer */ void mobi_buffer_copy8(MOBIBuffer *dest, MOBIBuffer *source) { mobi_buffer_add8(dest, mobi_buffer_get8(source)); } /** @brief Copy raw value from one MOBIBuffer into another @param[out] dest Destination buffer @param[in] source Source buffer @param[in] len Number of bytes to copy */ void mobi_buffer_copy(MOBIBuffer *dest, MOBIBuffer *source, const size_t len) { if (source->offset + len > source->maxlen) { debug_print("%s", "End of buffer\n"); source->error = MOBI_BUFFER_END; return; } if (dest->offset + len > dest->maxlen) { debug_print("%s", "End of buffer\n"); dest->error = MOBI_BUFFER_END; return; } memcpy(dest->data + dest->offset, source->data + source->offset, len); dest->offset += len; source->offset += len; } /** @brief Copy raw value within one MOBIBuffer Memmove len bytes from offset (relative to current position) to current position in buffer and advance buffer position. Data may overlap. @param[out] buf Buffer @param[in] offset Offset to read from @param[in] len Number of bytes to copy */ void mobi_buffer_move(MOBIBuffer *buf, const int offset, const size_t len) { size_t aoffset = (size_t) abs(offset); unsigned char *source = buf->data + buf->offset; if (offset >= 0) { if (buf->offset + aoffset + len > buf->maxlen) { debug_print("%s", "End of buffer\n"); buf->error = MOBI_BUFFER_END; return; } source += aoffset; } else { if ( (buf->offset < aoffset) || (buf->offset + len > buf->maxlen) ) { debug_print("%s", "Beyond start/end of buffer\n"); buf->error = MOBI_BUFFER_END; return; } source -= aoffset; } memmove(buf->data + buf->offset, source, len); buf->offset += len; } /** @brief Check if buffer data header contains magic signature @param[in] buf MOBIBuffer buffer containing data @param[in] magic Magic signature @return boolean true on match, false otherwise */ bool mobi_buffer_match_magic(MOBIBuffer *buf, const char *magic) { const size_t magic_length = strlen(magic); if (buf->offset + magic_length > buf->maxlen) { return false; } if (memcmp(buf->data + buf->offset, magic, magic_length) == 0) { return true; } return false; } /** @brief Check if buffer contains magic signature at given offset @param[in] buf MOBIBuffer buffer containing data @param[in] magic Magic signature @param[in] offset Offset @return boolean true on match, false otherwise */ bool mobi_buffer_match_magic_offset(MOBIBuffer *buf, const char *magic, const size_t offset) { bool match = false; if (offset <= buf->maxlen) { const size_t save_offset = buf->offset; buf->offset = offset; match = mobi_buffer_match_magic(buf, magic); buf->offset = save_offset; } return match; } /** @brief Move current buffer offset by diff bytes @param[in,out] buf MOBIBuffer buffer containing data @param[in] diff Number of bytes by which the offset is adjusted */ void mobi_buffer_seek(MOBIBuffer *buf, const int diff) { size_t adiff = (size_t) abs(diff); if (diff >= 0) { if (buf->offset + adiff <= buf->maxlen) { buf->offset += adiff; return; } } else { if (buf->offset >= adiff) { buf->offset -= adiff; return; } } buf->error = MOBI_BUFFER_END; debug_print("%s", "End of buffer\n"); } /** @brief Set buffer offset 
to pos position @param[in,out] buf MOBIBuffer buffer containing data @param[in] pos New position */ void mobi_buffer_setpos(MOBIBuffer *buf, const size_t pos) { if (pos <= buf->maxlen) { buf->offset = pos; return; } buf->error = MOBI_BUFFER_END; debug_print("%s", "End of buffer\n"); } /** @brief Free pointer to MOBIBuffer structure and pointer to data Free data initialized with mobi_buffer_init(); @param[in] buf MOBIBuffer structure */ void mobi_buffer_free(MOBIBuffer *buf) { if (buf == NULL) { return; } if (buf->data != NULL) { free(buf->data); } free(buf); } /** @brief Free pointer to MOBIBuffer structure Free data initialized with mobi_buffer_init_null(); Unlike mobi_buffer_free() it will not free pointer to buf->data @param[in] buf MOBIBuffer structure */ void mobi_buffer_free_null(MOBIBuffer *buf) { if (buf == NULL) { return; } free(buf); }
null
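Every accessor in the ground_truth cell above follows the same guard: check that offset + len still fits inside maxlen, flag MOBI_BUFFER_END and bail out if it does not, and only then memcpy and advance the offset. A minimal, self-contained sketch of that pattern follows; the demo_buf type and demo_getstring name are placeholders for illustration and are not part of the library.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the buffer structure: raw bytes, a read
 * cursor and the total number of valid bytes. */
typedef struct {
    const unsigned char *data;
    size_t offset;
    size_t maxlen;
    int error;                /* 0 = ok, nonzero = attempted read past the end */
} demo_buf;

/* Bounds-checked string read mirroring mobi_buffer_getstring(): refuse
 * the copy when the request would run past maxlen, always NUL-terminate,
 * and advance the cursor only on success. Caller must supply len + 1 bytes. */
static int demo_getstring(char *str, demo_buf *buf, size_t len)
{
    if (buf->offset + len > buf->maxlen) {
        buf->error = 1;
        str[0] = '\0';
        return -1;
    }
    memcpy(str, buf->data + buf->offset, len);
    str[len] = '\0';
    buf->offset += len;
    return 0;
}

int main(void)
{
    const unsigned char raw[] = "BOOKMOBI";
    demo_buf buf = { raw, 0, sizeof(raw) - 1, 0 };
    char magic[5];

    if (demo_getstring(magic, &buf, 4) == 0)
        printf("magic: %s, offset now %zu\n", magic, buf.offset);
    return 0;
}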
283
CWE-787
CVE-2021-3756
/* Copyright 2016 Christian Hoene, Symonics GmbH */ /* IV.A.1.b. Version 2 Data Object Header Prefix 00000030 4f 48 44 52 02 2d d3 18 2b 53 d3 18 2b 53 d3 18 |OHDR.-..+S..+S..| 00000040 2b 53 d3 18 2b 53 f4 01 02 22 00 00 00 00 |+S..+S..."......| .... 00000230 00 00 00 00 00 00 00 00 00 00 00 00 f9 ba 5d c9 |..............].| */ #include "reader.h" #include <ctype.h> #include <errno.h> #include <inttypes.h> #include <stddef.h> #include <stdio.h> #include <stdlib.h> #include <string.h> static int readOCHK(struct READER *reader, struct DATAOBJECT *dataobject, uint64_t end); static struct DATAOBJECT *findDataobject(struct READER *reader, uint64_t address) { struct DATAOBJECT *p = reader->all; while (p && p->address != address) p = p->all; return p; } /* * IV.A.2.a. The NIL Message 00000090 00 9c 01 00 00 00 |................| 000000a0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| * 00000230 00 00 00 00 00 00 00 00 00 00 00 00 f9 ba 5d c9 |..............].| */ static int readOHDRHeaderMessageNIL(struct READER *reader, int length) { if (fseek(reader->fhd, length, SEEK_CUR) < 0) return errno; // LCOV_EXCL_LINE return MYSOFA_OK; } /* * IV.A.2.b. The Dataspace Message */ static int readOHDRHeaderMessageDataspace1(struct READER *reader, struct DATASPACE *ds) { int i; readValue(reader, 5); for (i = 0; i < ds->dimensionality; i++) { if (i < 4) { ds->dimension_size[i] = readValue(reader, reader->superblock.size_of_lengths); if (ds->dimension_size[i] > 1000000) { mylog("dimension_size is too large\n"); // LCOV_EXCL_LINE return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_LINE } mylog(" dimension %d %" PRIu64 "\n", i, ds->dimension_size[i]); } else readValue(reader, reader->superblock.size_of_lengths); } if (ds->flags & 1) { for (i = 0; i < ds->dimensionality; i++) { if (i < 4) ds->dimension_max_size[i] = readValue(reader, reader->superblock.size_of_lengths); else readValue(reader, reader->superblock.size_of_lengths); } } if (ds->flags & 2) { mylog("permutation in OHDR not supported\n"); // LCOV_EXCL_LINE return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_LINE } return MYSOFA_OK; } static int readOHDRHeaderMessageDataspace2(struct READER *reader, struct DATASPACE *ds) { int i; ds->type = (uint8_t)fgetc(reader->fhd); for (i = 0; i < ds->dimensionality; i++) { if (i < 4) { ds->dimension_size[i] = readValue(reader, reader->superblock.size_of_lengths); mylog(" dimension %d %" PRIu64 "\n", i, ds->dimension_size[i]); } else readValue(reader, reader->superblock.size_of_lengths); } if (ds->flags & 1) { for (i = 0; i < ds->dimensionality; i++) { if (i < 4) ds->dimension_max_size[i] = readValue(reader, reader->superblock.size_of_lengths); else readValue(reader, reader->superblock.size_of_lengths); } } return MYSOFA_OK; } static int readOHDRHeaderMessageDataspace(struct READER *reader, struct DATASPACE *ds) { int version = fgetc(reader->fhd); ds->dimensionality = (uint8_t)fgetc(reader->fhd); if (ds->dimensionality > 4) { mylog("dimensionality must be lower than 5\n"); // LCOV_EXCL_LINE return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_LINE } ds->flags = (uint8_t)fgetc(reader->fhd); switch (version) { case 1: return readOHDRHeaderMessageDataspace1(reader, ds); case 2: return readOHDRHeaderMessageDataspace2(reader, ds); default: // LCOV_EXCL_START mylog("object OHDR dataspace message must have version 1 or 2 but is %X at " "%lX\n", version, ftell(reader->fhd) - 1); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } } /* * IV.A.2.c. 
The Link Info Message 00 03 |+S..+S..."......| 00000050 0f 00 00 00 00 00 00 00 c9 11 00 00 00 00 00 00 |................| 00000060 5b 12 00 00 00 00 00 00 81 12 00 00 00 00 00 00 |[...............| */ static int readOHDRHeaderMessageLinkInfo(struct READER *reader, struct LINKINFO *li) { if (fgetc(reader->fhd) != 0) { mylog( "object OHDR link info message must have version 0\n"); // LCOV_EXCL_LINE return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_LINE } li->flags = (uint8_t)fgetc(reader->fhd); if (li->flags & 1) li->maximum_creation_index = readValue(reader, 8); li->fractal_heap_address = readValue(reader, reader->superblock.size_of_offsets); li->address_btree_index = readValue(reader, reader->superblock.size_of_offsets); if (li->flags & 2) li->address_btree_order = readValue(reader, reader->superblock.size_of_offsets); return MYSOFA_OK; } /* * IV.A.2.d. The Datatype Message 000007c0 03 14 00 01 00 00|11|21 1f |..............!.| 000007d0 00|04 00 00 00|00 00|20 00|17|08|00|17|7f 00 00 |....... ........| 000007e0 00|05 02 00 01 00 00 03 0a 10 10 00 00 07 00 6d |...............m| 000007f0 36 00 00 00 00 00 00 ea 00 00 00 00 00 00 00 15 |6...............| */ static int readOHDRHeaderMessageDatatype(struct READER *reader, struct DATATYPE *dt) { int i, j, c, err; char *buffer; struct DATATYPE dt2; dt->class_and_version = (uint8_t)fgetc(reader->fhd); if ((dt->class_and_version & 0xf0) != 0x10 && (dt->class_and_version & 0xf0) != 0x30) { // LCOV_EXCL_START mylog("object OHDR datatype message must have version 1 not %d at %lX\n", dt->class_and_version >> 4, ftell(reader->fhd) - 1); return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_STOP } dt->class_bit_field = (uint32_t)readValue(reader, 3); dt->size = (uint32_t)readValue(reader, 4); if (dt->size > 64) return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_LINE switch (dt->class_and_version & 0xf) { case 0: /* int */ dt->u.i.bit_offset = readValue(reader, 2); dt->u.i.bit_precision = readValue(reader, 2); mylog(" INT bit %d %d %d %d\n", dt->u.i.bit_offset, dt->u.i.bit_precision, dt->class_and_version >> 4, dt->size); break; case 1: /* float */ dt->u.f.bit_offset = (uint16_t)readValue(reader, 2); dt->u.f.bit_precision = (uint16_t)readValue(reader, 2); dt->u.f.exponent_location = (uint8_t)fgetc(reader->fhd); dt->u.f.exponent_size = (uint8_t)fgetc(reader->fhd); dt->u.f.mantissa_location = (uint8_t)fgetc(reader->fhd); dt->u.f.mantissa_size = (uint8_t)fgetc(reader->fhd); dt->u.f.exponent_bias = (uint32_t)readValue(reader, 4); mylog(" FLOAT bit %d %d exponent %d %d MANTISSA %d %d OFFSET %d\n", dt->u.f.bit_offset, dt->u.f.bit_precision, dt->u.f.exponent_location, dt->u.f.exponent_size, dt->u.f.mantissa_location, dt->u.f.mantissa_size, dt->u.f.exponent_bias); /* FLOAT bit 0 32 exponent 23 8 MANTISSA 0 23 OFFSET 127 FLOAT bit 0 64 exponent 52 11 MANTISSA 0 52 OFFSET 1023 */ if (dt->u.f.bit_offset != 0 || dt->u.f.mantissa_location != 0 || (dt->u.f.bit_precision != 32 && dt->u.f.bit_precision != 64) || (dt->u.f.bit_precision == 32 && (dt->u.f.exponent_location != 23 || dt->u.f.exponent_size != 8 || dt->u.f.mantissa_size != 23 || dt->u.f.exponent_bias != 127)) || (dt->u.f.bit_precision == 64 && (dt->u.f.exponent_location != 52 || dt->u.f.exponent_size != 11 || dt->u.f.mantissa_size != 52 || dt->u.f.exponent_bias != 1023))) return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_LINE break; case 3: /* string */ mylog(" STRING %d %02X\n", dt->size, dt->class_bit_field); break; case 6: mylog(" COMPOUND %d %02X\n", dt->size, dt->class_bit_field); switch (dt->class_and_version >> 4) { 
case 3: for (i = 0; i < (dt->class_bit_field & 0xffff); i++) { int maxsize = 0x1000; buffer = malloc(maxsize); if (!buffer) return MYSOFA_NO_MEMORY; for (j = 0; j < maxsize - 1; j++) { c = fgetc(reader->fhd); if (c < 0) { free(buffer); return MYSOFA_READ_ERROR; } buffer[j] = c; if (c == 0) break; } buffer[j] = 0; for (j = 0, c = 0; (dt->size >> (8 * j)) > 0; j++) { c |= fgetc(reader->fhd) << (8 * j); } mylog(" COMPOUND %s offset %d\n", buffer, c); /* not needed until the data is stored somewhere permanently p = realloc(buffer, j); if (!p) { free(buffer); return errno; } buffer = p; */ free(buffer); err = readOHDRHeaderMessageDatatype(reader, &dt2); if (err) return err; // LCOV_EXCL_LINE } break; case 1: for (i = 0; i < (dt->class_bit_field & 0xffff); i++) { char name[256]; int res; for (j = 0;; j++) { if (j == sizeof(name)) return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_LINE res = fgetc(reader->fhd); if (res < 0) return MYSOFA_READ_ERROR; // LCOV_EXCL_LINE name[j] = res; if (name[j] == 0) break; } if (fseek(reader->fhd, (7 - j) & 7, SEEK_CUR)) return MYSOFA_READ_ERROR; // LCOV_EXCL_LINE c = readValue(reader, 4); int dimension = fgetc(reader->fhd); if (dimension != 0) { mylog("COMPOUND v1 with dimension not supported"); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_LINE } // ignore the following fields if (fseek(reader->fhd, 3 + 4 + 4 + 4 * 4, SEEK_CUR)) return MYSOFA_READ_ERROR; // LCOV_EXCL_LINE mylog(" COMPOUND %s %d %d %lX\n", name, c, dimension, ftell(reader->fhd)); err = readOHDRHeaderMessageDatatype(reader, &dt2); if (err) return err; // LCOV_EXCL_LINE } break; default: // LCOV_EXCL_START mylog("object OHDR datatype message must have version 1 or 3 not %d\n", dt->class_and_version >> 4); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } break; case 7: /* reference */ mylog(" REFERENCE %d %02X\n", dt->size, dt->class_bit_field); break; case 9: /* list */ dt->list = dt->size; mylog(" LIST %d\n", dt->size); err = readOHDRHeaderMessageDatatype(reader, dt); if (err) return err; // LCOV_EXCL_LINE break; default: // LCOV_EXCL_START mylog("object OHDR datatype message has unknown variable type %d\n", dt->class_and_version & 0xf); return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_STOP } return MYSOFA_OK; } /* * IV.A.2.f. 
The Data Storage - Fill Value Message 000007e0 |05 02 00 01 00 00|03 0a */ static int readOHDRHeaderMessageDataFill1or2(struct READER *reader) { int spaceAllocationTime = fgetc(reader->fhd); int fillValueWriteTime = fgetc(reader->fhd); int fillValueDefined = fgetc(reader->fhd); if (spaceAllocationTime < 0 || fillValueWriteTime < 0 || fillValueDefined < 0) return MYSOFA_READ_ERROR; // LCOV_EXCL_LINE if ((spaceAllocationTime & ~1) != 2 || fillValueWriteTime != 2 || (fillValueDefined & ~1) != 0) { mylog("spaceAllocationTime %d fillValueWriteTime %d fillValueDefined %d\n", spaceAllocationTime, fillValueWriteTime, fillValueDefined); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_LINE } if (fillValueDefined > 0) { uint32_t size = (uint32_t)readValue(reader, 4); if (fseek(reader->fhd, size, SEEK_CUR) < 0) return errno; // LCOV_EXCL_LINE } return MYSOFA_OK; } static int readOHDRHeaderMessageDataFill3(struct READER *reader) { uint8_t flags; uint32_t size; flags = (uint8_t)fgetc(reader->fhd); if (flags & (1 << 5)) { size = (uint32_t)readValue(reader, 4); if (fseek(reader->fhd, size, SEEK_CUR) < 0) return errno; // LCOV_EXCL_LINE } return MYSOFA_OK; } static int readOHDRHeaderMessageDataFill(struct READER *reader) { int version = fgetc(reader->fhd); switch (version) { case 1: case 2: return readOHDRHeaderMessageDataFill1or2(reader); case 3: return readOHDRHeaderMessageDataFill3(reader); default: // LCOV_EXCL_START mylog("object OHDR data storage fill value message must have version 1,2, " "or 3 not " "%d\n", version); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } } static int readOHDRHeaderMessageDataFillOld(struct READER *reader) { uint32_t size; size = (uint32_t)readValue(reader, 4); if (fseek(reader->fhd, size, SEEK_CUR) < 0) return errno; // LCOV_EXCL_LINE return MYSOFA_OK; } /* * IV.A.2.i. 
The Data Layout Message 00000ec0 08 00 00 00 00 00 00 00 00 |......+.........| 00000ed0 00 9e 47 0b 16 00 01 00 00 02 02 02 00 01 00 01 |..G.............| 00000ee0 00 08 00 00 00 01 00 01 00 01 00 01 00 00 00 08 |................| 00000ef0 17 00 01 00 00 03 02 03 01 42 00 00 00 00 00 00 |.........B......| 00000f00 01 00 00 00 03 00 00 00 08 00 00 00 15 1c 00 04 |................| 03 02 03 01 42 00 00 00 00 00 00 |.........B......| 00000f00 01 00 00 00 03 00 00 00 08 00 00 00 15 1c 00 04 |................| 00000f10 00 00 00 03 03 00 ff ff ff ff ff ff ff ff ff ff |................| 00000f20 ff ff ff ff ff ff ff ff ff ff ff ff ff ff 0c 23 |...............#| 00000f30 00 00 00 00 03 00 05 00 08 00 04 00 00 54 79 70 |.............Typ| */ static int readOHDRHeaderMessageDataLayout(struct READER *reader, struct DATAOBJECT *data) { int i, err; unsigned size; uint8_t dimensionality, layout_class; uint32_t dataset_element_size; uint64_t data_address, store, data_size; UNUSED(dataset_element_size); UNUSED(data_size); if (fgetc(reader->fhd) != 3) { // LCOV_EXCL_START mylog("object OHDR message data layout message must have version 3\n"); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } layout_class = (uint8_t)fgetc(reader->fhd); mylog("data layout %d\n", layout_class); switch (layout_class) { #if 0 case 0: data_size = readValue(reader, 2); fseek(reader->fhd, data_size, SEEK_CUR); mylog("TODO 0 SIZE %u\n", data_size); break; #endif case 1: data_address = readValue(reader, reader->superblock.size_of_offsets); data_size = readValue(reader, reader->superblock.size_of_lengths); mylog("CHUNK Contiguous SIZE %" PRIu64 "\n", data_size); if (validAddress(reader, data_address)) { store = ftell(reader->fhd); if (fseek(reader->fhd, data_address, SEEK_SET) < 0) return errno; // LCOV_EXCL_LINE if (!data->data) { if (data_size > 0x10000000) return MYSOFA_INVALID_FORMAT; data->data_len = data_size; data->data = calloc(1, data_size); if (!data->data) return MYSOFA_NO_MEMORY; // LCOV_EXCL_LINE } err = fread(data->data, 1, data_size, reader->fhd); if (err != data_size) return MYSOFA_READ_ERROR; // LCOV_EXCL_LINE if (fseek(reader->fhd, store, SEEK_SET) < 0) return errno; // LCOV_EXCL_LINE } break; case 2: dimensionality = (uint8_t)fgetc(reader->fhd); mylog("dimensionality %d\n", dimensionality); if (dimensionality < 1 || dimensionality > DATAOBJECT_MAX_DIMENSIONALITY) { mylog("data layout 2: invalid dimensionality %d %lu %lu\n", dimensionality, sizeof(data->datalayout_chunk), sizeof(data->datalayout_chunk[0])); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_LINE } data_address = readValue(reader, reader->superblock.size_of_offsets); mylog(" CHUNK %" PRIX64 "\n", data_address); for (i = 0; i < dimensionality; i++) { data->datalayout_chunk[i] = readValue(reader, 4); mylog(" %d\n", data->datalayout_chunk[i]); } /* TODO last entry? 
error in spec: ?*/ size = data->datalayout_chunk[dimensionality - 1]; for (i = 0; i < data->ds.dimensionality; i++) size *= data->ds.dimension_size[i]; if (validAddress(reader, data_address) && dimensionality <= 4) { store = ftell(reader->fhd); if (fseek(reader->fhd, data_address, SEEK_SET) < 0) return errno; // LCOV_EXCL_LINE if (!data->data) { if (size > 0x10000000) return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_LINE data->data_len = size; data->data = calloc(1, size); if (!data->data) return MYSOFA_NO_MEMORY; // LCOV_EXCL_LINE } err = treeRead(reader, data); if (err) return err; // LCOV_EXCL_LINE if (fseek(reader->fhd, store, SEEK_SET) < 0) return errno; // LCOV_EXCL_LINE } break; default: // LCOV_EXCL_START mylog("object OHDR message data layout message has unknown layout class " "%d\n", layout_class); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } return MYSOFA_OK; } /* * IV.A.2.k. The Group Info Message * 00000070 0a 02 00 01 00 00 00 00 * */ static int readOHDRHeaderMessageGroupInfo(struct READER *reader, struct GROUPINFO *gi) { if (fgetc(reader->fhd) != 0) { // LCOV_EXCL_START mylog("object OHDR group info message must have version 0\n"); return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_STOP } gi->flags = (uint8_t)fgetc(reader->fhd); if (gi->flags & 1) { gi->maximum_compact_value = (uint16_t)readValue(reader, 2); gi->minimum_dense_value = (uint16_t)readValue(reader, 2); } if (gi->flags & 2) { gi->number_of_entries = (uint16_t)readValue(reader, 2); gi->length_of_entries = (uint16_t)readValue(reader, 2); } return MYSOFA_OK; } /* * IV.A.2.l. The Data Storage - Filter Pipeline Message * * 00000070 0a 02 00 01 00 00 00 00 * */ /* type 1 00 |......G.8.......| 000010a0 00 00 00 00 00 02 00 08 00 01 00 01 00 73 68 75 |.............shu| 000010b0 66 66 6c 65 00 08 00 00 00 00 00 00 00 01 00 08 |ffle............| 000010c0 00 01 00 01 00 64 65 66 6c 61 74 65 00 01 00 00 |.....deflate....| 000010d0 00 00 00 00 00 08 17 00 01 00 00 03 02 03 01 48 |...............H| */ static int readOHDRHeaderMessageFilterPipelineV1(struct READER *reader, uint8_t filters) { int i, j; uint16_t filter_identification_value, flags, number_client_data_values, namelength; if (readValue(reader, 6) != 0) { mylog("reserved values not zero\n"); return MYSOFA_INVALID_FORMAT; } for (i = 0; i < filters; i++) { filter_identification_value = (uint16_t)readValue(reader, 2); switch (filter_identification_value) { case 1: case 2: break; default: // LCOV_EXCL_START mylog("object OHDR filter pipeline message contains unsupported filter: " "%d %lX\n", filter_identification_value, ftell(reader->fhd) - 2); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } namelength = (uint16_t)readValue(reader, 2); flags = (uint16_t)readValue(reader, 2); number_client_data_values = (uint16_t)readValue(reader, 2); if (namelength > 0) if (fseek(reader->fhd, ((namelength - 1) & ~7) + 8, SEEK_CUR) == -1) // skip name return MYSOFA_READ_ERROR; // LCOV_EXCL_LINE mylog(" filter %d namelen %d flags %04X values %d\n", filter_identification_value, namelength, flags, number_client_data_values); if (number_client_data_values > 0x1000) return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_LINE /* no name here */ for (j = 0; j < number_client_data_values; j++) { readValue(reader, 4); } if ((number_client_data_values & 1) == 1) readValue(reader, 4); } return MYSOFA_OK; } static int readOHDRHeaderMessageFilterPipelineV2(struct READER *reader, uint8_t filters) { int i, j; uint16_t filter_identification_value, flags, number_client_data_values; uint32_t client_data; uint64_t 
maximum_compact_value, minimum_dense_value, number_of_entries, length_of_entries; UNUSED(flags); UNUSED(client_data); UNUSED(maximum_compact_value); UNUSED(minimum_dense_value); UNUSED(number_of_entries); UNUSED(length_of_entries); for (i = 0; i < filters; i++) { filter_identification_value = (uint16_t)readValue(reader, 2); switch (filter_identification_value) { case 1: case 2: break; default: // LCOV_EXCL_START mylog("object OHDR filter pipeline message contains unsupported filter: " "%d\n", filter_identification_value); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } mylog(" filter %d\n", filter_identification_value); flags = (uint16_t)readValue(reader, 2); number_client_data_values = (uint16_t)readValue(reader, 2); if (number_client_data_values > 0x1000) return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_LINE /* no name here */ for (j = 0; j < number_client_data_values; j++) { client_data = readValue(reader, 4); } } return MYSOFA_OK; } static int readOHDRHeaderMessageFilterPipeline(struct READER *reader) { int filterversion, filters; filterversion = fgetc(reader->fhd); filters = fgetc(reader->fhd); if (filterversion < 0 || filters < 0) return MYSOFA_READ_ERROR; // LCOV_EXCL_LINE if (filters > 32) { // LCOV_EXCL_START mylog("object OHDR filter pipeline message has too many filters: %d\n", filters); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } switch (filterversion) { case 1: return readOHDRHeaderMessageFilterPipelineV1(reader, filters); case 2: return readOHDRHeaderMessageFilterPipelineV2(reader, filters); default: // LCOV_EXCL_START mylog( "object OHDR filter pipeline message must have version 1 or 2 not %d\n", filterversion); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } } int readDataVar(struct READER *reader, struct DATAOBJECT *data, struct DATATYPE *dt, struct DATASPACE *ds) { char *buffer, number[20]; uint64_t reference, gcol = 0, dataobject; int err; struct DATAOBJECT *referenceData; if (dt->list) { if (dt->list - dt->size == 8) { readValue(reader, 4); /* TODO unknown? */ gcol = readValue(reader, 4); } else { gcol = readValue(reader, dt->list - dt->size); } mylog(" GCOL %d %8" PRIX64 " %8lX\n", dt->list - dt->size, gcol, ftell(reader->fhd)); /* fseek(reader->fhd, dt->list - dt->size, SEEK_CUR); TODO: * TODO: missing part in specification */ } switch (dt->class_and_version & 0xf) { case 0: mylog("FIXED POINT todo %lX %d\n", ftell(reader->fhd), dt->size); if (fseek(reader->fhd, dt->size, SEEK_CUR)) return errno; // LCOV_EXCL_LINE break; case 3: buffer = malloc(dt->size + 1); if (buffer == NULL) { return MYSOFA_NO_MEMORY; // LCOV_EXCL_LINE } if (fread(buffer, 1, dt->size, reader->fhd) != dt->size) { free(buffer); // LCOV_EXCL_LINE return MYSOFA_READ_ERROR; // LCOV_EXCL_LINE } buffer[dt->size] = 0; mylog("STRING %s\n", buffer); data->string = buffer; break; /* * 000036e3 67 0e 00 00 00 00 00 00 00 00 00 00 00 |...g............| 000036f0 00 00 00 */ case 6: /* TODO unclear spec */ mylog("COMPONENT todo %lX %d\n", ftell(reader->fhd), dt->size); if (fseek(reader->fhd, dt->size, SEEK_CUR)) return errno; // LCOV_EXCL_LINE break; case 7: readValue(reader, 4); /* TODO unclear reference */ reference = readValue(reader, dt->size - 4); mylog(" REFERENCE size %d %" PRIX64 "\n", dt->size, reference); if (!!(err = gcolRead(reader, gcol, reference, &dataobject))) { return MYSOFA_OK; /* ignore error. TODO: why? 
return err; */ } referenceData = findDataobject(reader, dataobject); if (referenceData) buffer = referenceData->name; else { sprintf(number, "REF%08lX", (long unsigned int)reference); buffer = number; } mylog(" REFERENCE %" PRIX64 " %" PRIX64 " %s\n", reference, dataobject, buffer); /* if(!referenceData) { TODO? return MYSOFA_UNSUPPORTED_FORMAT; } */ if (data->string) { data->string = realloc(data->string, strlen(data->string) + strlen(buffer) + 2); if (!data->string) return MYSOFA_NO_MEMORY; strcat(data->string, ","); strcat(data->string, buffer); } else { data->string = mysofa_strdup(buffer); } break; default: // LCOV_EXCL_START mylog("data reader unknown type %d\n", dt->class_and_version & 0xf); return MYSOFA_INTERNAL_ERROR; // LCOV_EXCL_STOP } return MYSOFA_OK; } int readDataDim(struct READER *reader, struct DATAOBJECT *da, struct DATATYPE *dt, struct DATASPACE *ds, int dim) { int i, err; if (dim >= sizeof(ds->dimension_size) / sizeof(ds->dimension_size[0])) return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_LINE for (i = 0; i < ds->dimension_size[dim]; i++) { if (dim + 1 < ds->dimensionality) { if (!!(err = readDataDim(reader, da, dt, ds, dim + 1))) { return err; // LCOV_EXCL_LINE } } else { if (!!(err = readDataVar(reader, da, dt, ds))) { return err; // LCOV_EXCL_LINE } } } return MYSOFA_OK; } int readData(struct READER *reader, struct DATAOBJECT *da, struct DATATYPE *dt, struct DATASPACE *ds) { if (ds->dimensionality == 0) { ds->dimension_size[0] = 1; } return readDataDim(reader, da, dt, ds, 0); } /* IV.A.2.q. The Object Header Continuation Message 10 10 00 00 07 00 6d |...............m| 000007f0 36 00 00 00 00 00 00 ea 00 00 00 00 00 00 00 15 |6...............| */ static int readOHDRHeaderMessageContinue(struct READER *reader, struct DATAOBJECT *dataobject) { int err; uint64_t offset, length; long store; offset = readValue(reader, reader->superblock.size_of_offsets); length = readValue(reader, reader->superblock.size_of_lengths); if (offset > 0x2000000 || length > 0x10000000) return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_LINE mylog(" continue %08" PRIX64 " %08" PRIX64 "\n", offset, length); if (reader->recursive_counter >= 25) { mylog("recursive problem"); return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_LINE } else reader->recursive_counter++; store = ftell(reader->fhd); if (fseek(reader->fhd, offset, SEEK_SET) < 0) return errno; // LCOV_EXCL_LINE err = readOCHK(reader, dataobject, offset + length); if (err) return err; // LCOV_EXCL_LINE if (store < 0) return MYSOFA_READ_ERROR; // LCOV_EXCL_LINE if (fseek(reader->fhd, store, SEEK_SET) < 0) return errno; // LCOV_EXCL_LINE mylog(" continue back\n"); return MYSOFA_OK; } /* IV.A.2.m. 
The Attribute Message */ static int readOHDRHeaderMessageAttribute(struct READER *reader, struct DATAOBJECT *dataobject) { int err; uint8_t flags, encoding; uint16_t name_size, datatype_size, dataspace_size; char *name; struct DATAOBJECT d; struct MYSOFA_ATTRIBUTE *attr; UNUSED(encoding); UNUSED(datatype_size); UNUSED(dataspace_size); memset(&d, 0, sizeof(d)); int version = fgetc(reader->fhd); if (version != 1 && version != 3) { // LCOV_EXCL_START mylog("object OHDR attribute message must have version 1 or 3\n"); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } flags = (uint8_t)fgetc(reader->fhd); name_size = (uint16_t)readValue(reader, 2); datatype_size = (uint16_t)readValue(reader, 2); dataspace_size = (uint16_t)readValue(reader, 2); if (version == 3) encoding = (uint8_t)fgetc(reader->fhd); if (name_size > 0x1000) return MYSOFA_NO_MEMORY; // LCOV_EXCL_LINE name = malloc(name_size + 1); if (!name) return MYSOFA_NO_MEMORY; // LCOV_EXCL_LINE if (fread(name, 1, name_size, reader->fhd) != name_size) { free(name); // LCOV_EXCL_LINE return errno; // LCOV_EXCL_LINE } if (version == 1 && fseek(reader->fhd, (8 - name_size) & 7, SEEK_CUR) != 0) { free(name); // LCOV_EXCL_LINE return errno; // LCOV_EXCL_LINE } name[name_size] = 0; mylog(" attribute name %s %d %d %lX\n", name, datatype_size, dataspace_size, ftell(reader->fhd)); if (version == 3 && (flags & 3)) { // LCOV_EXCL_START mylog("object OHDR attribute message must not have any flags set\n"); free(name); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } err = readOHDRHeaderMessageDatatype(reader, &d.dt); if (err) { // LCOV_EXCL_START mylog("object OHDR attribute message read datatype error\n"); free(name); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } if (version == 1) { if (fseek(reader->fhd, (8 - datatype_size) & 7, SEEK_CUR) < 0) { // LCOV_EXCL_START free(name); return errno; // LCOV_EXCL_STOP } } err = readOHDRHeaderMessageDataspace(reader, &d.ds); if (err) { // LCOV_EXCL_START mylog("object OHDR attribute message read dataspace error\n"); free(name); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } if (version == 1) { if (fseek(reader->fhd, (8 - dataspace_size) & 7, SEEK_CUR) < 0) { // LCOV_EXCL_START free(name); return errno; // LCOV_EXCL_STOP } } err = readData(reader, &d, &d.dt, &d.ds); if (err) { mylog("object OHDR attribute message read data error\n"); free(name); return MYSOFA_INVALID_FORMAT; } attr = malloc(sizeof(struct MYSOFA_ATTRIBUTE)); if (!attr) { // LCOV_EXCL_START free(name); return MYSOFA_NO_MEMORY; // LCOV_EXCL_STOP } attr->name = name; attr->value = d.string; d.string = NULL; attr->next = dataobject->attributes; dataobject->attributes = attr; dataobjectFree(reader, &d); return MYSOFA_OK; } /* * IV.A.2.v. 
The Attribute Info Message 00000070 15 1c 00 04 00 00 00 03 |................| 00000080 16 00 40 02 00 00 00 00 00 00 d2 02 00 00 00 00 |..@.............| 00000090 00 00 f8 02 00 00 00 00 00 00 */ static int readOHDRHeaderMessageAttributeInfo(struct READER *reader, struct ATTRIBUTEINFO *ai) { if (fgetc(reader->fhd) != 0) { mylog("object OHDR attribute info message must have version 0\n"); return MYSOFA_UNSUPPORTED_FORMAT; } ai->flags = (uint8_t)fgetc(reader->fhd); if (ai->flags & 1) ai->maximum_creation_index = readValue(reader, 2); ai->fractal_heap_address = readValue(reader, reader->superblock.size_of_offsets); ai->attribute_name_btree = readValue(reader, reader->superblock.size_of_offsets); if (ai->flags & 2) ai->attribute_creation_order_btree = readValue(reader, reader->superblock.size_of_offsets); return MYSOFA_OK; } /** * read all OHDR messages */ static int readOHDRmessages(struct READER *reader, struct DATAOBJECT *dataobject, uint64_t end_of_messages) { FILE *fhd = reader->fhd; int err; long end; while (ftell(fhd) < end_of_messages - 4) { /* final gap may has a size of up to 3 */ uint8_t header_message_type = (uint8_t)fgetc(fhd); uint16_t header_message_size = (uint16_t)readValue(reader, 2); uint8_t header_message_flags = (uint8_t)fgetc(fhd); if ((header_message_flags & ~5) != 0) { mylog("OHDR unsupported OHDR message flag %02X\n", header_message_flags); return MYSOFA_UNSUPPORTED_FORMAT; } if ((dataobject->flags & (1 << 2)) != 0) /* ignore header_creation_order */ if (fseek(reader->fhd, 2, SEEK_CUR) < 0) return errno; mylog(" OHDR message type %2d offset %6lX len %4X\n", header_message_type, ftell(fhd), header_message_size); end = ftell(fhd) + header_message_size; switch (header_message_type) { case 0: /* NIL Message */ if (!!(err = readOHDRHeaderMessageNIL(reader, header_message_size))) return err; break; case 1: /* Dataspace Message */ if (!!(err = readOHDRHeaderMessageDataspace(reader, &dataobject->ds))) return err; break; case 2: /* Link Info Message */ if (!!(err = readOHDRHeaderMessageLinkInfo(reader, &dataobject->li))) return err; break; case 3: /* Datatype Message */ if (!!(err = readOHDRHeaderMessageDatatype(reader, &dataobject->dt))) return err; break; case 4: /* Data Fill Message Old */ if (!!(err = readOHDRHeaderMessageDataFillOld(reader))) return err; break; case 5: /* Data Fill Message */ if (!!(err = readOHDRHeaderMessageDataFill(reader))) return err; break; case 8: /* Data Layout Message */ if (!!(err = readOHDRHeaderMessageDataLayout(reader, dataobject))) return err; break; case 10: /* Group Info Message */ if (!!(err = readOHDRHeaderMessageGroupInfo(reader, &dataobject->gi))) return err; break; case 11: /* Filter Pipeline Message */ if (!!(err = readOHDRHeaderMessageFilterPipeline(reader))) return err; break; case 12: /* Attribute Message */ if (!!(err = readOHDRHeaderMessageAttribute(reader, dataobject))) return err; break; case 16: /* Continue Message */ if (!!(err = readOHDRHeaderMessageContinue(reader, dataobject))) return err; break; case 21: /* Attribute Info Message */ if (!!(err = readOHDRHeaderMessageAttributeInfo(reader, &dataobject->ai))) return err; break; default: mylog("OHDR unknown header message of type %d\n", header_message_type); return MYSOFA_UNSUPPORTED_FORMAT; } if (ftell(fhd) != end) { mylog("OHDR message length mismatch by %ld\n", ftell(fhd) - end); return MYSOFA_INTERNAL_ERROR; } } if (fseek(fhd, end_of_messages + 4, SEEK_SET) < 0) /* skip checksum */ return errno; return MYSOFA_OK; } static int readOCHK(struct READER *reader, struct 
DATAOBJECT *dataobject, uint64_t end) { int err; char buf[5]; /* read signature */ if (fread(buf, 1, 4, reader->fhd) != 4 || strncmp(buf, "OCHK", 4)) { mylog("cannot read signature of OCHK\n"); return MYSOFA_INVALID_FORMAT; } buf[4] = 0; mylog("%08" PRIX64 " %.4s\n", (uint64_t)ftell(reader->fhd) - 4, buf); err = readOHDRmessages(reader, dataobject, end - 4); /* subtract checksum */ if (err) { return err; } return MYSOFA_OK; } int dataobjectRead(struct READER *reader, struct DATAOBJECT *dataobject, char *name) { uint64_t size_of_chunk, end_of_messages; int err; char buf[5]; memset(dataobject, 0, sizeof(*dataobject)); dataobject->address = ftell(reader->fhd); dataobject->name = name; /* read signature */ if (fread(buf, 1, 4, reader->fhd) != 4 || strncmp(buf, "OHDR", 4)) { mylog("cannot read signature of data object\n"); return MYSOFA_INVALID_FORMAT; } buf[4] = 0; mylog("%08" PRIX64 " %.4s\n", dataobject->address, buf); if (fgetc(reader->fhd) != 2) { mylog("object OHDR must have version 2\n"); return MYSOFA_UNSUPPORTED_FORMAT; } dataobject->flags = (uint8_t)fgetc(reader->fhd); if (dataobject->flags & (1 << 5)) { /* bit 5 indicated time stamps */ if (fseek(reader->fhd, 16, SEEK_CUR) < 0) /* skip them */ return errno; } if (dataobject->flags & (1 << 4)) { /* bit 4 ? */ mylog("OHDR: unsupported flags bit 4: %02X\n", dataobject->flags); return MYSOFA_UNSUPPORTED_FORMAT; } size_of_chunk = readValue(reader, 1 << (dataobject->flags & 3)); if (size_of_chunk > 0x1000000) return MYSOFA_UNSUPPORTED_FORMAT; end_of_messages = ftell(reader->fhd) + size_of_chunk; err = readOHDRmessages(reader, dataobject, end_of_messages); if (err) { return err; } if (validAddress(reader, dataobject->ai.attribute_name_btree)) { /* not needed fseek(reader->fhd, dataobject->ai.attribute_name_btree, SEEK_SET); btreeRead(reader, &dataobject->attributes); */ } /* parse message attribute info */ if (validAddress(reader, dataobject->ai.fractal_heap_address)) { if (fseek(reader->fhd, dataobject->ai.fractal_heap_address, SEEK_SET) < 0) return errno; err = fractalheapRead(reader, dataobject, &dataobject->attributes_heap); if (err) return err; } /* parse message link info */ if (validAddress(reader, dataobject->li.fractal_heap_address)) { fseek(reader->fhd, dataobject->li.fractal_heap_address, SEEK_SET); err = fractalheapRead(reader, dataobject, &dataobject->objects_heap); if (err) return err; } if (validAddress(reader, dataobject->li.address_btree_index)) { /* not needed fseek(reader->fhd, dataobject->li.address_btree_index, SEEK_SET); btreeRead(reader, &dataobject->objects); */ } dataobject->all = reader->all; reader->all = dataobject; return MYSOFA_OK; } void dataobjectFree(struct READER *reader, struct DATAOBJECT *dataobject) { struct DATAOBJECT **p; btreeFree(&dataobject->attributes_btree); fractalheapFree(&dataobject->attributes_heap); btreeFree(&dataobject->objects_btree); fractalheapFree(&dataobject->objects_heap); while (dataobject->attributes) { struct MYSOFA_ATTRIBUTE *attr = dataobject->attributes; dataobject->attributes = attr->next; free(attr->name); free(attr->value); free(attr); } while (dataobject->directory) { struct DIR *dir = dataobject->directory; dataobject->directory = dir->next; dataobjectFree(reader, &dir->dataobject); free(dir); } free(dataobject->data); free(dataobject->string); free(dataobject->name); p = &reader->all; while (*p) { if ((*p) == dataobject) { *p = dataobject->all; break; } p = &((*p)->all); } }
null
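For this record (CWE-787, CVE-2021-3756) the difference between the source cell above and the ground_truth cell that follows sits in the contiguous-layout branch of readOHDRHeaderMessageDataLayout: the vulnerable version allocates data->data only when it is still NULL and otherwise reuses whatever buffer is already there, so a Data Layout message carrying a larger data_size lets the following fread() write past the end of an older, smaller allocation; the patched version frees any existing buffer and allocates a fresh one of exactly data_size bytes before reading. Below is a hedged sketch of that fix pattern with hypothetical names (ensure_exact_buffer is not part of libmysofa), kept separate from the record's own code.

#include <stdlib.h>

/* Guarantee that *buf points to a zeroed allocation of exactly new_len
 * bytes before a bulk read of new_len bytes is performed. Reusing an
 * older, possibly smaller allocation instead is the out-of-bounds write
 * pattern (CWE-787) the ground_truth cell removes. */
static int ensure_exact_buffer(unsigned char **buf, size_t *buf_len, size_t new_len)
{
    if (new_len > 0x10000000)      /* same sanity cap the reader applies */
        return -1;
    free(*buf);                    /* drop any previous buffer; free(NULL) is a no-op */
    *buf = calloc(1, new_len);
    if (*buf == NULL) {
        *buf_len = 0;
        return -1;
    }
    *buf_len = new_len;
    return 0;
}

After a successful call, copying new_len bytes into *buf cannot overrun it, which is exactly the property the reused-buffer path in the source cell lacks.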
/* Copyright 2016 Christian Hoene, Symonics GmbH */ /* IV.A.1.b. Version 2 Data Object Header Prefix 00000030 4f 48 44 52 02 2d d3 18 2b 53 d3 18 2b 53 d3 18 |OHDR.-..+S..+S..| 00000040 2b 53 d3 18 2b 53 f4 01 02 22 00 00 00 00 |+S..+S..."......| .... 00000230 00 00 00 00 00 00 00 00 00 00 00 00 f9 ba 5d c9 |..............].| */ #include "reader.h" #include <ctype.h> #include <errno.h> #include <inttypes.h> #include <stddef.h> #include <stdio.h> #include <stdlib.h> #include <string.h> static int readOCHK(struct READER *reader, struct DATAOBJECT *dataobject, uint64_t end); static struct DATAOBJECT *findDataobject(struct READER *reader, uint64_t address) { struct DATAOBJECT *p = reader->all; while (p && p->address != address) p = p->all; return p; } /* * IV.A.2.a. The NIL Message 00000090 00 9c 01 00 00 00 |................| 000000a0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................| * 00000230 00 00 00 00 00 00 00 00 00 00 00 00 f9 ba 5d c9 |..............].| */ static int readOHDRHeaderMessageNIL(struct READER *reader, int length) { if (fseek(reader->fhd, length, SEEK_CUR) < 0) return errno; // LCOV_EXCL_LINE return MYSOFA_OK; } /* * IV.A.2.b. The Dataspace Message */ static int readOHDRHeaderMessageDataspace1(struct READER *reader, struct DATASPACE *ds) { int i; readValue(reader, 5); for (i = 0; i < ds->dimensionality; i++) { if (i < 4) { ds->dimension_size[i] = readValue(reader, reader->superblock.size_of_lengths); if (ds->dimension_size[i] > 1000000) { mylog("dimension_size is too large\n"); // LCOV_EXCL_LINE return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_LINE } mylog(" dimension %d %" PRIu64 "\n", i, ds->dimension_size[i]); } else readValue(reader, reader->superblock.size_of_lengths); } if (ds->flags & 1) { for (i = 0; i < ds->dimensionality; i++) { if (i < 4) ds->dimension_max_size[i] = readValue(reader, reader->superblock.size_of_lengths); else readValue(reader, reader->superblock.size_of_lengths); } } if (ds->flags & 2) { mylog("permutation in OHDR not supported\n"); // LCOV_EXCL_LINE return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_LINE } return MYSOFA_OK; } static int readOHDRHeaderMessageDataspace2(struct READER *reader, struct DATASPACE *ds) { int i; ds->type = (uint8_t)fgetc(reader->fhd); for (i = 0; i < ds->dimensionality; i++) { if (i < 4) { ds->dimension_size[i] = readValue(reader, reader->superblock.size_of_lengths); mylog(" dimension %d %" PRIu64 "\n", i, ds->dimension_size[i]); } else readValue(reader, reader->superblock.size_of_lengths); } if (ds->flags & 1) { for (i = 0; i < ds->dimensionality; i++) { if (i < 4) ds->dimension_max_size[i] = readValue(reader, reader->superblock.size_of_lengths); else readValue(reader, reader->superblock.size_of_lengths); } } return MYSOFA_OK; } static int readOHDRHeaderMessageDataspace(struct READER *reader, struct DATASPACE *ds) { int version = fgetc(reader->fhd); ds->dimensionality = (uint8_t)fgetc(reader->fhd); if (ds->dimensionality > 4) { mylog("dimensionality must be lower than 5\n"); // LCOV_EXCL_LINE return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_LINE } ds->flags = (uint8_t)fgetc(reader->fhd); switch (version) { case 1: return readOHDRHeaderMessageDataspace1(reader, ds); case 2: return readOHDRHeaderMessageDataspace2(reader, ds); default: // LCOV_EXCL_START mylog("object OHDR dataspace message must have version 1 or 2 but is %X at " "%lX\n", version, ftell(reader->fhd) - 1); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } } /* * IV.A.2.c. 
The Link Info Message 00 03 |+S..+S..."......| 00000050 0f 00 00 00 00 00 00 00 c9 11 00 00 00 00 00 00 |................| 00000060 5b 12 00 00 00 00 00 00 81 12 00 00 00 00 00 00 |[...............| */ static int readOHDRHeaderMessageLinkInfo(struct READER *reader, struct LINKINFO *li) { if (fgetc(reader->fhd) != 0) { mylog( "object OHDR link info message must have version 0\n"); // LCOV_EXCL_LINE return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_LINE } li->flags = (uint8_t)fgetc(reader->fhd); if (li->flags & 1) li->maximum_creation_index = readValue(reader, 8); li->fractal_heap_address = readValue(reader, reader->superblock.size_of_offsets); li->address_btree_index = readValue(reader, reader->superblock.size_of_offsets); if (li->flags & 2) li->address_btree_order = readValue(reader, reader->superblock.size_of_offsets); return MYSOFA_OK; } /* * IV.A.2.d. The Datatype Message 000007c0 03 14 00 01 00 00|11|21 1f |..............!.| 000007d0 00|04 00 00 00|00 00|20 00|17|08|00|17|7f 00 00 |....... ........| 000007e0 00|05 02 00 01 00 00 03 0a 10 10 00 00 07 00 6d |...............m| 000007f0 36 00 00 00 00 00 00 ea 00 00 00 00 00 00 00 15 |6...............| */ static int readOHDRHeaderMessageDatatype(struct READER *reader, struct DATATYPE *dt) { int i, j, c, err; char *buffer; struct DATATYPE dt2; dt->class_and_version = (uint8_t)fgetc(reader->fhd); if ((dt->class_and_version & 0xf0) != 0x10 && (dt->class_and_version & 0xf0) != 0x30) { // LCOV_EXCL_START mylog("object OHDR datatype message must have version 1 not %d at %lX\n", dt->class_and_version >> 4, ftell(reader->fhd) - 1); return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_STOP } dt->class_bit_field = (uint32_t)readValue(reader, 3); dt->size = (uint32_t)readValue(reader, 4); if (dt->size > 64) return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_LINE switch (dt->class_and_version & 0xf) { case 0: /* int */ dt->u.i.bit_offset = readValue(reader, 2); dt->u.i.bit_precision = readValue(reader, 2); mylog(" INT bit %d %d %d %d\n", dt->u.i.bit_offset, dt->u.i.bit_precision, dt->class_and_version >> 4, dt->size); break; case 1: /* float */ dt->u.f.bit_offset = (uint16_t)readValue(reader, 2); dt->u.f.bit_precision = (uint16_t)readValue(reader, 2); dt->u.f.exponent_location = (uint8_t)fgetc(reader->fhd); dt->u.f.exponent_size = (uint8_t)fgetc(reader->fhd); dt->u.f.mantissa_location = (uint8_t)fgetc(reader->fhd); dt->u.f.mantissa_size = (uint8_t)fgetc(reader->fhd); dt->u.f.exponent_bias = (uint32_t)readValue(reader, 4); mylog(" FLOAT bit %d %d exponent %d %d MANTISSA %d %d OFFSET %d\n", dt->u.f.bit_offset, dt->u.f.bit_precision, dt->u.f.exponent_location, dt->u.f.exponent_size, dt->u.f.mantissa_location, dt->u.f.mantissa_size, dt->u.f.exponent_bias); /* FLOAT bit 0 32 exponent 23 8 MANTISSA 0 23 OFFSET 127 FLOAT bit 0 64 exponent 52 11 MANTISSA 0 52 OFFSET 1023 */ if (dt->u.f.bit_offset != 0 || dt->u.f.mantissa_location != 0 || (dt->u.f.bit_precision != 32 && dt->u.f.bit_precision != 64) || (dt->u.f.bit_precision == 32 && (dt->u.f.exponent_location != 23 || dt->u.f.exponent_size != 8 || dt->u.f.mantissa_size != 23 || dt->u.f.exponent_bias != 127)) || (dt->u.f.bit_precision == 64 && (dt->u.f.exponent_location != 52 || dt->u.f.exponent_size != 11 || dt->u.f.mantissa_size != 52 || dt->u.f.exponent_bias != 1023))) return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_LINE break; case 3: /* string */ mylog(" STRING %d %02X\n", dt->size, dt->class_bit_field); break; case 6: mylog(" COMPOUND %d %02X\n", dt->size, dt->class_bit_field); switch (dt->class_and_version >> 4) { 
case 3: for (i = 0; i < (dt->class_bit_field & 0xffff); i++) { int maxsize = 0x1000; buffer = malloc(maxsize); if (!buffer) return MYSOFA_NO_MEMORY; for (j = 0; j < maxsize - 1; j++) { c = fgetc(reader->fhd); if (c < 0) { free(buffer); return MYSOFA_READ_ERROR; } buffer[j] = c; if (c == 0) break; } buffer[j] = 0; for (j = 0, c = 0; (dt->size >> (8 * j)) > 0; j++) { c |= fgetc(reader->fhd) << (8 * j); } mylog(" COMPOUND %s offset %d\n", buffer, c); /* not needed until the data is stored somewhere permanently p = realloc(buffer, j); if (!p) { free(buffer); return errno; } buffer = p; */ free(buffer); err = readOHDRHeaderMessageDatatype(reader, &dt2); if (err) return err; // LCOV_EXCL_LINE } break; case 1: for (i = 0; i < (dt->class_bit_field & 0xffff); i++) { char name[256]; int res; for (j = 0;; j++) { if (j == sizeof(name)) return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_LINE res = fgetc(reader->fhd); if (res < 0) return MYSOFA_READ_ERROR; // LCOV_EXCL_LINE name[j] = res; if (name[j] == 0) break; } if (fseek(reader->fhd, (7 - j) & 7, SEEK_CUR)) return MYSOFA_READ_ERROR; // LCOV_EXCL_LINE c = readValue(reader, 4); int dimension = fgetc(reader->fhd); if (dimension != 0) { mylog("COMPOUND v1 with dimension not supported"); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_LINE } // ignore the following fields if (fseek(reader->fhd, 3 + 4 + 4 + 4 * 4, SEEK_CUR)) return MYSOFA_READ_ERROR; // LCOV_EXCL_LINE mylog(" COMPOUND %s %d %d %lX\n", name, c, dimension, ftell(reader->fhd)); err = readOHDRHeaderMessageDatatype(reader, &dt2); if (err) return err; // LCOV_EXCL_LINE } break; default: // LCOV_EXCL_START mylog("object OHDR datatype message must have version 1 or 3 not %d\n", dt->class_and_version >> 4); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } break; case 7: /* reference */ mylog(" REFERENCE %d %02X\n", dt->size, dt->class_bit_field); break; case 9: /* list */ dt->list = dt->size; mylog(" LIST %d\n", dt->size); err = readOHDRHeaderMessageDatatype(reader, dt); if (err) return err; // LCOV_EXCL_LINE break; default: // LCOV_EXCL_START mylog("object OHDR datatype message has unknown variable type %d\n", dt->class_and_version & 0xf); return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_STOP } return MYSOFA_OK; } /* * IV.A.2.f. 
The Data Storage - Fill Value Message 000007e0 |05 02 00 01 00 00|03 0a */ static int readOHDRHeaderMessageDataFill1or2(struct READER *reader) { int spaceAllocationTime = fgetc(reader->fhd); int fillValueWriteTime = fgetc(reader->fhd); int fillValueDefined = fgetc(reader->fhd); if (spaceAllocationTime < 0 || fillValueWriteTime < 0 || fillValueDefined < 0) return MYSOFA_READ_ERROR; // LCOV_EXCL_LINE if ((spaceAllocationTime & ~1) != 2 || fillValueWriteTime != 2 || (fillValueDefined & ~1) != 0) { mylog("spaceAllocationTime %d fillValueWriteTime %d fillValueDefined %d\n", spaceAllocationTime, fillValueWriteTime, fillValueDefined); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_LINE } if (fillValueDefined > 0) { uint32_t size = (uint32_t)readValue(reader, 4); if (fseek(reader->fhd, size, SEEK_CUR) < 0) return errno; // LCOV_EXCL_LINE } return MYSOFA_OK; } static int readOHDRHeaderMessageDataFill3(struct READER *reader) { uint8_t flags; uint32_t size; flags = (uint8_t)fgetc(reader->fhd); if (flags & (1 << 5)) { size = (uint32_t)readValue(reader, 4); if (fseek(reader->fhd, size, SEEK_CUR) < 0) return errno; // LCOV_EXCL_LINE } return MYSOFA_OK; } static int readOHDRHeaderMessageDataFill(struct READER *reader) { int version = fgetc(reader->fhd); switch (version) { case 1: case 2: return readOHDRHeaderMessageDataFill1or2(reader); case 3: return readOHDRHeaderMessageDataFill3(reader); default: // LCOV_EXCL_START mylog("object OHDR data storage fill value message must have version 1,2, " "or 3 not " "%d\n", version); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } } static int readOHDRHeaderMessageDataFillOld(struct READER *reader) { uint32_t size; size = (uint32_t)readValue(reader, 4); if (fseek(reader->fhd, size, SEEK_CUR) < 0) return errno; // LCOV_EXCL_LINE return MYSOFA_OK; } /* * IV.A.2.i. 
The Data Layout Message 00000ec0 08 00 00 00 00 00 00 00 00 |......+.........| 00000ed0 00 9e 47 0b 16 00 01 00 00 02 02 02 00 01 00 01 |..G.............| 00000ee0 00 08 00 00 00 01 00 01 00 01 00 01 00 00 00 08 |................| 00000ef0 17 00 01 00 00 03 02 03 01 42 00 00 00 00 00 00 |.........B......| 00000f00 01 00 00 00 03 00 00 00 08 00 00 00 15 1c 00 04 |................| 03 02 03 01 42 00 00 00 00 00 00 |.........B......| 00000f00 01 00 00 00 03 00 00 00 08 00 00 00 15 1c 00 04 |................| 00000f10 00 00 00 03 03 00 ff ff ff ff ff ff ff ff ff ff |................| 00000f20 ff ff ff ff ff ff ff ff ff ff ff ff ff ff 0c 23 |...............#| 00000f30 00 00 00 00 03 00 05 00 08 00 04 00 00 54 79 70 |.............Typ| */ static int readOHDRHeaderMessageDataLayout(struct READER *reader, struct DATAOBJECT *data) { int i, err; unsigned size; uint8_t dimensionality, layout_class; uint32_t dataset_element_size; uint64_t data_address, store, data_size; UNUSED(dataset_element_size); UNUSED(data_size); if (fgetc(reader->fhd) != 3) { // LCOV_EXCL_START mylog("object OHDR message data layout message must have version 3\n"); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } layout_class = (uint8_t)fgetc(reader->fhd); mylog("data layout %d\n", layout_class); switch (layout_class) { #if 0 case 0: data_size = readValue(reader, 2); fseek(reader->fhd, data_size, SEEK_CUR); mylog("TODO 0 SIZE %u\n", data_size); break; #endif case 1: data_address = readValue(reader, reader->superblock.size_of_offsets); data_size = readValue(reader, reader->superblock.size_of_lengths); mylog("CHUNK Contiguous SIZE %" PRIu64 "\n", data_size); if (validAddress(reader, data_address)) { store = ftell(reader->fhd); if (fseek(reader->fhd, data_address, SEEK_SET) < 0) return errno; // LCOV_EXCL_LINE if (data->data) { free(data->data); data->data = NULL; } if (data_size > 0x10000000) return MYSOFA_INVALID_FORMAT; data->data_len = data_size; data->data = calloc(1, data_size); if (!data->data) return MYSOFA_NO_MEMORY; // LCOV_EXCL_LINE err = fread(data->data, 1, data_size, reader->fhd); if (err != data_size) return MYSOFA_READ_ERROR; // LCOV_EXCL_LINE if (fseek(reader->fhd, store, SEEK_SET) < 0) return errno; // LCOV_EXCL_LINE } break; case 2: dimensionality = (uint8_t)fgetc(reader->fhd); mylog("dimensionality %d\n", dimensionality); if (dimensionality < 1 || dimensionality > DATAOBJECT_MAX_DIMENSIONALITY) { mylog("data layout 2: invalid dimensionality %d %lu %lu\n", dimensionality, sizeof(data->datalayout_chunk), sizeof(data->datalayout_chunk[0])); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_LINE } data_address = readValue(reader, reader->superblock.size_of_offsets); mylog(" CHUNK %" PRIX64 "\n", data_address); for (i = 0; i < dimensionality; i++) { data->datalayout_chunk[i] = readValue(reader, 4); mylog(" %d\n", data->datalayout_chunk[i]); } /* TODO last entry? 
error in spec: ?*/ size = data->datalayout_chunk[dimensionality - 1]; for (i = 0; i < data->ds.dimensionality; i++) size *= data->ds.dimension_size[i]; if (validAddress(reader, data_address) && dimensionality <= 4) { store = ftell(reader->fhd); if (fseek(reader->fhd, data_address, SEEK_SET) < 0) return errno; // LCOV_EXCL_LINE if (!data->data) { if (size > 0x10000000) return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_LINE data->data_len = size; data->data = calloc(1, size); if (!data->data) return MYSOFA_NO_MEMORY; // LCOV_EXCL_LINE } err = treeRead(reader, data); if (err) return err; // LCOV_EXCL_LINE if (fseek(reader->fhd, store, SEEK_SET) < 0) return errno; // LCOV_EXCL_LINE } break; default: // LCOV_EXCL_START mylog("object OHDR message data layout message has unknown layout class " "%d\n", layout_class); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } return MYSOFA_OK; } /* * IV.A.2.k. The Group Info Message * 00000070 0a 02 00 01 00 00 00 00 * */ static int readOHDRHeaderMessageGroupInfo(struct READER *reader, struct GROUPINFO *gi) { if (fgetc(reader->fhd) != 0) { // LCOV_EXCL_START mylog("object OHDR group info message must have version 0\n"); return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_STOP } gi->flags = (uint8_t)fgetc(reader->fhd); if (gi->flags & 1) { gi->maximum_compact_value = (uint16_t)readValue(reader, 2); gi->minimum_dense_value = (uint16_t)readValue(reader, 2); } if (gi->flags & 2) { gi->number_of_entries = (uint16_t)readValue(reader, 2); gi->length_of_entries = (uint16_t)readValue(reader, 2); } return MYSOFA_OK; } /* * IV.A.2.l. The Data Storage - Filter Pipeline Message * * 00000070 0a 02 00 01 00 00 00 00 * */ /* type 1 00 |......G.8.......| 000010a0 00 00 00 00 00 02 00 08 00 01 00 01 00 73 68 75 |.............shu| 000010b0 66 66 6c 65 00 08 00 00 00 00 00 00 00 01 00 08 |ffle............| 000010c0 00 01 00 01 00 64 65 66 6c 61 74 65 00 01 00 00 |.....deflate....| 000010d0 00 00 00 00 00 08 17 00 01 00 00 03 02 03 01 48 |...............H| */ static int readOHDRHeaderMessageFilterPipelineV1(struct READER *reader, uint8_t filters) { int i, j; uint16_t filter_identification_value, flags, number_client_data_values, namelength; if (readValue(reader, 6) != 0) { mylog("reserved values not zero\n"); return MYSOFA_INVALID_FORMAT; } for (i = 0; i < filters; i++) { filter_identification_value = (uint16_t)readValue(reader, 2); switch (filter_identification_value) { case 1: case 2: break; default: // LCOV_EXCL_START mylog("object OHDR filter pipeline message contains unsupported filter: " "%d %lX\n", filter_identification_value, ftell(reader->fhd) - 2); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } namelength = (uint16_t)readValue(reader, 2); flags = (uint16_t)readValue(reader, 2); number_client_data_values = (uint16_t)readValue(reader, 2); if (namelength > 0) if (fseek(reader->fhd, ((namelength - 1) & ~7) + 8, SEEK_CUR) == -1) // skip name return MYSOFA_READ_ERROR; // LCOV_EXCL_LINE mylog(" filter %d namelen %d flags %04X values %d\n", filter_identification_value, namelength, flags, number_client_data_values); if (number_client_data_values > 0x1000) return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_LINE /* no name here */ for (j = 0; j < number_client_data_values; j++) { readValue(reader, 4); } if ((number_client_data_values & 1) == 1) readValue(reader, 4); } return MYSOFA_OK; } static int readOHDRHeaderMessageFilterPipelineV2(struct READER *reader, uint8_t filters) { int i, j; uint16_t filter_identification_value, flags, number_client_data_values; uint32_t client_data; uint64_t 
maximum_compact_value, minimum_dense_value, number_of_entries, length_of_entries; UNUSED(flags); UNUSED(client_data); UNUSED(maximum_compact_value); UNUSED(minimum_dense_value); UNUSED(number_of_entries); UNUSED(length_of_entries); for (i = 0; i < filters; i++) { filter_identification_value = (uint16_t)readValue(reader, 2); switch (filter_identification_value) { case 1: case 2: break; default: // LCOV_EXCL_START mylog("object OHDR filter pipeline message contains unsupported filter: " "%d\n", filter_identification_value); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } mylog(" filter %d\n", filter_identification_value); flags = (uint16_t)readValue(reader, 2); number_client_data_values = (uint16_t)readValue(reader, 2); if (number_client_data_values > 0x1000) return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_LINE /* no name here */ for (j = 0; j < number_client_data_values; j++) { client_data = readValue(reader, 4); } } return MYSOFA_OK; } static int readOHDRHeaderMessageFilterPipeline(struct READER *reader) { int filterversion, filters; filterversion = fgetc(reader->fhd); filters = fgetc(reader->fhd); if (filterversion < 0 || filters < 0) return MYSOFA_READ_ERROR; // LCOV_EXCL_LINE if (filters > 32) { // LCOV_EXCL_START mylog("object OHDR filter pipeline message has too many filters: %d\n", filters); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } switch (filterversion) { case 1: return readOHDRHeaderMessageFilterPipelineV1(reader, filters); case 2: return readOHDRHeaderMessageFilterPipelineV2(reader, filters); default: // LCOV_EXCL_START mylog( "object OHDR filter pipeline message must have version 1 or 2 not %d\n", filterversion); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } } int readDataVar(struct READER *reader, struct DATAOBJECT *data, struct DATATYPE *dt, struct DATASPACE *ds) { char *buffer, number[20]; uint64_t reference, gcol = 0, dataobject; int err; struct DATAOBJECT *referenceData; if (dt->list) { if (dt->list - dt->size == 8) { readValue(reader, 4); /* TODO unknown? */ gcol = readValue(reader, 4); } else { gcol = readValue(reader, dt->list - dt->size); } mylog(" GCOL %d %8" PRIX64 " %8lX\n", dt->list - dt->size, gcol, ftell(reader->fhd)); /* fseek(reader->fhd, dt->list - dt->size, SEEK_CUR); TODO: * TODO: missing part in specification */ } switch (dt->class_and_version & 0xf) { case 0: mylog("FIXED POINT todo %lX %d\n", ftell(reader->fhd), dt->size); if (fseek(reader->fhd, dt->size, SEEK_CUR)) return errno; // LCOV_EXCL_LINE break; case 3: buffer = malloc(dt->size + 1); if (buffer == NULL) { return MYSOFA_NO_MEMORY; // LCOV_EXCL_LINE } if (fread(buffer, 1, dt->size, reader->fhd) != dt->size) { free(buffer); // LCOV_EXCL_LINE return MYSOFA_READ_ERROR; // LCOV_EXCL_LINE } buffer[dt->size] = 0; mylog("STRING %s\n", buffer); data->string = buffer; break; /* * 000036e3 67 0e 00 00 00 00 00 00 00 00 00 00 00 |...g............| 000036f0 00 00 00 */ case 6: /* TODO unclear spec */ mylog("COMPONENT todo %lX %d\n", ftell(reader->fhd), dt->size); if (fseek(reader->fhd, dt->size, SEEK_CUR)) return errno; // LCOV_EXCL_LINE break; case 7: readValue(reader, 4); /* TODO unclear reference */ reference = readValue(reader, dt->size - 4); mylog(" REFERENCE size %d %" PRIX64 "\n", dt->size, reference); if (!!(err = gcolRead(reader, gcol, reference, &dataobject))) { return MYSOFA_OK; /* ignore error. TODO: why? 
return err; */ } referenceData = findDataobject(reader, dataobject); if (referenceData) buffer = referenceData->name; else { sprintf(number, "REF%08lX", (long unsigned int)reference); buffer = number; } mylog(" REFERENCE %" PRIX64 " %" PRIX64 " %s\n", reference, dataobject, buffer); /* if(!referenceData) { TODO? return MYSOFA_UNSUPPORTED_FORMAT; } */ if (data->string) { data->string = realloc(data->string, strlen(data->string) + strlen(buffer) + 2); if (!data->string) return MYSOFA_NO_MEMORY; strcat(data->string, ","); strcat(data->string, buffer); } else { data->string = mysofa_strdup(buffer); } break; default: // LCOV_EXCL_START mylog("data reader unknown type %d\n", dt->class_and_version & 0xf); return MYSOFA_INTERNAL_ERROR; // LCOV_EXCL_STOP } return MYSOFA_OK; } int readDataDim(struct READER *reader, struct DATAOBJECT *da, struct DATATYPE *dt, struct DATASPACE *ds, int dim) { int i, err; if (dim >= sizeof(ds->dimension_size) / sizeof(ds->dimension_size[0])) return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_LINE for (i = 0; i < ds->dimension_size[dim]; i++) { if (dim + 1 < ds->dimensionality) { if (!!(err = readDataDim(reader, da, dt, ds, dim + 1))) { return err; // LCOV_EXCL_LINE } } else { if (!!(err = readDataVar(reader, da, dt, ds))) { return err; // LCOV_EXCL_LINE } } } return MYSOFA_OK; } int readData(struct READER *reader, struct DATAOBJECT *da, struct DATATYPE *dt, struct DATASPACE *ds) { if (ds->dimensionality == 0) { ds->dimension_size[0] = 1; } return readDataDim(reader, da, dt, ds, 0); } /* IV.A.2.q. The Object Header Continuation Message 10 10 00 00 07 00 6d |...............m| 000007f0 36 00 00 00 00 00 00 ea 00 00 00 00 00 00 00 15 |6...............| */ static int readOHDRHeaderMessageContinue(struct READER *reader, struct DATAOBJECT *dataobject) { int err; uint64_t offset, length; long store; offset = readValue(reader, reader->superblock.size_of_offsets); length = readValue(reader, reader->superblock.size_of_lengths); if (offset > 0x2000000 || length > 0x10000000) return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_LINE mylog(" continue %08" PRIX64 " %08" PRIX64 "\n", offset, length); if (reader->recursive_counter >= 25) { mylog("recursive problem"); return MYSOFA_UNSUPPORTED_FORMAT; // LCOV_EXCL_LINE } else reader->recursive_counter++; store = ftell(reader->fhd); if (fseek(reader->fhd, offset, SEEK_SET) < 0) return errno; // LCOV_EXCL_LINE err = readOCHK(reader, dataobject, offset + length); if (err) return err; // LCOV_EXCL_LINE if (store < 0) return MYSOFA_READ_ERROR; // LCOV_EXCL_LINE if (fseek(reader->fhd, store, SEEK_SET) < 0) return errno; // LCOV_EXCL_LINE mylog(" continue back\n"); return MYSOFA_OK; } /* IV.A.2.m. 
The Attribute Message */ static int readOHDRHeaderMessageAttribute(struct READER *reader, struct DATAOBJECT *dataobject) { int err; uint8_t flags, encoding; uint16_t name_size, datatype_size, dataspace_size; char *name; struct DATAOBJECT d; struct MYSOFA_ATTRIBUTE *attr; UNUSED(encoding); UNUSED(datatype_size); UNUSED(dataspace_size); memset(&d, 0, sizeof(d)); int version = fgetc(reader->fhd); if (version != 1 && version != 3) { // LCOV_EXCL_START mylog("object OHDR attribute message must have version 1 or 3\n"); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } flags = (uint8_t)fgetc(reader->fhd); name_size = (uint16_t)readValue(reader, 2); datatype_size = (uint16_t)readValue(reader, 2); dataspace_size = (uint16_t)readValue(reader, 2); if (version == 3) encoding = (uint8_t)fgetc(reader->fhd); if (name_size > 0x1000) return MYSOFA_NO_MEMORY; // LCOV_EXCL_LINE name = malloc(name_size + 1); if (!name) return MYSOFA_NO_MEMORY; // LCOV_EXCL_LINE if (fread(name, 1, name_size, reader->fhd) != name_size) { free(name); // LCOV_EXCL_LINE return errno; // LCOV_EXCL_LINE } if (version == 1 && fseek(reader->fhd, (8 - name_size) & 7, SEEK_CUR) != 0) { free(name); // LCOV_EXCL_LINE return errno; // LCOV_EXCL_LINE } name[name_size] = 0; mylog(" attribute name %s %d %d %lX\n", name, datatype_size, dataspace_size, ftell(reader->fhd)); if (version == 3 && (flags & 3)) { // LCOV_EXCL_START mylog("object OHDR attribute message must not have any flags set\n"); free(name); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } err = readOHDRHeaderMessageDatatype(reader, &d.dt); if (err) { // LCOV_EXCL_START mylog("object OHDR attribute message read datatype error\n"); free(name); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } if (version == 1) { if (fseek(reader->fhd, (8 - datatype_size) & 7, SEEK_CUR) < 0) { // LCOV_EXCL_START free(name); return errno; // LCOV_EXCL_STOP } } err = readOHDRHeaderMessageDataspace(reader, &d.ds); if (err) { // LCOV_EXCL_START mylog("object OHDR attribute message read dataspace error\n"); free(name); return MYSOFA_INVALID_FORMAT; // LCOV_EXCL_STOP } if (version == 1) { if (fseek(reader->fhd, (8 - dataspace_size) & 7, SEEK_CUR) < 0) { // LCOV_EXCL_START free(name); return errno; // LCOV_EXCL_STOP } } err = readData(reader, &d, &d.dt, &d.ds); if (err) { mylog("object OHDR attribute message read data error\n"); free(name); return MYSOFA_INVALID_FORMAT; } attr = malloc(sizeof(struct MYSOFA_ATTRIBUTE)); if (!attr) { // LCOV_EXCL_START free(name); return MYSOFA_NO_MEMORY; // LCOV_EXCL_STOP } attr->name = name; attr->value = d.string; d.string = NULL; attr->next = dataobject->attributes; dataobject->attributes = attr; dataobjectFree(reader, &d); return MYSOFA_OK; } /* * IV.A.2.v. 
The Attribute Info Message 00000070 15 1c 00 04 00 00 00 03 |................| 00000080 16 00 40 02 00 00 00 00 00 00 d2 02 00 00 00 00 |..@.............| 00000090 00 00 f8 02 00 00 00 00 00 00 */ static int readOHDRHeaderMessageAttributeInfo(struct READER *reader, struct ATTRIBUTEINFO *ai) { if (fgetc(reader->fhd) != 0) { mylog("object OHDR attribute info message must have version 0\n"); return MYSOFA_UNSUPPORTED_FORMAT; } ai->flags = (uint8_t)fgetc(reader->fhd); if (ai->flags & 1) ai->maximum_creation_index = readValue(reader, 2); ai->fractal_heap_address = readValue(reader, reader->superblock.size_of_offsets); ai->attribute_name_btree = readValue(reader, reader->superblock.size_of_offsets); if (ai->flags & 2) ai->attribute_creation_order_btree = readValue(reader, reader->superblock.size_of_offsets); return MYSOFA_OK; } /** * read all OHDR messages */ static int readOHDRmessages(struct READER *reader, struct DATAOBJECT *dataobject, uint64_t end_of_messages) { FILE *fhd = reader->fhd; int err; long end; while (ftell(fhd) < end_of_messages - 4) { /* final gap may has a size of up to 3 */ uint8_t header_message_type = (uint8_t)fgetc(fhd); uint16_t header_message_size = (uint16_t)readValue(reader, 2); uint8_t header_message_flags = (uint8_t)fgetc(fhd); if ((header_message_flags & ~5) != 0) { mylog("OHDR unsupported OHDR message flag %02X\n", header_message_flags); return MYSOFA_UNSUPPORTED_FORMAT; } if ((dataobject->flags & (1 << 2)) != 0) /* ignore header_creation_order */ if (fseek(reader->fhd, 2, SEEK_CUR) < 0) return errno; mylog(" OHDR message type %2d offset %6lX len %4X\n", header_message_type, ftell(fhd), header_message_size); end = ftell(fhd) + header_message_size; switch (header_message_type) { case 0: /* NIL Message */ if (!!(err = readOHDRHeaderMessageNIL(reader, header_message_size))) return err; break; case 1: /* Dataspace Message */ if (!!(err = readOHDRHeaderMessageDataspace(reader, &dataobject->ds))) return err; break; case 2: /* Link Info Message */ if (!!(err = readOHDRHeaderMessageLinkInfo(reader, &dataobject->li))) return err; break; case 3: /* Datatype Message */ if (!!(err = readOHDRHeaderMessageDatatype(reader, &dataobject->dt))) return err; break; case 4: /* Data Fill Message Old */ if (!!(err = readOHDRHeaderMessageDataFillOld(reader))) return err; break; case 5: /* Data Fill Message */ if (!!(err = readOHDRHeaderMessageDataFill(reader))) return err; break; case 8: /* Data Layout Message */ if (!!(err = readOHDRHeaderMessageDataLayout(reader, dataobject))) return err; break; case 10: /* Group Info Message */ if (!!(err = readOHDRHeaderMessageGroupInfo(reader, &dataobject->gi))) return err; break; case 11: /* Filter Pipeline Message */ if (!!(err = readOHDRHeaderMessageFilterPipeline(reader))) return err; break; case 12: /* Attribute Message */ if (!!(err = readOHDRHeaderMessageAttribute(reader, dataobject))) return err; break; case 16: /* Continue Message */ if (!!(err = readOHDRHeaderMessageContinue(reader, dataobject))) return err; break; case 21: /* Attribute Info Message */ if (!!(err = readOHDRHeaderMessageAttributeInfo(reader, &dataobject->ai))) return err; break; default: mylog("OHDR unknown header message of type %d\n", header_message_type); return MYSOFA_UNSUPPORTED_FORMAT; } if (ftell(fhd) != end) { mylog("OHDR message length mismatch by %ld\n", ftell(fhd) - end); return MYSOFA_INTERNAL_ERROR; } } if (fseek(fhd, end_of_messages + 4, SEEK_SET) < 0) /* skip checksum */ return errno; return MYSOFA_OK; } static int readOCHK(struct READER *reader, struct 
DATAOBJECT *dataobject, uint64_t end) { int err; char buf[5]; /* read signature */ if (fread(buf, 1, 4, reader->fhd) != 4 || strncmp(buf, "OCHK", 4)) { mylog("cannot read signature of OCHK\n"); return MYSOFA_INVALID_FORMAT; } buf[4] = 0; mylog("%08" PRIX64 " %.4s\n", (uint64_t)ftell(reader->fhd) - 4, buf); err = readOHDRmessages(reader, dataobject, end - 4); /* subtract checksum */ if (err) { return err; } return MYSOFA_OK; } int dataobjectRead(struct READER *reader, struct DATAOBJECT *dataobject, char *name) { uint64_t size_of_chunk, end_of_messages; int err; char buf[5]; memset(dataobject, 0, sizeof(*dataobject)); dataobject->address = ftell(reader->fhd); dataobject->name = name; /* read signature */ if (fread(buf, 1, 4, reader->fhd) != 4 || strncmp(buf, "OHDR", 4)) { mylog("cannot read signature of data object\n"); return MYSOFA_INVALID_FORMAT; } buf[4] = 0; mylog("%08" PRIX64 " %.4s\n", dataobject->address, buf); if (fgetc(reader->fhd) != 2) { mylog("object OHDR must have version 2\n"); return MYSOFA_UNSUPPORTED_FORMAT; } dataobject->flags = (uint8_t)fgetc(reader->fhd); if (dataobject->flags & (1 << 5)) { /* bit 5 indicated time stamps */ if (fseek(reader->fhd, 16, SEEK_CUR) < 0) /* skip them */ return errno; } if (dataobject->flags & (1 << 4)) { /* bit 4 ? */ mylog("OHDR: unsupported flags bit 4: %02X\n", dataobject->flags); return MYSOFA_UNSUPPORTED_FORMAT; } size_of_chunk = readValue(reader, 1 << (dataobject->flags & 3)); if (size_of_chunk > 0x1000000) return MYSOFA_UNSUPPORTED_FORMAT; end_of_messages = ftell(reader->fhd) + size_of_chunk; err = readOHDRmessages(reader, dataobject, end_of_messages); if (err) { return err; } if (validAddress(reader, dataobject->ai.attribute_name_btree)) { /* not needed fseek(reader->fhd, dataobject->ai.attribute_name_btree, SEEK_SET); btreeRead(reader, &dataobject->attributes); */ } /* parse message attribute info */ if (validAddress(reader, dataobject->ai.fractal_heap_address)) { if (fseek(reader->fhd, dataobject->ai.fractal_heap_address, SEEK_SET) < 0) return errno; err = fractalheapRead(reader, dataobject, &dataobject->attributes_heap); if (err) return err; } /* parse message link info */ if (validAddress(reader, dataobject->li.fractal_heap_address)) { fseek(reader->fhd, dataobject->li.fractal_heap_address, SEEK_SET); err = fractalheapRead(reader, dataobject, &dataobject->objects_heap); if (err) return err; } if (validAddress(reader, dataobject->li.address_btree_index)) { /* not needed fseek(reader->fhd, dataobject->li.address_btree_index, SEEK_SET); btreeRead(reader, &dataobject->objects); */ } dataobject->all = reader->all; reader->all = dataobject; return MYSOFA_OK; } void dataobjectFree(struct READER *reader, struct DATAOBJECT *dataobject) { struct DATAOBJECT **p; btreeFree(&dataobject->attributes_btree); fractalheapFree(&dataobject->attributes_heap); btreeFree(&dataobject->objects_btree); fractalheapFree(&dataobject->objects_heap); while (dataobject->attributes) { struct MYSOFA_ATTRIBUTE *attr = dataobject->attributes; dataobject->attributes = attr->next; free(attr->name); free(attr->value); free(attr); } while (dataobject->directory) { struct DIR *dir = dataobject->directory; dataobject->directory = dir->next; dataobjectFree(reader, &dir->dataobject); free(dir); } free(dataobject->data); free(dataobject->string); free(dataobject->name); p = &reader->all; while (*p) { if ((*p) == dataobject) { *p = dataobject->all; break; } p = &((*p)->all); } }
null
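The string branch (case 3) of readDataVar in the libmysofa HDF reader above illustrates the defensive read pattern used throughout that reader: allocate dt->size plus one byte, verify that fread returned exactly dt->size, free and bail out on a short read, and NUL-terminate before the buffer is logged or stored. A minimal standalone sketch of that pattern, assuming only a readable FILE * and a caller-supplied field size (read_string_field is an illustrative name, not a libmysofa function):

    #include <stdio.h>
    #include <stdlib.h>

    /* Read a fixed-size field into a fresh, NUL-terminated buffer.
     * Returns NULL on allocation failure or short read; the caller owns
     * and frees the returned buffer. Mirrors the steps of readDataVar's
     * string case (case 3) above. */
    static char *read_string_field(FILE *fhd, size_t size)
    {
        char *buffer = (char *)malloc(size + 1);   /* one extra byte for the NUL */
        if (buffer == NULL)
            return NULL;
        if (fread(buffer, 1, size, fhd) != size) { /* reject truncated input */
            free(buffer);
            return NULL;
        }
        buffer[size] = 0;                          /* terminate before any use */
        return buffer;
    }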
284
CWE-787
CVE-2021-37650
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/core/data/dataset_utils.h" #include "tensorflow/core/data/root_dataset.h" #include "tensorflow/core/framework/dataset.h" #include "tensorflow/core/framework/function_handle_cache.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/resource_mgr.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/threadpool.h" #include "tensorflow/core/lib/io/record_writer.h" #include "tensorflow/core/platform/file_system.h" #include "tensorflow/core/platform/resource.h" namespace tensorflow { namespace data { namespace experimental { namespace { class ToTFRecordOp : public AsyncOpKernel { public: explicit ToTFRecordOp(OpKernelConstruction* ctx) : AsyncOpKernel(ctx), background_worker_(ctx->env(), "tf_data_to_tf_record") {} template <typename T> Status ParseScalarArgument(OpKernelContext* ctx, const StringPiece& argument_name, T* output) { const Tensor* argument_t; TF_RETURN_IF_ERROR(ctx->input(argument_name, &argument_t)); if (!TensorShapeUtils::IsScalar(argument_t->shape())) { return errors::InvalidArgument(argument_name, " must be a scalar"); } *output = argument_t->scalar<T>()(); return Status::OK(); } void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override { // The call to `iterator->GetNext()` may block and depend on an inter-op // thread pool thread, so we issue the call using a background thread. 
background_worker_.Schedule([this, ctx, done = std::move(done)]() { OP_REQUIRES_OK_ASYNC(ctx, DoCompute(ctx), done); done(); }); } private: Status DoCompute(OpKernelContext* ctx) { tensorflow::ResourceTagger tag(kTFDataResourceTag, ctx->op_kernel().type_string()); tstring filename; TF_RETURN_IF_ERROR( ParseScalarArgument<tstring>(ctx, "filename", &filename)); tstring compression_type; TF_RETURN_IF_ERROR(ParseScalarArgument<tstring>(ctx, "compression_type", &compression_type)); std::unique_ptr<WritableFile> file; TF_RETURN_IF_ERROR(ctx->env()->NewWritableFile(filename, &file)); auto writer = absl::make_unique<io::RecordWriter>( file.get(), io::RecordWriterOptions::CreateRecordWriterOptions(compression_type)); DatasetBase* dataset; TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(ctx->input(0), &dataset)); IteratorContext::Params params(ctx); FunctionHandleCache function_handle_cache(params.flr); params.function_handle_cache = &function_handle_cache; ResourceMgr resource_mgr; params.resource_mgr = &resource_mgr; CancellationManager cancellation_manager(ctx->cancellation_manager()); params.cancellation_manager = &cancellation_manager; IteratorContext iter_ctx(std::move(params)); DatasetBase* finalized_dataset; TF_RETURN_IF_ERROR(FinalizeDataset(ctx, dataset, &finalized_dataset)); std::unique_ptr<IteratorBase> iterator; TF_RETURN_IF_ERROR(finalized_dataset->MakeIterator( &iter_ctx, /*parent=*/nullptr, "ToTFRecordOpIterator", &iterator)); std::vector<Tensor> components; components.reserve(finalized_dataset->output_dtypes().size()); bool end_of_sequence; do { TF_RETURN_IF_ERROR( iterator->GetNext(&iter_ctx, &components, &end_of_sequence)); if (!end_of_sequence) { TF_RETURN_IF_ERROR( writer->WriteRecord(components[0].scalar<tstring>()())); } components.clear(); } while (!end_of_sequence); return Status::OK(); } BackgroundWorker background_worker_; }; REGISTER_KERNEL_BUILDER(Name("DatasetToTFRecord").Device(DEVICE_CPU), ToTFRecordOp); REGISTER_KERNEL_BUILDER( Name("ExperimentalDatasetToTFRecord").Device(DEVICE_CPU), ToTFRecordOp); } // namespace } // namespace experimental } // namespace data } // namespace tensorflow
null
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/core/data/dataset_utils.h" #include "tensorflow/core/data/root_dataset.h" #include "tensorflow/core/framework/dataset.h" #include "tensorflow/core/framework/function_handle_cache.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/resource_mgr.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/ops_util.h" #include "tensorflow/core/lib/core/threadpool.h" #include "tensorflow/core/lib/io/record_writer.h" #include "tensorflow/core/platform/file_system.h" #include "tensorflow/core/platform/resource.h" namespace tensorflow { namespace data { namespace experimental { namespace { class ToTFRecordOp : public AsyncOpKernel { public: explicit ToTFRecordOp(OpKernelConstruction* ctx) : AsyncOpKernel(ctx), background_worker_(ctx->env(), "tf_data_to_tf_record") {} template <typename T> Status ParseScalarArgument(OpKernelContext* ctx, const StringPiece& argument_name, T* output) { const Tensor* argument_t; TF_RETURN_IF_ERROR(ctx->input(argument_name, &argument_t)); if (!TensorShapeUtils::IsScalar(argument_t->shape())) { return errors::InvalidArgument(argument_name, " must be a scalar"); } *output = argument_t->scalar<T>()(); return Status::OK(); } void ComputeAsync(OpKernelContext* ctx, DoneCallback done) override { // The call to `iterator->GetNext()` may block and depend on an inter-op // thread pool thread, so we issue the call using a background thread. 
background_worker_.Schedule([this, ctx, done = std::move(done)]() { OP_REQUIRES_OK_ASYNC(ctx, DoCompute(ctx), done); done(); }); } private: Status DoCompute(OpKernelContext* ctx) { tensorflow::ResourceTagger tag(kTFDataResourceTag, ctx->op_kernel().type_string()); tstring filename; TF_RETURN_IF_ERROR( ParseScalarArgument<tstring>(ctx, "filename", &filename)); tstring compression_type; TF_RETURN_IF_ERROR(ParseScalarArgument<tstring>(ctx, "compression_type", &compression_type)); std::unique_ptr<WritableFile> file; TF_RETURN_IF_ERROR(ctx->env()->NewWritableFile(filename, &file)); auto writer = absl::make_unique<io::RecordWriter>( file.get(), io::RecordWriterOptions::CreateRecordWriterOptions(compression_type)); DatasetBase* dataset; TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(ctx->input(0), &dataset)); IteratorContext::Params params(ctx); FunctionHandleCache function_handle_cache(params.flr); params.function_handle_cache = &function_handle_cache; ResourceMgr resource_mgr; params.resource_mgr = &resource_mgr; CancellationManager cancellation_manager(ctx->cancellation_manager()); params.cancellation_manager = &cancellation_manager; IteratorContext iter_ctx(std::move(params)); DatasetBase* finalized_dataset; TF_RETURN_IF_ERROR(FinalizeDataset(ctx, dataset, &finalized_dataset)); std::unique_ptr<IteratorBase> iterator; TF_RETURN_IF_ERROR(finalized_dataset->MakeIterator( &iter_ctx, /*parent=*/nullptr, "ToTFRecordOpIterator", &iterator)); const int num_output_dtypes = finalized_dataset->output_dtypes().size(); if (num_output_dtypes != 1) { return errors::InvalidArgument( "ToTFRecordOp currently only support datasets of 1 single column, ", "but got ", num_output_dtypes); } const DataType dt = finalized_dataset->output_dtypes()[0]; if (dt != DT_STRING) { return errors::InvalidArgument( "ToTFRecordOp currently only supports DT_STRING dataypes, but got ", DataTypeString(dt)); } std::vector<Tensor> components; components.reserve(num_output_dtypes); bool end_of_sequence; do { TF_RETURN_IF_ERROR( iterator->GetNext(&iter_ctx, &components, &end_of_sequence)); if (!end_of_sequence) { TF_RETURN_IF_ERROR( writer->WriteRecord(components[0].scalar<tstring>()())); } components.clear(); } while (!end_of_sequence); return Status::OK(); } BackgroundWorker background_worker_; }; REGISTER_KERNEL_BUILDER(Name("DatasetToTFRecord").Device(DEVICE_CPU), ToTFRecordOp); REGISTER_KERNEL_BUILDER( Name("ExperimentalDatasetToTFRecord").Device(DEVICE_CPU), ToTFRecordOp); } // namespace } // namespace experimental } // namespace data } // namespace tensorflow
null
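For record 284 (CVE-2021-37650), the patched ToTFRecordOp::DoCompute adds a guard in front of the write loop that the vulnerable version lacks: the finalized dataset must produce exactly one output column, and that column must be DT_STRING; otherwise the kernel returns errors::InvalidArgument before components[0].scalar<tstring>()() is ever evaluated. A compact model of that guard, using hypothetical stand-ins rather than TensorFlow's real types (DataType, Status, and ValidateSingleStringColumn here are illustrative only):

    #include <string>
    #include <vector>

    // Simplified stand-ins for the framework types; illustrative only.
    enum class DataType { DT_STRING, DT_FLOAT, DT_INT64 };

    struct Status {
        bool ok;
        std::string message;
        static Status OK() { return {true, ""}; }
        static Status InvalidArgument(std::string m) { return {false, std::move(m)}; }
    };

    // Models the check the patched DoCompute performs before writing records:
    // exactly one output column, and that column must hold strings.
    Status ValidateSingleStringColumn(const std::vector<DataType>& output_dtypes) {
        if (output_dtypes.size() != 1)
            return Status::InvalidArgument(
                "expected a dataset with exactly 1 column, got " +
                std::to_string(output_dtypes.size()));
        if (output_dtypes[0] != DataType::DT_STRING)
            return Status::InvalidArgument("expected a DT_STRING column");
        return Status::OK();
    }

Without such a guard, the unpatched loop indexes components[0] and treats it as a scalar string without checking either assumption, which is what the record's CWE-787 classification reflects.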
285
CWE-787
CVE-2021-37651
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #define EIGEN_USE_THREADS #include <algorithm> #include <cmath> #include <random> #include <vector> #include "tensorflow/core/kernels/fractional_pool_common.h" #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/lib/random/random.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/util/guarded_philox_random.h" namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; template <typename T> class FractionalAvgPoolOp : public OpKernel { public: explicit FractionalAvgPoolOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("pooling_ratio", &pooling_ratio_)); OP_REQUIRES_OK(context, context->GetAttr("pseudo_random", &pseudo_random_)); OP_REQUIRES_OK(context, context->GetAttr("overlapping", &overlapping_)); OP_REQUIRES(context, pooling_ratio_.size() == 4, errors::InvalidArgument( "pooling_ratio field must specify 4 dimensions")); OP_REQUIRES( context, pooling_ratio_[0] == 1 || pooling_ratio_[3] == 1, errors::Unimplemented("Fractional average pooling is not yet " "supported on the batch nor channel dimension.")); OP_REQUIRES_OK(context, context->GetAttr("deterministic", &deterministic_)); OP_REQUIRES_OK(context, context->GetAttr("seed", &seed_)); OP_REQUIRES_OK(context, context->GetAttr("seed2", &seed2_)); if (deterministic_) { // If both seeds are not set when deterministic_ is true, force set seeds. if ((seed_ == 0) && (seed2_ == 0)) { seed_ = random::New64(); seed2_ = random::New64(); } } else { OP_REQUIRES( context, (seed_ == 0) && (seed2_ == 0), errors::InvalidArgument( "Both seed and seed2 should be 0 if deterministic is false.")); } } void Compute(OpKernelContext* context) override { typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> EigenMatrixMap; constexpr int tensor_in_and_out_dims = 4; const Tensor& tensor_in = context->input(0); OP_REQUIRES(context, tensor_in.dims() == tensor_in_and_out_dims, errors::InvalidArgument("tensor_in must be 4-dimensional")); std::vector<int> input_size(tensor_in_and_out_dims); std::vector<int> output_size(tensor_in_and_out_dims); for (int i = 0; i < tensor_in_and_out_dims; ++i) { input_size[i] = tensor_in.dim_size(i); OP_REQUIRES( context, pooling_ratio_[i] <= input_size[i], errors::InvalidArgument( "Pooling ratio cannot be bigger than input tensor dim size.")); } // Output size. for (int i = 0; i < tensor_in_and_out_dims; ++i) { output_size[i] = static_cast<int>(std::floor(input_size[i] / pooling_ratio_[i])); DCHECK_GT(output_size[i], 0); } // Generate pooling sequence. 
std::vector<int64> row_cum_seq; std::vector<int64> col_cum_seq; GuardedPhiloxRandom generator; generator.Init(seed_, seed2_); row_cum_seq = GeneratePoolingSequence(input_size[1], output_size[1], &generator, pseudo_random_); col_cum_seq = GeneratePoolingSequence(input_size[2], output_size[2], &generator, pseudo_random_); // Prepare output. Tensor* output_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output( 0, TensorShape({output_size[0], output_size[1], output_size[2], output_size[3]}), &output_tensor)); Tensor* output_row_seq_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output( 1, TensorShape({static_cast<int64>(row_cum_seq.size())}), &output_row_seq_tensor)); Tensor* output_col_seq_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output( 2, TensorShape({static_cast<int64>(col_cum_seq.size())}), &output_col_seq_tensor)); ConstEigenMatrixMap in_mat(tensor_in.flat<T>().data(), input_size[3], input_size[2] * input_size[1] * input_size[0]); EigenMatrixMap out_mat(output_tensor->flat<T>().data(), output_size[3], output_size[2] * output_size[1] * output_size[0]); // out_count corresponds to number of elements in each pooling cell. Eigen::Matrix<T, Eigen::Dynamic, 1> out_count(out_mat.cols()); // Initializes the output tensor and out_count with 0. out_mat.setZero(); out_count.setZero(); auto output_row_seq_flat = output_row_seq_tensor->flat<int64>(); auto output_col_seq_flat = output_col_seq_tensor->flat<int64>(); // Set output tensors. for (int i = 0; i < row_cum_seq.size(); ++i) { output_row_seq_flat(i) = row_cum_seq[i]; } for (int i = 0; i < col_cum_seq.size(); ++i) { output_col_seq_flat(i) = col_cum_seq[i]; } // For both input and output, // 0: batch // 1: row / row // 2: col / col // 3: depth / channel const int64_t row_max = input_size[1] - 1; const int64_t col_max = input_size[2] - 1; for (int64_t b = 0; b < input_size[0]; ++b) { // row sequence. for (int64_t hs = 0; hs < row_cum_seq.size() - 1; ++hs) { // row start and end. const int64_t row_start = row_cum_seq[hs]; int64_t row_end = overlapping_ ? row_cum_seq[hs + 1] : row_cum_seq[hs + 1] - 1; row_end = std::min(row_end, row_max); // col sequence. for (int64_t ws = 0; ws < col_cum_seq.size() - 1; ++ws) { const int64_t out_offset = (b * output_size[1] + hs) * output_size[2] + ws; // col start and end. const int64_t col_start = col_cum_seq[ws]; int64_t col_end = overlapping_ ? 
col_cum_seq[ws + 1] : col_cum_seq[ws + 1] - 1; col_end = std::min(col_end, col_max); for (int64_t h = row_start; h <= row_end; ++h) { for (int64_t w = col_start; w <= col_end; ++w) { const int64_t in_offset = (b * input_size[1] + h) * input_size[2] + w; out_mat.col(out_offset) += in_mat.col(in_offset); out_count(out_offset)++; } } } } } DCHECK_GT(out_count.minCoeff(), 0); out_mat.array().rowwise() /= out_count.transpose().array(); } private: bool deterministic_; int64 seed_; int64 seed2_; std::vector<float> pooling_ratio_; bool pseudo_random_; bool overlapping_; }; #define REGISTER_FRACTIONALAVGPOOL(type) \ REGISTER_KERNEL_BUILDER( \ Name("FractionalAvgPool").Device(DEVICE_CPU).TypeConstraint<type>("T"), \ FractionalAvgPoolOp<type>) REGISTER_FRACTIONALAVGPOOL(int32); REGISTER_FRACTIONALAVGPOOL(int64); REGISTER_FRACTIONALAVGPOOL(float); REGISTER_FRACTIONALAVGPOOL(double); #undef REGISTER_FRACTIONALAVGPOOL template <class T> class FractionalAvgPoolGradOp : public OpKernel { public: explicit FractionalAvgPoolGradOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("overlapping", &overlapping_)); } void Compute(OpKernelContext* context) override { // Here's the basic idea: // Batch and depth dimension are independent from row and col dimension. And // because FractionalAvgPool currently only support pooling along row and // col, we can basically think of this 4D tensor backpropagation as // operation of a series of 2D planes. // // For each element of a 'slice' (2D plane) of output_backprop, we need to // figure out its contributors when doing FractionalAvgPool operation. This // can be done based on row_pooling_sequence, col_pooling_seq and // overlapping. // Once we figure out the original contributors, we just need to evenly // divide the value of this element among these contributors. // // Internally, we divide the out_backprop tensor and store it in a temporary // tensor of double type. And cast it to the corresponding type. typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>> EigenDoubleMatrixMap; // Grab the inputs. 
const Tensor& orig_input_tensor_shape = context->input(0); OP_REQUIRES(context, orig_input_tensor_shape.dims() == 1 && orig_input_tensor_shape.NumElements() == 4, errors::InvalidArgument("original input tensor shape must be" "1-dimensional and 4 elements")); const Tensor& out_backprop = context->input(1); const Tensor& row_seq_tensor = context->input(2); const Tensor& col_seq_tensor = context->input(3); const int64_t out_batch = out_backprop.dim_size(0); const int64_t out_rows = out_backprop.dim_size(1); const int64_t out_cols = out_backprop.dim_size(2); const int64_t out_depth = out_backprop.dim_size(3); OP_REQUIRES(context, row_seq_tensor.NumElements() > out_rows, errors::InvalidArgument("Given out_backprop shape ", out_backprop.shape().DebugString(), ", row_seq_tensor must have at least ", out_rows + 1, " elements, but got ", row_seq_tensor.NumElements())); OP_REQUIRES(context, col_seq_tensor.NumElements() > out_cols, errors::InvalidArgument("Given out_backprop shape ", out_backprop.shape().DebugString(), ", col_seq_tensor must have at least ", out_cols + 1, " elements, but got ", col_seq_tensor.NumElements())); auto row_seq_tensor_flat = row_seq_tensor.flat<int64>(); auto col_seq_tensor_flat = col_seq_tensor.flat<int64>(); auto orig_input_tensor_shape_flat = orig_input_tensor_shape.flat<int64>(); const int64_t in_batch = orig_input_tensor_shape_flat(0); const int64_t in_rows = orig_input_tensor_shape_flat(1); const int64_t in_cols = orig_input_tensor_shape_flat(2); const int64_t in_depth = orig_input_tensor_shape_flat(3); constexpr int tensor_in_and_out_dims = 4; // Transform orig_input_tensor_shape into TensorShape TensorShape in_shape; for (auto i = 0; i < tensor_in_and_out_dims; ++i) { in_shape.AddDim(orig_input_tensor_shape_flat(i)); } // Create intermediate in_backprop. Tensor in_backprop_tensor_temp; OP_REQUIRES_OK(context, context->forward_input_or_allocate_temp( {0}, DataTypeToEnum<double>::v(), in_shape, &in_backprop_tensor_temp)); in_backprop_tensor_temp.flat<double>().setZero(); // Transform 4D tensor to 2D matrix. EigenDoubleMatrixMap in_backprop_tensor_temp_mat( in_backprop_tensor_temp.flat<double>().data(), in_depth, in_cols * in_rows * in_batch); ConstEigenMatrixMap out_backprop_mat(out_backprop.flat<T>().data(), out_depth, out_cols * out_rows * out_batch); // Loop through each element of out_backprop and evenly distribute the // element to the corresponding pooling cell. const int64_t in_max_row_index = in_rows - 1; const int64_t in_max_col_index = in_cols - 1; for (int64_t b = 0; b < out_batch; ++b) { for (int64_t r = 0; r < out_rows; ++r) { const int64_t in_row_start = row_seq_tensor_flat(r); int64_t in_row_end = overlapping_ ? row_seq_tensor_flat(r + 1) : row_seq_tensor_flat(r + 1) - 1; in_row_end = std::min(in_row_end, in_max_row_index); for (int64_t c = 0; c < out_cols; ++c) { const int64_t in_col_start = col_seq_tensor_flat(c); int64_t in_col_end = overlapping_ ? col_seq_tensor_flat(c + 1) : col_seq_tensor_flat(c + 1) - 1; in_col_end = std::min(in_col_end, in_max_col_index); const int64_t num_elements_in_pooling_cell = (in_row_end - in_row_start + 1) * (in_col_end - in_col_start + 1); const int64_t out_index = (b * out_rows + r) * out_cols + c; // Now we can evenly distribute out_backprop(b, h, w, *) to // in_backprop(b, hs:he, ws:we, *). for (int64_t in_r = in_row_start; in_r <= in_row_end; ++in_r) { for (int64_t in_c = in_col_start; in_c <= in_col_end; ++in_c) { const int64_t in_index = (b * in_rows + in_r) * in_cols + in_c; // Walk through each channel (depth). 
for (int64_t d = 0; d < out_depth; ++d) { const double out_backprop_element = static_cast<double>( out_backprop_mat.coeffRef(d, out_index)); double& in_backprop_ref = in_backprop_tensor_temp_mat.coeffRef(d, in_index); in_backprop_ref += out_backprop_element / num_elements_in_pooling_cell; } } } } } } // Depending on the type, cast double to type T. Tensor* in_backprop_tensor = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, in_shape, &in_backprop_tensor)); auto in_backprop_tensor_flat = in_backprop_tensor->flat<T>(); auto in_backprop_tensor_temp_flat = in_backprop_tensor_temp.flat<double>(); for (int64_t i = 0; i < in_backprop_tensor_flat.size(); ++i) { in_backprop_tensor_flat(i) = static_cast<T>(in_backprop_tensor_temp_flat(i)); } } private: bool overlapping_; }; #define REGISTER_FRACTIONALAVGPOOLGRAD(type) \ REGISTER_KERNEL_BUILDER(Name("FractionalAvgPoolGrad") \ .Device(DEVICE_CPU) \ .TypeConstraint<type>("T"), \ FractionalAvgPoolGradOp<type>) REGISTER_FRACTIONALAVGPOOLGRAD(int32); REGISTER_FRACTIONALAVGPOOLGRAD(int64); REGISTER_FRACTIONALAVGPOOLGRAD(float); REGISTER_FRACTIONALAVGPOOLGRAD(double); #undef REGISTER_FRACTIONALAVGPOOLGRAD } // namespace tensorflow
null
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #define EIGEN_USE_THREADS #include <algorithm> #include <cmath> #include <random> #include <vector> #include "tensorflow/core/kernels/fractional_pool_common.h" #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/lib/random/random.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/util/guarded_philox_random.h" namespace tensorflow { typedef Eigen::ThreadPoolDevice CPUDevice; template <typename T> class FractionalAvgPoolOp : public OpKernel { public: explicit FractionalAvgPoolOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("pooling_ratio", &pooling_ratio_)); OP_REQUIRES_OK(context, context->GetAttr("pseudo_random", &pseudo_random_)); OP_REQUIRES_OK(context, context->GetAttr("overlapping", &overlapping_)); OP_REQUIRES(context, pooling_ratio_.size() == 4, errors::InvalidArgument( "pooling_ratio field must specify 4 dimensions")); OP_REQUIRES( context, pooling_ratio_[0] == 1 || pooling_ratio_[3] == 1, errors::Unimplemented("Fractional average pooling is not yet " "supported on the batch nor channel dimension.")); OP_REQUIRES_OK(context, context->GetAttr("deterministic", &deterministic_)); OP_REQUIRES_OK(context, context->GetAttr("seed", &seed_)); OP_REQUIRES_OK(context, context->GetAttr("seed2", &seed2_)); if (deterministic_) { // If both seeds are not set when deterministic_ is true, force set seeds. if ((seed_ == 0) && (seed2_ == 0)) { seed_ = random::New64(); seed2_ = random::New64(); } } else { OP_REQUIRES( context, (seed_ == 0) && (seed2_ == 0), errors::InvalidArgument( "Both seed and seed2 should be 0 if deterministic is false.")); } } void Compute(OpKernelContext* context) override { typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> EigenMatrixMap; constexpr int tensor_in_and_out_dims = 4; const Tensor& tensor_in = context->input(0); OP_REQUIRES(context, tensor_in.dims() == tensor_in_and_out_dims, errors::InvalidArgument("tensor_in must be 4-dimensional")); std::vector<int> input_size(tensor_in_and_out_dims); std::vector<int> output_size(tensor_in_and_out_dims); for (int i = 0; i < tensor_in_and_out_dims; ++i) { input_size[i] = tensor_in.dim_size(i); OP_REQUIRES( context, pooling_ratio_[i] <= input_size[i], errors::InvalidArgument( "Pooling ratio cannot be bigger than input tensor dim size.")); } // Output size. for (int i = 0; i < tensor_in_and_out_dims; ++i) { output_size[i] = static_cast<int>(std::floor(input_size[i] / pooling_ratio_[i])); DCHECK_GT(output_size[i], 0); } // Generate pooling sequence. 
std::vector<int64> row_cum_seq; std::vector<int64> col_cum_seq; GuardedPhiloxRandom generator; generator.Init(seed_, seed2_); row_cum_seq = GeneratePoolingSequence(input_size[1], output_size[1], &generator, pseudo_random_); col_cum_seq = GeneratePoolingSequence(input_size[2], output_size[2], &generator, pseudo_random_); // Prepare output. Tensor* output_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output( 0, TensorShape({output_size[0], output_size[1], output_size[2], output_size[3]}), &output_tensor)); Tensor* output_row_seq_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output( 1, TensorShape({static_cast<int64>(row_cum_seq.size())}), &output_row_seq_tensor)); Tensor* output_col_seq_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output( 2, TensorShape({static_cast<int64>(col_cum_seq.size())}), &output_col_seq_tensor)); ConstEigenMatrixMap in_mat(tensor_in.flat<T>().data(), input_size[3], input_size[2] * input_size[1] * input_size[0]); EigenMatrixMap out_mat(output_tensor->flat<T>().data(), output_size[3], output_size[2] * output_size[1] * output_size[0]); // out_count corresponds to number of elements in each pooling cell. Eigen::Matrix<T, Eigen::Dynamic, 1> out_count(out_mat.cols()); // Initializes the output tensor and out_count with 0. out_mat.setZero(); out_count.setZero(); auto output_row_seq_flat = output_row_seq_tensor->flat<int64>(); auto output_col_seq_flat = output_col_seq_tensor->flat<int64>(); // Set output tensors. for (int i = 0; i < row_cum_seq.size(); ++i) { output_row_seq_flat(i) = row_cum_seq[i]; } for (int i = 0; i < col_cum_seq.size(); ++i) { output_col_seq_flat(i) = col_cum_seq[i]; } // For both input and output, // 0: batch // 1: row / row // 2: col / col // 3: depth / channel const int64_t row_max = input_size[1] - 1; const int64_t col_max = input_size[2] - 1; for (int64_t b = 0; b < input_size[0]; ++b) { // row sequence. for (int64_t hs = 0; hs < row_cum_seq.size() - 1; ++hs) { // row start and end. const int64_t row_start = row_cum_seq[hs]; int64_t row_end = overlapping_ ? row_cum_seq[hs + 1] : row_cum_seq[hs + 1] - 1; row_end = std::min(row_end, row_max); // col sequence. for (int64_t ws = 0; ws < col_cum_seq.size() - 1; ++ws) { const int64_t out_offset = (b * output_size[1] + hs) * output_size[2] + ws; // col start and end. const int64_t col_start = col_cum_seq[ws]; int64_t col_end = overlapping_ ? 
col_cum_seq[ws + 1] : col_cum_seq[ws + 1] - 1; col_end = std::min(col_end, col_max); for (int64_t h = row_start; h <= row_end; ++h) { for (int64_t w = col_start; w <= col_end; ++w) { const int64_t in_offset = (b * input_size[1] + h) * input_size[2] + w; out_mat.col(out_offset) += in_mat.col(in_offset); out_count(out_offset)++; } } } } } DCHECK_GT(out_count.minCoeff(), 0); out_mat.array().rowwise() /= out_count.transpose().array(); } private: bool deterministic_; int64 seed_; int64 seed2_; std::vector<float> pooling_ratio_; bool pseudo_random_; bool overlapping_; }; #define REGISTER_FRACTIONALAVGPOOL(type) \ REGISTER_KERNEL_BUILDER( \ Name("FractionalAvgPool").Device(DEVICE_CPU).TypeConstraint<type>("T"), \ FractionalAvgPoolOp<type>) REGISTER_FRACTIONALAVGPOOL(int32); REGISTER_FRACTIONALAVGPOOL(int64); REGISTER_FRACTIONALAVGPOOL(float); REGISTER_FRACTIONALAVGPOOL(double); #undef REGISTER_FRACTIONALAVGPOOL template <class T> class FractionalAvgPoolGradOp : public OpKernel { public: explicit FractionalAvgPoolGradOp(OpKernelConstruction* context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("overlapping", &overlapping_)); } void Compute(OpKernelContext* context) override { // Here's the basic idea: // Batch and depth dimension are independent from row and col dimension. And // because FractionalAvgPool currently only support pooling along row and // col, we can basically think of this 4D tensor backpropagation as // operation of a series of 2D planes. // // For each element of a 'slice' (2D plane) of output_backprop, we need to // figure out its contributors when doing FractionalAvgPool operation. This // can be done based on row_pooling_sequence, col_pooling_seq and // overlapping. // Once we figure out the original contributors, we just need to evenly // divide the value of this element among these contributors. // // Internally, we divide the out_backprop tensor and store it in a temporary // tensor of double type. And cast it to the corresponding type. typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>> EigenDoubleMatrixMap; // Grab the inputs. 
const Tensor& orig_input_tensor_shape = context->input(0); OP_REQUIRES(context, orig_input_tensor_shape.dims() == 1 && orig_input_tensor_shape.NumElements() == 4, errors::InvalidArgument("original input tensor shape must be" "1-dimensional and 4 elements")); const Tensor& out_backprop = context->input(1); const Tensor& row_seq_tensor = context->input(2); const Tensor& col_seq_tensor = context->input(3); const int64_t out_batch = out_backprop.dim_size(0); const int64_t out_rows = out_backprop.dim_size(1); const int64_t out_cols = out_backprop.dim_size(2); const int64_t out_depth = out_backprop.dim_size(3); OP_REQUIRES(context, row_seq_tensor.NumElements() > out_rows, errors::InvalidArgument("Given out_backprop shape ", out_backprop.shape().DebugString(), ", row_seq_tensor must have at least ", out_rows + 1, " elements, but got ", row_seq_tensor.NumElements())); OP_REQUIRES(context, col_seq_tensor.NumElements() > out_cols, errors::InvalidArgument("Given out_backprop shape ", out_backprop.shape().DebugString(), ", col_seq_tensor must have at least ", out_cols + 1, " elements, but got ", col_seq_tensor.NumElements())); auto row_seq_tensor_flat = row_seq_tensor.flat<int64>(); auto col_seq_tensor_flat = col_seq_tensor.flat<int64>(); auto orig_input_tensor_shape_flat = orig_input_tensor_shape.flat<int64>(); const int64_t in_batch = orig_input_tensor_shape_flat(0); const int64_t in_rows = orig_input_tensor_shape_flat(1); const int64_t in_cols = orig_input_tensor_shape_flat(2); const int64_t in_depth = orig_input_tensor_shape_flat(3); OP_REQUIRES( context, in_batch != 0, errors::InvalidArgument("Batch dimension of input must not be 0")); OP_REQUIRES( context, in_rows != 0, errors::InvalidArgument("Rows dimension of input must not be 0")); OP_REQUIRES( context, in_cols != 0, errors::InvalidArgument("Columns dimension of input must not be 0")); OP_REQUIRES( context, in_depth != 0, errors::InvalidArgument("Depth dimension of input must not be 0")); constexpr int tensor_in_and_out_dims = 4; // Transform orig_input_tensor_shape into TensorShape TensorShape in_shape; for (auto i = 0; i < tensor_in_and_out_dims; ++i) { in_shape.AddDim(orig_input_tensor_shape_flat(i)); } // Create intermediate in_backprop. Tensor in_backprop_tensor_temp; OP_REQUIRES_OK(context, context->forward_input_or_allocate_temp( {0}, DataTypeToEnum<double>::v(), in_shape, &in_backprop_tensor_temp)); in_backprop_tensor_temp.flat<double>().setZero(); // Transform 4D tensor to 2D matrix. EigenDoubleMatrixMap in_backprop_tensor_temp_mat( in_backprop_tensor_temp.flat<double>().data(), in_depth, in_cols * in_rows * in_batch); ConstEigenMatrixMap out_backprop_mat(out_backprop.flat<T>().data(), out_depth, out_cols * out_rows * out_batch); // Loop through each element of out_backprop and evenly distribute the // element to the corresponding pooling cell. const int64_t in_max_row_index = in_rows - 1; const int64_t in_max_col_index = in_cols - 1; for (int64_t b = 0; b < out_batch; ++b) { for (int64_t r = 0; r < out_rows; ++r) { const int64_t in_row_start = row_seq_tensor_flat(r); int64_t in_row_end = overlapping_ ? row_seq_tensor_flat(r + 1) : row_seq_tensor_flat(r + 1) - 1; in_row_end = std::min(in_row_end, in_max_row_index); for (int64_t c = 0; c < out_cols; ++c) { const int64_t in_col_start = col_seq_tensor_flat(c); int64_t in_col_end = overlapping_ ? 
col_seq_tensor_flat(c + 1) : col_seq_tensor_flat(c + 1) - 1; in_col_end = std::min(in_col_end, in_max_col_index); const int64_t num_elements_in_pooling_cell = (in_row_end - in_row_start + 1) * (in_col_end - in_col_start + 1); const int64_t out_index = (b * out_rows + r) * out_cols + c; // Now we can evenly distribute out_backprop(b, h, w, *) to // in_backprop(b, hs:he, ws:we, *). for (int64_t in_r = in_row_start; in_r <= in_row_end; ++in_r) { for (int64_t in_c = in_col_start; in_c <= in_col_end; ++in_c) { const int64_t in_index = (b * in_rows + in_r) * in_cols + in_c; // Walk through each channel (depth). for (int64_t d = 0; d < out_depth; ++d) { const double out_backprop_element = static_cast<double>( out_backprop_mat.coeffRef(d, out_index)); double& in_backprop_ref = in_backprop_tensor_temp_mat.coeffRef(d, in_index); in_backprop_ref += out_backprop_element / num_elements_in_pooling_cell; } } } } } } // Depending on the type, cast double to type T. Tensor* in_backprop_tensor = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, in_shape, &in_backprop_tensor)); auto in_backprop_tensor_flat = in_backprop_tensor->flat<T>(); auto in_backprop_tensor_temp_flat = in_backprop_tensor_temp.flat<double>(); for (int64_t i = 0; i < in_backprop_tensor_flat.size(); ++i) { in_backprop_tensor_flat(i) = static_cast<T>(in_backprop_tensor_temp_flat(i)); } } private: bool overlapping_; }; #define REGISTER_FRACTIONALAVGPOOLGRAD(type) \ REGISTER_KERNEL_BUILDER(Name("FractionalAvgPoolGrad") \ .Device(DEVICE_CPU) \ .TypeConstraint<type>("T"), \ FractionalAvgPoolGradOp<type>) REGISTER_FRACTIONALAVGPOOLGRAD(int32); REGISTER_FRACTIONALAVGPOOLGRAD(int64); REGISTER_FRACTIONALAVGPOOLGRAD(float); REGISTER_FRACTIONALAVGPOOLGRAD(double); #undef REGISTER_FRACTIONALAVGPOOLGRAD } // namespace tensorflow
null
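Record 285's fix is similarly small: the patched FractionalAvgPoolGradOp::Compute adds four OP_REQUIRES checks rejecting a zero batch, row, column, or depth dimension in orig_input_tensor_shape. If any of those entries is zero, in_shape describes an empty tensor, so in_backprop_tensor_temp holds no elements, yet the loops driven by out_backprop and the row/col sequence tensors can still call in_backprop_tensor_temp_mat.coeffRef(d, in_index) and write outside the allocation. A standalone sketch of the added shape guard (the function name and the exception-based error reporting are illustrative, not TensorFlow API):

    #include <cstdint>
    #include <stdexcept>
    #include <string>

    // Models the guard added to FractionalAvgPoolGrad: every entry of the
    // original input shape must be non-zero before the gradient buffer is
    // allocated and indexed. Throws instead of returning a Status.
    void CheckNoZeroDims(int64_t batch, int64_t rows, int64_t cols, int64_t depth) {
        const int64_t dims[4] = {batch, rows, cols, depth};
        static const char* const kNames[4] = {"Batch", "Rows", "Columns", "Depth"};
        for (int i = 0; i < 4; ++i) {
            if (dims[i] == 0)
                throw std::invalid_argument(std::string(kNames[i]) +
                                            " dimension of input must not be 0");
        }
    }

A caller would run CheckNoZeroDims(in_batch, in_rows, in_cols, in_depth) immediately after the four orig_input_tensor_shape_flat() reads, which is where the real checks sit in the patched kernel above.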
286
CWE-787
CVE-2021-3839
/* SPDX-License-Identifier: BSD-3-Clause * Copyright(c) 2010-2018 Intel Corporation */ /* Security model * -------------- * The vhost-user protocol connection is an external interface, so it must be * robust against invalid inputs. * * This is important because the vhost-user master is only one step removed * from the guest. Malicious guests that have escaped will then launch further * attacks from the vhost-user master. * * Even in deployments where guests are trusted, a bug in the vhost-user master * can still cause invalid messages to be sent. Such messages must not * compromise the stability of the DPDK application by causing crashes, memory * corruption, or other problematic behavior. * * Do not assume received VhostUserMsg fields contain sensible values! */ #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <fcntl.h> #include <sys/ioctl.h> #include <sys/mman.h> #include <sys/stat.h> #include <sys/syscall.h> #ifdef RTE_LIBRTE_VHOST_NUMA #include <numaif.h> #endif #ifdef RTE_LIBRTE_VHOST_POSTCOPY #include <linux/userfaultfd.h> #endif #ifdef F_ADD_SEALS /* if file sealing is supported, so is memfd */ #include <linux/memfd.h> #define MEMFD_SUPPORTED #endif #include <rte_common.h> #include <rte_malloc.h> #include <rte_log.h> #include <rte_vfio.h> #include <rte_errno.h> #include "iotlb.h" #include "vhost.h" #include "vhost_user.h" #define VIRTIO_MIN_MTU 68 #define VIRTIO_MAX_MTU 65535 #define INFLIGHT_ALIGNMENT 64 #define INFLIGHT_VERSION 0x1 static const char *vhost_message_str[VHOST_USER_MAX] = { [VHOST_USER_NONE] = "VHOST_USER_NONE", [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES", [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES", [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER", [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER", [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE", [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE", [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD", [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM", [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR", [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE", [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE", [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK", [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL", [VHOST_USER_SET_VRING_ERR] = "VHOST_USER_SET_VRING_ERR", [VHOST_USER_GET_PROTOCOL_FEATURES] = "VHOST_USER_GET_PROTOCOL_FEATURES", [VHOST_USER_SET_PROTOCOL_FEATURES] = "VHOST_USER_SET_PROTOCOL_FEATURES", [VHOST_USER_GET_QUEUE_NUM] = "VHOST_USER_GET_QUEUE_NUM", [VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE", [VHOST_USER_SEND_RARP] = "VHOST_USER_SEND_RARP", [VHOST_USER_NET_SET_MTU] = "VHOST_USER_NET_SET_MTU", [VHOST_USER_SET_SLAVE_REQ_FD] = "VHOST_USER_SET_SLAVE_REQ_FD", [VHOST_USER_IOTLB_MSG] = "VHOST_USER_IOTLB_MSG", [VHOST_USER_CRYPTO_CREATE_SESS] = "VHOST_USER_CRYPTO_CREATE_SESS", [VHOST_USER_CRYPTO_CLOSE_SESS] = "VHOST_USER_CRYPTO_CLOSE_SESS", [VHOST_USER_POSTCOPY_ADVISE] = "VHOST_USER_POSTCOPY_ADVISE", [VHOST_USER_POSTCOPY_LISTEN] = "VHOST_USER_POSTCOPY_LISTEN", [VHOST_USER_POSTCOPY_END] = "VHOST_USER_POSTCOPY_END", [VHOST_USER_GET_INFLIGHT_FD] = "VHOST_USER_GET_INFLIGHT_FD", [VHOST_USER_SET_INFLIGHT_FD] = "VHOST_USER_SET_INFLIGHT_FD", [VHOST_USER_SET_STATUS] = "VHOST_USER_SET_STATUS", [VHOST_USER_GET_STATUS] = "VHOST_USER_GET_STATUS", }; static int send_vhost_reply(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx); static int read_vhost_message(struct 
virtio_net *dev, int sockfd, struct vhu_msg_context *ctx); static void close_msg_fds(struct vhu_msg_context *ctx) { int i; for (i = 0; i < ctx->fd_num; i++) { int fd = ctx->fds[i]; if (fd == -1) continue; ctx->fds[i] = -1; close(fd); } } /* * Ensure the expected number of FDs is received, * close all FDs and return an error if this is not the case. */ static int validate_msg_fds(struct virtio_net *dev, struct vhu_msg_context *ctx, int expected_fds) { if (ctx->fd_num == expected_fds) return 0; VHOST_LOG_CONFIG(ERR, "(%s) expect %d FDs for request %s, received %d\n", dev->ifname, expected_fds, vhost_message_str[ctx->msg.request.master], ctx->fd_num); close_msg_fds(ctx); return -1; } static uint64_t get_blk_size(int fd) { struct stat stat; int ret; ret = fstat(fd, &stat); return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize; } static void async_dma_map(struct virtio_net *dev, bool do_map) { int ret = 0; uint32_t i; struct guest_page *page; if (do_map) { for (i = 0; i < dev->nr_guest_pages; i++) { page = &dev->guest_pages[i]; ret = rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD, page->host_user_addr, page->host_iova, page->size); if (ret) { /* * DMA device may bind with kernel driver, in this case, * we don't need to program IOMMU manually. However, if no * device is bound with vfio/uio in DPDK, and vfio kernel * module is loaded, the API will still be called and return * with ENODEV. * * DPDK vfio only returns ENODEV in very similar situations * (vfio either unsupported, or supported but no devices found). * Either way, no mappings could be performed. We treat it as * normal case in async path. This is a workaround. */ if (rte_errno == ENODEV) return; /* DMA mapping errors won't stop VHOST_USER_SET_MEM_TABLE. */ VHOST_LOG_CONFIG(ERR, "DMA engine map failed\n"); } } } else { for (i = 0; i < dev->nr_guest_pages; i++) { page = &dev->guest_pages[i]; ret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD, page->host_user_addr, page->host_iova, page->size); if (ret) { /* like DMA map, ignore the kernel driver case when unmap. 
*/ if (rte_errno == EINVAL) return; VHOST_LOG_CONFIG(ERR, "DMA engine unmap failed\n"); } } } } static void free_mem_region(struct virtio_net *dev) { uint32_t i; struct rte_vhost_mem_region *reg; if (!dev || !dev->mem) return; if (dev->async_copy && rte_vfio_is_enabled("vfio")) async_dma_map(dev, false); for (i = 0; i < dev->mem->nregions; i++) { reg = &dev->mem->regions[i]; if (reg->host_user_addr) { munmap(reg->mmap_addr, reg->mmap_size); close(reg->fd); } } } void vhost_backend_cleanup(struct virtio_net *dev) { struct rte_vdpa_device *vdpa_dev; vdpa_dev = dev->vdpa_dev; if (vdpa_dev && vdpa_dev->ops->dev_cleanup != NULL) vdpa_dev->ops->dev_cleanup(dev->vid); if (dev->mem) { free_mem_region(dev); rte_free(dev->mem); dev->mem = NULL; } rte_free(dev->guest_pages); dev->guest_pages = NULL; if (dev->log_addr) { munmap((void *)(uintptr_t)dev->log_addr, dev->log_size); dev->log_addr = 0; } if (dev->inflight_info) { if (dev->inflight_info->addr) { munmap(dev->inflight_info->addr, dev->inflight_info->size); dev->inflight_info->addr = NULL; } if (dev->inflight_info->fd >= 0) { close(dev->inflight_info->fd); dev->inflight_info->fd = -1; } rte_free(dev->inflight_info); dev->inflight_info = NULL; } if (dev->slave_req_fd >= 0) { close(dev->slave_req_fd); dev->slave_req_fd = -1; } if (dev->postcopy_ufd >= 0) { close(dev->postcopy_ufd); dev->postcopy_ufd = -1; } dev->postcopy_listening = 0; } static void vhost_user_notify_queue_state(struct virtio_net *dev, uint16_t index, int enable) { struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev; struct vhost_virtqueue *vq = dev->virtqueue[index]; /* Configure guest notifications on enable */ if (enable && vq->notif_enable != VIRTIO_UNINITIALIZED_NOTIF) vhost_enable_guest_notification(dev, vq, vq->notif_enable); if (vdpa_dev && vdpa_dev->ops->set_vring_state) vdpa_dev->ops->set_vring_state(dev->vid, index, enable); if (dev->notify_ops->vring_state_changed) dev->notify_ops->vring_state_changed(dev->vid, index, enable); } /* * This function just returns success at the moment unless * the device hasn't been initialised. */ static int vhost_user_set_owner(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; return RTE_VHOST_MSG_RESULT_OK; } static int vhost_user_reset_owner(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; vhost_destroy_device_notify(dev); cleanup_device(dev, 0); reset_device(dev); return RTE_VHOST_MSG_RESULT_OK; } /* * The features that we support are requested. */ static int vhost_user_get_features(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; uint64_t features = 0; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; rte_vhost_driver_get_features(dev->ifname, &features); ctx->msg.payload.u64 = features; ctx->msg.size = sizeof(ctx->msg.payload.u64); ctx->fd_num = 0; return RTE_VHOST_MSG_RESULT_REPLY; } /* * The queue number that we support are requested. 
*/ static int vhost_user_get_queue_num(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; uint32_t queue_num = 0; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; rte_vhost_driver_get_queue_num(dev->ifname, &queue_num); ctx->msg.payload.u64 = (uint64_t)queue_num; ctx->msg.size = sizeof(ctx->msg.payload.u64); ctx->fd_num = 0; return RTE_VHOST_MSG_RESULT_REPLY; } /* * We receive the negotiated features supported by us and the virtio device. */ static int vhost_user_set_features(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; uint64_t features = ctx->msg.payload.u64; uint64_t vhost_features = 0; struct rte_vdpa_device *vdpa_dev; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; rte_vhost_driver_get_features(dev->ifname, &vhost_features); if (features & ~vhost_features) { VHOST_LOG_CONFIG(ERR, "(%s) received invalid negotiated features.\n", dev->ifname); dev->flags |= VIRTIO_DEV_FEATURES_FAILED; dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK; return RTE_VHOST_MSG_RESULT_ERR; } if (dev->flags & VIRTIO_DEV_RUNNING) { if (dev->features == features) return RTE_VHOST_MSG_RESULT_OK; /* * Error out if master tries to change features while device is * in running state. The exception being VHOST_F_LOG_ALL, which * is enabled when the live-migration starts. */ if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) { VHOST_LOG_CONFIG(ERR, "(%s) features changed while device is running.\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } if (dev->notify_ops->features_changed) dev->notify_ops->features_changed(dev->vid, features); } dev->features = features; if (dev->features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1) | (1ULL << VIRTIO_F_RING_PACKED))) { dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf); } else { dev->vhost_hlen = sizeof(struct virtio_net_hdr); } VHOST_LOG_CONFIG(INFO, "(%s) negotiated Virtio features: 0x%" PRIx64 "\n", dev->ifname, dev->features); VHOST_LOG_CONFIG(DEBUG, "(%s) mergeable RX buffers %s, virtio 1 %s\n", dev->ifname, (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off", (dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off"); if ((dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) && !(dev->features & (1ULL << VIRTIO_NET_F_MQ))) { /* * Remove all but first queue pair if MQ hasn't been * negotiated. This is safe because the device is not * running at this stage. */ while (dev->nr_vring > 2) { struct vhost_virtqueue *vq; vq = dev->virtqueue[--dev->nr_vring]; if (!vq) continue; dev->virtqueue[dev->nr_vring] = NULL; cleanup_vq(vq, 1); cleanup_vq_inflight(dev, vq); free_vq(dev, vq); } } vdpa_dev = dev->vdpa_dev; if (vdpa_dev) vdpa_dev->ops->set_features(dev->vid); dev->flags &= ~VIRTIO_DEV_FEATURES_FAILED; return RTE_VHOST_MSG_RESULT_OK; } /* * The virtio device sends us the size of the descriptor ring. 
*/ static int vhost_user_set_vring_num(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index]; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; if (ctx->msg.payload.state.num > 32768) { VHOST_LOG_CONFIG(ERR, "(%s) invalid virtqueue size %u\n", dev->ifname, ctx->msg.payload.state.num); return RTE_VHOST_MSG_RESULT_ERR; } vq->size = ctx->msg.payload.state.num; /* VIRTIO 1.0, 2.4 Virtqueues says: * * Queue Size value is always a power of 2. The maximum Queue Size * value is 32768. * * VIRTIO 1.1 2.7 Virtqueues says: * * Packed virtqueues support up to 2^15 entries each. */ if (!vq_is_packed(dev)) { if (vq->size & (vq->size - 1)) { VHOST_LOG_CONFIG(ERR, "(%s) invalid virtqueue size %u\n", dev->ifname, vq->size); return RTE_VHOST_MSG_RESULT_ERR; } } if (vq_is_packed(dev)) { rte_free(vq->shadow_used_packed); vq->shadow_used_packed = rte_malloc_socket(NULL, vq->size * sizeof(struct vring_used_elem_packed), RTE_CACHE_LINE_SIZE, vq->numa_node); if (!vq->shadow_used_packed) { VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for shadow used ring.\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } } else { rte_free(vq->shadow_used_split); vq->shadow_used_split = rte_malloc_socket(NULL, vq->size * sizeof(struct vring_used_elem), RTE_CACHE_LINE_SIZE, vq->numa_node); if (!vq->shadow_used_split) { VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for vq internal data.\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } } rte_free(vq->batch_copy_elems); vq->batch_copy_elems = rte_malloc_socket(NULL, vq->size * sizeof(struct batch_copy_elem), RTE_CACHE_LINE_SIZE, vq->numa_node); if (!vq->batch_copy_elems) { VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for batching copy.\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } return RTE_VHOST_MSG_RESULT_OK; } /* * Reallocate virtio_dev, vhost_virtqueue and related data structures to * make them on the same numa node as the memory of vring descriptor. */ #ifdef RTE_LIBRTE_VHOST_NUMA static struct virtio_net* numa_realloc(struct virtio_net *dev, int index) { int node, dev_node; struct virtio_net *old_dev; struct vhost_virtqueue *vq; struct batch_copy_elem *bce; struct guest_page *gp; struct rte_vhost_memory *mem; size_t mem_size; int ret; old_dev = dev; vq = dev->virtqueue[index]; /* * If VQ is ready, it is too late to reallocate, it certainly already * happened anyway on VHOST_USER_SET_VRING_ADRR. 
*/ if (vq->ready) return dev; ret = get_mempolicy(&node, NULL, 0, vq->desc, MPOL_F_NODE | MPOL_F_ADDR); if (ret) { VHOST_LOG_CONFIG(ERR, "(%s) unable to get virtqueue %d numa information.\n", dev->ifname, index); return dev; } if (node == vq->numa_node) goto out_dev_realloc; vq = rte_realloc_socket(vq, sizeof(*vq), 0, node); if (!vq) { VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc virtqueue %d on node %d\n", dev->ifname, index, node); return dev; } if (vq != dev->virtqueue[index]) { VHOST_LOG_CONFIG(INFO, "(%s) reallocated virtqueue on node %d\n", dev->ifname, node); dev->virtqueue[index] = vq; vhost_user_iotlb_init(dev, index); } if (vq_is_packed(dev)) { struct vring_used_elem_packed *sup; sup = rte_realloc_socket(vq->shadow_used_packed, vq->size * sizeof(*sup), RTE_CACHE_LINE_SIZE, node); if (!sup) { VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc shadow packed on node %d\n", dev->ifname, node); return dev; } vq->shadow_used_packed = sup; } else { struct vring_used_elem *sus; sus = rte_realloc_socket(vq->shadow_used_split, vq->size * sizeof(*sus), RTE_CACHE_LINE_SIZE, node); if (!sus) { VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc shadow split on node %d\n", dev->ifname, node); return dev; } vq->shadow_used_split = sus; } bce = rte_realloc_socket(vq->batch_copy_elems, vq->size * sizeof(*bce), RTE_CACHE_LINE_SIZE, node); if (!bce) { VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc batch copy elem on node %d\n", dev->ifname, node); return dev; } vq->batch_copy_elems = bce; if (vq->log_cache) { struct log_cache_entry *lc; lc = rte_realloc_socket(vq->log_cache, sizeof(*lc) * VHOST_LOG_CACHE_NR, 0, node); if (!lc) { VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc log cache on node %d\n", dev->ifname, node); return dev; } vq->log_cache = lc; } if (vq->resubmit_inflight) { struct rte_vhost_resubmit_info *ri; ri = rte_realloc_socket(vq->resubmit_inflight, sizeof(*ri), 0, node); if (!ri) { VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc resubmit inflight on node %d\n", dev->ifname, node); return dev; } vq->resubmit_inflight = ri; if (ri->resubmit_list) { struct rte_vhost_resubmit_desc *rd; rd = rte_realloc_socket(ri->resubmit_list, sizeof(*rd) * ri->resubmit_num, 0, node); if (!rd) { VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc resubmit list on node %d\n", dev->ifname, node); return dev; } ri->resubmit_list = rd; } } vq->numa_node = node; out_dev_realloc: if (dev->flags & VIRTIO_DEV_RUNNING) return dev; ret = get_mempolicy(&dev_node, NULL, 0, dev, MPOL_F_NODE | MPOL_F_ADDR); if (ret) { VHOST_LOG_CONFIG(ERR, "(%s) unable to get numa information.\n", dev->ifname); return dev; } if (dev_node == node) return dev; dev = rte_realloc_socket(old_dev, sizeof(*dev), 0, node); if (!dev) { VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc dev on node %d\n", old_dev->ifname, node); return old_dev; } VHOST_LOG_CONFIG(INFO, "(%s) reallocated device on node %d\n", dev->ifname, node); vhost_devices[dev->vid] = dev; mem_size = sizeof(struct rte_vhost_memory) + sizeof(struct rte_vhost_mem_region) * dev->mem->nregions; mem = rte_realloc_socket(dev->mem, mem_size, 0, node); if (!mem) { VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc mem table on node %d\n", dev->ifname, node); return dev; } dev->mem = mem; gp = rte_realloc_socket(dev->guest_pages, dev->max_guest_pages * sizeof(*gp), RTE_CACHE_LINE_SIZE, node); if (!gp) { VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc guest pages on node %d\n", dev->ifname, node); return dev; } dev->guest_pages = gp; return dev; } #else static struct virtio_net* numa_realloc(struct virtio_net *dev, 
int index __rte_unused) { return dev; } #endif /* Converts QEMU virtual address to Vhost virtual address. */ static uint64_t qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len) { struct rte_vhost_mem_region *r; uint32_t i; if (unlikely(!dev || !dev->mem)) goto out_error; /* Find the region where the address lives. */ for (i = 0; i < dev->mem->nregions; i++) { r = &dev->mem->regions[i]; if (qva >= r->guest_user_addr && qva < r->guest_user_addr + r->size) { if (unlikely(*len > r->guest_user_addr + r->size - qva)) *len = r->guest_user_addr + r->size - qva; return qva - r->guest_user_addr + r->host_user_addr; } } out_error: *len = 0; return 0; } /* * Converts ring address to Vhost virtual address. * If IOMMU is enabled, the ring address is a guest IO virtual address, * else it is a QEMU virtual address. */ static uint64_t ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq, uint64_t ra, uint64_t *size) { if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) { uint64_t vva; vhost_user_iotlb_rd_lock(vq); vva = vhost_iova_to_vva(dev, vq, ra, size, VHOST_ACCESS_RW); vhost_user_iotlb_rd_unlock(vq); return vva; } return qva_to_vva(dev, ra, size); } static uint64_t log_addr_to_gpa(struct virtio_net *dev, struct vhost_virtqueue *vq) { uint64_t log_gpa; vhost_user_iotlb_rd_lock(vq); log_gpa = translate_log_addr(dev, vq, vq->ring_addrs.log_guest_addr); vhost_user_iotlb_rd_unlock(vq); return log_gpa; } static struct virtio_net * translate_ring_addresses(struct virtio_net *dev, int vq_index) { struct vhost_virtqueue *vq = dev->virtqueue[vq_index]; struct vhost_vring_addr *addr = &vq->ring_addrs; uint64_t len, expected_len; if (addr->flags & (1 << VHOST_VRING_F_LOG)) { vq->log_guest_addr = log_addr_to_gpa(dev, vq); if (vq->log_guest_addr == 0) { VHOST_LOG_CONFIG(DEBUG, "(%s) failed to map log_guest_addr.\n", dev->ifname); return dev; } } if (vq_is_packed(dev)) { len = sizeof(struct vring_packed_desc) * vq->size; vq->desc_packed = (struct vring_packed_desc *)(uintptr_t) ring_addr_to_vva(dev, vq, addr->desc_user_addr, &len); if (vq->desc_packed == NULL || len != sizeof(struct vring_packed_desc) * vq->size) { VHOST_LOG_CONFIG(DEBUG, "(%s) failed to map desc_packed ring.\n", dev->ifname); return dev; } dev = numa_realloc(dev, vq_index); vq = dev->virtqueue[vq_index]; addr = &vq->ring_addrs; len = sizeof(struct vring_packed_desc_event); vq->driver_event = (struct vring_packed_desc_event *) (uintptr_t)ring_addr_to_vva(dev, vq, addr->avail_user_addr, &len); if (vq->driver_event == NULL || len != sizeof(struct vring_packed_desc_event)) { VHOST_LOG_CONFIG(DEBUG, "(%s) failed to find driver area address.\n", dev->ifname); return dev; } len = sizeof(struct vring_packed_desc_event); vq->device_event = (struct vring_packed_desc_event *) (uintptr_t)ring_addr_to_vva(dev, vq, addr->used_user_addr, &len); if (vq->device_event == NULL || len != sizeof(struct vring_packed_desc_event)) { VHOST_LOG_CONFIG(DEBUG, "(%s) failed to find device area address.\n", dev->ifname); return dev; } vq->access_ok = true; return dev; } /* The addresses are converted from QEMU virtual to Vhost virtual. 
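 * Each ring is mapped with ring_addr_to_vva() and the returned length is
 * compared against the size expected for a split ring of vq->size entries,
 * so a truncated mapping is rejected:
 *
 *   desc : sizeof(struct vring_desc) * size
 *   avail: sizeof(struct vring_avail) + sizeof(uint16_t) * size
 *          (+ sizeof(uint16_t) with VIRTIO_RING_F_EVENT_IDX)
 *   used : sizeof(struct vring_used) + sizeof(struct vring_used_elem) * size
 *          (+ sizeof(uint16_t) with VIRTIO_RING_F_EVENT_IDX)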
*/ if (vq->desc && vq->avail && vq->used) return dev; len = sizeof(struct vring_desc) * vq->size; vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev, vq, addr->desc_user_addr, &len); if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) { VHOST_LOG_CONFIG(DEBUG, "(%s) failed to map desc ring.\n", dev->ifname); return dev; } dev = numa_realloc(dev, vq_index); vq = dev->virtqueue[vq_index]; addr = &vq->ring_addrs; len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size; if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) len += sizeof(uint16_t); expected_len = len; vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev, vq, addr->avail_user_addr, &len); if (vq->avail == 0 || len != expected_len) { VHOST_LOG_CONFIG(DEBUG, "(%s) failed to map avail ring.\n", dev->ifname); return dev; } len = sizeof(struct vring_used) + sizeof(struct vring_used_elem) * vq->size; if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) len += sizeof(uint16_t); expected_len = len; vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev, vq, addr->used_user_addr, &len); if (vq->used == 0 || len != expected_len) { VHOST_LOG_CONFIG(DEBUG, "(%s) failed to map used ring.\n", dev->ifname); return dev; } if (vq->last_used_idx != vq->used->idx) { VHOST_LOG_CONFIG(WARNING, "(%s) last_used_idx (%u) and vq->used->idx (%u) mismatches;\n", dev->ifname, vq->last_used_idx, vq->used->idx); vq->last_used_idx = vq->used->idx; vq->last_avail_idx = vq->used->idx; VHOST_LOG_CONFIG(WARNING, "(%s) some packets maybe resent for Tx and dropped for Rx\n", dev->ifname); } vq->access_ok = true; VHOST_LOG_CONFIG(DEBUG, "(%s) mapped address desc: %p\n", dev->ifname, vq->desc); VHOST_LOG_CONFIG(DEBUG, "(%s) mapped address avail: %p\n", dev->ifname, vq->avail); VHOST_LOG_CONFIG(DEBUG, "(%s) mapped address used: %p\n", dev->ifname, vq->used); VHOST_LOG_CONFIG(DEBUG, "(%s) log_guest_addr: %" PRIx64 "\n", dev->ifname, vq->log_guest_addr); return dev; } /* * The virtio device sends us the desc, used and avail ring addresses. * This function then converts these to our address space. */ static int vhost_user_set_vring_addr(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; struct vhost_virtqueue *vq; struct vhost_vring_addr *addr = &ctx->msg.payload.addr; bool access_ok; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; if (dev->mem == NULL) return RTE_VHOST_MSG_RESULT_ERR; /* addr->index refers to the queue index. The txq 1, rxq is 0. */ vq = dev->virtqueue[ctx->msg.payload.addr.index]; access_ok = vq->access_ok; /* * Rings addresses should not be interpreted as long as the ring is not * started and enabled */ memcpy(&vq->ring_addrs, addr, sizeof(*addr)); vring_invalidate(dev, vq); if ((vq->enabled && (dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) || access_ok) { dev = translate_ring_addresses(dev, ctx->msg.payload.addr.index); if (!dev) return RTE_VHOST_MSG_RESULT_ERR; *pdev = dev; } return RTE_VHOST_MSG_RESULT_OK; } /* * The virtio device sends us the available ring last used index. 
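 *
 * For packed virtqueues the 16-bit value packs both the index and the wrap
 * counter, decoded below as:
 *
 *   last_avail_idx     = num & 0x7fff;
 *   avail_wrap_counter = !!(num & (1 << 15));
 *
 * For split virtqueues the value is used directly for both last_avail_idx
 * and last_used_idx.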
*/ static int vhost_user_set_vring_base(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index]; uint64_t val = ctx->msg.payload.state.num; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; if (vq_is_packed(dev)) { /* * Bit[0:14]: avail index * Bit[15]: avail wrap counter */ vq->last_avail_idx = val & 0x7fff; vq->avail_wrap_counter = !!(val & (0x1 << 15)); /* * Set used index to same value as available one, as * their values should be the same since ring processing * was stopped at get time. */ vq->last_used_idx = vq->last_avail_idx; vq->used_wrap_counter = vq->avail_wrap_counter; } else { vq->last_used_idx = ctx->msg.payload.state.num; vq->last_avail_idx = ctx->msg.payload.state.num; } VHOST_LOG_CONFIG(INFO, "(%s) vring base idx:%u last_used_idx:%u last_avail_idx:%u.\n", dev->ifname, ctx->msg.payload.state.index, vq->last_used_idx, vq->last_avail_idx); return RTE_VHOST_MSG_RESULT_OK; } static int add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr, uint64_t host_iova, uint64_t host_user_addr, uint64_t size) { struct guest_page *page, *last_page; struct guest_page *old_pages; if (dev->nr_guest_pages == dev->max_guest_pages) { dev->max_guest_pages *= 2; old_pages = dev->guest_pages; dev->guest_pages = rte_realloc(dev->guest_pages, dev->max_guest_pages * sizeof(*page), RTE_CACHE_LINE_SIZE); if (dev->guest_pages == NULL) { VHOST_LOG_CONFIG(ERR, "cannot realloc guest_pages\n"); rte_free(old_pages); return -1; } } if (dev->nr_guest_pages > 0) { last_page = &dev->guest_pages[dev->nr_guest_pages - 1]; /* merge if the two pages are continuous */ if (host_iova == last_page->host_iova + last_page->size && guest_phys_addr == last_page->guest_phys_addr + last_page->size && host_user_addr == last_page->host_user_addr + last_page->size) { last_page->size += size; return 0; } } page = &dev->guest_pages[dev->nr_guest_pages++]; page->guest_phys_addr = guest_phys_addr; page->host_iova = host_iova; page->host_user_addr = host_user_addr; page->size = size; return 0; } static int add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg, uint64_t page_size) { uint64_t reg_size = reg->size; uint64_t host_user_addr = reg->host_user_addr; uint64_t guest_phys_addr = reg->guest_phys_addr; uint64_t host_iova; uint64_t size; host_iova = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr); size = page_size - (guest_phys_addr & (page_size - 1)); size = RTE_MIN(size, reg_size); if (add_one_guest_page(dev, guest_phys_addr, host_iova, host_user_addr, size) < 0) return -1; host_user_addr += size; guest_phys_addr += size; reg_size -= size; while (reg_size > 0) { size = RTE_MIN(reg_size, page_size); host_iova = rte_mem_virt2iova((void *)(uintptr_t) host_user_addr); if (add_one_guest_page(dev, guest_phys_addr, host_iova, host_user_addr, size) < 0) return -1; host_user_addr += size; guest_phys_addr += size; reg_size -= size; } /* sort guest page array if over binary search threshold */ if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) { qsort((void *)dev->guest_pages, dev->nr_guest_pages, sizeof(struct guest_page), guest_page_addrcmp); } return 0; } #ifdef RTE_LIBRTE_VHOST_DEBUG /* TODO: enable it only in debug mode? 
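 *
 * When built with RTE_LIBRTE_VHOST_DEBUG, dump_guest_pages() below logs the
 * (guest_phys_addr, host_iova, size) triplet of every tracked guest page
 * region; otherwise it compiles to a no-op macro.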
*/ static void dump_guest_pages(struct virtio_net *dev) { uint32_t i; struct guest_page *page; for (i = 0; i < dev->nr_guest_pages; i++) { page = &dev->guest_pages[i]; VHOST_LOG_CONFIG(INFO, "(%s) guest physical page region %u\n", dev->ifname, i); VHOST_LOG_CONFIG(INFO, "(%s)\tguest_phys_addr: %" PRIx64 "\n", dev->ifname, page->guest_phys_addr); VHOST_LOG_CONFIG(INFO, "(%s)\thost_iova : %" PRIx64 "\n", dev->ifname, page->host_iova); VHOST_LOG_CONFIG(INFO, "(%s)\tsize : %" PRIx64 "\n", dev->ifname, page->size); } } #else #define dump_guest_pages(dev) #endif static bool vhost_memory_changed(struct VhostUserMemory *new, struct rte_vhost_memory *old) { uint32_t i; if (new->nregions != old->nregions) return true; for (i = 0; i < new->nregions; ++i) { VhostUserMemoryRegion *new_r = &new->regions[i]; struct rte_vhost_mem_region *old_r = &old->regions[i]; if (new_r->guest_phys_addr != old_r->guest_phys_addr) return true; if (new_r->memory_size != old_r->size) return true; if (new_r->userspace_addr != old_r->guest_user_addr) return true; } return false; } #ifdef RTE_LIBRTE_VHOST_POSTCOPY static int vhost_user_postcopy_region_register(struct virtio_net *dev, struct rte_vhost_mem_region *reg) { struct uffdio_register reg_struct; /* * Let's register all the mmapped area to ensure * alignment on page boundary. */ reg_struct.range.start = (uint64_t)(uintptr_t)reg->mmap_addr; reg_struct.range.len = reg->mmap_size; reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING; if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) { VHOST_LOG_CONFIG(ERR, "(%s) failed to register ufd for region " "%" PRIx64 " - %" PRIx64 " (ufd = %d) %s\n", dev->ifname, (uint64_t)reg_struct.range.start, (uint64_t)reg_struct.range.start + (uint64_t)reg_struct.range.len - 1, dev->postcopy_ufd, strerror(errno)); return -1; } VHOST_LOG_CONFIG(INFO, "(%s)\t userfaultfd registered for range : %" PRIx64 " - %" PRIx64 "\n", dev->ifname, (uint64_t)reg_struct.range.start, (uint64_t)reg_struct.range.start + (uint64_t)reg_struct.range.len - 1); return 0; } #else static int vhost_user_postcopy_region_register(struct virtio_net *dev __rte_unused, struct rte_vhost_mem_region *reg __rte_unused) { return -1; } #endif static int vhost_user_postcopy_register(struct virtio_net *dev, int main_fd, struct vhu_msg_context *ctx) { struct VhostUserMemory *memory; struct rte_vhost_mem_region *reg; struct vhu_msg_context ack_ctx; uint32_t i; if (!dev->postcopy_listening) return 0; /* * We haven't a better way right now than sharing * DPDK's virtual address with Qemu, so that Qemu can * retrieve the region offset when handling userfaults. */ memory = &ctx->msg.payload.memory; for (i = 0; i < memory->nregions; i++) { reg = &dev->mem->regions[i]; memory->regions[i].userspace_addr = reg->host_user_addr; } /* Send the addresses back to qemu */ ctx->fd_num = 0; send_vhost_reply(dev, main_fd, ctx); /* Wait for qemu to acknowledge it got the addresses * we've got to wait before we're allowed to generate faults. 
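 *
 * In short, the postcopy handshake is: reply to SET_MEM_TABLE with each
 * region's host_user_addr filled in, block in read_vhost_message() until
 * QEMU acks with the same SET_MEM_TABLE request, and only then register
 * every mmapped region with userfaultfd (UFFDIO_REGISTER) so missing-page
 * faults can be served during postcopy live migration.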
*/ if (read_vhost_message(dev, main_fd, &ack_ctx) <= 0) { VHOST_LOG_CONFIG(ERR, "(%s) failed to read qemu ack on postcopy set-mem-table\n", dev->ifname); return -1; } if (validate_msg_fds(dev, &ack_ctx, 0) != 0) return -1; if (ack_ctx.msg.request.master != VHOST_USER_SET_MEM_TABLE) { VHOST_LOG_CONFIG(ERR, "(%s) bad qemu ack on postcopy set-mem-table (%d)\n", dev->ifname, ack_ctx.msg.request.master); return -1; } /* Now userfault register and we can use the memory */ for (i = 0; i < memory->nregions; i++) { reg = &dev->mem->regions[i]; if (vhost_user_postcopy_region_register(dev, reg) < 0) return -1; } return 0; } static int vhost_user_mmap_region(struct virtio_net *dev, struct rte_vhost_mem_region *region, uint64_t mmap_offset) { void *mmap_addr; uint64_t mmap_size; uint64_t alignment; int populate; /* Check for memory_size + mmap_offset overflow */ if (mmap_offset >= -region->size) { VHOST_LOG_CONFIG(ERR, "(%s) mmap_offset (%#"PRIx64") and memory_size (%#"PRIx64") overflow\n", dev->ifname, mmap_offset, region->size); return -1; } mmap_size = region->size + mmap_offset; /* mmap() without flag of MAP_ANONYMOUS, should be called with length * argument aligned with hugepagesz at older longterm version Linux, * like 2.6.32 and 3.2.72, or mmap() will fail with EINVAL. * * To avoid failure, make sure in caller to keep length aligned. */ alignment = get_blk_size(region->fd); if (alignment == (uint64_t)-1) { VHOST_LOG_CONFIG(ERR, "(%s) couldn't get hugepage size through fstat\n", dev->ifname); return -1; } mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment); if (mmap_size == 0) { /* * It could happen if initial mmap_size + alignment overflows * the sizeof uint64, which could happen if either mmap_size or * alignment value is wrong. * * mmap() kernel implementation would return an error, but * better catch it before and provide useful info in the logs. */ VHOST_LOG_CONFIG(ERR, "(%s) mmap size (0x%" PRIx64 ") or alignment (0x%" PRIx64 ") is invalid\n", dev->ifname, region->size + mmap_offset, alignment); return -1; } populate = dev->async_copy ? 
MAP_POPULATE : 0; mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED | populate, region->fd, 0); if (mmap_addr == MAP_FAILED) { VHOST_LOG_CONFIG(ERR, "(%s) mmap failed (%s).\n", dev->ifname, strerror(errno)); return -1; } region->mmap_addr = mmap_addr; region->mmap_size = mmap_size; region->host_user_addr = (uint64_t)(uintptr_t)mmap_addr + mmap_offset; if (dev->async_copy) { if (add_guest_pages(dev, region, alignment) < 0) { VHOST_LOG_CONFIG(ERR, "(%s) adding guest pages to region failed.\n", dev->ifname); return -1; } } VHOST_LOG_CONFIG(INFO, "(%s) guest memory region size: 0x%" PRIx64 "\n", dev->ifname, region->size); VHOST_LOG_CONFIG(INFO, "(%s)\t guest physical addr: 0x%" PRIx64 "\n", dev->ifname, region->guest_phys_addr); VHOST_LOG_CONFIG(INFO, "(%s)\t guest virtual addr: 0x%" PRIx64 "\n", dev->ifname, region->guest_user_addr); VHOST_LOG_CONFIG(INFO, "(%s)\t host virtual addr: 0x%" PRIx64 "\n", dev->ifname, region->host_user_addr); VHOST_LOG_CONFIG(INFO, "(%s)\t mmap addr : 0x%" PRIx64 "\n", dev->ifname, (uint64_t)(uintptr_t)mmap_addr); VHOST_LOG_CONFIG(INFO, "(%s)\t mmap size : 0x%" PRIx64 "\n", dev->ifname, mmap_size); VHOST_LOG_CONFIG(INFO, "(%s)\t mmap align: 0x%" PRIx64 "\n", dev->ifname, alignment); VHOST_LOG_CONFIG(INFO, "(%s)\t mmap off : 0x%" PRIx64 "\n", dev->ifname, mmap_offset); return 0; } static int vhost_user_set_mem_table(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd) { struct virtio_net *dev = *pdev; struct VhostUserMemory *memory = &ctx->msg.payload.memory; struct rte_vhost_mem_region *reg; int numa_node = SOCKET_ID_ANY; uint64_t mmap_offset; uint32_t i; bool async_notify = false; if (validate_msg_fds(dev, ctx, memory->nregions) != 0) return RTE_VHOST_MSG_RESULT_ERR; if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) { VHOST_LOG_CONFIG(ERR, "(%s) too many memory regions (%u)\n", dev->ifname, memory->nregions); goto close_msg_fds; } if (dev->mem && !vhost_memory_changed(memory, dev->mem)) { VHOST_LOG_CONFIG(INFO, "(%s) memory regions not changed\n", dev->ifname); close_msg_fds(ctx); return RTE_VHOST_MSG_RESULT_OK; } if (dev->mem) { if (dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) { struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev; if (vdpa_dev && vdpa_dev->ops->dev_close) vdpa_dev->ops->dev_close(dev->vid); dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED; } /* notify the vhost application to stop DMA transfers */ if (dev->async_copy && dev->notify_ops->vring_state_changed) { for (i = 0; i < dev->nr_vring; i++) { dev->notify_ops->vring_state_changed(dev->vid, i, 0); } async_notify = true; } free_mem_region(dev); rte_free(dev->mem); dev->mem = NULL; } /* Flush IOTLB cache as previous HVAs are now invalid */ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) for (i = 0; i < dev->nr_vring; i++) vhost_user_iotlb_flush_all(dev->virtqueue[i]); /* * If VQ 0 has already been allocated, try to allocate on the same * NUMA node. It can be reallocated later in numa_realloc(). 
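 *
 * The guest page array and the new memory table below are allocated with
 * rte_zmalloc_socket() on that node. Each region's fd is then moved into
 * the table and its slot in ctx->fds[] is set to -1, so the error path
 * cannot close the same descriptor twice.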
*/ if (dev->nr_vring > 0) numa_node = dev->virtqueue[0]->numa_node; dev->nr_guest_pages = 0; if (dev->guest_pages == NULL) { dev->max_guest_pages = 8; dev->guest_pages = rte_zmalloc_socket(NULL, dev->max_guest_pages * sizeof(struct guest_page), RTE_CACHE_LINE_SIZE, numa_node); if (dev->guest_pages == NULL) { VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for dev->guest_pages\n", dev->ifname); goto close_msg_fds; } } dev->mem = rte_zmalloc_socket("vhost-mem-table", sizeof(struct rte_vhost_memory) + sizeof(struct rte_vhost_mem_region) * memory->nregions, 0, numa_node); if (dev->mem == NULL) { VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for dev->mem\n", dev->ifname); goto free_guest_pages; } for (i = 0; i < memory->nregions; i++) { reg = &dev->mem->regions[i]; reg->guest_phys_addr = memory->regions[i].guest_phys_addr; reg->guest_user_addr = memory->regions[i].userspace_addr; reg->size = memory->regions[i].memory_size; reg->fd = ctx->fds[i]; /* * Assign invalid file descriptor value to avoid double * closing on error path. */ ctx->fds[i] = -1; mmap_offset = memory->regions[i].mmap_offset; if (vhost_user_mmap_region(dev, reg, mmap_offset) < 0) { VHOST_LOG_CONFIG(ERR, "(%s) failed to mmap region %u\n", dev->ifname, i); goto free_mem_table; } dev->mem->nregions++; } if (dev->async_copy && rte_vfio_is_enabled("vfio")) async_dma_map(dev, true); if (vhost_user_postcopy_register(dev, main_fd, ctx) < 0) goto free_mem_table; for (i = 0; i < dev->nr_vring; i++) { struct vhost_virtqueue *vq = dev->virtqueue[i]; if (!vq) continue; if (vq->desc || vq->avail || vq->used) { /* * If the memory table got updated, the ring addresses * need to be translated again as virtual addresses have * changed. */ vring_invalidate(dev, vq); dev = translate_ring_addresses(dev, i); if (!dev) { dev = *pdev; goto free_mem_table; } *pdev = dev; } } dump_guest_pages(dev); if (async_notify) { for (i = 0; i < dev->nr_vring; i++) dev->notify_ops->vring_state_changed(dev->vid, i, 1); } return RTE_VHOST_MSG_RESULT_OK; free_mem_table: free_mem_region(dev); rte_free(dev->mem); dev->mem = NULL; free_guest_pages: rte_free(dev->guest_pages); dev->guest_pages = NULL; close_msg_fds: close_msg_fds(ctx); return RTE_VHOST_MSG_RESULT_ERR; } static bool vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq) { bool rings_ok; if (!vq) return false; if (vq_is_packed(dev)) rings_ok = vq->desc_packed && vq->driver_event && vq->device_event; else rings_ok = vq->desc && vq->avail && vq->used; return rings_ok && vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD && vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD && vq->enabled; } #define VIRTIO_BUILTIN_NUM_VQS_TO_BE_READY 2u static int virtio_is_ready(struct virtio_net *dev) { struct vhost_virtqueue *vq; uint32_t i, nr_vring = dev->nr_vring; if (dev->flags & VIRTIO_DEV_READY) return 1; if (!dev->nr_vring) return 0; if (dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) { nr_vring = VIRTIO_BUILTIN_NUM_VQS_TO_BE_READY; if (dev->nr_vring < nr_vring) return 0; } for (i = 0; i < nr_vring; i++) { vq = dev->virtqueue[i]; if (!vq_is_ready(dev, vq)) return 0; } /* If supported, ensure the frontend is really done with config */ if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_STATUS)) if (!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK)) return 0; dev->flags |= VIRTIO_DEV_READY; if (!(dev->flags & VIRTIO_DEV_RUNNING)) VHOST_LOG_CONFIG(INFO, "(%s) virtio is now ready for processing.\n", dev->ifname); return 1; } static void * inflight_mem_alloc(struct virtio_net *dev, const char *name, size_t size, 
int *fd) { void *ptr; int mfd = -1; char fname[20] = "/tmp/memfd-XXXXXX"; *fd = -1; #ifdef MEMFD_SUPPORTED mfd = memfd_create(name, MFD_CLOEXEC); #else RTE_SET_USED(name); #endif if (mfd == -1) { mfd = mkstemp(fname); if (mfd == -1) { VHOST_LOG_CONFIG(ERR, "(%s) failed to get inflight buffer fd\n", dev->ifname); return NULL; } unlink(fname); } if (ftruncate(mfd, size) == -1) { VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc inflight buffer\n", dev->ifname); close(mfd); return NULL; } ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, mfd, 0); if (ptr == MAP_FAILED) { VHOST_LOG_CONFIG(ERR, "(%s) failed to mmap inflight buffer\n", dev->ifname); close(mfd); return NULL; } *fd = mfd; return ptr; } static uint32_t get_pervq_shm_size_split(uint16_t queue_size) { return RTE_ALIGN_MUL_CEIL(sizeof(struct rte_vhost_inflight_desc_split) * queue_size + sizeof(uint64_t) + sizeof(uint16_t) * 4, INFLIGHT_ALIGNMENT); } static uint32_t get_pervq_shm_size_packed(uint16_t queue_size) { return RTE_ALIGN_MUL_CEIL(sizeof(struct rte_vhost_inflight_desc_packed) * queue_size + sizeof(uint64_t) + sizeof(uint16_t) * 6 + sizeof(uint8_t) * 9, INFLIGHT_ALIGNMENT); } static int vhost_user_get_inflight_fd(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct rte_vhost_inflight_info_packed *inflight_packed; uint64_t pervq_inflight_size, mmap_size; uint16_t num_queues, queue_size; struct virtio_net *dev = *pdev; int fd, i, j; int numa_node = SOCKET_ID_ANY; void *addr; if (ctx->msg.size != sizeof(ctx->msg.payload.inflight)) { VHOST_LOG_CONFIG(ERR, "(%s) invalid get_inflight_fd message size is %d\n", dev->ifname, ctx->msg.size); return RTE_VHOST_MSG_RESULT_ERR; } /* * If VQ 0 has already been allocated, try to allocate on the same * NUMA node. It can be reallocated later in numa_realloc(). 
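 *
 * The shared inflight area returned to the frontend is backed by a memfd
 * (or an unlinked temporary file) via inflight_mem_alloc() and sized as:
 *
 *   mmap_size = num_queues * pervq_inflight_size
 *
 * where pervq_inflight_size is the per-queue descriptor tracking array plus
 * a small fixed header, rounded up to INFLIGHT_ALIGNMENT; see
 * get_pervq_shm_size_split() and get_pervq_shm_size_packed() above.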
*/ if (dev->nr_vring > 0) numa_node = dev->virtqueue[0]->numa_node; if (dev->inflight_info == NULL) { dev->inflight_info = rte_zmalloc_socket("inflight_info", sizeof(struct inflight_mem_info), 0, numa_node); if (!dev->inflight_info) { VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc dev inflight area\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } dev->inflight_info->fd = -1; } num_queues = ctx->msg.payload.inflight.num_queues; queue_size = ctx->msg.payload.inflight.queue_size; VHOST_LOG_CONFIG(INFO, "(%s) get_inflight_fd num_queues: %u\n", dev->ifname, ctx->msg.payload.inflight.num_queues); VHOST_LOG_CONFIG(INFO, "(%s) get_inflight_fd queue_size: %u\n", dev->ifname, ctx->msg.payload.inflight.queue_size); if (vq_is_packed(dev)) pervq_inflight_size = get_pervq_shm_size_packed(queue_size); else pervq_inflight_size = get_pervq_shm_size_split(queue_size); mmap_size = num_queues * pervq_inflight_size; addr = inflight_mem_alloc(dev, "vhost-inflight", mmap_size, &fd); if (!addr) { VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc vhost inflight area\n", dev->ifname); ctx->msg.payload.inflight.mmap_size = 0; return RTE_VHOST_MSG_RESULT_ERR; } memset(addr, 0, mmap_size); if (dev->inflight_info->addr) { munmap(dev->inflight_info->addr, dev->inflight_info->size); dev->inflight_info->addr = NULL; } if (dev->inflight_info->fd >= 0) { close(dev->inflight_info->fd); dev->inflight_info->fd = -1; } dev->inflight_info->addr = addr; dev->inflight_info->size = ctx->msg.payload.inflight.mmap_size = mmap_size; dev->inflight_info->fd = ctx->fds[0] = fd; ctx->msg.payload.inflight.mmap_offset = 0; ctx->fd_num = 1; if (vq_is_packed(dev)) { for (i = 0; i < num_queues; i++) { inflight_packed = (struct rte_vhost_inflight_info_packed *)addr; inflight_packed->used_wrap_counter = 1; inflight_packed->old_used_wrap_counter = 1; for (j = 0; j < queue_size; j++) inflight_packed->desc[j].next = j + 1; addr = (void *)((char *)addr + pervq_inflight_size); } } VHOST_LOG_CONFIG(INFO, "(%s) send inflight mmap_size: %"PRIu64"\n", dev->ifname, ctx->msg.payload.inflight.mmap_size); VHOST_LOG_CONFIG(INFO, "(%s) send inflight mmap_offset: %"PRIu64"\n", dev->ifname, ctx->msg.payload.inflight.mmap_offset); VHOST_LOG_CONFIG(INFO, "(%s) send inflight fd: %d\n", dev->ifname, ctx->fds[0]); return RTE_VHOST_MSG_RESULT_REPLY; } static int vhost_user_set_inflight_fd(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { uint64_t mmap_size, mmap_offset; uint16_t num_queues, queue_size; struct virtio_net *dev = *pdev; uint32_t pervq_inflight_size; struct vhost_virtqueue *vq; void *addr; int fd, i; int numa_node = SOCKET_ID_ANY; fd = ctx->fds[0]; if (ctx->msg.size != sizeof(ctx->msg.payload.inflight) || fd < 0) { VHOST_LOG_CONFIG(ERR, "(%s) invalid set_inflight_fd message size is %d,fd is %d\n", dev->ifname, ctx->msg.size, fd); return RTE_VHOST_MSG_RESULT_ERR; } mmap_size = ctx->msg.payload.inflight.mmap_size; mmap_offset = ctx->msg.payload.inflight.mmap_offset; num_queues = ctx->msg.payload.inflight.num_queues; queue_size = ctx->msg.payload.inflight.queue_size; if (vq_is_packed(dev)) pervq_inflight_size = get_pervq_shm_size_packed(queue_size); else pervq_inflight_size = get_pervq_shm_size_split(queue_size); VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd mmap_size: %"PRIu64"\n", dev->ifname, mmap_size); VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd mmap_offset: %"PRIu64"\n", dev->ifname, mmap_offset); VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd num_queues: %u\n", dev->ifname, num_queues); VHOST_LOG_CONFIG(INFO, "(%s) 
set_inflight_fd queue_size: %u\n", dev->ifname, queue_size); VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd fd: %d\n", dev->ifname, fd); VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd pervq_inflight_size: %d\n", dev->ifname, pervq_inflight_size); /* * If VQ 0 has already been allocated, try to allocate on the same * NUMA node. It can be reallocated later in numa_realloc(). */ if (dev->nr_vring > 0) numa_node = dev->virtqueue[0]->numa_node; if (!dev->inflight_info) { dev->inflight_info = rte_zmalloc_socket("inflight_info", sizeof(struct inflight_mem_info), 0, numa_node); if (dev->inflight_info == NULL) { VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc dev inflight area\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } dev->inflight_info->fd = -1; } if (dev->inflight_info->addr) { munmap(dev->inflight_info->addr, dev->inflight_info->size); dev->inflight_info->addr = NULL; } addr = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mmap_offset); if (addr == MAP_FAILED) { VHOST_LOG_CONFIG(ERR, "(%s) failed to mmap share memory.\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } if (dev->inflight_info->fd >= 0) { close(dev->inflight_info->fd); dev->inflight_info->fd = -1; } dev->inflight_info->fd = fd; dev->inflight_info->addr = addr; dev->inflight_info->size = mmap_size; for (i = 0; i < num_queues; i++) { vq = dev->virtqueue[i]; if (!vq) continue; if (vq_is_packed(dev)) { vq->inflight_packed = addr; vq->inflight_packed->desc_num = queue_size; } else { vq->inflight_split = addr; vq->inflight_split->desc_num = queue_size; } addr = (void *)((char *)addr + pervq_inflight_size); } return RTE_VHOST_MSG_RESULT_OK; } static int vhost_user_set_vring_call(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; struct vhost_vring_file file; struct vhost_virtqueue *vq; int expected_fds; expected_fds = (ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1; if (validate_msg_fds(dev, ctx, expected_fds) != 0) return RTE_VHOST_MSG_RESULT_ERR; file.index = ctx->msg.payload.u64 & VHOST_USER_VRING_IDX_MASK; if (ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK) file.fd = VIRTIO_INVALID_EVENTFD; else file.fd = ctx->fds[0]; VHOST_LOG_CONFIG(INFO, "(%s) vring call idx:%d file:%d\n", dev->ifname, file.index, file.fd); vq = dev->virtqueue[file.index]; if (vq->ready) { vq->ready = false; vhost_user_notify_queue_state(dev, file.index, 0); } if (vq->callfd >= 0) close(vq->callfd); vq->callfd = file.fd; return RTE_VHOST_MSG_RESULT_OK; } static int vhost_user_set_vring_err(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; int expected_fds; expected_fds = (ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 
0 : 1; if (validate_msg_fds(dev, ctx, expected_fds) != 0) return RTE_VHOST_MSG_RESULT_ERR; if (!(ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK)) close(ctx->fds[0]); VHOST_LOG_CONFIG(INFO, "(%s) not implemented\n", dev->ifname); return RTE_VHOST_MSG_RESULT_OK; } static int resubmit_desc_compare(const void *a, const void *b) { const struct rte_vhost_resubmit_desc *desc0 = a; const struct rte_vhost_resubmit_desc *desc1 = b; if (desc1->counter > desc0->counter) return 1; return -1; } static int vhost_check_queue_inflights_split(struct virtio_net *dev, struct vhost_virtqueue *vq) { uint16_t i; uint16_t resubmit_num = 0, last_io, num; struct vring_used *used = vq->used; struct rte_vhost_resubmit_info *resubmit; struct rte_vhost_inflight_info_split *inflight_split; if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))) return RTE_VHOST_MSG_RESULT_OK; /* The frontend may still not support the inflight feature * although we negotiate the protocol feature. */ if ((!vq->inflight_split)) return RTE_VHOST_MSG_RESULT_OK; if (!vq->inflight_split->version) { vq->inflight_split->version = INFLIGHT_VERSION; return RTE_VHOST_MSG_RESULT_OK; } if (vq->resubmit_inflight) return RTE_VHOST_MSG_RESULT_OK; inflight_split = vq->inflight_split; vq->global_counter = 0; last_io = inflight_split->last_inflight_io; if (inflight_split->used_idx != used->idx) { inflight_split->desc[last_io].inflight = 0; rte_atomic_thread_fence(__ATOMIC_SEQ_CST); inflight_split->used_idx = used->idx; } for (i = 0; i < inflight_split->desc_num; i++) { if (inflight_split->desc[i].inflight == 1) resubmit_num++; } vq->last_avail_idx += resubmit_num; if (resubmit_num) { resubmit = rte_zmalloc_socket("resubmit", sizeof(struct rte_vhost_resubmit_info), 0, vq->numa_node); if (!resubmit) { VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for resubmit info.\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } resubmit->resubmit_list = rte_zmalloc_socket("resubmit_list", resubmit_num * sizeof(struct rte_vhost_resubmit_desc), 0, vq->numa_node); if (!resubmit->resubmit_list) { VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for inflight desc.\n", dev->ifname); rte_free(resubmit); return RTE_VHOST_MSG_RESULT_ERR; } num = 0; for (i = 0; i < vq->inflight_split->desc_num; i++) { if (vq->inflight_split->desc[i].inflight == 1) { resubmit->resubmit_list[num].index = i; resubmit->resubmit_list[num].counter = inflight_split->desc[i].counter; num++; } } resubmit->resubmit_num = num; if (resubmit->resubmit_num > 1) qsort(resubmit->resubmit_list, resubmit->resubmit_num, sizeof(struct rte_vhost_resubmit_desc), resubmit_desc_compare); vq->global_counter = resubmit->resubmit_list[0].counter + 1; vq->resubmit_inflight = resubmit; } return RTE_VHOST_MSG_RESULT_OK; } static int vhost_check_queue_inflights_packed(struct virtio_net *dev, struct vhost_virtqueue *vq) { uint16_t i; uint16_t resubmit_num = 0, old_used_idx, num; struct rte_vhost_resubmit_info *resubmit; struct rte_vhost_inflight_info_packed *inflight_packed; if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))) return RTE_VHOST_MSG_RESULT_OK; /* The frontend may still not support the inflight feature * although we negotiate the protocol feature. 
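 *
 * When inflight tracking is in use, the recovery below counts descriptors
 * still marked inflight, builds a resubmit list sorted by submission
 * counter (most recent first), and restarts the queue's global counter just
 * past the newest entry so resubmitted requests keep a consistent ordering.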
*/ if ((!vq->inflight_packed)) return RTE_VHOST_MSG_RESULT_OK; if (!vq->inflight_packed->version) { vq->inflight_packed->version = INFLIGHT_VERSION; return RTE_VHOST_MSG_RESULT_OK; } if (vq->resubmit_inflight) return RTE_VHOST_MSG_RESULT_OK; inflight_packed = vq->inflight_packed; vq->global_counter = 0; old_used_idx = inflight_packed->old_used_idx; if (inflight_packed->used_idx != old_used_idx) { if (inflight_packed->desc[old_used_idx].inflight == 0) { inflight_packed->old_used_idx = inflight_packed->used_idx; inflight_packed->old_used_wrap_counter = inflight_packed->used_wrap_counter; inflight_packed->old_free_head = inflight_packed->free_head; } else { inflight_packed->used_idx = inflight_packed->old_used_idx; inflight_packed->used_wrap_counter = inflight_packed->old_used_wrap_counter; inflight_packed->free_head = inflight_packed->old_free_head; } } for (i = 0; i < inflight_packed->desc_num; i++) { if (inflight_packed->desc[i].inflight == 1) resubmit_num++; } if (resubmit_num) { resubmit = rte_zmalloc_socket("resubmit", sizeof(struct rte_vhost_resubmit_info), 0, vq->numa_node); if (resubmit == NULL) { VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for resubmit info.\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } resubmit->resubmit_list = rte_zmalloc_socket("resubmit_list", resubmit_num * sizeof(struct rte_vhost_resubmit_desc), 0, vq->numa_node); if (resubmit->resubmit_list == NULL) { VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for resubmit desc.\n", dev->ifname); rte_free(resubmit); return RTE_VHOST_MSG_RESULT_ERR; } num = 0; for (i = 0; i < inflight_packed->desc_num; i++) { if (vq->inflight_packed->desc[i].inflight == 1) { resubmit->resubmit_list[num].index = i; resubmit->resubmit_list[num].counter = inflight_packed->desc[i].counter; num++; } } resubmit->resubmit_num = num; if (resubmit->resubmit_num > 1) qsort(resubmit->resubmit_list, resubmit->resubmit_num, sizeof(struct rte_vhost_resubmit_desc), resubmit_desc_compare); vq->global_counter = resubmit->resubmit_list[0].counter + 1; vq->resubmit_inflight = resubmit; } return RTE_VHOST_MSG_RESULT_OK; } static int vhost_user_set_vring_kick(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; struct vhost_vring_file file; struct vhost_virtqueue *vq; int expected_fds; expected_fds = (ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1; if (validate_msg_fds(dev, ctx, expected_fds) != 0) return RTE_VHOST_MSG_RESULT_ERR; file.index = ctx->msg.payload.u64 & VHOST_USER_VRING_IDX_MASK; if (ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK) file.fd = VIRTIO_INVALID_EVENTFD; else file.fd = ctx->fds[0]; VHOST_LOG_CONFIG(INFO, "(%s) vring kick idx:%d file:%d\n", dev->ifname, file.index, file.fd); /* Interpret ring addresses only when ring is started. */ dev = translate_ring_addresses(dev, file.index); if (!dev) { if (file.fd != VIRTIO_INVALID_EVENTFD) close(file.fd); return RTE_VHOST_MSG_RESULT_ERR; } *pdev = dev; vq = dev->virtqueue[file.index]; /* * When VHOST_USER_F_PROTOCOL_FEATURES is not negotiated, * the ring starts already enabled. Otherwise, it is enabled via * the SET_VRING_ENABLE message. 
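 *
 * Receiving the kick fd also triggers inflight recovery: after the new
 * kickfd is installed, vhost_check_queue_inflights_packed() or _split() is
 * called so any requests left inflight by a previous incarnation of the
 * frontend are queued for resubmission.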
*/ if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) { vq->enabled = true; } if (vq->ready) { vq->ready = false; vhost_user_notify_queue_state(dev, file.index, 0); } if (vq->kickfd >= 0) close(vq->kickfd); vq->kickfd = file.fd; if (vq_is_packed(dev)) { if (vhost_check_queue_inflights_packed(dev, vq)) { VHOST_LOG_CONFIG(ERR, "(%s) failed to inflights for vq: %d\n", dev->ifname, file.index); return RTE_VHOST_MSG_RESULT_ERR; } } else { if (vhost_check_queue_inflights_split(dev, vq)) { VHOST_LOG_CONFIG(ERR, "(%s) failed to inflights for vq: %d\n", dev->ifname, file.index); return RTE_VHOST_MSG_RESULT_ERR; } } return RTE_VHOST_MSG_RESULT_OK; } /* * when virtio is stopped, qemu will send us the GET_VRING_BASE message. */ static int vhost_user_get_vring_base(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index]; uint64_t val; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; /* We have to stop the queue (virtio) if it is running. */ vhost_destroy_device_notify(dev); dev->flags &= ~VIRTIO_DEV_READY; dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED; /* Here we are safe to get the indexes */ if (vq_is_packed(dev)) { /* * Bit[0:14]: avail index * Bit[15]: avail wrap counter */ val = vq->last_avail_idx & 0x7fff; val |= vq->avail_wrap_counter << 15; ctx->msg.payload.state.num = val; } else { ctx->msg.payload.state.num = vq->last_avail_idx; } VHOST_LOG_CONFIG(INFO, "(%s) vring base idx:%d file:%d\n", dev->ifname, ctx->msg.payload.state.index, ctx->msg.payload.state.num); /* * Based on current qemu vhost-user implementation, this message is * sent and only sent in vhost_vring_stop. * TODO: cleanup the vring, it isn't usable since here. */ if (vq->kickfd >= 0) close(vq->kickfd); vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD; if (vq->callfd >= 0) close(vq->callfd); vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD; vq->signalled_used_valid = false; if (vq_is_packed(dev)) { rte_free(vq->shadow_used_packed); vq->shadow_used_packed = NULL; } else { rte_free(vq->shadow_used_split); vq->shadow_used_split = NULL; } rte_free(vq->batch_copy_elems); vq->batch_copy_elems = NULL; rte_free(vq->log_cache); vq->log_cache = NULL; ctx->msg.size = sizeof(ctx->msg.payload.state); ctx->fd_num = 0; vhost_user_iotlb_flush_all(vq); vring_invalidate(dev, vq); return RTE_VHOST_MSG_RESULT_REPLY; } /* * when virtio queues are ready to work, qemu will send us to * enable the virtio queue pair. */ static int vhost_user_set_vring_enable(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; bool enable = !!ctx->msg.payload.state.num; int index = (int)ctx->msg.payload.state.index; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; VHOST_LOG_CONFIG(INFO, "(%s) set queue enable: %d to qp idx: %d\n", dev->ifname, enable, index); if (enable && dev->virtqueue[index]->async) { if (dev->virtqueue[index]->async->pkts_inflight_n) { VHOST_LOG_CONFIG(ERR, "(%s) failed to enable vring. 
Inflight packets must be completed first\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } } dev->virtqueue[index]->enabled = enable; return RTE_VHOST_MSG_RESULT_OK; } static int vhost_user_get_protocol_features(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; uint64_t features, protocol_features; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; rte_vhost_driver_get_features(dev->ifname, &features); rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features); ctx->msg.payload.u64 = protocol_features; ctx->msg.size = sizeof(ctx->msg.payload.u64); ctx->fd_num = 0; return RTE_VHOST_MSG_RESULT_REPLY; } static int vhost_user_set_protocol_features(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; uint64_t protocol_features = ctx->msg.payload.u64; uint64_t slave_protocol_features = 0; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; rte_vhost_driver_get_protocol_features(dev->ifname, &slave_protocol_features); if (protocol_features & ~slave_protocol_features) { VHOST_LOG_CONFIG(ERR, "(%s) received invalid protocol features.\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } dev->protocol_features = protocol_features; VHOST_LOG_CONFIG(INFO, "(%s) negotiated Vhost-user protocol features: 0x%" PRIx64 "\n", dev->ifname, dev->protocol_features); return RTE_VHOST_MSG_RESULT_OK; } static int vhost_user_set_log_base(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; int fd = ctx->fds[0]; uint64_t size, off; void *addr; uint32_t i; if (validate_msg_fds(dev, ctx, 1) != 0) return RTE_VHOST_MSG_RESULT_ERR; if (fd < 0) { VHOST_LOG_CONFIG(ERR, "(%s) invalid log fd: %d\n", dev->ifname, fd); return RTE_VHOST_MSG_RESULT_ERR; } if (ctx->msg.size != sizeof(VhostUserLog)) { VHOST_LOG_CONFIG(ERR, "(%s) invalid log base msg size: %"PRId32" != %d\n", dev->ifname, ctx->msg.size, (int)sizeof(VhostUserLog)); goto close_msg_fds; } size = ctx->msg.payload.log.mmap_size; off = ctx->msg.payload.log.mmap_offset; /* Check for mmap size and offset overflow. */ if (off >= -size) { VHOST_LOG_CONFIG(ERR, "(%s) log offset %#"PRIx64" and log size %#"PRIx64" overflow\n", dev->ifname, off, size); goto close_msg_fds; } VHOST_LOG_CONFIG(INFO, "(%s) log mmap size: %"PRId64", offset: %"PRId64"\n", dev->ifname, size, off); /* * mmap from 0 to workaround a hugepage mmap bug: mmap will * fail when offset is not page size aligned. */ addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); close(fd); if (addr == MAP_FAILED) { VHOST_LOG_CONFIG(ERR, "(%s) mmap log base failed!\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } /* * Free previously mapped log memory on occasionally * multiple VHOST_USER_SET_LOG_BASE. 
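 *
 * After remapping, log_base points off bytes into the new mapping and a
 * per-virtqueue log cache of VHOST_LOG_CACHE_NR entries is (re)allocated to
 * batch dirty-bitmap updates; if that allocation fails, migration still
 * proceeds, only without caching.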
*/ if (dev->log_addr) { munmap((void *)(uintptr_t)dev->log_addr, dev->log_size); } dev->log_addr = (uint64_t)(uintptr_t)addr; dev->log_base = dev->log_addr + off; dev->log_size = size; for (i = 0; i < dev->nr_vring; i++) { struct vhost_virtqueue *vq = dev->virtqueue[i]; rte_free(vq->log_cache); vq->log_cache = NULL; vq->log_cache_nb_elem = 0; vq->log_cache = rte_malloc_socket("vq log cache", sizeof(struct log_cache_entry) * VHOST_LOG_CACHE_NR, 0, vq->numa_node); /* * If log cache alloc fail, don't fail migration, but no * caching will be done, which will impact performance */ if (!vq->log_cache) VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate VQ logging cache\n", dev->ifname); } /* * The spec is not clear about it (yet), but QEMU doesn't expect * any payload in the reply. */ ctx->msg.size = 0; ctx->fd_num = 0; return RTE_VHOST_MSG_RESULT_REPLY; close_msg_fds: close_msg_fds(ctx); return RTE_VHOST_MSG_RESULT_ERR; } static int vhost_user_set_log_fd(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; if (validate_msg_fds(dev, ctx, 1) != 0) return RTE_VHOST_MSG_RESULT_ERR; close(ctx->fds[0]); VHOST_LOG_CONFIG(INFO, "(%s) not implemented.\n", dev->ifname); return RTE_VHOST_MSG_RESULT_OK; } /* * An rarp packet is constructed and broadcasted to notify switches about * the new location of the migrated VM, so that packets from outside will * not be lost after migration. * * However, we don't actually "send" a rarp packet here, instead, we set * a flag 'broadcast_rarp' to let rte_vhost_dequeue_burst() inject it. */ static int vhost_user_send_rarp(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; uint8_t *mac = (uint8_t *)&ctx->msg.payload.u64; struct rte_vdpa_device *vdpa_dev; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; VHOST_LOG_CONFIG(DEBUG, "(%s) MAC: " RTE_ETHER_ADDR_PRT_FMT "\n", dev->ifname, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); memcpy(dev->mac.addr_bytes, mac, 6); /* * Set the flag to inject a RARP broadcast packet at * rte_vhost_dequeue_burst(). * * __ATOMIC_RELEASE ordering is for making sure the mac is * copied before the flag is set. 
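 *
 * The release store pairs with an acquire (or stronger) read of
 * broadcast_rarp on the dequeue path, so a reader that observes the flag
 * set is guaranteed to also observe the updated dev->mac bytes.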
*/ __atomic_store_n(&dev->broadcast_rarp, 1, __ATOMIC_RELEASE); vdpa_dev = dev->vdpa_dev; if (vdpa_dev && vdpa_dev->ops->migration_done) vdpa_dev->ops->migration_done(dev->vid); return RTE_VHOST_MSG_RESULT_OK; } static int vhost_user_net_set_mtu(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; if (ctx->msg.payload.u64 < VIRTIO_MIN_MTU || ctx->msg.payload.u64 > VIRTIO_MAX_MTU) { VHOST_LOG_CONFIG(ERR, "(%s) invalid MTU size (%"PRIu64")\n", dev->ifname, ctx->msg.payload.u64); return RTE_VHOST_MSG_RESULT_ERR; } dev->mtu = ctx->msg.payload.u64; return RTE_VHOST_MSG_RESULT_OK; } static int vhost_user_set_req_fd(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; int fd = ctx->fds[0]; if (validate_msg_fds(dev, ctx, 1) != 0) return RTE_VHOST_MSG_RESULT_ERR; if (fd < 0) { VHOST_LOG_CONFIG(ERR, "(%s) invalid file descriptor for slave channel (%d)\n", dev->ifname, fd); return RTE_VHOST_MSG_RESULT_ERR; } if (dev->slave_req_fd >= 0) close(dev->slave_req_fd); dev->slave_req_fd = fd; return RTE_VHOST_MSG_RESULT_OK; } static int is_vring_iotlb_split(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg) { struct vhost_vring_addr *ra; uint64_t start, end, len; start = imsg->iova; end = start + imsg->size; ra = &vq->ring_addrs; len = sizeof(struct vring_desc) * vq->size; if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start) return 1; len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size; if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start) return 1; len = sizeof(struct vring_used) + sizeof(struct vring_used_elem) * vq->size; if (ra->used_user_addr < end && (ra->used_user_addr + len) > start) return 1; if (ra->flags & (1 << VHOST_VRING_F_LOG)) { len = sizeof(uint64_t); if (ra->log_guest_addr < end && (ra->log_guest_addr + len) > start) return 1; } return 0; } static int is_vring_iotlb_packed(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg) { struct vhost_vring_addr *ra; uint64_t start, end, len; start = imsg->iova; end = start + imsg->size; ra = &vq->ring_addrs; len = sizeof(struct vring_packed_desc) * vq->size; if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start) return 1; len = sizeof(struct vring_packed_desc_event); if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start) return 1; len = sizeof(struct vring_packed_desc_event); if (ra->used_user_addr < end && (ra->used_user_addr + len) > start) return 1; if (ra->flags & (1 << VHOST_VRING_F_LOG)) { len = sizeof(uint64_t); if (ra->log_guest_addr < end && (ra->log_guest_addr + len) > start) return 1; } return 0; } static int is_vring_iotlb(struct virtio_net *dev, struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg) { if (vq_is_packed(dev)) return is_vring_iotlb_packed(vq, imsg); else return is_vring_iotlb_split(vq, imsg); } static int vhost_user_iotlb_msg(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; struct vhost_iotlb_msg *imsg = &ctx->msg.payload.iotlb; uint16_t i; uint64_t vva, len; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; switch (imsg->type) { case VHOST_IOTLB_UPDATE: len = imsg->size; vva = qva_to_vva(dev, imsg->uaddr, &len); if (!vva) return RTE_VHOST_MSG_RESULT_ERR; for (i = 0; i < dev->nr_vring; i++) { struct vhost_virtqueue *vq = dev->virtqueue[i]; if (!vq) continue; 
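			/*
			 * Cache the new IOVA-to-VVA translation for this queue;
			 * if the entry overlaps the ring addresses themselves,
			 * the ring is re-translated just below under the vq
			 * access_lock.
			 */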
vhost_user_iotlb_cache_insert(dev, vq, imsg->iova, vva, len, imsg->perm); if (is_vring_iotlb(dev, vq, imsg)) { rte_spinlock_lock(&vq->access_lock); *pdev = dev = translate_ring_addresses(dev, i); rte_spinlock_unlock(&vq->access_lock); } } break; case VHOST_IOTLB_INVALIDATE: for (i = 0; i < dev->nr_vring; i++) { struct vhost_virtqueue *vq = dev->virtqueue[i]; if (!vq) continue; vhost_user_iotlb_cache_remove(vq, imsg->iova, imsg->size); if (is_vring_iotlb(dev, vq, imsg)) { rte_spinlock_lock(&vq->access_lock); vring_invalidate(dev, vq); rte_spinlock_unlock(&vq->access_lock); } } break; default: VHOST_LOG_CONFIG(ERR, "(%s) invalid IOTLB message type (%d)\n", dev->ifname, imsg->type); return RTE_VHOST_MSG_RESULT_ERR; } return RTE_VHOST_MSG_RESULT_OK; } static int vhost_user_set_postcopy_advise(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; #ifdef RTE_LIBRTE_VHOST_POSTCOPY struct uffdio_api api_struct; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); if (dev->postcopy_ufd == -1) { VHOST_LOG_CONFIG(ERR, "(%s) userfaultfd not available: %s\n", dev->ifname, strerror(errno)); return RTE_VHOST_MSG_RESULT_ERR; } api_struct.api = UFFD_API; api_struct.features = 0; if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) { VHOST_LOG_CONFIG(ERR, "(%s) UFFDIO_API ioctl failure: %s\n", dev->ifname, strerror(errno)); close(dev->postcopy_ufd); dev->postcopy_ufd = -1; return RTE_VHOST_MSG_RESULT_ERR; } ctx->fds[0] = dev->postcopy_ufd; ctx->fd_num = 1; return RTE_VHOST_MSG_RESULT_REPLY; #else dev->postcopy_ufd = -1; ctx->fd_num = 0; return RTE_VHOST_MSG_RESULT_ERR; #endif } static int vhost_user_set_postcopy_listen(struct virtio_net **pdev, struct vhu_msg_context *ctx __rte_unused, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; if (dev->mem && dev->mem->nregions) { VHOST_LOG_CONFIG(ERR, "(%s) regions already registered at postcopy-listen\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } dev->postcopy_listening = 1; return RTE_VHOST_MSG_RESULT_OK; } static int vhost_user_postcopy_end(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; dev->postcopy_listening = 0; if (dev->postcopy_ufd >= 0) { close(dev->postcopy_ufd); dev->postcopy_ufd = -1; } ctx->msg.payload.u64 = 0; ctx->msg.size = sizeof(ctx->msg.payload.u64); ctx->fd_num = 0; return RTE_VHOST_MSG_RESULT_REPLY; } static int vhost_user_get_status(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; ctx->msg.payload.u64 = dev->status; ctx->msg.size = sizeof(ctx->msg.payload.u64); ctx->fd_num = 0; return RTE_VHOST_MSG_RESULT_REPLY; } static int vhost_user_set_status(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; /* As per Virtio specification, the device status is 8bits long */ if (ctx->msg.payload.u64 > UINT8_MAX) { VHOST_LOG_CONFIG(ERR, "(%s) invalid VHOST_USER_SET_STATUS payload 0x%" PRIx64 "\n", dev->ifname, ctx->msg.payload.u64); return RTE_VHOST_MSG_RESULT_ERR; } dev->status = ctx->msg.payload.u64; if 
((dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK) && (dev->flags & VIRTIO_DEV_FEATURES_FAILED)) { VHOST_LOG_CONFIG(ERR, "(%s) FEATURES_OK bit is set but feature negotiation failed\n", dev->ifname); /* * Clear the bit to let the driver know about the feature * negotiation failure */ dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK; } VHOST_LOG_CONFIG(INFO, "(%s) new device status(0x%08x):\n", dev->ifname, dev->status); VHOST_LOG_CONFIG(INFO, "(%s)\t-RESET: %u\n", dev->ifname, (dev->status == VIRTIO_DEVICE_STATUS_RESET)); VHOST_LOG_CONFIG(INFO, "(%s)\t-ACKNOWLEDGE: %u\n", dev->ifname, !!(dev->status & VIRTIO_DEVICE_STATUS_ACK)); VHOST_LOG_CONFIG(INFO, "(%s)\t-DRIVER: %u\n", dev->ifname, !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER)); VHOST_LOG_CONFIG(INFO, "(%s)\t-FEATURES_OK: %u\n", dev->ifname, !!(dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK)); VHOST_LOG_CONFIG(INFO, "(%s)\t-DRIVER_OK: %u\n", dev->ifname, !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK)); VHOST_LOG_CONFIG(INFO, "(%s)\t-DEVICE_NEED_RESET: %u\n", dev->ifname, !!(dev->status & VIRTIO_DEVICE_STATUS_DEV_NEED_RESET)); VHOST_LOG_CONFIG(INFO, "(%s)\t-FAILED: %u\n", dev->ifname, !!(dev->status & VIRTIO_DEVICE_STATUS_FAILED)); return RTE_VHOST_MSG_RESULT_OK; } typedef int (*vhost_message_handler_t)(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd); static vhost_message_handler_t vhost_message_handlers[VHOST_USER_MAX] = { [VHOST_USER_NONE] = NULL, [VHOST_USER_GET_FEATURES] = vhost_user_get_features, [VHOST_USER_SET_FEATURES] = vhost_user_set_features, [VHOST_USER_SET_OWNER] = vhost_user_set_owner, [VHOST_USER_RESET_OWNER] = vhost_user_reset_owner, [VHOST_USER_SET_MEM_TABLE] = vhost_user_set_mem_table, [VHOST_USER_SET_LOG_BASE] = vhost_user_set_log_base, [VHOST_USER_SET_LOG_FD] = vhost_user_set_log_fd, [VHOST_USER_SET_VRING_NUM] = vhost_user_set_vring_num, [VHOST_USER_SET_VRING_ADDR] = vhost_user_set_vring_addr, [VHOST_USER_SET_VRING_BASE] = vhost_user_set_vring_base, [VHOST_USER_GET_VRING_BASE] = vhost_user_get_vring_base, [VHOST_USER_SET_VRING_KICK] = vhost_user_set_vring_kick, [VHOST_USER_SET_VRING_CALL] = vhost_user_set_vring_call, [VHOST_USER_SET_VRING_ERR] = vhost_user_set_vring_err, [VHOST_USER_GET_PROTOCOL_FEATURES] = vhost_user_get_protocol_features, [VHOST_USER_SET_PROTOCOL_FEATURES] = vhost_user_set_protocol_features, [VHOST_USER_GET_QUEUE_NUM] = vhost_user_get_queue_num, [VHOST_USER_SET_VRING_ENABLE] = vhost_user_set_vring_enable, [VHOST_USER_SEND_RARP] = vhost_user_send_rarp, [VHOST_USER_NET_SET_MTU] = vhost_user_net_set_mtu, [VHOST_USER_SET_SLAVE_REQ_FD] = vhost_user_set_req_fd, [VHOST_USER_IOTLB_MSG] = vhost_user_iotlb_msg, [VHOST_USER_POSTCOPY_ADVISE] = vhost_user_set_postcopy_advise, [VHOST_USER_POSTCOPY_LISTEN] = vhost_user_set_postcopy_listen, [VHOST_USER_POSTCOPY_END] = vhost_user_postcopy_end, [VHOST_USER_GET_INFLIGHT_FD] = vhost_user_get_inflight_fd, [VHOST_USER_SET_INFLIGHT_FD] = vhost_user_set_inflight_fd, [VHOST_USER_SET_STATUS] = vhost_user_set_status, [VHOST_USER_GET_STATUS] = vhost_user_get_status, }; /* return bytes# of read on success or negative val on failure. 
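 *
 * read_vhost_message() first reads the fixed-size header (plus any ancillary
 * file descriptors) and then, if msg.size is non-zero, reads that many
 * payload bytes, rejecting any size larger than the payload union.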
*/ static int read_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx) { int ret; ret = read_fd_message(dev->ifname, sockfd, (char *)&ctx->msg, VHOST_USER_HDR_SIZE, ctx->fds, VHOST_MEMORY_MAX_NREGIONS, &ctx->fd_num); if (ret <= 0) { return ret; } else if (ret != VHOST_USER_HDR_SIZE) { VHOST_LOG_CONFIG(ERR, "(%s) Unexpected header size read\n", dev->ifname); close_msg_fds(ctx); return -1; } if (ctx->msg.size) { if (ctx->msg.size > sizeof(ctx->msg.payload)) { VHOST_LOG_CONFIG(ERR, "(%s) invalid msg size: %d\n", dev->ifname, ctx->msg.size); return -1; } ret = read(sockfd, &ctx->msg.payload, ctx->msg.size); if (ret <= 0) return ret; if (ret != (int)ctx->msg.size) { VHOST_LOG_CONFIG(ERR, "(%s) read control message failed\n", dev->ifname); return -1; } } return ret; } static int send_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx) { if (!ctx) return 0; return send_fd_message(dev->ifname, sockfd, (char *)&ctx->msg, VHOST_USER_HDR_SIZE + ctx->msg.size, ctx->fds, ctx->fd_num); } static int send_vhost_reply(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx) { if (!ctx) return 0; ctx->msg.flags &= ~VHOST_USER_VERSION_MASK; ctx->msg.flags &= ~VHOST_USER_NEED_REPLY; ctx->msg.flags |= VHOST_USER_VERSION; ctx->msg.flags |= VHOST_USER_REPLY_MASK; return send_vhost_message(dev, sockfd, ctx); } static int send_vhost_slave_message(struct virtio_net *dev, struct vhu_msg_context *ctx) { int ret; if (ctx->msg.flags & VHOST_USER_NEED_REPLY) rte_spinlock_lock(&dev->slave_req_lock); ret = send_vhost_message(dev, dev->slave_req_fd, ctx); if (ret < 0 && (ctx->msg.flags & VHOST_USER_NEED_REPLY)) rte_spinlock_unlock(&dev->slave_req_lock); return ret; } /* * Allocate a queue pair if it hasn't been allocated yet */ static int vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, struct vhu_msg_context *ctx) { uint32_t vring_idx; switch (ctx->msg.request.master) { case VHOST_USER_SET_VRING_KICK: case VHOST_USER_SET_VRING_CALL: case VHOST_USER_SET_VRING_ERR: vring_idx = ctx->msg.payload.u64 & VHOST_USER_VRING_IDX_MASK; break; case VHOST_USER_SET_VRING_NUM: case VHOST_USER_SET_VRING_BASE: case VHOST_USER_GET_VRING_BASE: case VHOST_USER_SET_VRING_ENABLE: vring_idx = ctx->msg.payload.state.index; break; case VHOST_USER_SET_VRING_ADDR: vring_idx = ctx->msg.payload.addr.index; break; default: return 0; } if (vring_idx >= VHOST_MAX_VRING) { VHOST_LOG_CONFIG(ERR, "(%s) invalid vring index: %u\n", dev->ifname, vring_idx); return -1; } if (dev->virtqueue[vring_idx]) return 0; return alloc_vring_queue(dev, vring_idx); } static void vhost_user_lock_all_queue_pairs(struct virtio_net *dev) { unsigned int i = 0; unsigned int vq_num = 0; while (vq_num < dev->nr_vring) { struct vhost_virtqueue *vq = dev->virtqueue[i]; if (vq) { rte_spinlock_lock(&vq->access_lock); vq_num++; } i++; } } static void vhost_user_unlock_all_queue_pairs(struct virtio_net *dev) { unsigned int i = 0; unsigned int vq_num = 0; while (vq_num < dev->nr_vring) { struct vhost_virtqueue *vq = dev->virtqueue[i]; if (vq) { rte_spinlock_unlock(&vq->access_lock); vq_num++; } i++; } } int vhost_user_msg_handler(int vid, int fd) { struct virtio_net *dev; struct vhu_msg_context ctx; struct rte_vdpa_device *vdpa_dev; int ret; int unlock_required = 0; bool handled; int request; uint32_t i; dev = get_device(vid); if (dev == NULL) return -1; if (!dev->notify_ops) { dev->notify_ops = vhost_driver_callback_get(dev->ifname); if (!dev->notify_ops) { VHOST_LOG_CONFIG(ERR, "(%s) failed to get callback ops 
for driver\n", dev->ifname); return -1; } } ret = read_vhost_message(dev, fd, &ctx); if (ret <= 0) { if (ret < 0) VHOST_LOG_CONFIG(ERR, "(%s) vhost read message failed\n", dev->ifname); else VHOST_LOG_CONFIG(INFO, "(%s) vhost peer closed\n", dev->ifname); return -1; } ret = 0; request = ctx.msg.request.master; if (request > VHOST_USER_NONE && request < VHOST_USER_MAX && vhost_message_str[request]) { if (request != VHOST_USER_IOTLB_MSG) VHOST_LOG_CONFIG(INFO, "(%s) read message %s\n", dev->ifname, vhost_message_str[request]); else VHOST_LOG_CONFIG(DEBUG, "(%s) read message %s\n", dev->ifname, vhost_message_str[request]); } else { VHOST_LOG_CONFIG(DEBUG, "(%s) external request %d\n", dev->ifname, request); } ret = vhost_user_check_and_alloc_queue_pair(dev, &ctx); if (ret < 0) { VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc queue\n", dev->ifname); return -1; } /* * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE * and VHOST_USER_RESET_OWNER, since it is sent when virtio stops * and device is destroyed. destroy_device waits for queues to be * inactive, so it is safe. Otherwise taking the access_lock * would cause a dead lock. */ switch (request) { case VHOST_USER_SET_FEATURES: case VHOST_USER_SET_PROTOCOL_FEATURES: case VHOST_USER_SET_OWNER: case VHOST_USER_SET_MEM_TABLE: case VHOST_USER_SET_LOG_BASE: case VHOST_USER_SET_LOG_FD: case VHOST_USER_SET_VRING_NUM: case VHOST_USER_SET_VRING_ADDR: case VHOST_USER_SET_VRING_BASE: case VHOST_USER_SET_VRING_KICK: case VHOST_USER_SET_VRING_CALL: case VHOST_USER_SET_VRING_ERR: case VHOST_USER_SET_VRING_ENABLE: case VHOST_USER_SEND_RARP: case VHOST_USER_NET_SET_MTU: case VHOST_USER_SET_SLAVE_REQ_FD: if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) { vhost_user_lock_all_queue_pairs(dev); unlock_required = 1; } break; default: break; } handled = false; if (dev->extern_ops.pre_msg_handle) { RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0); ret = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx); switch (ret) { case RTE_VHOST_MSG_RESULT_REPLY: send_vhost_reply(dev, fd, &ctx); /* Fall-through */ case RTE_VHOST_MSG_RESULT_ERR: case RTE_VHOST_MSG_RESULT_OK: handled = true; goto skip_to_post_handle; case RTE_VHOST_MSG_RESULT_NOT_HANDLED: default: break; } } if (request > VHOST_USER_NONE && request < VHOST_USER_MAX) { if (!vhost_message_handlers[request]) goto skip_to_post_handle; ret = vhost_message_handlers[request](&dev, &ctx, fd); switch (ret) { case RTE_VHOST_MSG_RESULT_ERR: VHOST_LOG_CONFIG(ERR, "(%s) processing %s failed.\n", dev->ifname, vhost_message_str[request]); handled = true; break; case RTE_VHOST_MSG_RESULT_OK: VHOST_LOG_CONFIG(DEBUG, "(%s) processing %s succeeded.\n", dev->ifname, vhost_message_str[request]); handled = true; break; case RTE_VHOST_MSG_RESULT_REPLY: VHOST_LOG_CONFIG(DEBUG, "(%s) processing %s succeeded and needs reply.\n", dev->ifname, vhost_message_str[request]); send_vhost_reply(dev, fd, &ctx); handled = true; break; default: break; } } skip_to_post_handle: if (ret != RTE_VHOST_MSG_RESULT_ERR && dev->extern_ops.post_msg_handle) { RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0); ret = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx); switch (ret) { case RTE_VHOST_MSG_RESULT_REPLY: send_vhost_reply(dev, fd, &ctx); /* Fall-through */ case RTE_VHOST_MSG_RESULT_ERR: case RTE_VHOST_MSG_RESULT_OK: handled = true; case RTE_VHOST_MSG_RESULT_NOT_HANDLED: default: break; } } /* If message was not handled at this stage, treat it as an error */ if (!handled) { VHOST_LOG_CONFIG(ERR, "(%s) vhost message (req: 
%d) was not handled.\n", dev->ifname, request); close_msg_fds(&ctx); ret = RTE_VHOST_MSG_RESULT_ERR; } /* * If the request required a reply that was already sent, * this optional reply-ack won't be sent as the * VHOST_USER_NEED_REPLY was cleared in send_vhost_reply(). */ if (ctx.msg.flags & VHOST_USER_NEED_REPLY) { ctx.msg.payload.u64 = ret == RTE_VHOST_MSG_RESULT_ERR; ctx.msg.size = sizeof(ctx.msg.payload.u64); ctx.fd_num = 0; send_vhost_reply(dev, fd, &ctx); } else if (ret == RTE_VHOST_MSG_RESULT_ERR) { VHOST_LOG_CONFIG(ERR, "(%s) vhost message handling failed.\n", dev->ifname); return -1; } for (i = 0; i < dev->nr_vring; i++) { struct vhost_virtqueue *vq = dev->virtqueue[i]; bool cur_ready = vq_is_ready(dev, vq); if (cur_ready != (vq && vq->ready)) { vq->ready = cur_ready; vhost_user_notify_queue_state(dev, i, cur_ready); } } if (unlock_required) vhost_user_unlock_all_queue_pairs(dev); if (!virtio_is_ready(dev)) goto out; /* * Virtio is now ready. If not done already, it is time * to notify the application it can process the rings and * configure the vDPA device if present. */ if (!(dev->flags & VIRTIO_DEV_RUNNING)) { if (dev->notify_ops->new_device(dev->vid) == 0) dev->flags |= VIRTIO_DEV_RUNNING; } vdpa_dev = dev->vdpa_dev; if (!vdpa_dev) goto out; if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) { if (vdpa_dev->ops->dev_conf(dev->vid)) VHOST_LOG_CONFIG(ERR, "(%s) failed to configure vDPA device\n", dev->ifname); else dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED; } out: return 0; } static int process_slave_message_reply(struct virtio_net *dev, const struct vhu_msg_context *ctx) { struct vhu_msg_context msg_reply; int ret; if ((ctx->msg.flags & VHOST_USER_NEED_REPLY) == 0) return 0; ret = read_vhost_message(dev, dev->slave_req_fd, &msg_reply); if (ret <= 0) { if (ret < 0) VHOST_LOG_CONFIG(ERR, "(%s) vhost read slave message reply failed\n", dev->ifname); else VHOST_LOG_CONFIG(INFO, "(%s) vhost peer closed\n", dev->ifname); ret = -1; goto out; } ret = 0; if (msg_reply.msg.request.slave != ctx->msg.request.slave) { VHOST_LOG_CONFIG(ERR, "(%s) received unexpected msg type (%u), expected %u\n", dev->ifname, msg_reply.msg.request.slave, ctx->msg.request.slave); ret = -1; goto out; } ret = msg_reply.msg.payload.u64 ? 
-1 : 0; out: rte_spinlock_unlock(&dev->slave_req_lock); return ret; } int vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm) { int ret; struct vhu_msg_context ctx = { .msg = { .request.slave = VHOST_USER_SLAVE_IOTLB_MSG, .flags = VHOST_USER_VERSION, .size = sizeof(ctx.msg.payload.iotlb), .payload.iotlb = { .iova = iova, .perm = perm, .type = VHOST_IOTLB_MISS, }, }, }; ret = send_vhost_message(dev, dev->slave_req_fd, &ctx); if (ret < 0) { VHOST_LOG_CONFIG(ERR, "(%s) failed to send IOTLB miss message (%d)\n", dev->ifname, ret); return ret; } return 0; } static int vhost_user_slave_config_change(struct virtio_net *dev, bool need_reply) { int ret; struct vhu_msg_context ctx = { .msg = { .request.slave = VHOST_USER_SLAVE_CONFIG_CHANGE_MSG, .flags = VHOST_USER_VERSION, .size = 0, } }; if (need_reply) ctx.msg.flags |= VHOST_USER_NEED_REPLY; ret = send_vhost_slave_message(dev, &ctx); if (ret < 0) { VHOST_LOG_CONFIG(ERR, "(%s) failed to send config change (%d)\n", dev->ifname, ret); return ret; } return process_slave_message_reply(dev, &ctx); } int rte_vhost_slave_config_change(int vid, bool need_reply) { struct virtio_net *dev; dev = get_device(vid); if (!dev) return -ENODEV; return vhost_user_slave_config_change(dev, need_reply); } static int vhost_user_slave_set_vring_host_notifier(struct virtio_net *dev, int index, int fd, uint64_t offset, uint64_t size) { int ret; struct vhu_msg_context ctx = { .msg = { .request.slave = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG, .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY, .size = sizeof(ctx.msg.payload.area), .payload.area = { .u64 = index & VHOST_USER_VRING_IDX_MASK, .size = size, .offset = offset, }, }, }; if (fd < 0) ctx.msg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK; else { ctx.fds[0] = fd; ctx.fd_num = 1; } ret = send_vhost_slave_message(dev, &ctx); if (ret < 0) { VHOST_LOG_CONFIG(ERR, "(%s) failed to set host notifier (%d)\n", dev->ifname, ret); return ret; } return process_slave_message_reply(dev, &ctx); } int rte_vhost_host_notifier_ctrl(int vid, uint16_t qid, bool enable) { struct virtio_net *dev; struct rte_vdpa_device *vdpa_dev; int vfio_device_fd, ret = 0; uint64_t offset, size; unsigned int i, q_start, q_last; dev = get_device(vid); if (!dev) return -ENODEV; vdpa_dev = dev->vdpa_dev; if (vdpa_dev == NULL) return -ENODEV; if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) || !(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) || !(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ)) || !(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) || !(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER))) return -ENOTSUP; if (qid == RTE_VHOST_QUEUE_ALL) { q_start = 0; q_last = dev->nr_vring - 1; } else { if (qid >= dev->nr_vring) return -EINVAL; q_start = qid; q_last = qid; } RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_vfio_device_fd, -ENOTSUP); RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_notify_area, -ENOTSUP); vfio_device_fd = vdpa_dev->ops->get_vfio_device_fd(vid); if (vfio_device_fd < 0) return -ENOTSUP; if (enable) { for (i = q_start; i <= q_last; i++) { if (vdpa_dev->ops->get_notify_area(vid, i, &offset, &size) < 0) { ret = -ENOTSUP; goto disable; } if (vhost_user_slave_set_vring_host_notifier(dev, i, vfio_device_fd, offset, size) < 0) { ret = -EFAULT; goto disable; } } } else { disable: for (i = q_start; i <= q_last; i++) { vhost_user_slave_set_vring_host_notifier(dev, i, -1, 0, 0); } } return ret; }
/* SPDX-License-Identifier: BSD-3-Clause * Copyright(c) 2010-2018 Intel Corporation */ /* Security model * -------------- * The vhost-user protocol connection is an external interface, so it must be * robust against invalid inputs. * * This is important because the vhost-user master is only one step removed * from the guest. Malicious guests that have escaped will then launch further * attacks from the vhost-user master. * * Even in deployments where guests are trusted, a bug in the vhost-user master * can still cause invalid messages to be sent. Such messages must not * compromise the stability of the DPDK application by causing crashes, memory * corruption, or other problematic behavior. * * Do not assume received VhostUserMsg fields contain sensible values! */ #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <fcntl.h> #include <sys/ioctl.h> #include <sys/mman.h> #include <sys/stat.h> #include <sys/syscall.h> #ifdef RTE_LIBRTE_VHOST_NUMA #include <numaif.h> #endif #ifdef RTE_LIBRTE_VHOST_POSTCOPY #include <linux/userfaultfd.h> #endif #ifdef F_ADD_SEALS /* if file sealing is supported, so is memfd */ #include <linux/memfd.h> #define MEMFD_SUPPORTED #endif #include <rte_common.h> #include <rte_malloc.h> #include <rte_log.h> #include <rte_vfio.h> #include <rte_errno.h> #include "iotlb.h" #include "vhost.h" #include "vhost_user.h" #define VIRTIO_MIN_MTU 68 #define VIRTIO_MAX_MTU 65535 #define INFLIGHT_ALIGNMENT 64 #define INFLIGHT_VERSION 0x1 static const char *vhost_message_str[VHOST_USER_MAX] = { [VHOST_USER_NONE] = "VHOST_USER_NONE", [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES", [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES", [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER", [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER", [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE", [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE", [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD", [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM", [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR", [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE", [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE", [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK", [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL", [VHOST_USER_SET_VRING_ERR] = "VHOST_USER_SET_VRING_ERR", [VHOST_USER_GET_PROTOCOL_FEATURES] = "VHOST_USER_GET_PROTOCOL_FEATURES", [VHOST_USER_SET_PROTOCOL_FEATURES] = "VHOST_USER_SET_PROTOCOL_FEATURES", [VHOST_USER_GET_QUEUE_NUM] = "VHOST_USER_GET_QUEUE_NUM", [VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE", [VHOST_USER_SEND_RARP] = "VHOST_USER_SEND_RARP", [VHOST_USER_NET_SET_MTU] = "VHOST_USER_NET_SET_MTU", [VHOST_USER_SET_SLAVE_REQ_FD] = "VHOST_USER_SET_SLAVE_REQ_FD", [VHOST_USER_IOTLB_MSG] = "VHOST_USER_IOTLB_MSG", [VHOST_USER_CRYPTO_CREATE_SESS] = "VHOST_USER_CRYPTO_CREATE_SESS", [VHOST_USER_CRYPTO_CLOSE_SESS] = "VHOST_USER_CRYPTO_CLOSE_SESS", [VHOST_USER_POSTCOPY_ADVISE] = "VHOST_USER_POSTCOPY_ADVISE", [VHOST_USER_POSTCOPY_LISTEN] = "VHOST_USER_POSTCOPY_LISTEN", [VHOST_USER_POSTCOPY_END] = "VHOST_USER_POSTCOPY_END", [VHOST_USER_GET_INFLIGHT_FD] = "VHOST_USER_GET_INFLIGHT_FD", [VHOST_USER_SET_INFLIGHT_FD] = "VHOST_USER_SET_INFLIGHT_FD", [VHOST_USER_SET_STATUS] = "VHOST_USER_SET_STATUS", [VHOST_USER_GET_STATUS] = "VHOST_USER_GET_STATUS", }; static int send_vhost_reply(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx); static int read_vhost_message(struct 
virtio_net *dev, int sockfd, struct vhu_msg_context *ctx); static void close_msg_fds(struct vhu_msg_context *ctx) { int i; for (i = 0; i < ctx->fd_num; i++) { int fd = ctx->fds[i]; if (fd == -1) continue; ctx->fds[i] = -1; close(fd); } } /* * Ensure the expected number of FDs is received, * close all FDs and return an error if this is not the case. */ static int validate_msg_fds(struct virtio_net *dev, struct vhu_msg_context *ctx, int expected_fds) { if (ctx->fd_num == expected_fds) return 0; VHOST_LOG_CONFIG(ERR, "(%s) expect %d FDs for request %s, received %d\n", dev->ifname, expected_fds, vhost_message_str[ctx->msg.request.master], ctx->fd_num); close_msg_fds(ctx); return -1; } static uint64_t get_blk_size(int fd) { struct stat stat; int ret; ret = fstat(fd, &stat); return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize; } static void async_dma_map(struct virtio_net *dev, bool do_map) { int ret = 0; uint32_t i; struct guest_page *page; if (do_map) { for (i = 0; i < dev->nr_guest_pages; i++) { page = &dev->guest_pages[i]; ret = rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD, page->host_user_addr, page->host_iova, page->size); if (ret) { /* * DMA device may bind with kernel driver, in this case, * we don't need to program IOMMU manually. However, if no * device is bound with vfio/uio in DPDK, and vfio kernel * module is loaded, the API will still be called and return * with ENODEV. * * DPDK vfio only returns ENODEV in very similar situations * (vfio either unsupported, or supported but no devices found). * Either way, no mappings could be performed. We treat it as * normal case in async path. This is a workaround. */ if (rte_errno == ENODEV) return; /* DMA mapping errors won't stop VHOST_USER_SET_MEM_TABLE. */ VHOST_LOG_CONFIG(ERR, "DMA engine map failed\n"); } } } else { for (i = 0; i < dev->nr_guest_pages; i++) { page = &dev->guest_pages[i]; ret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD, page->host_user_addr, page->host_iova, page->size); if (ret) { /* like DMA map, ignore the kernel driver case when unmap. 
*/ if (rte_errno == EINVAL) return; VHOST_LOG_CONFIG(ERR, "DMA engine unmap failed\n"); } } } } static void free_mem_region(struct virtio_net *dev) { uint32_t i; struct rte_vhost_mem_region *reg; if (!dev || !dev->mem) return; if (dev->async_copy && rte_vfio_is_enabled("vfio")) async_dma_map(dev, false); for (i = 0; i < dev->mem->nregions; i++) { reg = &dev->mem->regions[i]; if (reg->host_user_addr) { munmap(reg->mmap_addr, reg->mmap_size); close(reg->fd); } } } void vhost_backend_cleanup(struct virtio_net *dev) { struct rte_vdpa_device *vdpa_dev; vdpa_dev = dev->vdpa_dev; if (vdpa_dev && vdpa_dev->ops->dev_cleanup != NULL) vdpa_dev->ops->dev_cleanup(dev->vid); if (dev->mem) { free_mem_region(dev); rte_free(dev->mem); dev->mem = NULL; } rte_free(dev->guest_pages); dev->guest_pages = NULL; if (dev->log_addr) { munmap((void *)(uintptr_t)dev->log_addr, dev->log_size); dev->log_addr = 0; } if (dev->inflight_info) { if (dev->inflight_info->addr) { munmap(dev->inflight_info->addr, dev->inflight_info->size); dev->inflight_info->addr = NULL; } if (dev->inflight_info->fd >= 0) { close(dev->inflight_info->fd); dev->inflight_info->fd = -1; } rte_free(dev->inflight_info); dev->inflight_info = NULL; } if (dev->slave_req_fd >= 0) { close(dev->slave_req_fd); dev->slave_req_fd = -1; } if (dev->postcopy_ufd >= 0) { close(dev->postcopy_ufd); dev->postcopy_ufd = -1; } dev->postcopy_listening = 0; } static void vhost_user_notify_queue_state(struct virtio_net *dev, uint16_t index, int enable) { struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev; struct vhost_virtqueue *vq = dev->virtqueue[index]; /* Configure guest notifications on enable */ if (enable && vq->notif_enable != VIRTIO_UNINITIALIZED_NOTIF) vhost_enable_guest_notification(dev, vq, vq->notif_enable); if (vdpa_dev && vdpa_dev->ops->set_vring_state) vdpa_dev->ops->set_vring_state(dev->vid, index, enable); if (dev->notify_ops->vring_state_changed) dev->notify_ops->vring_state_changed(dev->vid, index, enable); } /* * This function just returns success at the moment unless * the device hasn't been initialised. */ static int vhost_user_set_owner(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; return RTE_VHOST_MSG_RESULT_OK; } static int vhost_user_reset_owner(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; vhost_destroy_device_notify(dev); cleanup_device(dev, 0); reset_device(dev); return RTE_VHOST_MSG_RESULT_OK; } /* * The features that we support are requested. */ static int vhost_user_get_features(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; uint64_t features = 0; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; rte_vhost_driver_get_features(dev->ifname, &features); ctx->msg.payload.u64 = features; ctx->msg.size = sizeof(ctx->msg.payload.u64); ctx->fd_num = 0; return RTE_VHOST_MSG_RESULT_REPLY; } /* * The queue number that we support are requested. 
*/ static int vhost_user_get_queue_num(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; uint32_t queue_num = 0; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; rte_vhost_driver_get_queue_num(dev->ifname, &queue_num); ctx->msg.payload.u64 = (uint64_t)queue_num; ctx->msg.size = sizeof(ctx->msg.payload.u64); ctx->fd_num = 0; return RTE_VHOST_MSG_RESULT_REPLY; } /* * We receive the negotiated features supported by us and the virtio device. */ static int vhost_user_set_features(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; uint64_t features = ctx->msg.payload.u64; uint64_t vhost_features = 0; struct rte_vdpa_device *vdpa_dev; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; rte_vhost_driver_get_features(dev->ifname, &vhost_features); if (features & ~vhost_features) { VHOST_LOG_CONFIG(ERR, "(%s) received invalid negotiated features.\n", dev->ifname); dev->flags |= VIRTIO_DEV_FEATURES_FAILED; dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK; return RTE_VHOST_MSG_RESULT_ERR; } if (dev->flags & VIRTIO_DEV_RUNNING) { if (dev->features == features) return RTE_VHOST_MSG_RESULT_OK; /* * Error out if master tries to change features while device is * in running state. The exception being VHOST_F_LOG_ALL, which * is enabled when the live-migration starts. */ if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) { VHOST_LOG_CONFIG(ERR, "(%s) features changed while device is running.\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } if (dev->notify_ops->features_changed) dev->notify_ops->features_changed(dev->vid, features); } dev->features = features; if (dev->features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1) | (1ULL << VIRTIO_F_RING_PACKED))) { dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf); } else { dev->vhost_hlen = sizeof(struct virtio_net_hdr); } VHOST_LOG_CONFIG(INFO, "(%s) negotiated Virtio features: 0x%" PRIx64 "\n", dev->ifname, dev->features); VHOST_LOG_CONFIG(DEBUG, "(%s) mergeable RX buffers %s, virtio 1 %s\n", dev->ifname, (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off", (dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off"); if ((dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) && !(dev->features & (1ULL << VIRTIO_NET_F_MQ))) { /* * Remove all but first queue pair if MQ hasn't been * negotiated. This is safe because the device is not * running at this stage. */ while (dev->nr_vring > 2) { struct vhost_virtqueue *vq; vq = dev->virtqueue[--dev->nr_vring]; if (!vq) continue; dev->virtqueue[dev->nr_vring] = NULL; cleanup_vq(vq, 1); cleanup_vq_inflight(dev, vq); free_vq(dev, vq); } } vdpa_dev = dev->vdpa_dev; if (vdpa_dev) vdpa_dev->ops->set_features(dev->vid); dev->flags &= ~VIRTIO_DEV_FEATURES_FAILED; return RTE_VHOST_MSG_RESULT_OK; } /* * The virtio device sends us the size of the descriptor ring. 
*/ static int vhost_user_set_vring_num(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index]; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; if (ctx->msg.payload.state.num > 32768) { VHOST_LOG_CONFIG(ERR, "(%s) invalid virtqueue size %u\n", dev->ifname, ctx->msg.payload.state.num); return RTE_VHOST_MSG_RESULT_ERR; } vq->size = ctx->msg.payload.state.num; /* VIRTIO 1.0, 2.4 Virtqueues says: * * Queue Size value is always a power of 2. The maximum Queue Size * value is 32768. * * VIRTIO 1.1 2.7 Virtqueues says: * * Packed virtqueues support up to 2^15 entries each. */ if (!vq_is_packed(dev)) { if (vq->size & (vq->size - 1)) { VHOST_LOG_CONFIG(ERR, "(%s) invalid virtqueue size %u\n", dev->ifname, vq->size); return RTE_VHOST_MSG_RESULT_ERR; } } if (vq_is_packed(dev)) { rte_free(vq->shadow_used_packed); vq->shadow_used_packed = rte_malloc_socket(NULL, vq->size * sizeof(struct vring_used_elem_packed), RTE_CACHE_LINE_SIZE, vq->numa_node); if (!vq->shadow_used_packed) { VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for shadow used ring.\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } } else { rte_free(vq->shadow_used_split); vq->shadow_used_split = rte_malloc_socket(NULL, vq->size * sizeof(struct vring_used_elem), RTE_CACHE_LINE_SIZE, vq->numa_node); if (!vq->shadow_used_split) { VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for vq internal data.\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } } rte_free(vq->batch_copy_elems); vq->batch_copy_elems = rte_malloc_socket(NULL, vq->size * sizeof(struct batch_copy_elem), RTE_CACHE_LINE_SIZE, vq->numa_node); if (!vq->batch_copy_elems) { VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for batching copy.\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } return RTE_VHOST_MSG_RESULT_OK; } /* * Reallocate virtio_dev, vhost_virtqueue and related data structures to * make them on the same numa node as the memory of vring descriptor. */ #ifdef RTE_LIBRTE_VHOST_NUMA static struct virtio_net* numa_realloc(struct virtio_net *dev, int index) { int node, dev_node; struct virtio_net *old_dev; struct vhost_virtqueue *vq; struct batch_copy_elem *bce; struct guest_page *gp; struct rte_vhost_memory *mem; size_t mem_size; int ret; old_dev = dev; vq = dev->virtqueue[index]; /* * If VQ is ready, it is too late to reallocate, it certainly already * happened anyway on VHOST_USER_SET_VRING_ADRR. 
*/ if (vq->ready) return dev; ret = get_mempolicy(&node, NULL, 0, vq->desc, MPOL_F_NODE | MPOL_F_ADDR); if (ret) { VHOST_LOG_CONFIG(ERR, "(%s) unable to get virtqueue %d numa information.\n", dev->ifname, index); return dev; } if (node == vq->numa_node) goto out_dev_realloc; vq = rte_realloc_socket(vq, sizeof(*vq), 0, node); if (!vq) { VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc virtqueue %d on node %d\n", dev->ifname, index, node); return dev; } if (vq != dev->virtqueue[index]) { VHOST_LOG_CONFIG(INFO, "(%s) reallocated virtqueue on node %d\n", dev->ifname, node); dev->virtqueue[index] = vq; vhost_user_iotlb_init(dev, index); } if (vq_is_packed(dev)) { struct vring_used_elem_packed *sup; sup = rte_realloc_socket(vq->shadow_used_packed, vq->size * sizeof(*sup), RTE_CACHE_LINE_SIZE, node); if (!sup) { VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc shadow packed on node %d\n", dev->ifname, node); return dev; } vq->shadow_used_packed = sup; } else { struct vring_used_elem *sus; sus = rte_realloc_socket(vq->shadow_used_split, vq->size * sizeof(*sus), RTE_CACHE_LINE_SIZE, node); if (!sus) { VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc shadow split on node %d\n", dev->ifname, node); return dev; } vq->shadow_used_split = sus; } bce = rte_realloc_socket(vq->batch_copy_elems, vq->size * sizeof(*bce), RTE_CACHE_LINE_SIZE, node); if (!bce) { VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc batch copy elem on node %d\n", dev->ifname, node); return dev; } vq->batch_copy_elems = bce; if (vq->log_cache) { struct log_cache_entry *lc; lc = rte_realloc_socket(vq->log_cache, sizeof(*lc) * VHOST_LOG_CACHE_NR, 0, node); if (!lc) { VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc log cache on node %d\n", dev->ifname, node); return dev; } vq->log_cache = lc; } if (vq->resubmit_inflight) { struct rte_vhost_resubmit_info *ri; ri = rte_realloc_socket(vq->resubmit_inflight, sizeof(*ri), 0, node); if (!ri) { VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc resubmit inflight on node %d\n", dev->ifname, node); return dev; } vq->resubmit_inflight = ri; if (ri->resubmit_list) { struct rte_vhost_resubmit_desc *rd; rd = rte_realloc_socket(ri->resubmit_list, sizeof(*rd) * ri->resubmit_num, 0, node); if (!rd) { VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc resubmit list on node %d\n", dev->ifname, node); return dev; } ri->resubmit_list = rd; } } vq->numa_node = node; out_dev_realloc: if (dev->flags & VIRTIO_DEV_RUNNING) return dev; ret = get_mempolicy(&dev_node, NULL, 0, dev, MPOL_F_NODE | MPOL_F_ADDR); if (ret) { VHOST_LOG_CONFIG(ERR, "(%s) unable to get numa information.\n", dev->ifname); return dev; } if (dev_node == node) return dev; dev = rte_realloc_socket(old_dev, sizeof(*dev), 0, node); if (!dev) { VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc dev on node %d\n", old_dev->ifname, node); return old_dev; } VHOST_LOG_CONFIG(INFO, "(%s) reallocated device on node %d\n", dev->ifname, node); vhost_devices[dev->vid] = dev; mem_size = sizeof(struct rte_vhost_memory) + sizeof(struct rte_vhost_mem_region) * dev->mem->nregions; mem = rte_realloc_socket(dev->mem, mem_size, 0, node); if (!mem) { VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc mem table on node %d\n", dev->ifname, node); return dev; } dev->mem = mem; gp = rte_realloc_socket(dev->guest_pages, dev->max_guest_pages * sizeof(*gp), RTE_CACHE_LINE_SIZE, node); if (!gp) { VHOST_LOG_CONFIG(ERR, "(%s) failed to realloc guest pages on node %d\n", dev->ifname, node); return dev; } dev->guest_pages = gp; return dev; } #else static struct virtio_net* numa_realloc(struct virtio_net *dev, 
int index __rte_unused) { return dev; } #endif /* Converts QEMU virtual address to Vhost virtual address. */ static uint64_t qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len) { struct rte_vhost_mem_region *r; uint32_t i; if (unlikely(!dev || !dev->mem)) goto out_error; /* Find the region where the address lives. */ for (i = 0; i < dev->mem->nregions; i++) { r = &dev->mem->regions[i]; if (qva >= r->guest_user_addr && qva < r->guest_user_addr + r->size) { if (unlikely(*len > r->guest_user_addr + r->size - qva)) *len = r->guest_user_addr + r->size - qva; return qva - r->guest_user_addr + r->host_user_addr; } } out_error: *len = 0; return 0; } /* * Converts ring address to Vhost virtual address. * If IOMMU is enabled, the ring address is a guest IO virtual address, * else it is a QEMU virtual address. */ static uint64_t ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq, uint64_t ra, uint64_t *size) { if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) { uint64_t vva; vhost_user_iotlb_rd_lock(vq); vva = vhost_iova_to_vva(dev, vq, ra, size, VHOST_ACCESS_RW); vhost_user_iotlb_rd_unlock(vq); return vva; } return qva_to_vva(dev, ra, size); } static uint64_t log_addr_to_gpa(struct virtio_net *dev, struct vhost_virtqueue *vq) { uint64_t log_gpa; vhost_user_iotlb_rd_lock(vq); log_gpa = translate_log_addr(dev, vq, vq->ring_addrs.log_guest_addr); vhost_user_iotlb_rd_unlock(vq); return log_gpa; } static struct virtio_net * translate_ring_addresses(struct virtio_net *dev, int vq_index) { struct vhost_virtqueue *vq = dev->virtqueue[vq_index]; struct vhost_vring_addr *addr = &vq->ring_addrs; uint64_t len, expected_len; if (addr->flags & (1 << VHOST_VRING_F_LOG)) { vq->log_guest_addr = log_addr_to_gpa(dev, vq); if (vq->log_guest_addr == 0) { VHOST_LOG_CONFIG(DEBUG, "(%s) failed to map log_guest_addr.\n", dev->ifname); return dev; } } if (vq_is_packed(dev)) { len = sizeof(struct vring_packed_desc) * vq->size; vq->desc_packed = (struct vring_packed_desc *)(uintptr_t) ring_addr_to_vva(dev, vq, addr->desc_user_addr, &len); if (vq->desc_packed == NULL || len != sizeof(struct vring_packed_desc) * vq->size) { VHOST_LOG_CONFIG(DEBUG, "(%s) failed to map desc_packed ring.\n", dev->ifname); return dev; } dev = numa_realloc(dev, vq_index); vq = dev->virtqueue[vq_index]; addr = &vq->ring_addrs; len = sizeof(struct vring_packed_desc_event); vq->driver_event = (struct vring_packed_desc_event *) (uintptr_t)ring_addr_to_vva(dev, vq, addr->avail_user_addr, &len); if (vq->driver_event == NULL || len != sizeof(struct vring_packed_desc_event)) { VHOST_LOG_CONFIG(DEBUG, "(%s) failed to find driver area address.\n", dev->ifname); return dev; } len = sizeof(struct vring_packed_desc_event); vq->device_event = (struct vring_packed_desc_event *) (uintptr_t)ring_addr_to_vva(dev, vq, addr->used_user_addr, &len); if (vq->device_event == NULL || len != sizeof(struct vring_packed_desc_event)) { VHOST_LOG_CONFIG(DEBUG, "(%s) failed to find device area address.\n", dev->ifname); return dev; } vq->access_ok = true; return dev; } /* The addresses are converted from QEMU virtual to Vhost virtual. 
*/ if (vq->desc && vq->avail && vq->used) return dev; len = sizeof(struct vring_desc) * vq->size; vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev, vq, addr->desc_user_addr, &len); if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) { VHOST_LOG_CONFIG(DEBUG, "(%s) failed to map desc ring.\n", dev->ifname); return dev; } dev = numa_realloc(dev, vq_index); vq = dev->virtqueue[vq_index]; addr = &vq->ring_addrs; len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size; if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) len += sizeof(uint16_t); expected_len = len; vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev, vq, addr->avail_user_addr, &len); if (vq->avail == 0 || len != expected_len) { VHOST_LOG_CONFIG(DEBUG, "(%s) failed to map avail ring.\n", dev->ifname); return dev; } len = sizeof(struct vring_used) + sizeof(struct vring_used_elem) * vq->size; if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) len += sizeof(uint16_t); expected_len = len; vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev, vq, addr->used_user_addr, &len); if (vq->used == 0 || len != expected_len) { VHOST_LOG_CONFIG(DEBUG, "(%s) failed to map used ring.\n", dev->ifname); return dev; } if (vq->last_used_idx != vq->used->idx) { VHOST_LOG_CONFIG(WARNING, "(%s) last_used_idx (%u) and vq->used->idx (%u) mismatches;\n", dev->ifname, vq->last_used_idx, vq->used->idx); vq->last_used_idx = vq->used->idx; vq->last_avail_idx = vq->used->idx; VHOST_LOG_CONFIG(WARNING, "(%s) some packets maybe resent for Tx and dropped for Rx\n", dev->ifname); } vq->access_ok = true; VHOST_LOG_CONFIG(DEBUG, "(%s) mapped address desc: %p\n", dev->ifname, vq->desc); VHOST_LOG_CONFIG(DEBUG, "(%s) mapped address avail: %p\n", dev->ifname, vq->avail); VHOST_LOG_CONFIG(DEBUG, "(%s) mapped address used: %p\n", dev->ifname, vq->used); VHOST_LOG_CONFIG(DEBUG, "(%s) log_guest_addr: %" PRIx64 "\n", dev->ifname, vq->log_guest_addr); return dev; } /* * The virtio device sends us the desc, used and avail ring addresses. * This function then converts these to our address space. */ static int vhost_user_set_vring_addr(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; struct vhost_virtqueue *vq; struct vhost_vring_addr *addr = &ctx->msg.payload.addr; bool access_ok; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; if (dev->mem == NULL) return RTE_VHOST_MSG_RESULT_ERR; /* addr->index refers to the queue index. The txq 1, rxq is 0. */ vq = dev->virtqueue[ctx->msg.payload.addr.index]; access_ok = vq->access_ok; /* * Rings addresses should not be interpreted as long as the ring is not * started and enabled */ memcpy(&vq->ring_addrs, addr, sizeof(*addr)); vring_invalidate(dev, vq); if ((vq->enabled && (dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) || access_ok) { dev = translate_ring_addresses(dev, ctx->msg.payload.addr.index); if (!dev) return RTE_VHOST_MSG_RESULT_ERR; *pdev = dev; } return RTE_VHOST_MSG_RESULT_OK; } /* * The virtio device sends us the available ring last used index. 
*/ static int vhost_user_set_vring_base(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index]; uint64_t val = ctx->msg.payload.state.num; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; if (vq_is_packed(dev)) { /* * Bit[0:14]: avail index * Bit[15]: avail wrap counter */ vq->last_avail_idx = val & 0x7fff; vq->avail_wrap_counter = !!(val & (0x1 << 15)); /* * Set used index to same value as available one, as * their values should be the same since ring processing * was stopped at get time. */ vq->last_used_idx = vq->last_avail_idx; vq->used_wrap_counter = vq->avail_wrap_counter; } else { vq->last_used_idx = ctx->msg.payload.state.num; vq->last_avail_idx = ctx->msg.payload.state.num; } VHOST_LOG_CONFIG(INFO, "(%s) vring base idx:%u last_used_idx:%u last_avail_idx:%u.\n", dev->ifname, ctx->msg.payload.state.index, vq->last_used_idx, vq->last_avail_idx); return RTE_VHOST_MSG_RESULT_OK; } static int add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr, uint64_t host_iova, uint64_t host_user_addr, uint64_t size) { struct guest_page *page, *last_page; struct guest_page *old_pages; if (dev->nr_guest_pages == dev->max_guest_pages) { dev->max_guest_pages *= 2; old_pages = dev->guest_pages; dev->guest_pages = rte_realloc(dev->guest_pages, dev->max_guest_pages * sizeof(*page), RTE_CACHE_LINE_SIZE); if (dev->guest_pages == NULL) { VHOST_LOG_CONFIG(ERR, "cannot realloc guest_pages\n"); rte_free(old_pages); return -1; } } if (dev->nr_guest_pages > 0) { last_page = &dev->guest_pages[dev->nr_guest_pages - 1]; /* merge if the two pages are continuous */ if (host_iova == last_page->host_iova + last_page->size && guest_phys_addr == last_page->guest_phys_addr + last_page->size && host_user_addr == last_page->host_user_addr + last_page->size) { last_page->size += size; return 0; } } page = &dev->guest_pages[dev->nr_guest_pages++]; page->guest_phys_addr = guest_phys_addr; page->host_iova = host_iova; page->host_user_addr = host_user_addr; page->size = size; return 0; } static int add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg, uint64_t page_size) { uint64_t reg_size = reg->size; uint64_t host_user_addr = reg->host_user_addr; uint64_t guest_phys_addr = reg->guest_phys_addr; uint64_t host_iova; uint64_t size; host_iova = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr); size = page_size - (guest_phys_addr & (page_size - 1)); size = RTE_MIN(size, reg_size); if (add_one_guest_page(dev, guest_phys_addr, host_iova, host_user_addr, size) < 0) return -1; host_user_addr += size; guest_phys_addr += size; reg_size -= size; while (reg_size > 0) { size = RTE_MIN(reg_size, page_size); host_iova = rte_mem_virt2iova((void *)(uintptr_t) host_user_addr); if (add_one_guest_page(dev, guest_phys_addr, host_iova, host_user_addr, size) < 0) return -1; host_user_addr += size; guest_phys_addr += size; reg_size -= size; } /* sort guest page array if over binary search threshold */ if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) { qsort((void *)dev->guest_pages, dev->nr_guest_pages, sizeof(struct guest_page), guest_page_addrcmp); } return 0; } #ifdef RTE_LIBRTE_VHOST_DEBUG /* TODO: enable it only in debug mode? 
*/ static void dump_guest_pages(struct virtio_net *dev) { uint32_t i; struct guest_page *page; for (i = 0; i < dev->nr_guest_pages; i++) { page = &dev->guest_pages[i]; VHOST_LOG_CONFIG(INFO, "(%s) guest physical page region %u\n", dev->ifname, i); VHOST_LOG_CONFIG(INFO, "(%s)\tguest_phys_addr: %" PRIx64 "\n", dev->ifname, page->guest_phys_addr); VHOST_LOG_CONFIG(INFO, "(%s)\thost_iova : %" PRIx64 "\n", dev->ifname, page->host_iova); VHOST_LOG_CONFIG(INFO, "(%s)\tsize : %" PRIx64 "\n", dev->ifname, page->size); } } #else #define dump_guest_pages(dev) #endif static bool vhost_memory_changed(struct VhostUserMemory *new, struct rte_vhost_memory *old) { uint32_t i; if (new->nregions != old->nregions) return true; for (i = 0; i < new->nregions; ++i) { VhostUserMemoryRegion *new_r = &new->regions[i]; struct rte_vhost_mem_region *old_r = &old->regions[i]; if (new_r->guest_phys_addr != old_r->guest_phys_addr) return true; if (new_r->memory_size != old_r->size) return true; if (new_r->userspace_addr != old_r->guest_user_addr) return true; } return false; } #ifdef RTE_LIBRTE_VHOST_POSTCOPY static int vhost_user_postcopy_region_register(struct virtio_net *dev, struct rte_vhost_mem_region *reg) { struct uffdio_register reg_struct; /* * Let's register all the mmapped area to ensure * alignment on page boundary. */ reg_struct.range.start = (uint64_t)(uintptr_t)reg->mmap_addr; reg_struct.range.len = reg->mmap_size; reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING; if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) { VHOST_LOG_CONFIG(ERR, "(%s) failed to register ufd for region " "%" PRIx64 " - %" PRIx64 " (ufd = %d) %s\n", dev->ifname, (uint64_t)reg_struct.range.start, (uint64_t)reg_struct.range.start + (uint64_t)reg_struct.range.len - 1, dev->postcopy_ufd, strerror(errno)); return -1; } VHOST_LOG_CONFIG(INFO, "(%s)\t userfaultfd registered for range : %" PRIx64 " - %" PRIx64 "\n", dev->ifname, (uint64_t)reg_struct.range.start, (uint64_t)reg_struct.range.start + (uint64_t)reg_struct.range.len - 1); return 0; } #else static int vhost_user_postcopy_region_register(struct virtio_net *dev __rte_unused, struct rte_vhost_mem_region *reg __rte_unused) { return -1; } #endif static int vhost_user_postcopy_register(struct virtio_net *dev, int main_fd, struct vhu_msg_context *ctx) { struct VhostUserMemory *memory; struct rte_vhost_mem_region *reg; struct vhu_msg_context ack_ctx; uint32_t i; if (!dev->postcopy_listening) return 0; /* * We haven't a better way right now than sharing * DPDK's virtual address with Qemu, so that Qemu can * retrieve the region offset when handling userfaults. */ memory = &ctx->msg.payload.memory; for (i = 0; i < memory->nregions; i++) { reg = &dev->mem->regions[i]; memory->regions[i].userspace_addr = reg->host_user_addr; } /* Send the addresses back to qemu */ ctx->fd_num = 0; send_vhost_reply(dev, main_fd, ctx); /* Wait for qemu to acknowledge it got the addresses * we've got to wait before we're allowed to generate faults. 
*/ if (read_vhost_message(dev, main_fd, &ack_ctx) <= 0) { VHOST_LOG_CONFIG(ERR, "(%s) failed to read qemu ack on postcopy set-mem-table\n", dev->ifname); return -1; } if (validate_msg_fds(dev, &ack_ctx, 0) != 0) return -1; if (ack_ctx.msg.request.master != VHOST_USER_SET_MEM_TABLE) { VHOST_LOG_CONFIG(ERR, "(%s) bad qemu ack on postcopy set-mem-table (%d)\n", dev->ifname, ack_ctx.msg.request.master); return -1; } /* Now userfault register and we can use the memory */ for (i = 0; i < memory->nregions; i++) { reg = &dev->mem->regions[i]; if (vhost_user_postcopy_region_register(dev, reg) < 0) return -1; } return 0; } static int vhost_user_mmap_region(struct virtio_net *dev, struct rte_vhost_mem_region *region, uint64_t mmap_offset) { void *mmap_addr; uint64_t mmap_size; uint64_t alignment; int populate; /* Check for memory_size + mmap_offset overflow */ if (mmap_offset >= -region->size) { VHOST_LOG_CONFIG(ERR, "(%s) mmap_offset (%#"PRIx64") and memory_size (%#"PRIx64") overflow\n", dev->ifname, mmap_offset, region->size); return -1; } mmap_size = region->size + mmap_offset; /* mmap() without flag of MAP_ANONYMOUS, should be called with length * argument aligned with hugepagesz at older longterm version Linux, * like 2.6.32 and 3.2.72, or mmap() will fail with EINVAL. * * To avoid failure, make sure in caller to keep length aligned. */ alignment = get_blk_size(region->fd); if (alignment == (uint64_t)-1) { VHOST_LOG_CONFIG(ERR, "(%s) couldn't get hugepage size through fstat\n", dev->ifname); return -1; } mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment); if (mmap_size == 0) { /* * It could happen if initial mmap_size + alignment overflows * the sizeof uint64, which could happen if either mmap_size or * alignment value is wrong. * * mmap() kernel implementation would return an error, but * better catch it before and provide useful info in the logs. */ VHOST_LOG_CONFIG(ERR, "(%s) mmap size (0x%" PRIx64 ") or alignment (0x%" PRIx64 ") is invalid\n", dev->ifname, region->size + mmap_offset, alignment); return -1; } populate = dev->async_copy ? 
MAP_POPULATE : 0; mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED | populate, region->fd, 0); if (mmap_addr == MAP_FAILED) { VHOST_LOG_CONFIG(ERR, "(%s) mmap failed (%s).\n", dev->ifname, strerror(errno)); return -1; } region->mmap_addr = mmap_addr; region->mmap_size = mmap_size; region->host_user_addr = (uint64_t)(uintptr_t)mmap_addr + mmap_offset; if (dev->async_copy) { if (add_guest_pages(dev, region, alignment) < 0) { VHOST_LOG_CONFIG(ERR, "(%s) adding guest pages to region failed.\n", dev->ifname); return -1; } } VHOST_LOG_CONFIG(INFO, "(%s) guest memory region size: 0x%" PRIx64 "\n", dev->ifname, region->size); VHOST_LOG_CONFIG(INFO, "(%s)\t guest physical addr: 0x%" PRIx64 "\n", dev->ifname, region->guest_phys_addr); VHOST_LOG_CONFIG(INFO, "(%s)\t guest virtual addr: 0x%" PRIx64 "\n", dev->ifname, region->guest_user_addr); VHOST_LOG_CONFIG(INFO, "(%s)\t host virtual addr: 0x%" PRIx64 "\n", dev->ifname, region->host_user_addr); VHOST_LOG_CONFIG(INFO, "(%s)\t mmap addr : 0x%" PRIx64 "\n", dev->ifname, (uint64_t)(uintptr_t)mmap_addr); VHOST_LOG_CONFIG(INFO, "(%s)\t mmap size : 0x%" PRIx64 "\n", dev->ifname, mmap_size); VHOST_LOG_CONFIG(INFO, "(%s)\t mmap align: 0x%" PRIx64 "\n", dev->ifname, alignment); VHOST_LOG_CONFIG(INFO, "(%s)\t mmap off : 0x%" PRIx64 "\n", dev->ifname, mmap_offset); return 0; } static int vhost_user_set_mem_table(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd) { struct virtio_net *dev = *pdev; struct VhostUserMemory *memory = &ctx->msg.payload.memory; struct rte_vhost_mem_region *reg; int numa_node = SOCKET_ID_ANY; uint64_t mmap_offset; uint32_t i; bool async_notify = false; if (validate_msg_fds(dev, ctx, memory->nregions) != 0) return RTE_VHOST_MSG_RESULT_ERR; if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) { VHOST_LOG_CONFIG(ERR, "(%s) too many memory regions (%u)\n", dev->ifname, memory->nregions); goto close_msg_fds; } if (dev->mem && !vhost_memory_changed(memory, dev->mem)) { VHOST_LOG_CONFIG(INFO, "(%s) memory regions not changed\n", dev->ifname); close_msg_fds(ctx); return RTE_VHOST_MSG_RESULT_OK; } if (dev->mem) { if (dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) { struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev; if (vdpa_dev && vdpa_dev->ops->dev_close) vdpa_dev->ops->dev_close(dev->vid); dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED; } /* notify the vhost application to stop DMA transfers */ if (dev->async_copy && dev->notify_ops->vring_state_changed) { for (i = 0; i < dev->nr_vring; i++) { dev->notify_ops->vring_state_changed(dev->vid, i, 0); } async_notify = true; } free_mem_region(dev); rte_free(dev->mem); dev->mem = NULL; } /* Flush IOTLB cache as previous HVAs are now invalid */ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) for (i = 0; i < dev->nr_vring; i++) vhost_user_iotlb_flush_all(dev->virtqueue[i]); /* * If VQ 0 has already been allocated, try to allocate on the same * NUMA node. It can be reallocated later in numa_realloc(). 
*/ if (dev->nr_vring > 0) numa_node = dev->virtqueue[0]->numa_node; dev->nr_guest_pages = 0; if (dev->guest_pages == NULL) { dev->max_guest_pages = 8; dev->guest_pages = rte_zmalloc_socket(NULL, dev->max_guest_pages * sizeof(struct guest_page), RTE_CACHE_LINE_SIZE, numa_node); if (dev->guest_pages == NULL) { VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for dev->guest_pages\n", dev->ifname); goto close_msg_fds; } } dev->mem = rte_zmalloc_socket("vhost-mem-table", sizeof(struct rte_vhost_memory) + sizeof(struct rte_vhost_mem_region) * memory->nregions, 0, numa_node); if (dev->mem == NULL) { VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for dev->mem\n", dev->ifname); goto free_guest_pages; } for (i = 0; i < memory->nregions; i++) { reg = &dev->mem->regions[i]; reg->guest_phys_addr = memory->regions[i].guest_phys_addr; reg->guest_user_addr = memory->regions[i].userspace_addr; reg->size = memory->regions[i].memory_size; reg->fd = ctx->fds[i]; /* * Assign invalid file descriptor value to avoid double * closing on error path. */ ctx->fds[i] = -1; mmap_offset = memory->regions[i].mmap_offset; if (vhost_user_mmap_region(dev, reg, mmap_offset) < 0) { VHOST_LOG_CONFIG(ERR, "(%s) failed to mmap region %u\n", dev->ifname, i); goto free_mem_table; } dev->mem->nregions++; } if (dev->async_copy && rte_vfio_is_enabled("vfio")) async_dma_map(dev, true); if (vhost_user_postcopy_register(dev, main_fd, ctx) < 0) goto free_mem_table; for (i = 0; i < dev->nr_vring; i++) { struct vhost_virtqueue *vq = dev->virtqueue[i]; if (!vq) continue; if (vq->desc || vq->avail || vq->used) { /* * If the memory table got updated, the ring addresses * need to be translated again as virtual addresses have * changed. */ vring_invalidate(dev, vq); dev = translate_ring_addresses(dev, i); if (!dev) { dev = *pdev; goto free_mem_table; } *pdev = dev; } } dump_guest_pages(dev); if (async_notify) { for (i = 0; i < dev->nr_vring; i++) dev->notify_ops->vring_state_changed(dev->vid, i, 1); } return RTE_VHOST_MSG_RESULT_OK; free_mem_table: free_mem_region(dev); rte_free(dev->mem); dev->mem = NULL; free_guest_pages: rte_free(dev->guest_pages); dev->guest_pages = NULL; close_msg_fds: close_msg_fds(ctx); return RTE_VHOST_MSG_RESULT_ERR; } static bool vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq) { bool rings_ok; if (!vq) return false; if (vq_is_packed(dev)) rings_ok = vq->desc_packed && vq->driver_event && vq->device_event; else rings_ok = vq->desc && vq->avail && vq->used; return rings_ok && vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD && vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD && vq->enabled; } #define VIRTIO_BUILTIN_NUM_VQS_TO_BE_READY 2u static int virtio_is_ready(struct virtio_net *dev) { struct vhost_virtqueue *vq; uint32_t i, nr_vring = dev->nr_vring; if (dev->flags & VIRTIO_DEV_READY) return 1; if (!dev->nr_vring) return 0; if (dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) { nr_vring = VIRTIO_BUILTIN_NUM_VQS_TO_BE_READY; if (dev->nr_vring < nr_vring) return 0; } for (i = 0; i < nr_vring; i++) { vq = dev->virtqueue[i]; if (!vq_is_ready(dev, vq)) return 0; } /* If supported, ensure the frontend is really done with config */ if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_STATUS)) if (!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK)) return 0; dev->flags |= VIRTIO_DEV_READY; if (!(dev->flags & VIRTIO_DEV_RUNNING)) VHOST_LOG_CONFIG(INFO, "(%s) virtio is now ready for processing.\n", dev->ifname); return 1; } static void * inflight_mem_alloc(struct virtio_net *dev, const char *name, size_t size, 
int *fd) { void *ptr; int mfd = -1; char fname[20] = "/tmp/memfd-XXXXXX"; *fd = -1; #ifdef MEMFD_SUPPORTED mfd = memfd_create(name, MFD_CLOEXEC); #else RTE_SET_USED(name); #endif if (mfd == -1) { mfd = mkstemp(fname); if (mfd == -1) { VHOST_LOG_CONFIG(ERR, "(%s) failed to get inflight buffer fd\n", dev->ifname); return NULL; } unlink(fname); } if (ftruncate(mfd, size) == -1) { VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc inflight buffer\n", dev->ifname); close(mfd); return NULL; } ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, mfd, 0); if (ptr == MAP_FAILED) { VHOST_LOG_CONFIG(ERR, "(%s) failed to mmap inflight buffer\n", dev->ifname); close(mfd); return NULL; } *fd = mfd; return ptr; } static uint32_t get_pervq_shm_size_split(uint16_t queue_size) { return RTE_ALIGN_MUL_CEIL(sizeof(struct rte_vhost_inflight_desc_split) * queue_size + sizeof(uint64_t) + sizeof(uint16_t) * 4, INFLIGHT_ALIGNMENT); } static uint32_t get_pervq_shm_size_packed(uint16_t queue_size) { return RTE_ALIGN_MUL_CEIL(sizeof(struct rte_vhost_inflight_desc_packed) * queue_size + sizeof(uint64_t) + sizeof(uint16_t) * 6 + sizeof(uint8_t) * 9, INFLIGHT_ALIGNMENT); } static int vhost_user_get_inflight_fd(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct rte_vhost_inflight_info_packed *inflight_packed; uint64_t pervq_inflight_size, mmap_size; uint16_t num_queues, queue_size; struct virtio_net *dev = *pdev; int fd, i, j; int numa_node = SOCKET_ID_ANY; void *addr; if (ctx->msg.size != sizeof(ctx->msg.payload.inflight)) { VHOST_LOG_CONFIG(ERR, "(%s) invalid get_inflight_fd message size is %d\n", dev->ifname, ctx->msg.size); return RTE_VHOST_MSG_RESULT_ERR; } /* * If VQ 0 has already been allocated, try to allocate on the same * NUMA node. It can be reallocated later in numa_realloc(). 
*/ if (dev->nr_vring > 0) numa_node = dev->virtqueue[0]->numa_node; if (dev->inflight_info == NULL) { dev->inflight_info = rte_zmalloc_socket("inflight_info", sizeof(struct inflight_mem_info), 0, numa_node); if (!dev->inflight_info) { VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc dev inflight area\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } dev->inflight_info->fd = -1; } num_queues = ctx->msg.payload.inflight.num_queues; queue_size = ctx->msg.payload.inflight.queue_size; VHOST_LOG_CONFIG(INFO, "(%s) get_inflight_fd num_queues: %u\n", dev->ifname, ctx->msg.payload.inflight.num_queues); VHOST_LOG_CONFIG(INFO, "(%s) get_inflight_fd queue_size: %u\n", dev->ifname, ctx->msg.payload.inflight.queue_size); if (vq_is_packed(dev)) pervq_inflight_size = get_pervq_shm_size_packed(queue_size); else pervq_inflight_size = get_pervq_shm_size_split(queue_size); mmap_size = num_queues * pervq_inflight_size; addr = inflight_mem_alloc(dev, "vhost-inflight", mmap_size, &fd); if (!addr) { VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc vhost inflight area\n", dev->ifname); ctx->msg.payload.inflight.mmap_size = 0; return RTE_VHOST_MSG_RESULT_ERR; } memset(addr, 0, mmap_size); if (dev->inflight_info->addr) { munmap(dev->inflight_info->addr, dev->inflight_info->size); dev->inflight_info->addr = NULL; } if (dev->inflight_info->fd >= 0) { close(dev->inflight_info->fd); dev->inflight_info->fd = -1; } dev->inflight_info->addr = addr; dev->inflight_info->size = ctx->msg.payload.inflight.mmap_size = mmap_size; dev->inflight_info->fd = ctx->fds[0] = fd; ctx->msg.payload.inflight.mmap_offset = 0; ctx->fd_num = 1; if (vq_is_packed(dev)) { for (i = 0; i < num_queues; i++) { inflight_packed = (struct rte_vhost_inflight_info_packed *)addr; inflight_packed->used_wrap_counter = 1; inflight_packed->old_used_wrap_counter = 1; for (j = 0; j < queue_size; j++) inflight_packed->desc[j].next = j + 1; addr = (void *)((char *)addr + pervq_inflight_size); } } VHOST_LOG_CONFIG(INFO, "(%s) send inflight mmap_size: %"PRIu64"\n", dev->ifname, ctx->msg.payload.inflight.mmap_size); VHOST_LOG_CONFIG(INFO, "(%s) send inflight mmap_offset: %"PRIu64"\n", dev->ifname, ctx->msg.payload.inflight.mmap_offset); VHOST_LOG_CONFIG(INFO, "(%s) send inflight fd: %d\n", dev->ifname, ctx->fds[0]); return RTE_VHOST_MSG_RESULT_REPLY; } static int vhost_user_set_inflight_fd(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { uint64_t mmap_size, mmap_offset; uint16_t num_queues, queue_size; struct virtio_net *dev = *pdev; uint32_t pervq_inflight_size; struct vhost_virtqueue *vq; void *addr; int fd, i; int numa_node = SOCKET_ID_ANY; fd = ctx->fds[0]; if (ctx->msg.size != sizeof(ctx->msg.payload.inflight) || fd < 0) { VHOST_LOG_CONFIG(ERR, "(%s) invalid set_inflight_fd message size is %d,fd is %d\n", dev->ifname, ctx->msg.size, fd); return RTE_VHOST_MSG_RESULT_ERR; } mmap_size = ctx->msg.payload.inflight.mmap_size; mmap_offset = ctx->msg.payload.inflight.mmap_offset; num_queues = ctx->msg.payload.inflight.num_queues; queue_size = ctx->msg.payload.inflight.queue_size; if (vq_is_packed(dev)) pervq_inflight_size = get_pervq_shm_size_packed(queue_size); else pervq_inflight_size = get_pervq_shm_size_split(queue_size); VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd mmap_size: %"PRIu64"\n", dev->ifname, mmap_size); VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd mmap_offset: %"PRIu64"\n", dev->ifname, mmap_offset); VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd num_queues: %u\n", dev->ifname, num_queues); VHOST_LOG_CONFIG(INFO, "(%s) 
set_inflight_fd queue_size: %u\n", dev->ifname, queue_size); VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd fd: %d\n", dev->ifname, fd); VHOST_LOG_CONFIG(INFO, "(%s) set_inflight_fd pervq_inflight_size: %d\n", dev->ifname, pervq_inflight_size); /* * If VQ 0 has already been allocated, try to allocate on the same * NUMA node. It can be reallocated later in numa_realloc(). */ if (dev->nr_vring > 0) numa_node = dev->virtqueue[0]->numa_node; if (!dev->inflight_info) { dev->inflight_info = rte_zmalloc_socket("inflight_info", sizeof(struct inflight_mem_info), 0, numa_node); if (dev->inflight_info == NULL) { VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc dev inflight area\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } dev->inflight_info->fd = -1; } if (dev->inflight_info->addr) { munmap(dev->inflight_info->addr, dev->inflight_info->size); dev->inflight_info->addr = NULL; } addr = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mmap_offset); if (addr == MAP_FAILED) { VHOST_LOG_CONFIG(ERR, "(%s) failed to mmap share memory.\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } if (dev->inflight_info->fd >= 0) { close(dev->inflight_info->fd); dev->inflight_info->fd = -1; } dev->inflight_info->fd = fd; dev->inflight_info->addr = addr; dev->inflight_info->size = mmap_size; for (i = 0; i < num_queues; i++) { vq = dev->virtqueue[i]; if (!vq) continue; if (vq_is_packed(dev)) { vq->inflight_packed = addr; vq->inflight_packed->desc_num = queue_size; } else { vq->inflight_split = addr; vq->inflight_split->desc_num = queue_size; } addr = (void *)((char *)addr + pervq_inflight_size); } return RTE_VHOST_MSG_RESULT_OK; } static int vhost_user_set_vring_call(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; struct vhost_vring_file file; struct vhost_virtqueue *vq; int expected_fds; expected_fds = (ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1; if (validate_msg_fds(dev, ctx, expected_fds) != 0) return RTE_VHOST_MSG_RESULT_ERR; file.index = ctx->msg.payload.u64 & VHOST_USER_VRING_IDX_MASK; if (ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK) file.fd = VIRTIO_INVALID_EVENTFD; else file.fd = ctx->fds[0]; VHOST_LOG_CONFIG(INFO, "(%s) vring call idx:%d file:%d\n", dev->ifname, file.index, file.fd); vq = dev->virtqueue[file.index]; if (vq->ready) { vq->ready = false; vhost_user_notify_queue_state(dev, file.index, 0); } if (vq->callfd >= 0) close(vq->callfd); vq->callfd = file.fd; return RTE_VHOST_MSG_RESULT_OK; } static int vhost_user_set_vring_err(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; int expected_fds; expected_fds = (ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 
0 : 1; if (validate_msg_fds(dev, ctx, expected_fds) != 0) return RTE_VHOST_MSG_RESULT_ERR; if (!(ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK)) close(ctx->fds[0]); VHOST_LOG_CONFIG(INFO, "(%s) not implemented\n", dev->ifname); return RTE_VHOST_MSG_RESULT_OK; } static int resubmit_desc_compare(const void *a, const void *b) { const struct rte_vhost_resubmit_desc *desc0 = a; const struct rte_vhost_resubmit_desc *desc1 = b; if (desc1->counter > desc0->counter) return 1; return -1; } static int vhost_check_queue_inflights_split(struct virtio_net *dev, struct vhost_virtqueue *vq) { uint16_t i; uint16_t resubmit_num = 0, last_io, num; struct vring_used *used = vq->used; struct rte_vhost_resubmit_info *resubmit; struct rte_vhost_inflight_info_split *inflight_split; if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))) return RTE_VHOST_MSG_RESULT_OK; /* The frontend may still not support the inflight feature * although we negotiate the protocol feature. */ if ((!vq->inflight_split)) return RTE_VHOST_MSG_RESULT_OK; if (!vq->inflight_split->version) { vq->inflight_split->version = INFLIGHT_VERSION; return RTE_VHOST_MSG_RESULT_OK; } if (vq->resubmit_inflight) return RTE_VHOST_MSG_RESULT_OK; inflight_split = vq->inflight_split; vq->global_counter = 0; last_io = inflight_split->last_inflight_io; if (inflight_split->used_idx != used->idx) { inflight_split->desc[last_io].inflight = 0; rte_atomic_thread_fence(__ATOMIC_SEQ_CST); inflight_split->used_idx = used->idx; } for (i = 0; i < inflight_split->desc_num; i++) { if (inflight_split->desc[i].inflight == 1) resubmit_num++; } vq->last_avail_idx += resubmit_num; if (resubmit_num) { resubmit = rte_zmalloc_socket("resubmit", sizeof(struct rte_vhost_resubmit_info), 0, vq->numa_node); if (!resubmit) { VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for resubmit info.\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } resubmit->resubmit_list = rte_zmalloc_socket("resubmit_list", resubmit_num * sizeof(struct rte_vhost_resubmit_desc), 0, vq->numa_node); if (!resubmit->resubmit_list) { VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for inflight desc.\n", dev->ifname); rte_free(resubmit); return RTE_VHOST_MSG_RESULT_ERR; } num = 0; for (i = 0; i < vq->inflight_split->desc_num; i++) { if (vq->inflight_split->desc[i].inflight == 1) { resubmit->resubmit_list[num].index = i; resubmit->resubmit_list[num].counter = inflight_split->desc[i].counter; num++; } } resubmit->resubmit_num = num; if (resubmit->resubmit_num > 1) qsort(resubmit->resubmit_list, resubmit->resubmit_num, sizeof(struct rte_vhost_resubmit_desc), resubmit_desc_compare); vq->global_counter = resubmit->resubmit_list[0].counter + 1; vq->resubmit_inflight = resubmit; } return RTE_VHOST_MSG_RESULT_OK; } static int vhost_check_queue_inflights_packed(struct virtio_net *dev, struct vhost_virtqueue *vq) { uint16_t i; uint16_t resubmit_num = 0, old_used_idx, num; struct rte_vhost_resubmit_info *resubmit; struct rte_vhost_inflight_info_packed *inflight_packed; if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))) return RTE_VHOST_MSG_RESULT_OK; /* The frontend may still not support the inflight feature * although we negotiate the protocol feature. 
*/ if ((!vq->inflight_packed)) return RTE_VHOST_MSG_RESULT_OK; if (!vq->inflight_packed->version) { vq->inflight_packed->version = INFLIGHT_VERSION; return RTE_VHOST_MSG_RESULT_OK; } if (vq->resubmit_inflight) return RTE_VHOST_MSG_RESULT_OK; inflight_packed = vq->inflight_packed; vq->global_counter = 0; old_used_idx = inflight_packed->old_used_idx; if (inflight_packed->used_idx != old_used_idx) { if (inflight_packed->desc[old_used_idx].inflight == 0) { inflight_packed->old_used_idx = inflight_packed->used_idx; inflight_packed->old_used_wrap_counter = inflight_packed->used_wrap_counter; inflight_packed->old_free_head = inflight_packed->free_head; } else { inflight_packed->used_idx = inflight_packed->old_used_idx; inflight_packed->used_wrap_counter = inflight_packed->old_used_wrap_counter; inflight_packed->free_head = inflight_packed->old_free_head; } } for (i = 0; i < inflight_packed->desc_num; i++) { if (inflight_packed->desc[i].inflight == 1) resubmit_num++; } if (resubmit_num) { resubmit = rte_zmalloc_socket("resubmit", sizeof(struct rte_vhost_resubmit_info), 0, vq->numa_node); if (resubmit == NULL) { VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for resubmit info.\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } resubmit->resubmit_list = rte_zmalloc_socket("resubmit_list", resubmit_num * sizeof(struct rte_vhost_resubmit_desc), 0, vq->numa_node); if (resubmit->resubmit_list == NULL) { VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for resubmit desc.\n", dev->ifname); rte_free(resubmit); return RTE_VHOST_MSG_RESULT_ERR; } num = 0; for (i = 0; i < inflight_packed->desc_num; i++) { if (vq->inflight_packed->desc[i].inflight == 1) { resubmit->resubmit_list[num].index = i; resubmit->resubmit_list[num].counter = inflight_packed->desc[i].counter; num++; } } resubmit->resubmit_num = num; if (resubmit->resubmit_num > 1) qsort(resubmit->resubmit_list, resubmit->resubmit_num, sizeof(struct rte_vhost_resubmit_desc), resubmit_desc_compare); vq->global_counter = resubmit->resubmit_list[0].counter + 1; vq->resubmit_inflight = resubmit; } return RTE_VHOST_MSG_RESULT_OK; } static int vhost_user_set_vring_kick(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; struct vhost_vring_file file; struct vhost_virtqueue *vq; int expected_fds; expected_fds = (ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1; if (validate_msg_fds(dev, ctx, expected_fds) != 0) return RTE_VHOST_MSG_RESULT_ERR; file.index = ctx->msg.payload.u64 & VHOST_USER_VRING_IDX_MASK; if (ctx->msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK) file.fd = VIRTIO_INVALID_EVENTFD; else file.fd = ctx->fds[0]; VHOST_LOG_CONFIG(INFO, "(%s) vring kick idx:%d file:%d\n", dev->ifname, file.index, file.fd); /* Interpret ring addresses only when ring is started. */ dev = translate_ring_addresses(dev, file.index); if (!dev) { if (file.fd != VIRTIO_INVALID_EVENTFD) close(file.fd); return RTE_VHOST_MSG_RESULT_ERR; } *pdev = dev; vq = dev->virtqueue[file.index]; /* * When VHOST_USER_F_PROTOCOL_FEATURES is not negotiated, * the ring starts already enabled. Otherwise, it is enabled via * the SET_VRING_ENABLE message. 
*/ if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) { vq->enabled = true; } if (vq->ready) { vq->ready = false; vhost_user_notify_queue_state(dev, file.index, 0); } if (vq->kickfd >= 0) close(vq->kickfd); vq->kickfd = file.fd; if (vq_is_packed(dev)) { if (vhost_check_queue_inflights_packed(dev, vq)) { VHOST_LOG_CONFIG(ERR, "(%s) failed to inflights for vq: %d\n", dev->ifname, file.index); return RTE_VHOST_MSG_RESULT_ERR; } } else { if (vhost_check_queue_inflights_split(dev, vq)) { VHOST_LOG_CONFIG(ERR, "(%s) failed to inflights for vq: %d\n", dev->ifname, file.index); return RTE_VHOST_MSG_RESULT_ERR; } } return RTE_VHOST_MSG_RESULT_OK; } /* * when virtio is stopped, qemu will send us the GET_VRING_BASE message. */ static int vhost_user_get_vring_base(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; struct vhost_virtqueue *vq = dev->virtqueue[ctx->msg.payload.state.index]; uint64_t val; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; /* We have to stop the queue (virtio) if it is running. */ vhost_destroy_device_notify(dev); dev->flags &= ~VIRTIO_DEV_READY; dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED; /* Here we are safe to get the indexes */ if (vq_is_packed(dev)) { /* * Bit[0:14]: avail index * Bit[15]: avail wrap counter */ val = vq->last_avail_idx & 0x7fff; val |= vq->avail_wrap_counter << 15; ctx->msg.payload.state.num = val; } else { ctx->msg.payload.state.num = vq->last_avail_idx; } VHOST_LOG_CONFIG(INFO, "(%s) vring base idx:%d file:%d\n", dev->ifname, ctx->msg.payload.state.index, ctx->msg.payload.state.num); /* * Based on current qemu vhost-user implementation, this message is * sent and only sent in vhost_vring_stop. * TODO: cleanup the vring, it isn't usable since here. */ if (vq->kickfd >= 0) close(vq->kickfd); vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD; if (vq->callfd >= 0) close(vq->callfd); vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD; vq->signalled_used_valid = false; if (vq_is_packed(dev)) { rte_free(vq->shadow_used_packed); vq->shadow_used_packed = NULL; } else { rte_free(vq->shadow_used_split); vq->shadow_used_split = NULL; } rte_free(vq->batch_copy_elems); vq->batch_copy_elems = NULL; rte_free(vq->log_cache); vq->log_cache = NULL; ctx->msg.size = sizeof(ctx->msg.payload.state); ctx->fd_num = 0; vhost_user_iotlb_flush_all(vq); vring_invalidate(dev, vq); return RTE_VHOST_MSG_RESULT_REPLY; } /* * when virtio queues are ready to work, qemu will send us to * enable the virtio queue pair. */ static int vhost_user_set_vring_enable(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; bool enable = !!ctx->msg.payload.state.num; int index = (int)ctx->msg.payload.state.index; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; VHOST_LOG_CONFIG(INFO, "(%s) set queue enable: %d to qp idx: %d\n", dev->ifname, enable, index); if (enable && dev->virtqueue[index]->async) { if (dev->virtqueue[index]->async->pkts_inflight_n) { VHOST_LOG_CONFIG(ERR, "(%s) failed to enable vring. 
Inflight packets must be completed first\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } } dev->virtqueue[index]->enabled = enable; return RTE_VHOST_MSG_RESULT_OK; } static int vhost_user_get_protocol_features(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; uint64_t features, protocol_features; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; rte_vhost_driver_get_features(dev->ifname, &features); rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features); ctx->msg.payload.u64 = protocol_features; ctx->msg.size = sizeof(ctx->msg.payload.u64); ctx->fd_num = 0; return RTE_VHOST_MSG_RESULT_REPLY; } static int vhost_user_set_protocol_features(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; uint64_t protocol_features = ctx->msg.payload.u64; uint64_t slave_protocol_features = 0; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; rte_vhost_driver_get_protocol_features(dev->ifname, &slave_protocol_features); if (protocol_features & ~slave_protocol_features) { VHOST_LOG_CONFIG(ERR, "(%s) received invalid protocol features.\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } dev->protocol_features = protocol_features; VHOST_LOG_CONFIG(INFO, "(%s) negotiated Vhost-user protocol features: 0x%" PRIx64 "\n", dev->ifname, dev->protocol_features); return RTE_VHOST_MSG_RESULT_OK; } static int vhost_user_set_log_base(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; int fd = ctx->fds[0]; uint64_t size, off; void *addr; uint32_t i; if (validate_msg_fds(dev, ctx, 1) != 0) return RTE_VHOST_MSG_RESULT_ERR; if (fd < 0) { VHOST_LOG_CONFIG(ERR, "(%s) invalid log fd: %d\n", dev->ifname, fd); return RTE_VHOST_MSG_RESULT_ERR; } if (ctx->msg.size != sizeof(VhostUserLog)) { VHOST_LOG_CONFIG(ERR, "(%s) invalid log base msg size: %"PRId32" != %d\n", dev->ifname, ctx->msg.size, (int)sizeof(VhostUserLog)); goto close_msg_fds; } size = ctx->msg.payload.log.mmap_size; off = ctx->msg.payload.log.mmap_offset; /* Check for mmap size and offset overflow. */ if (off >= -size) { VHOST_LOG_CONFIG(ERR, "(%s) log offset %#"PRIx64" and log size %#"PRIx64" overflow\n", dev->ifname, off, size); goto close_msg_fds; } VHOST_LOG_CONFIG(INFO, "(%s) log mmap size: %"PRId64", offset: %"PRId64"\n", dev->ifname, size, off); /* * mmap from 0 to workaround a hugepage mmap bug: mmap will * fail when offset is not page size aligned. */ addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); close(fd); if (addr == MAP_FAILED) { VHOST_LOG_CONFIG(ERR, "(%s) mmap log base failed!\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } /* * Free previously mapped log memory on occasionally * multiple VHOST_USER_SET_LOG_BASE. 
*/ if (dev->log_addr) { munmap((void *)(uintptr_t)dev->log_addr, dev->log_size); } dev->log_addr = (uint64_t)(uintptr_t)addr; dev->log_base = dev->log_addr + off; dev->log_size = size; for (i = 0; i < dev->nr_vring; i++) { struct vhost_virtqueue *vq = dev->virtqueue[i]; rte_free(vq->log_cache); vq->log_cache = NULL; vq->log_cache_nb_elem = 0; vq->log_cache = rte_malloc_socket("vq log cache", sizeof(struct log_cache_entry) * VHOST_LOG_CACHE_NR, 0, vq->numa_node); /* * If log cache alloc fail, don't fail migration, but no * caching will be done, which will impact performance */ if (!vq->log_cache) VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate VQ logging cache\n", dev->ifname); } /* * The spec is not clear about it (yet), but QEMU doesn't expect * any payload in the reply. */ ctx->msg.size = 0; ctx->fd_num = 0; return RTE_VHOST_MSG_RESULT_REPLY; close_msg_fds: close_msg_fds(ctx); return RTE_VHOST_MSG_RESULT_ERR; } static int vhost_user_set_log_fd(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; if (validate_msg_fds(dev, ctx, 1) != 0) return RTE_VHOST_MSG_RESULT_ERR; close(ctx->fds[0]); VHOST_LOG_CONFIG(INFO, "(%s) not implemented.\n", dev->ifname); return RTE_VHOST_MSG_RESULT_OK; } /* * An rarp packet is constructed and broadcasted to notify switches about * the new location of the migrated VM, so that packets from outside will * not be lost after migration. * * However, we don't actually "send" a rarp packet here, instead, we set * a flag 'broadcast_rarp' to let rte_vhost_dequeue_burst() inject it. */ static int vhost_user_send_rarp(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; uint8_t *mac = (uint8_t *)&ctx->msg.payload.u64; struct rte_vdpa_device *vdpa_dev; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; VHOST_LOG_CONFIG(DEBUG, "(%s) MAC: " RTE_ETHER_ADDR_PRT_FMT "\n", dev->ifname, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); memcpy(dev->mac.addr_bytes, mac, 6); /* * Set the flag to inject a RARP broadcast packet at * rte_vhost_dequeue_burst(). * * __ATOMIC_RELEASE ordering is for making sure the mac is * copied before the flag is set. 
*/ __atomic_store_n(&dev->broadcast_rarp, 1, __ATOMIC_RELEASE); vdpa_dev = dev->vdpa_dev; if (vdpa_dev && vdpa_dev->ops->migration_done) vdpa_dev->ops->migration_done(dev->vid); return RTE_VHOST_MSG_RESULT_OK; } static int vhost_user_net_set_mtu(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; if (ctx->msg.payload.u64 < VIRTIO_MIN_MTU || ctx->msg.payload.u64 > VIRTIO_MAX_MTU) { VHOST_LOG_CONFIG(ERR, "(%s) invalid MTU size (%"PRIu64")\n", dev->ifname, ctx->msg.payload.u64); return RTE_VHOST_MSG_RESULT_ERR; } dev->mtu = ctx->msg.payload.u64; return RTE_VHOST_MSG_RESULT_OK; } static int vhost_user_set_req_fd(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; int fd = ctx->fds[0]; if (validate_msg_fds(dev, ctx, 1) != 0) return RTE_VHOST_MSG_RESULT_ERR; if (fd < 0) { VHOST_LOG_CONFIG(ERR, "(%s) invalid file descriptor for slave channel (%d)\n", dev->ifname, fd); return RTE_VHOST_MSG_RESULT_ERR; } if (dev->slave_req_fd >= 0) close(dev->slave_req_fd); dev->slave_req_fd = fd; return RTE_VHOST_MSG_RESULT_OK; } static int is_vring_iotlb_split(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg) { struct vhost_vring_addr *ra; uint64_t start, end, len; start = imsg->iova; end = start + imsg->size; ra = &vq->ring_addrs; len = sizeof(struct vring_desc) * vq->size; if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start) return 1; len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size; if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start) return 1; len = sizeof(struct vring_used) + sizeof(struct vring_used_elem) * vq->size; if (ra->used_user_addr < end && (ra->used_user_addr + len) > start) return 1; if (ra->flags & (1 << VHOST_VRING_F_LOG)) { len = sizeof(uint64_t); if (ra->log_guest_addr < end && (ra->log_guest_addr + len) > start) return 1; } return 0; } static int is_vring_iotlb_packed(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg) { struct vhost_vring_addr *ra; uint64_t start, end, len; start = imsg->iova; end = start + imsg->size; ra = &vq->ring_addrs; len = sizeof(struct vring_packed_desc) * vq->size; if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start) return 1; len = sizeof(struct vring_packed_desc_event); if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start) return 1; len = sizeof(struct vring_packed_desc_event); if (ra->used_user_addr < end && (ra->used_user_addr + len) > start) return 1; if (ra->flags & (1 << VHOST_VRING_F_LOG)) { len = sizeof(uint64_t); if (ra->log_guest_addr < end && (ra->log_guest_addr + len) > start) return 1; } return 0; } static int is_vring_iotlb(struct virtio_net *dev, struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg) { if (vq_is_packed(dev)) return is_vring_iotlb_packed(vq, imsg); else return is_vring_iotlb_split(vq, imsg); } static int vhost_user_iotlb_msg(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; struct vhost_iotlb_msg *imsg = &ctx->msg.payload.iotlb; uint16_t i; uint64_t vva, len; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; switch (imsg->type) { case VHOST_IOTLB_UPDATE: len = imsg->size; vva = qva_to_vva(dev, imsg->uaddr, &len); if (!vva) return RTE_VHOST_MSG_RESULT_ERR; for (i = 0; i < dev->nr_vring; i++) { struct vhost_virtqueue *vq = dev->virtqueue[i]; if (!vq) continue; 
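/*
 * For each allocated virtqueue: cache the IOVA -> host virtual address
 * translation, and, if the updated range overlaps this queue's ring
 * memory (see is_vring_iotlb()), re-run translate_ring_addresses() for
 * that queue under its access_lock so the vring pointers pick up the
 * new mapping.
 */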
vhost_user_iotlb_cache_insert(dev, vq, imsg->iova, vva, len, imsg->perm); if (is_vring_iotlb(dev, vq, imsg)) { rte_spinlock_lock(&vq->access_lock); *pdev = dev = translate_ring_addresses(dev, i); rte_spinlock_unlock(&vq->access_lock); } } break; case VHOST_IOTLB_INVALIDATE: for (i = 0; i < dev->nr_vring; i++) { struct vhost_virtqueue *vq = dev->virtqueue[i]; if (!vq) continue; vhost_user_iotlb_cache_remove(vq, imsg->iova, imsg->size); if (is_vring_iotlb(dev, vq, imsg)) { rte_spinlock_lock(&vq->access_lock); vring_invalidate(dev, vq); rte_spinlock_unlock(&vq->access_lock); } } break; default: VHOST_LOG_CONFIG(ERR, "(%s) invalid IOTLB message type (%d)\n", dev->ifname, imsg->type); return RTE_VHOST_MSG_RESULT_ERR; } return RTE_VHOST_MSG_RESULT_OK; } static int vhost_user_set_postcopy_advise(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; #ifdef RTE_LIBRTE_VHOST_POSTCOPY struct uffdio_api api_struct; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); if (dev->postcopy_ufd == -1) { VHOST_LOG_CONFIG(ERR, "(%s) userfaultfd not available: %s\n", dev->ifname, strerror(errno)); return RTE_VHOST_MSG_RESULT_ERR; } api_struct.api = UFFD_API; api_struct.features = 0; if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) { VHOST_LOG_CONFIG(ERR, "(%s) UFFDIO_API ioctl failure: %s\n", dev->ifname, strerror(errno)); close(dev->postcopy_ufd); dev->postcopy_ufd = -1; return RTE_VHOST_MSG_RESULT_ERR; } ctx->fds[0] = dev->postcopy_ufd; ctx->fd_num = 1; return RTE_VHOST_MSG_RESULT_REPLY; #else dev->postcopy_ufd = -1; ctx->fd_num = 0; return RTE_VHOST_MSG_RESULT_ERR; #endif } static int vhost_user_set_postcopy_listen(struct virtio_net **pdev, struct vhu_msg_context *ctx __rte_unused, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; if (dev->mem && dev->mem->nregions) { VHOST_LOG_CONFIG(ERR, "(%s) regions already registered at postcopy-listen\n", dev->ifname); return RTE_VHOST_MSG_RESULT_ERR; } dev->postcopy_listening = 1; return RTE_VHOST_MSG_RESULT_OK; } static int vhost_user_postcopy_end(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; dev->postcopy_listening = 0; if (dev->postcopy_ufd >= 0) { close(dev->postcopy_ufd); dev->postcopy_ufd = -1; } ctx->msg.payload.u64 = 0; ctx->msg.size = sizeof(ctx->msg.payload.u64); ctx->fd_num = 0; return RTE_VHOST_MSG_RESULT_REPLY; } static int vhost_user_get_status(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; ctx->msg.payload.u64 = dev->status; ctx->msg.size = sizeof(ctx->msg.payload.u64); ctx->fd_num = 0; return RTE_VHOST_MSG_RESULT_REPLY; } static int vhost_user_set_status(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd __rte_unused) { struct virtio_net *dev = *pdev; if (validate_msg_fds(dev, ctx, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; /* As per Virtio specification, the device status is 8bits long */ if (ctx->msg.payload.u64 > UINT8_MAX) { VHOST_LOG_CONFIG(ERR, "(%s) invalid VHOST_USER_SET_STATUS payload 0x%" PRIx64 "\n", dev->ifname, ctx->msg.payload.u64); return RTE_VHOST_MSG_RESULT_ERR; } dev->status = ctx->msg.payload.u64; if 
((dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK) && (dev->flags & VIRTIO_DEV_FEATURES_FAILED)) { VHOST_LOG_CONFIG(ERR, "(%s) FEATURES_OK bit is set but feature negotiation failed\n", dev->ifname); /* * Clear the bit to let the driver know about the feature * negotiation failure */ dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK; } VHOST_LOG_CONFIG(INFO, "(%s) new device status(0x%08x):\n", dev->ifname, dev->status); VHOST_LOG_CONFIG(INFO, "(%s)\t-RESET: %u\n", dev->ifname, (dev->status == VIRTIO_DEVICE_STATUS_RESET)); VHOST_LOG_CONFIG(INFO, "(%s)\t-ACKNOWLEDGE: %u\n", dev->ifname, !!(dev->status & VIRTIO_DEVICE_STATUS_ACK)); VHOST_LOG_CONFIG(INFO, "(%s)\t-DRIVER: %u\n", dev->ifname, !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER)); VHOST_LOG_CONFIG(INFO, "(%s)\t-FEATURES_OK: %u\n", dev->ifname, !!(dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK)); VHOST_LOG_CONFIG(INFO, "(%s)\t-DRIVER_OK: %u\n", dev->ifname, !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK)); VHOST_LOG_CONFIG(INFO, "(%s)\t-DEVICE_NEED_RESET: %u\n", dev->ifname, !!(dev->status & VIRTIO_DEVICE_STATUS_DEV_NEED_RESET)); VHOST_LOG_CONFIG(INFO, "(%s)\t-FAILED: %u\n", dev->ifname, !!(dev->status & VIRTIO_DEVICE_STATUS_FAILED)); return RTE_VHOST_MSG_RESULT_OK; } typedef int (*vhost_message_handler_t)(struct virtio_net **pdev, struct vhu_msg_context *ctx, int main_fd); static vhost_message_handler_t vhost_message_handlers[VHOST_USER_MAX] = { [VHOST_USER_NONE] = NULL, [VHOST_USER_GET_FEATURES] = vhost_user_get_features, [VHOST_USER_SET_FEATURES] = vhost_user_set_features, [VHOST_USER_SET_OWNER] = vhost_user_set_owner, [VHOST_USER_RESET_OWNER] = vhost_user_reset_owner, [VHOST_USER_SET_MEM_TABLE] = vhost_user_set_mem_table, [VHOST_USER_SET_LOG_BASE] = vhost_user_set_log_base, [VHOST_USER_SET_LOG_FD] = vhost_user_set_log_fd, [VHOST_USER_SET_VRING_NUM] = vhost_user_set_vring_num, [VHOST_USER_SET_VRING_ADDR] = vhost_user_set_vring_addr, [VHOST_USER_SET_VRING_BASE] = vhost_user_set_vring_base, [VHOST_USER_GET_VRING_BASE] = vhost_user_get_vring_base, [VHOST_USER_SET_VRING_KICK] = vhost_user_set_vring_kick, [VHOST_USER_SET_VRING_CALL] = vhost_user_set_vring_call, [VHOST_USER_SET_VRING_ERR] = vhost_user_set_vring_err, [VHOST_USER_GET_PROTOCOL_FEATURES] = vhost_user_get_protocol_features, [VHOST_USER_SET_PROTOCOL_FEATURES] = vhost_user_set_protocol_features, [VHOST_USER_GET_QUEUE_NUM] = vhost_user_get_queue_num, [VHOST_USER_SET_VRING_ENABLE] = vhost_user_set_vring_enable, [VHOST_USER_SEND_RARP] = vhost_user_send_rarp, [VHOST_USER_NET_SET_MTU] = vhost_user_net_set_mtu, [VHOST_USER_SET_SLAVE_REQ_FD] = vhost_user_set_req_fd, [VHOST_USER_IOTLB_MSG] = vhost_user_iotlb_msg, [VHOST_USER_POSTCOPY_ADVISE] = vhost_user_set_postcopy_advise, [VHOST_USER_POSTCOPY_LISTEN] = vhost_user_set_postcopy_listen, [VHOST_USER_POSTCOPY_END] = vhost_user_postcopy_end, [VHOST_USER_GET_INFLIGHT_FD] = vhost_user_get_inflight_fd, [VHOST_USER_SET_INFLIGHT_FD] = vhost_user_set_inflight_fd, [VHOST_USER_SET_STATUS] = vhost_user_set_status, [VHOST_USER_GET_STATUS] = vhost_user_get_status, }; /* return bytes# of read on success or negative val on failure. 
*/ static int read_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx) { int ret; ret = read_fd_message(dev->ifname, sockfd, (char *)&ctx->msg, VHOST_USER_HDR_SIZE, ctx->fds, VHOST_MEMORY_MAX_NREGIONS, &ctx->fd_num); if (ret <= 0) { return ret; } else if (ret != VHOST_USER_HDR_SIZE) { VHOST_LOG_CONFIG(ERR, "(%s) Unexpected header size read\n", dev->ifname); close_msg_fds(ctx); return -1; } if (ctx->msg.size) { if (ctx->msg.size > sizeof(ctx->msg.payload)) { VHOST_LOG_CONFIG(ERR, "(%s) invalid msg size: %d\n", dev->ifname, ctx->msg.size); return -1; } ret = read(sockfd, &ctx->msg.payload, ctx->msg.size); if (ret <= 0) return ret; if (ret != (int)ctx->msg.size) { VHOST_LOG_CONFIG(ERR, "(%s) read control message failed\n", dev->ifname); return -1; } } return ret; } static int send_vhost_message(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx) { if (!ctx) return 0; return send_fd_message(dev->ifname, sockfd, (char *)&ctx->msg, VHOST_USER_HDR_SIZE + ctx->msg.size, ctx->fds, ctx->fd_num); } static int send_vhost_reply(struct virtio_net *dev, int sockfd, struct vhu_msg_context *ctx) { if (!ctx) return 0; ctx->msg.flags &= ~VHOST_USER_VERSION_MASK; ctx->msg.flags &= ~VHOST_USER_NEED_REPLY; ctx->msg.flags |= VHOST_USER_VERSION; ctx->msg.flags |= VHOST_USER_REPLY_MASK; return send_vhost_message(dev, sockfd, ctx); } static int send_vhost_slave_message(struct virtio_net *dev, struct vhu_msg_context *ctx) { int ret; if (ctx->msg.flags & VHOST_USER_NEED_REPLY) rte_spinlock_lock(&dev->slave_req_lock); ret = send_vhost_message(dev, dev->slave_req_fd, ctx); if (ret < 0 && (ctx->msg.flags & VHOST_USER_NEED_REPLY)) rte_spinlock_unlock(&dev->slave_req_lock); return ret; } /* * Allocate a queue pair if it hasn't been allocated yet */ static int vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, struct vhu_msg_context *ctx) { uint32_t vring_idx; switch (ctx->msg.request.master) { case VHOST_USER_SET_VRING_KICK: case VHOST_USER_SET_VRING_CALL: case VHOST_USER_SET_VRING_ERR: vring_idx = ctx->msg.payload.u64 & VHOST_USER_VRING_IDX_MASK; break; case VHOST_USER_SET_VRING_NUM: case VHOST_USER_SET_VRING_BASE: case VHOST_USER_GET_VRING_BASE: case VHOST_USER_SET_VRING_ENABLE: vring_idx = ctx->msg.payload.state.index; break; case VHOST_USER_SET_VRING_ADDR: vring_idx = ctx->msg.payload.addr.index; break; case VHOST_USER_SET_INFLIGHT_FD: vring_idx = ctx->msg.payload.inflight.num_queues - 1; break; default: return 0; } if (vring_idx >= VHOST_MAX_VRING) { VHOST_LOG_CONFIG(ERR, "(%s) invalid vring index: %u\n", dev->ifname, vring_idx); return -1; } if (dev->virtqueue[vring_idx]) return 0; return alloc_vring_queue(dev, vring_idx); } static void vhost_user_lock_all_queue_pairs(struct virtio_net *dev) { unsigned int i = 0; unsigned int vq_num = 0; while (vq_num < dev->nr_vring) { struct vhost_virtqueue *vq = dev->virtqueue[i]; if (vq) { rte_spinlock_lock(&vq->access_lock); vq_num++; } i++; } } static void vhost_user_unlock_all_queue_pairs(struct virtio_net *dev) { unsigned int i = 0; unsigned int vq_num = 0; while (vq_num < dev->nr_vring) { struct vhost_virtqueue *vq = dev->virtqueue[i]; if (vq) { rte_spinlock_unlock(&vq->access_lock); vq_num++; } i++; } } int vhost_user_msg_handler(int vid, int fd) { struct virtio_net *dev; struct vhu_msg_context ctx; struct rte_vdpa_device *vdpa_dev; int ret; int unlock_required = 0; bool handled; int request; uint32_t i; dev = get_device(vid); if (dev == NULL) return -1; if (!dev->notify_ops) { dev->notify_ops = 
vhost_driver_callback_get(dev->ifname); if (!dev->notify_ops) { VHOST_LOG_CONFIG(ERR, "(%s) failed to get callback ops for driver\n", dev->ifname); return -1; } } ret = read_vhost_message(dev, fd, &ctx); if (ret <= 0) { if (ret < 0) VHOST_LOG_CONFIG(ERR, "(%s) vhost read message failed\n", dev->ifname); else VHOST_LOG_CONFIG(INFO, "(%s) vhost peer closed\n", dev->ifname); return -1; } ret = 0; request = ctx.msg.request.master; if (request > VHOST_USER_NONE && request < VHOST_USER_MAX && vhost_message_str[request]) { if (request != VHOST_USER_IOTLB_MSG) VHOST_LOG_CONFIG(INFO, "(%s) read message %s\n", dev->ifname, vhost_message_str[request]); else VHOST_LOG_CONFIG(DEBUG, "(%s) read message %s\n", dev->ifname, vhost_message_str[request]); } else { VHOST_LOG_CONFIG(DEBUG, "(%s) external request %d\n", dev->ifname, request); } ret = vhost_user_check_and_alloc_queue_pair(dev, &ctx); if (ret < 0) { VHOST_LOG_CONFIG(ERR, "(%s) failed to alloc queue\n", dev->ifname); return -1; } /* * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE * and VHOST_USER_RESET_OWNER, since it is sent when virtio stops * and device is destroyed. destroy_device waits for queues to be * inactive, so it is safe. Otherwise taking the access_lock * would cause a dead lock. */ switch (request) { case VHOST_USER_SET_FEATURES: case VHOST_USER_SET_PROTOCOL_FEATURES: case VHOST_USER_SET_OWNER: case VHOST_USER_SET_MEM_TABLE: case VHOST_USER_SET_LOG_BASE: case VHOST_USER_SET_LOG_FD: case VHOST_USER_SET_VRING_NUM: case VHOST_USER_SET_VRING_ADDR: case VHOST_USER_SET_VRING_BASE: case VHOST_USER_SET_VRING_KICK: case VHOST_USER_SET_VRING_CALL: case VHOST_USER_SET_VRING_ERR: case VHOST_USER_SET_VRING_ENABLE: case VHOST_USER_SEND_RARP: case VHOST_USER_NET_SET_MTU: case VHOST_USER_SET_SLAVE_REQ_FD: if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) { vhost_user_lock_all_queue_pairs(dev); unlock_required = 1; } break; default: break; } handled = false; if (dev->extern_ops.pre_msg_handle) { RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0); ret = (*dev->extern_ops.pre_msg_handle)(dev->vid, &ctx); switch (ret) { case RTE_VHOST_MSG_RESULT_REPLY: send_vhost_reply(dev, fd, &ctx); /* Fall-through */ case RTE_VHOST_MSG_RESULT_ERR: case RTE_VHOST_MSG_RESULT_OK: handled = true; goto skip_to_post_handle; case RTE_VHOST_MSG_RESULT_NOT_HANDLED: default: break; } } if (request > VHOST_USER_NONE && request < VHOST_USER_MAX) { if (!vhost_message_handlers[request]) goto skip_to_post_handle; ret = vhost_message_handlers[request](&dev, &ctx, fd); switch (ret) { case RTE_VHOST_MSG_RESULT_ERR: VHOST_LOG_CONFIG(ERR, "(%s) processing %s failed.\n", dev->ifname, vhost_message_str[request]); handled = true; break; case RTE_VHOST_MSG_RESULT_OK: VHOST_LOG_CONFIG(DEBUG, "(%s) processing %s succeeded.\n", dev->ifname, vhost_message_str[request]); handled = true; break; case RTE_VHOST_MSG_RESULT_REPLY: VHOST_LOG_CONFIG(DEBUG, "(%s) processing %s succeeded and needs reply.\n", dev->ifname, vhost_message_str[request]); send_vhost_reply(dev, fd, &ctx); handled = true; break; default: break; } } skip_to_post_handle: if (ret != RTE_VHOST_MSG_RESULT_ERR && dev->extern_ops.post_msg_handle) { RTE_BUILD_BUG_ON(offsetof(struct vhu_msg_context, msg) != 0); ret = (*dev->extern_ops.post_msg_handle)(dev->vid, &ctx); switch (ret) { case RTE_VHOST_MSG_RESULT_REPLY: send_vhost_reply(dev, fd, &ctx); /* Fall-through */ case RTE_VHOST_MSG_RESULT_ERR: case RTE_VHOST_MSG_RESULT_OK: handled = true; case RTE_VHOST_MSG_RESULT_NOT_HANDLED: default: break; } } /* If message 
was not handled at this stage, treat it as an error */ if (!handled) { VHOST_LOG_CONFIG(ERR, "(%s) vhost message (req: %d) was not handled.\n", dev->ifname, request); close_msg_fds(&ctx); ret = RTE_VHOST_MSG_RESULT_ERR; } /* * If the request required a reply that was already sent, * this optional reply-ack won't be sent as the * VHOST_USER_NEED_REPLY was cleared in send_vhost_reply(). */ if (ctx.msg.flags & VHOST_USER_NEED_REPLY) { ctx.msg.payload.u64 = ret == RTE_VHOST_MSG_RESULT_ERR; ctx.msg.size = sizeof(ctx.msg.payload.u64); ctx.fd_num = 0; send_vhost_reply(dev, fd, &ctx); } else if (ret == RTE_VHOST_MSG_RESULT_ERR) { VHOST_LOG_CONFIG(ERR, "(%s) vhost message handling failed.\n", dev->ifname); return -1; } for (i = 0; i < dev->nr_vring; i++) { struct vhost_virtqueue *vq = dev->virtqueue[i]; bool cur_ready = vq_is_ready(dev, vq); if (cur_ready != (vq && vq->ready)) { vq->ready = cur_ready; vhost_user_notify_queue_state(dev, i, cur_ready); } } if (unlock_required) vhost_user_unlock_all_queue_pairs(dev); if (!virtio_is_ready(dev)) goto out; /* * Virtio is now ready. If not done already, it is time * to notify the application it can process the rings and * configure the vDPA device if present. */ if (!(dev->flags & VIRTIO_DEV_RUNNING)) { if (dev->notify_ops->new_device(dev->vid) == 0) dev->flags |= VIRTIO_DEV_RUNNING; } vdpa_dev = dev->vdpa_dev; if (!vdpa_dev) goto out; if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) { if (vdpa_dev->ops->dev_conf(dev->vid)) VHOST_LOG_CONFIG(ERR, "(%s) failed to configure vDPA device\n", dev->ifname); else dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED; } out: return 0; } static int process_slave_message_reply(struct virtio_net *dev, const struct vhu_msg_context *ctx) { struct vhu_msg_context msg_reply; int ret; if ((ctx->msg.flags & VHOST_USER_NEED_REPLY) == 0) return 0; ret = read_vhost_message(dev, dev->slave_req_fd, &msg_reply); if (ret <= 0) { if (ret < 0) VHOST_LOG_CONFIG(ERR, "(%s) vhost read slave message reply failed\n", dev->ifname); else VHOST_LOG_CONFIG(INFO, "(%s) vhost peer closed\n", dev->ifname); ret = -1; goto out; } ret = 0; if (msg_reply.msg.request.slave != ctx->msg.request.slave) { VHOST_LOG_CONFIG(ERR, "(%s) received unexpected msg type (%u), expected %u\n", dev->ifname, msg_reply.msg.request.slave, ctx->msg.request.slave); ret = -1; goto out; } ret = msg_reply.msg.payload.u64 ? 
-1 : 0; out: rte_spinlock_unlock(&dev->slave_req_lock); return ret; } int vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm) { int ret; struct vhu_msg_context ctx = { .msg = { .request.slave = VHOST_USER_SLAVE_IOTLB_MSG, .flags = VHOST_USER_VERSION, .size = sizeof(ctx.msg.payload.iotlb), .payload.iotlb = { .iova = iova, .perm = perm, .type = VHOST_IOTLB_MISS, }, }, }; ret = send_vhost_message(dev, dev->slave_req_fd, &ctx); if (ret < 0) { VHOST_LOG_CONFIG(ERR, "(%s) failed to send IOTLB miss message (%d)\n", dev->ifname, ret); return ret; } return 0; } static int vhost_user_slave_config_change(struct virtio_net *dev, bool need_reply) { int ret; struct vhu_msg_context ctx = { .msg = { .request.slave = VHOST_USER_SLAVE_CONFIG_CHANGE_MSG, .flags = VHOST_USER_VERSION, .size = 0, } }; if (need_reply) ctx.msg.flags |= VHOST_USER_NEED_REPLY; ret = send_vhost_slave_message(dev, &ctx); if (ret < 0) { VHOST_LOG_CONFIG(ERR, "(%s) failed to send config change (%d)\n", dev->ifname, ret); return ret; } return process_slave_message_reply(dev, &ctx); } int rte_vhost_slave_config_change(int vid, bool need_reply) { struct virtio_net *dev; dev = get_device(vid); if (!dev) return -ENODEV; return vhost_user_slave_config_change(dev, need_reply); } static int vhost_user_slave_set_vring_host_notifier(struct virtio_net *dev, int index, int fd, uint64_t offset, uint64_t size) { int ret; struct vhu_msg_context ctx = { .msg = { .request.slave = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG, .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY, .size = sizeof(ctx.msg.payload.area), .payload.area = { .u64 = index & VHOST_USER_VRING_IDX_MASK, .size = size, .offset = offset, }, }, }; if (fd < 0) ctx.msg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK; else { ctx.fds[0] = fd; ctx.fd_num = 1; } ret = send_vhost_slave_message(dev, &ctx); if (ret < 0) { VHOST_LOG_CONFIG(ERR, "(%s) failed to set host notifier (%d)\n", dev->ifname, ret); return ret; } return process_slave_message_reply(dev, &ctx); } int rte_vhost_host_notifier_ctrl(int vid, uint16_t qid, bool enable) { struct virtio_net *dev; struct rte_vdpa_device *vdpa_dev; int vfio_device_fd, ret = 0; uint64_t offset, size; unsigned int i, q_start, q_last; dev = get_device(vid); if (!dev) return -ENODEV; vdpa_dev = dev->vdpa_dev; if (vdpa_dev == NULL) return -ENODEV; if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) || !(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) || !(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ)) || !(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) || !(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER))) return -ENOTSUP; if (qid == RTE_VHOST_QUEUE_ALL) { q_start = 0; q_last = dev->nr_vring - 1; } else { if (qid >= dev->nr_vring) return -EINVAL; q_start = qid; q_last = qid; } RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_vfio_device_fd, -ENOTSUP); RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_notify_area, -ENOTSUP); vfio_device_fd = vdpa_dev->ops->get_vfio_device_fd(vid); if (vfio_device_fd < 0) return -ENOTSUP; if (enable) { for (i = q_start; i <= q_last; i++) { if (vdpa_dev->ops->get_notify_area(vid, i, &offset, &size) < 0) { ret = -ENOTSUP; goto disable; } if (vhost_user_slave_set_vring_host_notifier(dev, i, vfio_device_fd, offset, size) < 0) { ret = -EFAULT; goto disable; } } } else { disable: for (i = q_start; i <= q_last; i++) { vhost_user_slave_set_vring_host_notifier(dev, i, -1, 0, 0); } } return ret; }
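The inflight setup above sizes one shared region per virtqueue (get_pervq_shm_size_split()/get_pervq_shm_size_packed()) and then maps num_queues of them in a single buffer. The snippet below is a minimal, self-contained sketch of that sizing arithmetic only; the descriptor size and alignment constant are stand-in assumptions for illustration, not the actual values from the DPDK headers.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for illustration only (the real values come from the DPDK
 * headers): assume a 16-byte split inflight descriptor and a 64-byte
 * alignment requirement. */
#define DESC_SPLIT_SIZE 16u
#define INFLIGHT_ALIGN  64u

/* Round v up to the next multiple of align, as RTE_ALIGN_MUL_CEIL() does. */
static uint64_t align_mul_ceil(uint64_t v, uint64_t align)
{
	return ((v + align - 1) / align) * align;
}

/* Same shape as get_pervq_shm_size_split(): the per-queue descriptor
 * array plus one uint64_t and four uint16_t header fields, aligned up. */
static uint64_t pervq_shm_size_split(uint16_t queue_size)
{
	return align_mul_ceil((uint64_t)DESC_SPLIT_SIZE * queue_size +
			      sizeof(uint64_t) + sizeof(uint16_t) * 4,
			      INFLIGHT_ALIGN);
}

int main(void)
{
	uint16_t num_queues = 2, queue_size = 256;
	uint64_t mmap_size = (uint64_t)num_queues * pervq_shm_size_split(queue_size);

	printf("per-vq size: %" PRIu64 " bytes, total mmap size: %" PRIu64 " bytes\n",
	       pervq_shm_size_split(queue_size), mmap_size);
	return 0;
}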
null
287
CWE-787
CVE-2021-38593
/**************************************************************************** ** ** Copyright (C) 2016 The Qt Company Ltd. ** Contact: https://www.qt.io/licensing/ ** ** This file is part of the QtGui module of the Qt Toolkit. ** ** $QT_BEGIN_LICENSE:LGPL$ ** Commercial License Usage ** Licensees holding valid commercial Qt licenses may use this file in ** accordance with the commercial license agreement provided with the ** Software or, alternatively, in accordance with the terms contained in ** a written agreement between you and The Qt Company. For licensing terms ** and conditions see https://www.qt.io/terms-conditions. For further ** information use the contact form at https://www.qt.io/contact-us. ** ** GNU Lesser General Public License Usage ** Alternatively, this file may be used under the terms of the GNU Lesser ** General Public License version 3 as published by the Free Software ** Foundation and appearing in the file LICENSE.LGPL3 included in the ** packaging of this file. Please review the following information to ** ensure the GNU Lesser General Public License version 3 requirements ** will be met: https://www.gnu.org/licenses/lgpl-3.0.html. ** ** GNU General Public License Usage ** Alternatively, this file may be used under the terms of the GNU ** General Public License version 2.0 or (at your option) the GNU General ** Public license version 3 or any later version approved by the KDE Free ** Qt Foundation. The licenses are as published by the Free Software ** Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3 ** included in the packaging of this file. Please review the following ** information to ensure the GNU General Public License requirements will ** be met: https://www.gnu.org/licenses/gpl-2.0.html and ** https://www.gnu.org/licenses/gpl-3.0.html. 
** ** $QT_END_LICENSE$ ** ****************************************************************************/ #include "qpaintengineex_p.h" #include "qpainter_p.h" #include "qstroker_p.h" #include "qbezier_p.h" #include <private/qpainterpath_p.h> #include <private/qfontengine_p.h> #include <private/qstatictext_p.h> #include <qvarlengtharray.h> #include <qdebug.h> QT_BEGIN_NAMESPACE #if !defined(QT_MAX_CACHED_GLYPH_SIZE) # define QT_MAX_CACHED_GLYPH_SIZE 64 #endif /******************************************************************************* * * class QVectorPath * */ QVectorPath::~QVectorPath() { if (m_hints & ShouldUseCacheHint) { CacheEntry *e = m_cache; while (e) { if (e->data) e->cleanup(e->engine, e->data); CacheEntry *n = e->next; delete e; e = n; } } } QRectF QVectorPath::controlPointRect() const { if (m_hints & ControlPointRect) return QRectF(QPointF(m_cp_rect.x1, m_cp_rect.y1), QPointF(m_cp_rect.x2, m_cp_rect.y2)); if (m_count == 0) { m_cp_rect.x1 = m_cp_rect.x2 = m_cp_rect.y1 = m_cp_rect.y2 = 0; m_hints |= ControlPointRect; return QRectF(QPointF(m_cp_rect.x1, m_cp_rect.y1), QPointF(m_cp_rect.x2, m_cp_rect.y2)); } Q_ASSERT(m_points && m_count > 0); const qreal *pts = m_points; m_cp_rect.x1 = m_cp_rect.x2 = *pts; ++pts; m_cp_rect.y1 = m_cp_rect.y2 = *pts; ++pts; const qreal *epts = m_points + (m_count << 1); while (pts < epts) { qreal x = *pts; if (x < m_cp_rect.x1) m_cp_rect.x1 = x; else if (x > m_cp_rect.x2) m_cp_rect.x2 = x; ++pts; qreal y = *pts; if (y < m_cp_rect.y1) m_cp_rect.y1 = y; else if (y > m_cp_rect.y2) m_cp_rect.y2 = y; ++pts; } m_hints |= ControlPointRect; return QRectF(QPointF(m_cp_rect.x1, m_cp_rect.y1), QPointF(m_cp_rect.x2, m_cp_rect.y2)); } QVectorPath::CacheEntry *QVectorPath::addCacheData(QPaintEngineEx *engine, void *data, qvectorpath_cache_cleanup cleanup) const{ Q_ASSERT(!lookupCacheData(engine)); if ((m_hints & IsCachedHint) == 0) { m_cache = nullptr; m_hints |= IsCachedHint; } CacheEntry *e = new CacheEntry; e->engine = engine; e->data = data; e->cleanup = cleanup; e->next = m_cache; m_cache = e; return m_cache; } const QVectorPath &qtVectorPathForPath(const QPainterPath &path) { Q_ASSERT(path.d_func()); return path.d_func()->vectorPath(); } #ifndef QT_NO_DEBUG_STREAM QDebug Q_GUI_EXPORT &operator<<(QDebug &s, const QVectorPath &path) { QDebugStateSaver saver(s); QRectF rf = path.controlPointRect(); s << "QVectorPath(size:" << path.elementCount() << " hints:" << Qt::hex << path.hints() << rf << ')'; return s; } #endif /******************************************************************************* * * class QPaintEngineExPrivate: * */ struct StrokeHandler { StrokeHandler(int reserve) : pts(reserve), types(reserve) {} QDataBuffer<qreal> pts; QDataBuffer<QPainterPath::ElementType> types; }; QPaintEngineExPrivate::QPaintEngineExPrivate() : dasher(&stroker), strokeHandler(nullptr), activeStroker(nullptr), strokerPen(Qt::NoPen) { } QPaintEngineExPrivate::~QPaintEngineExPrivate() { delete strokeHandler; } void QPaintEngineExPrivate::replayClipOperations() { Q_Q(QPaintEngineEx); QPainter *p = q->painter(); if (!p || !p->d_ptr) return; const QList<QPainterClipInfo> &clipInfo = p->d_ptr->state->clipInfo; QTransform transform = q->state()->matrix; for (const QPainterClipInfo &info : clipInfo) { if (info.matrix != q->state()->matrix) { q->state()->matrix = info.matrix; q->transformChanged(); } switch (info.clipType) { case QPainterClipInfo::RegionClip: q->clip(info.region, info.operation); break; case QPainterClipInfo::PathClip: q->clip(info.path, info.operation); 
break; case QPainterClipInfo::RectClip: q->clip(info.rect, info.operation); break; case QPainterClipInfo::RectFClip: { qreal right = info.rectf.x() + info.rectf.width(); qreal bottom = info.rectf.y() + info.rectf.height(); qreal pts[] = { info.rectf.x(), info.rectf.y(), right, info.rectf.y(), right, bottom, info.rectf.x(), bottom }; QVectorPath vp(pts, 4, nullptr, QVectorPath::RectangleHint); q->clip(vp, info.operation); break; } } } if (transform != q->state()->matrix) { q->state()->matrix = transform; q->transformChanged(); } } bool QPaintEngineExPrivate::hasClipOperations() const { Q_Q(const QPaintEngineEx); QPainter *p = q->painter(); if (!p || !p->d_ptr) return false; return !p->d_ptr->state->clipInfo.isEmpty(); } /******************************************************************************* * * class QPaintEngineEx: * */ static const QPainterPath::ElementType qpaintengineex_ellipse_types[] = { QPainterPath::MoveToElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement }; static const QPainterPath::ElementType qpaintengineex_line_types_16[] = { QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement }; static const QPainterPath::ElementType qpaintengineex_rect4_types_32[] = { QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 1 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 2 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 3 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 4 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 5 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 6 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 7 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 8 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 9 QPainterPath::MoveToElement, 
QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 10 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 11 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 12 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 13 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 14 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 15 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 16 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 17 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 18 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 19 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 20 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 21 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 22 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 23 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 24 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 25 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 26 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 27 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 28 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 29 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 30 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 31 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 32 }; static const QPainterPath::ElementType qpaintengineex_roundedrect_types[] = { QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::LineToElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::LineToElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::LineToElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement }; static void qpaintengineex_moveTo(qreal x, qreal y, void *data) { ((StrokeHandler *) data)->pts.add(x); ((StrokeHandler *) data)->pts.add(y); ((StrokeHandler 
*) data)->types.add(QPainterPath::MoveToElement); } static void qpaintengineex_lineTo(qreal x, qreal y, void *data) { ((StrokeHandler *) data)->pts.add(x); ((StrokeHandler *) data)->pts.add(y); ((StrokeHandler *) data)->types.add(QPainterPath::LineToElement); } static void qpaintengineex_cubicTo(qreal c1x, qreal c1y, qreal c2x, qreal c2y, qreal ex, qreal ey, void *data) { ((StrokeHandler *) data)->pts.add(c1x); ((StrokeHandler *) data)->pts.add(c1y); ((StrokeHandler *) data)->types.add(QPainterPath::CurveToElement); ((StrokeHandler *) data)->pts.add(c2x); ((StrokeHandler *) data)->pts.add(c2y); ((StrokeHandler *) data)->types.add(QPainterPath::CurveToDataElement); ((StrokeHandler *) data)->pts.add(ex); ((StrokeHandler *) data)->pts.add(ey); ((StrokeHandler *) data)->types.add(QPainterPath::CurveToDataElement); } QPaintEngineEx::QPaintEngineEx() : QPaintEngine(*new QPaintEngineExPrivate, AllFeatures) { extended = true; } QPaintEngineEx::QPaintEngineEx(QPaintEngineExPrivate &data) : QPaintEngine(data, AllFeatures) { extended = true; } QPainterState *QPaintEngineEx::createState(QPainterState *orig) const { if (!orig) return new QPainterState; return new QPainterState(orig); } Q_GUI_EXPORT extern bool qt_scaleForTransform(const QTransform &transform, qreal *scale); // qtransform.cpp void QPaintEngineEx::stroke(const QVectorPath &path, const QPen &inPen) { #ifdef QT_DEBUG_DRAW qDebug() << "QPaintEngineEx::stroke()" << pen; #endif Q_D(QPaintEngineEx); if (path.isEmpty()) return; if (!d->strokeHandler) { d->strokeHandler = new StrokeHandler(path.elementCount()+4); d->stroker.setMoveToHook(qpaintengineex_moveTo); d->stroker.setLineToHook(qpaintengineex_lineTo); d->stroker.setCubicToHook(qpaintengineex_cubicTo); } QRectF clipRect; QPen pen = inPen; if (pen.style() > Qt::SolidLine) { QRectF cpRect = path.controlPointRect(); const QTransform &xf = state()->matrix; if (pen.isCosmetic()) { clipRect = d->exDeviceRect; cpRect.translate(xf.dx(), xf.dy()); } else { clipRect = xf.inverted().mapRect(QRectF(d->exDeviceRect)); } // Check to avoid generating unwieldy amount of dashes that will not be visible anyway QRectF extentRect = cpRect & clipRect; qreal extent = qMax(extentRect.width(), extentRect.height()); qreal patternLength = 0; const QList<qreal> pattern = pen.dashPattern(); const int patternSize = qMin(pattern.size(), 32); for (int i = 0; i < patternSize; i++) patternLength += qMax(pattern.at(i), qreal(0)); if (pen.widthF()) patternLength *= pen.widthF(); if (qFuzzyIsNull(patternLength)) { pen.setStyle(Qt::NoPen); } else if (extent / patternLength > 10000) { // approximate stream of tiny dashes with semi-transparent solid line pen.setStyle(Qt::SolidLine); QColor color(pen.color()); color.setAlpha(color.alpha() / 2); pen.setColor(color); } } if (!qpen_fast_equals(pen, d->strokerPen)) { d->strokerPen = pen; d->stroker.setJoinStyle(pen.joinStyle()); d->stroker.setCapStyle(pen.capStyle()); d->stroker.setMiterLimit(pen.miterLimit()); qreal penWidth = pen.widthF(); if (penWidth == 0) d->stroker.setStrokeWidth(1); else d->stroker.setStrokeWidth(penWidth); Qt::PenStyle style = pen.style(); if (style == Qt::SolidLine) { d->activeStroker = &d->stroker; } else if (style == Qt::NoPen) { d->activeStroker = nullptr; } else { d->dasher.setDashPattern(pen.dashPattern()); d->dasher.setDashOffset(pen.dashOffset()); d->activeStroker = &d->dasher; } } if (!d->activeStroker) { return; } if (!clipRect.isNull()) d->activeStroker->setClipRect(clipRect); if (d->activeStroker == &d->stroker) 
d->stroker.setForceOpen(path.hasExplicitOpen()); const QPainterPath::ElementType *types = path.elements(); const qreal *points = path.points(); int pointCount = path.elementCount(); const qreal *lastPoint = points + (pointCount<<1); d->strokeHandler->types.reset(); d->strokeHandler->pts.reset(); // Some engines might decide to optimize for the non-shape hint later on... uint flags = QVectorPath::WindingFill; if (path.elementCount() > 2) flags |= QVectorPath::NonConvexShapeMask; if (d->stroker.capStyle() == Qt::RoundCap || d->stroker.joinStyle() == Qt::RoundJoin) flags |= QVectorPath::CurvedShapeMask; // ### Perspective Xforms are currently not supported... if (!pen.isCosmetic()) { // We include cosmetic pens in this case to avoid having to // change the current transform. Normal transformed, // non-cosmetic pens will be transformed as part of fill // later, so they are also covered here.. d->activeStroker->setCurveThresholdFromTransform(state()->matrix); d->activeStroker->begin(d->strokeHandler); if (types) { while (points < lastPoint) { switch (*types) { case QPainterPath::MoveToElement: d->activeStroker->moveTo(points[0], points[1]); points += 2; ++types; break; case QPainterPath::LineToElement: d->activeStroker->lineTo(points[0], points[1]); points += 2; ++types; break; case QPainterPath::CurveToElement: d->activeStroker->cubicTo(points[0], points[1], points[2], points[3], points[4], points[5]); points += 6; types += 3; flags |= QVectorPath::CurvedShapeMask; break; default: break; } } if (path.hasImplicitClose()) d->activeStroker->lineTo(path.points()[0], path.points()[1]); } else { d->activeStroker->moveTo(points[0], points[1]); points += 2; while (points < lastPoint) { d->activeStroker->lineTo(points[0], points[1]); points += 2; } if (path.hasImplicitClose()) d->activeStroker->lineTo(path.points()[0], path.points()[1]); } d->activeStroker->end(); if (!d->strokeHandler->types.size()) // an empty path... return; QVectorPath strokePath(d->strokeHandler->pts.data(), d->strokeHandler->types.size(), d->strokeHandler->types.data(), flags); fill(strokePath, pen.brush()); } else { // For cosmetic pens we need a bit of trickery... 
We to process xform the input points if (state()->matrix.type() >= QTransform::TxProject) { QPainterPath painterPath = state()->matrix.map(path.convertToPainterPath()); d->activeStroker->strokePath(painterPath, d->strokeHandler, QTransform()); } else { d->activeStroker->setCurveThresholdFromTransform(QTransform()); d->activeStroker->begin(d->strokeHandler); if (types) { while (points < lastPoint) { switch (*types) { case QPainterPath::MoveToElement: { QPointF pt = (*(const QPointF *) points) * state()->matrix; d->activeStroker->moveTo(pt.x(), pt.y()); points += 2; ++types; break; } case QPainterPath::LineToElement: { QPointF pt = (*(const QPointF *) points) * state()->matrix; d->activeStroker->lineTo(pt.x(), pt.y()); points += 2; ++types; break; } case QPainterPath::CurveToElement: { QPointF c1 = ((const QPointF *) points)[0] * state()->matrix; QPointF c2 = ((const QPointF *) points)[1] * state()->matrix; QPointF e = ((const QPointF *) points)[2] * state()->matrix; d->activeStroker->cubicTo(c1.x(), c1.y(), c2.x(), c2.y(), e.x(), e.y()); points += 6; types += 3; flags |= QVectorPath::CurvedShapeMask; break; } default: break; } } if (path.hasImplicitClose()) { QPointF pt = * ((const QPointF *) path.points()) * state()->matrix; d->activeStroker->lineTo(pt.x(), pt.y()); } } else { QPointF p = ((const QPointF *)points)[0] * state()->matrix; d->activeStroker->moveTo(p.x(), p.y()); points += 2; while (points < lastPoint) { QPointF p = ((const QPointF *)points)[0] * state()->matrix; d->activeStroker->lineTo(p.x(), p.y()); points += 2; } if (path.hasImplicitClose()) d->activeStroker->lineTo(p.x(), p.y()); } d->activeStroker->end(); } QVectorPath strokePath(d->strokeHandler->pts.data(), d->strokeHandler->types.size(), d->strokeHandler->types.data(), flags); QTransform xform = state()->matrix; state()->matrix = QTransform(); transformChanged(); QBrush brush = pen.brush(); if (qbrush_style(brush) != Qt::SolidPattern) brush.setTransform(brush.transform() * xform); fill(strokePath, brush); state()->matrix = xform; transformChanged(); } } void QPaintEngineEx::draw(const QVectorPath &path) { const QBrush &brush = state()->brush; if (qbrush_style(brush) != Qt::NoBrush) fill(path, brush); const QPen &pen = state()->pen; if (qpen_style(pen) != Qt::NoPen && qbrush_style(qpen_brush(pen)) != Qt::NoBrush) stroke(path, pen); } void QPaintEngineEx::clip(const QRect &r, Qt::ClipOperation op) { qreal right = r.x() + r.width(); qreal bottom = r.y() + r.height(); qreal pts[] = { qreal(r.x()), qreal(r.y()), right, qreal(r.y()), right, bottom, qreal(r.x()), bottom, qreal(r.x()), qreal(r.y()) }; QVectorPath vp(pts, 5, nullptr, QVectorPath::RectangleHint); clip(vp, op); } void QPaintEngineEx::clip(const QRegion &region, Qt::ClipOperation op) { const auto rectsInRegion = region.rectCount(); if (rectsInRegion == 1) { clip(*region.begin(), op); } else if (rectsInRegion <= 32) { qreal pts[2*32*4]; int pos = 0; for (QRect r : region) { qreal x1 = r.x(); qreal y1 = r.y(); qreal x2 = r.x() + r.width(); qreal y2 = r.y() + r.height(); pts[pos++] = x1; pts[pos++] = y1; pts[pos++] = x2; pts[pos++] = y1; pts[pos++] = x2; pts[pos++] = y2; pts[pos++] = x1; pts[pos++] = y2; } QVectorPath vp(pts, rectsInRegion * 4, qpaintengineex_rect4_types_32); clip(vp, op); } else { QVarLengthArray<qreal> pts(rectsInRegion * 2 * 4); QVarLengthArray<QPainterPath::ElementType> types(rectsInRegion * 4); int ppos = 0; int tpos = 0; for (QRect r : region) { qreal x1 = r.x(); qreal y1 = r.y(); qreal x2 = r.x() + r.width(); qreal y2 = r.y() + r.height(); 
pts[ppos++] = x1; pts[ppos++] = y1; pts[ppos++] = x2; pts[ppos++] = y1; pts[ppos++] = x2; pts[ppos++] = y2; pts[ppos++] = x1; pts[ppos++] = y2; types[tpos++] = QPainterPath::MoveToElement; types[tpos++] = QPainterPath::LineToElement; types[tpos++] = QPainterPath::LineToElement; types[tpos++] = QPainterPath::LineToElement; } QVectorPath vp(pts.data(), rectsInRegion * 4, types.data()); clip(vp, op); } } void QPaintEngineEx::clip(const QPainterPath &path, Qt::ClipOperation op) { if (path.isEmpty()) { QVectorPath vp(nullptr, 0); clip(vp, op); } else { clip(qtVectorPathForPath(path), op); } } void QPaintEngineEx::fillRect(const QRectF &r, const QBrush &brush) { qreal pts[] = { r.x(), r.y(), r.x() + r.width(), r.y(), r.x() + r.width(), r.y() + r.height(), r.x(), r.y() + r.height() }; QVectorPath vp(pts, 4, nullptr, QVectorPath::RectangleHint); fill(vp, brush); } void QPaintEngineEx::fillRect(const QRectF &r, const QColor &color) { fillRect(r, QBrush(color)); } void QPaintEngineEx::drawRects(const QRect *rects, int rectCount) { for (int i=0; i<rectCount; ++i) { const QRect &r = rects[i]; // ### Is there a one off here? qreal right = r.x() + r.width(); qreal bottom = r.y() + r.height(); qreal pts[] = { qreal(r.x()), qreal(r.y()), right, qreal(r.y()), right, bottom, qreal(r.x()), bottom, qreal(r.x()), qreal(r.y()) }; QVectorPath vp(pts, 5, nullptr, QVectorPath::RectangleHint); draw(vp); } } void QPaintEngineEx::drawRects(const QRectF *rects, int rectCount) { for (int i=0; i<rectCount; ++i) { const QRectF &r = rects[i]; qreal right = r.x() + r.width(); qreal bottom = r.y() + r.height(); qreal pts[] = { r.x(), r.y(), right, r.y(), right, bottom, r.x(), bottom, r.x(), r.y() }; QVectorPath vp(pts, 5, nullptr, QVectorPath::RectangleHint); draw(vp); } } void QPaintEngineEx::drawRoundedRect(const QRectF &rect, qreal xRadius, qreal yRadius, Qt::SizeMode mode) { qreal x1 = rect.left(); qreal x2 = rect.right(); qreal y1 = rect.top(); qreal y2 = rect.bottom(); if (mode == Qt::RelativeSize) { xRadius = xRadius * rect.width() / 200.; yRadius = yRadius * rect.height() / 200.; } xRadius = qMin(xRadius, rect.width() / 2); yRadius = qMin(yRadius, rect.height() / 2); qreal pts[] = { x1 + xRadius, y1, // MoveTo x2 - xRadius, y1, // LineTo x2 - (1 - KAPPA) * xRadius, y1, // CurveTo x2, y1 + (1 - KAPPA) * yRadius, x2, y1 + yRadius, x2, y2 - yRadius, // LineTo x2, y2 - (1 - KAPPA) * yRadius, // CurveTo x2 - (1 - KAPPA) * xRadius, y2, x2 - xRadius, y2, x1 + xRadius, y2, // LineTo x1 + (1 - KAPPA) * xRadius, y2, // CurveTo x1, y2 - (1 - KAPPA) * yRadius, x1, y2 - yRadius, x1, y1 + yRadius, // LineTo x1, y1 + (1 - KAPPA) * yRadius, // CurveTo x1 + (1 - KAPPA) * xRadius, y1, x1 + xRadius, y1 }; QVectorPath path(pts, 17, qpaintengineex_roundedrect_types, QVectorPath::RoundedRectHint); draw(path); } void QPaintEngineEx::drawLines(const QLine *lines, int lineCount) { int elementCount = lineCount << 1; while (elementCount > 0) { int count = qMin(elementCount, 32); qreal pts[64]; int count2 = count<<1; for (int i=0; i<count2; ++i) pts[i] = ((const int *) lines)[i]; QVectorPath path(pts, count, qpaintengineex_line_types_16, QVectorPath::LinesHint); stroke(path, state()->pen); elementCount -= 32; lines += 16; } } void QPaintEngineEx::drawLines(const QLineF *lines, int lineCount) { int elementCount = lineCount << 1; while (elementCount > 0) { int count = qMin(elementCount, 32); QVectorPath path((const qreal *) lines, count, qpaintengineex_line_types_16, QVectorPath::LinesHint); stroke(path, state()->pen); elementCount -= 32; lines 
+= 16; } } void QPaintEngineEx::drawEllipse(const QRectF &r) { qreal pts[26]; // QPointF[13] without constructors... union { qreal *ptr; QPointF *points; } x; x.ptr = pts; int point_count = 0; x.points[0] = qt_curves_for_arc(r, 0, -360, x.points + 1, &point_count); if (point_count == 0) return; QVectorPath vp((qreal *) pts, point_count + 1, qpaintengineex_ellipse_types, QVectorPath::EllipseHint); draw(vp); } void QPaintEngineEx::drawEllipse(const QRect &r) { drawEllipse(QRectF(r)); } void QPaintEngineEx::drawPath(const QPainterPath &path) { if (!path.isEmpty()) draw(qtVectorPathForPath(path)); } void QPaintEngineEx::drawPoints(const QPointF *points, int pointCount) { QPen pen = state()->pen; if (pen.capStyle() == Qt::FlatCap) pen.setCapStyle(Qt::SquareCap); if (pen.brush().isOpaque()) { while (pointCount > 0) { int count = qMin(pointCount, 16); qreal pts[64]; int oset = -1; for (int i=0; i<count; ++i) { pts[++oset] = points[i].x(); pts[++oset] = points[i].y(); pts[++oset] = points[i].x() + 1/63.; pts[++oset] = points[i].y(); } QVectorPath path(pts, count * 2, qpaintengineex_line_types_16, QVectorPath::LinesHint); stroke(path, pen); pointCount -= 16; points += 16; } } else { for (int i=0; i<pointCount; ++i) { qreal pts[] = { points[i].x(), points[i].y(), points[i].x() + qreal(1/63.), points[i].y() }; QVectorPath path(pts, 2, nullptr); stroke(path, pen); } } } void QPaintEngineEx::drawPoints(const QPoint *points, int pointCount) { QPen pen = state()->pen; if (pen.capStyle() == Qt::FlatCap) pen.setCapStyle(Qt::SquareCap); if (pen.brush().isOpaque()) { while (pointCount > 0) { int count = qMin(pointCount, 16); qreal pts[64]; int oset = -1; for (int i=0; i<count; ++i) { pts[++oset] = points[i].x(); pts[++oset] = points[i].y(); pts[++oset] = points[i].x() + 1/63.; pts[++oset] = points[i].y(); } QVectorPath path(pts, count * 2, qpaintengineex_line_types_16, QVectorPath::LinesHint); stroke(path, pen); pointCount -= 16; points += 16; } } else { for (int i=0; i<pointCount; ++i) { qreal pts[] = { qreal(points[i].x()), qreal(points[i].y()), qreal(points[i].x() +1/63.), qreal(points[i].y()) }; QVectorPath path(pts, 2, nullptr); stroke(path, pen); } } } void QPaintEngineEx::drawPolygon(const QPointF *points, int pointCount, PolygonDrawMode mode) { Q_ASSUME(pointCount >= 2); QVectorPath path((const qreal *) points, pointCount, nullptr, QVectorPath::polygonFlags(mode)); if (mode == PolylineMode) stroke(path, state()->pen); else draw(path); } void QPaintEngineEx::drawPolygon(const QPoint *points, int pointCount, PolygonDrawMode mode) { Q_ASSUME(pointCount >= 2); int count = pointCount<<1; QVarLengthArray<qreal> pts(count); for (int i=0; i<count; ++i) pts[i] = ((const int *) points)[i]; QVectorPath path(pts.data(), pointCount, nullptr, QVectorPath::polygonFlags(mode)); if (mode == PolylineMode) stroke(path, state()->pen); else draw(path); } void QPaintEngineEx::drawPixmap(const QPointF &pos, const QPixmap &pm) { drawPixmap(QRectF(pos, pm.size() / pm.devicePixelRatio()), pm, pm.rect()); } void QPaintEngineEx::drawImage(const QPointF &pos, const QImage &image) { drawImage(QRectF(pos, image.size() / image.devicePixelRatio()), image, image.rect()); } void QPaintEngineEx::drawTiledPixmap(const QRectF &r, const QPixmap &pixmap, const QPointF &s) { QBrush brush(state()->pen.color(), pixmap); QTransform xform = QTransform::fromTranslate(r.x() - s.x(), r.y() - s.y()); if (!qFuzzyCompare(pixmap.devicePixelRatio(), qreal(1.0))) xform.scale(1.0/pixmap.devicePixelRatio(), 1.0/pixmap.devicePixelRatio()); 
brush.setTransform(xform); qreal pts[] = { r.x(), r.y(), r.x() + r.width(), r.y(), r.x() + r.width(), r.y() + r.height(), r.x(), r.y() + r.height() }; QVectorPath path(pts, 4, nullptr, QVectorPath::RectangleHint); fill(path, brush); } void QPaintEngineEx::drawPixmapFragments(const QPainter::PixmapFragment *fragments, int fragmentCount, const QPixmap &pixmap, QPainter::PixmapFragmentHints /*hints*/) { if (pixmap.isNull()) return; qreal oldOpacity = state()->opacity; QTransform oldTransform = state()->matrix; for (int i = 0; i < fragmentCount; ++i) { QTransform transform = oldTransform; transform.translate(fragments[i].x, fragments[i].y); transform.rotate(fragments[i].rotation); state()->opacity = oldOpacity * fragments[i].opacity; state()->matrix = transform; opacityChanged(); transformChanged(); qreal w = fragments[i].scaleX * fragments[i].width; qreal h = fragments[i].scaleY * fragments[i].height; QRectF sourceRect(fragments[i].sourceLeft, fragments[i].sourceTop, fragments[i].width, fragments[i].height); drawPixmap(QRectF(-0.5 * w, -0.5 * h, w, h), pixmap, sourceRect); } state()->opacity = oldOpacity; state()->matrix = oldTransform; opacityChanged(); transformChanged(); } void QPaintEngineEx::setState(QPainterState *s) { QPaintEngine::state = s; } void QPaintEngineEx::updateState(const QPaintEngineState &) { // do nothing... } Q_GUI_EXPORT QPainterPath qt_painterPathFromVectorPath(const QVectorPath &path) { const qreal *points = path.points(); const QPainterPath::ElementType *types = path.elements(); QPainterPath p; if (types) { int id = 0; for (int i=0; i<path.elementCount(); ++i) { switch(types[i]) { case QPainterPath::MoveToElement: p.moveTo(QPointF(points[id], points[id+1])); id+=2; break; case QPainterPath::LineToElement: p.lineTo(QPointF(points[id], points[id+1])); id+=2; break; case QPainterPath::CurveToElement: { QPointF p1(points[id], points[id+1]); QPointF p2(points[id+2], points[id+3]); QPointF p3(points[id+4], points[id+5]); p.cubicTo(p1, p2, p3); id+=6; break; } case QPainterPath::CurveToDataElement: ; break; } } } else { p.moveTo(QPointF(points[0], points[1])); int id = 2; for (int i=1; i<path.elementCount(); ++i) { p.lineTo(QPointF(points[id], points[id+1])); id+=2; } } if (path.hints() & QVectorPath::WindingFill) p.setFillRule(Qt::WindingFill); return p; } void QPaintEngineEx::drawStaticTextItem(QStaticTextItem *staticTextItem) { QPainterPath path; path.setFillRule(Qt::WindingFill); if (staticTextItem->numGlyphs == 0) return; QFontEngine *fontEngine = staticTextItem->fontEngine(); fontEngine->addGlyphsToPath(staticTextItem->glyphs, staticTextItem->glyphPositions, staticTextItem->numGlyphs, &path, { }); if (!path.isEmpty()) { QPainterState *s = state(); QPainter::RenderHints oldHints = s->renderHints; bool changedHints = false; if (bool(oldHints & QPainter::TextAntialiasing) && !bool(fontEngine->fontDef.styleStrategy & QFont::NoAntialias) && !bool(oldHints & QPainter::Antialiasing)) { s->renderHints |= QPainter::Antialiasing; renderHintsChanged(); changedHints = true; } fill(qtVectorPathForPath(path), s->pen.brush()); if (changedHints) { s->renderHints = oldHints; renderHintsChanged(); } } } bool QPaintEngineEx::requiresPretransformedGlyphPositions(QFontEngine *, const QTransform &) const { return false; } bool QPaintEngineEx::shouldDrawCachedGlyphs(QFontEngine *fontEngine, const QTransform &m) const { if (fontEngine->glyphFormat == QFontEngine::Format_ARGB) return true; static const int maxCachedGlyphSizeSquared = std::pow([]{ if (int env = 
qEnvironmentVariableIntValue("QT_MAX_CACHED_GLYPH_SIZE")) return env; return QT_MAX_CACHED_GLYPH_SIZE; }(), 2); qreal pixelSize = fontEngine->fontDef.pixelSize; return (pixelSize * pixelSize * qAbs(m.determinant())) <= maxCachedGlyphSizeSquared; } QT_END_NAMESPACE
null
/**************************************************************************** ** ** Copyright (C) 2016 The Qt Company Ltd. ** Contact: https://www.qt.io/licensing/ ** ** This file is part of the QtGui module of the Qt Toolkit. ** ** $QT_BEGIN_LICENSE:LGPL$ ** Commercial License Usage ** Licensees holding valid commercial Qt licenses may use this file in ** accordance with the commercial license agreement provided with the ** Software or, alternatively, in accordance with the terms contained in ** a written agreement between you and The Qt Company. For licensing terms ** and conditions see https://www.qt.io/terms-conditions. For further ** information use the contact form at https://www.qt.io/contact-us. ** ** GNU Lesser General Public License Usage ** Alternatively, this file may be used under the terms of the GNU Lesser ** General Public License version 3 as published by the Free Software ** Foundation and appearing in the file LICENSE.LGPL3 included in the ** packaging of this file. Please review the following information to ** ensure the GNU Lesser General Public License version 3 requirements ** will be met: https://www.gnu.org/licenses/lgpl-3.0.html. ** ** GNU General Public License Usage ** Alternatively, this file may be used under the terms of the GNU ** General Public License version 2.0 or (at your option) the GNU General ** Public license version 3 or any later version approved by the KDE Free ** Qt Foundation. The licenses are as published by the Free Software ** Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3 ** included in the packaging of this file. Please review the following ** information to ensure the GNU General Public License requirements will ** be met: https://www.gnu.org/licenses/gpl-2.0.html and ** https://www.gnu.org/licenses/gpl-3.0.html. 
** ** $QT_END_LICENSE$ ** ****************************************************************************/ #include "qpaintengineex_p.h" #include "qpainter_p.h" #include "qstroker_p.h" #include "qbezier_p.h" #include <private/qpainterpath_p.h> #include <private/qfontengine_p.h> #include <private/qstatictext_p.h> #include <qvarlengtharray.h> #include <qdebug.h> QT_BEGIN_NAMESPACE #if !defined(QT_MAX_CACHED_GLYPH_SIZE) # define QT_MAX_CACHED_GLYPH_SIZE 64 #endif /******************************************************************************* * * class QVectorPath * */ QVectorPath::~QVectorPath() { if (m_hints & ShouldUseCacheHint) { CacheEntry *e = m_cache; while (e) { if (e->data) e->cleanup(e->engine, e->data); CacheEntry *n = e->next; delete e; e = n; } } } QRectF QVectorPath::controlPointRect() const { if (m_hints & ControlPointRect) return QRectF(QPointF(m_cp_rect.x1, m_cp_rect.y1), QPointF(m_cp_rect.x2, m_cp_rect.y2)); if (m_count == 0) { m_cp_rect.x1 = m_cp_rect.x2 = m_cp_rect.y1 = m_cp_rect.y2 = 0; m_hints |= ControlPointRect; return QRectF(QPointF(m_cp_rect.x1, m_cp_rect.y1), QPointF(m_cp_rect.x2, m_cp_rect.y2)); } Q_ASSERT(m_points && m_count > 0); const qreal *pts = m_points; m_cp_rect.x1 = m_cp_rect.x2 = *pts; ++pts; m_cp_rect.y1 = m_cp_rect.y2 = *pts; ++pts; const qreal *epts = m_points + (m_count << 1); while (pts < epts) { qreal x = *pts; if (x < m_cp_rect.x1) m_cp_rect.x1 = x; else if (x > m_cp_rect.x2) m_cp_rect.x2 = x; ++pts; qreal y = *pts; if (y < m_cp_rect.y1) m_cp_rect.y1 = y; else if (y > m_cp_rect.y2) m_cp_rect.y2 = y; ++pts; } m_hints |= ControlPointRect; return QRectF(QPointF(m_cp_rect.x1, m_cp_rect.y1), QPointF(m_cp_rect.x2, m_cp_rect.y2)); } QVectorPath::CacheEntry *QVectorPath::addCacheData(QPaintEngineEx *engine, void *data, qvectorpath_cache_cleanup cleanup) const{ Q_ASSERT(!lookupCacheData(engine)); if ((m_hints & IsCachedHint) == 0) { m_cache = nullptr; m_hints |= IsCachedHint; } CacheEntry *e = new CacheEntry; e->engine = engine; e->data = data; e->cleanup = cleanup; e->next = m_cache; m_cache = e; return m_cache; } const QVectorPath &qtVectorPathForPath(const QPainterPath &path) { Q_ASSERT(path.d_func()); return path.d_func()->vectorPath(); } #ifndef QT_NO_DEBUG_STREAM QDebug Q_GUI_EXPORT &operator<<(QDebug &s, const QVectorPath &path) { QDebugStateSaver saver(s); QRectF rf = path.controlPointRect(); s << "QVectorPath(size:" << path.elementCount() << " hints:" << Qt::hex << path.hints() << rf << ')'; return s; } #endif /******************************************************************************* * * class QPaintEngineExPrivate: * */ struct StrokeHandler { StrokeHandler(int reserve) : pts(reserve), types(reserve) {} QDataBuffer<qreal> pts; QDataBuffer<QPainterPath::ElementType> types; }; QPaintEngineExPrivate::QPaintEngineExPrivate() : dasher(&stroker), strokeHandler(nullptr), activeStroker(nullptr), strokerPen(Qt::NoPen) { } QPaintEngineExPrivate::~QPaintEngineExPrivate() { delete strokeHandler; } void QPaintEngineExPrivate::replayClipOperations() { Q_Q(QPaintEngineEx); QPainter *p = q->painter(); if (!p || !p->d_ptr) return; const QList<QPainterClipInfo> &clipInfo = p->d_ptr->state->clipInfo; QTransform transform = q->state()->matrix; for (const QPainterClipInfo &info : clipInfo) { if (info.matrix != q->state()->matrix) { q->state()->matrix = info.matrix; q->transformChanged(); } switch (info.clipType) { case QPainterClipInfo::RegionClip: q->clip(info.region, info.operation); break; case QPainterClipInfo::PathClip: q->clip(info.path, info.operation); 
break; case QPainterClipInfo::RectClip: q->clip(info.rect, info.operation); break; case QPainterClipInfo::RectFClip: { qreal right = info.rectf.x() + info.rectf.width(); qreal bottom = info.rectf.y() + info.rectf.height(); qreal pts[] = { info.rectf.x(), info.rectf.y(), right, info.rectf.y(), right, bottom, info.rectf.x(), bottom }; QVectorPath vp(pts, 4, nullptr, QVectorPath::RectangleHint); q->clip(vp, info.operation); break; } } } if (transform != q->state()->matrix) { q->state()->matrix = transform; q->transformChanged(); } } bool QPaintEngineExPrivate::hasClipOperations() const { Q_Q(const QPaintEngineEx); QPainter *p = q->painter(); if (!p || !p->d_ptr) return false; return !p->d_ptr->state->clipInfo.isEmpty(); } /******************************************************************************* * * class QPaintEngineEx: * */ static const QPainterPath::ElementType qpaintengineex_ellipse_types[] = { QPainterPath::MoveToElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement }; static const QPainterPath::ElementType qpaintengineex_line_types_16[] = { QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement }; static const QPainterPath::ElementType qpaintengineex_rect4_types_32[] = { QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 1 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 2 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 3 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 4 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 5 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 6 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 7 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 8 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 9 QPainterPath::MoveToElement, 
QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 10 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 11 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 12 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 13 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 14 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 15 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 16 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 17 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 18 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 19 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 20 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 21 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 22 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 23 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 24 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 25 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 26 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 27 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 28 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 29 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 30 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 31 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 32 }; static const QPainterPath::ElementType qpaintengineex_roundedrect_types[] = { QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::LineToElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::LineToElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::LineToElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement }; static void qpaintengineex_moveTo(qreal x, qreal y, void *data) { ((StrokeHandler *) data)->pts.add(x); ((StrokeHandler *) data)->pts.add(y); ((StrokeHandler 
*) data)->types.add(QPainterPath::MoveToElement); } static void qpaintengineex_lineTo(qreal x, qreal y, void *data) { ((StrokeHandler *) data)->pts.add(x); ((StrokeHandler *) data)->pts.add(y); ((StrokeHandler *) data)->types.add(QPainterPath::LineToElement); } static void qpaintengineex_cubicTo(qreal c1x, qreal c1y, qreal c2x, qreal c2y, qreal ex, qreal ey, void *data) { ((StrokeHandler *) data)->pts.add(c1x); ((StrokeHandler *) data)->pts.add(c1y); ((StrokeHandler *) data)->types.add(QPainterPath::CurveToElement); ((StrokeHandler *) data)->pts.add(c2x); ((StrokeHandler *) data)->pts.add(c2y); ((StrokeHandler *) data)->types.add(QPainterPath::CurveToDataElement); ((StrokeHandler *) data)->pts.add(ex); ((StrokeHandler *) data)->pts.add(ey); ((StrokeHandler *) data)->types.add(QPainterPath::CurveToDataElement); } QPaintEngineEx::QPaintEngineEx() : QPaintEngine(*new QPaintEngineExPrivate, AllFeatures) { extended = true; } QPaintEngineEx::QPaintEngineEx(QPaintEngineExPrivate &data) : QPaintEngine(data, AllFeatures) { extended = true; } QPainterState *QPaintEngineEx::createState(QPainterState *orig) const { if (!orig) return new QPainterState; return new QPainterState(orig); } Q_GUI_EXPORT extern bool qt_scaleForTransform(const QTransform &transform, qreal *scale); // qtransform.cpp void QPaintEngineEx::stroke(const QVectorPath &path, const QPen &inPen) { #ifdef QT_DEBUG_DRAW qDebug() << "QPaintEngineEx::stroke()" << pen; #endif Q_D(QPaintEngineEx); if (path.isEmpty()) return; if (!d->strokeHandler) { d->strokeHandler = new StrokeHandler(path.elementCount()+4); d->stroker.setMoveToHook(qpaintengineex_moveTo); d->stroker.setLineToHook(qpaintengineex_lineTo); d->stroker.setCubicToHook(qpaintengineex_cubicTo); } QRectF clipRect; QPen pen = inPen; if (pen.style() > Qt::SolidLine) { QRectF cpRect = path.controlPointRect(); const QTransform &xf = state()->matrix; if (pen.isCosmetic()) { clipRect = d->exDeviceRect; cpRect.translate(xf.dx(), xf.dy()); } else { clipRect = xf.inverted().mapRect(QRectF(d->exDeviceRect)); } // Check to avoid generating unwieldy amount of dashes that will not be visible anyway QRectF extentRect = cpRect & clipRect; qreal extent = qMax(extentRect.width(), extentRect.height()); qreal patternLength = 0; const QList<qreal> pattern = pen.dashPattern(); const int patternSize = qMin(pattern.size(), 32); for (int i = 0; i < patternSize; i++) patternLength += qMax(pattern.at(i), qreal(0)); if (pen.widthF()) patternLength *= pen.widthF(); if (qFuzzyIsNull(patternLength)) { pen.setStyle(Qt::NoPen); } else if (qFuzzyIsNull(extent) || extent / patternLength > 10000) { // approximate stream of tiny dashes with semi-transparent solid line pen.setStyle(Qt::SolidLine); QColor color(pen.color()); color.setAlpha(color.alpha() / 2); pen.setColor(color); } } if (!qpen_fast_equals(pen, d->strokerPen)) { d->strokerPen = pen; d->stroker.setJoinStyle(pen.joinStyle()); d->stroker.setCapStyle(pen.capStyle()); d->stroker.setMiterLimit(pen.miterLimit()); qreal penWidth = pen.widthF(); if (penWidth == 0) d->stroker.setStrokeWidth(1); else d->stroker.setStrokeWidth(penWidth); Qt::PenStyle style = pen.style(); if (style == Qt::SolidLine) { d->activeStroker = &d->stroker; } else if (style == Qt::NoPen) { d->activeStroker = nullptr; } else { d->dasher.setDashPattern(pen.dashPattern()); d->dasher.setDashOffset(pen.dashOffset()); d->activeStroker = &d->dasher; } } if (!d->activeStroker) { return; } if (!clipRect.isNull()) d->activeStroker->setClipRect(clipRect); if (d->activeStroker == &d->stroker) 
d->stroker.setForceOpen(path.hasExplicitOpen()); const QPainterPath::ElementType *types = path.elements(); const qreal *points = path.points(); int pointCount = path.elementCount(); const qreal *lastPoint = points + (pointCount<<1); d->strokeHandler->types.reset(); d->strokeHandler->pts.reset(); // Some engines might decide to optimize for the non-shape hint later on... uint flags = QVectorPath::WindingFill; if (path.elementCount() > 2) flags |= QVectorPath::NonConvexShapeMask; if (d->stroker.capStyle() == Qt::RoundCap || d->stroker.joinStyle() == Qt::RoundJoin) flags |= QVectorPath::CurvedShapeMask; // ### Perspective Xforms are currently not supported... if (!pen.isCosmetic()) { // We include cosmetic pens in this case to avoid having to // change the current transform. Normal transformed, // non-cosmetic pens will be transformed as part of fill // later, so they are also covered here.. d->activeStroker->setCurveThresholdFromTransform(state()->matrix); d->activeStroker->begin(d->strokeHandler); if (types) { while (points < lastPoint) { switch (*types) { case QPainterPath::MoveToElement: d->activeStroker->moveTo(points[0], points[1]); points += 2; ++types; break; case QPainterPath::LineToElement: d->activeStroker->lineTo(points[0], points[1]); points += 2; ++types; break; case QPainterPath::CurveToElement: d->activeStroker->cubicTo(points[0], points[1], points[2], points[3], points[4], points[5]); points += 6; types += 3; flags |= QVectorPath::CurvedShapeMask; break; default: break; } } if (path.hasImplicitClose()) d->activeStroker->lineTo(path.points()[0], path.points()[1]); } else { d->activeStroker->moveTo(points[0], points[1]); points += 2; while (points < lastPoint) { d->activeStroker->lineTo(points[0], points[1]); points += 2; } if (path.hasImplicitClose()) d->activeStroker->lineTo(path.points()[0], path.points()[1]); } d->activeStroker->end(); if (!d->strokeHandler->types.size()) // an empty path... return; QVectorPath strokePath(d->strokeHandler->pts.data(), d->strokeHandler->types.size(), d->strokeHandler->types.data(), flags); fill(strokePath, pen.brush()); } else { // For cosmetic pens we need a bit of trickery... 
We to process xform the input points if (state()->matrix.type() >= QTransform::TxProject) { QPainterPath painterPath = state()->matrix.map(path.convertToPainterPath()); d->activeStroker->strokePath(painterPath, d->strokeHandler, QTransform()); } else { d->activeStroker->setCurveThresholdFromTransform(QTransform()); d->activeStroker->begin(d->strokeHandler); if (types) { while (points < lastPoint) { switch (*types) { case QPainterPath::MoveToElement: { QPointF pt = (*(const QPointF *) points) * state()->matrix; d->activeStroker->moveTo(pt.x(), pt.y()); points += 2; ++types; break; } case QPainterPath::LineToElement: { QPointF pt = (*(const QPointF *) points) * state()->matrix; d->activeStroker->lineTo(pt.x(), pt.y()); points += 2; ++types; break; } case QPainterPath::CurveToElement: { QPointF c1 = ((const QPointF *) points)[0] * state()->matrix; QPointF c2 = ((const QPointF *) points)[1] * state()->matrix; QPointF e = ((const QPointF *) points)[2] * state()->matrix; d->activeStroker->cubicTo(c1.x(), c1.y(), c2.x(), c2.y(), e.x(), e.y()); points += 6; types += 3; flags |= QVectorPath::CurvedShapeMask; break; } default: break; } } if (path.hasImplicitClose()) { QPointF pt = * ((const QPointF *) path.points()) * state()->matrix; d->activeStroker->lineTo(pt.x(), pt.y()); } } else { QPointF p = ((const QPointF *)points)[0] * state()->matrix; d->activeStroker->moveTo(p.x(), p.y()); points += 2; while (points < lastPoint) { QPointF p = ((const QPointF *)points)[0] * state()->matrix; d->activeStroker->lineTo(p.x(), p.y()); points += 2; } if (path.hasImplicitClose()) d->activeStroker->lineTo(p.x(), p.y()); } d->activeStroker->end(); } QVectorPath strokePath(d->strokeHandler->pts.data(), d->strokeHandler->types.size(), d->strokeHandler->types.data(), flags); QTransform xform = state()->matrix; state()->matrix = QTransform(); transformChanged(); QBrush brush = pen.brush(); if (qbrush_style(brush) != Qt::SolidPattern) brush.setTransform(brush.transform() * xform); fill(strokePath, brush); state()->matrix = xform; transformChanged(); } } void QPaintEngineEx::draw(const QVectorPath &path) { const QBrush &brush = state()->brush; if (qbrush_style(brush) != Qt::NoBrush) fill(path, brush); const QPen &pen = state()->pen; if (qpen_style(pen) != Qt::NoPen && qbrush_style(qpen_brush(pen)) != Qt::NoBrush) stroke(path, pen); } void QPaintEngineEx::clip(const QRect &r, Qt::ClipOperation op) { qreal right = r.x() + r.width(); qreal bottom = r.y() + r.height(); qreal pts[] = { qreal(r.x()), qreal(r.y()), right, qreal(r.y()), right, bottom, qreal(r.x()), bottom, qreal(r.x()), qreal(r.y()) }; QVectorPath vp(pts, 5, nullptr, QVectorPath::RectangleHint); clip(vp, op); } void QPaintEngineEx::clip(const QRegion &region, Qt::ClipOperation op) { const auto rectsInRegion = region.rectCount(); if (rectsInRegion == 1) { clip(*region.begin(), op); } else if (rectsInRegion <= 32) { qreal pts[2*32*4]; int pos = 0; for (QRect r : region) { qreal x1 = r.x(); qreal y1 = r.y(); qreal x2 = r.x() + r.width(); qreal y2 = r.y() + r.height(); pts[pos++] = x1; pts[pos++] = y1; pts[pos++] = x2; pts[pos++] = y1; pts[pos++] = x2; pts[pos++] = y2; pts[pos++] = x1; pts[pos++] = y2; } QVectorPath vp(pts, rectsInRegion * 4, qpaintengineex_rect4_types_32); clip(vp, op); } else { QVarLengthArray<qreal> pts(rectsInRegion * 2 * 4); QVarLengthArray<QPainterPath::ElementType> types(rectsInRegion * 4); int ppos = 0; int tpos = 0; for (QRect r : region) { qreal x1 = r.x(); qreal y1 = r.y(); qreal x2 = r.x() + r.width(); qreal y2 = r.y() + r.height(); 
pts[ppos++] = x1; pts[ppos++] = y1; pts[ppos++] = x2; pts[ppos++] = y1; pts[ppos++] = x2; pts[ppos++] = y2; pts[ppos++] = x1; pts[ppos++] = y2; types[tpos++] = QPainterPath::MoveToElement; types[tpos++] = QPainterPath::LineToElement; types[tpos++] = QPainterPath::LineToElement; types[tpos++] = QPainterPath::LineToElement; } QVectorPath vp(pts.data(), rectsInRegion * 4, types.data()); clip(vp, op); } } void QPaintEngineEx::clip(const QPainterPath &path, Qt::ClipOperation op) { if (path.isEmpty()) { QVectorPath vp(nullptr, 0); clip(vp, op); } else { clip(qtVectorPathForPath(path), op); } } void QPaintEngineEx::fillRect(const QRectF &r, const QBrush &brush) { qreal pts[] = { r.x(), r.y(), r.x() + r.width(), r.y(), r.x() + r.width(), r.y() + r.height(), r.x(), r.y() + r.height() }; QVectorPath vp(pts, 4, nullptr, QVectorPath::RectangleHint); fill(vp, brush); } void QPaintEngineEx::fillRect(const QRectF &r, const QColor &color) { fillRect(r, QBrush(color)); } void QPaintEngineEx::drawRects(const QRect *rects, int rectCount) { for (int i=0; i<rectCount; ++i) { const QRect &r = rects[i]; // ### Is there a one off here? qreal right = r.x() + r.width(); qreal bottom = r.y() + r.height(); qreal pts[] = { qreal(r.x()), qreal(r.y()), right, qreal(r.y()), right, bottom, qreal(r.x()), bottom, qreal(r.x()), qreal(r.y()) }; QVectorPath vp(pts, 5, nullptr, QVectorPath::RectangleHint); draw(vp); } } void QPaintEngineEx::drawRects(const QRectF *rects, int rectCount) { for (int i=0; i<rectCount; ++i) { const QRectF &r = rects[i]; qreal right = r.x() + r.width(); qreal bottom = r.y() + r.height(); qreal pts[] = { r.x(), r.y(), right, r.y(), right, bottom, r.x(), bottom, r.x(), r.y() }; QVectorPath vp(pts, 5, nullptr, QVectorPath::RectangleHint); draw(vp); } } void QPaintEngineEx::drawRoundedRect(const QRectF &rect, qreal xRadius, qreal yRadius, Qt::SizeMode mode) { qreal x1 = rect.left(); qreal x2 = rect.right(); qreal y1 = rect.top(); qreal y2 = rect.bottom(); if (mode == Qt::RelativeSize) { xRadius = xRadius * rect.width() / 200.; yRadius = yRadius * rect.height() / 200.; } xRadius = qMin(xRadius, rect.width() / 2); yRadius = qMin(yRadius, rect.height() / 2); qreal pts[] = { x1 + xRadius, y1, // MoveTo x2 - xRadius, y1, // LineTo x2 - (1 - KAPPA) * xRadius, y1, // CurveTo x2, y1 + (1 - KAPPA) * yRadius, x2, y1 + yRadius, x2, y2 - yRadius, // LineTo x2, y2 - (1 - KAPPA) * yRadius, // CurveTo x2 - (1 - KAPPA) * xRadius, y2, x2 - xRadius, y2, x1 + xRadius, y2, // LineTo x1 + (1 - KAPPA) * xRadius, y2, // CurveTo x1, y2 - (1 - KAPPA) * yRadius, x1, y2 - yRadius, x1, y1 + yRadius, // LineTo x1, y1 + (1 - KAPPA) * yRadius, // CurveTo x1 + (1 - KAPPA) * xRadius, y1, x1 + xRadius, y1 }; QVectorPath path(pts, 17, qpaintengineex_roundedrect_types, QVectorPath::RoundedRectHint); draw(path); } void QPaintEngineEx::drawLines(const QLine *lines, int lineCount) { int elementCount = lineCount << 1; while (elementCount > 0) { int count = qMin(elementCount, 32); qreal pts[64]; int count2 = count<<1; for (int i=0; i<count2; ++i) pts[i] = ((const int *) lines)[i]; QVectorPath path(pts, count, qpaintengineex_line_types_16, QVectorPath::LinesHint); stroke(path, state()->pen); elementCount -= 32; lines += 16; } } void QPaintEngineEx::drawLines(const QLineF *lines, int lineCount) { int elementCount = lineCount << 1; while (elementCount > 0) { int count = qMin(elementCount, 32); QVectorPath path((const qreal *) lines, count, qpaintengineex_line_types_16, QVectorPath::LinesHint); stroke(path, state()->pen); elementCount -= 32; lines 
+= 16; } } void QPaintEngineEx::drawEllipse(const QRectF &r) { qreal pts[26]; // QPointF[13] without constructors... union { qreal *ptr; QPointF *points; } x; x.ptr = pts; int point_count = 0; x.points[0] = qt_curves_for_arc(r, 0, -360, x.points + 1, &point_count); if (point_count == 0) return; QVectorPath vp((qreal *) pts, point_count + 1, qpaintengineex_ellipse_types, QVectorPath::EllipseHint); draw(vp); } void QPaintEngineEx::drawEllipse(const QRect &r) { drawEllipse(QRectF(r)); } void QPaintEngineEx::drawPath(const QPainterPath &path) { if (!path.isEmpty()) draw(qtVectorPathForPath(path)); } void QPaintEngineEx::drawPoints(const QPointF *points, int pointCount) { QPen pen = state()->pen; if (pen.capStyle() == Qt::FlatCap) pen.setCapStyle(Qt::SquareCap); if (pen.brush().isOpaque()) { while (pointCount > 0) { int count = qMin(pointCount, 16); qreal pts[64]; int oset = -1; for (int i=0; i<count; ++i) { pts[++oset] = points[i].x(); pts[++oset] = points[i].y(); pts[++oset] = points[i].x() + 1/63.; pts[++oset] = points[i].y(); } QVectorPath path(pts, count * 2, qpaintengineex_line_types_16, QVectorPath::LinesHint); stroke(path, pen); pointCount -= 16; points += 16; } } else { for (int i=0; i<pointCount; ++i) { qreal pts[] = { points[i].x(), points[i].y(), points[i].x() + qreal(1/63.), points[i].y() }; QVectorPath path(pts, 2, nullptr); stroke(path, pen); } } } void QPaintEngineEx::drawPoints(const QPoint *points, int pointCount) { QPen pen = state()->pen; if (pen.capStyle() == Qt::FlatCap) pen.setCapStyle(Qt::SquareCap); if (pen.brush().isOpaque()) { while (pointCount > 0) { int count = qMin(pointCount, 16); qreal pts[64]; int oset = -1; for (int i=0; i<count; ++i) { pts[++oset] = points[i].x(); pts[++oset] = points[i].y(); pts[++oset] = points[i].x() + 1/63.; pts[++oset] = points[i].y(); } QVectorPath path(pts, count * 2, qpaintengineex_line_types_16, QVectorPath::LinesHint); stroke(path, pen); pointCount -= 16; points += 16; } } else { for (int i=0; i<pointCount; ++i) { qreal pts[] = { qreal(points[i].x()), qreal(points[i].y()), qreal(points[i].x() +1/63.), qreal(points[i].y()) }; QVectorPath path(pts, 2, nullptr); stroke(path, pen); } } } void QPaintEngineEx::drawPolygon(const QPointF *points, int pointCount, PolygonDrawMode mode) { Q_ASSUME(pointCount >= 2); QVectorPath path((const qreal *) points, pointCount, nullptr, QVectorPath::polygonFlags(mode)); if (mode == PolylineMode) stroke(path, state()->pen); else draw(path); } void QPaintEngineEx::drawPolygon(const QPoint *points, int pointCount, PolygonDrawMode mode) { Q_ASSUME(pointCount >= 2); int count = pointCount<<1; QVarLengthArray<qreal> pts(count); for (int i=0; i<count; ++i) pts[i] = ((const int *) points)[i]; QVectorPath path(pts.data(), pointCount, nullptr, QVectorPath::polygonFlags(mode)); if (mode == PolylineMode) stroke(path, state()->pen); else draw(path); } void QPaintEngineEx::drawPixmap(const QPointF &pos, const QPixmap &pm) { drawPixmap(QRectF(pos, pm.size() / pm.devicePixelRatio()), pm, pm.rect()); } void QPaintEngineEx::drawImage(const QPointF &pos, const QImage &image) { drawImage(QRectF(pos, image.size() / image.devicePixelRatio()), image, image.rect()); } void QPaintEngineEx::drawTiledPixmap(const QRectF &r, const QPixmap &pixmap, const QPointF &s) { QBrush brush(state()->pen.color(), pixmap); QTransform xform = QTransform::fromTranslate(r.x() - s.x(), r.y() - s.y()); if (!qFuzzyCompare(pixmap.devicePixelRatio(), qreal(1.0))) xform.scale(1.0/pixmap.devicePixelRatio(), 1.0/pixmap.devicePixelRatio()); 
brush.setTransform(xform); qreal pts[] = { r.x(), r.y(), r.x() + r.width(), r.y(), r.x() + r.width(), r.y() + r.height(), r.x(), r.y() + r.height() }; QVectorPath path(pts, 4, nullptr, QVectorPath::RectangleHint); fill(path, brush); } void QPaintEngineEx::drawPixmapFragments(const QPainter::PixmapFragment *fragments, int fragmentCount, const QPixmap &pixmap, QPainter::PixmapFragmentHints /*hints*/) { if (pixmap.isNull()) return; qreal oldOpacity = state()->opacity; QTransform oldTransform = state()->matrix; for (int i = 0; i < fragmentCount; ++i) { QTransform transform = oldTransform; transform.translate(fragments[i].x, fragments[i].y); transform.rotate(fragments[i].rotation); state()->opacity = oldOpacity * fragments[i].opacity; state()->matrix = transform; opacityChanged(); transformChanged(); qreal w = fragments[i].scaleX * fragments[i].width; qreal h = fragments[i].scaleY * fragments[i].height; QRectF sourceRect(fragments[i].sourceLeft, fragments[i].sourceTop, fragments[i].width, fragments[i].height); drawPixmap(QRectF(-0.5 * w, -0.5 * h, w, h), pixmap, sourceRect); } state()->opacity = oldOpacity; state()->matrix = oldTransform; opacityChanged(); transformChanged(); } void QPaintEngineEx::setState(QPainterState *s) { QPaintEngine::state = s; } void QPaintEngineEx::updateState(const QPaintEngineState &) { // do nothing... } Q_GUI_EXPORT QPainterPath qt_painterPathFromVectorPath(const QVectorPath &path) { const qreal *points = path.points(); const QPainterPath::ElementType *types = path.elements(); QPainterPath p; if (types) { int id = 0; for (int i=0; i<path.elementCount(); ++i) { switch(types[i]) { case QPainterPath::MoveToElement: p.moveTo(QPointF(points[id], points[id+1])); id+=2; break; case QPainterPath::LineToElement: p.lineTo(QPointF(points[id], points[id+1])); id+=2; break; case QPainterPath::CurveToElement: { QPointF p1(points[id], points[id+1]); QPointF p2(points[id+2], points[id+3]); QPointF p3(points[id+4], points[id+5]); p.cubicTo(p1, p2, p3); id+=6; break; } case QPainterPath::CurveToDataElement: ; break; } } } else { p.moveTo(QPointF(points[0], points[1])); int id = 2; for (int i=1; i<path.elementCount(); ++i) { p.lineTo(QPointF(points[id], points[id+1])); id+=2; } } if (path.hints() & QVectorPath::WindingFill) p.setFillRule(Qt::WindingFill); return p; } void QPaintEngineEx::drawStaticTextItem(QStaticTextItem *staticTextItem) { QPainterPath path; path.setFillRule(Qt::WindingFill); if (staticTextItem->numGlyphs == 0) return; QFontEngine *fontEngine = staticTextItem->fontEngine(); fontEngine->addGlyphsToPath(staticTextItem->glyphs, staticTextItem->glyphPositions, staticTextItem->numGlyphs, &path, { }); if (!path.isEmpty()) { QPainterState *s = state(); QPainter::RenderHints oldHints = s->renderHints; bool changedHints = false; if (bool(oldHints & QPainter::TextAntialiasing) && !bool(fontEngine->fontDef.styleStrategy & QFont::NoAntialias) && !bool(oldHints & QPainter::Antialiasing)) { s->renderHints |= QPainter::Antialiasing; renderHintsChanged(); changedHints = true; } fill(qtVectorPathForPath(path), s->pen.brush()); if (changedHints) { s->renderHints = oldHints; renderHintsChanged(); } } } bool QPaintEngineEx::requiresPretransformedGlyphPositions(QFontEngine *, const QTransform &) const { return false; } bool QPaintEngineEx::shouldDrawCachedGlyphs(QFontEngine *fontEngine, const QTransform &m) const { if (fontEngine->glyphFormat == QFontEngine::Format_ARGB) return true; static const int maxCachedGlyphSizeSquared = std::pow([]{ if (int env = 
qEnvironmentVariableIntValue("QT_MAX_CACHED_GLYPH_SIZE")) return env; return QT_MAX_CACHED_GLYPH_SIZE; }(), 2); qreal pixelSize = fontEngine->fontDef.pixelSize; return (pixelSize * pixelSize * qAbs(m.determinant())) <= maxCachedGlyphSizeSquared; } QT_END_NAMESPACE
null
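The two copies of QPaintEngineEx::stroke() in the record above differ only in the dash-pattern fallback test: one variant checks "extent / patternLength > 10000", the other checks "qFuzzyIsNull(extent) || extent / patternLength > 10000", so a (near-)zero visible extent also takes the solid, semi-transparent fallback instead of continuing into dashed stroking with a degenerate extent. The sketch below is a minimal, Qt-free illustration of that decision only; chooseDashFallback() and fuzzyIsNull() are hypothetical stand-ins (the real code uses qFuzzyIsNull() inside stroke()) and are not part of either recorded source.

#include <cmath>
#include <cstdio>

enum class StrokeMode { NoPen, SolidFallback, Dashed };

// Stand-in for Qt's qFuzzyIsNull(); only here to keep the sketch self-contained.
static bool fuzzyIsNull(double d)
{
    return std::fabs(d) <= 1e-12;
}

// extent: longer side of the clipped control-point rect of the path
// patternLength: length of one dash period, already scaled by the pen width
static StrokeMode chooseDashFallback(double extent, double patternLength)
{
    if (fuzzyIsNull(patternLength))
        return StrokeMode::NoPen;          // pen.setStyle(Qt::NoPen) in the original
    // Guarded variant: a (near-)zero extent is treated like "too many dashes"
    // and rendered as a semi-transparent solid line rather than being dashed.
    if (fuzzyIsNull(extent) || extent / patternLength > 10000)
        return StrokeMode::SolidFallback;  // pen.setStyle(Qt::SolidLine) + halved alpha
    return StrokeMode::Dashed;             // normal dashed stroking via the dasher
}

int main()
{
    // extent == 0 (path clipped away): the unguarded test would keep Dashed,
    // the guarded test above returns SolidFallback.
    std::printf("%d\n", static_cast<int>(chooseDashFallback(0.0, 0.004)));
    // Ordinary case: a visible extent with a reasonable pattern stays Dashed.
    std::printf("%d\n", static_cast<int>(chooseDashFallback(500.0, 1.0)));
    return 0;
}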
288
CWE-787
CVE-2021-39218
/**************************************************************************** ** ** Copyright (C) 2016 The Qt Company Ltd. ** Contact: https://www.qt.io/licensing/ ** ** This file is part of the QtGui module of the Qt Toolkit. ** ** $QT_BEGIN_LICENSE:LGPL$ ** Commercial License Usage ** Licensees holding valid commercial Qt licenses may use this file in ** accordance with the commercial license agreement provided with the ** Software or, alternatively, in accordance with the terms contained in ** a written agreement between you and The Qt Company. For licensing terms ** and conditions see https://www.qt.io/terms-conditions. For further ** information use the contact form at https://www.qt.io/contact-us. ** ** GNU Lesser General Public License Usage ** Alternatively, this file may be used under the terms of the GNU Lesser ** General Public License version 3 as published by the Free Software ** Foundation and appearing in the file LICENSE.LGPL3 included in the ** packaging of this file. Please review the following information to ** ensure the GNU Lesser General Public License version 3 requirements ** will be met: https://www.gnu.org/licenses/lgpl-3.0.html. ** ** GNU General Public License Usage ** Alternatively, this file may be used under the terms of the GNU ** General Public License version 2.0 or (at your option) the GNU General ** Public license version 3 or any later version approved by the KDE Free ** Qt Foundation. The licenses are as published by the Free Software ** Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3 ** included in the packaging of this file. Please review the following ** information to ensure the GNU General Public License requirements will ** be met: https://www.gnu.org/licenses/gpl-2.0.html and ** https://www.gnu.org/licenses/gpl-3.0.html. 
** ** $QT_END_LICENSE$ ** ****************************************************************************/ #include "qpaintengineex_p.h" #include "qpainter_p.h" #include "qstroker_p.h" #include "qbezier_p.h" #include <private/qpainterpath_p.h> #include <private/qfontengine_p.h> #include <private/qstatictext_p.h> #include <qvarlengtharray.h> #include <qdebug.h> QT_BEGIN_NAMESPACE #if !defined(QT_MAX_CACHED_GLYPH_SIZE) # define QT_MAX_CACHED_GLYPH_SIZE 64 #endif /******************************************************************************* * * class QVectorPath * */ QVectorPath::~QVectorPath() { if (m_hints & ShouldUseCacheHint) { CacheEntry *e = m_cache; while (e) { if (e->data) e->cleanup(e->engine, e->data); CacheEntry *n = e->next; delete e; e = n; } } } QRectF QVectorPath::controlPointRect() const { if (m_hints & ControlPointRect) return QRectF(QPointF(m_cp_rect.x1, m_cp_rect.y1), QPointF(m_cp_rect.x2, m_cp_rect.y2)); if (m_count == 0) { m_cp_rect.x1 = m_cp_rect.x2 = m_cp_rect.y1 = m_cp_rect.y2 = 0; m_hints |= ControlPointRect; return QRectF(QPointF(m_cp_rect.x1, m_cp_rect.y1), QPointF(m_cp_rect.x2, m_cp_rect.y2)); } Q_ASSERT(m_points && m_count > 0); const qreal *pts = m_points; m_cp_rect.x1 = m_cp_rect.x2 = *pts; ++pts; m_cp_rect.y1 = m_cp_rect.y2 = *pts; ++pts; const qreal *epts = m_points + (m_count << 1); while (pts < epts) { qreal x = *pts; if (x < m_cp_rect.x1) m_cp_rect.x1 = x; else if (x > m_cp_rect.x2) m_cp_rect.x2 = x; ++pts; qreal y = *pts; if (y < m_cp_rect.y1) m_cp_rect.y1 = y; else if (y > m_cp_rect.y2) m_cp_rect.y2 = y; ++pts; } m_hints |= ControlPointRect; return QRectF(QPointF(m_cp_rect.x1, m_cp_rect.y1), QPointF(m_cp_rect.x2, m_cp_rect.y2)); } QVectorPath::CacheEntry *QVectorPath::addCacheData(QPaintEngineEx *engine, void *data, qvectorpath_cache_cleanup cleanup) const{ Q_ASSERT(!lookupCacheData(engine)); if ((m_hints & IsCachedHint) == 0) { m_cache = nullptr; m_hints |= IsCachedHint; } CacheEntry *e = new CacheEntry; e->engine = engine; e->data = data; e->cleanup = cleanup; e->next = m_cache; m_cache = e; return m_cache; } const QVectorPath &qtVectorPathForPath(const QPainterPath &path) { Q_ASSERT(path.d_func()); return path.d_func()->vectorPath(); } #ifndef QT_NO_DEBUG_STREAM QDebug Q_GUI_EXPORT &operator<<(QDebug &s, const QVectorPath &path) { QDebugStateSaver saver(s); QRectF rf = path.controlPointRect(); s << "QVectorPath(size:" << path.elementCount() << " hints:" << Qt::hex << path.hints() << rf << ')'; return s; } #endif /******************************************************************************* * * class QPaintEngineExPrivate: * */ struct StrokeHandler { StrokeHandler(int reserve) : pts(reserve), types(reserve) {} QDataBuffer<qreal> pts; QDataBuffer<QPainterPath::ElementType> types; }; QPaintEngineExPrivate::QPaintEngineExPrivate() : dasher(&stroker), strokeHandler(nullptr), activeStroker(nullptr), strokerPen(Qt::NoPen) { } QPaintEngineExPrivate::~QPaintEngineExPrivate() { delete strokeHandler; } void QPaintEngineExPrivate::replayClipOperations() { Q_Q(QPaintEngineEx); QPainter *p = q->painter(); if (!p || !p->d_ptr) return; const QList<QPainterClipInfo> &clipInfo = p->d_ptr->state->clipInfo; QTransform transform = q->state()->matrix; for (const QPainterClipInfo &info : clipInfo) { if (info.matrix != q->state()->matrix) { q->state()->matrix = info.matrix; q->transformChanged(); } switch (info.clipType) { case QPainterClipInfo::RegionClip: q->clip(info.region, info.operation); break; case QPainterClipInfo::PathClip: q->clip(info.path, info.operation); 
break; case QPainterClipInfo::RectClip: q->clip(info.rect, info.operation); break; case QPainterClipInfo::RectFClip: { qreal right = info.rectf.x() + info.rectf.width(); qreal bottom = info.rectf.y() + info.rectf.height(); qreal pts[] = { info.rectf.x(), info.rectf.y(), right, info.rectf.y(), right, bottom, info.rectf.x(), bottom }; QVectorPath vp(pts, 4, nullptr, QVectorPath::RectangleHint); q->clip(vp, info.operation); break; } } } if (transform != q->state()->matrix) { q->state()->matrix = transform; q->transformChanged(); } } bool QPaintEngineExPrivate::hasClipOperations() const { Q_Q(const QPaintEngineEx); QPainter *p = q->painter(); if (!p || !p->d_ptr) return false; return !p->d_ptr->state->clipInfo.isEmpty(); } /******************************************************************************* * * class QPaintEngineEx: * */ static const QPainterPath::ElementType qpaintengineex_ellipse_types[] = { QPainterPath::MoveToElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement }; static const QPainterPath::ElementType qpaintengineex_line_types_16[] = { QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement }; static const QPainterPath::ElementType qpaintengineex_rect4_types_32[] = { QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 1 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 2 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 3 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 4 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 5 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 6 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 7 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 8 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 9 QPainterPath::MoveToElement, 
QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 10 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 11 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 12 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 13 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 14 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 15 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 16 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 17 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 18 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 19 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 20 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 21 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 22 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 23 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 24 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 25 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 26 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 27 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 28 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 29 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 30 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 31 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 32 }; static const QPainterPath::ElementType qpaintengineex_roundedrect_types[] = { QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::LineToElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::LineToElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::LineToElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement }; static void qpaintengineex_moveTo(qreal x, qreal y, void *data) { ((StrokeHandler *) data)->pts.add(x); ((StrokeHandler *) data)->pts.add(y); ((StrokeHandler 
*) data)->types.add(QPainterPath::MoveToElement); } static void qpaintengineex_lineTo(qreal x, qreal y, void *data) { ((StrokeHandler *) data)->pts.add(x); ((StrokeHandler *) data)->pts.add(y); ((StrokeHandler *) data)->types.add(QPainterPath::LineToElement); } static void qpaintengineex_cubicTo(qreal c1x, qreal c1y, qreal c2x, qreal c2y, qreal ex, qreal ey, void *data) { ((StrokeHandler *) data)->pts.add(c1x); ((StrokeHandler *) data)->pts.add(c1y); ((StrokeHandler *) data)->types.add(QPainterPath::CurveToElement); ((StrokeHandler *) data)->pts.add(c2x); ((StrokeHandler *) data)->pts.add(c2y); ((StrokeHandler *) data)->types.add(QPainterPath::CurveToDataElement); ((StrokeHandler *) data)->pts.add(ex); ((StrokeHandler *) data)->pts.add(ey); ((StrokeHandler *) data)->types.add(QPainterPath::CurveToDataElement); } QPaintEngineEx::QPaintEngineEx() : QPaintEngine(*new QPaintEngineExPrivate, AllFeatures) { extended = true; } QPaintEngineEx::QPaintEngineEx(QPaintEngineExPrivate &data) : QPaintEngine(data, AllFeatures) { extended = true; } QPainterState *QPaintEngineEx::createState(QPainterState *orig) const { if (!orig) return new QPainterState; return new QPainterState(orig); } Q_GUI_EXPORT extern bool qt_scaleForTransform(const QTransform &transform, qreal *scale); // qtransform.cpp void QPaintEngineEx::stroke(const QVectorPath &path, const QPen &inPen) { #ifdef QT_DEBUG_DRAW qDebug() << "QPaintEngineEx::stroke()" << pen; #endif Q_D(QPaintEngineEx); if (path.isEmpty()) return; if (!d->strokeHandler) { d->strokeHandler = new StrokeHandler(path.elementCount()+4); d->stroker.setMoveToHook(qpaintengineex_moveTo); d->stroker.setLineToHook(qpaintengineex_lineTo); d->stroker.setCubicToHook(qpaintengineex_cubicTo); } QRectF clipRect; QPen pen = inPen; if (pen.style() > Qt::SolidLine) { QRectF cpRect = path.controlPointRect(); const QTransform &xf = state()->matrix; if (pen.isCosmetic()) { clipRect = d->exDeviceRect; cpRect.translate(xf.dx(), xf.dy()); } else { clipRect = xf.inverted().mapRect(QRectF(d->exDeviceRect)); } // Check to avoid generating unwieldy amount of dashes that will not be visible anyway QRectF extentRect = cpRect & clipRect; qreal extent = qMax(extentRect.width(), extentRect.height()); qreal patternLength = 0; const QList<qreal> pattern = pen.dashPattern(); const int patternSize = qMin(pattern.size(), 32); for (int i = 0; i < patternSize; i++) patternLength += qMax(pattern.at(i), qreal(0)); if (pen.widthF()) patternLength *= pen.widthF(); if (qFuzzyIsNull(patternLength)) { pen.setStyle(Qt::NoPen); } else if (extent / patternLength > 10000) { // approximate stream of tiny dashes with semi-transparent solid line pen.setStyle(Qt::SolidLine); QColor color(pen.color()); color.setAlpha(color.alpha() / 2); pen.setColor(color); } } if (!qpen_fast_equals(pen, d->strokerPen)) { d->strokerPen = pen; d->stroker.setJoinStyle(pen.joinStyle()); d->stroker.setCapStyle(pen.capStyle()); d->stroker.setMiterLimit(pen.miterLimit()); qreal penWidth = pen.widthF(); if (penWidth == 0) d->stroker.setStrokeWidth(1); else d->stroker.setStrokeWidth(penWidth); Qt::PenStyle style = pen.style(); if (style == Qt::SolidLine) { d->activeStroker = &d->stroker; } else if (style == Qt::NoPen) { d->activeStroker = nullptr; } else { d->dasher.setDashPattern(pen.dashPattern()); d->dasher.setDashOffset(pen.dashOffset()); d->activeStroker = &d->dasher; } } if (!d->activeStroker) { return; } if (!clipRect.isNull()) d->activeStroker->setClipRect(clipRect); if (d->activeStroker == &d->stroker) 
d->stroker.setForceOpen(path.hasExplicitOpen()); const QPainterPath::ElementType *types = path.elements(); const qreal *points = path.points(); int pointCount = path.elementCount(); const qreal *lastPoint = points + (pointCount<<1); d->strokeHandler->types.reset(); d->strokeHandler->pts.reset(); // Some engines might decide to optimize for the non-shape hint later on... uint flags = QVectorPath::WindingFill; if (path.elementCount() > 2) flags |= QVectorPath::NonConvexShapeMask; if (d->stroker.capStyle() == Qt::RoundCap || d->stroker.joinStyle() == Qt::RoundJoin) flags |= QVectorPath::CurvedShapeMask; // ### Perspective Xforms are currently not supported... if (!pen.isCosmetic()) { // We include cosmetic pens in this case to avoid having to // change the current transform. Normal transformed, // non-cosmetic pens will be transformed as part of fill // later, so they are also covered here.. d->activeStroker->setCurveThresholdFromTransform(state()->matrix); d->activeStroker->begin(d->strokeHandler); if (types) { while (points < lastPoint) { switch (*types) { case QPainterPath::MoveToElement: d->activeStroker->moveTo(points[0], points[1]); points += 2; ++types; break; case QPainterPath::LineToElement: d->activeStroker->lineTo(points[0], points[1]); points += 2; ++types; break; case QPainterPath::CurveToElement: d->activeStroker->cubicTo(points[0], points[1], points[2], points[3], points[4], points[5]); points += 6; types += 3; flags |= QVectorPath::CurvedShapeMask; break; default: break; } } if (path.hasImplicitClose()) d->activeStroker->lineTo(path.points()[0], path.points()[1]); } else { d->activeStroker->moveTo(points[0], points[1]); points += 2; while (points < lastPoint) { d->activeStroker->lineTo(points[0], points[1]); points += 2; } if (path.hasImplicitClose()) d->activeStroker->lineTo(path.points()[0], path.points()[1]); } d->activeStroker->end(); if (!d->strokeHandler->types.size()) // an empty path... return; QVectorPath strokePath(d->strokeHandler->pts.data(), d->strokeHandler->types.size(), d->strokeHandler->types.data(), flags); fill(strokePath, pen.brush()); } else { // For cosmetic pens we need a bit of trickery... 
We to process xform the input points if (state()->matrix.type() >= QTransform::TxProject) { QPainterPath painterPath = state()->matrix.map(path.convertToPainterPath()); d->activeStroker->strokePath(painterPath, d->strokeHandler, QTransform()); } else { d->activeStroker->setCurveThresholdFromTransform(QTransform()); d->activeStroker->begin(d->strokeHandler); if (types) { while (points < lastPoint) { switch (*types) { case QPainterPath::MoveToElement: { QPointF pt = (*(const QPointF *) points) * state()->matrix; d->activeStroker->moveTo(pt.x(), pt.y()); points += 2; ++types; break; } case QPainterPath::LineToElement: { QPointF pt = (*(const QPointF *) points) * state()->matrix; d->activeStroker->lineTo(pt.x(), pt.y()); points += 2; ++types; break; } case QPainterPath::CurveToElement: { QPointF c1 = ((const QPointF *) points)[0] * state()->matrix; QPointF c2 = ((const QPointF *) points)[1] * state()->matrix; QPointF e = ((const QPointF *) points)[2] * state()->matrix; d->activeStroker->cubicTo(c1.x(), c1.y(), c2.x(), c2.y(), e.x(), e.y()); points += 6; types += 3; flags |= QVectorPath::CurvedShapeMask; break; } default: break; } } if (path.hasImplicitClose()) { QPointF pt = * ((const QPointF *) path.points()) * state()->matrix; d->activeStroker->lineTo(pt.x(), pt.y()); } } else { QPointF p = ((const QPointF *)points)[0] * state()->matrix; d->activeStroker->moveTo(p.x(), p.y()); points += 2; while (points < lastPoint) { QPointF p = ((const QPointF *)points)[0] * state()->matrix; d->activeStroker->lineTo(p.x(), p.y()); points += 2; } if (path.hasImplicitClose()) d->activeStroker->lineTo(p.x(), p.y()); } d->activeStroker->end(); } QVectorPath strokePath(d->strokeHandler->pts.data(), d->strokeHandler->types.size(), d->strokeHandler->types.data(), flags); QTransform xform = state()->matrix; state()->matrix = QTransform(); transformChanged(); QBrush brush = pen.brush(); if (qbrush_style(brush) != Qt::SolidPattern) brush.setTransform(brush.transform() * xform); fill(strokePath, brush); state()->matrix = xform; transformChanged(); } } void QPaintEngineEx::draw(const QVectorPath &path) { const QBrush &brush = state()->brush; if (qbrush_style(brush) != Qt::NoBrush) fill(path, brush); const QPen &pen = state()->pen; if (qpen_style(pen) != Qt::NoPen && qbrush_style(qpen_brush(pen)) != Qt::NoBrush) stroke(path, pen); } void QPaintEngineEx::clip(const QRect &r, Qt::ClipOperation op) { qreal right = r.x() + r.width(); qreal bottom = r.y() + r.height(); qreal pts[] = { qreal(r.x()), qreal(r.y()), right, qreal(r.y()), right, bottom, qreal(r.x()), bottom, qreal(r.x()), qreal(r.y()) }; QVectorPath vp(pts, 5, nullptr, QVectorPath::RectangleHint); clip(vp, op); } void QPaintEngineEx::clip(const QRegion &region, Qt::ClipOperation op) { const auto rectsInRegion = region.rectCount(); if (rectsInRegion == 1) { clip(*region.begin(), op); } else if (rectsInRegion <= 32) { qreal pts[2*32*4]; int pos = 0; for (QRect r : region) { qreal x1 = r.x(); qreal y1 = r.y(); qreal x2 = r.x() + r.width(); qreal y2 = r.y() + r.height(); pts[pos++] = x1; pts[pos++] = y1; pts[pos++] = x2; pts[pos++] = y1; pts[pos++] = x2; pts[pos++] = y2; pts[pos++] = x1; pts[pos++] = y2; } QVectorPath vp(pts, rectsInRegion * 4, qpaintengineex_rect4_types_32); clip(vp, op); } else { QVarLengthArray<qreal> pts(rectsInRegion * 2 * 4); QVarLengthArray<QPainterPath::ElementType> types(rectsInRegion * 4); int ppos = 0; int tpos = 0; for (QRect r : region) { qreal x1 = r.x(); qreal y1 = r.y(); qreal x2 = r.x() + r.width(); qreal y2 = r.y() + r.height(); 
pts[ppos++] = x1; pts[ppos++] = y1; pts[ppos++] = x2; pts[ppos++] = y1; pts[ppos++] = x2; pts[ppos++] = y2; pts[ppos++] = x1; pts[ppos++] = y2; types[tpos++] = QPainterPath::MoveToElement; types[tpos++] = QPainterPath::LineToElement; types[tpos++] = QPainterPath::LineToElement; types[tpos++] = QPainterPath::LineToElement; } QVectorPath vp(pts.data(), rectsInRegion * 4, types.data()); clip(vp, op); } } void QPaintEngineEx::clip(const QPainterPath &path, Qt::ClipOperation op) { if (path.isEmpty()) { QVectorPath vp(nullptr, 0); clip(vp, op); } else { clip(qtVectorPathForPath(path), op); } } void QPaintEngineEx::fillRect(const QRectF &r, const QBrush &brush) { qreal pts[] = { r.x(), r.y(), r.x() + r.width(), r.y(), r.x() + r.width(), r.y() + r.height(), r.x(), r.y() + r.height() }; QVectorPath vp(pts, 4, nullptr, QVectorPath::RectangleHint); fill(vp, brush); } void QPaintEngineEx::fillRect(const QRectF &r, const QColor &color) { fillRect(r, QBrush(color)); } void QPaintEngineEx::drawRects(const QRect *rects, int rectCount) { for (int i=0; i<rectCount; ++i) { const QRect &r = rects[i]; // ### Is there a one off here? qreal right = r.x() + r.width(); qreal bottom = r.y() + r.height(); qreal pts[] = { qreal(r.x()), qreal(r.y()), right, qreal(r.y()), right, bottom, qreal(r.x()), bottom, qreal(r.x()), qreal(r.y()) }; QVectorPath vp(pts, 5, nullptr, QVectorPath::RectangleHint); draw(vp); } } void QPaintEngineEx::drawRects(const QRectF *rects, int rectCount) { for (int i=0; i<rectCount; ++i) { const QRectF &r = rects[i]; qreal right = r.x() + r.width(); qreal bottom = r.y() + r.height(); qreal pts[] = { r.x(), r.y(), right, r.y(), right, bottom, r.x(), bottom, r.x(), r.y() }; QVectorPath vp(pts, 5, nullptr, QVectorPath::RectangleHint); draw(vp); } } void QPaintEngineEx::drawRoundedRect(const QRectF &rect, qreal xRadius, qreal yRadius, Qt::SizeMode mode) { qreal x1 = rect.left(); qreal x2 = rect.right(); qreal y1 = rect.top(); qreal y2 = rect.bottom(); if (mode == Qt::RelativeSize) { xRadius = xRadius * rect.width() / 200.; yRadius = yRadius * rect.height() / 200.; } xRadius = qMin(xRadius, rect.width() / 2); yRadius = qMin(yRadius, rect.height() / 2); qreal pts[] = { x1 + xRadius, y1, // MoveTo x2 - xRadius, y1, // LineTo x2 - (1 - KAPPA) * xRadius, y1, // CurveTo x2, y1 + (1 - KAPPA) * yRadius, x2, y1 + yRadius, x2, y2 - yRadius, // LineTo x2, y2 - (1 - KAPPA) * yRadius, // CurveTo x2 - (1 - KAPPA) * xRadius, y2, x2 - xRadius, y2, x1 + xRadius, y2, // LineTo x1 + (1 - KAPPA) * xRadius, y2, // CurveTo x1, y2 - (1 - KAPPA) * yRadius, x1, y2 - yRadius, x1, y1 + yRadius, // LineTo x1, y1 + (1 - KAPPA) * yRadius, // CurveTo x1 + (1 - KAPPA) * xRadius, y1, x1 + xRadius, y1 }; QVectorPath path(pts, 17, qpaintengineex_roundedrect_types, QVectorPath::RoundedRectHint); draw(path); } void QPaintEngineEx::drawLines(const QLine *lines, int lineCount) { int elementCount = lineCount << 1; while (elementCount > 0) { int count = qMin(elementCount, 32); qreal pts[64]; int count2 = count<<1; for (int i=0; i<count2; ++i) pts[i] = ((const int *) lines)[i]; QVectorPath path(pts, count, qpaintengineex_line_types_16, QVectorPath::LinesHint); stroke(path, state()->pen); elementCount -= 32; lines += 16; } } void QPaintEngineEx::drawLines(const QLineF *lines, int lineCount) { int elementCount = lineCount << 1; while (elementCount > 0) { int count = qMin(elementCount, 32); QVectorPath path((const qreal *) lines, count, qpaintengineex_line_types_16, QVectorPath::LinesHint); stroke(path, state()->pen); elementCount -= 32; lines 
+= 16; } } void QPaintEngineEx::drawEllipse(const QRectF &r) { qreal pts[26]; // QPointF[13] without constructors... union { qreal *ptr; QPointF *points; } x; x.ptr = pts; int point_count = 0; x.points[0] = qt_curves_for_arc(r, 0, -360, x.points + 1, &point_count); if (point_count == 0) return; QVectorPath vp((qreal *) pts, point_count + 1, qpaintengineex_ellipse_types, QVectorPath::EllipseHint); draw(vp); } void QPaintEngineEx::drawEllipse(const QRect &r) { drawEllipse(QRectF(r)); } void QPaintEngineEx::drawPath(const QPainterPath &path) { if (!path.isEmpty()) draw(qtVectorPathForPath(path)); } void QPaintEngineEx::drawPoints(const QPointF *points, int pointCount) { QPen pen = state()->pen; if (pen.capStyle() == Qt::FlatCap) pen.setCapStyle(Qt::SquareCap); if (pen.brush().isOpaque()) { while (pointCount > 0) { int count = qMin(pointCount, 16); qreal pts[64]; int oset = -1; for (int i=0; i<count; ++i) { pts[++oset] = points[i].x(); pts[++oset] = points[i].y(); pts[++oset] = points[i].x() + 1/63.; pts[++oset] = points[i].y(); } QVectorPath path(pts, count * 2, qpaintengineex_line_types_16, QVectorPath::LinesHint); stroke(path, pen); pointCount -= 16; points += 16; } } else { for (int i=0; i<pointCount; ++i) { qreal pts[] = { points[i].x(), points[i].y(), points[i].x() + qreal(1/63.), points[i].y() }; QVectorPath path(pts, 2, nullptr); stroke(path, pen); } } } void QPaintEngineEx::drawPoints(const QPoint *points, int pointCount) { QPen pen = state()->pen; if (pen.capStyle() == Qt::FlatCap) pen.setCapStyle(Qt::SquareCap); if (pen.brush().isOpaque()) { while (pointCount > 0) { int count = qMin(pointCount, 16); qreal pts[64]; int oset = -1; for (int i=0; i<count; ++i) { pts[++oset] = points[i].x(); pts[++oset] = points[i].y(); pts[++oset] = points[i].x() + 1/63.; pts[++oset] = points[i].y(); } QVectorPath path(pts, count * 2, qpaintengineex_line_types_16, QVectorPath::LinesHint); stroke(path, pen); pointCount -= 16; points += 16; } } else { for (int i=0; i<pointCount; ++i) { qreal pts[] = { qreal(points[i].x()), qreal(points[i].y()), qreal(points[i].x() +1/63.), qreal(points[i].y()) }; QVectorPath path(pts, 2, nullptr); stroke(path, pen); } } } void QPaintEngineEx::drawPolygon(const QPointF *points, int pointCount, PolygonDrawMode mode) { Q_ASSUME(pointCount >= 2); QVectorPath path((const qreal *) points, pointCount, nullptr, QVectorPath::polygonFlags(mode)); if (mode == PolylineMode) stroke(path, state()->pen); else draw(path); } void QPaintEngineEx::drawPolygon(const QPoint *points, int pointCount, PolygonDrawMode mode) { Q_ASSUME(pointCount >= 2); int count = pointCount<<1; QVarLengthArray<qreal> pts(count); for (int i=0; i<count; ++i) pts[i] = ((const int *) points)[i]; QVectorPath path(pts.data(), pointCount, nullptr, QVectorPath::polygonFlags(mode)); if (mode == PolylineMode) stroke(path, state()->pen); else draw(path); } void QPaintEngineEx::drawPixmap(const QPointF &pos, const QPixmap &pm) { drawPixmap(QRectF(pos, pm.size() / pm.devicePixelRatio()), pm, pm.rect()); } void QPaintEngineEx::drawImage(const QPointF &pos, const QImage &image) { drawImage(QRectF(pos, image.size() / image.devicePixelRatio()), image, image.rect()); } void QPaintEngineEx::drawTiledPixmap(const QRectF &r, const QPixmap &pixmap, const QPointF &s) { QBrush brush(state()->pen.color(), pixmap); QTransform xform = QTransform::fromTranslate(r.x() - s.x(), r.y() - s.y()); if (!qFuzzyCompare(pixmap.devicePixelRatio(), qreal(1.0))) xform.scale(1.0/pixmap.devicePixelRatio(), 1.0/pixmap.devicePixelRatio()); 
brush.setTransform(xform); qreal pts[] = { r.x(), r.y(), r.x() + r.width(), r.y(), r.x() + r.width(), r.y() + r.height(), r.x(), r.y() + r.height() }; QVectorPath path(pts, 4, nullptr, QVectorPath::RectangleHint); fill(path, brush); } void QPaintEngineEx::drawPixmapFragments(const QPainter::PixmapFragment *fragments, int fragmentCount, const QPixmap &pixmap, QPainter::PixmapFragmentHints /*hints*/) { if (pixmap.isNull()) return; qreal oldOpacity = state()->opacity; QTransform oldTransform = state()->matrix; for (int i = 0; i < fragmentCount; ++i) { QTransform transform = oldTransform; transform.translate(fragments[i].x, fragments[i].y); transform.rotate(fragments[i].rotation); state()->opacity = oldOpacity * fragments[i].opacity; state()->matrix = transform; opacityChanged(); transformChanged(); qreal w = fragments[i].scaleX * fragments[i].width; qreal h = fragments[i].scaleY * fragments[i].height; QRectF sourceRect(fragments[i].sourceLeft, fragments[i].sourceTop, fragments[i].width, fragments[i].height); drawPixmap(QRectF(-0.5 * w, -0.5 * h, w, h), pixmap, sourceRect); } state()->opacity = oldOpacity; state()->matrix = oldTransform; opacityChanged(); transformChanged(); } void QPaintEngineEx::setState(QPainterState *s) { QPaintEngine::state = s; } void QPaintEngineEx::updateState(const QPaintEngineState &) { // do nothing... } Q_GUI_EXPORT QPainterPath qt_painterPathFromVectorPath(const QVectorPath &path) { const qreal *points = path.points(); const QPainterPath::ElementType *types = path.elements(); QPainterPath p; if (types) { int id = 0; for (int i=0; i<path.elementCount(); ++i) { switch(types[i]) { case QPainterPath::MoveToElement: p.moveTo(QPointF(points[id], points[id+1])); id+=2; break; case QPainterPath::LineToElement: p.lineTo(QPointF(points[id], points[id+1])); id+=2; break; case QPainterPath::CurveToElement: { QPointF p1(points[id], points[id+1]); QPointF p2(points[id+2], points[id+3]); QPointF p3(points[id+4], points[id+5]); p.cubicTo(p1, p2, p3); id+=6; break; } case QPainterPath::CurveToDataElement: ; break; } } } else { p.moveTo(QPointF(points[0], points[1])); int id = 2; for (int i=1; i<path.elementCount(); ++i) { p.lineTo(QPointF(points[id], points[id+1])); id+=2; } } if (path.hints() & QVectorPath::WindingFill) p.setFillRule(Qt::WindingFill); return p; } void QPaintEngineEx::drawStaticTextItem(QStaticTextItem *staticTextItem) { QPainterPath path; path.setFillRule(Qt::WindingFill); if (staticTextItem->numGlyphs == 0) return; QFontEngine *fontEngine = staticTextItem->fontEngine(); fontEngine->addGlyphsToPath(staticTextItem->glyphs, staticTextItem->glyphPositions, staticTextItem->numGlyphs, &path, { }); if (!path.isEmpty()) { QPainterState *s = state(); QPainter::RenderHints oldHints = s->renderHints; bool changedHints = false; if (bool(oldHints & QPainter::TextAntialiasing) && !bool(fontEngine->fontDef.styleStrategy & QFont::NoAntialias) && !bool(oldHints & QPainter::Antialiasing)) { s->renderHints |= QPainter::Antialiasing; renderHintsChanged(); changedHints = true; } fill(qtVectorPathForPath(path), s->pen.brush()); if (changedHints) { s->renderHints = oldHints; renderHintsChanged(); } } } bool QPaintEngineEx::requiresPretransformedGlyphPositions(QFontEngine *, const QTransform &) const { return false; } bool QPaintEngineEx::shouldDrawCachedGlyphs(QFontEngine *fontEngine, const QTransform &m) const { if (fontEngine->glyphFormat == QFontEngine::Format_ARGB) return true; static const int maxCachedGlyphSizeSquared = std::pow([]{ if (int env = 
qEnvironmentVariableIntValue("QT_MAX_CACHED_GLYPH_SIZE")) return env; return QT_MAX_CACHED_GLYPH_SIZE; }(), 2); qreal pixelSize = fontEngine->fontDef.pixelSize; return (pixelSize * pixelSize * qAbs(m.determinant())) <= maxCachedGlyphSizeSquared; } QT_END_NAMESPACE
null
/**************************************************************************** ** ** Copyright (C) 2016 The Qt Company Ltd. ** Contact: https://www.qt.io/licensing/ ** ** This file is part of the QtGui module of the Qt Toolkit. ** ** $QT_BEGIN_LICENSE:LGPL$ ** Commercial License Usage ** Licensees holding valid commercial Qt licenses may use this file in ** accordance with the commercial license agreement provided with the ** Software or, alternatively, in accordance with the terms contained in ** a written agreement between you and The Qt Company. For licensing terms ** and conditions see https://www.qt.io/terms-conditions. For further ** information use the contact form at https://www.qt.io/contact-us. ** ** GNU Lesser General Public License Usage ** Alternatively, this file may be used under the terms of the GNU Lesser ** General Public License version 3 as published by the Free Software ** Foundation and appearing in the file LICENSE.LGPL3 included in the ** packaging of this file. Please review the following information to ** ensure the GNU Lesser General Public License version 3 requirements ** will be met: https://www.gnu.org/licenses/lgpl-3.0.html. ** ** GNU General Public License Usage ** Alternatively, this file may be used under the terms of the GNU ** General Public License version 2.0 or (at your option) the GNU General ** Public license version 3 or any later version approved by the KDE Free ** Qt Foundation. The licenses are as published by the Free Software ** Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3 ** included in the packaging of this file. Please review the following ** information to ensure the GNU General Public License requirements will ** be met: https://www.gnu.org/licenses/gpl-2.0.html and ** https://www.gnu.org/licenses/gpl-3.0.html. 
** ** $QT_END_LICENSE$ ** ****************************************************************************/ #include "qpaintengineex_p.h" #include "qpainter_p.h" #include "qstroker_p.h" #include "qbezier_p.h" #include <private/qpainterpath_p.h> #include <private/qfontengine_p.h> #include <private/qstatictext_p.h> #include <qvarlengtharray.h> #include <qdebug.h> QT_BEGIN_NAMESPACE #if !defined(QT_MAX_CACHED_GLYPH_SIZE) # define QT_MAX_CACHED_GLYPH_SIZE 64 #endif /******************************************************************************* * * class QVectorPath * */ QVectorPath::~QVectorPath() { if (m_hints & ShouldUseCacheHint) { CacheEntry *e = m_cache; while (e) { if (e->data) e->cleanup(e->engine, e->data); CacheEntry *n = e->next; delete e; e = n; } } } QRectF QVectorPath::controlPointRect() const { if (m_hints & ControlPointRect) return QRectF(QPointF(m_cp_rect.x1, m_cp_rect.y1), QPointF(m_cp_rect.x2, m_cp_rect.y2)); if (m_count == 0) { m_cp_rect.x1 = m_cp_rect.x2 = m_cp_rect.y1 = m_cp_rect.y2 = 0; m_hints |= ControlPointRect; return QRectF(QPointF(m_cp_rect.x1, m_cp_rect.y1), QPointF(m_cp_rect.x2, m_cp_rect.y2)); } Q_ASSERT(m_points && m_count > 0); const qreal *pts = m_points; m_cp_rect.x1 = m_cp_rect.x2 = *pts; ++pts; m_cp_rect.y1 = m_cp_rect.y2 = *pts; ++pts; const qreal *epts = m_points + (m_count << 1); while (pts < epts) { qreal x = *pts; if (x < m_cp_rect.x1) m_cp_rect.x1 = x; else if (x > m_cp_rect.x2) m_cp_rect.x2 = x; ++pts; qreal y = *pts; if (y < m_cp_rect.y1) m_cp_rect.y1 = y; else if (y > m_cp_rect.y2) m_cp_rect.y2 = y; ++pts; } m_hints |= ControlPointRect; return QRectF(QPointF(m_cp_rect.x1, m_cp_rect.y1), QPointF(m_cp_rect.x2, m_cp_rect.y2)); } QVectorPath::CacheEntry *QVectorPath::addCacheData(QPaintEngineEx *engine, void *data, qvectorpath_cache_cleanup cleanup) const{ Q_ASSERT(!lookupCacheData(engine)); if ((m_hints & IsCachedHint) == 0) { m_cache = nullptr; m_hints |= IsCachedHint; } CacheEntry *e = new CacheEntry; e->engine = engine; e->data = data; e->cleanup = cleanup; e->next = m_cache; m_cache = e; return m_cache; } const QVectorPath &qtVectorPathForPath(const QPainterPath &path) { Q_ASSERT(path.d_func()); return path.d_func()->vectorPath(); } #ifndef QT_NO_DEBUG_STREAM QDebug Q_GUI_EXPORT &operator<<(QDebug &s, const QVectorPath &path) { QDebugStateSaver saver(s); QRectF rf = path.controlPointRect(); s << "QVectorPath(size:" << path.elementCount() << " hints:" << Qt::hex << path.hints() << rf << ')'; return s; } #endif /******************************************************************************* * * class QPaintEngineExPrivate: * */ struct StrokeHandler { StrokeHandler(int reserve) : pts(reserve), types(reserve) {} QDataBuffer<qreal> pts; QDataBuffer<QPainterPath::ElementType> types; }; QPaintEngineExPrivate::QPaintEngineExPrivate() : dasher(&stroker), strokeHandler(nullptr), activeStroker(nullptr), strokerPen(Qt::NoPen) { } QPaintEngineExPrivate::~QPaintEngineExPrivate() { delete strokeHandler; } void QPaintEngineExPrivate::replayClipOperations() { Q_Q(QPaintEngineEx); QPainter *p = q->painter(); if (!p || !p->d_ptr) return; const QList<QPainterClipInfo> &clipInfo = p->d_ptr->state->clipInfo; QTransform transform = q->state()->matrix; for (const QPainterClipInfo &info : clipInfo) { if (info.matrix != q->state()->matrix) { q->state()->matrix = info.matrix; q->transformChanged(); } switch (info.clipType) { case QPainterClipInfo::RegionClip: q->clip(info.region, info.operation); break; case QPainterClipInfo::PathClip: q->clip(info.path, info.operation); 
break; case QPainterClipInfo::RectClip: q->clip(info.rect, info.operation); break; case QPainterClipInfo::RectFClip: { qreal right = info.rectf.x() + info.rectf.width(); qreal bottom = info.rectf.y() + info.rectf.height(); qreal pts[] = { info.rectf.x(), info.rectf.y(), right, info.rectf.y(), right, bottom, info.rectf.x(), bottom }; QVectorPath vp(pts, 4, nullptr, QVectorPath::RectangleHint); q->clip(vp, info.operation); break; } } } if (transform != q->state()->matrix) { q->state()->matrix = transform; q->transformChanged(); } } bool QPaintEngineExPrivate::hasClipOperations() const { Q_Q(const QPaintEngineEx); QPainter *p = q->painter(); if (!p || !p->d_ptr) return false; return !p->d_ptr->state->clipInfo.isEmpty(); } /******************************************************************************* * * class QPaintEngineEx: * */ static const QPainterPath::ElementType qpaintengineex_ellipse_types[] = { QPainterPath::MoveToElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement }; static const QPainterPath::ElementType qpaintengineex_line_types_16[] = { QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::MoveToElement, QPainterPath::LineToElement }; static const QPainterPath::ElementType qpaintengineex_rect4_types_32[] = { QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 1 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 2 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 3 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 4 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 5 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 6 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 7 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 8 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 9 QPainterPath::MoveToElement, 
QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 10 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 11 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 12 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 13 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 14 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 15 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 16 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 17 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 18 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 19 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 20 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 21 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 22 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 23 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 24 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 25 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 26 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 27 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 28 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 29 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 30 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 31 QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, QPainterPath::LineToElement, // 32 }; static const QPainterPath::ElementType qpaintengineex_roundedrect_types[] = { QPainterPath::MoveToElement, QPainterPath::LineToElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::LineToElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::LineToElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement, QPainterPath::LineToElement, QPainterPath::CurveToElement, QPainterPath::CurveToDataElement, QPainterPath::CurveToDataElement }; static void qpaintengineex_moveTo(qreal x, qreal y, void *data) { ((StrokeHandler *) data)->pts.add(x); ((StrokeHandler *) data)->pts.add(y); ((StrokeHandler 
*) data)->types.add(QPainterPath::MoveToElement); } static void qpaintengineex_lineTo(qreal x, qreal y, void *data) { ((StrokeHandler *) data)->pts.add(x); ((StrokeHandler *) data)->pts.add(y); ((StrokeHandler *) data)->types.add(QPainterPath::LineToElement); } static void qpaintengineex_cubicTo(qreal c1x, qreal c1y, qreal c2x, qreal c2y, qreal ex, qreal ey, void *data) { ((StrokeHandler *) data)->pts.add(c1x); ((StrokeHandler *) data)->pts.add(c1y); ((StrokeHandler *) data)->types.add(QPainterPath::CurveToElement); ((StrokeHandler *) data)->pts.add(c2x); ((StrokeHandler *) data)->pts.add(c2y); ((StrokeHandler *) data)->types.add(QPainterPath::CurveToDataElement); ((StrokeHandler *) data)->pts.add(ex); ((StrokeHandler *) data)->pts.add(ey); ((StrokeHandler *) data)->types.add(QPainterPath::CurveToDataElement); } QPaintEngineEx::QPaintEngineEx() : QPaintEngine(*new QPaintEngineExPrivate, AllFeatures) { extended = true; } QPaintEngineEx::QPaintEngineEx(QPaintEngineExPrivate &data) : QPaintEngine(data, AllFeatures) { extended = true; } QPainterState *QPaintEngineEx::createState(QPainterState *orig) const { if (!orig) return new QPainterState; return new QPainterState(orig); } Q_GUI_EXPORT extern bool qt_scaleForTransform(const QTransform &transform, qreal *scale); // qtransform.cpp void QPaintEngineEx::stroke(const QVectorPath &path, const QPen &inPen) { #ifdef QT_DEBUG_DRAW qDebug() << "QPaintEngineEx::stroke()" << pen; #endif Q_D(QPaintEngineEx); if (path.isEmpty()) return; if (!d->strokeHandler) { d->strokeHandler = new StrokeHandler(path.elementCount()+4); d->stroker.setMoveToHook(qpaintengineex_moveTo); d->stroker.setLineToHook(qpaintengineex_lineTo); d->stroker.setCubicToHook(qpaintengineex_cubicTo); } QRectF clipRect; QPen pen = inPen; if (pen.style() > Qt::SolidLine) { QRectF cpRect = path.controlPointRect(); const QTransform &xf = state()->matrix; if (pen.isCosmetic()) { clipRect = d->exDeviceRect; cpRect.translate(xf.dx(), xf.dy()); } else { clipRect = xf.inverted().mapRect(QRectF(d->exDeviceRect)); } // Check to avoid generating unwieldy amount of dashes that will not be visible anyway QRectF extentRect = cpRect & clipRect; qreal extent = qMax(extentRect.width(), extentRect.height()); qreal patternLength = 0; const QList<qreal> pattern = pen.dashPattern(); const int patternSize = qMin(pattern.size(), 32); for (int i = 0; i < patternSize; i++) patternLength += qMax(pattern.at(i), qreal(0)); if (pen.widthF()) patternLength *= pen.widthF(); if (qFuzzyIsNull(patternLength)) { pen.setStyle(Qt::NoPen); } else if (qFuzzyIsNull(extent) || extent / patternLength > 10000) { // approximate stream of tiny dashes with semi-transparent solid line pen.setStyle(Qt::SolidLine); QColor color(pen.color()); color.setAlpha(color.alpha() / 2); pen.setColor(color); } } if (!qpen_fast_equals(pen, d->strokerPen)) { d->strokerPen = pen; d->stroker.setJoinStyle(pen.joinStyle()); d->stroker.setCapStyle(pen.capStyle()); d->stroker.setMiterLimit(pen.miterLimit()); qreal penWidth = pen.widthF(); if (penWidth == 0) d->stroker.setStrokeWidth(1); else d->stroker.setStrokeWidth(penWidth); Qt::PenStyle style = pen.style(); if (style == Qt::SolidLine) { d->activeStroker = &d->stroker; } else if (style == Qt::NoPen) { d->activeStroker = nullptr; } else { d->dasher.setDashPattern(pen.dashPattern()); d->dasher.setDashOffset(pen.dashOffset()); d->activeStroker = &d->dasher; } } if (!d->activeStroker) { return; } if (!clipRect.isNull()) d->activeStroker->setClipRect(clipRect); if (d->activeStroker == &d->stroker) 
d->stroker.setForceOpen(path.hasExplicitOpen()); const QPainterPath::ElementType *types = path.elements(); const qreal *points = path.points(); int pointCount = path.elementCount(); const qreal *lastPoint = points + (pointCount<<1); d->strokeHandler->types.reset(); d->strokeHandler->pts.reset(); // Some engines might decide to optimize for the non-shape hint later on... uint flags = QVectorPath::WindingFill; if (path.elementCount() > 2) flags |= QVectorPath::NonConvexShapeMask; if (d->stroker.capStyle() == Qt::RoundCap || d->stroker.joinStyle() == Qt::RoundJoin) flags |= QVectorPath::CurvedShapeMask; // ### Perspective Xforms are currently not supported... if (!pen.isCosmetic()) { // We include cosmetic pens in this case to avoid having to // change the current transform. Normal transformed, // non-cosmetic pens will be transformed as part of fill // later, so they are also covered here.. d->activeStroker->setCurveThresholdFromTransform(state()->matrix); d->activeStroker->begin(d->strokeHandler); if (types) { while (points < lastPoint) { switch (*types) { case QPainterPath::MoveToElement: d->activeStroker->moveTo(points[0], points[1]); points += 2; ++types; break; case QPainterPath::LineToElement: d->activeStroker->lineTo(points[0], points[1]); points += 2; ++types; break; case QPainterPath::CurveToElement: d->activeStroker->cubicTo(points[0], points[1], points[2], points[3], points[4], points[5]); points += 6; types += 3; flags |= QVectorPath::CurvedShapeMask; break; default: break; } } if (path.hasImplicitClose()) d->activeStroker->lineTo(path.points()[0], path.points()[1]); } else { d->activeStroker->moveTo(points[0], points[1]); points += 2; while (points < lastPoint) { d->activeStroker->lineTo(points[0], points[1]); points += 2; } if (path.hasImplicitClose()) d->activeStroker->lineTo(path.points()[0], path.points()[1]); } d->activeStroker->end(); if (!d->strokeHandler->types.size()) // an empty path... return; QVectorPath strokePath(d->strokeHandler->pts.data(), d->strokeHandler->types.size(), d->strokeHandler->types.data(), flags); fill(strokePath, pen.brush()); } else { // For cosmetic pens we need a bit of trickery... 
We to process xform the input points if (state()->matrix.type() >= QTransform::TxProject) { QPainterPath painterPath = state()->matrix.map(path.convertToPainterPath()); d->activeStroker->strokePath(painterPath, d->strokeHandler, QTransform()); } else { d->activeStroker->setCurveThresholdFromTransform(QTransform()); d->activeStroker->begin(d->strokeHandler); if (types) { while (points < lastPoint) { switch (*types) { case QPainterPath::MoveToElement: { QPointF pt = (*(const QPointF *) points) * state()->matrix; d->activeStroker->moveTo(pt.x(), pt.y()); points += 2; ++types; break; } case QPainterPath::LineToElement: { QPointF pt = (*(const QPointF *) points) * state()->matrix; d->activeStroker->lineTo(pt.x(), pt.y()); points += 2; ++types; break; } case QPainterPath::CurveToElement: { QPointF c1 = ((const QPointF *) points)[0] * state()->matrix; QPointF c2 = ((const QPointF *) points)[1] * state()->matrix; QPointF e = ((const QPointF *) points)[2] * state()->matrix; d->activeStroker->cubicTo(c1.x(), c1.y(), c2.x(), c2.y(), e.x(), e.y()); points += 6; types += 3; flags |= QVectorPath::CurvedShapeMask; break; } default: break; } } if (path.hasImplicitClose()) { QPointF pt = * ((const QPointF *) path.points()) * state()->matrix; d->activeStroker->lineTo(pt.x(), pt.y()); } } else { QPointF p = ((const QPointF *)points)[0] * state()->matrix; d->activeStroker->moveTo(p.x(), p.y()); points += 2; while (points < lastPoint) { QPointF p = ((const QPointF *)points)[0] * state()->matrix; d->activeStroker->lineTo(p.x(), p.y()); points += 2; } if (path.hasImplicitClose()) d->activeStroker->lineTo(p.x(), p.y()); } d->activeStroker->end(); } QVectorPath strokePath(d->strokeHandler->pts.data(), d->strokeHandler->types.size(), d->strokeHandler->types.data(), flags); QTransform xform = state()->matrix; state()->matrix = QTransform(); transformChanged(); QBrush brush = pen.brush(); if (qbrush_style(brush) != Qt::SolidPattern) brush.setTransform(brush.transform() * xform); fill(strokePath, brush); state()->matrix = xform; transformChanged(); } } void QPaintEngineEx::draw(const QVectorPath &path) { const QBrush &brush = state()->brush; if (qbrush_style(brush) != Qt::NoBrush) fill(path, brush); const QPen &pen = state()->pen; if (qpen_style(pen) != Qt::NoPen && qbrush_style(qpen_brush(pen)) != Qt::NoBrush) stroke(path, pen); } void QPaintEngineEx::clip(const QRect &r, Qt::ClipOperation op) { qreal right = r.x() + r.width(); qreal bottom = r.y() + r.height(); qreal pts[] = { qreal(r.x()), qreal(r.y()), right, qreal(r.y()), right, bottom, qreal(r.x()), bottom, qreal(r.x()), qreal(r.y()) }; QVectorPath vp(pts, 5, nullptr, QVectorPath::RectangleHint); clip(vp, op); } void QPaintEngineEx::clip(const QRegion &region, Qt::ClipOperation op) { const auto rectsInRegion = region.rectCount(); if (rectsInRegion == 1) { clip(*region.begin(), op); } else if (rectsInRegion <= 32) { qreal pts[2*32*4]; int pos = 0; for (QRect r : region) { qreal x1 = r.x(); qreal y1 = r.y(); qreal x2 = r.x() + r.width(); qreal y2 = r.y() + r.height(); pts[pos++] = x1; pts[pos++] = y1; pts[pos++] = x2; pts[pos++] = y1; pts[pos++] = x2; pts[pos++] = y2; pts[pos++] = x1; pts[pos++] = y2; } QVectorPath vp(pts, rectsInRegion * 4, qpaintengineex_rect4_types_32); clip(vp, op); } else { QVarLengthArray<qreal> pts(rectsInRegion * 2 * 4); QVarLengthArray<QPainterPath::ElementType> types(rectsInRegion * 4); int ppos = 0; int tpos = 0; for (QRect r : region) { qreal x1 = r.x(); qreal y1 = r.y(); qreal x2 = r.x() + r.width(); qreal y2 = r.y() + r.height(); 
pts[ppos++] = x1; pts[ppos++] = y1; pts[ppos++] = x2; pts[ppos++] = y1; pts[ppos++] = x2; pts[ppos++] = y2; pts[ppos++] = x1; pts[ppos++] = y2; types[tpos++] = QPainterPath::MoveToElement; types[tpos++] = QPainterPath::LineToElement; types[tpos++] = QPainterPath::LineToElement; types[tpos++] = QPainterPath::LineToElement; } QVectorPath vp(pts.data(), rectsInRegion * 4, types.data()); clip(vp, op); } } void QPaintEngineEx::clip(const QPainterPath &path, Qt::ClipOperation op) { if (path.isEmpty()) { QVectorPath vp(nullptr, 0); clip(vp, op); } else { clip(qtVectorPathForPath(path), op); } } void QPaintEngineEx::fillRect(const QRectF &r, const QBrush &brush) { qreal pts[] = { r.x(), r.y(), r.x() + r.width(), r.y(), r.x() + r.width(), r.y() + r.height(), r.x(), r.y() + r.height() }; QVectorPath vp(pts, 4, nullptr, QVectorPath::RectangleHint); fill(vp, brush); } void QPaintEngineEx::fillRect(const QRectF &r, const QColor &color) { fillRect(r, QBrush(color)); } void QPaintEngineEx::drawRects(const QRect *rects, int rectCount) { for (int i=0; i<rectCount; ++i) { const QRect &r = rects[i]; // ### Is there a one off here? qreal right = r.x() + r.width(); qreal bottom = r.y() + r.height(); qreal pts[] = { qreal(r.x()), qreal(r.y()), right, qreal(r.y()), right, bottom, qreal(r.x()), bottom, qreal(r.x()), qreal(r.y()) }; QVectorPath vp(pts, 5, nullptr, QVectorPath::RectangleHint); draw(vp); } } void QPaintEngineEx::drawRects(const QRectF *rects, int rectCount) { for (int i=0; i<rectCount; ++i) { const QRectF &r = rects[i]; qreal right = r.x() + r.width(); qreal bottom = r.y() + r.height(); qreal pts[] = { r.x(), r.y(), right, r.y(), right, bottom, r.x(), bottom, r.x(), r.y() }; QVectorPath vp(pts, 5, nullptr, QVectorPath::RectangleHint); draw(vp); } } void QPaintEngineEx::drawRoundedRect(const QRectF &rect, qreal xRadius, qreal yRadius, Qt::SizeMode mode) { qreal x1 = rect.left(); qreal x2 = rect.right(); qreal y1 = rect.top(); qreal y2 = rect.bottom(); if (mode == Qt::RelativeSize) { xRadius = xRadius * rect.width() / 200.; yRadius = yRadius * rect.height() / 200.; } xRadius = qMin(xRadius, rect.width() / 2); yRadius = qMin(yRadius, rect.height() / 2); qreal pts[] = { x1 + xRadius, y1, // MoveTo x2 - xRadius, y1, // LineTo x2 - (1 - KAPPA) * xRadius, y1, // CurveTo x2, y1 + (1 - KAPPA) * yRadius, x2, y1 + yRadius, x2, y2 - yRadius, // LineTo x2, y2 - (1 - KAPPA) * yRadius, // CurveTo x2 - (1 - KAPPA) * xRadius, y2, x2 - xRadius, y2, x1 + xRadius, y2, // LineTo x1 + (1 - KAPPA) * xRadius, y2, // CurveTo x1, y2 - (1 - KAPPA) * yRadius, x1, y2 - yRadius, x1, y1 + yRadius, // LineTo x1, y1 + (1 - KAPPA) * yRadius, // CurveTo x1 + (1 - KAPPA) * xRadius, y1, x1 + xRadius, y1 }; QVectorPath path(pts, 17, qpaintengineex_roundedrect_types, QVectorPath::RoundedRectHint); draw(path); } void QPaintEngineEx::drawLines(const QLine *lines, int lineCount) { int elementCount = lineCount << 1; while (elementCount > 0) { int count = qMin(elementCount, 32); qreal pts[64]; int count2 = count<<1; for (int i=0; i<count2; ++i) pts[i] = ((const int *) lines)[i]; QVectorPath path(pts, count, qpaintengineex_line_types_16, QVectorPath::LinesHint); stroke(path, state()->pen); elementCount -= 32; lines += 16; } } void QPaintEngineEx::drawLines(const QLineF *lines, int lineCount) { int elementCount = lineCount << 1; while (elementCount > 0) { int count = qMin(elementCount, 32); QVectorPath path((const qreal *) lines, count, qpaintengineex_line_types_16, QVectorPath::LinesHint); stroke(path, state()->pen); elementCount -= 32; lines 
+= 16; } } void QPaintEngineEx::drawEllipse(const QRectF &r) { qreal pts[26]; // QPointF[13] without constructors... union { qreal *ptr; QPointF *points; } x; x.ptr = pts; int point_count = 0; x.points[0] = qt_curves_for_arc(r, 0, -360, x.points + 1, &point_count); if (point_count == 0) return; QVectorPath vp((qreal *) pts, point_count + 1, qpaintengineex_ellipse_types, QVectorPath::EllipseHint); draw(vp); } void QPaintEngineEx::drawEllipse(const QRect &r) { drawEllipse(QRectF(r)); } void QPaintEngineEx::drawPath(const QPainterPath &path) { if (!path.isEmpty()) draw(qtVectorPathForPath(path)); } void QPaintEngineEx::drawPoints(const QPointF *points, int pointCount) { QPen pen = state()->pen; if (pen.capStyle() == Qt::FlatCap) pen.setCapStyle(Qt::SquareCap); if (pen.brush().isOpaque()) { while (pointCount > 0) { int count = qMin(pointCount, 16); qreal pts[64]; int oset = -1; for (int i=0; i<count; ++i) { pts[++oset] = points[i].x(); pts[++oset] = points[i].y(); pts[++oset] = points[i].x() + 1/63.; pts[++oset] = points[i].y(); } QVectorPath path(pts, count * 2, qpaintengineex_line_types_16, QVectorPath::LinesHint); stroke(path, pen); pointCount -= 16; points += 16; } } else { for (int i=0; i<pointCount; ++i) { qreal pts[] = { points[i].x(), points[i].y(), points[i].x() + qreal(1/63.), points[i].y() }; QVectorPath path(pts, 2, nullptr); stroke(path, pen); } } } void QPaintEngineEx::drawPoints(const QPoint *points, int pointCount) { QPen pen = state()->pen; if (pen.capStyle() == Qt::FlatCap) pen.setCapStyle(Qt::SquareCap); if (pen.brush().isOpaque()) { while (pointCount > 0) { int count = qMin(pointCount, 16); qreal pts[64]; int oset = -1; for (int i=0; i<count; ++i) { pts[++oset] = points[i].x(); pts[++oset] = points[i].y(); pts[++oset] = points[i].x() + 1/63.; pts[++oset] = points[i].y(); } QVectorPath path(pts, count * 2, qpaintengineex_line_types_16, QVectorPath::LinesHint); stroke(path, pen); pointCount -= 16; points += 16; } } else { for (int i=0; i<pointCount; ++i) { qreal pts[] = { qreal(points[i].x()), qreal(points[i].y()), qreal(points[i].x() +1/63.), qreal(points[i].y()) }; QVectorPath path(pts, 2, nullptr); stroke(path, pen); } } } void QPaintEngineEx::drawPolygon(const QPointF *points, int pointCount, PolygonDrawMode mode) { Q_ASSUME(pointCount >= 2); QVectorPath path((const qreal *) points, pointCount, nullptr, QVectorPath::polygonFlags(mode)); if (mode == PolylineMode) stroke(path, state()->pen); else draw(path); } void QPaintEngineEx::drawPolygon(const QPoint *points, int pointCount, PolygonDrawMode mode) { Q_ASSUME(pointCount >= 2); int count = pointCount<<1; QVarLengthArray<qreal> pts(count); for (int i=0; i<count; ++i) pts[i] = ((const int *) points)[i]; QVectorPath path(pts.data(), pointCount, nullptr, QVectorPath::polygonFlags(mode)); if (mode == PolylineMode) stroke(path, state()->pen); else draw(path); } void QPaintEngineEx::drawPixmap(const QPointF &pos, const QPixmap &pm) { drawPixmap(QRectF(pos, pm.size() / pm.devicePixelRatio()), pm, pm.rect()); } void QPaintEngineEx::drawImage(const QPointF &pos, const QImage &image) { drawImage(QRectF(pos, image.size() / image.devicePixelRatio()), image, image.rect()); } void QPaintEngineEx::drawTiledPixmap(const QRectF &r, const QPixmap &pixmap, const QPointF &s) { QBrush brush(state()->pen.color(), pixmap); QTransform xform = QTransform::fromTranslate(r.x() - s.x(), r.y() - s.y()); if (!qFuzzyCompare(pixmap.devicePixelRatio(), qreal(1.0))) xform.scale(1.0/pixmap.devicePixelRatio(), 1.0/pixmap.devicePixelRatio()); 
brush.setTransform(xform); qreal pts[] = { r.x(), r.y(), r.x() + r.width(), r.y(), r.x() + r.width(), r.y() + r.height(), r.x(), r.y() + r.height() }; QVectorPath path(pts, 4, nullptr, QVectorPath::RectangleHint); fill(path, brush); } void QPaintEngineEx::drawPixmapFragments(const QPainter::PixmapFragment *fragments, int fragmentCount, const QPixmap &pixmap, QPainter::PixmapFragmentHints /*hints*/) { if (pixmap.isNull()) return; qreal oldOpacity = state()->opacity; QTransform oldTransform = state()->matrix; for (int i = 0; i < fragmentCount; ++i) { QTransform transform = oldTransform; transform.translate(fragments[i].x, fragments[i].y); transform.rotate(fragments[i].rotation); state()->opacity = oldOpacity * fragments[i].opacity; state()->matrix = transform; opacityChanged(); transformChanged(); qreal w = fragments[i].scaleX * fragments[i].width; qreal h = fragments[i].scaleY * fragments[i].height; QRectF sourceRect(fragments[i].sourceLeft, fragments[i].sourceTop, fragments[i].width, fragments[i].height); drawPixmap(QRectF(-0.5 * w, -0.5 * h, w, h), pixmap, sourceRect); } state()->opacity = oldOpacity; state()->matrix = oldTransform; opacityChanged(); transformChanged(); } void QPaintEngineEx::setState(QPainterState *s) { QPaintEngine::state = s; } void QPaintEngineEx::updateState(const QPaintEngineState &) { // do nothing... } Q_GUI_EXPORT QPainterPath qt_painterPathFromVectorPath(const QVectorPath &path) { const qreal *points = path.points(); const QPainterPath::ElementType *types = path.elements(); QPainterPath p; if (types) { int id = 0; for (int i=0; i<path.elementCount(); ++i) { switch(types[i]) { case QPainterPath::MoveToElement: p.moveTo(QPointF(points[id], points[id+1])); id+=2; break; case QPainterPath::LineToElement: p.lineTo(QPointF(points[id], points[id+1])); id+=2; break; case QPainterPath::CurveToElement: { QPointF p1(points[id], points[id+1]); QPointF p2(points[id+2], points[id+3]); QPointF p3(points[id+4], points[id+5]); p.cubicTo(p1, p2, p3); id+=6; break; } case QPainterPath::CurveToDataElement: ; break; } } } else { p.moveTo(QPointF(points[0], points[1])); int id = 2; for (int i=1; i<path.elementCount(); ++i) { p.lineTo(QPointF(points[id], points[id+1])); id+=2; } } if (path.hints() & QVectorPath::WindingFill) p.setFillRule(Qt::WindingFill); return p; } void QPaintEngineEx::drawStaticTextItem(QStaticTextItem *staticTextItem) { QPainterPath path; path.setFillRule(Qt::WindingFill); if (staticTextItem->numGlyphs == 0) return; QFontEngine *fontEngine = staticTextItem->fontEngine(); fontEngine->addGlyphsToPath(staticTextItem->glyphs, staticTextItem->glyphPositions, staticTextItem->numGlyphs, &path, { }); if (!path.isEmpty()) { QPainterState *s = state(); QPainter::RenderHints oldHints = s->renderHints; bool changedHints = false; if (bool(oldHints & QPainter::TextAntialiasing) && !bool(fontEngine->fontDef.styleStrategy & QFont::NoAntialias) && !bool(oldHints & QPainter::Antialiasing)) { s->renderHints |= QPainter::Antialiasing; renderHintsChanged(); changedHints = true; } fill(qtVectorPathForPath(path), s->pen.brush()); if (changedHints) { s->renderHints = oldHints; renderHintsChanged(); } } } bool QPaintEngineEx::requiresPretransformedGlyphPositions(QFontEngine *, const QTransform &) const { return false; } bool QPaintEngineEx::shouldDrawCachedGlyphs(QFontEngine *fontEngine, const QTransform &m) const { if (fontEngine->glyphFormat == QFontEngine::Format_ARGB) return true; static const int maxCachedGlyphSizeSquared = std::pow([]{ if (int env = 
qEnvironmentVariableIntValue("QT_MAX_CACHED_GLYPH_SIZE")) return env; return QT_MAX_CACHED_GLYPH_SIZE; }(), 2); qreal pixelSize = fontEngine->fontDef.pixelSize; return (pixelSize * pixelSize * qAbs(m.determinant())) <= maxCachedGlyphSizeSquared; } QT_END_NAMESPACE
null
289
CWE-787
CVE-2021-40985
# Changes in HTMLDOC v1.9.13

- Now install a 32x32 icon for Linux (Issue #432)
- Fixed an issue with large values for roman numerals and letters in headings (Issue #433)
- Fixed a crash bug when an HTML comment contains an invalid nul character (Issue #439)

# Changes in HTMLDOC v1.9.12

- Fixed a crash bug with "data:" URIs and EPUB output (Issue #410)
- Fixed crash bugs for books (Issue #412, Issue #414)
- Fixed a number-up crash bug (Issue #413)
- Fixed JPEG error handling (Issue #415)
- Fixed crash bugs with bogus table attributes (Issue #416, Issue #417)
- Fixed a crash bug with malformed URIs (Issue #418)
- Fixed a crash bug with malformed GIF files (Issue #423)
- Fixed a crash bug with empty titles (Issue #425)
- Fixed crash bugs with bogus text (Issue #426, Issue #429, Issue #430, Issue #431)
- Fixed some issues reported by Coverity.
- Removed the bundled libjpeg, libpng, and zlib.

# Changes in HTMLDOC v1.9.11

- Added high-resolution desktop icons for Linux.
- Updated the internal HTTP library to fix truncation of redirection URLs (Issue #396)
- Fixed a regression in the handling of character entities for UTF-8 input (Issue #401)
- The `--numbered` option did not work when the table-of-contents was disabled (Issue #405)

# Changes in HTMLDOC v1.9.10

- Updated local zlib to v1.2.11.
- Updated local libpng to v1.6.37.
- Fixed packaging issues on macOS and Windows (Issue #377, Issue #386)
- Now ignore sRGB profile errors in PNG files (Issue #390)
- The GUI would crash when saving (Issue #391)
- Page comments are now allowed in `pre` text (Issue #394)

# Changes in HTMLDOC v1.9.9

- Fixed a redirection issue - some sites (incorrectly) provide an incomplete Location: URL in the HTTP response.
- Fixed https: support on newer versions of Windows (Issue #378)
- Fixed a problem with remote URLs containing spaces (Issue #379)
- Fixed a UTF-8 processing bug for Markdown files (Issue #383)
- Added support for `<FONT FACE="monospace">` (Issue #385)

# Changes in HTMLDOC v1.9.8

- Added support for a `HTMLDOC.filename` META keyword that controls the filename reported in CGI mode; the default remains "htmldoc.pdf" (Issue #367)
- Fixed a paragraph formatting issue with large inline images (Issue #369)
- Fixed a buffer underflow issue (Issue #370)
- Fixed PDF page numbers (Issue #371)
- Added support for a new `L` header/footer format (`$LETTERHEAD`), which inserts a letterhead image at its full size (Issue #372, Issue #373, Issue #375)
- Updated the build documentation (Issue #374)

# Changes in HTMLDOC v1.9.7

- Refactored the PRE rendering code to work around compiler optimization bugs (Issue #349)
- Added support for links with targets (Issue #351)
- Fixed a table rowspan + valign bug (Issue #360)

# Changes in HTMLDOC v1.9.6

- Added support for data URIs (Issue #340)
- HTMLDOC no longer includes a PDF table of contents when converting a single web page (Issue #344)
- Updated the markdown support with external links, additional inline markup, and hard line breaks.
- Links in markdown text no longer render with a leading space as part of the link (Issue #346)
- Fixed a buffer underflow bug discovered by AddressSanitizer.
- Fixed a bug in UTF-8 support (Issue #348)
- PDF output now includes the base language of the input document(s) (Issue #350)
- Optimized the loading of font widths (Issue #354)
- Optimized PDF page resources (Issue #356)
- Optimized the base memory used for font widths (Issue #357)
- Added proper `&shy;` support (Issue #361)
- Title files can now be markdown.

# Changes in HTMLDOC v1.9.5

- The GUI did not support EPUB output.
- Empty markdown table cells were not rendered in PDF or PostScript output.
- The automatically-generated title page now supports both "docnumber" and "version" metadata.
- Added support for dc:subject and dc:language metadata in EPUB output from the HTML keywords and lang values.
- Added support for the subject and language metadata in markdown input.
- Fixed a buffer underflow bug (Issue #338)
- `htmldoc --help` now reports whether HTTPS URLs are supported (Issue #339)
- Fixed an issue with HTML title pages and EPUB output.

# Changes in HTMLDOC v1.9.4

- Inline fixed-width text is no longer reduced in size automatically (Issue #309)
- Optimized initialization of font width data (Issue #334)

# Changes in HTMLDOC v1.9.3

- Fixed formatting bugs with aligned images (Issue #322, Issue #324)
- Fixed support for three digit "#RGB" color values (Issue #323)
- Fixed character set support for markdown metadata.
- Updated libpng to v1.6.34 (Issue #326)
- The makefiles did not use the CPPFLAGS value (Issue #328)

# Changes in HTMLDOC v1.9.2

- Added Markdown table support.
- Fixed parsing of TBODY, TFOOT, and THEAD elements in HTML files.

# Changes in HTMLDOC v1.9.1

- Fixed monospace font size issue (Issue #309)
- Added support for reproducible builds (Issue #310)
- Added limited support for the HTML 4.0 SPAN element (Issue #311)
- Added (extremely limited) UTF-8 support for input files (Issue #314)
- Fixed buffer underflow for (invalid) short HTML comments (Issue #316)
- Now indent PRE text, by popular request.
- EPUB output now makes sure that `<element property>` is written as `<element property="property">`.
- Now support both NAME and ID for table-of-contents targets.

# Changes in HTMLDOC v1.9

- Added support for repeating a single header row for tables that span multiple pages (Issue #16)
- Added support for embedding the current filename/URL in the header or footer (Issue #50)
- Added EPUB support (Issue #301)
- Added Markdown support (Issue #302)
- Fixed a regression in header/footer image scaling (Issue #303)
- Documentation updates (Issue #305)
- Compiler fixes (Issue #304, Issue #306)
- Fixed a bug when running HTMLDOC as a macOS application.
- Updated the bundled libpng to v1.6.29.

# Changes in HTMLDOC v1.8.30

- Updated documentation to reflect new project page on Github.
- Dropped old CDE and IRIX desktop integration files.
- Cleaned up the GUI and adopted new default text editors for Linux and macOS.
- PAGE BREAK comments at the end of a file in web page mode would lose the first page (Issue #251)
- Fixed the scaling of header/footer images to limit them to the height of the header or footer (Issue #273)
- Fixed an issue with the top-level makefile not exiting with an error as needed (Issue #282)
- Fixed a URL referencing bug when the same hostname but a different port was used (Issue #290)
- Fixed build issue on macOS (Issue #291)
- Fixed handling of indexed+alpha PNG images (Issue #295)

# Changes in HTMLDOC v1.8.29

- Updated local PNG library to version 1.6.20.
- Updated local JPEG library to version 9b.
- Dropped support for OpenSSL.
- Added configure script support for libjpeg-turbo.
- Updated HTTP code to latest CUPS/ippsample sources.
- Duplex PDF output incorrectly forced an even number of pages
- The table of contents showed the wrong page numbers after headings containing the "_HD_OMIT_TOC" attribute.
- Fixed reported build issues
- The configure script's --enable-local* options did not work.

# Changes in HTMLDOC v1.8.28

- Updated local zlib to version 1.2.8.
- Updated local PNG library to version 1.6.8.
- Updated local JPEG library to version 9.
- Updated default PDF version to 1.4.
- SECURITY: Fixed three buffer overflow issues when reading AFM files and parsing page sizes.
- Fixed incompatibility with Fortify's version of strcpy, which does not work properly with variable-length arrays
- Fixed compilation against PNG library 1.5 or later
- Fixed documentation errors
- Marked Zapf-Dingbats as a standard font
- Fixed GPL license text in GUI
- Fixed a table formatting problem when a column has multiple colspan values
- Fixed parsing of HTML comments
- Fixed potential out-of-bounds read in table-of-contents rendering code
- Fixed handling of image URLs with ampersands in them
- Fixed top/bottom margins for logo and header/footer images
- Fixed image alignment bug
- Fixed X11 build problem

# Changes in HTMLDOC v1.8.27

- Fixed a crash bug that appeared when more than 10 blank pages were present in a document
- Color changes were not reflected in PRE text
- Remote URLs did not always work on older operating systems
- Image filenames using % escapes were not decoded properly.
- Rows using BGCOLOR that spanned across multiple pages did not render properly
- Rows no longer start on a new page due to a cell with both HEIGHT and ROWSPAN specified
- CMYK JPEG images caused HTMLDOC to crash
- Table cell width calculations didn't always account for the proper minimum width
- Images were not copied when generating indexed HTML output to a directory
- Changing the bottom margin resulted in text that was formatted below the bottom margin.
- The Monospace-Oblique font was not embedded properly in PDF files.

# Changes in HTMLDOC v1.8.26

- Outline and keyword strings in PDF files are now stored as Unicode
- The Flate compression code could get in an infinite loop if it ran out of memory
- Book files saved from the GUI did not handle filenames with spaces
- Fixed and re-enabled the ASCII85Device filter support in PostScript Level 2/3 output
- Character entities in the first word of a file were not rendered properly
- Fixed-size table columns were incorrectly resized when a table width was also specified and there was extra space to distribute
- Text could "walk" up or down when in-line images were used
- Row backgrounds incorrectly replaced cell backgrounds when the first cell in a row used ROWSPAN
- HTMLDOC did not correctly parse FONT FACE attributes
- Images in Level 2/3 PostScript output did not work on some printers
- The GUI did not use the first page header

# Changes in HTMLDOC v1.8.25

- Added "--overflow" and "--no-overflow" command-line options to show or hide the content-too-large errors; the default is "--no-overflow".
- Added "--header1" command-line option and "HEADER1" page comments to set the page header for the first page of each chapter.
- Added "timing" and "remotebytes" debug data generation.
- Added DejaVu font collection to better support Cyrillic and Greek text; the new fonts are available under the generic names "monospace", "sans", and "serif".
- Added "--referer" command-line option and corresponding CGI-mode support to pass Referer: information in HTTP requests
- On Windows, HTMLDOC now logs CGI mode errors to a file called "htmldoc.log" in the Windows temporary directory.
- HTMLDOC no longer uses Base-85 encoding for image data when producing Level 2 and 3 PostScript output.
  It appears that many printers and PostScript interpreters cannot properly decode this data when the original image data is not a multiple of 8 bits.
- HTMLDOC now renders STRONG elements in boldface instead of bold-italic to match the W3C recommendations.
- HTMLDOC now automatically inserts a TR element before a TD or TH element as needed to improve web site compatibility; this also triggers an HTML error in --strict mode.
- "$HFIMAGEn" didn't work in a header/footer string.
- HTMLDOC could crash when rendering a table.
- Book files were not used in CGI mode
- Cookies were not sent in HTTP requests
- Table cells were not aligned properly when the ROWSPAN attribute was set to 1
- HTMLDOC crashed when rendering unresolved hyperlinks in aligned images
- Documented the HTMLDOC_NOCGI environment variable
- HTMLDOC sometimes crashed when rendering tables with background colors
- HTMLDOC would crash when writing encrypted strings longer than 1024 bytes
- HTMLDOC didn't set the data directory when running in CGI mode on Windows.
- HTMLDOC could crash when loading the Symbol.afm file
- HTMLDOC did not always honor HEIGHT attributes in table rows.
- Tables with a mix of colspan and rowspan sometimes caused cells to be moved vertically outside the cell.
null
# Changes in HTMLDOC v1.9.13 - Now install a 32x32 icon for Linux (Issue #432) - Fixed an issue with large values for roman numerals and letters in headings (Issue #433) - Fixed a crash bug when a HTML comment contains an invalid nul character (Issue #439) - Fixed a crash bug with bogus BMP images (Issue #444) # Changes in HTMLDOC v1.9.12 - Fixed a crash bug with "data:" URIs and EPUB output (Issue #410) - Fixed crash bugs for books (Issue #412, Issue #414) - Fixed a number-up crash bug (Issue #413) - Fixed JPEG error handling (Issue #415) - Fixed crash bugs with bogus table attributes (Issue #416, Issue #417) - Fixed a crash bug with malformed URIs (Issue #418) - Fixed a crash bug with malformed GIF files (Issue #423) - Fixed a crash bug with empty titles (Issue #425) - Fixed crash bugs with bogus text (Issue #426, Issue #429, Issue #430, Issue #431) - Fixed some issues reported by Coverity. - Removed the bundled libjpeg, libpng, and zlib. # Changes in HTMLDOC v1.9.11 - Added high-resolution desktop icons for Linux. - Updated the internal HTTP library to fix truncation of redirection URLs (Issue #396) - Fixed a regression in the handling of character entities for UTF-8 input (Issue #401) - The `--numbered` option did not work when the table-of-contents was disabled (Issue #405) # Changes in HTMLDOC v1.9.10 - Updated local zlib to v1.2.11. - Updated local libpng to v1.6.37. - Fixed packaging issues on macOS and Windows (Issue #377, Issue #386) - Now ignore sRGB profile errors in PNG files (Issue #390) - The GUI would crash when saving (Issue #391) - Page comments are now allowed in `pre` text (Issue #394) # Changes in HTMLDOC v1.9.9 - Fixed a redirection issue - some sites (incorrectly) provide an incomplete Location: URL in the HTTP response. - Fixed https: support on newer versions of Windows (Issue #378) - Fixed a problem with remote URLs containing spaces (Issue #379) - Fixed a UTF-8 processing bug for Markdown files (Issue #383) - Added support for `<FONT FACE="monospace">` (Issue #385) # Changes in HTMLDOC v1.9.8 - Added support for a `HTMLDOC.filename` META keyword that controls the filename reported in CGI mode; the default remains "htmldoc.pdf" (Issue #367) - Fixed a paragraph formatting issue with large inline images (Issue #369) - Fixed a buffer underflow issue (Issue #370) - Fixed PDF page numbers (Issue #371) - Added support for a new `L` header/footer format (`$LETTERHEAD`), which inserts a letterhead image at its full size (Issue #372, Issue #373, Issue #375) - Updated the build documentation (Issue #374) # Changes in HTMLDOC v1.9.7 - Refactored the PRE rendering code to work around compiler optimization bugs (Issue #349) - Added support for links with targets (Issue #351) - Fixed a table rowspan + valign bug (Issue #360) # Changes in HTMLDOC v1.9.6 - Added support for data URIs (Issue #340) - HTMLDOC no longer includes a PDF table of contents when converting a single web page (Issue #344) - Updated the markdown support with external links, additional inline markup, and hard line breaks. - Links in markdown text no longer render with a leading space as part of the link (Issue #346) - Fixed a buffer underflow bug discovered by AddressSanitizer. 
- Fixed a bug in UTF-8 support (Issue #348) - PDF output now includes the base language of the input document(s) (Issue #350) - Optimized the loading of font widths (Issue #354) - Optimized PDF page resources (Issue #356) - Optimized the base memory used for font widths (Issue #357) - Added proper `&shy;` support (Issue #361) - Title files can now be markdown. # Changes in HTMLDOC v1.9.5 - The GUI did not support EPUB output. - Empty markdown table cells were not rendered in PDF or PostScript output. - The automatically-generated title page now supports both "docnumber" and "version" metadata. - Added support for dc:subject and dc:language metadata in EPUB output from the HTML keywords and lang values. - Added support for the subject and language metadata in markdown input. - Fixed a buffer underflow bug (Issue #338) - `htmldoc --help` now reports whether HTTPS URLs are supported (Issue #339) - Fixed an issue with HTML title pages and EPUB output. # Changes in HTMLDOC v1.9.4 - Inline fixed-width text is no longer reduced in size automatically (Issue #309) - Optimized initialization of font width data (Issue #334) # Changes in HTMLDOC v1.9.3 - Fixed formatting bugs with aligned images (Issue #322, Issue #324) - Fixed support for three digit "#RGB" color values (Issue #323) - Fixed character set support for markdown metadata. - Updated libpng to v1.6.34 (Issue #326) - The makefiles did not use the CPPFLAGS value (Issue #328) # Changes in HTMLDOC v1.9.2 - Added Markdown table support. - Fixed parsing of TBODY, TFOOT, and THEAD elements in HTML files. # Changes in HTMLDOC v1.9.1 - Fixed monospace font size issue (Issue #309) - Added support for reproducible builds (Issue #310) - Added limited support for the HTML 4.0 SPAN element (Issue #311) - Added (extremely limited) UTF-8 support for input files (Issue #314) - Fixed buffer underflow for (invalid) short HTML comments (Issue #316) - Now indent PRE text, by popular request. - EPUB output now makes sure that `<element property>` is written as `<element property="property">`. - Now support both NAME and ID for table-of-contents targets. # Changes in HTMLDOC v1.9 - Added support for repeating a single header row for tables that span multiple pages (Issue #16) - Added support for embedding the current filename/URL in the header or footer (Issue #50) - Added EPUB support (Issue #301) - Added Markdown support (Issue #302) - Fixed a regression in header/footer image scaling (Issue #303) - Documentation updates (Issue #305) - Compiler fixes (Issue #304, Issue #306) - Fixed a bug when running HTMLDOC as a macOS application. - Updated the bundled libpng to v1.6.29. # Changes in HTMLDOC v1.8.30 - Updated documentation to reflect new project page on Github. - Dropped old CDE and IRIX desktop integration files. - Cleaned up the GUI and adopted new default text editors for Linux and macOS. - PAGE BREAK comments at the end of a file in web page mode would lose the first page (Issue #251) - Fixed the scaling of header/footer images to limit them to the height of the header or footer (Issue #273) - Fixed an issue with the top-level makefile not exiting with an error as needed (Issue #282) - Fixed a URL referencing bug when the same hostname but a different port was used (Issue #290) - Fixed build issue on macOS (Issue #291) - Fixed handling of indexed+alpha PNG images (Issue #295) # Changes in HTMLDOC v1.8.29 - Updated local PNG library to version 1.6.20. - Updated local JPEG library to version 9b. - Dropped support for OpenSSL. 
- Added configure script support for libjpeg-turbo. - Updated HTTP code to latest CUPS/ippsample sources. - Duplex PDF output incorrectly forced an even number of pages - The table of contents showed the wrong page numbers after headings containing the "_HD_OMIT_TOC" attribute. - Fixed reported build issues - The configure script's --enable-local* options did not work. # Changes in HTMLDOC v1.8.28 - Updated local zlib to version 1.2.8. - Updated local PNG library to version 1.6.8. - Updated local JPEG library to version 9. - Updated default PDF version to 1.4. - SECURITY: Fixed three buffer overflow issues when reading AFM files and parsing page sizes. - Fixed incompatibility with Fortify's version of strcpy, which does not work properly with variable-length arrays - Fixed compilation against PNG library 1.5 or later - Fixed documentation errors - Marked Zapf-Dingbats as a standard font - Fixed GPL license text in GUI - Fixed a table formatting problem when a column has multiple colspan values - Fixed parsing of HTML comments - Fixed potential out-of-bounds read in table-of-contents rendering code - Fixed handling of image URLs with ampersands in them - Fixed top/bottom margins for logo and header/footer images - Fixed image alignment bug - Fixed X11 build problem # Changes in HTMLDOC v1.8.27 - Fixed a crash bug that appeared when more than 10 blank pages were present in a document - Color changes were not reflected in PRE text - Remote URLs did not always work on older operating systems - Image filenames using % escapes were not decoded properly. - Rows using BGCOLOR that spanned across multiple pages did not render properly - Rows no longer start on a new page due to a cell with both HEIGHT and ROWSPAN specified - CMYK JPEG images caused HTMLDOC to crash - Table cell width calculations didn't always account for the proper minimum width - Images were not copied when generating indexed HTML output to a directory - Changing the bottom margin resulted in text that was formatted below the bottom margin. - The Monospace-Oblique font was not embedded properly in PDF files. # Changes in HTMLDOC v1.8.26 - Outline and keyword strings in PDF files are now stored as Unicode - The Flate compression code could get in an infinite loop if it ran out of memory - Book files saved from the GUI did not handle filenames with spaces - Fixed and re-enabled the ASCII85Device filter support in PostScript Level 2/3 output - Character entities in the first word of a file were not rendered properly - Fixed-size table columns were incorrectly resized when a table width was also specified and there was extra space to distribute - Text could "walk" up or down when in-line images were used - Row backgrounds incorrectly replaced cell backgrounds when the first cell in a row used ROWSPAN - HTMLDOC did not correctly parse FONT FACE attributes - Images in Level 2/3 PostScript output did not work on some printers - The GUI did not use the first page header # Changes in HTMLDOC v1.8.25 - Added "--overflow" and "--no-overflow" command-line options to show or hide the content-too-large errors; the default is "--no-overflow". - Added "--header1" command-line option and "HEADER1" page comments to set the page header for the first page of each chapter. - Added "timing" and "remotebytes" debug data generation. - Added DejaVu font collection to better support Cyrillic and Greek text; the new fonts are available under the generic names "monospace", "sans", and "serif". 
- Added "--referer" command-line option and corresponding CGI-mode support to pass Referer: information in HTTP requests - On Windows, HTMLDOC now logs CGI mode errors to a file called "htmldoc.log" in the Windows temporary directory. - HTMLDOC no longer uses Base-85 encoding for image data when producing Level 2 and 3 PostScript output. It appears that many printers and PostScript interpreters cannot properly decode this data when the original image data is not a multiple of 8 bits. - HTMLDOC now renders STRONG elements in boldface instead of bold-italic to match the W3C recommendations. - HTMLDOC now automatically inserts a TR element before a TD or TH element as needed to improve web site compatibility; this also triggers a HTML error in --strict mode. - "$HFIMAGEn" didn't work in a header/footer string. - HTMLDOC could crash when rendering a table. - Book files were not used in CGI mode - Cookies were not sent in HTTP requests - Table cells were not aligned properly when the ROWSPAN attribute was set to 1 - HTMLDOC crashed when rendering unresolved hyperlinks in aligned images - Documented the HTMLDOC_NOCGI environment variable - HTMLDOC sometimes crashed when rendering tables with background colors - HTMLDOC would crash when writing encrypted strings longer than 1024 bytes - HTMLDOC didn't set the data directory when running in CGI mode on Windows. - HTMLDOC could crash when loading the Symbol.afm file - HTMLDOC did not always honor HEIGHT attributes in table rows. - Tables with a mix of colspan and rowspan sometimes caused cells to be moved vertically outside the cell.
null
290
CWE-787
CVE-2021-41216
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <algorithm> #include <ostream> #include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/kernel_shape_util.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/util/mirror_pad_mode.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/strided_slice_op.h" #include "tensorflow/core/util/tensor_format.h" namespace tensorflow { using shape_inference::DimensionHandle; using shape_inference::InferenceContext; using shape_inference::ShapeHandle; using shape_inference::UnchangedShape; namespace { Status GetAxisForPackAndUnpack(InferenceContext* c, int32_t rank_after_pack, int32* axis) { TF_RETURN_IF_ERROR(c->GetAttr("axis", axis)); if (*axis < -1 * rank_after_pack || *axis >= rank_after_pack) { return errors::InvalidArgument("Invalid axis: ", *axis, "; must be in [", -1 * rank_after_pack, ",", rank_after_pack, ")"); } if (*axis < 0) *axis = (rank_after_pack + *axis); return Status::OK(); } template <typename T> std::vector<int64_t> AsInt64(const Tensor* tensor, int64_t num_elements) { std::vector<int64_t> ret(num_elements); auto data = tensor->vec<T>(); for (int64_t i = 0; i < num_elements; ++i) { ret[i] = data(i); } return ret; } template <typename T> Status PadKnown(InferenceContext* c, ShapeHandle input, const Tensor* paddings_t, int64_t num_dims) { // paddings_t is known. std::vector<DimensionHandle> dims(num_dims); auto paddings_data = paddings_t->matrix<T>(); for (int64_t i = 0; i < num_dims; ++i) { const T pad0 = paddings_data(i, 0); const T pad1 = paddings_data(i, 1); if (pad0 < 0 || pad1 < 0) { return errors::InvalidArgument("Paddings must be non-negative"); } TF_RETURN_IF_ERROR(c->Add(c->Dim(input, i), pad0 + pad1, &dims[i])); } c->set_output(0, c->MakeShape(dims)); return Status::OK(); } Status PadShapeFn(InferenceContext* c) { // Paddings is a matrix of [input_rank, 2]. ShapeHandle paddings; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &paddings)); DimensionHandle unused; TF_RETURN_IF_ERROR(c->WithValue(c->Dim(paddings, 1), 2, &unused)); // n_dim and input.rank are equivalent. ShapeHandle input = c->input(0); DimensionHandle n_dim = c->Dim(paddings, 0); if (c->ValueKnown(n_dim)) { TF_RETURN_IF_ERROR(c->WithRank(input, c->Value(n_dim), &input)); } else if (c->RankKnown(input)) { TF_RETURN_IF_ERROR(c->WithValue(n_dim, c->Rank(input), &n_dim)); } const Tensor* paddings_t = c->input_tensor(1); // paddings_t is unknown if (paddings_t == nullptr) { if (c->ValueKnown(n_dim)) { // Make output with n_dim unknown dims. 
c->set_output(0, c->UnknownShapeOfRank(c->Value(n_dim))); } else { c->set_output(0, c->UnknownShape()); } return Status::OK(); } const int64_t num_dims = paddings_t->shape().dim_size(0); TF_RETURN_IF_ERROR(c->WithRank(input, num_dims, &input)); TF_RETURN_IF_ERROR(c->WithValue(n_dim, num_dims, &n_dim)); if (paddings_t->dtype() == DT_INT32) { return PadKnown<int32>(c, input, paddings_t, num_dims); } else { return PadKnown<int64_t>(c, input, paddings_t, num_dims); } } Status TransposeShapeFn(InferenceContext* c) { ShapeHandle input = c->input(0); ShapeHandle perm_shape = c->input(1); const Tensor* perm = c->input_tensor(1); DimensionHandle perm_elems = c->NumElements(perm_shape); // If we don't have rank information on the input or value information on // perm we can't return any shape information, otherwise we have enough // information to at least find the rank of the output. if (!c->RankKnown(input) && !c->ValueKnown(perm_elems) && perm == nullptr) { c->set_output(0, c->UnknownShape()); return Status::OK(); } // Find our value of the rank. int64_t rank; if (c->RankKnown(input)) { rank = c->Rank(input); } else if (c->ValueKnown(perm_elems)) { rank = c->Value(perm_elems); } else { rank = perm->NumElements(); } if (!c->RankKnown(input) && rank < 2) { // A permutation array containing a single element is ambiguous. It could // indicate either a scalar or a 1-dimensional array, both of which the // transpose op returns unchanged. c->set_output(0, input); return Status::OK(); } std::vector<DimensionHandle> dims; dims.resize(rank); TF_RETURN_IF_ERROR(c->WithRank(input, rank, &input)); // Ensure that perm is a vector and has rank elements. TF_RETURN_IF_ERROR(c->WithRank(perm_shape, 1, &perm_shape)); TF_RETURN_IF_ERROR(c->WithValue(perm_elems, rank, &perm_elems)); // If we know the rank of the input and the value of perm, we can return // all shape information, otherwise we can only return rank information, // but no information for the dimensions. if (perm != nullptr) { std::vector<int64_t> data; if (perm->dtype() == DT_INT32) { data = AsInt64<int32>(perm, rank); } else { data = AsInt64<int64_t>(perm, rank); } for (int32_t i = 0; i < rank; ++i) { int64_t in_idx = data[i]; if (in_idx >= rank) { return errors::InvalidArgument("perm dim ", in_idx, " is out of range of input rank ", rank); } dims[i] = c->Dim(input, in_idx); } } else { for (int i = 0; i < rank; ++i) { dims[i] = c->UnknownDim(); } } c->set_output(0, c->MakeShape(dims)); return Status::OK(); } Status SetOutputShapeForReshape(InferenceContext* c) { ShapeHandle in = c->input(0); ShapeHandle out; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(1, &out)); if (!c->RankKnown(out)) { // We have no information about the shape of the output. c->set_output(0, out); return Status::OK(); } if (c->RankKnown(in)) { // We don't know the number of output elements, but we can try to infer // the missing dimension. 
bool too_many_unknown = false; int32_t out_unknown_idx = -1; DimensionHandle known_out_elems = c->NumElements(out); if (!c->ValueKnown(known_out_elems)) { known_out_elems = c->MakeDim(1); for (int32_t i = 0; i < c->Rank(out); ++i) { DimensionHandle dim = c->Dim(out, i); if (!c->ValueKnown(dim)) { if (out_unknown_idx >= 0) { too_many_unknown = true; break; } out_unknown_idx = i; } else { TF_RETURN_IF_ERROR( c->Multiply(known_out_elems, dim, &known_out_elems)); } } } int32_t in_unknown_idx = -1; DimensionHandle known_in_elems = c->NumElements(in); if (!c->ValueKnown(known_in_elems)) { known_in_elems = c->MakeDim(1); for (int32_t i = 0; i < c->Rank(in); ++i) { DimensionHandle dim = c->Dim(in, i); if (!c->ValueKnown(dim)) { if (in_unknown_idx >= 0) { too_many_unknown = true; break; } in_unknown_idx = i; } else { TF_RETURN_IF_ERROR(c->Multiply(known_in_elems, dim, &known_in_elems)); } } } if (!too_many_unknown) { if (in_unknown_idx < 0 && out_unknown_idx < 0) { // Just check that the dimensions match. if (c->Value(known_in_elems) != c->Value(known_out_elems)) { return errors::InvalidArgument( "Cannot reshape a tensor with ", c->DebugString(known_in_elems), " elements to shape ", c->DebugString(out), " (", c->DebugString(known_out_elems), " elements)"); } } else if (in_unknown_idx < 0 && out_unknown_idx >= 0 && c->Value(known_out_elems) > 0) { // Input fully known, infer the one missing output dim DimensionHandle inferred_dim; TF_RETURN_IF_ERROR(c->Divide(known_in_elems, c->Value(known_out_elems), true /* evenly_divisible */, &inferred_dim)); TF_RETURN_IF_ERROR( c->ReplaceDim(out, out_unknown_idx, inferred_dim, &out)); } else if (in_unknown_idx >= 0 && out_unknown_idx < 0 && c->Value(known_in_elems) != 0) { // Output fully known, infer the one missing input dim DimensionHandle inferred_dim; TF_RETURN_IF_ERROR(c->Divide(known_out_elems, c->Value(known_in_elems), true /* evenly_divisible */, &inferred_dim)); DimensionHandle unknown_in_dim = c->Dim(in, in_unknown_idx); TF_RETURN_IF_ERROR( c->Merge(unknown_in_dim, inferred_dim, &unknown_in_dim)); } else if (in_unknown_idx >= 0 && out_unknown_idx >= 0) { // Exactly one unknown dimension in both input and output. These 2 are // equal iff the known elements are equal. if (c->Value(known_in_elems) == c->Value(known_out_elems)) { DimensionHandle unknown_in_dim = c->Dim(in, in_unknown_idx); TF_RETURN_IF_ERROR( c->ReplaceDim(out, out_unknown_idx, unknown_in_dim, &out)); } } } } c->set_output(0, out); return Status::OK(); } } // namespace REGISTER_OP("ParallelConcat") .Input("values: N * T") .Output("output: T") .Attr("N: int >= 1") .Attr("T: type") .Attr("shape: shape") .SetShapeFn([](InferenceContext* c) { // Validate that the shape attr is correct. 
PartialTensorShape shape; TF_RETURN_IF_ERROR(c->GetAttr("shape", &shape)); ShapeHandle passed_shape; TF_RETURN_IF_ERROR( c->MakeShapeFromPartialTensorShape(shape, &passed_shape)); if (!c->FullyDefined(passed_shape)) { return errors::InvalidArgument("shape attr must be fully defined."); } ShapeHandle cur; TF_RETURN_IF_ERROR(c->ReplaceDim( passed_shape, 0, c->MakeDim(shape_inference::DimensionOrConstant(1)), &cur)); for (int i = 0; i < c->num_inputs(); ++i) { if (!c->FullyDefined(c->input(i))) { return errors::InvalidArgument( "All input shapes must be fully defined."); } DimensionHandle unused; if (!c->WithValue(c->Dim(c->input(i), 0), 1, &unused).ok()) { return errors::InvalidArgument("Size of first dimension must be 1."); } TF_RETURN_WITH_CONTEXT_IF_ERROR(c->Merge(c->input(i), cur, &cur), "From merging shape ", i, " with other shapes."); } c->set_output(0, passed_shape); return Status::OK(); }); REGISTER_OP("Pack") .Input("values: N * T") .Output("output: T") .Attr("N: int >= 1") .Attr("T: type") .Attr("axis: int = 0") .SetShapeFn([](InferenceContext* c) { // Validate shapes of all inputs are compatible ShapeHandle cur = c->input(c->num_inputs() - 1); for (int i = c->num_inputs() - 2; i >= 0; --i) { TF_RETURN_WITH_CONTEXT_IF_ERROR(c->Merge(c->input(i), cur, &cur), "From merging shape ", i, " with other shapes."); } if (!c->RankKnown(cur)) { c->set_output(0, c->UnknownShape()); return Status::OK(); } // Determine the axis that will be added, converting from negative // axes to a positive point per negative indexing rules. int32_t rank = c->Rank(cur); int32_t axis; TF_RETURN_IF_ERROR(GetAxisForPackAndUnpack(c, rank + 1, &axis)); // Copy all dimensions over, inserting a dimension of value #inputs // at <axis>. std::vector<DimensionHandle> dims; int index = 0; while (index < axis) dims.push_back(c->Dim(cur, index++)); dims.push_back(c->MakeDim(c->num_inputs())); while (index < rank) dims.push_back(c->Dim(cur, index++)); c->set_output(0, c->MakeShape(dims)); for (int i = 0; i < c->num_inputs(); ++i) { auto* shape_and_type = c->input_handle_shapes_and_types(i); if (shape_and_type) { if (!c->RelaxOutputHandleShapesAndMergeTypes(0, *shape_and_type)) { c->set_output_handle_shapes_and_types( 0, std::vector<shape_inference::ShapeAndType>({})); break; } } } return Status::OK(); }); REGISTER_OP("DeepCopy") .Input("x: T") .Output("y: T") .Attr("T: type") .SetIsStateful() .SetShapeFn(UnchangedShape); REGISTER_OP("InplaceUpdate") .Input("x: T") .Input("i: int32") .Input("v: T") .Output("y: T") .Attr("T: type") .SetShapeFn(UnchangedShape); REGISTER_OP("InplaceAdd") .Input("x: T") .Input("i: int32") .Input("v: T") .Output("y: T") .Attr("T: type") .SetShapeFn(UnchangedShape); REGISTER_OP("InplaceSub") .Input("x: T") .Input("i: int32") .Input("v: T") .Output("y: T") .Attr("T: type") .SetShapeFn(UnchangedShape); REGISTER_OP("Empty") .Input("shape: int32") .Output("output: dtype") .Attr("dtype: type") .Attr("init: bool = false") .SetDoNotOptimize() .SetShapeFn([](InferenceContext* c) { ShapeHandle out; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(0, &out)); c->set_output(0, out); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("Unpack") .Input("value: T") .Output("output: num * T") .Attr("num: int >= 0") .Attr("T: type") .Attr("axis: int = 0") .SetShapeFn([](InferenceContext* c) { ShapeHandle s = c->input(0); ShapeHandle out; if (c->RankKnown(s)) { // Determine the axis that will be removed, converting from negative // axes to a positive 
point per negative indexing rules. int32_t rank = c->Rank(s); int32_t axis; TF_RETURN_IF_ERROR(GetAxisForPackAndUnpack(c, rank, &axis)); // The axis dim matches the number of outputs. DimensionHandle unused; TF_RETURN_IF_ERROR( c->WithValue(c->Dim(s, axis), c->num_outputs(), &unused)); // Copy all dimensions, removing the <axis> dimension. std::vector<DimensionHandle> dims; for (int i = 0; i < rank; ++i) { if (i != axis) dims.push_back(c->Dim(s, i)); } out = c->MakeShape(dims); } else { // All outputs are the same shape, but it's not known. out = c->UnknownShape(); } for (int i = 0; i < c->num_outputs(); ++i) c->set_output(i, out); return Status::OK(); }); REGISTER_OP("UnravelIndex") .Input("indices: Tidx") .Input("dims: Tidx") .Output("output: Tidx") .Attr("Tidx: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle indices = c->input(0); ShapeHandle dims; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &dims)); if (c->RankKnown(indices) && c->Rank(indices) == 0) { c->set_output(0, c->Vector(c->Dim(dims, 0))); } else if (c->RankKnown(indices)) { c->set_output(0, c->Matrix(c->Dim(dims, 0), c->NumElements(indices))); } else { c->set_output(0, c->UnknownShape()); } return Status::OK(); }); REGISTER_OP("BroadcastTo") .Input("input: T") .Input("shape: Tidx") .Output("output: T") .Attr("T: type") .Attr("Tidx: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle shape_in = c->input(1); TF_RETURN_IF_ERROR(c->WithRank(shape_in, 1, &shape_in)); ShapeHandle out; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(1, &out)); if (!c->RankKnown(out)) { // We have no information about the shape of the output. c->set_output(0, out); return Status::OK(); } ShapeHandle in = c->input(0); if (!c->RankKnown(in)) { // We have no information about the shape of the input, // nothing to do here. c->set_output(0, out); return Status::OK(); } int out_rank = c->Rank(out); TF_RETURN_IF_ERROR(c->WithRankAtMost(in, out_rank, &in)); int in_rank = c->Rank(in); for (int i = 0; i < in_rank; ++i) { auto in_dim = c->Dim(in, in_rank - i - 1); if (c->Value(in_dim) > 1) { // If the input dimension is greater than 1 then the output dimension // must be equal to it, since we only broadcast "from left to right". auto out_dim = c->Dim(out, out_rank - i - 1); TF_RETURN_IF_ERROR(c->Merge(in_dim, out_dim, &out_dim)); TF_RETURN_IF_ERROR( c->ReplaceDim(out, out_rank - i - 1, out_dim, &out)); } } c->set_output(0, out); return Status::OK(); }); // -------------------------------------------------------------------------- // TODO(josh11b): Remove the >= 2 constraint, once we can rewrite the graph // in the N == 1 case to remove the node. REGISTER_OP("Concat") .Input("concat_dim: int32") .Input("values: N * T") .Output("output: T") .Attr("N: int >= 2") .Attr("T: type") .SetShapeFn([](InferenceContext* c) { return shape_inference::ConcatShape(c, c->num_inputs() - 1); }); REGISTER_OP("ConcatV2") .Input("values: N * T") .Input("axis: Tidx") .Output("output: T") .Attr("N: int >= 2") .Attr("T: type") .Attr("Tidx: {int32, int64} = DT_INT32") .SetShapeFn(shape_inference::ConcatV2Shape); // TODO(vivek.v.rane@intel.com): Prefix the op names with underscore if the ops // are not to be made user-accessible. 
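The BroadcastTo shape function registered above encodes the broadcasting rule in shape-inference terms: input dimensions are matched against the target shape from the trailing end, and any input dimension greater than 1 must equal the corresponding target dimension. The standalone sketch below restates that compatibility check over plain integer shapes (using -1 for an unknown dimension); it is an illustration of the rule only, not TensorFlow's `InferenceContext` code, and it omits the part of the real shape function that merges known input dimensions into unknown output dimensions.

```cpp
// Standalone illustration of the right-aligned broadcast check performed by
// the BroadcastTo shape function above; -1 stands for an unknown dimension.
#include <cstdio>
#include <vector>

// Returns true when `in` can be broadcast to `out`: dimensions are compared
// from the trailing end, and an input dimension greater than 1 must match
// the corresponding output dimension exactly.  Dimensions of 1 (and unknown
// dimensions) are always allowed to broadcast.
static bool BroadcastCompatible(const std::vector<long long>& in,
                                const std::vector<long long>& out) {
  if (in.size() > out.size())
    return false;  // mirrors WithRankAtMost(in, out_rank) in the shape fn
  for (size_t i = 0; i < in.size(); ++i) {
    long long in_dim = in[in.size() - i - 1];
    long long out_dim = out[out.size() - i - 1];
    if (in_dim > 1 && out_dim >= 0 && in_dim != out_dim)
      return false;  // a dimension larger than 1 cannot be stretched
  }
  return true;
}

int main() {
  std::printf("[3,1] -> [2,3,4]: %s\n",
              BroadcastCompatible({3, 1}, {2, 3, 4}) ? "ok" : "mismatch");
  std::printf("[3,2] -> [2,3,4]: %s\n",
              BroadcastCompatible({3, 2}, {2, 3, 4}) ? "ok" : "mismatch");
  return 0;
}
```

Here `[3,1]` broadcasts to `[2,3,4]` because the trailing `1` may expand and the `3` matches, while `[3,2]` is rejected because the trailing `2` would have to stretch to `4`.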
#ifdef INTEL_MKL REGISTER_OP("_MklConcatV2") .Input("values: N * T") .Input("axis: Tidx") .Input("mkl_values: N * uint8") .Input("mkl_axis: uint8") .Output("output: T") .Output("mkl_output: uint8") .Attr("N: int >= 2") .Attr("T: type") .Attr("Tidx: {int32, int64} = DT_INT32") .SetShapeFn(shape_inference::ConcatV2Shape) .Doc(R"doc( MKL version of ConcatV2 operator. Uses MKL DNN APIs to perform concatenation. NOTE Do not invoke this operator directly in Python. Graph rewrite pass is expected to invoke these operators. )doc"); #endif REGISTER_OP("ConcatOffset") .Input("concat_dim: int32") .Input("shape: N * int32") .Output("offset: N * int32") .Attr("N: int >= 2") .SetShapeFn([](InferenceContext* c) { for (int i = 1; i < c->num_inputs(); ++i) { c->set_output(i - 1, c->input(i)); } return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("Split") .Input("split_dim: int32") .Input("value: T") .Output("output: num_split * T") .Attr("num_split: int >= 1") .Attr("T: type") .SetShapeFn([](InferenceContext* c) { DimensionHandle split_dimension; ShapeHandle input = c->input(1); TF_RETURN_IF_ERROR(c->MakeDimForScalarInputWithNegativeIndexing( 0, c->Rank(input), &split_dimension)); int num_split = c->num_outputs(); ShapeHandle out; if (!c->ValueKnown(split_dimension)) { if (c->RankKnown(input)) { out = c->UnknownShapeOfRank(c->Rank(input)); } else { out = c->UnknownShape(); } } else { int64_t split_dim = c->Value(split_dimension); TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, split_dim + 1, &input)); DimensionHandle split_dim_size; TF_RETURN_WITH_CONTEXT_IF_ERROR( c->Divide(c->Dim(input, split_dim), num_split, true /* evenly_divisible */, &split_dim_size), "Number of ways to split should evenly divide the split dimension"); TF_RETURN_IF_ERROR( c->ReplaceDim(input, split_dim, split_dim_size, &out)); } for (int i = 0; i < num_split; ++i) c->set_output(i, out); return Status::OK(); }); REGISTER_OP("SplitV") .Input("value: T") .Input("size_splits: Tlen") .Input("split_dim: int32") .Output("output: num_split * T") .Attr("num_split: int >= 1") .Attr("T: type") .Attr("Tlen: {int32, int64} = DT_INT64") .SetShapeFn([](InferenceContext* c) { DimensionHandle split_dimension; ShapeHandle input = c->input(0); TF_RETURN_IF_ERROR(c->MakeDimForScalarInputWithNegativeIndexing( 2, c->Rank(input), &split_dimension)); int32_t num_outputs = c->num_outputs(); int32_t rank = c->Rank(input); ShapeHandle output_shape; const Tensor* size_splits = c->input_tensor(1); if (rank == InferenceContext::kUnknownRank) { // If the rank of input tensor is unknown, then return unknown shapes. // Note that the shape of each output can be different. for (int i = 0; i < num_outputs; ++i) { c->set_output(i, c->UnknownShape()); } } else if (rank == 0) { // Throw error if input is a scalar. return errors::InvalidArgument("Can't split scalars"); } else if (size_splits == nullptr && c->ValueKnown(split_dimension)) { // If split dimension is known, but the sizes are unknown, then // only the split dimension is unknown output_shape = input; for (int i = 0; i < num_outputs; ++i) { TF_RETURN_IF_ERROR(c->ReplaceDim(output_shape, c->Value(split_dimension), c->UnknownDim(), &output_shape)); c->set_output(i, output_shape); } } else if (size_splits == nullptr && !c->ValueKnown(split_dimension)) { // If split dimension or tensor containing the split sizes is unknown, // then return unknown shapes of same rank as input. 
Note that each // output shape can be different since splitv doesn't always split // tensors evenly. for (int i = 0; i < num_outputs; ++i) { c->set_output(i, c->UnknownShapeOfRank(rank)); } } else { // Determine the output shape if split dimension and split sizes are // known. int64_t split_dim = c->Value(split_dimension); TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, split_dim + 1, &input)); std::vector<int64_t> data; if (size_splits->dtype() == DT_INT32) { data = AsInt64<int32>(size_splits, size_splits->shape().dim_size(0)); } else { data = AsInt64<int64_t>(size_splits, size_splits->shape().dim_size(0)); } if (num_outputs != data.size()) { return errors::InvalidArgument( "Length of size_splits should be equal to num_outputs"); } int64_t total_size = 0; bool has_neg_one = false; for (const auto size : data) { if (size == -1) { if (has_neg_one) { return errors::InvalidArgument( "size_splits can only have one -1"); } has_neg_one = true; } else { total_size += size; } } auto split_dim_size = c->Value(c->Dim(input, split_dim)); // If the sizes of the splits are known, then // make sure that the sizes add up to the expected // dimension size, with the possibility of a -1. // Specify the full output shapes. for (int i = 0; i < num_outputs; ++i) { auto size = data[i]; if (data[i] == -1 && c->ValueKnown(split_dim_size)) { size = split_dim_size - total_size; } // If we have a negative known size (either explicit, or computed // via -1), then the split sizes are invalid. if (size < -1 || (size == -1 && c->ValueKnown(split_dim_size))) { return errors::InvalidArgument("Split size at index ", i, " must be >= 0. Got: ", size); } TF_RETURN_IF_ERROR( c->ReplaceDim(input, split_dim, c->MakeDim(size), &output_shape)); c->set_output(i, output_shape); } if (c->ValueKnown(split_dim_size)) { if (has_neg_one ? total_size > split_dim_size : total_size != split_dim_size) { return errors::InvalidArgument( "can't split axis of size ", split_dim_size, " into pieces of size [", absl::StrJoin(data, ","), "]"); } } } return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("Const") .Output("output: dtype") .Attr("value: tensor") .Attr("dtype: type") .SetShapeFn([](InferenceContext* c) { const TensorProto* proto = nullptr; TF_RETURN_IF_ERROR(c->GetAttr("value", &proto)); TF_RETURN_IF_ERROR(TensorShape::IsValidShape(proto->tensor_shape())); TensorShape shape(proto->tensor_shape()); std::vector<DimensionHandle> dims; dims.reserve(shape.dims()); for (int i = 0; i < shape.dims(); ++i) { dims.push_back(c->MakeDim(shape.dim_size(i))); } c->set_output(0, c->MakeShape(dims)); return Status::OK(); }); // Returns a constant tensor on the host. Useful for writing C++ tests // and benchmarks which run on GPU but require arguments pinned to the host. // Used by test::graph::HostConstant. // value: Attr `value` is the tensor to return. REGISTER_OP("HostConst") .Output("output: dtype") .Attr("value: tensor") .Attr("dtype: type") .SetShapeFn(shape_inference::UnknownShape); // Used executing op-by-op to copy constants to the current device without // serializing tensors as TensorProtos, after a host tensor has been // created. Same behavior as Identity, but no gradient and potentially relaxed // copy semantics. 
REGISTER_OP("_EagerConst") .Input("input: T") .Output("output: T") .Attr("T: type") .SetShapeFn(shape_inference::UnchangedShape); // -------------------------------------------------------------------------- // TODO(mgubin): Update the doc when the freeze_graph script supports converting // into memmapped format. REGISTER_OP("ImmutableConst") .Attr("dtype: type") .Attr("shape: shape") .Attr("memory_region_name: string") .Output("tensor: dtype") .SetShapeFn(shape_inference::ExplicitShape); REGISTER_OP("GuaranteeConst") .Input("input: T") .Output("output: T") .Attr("T: type") .SetShapeFn([](shape_inference::InferenceContext* c) { return UnchangedShape(c); }) // We don't want this to be optimized away. .SetDoNotOptimize(); // -------------------------------------------------------------------------- REGISTER_OP("ZerosLike") .Input("x: T") .Output("y: T") .Attr("T: type") .SetShapeFn(shape_inference::UnchangedShape); // -------------------------------------------------------------------------- REGISTER_OP("OnesLike") .Input("x: T") .Output("y: T") .Attr( "T: {bfloat16, half, float, double, int8, uint8, int16, uint16, int32, " "int64, complex64, complex128, bool}") .SetShapeFn(shape_inference::UnchangedShape); // -------------------------------------------------------------------------- REGISTER_OP("Diag") .Input("diagonal: T") .Output("output: T") .Attr( "T: {bfloat16, half, float, double, int32, int64, complex64, " "complex128}") .SetShapeFn([](InferenceContext* c) { ShapeHandle in = c->input(0); TF_RETURN_IF_ERROR(c->WithRankAtLeast(in, 1, &in)); // Output shape is original concatenated with itself. ShapeHandle out; TF_RETURN_IF_ERROR(c->Concatenate(in, in, &out)); c->set_output(0, out); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("DiagPart") .Input("input: T") .Output("diagonal: T") .Attr( "T: {bfloat16, half, float, double, int32, int64, complex64, " "complex128}") .SetShapeFn([](InferenceContext* c) { ShapeHandle in = c->input(0); if (!c->RankKnown(in)) { c->set_output(0, c->UnknownShape()); return Status::OK(); } // Rank must be even, and result will have rank <rank/2>. const int32_t rank = c->Rank(in); if ((rank % 2) != 0 || rank <= 0) { return errors::InvalidArgument( "Input must have even and non-zero rank, input rank is ", rank); } const int32_t mid = rank / 2; // output dim[i] is the merge of in.dim[i] and in.dim[i+mid]. 
std::vector<DimensionHandle> dims(mid); for (int i = 0; i < mid; ++i) { TF_RETURN_IF_ERROR( c->Merge(c->Dim(in, i), c->Dim(in, i + mid), &dims[i])); } c->set_output(0, c->MakeShape(dims)); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("MatrixDiag") .Input("diagonal: T") .Output("output: T") .Attr("T: type") .SetShapeFn([](InferenceContext* c) { ShapeHandle in; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &in)); if (!c->RankKnown(in)) { c->set_output(0, c->UnknownShape()); return Status::OK(); } const int32_t rank = c->Rank(in); ShapeHandle out; TF_RETURN_IF_ERROR( c->Concatenate(in, c->Vector(c->Dim(in, rank - 1)), &out)); c->set_output(0, out); return Status::OK(); }); REGISTER_OP("MatrixDiagV2") .Input("diagonal: T") .Input("k: int32") .Input("num_rows: int32") .Input("num_cols: int32") .Input("padding_value: T") .Output("output: T") .Attr("T: type") .SetShapeFn(shape_inference::MatrixDiagV2Shape); REGISTER_OP("MatrixDiagV3") .Input("diagonal: T") .Input("k: int32") .Input("num_rows: int32") .Input("num_cols: int32") .Input("padding_value: T") .Output("output: T") .Attr("T: type") .Attr( "align: {'LEFT_RIGHT', 'RIGHT_LEFT', 'LEFT_LEFT', 'RIGHT_RIGHT'} = " "'RIGHT_LEFT'") .SetShapeFn(shape_inference::MatrixDiagV2Shape); // -------------------------------------------------------------------------- REGISTER_OP("MatrixSetDiag") .Input("input: T") .Input("diagonal: T") .Output("output: T") .Attr("T: type") .SetShapeFn([](InferenceContext* c) { ShapeHandle input; ShapeHandle diag; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input)); TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 1, &diag)); if (c->RankKnown(input)) { TF_RETURN_IF_ERROR(c->WithRank(c->input(1), c->Rank(input) - 1, &diag)); } DimensionHandle smallest_dim; TF_RETURN_IF_ERROR( c->Min(c->Dim(input, -2), c->Dim(input, -1), &smallest_dim)); TF_RETURN_IF_ERROR( c->Merge(smallest_dim, c->Dim(diag, -1), &smallest_dim)); ShapeHandle output = input; if (c->RankKnown(diag) && !c->FullyDefined(input)) { // Try to infer parts of shape from diag. 
ShapeHandle diag_batch_shape; TF_RETURN_IF_ERROR(c->Subshape(diag, 0, -1, &diag_batch_shape)); TF_RETURN_IF_ERROR( c->Concatenate(diag_batch_shape, c->UnknownShapeOfRank(2), &diag)); TF_RETURN_IF_ERROR(c->Merge(input, diag, &output)); } c->set_output(0, output); return Status::OK(); }); REGISTER_OP("MatrixSetDiagV2") .Input("input: T") .Input("diagonal: T") .Input("k: int32") .Output("output: T") .Attr("T: type") .SetShapeFn(shape_inference::MatrixSetDiagV2Shape); REGISTER_OP("MatrixSetDiagV3") .Input("input: T") .Input("diagonal: T") .Input("k: int32") .Output("output: T") .Attr("T: type") .Attr( "align: {'LEFT_RIGHT', 'RIGHT_LEFT', 'LEFT_LEFT', 'RIGHT_RIGHT'} = " "'RIGHT_LEFT'") .SetShapeFn(shape_inference::MatrixSetDiagV2Shape); // -------------------------------------------------------------------------- REGISTER_OP("MatrixDiagPart") .Input("input: T") .Output("diagonal: T") .Attr("T: type") .SetShapeFn([](InferenceContext* c) { ShapeHandle in; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &in)); if (!c->RankKnown(in)) { c->set_output(0, c->UnknownShape()); return Status::OK(); } const int32_t rank = c->Rank(in); std::vector<DimensionHandle> dims; dims.reserve(rank - 2); for (int i = 0; i < rank - 2; ++i) dims.push_back(c->Dim(in, i)); DimensionHandle min_dim; TF_RETURN_IF_ERROR( c->Min(c->Dim(in, rank - 2), c->Dim(in, rank - 1), &min_dim)); dims.push_back(min_dim); c->set_output(0, c->MakeShape(dims)); return Status::OK(); }); REGISTER_OP("MatrixDiagPartV2") .Input("input: T") .Input("k: int32") .Input("padding_value: T") .Output("diagonal: T") .Attr("T: type") .SetShapeFn(shape_inference::MatrixDiagPartV2Shape); REGISTER_OP("MatrixDiagPartV3") .Input("input: T") .Input("k: int32") .Input("padding_value: T") .Output("diagonal: T") .Attr("T: type") .Attr( "align: {'LEFT_RIGHT', 'RIGHT_LEFT', 'LEFT_LEFT', 'RIGHT_RIGHT'} = " "'RIGHT_LEFT'") .SetShapeFn(shape_inference::MatrixDiagPartV2Shape); // -------------------------------------------------------------------------- REGISTER_OP("MatrixBandPart") .Input("input: T") .Input("num_lower: Tindex") .Input("num_upper: Tindex") .Output("band: T") .Attr("T: type") .Attr("Tindex: {int32, int64} = DT_INT64") .SetShapeFn(shape_inference::UnchangedShape); // -------------------------------------------------------------------------- REGISTER_OP("Reverse") .Input("tensor: T") .Input("dims: bool") .Output("output: T") .Attr( "T: {uint8, int8, uint16, int16, uint32, int32, uint64, int64, bool, " "bfloat16, half, float, double, complex64, complex128, string}") .SetShapeFn([](InferenceContext* c) { ShapeHandle input = c->input(0); ShapeHandle dims; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &dims)); DimensionHandle dims_dim = c->Dim(dims, 0); if (c->ValueKnown(dims_dim)) { TF_RETURN_IF_ERROR(c->WithRank(input, c->Value(dims_dim), &input)); } if (c->Rank(input) > 8) { return errors::InvalidArgument( "reverse does not work on tensors with more than 8 dimensions"); } c->set_output(0, input); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("ReverseV2") .Input("tensor: T") .Input("axis: Tidx") .Output("output: T") .Attr("Tidx: {int32, int64} = DT_INT32") .Attr( "T: {uint8, int8, uint16, int16, int32, uint32, int64, uint64, bool, " "bfloat16, half, float, double, complex64, complex128, string}") .SetShapeFn([](InferenceContext* c) { ShapeHandle input = c->input(0); ShapeHandle axis; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &axis)); if (c->Rank(input) > 8) { return 
errors::InvalidArgument( "reverse does not work on tensors with more than 8 dimensions"); } const Tensor* axis_tensor = c->input_tensor(1); if (axis_tensor != nullptr && c->RankKnown(input)) { int32_t rank = c->Rank(input); std::vector<int64_t> axis_value; if (axis_tensor->dtype() == DT_INT32) { axis_value = AsInt64<int32>(axis_tensor, axis_tensor->NumElements()); } else { axis_value = AsInt64<int64_t>(axis_tensor, axis_tensor->NumElements()); } std::vector<bool> axes_dense(c->Rank(input), false); for (int i = 0; i < axis_value.size(); i++) { int64_t canonical_axis = axis_value[i] < 0 ? rank + axis_value[i] : axis_value[i]; if (canonical_axis < 0 || canonical_axis >= rank) { return errors::InvalidArgument("'axis'[", i, "] = ", axis_value[i], " is out of valid range [", 0, ", ", rank - 1); } if (axes_dense[canonical_axis]) { return errors::InvalidArgument("axis ", canonical_axis, " specified more than once."); } axes_dense[canonical_axis] = true; } } c->set_output(0, input); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("EditDistance") .Input("hypothesis_indices: int64") .Input("hypothesis_values: T") .Input("hypothesis_shape: int64") .Input("truth_indices: int64") .Input("truth_values: T") .Input("truth_shape: int64") .Attr("normalize: bool = true") .Attr("T: type") .Output("output: float") .SetShapeFn([](InferenceContext* c) { TF_RETURN_IF_ERROR(shape_inference::ValidateSparseTensor( c, c->input(0), c->input(1), c->input(2))); TF_RETURN_IF_ERROR(shape_inference::ValidateSparseTensor( c, c->input(3), c->input(4), c->input(5))); const Tensor* hypothesis_shape_t = c->input_tensor(2); const Tensor* truth_shape_t = c->input_tensor(5); if (hypothesis_shape_t == nullptr || truth_shape_t == nullptr) { // We need to know the runtime shape of the two tensors, // or else the output shape is unknown. return shape_inference::UnknownShape(c); } if (hypothesis_shape_t->NumElements() != truth_shape_t->NumElements()) { return errors::InvalidArgument( "Num elements of hypothesis_shape does not match truth_shape: ", hypothesis_shape_t->NumElements(), " vs. 
", truth_shape_t->NumElements()); } auto h_values = hypothesis_shape_t->flat<int64_t>(); auto t_values = truth_shape_t->flat<int64_t>(); std::vector<DimensionHandle> dims(hypothesis_shape_t->NumElements() - 1); for (int i = 0; i < dims.size(); ++i) { dims[i] = c->MakeDim(std::max(h_values(i), t_values(i))); } c->set_output(0, c->MakeShape(dims)); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("Fill") .Input("dims: index_type") .Input("value: T") .Output("output: T") .Attr("T: type") .Attr("index_type: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { DataType index_type = DT_INT32; Status s = c->GetAttr("index_type", &index_type); if (!s.ok() && s.code() != error::NOT_FOUND) { return s; } ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused)); const Tensor* t = c->input_tensor(0); if (t != nullptr) { for (int i = 0; i < t->NumElements(); ++i) { if ((index_type == DT_INT32 && t->vec<int32>()(i) < 0) || (index_type == DT_INT64 && t->vec<int64_t>()(i) < 0)) { return errors::InvalidArgument("Fill dimensions must be >= 0"); } } } ShapeHandle out; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(0, &out)); c->set_output(0, out); auto* shape_and_type = c->input_handle_shapes_and_types(1); if (shape_and_type) { c->set_output_handle_shapes_and_types(0, *shape_and_type); } return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("_ParallelConcatStart") .Output("output: dtype") .Attr("shape: shape") .Attr("dtype: type") .SetIsStateful() .SetShapeFn(shape_inference::ExplicitShape) .Doc(R"doc( Creates an empty Tensor with shape `shape` and type `dtype`. The memory can optionally be initialized. This is usually useful in conjunction with inplace operations. shape: 1-D `Tensor` indicating the shape of the output. dtype: The element type of the returned tensor. output: An empty Tensor of the specified type. )doc"); // -------------------------------------------------------------------------- REGISTER_OP("_ParallelConcatUpdate") .Input("value: T") .Input("update: T") .Output("output: T") .Attr("T: type") .Attr("loc: int") .SetShapeFn(shape_inference::UnchangedShape) .Doc(R"doc( Updates input `value` at `loc` with `update`. If you use this function you will almost certainly want to add a control dependency as done in the implementation of parallel_stack to avoid race conditions. value: A `Tensor` object that will be updated in-place. loc: A scalar indicating the index of the first dimension such that value[loc, :] is updated. update: A `Tensor` of rank one less than `value` if `loc` is a scalar, otherwise of rank equal to `value` that contains the new values for `value`. output: `value` that has been updated accordingly. 
)doc"); // -------------------------------------------------------------------------- REGISTER_OP("Gather") .Input("params: Tparams") .Input("indices: Tindices") .Attr("validate_indices: bool = true") .Output("output: Tparams") .Attr("Tparams: type") .Attr("Tindices: {int32,int64}") .SetShapeFn([](InferenceContext* c) { ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &unused)); ShapeHandle params_subshape; TF_RETURN_IF_ERROR(c->Subshape(c->input(0), 1, &params_subshape)); ShapeHandle indices_shape = c->input(1); ShapeHandle out; TF_RETURN_IF_ERROR(c->Concatenate(indices_shape, params_subshape, &out)); c->set_output(0, out); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("GatherV2") .Input("params: Tparams") .Input("indices: Tindices") .Input("axis: Taxis") .Attr("batch_dims: int = 0") .Output("output: Tparams") .Attr("Tparams: type") .Attr("Tindices: {int32,int64}") .Attr("Taxis: {int32,int64}") .SetShapeFn([](InferenceContext* c) { ShapeHandle params_shape; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &params_shape)); ShapeHandle indices_shape = c->input(1); ShapeHandle unused_axis_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused_axis_shape)); const Tensor* axis_t = c->input_tensor(2); // If axis is unknown, we can only infer that the result is params_rank + // indices_rank - 1. if (axis_t == nullptr) { if (c->RankKnown(params_shape) && c->RankKnown(indices_shape)) { int32_t batch_dims; TF_RETURN_IF_ERROR(c->GetAttr("batch_dims", &batch_dims)); c->set_output(0, c->UnknownShapeOfRank(c->Rank(params_shape) + c->Rank(indices_shape) - 1 - batch_dims)); } else { c->set_output(0, c->UnknownShape()); } return Status::OK(); } // Note, axis can be negative. int64_t axis = 0; if (axis_t->dtype() == DT_INT32) { axis = axis_t->scalar<int32>()(); } else { axis = axis_t->scalar<int64_t>()(); } // Check that params has rank of at least axis + 1. ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRankAtLeast( params_shape, axis < 0 ? -axis : axis + 1, &unused)); // Note, batch_dims can be negative. int32_t batch_dims; TF_RETURN_IF_ERROR(c->GetAttr("batch_dims", &batch_dims)); // -rank(indices) <= batch_dims <= rank(indices) TF_RETURN_IF_ERROR( c->WithRankAtLeast(indices_shape, std::abs(batch_dims), &unused)); if (batch_dims < 0) { batch_dims += c->Rank(indices_shape); } // rank(params) > batch_dims TF_RETURN_IF_ERROR( c->WithRankAtLeast(params_shape, batch_dims + 1, &unused)); ShapeHandle params_outer_subshape; TF_RETURN_IF_ERROR( c->Subshape(params_shape, 0, axis, &params_outer_subshape)); ShapeHandle indices_inner_subshape; TF_RETURN_IF_ERROR( c->Subshape(indices_shape, batch_dims, &indices_inner_subshape)); ShapeHandle out; TF_RETURN_IF_ERROR( c->Concatenate(params_outer_subshape, indices_inner_subshape, &out)); // Slice from axis + 1 to the end of params_shape to collect the inner // dimensions of the result. Special case -1 here since -1 + 1 wraps, and // we slice from 0 to the end of shape. Subshape() handles all other // out-of-bounds checking. 
if (axis != -1) { ShapeHandle params_inner_subshape; TF_RETURN_IF_ERROR( c->Subshape(params_shape, axis + 1, &params_inner_subshape)); TF_RETURN_IF_ERROR(c->Concatenate(out, params_inner_subshape, &out)); } c->set_output(0, out); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("GatherNd") .Input("params: Tparams") .Input("indices: Tindices") .Output("output: Tparams") .Attr("Tparams: type") .Attr("Tindices: {int32,int64}") .SetShapeFn(shape_inference::GatherNdShape); // -------------------------------------------------------------------------- REGISTER_OP("Identity") .Input("input: T") .Output("output: T") .Attr("T: type") .SetForwardTypeFn(full_type::ReplicateInputs()) .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("Snapshot") .Input("input: T") .Output("output: T") .Attr("T: type") .SetShapeFn(shape_inference::UnchangedShape); #ifdef INTEL_MKL REGISTER_OP("_MklIdentity") .Input("input: T") .Input("mkl_input: uint8") .Output("output: T") .Output("mkl_output: uint8") .Attr("T: type") .SetShapeFn(shape_inference::UnchangedShape) .Doc(R"Doc( Mkl implementation of IdentityOp )Doc"); #endif REGISTER_OP("IdentityN") .Input("input: T") .Output("output: T") .Attr("T: list(type)") .SetShapeFn([](shape_inference::InferenceContext* c) { std::vector<ShapeHandle> input; TF_RETURN_IF_ERROR(c->input("input", &input)); TF_RETURN_IF_ERROR(c->set_output("output", input)); // If any of the input shapes are not known, we should return error. for (int i = 0; i < input.size(); i++) { if (!input[i].Handle()) { return errors::InvalidArgument(absl::StrCat( "Cannot infer output shape #", i, " for IdentityN node because input shape #", i, " is unknown.")); } } return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("RefIdentity") .Input("input: Ref(T)") .Output("output: Ref(T)") .Attr("T: type") .SetShapeFn(shape_inference::UnchangedShape) .SetAllowsUninitializedInput(); // -------------------------------------------------------------------------- REGISTER_OP("DebugGradientIdentity") .Input("input: T") .Output("output: T") .Attr("T: type") .SetShapeFn(shape_inference::UnchangedShape) .SetAllowsUninitializedInput(); REGISTER_OP("DebugGradientRefIdentity") .Input("input: Ref(T)") .Output("output: Ref(T)") .Attr("T: type") .SetShapeFn(shape_inference::UnchangedShape) .SetAllowsUninitializedInput(); // -------------------------------------------------------------------------- REGISTER_OP("StopGradient") .Input("input: T") .Output("output: T") .Attr("T: type") .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("PreventGradient") .Input("input: T") .Output("output: T") .Attr("T: type") .Attr("message: string = ''") .SetShapeFn(shape_inference::UnchangedShape); // -------------------------------------------------------------------------- REGISTER_OP("CheckNumerics") .Input("tensor: T") .Output("output: T") .Attr("T: {bfloat16, half, float, double}") .Attr("message: string") .SetIsStateful() .SetShapeFn(shape_inference::UnchangedShape); // -------------------------------------------------------------------------- REGISTER_OP("CheckNumericsV2") .Input("tensor: T") .Output("output: T") .Attr("T: {bfloat16, half, float, double}") .Attr("message: string") .SetIsStateful() .SetShapeFn(shape_inference::UnchangedShape); // -------------------------------------------------------------------------- REGISTER_OP("Reshape") .Input("tensor: T") .Input("shape: Tshape") .Output("output: T") 
.Attr("T: type") .Attr("Tshape: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { return SetOutputShapeForReshape(c); }); #ifdef INTEL_MKL REGISTER_OP("_MklReshape") .Input("tensor: T") .Input("shape: Tshape") .Input("mkl_tensor: uint8") .Input("mkl_shape: uint8") .Output("output: T") .Output("mkl_output: uint8") .Attr("T: type") .Attr("Tshape: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { return SetOutputShapeForReshape(c); }) .Doc(R"Doc( MKL implementation of ReshapeOp. )Doc"); #endif // INTEL_MKL // -------------------------------------------------------------------------- REGISTER_OP("InvertPermutation") .Input("x: T") .Output("y: T") .Attr("T: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle x; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &x)); c->set_output(0, x); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("Transpose") .Input("x: T") .Input("perm: Tperm") .Output("y: T") .Attr("T: type") .Attr("Tperm: {int32, int64} = DT_INT32") .SetShapeFn(TransposeShapeFn); #ifdef INTEL_MKL REGISTER_OP("_MklTranspose") .Input("x: T") .Input("perm: Tperm") .Output("y: T") .Attr("T: type") .Attr("Tperm: {int32, int64} = DT_INT32") .SetShapeFn(TransposeShapeFn); #endif // INTEL_MKL // -------------------------------------------------------------------------- REGISTER_OP("ConjugateTranspose") .Input("x: T") .Input("perm: Tperm") .Output("y: T") .Attr("T: type") .Attr("Tperm: {int32, int64} = DT_INT32") .SetShapeFn(TransposeShapeFn); #ifdef INTEL_MKL REGISTER_OP("_MklConjugateTranspose") .Input("x: T") .Input("perm: Tperm") .Output("y: T") .Attr("T: type") .Attr("Tperm: {int32, int64} = DT_INT32") .SetShapeFn(TransposeShapeFn); #endif // INTEL_MKL // -------------------------------------------------------------------------- namespace { Status UniqueIdxShapeFn(InferenceContext* c) { ShapeHandle input = c->input(0); const Tensor* axis_t = c->input_tensor(1); if (axis_t == nullptr || !c->RankKnown(input)) { c->set_output(1, c->Vector(InferenceContext::kUnknownDim)); return Status::OK(); } if (c->Rank(c->input(1)) != 1) { return errors::InvalidArgument("axis expects a 1D vector."); } int32_t n = axis_t->NumElements(); if (n == 0) { if (c->Rank(input) != 1) { return errors::InvalidArgument("x expects a 1D vector."); } c->set_output(1, input); return Status::OK(); } else if (n == 1) { int64_t axis; if (axis_t->dtype() == DT_INT32) { axis = static_cast<int64_t>(axis_t->flat<int32>()(0)); } else { axis = axis_t->flat<int64_t>()(0); } int64_t input_rank = c->Rank(input); if (axis < -input_rank || axis >= input_rank) { return errors::InvalidArgument("axis expects to be in the range [", -input_rank, ", ", input_rank, ")"); } if (axis < 0) { axis += input_rank; } c->set_output(1, c->Vector(c->Dim(input, axis))); return Status::OK(); } return errors::InvalidArgument( "axis does not support input tensors larger than 1 elements."); } } // namespace REGISTER_OP("Unique") .Input("x: T") .Output("y: T") .Output("idx: out_idx") .Attr("T: type") .Attr("out_idx: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { c->set_output(0, c->Vector(InferenceContext::kUnknownDim)); c->set_output(1, c->input(0)); // Assert that the input rank is 1. 
ShapeHandle dummy; return c->WithRank(c->input(0), 1, &dummy); }); REGISTER_OP("UniqueV2") .Input("x: T") .Input("axis: Taxis") .Output("y: T") .Output("idx: out_idx") .Attr("T: type") .Attr("Taxis: {int32,int64} = DT_INT64") .Attr("out_idx: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { c->set_output(0, c->UnknownShapeOfRank(c->Rank(c->input(0)))); TF_RETURN_IF_ERROR(UniqueIdxShapeFn(c)); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("UniqueWithCounts") .Input("x: T") .Output("y: T") .Output("idx: out_idx") .Output("count: out_idx") .Attr("T: type") .Attr("out_idx: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { auto uniq = c->Vector(InferenceContext::kUnknownDim); c->set_output(0, uniq); c->set_output(1, c->input(0)); c->set_output(2, uniq); return Status::OK(); }); REGISTER_OP("UniqueWithCountsV2") .Input("x: T") .Input("axis: Taxis") .Output("y: T") .Output("idx: out_idx") .Output("count: out_idx") .Attr("T: type") .Attr("Taxis: {int32,int64} = DT_INT64") .Attr("out_idx: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { c->set_output(0, c->UnknownShapeOfRank(c->Rank(c->input(0)))); TF_RETURN_IF_ERROR(UniqueIdxShapeFn(c)); c->set_output(2, c->Vector(InferenceContext::kUnknownDim)); return Status::OK(); }); namespace { Status ShapeShapeFn(InferenceContext* c) { for (int i = 0; i < c->num_inputs(); ++i) { DimensionHandle dim; if (c->RankKnown(c->input(i))) { dim = c->MakeDim(c->Rank(c->input(i))); } else { dim = c->UnknownDim(); } c->set_output(i, c->Vector(dim)); } return Status::OK(); } } // namespace // -------------------------------------------------------------------------- REGISTER_OP("Shape") .Input("input: T") .Output("output: out_type") .Attr("T: type") .Attr("out_type: {int32, int64} = DT_INT32") .SetShapeFn(ShapeShapeFn); REGISTER_OP("ShapeN") .Input("input: N * T") .Output("output: N * out_type") .Attr("N: int") .Attr("T: type") .Attr("out_type: {int32, int64} = DT_INT32") .SetShapeFn(ShapeShapeFn); REGISTER_OP("EnsureShape") .Input("input: T") .Output("output: T") .Attr("shape: shape") .Attr("T: type") .SetShapeFn([](InferenceContext* c) { // Merges desired shape and statically known shape of input PartialTensorShape desired_shape; TF_RETURN_IF_ERROR(c->GetAttr("shape", &desired_shape)); int rank = desired_shape.dims(); ShapeHandle input_shape_handle; ShapeHandle desired_shape_handle; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), rank, &input_shape_handle)); TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape( desired_shape, &desired_shape_handle)); ShapeHandle merged_shape; TF_RETURN_IF_ERROR( c->Merge(desired_shape_handle, input_shape_handle, &merged_shape)); c->set_output(0, merged_shape); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("ReverseSequence") .Input("input: T") .Input("seq_lengths: Tlen") .Output("output: T") .Attr("seq_dim: int") .Attr("batch_dim: int = 0") .Attr("T: type") .Attr("Tlen: {int32, int64} = DT_INT64") .SetShapeFn([](InferenceContext* c) { ShapeHandle input = c->input(0); ShapeHandle seq_lens_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &seq_lens_shape)); int64_t seq_dim; TF_RETURN_IF_ERROR(c->GetAttr("seq_dim", &seq_dim)); int64_t batch_dim; TF_RETURN_IF_ERROR(c->GetAttr("batch_dim", &batch_dim)); if (!c->RankKnown(input)) { return shape_inference::UnknownShape(c); } // Validate batch_dim and seq_dim against input. 
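// For example, an input of shape [4, 8, 16] with batch_dim = 0 and
// seq_dim = 1 requires seq_lengths to be a vector of length 4, and the
// output keeps the input shape [4, 8, 16].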
const int32_t input_rank = c->Rank(input); if (batch_dim >= input_rank) { return errors::InvalidArgument( "batch_dim must be < input rank: ", batch_dim, " vs. ", input_rank); } if (seq_dim >= input_rank) { return errors::InvalidArgument( "seq_dim must be < input rank: ", seq_dim, " vs. ", input_rank); } DimensionHandle batch_dim_dim = c->Dim(input, batch_dim); TF_RETURN_IF_ERROR( c->Merge(batch_dim_dim, c->Dim(seq_lens_shape, 0), &batch_dim_dim)); // Replace batch_dim of input with batch_size ShapeHandle output_shape; TF_RETURN_IF_ERROR( c->ReplaceDim(input, batch_dim, batch_dim_dim, &output_shape)); c->set_output(0, output_shape); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("Rank") .Input("input: T") .Output("output: int32") .Attr("T: type") .SetShapeFn(shape_inference::ScalarShape); // -------------------------------------------------------------------------- REGISTER_OP("Size") .Input("input: T") .Output("output: out_type") .Attr("T: type") .Attr("out_type: {int32, int64} = DT_INT32") .SetShapeFn(shape_inference::ScalarShape); // -------------------------------------------------------------------------- REGISTER_OP("Slice") .Input("input: T") .Input("begin: Index") .Input("size: Index") .Output("output: T") .Attr("T: type") .Attr("Index: {int32,int64}") .SetShapeFn(shape_inference::SliceShape); #ifdef INTEL_MKL REGISTER_OP("_MklSlice") .Input("input: T") .Input("begin: Index") .Input("size: Index") .Input("mkl_input: uint8") .Input("mkl_begin: uint8") .Input("mkl_size: uint8") .Output("output: T") .Output("mkl_output: uint8") .Attr("T: type") .Attr("Index: {int32,int64}") .SetShapeFn(shape_inference::SliceShape); #endif REGISTER_OP("StridedSlice") .Input("input: T") .Input("begin: Index") .Input("end: Index") .Input("strides: Index") .Output("output: T") .Attr("T: type") .Attr("Index: {int32, int64}") .Attr("begin_mask: int = 0") .Attr("end_mask: int = 0") .Attr("ellipsis_mask: int = 0") .Attr("new_axis_mask: int = 0") .Attr("shrink_axis_mask: int = 0") .SetShapeFn([](InferenceContext* c) { ShapeHandle input = c->input(0); ShapeHandle begin_shape, end_shape, strides_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &begin_shape)); TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &end_shape)); TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 1, &strides_shape)); TF_RETURN_IF_ERROR(c->Merge(begin_shape, end_shape, &begin_shape)); TF_RETURN_IF_ERROR(c->Merge(begin_shape, strides_shape, &begin_shape)); DimensionHandle sparse_dims_dim = c->Dim(begin_shape, 0); const Tensor* strides_value = c->input_tensor(3); // TODO(aselle,allenl): If we had a stride_mask it would be possible to do // more shape inference here (e.g. for x[3, ::T]). if (!c->RankKnown(input) || !c->ValueKnown(sparse_dims_dim) || strides_value == nullptr) { c->set_output(0, c->UnknownShape()); return Status::OK(); } PartialTensorShape input_shape({}); for (int i = 0; i < c->Rank(input); ++i) { auto dim = c->Dim(input, i); input_shape.AddDim(c->ValueKnown(dim) ? 
c->Value(dim) : -1); } int32_t begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask; TF_RETURN_IF_ERROR(c->GetAttr("begin_mask", &begin_mask)); TF_RETURN_IF_ERROR(c->GetAttr("end_mask", &end_mask)); TF_RETURN_IF_ERROR(c->GetAttr("ellipsis_mask", &ellipsis_mask)); TF_RETURN_IF_ERROR(c->GetAttr("new_axis_mask", &new_axis_mask)); TF_RETURN_IF_ERROR(c->GetAttr("shrink_axis_mask", &shrink_axis_mask)); const Tensor* begin_value = c->input_tensor(1); const Tensor* end_value = c->input_tensor(2); PartialTensorShape processing_shape, final_shape; bool is_identity, is_simple_slice, slice_dim0; gtl::InlinedVector<int64, 4> begin, end, strides; TF_RETURN_IF_ERROR(ValidateStridedSliceOp( begin_value, end_value, *strides_value, input_shape, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask, &processing_shape, &final_shape, &is_identity, &is_simple_slice, &slice_dim0, &begin, &end, &strides)); ShapeHandle out; TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(final_shape, &out)); c->set_output(0, out); auto* shape_and_type = c->input_handle_shapes_and_types(0); if (shape_and_type) { c->set_output_handle_shapes_and_types(0, *shape_and_type); } return Status::OK(); }); REGISTER_OP("StridedSliceGrad") .Input("shape: Index") .Input("begin: Index") .Input("end: Index") .Input("strides: Index") .Input("dy: T") .Output("output: T") .Attr("T: type") .Attr("Index: {int32, int64}") .Attr("begin_mask: int = 0") .Attr("end_mask: int = 0") .Attr("ellipsis_mask: int = 0") .Attr("new_axis_mask: int = 0") .Attr("shrink_axis_mask: int = 0") .SetShapeFn([](InferenceContext* c) { ShapeHandle out; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(0, &out)); c->set_output(0, out); return Status::OK(); }); REGISTER_OP("StridedSliceAssign") .Input("ref: Ref(T)") .Input("begin: Index") .Input("end: Index") .Input("strides: Index") .Input("value: T") .Output("output_ref: Ref(T)") .Attr("T: type") .Attr("Index: {int32, int64}") .Attr("begin_mask: int = 0") .Attr("end_mask: int = 0") .Attr("ellipsis_mask: int = 0") .Attr("new_axis_mask: int = 0") .Attr("shrink_axis_mask: int = 0") .SetShapeFn(shape_inference::UnchangedShape); // TODO(aselle): Fix this documentation once StridedSliceAssign Supports // broadcasting. // -------------------------------------------------------------------------- REGISTER_OP("ResourceStridedSliceAssign") .Input("ref: resource") .Input("begin: Index") .Input("end: Index") .Input("strides: Index") .Input("value: T") .Attr("T: type") .Attr("Index: {int32, int64}") .Attr("begin_mask: int = 0") .Attr("end_mask: int = 0") .Attr("ellipsis_mask: int = 0") .Attr("new_axis_mask: int = 0") .Attr("shrink_axis_mask: int = 0") .SetShapeFn(shape_inference::NoOutputs); REGISTER_OP("TensorStridedSliceUpdate") .Input("input: T") .Input("begin: Index") .Input("end: Index") .Input("strides: Index") .Input("value: T") .Output("output: T") .Attr("T: type") .Attr("Index: {int32, int64}") .Attr("begin_mask: int = 0") .Attr("end_mask: int = 0") .Attr("ellipsis_mask: int = 0") .Attr("new_axis_mask: int = 0") .Attr("shrink_axis_mask: int = 0") .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("Tile") .Input("input: T") .Input("multiples: Tmultiples") .Output("output: T") .Attr("T: type") .Attr("Tmultiples: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle input = c->input(0); // NOTE(mrry): Represent `multiples` as a `TensorShape` because (i) // it is a vector of non-negative integers, and (ii) doing so allows // us to handle partially-known multiples. 
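// For example, an input of shape [2, 3] tiled with multiples = [3, 2]
// produces an output of shape [6, 6]: each output dimension is the input
// dimension multiplied by the matching entry of `multiples`, as computed
// below.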
ShapeHandle multiples; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(1, &multiples)); if (c->RankKnown(input)) { TF_RETURN_IF_ERROR(c->WithRank(multiples, c->Rank(input), &multiples)); ShapeHandle dummy; TF_RETURN_IF_ERROR( c->Merge(c->input(1), c->Vector(c->Rank(input)), &dummy)); } if (!c->RankKnown(multiples)) { return shape_inference::UnknownShape(c); } int32_t rank = c->Rank(multiples); TF_RETURN_IF_ERROR(c->WithRank(input, rank, &input)); std::vector<DimensionHandle> dims(rank); for (int i = 0; i < rank; ++i) { TF_RETURN_IF_ERROR( c->Multiply(c->Dim(input, i), c->Dim(multiples, i), &dims[i])); } c->set_output(0, c->MakeShape(dims)); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("TileGrad") .Input("input: T") .Input("multiples: int32") .Output("output: T") .Attr("T: type") .Deprecated(3, "TileGrad has been replaced with reduce_sum") .SetShapeFn(tensorflow::shape_inference::UnknownShape); // -------------------------------------------------------------------------- REGISTER_OP("Where") .Input("input: T") .Attr("T: {numbertype, bool} = DT_BOOL") .Output("index: int64") .SetShapeFn([](InferenceContext* c) { c->set_output(0, c->Matrix(c->UnknownDim(), c->Rank(c->input(0)))); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("BroadcastArgs") .Input("s0: T") .Input("s1: T") .Output("r0: T") .Attr("T: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle unused; ShapeHandle shape_x = c->input(0); ShapeHandle shape_y = c->input(1); TF_RETURN_IF_ERROR(c->WithRank(shape_x, 1, &unused)); TF_RETURN_IF_ERROR(c->WithRank(shape_y, 1, &unused)); if (!c->ValueKnown(c->Dim(shape_x, 0)) || !c->ValueKnown(c->Dim(shape_y, 0))) { c->set_output(0, c->Vector(InferenceContext::kUnknownDim)); return Status::OK(); } int64_t x_dim = c->Value(c->Dim(shape_x, 0)); int64_t y_dim = c->Value(c->Dim(shape_y, 0)); // Broadcasted shape is going to be as large as the largest dimension. c->set_output(0, c->Vector(std::max(x_dim, y_dim))); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("BroadcastGradientArgs") .Input("s0: T") .Input("s1: T") .Output("r0: T") .Output("r1: T") .Attr("T: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { // TODO(mrry): Implement constant_value for BroadcastGradientArgs? 
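// r0/r1 hold the reduction indices for the gradients w.r.t. s0 and s1;
// e.g. broadcasting s0 = [2, 3, 1] with s1 = [3, 5] gives r0 = [2] and
// r1 = [0]. The values are data dependent, so only unknown-length vectors
// can be inferred here.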
ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused)); c->set_output(0, c->Vector(InferenceContext::kUnknownDim)); c->set_output(1, c->Vector(InferenceContext::kUnknownDim)); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("Pad") .Input("input: T") .Input("paddings: Tpaddings") .Output("output: T") .Attr("T: type") .Attr("Tpaddings: {int32, int64} = DT_INT32") .SetShapeFn(PadShapeFn); // -------------------------------------------------------------------------- REGISTER_OP("PadV2") .Input("input: T") .Input("paddings: Tpaddings") .Input("constant_values: T") .Output("output: T") .Attr("T: type") .Attr("Tpaddings: {int32, int64} = DT_INT32") .SetShapeFn(PadShapeFn); // -------------------------------------------------------------------------- REGISTER_OP("MirrorPad") .Input("input: T") .Input("paddings: Tpaddings") .Output("output: T") .Attr("T: type") .Attr("Tpaddings: {int32, int64} = DT_INT32") .Attr(GetMirrorPadModeAttrString()) .SetShapeFn(PadShapeFn); // -------------------------------------------------------------------------- namespace { template <typename T> Status MirrorPadKnown(InferenceContext* c, ShapeHandle input, const Tensor* paddings_t, int64_t input_rank) { auto paddings_data = paddings_t->matrix<T>(); std::vector<DimensionHandle> dims(input_rank); for (int64_t i = 0; i < input_rank; ++i) { const int64_t pad0 = static_cast<int64_t>(paddings_data(i, 0)); const int64_t pad1 = static_cast<int64_t>(paddings_data(i, 1)); if (pad0 < 0 || pad1 < 0) { return errors::InvalidArgument("Paddings must be non-negative"); } TF_RETURN_IF_ERROR(c->Subtract(c->Dim(input, i), pad0 + pad1, &dims[i])); } c->set_output(0, c->MakeShape(dims)); return Status::OK(); } } // namespace REGISTER_OP("MirrorPadGrad") .Input("input: T") .Input("paddings: Tpaddings") .Output("output: T") .Attr("T: type") .Attr("Tpaddings: {int32, int64} = DT_INT32") .Attr(GetMirrorPadModeAttrString()) .SetShapeFn([](InferenceContext* c) { ShapeHandle paddings; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &paddings)); DimensionHandle pad_0 = c->Dim(paddings, 0); if (!c->ValueKnown(pad_0)) { // We don't know the rank of the output since the first // padding dimension is unknown. c->set_output(0, c->UnknownShape()); return Status::OK(); } int64_t input_rank = c->Value(pad_0); ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), input_rank, &input)); TF_RETURN_IF_ERROR( c->Merge(paddings, c->Matrix(input_rank, 2), &paddings)); const Tensor* paddings_t = c->input_tensor(1); if (paddings_t == nullptr) { // Values of 'paddings' is not available, but we know the // input rank, so return the rank of the output with unknown // dimensions. c->set_output(0, c->UnknownShapeOfRank(input_rank)); return Status::OK(); } if (paddings_t->dtype() == DT_INT32) { return MirrorPadKnown<int32>(c, input, paddings_t, input_rank); } else { return MirrorPadKnown<int64_t>(c, input, paddings_t, input_rank); } }); // -------------------------------------------------------------------------- REGISTER_OP("Placeholder") .Output("output: dtype") .Attr("dtype: type") .Attr("shape: shape = { unknown_rank: true }") .SetShapeFn([](InferenceContext* c) { PartialTensorShape shape; TF_RETURN_IF_ERROR(c->GetAttr("shape", &shape)); // Placeholder has legacy behavior where we cannot tell the difference // between a scalar shape attribute and 'unknown shape'. 
So if the shape // is a scalar, we return an unknown shape. if (c->graph_def_version() <= 21 && shape.dims() <= 0) { return shape_inference::UnknownShape(c); } ShapeHandle out; TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(shape, &out)); c->set_output(0, out); return Status::OK(); }); // Placeholder was modified in a backwards compatible way to do what // PlaceholderV2 did, so we have deprecated V2 (no one was really // using it). REGISTER_OP("PlaceholderV2") .Output("output: dtype") .Attr("dtype: type") .Attr("shape: shape") .SetShapeFn(shape_inference::ExplicitShape) .Deprecated(23, "Placeholder now behaves the same as PlaceholderV2."); // -------------------------------------------------------------------------- REGISTER_OP("PlaceholderWithDefault") .Input("input: dtype") .Output("output: dtype") .Attr("dtype: type") .Attr("shape: shape") .SetShapeFn([](InferenceContext* c) { ShapeHandle input = c->input(0); PartialTensorShape shape; TF_RETURN_IF_ERROR(c->GetAttr("shape", &shape)); ShapeHandle out; TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(shape, &out)); // We merge for compatibility checking, but return the output, // since output_shape may be less precise than input_shape. ShapeHandle unused; TF_RETURN_IF_ERROR(c->Merge(input, out, &unused)); c->set_output(0, out); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("ExpandDims") .Input("input: T") .Input("dim: Tdim") .Output("output: T") .Attr("T: type") .Attr("Tdim: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle input = c->input(0); const Tensor* dim_t = c->input_tensor(1); if (dim_t != nullptr && dim_t->NumElements() != 1) { return errors::InvalidArgument( "'dim' input must be a tensor with a single value"); } if (dim_t == nullptr || !c->RankKnown(input)) { c->set_output(0, c->UnknownShape()); return Status::OK(); } int64_t dim; if (dim_t->dtype() == DT_INT32) { dim = static_cast<int64_t>(dim_t->flat<int32>()(0)); } else { dim = dim_t->flat<int64_t>()(0); } const int32_t rank = c->Rank(input); const int32_t min_dim = -1 * rank - 1; if (dim < min_dim || dim > rank) { return errors::InvalidArgument("dim ", dim, " not in the interval [", min_dim, ", ", rank, "]."); } if (dim < 0) { dim += rank + 1; } ShapeHandle end; TF_RETURN_IF_ERROR(c->Subshape(input, dim, &end)); // Build output as start + 1 + end. ShapeHandle output; TF_RETURN_IF_ERROR(c->Subshape(input, 0, dim, &output)); TF_RETURN_IF_ERROR(c->Concatenate(output, c->Vector(1), &output)); TF_RETURN_IF_ERROR(c->Concatenate(output, end, &output)); c->set_output(0, output); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("Squeeze") .Input("input: T") .Output("output: T") .Attr("T: type") .Attr("squeeze_dims: list(int) >= 0 = []") .SetShapeFn([](InferenceContext* c) { ShapeHandle input = c->input(0); if (!c->RankKnown(input)) { // Input shape unknown. return shape_inference::UnknownShape(c); } const int32_t input_rank = c->Rank(input); // Validate and wrap squeeze dimensions. 
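// For example, with an input of rank 4, squeeze_dims = [-1] wraps to [3];
// applied to shape [2, 1, 3, 1] this gives [2, 1, 3], whereas an empty
// squeeze_dims would drop every size-1 dimension and give [2, 3].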
std::vector<int32> squeeze_dims; TF_RETURN_IF_ERROR(c->GetAttr("squeeze_dims", &squeeze_dims)); for (int i = 0; i < squeeze_dims.size(); ++i) { if (squeeze_dims[i] < -input_rank || squeeze_dims[i] >= input_rank) { return errors::InvalidArgument("squeeze_dims[", i, "] not in [", -input_rank, ",", input_rank, ")."); } if (squeeze_dims[i] < 0) { squeeze_dims[i] += input_rank; } } std::vector<DimensionHandle> result_shape; for (int i = 0; i < input_rank; ++i) { // True if squeeze_dims contains an entry to squeeze this // dimension. bool is_explicit_match = std::find(squeeze_dims.begin(), squeeze_dims.end(), i) != squeeze_dims.end(); DimensionHandle dim = c->Dim(input, i); if (!c->ValueKnown(dim)) { // Assume that the squeezed dimension will be 1 at runtime. if (is_explicit_match) continue; // If squeezing all 1 dimensions, and we see an unknown value, // give up and return Unknown Shape. if (squeeze_dims.empty()) { c->set_output(0, c->UnknownShape()); return Status::OK(); } } else if (c->Value(dim) == 1) { if (is_explicit_match || squeeze_dims.empty()) { // If explicitly squeezing, or squeezing all 1s, remove // this dimension. continue; } } else if (is_explicit_match) { return errors::InvalidArgument("Can not squeeze dim[", i, "], expected a dimension of 1, got ", c->Value(c->Dim(input, i))); } result_shape.emplace_back(dim); } c->set_output(0, c->MakeShape(result_shape)); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("ListDiff") .Input("x: T") .Input("y: T") .Output("out: T") .Output("idx: out_idx") .Attr("T: type") .Attr("out_idx: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused)); // TODO(mrry): Indicate that the length falls within an interval? ShapeHandle out = c->Vector(InferenceContext::kUnknownDim); c->set_output(0, out); c->set_output(1, out); return Status::OK(); }); namespace { // Converts Tensor to flat std::vector<int64_t>. template <typename InputType> std::vector<int64_t> GetFlatInt64(const Tensor& t) { std::vector<int64_t> output(t.shape().num_elements()); if (t.shape().num_elements() > 0) { auto eigen_vec = t.flat<InputType>(); std::copy_n(&eigen_vec(0), output.size(), output.begin()); } return output; } // Converts int32 or int64 Tensor to flat std::vector<int64_t>. 
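// This non-template overload dispatches on dtype; e.g. an int32 tensor
// holding {2, 2} is returned as std::vector<int64_t>{2, 2}, which the
// block-shape/paddings helpers below consume.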
std::vector<int64_t> GetFlatInt64(const Tensor& t) { if (t.dtype() == DT_INT32) { return GetFlatInt64<int32>(t); } else { return GetFlatInt64<int64_t>(t); } } Status SpaceToBatchShapeHelper(InferenceContext* c, ShapeHandle input_shape, ShapeHandle block_shape_shape, const Tensor* block_shape_t, ShapeHandle paddings_shape, const Tensor* paddings_t) { if (c->Rank(block_shape_shape) != 1) { return errors::InvalidArgument("block_shape must have rank 1."); } const DimensionHandle num_block_dims_handle = c->Dim(block_shape_shape, 0); if (!c->ValueKnown(num_block_dims_handle)) { return errors::InvalidArgument("block_shape must have known size."); } const int64_t num_block_dims = c->Value(num_block_dims_handle); TF_RETURN_IF_ERROR( c->WithRankAtLeast(input_shape, num_block_dims + 1, &input_shape)); TF_RETURN_IF_ERROR( c->Merge(paddings_shape, c->Matrix(num_block_dims, 2), &paddings_shape)); DimensionHandle batch_size = c->Dim(input_shape, 0); std::vector<int64_t> block_shape_vec; if (block_shape_t && (block_shape_t->NumElements() > 0)) { block_shape_vec = GetFlatInt64(*block_shape_t); for (int64_t dim = 0; dim < num_block_dims; ++dim) { const int64_t block_shape_value = block_shape_vec[dim]; if (block_shape_value < 1) { return errors::InvalidArgument("block_shape must be positive"); } if (c->ValueKnown(batch_size)) { TF_RETURN_IF_ERROR( c->Multiply(batch_size, block_shape_value, &batch_size)); } else { batch_size = c->UnknownDim(); } } } else if (num_block_dims > 0) { batch_size = c->UnknownDim(); } std::vector<DimensionHandle> output_dims{batch_size}; output_dims.resize(num_block_dims + 1, c->UnknownDim()); if (paddings_t && (paddings_t->NumElements() > 0)) { const std::vector<int64_t> paddings_vec = GetFlatInt64(*paddings_t); for (int64_t dim = 0; dim < num_block_dims; ++dim) { const int64_t pad_start = paddings_vec[dim * 2], pad_end = paddings_vec[dim * 2 + 1]; if (pad_start < 0 || pad_end < 0) { return errors::InvalidArgument("paddings cannot be negative"); } if (block_shape_t) { DimensionHandle padded_size; TF_RETURN_IF_ERROR( c->Add(c->Dim(input_shape, dim + 1), pad_start, &padded_size)); TF_RETURN_IF_ERROR(c->Add(padded_size, pad_end, &padded_size)); TF_RETURN_IF_ERROR(c->Divide(padded_size, block_shape_vec[dim], /*evenly_divisible=*/true, &output_dims[dim + 1])); } } } ShapeHandle remaining_input_shape; TF_RETURN_IF_ERROR( c->Subshape(input_shape, 1 + num_block_dims, &remaining_input_shape)); ShapeHandle result; TF_RETURN_IF_ERROR(c->Concatenate(c->MakeShape(output_dims), remaining_input_shape, &result)); c->set_output(0, result); return Status::OK(); } Status BatchToSpaceShapeHelper(InferenceContext* c, ShapeHandle input_shape, ShapeHandle block_shape_shape, const Tensor* block_shape_t, ShapeHandle crops_shape, const Tensor* crops_t) { if (c->Rank(block_shape_shape) != 1) { return errors::InvalidArgument("block_shape must have rank 1."); } const DimensionHandle num_block_dims_handle = c->Dim(block_shape_shape, 0); if (!c->ValueKnown(num_block_dims_handle)) { return errors::InvalidArgument("block_shape must have known size."); } const int64_t num_block_dims = c->Value(num_block_dims_handle); TF_RETURN_IF_ERROR( c->WithRankAtLeast(input_shape, num_block_dims + 1, &input_shape)); TF_RETURN_IF_ERROR( c->Merge(crops_shape, c->Matrix(num_block_dims, 2), &crops_shape)); DimensionHandle batch_size = c->Dim(input_shape, 0); std::vector<int64_t> block_shape_vec; if (block_shape_t) { block_shape_vec = GetFlatInt64(*block_shape_t); for (int64_t dim = 0; dim < num_block_dims; ++dim) { const int64_t 
block_shape_value = block_shape_vec[dim]; if (block_shape_value < 1) { return errors::InvalidArgument("block_shape must be positive"); } if (c->ValueKnown(batch_size)) { TF_RETURN_IF_ERROR(c->Divide(batch_size, block_shape_value, /*evenly_divisible=*/true, &batch_size)); } else { batch_size = c->UnknownDim(); } } } else if (num_block_dims > 0) { batch_size = c->UnknownDim(); } std::vector<DimensionHandle> output_dims{batch_size}; output_dims.resize(num_block_dims + 1, c->UnknownDim()); if (crops_t) { const std::vector<int64_t> crops_vec = GetFlatInt64(*crops_t); for (int64_t dim = 0; dim < num_block_dims; ++dim) { const int64_t crop_start = crops_vec[dim * 2], crop_end = crops_vec[dim * 2 + 1]; if (crop_start < 0 || crop_end < 0) { return errors::InvalidArgument("crops cannot be negative"); } if (block_shape_t) { DimensionHandle cropped_size; TF_RETURN_IF_ERROR(c->Multiply(c->Dim(input_shape, dim + 1), block_shape_vec[dim], &cropped_size)); TF_RETURN_IF_ERROR( c->Subtract(cropped_size, crop_start, &cropped_size)); TF_RETURN_IF_ERROR( c->Subtract(cropped_size, crop_end, &output_dims[dim + 1])); } } } ShapeHandle remaining_input_shape; TF_RETURN_IF_ERROR( c->Subshape(input_shape, 1 + num_block_dims, &remaining_input_shape)); ShapeHandle result; TF_RETURN_IF_ERROR(c->Concatenate(c->MakeShape(output_dims), remaining_input_shape, &result)); c->set_output(0, result); return Status::OK(); } } // namespace // -------------------------------------------------------------------------- REGISTER_OP("SpaceToBatchND") .Input("input: T") .Input("block_shape: Tblock_shape") .Input("paddings: Tpaddings") .Output("output: T") .Attr("T: type") .Attr("Tblock_shape: {int32, int64} = DT_INT32") .Attr("Tpaddings: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { return SpaceToBatchShapeHelper(c, c->input(0), c->input(1), c->input_tensor(1), c->input(2), c->input_tensor(2)); }); // -------------------------------------------------------------------------- REGISTER_OP("SpaceToBatch") .Input("input: T") .Input("paddings: Tpaddings") .Output("output: T") .Attr("T: type") .Attr("Tpaddings: {int32, int64} = DT_INT32") .Attr("block_size: int >= 2") .SetShapeFn([](InferenceContext* c) { ShapeHandle input_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input_shape)); int32_t block_size; TF_RETURN_IF_ERROR(c->GetAttr("block_size", &block_size)); Tensor block_shape(tensorflow::DT_INT64, TensorShape({2})); auto block_shape_vec = block_shape.vec<int64_t>(); block_shape_vec(0) = block_size; block_shape_vec(1) = block_size; return SpaceToBatchShapeHelper(c, input_shape, c->MakeShape({2}), &block_shape, c->input(1), c->input_tensor(1)); }); // -------------------------------------------------------------------------- REGISTER_OP("BatchToSpaceND") .Input("input: T") .Input("block_shape: Tblock_shape") .Input("crops: Tcrops") .Output("output: T") .Attr("T: type") .Attr("Tblock_shape: {int32, int64} = DT_INT32") .Attr("Tcrops: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { return BatchToSpaceShapeHelper(c, c->input(0), c->input(1), c->input_tensor(1), c->input(2), c->input_tensor(2)); }); // -------------------------------------------------------------------------- REGISTER_OP("BatchToSpace") .Input("input: T") .Input("crops: Tidx") .Output("output: T") .Attr("T: type") .Attr("block_size: int >= 2") .Attr("Tidx: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle input_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input_shape)); int32_t block_size; 
TF_RETURN_IF_ERROR(c->GetAttr("block_size", &block_size)); Tensor block_shape(tensorflow::DT_INT64, TensorShape({2})); auto block_shape_vec = block_shape.vec<int64_t>(); block_shape_vec(0) = block_size; block_shape_vec(1) = block_size; return BatchToSpaceShapeHelper(c, input_shape, c->MakeShape({2}), &block_shape, c->input(1), c->input_tensor(1)); }); // -------------------------------------------------------------------------- REGISTER_OP("SpaceToDepth") .Input("input: T") .Output("output: T") .Attr("T: type") .Attr("block_size: int >= 2") .Attr("data_format: {'NHWC', 'NCHW', 'NCHW_VECT_C'} = 'NHWC'") // TODO(pauldonnelly): Implement GPU kernels for NCHW_VECT_C. .SetShapeFn([](InferenceContext* c) { string data_format_str; TF_RETURN_IF_ERROR(c->GetAttr("data_format", &data_format_str)); TensorFormat data_format; FormatFromString(data_format_str, &data_format); constexpr int num_spatial_dims = 2; const int dims = GetTensorDimsFromSpatialDims(num_spatial_dims, data_format); ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), dims, &input)); int32_t block_size; TF_RETURN_IF_ERROR(c->GetAttr("block_size", &block_size)); DimensionHandle batch_size = c->Dim(input, GetTensorDimIndex<num_spatial_dims>(data_format, 'N')); DimensionHandle input_height = c->Dim(input, GetTensorDimIndex<num_spatial_dims>(data_format, 'H')); DimensionHandle input_width = c->Dim(input, GetTensorDimIndex<num_spatial_dims>(data_format, 'W')); DimensionHandle input_depth = c->Dim(input, GetTensorDimIndex<num_spatial_dims>(data_format, 'C')); DimensionHandle output_height; DimensionHandle output_width; DimensionHandle output_depth; // Will return an error if input height or width are not evenly divisible. TF_RETURN_IF_ERROR(c->Divide(input_height, block_size, true /* evenly_divisible */, &output_height)); TF_RETURN_IF_ERROR(c->Divide(input_width, block_size, true /* evenly_divisible */, &output_width)); TF_RETURN_IF_ERROR( c->Multiply(input_depth, block_size * block_size, &output_depth)); ShapeHandle output_shape; TF_RETURN_IF_ERROR(MakeShapeFromFormat(data_format, batch_size, {output_height, output_width}, output_depth, &output_shape, c)); c->set_output(0, output_shape); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("DepthToSpace") .Input("input: T") .Output("output: T") .Attr("T: type") .Attr("block_size: int >= 2") .Attr("data_format: {'NHWC', 'NCHW', 'NCHW_VECT_C'} = 'NHWC'") // TODO(pauldonnelly): Implement GPU kernels for NCHW and NCHW_VECT_C. 
.SetShapeFn([](InferenceContext* c) { string data_format_str; TF_RETURN_IF_ERROR(c->GetAttr("data_format", &data_format_str)); TensorFormat data_format; FormatFromString(data_format_str, &data_format); constexpr int num_spatial_dims = 2; const int dims = GetTensorDimsFromSpatialDims(num_spatial_dims, data_format); ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), dims, &input)); int32_t block_size; TF_RETURN_IF_ERROR(c->GetAttr("block_size", &block_size)); DimensionHandle batch_size = c->Dim(input, GetTensorDimIndex<num_spatial_dims>(data_format, 'N')); DimensionHandle input_height = c->Dim(input, GetTensorDimIndex<num_spatial_dims>(data_format, 'H')); DimensionHandle input_width = c->Dim(input, GetTensorDimIndex<num_spatial_dims>(data_format, 'W')); DimensionHandle input_depth = c->Dim(input, GetTensorDimIndex<num_spatial_dims>(data_format, 'C')); DimensionHandle output_height; DimensionHandle output_width; DimensionHandle output_depth; TF_RETURN_IF_ERROR(c->Multiply(input_height, block_size, &output_height)); TF_RETURN_IF_ERROR(c->Multiply(input_width, block_size, &output_width)); // Will return an error if input_depth is not evenly divisible. TF_RETURN_IF_ERROR(c->Divide(input_depth, block_size * block_size, true /* evenly_divisible */, &output_depth)); ShapeHandle output_shape; TF_RETURN_IF_ERROR(MakeShapeFromFormat(data_format, batch_size, {output_height, output_width}, output_depth, &output_shape, c)); c->set_output(0, output_shape); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("ExtractImagePatches") .Input("images: T") .Output("patches: T") .Attr("ksizes: list(int) >= 4") .Attr("strides: list(int) >= 4") .Attr("rates: list(int) >= 4") .Attr( "T: {bfloat16, half, float, double, int8, int16, int32, int64, " "uint8, uint16, uint32, uint64, complex64, complex128, bool}") .Attr(GetPaddingAttrString()) .SetShapeFn([](InferenceContext* c) { ShapeHandle input_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input_shape)); std::vector<int32> ksizes; TF_RETURN_IF_ERROR(c->GetAttr("ksizes", &ksizes)); if (ksizes.size() != 4) { return errors::InvalidArgument( "ExtractImagePatches requires the ksizes attribute to contain 4 " "values, but got: ", ksizes.size()); } std::vector<int32> strides; TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides)); if (strides.size() != 4) { return errors::InvalidArgument( "ExtractImagePatches requires the stride attribute to contain 4 " "values, but got: ", strides.size()); } std::vector<int32> rates; TF_RETURN_IF_ERROR(c->GetAttr("rates", &rates)); if (rates.size() != 4) { return errors::InvalidArgument( "ExtractImagePatches requires the rates attribute to contain 4 " "values, but got: ", rates.size()); } int32_t ksize_rows = ksizes[1]; int32_t ksize_cols = ksizes[2]; int32_t stride_rows = strides[1]; int32_t stride_cols = strides[2]; int32_t rate_rows = rates[1]; int32_t rate_cols = rates[2]; int32_t ksize_rows_eff = ksize_rows + (ksize_rows - 1) * (rate_rows - 1); int32_t ksize_cols_eff = ksize_cols + (ksize_cols - 1) * (rate_cols - 1); DimensionHandle batch_size_dim = c->Dim(input_shape, 0); DimensionHandle in_rows_dim = c->Dim(input_shape, 1); DimensionHandle in_cols_dim = c->Dim(input_shape, 2); DimensionHandle output_depth_dim; TF_RETURN_IF_ERROR(c->Multiply( c->Dim(input_shape, 3), ksize_rows * ksize_cols, &output_depth_dim)); if (!c->ValueKnown(in_rows_dim) || !c->ValueKnown(in_cols_dim)) { ShapeHandle output_shape = c->MakeShape({batch_size_dim, 
InferenceContext::kUnknownDim, InferenceContext::kUnknownDim, output_depth_dim}); c->set_output(0, output_shape); return Status::OK(); } auto in_rows = c->Value(in_rows_dim); auto in_cols = c->Value(in_cols_dim); Padding padding; TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding)); int64_t output_rows, output_cols; int64_t padding_before, padding_after; TF_RETURN_IF_ERROR(GetWindowedOutputSizeVerbose( in_rows, ksize_rows_eff, stride_rows, padding, &output_rows, &padding_before, &padding_after)); TF_RETURN_IF_ERROR(GetWindowedOutputSizeVerbose( in_cols, ksize_cols_eff, stride_cols, padding, &output_cols, &padding_before, &padding_after)); ShapeHandle output_shape = c->MakeShape( {batch_size_dim, output_rows, output_cols, output_depth_dim}); c->set_output(0, output_shape); return Status::OK(); }); // -------------------------------------------------------------------------- // To enable rates, uncomment all lines commented below and use ksize_*_eff // as the second parameter of all GetWindowedOutputSizeVerbose calls instead // of ksize_*. REGISTER_OP("ExtractVolumePatches") .Input("input: T") .Output("patches: T") .Attr("ksizes: list(int) >= 5") .Attr("strides: list(int) >= 5") /* .Attr("rates: list(int) >= 5") */ .Attr("T: realnumbertype") .Attr(GetPaddingAttrString()) .SetShapeFn([](InferenceContext* c) { ShapeHandle input_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 5, &input_shape)); std::vector<int32> ksizes; TF_RETURN_IF_ERROR(c->GetAttr("ksizes", &ksizes)); if (ksizes.size() != 5) { return errors::InvalidArgument( "ExtractVolumePatches requires the ksizes attribute to contain 5 " "values, but got: ", ksizes.size()); } std::vector<int32> strides; TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides)); if (strides.size() != 5) { return errors::InvalidArgument( "ExtractVolumePatches requires the stride attribute to contain 5 " "values, but got: ", strides.size()); } /* // TODO(hsgkim): Enable rates. // See extract_volume_patches_op.cc for why rates are disabled now. 
std::vector<int32> rates; TF_RETURN_IF_ERROR(c->GetAttr("rates", &rates)); if (rates.size() != 5) { return errors::InvalidArgument( "ExtractVolumePatches requires the rates attribute to contain 5 " "values, but got: ", rates.size()); } */ int32_t ksize_planes = ksizes[1]; int32_t ksize_rows = ksizes[2]; int32_t ksize_cols = ksizes[3]; int32_t stride_planes = strides[1]; int32_t stride_rows = strides[2]; int32_t stride_cols = strides[3]; /* int32 rate_planes = rates[1]; int32 rate_rows = rates[2]; int32 rate_cols = rates[3]; int32 ksize_planes_eff = ksize_planes + (ksize_planes - 1) * (rate_planes - 1); int32 ksize_rows_eff = ksize_rows + (ksize_rows - 1) * (rate_rows - 1); int32 ksize_cols_eff = ksize_cols + (ksize_cols - 1) * (rate_cols - 1); */ DimensionHandle batch_size_dim = c->Dim(input_shape, 0); DimensionHandle in_planes_dim = c->Dim(input_shape, 1); DimensionHandle in_rows_dim = c->Dim(input_shape, 2); DimensionHandle in_cols_dim = c->Dim(input_shape, 3); DimensionHandle output_depth_dim; TF_RETURN_IF_ERROR(c->Multiply(c->Dim(input_shape, 4), ksize_planes * ksize_rows * ksize_cols, &output_depth_dim)); if (!c->ValueKnown(in_planes_dim) || !c->ValueKnown(in_rows_dim) || !c->ValueKnown(in_cols_dim)) { ShapeHandle output_shape = c->MakeShape({batch_size_dim, InferenceContext::kUnknownDim, InferenceContext::kUnknownDim, output_depth_dim}); c->set_output(0, output_shape); return Status::OK(); } auto in_planes = c->Value(in_planes_dim); auto in_rows = c->Value(in_rows_dim); auto in_cols = c->Value(in_cols_dim); Padding padding; TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding)); int64_t output_planes, output_rows, output_cols; int64_t padding_before, padding_after; TF_RETURN_IF_ERROR(GetWindowedOutputSizeVerbose( in_planes, ksize_planes, stride_planes, padding, &output_planes, &padding_before, &padding_after)); TF_RETURN_IF_ERROR(GetWindowedOutputSizeVerbose( in_rows, ksize_rows, stride_rows, padding, &output_rows, &padding_before, &padding_after)); TF_RETURN_IF_ERROR(GetWindowedOutputSizeVerbose( in_cols, ksize_cols, stride_cols, padding, &output_cols, &padding_before, &padding_after)); ShapeHandle output_shape = c->MakeShape({batch_size_dim, output_planes, output_rows, output_cols, output_depth_dim}); c->set_output(0, output_shape); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("OneHot") .Input("indices: TI") .Input("depth: int32") .Input("on_value: T") .Input("off_value: T") .Attr("axis: int = -1") .Output("output: T") .Attr("T: type") .Attr("TI: {uint8, int32, int64} = DT_INT64") .SetShapeFn([](InferenceContext* c) { int32_t axis; TF_RETURN_IF_ERROR(c->GetAttr("axis", &axis)); if (axis < -1) return errors::InvalidArgument("axis must be >= -1"); DimensionHandle depth; TF_RETURN_IF_ERROR(c->MakeDimForScalarInput(1, &depth)); ShapeHandle indices = c->input(0); if (!c->RankKnown(indices)) return shape_inference::UnknownShape(c); int32_t new_rank = c->Rank(indices) + 1; // We need to add new_rank to axis in the case the axis is -1 because // C++ returns negative values from % if the dividend is negative. int32_t depth_index = (axis + new_rank) % new_rank; // Out shape is indices[0:depth_index] + [depth] + indices[depth_index:]. 
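// For example, indices of shape [2, 3] with depth 5 and axis = -1 give
// new_rank = 3 and depth_index = 2, so the output shape is [2, 3, 5];
// with axis = 0 the output would be [5, 2, 3].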
ShapeHandle front; ShapeHandle back; ShapeHandle out; TF_RETURN_IF_ERROR(c->Subshape(indices, 0, depth_index, &front)); TF_RETURN_IF_ERROR(c->Subshape(indices, depth_index, &back)); TF_RETURN_IF_ERROR(c->Concatenate(front, c->Vector(depth), &front)); TF_RETURN_IF_ERROR(c->Concatenate(front, back, &out)); c->set_output(0, out); return Status::OK(); }); // EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET. REGISTER_OP("QuantizeAndDequantize") .Input("input: T") .Attr("signed_input: bool = true") .Attr("num_bits: int = 8") .Attr("range_given: bool = false") .Attr("input_min: float = 0") .Attr("input_max: float = 0") .Output("output: T") .Attr("T: {bfloat16, half, float, double}") .SetShapeFn(shape_inference::UnchangedShape) .Deprecated(22, "Replaced by QuantizeAndDequantizeV2"); // TODO(suharshs): Deprecate QuantizeAndDequantizeV2. REGISTER_OP("QuantizeAndDequantizeV2") .Input("input: T") .Input("input_min: T") .Input("input_max: T") .Attr("signed_input: bool = true") .Attr("num_bits: int = 8") .Attr("range_given: bool = false") .Output("output: T") .Attr("T: {bfloat16, half, float, double}") .Attr( "round_mode: {'HALF_TO_EVEN', 'HALF_UP'} = " "'HALF_TO_EVEN'") .Attr("narrow_range: bool = false") .Attr("axis: int = -1") .SetShapeFn([](InferenceContext* c) { int axis; TF_RETURN_IF_ERROR(c->GetAttr("axis", &axis)); const int minmax_rank = (axis == -1) ? 0 : 1; ShapeHandle minmax; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), minmax_rank, &minmax)); TF_RETURN_IF_ERROR(c->Merge(c->input(2), minmax, &minmax)); if (axis < -1) { return errors::InvalidArgument("axis should be at least -1, got ", axis); } else if (axis != -1) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), axis + 1, &input)); DimensionHandle depth; TF_RETURN_IF_ERROR( c->Merge(c->Dim(minmax, 0), c->Dim(input, axis), &depth)); } c->set_output(0, c->input(0)); return Status::OK(); }); REGISTER_OP("QuantizeAndDequantizeV4") .Input("input: T") .Input("input_min: T") .Input("input_max: T") .Attr("signed_input: bool = true") .Attr("num_bits: int = 8") .Attr("range_given: bool = false") .Output("output: T") .Attr("T: {bfloat16, half, float, double}") .Attr( "round_mode: {'HALF_TO_EVEN', 'HALF_UP'} = " "'HALF_TO_EVEN'") .Attr("narrow_range: bool = false") .Attr("axis: int = -1") .SetShapeFn([](InferenceContext* c) { int axis; TF_RETURN_IF_ERROR(c->GetAttr("axis", &axis)); const int minmax_rank = (axis == -1) ? 0 : 1; ShapeHandle minmax; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), minmax_rank, &minmax)); TF_RETURN_IF_ERROR(c->Merge(c->input(2), minmax, &minmax)); if (axis < -1) { return errors::InvalidArgument("axis should be at least -1, got ", axis); } else if (axis != -1) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), axis + 1, &input)); DimensionHandle depth; TF_RETURN_IF_ERROR( c->Merge(c->Dim(minmax, 0), c->Dim(input, axis), &depth)); } c->set_output(0, c->input(0)); return Status::OK(); }); REGISTER_OP("QuantizeAndDequantizeV4Grad") .Input("gradients: T") .Input("input: T") .Input("input_min: T") .Input("input_max: T") .Output("input_backprop: T") .Output("input_min_backprop: T") .Output("input_max_backprop: T") .Attr("T: {bfloat16, half, float, double}") .Attr("axis: int = -1") .SetShapeFn([](InferenceContext* c) { int axis; TF_RETURN_IF_ERROR(c->GetAttr("axis", &axis)); const int minmax_rank = (axis == -1) ? 
0 : 1; ShapeHandle minmax; TF_RETURN_IF_ERROR(c->WithRank(c->input(2), minmax_rank, &minmax)); TF_RETURN_IF_ERROR(c->Merge(c->input(3), minmax, &minmax)); if (axis < -1) { return errors::InvalidArgument("axis should be at least -1, got ", axis); } else if (axis != -1) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), axis + 1, &input)); DimensionHandle depth; TF_RETURN_IF_ERROR( c->Merge(c->Dim(minmax, 0), c->Dim(input, axis), &depth)); } ShapeHandle inputs; TF_RETURN_IF_ERROR(c->Merge(c->input(0), c->input(1), &inputs)); c->set_output(0, inputs); c->set_output(1, minmax); c->set_output(2, minmax); return Status::OK(); }); REGISTER_OP("QuantizeAndDequantizeV3") .Input("input: T") .Input("input_min: T") .Input("input_max: T") .Input("num_bits: int32") .Attr("signed_input: bool = true") .Attr("range_given: bool = true") .Output("output: T") .Attr("T: {bfloat16, half, float, double}") .Attr("narrow_range: bool = false") .Attr("axis: int = -1") .SetShapeFn([](InferenceContext* c) { int axis; TF_RETURN_IF_ERROR(c->GetAttr("axis", &axis)); const int minmax_rank = (axis == -1) ? 0 : 1; ShapeHandle minmax; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), minmax_rank, &minmax)); TF_RETURN_IF_ERROR(c->Merge(c->input(2), minmax, &minmax)); if (axis < -1) { return errors::InvalidArgument("axis should be at least -1, got ", axis); } else if (axis != -1) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), axis + 1, &input)); DimensionHandle depth; TF_RETURN_IF_ERROR( c->Merge(c->Dim(minmax, 0), c->Dim(input, axis), &depth)); } ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused)); c->set_output(0, c->input(0)); return Status::OK(); }); REGISTER_OP("QuantizeV2") .Input("input: float") .Input("min_range: float") .Input("max_range: float") .Output("output: T") .Output("output_min: float") .Output("output_max: float") .Attr("T: quantizedtype") .Attr("mode: {'MIN_COMBINED', 'MIN_FIRST', 'SCALED'} = 'MIN_COMBINED'") .Attr( "round_mode: {'HALF_AWAY_FROM_ZERO', 'HALF_TO_EVEN'} = " "'HALF_AWAY_FROM_ZERO'") .Attr("narrow_range: bool = false") .Attr("axis: int = -1") .Attr("ensure_minimum_range: float = 0.01") .SetShapeFn(shape_inference::QuantizeV2Shape); REGISTER_OP("Dequantize") .Input("input: T") .Input("min_range: float") .Input("max_range: float") .Output("output: dtype") .Attr("T: quantizedtype") .Attr("mode: {'MIN_COMBINED', 'MIN_FIRST', 'SCALED'} = 'MIN_COMBINED'") .Attr("narrow_range: bool = false") .Attr("axis: int = -1") .Attr("dtype: {bfloat16, float} = DT_FLOAT") .SetShapeFn([](InferenceContext* c) { int axis = -1; Status s = c->GetAttr("axis", &axis); if (!s.ok() && s.code() != error::NOT_FOUND) { return s; } if (axis < -1) { return errors::InvalidArgument("axis should be at least -1, got ", axis); } const int minmax_rank = (axis == -1) ? 
0 : 1; TF_RETURN_IF_ERROR(shape_inference::UnchangedShape(c)); ShapeHandle minmax; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), minmax_rank, &minmax)); TF_RETURN_IF_ERROR(c->WithRank(c->input(2), minmax_rank, &minmax)); if (axis != -1) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), axis + 1, &input)); DimensionHandle depth; TF_RETURN_IF_ERROR( c->Merge(c->Dim(minmax, 0), c->Dim(input, axis), &depth)); } return Status::OK(); }); REGISTER_OP("QuantizedConcat") .Input("concat_dim: int32") .Input("values: N * T") .Input("input_mins: N * float32") .Input("input_maxes: N * float32") .Output("output: T") .Output("output_min: float") .Output("output_max: float") .Attr("N: int >= 2") .Attr("T: type") .SetShapeFn([](InferenceContext* c) { const int n = (c->num_inputs() - 1) / 3; TF_RETURN_IF_ERROR(shape_inference::ConcatShape(c, n)); ShapeHandle unused; for (int i = n + 1; i < c->num_inputs(); ++i) { TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 0, &unused)); } c->set_output(1, c->Scalar()); c->set_output(2, c->Scalar()); return Status::OK(); }); REGISTER_OP("QuantizedReshape") .Input("tensor: T") .Input("shape: Tshape") .Input("input_min: float") .Input("input_max: float") .Output("output: T") .Output("output_min: float") .Output("output_max: float") .Attr("T: type") .Attr("Tshape: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { TF_RETURN_IF_ERROR(SetOutputShapeForReshape(c)); ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused)); c->set_output(1, c->Scalar()); c->set_output(2, c->Scalar()); return Status::OK(); }); REGISTER_OP("QuantizedInstanceNorm") .Input("x: T") .Input("x_min: float") .Input("x_max: float") .Output("y: T") .Output("y_min: float") .Output("y_max: float") .Attr("T: quantizedtype") .Attr("output_range_given: bool = false") .Attr("given_y_min: float = 0") .Attr("given_y_max: float = 0") .Attr("variance_epsilon: float = 1e-5") .Attr("min_separation: float = 1e-3") .SetShapeFn([](shape_inference::InferenceContext* c) { shape_inference::ShapeHandle unused; // x should be a rank 4 tensor. TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &unused)); // Assert x_min and x_max are scalars (rank 0). TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused)); // y has the same shape as x. TF_RETURN_IF_ERROR(shape_inference::UnchangedShape(c)); // y_min and y_max are scalars. 
c->set_output(1, c->Scalar()); c->set_output(2, c->Scalar()); return Status::OK(); }); namespace { Status ScatterNdTensorShape(InferenceContext* c) { ShapeHandle output_shape; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &output_shape)); ShapeHandle indices_shape; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 1, &indices_shape)); ShapeHandle updates_shape; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(2), 1, &updates_shape)); return shape_inference::ScatterNdShapeHelper(c, indices_shape, updates_shape, output_shape); } } // namespace REGISTER_OP("UpperBound") .Input("sorted_inputs: T") .Input("values: T") .Output("output: out_type") .Attr("T: type") .Attr("out_type: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle unused_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &unused_shape)); TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &unused_shape)); c->set_output(0, c->input(1)); return Status::OK(); }); REGISTER_OP("LowerBound") .Input("sorted_inputs: T") .Input("values: T") .Output("output: out_type") .Attr("T: type") .Attr("out_type: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle unused_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &unused_shape)); TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &unused_shape)); c->set_output(0, c->input(1)); return Status::OK(); }); REGISTER_OP("ScatterNd") .Input("indices: Tindices") .Input("updates: T") .Input("shape: Tindices") .Output("output: T") .Attr("T: type") .Attr("Tindices: {int32, int64}") .SetShapeFn([](InferenceContext* c) { ShapeHandle indices_shape; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &indices_shape)); ShapeHandle updates_shape; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 1, &updates_shape)); ShapeHandle output_shape; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(2, &output_shape)); return shape_inference::ScatterNdShapeHelper(c, indices_shape, updates_shape, output_shape); }); REGISTER_OP("TensorScatterUpdate") .Input("tensor: T") .Input("indices: Tindices") .Input("updates: T") .Output("output: T") .Attr("T: type") .Attr("Tindices: {int32, int64}") .SetShapeFn(ScatterNdTensorShape); REGISTER_OP("TensorScatterAdd") .Input("tensor: T") .Input("indices: Tindices") .Input("updates: T") .Output("output: T") .Attr("T: type") .Attr("Tindices: {int32, int64}") .SetShapeFn(ScatterNdTensorShape); REGISTER_OP("TensorScatterSub") .Input("tensor: T") .Input("indices: Tindices") .Input("updates: T") .Output("output: T") .Attr("T: type") .Attr("Tindices: {int32, int64}") .SetShapeFn(ScatterNdTensorShape); REGISTER_OP("TensorScatterMin") .Input("tensor: T") .Input("indices: Tindices") .Input("updates: T") .Output("output: T") .Attr("T: type") .Attr("Tindices: {int32, int64}") .SetShapeFn(ScatterNdTensorShape); REGISTER_OP("TensorScatterMax") .Input("tensor: T") .Input("indices: Tindices") .Input("updates: T") .Output("output: T") .Attr("T: type") .Attr("Tindices: {int32, int64}") .SetShapeFn(ScatterNdTensorShape); REGISTER_OP("ScatterNdNonAliasingAdd") .Input("input: T") .Input("indices: Tindices") .Input("updates: T") .Output("output: T") .Attr("T: {numbertype, bool}") .Attr("Tindices: {int32, int64}") .SetShapeFn(ScatterNdTensorShape); REGISTER_OP("FakeQuantWithMinMaxArgs") .Attr("min: float = -6.0") .Attr("max: float = 6.0") .Attr("num_bits: int = 8") .Attr("narrow_range: bool = false") .Input("inputs: float") .Output("outputs: float") .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("FakeQuantWithMinMaxArgsGradient") 
.Attr("min: float = -6.0") .Attr("max: float = 6.0") .Attr("num_bits: int = 8") .Attr("narrow_range: bool = false") .Input("gradients: float") .Input("inputs: float") .Output("backprops: float") .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("FakeQuantWithMinMaxVars") .Attr("num_bits: int = 8") .Attr("narrow_range: bool = false") .Input("inputs: float") .Input("min: float") .Input("max: float") .Output("outputs: float") .SetShapeFn([](InferenceContext* c) { TF_RETURN_IF_ERROR(shape_inference::UnchangedShape(c)); ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused)); return Status::OK(); }); REGISTER_OP("FakeQuantWithMinMaxVarsGradient") .Attr("num_bits: int = 8") .Attr("narrow_range: bool = false") .Input("gradients: float") .Input("inputs: float") .Input("min: float") .Input("max: float") .Output("backprops_wrt_input: float") .Output("backprop_wrt_min: float") .Output("backprop_wrt_max: float") .SetShapeFn([](InferenceContext* c) { // gradients and inputs are same size. ShapeHandle inputs; TF_RETURN_IF_ERROR(c->Merge(c->input(0), c->input(1), &inputs)); // min and max are scalars ShapeHandle min_max; TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &min_max)); TF_RETURN_IF_ERROR(c->Merge(min_max, c->input(3), &min_max)); c->set_output(0, inputs); c->set_output(1, min_max); c->set_output(2, min_max); return Status::OK(); }); REGISTER_OP("FakeQuantWithMinMaxVarsPerChannel") .Attr("num_bits: int = 8") .Attr("narrow_range: bool = false") .Input("inputs: float") .Input("min: float") .Input("max: float") .Output("outputs: float") .SetShapeFn([](InferenceContext* c) { ShapeHandle input, min, max; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &input)); TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &min)); TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &max)); DimensionHandle unused; TF_RETURN_IF_ERROR(c->Merge(c->Dim(input, -1), c->Dim(min, 0), &unused)); TF_RETURN_IF_ERROR(c->Merge(c->Dim(input, -1), c->Dim(max, 0), &unused)); TF_RETURN_IF_ERROR(c->Merge(c->Dim(min, 0), c->Dim(max, 0), &unused)); c->set_output(0, input); return Status::OK(); }); REGISTER_OP("FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("num_bits: int = 8") .Attr("narrow_range: bool = false") .Input("gradients: float") .Input("inputs: float") .Input("min: float") .Input("max: float") .Output("backprops_wrt_input: float") .Output("backprop_wrt_min: float") .Output("backprop_wrt_max: float") .SetShapeFn([](InferenceContext* c) { ShapeHandle inputs; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &inputs)); TF_RETURN_IF_ERROR(c->WithRankAtMost(inputs, 4, &inputs)); TF_RETURN_IF_ERROR(c->Merge(inputs, c->input(1), &inputs)); ShapeHandle last_dim = c->Vector(c->Dim(inputs, -1)); ShapeHandle min_max; TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &min_max)); TF_RETURN_IF_ERROR(c->Merge(min_max, last_dim, &min_max)); TF_RETURN_IF_ERROR(c->Merge(c->input(3), min_max, &min_max)); c->set_output(0, inputs); c->set_output(1, min_max); c->set_output(2, min_max); return Status::OK(); }); REGISTER_OP("Fingerprint") .Input("data: T") .Input("method: string") .Output("fingerprint: uint8") .Attr("T: type") .SetShapeFn([](InferenceContext* c) { ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused)); DimensionHandle fingerprint_size; const Tensor* method = c->input_tensor(1); if (method == nullptr) { fingerprint_size = c->UnknownDim(); } else { if 
(method->dims() != 0) { return errors::InvalidArgument("`method` must be rank 0: ", method->shape()); } const string& method_string = method->scalar<tstring>()(); if (method_string != "farmhash64") { return errors::InvalidArgument("Unsupported method: ", method_string); } fingerprint_size = c->MakeDim(sizeof(uint64)); } DimensionHandle batch = c->Dim(c->input(0), 0); c->set_output(0, c->MakeShape({batch, fingerprint_size})); return Status::OK(); }); #ifdef INTEL_MKL REGISTER_OP("_MklConcat") .Input("concat_dim: int32") .Input("values: N * T") .Input("mkl_concat_dim: uint8") .Input("mkl_values: N * uint8") .Output("output: T") .Output("mkl_output: uint8") .Attr("N: int >= 2") .Attr("T: type") .SetShapeFn([](InferenceContext* c) { return shape_inference::ConcatShape(c, c->num_inputs() - 3); }) .Doc(R"doc( MKL version of Concat operator. Uses MKL DNN APIs to perform concatenation. NOTE Do not invoke this operator directly in Python. Graph rewrite pass is expected to invoke these operators. )doc"); #endif // Deprecated op registrations: // The following can be deleted after 10mar2017. REGISTER_OP("BatchMatrixDiag") .Input("diagonal: T") .Output("output: T") .Attr("T: type") .Deprecated(14, "Use MatrixDiag") .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("BatchMatrixSetDiag") .Input("input: T") .Input("diagonal: T") .Output("output: T") .Attr("T: type") .Deprecated(14, "Use MatrixSetDiag") .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("BatchMatrixDiagPart") .Input("input: T") .Output("diagonal: T") .Attr("T: type") .Deprecated(14, "Use MatrixDiagPart") .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("BatchMatrixBandPart") .Input("input: T") .Input("num_lower: int64") .Input("num_upper: int64") .Output("band: T") .Attr("T: type") .Deprecated(14, "Use MatrixBandPart") .SetShapeFn(shape_inference::UnknownShape); } // namespace tensorflow
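The ScatterNd / TensorScatter* registrations above all funnel into shape_inference::ScatterNdShapeHelper. The rule that helper enforces (per the scatter_nd op semantics) is: with indices of shape batch_shape + [index_depth], updates must have shape batch_shape + output_shape[index_depth:]. Below is a minimal standalone sketch of that compatibility check, under the stated assumption about the rule; it does not use TensorFlow's InferenceContext API, and all names in it are illustrative.

// Standalone sketch (not TensorFlow code): checks the ScatterNd shape rule
// indices = batch_shape + [index_depth], updates = batch_shape + output[index_depth:].
#include <cstdint>
#include <vector>

bool ScatterNdShapesCompatible(const std::vector<int64_t>& indices,
                               const std::vector<int64_t>& updates,
                               const std::vector<int64_t>& output) {
  if (indices.empty()) return false;           // indices must have rank >= 1
  const int64_t index_depth = indices.back();  // innermost dim indexes into output
  if (index_depth < 0 || index_depth > static_cast<int64_t>(output.size()))
    return false;
  const size_t batch_rank = indices.size() - 1;
  // updates rank must equal batch_rank + (output rank - index_depth).
  if (updates.size() != batch_rank + output.size() - index_depth) return false;
  // Leading dims of updates must match the batch dims of indices.
  for (size_t i = 0; i < batch_rank; ++i)
    if (updates[i] != indices[i]) return false;
  // Trailing dims of updates must match the suffix of the output shape.
  for (size_t i = 0; i < output.size() - index_depth; ++i)
    if (updates[batch_rank + i] != output[index_depth + i]) return false;
  return true;
}
// Example: indices {4, 1}, updates {4, 8}, output {5, 8} -> compatible;
// updates {4, 7} would be rejected because 7 != output[1].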
null
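The file that follows defines GetAxisForPackAndUnpack, which normalizes the axis attribute for Pack/Unpack: a valid axis lies in [-rank_after_pack, rank_after_pack), and a negative axis is shifted by +rank_after_pack. The sketch below restates just that rule in isolation; it is illustrative only and does not use TensorFlow's Status or attribute-lookup machinery.

// Standalone sketch of the Pack/Unpack axis normalization rule (C++17).
#include <optional>

std::optional<int> NormalizePackAxis(int axis, int rank_after_pack) {
  if (axis < -rank_after_pack || axis >= rank_after_pack)
    return std::nullopt;  // out of range, analogous to an InvalidArgument error
  return axis < 0 ? axis + rank_after_pack : axis;
}
// Example: packing rank-2 inputs gives rank_after_pack == 3,
// so axis -1 normalizes to 2, while axis 3 is rejected.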
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <algorithm> #include <ostream> #include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/kernel_shape_util.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/util/mirror_pad_mode.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/strided_slice_op.h" #include "tensorflow/core/util/tensor_format.h" namespace tensorflow { using shape_inference::DimensionHandle; using shape_inference::InferenceContext; using shape_inference::ShapeHandle; using shape_inference::UnchangedShape; namespace { Status GetAxisForPackAndUnpack(InferenceContext* c, int32_t rank_after_pack, int32* axis) { TF_RETURN_IF_ERROR(c->GetAttr("axis", axis)); if (*axis < -1 * rank_after_pack || *axis >= rank_after_pack) { return errors::InvalidArgument("Invalid axis: ", *axis, "; must be in [", -1 * rank_after_pack, ",", rank_after_pack, ")"); } if (*axis < 0) *axis = (rank_after_pack + *axis); return Status::OK(); } template <typename T> std::vector<int64_t> AsInt64(const Tensor* tensor, int64_t num_elements) { std::vector<int64_t> ret(num_elements); auto data = tensor->vec<T>(); for (int64_t i = 0; i < num_elements; ++i) { ret[i] = data(i); } return ret; } template <typename T> Status PadKnown(InferenceContext* c, ShapeHandle input, const Tensor* paddings_t, int64_t num_dims) { // paddings_t is known. std::vector<DimensionHandle> dims(num_dims); auto paddings_data = paddings_t->matrix<T>(); for (int64_t i = 0; i < num_dims; ++i) { const T pad0 = paddings_data(i, 0); const T pad1 = paddings_data(i, 1); if (pad0 < 0 || pad1 < 0) { return errors::InvalidArgument("Paddings must be non-negative"); } TF_RETURN_IF_ERROR(c->Add(c->Dim(input, i), pad0 + pad1, &dims[i])); } c->set_output(0, c->MakeShape(dims)); return Status::OK(); } Status PadShapeFn(InferenceContext* c) { // Paddings is a matrix of [input_rank, 2]. ShapeHandle paddings; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &paddings)); DimensionHandle unused; TF_RETURN_IF_ERROR(c->WithValue(c->Dim(paddings, 1), 2, &unused)); // n_dim and input.rank are equivalent. ShapeHandle input = c->input(0); DimensionHandle n_dim = c->Dim(paddings, 0); if (c->ValueKnown(n_dim)) { TF_RETURN_IF_ERROR(c->WithRank(input, c->Value(n_dim), &input)); } else if (c->RankKnown(input)) { TF_RETURN_IF_ERROR(c->WithValue(n_dim, c->Rank(input), &n_dim)); } const Tensor* paddings_t = c->input_tensor(1); // paddings_t is unknown if (paddings_t == nullptr) { if (c->ValueKnown(n_dim)) { // Make output with n_dim unknown dims. 
c->set_output(0, c->UnknownShapeOfRank(c->Value(n_dim))); } else { c->set_output(0, c->UnknownShape()); } return Status::OK(); } const int64_t num_dims = paddings_t->shape().dim_size(0); TF_RETURN_IF_ERROR(c->WithRank(input, num_dims, &input)); TF_RETURN_IF_ERROR(c->WithValue(n_dim, num_dims, &n_dim)); if (paddings_t->dtype() == DT_INT32) { return PadKnown<int32>(c, input, paddings_t, num_dims); } else { return PadKnown<int64_t>(c, input, paddings_t, num_dims); } } Status TransposeShapeFn(InferenceContext* c) { ShapeHandle input = c->input(0); ShapeHandle perm_shape = c->input(1); const Tensor* perm = c->input_tensor(1); DimensionHandle perm_elems = c->NumElements(perm_shape); // If we don't have rank information on the input or value information on // perm we can't return any shape information, otherwise we have enough // information to at least find the rank of the output. if (!c->RankKnown(input) && !c->ValueKnown(perm_elems) && perm == nullptr) { c->set_output(0, c->UnknownShape()); return Status::OK(); } // Find our value of the rank. int64_t rank; if (c->RankKnown(input)) { rank = c->Rank(input); } else if (c->ValueKnown(perm_elems)) { rank = c->Value(perm_elems); } else { rank = perm->NumElements(); } if (!c->RankKnown(input) && rank < 2) { // A permutation array containing a single element is ambiguous. It could // indicate either a scalar or a 1-dimensional array, both of which the // transpose op returns unchanged. c->set_output(0, input); return Status::OK(); } std::vector<DimensionHandle> dims; dims.resize(rank); TF_RETURN_IF_ERROR(c->WithRank(input, rank, &input)); // Ensure that perm is a vector and has rank elements. TF_RETURN_IF_ERROR(c->WithRank(perm_shape, 1, &perm_shape)); TF_RETURN_IF_ERROR(c->WithValue(perm_elems, rank, &perm_elems)); // If we know the rank of the input and the value of perm, we can return // all shape information, otherwise we can only return rank information, // but no information for the dimensions. if (perm != nullptr) { std::vector<int64_t> data; if (perm->dtype() == DT_INT32) { data = AsInt64<int32>(perm, rank); } else { data = AsInt64<int64_t>(perm, rank); } for (int32_t i = 0; i < rank; ++i) { int64_t in_idx = data[i]; if (in_idx >= rank || in_idx <= -rank) { return errors::InvalidArgument("perm dim ", in_idx, " is out of range of input rank ", rank); } dims[i] = c->Dim(input, in_idx); } } else { for (int i = 0; i < rank; ++i) { dims[i] = c->UnknownDim(); } } c->set_output(0, c->MakeShape(dims)); return Status::OK(); } Status SetOutputShapeForReshape(InferenceContext* c) { ShapeHandle in = c->input(0); ShapeHandle out; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(1, &out)); if (!c->RankKnown(out)) { // We have no information about the shape of the output. c->set_output(0, out); return Status::OK(); } if (c->RankKnown(in)) { // We don't know the number of output elements, but we can try to infer // the missing dimension. 
bool too_many_unknown = false; int32_t out_unknown_idx = -1; DimensionHandle known_out_elems = c->NumElements(out); if (!c->ValueKnown(known_out_elems)) { known_out_elems = c->MakeDim(1); for (int32_t i = 0; i < c->Rank(out); ++i) { DimensionHandle dim = c->Dim(out, i); if (!c->ValueKnown(dim)) { if (out_unknown_idx >= 0) { too_many_unknown = true; break; } out_unknown_idx = i; } else { TF_RETURN_IF_ERROR( c->Multiply(known_out_elems, dim, &known_out_elems)); } } } int32_t in_unknown_idx = -1; DimensionHandle known_in_elems = c->NumElements(in); if (!c->ValueKnown(known_in_elems)) { known_in_elems = c->MakeDim(1); for (int32_t i = 0; i < c->Rank(in); ++i) { DimensionHandle dim = c->Dim(in, i); if (!c->ValueKnown(dim)) { if (in_unknown_idx >= 0) { too_many_unknown = true; break; } in_unknown_idx = i; } else { TF_RETURN_IF_ERROR(c->Multiply(known_in_elems, dim, &known_in_elems)); } } } if (!too_many_unknown) { if (in_unknown_idx < 0 && out_unknown_idx < 0) { // Just check that the dimensions match. if (c->Value(known_in_elems) != c->Value(known_out_elems)) { return errors::InvalidArgument( "Cannot reshape a tensor with ", c->DebugString(known_in_elems), " elements to shape ", c->DebugString(out), " (", c->DebugString(known_out_elems), " elements)"); } } else if (in_unknown_idx < 0 && out_unknown_idx >= 0 && c->Value(known_out_elems) > 0) { // Input fully known, infer the one missing output dim DimensionHandle inferred_dim; TF_RETURN_IF_ERROR(c->Divide(known_in_elems, c->Value(known_out_elems), true /* evenly_divisible */, &inferred_dim)); TF_RETURN_IF_ERROR( c->ReplaceDim(out, out_unknown_idx, inferred_dim, &out)); } else if (in_unknown_idx >= 0 && out_unknown_idx < 0 && c->Value(known_in_elems) != 0) { // Output fully known, infer the one missing input dim DimensionHandle inferred_dim; TF_RETURN_IF_ERROR(c->Divide(known_out_elems, c->Value(known_in_elems), true /* evenly_divisible */, &inferred_dim)); DimensionHandle unknown_in_dim = c->Dim(in, in_unknown_idx); TF_RETURN_IF_ERROR( c->Merge(unknown_in_dim, inferred_dim, &unknown_in_dim)); } else if (in_unknown_idx >= 0 && out_unknown_idx >= 0) { // Exactly one unknown dimension in both input and output. These 2 are // equal iff the known elements are equal. if (c->Value(known_in_elems) == c->Value(known_out_elems)) { DimensionHandle unknown_in_dim = c->Dim(in, in_unknown_idx); TF_RETURN_IF_ERROR( c->ReplaceDim(out, out_unknown_idx, unknown_in_dim, &out)); } } } } c->set_output(0, out); return Status::OK(); } } // namespace REGISTER_OP("ParallelConcat") .Input("values: N * T") .Output("output: T") .Attr("N: int >= 1") .Attr("T: type") .Attr("shape: shape") .SetShapeFn([](InferenceContext* c) { // Validate that the shape attr is correct. 
PartialTensorShape shape; TF_RETURN_IF_ERROR(c->GetAttr("shape", &shape)); ShapeHandle passed_shape; TF_RETURN_IF_ERROR( c->MakeShapeFromPartialTensorShape(shape, &passed_shape)); if (!c->FullyDefined(passed_shape)) { return errors::InvalidArgument("shape attr must be fully defined."); } ShapeHandle cur; TF_RETURN_IF_ERROR(c->ReplaceDim( passed_shape, 0, c->MakeDim(shape_inference::DimensionOrConstant(1)), &cur)); for (int i = 0; i < c->num_inputs(); ++i) { if (!c->FullyDefined(c->input(i))) { return errors::InvalidArgument( "All input shapes must be fully defined."); } DimensionHandle unused; if (!c->WithValue(c->Dim(c->input(i), 0), 1, &unused).ok()) { return errors::InvalidArgument("Size of first dimension must be 1."); } TF_RETURN_WITH_CONTEXT_IF_ERROR(c->Merge(c->input(i), cur, &cur), "From merging shape ", i, " with other shapes."); } c->set_output(0, passed_shape); return Status::OK(); }); REGISTER_OP("Pack") .Input("values: N * T") .Output("output: T") .Attr("N: int >= 1") .Attr("T: type") .Attr("axis: int = 0") .SetShapeFn([](InferenceContext* c) { // Validate shapes of all inputs are compatible ShapeHandle cur = c->input(c->num_inputs() - 1); for (int i = c->num_inputs() - 2; i >= 0; --i) { TF_RETURN_WITH_CONTEXT_IF_ERROR(c->Merge(c->input(i), cur, &cur), "From merging shape ", i, " with other shapes."); } if (!c->RankKnown(cur)) { c->set_output(0, c->UnknownShape()); return Status::OK(); } // Determine the axis that will be added, converting from negative // axes to a positive point per negative indexing rules. int32_t rank = c->Rank(cur); int32_t axis; TF_RETURN_IF_ERROR(GetAxisForPackAndUnpack(c, rank + 1, &axis)); // Copy all dimensions over, inserting a dimension of value #inputs // at <axis>. std::vector<DimensionHandle> dims; int index = 0; while (index < axis) dims.push_back(c->Dim(cur, index++)); dims.push_back(c->MakeDim(c->num_inputs())); while (index < rank) dims.push_back(c->Dim(cur, index++)); c->set_output(0, c->MakeShape(dims)); for (int i = 0; i < c->num_inputs(); ++i) { auto* shape_and_type = c->input_handle_shapes_and_types(i); if (shape_and_type) { if (!c->RelaxOutputHandleShapesAndMergeTypes(0, *shape_and_type)) { c->set_output_handle_shapes_and_types( 0, std::vector<shape_inference::ShapeAndType>({})); break; } } } return Status::OK(); }); REGISTER_OP("DeepCopy") .Input("x: T") .Output("y: T") .Attr("T: type") .SetIsStateful() .SetShapeFn(UnchangedShape); REGISTER_OP("InplaceUpdate") .Input("x: T") .Input("i: int32") .Input("v: T") .Output("y: T") .Attr("T: type") .SetShapeFn(UnchangedShape); REGISTER_OP("InplaceAdd") .Input("x: T") .Input("i: int32") .Input("v: T") .Output("y: T") .Attr("T: type") .SetShapeFn(UnchangedShape); REGISTER_OP("InplaceSub") .Input("x: T") .Input("i: int32") .Input("v: T") .Output("y: T") .Attr("T: type") .SetShapeFn(UnchangedShape); REGISTER_OP("Empty") .Input("shape: int32") .Output("output: dtype") .Attr("dtype: type") .Attr("init: bool = false") .SetDoNotOptimize() .SetShapeFn([](InferenceContext* c) { ShapeHandle out; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(0, &out)); c->set_output(0, out); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("Unpack") .Input("value: T") .Output("output: num * T") .Attr("num: int >= 0") .Attr("T: type") .Attr("axis: int = 0") .SetShapeFn([](InferenceContext* c) { ShapeHandle s = c->input(0); ShapeHandle out; if (c->RankKnown(s)) { // Determine the axis that will be removed, converting from negative // axes to a positive 
point per negative indexing rules. int32_t rank = c->Rank(s); int32_t axis; TF_RETURN_IF_ERROR(GetAxisForPackAndUnpack(c, rank, &axis)); // The axis dim matches the number of outputs. DimensionHandle unused; TF_RETURN_IF_ERROR( c->WithValue(c->Dim(s, axis), c->num_outputs(), &unused)); // Copy all dimensions, removing the <axis> dimension. std::vector<DimensionHandle> dims; for (int i = 0; i < rank; ++i) { if (i != axis) dims.push_back(c->Dim(s, i)); } out = c->MakeShape(dims); } else { // All outputs are the same shape, but it's not known. out = c->UnknownShape(); } for (int i = 0; i < c->num_outputs(); ++i) c->set_output(i, out); return Status::OK(); }); REGISTER_OP("UnravelIndex") .Input("indices: Tidx") .Input("dims: Tidx") .Output("output: Tidx") .Attr("Tidx: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle indices = c->input(0); ShapeHandle dims; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &dims)); if (c->RankKnown(indices) && c->Rank(indices) == 0) { c->set_output(0, c->Vector(c->Dim(dims, 0))); } else if (c->RankKnown(indices)) { c->set_output(0, c->Matrix(c->Dim(dims, 0), c->NumElements(indices))); } else { c->set_output(0, c->UnknownShape()); } return Status::OK(); }); REGISTER_OP("BroadcastTo") .Input("input: T") .Input("shape: Tidx") .Output("output: T") .Attr("T: type") .Attr("Tidx: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle shape_in = c->input(1); TF_RETURN_IF_ERROR(c->WithRank(shape_in, 1, &shape_in)); ShapeHandle out; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(1, &out)); if (!c->RankKnown(out)) { // We have no information about the shape of the output. c->set_output(0, out); return Status::OK(); } ShapeHandle in = c->input(0); if (!c->RankKnown(in)) { // We have no information about the shape of the input, // nothing to do here. c->set_output(0, out); return Status::OK(); } int out_rank = c->Rank(out); TF_RETURN_IF_ERROR(c->WithRankAtMost(in, out_rank, &in)); int in_rank = c->Rank(in); for (int i = 0; i < in_rank; ++i) { auto in_dim = c->Dim(in, in_rank - i - 1); if (c->Value(in_dim) > 1) { // If the input dimension is greater than 1 then the output dimension // must be equal to it, since we only broadcast "from left to right". auto out_dim = c->Dim(out, out_rank - i - 1); TF_RETURN_IF_ERROR(c->Merge(in_dim, out_dim, &out_dim)); TF_RETURN_IF_ERROR( c->ReplaceDim(out, out_rank - i - 1, out_dim, &out)); } } c->set_output(0, out); return Status::OK(); }); // -------------------------------------------------------------------------- // TODO(josh11b): Remove the >= 2 constraint, once we can rewrite the graph // in the N == 1 case to remove the node. REGISTER_OP("Concat") .Input("concat_dim: int32") .Input("values: N * T") .Output("output: T") .Attr("N: int >= 2") .Attr("T: type") .SetShapeFn([](InferenceContext* c) { return shape_inference::ConcatShape(c, c->num_inputs() - 1); }); REGISTER_OP("ConcatV2") .Input("values: N * T") .Input("axis: Tidx") .Output("output: T") .Attr("N: int >= 2") .Attr("T: type") .Attr("Tidx: {int32, int64} = DT_INT32") .SetShapeFn(shape_inference::ConcatV2Shape); // TODO(vivek.v.rane@intel.com): Prefix the op names with underscore if the ops // are not to be made user-accessible. 
#ifdef INTEL_MKL REGISTER_OP("_MklConcatV2") .Input("values: N * T") .Input("axis: Tidx") .Input("mkl_values: N * uint8") .Input("mkl_axis: uint8") .Output("output: T") .Output("mkl_output: uint8") .Attr("N: int >= 2") .Attr("T: type") .Attr("Tidx: {int32, int64} = DT_INT32") .SetShapeFn(shape_inference::ConcatV2Shape) .Doc(R"doc( MKL version of ConcatV2 operator. Uses MKL DNN APIs to perform concatenation. NOTE Do not invoke this operator directly in Python. Graph rewrite pass is expected to invoke these operators. )doc"); #endif REGISTER_OP("ConcatOffset") .Input("concat_dim: int32") .Input("shape: N * int32") .Output("offset: N * int32") .Attr("N: int >= 2") .SetShapeFn([](InferenceContext* c) { for (int i = 1; i < c->num_inputs(); ++i) { c->set_output(i - 1, c->input(i)); } return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("Split") .Input("split_dim: int32") .Input("value: T") .Output("output: num_split * T") .Attr("num_split: int >= 1") .Attr("T: type") .SetShapeFn([](InferenceContext* c) { DimensionHandle split_dimension; ShapeHandle input = c->input(1); TF_RETURN_IF_ERROR(c->MakeDimForScalarInputWithNegativeIndexing( 0, c->Rank(input), &split_dimension)); int num_split = c->num_outputs(); ShapeHandle out; if (!c->ValueKnown(split_dimension)) { if (c->RankKnown(input)) { out = c->UnknownShapeOfRank(c->Rank(input)); } else { out = c->UnknownShape(); } } else { int64_t split_dim = c->Value(split_dimension); TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, split_dim + 1, &input)); DimensionHandle split_dim_size; TF_RETURN_WITH_CONTEXT_IF_ERROR( c->Divide(c->Dim(input, split_dim), num_split, true /* evenly_divisible */, &split_dim_size), "Number of ways to split should evenly divide the split dimension"); TF_RETURN_IF_ERROR( c->ReplaceDim(input, split_dim, split_dim_size, &out)); } for (int i = 0; i < num_split; ++i) c->set_output(i, out); return Status::OK(); }); REGISTER_OP("SplitV") .Input("value: T") .Input("size_splits: Tlen") .Input("split_dim: int32") .Output("output: num_split * T") .Attr("num_split: int >= 1") .Attr("T: type") .Attr("Tlen: {int32, int64} = DT_INT64") .SetShapeFn([](InferenceContext* c) { DimensionHandle split_dimension; ShapeHandle input = c->input(0); TF_RETURN_IF_ERROR(c->MakeDimForScalarInputWithNegativeIndexing( 2, c->Rank(input), &split_dimension)); int32_t num_outputs = c->num_outputs(); int32_t rank = c->Rank(input); ShapeHandle output_shape; const Tensor* size_splits = c->input_tensor(1); if (rank == InferenceContext::kUnknownRank) { // If the rank of input tensor is unknown, then return unknown shapes. // Note that the shape of each output can be different. for (int i = 0; i < num_outputs; ++i) { c->set_output(i, c->UnknownShape()); } } else if (rank == 0) { // Throw error if input is a scalar. return errors::InvalidArgument("Can't split scalars"); } else if (size_splits == nullptr && c->ValueKnown(split_dimension)) { // If split dimension is known, but the sizes are unknown, then // only the split dimension is unknown output_shape = input; for (int i = 0; i < num_outputs; ++i) { TF_RETURN_IF_ERROR(c->ReplaceDim(output_shape, c->Value(split_dimension), c->UnknownDim(), &output_shape)); c->set_output(i, output_shape); } } else if (size_splits == nullptr && !c->ValueKnown(split_dimension)) { // If split dimension or tensor containing the split sizes is unknown, // then return unknown shapes of same rank as input. 
Note that each // output shape can be different since splitv doesn't always split // tensors evenly. for (int i = 0; i < num_outputs; ++i) { c->set_output(i, c->UnknownShapeOfRank(rank)); } } else { // Determine the output shape if split dimension and split sizes are // known. int64_t split_dim = c->Value(split_dimension); TF_RETURN_IF_ERROR(c->WithRankAtLeast(input, split_dim + 1, &input)); std::vector<int64_t> data; if (size_splits->dtype() == DT_INT32) { data = AsInt64<int32>(size_splits, size_splits->shape().dim_size(0)); } else { data = AsInt64<int64_t>(size_splits, size_splits->shape().dim_size(0)); } if (num_outputs != data.size()) { return errors::InvalidArgument( "Length of size_splits should be equal to num_outputs"); } int64_t total_size = 0; bool has_neg_one = false; for (const auto size : data) { if (size == -1) { if (has_neg_one) { return errors::InvalidArgument( "size_splits can only have one -1"); } has_neg_one = true; } else { total_size += size; } } auto split_dim_size = c->Value(c->Dim(input, split_dim)); // If the sizes of the splits are known, then // make sure that the sizes add up to the expected // dimension size, with the possibility of a -1. // Specify the full output shapes. for (int i = 0; i < num_outputs; ++i) { auto size = data[i]; if (data[i] == -1 && c->ValueKnown(split_dim_size)) { size = split_dim_size - total_size; } // If we have a negative known size (either explicit, or computed // via -1), then the split sizes are invalid. if (size < -1 || (size == -1 && c->ValueKnown(split_dim_size))) { return errors::InvalidArgument("Split size at index ", i, " must be >= 0. Got: ", size); } TF_RETURN_IF_ERROR( c->ReplaceDim(input, split_dim, c->MakeDim(size), &output_shape)); c->set_output(i, output_shape); } if (c->ValueKnown(split_dim_size)) { if (has_neg_one ? total_size > split_dim_size : total_size != split_dim_size) { return errors::InvalidArgument( "can't split axis of size ", split_dim_size, " into pieces of size [", absl::StrJoin(data, ","), "]"); } } } return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("Const") .Output("output: dtype") .Attr("value: tensor") .Attr("dtype: type") .SetShapeFn([](InferenceContext* c) { const TensorProto* proto = nullptr; TF_RETURN_IF_ERROR(c->GetAttr("value", &proto)); TF_RETURN_IF_ERROR(TensorShape::IsValidShape(proto->tensor_shape())); TensorShape shape(proto->tensor_shape()); std::vector<DimensionHandle> dims; dims.reserve(shape.dims()); for (int i = 0; i < shape.dims(); ++i) { dims.push_back(c->MakeDim(shape.dim_size(i))); } c->set_output(0, c->MakeShape(dims)); return Status::OK(); }); // Returns a constant tensor on the host. Useful for writing C++ tests // and benchmarks which run on GPU but require arguments pinned to the host. // Used by test::graph::HostConstant. // value: Attr `value` is the tensor to return. REGISTER_OP("HostConst") .Output("output: dtype") .Attr("value: tensor") .Attr("dtype: type") .SetShapeFn(shape_inference::UnknownShape); // Used executing op-by-op to copy constants to the current device without // serializing tensors as TensorProtos, after a host tensor has been // created. Same behavior as Identity, but no gradient and potentially relaxed // copy semantics. 
REGISTER_OP("_EagerConst") .Input("input: T") .Output("output: T") .Attr("T: type") .SetShapeFn(shape_inference::UnchangedShape); // -------------------------------------------------------------------------- // TODO(mgubin): Update the doc when the freeze_graph script supports converting // into memmapped format. REGISTER_OP("ImmutableConst") .Attr("dtype: type") .Attr("shape: shape") .Attr("memory_region_name: string") .Output("tensor: dtype") .SetShapeFn(shape_inference::ExplicitShape); REGISTER_OP("GuaranteeConst") .Input("input: T") .Output("output: T") .Attr("T: type") .SetShapeFn([](shape_inference::InferenceContext* c) { return UnchangedShape(c); }) // We don't want this to be optimized away. .SetDoNotOptimize(); // -------------------------------------------------------------------------- REGISTER_OP("ZerosLike") .Input("x: T") .Output("y: T") .Attr("T: type") .SetShapeFn(shape_inference::UnchangedShape); // -------------------------------------------------------------------------- REGISTER_OP("OnesLike") .Input("x: T") .Output("y: T") .Attr( "T: {bfloat16, half, float, double, int8, uint8, int16, uint16, int32, " "int64, complex64, complex128, bool}") .SetShapeFn(shape_inference::UnchangedShape); // -------------------------------------------------------------------------- REGISTER_OP("Diag") .Input("diagonal: T") .Output("output: T") .Attr( "T: {bfloat16, half, float, double, int32, int64, complex64, " "complex128}") .SetShapeFn([](InferenceContext* c) { ShapeHandle in = c->input(0); TF_RETURN_IF_ERROR(c->WithRankAtLeast(in, 1, &in)); // Output shape is original concatenated with itself. ShapeHandle out; TF_RETURN_IF_ERROR(c->Concatenate(in, in, &out)); c->set_output(0, out); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("DiagPart") .Input("input: T") .Output("diagonal: T") .Attr( "T: {bfloat16, half, float, double, int32, int64, complex64, " "complex128}") .SetShapeFn([](InferenceContext* c) { ShapeHandle in = c->input(0); if (!c->RankKnown(in)) { c->set_output(0, c->UnknownShape()); return Status::OK(); } // Rank must be even, and result will have rank <rank/2>. const int32_t rank = c->Rank(in); if ((rank % 2) != 0 || rank <= 0) { return errors::InvalidArgument( "Input must have even and non-zero rank, input rank is ", rank); } const int32_t mid = rank / 2; // output dim[i] is the merge of in.dim[i] and in.dim[i+mid]. 
std::vector<DimensionHandle> dims(mid); for (int i = 0; i < mid; ++i) { TF_RETURN_IF_ERROR( c->Merge(c->Dim(in, i), c->Dim(in, i + mid), &dims[i])); } c->set_output(0, c->MakeShape(dims)); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("MatrixDiag") .Input("diagonal: T") .Output("output: T") .Attr("T: type") .SetShapeFn([](InferenceContext* c) { ShapeHandle in; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &in)); if (!c->RankKnown(in)) { c->set_output(0, c->UnknownShape()); return Status::OK(); } const int32_t rank = c->Rank(in); ShapeHandle out; TF_RETURN_IF_ERROR( c->Concatenate(in, c->Vector(c->Dim(in, rank - 1)), &out)); c->set_output(0, out); return Status::OK(); }); REGISTER_OP("MatrixDiagV2") .Input("diagonal: T") .Input("k: int32") .Input("num_rows: int32") .Input("num_cols: int32") .Input("padding_value: T") .Output("output: T") .Attr("T: type") .SetShapeFn(shape_inference::MatrixDiagV2Shape); REGISTER_OP("MatrixDiagV3") .Input("diagonal: T") .Input("k: int32") .Input("num_rows: int32") .Input("num_cols: int32") .Input("padding_value: T") .Output("output: T") .Attr("T: type") .Attr( "align: {'LEFT_RIGHT', 'RIGHT_LEFT', 'LEFT_LEFT', 'RIGHT_RIGHT'} = " "'RIGHT_LEFT'") .SetShapeFn(shape_inference::MatrixDiagV2Shape); // -------------------------------------------------------------------------- REGISTER_OP("MatrixSetDiag") .Input("input: T") .Input("diagonal: T") .Output("output: T") .Attr("T: type") .SetShapeFn([](InferenceContext* c) { ShapeHandle input; ShapeHandle diag; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &input)); TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 1, &diag)); if (c->RankKnown(input)) { TF_RETURN_IF_ERROR(c->WithRank(c->input(1), c->Rank(input) - 1, &diag)); } DimensionHandle smallest_dim; TF_RETURN_IF_ERROR( c->Min(c->Dim(input, -2), c->Dim(input, -1), &smallest_dim)); TF_RETURN_IF_ERROR( c->Merge(smallest_dim, c->Dim(diag, -1), &smallest_dim)); ShapeHandle output = input; if (c->RankKnown(diag) && !c->FullyDefined(input)) { // Try to infer parts of shape from diag. 
ShapeHandle diag_batch_shape; TF_RETURN_IF_ERROR(c->Subshape(diag, 0, -1, &diag_batch_shape)); TF_RETURN_IF_ERROR( c->Concatenate(diag_batch_shape, c->UnknownShapeOfRank(2), &diag)); TF_RETURN_IF_ERROR(c->Merge(input, diag, &output)); } c->set_output(0, output); return Status::OK(); }); REGISTER_OP("MatrixSetDiagV2") .Input("input: T") .Input("diagonal: T") .Input("k: int32") .Output("output: T") .Attr("T: type") .SetShapeFn(shape_inference::MatrixSetDiagV2Shape); REGISTER_OP("MatrixSetDiagV3") .Input("input: T") .Input("diagonal: T") .Input("k: int32") .Output("output: T") .Attr("T: type") .Attr( "align: {'LEFT_RIGHT', 'RIGHT_LEFT', 'LEFT_LEFT', 'RIGHT_RIGHT'} = " "'RIGHT_LEFT'") .SetShapeFn(shape_inference::MatrixSetDiagV2Shape); // -------------------------------------------------------------------------- REGISTER_OP("MatrixDiagPart") .Input("input: T") .Output("diagonal: T") .Attr("T: type") .SetShapeFn([](InferenceContext* c) { ShapeHandle in; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 2, &in)); if (!c->RankKnown(in)) { c->set_output(0, c->UnknownShape()); return Status::OK(); } const int32_t rank = c->Rank(in); std::vector<DimensionHandle> dims; dims.reserve(rank - 2); for (int i = 0; i < rank - 2; ++i) dims.push_back(c->Dim(in, i)); DimensionHandle min_dim; TF_RETURN_IF_ERROR( c->Min(c->Dim(in, rank - 2), c->Dim(in, rank - 1), &min_dim)); dims.push_back(min_dim); c->set_output(0, c->MakeShape(dims)); return Status::OK(); }); REGISTER_OP("MatrixDiagPartV2") .Input("input: T") .Input("k: int32") .Input("padding_value: T") .Output("diagonal: T") .Attr("T: type") .SetShapeFn(shape_inference::MatrixDiagPartV2Shape); REGISTER_OP("MatrixDiagPartV3") .Input("input: T") .Input("k: int32") .Input("padding_value: T") .Output("diagonal: T") .Attr("T: type") .Attr( "align: {'LEFT_RIGHT', 'RIGHT_LEFT', 'LEFT_LEFT', 'RIGHT_RIGHT'} = " "'RIGHT_LEFT'") .SetShapeFn(shape_inference::MatrixDiagPartV2Shape); // -------------------------------------------------------------------------- REGISTER_OP("MatrixBandPart") .Input("input: T") .Input("num_lower: Tindex") .Input("num_upper: Tindex") .Output("band: T") .Attr("T: type") .Attr("Tindex: {int32, int64} = DT_INT64") .SetShapeFn(shape_inference::UnchangedShape); // -------------------------------------------------------------------------- REGISTER_OP("Reverse") .Input("tensor: T") .Input("dims: bool") .Output("output: T") .Attr( "T: {uint8, int8, uint16, int16, uint32, int32, uint64, int64, bool, " "bfloat16, half, float, double, complex64, complex128, string}") .SetShapeFn([](InferenceContext* c) { ShapeHandle input = c->input(0); ShapeHandle dims; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &dims)); DimensionHandle dims_dim = c->Dim(dims, 0); if (c->ValueKnown(dims_dim)) { TF_RETURN_IF_ERROR(c->WithRank(input, c->Value(dims_dim), &input)); } if (c->Rank(input) > 8) { return errors::InvalidArgument( "reverse does not work on tensors with more than 8 dimensions"); } c->set_output(0, input); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("ReverseV2") .Input("tensor: T") .Input("axis: Tidx") .Output("output: T") .Attr("Tidx: {int32, int64} = DT_INT32") .Attr( "T: {uint8, int8, uint16, int16, int32, uint32, int64, uint64, bool, " "bfloat16, half, float, double, complex64, complex128, string}") .SetShapeFn([](InferenceContext* c) { ShapeHandle input = c->input(0); ShapeHandle axis; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &axis)); if (c->Rank(input) > 8) { return 
errors::InvalidArgument( "reverse does not work on tensors with more than 8 dimensions"); } const Tensor* axis_tensor = c->input_tensor(1); if (axis_tensor != nullptr && c->RankKnown(input)) { int32_t rank = c->Rank(input); std::vector<int64_t> axis_value; if (axis_tensor->dtype() == DT_INT32) { axis_value = AsInt64<int32>(axis_tensor, axis_tensor->NumElements()); } else { axis_value = AsInt64<int64_t>(axis_tensor, axis_tensor->NumElements()); } std::vector<bool> axes_dense(c->Rank(input), false); for (int i = 0; i < axis_value.size(); i++) { int64_t canonical_axis = axis_value[i] < 0 ? rank + axis_value[i] : axis_value[i]; if (canonical_axis < 0 || canonical_axis >= rank) { return errors::InvalidArgument("'axis'[", i, "] = ", axis_value[i], " is out of valid range [", 0, ", ", rank - 1); } if (axes_dense[canonical_axis]) { return errors::InvalidArgument("axis ", canonical_axis, " specified more than once."); } axes_dense[canonical_axis] = true; } } c->set_output(0, input); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("EditDistance") .Input("hypothesis_indices: int64") .Input("hypothesis_values: T") .Input("hypothesis_shape: int64") .Input("truth_indices: int64") .Input("truth_values: T") .Input("truth_shape: int64") .Attr("normalize: bool = true") .Attr("T: type") .Output("output: float") .SetShapeFn([](InferenceContext* c) { TF_RETURN_IF_ERROR(shape_inference::ValidateSparseTensor( c, c->input(0), c->input(1), c->input(2))); TF_RETURN_IF_ERROR(shape_inference::ValidateSparseTensor( c, c->input(3), c->input(4), c->input(5))); const Tensor* hypothesis_shape_t = c->input_tensor(2); const Tensor* truth_shape_t = c->input_tensor(5); if (hypothesis_shape_t == nullptr || truth_shape_t == nullptr) { // We need to know the runtime shape of the two tensors, // or else the output shape is unknown. return shape_inference::UnknownShape(c); } if (hypothesis_shape_t->NumElements() != truth_shape_t->NumElements()) { return errors::InvalidArgument( "Num elements of hypothesis_shape does not match truth_shape: ", hypothesis_shape_t->NumElements(), " vs. 
", truth_shape_t->NumElements()); } auto h_values = hypothesis_shape_t->flat<int64_t>(); auto t_values = truth_shape_t->flat<int64_t>(); std::vector<DimensionHandle> dims(hypothesis_shape_t->NumElements() - 1); for (int i = 0; i < dims.size(); ++i) { dims[i] = c->MakeDim(std::max(h_values(i), t_values(i))); } c->set_output(0, c->MakeShape(dims)); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("Fill") .Input("dims: index_type") .Input("value: T") .Output("output: T") .Attr("T: type") .Attr("index_type: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { DataType index_type = DT_INT32; Status s = c->GetAttr("index_type", &index_type); if (!s.ok() && s.code() != error::NOT_FOUND) { return s; } ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused)); const Tensor* t = c->input_tensor(0); if (t != nullptr) { for (int i = 0; i < t->NumElements(); ++i) { if ((index_type == DT_INT32 && t->vec<int32>()(i) < 0) || (index_type == DT_INT64 && t->vec<int64_t>()(i) < 0)) { return errors::InvalidArgument("Fill dimensions must be >= 0"); } } } ShapeHandle out; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(0, &out)); c->set_output(0, out); auto* shape_and_type = c->input_handle_shapes_and_types(1); if (shape_and_type) { c->set_output_handle_shapes_and_types(0, *shape_and_type); } return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("_ParallelConcatStart") .Output("output: dtype") .Attr("shape: shape") .Attr("dtype: type") .SetIsStateful() .SetShapeFn(shape_inference::ExplicitShape) .Doc(R"doc( Creates an empty Tensor with shape `shape` and type `dtype`. The memory can optionally be initialized. This is usually useful in conjunction with inplace operations. shape: 1-D `Tensor` indicating the shape of the output. dtype: The element type of the returned tensor. output: An empty Tensor of the specified type. )doc"); // -------------------------------------------------------------------------- REGISTER_OP("_ParallelConcatUpdate") .Input("value: T") .Input("update: T") .Output("output: T") .Attr("T: type") .Attr("loc: int") .SetShapeFn(shape_inference::UnchangedShape) .Doc(R"doc( Updates input `value` at `loc` with `update`. If you use this function you will almost certainly want to add a control dependency as done in the implementation of parallel_stack to avoid race conditions. value: A `Tensor` object that will be updated in-place. loc: A scalar indicating the index of the first dimension such that value[loc, :] is updated. update: A `Tensor` of rank one less than `value` if `loc` is a scalar, otherwise of rank equal to `value` that contains the new values for `value`. output: `value` that has been updated accordingly. 
)doc"); // -------------------------------------------------------------------------- REGISTER_OP("Gather") .Input("params: Tparams") .Input("indices: Tindices") .Attr("validate_indices: bool = true") .Output("output: Tparams") .Attr("Tparams: type") .Attr("Tindices: {int32,int64}") .SetShapeFn([](InferenceContext* c) { ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &unused)); ShapeHandle params_subshape; TF_RETURN_IF_ERROR(c->Subshape(c->input(0), 1, &params_subshape)); ShapeHandle indices_shape = c->input(1); ShapeHandle out; TF_RETURN_IF_ERROR(c->Concatenate(indices_shape, params_subshape, &out)); c->set_output(0, out); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("GatherV2") .Input("params: Tparams") .Input("indices: Tindices") .Input("axis: Taxis") .Attr("batch_dims: int = 0") .Output("output: Tparams") .Attr("Tparams: type") .Attr("Tindices: {int32,int64}") .Attr("Taxis: {int32,int64}") .SetShapeFn([](InferenceContext* c) { ShapeHandle params_shape; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &params_shape)); ShapeHandle indices_shape = c->input(1); ShapeHandle unused_axis_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused_axis_shape)); const Tensor* axis_t = c->input_tensor(2); // If axis is unknown, we can only infer that the result is params_rank + // indices_rank - 1. if (axis_t == nullptr) { if (c->RankKnown(params_shape) && c->RankKnown(indices_shape)) { int32_t batch_dims; TF_RETURN_IF_ERROR(c->GetAttr("batch_dims", &batch_dims)); c->set_output(0, c->UnknownShapeOfRank(c->Rank(params_shape) + c->Rank(indices_shape) - 1 - batch_dims)); } else { c->set_output(0, c->UnknownShape()); } return Status::OK(); } // Note, axis can be negative. int64_t axis = 0; if (axis_t->dtype() == DT_INT32) { axis = axis_t->scalar<int32>()(); } else { axis = axis_t->scalar<int64_t>()(); } // Check that params has rank of at least axis + 1. ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRankAtLeast( params_shape, axis < 0 ? -axis : axis + 1, &unused)); // Note, batch_dims can be negative. int32_t batch_dims; TF_RETURN_IF_ERROR(c->GetAttr("batch_dims", &batch_dims)); // -rank(indices) <= batch_dims <= rank(indices) TF_RETURN_IF_ERROR( c->WithRankAtLeast(indices_shape, std::abs(batch_dims), &unused)); if (batch_dims < 0) { batch_dims += c->Rank(indices_shape); } // rank(params) > batch_dims TF_RETURN_IF_ERROR( c->WithRankAtLeast(params_shape, batch_dims + 1, &unused)); ShapeHandle params_outer_subshape; TF_RETURN_IF_ERROR( c->Subshape(params_shape, 0, axis, &params_outer_subshape)); ShapeHandle indices_inner_subshape; TF_RETURN_IF_ERROR( c->Subshape(indices_shape, batch_dims, &indices_inner_subshape)); ShapeHandle out; TF_RETURN_IF_ERROR( c->Concatenate(params_outer_subshape, indices_inner_subshape, &out)); // Slice from axis + 1 to the end of params_shape to collect the inner // dimensions of the result. Special case -1 here since -1 + 1 wraps, and // we slice from 0 to the end of shape. Subshape() handles all other // out-of-bounds checking. 
if (axis != -1) { ShapeHandle params_inner_subshape; TF_RETURN_IF_ERROR( c->Subshape(params_shape, axis + 1, &params_inner_subshape)); TF_RETURN_IF_ERROR(c->Concatenate(out, params_inner_subshape, &out)); } c->set_output(0, out); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("GatherNd") .Input("params: Tparams") .Input("indices: Tindices") .Output("output: Tparams") .Attr("Tparams: type") .Attr("Tindices: {int32,int64}") .SetShapeFn(shape_inference::GatherNdShape); // -------------------------------------------------------------------------- REGISTER_OP("Identity") .Input("input: T") .Output("output: T") .Attr("T: type") .SetForwardTypeFn(full_type::ReplicateInputs()) .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("Snapshot") .Input("input: T") .Output("output: T") .Attr("T: type") .SetShapeFn(shape_inference::UnchangedShape); #ifdef INTEL_MKL REGISTER_OP("_MklIdentity") .Input("input: T") .Input("mkl_input: uint8") .Output("output: T") .Output("mkl_output: uint8") .Attr("T: type") .SetShapeFn(shape_inference::UnchangedShape) .Doc(R"Doc( Mkl implementation of IdentityOp )Doc"); #endif REGISTER_OP("IdentityN") .Input("input: T") .Output("output: T") .Attr("T: list(type)") .SetShapeFn([](shape_inference::InferenceContext* c) { std::vector<ShapeHandle> input; TF_RETURN_IF_ERROR(c->input("input", &input)); TF_RETURN_IF_ERROR(c->set_output("output", input)); // If any of the input shapes are not known, we should return error. for (int i = 0; i < input.size(); i++) { if (!input[i].Handle()) { return errors::InvalidArgument(absl::StrCat( "Cannot infer output shape #", i, " for IdentityN node because input shape #", i, " is unknown.")); } } return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("RefIdentity") .Input("input: Ref(T)") .Output("output: Ref(T)") .Attr("T: type") .SetShapeFn(shape_inference::UnchangedShape) .SetAllowsUninitializedInput(); // -------------------------------------------------------------------------- REGISTER_OP("DebugGradientIdentity") .Input("input: T") .Output("output: T") .Attr("T: type") .SetShapeFn(shape_inference::UnchangedShape) .SetAllowsUninitializedInput(); REGISTER_OP("DebugGradientRefIdentity") .Input("input: Ref(T)") .Output("output: Ref(T)") .Attr("T: type") .SetShapeFn(shape_inference::UnchangedShape) .SetAllowsUninitializedInput(); // -------------------------------------------------------------------------- REGISTER_OP("StopGradient") .Input("input: T") .Output("output: T") .Attr("T: type") .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("PreventGradient") .Input("input: T") .Output("output: T") .Attr("T: type") .Attr("message: string = ''") .SetShapeFn(shape_inference::UnchangedShape); // -------------------------------------------------------------------------- REGISTER_OP("CheckNumerics") .Input("tensor: T") .Output("output: T") .Attr("T: {bfloat16, half, float, double}") .Attr("message: string") .SetIsStateful() .SetShapeFn(shape_inference::UnchangedShape); // -------------------------------------------------------------------------- REGISTER_OP("CheckNumericsV2") .Input("tensor: T") .Output("output: T") .Attr("T: {bfloat16, half, float, double}") .Attr("message: string") .SetIsStateful() .SetShapeFn(shape_inference::UnchangedShape); // -------------------------------------------------------------------------- REGISTER_OP("Reshape") .Input("tensor: T") .Input("shape: Tshape") .Output("output: T") 
.Attr("T: type") .Attr("Tshape: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { return SetOutputShapeForReshape(c); }); #ifdef INTEL_MKL REGISTER_OP("_MklReshape") .Input("tensor: T") .Input("shape: Tshape") .Input("mkl_tensor: uint8") .Input("mkl_shape: uint8") .Output("output: T") .Output("mkl_output: uint8") .Attr("T: type") .Attr("Tshape: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { return SetOutputShapeForReshape(c); }) .Doc(R"Doc( MKL implementation of ReshapeOp. )Doc"); #endif // INTEL_MKL // -------------------------------------------------------------------------- REGISTER_OP("InvertPermutation") .Input("x: T") .Output("y: T") .Attr("T: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle x; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &x)); c->set_output(0, x); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("Transpose") .Input("x: T") .Input("perm: Tperm") .Output("y: T") .Attr("T: type") .Attr("Tperm: {int32, int64} = DT_INT32") .SetShapeFn(TransposeShapeFn); #ifdef INTEL_MKL REGISTER_OP("_MklTranspose") .Input("x: T") .Input("perm: Tperm") .Output("y: T") .Attr("T: type") .Attr("Tperm: {int32, int64} = DT_INT32") .SetShapeFn(TransposeShapeFn); #endif // INTEL_MKL // -------------------------------------------------------------------------- REGISTER_OP("ConjugateTranspose") .Input("x: T") .Input("perm: Tperm") .Output("y: T") .Attr("T: type") .Attr("Tperm: {int32, int64} = DT_INT32") .SetShapeFn(TransposeShapeFn); #ifdef INTEL_MKL REGISTER_OP("_MklConjugateTranspose") .Input("x: T") .Input("perm: Tperm") .Output("y: T") .Attr("T: type") .Attr("Tperm: {int32, int64} = DT_INT32") .SetShapeFn(TransposeShapeFn); #endif // INTEL_MKL // -------------------------------------------------------------------------- namespace { Status UniqueIdxShapeFn(InferenceContext* c) { ShapeHandle input = c->input(0); const Tensor* axis_t = c->input_tensor(1); if (axis_t == nullptr || !c->RankKnown(input)) { c->set_output(1, c->Vector(InferenceContext::kUnknownDim)); return Status::OK(); } if (c->Rank(c->input(1)) != 1) { return errors::InvalidArgument("axis expects a 1D vector."); } int32_t n = axis_t->NumElements(); if (n == 0) { if (c->Rank(input) != 1) { return errors::InvalidArgument("x expects a 1D vector."); } c->set_output(1, input); return Status::OK(); } else if (n == 1) { int64_t axis; if (axis_t->dtype() == DT_INT32) { axis = static_cast<int64_t>(axis_t->flat<int32>()(0)); } else { axis = axis_t->flat<int64_t>()(0); } int64_t input_rank = c->Rank(input); if (axis < -input_rank || axis >= input_rank) { return errors::InvalidArgument("axis expects to be in the range [", -input_rank, ", ", input_rank, ")"); } if (axis < 0) { axis += input_rank; } c->set_output(1, c->Vector(c->Dim(input, axis))); return Status::OK(); } return errors::InvalidArgument( "axis does not support input tensors larger than 1 elements."); } } // namespace REGISTER_OP("Unique") .Input("x: T") .Output("y: T") .Output("idx: out_idx") .Attr("T: type") .Attr("out_idx: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { c->set_output(0, c->Vector(InferenceContext::kUnknownDim)); c->set_output(1, c->input(0)); // Assert that the input rank is 1. 
ShapeHandle dummy; return c->WithRank(c->input(0), 1, &dummy); }); REGISTER_OP("UniqueV2") .Input("x: T") .Input("axis: Taxis") .Output("y: T") .Output("idx: out_idx") .Attr("T: type") .Attr("Taxis: {int32,int64} = DT_INT64") .Attr("out_idx: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { c->set_output(0, c->UnknownShapeOfRank(c->Rank(c->input(0)))); TF_RETURN_IF_ERROR(UniqueIdxShapeFn(c)); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("UniqueWithCounts") .Input("x: T") .Output("y: T") .Output("idx: out_idx") .Output("count: out_idx") .Attr("T: type") .Attr("out_idx: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { auto uniq = c->Vector(InferenceContext::kUnknownDim); c->set_output(0, uniq); c->set_output(1, c->input(0)); c->set_output(2, uniq); return Status::OK(); }); REGISTER_OP("UniqueWithCountsV2") .Input("x: T") .Input("axis: Taxis") .Output("y: T") .Output("idx: out_idx") .Output("count: out_idx") .Attr("T: type") .Attr("Taxis: {int32,int64} = DT_INT64") .Attr("out_idx: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { c->set_output(0, c->UnknownShapeOfRank(c->Rank(c->input(0)))); TF_RETURN_IF_ERROR(UniqueIdxShapeFn(c)); c->set_output(2, c->Vector(InferenceContext::kUnknownDim)); return Status::OK(); }); namespace { Status ShapeShapeFn(InferenceContext* c) { for (int i = 0; i < c->num_inputs(); ++i) { DimensionHandle dim; if (c->RankKnown(c->input(i))) { dim = c->MakeDim(c->Rank(c->input(i))); } else { dim = c->UnknownDim(); } c->set_output(i, c->Vector(dim)); } return Status::OK(); } } // namespace // -------------------------------------------------------------------------- REGISTER_OP("Shape") .Input("input: T") .Output("output: out_type") .Attr("T: type") .Attr("out_type: {int32, int64} = DT_INT32") .SetShapeFn(ShapeShapeFn); REGISTER_OP("ShapeN") .Input("input: N * T") .Output("output: N * out_type") .Attr("N: int") .Attr("T: type") .Attr("out_type: {int32, int64} = DT_INT32") .SetShapeFn(ShapeShapeFn); REGISTER_OP("EnsureShape") .Input("input: T") .Output("output: T") .Attr("shape: shape") .Attr("T: type") .SetShapeFn([](InferenceContext* c) { // Merges desired shape and statically known shape of input PartialTensorShape desired_shape; TF_RETURN_IF_ERROR(c->GetAttr("shape", &desired_shape)); int rank = desired_shape.dims(); ShapeHandle input_shape_handle; ShapeHandle desired_shape_handle; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), rank, &input_shape_handle)); TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape( desired_shape, &desired_shape_handle)); ShapeHandle merged_shape; TF_RETURN_IF_ERROR( c->Merge(desired_shape_handle, input_shape_handle, &merged_shape)); c->set_output(0, merged_shape); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("ReverseSequence") .Input("input: T") .Input("seq_lengths: Tlen") .Output("output: T") .Attr("seq_dim: int") .Attr("batch_dim: int = 0") .Attr("T: type") .Attr("Tlen: {int32, int64} = DT_INT64") .SetShapeFn([](InferenceContext* c) { ShapeHandle input = c->input(0); ShapeHandle seq_lens_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &seq_lens_shape)); int64_t seq_dim; TF_RETURN_IF_ERROR(c->GetAttr("seq_dim", &seq_dim)); int64_t batch_dim; TF_RETURN_IF_ERROR(c->GetAttr("batch_dim", &batch_dim)); if (!c->RankKnown(input)) { return shape_inference::UnknownShape(c); } // Validate batch_dim and seq_dim against input. 
const int32_t input_rank = c->Rank(input); if (batch_dim >= input_rank) { return errors::InvalidArgument( "batch_dim must be < input rank: ", batch_dim, " vs. ", input_rank); } if (seq_dim >= input_rank) { return errors::InvalidArgument( "seq_dim must be < input rank: ", seq_dim, " vs. ", input_rank); } DimensionHandle batch_dim_dim = c->Dim(input, batch_dim); TF_RETURN_IF_ERROR( c->Merge(batch_dim_dim, c->Dim(seq_lens_shape, 0), &batch_dim_dim)); // Replace batch_dim of input with batch_size ShapeHandle output_shape; TF_RETURN_IF_ERROR( c->ReplaceDim(input, batch_dim, batch_dim_dim, &output_shape)); c->set_output(0, output_shape); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("Rank") .Input("input: T") .Output("output: int32") .Attr("T: type") .SetShapeFn(shape_inference::ScalarShape); // -------------------------------------------------------------------------- REGISTER_OP("Size") .Input("input: T") .Output("output: out_type") .Attr("T: type") .Attr("out_type: {int32, int64} = DT_INT32") .SetShapeFn(shape_inference::ScalarShape); // -------------------------------------------------------------------------- REGISTER_OP("Slice") .Input("input: T") .Input("begin: Index") .Input("size: Index") .Output("output: T") .Attr("T: type") .Attr("Index: {int32,int64}") .SetShapeFn(shape_inference::SliceShape); #ifdef INTEL_MKL REGISTER_OP("_MklSlice") .Input("input: T") .Input("begin: Index") .Input("size: Index") .Input("mkl_input: uint8") .Input("mkl_begin: uint8") .Input("mkl_size: uint8") .Output("output: T") .Output("mkl_output: uint8") .Attr("T: type") .Attr("Index: {int32,int64}") .SetShapeFn(shape_inference::SliceShape); #endif REGISTER_OP("StridedSlice") .Input("input: T") .Input("begin: Index") .Input("end: Index") .Input("strides: Index") .Output("output: T") .Attr("T: type") .Attr("Index: {int32, int64}") .Attr("begin_mask: int = 0") .Attr("end_mask: int = 0") .Attr("ellipsis_mask: int = 0") .Attr("new_axis_mask: int = 0") .Attr("shrink_axis_mask: int = 0") .SetShapeFn([](InferenceContext* c) { ShapeHandle input = c->input(0); ShapeHandle begin_shape, end_shape, strides_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &begin_shape)); TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &end_shape)); TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 1, &strides_shape)); TF_RETURN_IF_ERROR(c->Merge(begin_shape, end_shape, &begin_shape)); TF_RETURN_IF_ERROR(c->Merge(begin_shape, strides_shape, &begin_shape)); DimensionHandle sparse_dims_dim = c->Dim(begin_shape, 0); const Tensor* strides_value = c->input_tensor(3); // TODO(aselle,allenl): If we had a stride_mask it would be possible to do // more shape inference here (e.g. for x[3, ::T]). if (!c->RankKnown(input) || !c->ValueKnown(sparse_dims_dim) || strides_value == nullptr) { c->set_output(0, c->UnknownShape()); return Status::OK(); } PartialTensorShape input_shape({}); for (int i = 0; i < c->Rank(input); ++i) { auto dim = c->Dim(input, i); input_shape.AddDim(c->ValueKnown(dim) ? 
c->Value(dim) : -1); } int32_t begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask; TF_RETURN_IF_ERROR(c->GetAttr("begin_mask", &begin_mask)); TF_RETURN_IF_ERROR(c->GetAttr("end_mask", &end_mask)); TF_RETURN_IF_ERROR(c->GetAttr("ellipsis_mask", &ellipsis_mask)); TF_RETURN_IF_ERROR(c->GetAttr("new_axis_mask", &new_axis_mask)); TF_RETURN_IF_ERROR(c->GetAttr("shrink_axis_mask", &shrink_axis_mask)); const Tensor* begin_value = c->input_tensor(1); const Tensor* end_value = c->input_tensor(2); PartialTensorShape processing_shape, final_shape; bool is_identity, is_simple_slice, slice_dim0; gtl::InlinedVector<int64, 4> begin, end, strides; TF_RETURN_IF_ERROR(ValidateStridedSliceOp( begin_value, end_value, *strides_value, input_shape, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask, &processing_shape, &final_shape, &is_identity, &is_simple_slice, &slice_dim0, &begin, &end, &strides)); ShapeHandle out; TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(final_shape, &out)); c->set_output(0, out); auto* shape_and_type = c->input_handle_shapes_and_types(0); if (shape_and_type) { c->set_output_handle_shapes_and_types(0, *shape_and_type); } return Status::OK(); }); REGISTER_OP("StridedSliceGrad") .Input("shape: Index") .Input("begin: Index") .Input("end: Index") .Input("strides: Index") .Input("dy: T") .Output("output: T") .Attr("T: type") .Attr("Index: {int32, int64}") .Attr("begin_mask: int = 0") .Attr("end_mask: int = 0") .Attr("ellipsis_mask: int = 0") .Attr("new_axis_mask: int = 0") .Attr("shrink_axis_mask: int = 0") .SetShapeFn([](InferenceContext* c) { ShapeHandle out; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(0, &out)); c->set_output(0, out); return Status::OK(); }); REGISTER_OP("StridedSliceAssign") .Input("ref: Ref(T)") .Input("begin: Index") .Input("end: Index") .Input("strides: Index") .Input("value: T") .Output("output_ref: Ref(T)") .Attr("T: type") .Attr("Index: {int32, int64}") .Attr("begin_mask: int = 0") .Attr("end_mask: int = 0") .Attr("ellipsis_mask: int = 0") .Attr("new_axis_mask: int = 0") .Attr("shrink_axis_mask: int = 0") .SetShapeFn(shape_inference::UnchangedShape); // TODO(aselle): Fix this documentation once StridedSliceAssign Supports // broadcasting. // -------------------------------------------------------------------------- REGISTER_OP("ResourceStridedSliceAssign") .Input("ref: resource") .Input("begin: Index") .Input("end: Index") .Input("strides: Index") .Input("value: T") .Attr("T: type") .Attr("Index: {int32, int64}") .Attr("begin_mask: int = 0") .Attr("end_mask: int = 0") .Attr("ellipsis_mask: int = 0") .Attr("new_axis_mask: int = 0") .Attr("shrink_axis_mask: int = 0") .SetShapeFn(shape_inference::NoOutputs); REGISTER_OP("TensorStridedSliceUpdate") .Input("input: T") .Input("begin: Index") .Input("end: Index") .Input("strides: Index") .Input("value: T") .Output("output: T") .Attr("T: type") .Attr("Index: {int32, int64}") .Attr("begin_mask: int = 0") .Attr("end_mask: int = 0") .Attr("ellipsis_mask: int = 0") .Attr("new_axis_mask: int = 0") .Attr("shrink_axis_mask: int = 0") .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("Tile") .Input("input: T") .Input("multiples: Tmultiples") .Output("output: T") .Attr("T: type") .Attr("Tmultiples: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle input = c->input(0); // NOTE(mrry): Represent `multiples` as a `TensorShape` because (i) // it is a vector of non-negative integers, and (ii) doing so allows // us to handle partially-known multiples. 
ShapeHandle multiples; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(1, &multiples)); if (c->RankKnown(input)) { TF_RETURN_IF_ERROR(c->WithRank(multiples, c->Rank(input), &multiples)); ShapeHandle dummy; TF_RETURN_IF_ERROR( c->Merge(c->input(1), c->Vector(c->Rank(input)), &dummy)); } if (!c->RankKnown(multiples)) { return shape_inference::UnknownShape(c); } int32_t rank = c->Rank(multiples); TF_RETURN_IF_ERROR(c->WithRank(input, rank, &input)); std::vector<DimensionHandle> dims(rank); for (int i = 0; i < rank; ++i) { TF_RETURN_IF_ERROR( c->Multiply(c->Dim(input, i), c->Dim(multiples, i), &dims[i])); } c->set_output(0, c->MakeShape(dims)); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("TileGrad") .Input("input: T") .Input("multiples: int32") .Output("output: T") .Attr("T: type") .Deprecated(3, "TileGrad has been replaced with reduce_sum") .SetShapeFn(tensorflow::shape_inference::UnknownShape); // -------------------------------------------------------------------------- REGISTER_OP("Where") .Input("input: T") .Attr("T: {numbertype, bool} = DT_BOOL") .Output("index: int64") .SetShapeFn([](InferenceContext* c) { c->set_output(0, c->Matrix(c->UnknownDim(), c->Rank(c->input(0)))); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("BroadcastArgs") .Input("s0: T") .Input("s1: T") .Output("r0: T") .Attr("T: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle unused; ShapeHandle shape_x = c->input(0); ShapeHandle shape_y = c->input(1); TF_RETURN_IF_ERROR(c->WithRank(shape_x, 1, &unused)); TF_RETURN_IF_ERROR(c->WithRank(shape_y, 1, &unused)); if (!c->ValueKnown(c->Dim(shape_x, 0)) || !c->ValueKnown(c->Dim(shape_y, 0))) { c->set_output(0, c->Vector(InferenceContext::kUnknownDim)); return Status::OK(); } int64_t x_dim = c->Value(c->Dim(shape_x, 0)); int64_t y_dim = c->Value(c->Dim(shape_y, 0)); // Broadcasted shape is going to be as large as the largest dimension. c->set_output(0, c->Vector(std::max(x_dim, y_dim))); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("BroadcastGradientArgs") .Input("s0: T") .Input("s1: T") .Output("r0: T") .Output("r1: T") .Attr("T: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { // TODO(mrry): Implement constant_value for BroadcastGradientArgs? 
ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused)); c->set_output(0, c->Vector(InferenceContext::kUnknownDim)); c->set_output(1, c->Vector(InferenceContext::kUnknownDim)); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("Pad") .Input("input: T") .Input("paddings: Tpaddings") .Output("output: T") .Attr("T: type") .Attr("Tpaddings: {int32, int64} = DT_INT32") .SetShapeFn(PadShapeFn); // -------------------------------------------------------------------------- REGISTER_OP("PadV2") .Input("input: T") .Input("paddings: Tpaddings") .Input("constant_values: T") .Output("output: T") .Attr("T: type") .Attr("Tpaddings: {int32, int64} = DT_INT32") .SetShapeFn(PadShapeFn); // -------------------------------------------------------------------------- REGISTER_OP("MirrorPad") .Input("input: T") .Input("paddings: Tpaddings") .Output("output: T") .Attr("T: type") .Attr("Tpaddings: {int32, int64} = DT_INT32") .Attr(GetMirrorPadModeAttrString()) .SetShapeFn(PadShapeFn); // -------------------------------------------------------------------------- namespace { template <typename T> Status MirrorPadKnown(InferenceContext* c, ShapeHandle input, const Tensor* paddings_t, int64_t input_rank) { auto paddings_data = paddings_t->matrix<T>(); std::vector<DimensionHandle> dims(input_rank); for (int64_t i = 0; i < input_rank; ++i) { const int64_t pad0 = static_cast<int64_t>(paddings_data(i, 0)); const int64_t pad1 = static_cast<int64_t>(paddings_data(i, 1)); if (pad0 < 0 || pad1 < 0) { return errors::InvalidArgument("Paddings must be non-negative"); } TF_RETURN_IF_ERROR(c->Subtract(c->Dim(input, i), pad0 + pad1, &dims[i])); } c->set_output(0, c->MakeShape(dims)); return Status::OK(); } } // namespace REGISTER_OP("MirrorPadGrad") .Input("input: T") .Input("paddings: Tpaddings") .Output("output: T") .Attr("T: type") .Attr("Tpaddings: {int32, int64} = DT_INT32") .Attr(GetMirrorPadModeAttrString()) .SetShapeFn([](InferenceContext* c) { ShapeHandle paddings; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &paddings)); DimensionHandle pad_0 = c->Dim(paddings, 0); if (!c->ValueKnown(pad_0)) { // We don't know the rank of the output since the first // padding dimension is unknown. c->set_output(0, c->UnknownShape()); return Status::OK(); } int64_t input_rank = c->Value(pad_0); ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), input_rank, &input)); TF_RETURN_IF_ERROR( c->Merge(paddings, c->Matrix(input_rank, 2), &paddings)); const Tensor* paddings_t = c->input_tensor(1); if (paddings_t == nullptr) { // Values of 'paddings' is not available, but we know the // input rank, so return the rank of the output with unknown // dimensions. c->set_output(0, c->UnknownShapeOfRank(input_rank)); return Status::OK(); } if (paddings_t->dtype() == DT_INT32) { return MirrorPadKnown<int32>(c, input, paddings_t, input_rank); } else { return MirrorPadKnown<int64_t>(c, input, paddings_t, input_rank); } }); // -------------------------------------------------------------------------- REGISTER_OP("Placeholder") .Output("output: dtype") .Attr("dtype: type") .Attr("shape: shape = { unknown_rank: true }") .SetShapeFn([](InferenceContext* c) { PartialTensorShape shape; TF_RETURN_IF_ERROR(c->GetAttr("shape", &shape)); // Placeholder has legacy behavior where we cannot tell the difference // between a scalar shape attribute and 'unknown shape'. 
So if the shape // is a scalar, we return an unknown shape. if (c->graph_def_version() <= 21 && shape.dims() <= 0) { return shape_inference::UnknownShape(c); } ShapeHandle out; TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(shape, &out)); c->set_output(0, out); return Status::OK(); }); // Placeholder was modified in a backwards compatible way to do what // PlaceholderV2 did, so we have deprecated V2 (no one was really // using it). REGISTER_OP("PlaceholderV2") .Output("output: dtype") .Attr("dtype: type") .Attr("shape: shape") .SetShapeFn(shape_inference::ExplicitShape) .Deprecated(23, "Placeholder now behaves the same as PlaceholderV2."); // -------------------------------------------------------------------------- REGISTER_OP("PlaceholderWithDefault") .Input("input: dtype") .Output("output: dtype") .Attr("dtype: type") .Attr("shape: shape") .SetShapeFn([](InferenceContext* c) { ShapeHandle input = c->input(0); PartialTensorShape shape; TF_RETURN_IF_ERROR(c->GetAttr("shape", &shape)); ShapeHandle out; TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(shape, &out)); // We merge for compatibility checking, but return the output, // since output_shape may be less precise than input_shape. ShapeHandle unused; TF_RETURN_IF_ERROR(c->Merge(input, out, &unused)); c->set_output(0, out); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("ExpandDims") .Input("input: T") .Input("dim: Tdim") .Output("output: T") .Attr("T: type") .Attr("Tdim: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle input = c->input(0); const Tensor* dim_t = c->input_tensor(1); if (dim_t != nullptr && dim_t->NumElements() != 1) { return errors::InvalidArgument( "'dim' input must be a tensor with a single value"); } if (dim_t == nullptr || !c->RankKnown(input)) { c->set_output(0, c->UnknownShape()); return Status::OK(); } int64_t dim; if (dim_t->dtype() == DT_INT32) { dim = static_cast<int64_t>(dim_t->flat<int32>()(0)); } else { dim = dim_t->flat<int64_t>()(0); } const int32_t rank = c->Rank(input); const int32_t min_dim = -1 * rank - 1; if (dim < min_dim || dim > rank) { return errors::InvalidArgument("dim ", dim, " not in the interval [", min_dim, ", ", rank, "]."); } if (dim < 0) { dim += rank + 1; } ShapeHandle end; TF_RETURN_IF_ERROR(c->Subshape(input, dim, &end)); // Build output as start + 1 + end. ShapeHandle output; TF_RETURN_IF_ERROR(c->Subshape(input, 0, dim, &output)); TF_RETURN_IF_ERROR(c->Concatenate(output, c->Vector(1), &output)); TF_RETURN_IF_ERROR(c->Concatenate(output, end, &output)); c->set_output(0, output); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("Squeeze") .Input("input: T") .Output("output: T") .Attr("T: type") .Attr("squeeze_dims: list(int) >= 0 = []") .SetShapeFn([](InferenceContext* c) { ShapeHandle input = c->input(0); if (!c->RankKnown(input)) { // Input shape unknown. return shape_inference::UnknownShape(c); } const int32_t input_rank = c->Rank(input); // Validate and wrap squeeze dimensions. 
std::vector<int32> squeeze_dims; TF_RETURN_IF_ERROR(c->GetAttr("squeeze_dims", &squeeze_dims)); for (int i = 0; i < squeeze_dims.size(); ++i) { if (squeeze_dims[i] < -input_rank || squeeze_dims[i] >= input_rank) { return errors::InvalidArgument("squeeze_dims[", i, "] not in [", -input_rank, ",", input_rank, ")."); } if (squeeze_dims[i] < 0) { squeeze_dims[i] += input_rank; } } std::vector<DimensionHandle> result_shape; for (int i = 0; i < input_rank; ++i) { // True if squeeze_dims contains an entry to squeeze this // dimension. bool is_explicit_match = std::find(squeeze_dims.begin(), squeeze_dims.end(), i) != squeeze_dims.end(); DimensionHandle dim = c->Dim(input, i); if (!c->ValueKnown(dim)) { // Assume that the squeezed dimension will be 1 at runtime. if (is_explicit_match) continue; // If squeezing all 1 dimensions, and we see an unknown value, // give up and return Unknown Shape. if (squeeze_dims.empty()) { c->set_output(0, c->UnknownShape()); return Status::OK(); } } else if (c->Value(dim) == 1) { if (is_explicit_match || squeeze_dims.empty()) { // If explicitly squeezing, or squeezing all 1s, remove // this dimension. continue; } } else if (is_explicit_match) { return errors::InvalidArgument("Can not squeeze dim[", i, "], expected a dimension of 1, got ", c->Value(c->Dim(input, i))); } result_shape.emplace_back(dim); } c->set_output(0, c->MakeShape(result_shape)); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("ListDiff") .Input("x: T") .Input("y: T") .Output("out: T") .Output("idx: out_idx") .Attr("T: type") .Attr("out_idx: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused)); // TODO(mrry): Indicate that the length falls within an interval? ShapeHandle out = c->Vector(InferenceContext::kUnknownDim); c->set_output(0, out); c->set_output(1, out); return Status::OK(); }); namespace { // Converts Tensor to flat std::vector<int64_t>. template <typename InputType> std::vector<int64_t> GetFlatInt64(const Tensor& t) { std::vector<int64_t> output(t.shape().num_elements()); if (t.shape().num_elements() > 0) { auto eigen_vec = t.flat<InputType>(); std::copy_n(&eigen_vec(0), output.size(), output.begin()); } return output; } // Converts int32 or int64 Tensor to flat std::vector<int64_t>. 
std::vector<int64_t> GetFlatInt64(const Tensor& t) { if (t.dtype() == DT_INT32) { return GetFlatInt64<int32>(t); } else { return GetFlatInt64<int64_t>(t); } } Status SpaceToBatchShapeHelper(InferenceContext* c, ShapeHandle input_shape, ShapeHandle block_shape_shape, const Tensor* block_shape_t, ShapeHandle paddings_shape, const Tensor* paddings_t) { if (c->Rank(block_shape_shape) != 1) { return errors::InvalidArgument("block_shape must have rank 1."); } const DimensionHandle num_block_dims_handle = c->Dim(block_shape_shape, 0); if (!c->ValueKnown(num_block_dims_handle)) { return errors::InvalidArgument("block_shape must have known size."); } const int64_t num_block_dims = c->Value(num_block_dims_handle); TF_RETURN_IF_ERROR( c->WithRankAtLeast(input_shape, num_block_dims + 1, &input_shape)); TF_RETURN_IF_ERROR( c->Merge(paddings_shape, c->Matrix(num_block_dims, 2), &paddings_shape)); DimensionHandle batch_size = c->Dim(input_shape, 0); std::vector<int64_t> block_shape_vec; if (block_shape_t && (block_shape_t->NumElements() > 0)) { block_shape_vec = GetFlatInt64(*block_shape_t); for (int64_t dim = 0; dim < num_block_dims; ++dim) { const int64_t block_shape_value = block_shape_vec[dim]; if (block_shape_value < 1) { return errors::InvalidArgument("block_shape must be positive"); } if (c->ValueKnown(batch_size)) { TF_RETURN_IF_ERROR( c->Multiply(batch_size, block_shape_value, &batch_size)); } else { batch_size = c->UnknownDim(); } } } else if (num_block_dims > 0) { batch_size = c->UnknownDim(); } std::vector<DimensionHandle> output_dims{batch_size}; output_dims.resize(num_block_dims + 1, c->UnknownDim()); if (paddings_t && (paddings_t->NumElements() > 0)) { const std::vector<int64_t> paddings_vec = GetFlatInt64(*paddings_t); for (int64_t dim = 0; dim < num_block_dims; ++dim) { const int64_t pad_start = paddings_vec[dim * 2], pad_end = paddings_vec[dim * 2 + 1]; if (pad_start < 0 || pad_end < 0) { return errors::InvalidArgument("paddings cannot be negative"); } if (block_shape_t) { DimensionHandle padded_size; TF_RETURN_IF_ERROR( c->Add(c->Dim(input_shape, dim + 1), pad_start, &padded_size)); TF_RETURN_IF_ERROR(c->Add(padded_size, pad_end, &padded_size)); TF_RETURN_IF_ERROR(c->Divide(padded_size, block_shape_vec[dim], /*evenly_divisible=*/true, &output_dims[dim + 1])); } } } ShapeHandle remaining_input_shape; TF_RETURN_IF_ERROR( c->Subshape(input_shape, 1 + num_block_dims, &remaining_input_shape)); ShapeHandle result; TF_RETURN_IF_ERROR(c->Concatenate(c->MakeShape(output_dims), remaining_input_shape, &result)); c->set_output(0, result); return Status::OK(); } Status BatchToSpaceShapeHelper(InferenceContext* c, ShapeHandle input_shape, ShapeHandle block_shape_shape, const Tensor* block_shape_t, ShapeHandle crops_shape, const Tensor* crops_t) { if (c->Rank(block_shape_shape) != 1) { return errors::InvalidArgument("block_shape must have rank 1."); } const DimensionHandle num_block_dims_handle = c->Dim(block_shape_shape, 0); if (!c->ValueKnown(num_block_dims_handle)) { return errors::InvalidArgument("block_shape must have known size."); } const int64_t num_block_dims = c->Value(num_block_dims_handle); TF_RETURN_IF_ERROR( c->WithRankAtLeast(input_shape, num_block_dims + 1, &input_shape)); TF_RETURN_IF_ERROR( c->Merge(crops_shape, c->Matrix(num_block_dims, 2), &crops_shape)); DimensionHandle batch_size = c->Dim(input_shape, 0); std::vector<int64_t> block_shape_vec; if (block_shape_t) { block_shape_vec = GetFlatInt64(*block_shape_t); for (int64_t dim = 0; dim < num_block_dims; ++dim) { const int64_t 
block_shape_value = block_shape_vec[dim]; if (block_shape_value < 1) { return errors::InvalidArgument("block_shape must be positive"); } if (c->ValueKnown(batch_size)) { TF_RETURN_IF_ERROR(c->Divide(batch_size, block_shape_value, /*evenly_divisible=*/true, &batch_size)); } else { batch_size = c->UnknownDim(); } } } else if (num_block_dims > 0) { batch_size = c->UnknownDim(); } std::vector<DimensionHandle> output_dims{batch_size}; output_dims.resize(num_block_dims + 1, c->UnknownDim()); if (crops_t) { const std::vector<int64_t> crops_vec = GetFlatInt64(*crops_t); for (int64_t dim = 0; dim < num_block_dims; ++dim) { const int64_t crop_start = crops_vec[dim * 2], crop_end = crops_vec[dim * 2 + 1]; if (crop_start < 0 || crop_end < 0) { return errors::InvalidArgument("crops cannot be negative"); } if (block_shape_t) { DimensionHandle cropped_size; TF_RETURN_IF_ERROR(c->Multiply(c->Dim(input_shape, dim + 1), block_shape_vec[dim], &cropped_size)); TF_RETURN_IF_ERROR( c->Subtract(cropped_size, crop_start, &cropped_size)); TF_RETURN_IF_ERROR( c->Subtract(cropped_size, crop_end, &output_dims[dim + 1])); } } } ShapeHandle remaining_input_shape; TF_RETURN_IF_ERROR( c->Subshape(input_shape, 1 + num_block_dims, &remaining_input_shape)); ShapeHandle result; TF_RETURN_IF_ERROR(c->Concatenate(c->MakeShape(output_dims), remaining_input_shape, &result)); c->set_output(0, result); return Status::OK(); } } // namespace // -------------------------------------------------------------------------- REGISTER_OP("SpaceToBatchND") .Input("input: T") .Input("block_shape: Tblock_shape") .Input("paddings: Tpaddings") .Output("output: T") .Attr("T: type") .Attr("Tblock_shape: {int32, int64} = DT_INT32") .Attr("Tpaddings: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { return SpaceToBatchShapeHelper(c, c->input(0), c->input(1), c->input_tensor(1), c->input(2), c->input_tensor(2)); }); // -------------------------------------------------------------------------- REGISTER_OP("SpaceToBatch") .Input("input: T") .Input("paddings: Tpaddings") .Output("output: T") .Attr("T: type") .Attr("Tpaddings: {int32, int64} = DT_INT32") .Attr("block_size: int >= 2") .SetShapeFn([](InferenceContext* c) { ShapeHandle input_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input_shape)); int32_t block_size; TF_RETURN_IF_ERROR(c->GetAttr("block_size", &block_size)); Tensor block_shape(tensorflow::DT_INT64, TensorShape({2})); auto block_shape_vec = block_shape.vec<int64_t>(); block_shape_vec(0) = block_size; block_shape_vec(1) = block_size; return SpaceToBatchShapeHelper(c, input_shape, c->MakeShape({2}), &block_shape, c->input(1), c->input_tensor(1)); }); // -------------------------------------------------------------------------- REGISTER_OP("BatchToSpaceND") .Input("input: T") .Input("block_shape: Tblock_shape") .Input("crops: Tcrops") .Output("output: T") .Attr("T: type") .Attr("Tblock_shape: {int32, int64} = DT_INT32") .Attr("Tcrops: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { return BatchToSpaceShapeHelper(c, c->input(0), c->input(1), c->input_tensor(1), c->input(2), c->input_tensor(2)); }); // -------------------------------------------------------------------------- REGISTER_OP("BatchToSpace") .Input("input: T") .Input("crops: Tidx") .Output("output: T") .Attr("T: type") .Attr("block_size: int >= 2") .Attr("Tidx: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle input_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input_shape)); int32_t block_size; 
TF_RETURN_IF_ERROR(c->GetAttr("block_size", &block_size)); Tensor block_shape(tensorflow::DT_INT64, TensorShape({2})); auto block_shape_vec = block_shape.vec<int64_t>(); block_shape_vec(0) = block_size; block_shape_vec(1) = block_size; return BatchToSpaceShapeHelper(c, input_shape, c->MakeShape({2}), &block_shape, c->input(1), c->input_tensor(1)); }); // -------------------------------------------------------------------------- REGISTER_OP("SpaceToDepth") .Input("input: T") .Output("output: T") .Attr("T: type") .Attr("block_size: int >= 2") .Attr("data_format: {'NHWC', 'NCHW', 'NCHW_VECT_C'} = 'NHWC'") // TODO(pauldonnelly): Implement GPU kernels for NCHW_VECT_C. .SetShapeFn([](InferenceContext* c) { string data_format_str; TF_RETURN_IF_ERROR(c->GetAttr("data_format", &data_format_str)); TensorFormat data_format; FormatFromString(data_format_str, &data_format); constexpr int num_spatial_dims = 2; const int dims = GetTensorDimsFromSpatialDims(num_spatial_dims, data_format); ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), dims, &input)); int32_t block_size; TF_RETURN_IF_ERROR(c->GetAttr("block_size", &block_size)); DimensionHandle batch_size = c->Dim(input, GetTensorDimIndex<num_spatial_dims>(data_format, 'N')); DimensionHandle input_height = c->Dim(input, GetTensorDimIndex<num_spatial_dims>(data_format, 'H')); DimensionHandle input_width = c->Dim(input, GetTensorDimIndex<num_spatial_dims>(data_format, 'W')); DimensionHandle input_depth = c->Dim(input, GetTensorDimIndex<num_spatial_dims>(data_format, 'C')); DimensionHandle output_height; DimensionHandle output_width; DimensionHandle output_depth; // Will return an error if input height or width are not evenly divisible. TF_RETURN_IF_ERROR(c->Divide(input_height, block_size, true /* evenly_divisible */, &output_height)); TF_RETURN_IF_ERROR(c->Divide(input_width, block_size, true /* evenly_divisible */, &output_width)); TF_RETURN_IF_ERROR( c->Multiply(input_depth, block_size * block_size, &output_depth)); ShapeHandle output_shape; TF_RETURN_IF_ERROR(MakeShapeFromFormat(data_format, batch_size, {output_height, output_width}, output_depth, &output_shape, c)); c->set_output(0, output_shape); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("DepthToSpace") .Input("input: T") .Output("output: T") .Attr("T: type") .Attr("block_size: int >= 2") .Attr("data_format: {'NHWC', 'NCHW', 'NCHW_VECT_C'} = 'NHWC'") // TODO(pauldonnelly): Implement GPU kernels for NCHW and NCHW_VECT_C. 
.SetShapeFn([](InferenceContext* c) { string data_format_str; TF_RETURN_IF_ERROR(c->GetAttr("data_format", &data_format_str)); TensorFormat data_format; FormatFromString(data_format_str, &data_format); constexpr int num_spatial_dims = 2; const int dims = GetTensorDimsFromSpatialDims(num_spatial_dims, data_format); ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), dims, &input)); int32_t block_size; TF_RETURN_IF_ERROR(c->GetAttr("block_size", &block_size)); DimensionHandle batch_size = c->Dim(input, GetTensorDimIndex<num_spatial_dims>(data_format, 'N')); DimensionHandle input_height = c->Dim(input, GetTensorDimIndex<num_spatial_dims>(data_format, 'H')); DimensionHandle input_width = c->Dim(input, GetTensorDimIndex<num_spatial_dims>(data_format, 'W')); DimensionHandle input_depth = c->Dim(input, GetTensorDimIndex<num_spatial_dims>(data_format, 'C')); DimensionHandle output_height; DimensionHandle output_width; DimensionHandle output_depth; TF_RETURN_IF_ERROR(c->Multiply(input_height, block_size, &output_height)); TF_RETURN_IF_ERROR(c->Multiply(input_width, block_size, &output_width)); // Will return an error if input_depth is not evenly divisible. TF_RETURN_IF_ERROR(c->Divide(input_depth, block_size * block_size, true /* evenly_divisible */, &output_depth)); ShapeHandle output_shape; TF_RETURN_IF_ERROR(MakeShapeFromFormat(data_format, batch_size, {output_height, output_width}, output_depth, &output_shape, c)); c->set_output(0, output_shape); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("ExtractImagePatches") .Input("images: T") .Output("patches: T") .Attr("ksizes: list(int) >= 4") .Attr("strides: list(int) >= 4") .Attr("rates: list(int) >= 4") .Attr( "T: {bfloat16, half, float, double, int8, int16, int32, int64, " "uint8, uint16, uint32, uint64, complex64, complex128, bool}") .Attr(GetPaddingAttrString()) .SetShapeFn([](InferenceContext* c) { ShapeHandle input_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input_shape)); std::vector<int32> ksizes; TF_RETURN_IF_ERROR(c->GetAttr("ksizes", &ksizes)); if (ksizes.size() != 4) { return errors::InvalidArgument( "ExtractImagePatches requires the ksizes attribute to contain 4 " "values, but got: ", ksizes.size()); } std::vector<int32> strides; TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides)); if (strides.size() != 4) { return errors::InvalidArgument( "ExtractImagePatches requires the stride attribute to contain 4 " "values, but got: ", strides.size()); } std::vector<int32> rates; TF_RETURN_IF_ERROR(c->GetAttr("rates", &rates)); if (rates.size() != 4) { return errors::InvalidArgument( "ExtractImagePatches requires the rates attribute to contain 4 " "values, but got: ", rates.size()); } int32_t ksize_rows = ksizes[1]; int32_t ksize_cols = ksizes[2]; int32_t stride_rows = strides[1]; int32_t stride_cols = strides[2]; int32_t rate_rows = rates[1]; int32_t rate_cols = rates[2]; int32_t ksize_rows_eff = ksize_rows + (ksize_rows - 1) * (rate_rows - 1); int32_t ksize_cols_eff = ksize_cols + (ksize_cols - 1) * (rate_cols - 1); DimensionHandle batch_size_dim = c->Dim(input_shape, 0); DimensionHandle in_rows_dim = c->Dim(input_shape, 1); DimensionHandle in_cols_dim = c->Dim(input_shape, 2); DimensionHandle output_depth_dim; TF_RETURN_IF_ERROR(c->Multiply( c->Dim(input_shape, 3), ksize_rows * ksize_cols, &output_depth_dim)); if (!c->ValueKnown(in_rows_dim) || !c->ValueKnown(in_cols_dim)) { ShapeHandle output_shape = c->MakeShape({batch_size_dim, 
InferenceContext::kUnknownDim, InferenceContext::kUnknownDim, output_depth_dim}); c->set_output(0, output_shape); return Status::OK(); } auto in_rows = c->Value(in_rows_dim); auto in_cols = c->Value(in_cols_dim); Padding padding; TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding)); int64_t output_rows, output_cols; int64_t padding_before, padding_after; TF_RETURN_IF_ERROR(GetWindowedOutputSizeVerbose( in_rows, ksize_rows_eff, stride_rows, padding, &output_rows, &padding_before, &padding_after)); TF_RETURN_IF_ERROR(GetWindowedOutputSizeVerbose( in_cols, ksize_cols_eff, stride_cols, padding, &output_cols, &padding_before, &padding_after)); ShapeHandle output_shape = c->MakeShape( {batch_size_dim, output_rows, output_cols, output_depth_dim}); c->set_output(0, output_shape); return Status::OK(); }); // -------------------------------------------------------------------------- // To enable rates, uncomment all lines commented below and use ksize_*_eff // as the second parameter of all GetWindowedOutputSizeVerbose calls instead // of ksize_*. REGISTER_OP("ExtractVolumePatches") .Input("input: T") .Output("patches: T") .Attr("ksizes: list(int) >= 5") .Attr("strides: list(int) >= 5") /* .Attr("rates: list(int) >= 5") */ .Attr("T: realnumbertype") .Attr(GetPaddingAttrString()) .SetShapeFn([](InferenceContext* c) { ShapeHandle input_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 5, &input_shape)); std::vector<int32> ksizes; TF_RETURN_IF_ERROR(c->GetAttr("ksizes", &ksizes)); if (ksizes.size() != 5) { return errors::InvalidArgument( "ExtractVolumePatches requires the ksizes attribute to contain 5 " "values, but got: ", ksizes.size()); } std::vector<int32> strides; TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides)); if (strides.size() != 5) { return errors::InvalidArgument( "ExtractVolumePatches requires the stride attribute to contain 5 " "values, but got: ", strides.size()); } /* // TODO(hsgkim): Enable rates. // See extract_volume_patches_op.cc for why rates are disabled now. 
std::vector<int32> rates; TF_RETURN_IF_ERROR(c->GetAttr("rates", &rates)); if (rates.size() != 5) { return errors::InvalidArgument( "ExtractVolumePatches requires the rates attribute to contain 5 " "values, but got: ", rates.size()); } */ int32_t ksize_planes = ksizes[1]; int32_t ksize_rows = ksizes[2]; int32_t ksize_cols = ksizes[3]; int32_t stride_planes = strides[1]; int32_t stride_rows = strides[2]; int32_t stride_cols = strides[3]; /* int32 rate_planes = rates[1]; int32 rate_rows = rates[2]; int32 rate_cols = rates[3]; int32 ksize_planes_eff = ksize_planes + (ksize_planes - 1) * (rate_planes - 1); int32 ksize_rows_eff = ksize_rows + (ksize_rows - 1) * (rate_rows - 1); int32 ksize_cols_eff = ksize_cols + (ksize_cols - 1) * (rate_cols - 1); */ DimensionHandle batch_size_dim = c->Dim(input_shape, 0); DimensionHandle in_planes_dim = c->Dim(input_shape, 1); DimensionHandle in_rows_dim = c->Dim(input_shape, 2); DimensionHandle in_cols_dim = c->Dim(input_shape, 3); DimensionHandle output_depth_dim; TF_RETURN_IF_ERROR(c->Multiply(c->Dim(input_shape, 4), ksize_planes * ksize_rows * ksize_cols, &output_depth_dim)); if (!c->ValueKnown(in_planes_dim) || !c->ValueKnown(in_rows_dim) || !c->ValueKnown(in_cols_dim)) { ShapeHandle output_shape = c->MakeShape({batch_size_dim, InferenceContext::kUnknownDim, InferenceContext::kUnknownDim, output_depth_dim}); c->set_output(0, output_shape); return Status::OK(); } auto in_planes = c->Value(in_planes_dim); auto in_rows = c->Value(in_rows_dim); auto in_cols = c->Value(in_cols_dim); Padding padding; TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding)); int64_t output_planes, output_rows, output_cols; int64_t padding_before, padding_after; TF_RETURN_IF_ERROR(GetWindowedOutputSizeVerbose( in_planes, ksize_planes, stride_planes, padding, &output_planes, &padding_before, &padding_after)); TF_RETURN_IF_ERROR(GetWindowedOutputSizeVerbose( in_rows, ksize_rows, stride_rows, padding, &output_rows, &padding_before, &padding_after)); TF_RETURN_IF_ERROR(GetWindowedOutputSizeVerbose( in_cols, ksize_cols, stride_cols, padding, &output_cols, &padding_before, &padding_after)); ShapeHandle output_shape = c->MakeShape({batch_size_dim, output_planes, output_rows, output_cols, output_depth_dim}); c->set_output(0, output_shape); return Status::OK(); }); // -------------------------------------------------------------------------- REGISTER_OP("OneHot") .Input("indices: TI") .Input("depth: int32") .Input("on_value: T") .Input("off_value: T") .Attr("axis: int = -1") .Output("output: T") .Attr("T: type") .Attr("TI: {uint8, int32, int64} = DT_INT64") .SetShapeFn([](InferenceContext* c) { int32_t axis; TF_RETURN_IF_ERROR(c->GetAttr("axis", &axis)); if (axis < -1) return errors::InvalidArgument("axis must be >= -1"); DimensionHandle depth; TF_RETURN_IF_ERROR(c->MakeDimForScalarInput(1, &depth)); ShapeHandle indices = c->input(0); if (!c->RankKnown(indices)) return shape_inference::UnknownShape(c); int32_t new_rank = c->Rank(indices) + 1; // We need to add new_rank to axis in the case the axis is -1 because // C++ returns negative values from % if the dividend is negative. int32_t depth_index = (axis + new_rank) % new_rank; // Out shape is indices[0:depth_index] + [depth] + indices[depth_index:]. 
ShapeHandle front; ShapeHandle back; ShapeHandle out; TF_RETURN_IF_ERROR(c->Subshape(indices, 0, depth_index, &front)); TF_RETURN_IF_ERROR(c->Subshape(indices, depth_index, &back)); TF_RETURN_IF_ERROR(c->Concatenate(front, c->Vector(depth), &front)); TF_RETURN_IF_ERROR(c->Concatenate(front, back, &out)); c->set_output(0, out); return Status::OK(); }); // EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET. REGISTER_OP("QuantizeAndDequantize") .Input("input: T") .Attr("signed_input: bool = true") .Attr("num_bits: int = 8") .Attr("range_given: bool = false") .Attr("input_min: float = 0") .Attr("input_max: float = 0") .Output("output: T") .Attr("T: {bfloat16, half, float, double}") .SetShapeFn(shape_inference::UnchangedShape) .Deprecated(22, "Replaced by QuantizeAndDequantizeV2"); // TODO(suharshs): Deprecate QuantizeAndDequantizeV2. REGISTER_OP("QuantizeAndDequantizeV2") .Input("input: T") .Input("input_min: T") .Input("input_max: T") .Attr("signed_input: bool = true") .Attr("num_bits: int = 8") .Attr("range_given: bool = false") .Output("output: T") .Attr("T: {bfloat16, half, float, double}") .Attr( "round_mode: {'HALF_TO_EVEN', 'HALF_UP'} = " "'HALF_TO_EVEN'") .Attr("narrow_range: bool = false") .Attr("axis: int = -1") .SetShapeFn([](InferenceContext* c) { int axis; TF_RETURN_IF_ERROR(c->GetAttr("axis", &axis)); const int minmax_rank = (axis == -1) ? 0 : 1; ShapeHandle minmax; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), minmax_rank, &minmax)); TF_RETURN_IF_ERROR(c->Merge(c->input(2), minmax, &minmax)); if (axis < -1) { return errors::InvalidArgument("axis should be at least -1, got ", axis); } else if (axis != -1) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), axis + 1, &input)); DimensionHandle depth; TF_RETURN_IF_ERROR( c->Merge(c->Dim(minmax, 0), c->Dim(input, axis), &depth)); } c->set_output(0, c->input(0)); return Status::OK(); }); REGISTER_OP("QuantizeAndDequantizeV4") .Input("input: T") .Input("input_min: T") .Input("input_max: T") .Attr("signed_input: bool = true") .Attr("num_bits: int = 8") .Attr("range_given: bool = false") .Output("output: T") .Attr("T: {bfloat16, half, float, double}") .Attr( "round_mode: {'HALF_TO_EVEN', 'HALF_UP'} = " "'HALF_TO_EVEN'") .Attr("narrow_range: bool = false") .Attr("axis: int = -1") .SetShapeFn([](InferenceContext* c) { int axis; TF_RETURN_IF_ERROR(c->GetAttr("axis", &axis)); const int minmax_rank = (axis == -1) ? 0 : 1; ShapeHandle minmax; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), minmax_rank, &minmax)); TF_RETURN_IF_ERROR(c->Merge(c->input(2), minmax, &minmax)); if (axis < -1) { return errors::InvalidArgument("axis should be at least -1, got ", axis); } else if (axis != -1) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), axis + 1, &input)); DimensionHandle depth; TF_RETURN_IF_ERROR( c->Merge(c->Dim(minmax, 0), c->Dim(input, axis), &depth)); } c->set_output(0, c->input(0)); return Status::OK(); }); REGISTER_OP("QuantizeAndDequantizeV4Grad") .Input("gradients: T") .Input("input: T") .Input("input_min: T") .Input("input_max: T") .Output("input_backprop: T") .Output("input_min_backprop: T") .Output("input_max_backprop: T") .Attr("T: {bfloat16, half, float, double}") .Attr("axis: int = -1") .SetShapeFn([](InferenceContext* c) { int axis; TF_RETURN_IF_ERROR(c->GetAttr("axis", &axis)); const int minmax_rank = (axis == -1) ? 
0 : 1; ShapeHandle minmax; TF_RETURN_IF_ERROR(c->WithRank(c->input(2), minmax_rank, &minmax)); TF_RETURN_IF_ERROR(c->Merge(c->input(3), minmax, &minmax)); if (axis < -1) { return errors::InvalidArgument("axis should be at least -1, got ", axis); } else if (axis != -1) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), axis + 1, &input)); DimensionHandle depth; TF_RETURN_IF_ERROR( c->Merge(c->Dim(minmax, 0), c->Dim(input, axis), &depth)); } ShapeHandle inputs; TF_RETURN_IF_ERROR(c->Merge(c->input(0), c->input(1), &inputs)); c->set_output(0, inputs); c->set_output(1, minmax); c->set_output(2, minmax); return Status::OK(); }); REGISTER_OP("QuantizeAndDequantizeV3") .Input("input: T") .Input("input_min: T") .Input("input_max: T") .Input("num_bits: int32") .Attr("signed_input: bool = true") .Attr("range_given: bool = true") .Output("output: T") .Attr("T: {bfloat16, half, float, double}") .Attr("narrow_range: bool = false") .Attr("axis: int = -1") .SetShapeFn([](InferenceContext* c) { int axis; TF_RETURN_IF_ERROR(c->GetAttr("axis", &axis)); const int minmax_rank = (axis == -1) ? 0 : 1; ShapeHandle minmax; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), minmax_rank, &minmax)); TF_RETURN_IF_ERROR(c->Merge(c->input(2), minmax, &minmax)); if (axis < -1) { return errors::InvalidArgument("axis should be at least -1, got ", axis); } else if (axis != -1) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), axis + 1, &input)); DimensionHandle depth; TF_RETURN_IF_ERROR( c->Merge(c->Dim(minmax, 0), c->Dim(input, axis), &depth)); } ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused)); c->set_output(0, c->input(0)); return Status::OK(); }); REGISTER_OP("QuantizeV2") .Input("input: float") .Input("min_range: float") .Input("max_range: float") .Output("output: T") .Output("output_min: float") .Output("output_max: float") .Attr("T: quantizedtype") .Attr("mode: {'MIN_COMBINED', 'MIN_FIRST', 'SCALED'} = 'MIN_COMBINED'") .Attr( "round_mode: {'HALF_AWAY_FROM_ZERO', 'HALF_TO_EVEN'} = " "'HALF_AWAY_FROM_ZERO'") .Attr("narrow_range: bool = false") .Attr("axis: int = -1") .Attr("ensure_minimum_range: float = 0.01") .SetShapeFn(shape_inference::QuantizeV2Shape); REGISTER_OP("Dequantize") .Input("input: T") .Input("min_range: float") .Input("max_range: float") .Output("output: dtype") .Attr("T: quantizedtype") .Attr("mode: {'MIN_COMBINED', 'MIN_FIRST', 'SCALED'} = 'MIN_COMBINED'") .Attr("narrow_range: bool = false") .Attr("axis: int = -1") .Attr("dtype: {bfloat16, float} = DT_FLOAT") .SetShapeFn([](InferenceContext* c) { int axis = -1; Status s = c->GetAttr("axis", &axis); if (!s.ok() && s.code() != error::NOT_FOUND) { return s; } if (axis < -1) { return errors::InvalidArgument("axis should be at least -1, got ", axis); } const int minmax_rank = (axis == -1) ? 
0 : 1; TF_RETURN_IF_ERROR(shape_inference::UnchangedShape(c)); ShapeHandle minmax; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), minmax_rank, &minmax)); TF_RETURN_IF_ERROR(c->WithRank(c->input(2), minmax_rank, &minmax)); if (axis != -1) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), axis + 1, &input)); DimensionHandle depth; TF_RETURN_IF_ERROR( c->Merge(c->Dim(minmax, 0), c->Dim(input, axis), &depth)); } return Status::OK(); }); REGISTER_OP("QuantizedConcat") .Input("concat_dim: int32") .Input("values: N * T") .Input("input_mins: N * float32") .Input("input_maxes: N * float32") .Output("output: T") .Output("output_min: float") .Output("output_max: float") .Attr("N: int >= 2") .Attr("T: type") .SetShapeFn([](InferenceContext* c) { const int n = (c->num_inputs() - 1) / 3; TF_RETURN_IF_ERROR(shape_inference::ConcatShape(c, n)); ShapeHandle unused; for (int i = n + 1; i < c->num_inputs(); ++i) { TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 0, &unused)); } c->set_output(1, c->Scalar()); c->set_output(2, c->Scalar()); return Status::OK(); }); REGISTER_OP("QuantizedReshape") .Input("tensor: T") .Input("shape: Tshape") .Input("input_min: float") .Input("input_max: float") .Output("output: T") .Output("output_min: float") .Output("output_max: float") .Attr("T: type") .Attr("Tshape: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { TF_RETURN_IF_ERROR(SetOutputShapeForReshape(c)); ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 0, &unused)); c->set_output(1, c->Scalar()); c->set_output(2, c->Scalar()); return Status::OK(); }); REGISTER_OP("QuantizedInstanceNorm") .Input("x: T") .Input("x_min: float") .Input("x_max: float") .Output("y: T") .Output("y_min: float") .Output("y_max: float") .Attr("T: quantizedtype") .Attr("output_range_given: bool = false") .Attr("given_y_min: float = 0") .Attr("given_y_max: float = 0") .Attr("variance_epsilon: float = 1e-5") .Attr("min_separation: float = 1e-3") .SetShapeFn([](shape_inference::InferenceContext* c) { shape_inference::ShapeHandle unused; // x should be a rank 4 tensor. TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &unused)); // Assert x_min and x_max are scalars (rank 0). TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused)); // y has the same shape as x. TF_RETURN_IF_ERROR(shape_inference::UnchangedShape(c)); // y_min and y_max are scalars. 
c->set_output(1, c->Scalar()); c->set_output(2, c->Scalar()); return Status::OK(); }); namespace { Status ScatterNdTensorShape(InferenceContext* c) { ShapeHandle output_shape; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &output_shape)); ShapeHandle indices_shape; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 1, &indices_shape)); ShapeHandle updates_shape; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(2), 1, &updates_shape)); return shape_inference::ScatterNdShapeHelper(c, indices_shape, updates_shape, output_shape); } } // namespace REGISTER_OP("UpperBound") .Input("sorted_inputs: T") .Input("values: T") .Output("output: out_type") .Attr("T: type") .Attr("out_type: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle unused_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &unused_shape)); TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &unused_shape)); c->set_output(0, c->input(1)); return Status::OK(); }); REGISTER_OP("LowerBound") .Input("sorted_inputs: T") .Input("values: T") .Output("output: out_type") .Attr("T: type") .Attr("out_type: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle unused_shape; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &unused_shape)); TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &unused_shape)); c->set_output(0, c->input(1)); return Status::OK(); }); REGISTER_OP("ScatterNd") .Input("indices: Tindices") .Input("updates: T") .Input("shape: Tindices") .Output("output: T") .Attr("T: type") .Attr("Tindices: {int32, int64}") .SetShapeFn([](InferenceContext* c) { ShapeHandle indices_shape; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &indices_shape)); ShapeHandle updates_shape; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 1, &updates_shape)); ShapeHandle output_shape; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(2, &output_shape)); return shape_inference::ScatterNdShapeHelper(c, indices_shape, updates_shape, output_shape); }); REGISTER_OP("TensorScatterUpdate") .Input("tensor: T") .Input("indices: Tindices") .Input("updates: T") .Output("output: T") .Attr("T: type") .Attr("Tindices: {int32, int64}") .SetShapeFn(ScatterNdTensorShape); REGISTER_OP("TensorScatterAdd") .Input("tensor: T") .Input("indices: Tindices") .Input("updates: T") .Output("output: T") .Attr("T: type") .Attr("Tindices: {int32, int64}") .SetShapeFn(ScatterNdTensorShape); REGISTER_OP("TensorScatterSub") .Input("tensor: T") .Input("indices: Tindices") .Input("updates: T") .Output("output: T") .Attr("T: type") .Attr("Tindices: {int32, int64}") .SetShapeFn(ScatterNdTensorShape); REGISTER_OP("TensorScatterMin") .Input("tensor: T") .Input("indices: Tindices") .Input("updates: T") .Output("output: T") .Attr("T: type") .Attr("Tindices: {int32, int64}") .SetShapeFn(ScatterNdTensorShape); REGISTER_OP("TensorScatterMax") .Input("tensor: T") .Input("indices: Tindices") .Input("updates: T") .Output("output: T") .Attr("T: type") .Attr("Tindices: {int32, int64}") .SetShapeFn(ScatterNdTensorShape); REGISTER_OP("ScatterNdNonAliasingAdd") .Input("input: T") .Input("indices: Tindices") .Input("updates: T") .Output("output: T") .Attr("T: {numbertype, bool}") .Attr("Tindices: {int32, int64}") .SetShapeFn(ScatterNdTensorShape); REGISTER_OP("FakeQuantWithMinMaxArgs") .Attr("min: float = -6.0") .Attr("max: float = 6.0") .Attr("num_bits: int = 8") .Attr("narrow_range: bool = false") .Input("inputs: float") .Output("outputs: float") .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("FakeQuantWithMinMaxArgsGradient") 
.Attr("min: float = -6.0") .Attr("max: float = 6.0") .Attr("num_bits: int = 8") .Attr("narrow_range: bool = false") .Input("gradients: float") .Input("inputs: float") .Output("backprops: float") .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("FakeQuantWithMinMaxVars") .Attr("num_bits: int = 8") .Attr("narrow_range: bool = false") .Input("inputs: float") .Input("min: float") .Input("max: float") .Output("outputs: float") .SetShapeFn([](InferenceContext* c) { TF_RETURN_IF_ERROR(shape_inference::UnchangedShape(c)); ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused)); return Status::OK(); }); REGISTER_OP("FakeQuantWithMinMaxVarsGradient") .Attr("num_bits: int = 8") .Attr("narrow_range: bool = false") .Input("gradients: float") .Input("inputs: float") .Input("min: float") .Input("max: float") .Output("backprops_wrt_input: float") .Output("backprop_wrt_min: float") .Output("backprop_wrt_max: float") .SetShapeFn([](InferenceContext* c) { // gradients and inputs are same size. ShapeHandle inputs; TF_RETURN_IF_ERROR(c->Merge(c->input(0), c->input(1), &inputs)); // min and max are scalars ShapeHandle min_max; TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &min_max)); TF_RETURN_IF_ERROR(c->Merge(min_max, c->input(3), &min_max)); c->set_output(0, inputs); c->set_output(1, min_max); c->set_output(2, min_max); return Status::OK(); }); REGISTER_OP("FakeQuantWithMinMaxVarsPerChannel") .Attr("num_bits: int = 8") .Attr("narrow_range: bool = false") .Input("inputs: float") .Input("min: float") .Input("max: float") .Output("outputs: float") .SetShapeFn([](InferenceContext* c) { ShapeHandle input, min, max; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &input)); TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &min)); TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &max)); DimensionHandle unused; TF_RETURN_IF_ERROR(c->Merge(c->Dim(input, -1), c->Dim(min, 0), &unused)); TF_RETURN_IF_ERROR(c->Merge(c->Dim(input, -1), c->Dim(max, 0), &unused)); TF_RETURN_IF_ERROR(c->Merge(c->Dim(min, 0), c->Dim(max, 0), &unused)); c->set_output(0, input); return Status::OK(); }); REGISTER_OP("FakeQuantWithMinMaxVarsPerChannelGradient") .Attr("num_bits: int = 8") .Attr("narrow_range: bool = false") .Input("gradients: float") .Input("inputs: float") .Input("min: float") .Input("max: float") .Output("backprops_wrt_input: float") .Output("backprop_wrt_min: float") .Output("backprop_wrt_max: float") .SetShapeFn([](InferenceContext* c) { ShapeHandle inputs; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &inputs)); TF_RETURN_IF_ERROR(c->WithRankAtMost(inputs, 4, &inputs)); TF_RETURN_IF_ERROR(c->Merge(inputs, c->input(1), &inputs)); ShapeHandle last_dim = c->Vector(c->Dim(inputs, -1)); ShapeHandle min_max; TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &min_max)); TF_RETURN_IF_ERROR(c->Merge(min_max, last_dim, &min_max)); TF_RETURN_IF_ERROR(c->Merge(c->input(3), min_max, &min_max)); c->set_output(0, inputs); c->set_output(1, min_max); c->set_output(2, min_max); return Status::OK(); }); REGISTER_OP("Fingerprint") .Input("data: T") .Input("method: string") .Output("fingerprint: uint8") .Attr("T: type") .SetShapeFn([](InferenceContext* c) { ShapeHandle unused; TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &unused)); TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused)); DimensionHandle fingerprint_size; const Tensor* method = c->input_tensor(1); if (method == nullptr) { fingerprint_size = c->UnknownDim(); } else { if 
(method->dims() != 0) { return errors::InvalidArgument("`method` must be rank 0: ", method->shape()); } const string& method_string = method->scalar<tstring>()(); if (method_string != "farmhash64") { return errors::InvalidArgument("Unsupported method: ", method_string); } fingerprint_size = c->MakeDim(sizeof(uint64)); } DimensionHandle batch = c->Dim(c->input(0), 0); c->set_output(0, c->MakeShape({batch, fingerprint_size})); return Status::OK(); }); #ifdef INTEL_MKL REGISTER_OP("_MklConcat") .Input("concat_dim: int32") .Input("values: N * T") .Input("mkl_concat_dim: uint8") .Input("mkl_values: N * uint8") .Output("output: T") .Output("mkl_output: uint8") .Attr("N: int >= 2") .Attr("T: type") .SetShapeFn([](InferenceContext* c) { return shape_inference::ConcatShape(c, c->num_inputs() - 3); }) .Doc(R"doc( MKL version of Concat operator. Uses MKL DNN APIs to perform concatenation. NOTE Do not invoke this operator directly in Python. Graph rewrite pass is expected to invoke these operators. )doc"); #endif // Deprecated op registrations: // The following can be deleted after 10mar2017. REGISTER_OP("BatchMatrixDiag") .Input("diagonal: T") .Output("output: T") .Attr("T: type") .Deprecated(14, "Use MatrixDiag") .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("BatchMatrixSetDiag") .Input("input: T") .Input("diagonal: T") .Output("output: T") .Attr("T: type") .Deprecated(14, "Use MatrixSetDiag") .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("BatchMatrixDiagPart") .Input("input: T") .Output("diagonal: T") .Attr("T: type") .Deprecated(14, "Use MatrixDiagPart") .SetShapeFn(shape_inference::UnknownShape); REGISTER_OP("BatchMatrixBandPart") .Input("input: T") .Input("num_lower: int64") .Input("num_upper: int64") .Output("band: T") .Attr("T: type") .Deprecated(14, "Use MatrixBandPart") .SetShapeFn(shape_inference::UnknownShape); } // namespace tensorflow
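The ExpandDims and OneHot shape functions above both normalize a possibly negative axis before slicing the input shape, and the OneHot comment notes why `new_rank` is added before taking the modulo: C++'s `%` can return a negative value when the dividend is negative. A minimal standalone sketch of that arithmetic follows; the helper name `NormalizeAxis` is mine and not part of TensorFlow.

```cpp
#include <cassert>
#include <cstdio>

// Hypothetical helper mirroring the axis arithmetic used by the ExpandDims and
// OneHot shape functions: a rank-`rank` input has rank+1 insertion points, and
// a negative axis counts from the back.
int NormalizeAxis(int axis, int rank) {
  const int new_rank = rank + 1;  // rank of the output tensor
  assert(axis >= -new_rank && axis <= rank);
  // (axis + new_rank) % new_rank maps -1 -> rank, -2 -> rank - 1, ..., and
  // leaves non-negative axes unchanged; adding new_rank first keeps the
  // dividend non-negative, so % never yields a negative result.
  return (axis + new_rank) % new_rank;
}

int main() {
  std::printf("%d\n", NormalizeAxis(-1, 2));  // 2: insert as the last dimension
  std::printf("%d\n", NormalizeAxis(0, 2));   // 0: insert in front
  std::printf("%d\n", NormalizeAxis(-3, 2));  // 0: same as axis 0 for rank 2
  return 0;
}
```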
null
291
CWE-787
CVE-2021-41221
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { TEST(CudnnRNNOpsTest, ParamsSize_ShapeFn) { ShapeInferenceTestOp op("CudnnRNNParamsSize"); INFER_OK(op, "[];[];[]", "[1]"); INFER_OK(op, "?;[];[]", "[1]"); INFER_OK(op, "[];?;[]", "[1]"); INFER_OK(op, "[];[];?", "[1]"); INFER_OK(op, "[];?;?", "[1]"); INFER_OK(op, "?;?;?", "[1]"); INFER_ERROR("Shape must be rank 0 ", op, "[1,2];?;[]"); INFER_ERROR("Shape must be rank 0 ", op, "?;[2];[]"); INFER_ERROR("Shape must be rank 0 ", op, "?;?;[1]"); } TEST(CudnnRNNOpsTest, ForwardLstm_ShapeFn) { int seq_length = 2; int batch_size = 3; int num_units = 4; int num_layers = 5; int dir_count = 1; std::vector<int> input_shape = {seq_length, batch_size, num_units}; std::vector<int> input_h_shape = {num_layers * dir_count, batch_size, num_units}; std::vector<int> output_shape = {seq_length, batch_size, num_units * dir_count}; auto shape_to_str = [](const std::vector<int>& v) { return strings::StrCat("[", absl::StrJoin(v, ","), "]"); }; string input_shapes_desc = strings::StrCat( shape_to_str(input_shape), ";", shape_to_str(input_h_shape), ";", shape_to_str(input_h_shape), ";", "[?]"); string output_shapes_desc = "[d0_0,d0_1,d1_2];in1;in1;?"; ShapeInferenceTestOp op("CudnnRNN"); TF_ASSERT_OK(NodeDefBuilder("test", "CudnnRNN") .Input({"input", 0, DT_FLOAT}) .Input({"input_h", 0, DT_FLOAT}) .Input({"input_c", 0, DT_FLOAT}) .Input({"params", 0, DT_FLOAT}) .Attr("rnn_mode", "lstm") .Attr("input_mode", "auto_select") .Attr("direction", "unidirectional") .Finalize(&op.node_def)); INFER_OK(op, input_shapes_desc, output_shapes_desc); } TEST(CudnnRNNOpsTest, ForwardV2Lstm_ShapeFn) { int seq_length = 2; int batch_size = 3; int num_units = 4; int num_layers = 5; int dir_count = 1; std::vector<int> input_shape = {seq_length, batch_size, num_units}; std::vector<int> input_h_shape = {num_layers * dir_count, batch_size, num_units}; std::vector<int> output_shape = {seq_length, batch_size, num_units * dir_count}; auto shape_to_str = [](const std::vector<int>& v) { return strings::StrCat("[", absl::StrJoin(v, ","), "]"); }; string input_shapes_desc = strings::StrCat( shape_to_str(input_shape), ";", shape_to_str(input_h_shape), ";", shape_to_str(input_h_shape), ";", "[?]"); string output_shapes_desc = "[d0_0,d0_1,d1_2];in1;in1;?;?"; ShapeInferenceTestOp op("CudnnRNNV2"); TF_ASSERT_OK(NodeDefBuilder("test", "CudnnRNNV2") .Input({"input", 0, DT_FLOAT}) .Input({"input_h", 0, DT_FLOAT}) .Input({"input_c", 0, DT_FLOAT}) .Input({"params", 0, DT_FLOAT}) .Attr("rnn_mode", "lstm") .Attr("input_mode", "auto_select") 
.Attr("direction", "unidirectional") .Finalize(&op.node_def)); INFER_OK(op, input_shapes_desc, output_shapes_desc); } TEST(CudnnRNNOpsTest, ForwardV3Lstm_ShapeFn) { int max_seq_length = 2; int batch_size = 3; int num_units = 4; int num_layers = 5; int dir_count = 1; std::vector<int> input_shape = {max_seq_length, batch_size, num_units}; std::vector<int> input_h_shape = {num_layers * dir_count, batch_size, num_units}; std::vector<int> input_c_shape = {num_layers * dir_count, batch_size, num_units}; std::vector<int> output_shape = {max_seq_length, batch_size, num_units * dir_count}; std::vector<int> seq_lengths_shape = {batch_size}; auto shape_to_str = [](const std::vector<int>& v) { return strings::StrCat("[", absl::StrJoin(v, ","), "]"); }; string input_shapes_desc = strings::StrCat( shape_to_str(input_shape), ";", shape_to_str(input_h_shape), ";", shape_to_str(input_c_shape), ";", "[?]", ";", shape_to_str(seq_lengths_shape)); string output_shapes_desc = "[d0_0,d0_1,d1_2];in1;in2;?;?"; ShapeInferenceTestOp op("CudnnRNNV3"); TF_ASSERT_OK(NodeDefBuilder("test", "CudnnRNNV3") .Input({"input", 0, DT_FLOAT}) .Input({"input_h", 0, DT_FLOAT}) .Input({"input_c", 0, DT_FLOAT}) .Input({"params", 0, DT_FLOAT}) .Input({"sequence_lengths", 0, DT_INT32}) .Attr("rnn_mode", "lstm") .Attr("input_mode", "auto_select") .Attr("direction", "unidirectional") .Finalize(&op.node_def)); INFER_OK(op, input_shapes_desc, output_shapes_desc); } } // end namespace tensorflow
null
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { TEST(CudnnRNNOpsTest, ParamsSize_ShapeFn) { ShapeInferenceTestOp op("CudnnRNNParamsSize"); INFER_OK(op, "[];[];[]", "[1]"); INFER_OK(op, "?;[];[]", "[1]"); INFER_OK(op, "[];?;[]", "[1]"); INFER_OK(op, "[];[];?", "[1]"); INFER_OK(op, "[];?;?", "[1]"); INFER_OK(op, "?;?;?", "[1]"); INFER_ERROR("Shape must be rank 0 ", op, "[1,2];?;[]"); INFER_ERROR("Shape must be rank 0 ", op, "?;[2];[]"); INFER_ERROR("Shape must be rank 0 ", op, "?;?;[1]"); } TEST(CudnnRNNOpsTest, ForwardLstm_ShapeFn) { int seq_length = 2; int batch_size = 3; int num_units = 4; int num_layers = 5; int dir_count = 1; std::vector<int> input_shape = {seq_length, batch_size, num_units}; std::vector<int> input_h_shape = {num_layers * dir_count, batch_size, num_units}; std::vector<int> output_shape = {seq_length, batch_size, num_units * dir_count}; auto shape_to_str = [](const std::vector<int>& v) { return strings::StrCat("[", absl::StrJoin(v, ","), "]"); }; string input_shapes_desc = strings::StrCat( shape_to_str(input_shape), ";", shape_to_str(input_h_shape), ";", shape_to_str(input_h_shape), ";", "[?]"); string output_shapes_desc = "[d0_0,d0_1,d1_2];in1;in1;?"; ShapeInferenceTestOp op("CudnnRNN"); TF_ASSERT_OK(NodeDefBuilder("test", "CudnnRNN") .Input({"input", 0, DT_FLOAT}) .Input({"input_h", 0, DT_FLOAT}) .Input({"input_c", 0, DT_FLOAT}) .Input({"params", 0, DT_FLOAT}) .Attr("rnn_mode", "lstm") .Attr("input_mode", "auto_select") .Attr("direction", "unidirectional") .Finalize(&op.node_def)); INFER_OK(op, input_shapes_desc, output_shapes_desc); INFER_ERROR("Shape must be rank 3 ", op, "[];[?,?,?];[?,?,?];[?]"); INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[];[?,?,?];[?]"); // Disabled because the kernel does not check shape of input_c. 
// INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[?,?,?];[?];[?]"); INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[?,?,?];[]"); } TEST(CudnnRNNOpsTest, ForwardV2Lstm_ShapeFn) { int seq_length = 2; int batch_size = 3; int num_units = 4; int num_layers = 5; int dir_count = 1; std::vector<int> input_shape = {seq_length, batch_size, num_units}; std::vector<int> input_h_shape = {num_layers * dir_count, batch_size, num_units}; std::vector<int> output_shape = {seq_length, batch_size, num_units * dir_count}; auto shape_to_str = [](const std::vector<int>& v) { return strings::StrCat("[", absl::StrJoin(v, ","), "]"); }; string input_shapes_desc = strings::StrCat( shape_to_str(input_shape), ";", shape_to_str(input_h_shape), ";", shape_to_str(input_h_shape), ";", "[?]"); string output_shapes_desc = "[d0_0,d0_1,d1_2];in1;in1;?;?"; ShapeInferenceTestOp op("CudnnRNNV2"); TF_ASSERT_OK(NodeDefBuilder("test", "CudnnRNNV2") .Input({"input", 0, DT_FLOAT}) .Input({"input_h", 0, DT_FLOAT}) .Input({"input_c", 0, DT_FLOAT}) .Input({"params", 0, DT_FLOAT}) .Attr("rnn_mode", "lstm") .Attr("input_mode", "auto_select") .Attr("direction", "unidirectional") .Finalize(&op.node_def)); INFER_OK(op, input_shapes_desc, output_shapes_desc); INFER_ERROR("Shape must be rank 3 ", op, "[];[?,?,?];[?,?,?];[?]"); INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[];[?,?,?];[?]"); // Disabled because the kernel does not check shape of input_c. // INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[?,?,?];[?];[?]"); INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[?,?,?];[]"); } TEST(CudnnRNNOpsTest, ForwardV3Lstm_ShapeFn) { int max_seq_length = 2; int batch_size = 3; int num_units = 4; int num_layers = 5; int dir_count = 1; std::vector<int> input_shape = {max_seq_length, batch_size, num_units}; std::vector<int> input_h_shape = {num_layers * dir_count, batch_size, num_units}; std::vector<int> input_c_shape = {num_layers * dir_count, batch_size, num_units}; std::vector<int> output_shape = {max_seq_length, batch_size, num_units * dir_count}; std::vector<int> seq_lengths_shape = {batch_size}; auto shape_to_str = [](const std::vector<int>& v) { return strings::StrCat("[", absl::StrJoin(v, ","), "]"); }; string input_shapes_desc = strings::StrCat( shape_to_str(input_shape), ";", shape_to_str(input_h_shape), ";", shape_to_str(input_c_shape), ";", "[?]", ";", shape_to_str(seq_lengths_shape)); string output_shapes_desc = "[d0_0,d0_1,d1_2];in1;in2;?;?"; ShapeInferenceTestOp op("CudnnRNNV3"); TF_ASSERT_OK(NodeDefBuilder("test", "CudnnRNNV3") .Input({"input", 0, DT_FLOAT}) .Input({"input_h", 0, DT_FLOAT}) .Input({"input_c", 0, DT_FLOAT}) .Input({"params", 0, DT_FLOAT}) .Input({"sequence_lengths", 0, DT_INT32}) .Attr("rnn_mode", "lstm") .Attr("input_mode", "auto_select") .Attr("direction", "unidirectional") .Finalize(&op.node_def)); INFER_OK(op, input_shapes_desc, output_shapes_desc); INFER_ERROR("Shape must be rank 3 ", op, "[];[?,?,?];[?,?,?];[?];[?]"); INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[];[?,?,?];[?];[?]"); INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[?,?,?];[];[?];[?]"); INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[?,?,?];[];[?]"); INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[?,?,?];[?];[]"); } TEST(CudnnRNNOpsTest, ForwardV3Gru) { int max_seq_length = 2; int batch_size = 3; int num_units = 4; int num_layers = 5; int dir_count = 1; std::vector<int> input_shape = {max_seq_length, batch_size, num_units}; std::vector<int> input_h_shape = {num_layers * dir_count, 
batch_size, num_units}; std::vector<int> input_c_shape = {num_layers * dir_count, batch_size, num_units}; std::vector<int> output_shape = {max_seq_length, batch_size, num_units * dir_count}; std::vector<int> seq_lengths_shape = {batch_size}; auto shape_to_str = [](const std::vector<int>& v) { return strings::StrCat("[", absl::StrJoin(v, ","), "]"); }; string input_shapes_desc = strings::StrCat( shape_to_str(input_shape), ";", shape_to_str(input_h_shape), ";", shape_to_str(input_c_shape), ";", "[?]", ";", shape_to_str(seq_lengths_shape)); string output_shapes_desc = "[d0_0,d0_1,d1_2];in1;[];?;?"; ShapeInferenceTestOp op("CudnnRNNV3"); TF_ASSERT_OK(NodeDefBuilder("test", "CudnnRNNV3") .Input({"input", 0, DT_FLOAT}) .Input({"input_h", 0, DT_FLOAT}) .Input({"input_c", 0, DT_FLOAT}) .Input({"params", 0, DT_FLOAT}) .Input({"sequence_lengths", 0, DT_INT32}) .Attr("rnn_mode", "gru") .Attr("input_mode", "auto_select") .Attr("direction", "unidirectional") .Finalize(&op.node_def)); INFER_OK(op, input_shapes_desc, output_shapes_desc); INFER_ERROR("Shape must be rank 3 ", op, "[];[?,?,?];[];[?];[?]"); INFER_ERROR("Shape must be rank 3 ", op, "[?,?,?];[];[];[?];[?]"); INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[];[];[?]"); INFER_ERROR("Shape must be rank 1 ", op, "[?,?,?];[?,?,?];[];[?];[]"); } } // end namespace tensorflow
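These CudnnRNN tests are written against TensorFlow's shape-inference test notation: every INFER_OK/INFER_ERROR call pairs a semicolon-separated list of input shapes with the expected output shapes, where "?" is an unknown shape or dimension, "[]" is a scalar, "inN" means "same shape as input N", and "dI_J" means "dimension J of input I" (for example, "d1_2" above refers to dimension 2 of input 1, input_h). As a minimal annotated restatement, not part of the original suite, the sketch below spells out the ParamsSize cases:

```cpp
// A minimal sketch reusing the same test utilities included above; the test
// name is hypothetical and the checks duplicate ones already in the file.
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/platform/test.h"

namespace tensorflow {

TEST(CudnnRNNOpsTest, ParamsSizeNotation_Example) {
  ShapeInferenceTestOp op("CudnnRNNParamsSize");

  // Three scalar inputs ("[]") infer a one-element vector output ("[1]")
  // holding the parameter count.
  INFER_OK(op, "[];[];[]", "[1]");

  // A rank-1 input where a scalar is required must be rejected up front,
  // before any output dimension is computed or written.
  INFER_ERROR("Shape must be rank 0 ", op, "[2];[];[]");
}

}  // namespace tensorflow
```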
null
292
CWE-787
CVE-2021-43579
# Changes in HTMLDOC v1.9.13 - Now install a 32x32 icon for Linux (Issue #432) - Fixed an issue with large values for roman numerals and letters in headings (Issue #433) - Fixed a crash bug when a HTML comment contains an invalid nul character (Issue #439) - Fixed a crash bug with bogus BMP images (Issue #444) # Changes in HTMLDOC v1.9.12 - Fixed a crash bug with "data:" URIs and EPUB output (Issue #410) - Fixed crash bugs for books (Issue #412, Issue #414) - Fixed a number-up crash bug (Issue #413) - Fixed JPEG error handling (Issue #415) - Fixed crash bugs with bogus table attributes (Issue #416, Issue #417) - Fixed a crash bug with malformed URIs (Issue #418) - Fixed a crash bug with malformed GIF files (Issue #423) - Fixed a crash bug with empty titles (Issue #425) - Fixed crash bugs with bogus text (Issue #426, Issue #429, Issue #430, Issue #431) - Fixed some issues reported by Coverity. - Removed the bundled libjpeg, libpng, and zlib. # Changes in HTMLDOC v1.9.11 - Added high-resolution desktop icons for Linux. - Updated the internal HTTP library to fix truncation of redirection URLs (Issue #396) - Fixed a regression in the handling of character entities for UTF-8 input (Issue #401) - The `--numbered` option did not work when the table-of-contents was disabled (Issue #405) # Changes in HTMLDOC v1.9.10 - Updated local zlib to v1.2.11. - Updated local libpng to v1.6.37. - Fixed packaging issues on macOS and Windows (Issue #377, Issue #386) - Now ignore sRGB profile errors in PNG files (Issue #390) - The GUI would crash when saving (Issue #391) - Page comments are now allowed in `pre` text (Issue #394) # Changes in HTMLDOC v1.9.9 - Fixed a redirection issue - some sites (incorrectly) provide an incomplete Location: URL in the HTTP response. - Fixed https: support on newer versions of Windows (Issue #378) - Fixed a problem with remote URLs containing spaces (Issue #379) - Fixed a UTF-8 processing bug for Markdown files (Issue #383) - Added support for `<FONT FACE="monospace">` (Issue #385) # Changes in HTMLDOC v1.9.8 - Added support for a `HTMLDOC.filename` META keyword that controls the filename reported in CGI mode; the default remains "htmldoc.pdf" (Issue #367) - Fixed a paragraph formatting issue with large inline images (Issue #369) - Fixed a buffer underflow issue (Issue #370) - Fixed PDF page numbers (Issue #371) - Added support for a new `L` header/footer format (`$LETTERHEAD`), which inserts a letterhead image at its full size (Issue #372, Issue #373, Issue #375) - Updated the build documentation (Issue #374) # Changes in HTMLDOC v1.9.7 - Refactored the PRE rendering code to work around compiler optimization bugs (Issue #349) - Added support for links with targets (Issue #351) - Fixed a table rowspan + valign bug (Issue #360) # Changes in HTMLDOC v1.9.6 - Added support for data URIs (Issue #340) - HTMLDOC no longer includes a PDF table of contents when converting a single web page (Issue #344) - Updated the markdown support with external links, additional inline markup, and hard line breaks. - Links in markdown text no longer render with a leading space as part of the link (Issue #346) - Fixed a buffer underflow bug discovered by AddressSanitizer. 
- Fixed a bug in UTF-8 support (Issue #348) - PDF output now includes the base language of the input document(s) (Issue #350) - Optimized the loading of font widths (Issue #354) - Optimized PDF page resources (Issue #356) - Optimized the base memory used for font widths (Issue #357) - Added proper `&shy;` support (Issue #361) - Title files can now be markdown. # Changes in HTMLDOC v1.9.5 - The GUI did not support EPUB output. - Empty markdown table cells were not rendered in PDF or PostScript output. - The automatically-generated title page now supports both "docnumber" and "version" metadata. - Added support for dc:subject and dc:language metadata in EPUB output from the HTML keywords and lang values. - Added support for the subject and language metadata in markdown input. - Fixed a buffer underflow bug (Issue #338) - `htmldoc --help` now reports whether HTTPS URLs are supported (Issue #339) - Fixed an issue with HTML title pages and EPUB output. # Changes in HTMLDOC v1.9.4 - Inline fixed-width text is no longer reduced in size automatically (Issue #309) - Optimized initialization of font width data (Issue #334) # Changes in HTMLDOC v1.9.3 - Fixed formatting bugs with aligned images (Issue #322, Issue #324) - Fixed support for three digit "#RGB" color values (Issue #323) - Fixed character set support for markdown metadata. - Updated libpng to v1.6.34 (Issue #326) - The makefiles did not use the CPPFLAGS value (Issue #328) # Changes in HTMLDOC v1.9.2 - Added Markdown table support. - Fixed parsing of TBODY, TFOOT, and THEAD elements in HTML files. # Changes in HTMLDOC v1.9.1 - Fixed monospace font size issue (Issue #309) - Added support for reproducible builds (Issue #310) - Added limited support for the HTML 4.0 SPAN element (Issue #311) - Added (extremely limited) UTF-8 support for input files (Issue #314) - Fixed buffer underflow for (invalid) short HTML comments (Issue #316) - Now indent PRE text, by popular request. - EPUB output now makes sure that `<element property>` is written as `<element property="property">`. - Now support both NAME and ID for table-of-contents targets. # Changes in HTMLDOC v1.9 - Added support for repeating a single header row for tables that span multiple pages (Issue #16) - Added support for embedding the current filename/URL in the header or footer (Issue #50) - Added EPUB support (Issue #301) - Added Markdown support (Issue #302) - Fixed a regression in header/footer image scaling (Issue #303) - Documentation updates (Issue #305) - Compiler fixes (Issue #304, Issue #306) - Fixed a bug when running HTMLDOC as a macOS application. - Updated the bundled libpng to v1.6.29. # Changes in HTMLDOC v1.8.30 - Updated documentation to reflect new project page on Github. - Dropped old CDE and IRIX desktop integration files. - Cleaned up the GUI and adopted new default text editors for Linux and macOS. - PAGE BREAK comments at the end of a file in web page mode would lose the first page (Issue #251) - Fixed the scaling of header/footer images to limit them to the height of the header or footer (Issue #273) - Fixed an issue with the top-level makefile not exiting with an error as needed (Issue #282) - Fixed a URL referencing bug when the same hostname but a different port was used (Issue #290) - Fixed build issue on macOS (Issue #291) - Fixed handling of indexed+alpha PNG images (Issue #295) # Changes in HTMLDOC v1.8.29 - Updated local PNG library to version 1.6.20. - Updated local JPEG library to version 9b. - Dropped support for OpenSSL. 
- Added configure script support for libjpeg-turbo. - Updated HTTP code to latest CUPS/ippsample sources. - Duplex PDF output incorrectly forced an even number of pages - The table of contents showed the wrong page numbers after headings containing the "_HD_OMIT_TOC" attribute. - Fixed reported build issues - The configure script's --enable-local* options did not work. # Changes in HTMLDOC v1.8.28 - Updated local zlib to version 1.2.8. - Updated local PNG library to version 1.6.8. - Updated local JPEG library to version 9. - Updated default PDF version to 1.4. - SECURITY: Fixed three buffer overflow issues when reading AFM files and parsing page sizes. - Fixed incompatibility with Fortify's version of strcpy, which does not work properly with variable-length arrays - Fixed compilation against PNG library 1.5 or later - Fixed documentation errors - Marked Zapf-Dingbats as a standard font - Fixed GPL license text in GUI - Fixed a table formatting problem when a column has multiple colspan values - Fixed parsing of HTML comments - Fixed potential out-of-bounds read in table-of-contents rendering code - Fixed handling of image URLs with ampersands in them - Fixed top/bottom margins for logo and header/footer images - Fixed image alignment bug - Fixed X11 build problem # Changes in HTMLDOC v1.8.27 - Fixed a crash bug that appeared when more than 10 blank pages were present in a document - Color changes were not reflected in PRE text - Remote URLs did not always work on older operating systems - Image filenames using % escapes were not decoded properly. - Rows using BGCOLOR that spanned across multiple pages did not render properly - Rows no longer start on a new page due to a cell with both HEIGHT and ROWSPAN specified - CMYK JPEG images caused HTMLDOC to crash - Table cell width calculations didn't always account for the proper minimum width - Images were not copied when generating indexed HTML output to a directory - Changing the bottom margin resulted in text that was formatted below the bottom margin. - The Monospace-Oblique font was not embedded properly in PDF files. # Changes in HTMLDOC v1.8.26 - Outline and keyword strings in PDF files are now stored as Unicode - The Flate compression code could get in an infinite loop if it ran out of memory - Book files saved from the GUI did not handle filenames with spaces - Fixed and re-enabled the ASCII85Device filter support in PostScript Level 2/3 output - Character entities in the first word of a file were not rendered properly - Fixed-size table columns were incorrectly resized when a table width was also specified and there was extra space to distribute - Text could "walk" up or down when in-line images were used - Row backgrounds incorrectly replaced cell backgrounds when the first cell in a row used ROWSPAN - HTMLDOC did not correctly parse FONT FACE attributes - Images in Level 2/3 PostScript output did not work on some printers - The GUI did not use the first page header # Changes in HTMLDOC v1.8.25 - Added "--overflow" and "--no-overflow" command-line options to show or hide the content-too-large errors; the default is "--no-overflow". - Added "--header1" command-line option and "HEADER1" page comments to set the page header for the first page of each chapter. - Added "timing" and "remotebytes" debug data generation. - Added DejaVu font collection to better support Cyrillic and Greek text; the new fonts are available under the generic names "monospace", "sans", and "serif". 
- Added "--referer" command-line option and corresponding CGI-mode support to pass Referer: information in HTTP requests - On Windows, HTMLDOC now logs CGI mode errors to a file called "htmldoc.log" in the Windows temporary directory. - HTMLDOC no longer uses Base-85 encoding for image data when producing Level 2 and 3 PostScript output. It appears that many printers and PostScript interpreters cannot properly decode this data when the original image data is not a multiple of 8 bits. - HTMLDOC now renders STRONG elements in boldface instead of bold-italic to match the W3C recommendations. - HTMLDOC now automatically inserts a TR element before a TD or TH element as needed to improve web site compatibility; this also triggers a HTML error in --strict mode. - "$HFIMAGEn" didn't work in a header/footer string. - HTMLDOC could crash when rendering a table. - Book files were not used in CGI mode - Cookies were not sent in HTTP requests - Table cells were not aligned properly when the ROWSPAN attribute was set to 1 - HTMLDOC crashed when rendering unresolved hyperlinks in aligned images - Documented the HTMLDOC_NOCGI environment variable - HTMLDOC sometimes crashed when rendering tables with background colors - HTMLDOC would crash when writing encrypted strings longer than 1024 bytes - HTMLDOC didn't set the data directory when running in CGI mode on Windows. - HTMLDOC could crash when loading the Symbol.afm file - HTMLDOC did not always honor HEIGHT attributes in table rows. - Tables with a mix of colspan and rowspan sometimes caused cells to be moved vertically outside the cell.
null
# Changes in HTMLDOC v1.9.13 - Now install a 32x32 icon for Linux (Issue #432) - Fixed an issue with large values for roman numerals and letters in headings (Issue #433) - Fixed a crash bug when a HTML comment contains an invalid nul character (Issue #439) - Fixed a crash bug with bogus BMP images (Issue #444) - Fixed a stack overflow bug with bogus BMP images (Issue #453) # Changes in HTMLDOC v1.9.12 - Fixed a crash bug with "data:" URIs and EPUB output (Issue #410) - Fixed crash bugs for books (Issue #412, Issue #414) - Fixed a number-up crash bug (Issue #413) - Fixed JPEG error handling (Issue #415) - Fixed crash bugs with bogus table attributes (Issue #416, Issue #417) - Fixed a crash bug with malformed URIs (Issue #418) - Fixed a crash bug with malformed GIF files (Issue #423) - Fixed a crash bug with empty titles (Issue #425) - Fixed crash bugs with bogus text (Issue #426, Issue #429, Issue #430, Issue #431) - Fixed some issues reported by Coverity. - Removed the bundled libjpeg, libpng, and zlib. # Changes in HTMLDOC v1.9.11 - Added high-resolution desktop icons for Linux. - Updated the internal HTTP library to fix truncation of redirection URLs (Issue #396) - Fixed a regression in the handling of character entities for UTF-8 input (Issue #401) - The `--numbered` option did not work when the table-of-contents was disabled (Issue #405) # Changes in HTMLDOC v1.9.10 - Updated local zlib to v1.2.11. - Updated local libpng to v1.6.37. - Fixed packaging issues on macOS and Windows (Issue #377, Issue #386) - Now ignore sRGB profile errors in PNG files (Issue #390) - The GUI would crash when saving (Issue #391) - Page comments are now allowed in `pre` text (Issue #394) # Changes in HTMLDOC v1.9.9 - Fixed a redirection issue - some sites (incorrectly) provide an incomplete Location: URL in the HTTP response. - Fixed https: support on newer versions of Windows (Issue #378) - Fixed a problem with remote URLs containing spaces (Issue #379) - Fixed a UTF-8 processing bug for Markdown files (Issue #383) - Added support for `<FONT FACE="monospace">` (Issue #385) # Changes in HTMLDOC v1.9.8 - Added support for a `HTMLDOC.filename` META keyword that controls the filename reported in CGI mode; the default remains "htmldoc.pdf" (Issue #367) - Fixed a paragraph formatting issue with large inline images (Issue #369) - Fixed a buffer underflow issue (Issue #370) - Fixed PDF page numbers (Issue #371) - Added support for a new `L` header/footer format (`$LETTERHEAD`), which inserts a letterhead image at its full size (Issue #372, Issue #373, Issue #375) - Updated the build documentation (Issue #374) # Changes in HTMLDOC v1.9.7 - Refactored the PRE rendering code to work around compiler optimization bugs (Issue #349) - Added support for links with targets (Issue #351) - Fixed a table rowspan + valign bug (Issue #360) # Changes in HTMLDOC v1.9.6 - Added support for data URIs (Issue #340) - HTMLDOC no longer includes a PDF table of contents when converting a single web page (Issue #344) - Updated the markdown support with external links, additional inline markup, and hard line breaks. - Links in markdown text no longer render with a leading space as part of the link (Issue #346) - Fixed a buffer underflow bug discovered by AddressSanitizer. 
- Fixed a bug in UTF-8 support (Issue #348) - PDF output now includes the base language of the input document(s) (Issue #350) - Optimized the loading of font widths (Issue #354) - Optimized PDF page resources (Issue #356) - Optimized the base memory used for font widths (Issue #357) - Added proper `&shy;` support (Issue #361) - Title files can now be markdown. # Changes in HTMLDOC v1.9.5 - The GUI did not support EPUB output. - Empty markdown table cells were not rendered in PDF or PostScript output. - The automatically-generated title page now supports both "docnumber" and "version" metadata. - Added support for dc:subject and dc:language metadata in EPUB output from the HTML keywords and lang values. - Added support for the subject and language metadata in markdown input. - Fixed a buffer underflow bug (Issue #338) - `htmldoc --help` now reports whether HTTPS URLs are supported (Issue #339) - Fixed an issue with HTML title pages and EPUB output. # Changes in HTMLDOC v1.9.4 - Inline fixed-width text is no longer reduced in size automatically (Issue #309) - Optimized initialization of font width data (Issue #334) # Changes in HTMLDOC v1.9.3 - Fixed formatting bugs with aligned images (Issue #322, Issue #324) - Fixed support for three digit "#RGB" color values (Issue #323) - Fixed character set support for markdown metadata. - Updated libpng to v1.6.34 (Issue #326) - The makefiles did not use the CPPFLAGS value (Issue #328) # Changes in HTMLDOC v1.9.2 - Added Markdown table support. - Fixed parsing of TBODY, TFOOT, and THEAD elements in HTML files. # Changes in HTMLDOC v1.9.1 - Fixed monospace font size issue (Issue #309) - Added support for reproducible builds (Issue #310) - Added limited support for the HTML 4.0 SPAN element (Issue #311) - Added (extremely limited) UTF-8 support for input files (Issue #314) - Fixed buffer underflow for (invalid) short HTML comments (Issue #316) - Now indent PRE text, by popular request. - EPUB output now makes sure that `<element property>` is written as `<element property="property">`. - Now support both NAME and ID for table-of-contents targets. # Changes in HTMLDOC v1.9 - Added support for repeating a single header row for tables that span multiple pages (Issue #16) - Added support for embedding the current filename/URL in the header or footer (Issue #50) - Added EPUB support (Issue #301) - Added Markdown support (Issue #302) - Fixed a regression in header/footer image scaling (Issue #303) - Documentation updates (Issue #305) - Compiler fixes (Issue #304, Issue #306) - Fixed a bug when running HTMLDOC as a macOS application. - Updated the bundled libpng to v1.6.29. # Changes in HTMLDOC v1.8.30 - Updated documentation to reflect new project page on Github. - Dropped old CDE and IRIX desktop integration files. - Cleaned up the GUI and adopted new default text editors for Linux and macOS. - PAGE BREAK comments at the end of a file in web page mode would lose the first page (Issue #251) - Fixed the scaling of header/footer images to limit them to the height of the header or footer (Issue #273) - Fixed an issue with the top-level makefile not exiting with an error as needed (Issue #282) - Fixed a URL referencing bug when the same hostname but a different port was used (Issue #290) - Fixed build issue on macOS (Issue #291) - Fixed handling of indexed+alpha PNG images (Issue #295) # Changes in HTMLDOC v1.8.29 - Updated local PNG library to version 1.6.20. - Updated local JPEG library to version 9b. - Dropped support for OpenSSL. 
- Added configure script support for libjpeg-turbo. - Updated HTTP code to latest CUPS/ippsample sources. - Duplex PDF output incorrectly forced an even number of pages - The table of contents showed the wrong page numbers after headings containing the "_HD_OMIT_TOC" attribute. - Fixed reported build issues - The configure script's --enable-local* options did not work. # Changes in HTMLDOC v1.8.28 - Updated local zlib to version 1.2.8. - Updated local PNG library to version 1.6.8. - Updated local JPEG library to version 9. - Updated default PDF version to 1.4. - SECURITY: Fixed three buffer overflow issues when reading AFM files and parsing page sizes. - Fixed incompatibility with Fortify's version of strcpy, which does not work properly with variable-length arrays - Fixed compilation against PNG library 1.5 or later - Fixed documentation errors - Marked Zapf-Dingbats as a standard font - Fixed GPL license text in GUI - Fixed a table formatting problem when a column has multiple colspan values - Fixed parsing of HTML comments - Fixed potential out-of-bounds read in table-of-contents rendering code - Fixed handling of image URLs with ampersands in them - Fixed top/bottom margins for logo and header/footer images - Fixed image alignment bug - Fixed X11 build problem # Changes in HTMLDOC v1.8.27 - Fixed a crash bug that appeared when more than 10 blank pages were present in a document - Color changes were not reflected in PRE text - Remote URLs did not always work on older operating systems - Image filenames using % escapes were not decoded properly. - Rows using BGCOLOR that spanned across multiple pages did not render properly - Rows no longer start on a new page due to a cell with both HEIGHT and ROWSPAN specified - CMYK JPEG images caused HTMLDOC to crash - Table cell width calculations didn't always account for the proper minimum width - Images were not copied when generating indexed HTML output to a directory - Changing the bottom margin resulted in text that was formatted below the bottom margin. - The Monospace-Oblique font was not embedded properly in PDF files. # Changes in HTMLDOC v1.8.26 - Outline and keyword strings in PDF files are now stored as Unicode - The Flate compression code could get in an infinite loop if it ran out of memory - Book files saved from the GUI did not handle filenames with spaces - Fixed and re-enabled the ASCII85Device filter support in PostScript Level 2/3 output - Character entities in the first word of a file were not rendered properly - Fixed-size table columns were incorrectly resized when a table width was also specified and there was extra space to distribute - Text could "walk" up or down when in-line images were used - Row backgrounds incorrectly replaced cell backgrounds when the first cell in a row used ROWSPAN - HTMLDOC did not correctly parse FONT FACE attributes - Images in Level 2/3 PostScript output did not work on some printers - The GUI did not use the first page header # Changes in HTMLDOC v1.8.25 - Added "--overflow" and "--no-overflow" command-line options to show or hide the content-too-large errors; the default is "--no-overflow". - Added "--header1" command-line option and "HEADER1" page comments to set the page header for the first page of each chapter. - Added "timing" and "remotebytes" debug data generation. - Added DejaVu font collection to better support Cyrillic and Greek text; the new fonts are available under the generic names "monospace", "sans", and "serif". 
- Added "--referer" command-line option and corresponding CGI-mode support to pass Referer: information in HTTP requests - On Windows, HTMLDOC now logs CGI mode errors to a file called "htmldoc.log" in the Windows temporary directory. - HTMLDOC no longer uses Base-85 encoding for image data when producing Level 2 and 3 PostScript output. It appears that many printers and PostScript interpreters cannot properly decode this data when the original image data is not a multiple of 8 bits. - HTMLDOC now renders STRONG elements in boldface instead of bold-italic to match the W3C recommendations. - HTMLDOC now automatically inserts a TR element before a TD or TH element as needed to improve web site compatibility; this also triggers a HTML error in --strict mode. - "$HFIMAGEn" didn't work in a header/footer string. - HTMLDOC could crash when rendering a table. - Book files were not used in CGI mode - Cookies were not sent in HTTP requests - Table cells were not aligned properly when the ROWSPAN attribute was set to 1 - HTMLDOC crashed when rendering unresolved hyperlinks in aligned images - Documented the HTMLDOC_NOCGI environment variable - HTMLDOC sometimes crashed when rendering tables with background colors - HTMLDOC would crash when writing encrypted strings longer than 1024 bytes - HTMLDOC didn't set the data directory when running in CGI mode on Windows. - HTMLDOC could crash when loading the Symbol.afm file - HTMLDOC did not always honor HEIGHT attributes in table rows. - Tables with a mix of colspan and rowspan sometimes caused cells to be moved vertically outside the cell.
null
293
CWE-787
CVE-2021-43814
NAME=ELF: negative vnext crash
FILE=bins/elf/analysis/6921737e-08e3-11e6-998c-a8ddd566ab1c.jpg
CMDS=q!
EXPECT=<<EOF
EOF
RUN

NAME=pseudo-crash
FILE==
CMDS=e asm.pseudo=1; e asm.arch=x86; e asm.bits=64; wx 7299; pdj 1
EXPECT=<<EOF
[{"offset":0,"esil":"cf,?{,18446744073709551515,rip,=,}","refptr":false,"fcn_addr":0,"fcn_last":0,"size":2,"opcode":"if (((unsigned) var) < 0) goto 0xffffffffffffff9b","disasm":"jb 0xffffffffffffff9b","bytes":"7299","family":"cpu","type":"cjmp","reloc":false,"type_num":2147483649,"type2_num":0,"jump":-101,"fail":2}]
EOF
RUN

NAME=ELF: ld-uclibc
FILE=bins/elf/ld-uClibc-0.9.33.2.so
CMDS=<<EOF
ii
iij
EOF
EXPECT=<<EOF
nth vaddr bind type lib name
-----------------------------
[]
EOF
RUN
null
NAME=ELF: negative vnext crash
FILE=bins/elf/analysis/6921737e-08e3-11e6-998c-a8ddd566ab1c.jpg
CMDS=q!
EXPECT=<<EOF
EOF
RUN

NAME=pseudo-crash
FILE==
CMDS=e asm.pseudo=1; e asm.arch=x86; e asm.bits=64; wx 7299; pdj 1
EXPECT=<<EOF
[{"offset":0,"esil":"cf,?{,18446744073709551515,rip,=,}","refptr":false,"fcn_addr":0,"fcn_last":0,"size":2,"opcode":"if (((unsigned) var) < 0) goto 0xffffffffffffff9b","disasm":"jb 0xffffffffffffff9b","bytes":"7299","family":"cpu","type":"cjmp","reloc":false,"type_num":2147483649,"type2_num":0,"jump":-101,"fail":2}]
EOF
RUN

NAME=ELF: ld-uclibc
FILE=bins/elf/ld-uClibc-0.9.33.2.so
CMDS=<<EOF
ii
iij
EOF
EXPECT=<<EOF
nth vaddr bind type lib name
-----------------------------
[]
EOF
RUN

NAME=ELF/Dwarf: abbrev empty
FILE=bins/elf/dwarf_fuzzed_abbrev_empty
CMDS=<<EOF
aaa
EOF
EXPECT=
RUN
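The ground-truth version of this test file adds one regression test, `ELF/Dwarf: abbrev empty`, which runs full analysis (`aaa`) on a fuzzed binary whose DWARF abbreviation data is empty and expects no output, i.e. no crash; the record's metadata pairs it with CVE-2021-43814 (CWE-787). The sketch below is not radare2's DWARF parser; it is a minimal, hypothetical example of the defensive pattern such fixes rely on, clamping an attacker-declared entry count to what the buffer can actually hold before any entries are written:

```cpp
// Hypothetical "count + fixed-size entries" parser, NOT radare2 code:
// the declared count is untrusted, so the write loop is bounded by what
// the input buffer really contains.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

struct Entry {
  uint32_t code;
  uint32_t tag;
};

// Returns the number of entries stored in `out`; never reads or writes
// past the end of `buf` or of the vector it sizes itself.
static std::size_t parse_entries(const uint8_t *buf, std::size_t len,
                                 std::vector<Entry> &out) {
  if (len < 4)
    return 0;

  uint32_t declared;               // attacker-controlled count field
  std::memcpy(&declared, buf, 4);  // native byte order, for illustration
  std::size_t off = 4;

  // Capacity comes from the bytes actually present, not the declared value.
  std::size_t fit = (len - off) / sizeof(Entry);
  std::size_t count = declared < fit ? declared : fit;

  out.resize(count);
  for (std::size_t i = 0; i < count; i++) {
    std::memcpy(&out[i], buf + off, sizeof(Entry));
    off += sizeof(Entry);
  }
  return count;
}

int main() {
  // Header claims 1000 entries (on a little-endian host), but only one
  // entry's worth of bytes follows.
  uint8_t blob[4 + sizeof(Entry)] = {0xE8, 0x03, 0x00, 0x00};
  std::vector<Entry> entries;
  std::printf("parsed %zu entries\n", parse_entries(blob, sizeof(blob), entries));
  return 0;
}
```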
null
294
CWE-787
CVE-2021-44109
/* * Copyright (C) 2019 by Sukchan Lee <acetcom@gmail.com> * * This file is part of Open5GS. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ #include "ogs-sbi.h" #include "ogs-crypt.h" #include "yuarel.h" #include "contrib/multipart_parser.h" static OGS_POOL(request_pool, ogs_sbi_request_t); static OGS_POOL(response_pool, ogs_sbi_response_t); static char *build_json(ogs_sbi_message_t *message); static int parse_json(ogs_sbi_message_t *message, char *content_type, char *json); static bool build_content( ogs_sbi_http_message_t *http, ogs_sbi_message_t *message); static int parse_content( ogs_sbi_message_t *message, ogs_sbi_http_message_t *http); static bool build_multipart( ogs_sbi_http_message_t *http, ogs_sbi_message_t *message); static int parse_multipart( ogs_sbi_message_t *sbi_message, ogs_sbi_http_message_t *http); static void http_message_free(ogs_sbi_http_message_t *http); void ogs_sbi_message_init(int num_of_request_pool, int num_of_response_pool) { ogs_pool_init(&request_pool, num_of_request_pool); ogs_pool_init(&response_pool, num_of_response_pool); } void ogs_sbi_message_final(void) { ogs_pool_final(&request_pool); ogs_pool_final(&response_pool); } void ogs_sbi_message_free(ogs_sbi_message_t *message) { int i; ogs_assert(message); if (message->NFProfile) OpenAPI_nf_profile_free(message->NFProfile); if (message->ProblemDetails) OpenAPI_problem_details_free(message->ProblemDetails); if (message->PatchItemList) { OpenAPI_lnode_t *node = NULL; OpenAPI_list_for_each(message->PatchItemList, node) OpenAPI_patch_item_free(node->data); OpenAPI_list_free(message->PatchItemList); } if (message->SubscriptionData) OpenAPI_subscription_data_free(message->SubscriptionData); if (message->NotificationData) OpenAPI_notification_data_free(message->NotificationData); if (message->SearchResult) OpenAPI_search_result_free(message->SearchResult); if (message->AuthenticationInfo) OpenAPI_authentication_info_free(message->AuthenticationInfo); if (message->AuthenticationInfoRequest) OpenAPI_authentication_info_request_free( message->AuthenticationInfoRequest); if (message->AuthenticationInfoResult) OpenAPI_authentication_info_result_free( message->AuthenticationInfoResult); if (message->AuthenticationSubscription) OpenAPI_authentication_subscription_free( message->AuthenticationSubscription); if (message->UeAuthenticationCtx) OpenAPI_ue_authentication_ctx_free(message->UeAuthenticationCtx); if (message->ConfirmationData) OpenAPI_confirmation_data_free(message->ConfirmationData); if (message->ConfirmationDataResponse) OpenAPI_confirmation_data_response_free( message->ConfirmationDataResponse); if (message->AuthEvent) OpenAPI_auth_event_free(message->AuthEvent); if (message->Amf3GppAccessRegistration) OpenAPI_amf3_gpp_access_registration_free( message->Amf3GppAccessRegistration); if (message->AccessAndMobilitySubscriptionData) OpenAPI_access_and_mobility_subscription_data_free( message->AccessAndMobilitySubscriptionData); 
if (message->SmfSelectionSubscriptionData) OpenAPI_smf_selection_subscription_data_free( message->SmfSelectionSubscriptionData); if (message->UeContextInSmfData) OpenAPI_ue_context_in_smf_data_free(message->UeContextInSmfData); if (message->SmContextCreateData) OpenAPI_sm_context_create_data_free(message->SmContextCreateData); if (message->SmContextCreatedData) OpenAPI_sm_context_created_data_free(message->SmContextCreatedData); if (message->SmContextCreateError) OpenAPI_sm_context_create_error_free(message->SmContextCreateError); if (message->SmContextUpdateData) OpenAPI_sm_context_update_data_free(message->SmContextUpdateData); if (message->SmContextUpdatedData) OpenAPI_sm_context_updated_data_free(message->SmContextUpdatedData); if (message->SmContextUpdateError) OpenAPI_sm_context_update_error_free(message->SmContextUpdateError); if (message->SmContextReleaseData) OpenAPI_sm_context_release_data_free(message->SmContextReleaseData); if (message->SmContextReleasedData) OpenAPI_sm_context_released_data_free(message->SmContextReleasedData); if (message->SessionManagementSubscriptionData) OpenAPI_session_management_subscription_data_free( message->SessionManagementSubscriptionData); if (message->N1N2MessageTransferReqData) OpenAPI_n1_n2_message_transfer_req_data_free( message->N1N2MessageTransferReqData); if (message->N1N2MessageTransferRspData) OpenAPI_n1_n2_message_transfer_rsp_data_free( message->N1N2MessageTransferRspData); if (message->N1N2MsgTxfrFailureNotification) OpenAPI_n1_n2_msg_txfr_failure_notification_free( message->N1N2MsgTxfrFailureNotification); if (message->SmContextStatusNotification) OpenAPI_sm_context_status_notification_free( message->SmContextStatusNotification); if (message->PolicyAssociationRequest) OpenAPI_policy_association_request_free( message->PolicyAssociationRequest); if (message->PolicyAssociation) OpenAPI_policy_association_free(message->PolicyAssociation); if (message->AmPolicyData) OpenAPI_am_policy_data_free(message->AmPolicyData); if (message->SmPolicyContextData) OpenAPI_sm_policy_context_data_free(message->SmPolicyContextData); if (message->SmPolicyDecision) OpenAPI_sm_policy_decision_free(message->SmPolicyDecision); if (message->SmPolicyData) OpenAPI_sm_policy_data_free(message->SmPolicyData); if (message->SmPolicyDeleteData) OpenAPI_sm_policy_delete_data_free(message->SmPolicyDeleteData); if (message->AuthorizedNetworkSliceInfo) OpenAPI_authorized_network_slice_info_free( message->AuthorizedNetworkSliceInfo); if (message->PcfBinding) OpenAPI_pcf_binding_free(message->PcfBinding); if (message->AppSessionContext) OpenAPI_app_session_context_free(message->AppSessionContext); if (message->AppSessionContextUpdateDataPatch) OpenAPI_app_session_context_update_data_patch_free(message->AppSessionContextUpdateDataPatch); if (message->SmPolicyNotification) OpenAPI_sm_policy_notification_free(message->SmPolicyNotification); if (message->TerminationNotification) OpenAPI_termination_notification_free(message->TerminationNotification); for (i = 0; i < message->num_of_part; i++) { if (message->part[i].pkbuf) ogs_pkbuf_free(message->part[i].pkbuf); } } ogs_sbi_request_t *ogs_sbi_request_new(void) { ogs_sbi_request_t *request = NULL; ogs_pool_alloc(&request_pool, &request); ogs_expect_or_return_val(request, NULL); memset(request, 0, sizeof(ogs_sbi_request_t)); request->http.params = ogs_hash_make(); ogs_expect_or_return_val(request->http.params, NULL); request->http.headers = ogs_hash_make(); ogs_expect_or_return_val(request->http.headers, NULL); return request; } 
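Both ogs_sbi_build_request() and ogs_sbi_parse_header() further down bound their writes into the fixed h.resource.component[] array with OGS_SBI_MAX_NUM_OF_RESOURCE_COMPONENT. The sketch below is not Open5GS code; it is a minimal, standalone illustration of that bounded-tokenization pattern, with a hypothetical kMaxComponents standing in for the macro and std::string standing in for the strdup'd components:

```cpp
// Hypothetical path splitter, NOT Open5GS code: at most max_components
// segments are stored, so a long or malicious URI can never cause a write
// past the fixed-size component array.
#include <cstdio>
#include <string>

static const int kMaxComponents = 8;  // stands in for the real macro

// Fills at most max_components entries of `out`; extra segments are dropped.
// Returns how many components were stored.
static int split_path(const char *path, std::string out[], int max_components) {
  int n = 0;
  const char *p = path;
  while (*p && n < max_components) {
    while (*p == '/')
      p++;                             // skip separators
    const char *start = p;
    while (*p && *p != '/')
      p++;                             // scan one component
    if (p > start)
      out[n++] = std::string(start, p - start);
  }
  return n;
}

int main() {
  std::string comp[kMaxComponents];
  int n = split_path("/nnrf-nfm/v1/nf-instances/1/2/3/4/5/6/7", comp,
                     kMaxComponents);
  for (int i = 0; i < n; i++)
    std::printf("component[%d] = %s\n", i, comp[i].c_str());
  return 0;
}
```

Dropping excess segments rather than failing mirrors the loop structure visible in the surrounding code, which simply stops copying components at the array bound.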
ogs_sbi_response_t *ogs_sbi_response_new(void) { ogs_sbi_response_t *response = NULL; ogs_pool_alloc(&response_pool, &response); ogs_expect_or_return_val(response, NULL); memset(response, 0, sizeof(ogs_sbi_response_t)); response->http.params = ogs_hash_make(); ogs_expect_or_return_val(response->http.params, NULL); response->http.headers = ogs_hash_make(); ogs_expect_or_return_val(response->http.headers, NULL); return response; } void ogs_sbi_request_free(ogs_sbi_request_t *request) { ogs_assert(request); if (request->h.uri) ogs_free(request->h.uri); ogs_sbi_header_free(&request->h); http_message_free(&request->http); ogs_pool_free(&request_pool, request); } void ogs_sbi_response_free(ogs_sbi_response_t *response) { ogs_assert(response); if (response->h.uri) ogs_free(response->h.uri); ogs_sbi_header_free(&response->h); http_message_free(&response->http); ogs_pool_free(&response_pool, response); } ogs_sbi_request_t *ogs_sbi_build_request(ogs_sbi_message_t *message) { ogs_sbi_request_t *request = NULL; ogs_assert(message); request = ogs_sbi_request_new(); ogs_expect_or_return_val(request, NULL); ogs_expect_or_return_val(message->h.method, NULL); request->h.method = ogs_strdup(message->h.method); if (message->h.uri) { ogs_expect_or_return_val(message->h.uri, NULL); request->h.uri = ogs_strdup(message->h.uri); ogs_expect_or_return_val(request->h.uri, NULL); } else { int i; ogs_expect_or_return_val(message->h.service.name, NULL); request->h.service.name = ogs_strdup(message->h.service.name); ogs_expect_or_return_val(message->h.api.version, NULL); request->h.api.version = ogs_strdup(message->h.api.version); ogs_expect_or_return_val(request->h.api.version, NULL); ogs_expect_or_return_val(message->h.resource.component[0], NULL); for (i = 0; i < OGS_SBI_MAX_NUM_OF_RESOURCE_COMPONENT && message->h.resource.component[i]; i++) request->h.resource.component[i] = ogs_strdup( message->h.resource.component[i]); } /* URL Param */ if (message->param.nf_id) { ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_NF_ID, message->param.nf_id); } if (message->param.nf_type) { char *v = OpenAPI_nf_type_ToString(message->param.nf_type); ogs_expect_or_return_val(v, NULL); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_NF_TYPE, v); } if (message->param.requester_nf_type) { char *v = OpenAPI_nf_type_ToString(message->param.requester_nf_type); ogs_expect_or_return_val(v, NULL); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_REQUESTER_NF_TYPE, v); } if (message->param.target_nf_type) { char *v = OpenAPI_nf_type_ToString(message->param.target_nf_type); ogs_expect_or_return_val(v, NULL); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_TARGET_NF_TYPE, v); } if (message->param.limit) { char *v = ogs_msprintf("%d", message->param.limit); ogs_expect_or_return_val(v, NULL); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_LIMIT, v); ogs_free(v); } if (message->param.dnn) { ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_DNN, message->param.dnn); } if (message->param.plmn_id_presence) { OpenAPI_plmn_id_t plmn_id; plmn_id.mnc = ogs_plmn_id_mnc_string(&message->param.plmn_id); plmn_id.mcc = ogs_plmn_id_mcc_string(&message->param.plmn_id); if (plmn_id.mnc && plmn_id.mcc) { char *v = NULL; cJSON *item = NULL; item = OpenAPI_plmn_id_convertToJSON(&plmn_id); ogs_expect_or_return_val(item, NULL); if (plmn_id.mnc) ogs_free(plmn_id.mnc); if (plmn_id.mcc) ogs_free(plmn_id.mcc); v = cJSON_Print(item); ogs_expect_or_return_val(v, NULL); cJSON_Delete(item); ogs_sbi_header_set(request->http.params, 
OGS_SBI_PARAM_PLMN_ID, v); ogs_free(v); } } if (message->param.single_nssai_presence) { char *v = ogs_sbi_s_nssai_to_string(&message->param.s_nssai); ogs_expect_or_return_val(v, NULL); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_SINGLE_NSSAI, v); ogs_free(v); } if (message->param.snssai_presence) { char *v = ogs_sbi_s_nssai_to_string(&message->param.s_nssai); ogs_expect_or_return_val(v, NULL); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_SNSSAI, v); ogs_free(v); } if (message->param.plmn_id_presence) { OpenAPI_plmn_id_t plmn_id; plmn_id.mnc = ogs_plmn_id_mnc_string(&message->param.plmn_id); plmn_id.mcc = ogs_plmn_id_mcc_string(&message->param.plmn_id); if (plmn_id.mnc && plmn_id.mcc) { char *v = NULL; cJSON *item = NULL; item = OpenAPI_plmn_id_convertToJSON(&plmn_id); ogs_expect_or_return_val(item, NULL); if (plmn_id.mnc) ogs_free(plmn_id.mnc); if (plmn_id.mcc) ogs_free(plmn_id.mcc); v = cJSON_Print(item); ogs_expect_or_return_val(v, NULL); cJSON_Delete(item); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_PLMN_ID, v); ogs_free(v); } } if (message->param.slice_info_request_for_pdu_session_presence) { OpenAPI_slice_info_for_pdu_session_t SliceInfoForPDUSession; OpenAPI_snssai_t sNSSAI; char *v = NULL; cJSON *item = NULL; ogs_expect_or_return_val(message->param.s_nssai.sst, NULL); ogs_expect_or_return_val(message->param.roaming_indication, NULL); memset(&sNSSAI, 0, sizeof(sNSSAI)); sNSSAI.sst = message->param.s_nssai.sst; sNSSAI.sd = ogs_s_nssai_sd_to_string(message->param.s_nssai.sd); memset(&SliceInfoForPDUSession, 0, sizeof(SliceInfoForPDUSession)); SliceInfoForPDUSession.s_nssai = &sNSSAI; SliceInfoForPDUSession.roaming_indication = message->param.roaming_indication; item = OpenAPI_slice_info_for_pdu_session_convertToJSON( &SliceInfoForPDUSession); ogs_expect_or_return_val(item, NULL); v = cJSON_Print(item); ogs_expect_or_return_val(v, NULL); cJSON_Delete(item); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_SLICE_INFO_REQUEST_FOR_PDU_SESSION, v); ogs_free(v); if (sNSSAI.sd) ogs_free(sNSSAI.sd); } if (message->param.ipv4addr) { ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_IPV4ADDR, message->param.ipv4addr); } if (message->param.ipv6prefix) { ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_IPV6PREFIX, message->param.ipv6prefix); } ogs_expect_or_return_val(true == build_content(&request->http, message), NULL); if (message->http.accept) { ogs_sbi_header_set(request->http.headers, OGS_SBI_ACCEPT, message->http.accept); } else { SWITCH(message->h.method) CASE(OGS_SBI_HTTP_METHOD_DELETE) ogs_sbi_header_set(request->http.headers, OGS_SBI_ACCEPT, OGS_SBI_CONTENT_PROBLEM_TYPE); break; DEFAULT ogs_sbi_header_set(request->http.headers, OGS_SBI_ACCEPT, OGS_SBI_CONTENT_JSON_TYPE "," OGS_SBI_CONTENT_PROBLEM_TYPE); break; END } if (message->http.content_encoding) ogs_sbi_header_set(request->http.headers, OGS_SBI_ACCEPT_ENCODING, message->http.content_encoding); return request; } ogs_sbi_response_t *ogs_sbi_build_response( ogs_sbi_message_t *message, int status) { ogs_sbi_response_t *response = NULL; ogs_assert(message); response = ogs_sbi_response_new(); ogs_expect_or_return_val(response, NULL); response->status = status; if (response->status != OGS_SBI_HTTP_STATUS_NO_CONTENT) { ogs_expect_or_return_val(true == build_content(&response->http, message), NULL); } if (message->http.location) { ogs_sbi_header_set(response->http.headers, "Location", message->http.location); } if (message->http.cache_control) ogs_sbi_header_set(response->http.headers, 
"Cache-Control", message->http.cache_control); return response; } int ogs_sbi_parse_request( ogs_sbi_message_t *message, ogs_sbi_request_t *request) { int rv; ogs_hash_index_t *hi; ogs_assert(request); ogs_assert(message); rv = ogs_sbi_parse_header(message, &request->h); if (rv != OGS_OK) { ogs_error("ogs_sbi_parse_header() failed"); return OGS_ERROR; } for (hi = ogs_hash_first(request->http.params); hi; hi = ogs_hash_next(hi)) { if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_NF_ID)) { message->param.nf_id = ogs_hash_this_val(hi); } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_NF_TYPE)) { message->param.nf_type = OpenAPI_nf_type_FromString(ogs_hash_this_val(hi)); } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_TARGET_NF_TYPE)) { message->param.target_nf_type = OpenAPI_nf_type_FromString(ogs_hash_this_val(hi)); } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_REQUESTER_NF_TYPE)) { message->param.requester_nf_type = OpenAPI_nf_type_FromString(ogs_hash_this_val(hi)); } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_LIMIT)) { message->param.limit = atoi(ogs_hash_this_val(hi)); } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_DNN)) { message->param.dnn = ogs_hash_this_val(hi); } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_PLMN_ID)) { char *v = NULL; cJSON *item = NULL; OpenAPI_plmn_id_t *plmn_id = NULL; v = ogs_hash_this_val(hi); if (v) { item = cJSON_Parse(v); if (item) { plmn_id = OpenAPI_plmn_id_parseFromJSON(item); if (plmn_id && plmn_id->mnc && plmn_id->mcc) { ogs_plmn_id_build(&message->param.plmn_id, atoi(plmn_id->mcc), atoi(plmn_id->mnc), strlen(plmn_id->mnc)); message->param.plmn_id_presence = true; OpenAPI_plmn_id_free(plmn_id); } cJSON_Delete(item); } } } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_SINGLE_NSSAI)) { char *v = ogs_hash_this_val(hi); if (v) { bool rc = ogs_sbi_s_nssai_from_string( &message->param.s_nssai, v); if (rc == true) message->param.single_nssai_presence = true; } } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_SNSSAI)) { char *v = ogs_hash_this_val(hi); if (v) { bool rc = ogs_sbi_s_nssai_from_string( &message->param.s_nssai, v); if (rc == true) message->param.snssai_presence = true; } } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_SLICE_INFO_REQUEST_FOR_PDU_SESSION)) { char *v = NULL; cJSON *item = NULL; OpenAPI_slice_info_for_pdu_session_t *SliceInfoForPduSession = NULL; v = ogs_hash_this_val(hi); if (v) { item = cJSON_Parse(v); if (item) { SliceInfoForPduSession = OpenAPI_slice_info_for_pdu_session_parseFromJSON(item); if (SliceInfoForPduSession) { OpenAPI_snssai_t *s_nssai = SliceInfoForPduSession->s_nssai; if (s_nssai) { message->param.s_nssai.sst = s_nssai->sst; message->param.s_nssai.sd = ogs_s_nssai_sd_from_string(s_nssai->sd); } message->param.roaming_indication = SliceInfoForPduSession->roaming_indication; message->param. 
slice_info_request_for_pdu_session_presence = true; OpenAPI_slice_info_for_pdu_session_free( SliceInfoForPduSession); } cJSON_Delete(item); } } } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_IPV4ADDR)) { message->param.ipv4addr = ogs_hash_this_val(hi); } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_IPV6PREFIX)) { message->param.ipv6prefix = ogs_hash_this_val(hi); } } for (hi = ogs_hash_first(request->http.headers); hi; hi = ogs_hash_next(hi)) { if (!ogs_strcasecmp(ogs_hash_this_key(hi), OGS_SBI_ACCEPT_ENCODING)) { message->http.content_encoding = ogs_hash_this_val(hi); } else if (!ogs_strcasecmp( ogs_hash_this_key(hi), OGS_SBI_CONTENT_TYPE)) { message->http.content_type = ogs_hash_this_val(hi); } else if (!ogs_strcasecmp(ogs_hash_this_key(hi), OGS_SBI_ACCEPT)) { message->http.accept = ogs_hash_this_val(hi); } } if (parse_content(message, &request->http) != OGS_OK) { ogs_error("parse_content() failed"); return OGS_ERROR; } return OGS_OK; } int ogs_sbi_parse_response( ogs_sbi_message_t *message, ogs_sbi_response_t *response) { int rv; ogs_hash_index_t *hi; ogs_assert(response); ogs_assert(message); rv = ogs_sbi_parse_header(message, &response->h); if (rv != OGS_OK) { ogs_error("ogs_sbi_parse_header() failed"); return OGS_ERROR; } for (hi = ogs_hash_first(response->http.headers); hi; hi = ogs_hash_next(hi)) { if (!ogs_strcasecmp(ogs_hash_this_key(hi), OGS_SBI_CONTENT_TYPE)) { message->http.content_type = ogs_hash_this_val(hi); } else if (!ogs_strcasecmp(ogs_hash_this_key(hi), OGS_SBI_LOCATION)) { message->http.location = ogs_hash_this_val(hi); } } message->res_status = response->status; if (parse_content(message, &response->http) != OGS_OK) { ogs_error("parse_content() failed"); return OGS_ERROR; } return OGS_OK; } ogs_pkbuf_t *ogs_sbi_find_part_by_content_id( ogs_sbi_message_t *message, char *content_id) { int i; ogs_assert(message); ogs_assert(content_id); for (i = 0; i < message->num_of_part; i++) { if (message->part[i].content_id && strcmp(message->part[i].content_id, content_id) == 0) return message->part[i].pkbuf; } return NULL; } int ogs_sbi_parse_header(ogs_sbi_message_t *message, ogs_sbi_header_t *header) { struct yuarel yuarel; char *saveptr = NULL; char *uri = NULL, *p = NULL;; char *component = NULL; int i = 0; ogs_assert(message); ogs_assert(header); memset(message, 0, sizeof(*message)); message->h.method = header->method; message->h.uri = header->uri; ogs_assert(message->h.uri); uri = ogs_strdup(header->uri); ogs_assert(uri); p = uri; if (p[0] != '/') { int rv = yuarel_parse(&yuarel, p); if (rv != OGS_OK) { ogs_error("yuarel_parse() failed"); ogs_free(uri); return OGS_ERROR; } p = yuarel.path; } header->service.name = ogs_sbi_parse_uri(p, "/", &saveptr); if (!header->service.name) { ogs_error("ogs_sbi_parse_uri() failed"); ogs_free(uri); return OGS_ERROR; } message->h.service.name = header->service.name; header->api.version = ogs_sbi_parse_uri(NULL, "/", &saveptr); if (!header->api.version) { ogs_error("ogs_sbi_parse_uri() failed"); ogs_free(uri); return OGS_ERROR; } message->h.api.version = header->api.version; for (i = 0; i < OGS_SBI_MAX_NUM_OF_RESOURCE_COMPONENT && (component = ogs_sbi_parse_uri(NULL, "/", &saveptr)) != NULL; i++) { header->resource.component[i] = component; message->h.resource.component[i] = component; } ogs_free(uri); return OGS_OK; } void ogs_sbi_header_free(ogs_sbi_header_t *h) { int i; ogs_assert(h); if (h->method) ogs_free(h->method); if (h->service.name) ogs_free(h->service.name); if (h->api.version) ogs_free(h->api.version); for (i = 
0; i < OGS_SBI_MAX_NUM_OF_RESOURCE_COMPONENT && h->resource.component[i]; i++) ogs_free(h->resource.component[i]); } static char *build_json(ogs_sbi_message_t *message) { char *content = NULL; cJSON *item = NULL; ogs_assert(message); if (message->ProblemDetails) { item = OpenAPI_problem_details_convertToJSON(message->ProblemDetails); ogs_assert(item); } else if (message->NFProfile) { item = OpenAPI_nf_profile_convertToJSON(message->NFProfile); ogs_assert(item); } else if (message->PatchItemList) { OpenAPI_lnode_t *node = NULL; item = cJSON_CreateArray(); ogs_assert(item); OpenAPI_list_for_each(message->PatchItemList, node) { cJSON *patchItem = OpenAPI_patch_item_convertToJSON(node->data); ogs_assert(patchItem); cJSON_AddItemToArray(item, patchItem); } } else if (message->SubscriptionData) { item = OpenAPI_subscription_data_convertToJSON( message->SubscriptionData); ogs_assert(item); } else if (message->NotificationData) { item = OpenAPI_notification_data_convertToJSON( message->NotificationData); ogs_assert(item); } else if (message->SearchResult) { item = OpenAPI_search_result_convertToJSON(message->SearchResult); ogs_assert(item); } else if (message->links) { item = ogs_sbi_links_convertToJSON(message->links); ogs_assert(item); } else if (message->AuthenticationInfo) { item = OpenAPI_authentication_info_convertToJSON( message->AuthenticationInfo); ogs_assert(item); } else if (message->AuthenticationInfoRequest) { item = OpenAPI_authentication_info_request_convertToJSON( message->AuthenticationInfoRequest); ogs_assert(item); } else if (message->AuthenticationInfoResult) { item = OpenAPI_authentication_info_result_convertToJSON( message->AuthenticationInfoResult); ogs_assert(item); } else if (message->AuthenticationSubscription) { item = OpenAPI_authentication_subscription_convertToJSON( message->AuthenticationSubscription); ogs_assert(item); } else if (message->UeAuthenticationCtx) { item = OpenAPI_ue_authentication_ctx_convertToJSON( message->UeAuthenticationCtx); ogs_assert(item); } else if (message->ConfirmationData) { item = OpenAPI_confirmation_data_convertToJSON( message->ConfirmationData); ogs_assert(item); } else if (message->ConfirmationDataResponse) { item = OpenAPI_confirmation_data_response_convertToJSON( message->ConfirmationDataResponse); ogs_assert(item); } else if (message->AuthEvent) { item = OpenAPI_auth_event_convertToJSON(message->AuthEvent); ogs_assert(item); } else if (message->Amf3GppAccessRegistration) { item = OpenAPI_amf3_gpp_access_registration_convertToJSON( message->Amf3GppAccessRegistration); ogs_assert(item); } else if (message->AccessAndMobilitySubscriptionData) { item = OpenAPI_access_and_mobility_subscription_data_convertToJSON( message->AccessAndMobilitySubscriptionData); ogs_assert(item); } else if (message->SmfSelectionSubscriptionData) { item = OpenAPI_smf_selection_subscription_data_convertToJSON( message->SmfSelectionSubscriptionData); ogs_assert(item); } else if (message->UeContextInSmfData) { item = OpenAPI_ue_context_in_smf_data_convertToJSON( message->UeContextInSmfData); ogs_assert(item); } else if (message->SmContextCreateData) { item = OpenAPI_sm_context_create_data_convertToJSON( message->SmContextCreateData); ogs_assert(item); } else if (message->SmContextCreatedData) { item = OpenAPI_sm_context_created_data_convertToJSON( message->SmContextCreatedData); ogs_assert(item); } else if (message->SmContextCreateError) { item = OpenAPI_sm_context_create_error_convertToJSON( message->SmContextCreateError); ogs_assert(item); } else if 
(message->SmContextUpdateData) { item = OpenAPI_sm_context_update_data_convertToJSON( message->SmContextUpdateData); ogs_assert(item); } else if (message->SmContextUpdatedData) { item = OpenAPI_sm_context_updated_data_convertToJSON( message->SmContextUpdatedData); ogs_assert(item); } else if (message->SmContextUpdateError) { item = OpenAPI_sm_context_update_error_convertToJSON( message->SmContextUpdateError); ogs_assert(item); } else if (message->SmContextReleaseData) { item = OpenAPI_sm_context_release_data_convertToJSON( message->SmContextReleaseData); ogs_assert(item); } else if (message->SmContextReleasedData) { item = OpenAPI_sm_context_released_data_convertToJSON( message->SmContextReleasedData); ogs_assert(item); } else if (message->SessionManagementSubscriptionData) { item = OpenAPI_session_management_subscription_data_convertToJSON( message->SessionManagementSubscriptionData); ogs_assert(item); } else if (message->N1N2MessageTransferReqData) { item = OpenAPI_n1_n2_message_transfer_req_data_convertToJSON( message->N1N2MessageTransferReqData); ogs_assert(item); } else if (message->N1N2MessageTransferRspData) { item = OpenAPI_n1_n2_message_transfer_rsp_data_convertToJSON( message->N1N2MessageTransferRspData); ogs_assert(item); } else if (message->N1N2MsgTxfrFailureNotification) { item = OpenAPI_n1_n2_msg_txfr_failure_notification_convertToJSON( message->N1N2MsgTxfrFailureNotification); ogs_assert(item); } else if (message->SmContextStatusNotification) { item = OpenAPI_sm_context_status_notification_convertToJSON( message->SmContextStatusNotification); ogs_assert(item); } else if (message->PolicyAssociationRequest) { item = OpenAPI_policy_association_request_convertToJSON( message->PolicyAssociationRequest); ogs_assert(item); } else if (message->PolicyAssociation) { item = OpenAPI_policy_association_convertToJSON( message->PolicyAssociation); ogs_assert(item); } else if (message->AmPolicyData) { item = OpenAPI_am_policy_data_convertToJSON(message->AmPolicyData); ogs_assert(item); } else if (message->SmPolicyContextData) { item = OpenAPI_sm_policy_context_data_convertToJSON( message->SmPolicyContextData); ogs_assert(item); } else if (message->SmPolicyDecision) { item = OpenAPI_sm_policy_decision_convertToJSON( message->SmPolicyDecision); ogs_assert(item); } else if (message->SmPolicyData) { item = OpenAPI_sm_policy_data_convertToJSON(message->SmPolicyData); ogs_assert(item); } else if (message->SmPolicyDeleteData) { item = OpenAPI_sm_policy_delete_data_convertToJSON( message->SmPolicyDeleteData); ogs_assert(item); } else if (message->AuthorizedNetworkSliceInfo) { item = OpenAPI_authorized_network_slice_info_convertToJSON( message->AuthorizedNetworkSliceInfo); ogs_assert(item); } else if (message->PcfBinding) { item = OpenAPI_pcf_binding_convertToJSON(message->PcfBinding); ogs_assert(item); } else if (message->AppSessionContext) { item = OpenAPI_app_session_context_convertToJSON( message->AppSessionContext); ogs_assert(item); } else if (message->AppSessionContextUpdateDataPatch) { item = OpenAPI_app_session_context_update_data_patch_convertToJSON( message->AppSessionContextUpdateDataPatch); ogs_assert(item); } else if (message->SmPolicyNotification) { item = OpenAPI_sm_policy_notification_convertToJSON( message->SmPolicyNotification); ogs_assert(item); } else if (message->TerminationNotification) { item = OpenAPI_termination_notification_convertToJSON( message->TerminationNotification); ogs_assert(item); } if (item) { content = cJSON_Print(item); ogs_assert(content); 
ogs_log_print(OGS_LOG_TRACE, "%s", content); cJSON_Delete(item); } return content; } static int parse_json(ogs_sbi_message_t *message, char *content_type, char *json) { int rv = OGS_OK; cJSON *item = NULL; ogs_assert(message); if (!json) return OGS_OK; if (!content_type) { ogs_error("No Content-type"); return OGS_ERROR; } ogs_log_print(OGS_LOG_TRACE, "%s", json); item = cJSON_Parse(json); if (!item) { ogs_error("JSON parse error"); return OGS_ERROR; } if (content_type && !strncmp(content_type, OGS_SBI_CONTENT_PROBLEM_TYPE, strlen(OGS_SBI_CONTENT_PROBLEM_TYPE))) { message->ProblemDetails = OpenAPI_problem_details_parseFromJSON(item); } else if (content_type && !strncmp(content_type, OGS_SBI_CONTENT_PATCH_TYPE, strlen(OGS_SBI_CONTENT_PATCH_TYPE))) { if (item) { OpenAPI_patch_item_t *patch_item = NULL; cJSON *patchJSON = NULL; message->PatchItemList = OpenAPI_list_create(); cJSON_ArrayForEach(patchJSON, item) { if (!cJSON_IsObject(patchJSON)) { rv = OGS_ERROR; ogs_error("Unknown JSON"); goto cleanup; } patch_item = OpenAPI_patch_item_parseFromJSON(patchJSON); OpenAPI_list_add(message->PatchItemList, patch_item); } } } else { SWITCH(message->h.service.name) CASE(OGS_SBI_SERVICE_NAME_NNRF_NFM) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_NF_INSTANCES) message->NFProfile = OpenAPI_nf_profile_parseFromJSON(item); if (!message->NFProfile) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_SUBSCRIPTIONS) message->SubscriptionData = OpenAPI_subscription_data_parseFromJSON(item); if (!message->SubscriptionData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_NF_STATUS_NOTIFY) message->NotificationData = OpenAPI_notification_data_parseFromJSON(item); if (!message->NotificationData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NNRF_DISC) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_NF_INSTANCES) message->SearchResult = OpenAPI_search_result_parseFromJSON(item); if (!message->SearchResult) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NAUSF_AUTH) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_UE_AUTHENTICATIONS) SWITCH(message->h.method) CASE(OGS_SBI_HTTP_METHOD_POST) if (message->res_status == 0) { message->AuthenticationInfo = OpenAPI_authentication_info_parseFromJSON(item); if (!message->AuthenticationInfo) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_CREATED) { message->UeAuthenticationCtx = OpenAPI_ue_authentication_ctx_parseFromJSON(item); if (!message->UeAuthenticationCtx) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; CASE(OGS_SBI_HTTP_METHOD_PUT) if (message->res_status == 0) { message->ConfirmationData = OpenAPI_confirmation_data_parseFromJSON(item); if (!message->ConfirmationData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_OK) { message->ConfirmationDataResponse = OpenAPI_confirmation_data_response_parseFromJSON( item); if (!message->ConfirmationDataResponse) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown method [%s]", message->h.method); END break; DEFAULT rv = OGS_ERROR; 
ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NUDM_UEAU) SWITCH(message->h.resource.component[1]) CASE(OGS_SBI_RESOURCE_NAME_SECURITY_INFORMATION) SWITCH(message->h.resource.component[2]) CASE(OGS_SBI_RESOURCE_NAME_GENERATE_AUTH_DATA) if (message->res_status == 0) { message->AuthenticationInfoRequest = OpenAPI_authentication_info_request_parseFromJSON( item); if (!message->AuthenticationInfoRequest) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_OK) { message->AuthenticationInfoResult = OpenAPI_authentication_info_result_parseFromJSON( item); if (!message->AuthenticationInfoResult) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[2]); END break; CASE(OGS_SBI_RESOURCE_NAME_AUTH_EVENTS) message->AuthEvent = OpenAPI_auth_event_parseFromJSON(item); if (!message->AuthEvent) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[1]); END break; CASE(OGS_SBI_SERVICE_NAME_NUDM_UECM) SWITCH(message->h.resource.component[1]) CASE(OGS_SBI_RESOURCE_NAME_REGISTRATIONS) SWITCH(message->h.resource.component[2]) CASE(OGS_SBI_RESOURCE_NAME_AMF_3GPP_ACCESS) message->Amf3GppAccessRegistration = OpenAPI_amf3_gpp_access_registration_parseFromJSON( item); if (!message->Amf3GppAccessRegistration) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[2]); END break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[1]); END break; CASE(OGS_SBI_SERVICE_NAME_NUDM_SDM) SWITCH(message->h.resource.component[1]) CASE(OGS_SBI_RESOURCE_NAME_AM_DATA) message->AccessAndMobilitySubscriptionData = OpenAPI_access_and_mobility_subscription_data_parseFromJSON( item); if (!message->AccessAndMobilitySubscriptionData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_SMF_SELECT_DATA) message->SmfSelectionSubscriptionData = OpenAPI_smf_selection_subscription_data_parseFromJSON(item); if (!message->SmfSelectionSubscriptionData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_UE_CONTEXT_IN_SMF_DATA) message->UeContextInSmfData = OpenAPI_ue_context_in_smf_data_parseFromJSON(item); if (!message->UeContextInSmfData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_SM_DATA) message->SessionManagementSubscriptionData = OpenAPI_session_management_subscription_data_parseFromJSON( item); if (!message->SessionManagementSubscriptionData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[1]); END break; CASE(OGS_SBI_SERVICE_NAME_NUDR_DR) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_SUBSCRIPTION_DATA) SWITCH(message->h.resource.component[2]) CASE(OGS_SBI_RESOURCE_NAME_AUTHENTICATION_DATA) SWITCH(message->h.resource.component[3]) CASE(OGS_SBI_RESOURCE_NAME_AUTHENTICATION_SUBSCRIPTION) if (message->res_status == OGS_SBI_HTTP_STATUS_OK) { message->AuthenticationSubscription = OpenAPI_authentication_subscription_parseFromJSON(item); if (!message->AuthenticationSubscription) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; 
CASE(OGS_SBI_RESOURCE_NAME_AUTHENTICATION_STATUS) message->AuthEvent = OpenAPI_auth_event_parseFromJSON(item); if (!message->AuthEvent) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[3]); END break; CASE(OGS_SBI_RESOURCE_NAME_CONTEXT_DATA) message->Amf3GppAccessRegistration = OpenAPI_amf3_gpp_access_registration_parseFromJSON( item); if (!message->Amf3GppAccessRegistration) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT SWITCH(message->h.resource.component[3]) CASE(OGS_SBI_RESOURCE_NAME_PROVISIONED_DATA) SWITCH(message->h.resource.component[4]) CASE(OGS_SBI_RESOURCE_NAME_AM_DATA) message->AccessAndMobilitySubscriptionData = OpenAPI_access_and_mobility_subscription_data_parseFromJSON(item); if (!message->AccessAndMobilitySubscriptionData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_SMF_SELECTION_SUBSCRIPTION_DATA) message->SmfSelectionSubscriptionData = OpenAPI_smf_selection_subscription_data_parseFromJSON(item); if (!message->SmfSelectionSubscriptionData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_UE_CONTEXT_IN_SMF_DATA) message->UeContextInSmfData = OpenAPI_ue_context_in_smf_data_parseFromJSON( item); if (!message->UeContextInSmfData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_SM_DATA) message->SessionManagementSubscriptionData = OpenAPI_session_management_subscription_data_parseFromJSON(item); if (!message->SessionManagementSubscriptionData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[4]); END break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[3]); END END break; CASE(OGS_SBI_RESOURCE_NAME_POLICY_DATA) SWITCH(message->h.resource.component[1]) CASE(OGS_SBI_RESOURCE_NAME_UES) SWITCH(message->h.resource.component[3]) CASE(OGS_SBI_RESOURCE_NAME_AM_DATA) message->AmPolicyData = OpenAPI_am_policy_data_parseFromJSON(item); if (!message->AmPolicyData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_SM_DATA) message->SmPolicyData = OpenAPI_sm_policy_data_parseFromJSON(item); if (!message->SmPolicyData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[3]); END break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[1]); END break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NSMF_PDUSESSION) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_SM_CONTEXTS) SWITCH(message->h.resource.component[2]) CASE(OGS_SBI_RESOURCE_NAME_MODIFY) if (message->res_status == 0) { message->SmContextUpdateData = OpenAPI_sm_context_update_data_parseFromJSON(item); if (!message->SmContextUpdateData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_OK) { message->SmContextUpdatedData = OpenAPI_sm_context_updated_data_parseFromJSON(item); if (!message->SmContextUpdatedData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_BAD_REQUEST || message->res_status == OGS_SBI_HTTP_STATUS_FORBIDDEN || message->res_status == 
OGS_SBI_HTTP_STATUS_NOT_FOUND || message->res_status == OGS_SBI_HTTP_STATUS_INTERNAL_SERVER_ERROR || message->res_status == OGS_SBI_HTTP_STATUS_SERVICE_UNAVAILABLE || message->res_status == OGS_SBI_HTTP_STATUS_GATEWAY_TIMEOUT) { message->SmContextUpdateError = OpenAPI_sm_context_update_error_parseFromJSON(item); if (!message->SmContextUpdateError) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; CASE(OGS_SBI_RESOURCE_NAME_RELEASE) if (message->res_status == 0) { message->SmContextReleaseData = OpenAPI_sm_context_release_data_parseFromJSON(item); if (!message->SmContextReleaseData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_NO_CONTENT) { } else if (message->res_status == OGS_SBI_HTTP_STATUS_OK) { message->SmContextReleasedData = OpenAPI_sm_context_released_data_parseFromJSON( item); if (!message->SmContextReleasedData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; DEFAULT if (message->res_status == 0) { message->SmContextCreateData = OpenAPI_sm_context_create_data_parseFromJSON(item); if (!message->SmContextCreateData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_CREATED) { message->SmContextCreatedData = OpenAPI_sm_context_created_data_parseFromJSON(item); if (!message->SmContextCreatedData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_BAD_REQUEST || message->res_status == OGS_SBI_HTTP_STATUS_FORBIDDEN || message->res_status == OGS_SBI_HTTP_STATUS_NOT_FOUND || message->res_status == OGS_SBI_HTTP_STATUS_INTERNAL_SERVER_ERROR || message->res_status == OGS_SBI_HTTP_STATUS_SERVICE_UNAVAILABLE || message->res_status == OGS_SBI_HTTP_STATUS_GATEWAY_TIMEOUT) { message->SmContextCreateError = OpenAPI_sm_context_create_error_parseFromJSON(item); if (!message->SmContextCreateError) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } END break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NAMF_COMM) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_UE_CONTEXTS) SWITCH(message->h.resource.component[2]) CASE(OGS_SBI_RESOURCE_NAME_N1_N2_MESSAGES) if (message->res_status == 0) { message->N1N2MessageTransferReqData = OpenAPI_n1_n2_message_transfer_req_data_parseFromJSON(item); if (!message->N1N2MessageTransferReqData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_OK || message->res_status == OGS_SBI_HTTP_STATUS_ACCEPTED) { message->N1N2MessageTransferRspData = OpenAPI_n1_n2_message_transfer_rsp_data_parseFromJSON(item); if (!message->N1N2MessageTransferRspData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[2]); END break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NPCF_AM_POLICY_CONTROL) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_POLICIES) if (message->res_status == 0) { message->PolicyAssociationRequest = OpenAPI_policy_association_request_parseFromJSON( item); if (!message->PolicyAssociationRequest) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_CREATED) { message->PolicyAssociation = OpenAPI_policy_association_parseFromJSON(item); if 
(!message->PolicyAssociation) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NPCF_SMPOLICYCONTROL) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_SM_POLICIES) if (!message->h.resource.component[1]) { if (message->res_status == 0) { message->SmPolicyContextData = OpenAPI_sm_policy_context_data_parseFromJSON(item); if (!message->SmPolicyContextData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_CREATED) { message->SmPolicyDecision = OpenAPI_sm_policy_decision_parseFromJSON(item); if (!message->SmPolicyDecision) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } } else { SWITCH(message->h.resource.component[2]) CASE(OGS_SBI_RESOURCE_NAME_DELETE) if (message->res_status == 0) { message->SmPolicyDeleteData = OpenAPI_sm_policy_delete_data_parseFromJSON( item); if (!message->SmPolicyDeleteData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[2]); END break; } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NNSSF_NSSELECTION) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_NETWORK_SLICE_INFORMATION) if (message->res_status == OGS_SBI_HTTP_STATUS_OK) { message->AuthorizedNetworkSliceInfo = OpenAPI_authorized_network_slice_info_parseFromJSON( item); if (!message->AuthorizedNetworkSliceInfo) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NBSF_MANAGEMENT) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_PCF_BINDINGS) if (message->h.resource.component[1]) { SWITCH(message->h.method) CASE(OGS_SBI_HTTP_METHOD_PATCH) if (message->res_status == OGS_SBI_HTTP_STATUS_OK) { message->PcfBinding = OpenAPI_pcf_binding_parseFromJSON(item); if (!message->PcfBinding) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; CASE(OGS_SBI_HTTP_METHOD_DELETE) break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown method [%s]", message->h.method); END break; } else { SWITCH(message->h.method) CASE(OGS_SBI_HTTP_METHOD_POST) if (message->res_status == 0 || message->res_status == OGS_SBI_HTTP_STATUS_CREATED) { message->PcfBinding = OpenAPI_pcf_binding_parseFromJSON(item); if (!message->PcfBinding) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; CASE(OGS_SBI_HTTP_METHOD_GET) if (message->res_status == OGS_SBI_HTTP_STATUS_OK) { message->PcfBinding = OpenAPI_pcf_binding_parseFromJSON(item); if (!message->PcfBinding) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown method [%s]", message->h.method); END break; } DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NPCF_POLICYAUTHORIZATION) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_APP_SESSIONS) if (message->h.resource.component[1]) { if (message->h.resource.component[2]) { SWITCH(message->h.resource.component[2]) CASE(OGS_SBI_RESOURCE_NAME_DELETE) /* Nothing */ break; DEFAULT rv = OGS_ERROR; ogs_error("JSON parse error"); END } else { SWITCH(message->h.method) CASE(OGS_SBI_HTTP_METHOD_PATCH) 
message->AppSessionContextUpdateDataPatch = OpenAPI_app_session_context_update_data_patch_parseFromJSON(item); if (!message->AppSessionContextUpdateDataPatch) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("JSON parse error"); END } } else { SWITCH(message->h.method) CASE(OGS_SBI_HTTP_METHOD_POST) if (message->res_status == 0 || message->res_status == OGS_SBI_HTTP_STATUS_CREATED) { message->AppSessionContext = OpenAPI_app_session_context_parseFromJSON(item); if (!message->AppSessionContext) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown method [%s]", message->h.method); END } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NAMF_CALLBACK) SWITCH(message->h.resource.component[1]) CASE(OGS_SBI_RESOURCE_NAME_SM_CONTEXT_STATUS) message->SmContextStatusNotification = OpenAPI_sm_context_status_notification_parseFromJSON(item); if (!message->SmContextStatusNotification) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[1]); END break; CASE(OGS_SBI_SERVICE_NAME_NSMF_CALLBACK) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_N1_N2_FAILURE_NOTIFY) message->N1N2MsgTxfrFailureNotification = OpenAPI_n1_n2_msg_txfr_failure_notification_parseFromJSON( item); if (!message->N1N2MsgTxfrFailureNotification) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_SM_POLICY_NOTIFY) SWITCH(message->h.resource.component[2]) CASE(OGS_SBI_RESOURCE_NAME_UPDATE) message->SmPolicyNotification = OpenAPI_sm_policy_notification_parseFromJSON(item); if (!message->SmPolicyNotification) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_TERMINATE) message->TerminationNotification = OpenAPI_termination_notification_parseFromJSON(item); if (!message->TerminationNotification) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[2]); END break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; DEFAULT rv = OGS_ERROR; ogs_error("Not implemented API name [%s]", message->h.service.name); END } cleanup: cJSON_Delete(item); return rv; } static int parse_content( ogs_sbi_message_t *message, ogs_sbi_http_message_t *http) { ogs_assert(message); ogs_assert(http); if (message->http.content_type && !strncmp(message->http.content_type, OGS_SBI_CONTENT_MULTIPART_TYPE, strlen(OGS_SBI_CONTENT_MULTIPART_TYPE))) { return parse_multipart(message, http); } else { return parse_json(message, message->http.content_type, http->content); } } static bool build_content( ogs_sbi_http_message_t *http, ogs_sbi_message_t *message) { ogs_assert(message); ogs_assert(http); if (message->num_of_part) { ogs_expect_or_return_val(true == build_multipart(http, message), false); } else { http->content = build_json(message); if (http->content) { http->content_length = strlen(http->content); if (message->http.content_type) { ogs_sbi_header_set(http->headers, OGS_SBI_CONTENT_TYPE, message->http.content_type); } else { ogs_sbi_header_set(http->headers, OGS_SBI_CONTENT_TYPE, OGS_SBI_CONTENT_JSON_TYPE); } } } return true; } typedef struct multipart_parser_data_s { int num_of_part; struct { char *content_type; char *content_id; char *content; 
size_t content_length; } part[OGS_SBI_MAX_NUM_OF_PART]; char *header_field; } multipart_parser_data_t; static int on_header_field( multipart_parser *parser, const char *at, size_t length) { multipart_parser_data_t *data = NULL; ogs_assert(parser); data = multipart_parser_get_data(parser); ogs_assert(data); if (at && length) { if (data->header_field) ogs_free(data->header_field); data->header_field = ogs_strndup(at, length); ogs_assert(data->header_field); } return 0; } static int on_header_value( multipart_parser *parser, const char *at, size_t length) { multipart_parser_data_t *data = NULL; ogs_assert(parser); data = multipart_parser_get_data(parser); ogs_assert(data); if (at && length) { SWITCH(data->header_field) CASE(OGS_SBI_CONTENT_TYPE) if (data->part[data->num_of_part].content_type) ogs_free(data->part[data->num_of_part].content_type); data->part[data->num_of_part].content_type = ogs_strndup(at, length); ogs_assert(data->part[data->num_of_part].content_type); break; CASE(OGS_SBI_CONTENT_ID) if (data->part[data->num_of_part].content_id) ogs_free(data->part[data->num_of_part].content_id); data->part[data->num_of_part].content_id = ogs_strndup(at, length); ogs_assert(data->part[data->num_of_part].content_id); break; DEFAULT ogs_error("Unknown header field [%s]", data->header_field); END } return 0; } static int on_part_data( multipart_parser *parser, const char *at, size_t length) { multipart_parser_data_t *data = NULL; ogs_assert(parser); data = multipart_parser_get_data(parser); ogs_assert(data); if (at && length) { SWITCH(data->part[data->num_of_part].content_type) CASE(OGS_SBI_CONTENT_JSON_TYPE) CASE(OGS_SBI_CONTENT_5GNAS_TYPE) CASE(OGS_SBI_CONTENT_NGAP_TYPE) size_t offset = 0; if (data->part[data->num_of_part].content == NULL) { data->part[data->num_of_part].content_length = length; data->part[data->num_of_part].content = (char *)ogs_malloc(length + 1); ogs_assert(data->part[data->num_of_part].content); } else { offset = data->part[data->num_of_part].content_length; if ((data->part[data->num_of_part].content_length + length) > OGS_HUGE_LEN) { ogs_error("Overflow length [%d:%d]", (int)data->part[data->num_of_part].content_length, (int)length); ogs_assert_if_reached(); return 0; } data->part[data->num_of_part].content_length += length; data->part[data->num_of_part].content = (char *)ogs_realloc( data->part[data->num_of_part].content, data->part[data->num_of_part].content_length + 1); ogs_assert(data->part[data->num_of_part].content); } memcpy(data->part[data->num_of_part].content + offset, at, length); data->part[data->num_of_part].content[ data->part[data->num_of_part].content_length] = 0; break; DEFAULT ogs_log_hexdump(OGS_LOG_FATAL, (unsigned char *)at, length); ogs_error("Unknown content_type [%s]", data->part[data->num_of_part].content_type); END } return 0; } static int on_part_data_end(multipart_parser *parser) { multipart_parser_data_t *data = NULL; ogs_assert(parser); data = multipart_parser_get_data(parser); ogs_assert(data); data->num_of_part++; return 0; } static int parse_multipart( ogs_sbi_message_t *message, ogs_sbi_http_message_t *http) { char *boundary = NULL; int i; multipart_parser_settings settings; multipart_parser_data_t data; multipart_parser *parser = NULL; ogs_assert(message); ogs_assert(http); memset(&settings, 0, sizeof(settings)); settings.on_header_field = &on_header_field; settings.on_header_value = &on_header_value; settings.on_part_data = &on_part_data; settings.on_part_data_end = &on_part_data_end; for (i = 0; i < http->content_length; i++) { if 
(http->content[i] == '\r' && http->content[i+1] == '\n') break; } if (i >= http->content_length) { ogs_error("Invalid HTTP content [%d]", i); ogs_log_hexdump(OGS_LOG_ERROR, (unsigned char *)http->content, http->content_length); return OGS_ERROR; } boundary = ogs_strndup(http->content, i); ogs_assert(boundary); parser = multipart_parser_init(boundary, &settings); ogs_assert(parser); memset(&data, 0, sizeof(data)); multipart_parser_set_data(parser, &data); multipart_parser_execute(parser, http->content, http->content_length); multipart_parser_free(parser); ogs_free(boundary); for (i = 0; i < data.num_of_part; i++) { SWITCH(data.part[i].content_type) CASE(OGS_SBI_CONTENT_JSON_TYPE) parse_json(message, data.part[i].content_type, data.part[i].content); if (data.part[i].content_id) ogs_free(data.part[i].content_id); if (data.part[i].content_type) ogs_free(data.part[i].content_type); if (data.part[i].content) ogs_free(data.part[i].content); break; CASE(OGS_SBI_CONTENT_5GNAS_TYPE) CASE(OGS_SBI_CONTENT_NGAP_TYPE) http->part[http->num_of_part].content_id = data.part[i].content_id; http->part[http->num_of_part].content_type = data.part[i].content_type; http->part[http->num_of_part].pkbuf = ogs_pkbuf_alloc(NULL, data.part[i].content_length); ogs_expect_or_return_val( http->part[http->num_of_part].pkbuf, OGS_ERROR); ogs_pkbuf_put_data(http->part[http->num_of_part].pkbuf, data.part[i].content, data.part[i].content_length); message->part[message->num_of_part].content_id = http->part[http->num_of_part].content_id; message->part[message->num_of_part].content_type = http->part[http->num_of_part].content_type; message->part[message->num_of_part].pkbuf = ogs_pkbuf_copy(http->part[http->num_of_part].pkbuf); ogs_expect_or_return_val( message->part[message->num_of_part].pkbuf, OGS_ERROR); http->num_of_part++; message->num_of_part++; if (data.part[i].content) ogs_free(data.part[i].content); break; DEFAULT ogs_error("Unknown content-type[%s]", data.part[i].content_type); END } if (data.part[i].content_id) ogs_free(data.part[i].content_id); if (data.part[i].content_type) ogs_free(data.part[i].content_type); if (data.header_field) ogs_free(data.header_field); return OGS_OK; } static bool build_multipart( ogs_sbi_http_message_t *http, ogs_sbi_message_t *message) { int i; char boundary[32]; unsigned char digest[16]; char *p = NULL, *last; char *content_type = NULL; char *json = NULL; ogs_assert(message); ogs_assert(http); ogs_random(digest, 16); strcpy(boundary, "=-"); ogs_base64_encode_binary(boundary + 2, digest, 16); p = http->content = ogs_calloc(1, OGS_HUGE_LEN); ogs_expect_or_return_val(p, false); last = p + OGS_HUGE_LEN; /* First boundary */ p = ogs_slprintf(p, last, "--%s\r\n", boundary); /* Encapsulated multipart part (application/json) */ json = build_json(message); ogs_expect_or_return_val(json, false); p = ogs_slprintf(p, last, "%s\r\n\r\n%s", OGS_SBI_CONTENT_TYPE ": " OGS_SBI_CONTENT_JSON_TYPE, json); ogs_free(json); /* Add part */ for (i = 0; i < message->num_of_part; i++) { p = ogs_slprintf(p, last, "\r\n--%s\r\n", boundary); p = ogs_slprintf(p, last, "%s: %s\r\n", OGS_SBI_CONTENT_ID, message->part[i].content_id); p = ogs_slprintf(p, last, "%s: %s\r\n\r\n", OGS_SBI_CONTENT_TYPE, message->part[i].content_type); memcpy(p, message->part[i].pkbuf->data, message->part[i].pkbuf->len); p += message->part[i].pkbuf->len; } /* Last boundary */ p = ogs_slprintf(p, last, "\r\n--%s--\r\n", boundary); http->content_length = p - http->content; content_type = ogs_msprintf("%s; boundary=\"%s\"", 
OGS_SBI_CONTENT_MULTIPART_TYPE, boundary); ogs_expect_or_return_val(content_type, false); ogs_sbi_header_set(http->headers, OGS_SBI_CONTENT_TYPE, content_type); ogs_free(content_type); return true; } static void http_message_free(ogs_sbi_http_message_t *http) { int i; ogs_assert(http); if (http->params) { ogs_hash_index_t *hi; for (hi = ogs_hash_first(http->params); hi; hi = ogs_hash_next(hi)) { char *key = (char *)ogs_hash_this_key(hi); char *val = ogs_hash_this_val(hi); ogs_free(key); ogs_free(val); } ogs_hash_destroy(http->params); } if (http->headers) { ogs_hash_index_t *hi; for (hi = ogs_hash_first(http->headers); hi; hi = ogs_hash_next(hi)) { char *key = (char *)ogs_hash_this_key(hi); char *val = ogs_hash_this_val(hi); ogs_free(key); ogs_free(val); } ogs_hash_destroy(http->headers); } if (http->content) ogs_free(http->content); for (i = 0; i < http->num_of_part; i++) { if (http->part[i].pkbuf) ogs_pkbuf_free(http->part[i].pkbuf); if (http->part[i].content_id) ogs_free(http->part[i].content_id); if (http->part[i].content_type) ogs_free(http->part[i].content_type); } }
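Editorial note on the multipart callbacks above: on_header_value() and on_part_data() write into data->part[data->num_of_part], a fixed array of OGS_SBI_MAX_NUM_OF_PART slots, while on_part_data_end() increments num_of_part for every part boundary with no upper bound, so a body carrying more parts than the array holds is written past its end. The sketch below is one minimal way to bound the counter; it is illustrative only, not the upstream patch, and it assumes the multipart_parser convention that a non-zero callback return aborts parsing.

static int on_part_data_end_bounded(multipart_parser *parser)
{
    multipart_parser_data_t *data = multipart_parser_get_data(parser);
    ogs_assert(data);

    data->num_of_part++;
    if (data->num_of_part >= OGS_SBI_MAX_NUM_OF_PART) {
        /* one more part would index part[] out of bounds */
        ogs_error("Too many multipart parts (max %d)",
                OGS_SBI_MAX_NUM_OF_PART);
        return -1; /* assumed to stop the parser */
    }
    return 0;
}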
null
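For reference, build_multipart() in the code above frames the HTTP body roughly as sketched below. The boundary is the literal "=-" followed by the base64 encoding of 16 random bytes, the top-level Content-Type header carries OGS_SBI_CONTENT_MULTIPART_TYPE plus a boundary="..." parameter, and parse_multipart() recovers the boundary by copying the body up to the first CR-LF. The part ids, types and payloads shown here are placeholders, not values taken from the code.

    --<boundary>
    Content-Type: application/json   (OGS_SBI_CONTENT_JSON_TYPE)

    { ...JSON-serialised message... }
    --<boundary>
    Content-Id: <content_id of part[i]>
    Content-Type: <content_type of part[i], e.g. a 5GNAS or NGAP type>

    <raw octets of part[i], repeated for each additional part>
    --<boundary>--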
/* * Copyright (C) 2019 by Sukchan Lee <acetcom@gmail.com> * * This file is part of Open5GS. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. */ #include "ogs-sbi.h" #include "ogs-crypt.h" #include "yuarel.h" #include "contrib/multipart_parser.h" static OGS_POOL(request_pool, ogs_sbi_request_t); static OGS_POOL(response_pool, ogs_sbi_response_t); static char *build_json(ogs_sbi_message_t *message); static int parse_json(ogs_sbi_message_t *message, char *content_type, char *json); static bool build_content( ogs_sbi_http_message_t *http, ogs_sbi_message_t *message); static int parse_content( ogs_sbi_message_t *message, ogs_sbi_http_message_t *http); static bool build_multipart( ogs_sbi_http_message_t *http, ogs_sbi_message_t *message); static int parse_multipart( ogs_sbi_message_t *sbi_message, ogs_sbi_http_message_t *http); static void http_message_free(ogs_sbi_http_message_t *http); void ogs_sbi_message_init(int num_of_request_pool, int num_of_response_pool) { ogs_pool_init(&request_pool, num_of_request_pool); ogs_pool_init(&response_pool, num_of_response_pool); } void ogs_sbi_message_final(void) { ogs_pool_final(&request_pool); ogs_pool_final(&response_pool); } void ogs_sbi_message_free(ogs_sbi_message_t *message) { int i; ogs_assert(message); if (message->NFProfile) OpenAPI_nf_profile_free(message->NFProfile); if (message->ProblemDetails) OpenAPI_problem_details_free(message->ProblemDetails); if (message->PatchItemList) { OpenAPI_lnode_t *node = NULL; OpenAPI_list_for_each(message->PatchItemList, node) OpenAPI_patch_item_free(node->data); OpenAPI_list_free(message->PatchItemList); } if (message->SubscriptionData) OpenAPI_subscription_data_free(message->SubscriptionData); if (message->NotificationData) OpenAPI_notification_data_free(message->NotificationData); if (message->SearchResult) OpenAPI_search_result_free(message->SearchResult); if (message->AuthenticationInfo) OpenAPI_authentication_info_free(message->AuthenticationInfo); if (message->AuthenticationInfoRequest) OpenAPI_authentication_info_request_free( message->AuthenticationInfoRequest); if (message->AuthenticationInfoResult) OpenAPI_authentication_info_result_free( message->AuthenticationInfoResult); if (message->AuthenticationSubscription) OpenAPI_authentication_subscription_free( message->AuthenticationSubscription); if (message->UeAuthenticationCtx) OpenAPI_ue_authentication_ctx_free(message->UeAuthenticationCtx); if (message->ConfirmationData) OpenAPI_confirmation_data_free(message->ConfirmationData); if (message->ConfirmationDataResponse) OpenAPI_confirmation_data_response_free( message->ConfirmationDataResponse); if (message->AuthEvent) OpenAPI_auth_event_free(message->AuthEvent); if (message->Amf3GppAccessRegistration) OpenAPI_amf3_gpp_access_registration_free( message->Amf3GppAccessRegistration); if (message->AccessAndMobilitySubscriptionData) OpenAPI_access_and_mobility_subscription_data_free( message->AccessAndMobilitySubscriptionData); 
if (message->SmfSelectionSubscriptionData) OpenAPI_smf_selection_subscription_data_free( message->SmfSelectionSubscriptionData); if (message->UeContextInSmfData) OpenAPI_ue_context_in_smf_data_free(message->UeContextInSmfData); if (message->SmContextCreateData) OpenAPI_sm_context_create_data_free(message->SmContextCreateData); if (message->SmContextCreatedData) OpenAPI_sm_context_created_data_free(message->SmContextCreatedData); if (message->SmContextCreateError) OpenAPI_sm_context_create_error_free(message->SmContextCreateError); if (message->SmContextUpdateData) OpenAPI_sm_context_update_data_free(message->SmContextUpdateData); if (message->SmContextUpdatedData) OpenAPI_sm_context_updated_data_free(message->SmContextUpdatedData); if (message->SmContextUpdateError) OpenAPI_sm_context_update_error_free(message->SmContextUpdateError); if (message->SmContextReleaseData) OpenAPI_sm_context_release_data_free(message->SmContextReleaseData); if (message->SmContextReleasedData) OpenAPI_sm_context_released_data_free(message->SmContextReleasedData); if (message->SessionManagementSubscriptionData) OpenAPI_session_management_subscription_data_free( message->SessionManagementSubscriptionData); if (message->N1N2MessageTransferReqData) OpenAPI_n1_n2_message_transfer_req_data_free( message->N1N2MessageTransferReqData); if (message->N1N2MessageTransferRspData) OpenAPI_n1_n2_message_transfer_rsp_data_free( message->N1N2MessageTransferRspData); if (message->N1N2MsgTxfrFailureNotification) OpenAPI_n1_n2_msg_txfr_failure_notification_free( message->N1N2MsgTxfrFailureNotification); if (message->SmContextStatusNotification) OpenAPI_sm_context_status_notification_free( message->SmContextStatusNotification); if (message->PolicyAssociationRequest) OpenAPI_policy_association_request_free( message->PolicyAssociationRequest); if (message->PolicyAssociation) OpenAPI_policy_association_free(message->PolicyAssociation); if (message->AmPolicyData) OpenAPI_am_policy_data_free(message->AmPolicyData); if (message->SmPolicyContextData) OpenAPI_sm_policy_context_data_free(message->SmPolicyContextData); if (message->SmPolicyDecision) OpenAPI_sm_policy_decision_free(message->SmPolicyDecision); if (message->SmPolicyData) OpenAPI_sm_policy_data_free(message->SmPolicyData); if (message->SmPolicyDeleteData) OpenAPI_sm_policy_delete_data_free(message->SmPolicyDeleteData); if (message->AuthorizedNetworkSliceInfo) OpenAPI_authorized_network_slice_info_free( message->AuthorizedNetworkSliceInfo); if (message->PcfBinding) OpenAPI_pcf_binding_free(message->PcfBinding); if (message->AppSessionContext) OpenAPI_app_session_context_free(message->AppSessionContext); if (message->AppSessionContextUpdateDataPatch) OpenAPI_app_session_context_update_data_patch_free(message->AppSessionContextUpdateDataPatch); if (message->SmPolicyNotification) OpenAPI_sm_policy_notification_free(message->SmPolicyNotification); if (message->TerminationNotification) OpenAPI_termination_notification_free(message->TerminationNotification); for (i = 0; i < message->num_of_part; i++) { if (message->part[i].pkbuf) ogs_pkbuf_free(message->part[i].pkbuf); } } ogs_sbi_request_t *ogs_sbi_request_new(void) { ogs_sbi_request_t *request = NULL; ogs_pool_alloc(&request_pool, &request); ogs_expect_or_return_val(request, NULL); memset(request, 0, sizeof(ogs_sbi_request_t)); request->http.params = ogs_hash_make(); ogs_expect_or_return_val(request->http.params, NULL); request->http.headers = ogs_hash_make(); ogs_expect_or_return_val(request->http.headers, NULL); return request; } 
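/*
 * Editor's sketch (illustrative only, not part of the original source):
 * typical lifecycle of a request allocated from the pool above -- allocate,
 * set a header, release.  Guarded by a macro that is never defined so it
 * cannot affect a real build; OGS_SBI_ACCEPT and OGS_SBI_CONTENT_JSON_TYPE
 * are the same macros used elsewhere in this file.
 */
#ifdef OGS_SBI_MESSAGE_USAGE_EXAMPLE
static void example_request_lifecycle(void)
{
    ogs_sbi_request_t *request = ogs_sbi_request_new();
    ogs_assert(request);

    /* http.headers is the hash created by ogs_sbi_request_new() above;
     * ogs_sbi_header_set() stores a copy that ogs_sbi_request_free()
     * later releases through http_message_free(). */
    ogs_sbi_header_set(request->http.headers,
            OGS_SBI_ACCEPT, OGS_SBI_CONTENT_JSON_TYPE);

    ogs_sbi_request_free(request);
}
#endif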
ogs_sbi_response_t *ogs_sbi_response_new(void) { ogs_sbi_response_t *response = NULL; ogs_pool_alloc(&response_pool, &response); ogs_expect_or_return_val(response, NULL); memset(response, 0, sizeof(ogs_sbi_response_t)); response->http.params = ogs_hash_make(); ogs_expect_or_return_val(response->http.params, NULL); response->http.headers = ogs_hash_make(); ogs_expect_or_return_val(response->http.headers, NULL); return response; } void ogs_sbi_request_free(ogs_sbi_request_t *request) { ogs_assert(request); if (request->h.uri) ogs_free(request->h.uri); ogs_sbi_header_free(&request->h); http_message_free(&request->http); ogs_pool_free(&request_pool, request); } void ogs_sbi_response_free(ogs_sbi_response_t *response) { ogs_assert(response); if (response->h.uri) ogs_free(response->h.uri); ogs_sbi_header_free(&response->h); http_message_free(&response->http); ogs_pool_free(&response_pool, response); } ogs_sbi_request_t *ogs_sbi_build_request(ogs_sbi_message_t *message) { ogs_sbi_request_t *request = NULL; ogs_assert(message); request = ogs_sbi_request_new(); ogs_expect_or_return_val(request, NULL); ogs_expect_or_return_val(message->h.method, NULL); request->h.method = ogs_strdup(message->h.method); if (message->h.uri) { ogs_expect_or_return_val(message->h.uri, NULL); request->h.uri = ogs_strdup(message->h.uri); ogs_expect_or_return_val(request->h.uri, NULL); } else { int i; ogs_expect_or_return_val(message->h.service.name, NULL); request->h.service.name = ogs_strdup(message->h.service.name); ogs_expect_or_return_val(message->h.api.version, NULL); request->h.api.version = ogs_strdup(message->h.api.version); ogs_expect_or_return_val(request->h.api.version, NULL); ogs_expect_or_return_val(message->h.resource.component[0], NULL); for (i = 0; i < OGS_SBI_MAX_NUM_OF_RESOURCE_COMPONENT && message->h.resource.component[i]; i++) request->h.resource.component[i] = ogs_strdup( message->h.resource.component[i]); } /* URL Param */ if (message->param.nf_id) { ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_NF_ID, message->param.nf_id); } if (message->param.nf_type) { char *v = OpenAPI_nf_type_ToString(message->param.nf_type); ogs_expect_or_return_val(v, NULL); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_NF_TYPE, v); } if (message->param.requester_nf_type) { char *v = OpenAPI_nf_type_ToString(message->param.requester_nf_type); ogs_expect_or_return_val(v, NULL); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_REQUESTER_NF_TYPE, v); } if (message->param.target_nf_type) { char *v = OpenAPI_nf_type_ToString(message->param.target_nf_type); ogs_expect_or_return_val(v, NULL); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_TARGET_NF_TYPE, v); } if (message->param.limit) { char *v = ogs_msprintf("%d", message->param.limit); ogs_expect_or_return_val(v, NULL); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_LIMIT, v); ogs_free(v); } if (message->param.dnn) { ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_DNN, message->param.dnn); } if (message->param.plmn_id_presence) { OpenAPI_plmn_id_t plmn_id; plmn_id.mnc = ogs_plmn_id_mnc_string(&message->param.plmn_id); plmn_id.mcc = ogs_plmn_id_mcc_string(&message->param.plmn_id); if (plmn_id.mnc && plmn_id.mcc) { char *v = NULL; cJSON *item = NULL; item = OpenAPI_plmn_id_convertToJSON(&plmn_id); ogs_expect_or_return_val(item, NULL); if (plmn_id.mnc) ogs_free(plmn_id.mnc); if (plmn_id.mcc) ogs_free(plmn_id.mcc); v = cJSON_Print(item); ogs_expect_or_return_val(v, NULL); cJSON_Delete(item); ogs_sbi_header_set(request->http.params, 
OGS_SBI_PARAM_PLMN_ID, v); ogs_free(v); } } if (message->param.single_nssai_presence) { char *v = ogs_sbi_s_nssai_to_string(&message->param.s_nssai); ogs_expect_or_return_val(v, NULL); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_SINGLE_NSSAI, v); ogs_free(v); } if (message->param.snssai_presence) { char *v = ogs_sbi_s_nssai_to_string(&message->param.s_nssai); ogs_expect_or_return_val(v, NULL); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_SNSSAI, v); ogs_free(v); } if (message->param.plmn_id_presence) { OpenAPI_plmn_id_t plmn_id; plmn_id.mnc = ogs_plmn_id_mnc_string(&message->param.plmn_id); plmn_id.mcc = ogs_plmn_id_mcc_string(&message->param.plmn_id); if (plmn_id.mnc && plmn_id.mcc) { char *v = NULL; cJSON *item = NULL; item = OpenAPI_plmn_id_convertToJSON(&plmn_id); ogs_expect_or_return_val(item, NULL); if (plmn_id.mnc) ogs_free(plmn_id.mnc); if (plmn_id.mcc) ogs_free(plmn_id.mcc); v = cJSON_Print(item); ogs_expect_or_return_val(v, NULL); cJSON_Delete(item); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_PLMN_ID, v); ogs_free(v); } } if (message->param.slice_info_request_for_pdu_session_presence) { OpenAPI_slice_info_for_pdu_session_t SliceInfoForPDUSession; OpenAPI_snssai_t sNSSAI; char *v = NULL; cJSON *item = NULL; ogs_expect_or_return_val(message->param.s_nssai.sst, NULL); ogs_expect_or_return_val(message->param.roaming_indication, NULL); memset(&sNSSAI, 0, sizeof(sNSSAI)); sNSSAI.sst = message->param.s_nssai.sst; sNSSAI.sd = ogs_s_nssai_sd_to_string(message->param.s_nssai.sd); memset(&SliceInfoForPDUSession, 0, sizeof(SliceInfoForPDUSession)); SliceInfoForPDUSession.s_nssai = &sNSSAI; SliceInfoForPDUSession.roaming_indication = message->param.roaming_indication; item = OpenAPI_slice_info_for_pdu_session_convertToJSON( &SliceInfoForPDUSession); ogs_expect_or_return_val(item, NULL); v = cJSON_Print(item); ogs_expect_or_return_val(v, NULL); cJSON_Delete(item); ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_SLICE_INFO_REQUEST_FOR_PDU_SESSION, v); ogs_free(v); if (sNSSAI.sd) ogs_free(sNSSAI.sd); } if (message->param.ipv4addr) { ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_IPV4ADDR, message->param.ipv4addr); } if (message->param.ipv6prefix) { ogs_sbi_header_set(request->http.params, OGS_SBI_PARAM_IPV6PREFIX, message->param.ipv6prefix); } ogs_expect_or_return_val(true == build_content(&request->http, message), NULL); if (message->http.accept) { ogs_sbi_header_set(request->http.headers, OGS_SBI_ACCEPT, message->http.accept); } else { SWITCH(message->h.method) CASE(OGS_SBI_HTTP_METHOD_DELETE) ogs_sbi_header_set(request->http.headers, OGS_SBI_ACCEPT, OGS_SBI_CONTENT_PROBLEM_TYPE); break; DEFAULT ogs_sbi_header_set(request->http.headers, OGS_SBI_ACCEPT, OGS_SBI_CONTENT_JSON_TYPE "," OGS_SBI_CONTENT_PROBLEM_TYPE); break; END } if (message->http.content_encoding) ogs_sbi_header_set(request->http.headers, OGS_SBI_ACCEPT_ENCODING, message->http.content_encoding); return request; } ogs_sbi_response_t *ogs_sbi_build_response( ogs_sbi_message_t *message, int status) { ogs_sbi_response_t *response = NULL; ogs_assert(message); response = ogs_sbi_response_new(); ogs_expect_or_return_val(response, NULL); response->status = status; if (response->status != OGS_SBI_HTTP_STATUS_NO_CONTENT) { ogs_expect_or_return_val(true == build_content(&response->http, message), NULL); } if (message->http.location) { ogs_sbi_header_set(response->http.headers, "Location", message->http.location); } if (message->http.cache_control) ogs_sbi_header_set(response->http.headers, 
"Cache-Control", message->http.cache_control); return response; } int ogs_sbi_parse_request( ogs_sbi_message_t *message, ogs_sbi_request_t *request) { int rv; ogs_hash_index_t *hi; ogs_assert(request); ogs_assert(message); rv = ogs_sbi_parse_header(message, &request->h); if (rv != OGS_OK) { ogs_error("ogs_sbi_parse_header() failed"); return OGS_ERROR; } for (hi = ogs_hash_first(request->http.params); hi; hi = ogs_hash_next(hi)) { if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_NF_ID)) { message->param.nf_id = ogs_hash_this_val(hi); } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_NF_TYPE)) { message->param.nf_type = OpenAPI_nf_type_FromString(ogs_hash_this_val(hi)); } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_TARGET_NF_TYPE)) { message->param.target_nf_type = OpenAPI_nf_type_FromString(ogs_hash_this_val(hi)); } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_REQUESTER_NF_TYPE)) { message->param.requester_nf_type = OpenAPI_nf_type_FromString(ogs_hash_this_val(hi)); } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_LIMIT)) { message->param.limit = atoi(ogs_hash_this_val(hi)); } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_DNN)) { message->param.dnn = ogs_hash_this_val(hi); } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_PLMN_ID)) { char *v = NULL; cJSON *item = NULL; OpenAPI_plmn_id_t *plmn_id = NULL; v = ogs_hash_this_val(hi); if (v) { item = cJSON_Parse(v); if (item) { plmn_id = OpenAPI_plmn_id_parseFromJSON(item); if (plmn_id && plmn_id->mnc && plmn_id->mcc) { ogs_plmn_id_build(&message->param.plmn_id, atoi(plmn_id->mcc), atoi(plmn_id->mnc), strlen(plmn_id->mnc)); message->param.plmn_id_presence = true; OpenAPI_plmn_id_free(plmn_id); } cJSON_Delete(item); } } } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_SINGLE_NSSAI)) { char *v = ogs_hash_this_val(hi); if (v) { bool rc = ogs_sbi_s_nssai_from_string( &message->param.s_nssai, v); if (rc == true) message->param.single_nssai_presence = true; } } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_SNSSAI)) { char *v = ogs_hash_this_val(hi); if (v) { bool rc = ogs_sbi_s_nssai_from_string( &message->param.s_nssai, v); if (rc == true) message->param.snssai_presence = true; } } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_SLICE_INFO_REQUEST_FOR_PDU_SESSION)) { char *v = NULL; cJSON *item = NULL; OpenAPI_slice_info_for_pdu_session_t *SliceInfoForPduSession = NULL; v = ogs_hash_this_val(hi); if (v) { item = cJSON_Parse(v); if (item) { SliceInfoForPduSession = OpenAPI_slice_info_for_pdu_session_parseFromJSON(item); if (SliceInfoForPduSession) { OpenAPI_snssai_t *s_nssai = SliceInfoForPduSession->s_nssai; if (s_nssai) { message->param.s_nssai.sst = s_nssai->sst; message->param.s_nssai.sd = ogs_s_nssai_sd_from_string(s_nssai->sd); } message->param.roaming_indication = SliceInfoForPduSession->roaming_indication; message->param. 
slice_info_request_for_pdu_session_presence = true; OpenAPI_slice_info_for_pdu_session_free( SliceInfoForPduSession); } cJSON_Delete(item); } } } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_IPV4ADDR)) { message->param.ipv4addr = ogs_hash_this_val(hi); } else if (!strcmp(ogs_hash_this_key(hi), OGS_SBI_PARAM_IPV6PREFIX)) { message->param.ipv6prefix = ogs_hash_this_val(hi); } } for (hi = ogs_hash_first(request->http.headers); hi; hi = ogs_hash_next(hi)) { if (!ogs_strcasecmp(ogs_hash_this_key(hi), OGS_SBI_ACCEPT_ENCODING)) { message->http.content_encoding = ogs_hash_this_val(hi); } else if (!ogs_strcasecmp( ogs_hash_this_key(hi), OGS_SBI_CONTENT_TYPE)) { message->http.content_type = ogs_hash_this_val(hi); } else if (!ogs_strcasecmp(ogs_hash_this_key(hi), OGS_SBI_ACCEPT)) { message->http.accept = ogs_hash_this_val(hi); } } if (parse_content(message, &request->http) != OGS_OK) { ogs_error("parse_content() failed"); return OGS_ERROR; } return OGS_OK; } int ogs_sbi_parse_response( ogs_sbi_message_t *message, ogs_sbi_response_t *response) { int rv; ogs_hash_index_t *hi; ogs_assert(response); ogs_assert(message); rv = ogs_sbi_parse_header(message, &response->h); if (rv != OGS_OK) { ogs_error("ogs_sbi_parse_header() failed"); return OGS_ERROR; } for (hi = ogs_hash_first(response->http.headers); hi; hi = ogs_hash_next(hi)) { if (!ogs_strcasecmp(ogs_hash_this_key(hi), OGS_SBI_CONTENT_TYPE)) { message->http.content_type = ogs_hash_this_val(hi); } else if (!ogs_strcasecmp(ogs_hash_this_key(hi), OGS_SBI_LOCATION)) { message->http.location = ogs_hash_this_val(hi); } } message->res_status = response->status; if (parse_content(message, &response->http) != OGS_OK) { ogs_error("parse_content() failed"); return OGS_ERROR; } return OGS_OK; } ogs_pkbuf_t *ogs_sbi_find_part_by_content_id( ogs_sbi_message_t *message, char *content_id) { int i; ogs_assert(message); ogs_assert(content_id); for (i = 0; i < message->num_of_part; i++) { if (message->part[i].content_id && strcmp(message->part[i].content_id, content_id) == 0) return message->part[i].pkbuf; } return NULL; } int ogs_sbi_parse_header(ogs_sbi_message_t *message, ogs_sbi_header_t *header) { struct yuarel yuarel; char *saveptr = NULL; char *uri = NULL, *p = NULL;; char *component = NULL; int i = 0; ogs_assert(message); ogs_assert(header); memset(message, 0, sizeof(*message)); message->h.method = header->method; message->h.uri = header->uri; ogs_assert(message->h.uri); uri = ogs_strdup(header->uri); ogs_assert(uri); p = uri; if (p[0] != '/') { int rv = yuarel_parse(&yuarel, p); if (rv != OGS_OK) { ogs_error("yuarel_parse() failed"); ogs_free(uri); return OGS_ERROR; } p = yuarel.path; } header->service.name = ogs_sbi_parse_uri(p, "/", &saveptr); if (!header->service.name) { ogs_error("ogs_sbi_parse_uri() failed"); ogs_free(uri); return OGS_ERROR; } message->h.service.name = header->service.name; header->api.version = ogs_sbi_parse_uri(NULL, "/", &saveptr); if (!header->api.version) { ogs_error("ogs_sbi_parse_uri() failed"); ogs_free(uri); return OGS_ERROR; } message->h.api.version = header->api.version; for (i = 0; i < OGS_SBI_MAX_NUM_OF_RESOURCE_COMPONENT && (component = ogs_sbi_parse_uri(NULL, "/", &saveptr)) != NULL; i++) { header->resource.component[i] = component; message->h.resource.component[i] = component; } ogs_free(uri); return OGS_OK; } void ogs_sbi_header_free(ogs_sbi_header_t *h) { int i; ogs_assert(h); if (h->method) ogs_free(h->method); if (h->service.name) ogs_free(h->service.name); if (h->api.version) ogs_free(h->api.version); for (i = 
0; i < OGS_SBI_MAX_NUM_OF_RESOURCE_COMPONENT && h->resource.component[i]; i++) ogs_free(h->resource.component[i]); } static char *build_json(ogs_sbi_message_t *message) { char *content = NULL; cJSON *item = NULL; ogs_assert(message); if (message->ProblemDetails) { item = OpenAPI_problem_details_convertToJSON(message->ProblemDetails); ogs_assert(item); } else if (message->NFProfile) { item = OpenAPI_nf_profile_convertToJSON(message->NFProfile); ogs_assert(item); } else if (message->PatchItemList) { OpenAPI_lnode_t *node = NULL; item = cJSON_CreateArray(); ogs_assert(item); OpenAPI_list_for_each(message->PatchItemList, node) { cJSON *patchItem = OpenAPI_patch_item_convertToJSON(node->data); ogs_assert(patchItem); cJSON_AddItemToArray(item, patchItem); } } else if (message->SubscriptionData) { item = OpenAPI_subscription_data_convertToJSON( message->SubscriptionData); ogs_assert(item); } else if (message->NotificationData) { item = OpenAPI_notification_data_convertToJSON( message->NotificationData); ogs_assert(item); } else if (message->SearchResult) { item = OpenAPI_search_result_convertToJSON(message->SearchResult); ogs_assert(item); } else if (message->links) { item = ogs_sbi_links_convertToJSON(message->links); ogs_assert(item); } else if (message->AuthenticationInfo) { item = OpenAPI_authentication_info_convertToJSON( message->AuthenticationInfo); ogs_assert(item); } else if (message->AuthenticationInfoRequest) { item = OpenAPI_authentication_info_request_convertToJSON( message->AuthenticationInfoRequest); ogs_assert(item); } else if (message->AuthenticationInfoResult) { item = OpenAPI_authentication_info_result_convertToJSON( message->AuthenticationInfoResult); ogs_assert(item); } else if (message->AuthenticationSubscription) { item = OpenAPI_authentication_subscription_convertToJSON( message->AuthenticationSubscription); ogs_assert(item); } else if (message->UeAuthenticationCtx) { item = OpenAPI_ue_authentication_ctx_convertToJSON( message->UeAuthenticationCtx); ogs_assert(item); } else if (message->ConfirmationData) { item = OpenAPI_confirmation_data_convertToJSON( message->ConfirmationData); ogs_assert(item); } else if (message->ConfirmationDataResponse) { item = OpenAPI_confirmation_data_response_convertToJSON( message->ConfirmationDataResponse); ogs_assert(item); } else if (message->AuthEvent) { item = OpenAPI_auth_event_convertToJSON(message->AuthEvent); ogs_assert(item); } else if (message->Amf3GppAccessRegistration) { item = OpenAPI_amf3_gpp_access_registration_convertToJSON( message->Amf3GppAccessRegistration); ogs_assert(item); } else if (message->AccessAndMobilitySubscriptionData) { item = OpenAPI_access_and_mobility_subscription_data_convertToJSON( message->AccessAndMobilitySubscriptionData); ogs_assert(item); } else if (message->SmfSelectionSubscriptionData) { item = OpenAPI_smf_selection_subscription_data_convertToJSON( message->SmfSelectionSubscriptionData); ogs_assert(item); } else if (message->UeContextInSmfData) { item = OpenAPI_ue_context_in_smf_data_convertToJSON( message->UeContextInSmfData); ogs_assert(item); } else if (message->SmContextCreateData) { item = OpenAPI_sm_context_create_data_convertToJSON( message->SmContextCreateData); ogs_assert(item); } else if (message->SmContextCreatedData) { item = OpenAPI_sm_context_created_data_convertToJSON( message->SmContextCreatedData); ogs_assert(item); } else if (message->SmContextCreateError) { item = OpenAPI_sm_context_create_error_convertToJSON( message->SmContextCreateError); ogs_assert(item); } else if 
(message->SmContextUpdateData) { item = OpenAPI_sm_context_update_data_convertToJSON( message->SmContextUpdateData); ogs_assert(item); } else if (message->SmContextUpdatedData) { item = OpenAPI_sm_context_updated_data_convertToJSON( message->SmContextUpdatedData); ogs_assert(item); } else if (message->SmContextUpdateError) { item = OpenAPI_sm_context_update_error_convertToJSON( message->SmContextUpdateError); ogs_assert(item); } else if (message->SmContextReleaseData) { item = OpenAPI_sm_context_release_data_convertToJSON( message->SmContextReleaseData); ogs_assert(item); } else if (message->SmContextReleasedData) { item = OpenAPI_sm_context_released_data_convertToJSON( message->SmContextReleasedData); ogs_assert(item); } else if (message->SessionManagementSubscriptionData) { item = OpenAPI_session_management_subscription_data_convertToJSON( message->SessionManagementSubscriptionData); ogs_assert(item); } else if (message->N1N2MessageTransferReqData) { item = OpenAPI_n1_n2_message_transfer_req_data_convertToJSON( message->N1N2MessageTransferReqData); ogs_assert(item); } else if (message->N1N2MessageTransferRspData) { item = OpenAPI_n1_n2_message_transfer_rsp_data_convertToJSON( message->N1N2MessageTransferRspData); ogs_assert(item); } else if (message->N1N2MsgTxfrFailureNotification) { item = OpenAPI_n1_n2_msg_txfr_failure_notification_convertToJSON( message->N1N2MsgTxfrFailureNotification); ogs_assert(item); } else if (message->SmContextStatusNotification) { item = OpenAPI_sm_context_status_notification_convertToJSON( message->SmContextStatusNotification); ogs_assert(item); } else if (message->PolicyAssociationRequest) { item = OpenAPI_policy_association_request_convertToJSON( message->PolicyAssociationRequest); ogs_assert(item); } else if (message->PolicyAssociation) { item = OpenAPI_policy_association_convertToJSON( message->PolicyAssociation); ogs_assert(item); } else if (message->AmPolicyData) { item = OpenAPI_am_policy_data_convertToJSON(message->AmPolicyData); ogs_assert(item); } else if (message->SmPolicyContextData) { item = OpenAPI_sm_policy_context_data_convertToJSON( message->SmPolicyContextData); ogs_assert(item); } else if (message->SmPolicyDecision) { item = OpenAPI_sm_policy_decision_convertToJSON( message->SmPolicyDecision); ogs_assert(item); } else if (message->SmPolicyData) { item = OpenAPI_sm_policy_data_convertToJSON(message->SmPolicyData); ogs_assert(item); } else if (message->SmPolicyDeleteData) { item = OpenAPI_sm_policy_delete_data_convertToJSON( message->SmPolicyDeleteData); ogs_assert(item); } else if (message->AuthorizedNetworkSliceInfo) { item = OpenAPI_authorized_network_slice_info_convertToJSON( message->AuthorizedNetworkSliceInfo); ogs_assert(item); } else if (message->PcfBinding) { item = OpenAPI_pcf_binding_convertToJSON(message->PcfBinding); ogs_assert(item); } else if (message->AppSessionContext) { item = OpenAPI_app_session_context_convertToJSON( message->AppSessionContext); ogs_assert(item); } else if (message->AppSessionContextUpdateDataPatch) { item = OpenAPI_app_session_context_update_data_patch_convertToJSON( message->AppSessionContextUpdateDataPatch); ogs_assert(item); } else if (message->SmPolicyNotification) { item = OpenAPI_sm_policy_notification_convertToJSON( message->SmPolicyNotification); ogs_assert(item); } else if (message->TerminationNotification) { item = OpenAPI_termination_notification_convertToJSON( message->TerminationNotification); ogs_assert(item); } if (item) { content = cJSON_Print(item); ogs_assert(content); 
ogs_log_print(OGS_LOG_TRACE, "%s", content); cJSON_Delete(item); } return content; } static int parse_json(ogs_sbi_message_t *message, char *content_type, char *json) { int rv = OGS_OK; cJSON *item = NULL; ogs_assert(message); if (!json) return OGS_OK; if (!content_type) { ogs_error("No Content-type"); return OGS_ERROR; } ogs_log_print(OGS_LOG_TRACE, "%s", json); item = cJSON_Parse(json); if (!item) { ogs_error("JSON parse error [%s]", json); return OGS_ERROR; } if (content_type && !strncmp(content_type, OGS_SBI_CONTENT_PROBLEM_TYPE, strlen(OGS_SBI_CONTENT_PROBLEM_TYPE))) { message->ProblemDetails = OpenAPI_problem_details_parseFromJSON(item); } else if (content_type && !strncmp(content_type, OGS_SBI_CONTENT_PATCH_TYPE, strlen(OGS_SBI_CONTENT_PATCH_TYPE))) { if (item) { OpenAPI_patch_item_t *patch_item = NULL; cJSON *patchJSON = NULL; message->PatchItemList = OpenAPI_list_create(); cJSON_ArrayForEach(patchJSON, item) { if (!cJSON_IsObject(patchJSON)) { rv = OGS_ERROR; ogs_error("Unknown JSON"); goto cleanup; } patch_item = OpenAPI_patch_item_parseFromJSON(patchJSON); OpenAPI_list_add(message->PatchItemList, patch_item); } } } else { SWITCH(message->h.service.name) CASE(OGS_SBI_SERVICE_NAME_NNRF_NFM) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_NF_INSTANCES) message->NFProfile = OpenAPI_nf_profile_parseFromJSON(item); if (!message->NFProfile) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_SUBSCRIPTIONS) message->SubscriptionData = OpenAPI_subscription_data_parseFromJSON(item); if (!message->SubscriptionData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_NF_STATUS_NOTIFY) message->NotificationData = OpenAPI_notification_data_parseFromJSON(item); if (!message->NotificationData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NNRF_DISC) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_NF_INSTANCES) message->SearchResult = OpenAPI_search_result_parseFromJSON(item); if (!message->SearchResult) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NAUSF_AUTH) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_UE_AUTHENTICATIONS) SWITCH(message->h.method) CASE(OGS_SBI_HTTP_METHOD_POST) if (message->res_status == 0) { message->AuthenticationInfo = OpenAPI_authentication_info_parseFromJSON(item); if (!message->AuthenticationInfo) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_CREATED) { message->UeAuthenticationCtx = OpenAPI_ue_authentication_ctx_parseFromJSON(item); if (!message->UeAuthenticationCtx) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; CASE(OGS_SBI_HTTP_METHOD_PUT) if (message->res_status == 0) { message->ConfirmationData = OpenAPI_confirmation_data_parseFromJSON(item); if (!message->ConfirmationData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_OK) { message->ConfirmationDataResponse = OpenAPI_confirmation_data_response_parseFromJSON( item); if (!message->ConfirmationDataResponse) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown method [%s]", message->h.method); END break; DEFAULT rv = 
OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NUDM_UEAU) SWITCH(message->h.resource.component[1]) CASE(OGS_SBI_RESOURCE_NAME_SECURITY_INFORMATION) SWITCH(message->h.resource.component[2]) CASE(OGS_SBI_RESOURCE_NAME_GENERATE_AUTH_DATA) if (message->res_status == 0) { message->AuthenticationInfoRequest = OpenAPI_authentication_info_request_parseFromJSON( item); if (!message->AuthenticationInfoRequest) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_OK) { message->AuthenticationInfoResult = OpenAPI_authentication_info_result_parseFromJSON( item); if (!message->AuthenticationInfoResult) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[2]); END break; CASE(OGS_SBI_RESOURCE_NAME_AUTH_EVENTS) message->AuthEvent = OpenAPI_auth_event_parseFromJSON(item); if (!message->AuthEvent) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[1]); END break; CASE(OGS_SBI_SERVICE_NAME_NUDM_UECM) SWITCH(message->h.resource.component[1]) CASE(OGS_SBI_RESOURCE_NAME_REGISTRATIONS) SWITCH(message->h.resource.component[2]) CASE(OGS_SBI_RESOURCE_NAME_AMF_3GPP_ACCESS) message->Amf3GppAccessRegistration = OpenAPI_amf3_gpp_access_registration_parseFromJSON( item); if (!message->Amf3GppAccessRegistration) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[2]); END break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[1]); END break; CASE(OGS_SBI_SERVICE_NAME_NUDM_SDM) SWITCH(message->h.resource.component[1]) CASE(OGS_SBI_RESOURCE_NAME_AM_DATA) message->AccessAndMobilitySubscriptionData = OpenAPI_access_and_mobility_subscription_data_parseFromJSON( item); if (!message->AccessAndMobilitySubscriptionData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_SMF_SELECT_DATA) message->SmfSelectionSubscriptionData = OpenAPI_smf_selection_subscription_data_parseFromJSON(item); if (!message->SmfSelectionSubscriptionData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_UE_CONTEXT_IN_SMF_DATA) message->UeContextInSmfData = OpenAPI_ue_context_in_smf_data_parseFromJSON(item); if (!message->UeContextInSmfData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_SM_DATA) message->SessionManagementSubscriptionData = OpenAPI_session_management_subscription_data_parseFromJSON( item); if (!message->SessionManagementSubscriptionData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[1]); END break; CASE(OGS_SBI_SERVICE_NAME_NUDR_DR) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_SUBSCRIPTION_DATA) SWITCH(message->h.resource.component[2]) CASE(OGS_SBI_RESOURCE_NAME_AUTHENTICATION_DATA) SWITCH(message->h.resource.component[3]) CASE(OGS_SBI_RESOURCE_NAME_AUTHENTICATION_SUBSCRIPTION) if (message->res_status == OGS_SBI_HTTP_STATUS_OK) { message->AuthenticationSubscription = OpenAPI_authentication_subscription_parseFromJSON(item); if (!message->AuthenticationSubscription) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; 
CASE(OGS_SBI_RESOURCE_NAME_AUTHENTICATION_STATUS) message->AuthEvent = OpenAPI_auth_event_parseFromJSON(item); if (!message->AuthEvent) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[3]); END break; CASE(OGS_SBI_RESOURCE_NAME_CONTEXT_DATA) message->Amf3GppAccessRegistration = OpenAPI_amf3_gpp_access_registration_parseFromJSON( item); if (!message->Amf3GppAccessRegistration) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT SWITCH(message->h.resource.component[3]) CASE(OGS_SBI_RESOURCE_NAME_PROVISIONED_DATA) SWITCH(message->h.resource.component[4]) CASE(OGS_SBI_RESOURCE_NAME_AM_DATA) message->AccessAndMobilitySubscriptionData = OpenAPI_access_and_mobility_subscription_data_parseFromJSON(item); if (!message->AccessAndMobilitySubscriptionData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_SMF_SELECTION_SUBSCRIPTION_DATA) message->SmfSelectionSubscriptionData = OpenAPI_smf_selection_subscription_data_parseFromJSON(item); if (!message->SmfSelectionSubscriptionData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_UE_CONTEXT_IN_SMF_DATA) message->UeContextInSmfData = OpenAPI_ue_context_in_smf_data_parseFromJSON( item); if (!message->UeContextInSmfData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_SM_DATA) message->SessionManagementSubscriptionData = OpenAPI_session_management_subscription_data_parseFromJSON(item); if (!message->SessionManagementSubscriptionData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[4]); END break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[3]); END END break; CASE(OGS_SBI_RESOURCE_NAME_POLICY_DATA) SWITCH(message->h.resource.component[1]) CASE(OGS_SBI_RESOURCE_NAME_UES) SWITCH(message->h.resource.component[3]) CASE(OGS_SBI_RESOURCE_NAME_AM_DATA) message->AmPolicyData = OpenAPI_am_policy_data_parseFromJSON(item); if (!message->AmPolicyData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_SM_DATA) message->SmPolicyData = OpenAPI_sm_policy_data_parseFromJSON(item); if (!message->SmPolicyData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[3]); END break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[1]); END break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NSMF_PDUSESSION) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_SM_CONTEXTS) SWITCH(message->h.resource.component[2]) CASE(OGS_SBI_RESOURCE_NAME_MODIFY) if (message->res_status == 0) { message->SmContextUpdateData = OpenAPI_sm_context_update_data_parseFromJSON(item); if (!message->SmContextUpdateData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_OK) { message->SmContextUpdatedData = OpenAPI_sm_context_updated_data_parseFromJSON(item); if (!message->SmContextUpdatedData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_BAD_REQUEST || message->res_status == OGS_SBI_HTTP_STATUS_FORBIDDEN || message->res_status == 
OGS_SBI_HTTP_STATUS_NOT_FOUND || message->res_status == OGS_SBI_HTTP_STATUS_INTERNAL_SERVER_ERROR || message->res_status == OGS_SBI_HTTP_STATUS_SERVICE_UNAVAILABLE || message->res_status == OGS_SBI_HTTP_STATUS_GATEWAY_TIMEOUT) { message->SmContextUpdateError = OpenAPI_sm_context_update_error_parseFromJSON(item); if (!message->SmContextUpdateError) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; CASE(OGS_SBI_RESOURCE_NAME_RELEASE) if (message->res_status == 0) { message->SmContextReleaseData = OpenAPI_sm_context_release_data_parseFromJSON(item); if (!message->SmContextReleaseData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_NO_CONTENT) { } else if (message->res_status == OGS_SBI_HTTP_STATUS_OK) { message->SmContextReleasedData = OpenAPI_sm_context_released_data_parseFromJSON( item); if (!message->SmContextReleasedData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; DEFAULT if (message->res_status == 0) { message->SmContextCreateData = OpenAPI_sm_context_create_data_parseFromJSON(item); if (!message->SmContextCreateData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_CREATED) { message->SmContextCreatedData = OpenAPI_sm_context_created_data_parseFromJSON(item); if (!message->SmContextCreatedData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_BAD_REQUEST || message->res_status == OGS_SBI_HTTP_STATUS_FORBIDDEN || message->res_status == OGS_SBI_HTTP_STATUS_NOT_FOUND || message->res_status == OGS_SBI_HTTP_STATUS_INTERNAL_SERVER_ERROR || message->res_status == OGS_SBI_HTTP_STATUS_SERVICE_UNAVAILABLE || message->res_status == OGS_SBI_HTTP_STATUS_GATEWAY_TIMEOUT) { message->SmContextCreateError = OpenAPI_sm_context_create_error_parseFromJSON(item); if (!message->SmContextCreateError) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } END break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NAMF_COMM) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_UE_CONTEXTS) SWITCH(message->h.resource.component[2]) CASE(OGS_SBI_RESOURCE_NAME_N1_N2_MESSAGES) if (message->res_status == 0) { message->N1N2MessageTransferReqData = OpenAPI_n1_n2_message_transfer_req_data_parseFromJSON(item); if (!message->N1N2MessageTransferReqData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_OK || message->res_status == OGS_SBI_HTTP_STATUS_ACCEPTED) { message->N1N2MessageTransferRspData = OpenAPI_n1_n2_message_transfer_rsp_data_parseFromJSON(item); if (!message->N1N2MessageTransferRspData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[2]); END break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NPCF_AM_POLICY_CONTROL) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_POLICIES) if (message->res_status == 0) { message->PolicyAssociationRequest = OpenAPI_policy_association_request_parseFromJSON( item); if (!message->PolicyAssociationRequest) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_CREATED) { message->PolicyAssociation = OpenAPI_policy_association_parseFromJSON(item); if 
(!message->PolicyAssociation) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NPCF_SMPOLICYCONTROL) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_SM_POLICIES) if (!message->h.resource.component[1]) { if (message->res_status == 0) { message->SmPolicyContextData = OpenAPI_sm_policy_context_data_parseFromJSON(item); if (!message->SmPolicyContextData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } else if (message->res_status == OGS_SBI_HTTP_STATUS_CREATED) { message->SmPolicyDecision = OpenAPI_sm_policy_decision_parseFromJSON(item); if (!message->SmPolicyDecision) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } } else { SWITCH(message->h.resource.component[2]) CASE(OGS_SBI_RESOURCE_NAME_DELETE) if (message->res_status == 0) { message->SmPolicyDeleteData = OpenAPI_sm_policy_delete_data_parseFromJSON( item); if (!message->SmPolicyDeleteData) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[2]); END break; } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NNSSF_NSSELECTION) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_NETWORK_SLICE_INFORMATION) if (message->res_status == OGS_SBI_HTTP_STATUS_OK) { message->AuthorizedNetworkSliceInfo = OpenAPI_authorized_network_slice_info_parseFromJSON( item); if (!message->AuthorizedNetworkSliceInfo) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NBSF_MANAGEMENT) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_PCF_BINDINGS) if (message->h.resource.component[1]) { SWITCH(message->h.method) CASE(OGS_SBI_HTTP_METHOD_PATCH) if (message->res_status == OGS_SBI_HTTP_STATUS_OK) { message->PcfBinding = OpenAPI_pcf_binding_parseFromJSON(item); if (!message->PcfBinding) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; CASE(OGS_SBI_HTTP_METHOD_DELETE) break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown method [%s]", message->h.method); END break; } else { SWITCH(message->h.method) CASE(OGS_SBI_HTTP_METHOD_POST) if (message->res_status == 0 || message->res_status == OGS_SBI_HTTP_STATUS_CREATED) { message->PcfBinding = OpenAPI_pcf_binding_parseFromJSON(item); if (!message->PcfBinding) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; CASE(OGS_SBI_HTTP_METHOD_GET) if (message->res_status == OGS_SBI_HTTP_STATUS_OK) { message->PcfBinding = OpenAPI_pcf_binding_parseFromJSON(item); if (!message->PcfBinding) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown method [%s]", message->h.method); END break; } DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NPCF_POLICYAUTHORIZATION) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_APP_SESSIONS) if (message->h.resource.component[1]) { if (message->h.resource.component[2]) { SWITCH(message->h.resource.component[2]) CASE(OGS_SBI_RESOURCE_NAME_DELETE) /* Nothing */ break; DEFAULT rv = OGS_ERROR; ogs_error("JSON parse error"); END } else { SWITCH(message->h.method) CASE(OGS_SBI_HTTP_METHOD_PATCH) 
message->AppSessionContextUpdateDataPatch = OpenAPI_app_session_context_update_data_patch_parseFromJSON(item); if (!message->AppSessionContextUpdateDataPatch) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("JSON parse error"); END } } else { SWITCH(message->h.method) CASE(OGS_SBI_HTTP_METHOD_POST) if (message->res_status == 0 || message->res_status == OGS_SBI_HTTP_STATUS_CREATED) { message->AppSessionContext = OpenAPI_app_session_context_parseFromJSON(item); if (!message->AppSessionContext) { rv = OGS_ERROR; ogs_error("JSON parse error"); } } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown method [%s]", message->h.method); END } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; CASE(OGS_SBI_SERVICE_NAME_NAMF_CALLBACK) SWITCH(message->h.resource.component[1]) CASE(OGS_SBI_RESOURCE_NAME_SM_CONTEXT_STATUS) message->SmContextStatusNotification = OpenAPI_sm_context_status_notification_parseFromJSON(item); if (!message->SmContextStatusNotification) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[1]); END break; CASE(OGS_SBI_SERVICE_NAME_NSMF_CALLBACK) SWITCH(message->h.resource.component[0]) CASE(OGS_SBI_RESOURCE_NAME_N1_N2_FAILURE_NOTIFY) message->N1N2MsgTxfrFailureNotification = OpenAPI_n1_n2_msg_txfr_failure_notification_parseFromJSON( item); if (!message->N1N2MsgTxfrFailureNotification) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_SM_POLICY_NOTIFY) SWITCH(message->h.resource.component[2]) CASE(OGS_SBI_RESOURCE_NAME_UPDATE) message->SmPolicyNotification = OpenAPI_sm_policy_notification_parseFromJSON(item); if (!message->SmPolicyNotification) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; CASE(OGS_SBI_RESOURCE_NAME_TERMINATE) message->TerminationNotification = OpenAPI_termination_notification_parseFromJSON(item); if (!message->TerminationNotification) { rv = OGS_ERROR; ogs_error("JSON parse error"); } break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[2]); END break; DEFAULT rv = OGS_ERROR; ogs_error("Unknown resource name [%s]", message->h.resource.component[0]); END break; DEFAULT rv = OGS_ERROR; ogs_error("Not implemented API name [%s]", message->h.service.name); END } cleanup: cJSON_Delete(item); return rv; } static int parse_content( ogs_sbi_message_t *message, ogs_sbi_http_message_t *http) { ogs_assert(message); ogs_assert(http); if (message->http.content_type && !strncmp(message->http.content_type, OGS_SBI_CONTENT_MULTIPART_TYPE, strlen(OGS_SBI_CONTENT_MULTIPART_TYPE))) { return parse_multipart(message, http); } else { return parse_json(message, message->http.content_type, http->content); } } static bool build_content( ogs_sbi_http_message_t *http, ogs_sbi_message_t *message) { ogs_assert(message); ogs_assert(http); if (message->num_of_part) { ogs_expect_or_return_val(true == build_multipart(http, message), false); } else { http->content = build_json(message); if (http->content) { http->content_length = strlen(http->content); if (message->http.content_type) { ogs_sbi_header_set(http->headers, OGS_SBI_CONTENT_TYPE, message->http.content_type); } else { ogs_sbi_header_set(http->headers, OGS_SBI_CONTENT_TYPE, OGS_SBI_CONTENT_JSON_TYPE); } } } return true; } typedef struct multipart_parser_data_s { int num_of_part; struct { char *content_type; char *content_id; char *content; 
size_t content_length; } part[OGS_SBI_MAX_NUM_OF_PART]; char *header_field; } multipart_parser_data_t; static int on_header_field( multipart_parser *parser, const char *at, size_t length) { multipart_parser_data_t *data = NULL; ogs_assert(parser); data = multipart_parser_get_data(parser); ogs_assert(data); if (at && length) { if (data->header_field) ogs_free(data->header_field); data->header_field = ogs_strndup(at, length); ogs_assert(data->header_field); } return 0; } static int on_header_value( multipart_parser *parser, const char *at, size_t length) { multipart_parser_data_t *data = NULL; ogs_assert(parser); data = multipart_parser_get_data(parser); ogs_assert(data); if (data->num_of_part < OGS_SBI_MAX_NUM_OF_PART && at && length) { SWITCH(data->header_field) CASE(OGS_SBI_CONTENT_TYPE) ogs_assert(data->part[data->num_of_part].content_type == NULL); data->part[data->num_of_part].content_type = ogs_strndup(at, length); ogs_assert(data->part[data->num_of_part].content_type); break; CASE(OGS_SBI_CONTENT_ID) ogs_assert(data->part[data->num_of_part].content_id == NULL); data->part[data->num_of_part].content_id = ogs_strndup(at, length); ogs_assert(data->part[data->num_of_part].content_id); break; DEFAULT ogs_error("Unknown header field [%s]", data->header_field); END } return 0; } static int on_part_data( multipart_parser *parser, const char *at, size_t length) { multipart_parser_data_t *data = NULL; ogs_assert(parser); data = multipart_parser_get_data(parser); ogs_assert(data); if (data->num_of_part < OGS_SBI_MAX_NUM_OF_PART && at && length) { SWITCH(data->part[data->num_of_part].content_type) CASE(OGS_SBI_CONTENT_JSON_TYPE) CASE(OGS_SBI_CONTENT_5GNAS_TYPE) CASE(OGS_SBI_CONTENT_NGAP_TYPE) size_t offset = 0; if (data->part[data->num_of_part].content == NULL) { data->part[data->num_of_part].content_length = length; data->part[data->num_of_part].content = (char *)ogs_malloc(length + 1); ogs_assert(data->part[data->num_of_part].content); } else { offset = data->part[data->num_of_part].content_length; if ((data->part[data->num_of_part].content_length + length) > OGS_HUGE_LEN) { ogs_error("Overflow length [%d:%d]", (int)data->part[data->num_of_part].content_length, (int)length); ogs_assert_if_reached(); return 0; } data->part[data->num_of_part].content_length += length; data->part[data->num_of_part].content = (char *)ogs_realloc( data->part[data->num_of_part].content, data->part[data->num_of_part].content_length + 1); ogs_assert(data->part[data->num_of_part].content); } memcpy(data->part[data->num_of_part].content + offset, at, length); data->part[data->num_of_part].content[ data->part[data->num_of_part].content_length] = 0; break; DEFAULT ogs_error("Unknown content_type [%s]", data->part[data->num_of_part].content_type); ogs_log_hexdump(OGS_LOG_ERROR, (unsigned char *)at, length); END } return 0; } static int on_part_data_end(multipart_parser *parser) { multipart_parser_data_t *data = NULL; ogs_assert(parser); data = multipart_parser_get_data(parser); ogs_assert(data); if (data->num_of_part < OGS_SBI_MAX_NUM_OF_PART) { data->num_of_part++; } return 0; } static int parse_multipart( ogs_sbi_message_t *message, ogs_sbi_http_message_t *http) { char *boundary = NULL; int i; multipart_parser_settings settings; multipart_parser_data_t data; multipart_parser *parser = NULL; ogs_assert(message); ogs_assert(http); memset(&settings, 0, sizeof(settings)); settings.on_header_field = &on_header_field; settings.on_header_value = &on_header_value; settings.on_part_data = &on_part_data; settings.on_part_data_end 
= &on_part_data_end; for (i = 0; i < http->content_length; i++) { if (http->content[i] == '\r' && http->content[i+1] == '\n') break; } if (i >= http->content_length) { ogs_error("Invalid HTTP content [%d]", i); ogs_log_hexdump(OGS_LOG_ERROR, (unsigned char *)http->content, http->content_length); return OGS_ERROR; } boundary = ogs_strndup(http->content, i); ogs_assert(boundary); parser = multipart_parser_init(boundary, &settings); ogs_assert(parser); memset(&data, 0, sizeof(data)); multipart_parser_set_data(parser, &data); multipart_parser_execute(parser, http->content, http->content_length); multipart_parser_free(parser); ogs_free(boundary); if (data.num_of_part > OGS_SBI_MAX_NUM_OF_PART) { /* Overflow Issues #1247 */ ogs_fatal("Overflow num_of_part[%d]", data.num_of_part); ogs_assert_if_reached(); } for (i = 0; i < data.num_of_part; i++) { SWITCH(data.part[i].content_type) CASE(OGS_SBI_CONTENT_JSON_TYPE) parse_json(message, data.part[i].content_type, data.part[i].content); if (data.part[i].content_id) ogs_free(data.part[i].content_id); if (data.part[i].content_type) ogs_free(data.part[i].content_type); if (data.part[i].content) ogs_free(data.part[i].content); break; CASE(OGS_SBI_CONTENT_5GNAS_TYPE) CASE(OGS_SBI_CONTENT_NGAP_TYPE) http->part[http->num_of_part].content_id = data.part[i].content_id; http->part[http->num_of_part].content_type = data.part[i].content_type; http->part[http->num_of_part].pkbuf = ogs_pkbuf_alloc(NULL, data.part[i].content_length); ogs_expect_or_return_val( http->part[http->num_of_part].pkbuf, OGS_ERROR); ogs_pkbuf_put_data(http->part[http->num_of_part].pkbuf, data.part[i].content, data.part[i].content_length); message->part[message->num_of_part].content_id = http->part[http->num_of_part].content_id; message->part[message->num_of_part].content_type = http->part[http->num_of_part].content_type; message->part[message->num_of_part].pkbuf = ogs_pkbuf_copy(http->part[http->num_of_part].pkbuf); ogs_expect_or_return_val( message->part[message->num_of_part].pkbuf, OGS_ERROR); http->num_of_part++; message->num_of_part++; if (data.part[i].content) ogs_free(data.part[i].content); break; DEFAULT ogs_error("Unknown content-type[%s]", data.part[i].content_type); if (data.part[i].content_id) ogs_free(data.part[i].content_id); if (data.part[i].content_type) ogs_free(data.part[i].content_type); END } if (data.header_field) ogs_free(data.header_field); return OGS_OK; } static bool build_multipart( ogs_sbi_http_message_t *http, ogs_sbi_message_t *message) { int i; char boundary[32]; unsigned char digest[16]; char *p = NULL, *last; char *content_type = NULL; char *json = NULL; ogs_assert(message); ogs_assert(http); ogs_random(digest, 16); strcpy(boundary, "=-"); ogs_base64_encode_binary(boundary + 2, digest, 16); p = http->content = ogs_calloc(1, OGS_HUGE_LEN); ogs_expect_or_return_val(p, false); last = p + OGS_HUGE_LEN; /* First boundary */ p = ogs_slprintf(p, last, "--%s\r\n", boundary); /* Encapsulated multipart part (application/json) */ json = build_json(message); ogs_expect_or_return_val(json, false); p = ogs_slprintf(p, last, "%s\r\n\r\n%s", OGS_SBI_CONTENT_TYPE ": " OGS_SBI_CONTENT_JSON_TYPE, json); ogs_free(json); /* Add part */ for (i = 0; i < message->num_of_part; i++) { p = ogs_slprintf(p, last, "\r\n--%s\r\n", boundary); p = ogs_slprintf(p, last, "%s: %s\r\n", OGS_SBI_CONTENT_ID, message->part[i].content_id); p = ogs_slprintf(p, last, "%s: %s\r\n\r\n", OGS_SBI_CONTENT_TYPE, message->part[i].content_type); memcpy(p, message->part[i].pkbuf->data, 
            message->part[i].pkbuf->len);
        p += message->part[i].pkbuf->len;
    }

    /* Last boundary */
    p = ogs_slprintf(p, last, "\r\n--%s--\r\n", boundary);

    http->content_length = p - http->content;

    content_type = ogs_msprintf("%s; boundary=\"%s\"",
            OGS_SBI_CONTENT_MULTIPART_TYPE, boundary);
    ogs_expect_or_return_val(content_type, false);

    ogs_sbi_header_set(http->headers, OGS_SBI_CONTENT_TYPE, content_type);

    ogs_free(content_type);

    return true;
}

static void http_message_free(ogs_sbi_http_message_t *http)
{
    int i;
    ogs_assert(http);

    if (http->params) {
        ogs_hash_index_t *hi;
        for (hi = ogs_hash_first(http->params); hi; hi = ogs_hash_next(hi)) {
            char *key = (char *)ogs_hash_this_key(hi);
            char *val = ogs_hash_this_val(hi);
            ogs_free(key);
            ogs_free(val);
        }
        ogs_hash_destroy(http->params);
    }

    if (http->headers) {
        ogs_hash_index_t *hi;
        for (hi = ogs_hash_first(http->headers); hi; hi = ogs_hash_next(hi)) {
            char *key = (char *)ogs_hash_this_key(hi);
            char *val = ogs_hash_this_val(hi);
            ogs_free(key);
            ogs_free(val);
        }
        ogs_hash_destroy(http->headers);
    }

    if (http->content)
        ogs_free(http->content);

    for (i = 0; i < http->num_of_part; i++) {
        if (http->part[i].pkbuf)
            ogs_pkbuf_free(http->part[i].pkbuf);
        if (http->part[i].content_id)
            ogs_free(http->part[i].content_id);
        if (http->part[i].content_type)
            ogs_free(http->part[i].content_type);
    }
}
null
295
CWE-787
CVE-2021-45005
#include "jsi.h" #include "jslex.h" #include "jsparse.h" #include "jscompile.h" #include "jsvalue.h" /* for jsV_numbertostring */ #define cexp jsC_cexp /* collision with math.h */ #define JF js_State *J, js_Function *F JS_NORETURN void jsC_error(js_State *J, js_Ast *node, const char *fmt, ...) JS_PRINTFLIKE(3,4); static void cfunbody(JF, js_Ast *name, js_Ast *params, js_Ast *body); static void cexp(JF, js_Ast *exp); static void cstmlist(JF, js_Ast *list); static void cstm(JF, js_Ast *stm); void jsC_error(js_State *J, js_Ast *node, const char *fmt, ...) { va_list ap; char buf[512]; char msgbuf[256]; va_start(ap, fmt); vsnprintf(msgbuf, 256, fmt, ap); va_end(ap); snprintf(buf, 256, "%s:%d: ", J->filename, node->line); strcat(buf, msgbuf); js_newsyntaxerror(J, buf); js_throw(J); } static const char *futurewords[] = { "class", "const", "enum", "export", "extends", "import", "super", }; static const char *strictfuturewords[] = { "implements", "interface", "let", "package", "private", "protected", "public", "static", "yield", }; static void checkfutureword(JF, js_Ast *exp) { if (jsY_findword(exp->string, futurewords, nelem(futurewords)) >= 0) jsC_error(J, exp, "'%s' is a future reserved word", exp->string); if (F->strict) { if (jsY_findword(exp->string, strictfuturewords, nelem(strictfuturewords)) >= 0) jsC_error(J, exp, "'%s' is a strict mode future reserved word", exp->string); } } static js_Function *newfun(js_State *J, int line, js_Ast *name, js_Ast *params, js_Ast *body, int script, int default_strict) { js_Function *F = js_malloc(J, sizeof *F); memset(F, 0, sizeof *F); F->gcmark = 0; F->gcnext = J->gcfun; J->gcfun = F; ++J->gccounter; F->filename = js_intern(J, J->filename); F->line = line; F->script = script; F->strict = default_strict; F->name = name ? name->string : ""; cfunbody(J, F, name, params, body); return F; } /* Emit opcodes, constants and jumps */ static void emitraw(JF, int value) { if (value != (js_Instruction)value) js_syntaxerror(J, "integer overflow in instruction coding"); if (F->codelen >= F->codecap) { F->codecap = F->codecap ? F->codecap * 2 : 64; F->code = js_realloc(J, F->code, F->codecap * sizeof *F->code); } F->code[F->codelen++] = value; } static void emit(JF, int value) { emitraw(J, F, F->lastline); emitraw(J, F, value); } static void emitarg(JF, int value) { emitraw(J, F, value); } static void emitline(JF, js_Ast *node) { F->lastline = node->line; } static int addfunction(JF, js_Function *value) { if (F->funlen >= F->funcap) { F->funcap = F->funcap ? F->funcap * 2 : 16; F->funtab = js_realloc(J, F->funtab, F->funcap * sizeof *F->funtab); } F->funtab[F->funlen] = value; return F->funlen++; } static int addlocal(JF, js_Ast *ident, int reuse) { const char *name = ident->string; if (F->strict) { if (!strcmp(name, "arguments")) jsC_error(J, ident, "redefining 'arguments' is not allowed in strict mode"); if (!strcmp(name, "eval")) jsC_error(J, ident, "redefining 'eval' is not allowed in strict mode"); } else { if (!strcmp(name, "eval")) js_evalerror(J, "%s:%d: invalid use of 'eval'", J->filename, ident->line); } if (reuse || F->strict) { int i; for (i = 0; i < F->varlen; ++i) { if (!strcmp(F->vartab[i], name)) { if (reuse) return i+1; if (F->strict) jsC_error(J, ident, "duplicate formal parameter '%s'", name); } } } if (F->varlen >= F->varcap) { F->varcap = F->varcap ? 
F->varcap * 2 : 16; F->vartab = js_realloc(J, F->vartab, F->varcap * sizeof *F->vartab); } F->vartab[F->varlen] = name; return ++F->varlen; } static int findlocal(JF, const char *name) { int i; for (i = F->varlen; i > 0; --i) if (!strcmp(F->vartab[i-1], name)) return i; return -1; } static void emitfunction(JF, js_Function *fun) { F->lightweight = 0; emit(J, F, OP_CLOSURE); emitarg(J, F, addfunction(J, F, fun)); } static void emitnumber(JF, double num) { if (num == 0) { emit(J, F, OP_INTEGER); emitarg(J, F, 32768); if (signbit(num)) emit(J, F, OP_NEG); } else if (num >= SHRT_MIN && num <= SHRT_MAX && num == (int)num) { emit(J, F, OP_INTEGER); emitarg(J, F, num + 32768); } else { #define N (sizeof(num) / sizeof(js_Instruction)) js_Instruction x[N]; size_t i; emit(J, F, OP_NUMBER); memcpy(x, &num, sizeof(num)); for (i = 0; i < N; ++i) emitarg(J, F, x[i]); #undef N } } static void emitstring(JF, int opcode, const char *str) { #define N (sizeof(str) / sizeof(js_Instruction)) js_Instruction x[N]; size_t i; emit(J, F, opcode); memcpy(x, &str, sizeof(str)); for (i = 0; i < N; ++i) emitarg(J, F, x[i]); #undef N } static void emitlocal(JF, int oploc, int opvar, js_Ast *ident) { int is_arguments = !strcmp(ident->string, "arguments"); int is_eval = !strcmp(ident->string, "eval"); int i; if (is_arguments) { F->lightweight = 0; F->arguments = 1; } checkfutureword(J, F, ident); if (F->strict && oploc == OP_SETLOCAL) { if (is_arguments) jsC_error(J, ident, "'arguments' is read-only in strict mode"); if (is_eval) jsC_error(J, ident, "'eval' is read-only in strict mode"); } if (is_eval) js_evalerror(J, "%s:%d: invalid use of 'eval'", J->filename, ident->line); i = findlocal(J, F, ident->string); if (i < 0) { emitstring(J, F, opvar, ident->string); } else { emit(J, F, oploc); emitarg(J, F, i); } } static int here(JF) { return F->codelen; } static int emitjump(JF, int opcode) { int inst; emit(J, F, opcode); inst = F->codelen; emitarg(J, F, 0); return inst; } static void emitjumpto(JF, int opcode, int dest) { emit(J, F, opcode); if (dest != (js_Instruction)dest) js_syntaxerror(J, "jump address integer overflow"); emitarg(J, F, dest); } static void labelto(JF, int inst, int addr) { if (addr != (js_Instruction)addr) js_syntaxerror(J, "jump address integer overflow"); F->code[inst] = addr; } static void label(JF, int inst) { labelto(J, F, inst, F->codelen); } /* Expressions */ static void ctypeof(JF, js_Ast *exp) { if (exp->a->type == EXP_IDENTIFIER) { emitline(J, F, exp->a); emitlocal(J, F, OP_GETLOCAL, OP_HASVAR, exp->a); } else { cexp(J, F, exp->a); } emitline(J, F, exp); emit(J, F, OP_TYPEOF); } static void cunary(JF, js_Ast *exp, int opcode) { cexp(J, F, exp->a); emitline(J, F, exp); emit(J, F, opcode); } static void cbinary(JF, js_Ast *exp, int opcode) { cexp(J, F, exp->a); cexp(J, F, exp->b); emitline(J, F, exp); emit(J, F, opcode); } static void carray(JF, js_Ast *list) { while (list) { emitline(J, F, list->a); cexp(J, F, list->a); emit(J, F, OP_INITARRAY); list = list->b; } } static void checkdup(JF, js_Ast *list, js_Ast *end) { char nbuf[32], sbuf[32]; const char *needle, *straw; if (end->a->type == EXP_NUMBER) needle = jsV_numbertostring(J, nbuf, end->a->number); else needle = end->a->string; while (list->a != end) { if (list->a->type == end->type) { js_Ast *prop = list->a->a; if (prop->type == EXP_NUMBER) straw = jsV_numbertostring(J, sbuf, prop->number); else straw = prop->string; if (!strcmp(needle, straw)) jsC_error(J, list, "duplicate property '%s' in object literal", needle); } list = list->b; } 
} static void cobject(JF, js_Ast *list) { js_Ast *head = list; while (list) { js_Ast *kv = list->a; js_Ast *prop = kv->a; if (prop->type == AST_IDENTIFIER || prop->type == EXP_STRING) { emitline(J, F, prop); emitstring(J, F, OP_STRING, prop->string); } else if (prop->type == EXP_NUMBER) { emitline(J, F, prop); emitnumber(J, F, prop->number); } else { jsC_error(J, prop, "invalid property name in object initializer"); } if (F->strict) checkdup(J, F, head, kv); switch (kv->type) { default: /* impossible */ break; case EXP_PROP_VAL: cexp(J, F, kv->b); emitline(J, F, kv); emit(J, F, OP_INITPROP); break; case EXP_PROP_GET: emitfunction(J, F, newfun(J, prop->line, NULL, NULL, kv->c, 0, F->strict)); emitline(J, F, kv); emit(J, F, OP_INITGETTER); break; case EXP_PROP_SET: emitfunction(J, F, newfun(J, prop->line, NULL, kv->b, kv->c, 0, F->strict)); emitline(J, F, kv); emit(J, F, OP_INITSETTER); break; } list = list->b; } } static int cargs(JF, js_Ast *list) { int n = 0; while (list) { cexp(J, F, list->a); list = list->b; ++n; } return n; } static void cassign(JF, js_Ast *exp) { js_Ast *lhs = exp->a; js_Ast *rhs = exp->b; switch (lhs->type) { case EXP_IDENTIFIER: cexp(J, F, rhs); emitline(J, F, exp); emitlocal(J, F, OP_SETLOCAL, OP_SETVAR, lhs); break; case EXP_INDEX: cexp(J, F, lhs->a); cexp(J, F, lhs->b); cexp(J, F, rhs); emitline(J, F, exp); emit(J, F, OP_SETPROP); break; case EXP_MEMBER: cexp(J, F, lhs->a); cexp(J, F, rhs); emitline(J, F, exp); emitstring(J, F, OP_SETPROP_S, lhs->b->string); break; default: jsC_error(J, lhs, "invalid l-value in assignment"); } } static void cassignforin(JF, js_Ast *stm) { js_Ast *lhs = stm->a; if (stm->type == STM_FOR_IN_VAR) { if (lhs->b) jsC_error(J, lhs->b, "more than one loop variable in for-in statement"); emitline(J, F, lhs->a); emitlocal(J, F, OP_SETLOCAL, OP_SETVAR, lhs->a->a); /* list(var-init(ident)) */ emit(J, F, OP_POP); return; } switch (lhs->type) { case EXP_IDENTIFIER: emitline(J, F, lhs); emitlocal(J, F, OP_SETLOCAL, OP_SETVAR, lhs); emit(J, F, OP_POP); break; case EXP_INDEX: cexp(J, F, lhs->a); cexp(J, F, lhs->b); emitline(J, F, lhs); emit(J, F, OP_ROT3); emit(J, F, OP_SETPROP); emit(J, F, OP_POP); break; case EXP_MEMBER: cexp(J, F, lhs->a); emitline(J, F, lhs); emit(J, F, OP_ROT2); emitstring(J, F, OP_SETPROP_S, lhs->b->string); emit(J, F, OP_POP); break; default: jsC_error(J, lhs, "invalid l-value in for-in loop assignment"); } } static void cassignop1(JF, js_Ast *lhs) { switch (lhs->type) { case EXP_IDENTIFIER: emitline(J, F, lhs); emitlocal(J, F, OP_GETLOCAL, OP_GETVAR, lhs); break; case EXP_INDEX: cexp(J, F, lhs->a); cexp(J, F, lhs->b); emitline(J, F, lhs); emit(J, F, OP_DUP2); emit(J, F, OP_GETPROP); break; case EXP_MEMBER: cexp(J, F, lhs->a); emitline(J, F, lhs); emit(J, F, OP_DUP); emitstring(J, F, OP_GETPROP_S, lhs->b->string); break; default: jsC_error(J, lhs, "invalid l-value in assignment"); } } static void cassignop2(JF, js_Ast *lhs, int postfix) { switch (lhs->type) { case EXP_IDENTIFIER: emitline(J, F, lhs); if (postfix) emit(J, F, OP_ROT2); emitlocal(J, F, OP_SETLOCAL, OP_SETVAR, lhs); break; case EXP_INDEX: emitline(J, F, lhs); if (postfix) emit(J, F, OP_ROT4); emit(J, F, OP_SETPROP); break; case EXP_MEMBER: emitline(J, F, lhs); if (postfix) emit(J, F, OP_ROT3); emitstring(J, F, OP_SETPROP_S, lhs->b->string); break; default: jsC_error(J, lhs, "invalid l-value in assignment"); } } static void cassignop(JF, js_Ast *exp, int opcode) { js_Ast *lhs = exp->a; js_Ast *rhs = exp->b; cassignop1(J, F, lhs); cexp(J, F, rhs); emitline(J, F, 
exp); emit(J, F, opcode); cassignop2(J, F, lhs, 0); } static void cdelete(JF, js_Ast *exp) { js_Ast *arg = exp->a; switch (arg->type) { case EXP_IDENTIFIER: if (F->strict) jsC_error(J, exp, "delete on an unqualified name is not allowed in strict mode"); emitline(J, F, exp); emitlocal(J, F, OP_DELLOCAL, OP_DELVAR, arg); break; case EXP_INDEX: cexp(J, F, arg->a); cexp(J, F, arg->b); emitline(J, F, exp); emit(J, F, OP_DELPROP); break; case EXP_MEMBER: cexp(J, F, arg->a); emitline(J, F, exp); emitstring(J, F, OP_DELPROP_S, arg->b->string); break; default: jsC_error(J, exp, "invalid l-value in delete expression"); } } static void ceval(JF, js_Ast *fun, js_Ast *args) { int n = cargs(J, F, args); F->lightweight = 0; F->arguments = 1; if (n == 0) emit(J, F, OP_UNDEF); else while (n-- > 1) emit(J, F, OP_POP); emit(J, F, OP_EVAL); } static void ccall(JF, js_Ast *fun, js_Ast *args) { int n; switch (fun->type) { case EXP_INDEX: cexp(J, F, fun->a); emit(J, F, OP_DUP); cexp(J, F, fun->b); emit(J, F, OP_GETPROP); emit(J, F, OP_ROT2); break; case EXP_MEMBER: cexp(J, F, fun->a); emit(J, F, OP_DUP); emitstring(J, F, OP_GETPROP_S, fun->b->string); emit(J, F, OP_ROT2); break; case EXP_IDENTIFIER: if (!strcmp(fun->string, "eval")) { ceval(J, F, fun, args); return; } /* fallthrough */ default: cexp(J, F, fun); emit(J, F, OP_UNDEF); break; } n = cargs(J, F, args); emit(J, F, OP_CALL); emitarg(J, F, n); } static void cexp(JF, js_Ast *exp) { int then, end; int n; switch (exp->type) { case EXP_STRING: emitline(J, F, exp); emitstring(J, F, OP_STRING, exp->string); break; case EXP_NUMBER: emitline(J, F, exp); emitnumber(J, F, exp->number); break; case EXP_UNDEF: emitline(J, F, exp); emit(J, F, OP_UNDEF); break; case EXP_NULL: emitline(J, F, exp); emit(J, F, OP_NULL); break; case EXP_TRUE: emitline(J, F, exp); emit(J, F, OP_TRUE); break; case EXP_FALSE: emitline(J, F, exp); emit(J, F, OP_FALSE); break; case EXP_THIS: emitline(J, F, exp); emit(J, F, OP_THIS); break; case EXP_REGEXP: emitline(J, F, exp); emitstring(J, F, OP_NEWREGEXP, exp->string); emitarg(J, F, exp->number); break; case EXP_OBJECT: emitline(J, F, exp); emit(J, F, OP_NEWOBJECT); cobject(J, F, exp->a); break; case EXP_ARRAY: emitline(J, F, exp); emit(J, F, OP_NEWARRAY); carray(J, F, exp->a); break; case EXP_FUN: emitline(J, F, exp); emitfunction(J, F, newfun(J, exp->line, exp->a, exp->b, exp->c, 0, F->strict)); break; case EXP_IDENTIFIER: emitline(J, F, exp); emitlocal(J, F, OP_GETLOCAL, OP_GETVAR, exp); break; case EXP_INDEX: cexp(J, F, exp->a); cexp(J, F, exp->b); emitline(J, F, exp); emit(J, F, OP_GETPROP); break; case EXP_MEMBER: cexp(J, F, exp->a); emitline(J, F, exp); emitstring(J, F, OP_GETPROP_S, exp->b->string); break; case EXP_CALL: ccall(J, F, exp->a, exp->b); break; case EXP_NEW: cexp(J, F, exp->a); n = cargs(J, F, exp->b); emitline(J, F, exp); emit(J, F, OP_NEW); emitarg(J, F, n); break; case EXP_DELETE: cdelete(J, F, exp); break; case EXP_PREINC: cassignop1(J, F, exp->a); emitline(J, F, exp); emit(J, F, OP_INC); cassignop2(J, F, exp->a, 0); break; case EXP_PREDEC: cassignop1(J, F, exp->a); emitline(J, F, exp); emit(J, F, OP_DEC); cassignop2(J, F, exp->a, 0); break; case EXP_POSTINC: cassignop1(J, F, exp->a); emitline(J, F, exp); emit(J, F, OP_POSTINC); cassignop2(J, F, exp->a, 1); emit(J, F, OP_POP); break; case EXP_POSTDEC: cassignop1(J, F, exp->a); emitline(J, F, exp); emit(J, F, OP_POSTDEC); cassignop2(J, F, exp->a, 1); emit(J, F, OP_POP); break; case EXP_VOID: cexp(J, F, exp->a); emitline(J, F, exp); emit(J, F, OP_POP); emit(J, F, 
OP_UNDEF); break; case EXP_TYPEOF: ctypeof(J, F, exp); break; case EXP_POS: cunary(J, F, exp, OP_POS); break; case EXP_NEG: cunary(J, F, exp, OP_NEG); break; case EXP_BITNOT: cunary(J, F, exp, OP_BITNOT); break; case EXP_LOGNOT: cunary(J, F, exp, OP_LOGNOT); break; case EXP_BITOR: cbinary(J, F, exp, OP_BITOR); break; case EXP_BITXOR: cbinary(J, F, exp, OP_BITXOR); break; case EXP_BITAND: cbinary(J, F, exp, OP_BITAND); break; case EXP_EQ: cbinary(J, F, exp, OP_EQ); break; case EXP_NE: cbinary(J, F, exp, OP_NE); break; case EXP_STRICTEQ: cbinary(J, F, exp, OP_STRICTEQ); break; case EXP_STRICTNE: cbinary(J, F, exp, OP_STRICTNE); break; case EXP_LT: cbinary(J, F, exp, OP_LT); break; case EXP_GT: cbinary(J, F, exp, OP_GT); break; case EXP_LE: cbinary(J, F, exp, OP_LE); break; case EXP_GE: cbinary(J, F, exp, OP_GE); break; case EXP_INSTANCEOF: cbinary(J, F, exp, OP_INSTANCEOF); break; case EXP_IN: cbinary(J, F, exp, OP_IN); break; case EXP_SHL: cbinary(J, F, exp, OP_SHL); break; case EXP_SHR: cbinary(J, F, exp, OP_SHR); break; case EXP_USHR: cbinary(J, F, exp, OP_USHR); break; case EXP_ADD: cbinary(J, F, exp, OP_ADD); break; case EXP_SUB: cbinary(J, F, exp, OP_SUB); break; case EXP_MUL: cbinary(J, F, exp, OP_MUL); break; case EXP_DIV: cbinary(J, F, exp, OP_DIV); break; case EXP_MOD: cbinary(J, F, exp, OP_MOD); break; case EXP_ASS: cassign(J, F, exp); break; case EXP_ASS_MUL: cassignop(J, F, exp, OP_MUL); break; case EXP_ASS_DIV: cassignop(J, F, exp, OP_DIV); break; case EXP_ASS_MOD: cassignop(J, F, exp, OP_MOD); break; case EXP_ASS_ADD: cassignop(J, F, exp, OP_ADD); break; case EXP_ASS_SUB: cassignop(J, F, exp, OP_SUB); break; case EXP_ASS_SHL: cassignop(J, F, exp, OP_SHL); break; case EXP_ASS_SHR: cassignop(J, F, exp, OP_SHR); break; case EXP_ASS_USHR: cassignop(J, F, exp, OP_USHR); break; case EXP_ASS_BITAND: cassignop(J, F, exp, OP_BITAND); break; case EXP_ASS_BITXOR: cassignop(J, F, exp, OP_BITXOR); break; case EXP_ASS_BITOR: cassignop(J, F, exp, OP_BITOR); break; case EXP_COMMA: cexp(J, F, exp->a); emitline(J, F, exp); emit(J, F, OP_POP); cexp(J, F, exp->b); break; case EXP_LOGOR: cexp(J, F, exp->a); emitline(J, F, exp); emit(J, F, OP_DUP); end = emitjump(J, F, OP_JTRUE); emit(J, F, OP_POP); cexp(J, F, exp->b); label(J, F, end); break; case EXP_LOGAND: cexp(J, F, exp->a); emitline(J, F, exp); emit(J, F, OP_DUP); end = emitjump(J, F, OP_JFALSE); emit(J, F, OP_POP); cexp(J, F, exp->b); label(J, F, end); break; case EXP_COND: cexp(J, F, exp->a); emitline(J, F, exp); then = emitjump(J, F, OP_JTRUE); cexp(J, F, exp->c); end = emitjump(J, F, OP_JUMP); label(J, F, then); cexp(J, F, exp->b); label(J, F, end); break; default: jsC_error(J, exp, "unknown expression: (%s)", jsP_aststring(exp->type)); } } /* Patch break and continue statements */ static void addjump(JF, enum js_AstType type, js_Ast *target, int inst) { js_JumpList *jump = js_malloc(J, sizeof *jump); jump->type = type; jump->inst = inst; jump->next = target->jumps; target->jumps = jump; } static void labeljumps(JF, js_JumpList *jump, int baddr, int caddr) { while (jump) { if (jump->type == STM_BREAK) labelto(J, F, jump->inst, baddr); if (jump->type == STM_CONTINUE) labelto(J, F, jump->inst, caddr); jump = jump->next; } } static int isloop(enum js_AstType T) { return T == STM_DO || T == STM_WHILE || T == STM_FOR || T == STM_FOR_VAR || T == STM_FOR_IN || T == STM_FOR_IN_VAR; } static int isfun(enum js_AstType T) { return T == AST_FUNDEC || T == EXP_FUN || T == EXP_PROP_GET || T == EXP_PROP_SET; } static int matchlabel(js_Ast *node, const 
char *label) { while (node && node->type == STM_LABEL) { if (!strcmp(node->a->string, label)) return 1; node = node->parent; } return 0; } static js_Ast *breaktarget(JF, js_Ast *node, const char *label) { while (node) { if (isfun(node->type)) break; if (!label) { if (isloop(node->type) || node->type == STM_SWITCH) return node; } else { if (matchlabel(node->parent, label)) return node; } node = node->parent; } return NULL; } static js_Ast *continuetarget(JF, js_Ast *node, const char *label) { while (node) { if (isfun(node->type)) break; if (isloop(node->type)) { if (!label) return node; else if (matchlabel(node->parent, label)) return node; } node = node->parent; } return NULL; } static js_Ast *returntarget(JF, js_Ast *node) { while (node) { if (isfun(node->type)) return node; node = node->parent; } return NULL; } /* Emit code to rebalance stack and scopes during an abrupt exit */ static void cexit(JF, enum js_AstType T, js_Ast *node, js_Ast *target) { js_Ast *prev; do { prev = node, node = node->parent; switch (node->type) { default: /* impossible */ break; case STM_WITH: emitline(J, F, node); emit(J, F, OP_ENDWITH); break; case STM_FOR_IN: case STM_FOR_IN_VAR: emitline(J, F, node); /* pop the iterator if leaving the loop */ if (F->script) { if (T == STM_RETURN || T == STM_BREAK || (T == STM_CONTINUE && target != node)) { /* pop the iterator, save the return or exp value */ emit(J, F, OP_ROT2); emit(J, F, OP_POP); } if (T == STM_CONTINUE) emit(J, F, OP_ROT2); /* put the iterator back on top */ } else { if (T == STM_RETURN) { /* pop the iterator, save the return value */ emit(J, F, OP_ROT2); emit(J, F, OP_POP); } if (T == STM_BREAK || (T == STM_CONTINUE && target != node)) emit(J, F, OP_POP); /* pop the iterator */ } break; case STM_TRY: emitline(J, F, node); /* came from try block */ if (prev == node->a) { emit(J, F, OP_ENDTRY); if (node->d) cstm(J, F, node->d); /* finally */ } /* came from catch block */ if (prev == node->c) { /* ... 
with finally */ if (node->d) { emit(J, F, OP_ENDCATCH); emit(J, F, OP_ENDTRY); cstm(J, F, node->d); /* finally */ } else { emit(J, F, OP_ENDCATCH); } } break; } } while (node != target); } /* Try/catch/finally */ static void ctryfinally(JF, js_Ast *trystm, js_Ast *finallystm) { int L1; L1 = emitjump(J, F, OP_TRY); { /* if we get here, we have caught an exception in the try block */ cstm(J, F, finallystm); /* inline finally block */ emit(J, F, OP_THROW); /* rethrow exception */ } label(J, F, L1); cstm(J, F, trystm); emit(J, F, OP_ENDTRY); cstm(J, F, finallystm); } static void ctrycatch(JF, js_Ast *trystm, js_Ast *catchvar, js_Ast *catchstm) { int L1, L2; L1 = emitjump(J, F, OP_TRY); { /* if we get here, we have caught an exception in the try block */ checkfutureword(J, F, catchvar); if (F->strict) { if (!strcmp(catchvar->string, "arguments")) jsC_error(J, catchvar, "redefining 'arguments' is not allowed in strict mode"); if (!strcmp(catchvar->string, "eval")) jsC_error(J, catchvar, "redefining 'eval' is not allowed in strict mode"); } emitline(J, F, catchvar); emitstring(J, F, OP_CATCH, catchvar->string); cstm(J, F, catchstm); emit(J, F, OP_ENDCATCH); L2 = emitjump(J, F, OP_JUMP); /* skip past the try block */ } label(J, F, L1); cstm(J, F, trystm); emit(J, F, OP_ENDTRY); label(J, F, L2); } static void ctrycatchfinally(JF, js_Ast *trystm, js_Ast *catchvar, js_Ast *catchstm, js_Ast *finallystm) { int L1, L2, L3; L1 = emitjump(J, F, OP_TRY); { /* if we get here, we have caught an exception in the try block */ L2 = emitjump(J, F, OP_TRY); { /* if we get here, we have caught an exception in the catch block */ cstm(J, F, finallystm); /* inline finally block */ emit(J, F, OP_THROW); /* rethrow exception */ } label(J, F, L2); if (F->strict) { checkfutureword(J, F, catchvar); if (!strcmp(catchvar->string, "arguments")) jsC_error(J, catchvar, "redefining 'arguments' is not allowed in strict mode"); if (!strcmp(catchvar->string, "eval")) jsC_error(J, catchvar, "redefining 'eval' is not allowed in strict mode"); } emitline(J, F, catchvar); emitstring(J, F, OP_CATCH, catchvar->string); cstm(J, F, catchstm); emit(J, F, OP_ENDCATCH); emit(J, F, OP_ENDTRY); L3 = emitjump(J, F, OP_JUMP); /* skip past the try block to the finally block */ } label(J, F, L1); cstm(J, F, trystm); emit(J, F, OP_ENDTRY); label(J, F, L3); cstm(J, F, finallystm); } /* Switch */ static void cswitch(JF, js_Ast *ref, js_Ast *head) { js_Ast *node, *clause, *def = NULL; int end; cexp(J, F, ref); /* emit an if-else chain of tests for the case clause expressions */ for (node = head; node; node = node->b) { clause = node->a; if (clause->type == STM_DEFAULT) { if (def) jsC_error(J, clause, "more than one default label in switch"); def = clause; } else { cexp(J, F, clause->a); emitline(J, F, clause); clause->casejump = emitjump(J, F, OP_JCASE); } } emit(J, F, OP_POP); if (def) { emitline(J, F, def); def->casejump = emitjump(J, F, OP_JUMP); end = 0; } else { end = emitjump(J, F, OP_JUMP); } /* emit the case clause bodies */ for (node = head; node; node = node->b) { clause = node->a; label(J, F, clause->casejump); if (clause->type == STM_DEFAULT) cstmlist(J, F, clause->a); else cstmlist(J, F, clause->b); } if (end) label(J, F, end); } /* Statements */ static void cvarinit(JF, js_Ast *list) { while (list) { js_Ast *var = list->a; if (var->b) { cexp(J, F, var->b); emitline(J, F, var); emitlocal(J, F, OP_SETLOCAL, OP_SETVAR, var->a); emit(J, F, OP_POP); } list = list->b; } } static void cstm(JF, js_Ast *stm) { js_Ast *target; int loop, cont, 
then, end; emitline(J, F, stm); switch (stm->type) { case AST_FUNDEC: break; case STM_BLOCK: cstmlist(J, F, stm->a); break; case STM_EMPTY: if (F->script) { emitline(J, F, stm); emit(J, F, OP_POP); emit(J, F, OP_UNDEF); } break; case STM_VAR: cvarinit(J, F, stm->a); break; case STM_IF: if (stm->c) { cexp(J, F, stm->a); emitline(J, F, stm); then = emitjump(J, F, OP_JTRUE); cstm(J, F, stm->c); emitline(J, F, stm); end = emitjump(J, F, OP_JUMP); label(J, F, then); cstm(J, F, stm->b); label(J, F, end); } else { cexp(J, F, stm->a); emitline(J, F, stm); end = emitjump(J, F, OP_JFALSE); cstm(J, F, stm->b); label(J, F, end); } break; case STM_DO: loop = here(J, F); cstm(J, F, stm->a); cont = here(J, F); cexp(J, F, stm->b); emitline(J, F, stm); emitjumpto(J, F, OP_JTRUE, loop); labeljumps(J, F, stm->jumps, here(J,F), cont); break; case STM_WHILE: loop = here(J, F); cexp(J, F, stm->a); emitline(J, F, stm); end = emitjump(J, F, OP_JFALSE); cstm(J, F, stm->b); emitline(J, F, stm); emitjumpto(J, F, OP_JUMP, loop); label(J, F, end); labeljumps(J, F, stm->jumps, here(J,F), loop); break; case STM_FOR: case STM_FOR_VAR: if (stm->type == STM_FOR_VAR) { cvarinit(J, F, stm->a); } else { if (stm->a) { cexp(J, F, stm->a); emit(J, F, OP_POP); } } loop = here(J, F); if (stm->b) { cexp(J, F, stm->b); emitline(J, F, stm); end = emitjump(J, F, OP_JFALSE); } else { end = 0; } cstm(J, F, stm->d); cont = here(J, F); if (stm->c) { cexp(J, F, stm->c); emit(J, F, OP_POP); } emitline(J, F, stm); emitjumpto(J, F, OP_JUMP, loop); if (end) label(J, F, end); labeljumps(J, F, stm->jumps, here(J,F), cont); break; case STM_FOR_IN: case STM_FOR_IN_VAR: cexp(J, F, stm->b); emitline(J, F, stm); emit(J, F, OP_ITERATOR); loop = here(J, F); { emitline(J, F, stm); emit(J, F, OP_NEXTITER); end = emitjump(J, F, OP_JFALSE); cassignforin(J, F, stm); if (F->script) { emit(J, F, OP_ROT2); cstm(J, F, stm->c); emit(J, F, OP_ROT2); } else { cstm(J, F, stm->c); } emitline(J, F, stm); emitjumpto(J, F, OP_JUMP, loop); } label(J, F, end); labeljumps(J, F, stm->jumps, here(J,F), loop); break; case STM_SWITCH: cswitch(J, F, stm->a, stm->b); labeljumps(J, F, stm->jumps, here(J,F), 0); break; case STM_LABEL: cstm(J, F, stm->b); /* skip consecutive labels */ while (stm->type == STM_LABEL) stm = stm->b; /* loops and switches have already been labelled */ if (!isloop(stm->type) && stm->type != STM_SWITCH) labeljumps(J, F, stm->jumps, here(J,F), 0); break; case STM_BREAK: if (stm->a) { checkfutureword(J, F, stm->a); target = breaktarget(J, F, stm->parent, stm->a->string); if (!target) jsC_error(J, stm, "break label '%s' not found", stm->a->string); } else { target = breaktarget(J, F, stm->parent, NULL); if (!target) jsC_error(J, stm, "unlabelled break must be inside loop or switch"); } cexit(J, F, STM_BREAK, stm, target); emitline(J, F, stm); addjump(J, F, STM_BREAK, target, emitjump(J, F, OP_JUMP)); break; case STM_CONTINUE: if (stm->a) { checkfutureword(J, F, stm->a); target = continuetarget(J, F, stm->parent, stm->a->string); if (!target) jsC_error(J, stm, "continue label '%s' not found", stm->a->string); } else { target = continuetarget(J, F, stm->parent, NULL); if (!target) jsC_error(J, stm, "continue must be inside loop"); } cexit(J, F, STM_CONTINUE, stm, target); emitline(J, F, stm); addjump(J, F, STM_CONTINUE, target, emitjump(J, F, OP_JUMP)); break; case STM_RETURN: if (stm->a) cexp(J, F, stm->a); else emit(J, F, OP_UNDEF); target = returntarget(J, F, stm->parent); if (!target) jsC_error(J, stm, "return not in function"); cexit(J, F, STM_RETURN, 
	stm, target);
		emitline(J, F, stm);
		emit(J, F, OP_RETURN);
		break;

	case STM_THROW:
		cexp(J, F, stm->a);
		emitline(J, F, stm);
		emit(J, F, OP_THROW);
		break;

	case STM_WITH:
		F->lightweight = 0;
		if (F->strict)
			jsC_error(J, stm->a, "'with' statements are not allowed in strict mode");
		cexp(J, F, stm->a);
		emitline(J, F, stm);
		emit(J, F, OP_WITH);
		cstm(J, F, stm->b);
		emitline(J, F, stm);
		emit(J, F, OP_ENDWITH);
		break;

	case STM_TRY:
		emitline(J, F, stm);
		if (stm->b && stm->c) {
			F->lightweight = 0;
			if (stm->d)
				ctrycatchfinally(J, F, stm->a, stm->b, stm->c, stm->d);
			else
				ctrycatch(J, F, stm->a, stm->b, stm->c);
		} else {
			ctryfinally(J, F, stm->a, stm->d);
		}
		break;

	case STM_DEBUGGER:
		emitline(J, F, stm);
		emit(J, F, OP_DEBUGGER);
		break;

	default:
		if (F->script) {
			emitline(J, F, stm);
			emit(J, F, OP_POP);
			cexp(J, F, stm);
		} else {
			cexp(J, F, stm);
			emitline(J, F, stm);
			emit(J, F, OP_POP);
		}
		break;
	}
}

static void cstmlist(JF, js_Ast *list)
{
	while (list) {
		cstm(J, F, list->a);
		list = list->b;
	}
}

/* Declarations and programs */

static int listlength(js_Ast *list)
{
	int n = 0;
	while (list) ++n, list = list->b;
	return n;
}

static void cparams(JF, js_Ast *list, js_Ast *fname)
{
	F->numparams = listlength(list);
	while (list) {
		checkfutureword(J, F, list->a);
		addlocal(J, F, list->a, 0);
		list = list->b;
	}
}

static void cvardecs(JF, js_Ast *node)
{
	if (node->type == AST_LIST) {
		while (node) {
			cvardecs(J, F, node->a);
			node = node->b;
		}
		return;
	}

	if (isfun(node->type))
		return; /* stop at inner functions */

	if (node->type == EXP_VAR) {
		checkfutureword(J, F, node->a);
		addlocal(J, F, node->a, 1);
	}

	if (node->a) cvardecs(J, F, node->a);
	if (node->b) cvardecs(J, F, node->b);
	if (node->c) cvardecs(J, F, node->c);
	if (node->d) cvardecs(J, F, node->d);
}

static void cfundecs(JF, js_Ast *list)
{
	while (list) {
		js_Ast *stm = list->a;
		if (stm->type == AST_FUNDEC) {
			emitline(J, F, stm);
			emitfunction(J, F, newfun(J, stm->line, stm->a, stm->b, stm->c, 0, F->strict));
			emitline(J, F, stm);
			emit(J, F, OP_SETLOCAL);
			emitarg(J, F, addlocal(J, F, stm->a, 0));
			emit(J, F, OP_POP);
		}
		list = list->b;
	}
}

static void cfunbody(JF, js_Ast *name, js_Ast *params, js_Ast *body)
{
	F->lightweight = 1;
	F->arguments = 0;

	if (F->script)
		F->lightweight = 0;

	/* Check if first statement is 'use strict': */
	if (body && body->type == AST_LIST && body->a && body->a->type == EXP_STRING)
		if (!strcmp(body->a->string, "use strict"))
			F->strict = 1;

	F->lastline = F->line;

	cparams(J, F, params, name);
	if (body) {
		cvardecs(J, F, body);
		cfundecs(J, F, body);
	}

	if (name) {
		checkfutureword(J, F, name);
		if (findlocal(J, F, name->string) < 0) {
			emit(J, F, OP_CURRENT);
			emit(J, F, OP_SETLOCAL);
			emitarg(J, F, addlocal(J, F, name, 0));
			emit(J, F, OP_POP);
		}
	}

	if (F->script) {
		emit(J, F, OP_UNDEF);
		cstmlist(J, F, body);
		emit(J, F, OP_RETURN);
	} else {
		cstmlist(J, F, body);
		emit(J, F, OP_UNDEF);
		emit(J, F, OP_RETURN);
	}
}

js_Function *jsC_compilefunction(js_State *J, js_Ast *prog)
{
	return newfun(J, prog->line, prog->a, prog->b, prog->c, 0, J->default_strict);
}

js_Function *jsC_compilescript(js_State *J, js_Ast *prog, int default_strict)
{
	return newfun(J, prog ? prog->line : 0, NULL, NULL, prog, 1, default_strict);
}
null
#include "jsi.h" #include "jslex.h" #include "jsparse.h" #include "jscompile.h" #include "jsvalue.h" /* for jsV_numbertostring */ #define cexp jsC_cexp /* collision with math.h */ #define JF js_State *J, js_Function *F JS_NORETURN void jsC_error(js_State *J, js_Ast *node, const char *fmt, ...) JS_PRINTFLIKE(3,4); static void cfunbody(JF, js_Ast *name, js_Ast *params, js_Ast *body); static void cexp(JF, js_Ast *exp); static void cstmlist(JF, js_Ast *list); static void cstm(JF, js_Ast *stm); void jsC_error(js_State *J, js_Ast *node, const char *fmt, ...) { va_list ap; char buf[512]; char msgbuf[256]; va_start(ap, fmt); vsnprintf(msgbuf, 256, fmt, ap); va_end(ap); snprintf(buf, 256, "%s:%d: ", J->filename, node->line); strcat(buf, msgbuf); js_newsyntaxerror(J, buf); js_throw(J); } static const char *futurewords[] = { "class", "const", "enum", "export", "extends", "import", "super", }; static const char *strictfuturewords[] = { "implements", "interface", "let", "package", "private", "protected", "public", "static", "yield", }; static void checkfutureword(JF, js_Ast *exp) { if (jsY_findword(exp->string, futurewords, nelem(futurewords)) >= 0) jsC_error(J, exp, "'%s' is a future reserved word", exp->string); if (F->strict) { if (jsY_findword(exp->string, strictfuturewords, nelem(strictfuturewords)) >= 0) jsC_error(J, exp, "'%s' is a strict mode future reserved word", exp->string); } } static js_Function *newfun(js_State *J, int line, js_Ast *name, js_Ast *params, js_Ast *body, int script, int default_strict) { js_Function *F = js_malloc(J, sizeof *F); memset(F, 0, sizeof *F); F->gcmark = 0; F->gcnext = J->gcfun; J->gcfun = F; ++J->gccounter; F->filename = js_intern(J, J->filename); F->line = line; F->script = script; F->strict = default_strict; F->name = name ? name->string : ""; cfunbody(J, F, name, params, body); return F; } /* Emit opcodes, constants and jumps */ static void emitraw(JF, int value) { if (value != (js_Instruction)value) js_syntaxerror(J, "integer overflow in instruction coding"); if (F->codelen >= F->codecap) { F->codecap = F->codecap ? F->codecap * 2 : 64; F->code = js_realloc(J, F->code, F->codecap * sizeof *F->code); } F->code[F->codelen++] = value; } static void emit(JF, int value) { emitraw(J, F, F->lastline); emitraw(J, F, value); } static void emitarg(JF, int value) { emitraw(J, F, value); } static void emitline(JF, js_Ast *node) { F->lastline = node->line; } static int addfunction(JF, js_Function *value) { if (F->funlen >= F->funcap) { F->funcap = F->funcap ? F->funcap * 2 : 16; F->funtab = js_realloc(J, F->funtab, F->funcap * sizeof *F->funtab); } F->funtab[F->funlen] = value; return F->funlen++; } static int addlocal(JF, js_Ast *ident, int reuse) { const char *name = ident->string; if (F->strict) { if (!strcmp(name, "arguments")) jsC_error(J, ident, "redefining 'arguments' is not allowed in strict mode"); if (!strcmp(name, "eval")) jsC_error(J, ident, "redefining 'eval' is not allowed in strict mode"); } else { if (!strcmp(name, "eval")) js_evalerror(J, "%s:%d: invalid use of 'eval'", J->filename, ident->line); } if (reuse || F->strict) { int i; for (i = 0; i < F->varlen; ++i) { if (!strcmp(F->vartab[i], name)) { if (reuse) return i+1; if (F->strict) jsC_error(J, ident, "duplicate formal parameter '%s'", name); } } } if (F->varlen >= F->varcap) { F->varcap = F->varcap ? 
F->varcap * 2 : 16; F->vartab = js_realloc(J, F->vartab, F->varcap * sizeof *F->vartab); } F->vartab[F->varlen] = name; return ++F->varlen; } static int findlocal(JF, const char *name) { int i; for (i = F->varlen; i > 0; --i) if (!strcmp(F->vartab[i-1], name)) return i; return -1; } static void emitfunction(JF, js_Function *fun) { F->lightweight = 0; emit(J, F, OP_CLOSURE); emitarg(J, F, addfunction(J, F, fun)); } static void emitnumber(JF, double num) { if (num == 0) { emit(J, F, OP_INTEGER); emitarg(J, F, 32768); if (signbit(num)) emit(J, F, OP_NEG); } else if (num >= SHRT_MIN && num <= SHRT_MAX && num == (int)num) { emit(J, F, OP_INTEGER); emitarg(J, F, num + 32768); } else { #define N (sizeof(num) / sizeof(js_Instruction)) js_Instruction x[N]; size_t i; emit(J, F, OP_NUMBER); memcpy(x, &num, sizeof(num)); for (i = 0; i < N; ++i) emitarg(J, F, x[i]); #undef N } } static void emitstring(JF, int opcode, const char *str) { #define N (sizeof(str) / sizeof(js_Instruction)) js_Instruction x[N]; size_t i; emit(J, F, opcode); memcpy(x, &str, sizeof(str)); for (i = 0; i < N; ++i) emitarg(J, F, x[i]); #undef N } static void emitlocal(JF, int oploc, int opvar, js_Ast *ident) { int is_arguments = !strcmp(ident->string, "arguments"); int is_eval = !strcmp(ident->string, "eval"); int i; if (is_arguments) { F->lightweight = 0; F->arguments = 1; } checkfutureword(J, F, ident); if (F->strict && oploc == OP_SETLOCAL) { if (is_arguments) jsC_error(J, ident, "'arguments' is read-only in strict mode"); if (is_eval) jsC_error(J, ident, "'eval' is read-only in strict mode"); } if (is_eval) js_evalerror(J, "%s:%d: invalid use of 'eval'", J->filename, ident->line); i = findlocal(J, F, ident->string); if (i < 0) { emitstring(J, F, opvar, ident->string); } else { emit(J, F, oploc); emitarg(J, F, i); } } static int here(JF) { return F->codelen; } static int emitjump(JF, int opcode) { int inst; emit(J, F, opcode); inst = F->codelen; emitarg(J, F, 0); return inst; } static void emitjumpto(JF, int opcode, int dest) { emit(J, F, opcode); if (dest != (js_Instruction)dest) js_syntaxerror(J, "jump address integer overflow"); emitarg(J, F, dest); } static void labelto(JF, int inst, int addr) { if (addr != (js_Instruction)addr) js_syntaxerror(J, "jump address integer overflow"); F->code[inst] = addr; } static void label(JF, int inst) { labelto(J, F, inst, F->codelen); } /* Expressions */ static void ctypeof(JF, js_Ast *exp) { if (exp->a->type == EXP_IDENTIFIER) { emitline(J, F, exp->a); emitlocal(J, F, OP_GETLOCAL, OP_HASVAR, exp->a); } else { cexp(J, F, exp->a); } emitline(J, F, exp); emit(J, F, OP_TYPEOF); } static void cunary(JF, js_Ast *exp, int opcode) { cexp(J, F, exp->a); emitline(J, F, exp); emit(J, F, opcode); } static void cbinary(JF, js_Ast *exp, int opcode) { cexp(J, F, exp->a); cexp(J, F, exp->b); emitline(J, F, exp); emit(J, F, opcode); } static void carray(JF, js_Ast *list) { while (list) { emitline(J, F, list->a); cexp(J, F, list->a); emit(J, F, OP_INITARRAY); list = list->b; } } static void checkdup(JF, js_Ast *list, js_Ast *end) { char nbuf[32], sbuf[32]; const char *needle, *straw; if (end->a->type == EXP_NUMBER) needle = jsV_numbertostring(J, nbuf, end->a->number); else needle = end->a->string; while (list->a != end) { if (list->a->type == end->type) { js_Ast *prop = list->a->a; if (prop->type == EXP_NUMBER) straw = jsV_numbertostring(J, sbuf, prop->number); else straw = prop->string; if (!strcmp(needle, straw)) jsC_error(J, list, "duplicate property '%s' in object literal", needle); } list = list->b; } 
} static void cobject(JF, js_Ast *list) { js_Ast *head = list; while (list) { js_Ast *kv = list->a; js_Ast *prop = kv->a; if (prop->type == AST_IDENTIFIER || prop->type == EXP_STRING) { emitline(J, F, prop); emitstring(J, F, OP_STRING, prop->string); } else if (prop->type == EXP_NUMBER) { emitline(J, F, prop); emitnumber(J, F, prop->number); } else { jsC_error(J, prop, "invalid property name in object initializer"); } if (F->strict) checkdup(J, F, head, kv); switch (kv->type) { default: /* impossible */ break; case EXP_PROP_VAL: cexp(J, F, kv->b); emitline(J, F, kv); emit(J, F, OP_INITPROP); break; case EXP_PROP_GET: emitfunction(J, F, newfun(J, prop->line, NULL, NULL, kv->c, 0, F->strict)); emitline(J, F, kv); emit(J, F, OP_INITGETTER); break; case EXP_PROP_SET: emitfunction(J, F, newfun(J, prop->line, NULL, kv->b, kv->c, 0, F->strict)); emitline(J, F, kv); emit(J, F, OP_INITSETTER); break; } list = list->b; } } static int cargs(JF, js_Ast *list) { int n = 0; while (list) { cexp(J, F, list->a); list = list->b; ++n; } return n; } static void cassign(JF, js_Ast *exp) { js_Ast *lhs = exp->a; js_Ast *rhs = exp->b; switch (lhs->type) { case EXP_IDENTIFIER: cexp(J, F, rhs); emitline(J, F, exp); emitlocal(J, F, OP_SETLOCAL, OP_SETVAR, lhs); break; case EXP_INDEX: cexp(J, F, lhs->a); cexp(J, F, lhs->b); cexp(J, F, rhs); emitline(J, F, exp); emit(J, F, OP_SETPROP); break; case EXP_MEMBER: cexp(J, F, lhs->a); cexp(J, F, rhs); emitline(J, F, exp); emitstring(J, F, OP_SETPROP_S, lhs->b->string); break; default: jsC_error(J, lhs, "invalid l-value in assignment"); } } static void cassignforin(JF, js_Ast *stm) { js_Ast *lhs = stm->a; if (stm->type == STM_FOR_IN_VAR) { if (lhs->b) jsC_error(J, lhs->b, "more than one loop variable in for-in statement"); emitline(J, F, lhs->a); emitlocal(J, F, OP_SETLOCAL, OP_SETVAR, lhs->a->a); /* list(var-init(ident)) */ emit(J, F, OP_POP); return; } switch (lhs->type) { case EXP_IDENTIFIER: emitline(J, F, lhs); emitlocal(J, F, OP_SETLOCAL, OP_SETVAR, lhs); emit(J, F, OP_POP); break; case EXP_INDEX: cexp(J, F, lhs->a); cexp(J, F, lhs->b); emitline(J, F, lhs); emit(J, F, OP_ROT3); emit(J, F, OP_SETPROP); emit(J, F, OP_POP); break; case EXP_MEMBER: cexp(J, F, lhs->a); emitline(J, F, lhs); emit(J, F, OP_ROT2); emitstring(J, F, OP_SETPROP_S, lhs->b->string); emit(J, F, OP_POP); break; default: jsC_error(J, lhs, "invalid l-value in for-in loop assignment"); } } static void cassignop1(JF, js_Ast *lhs) { switch (lhs->type) { case EXP_IDENTIFIER: emitline(J, F, lhs); emitlocal(J, F, OP_GETLOCAL, OP_GETVAR, lhs); break; case EXP_INDEX: cexp(J, F, lhs->a); cexp(J, F, lhs->b); emitline(J, F, lhs); emit(J, F, OP_DUP2); emit(J, F, OP_GETPROP); break; case EXP_MEMBER: cexp(J, F, lhs->a); emitline(J, F, lhs); emit(J, F, OP_DUP); emitstring(J, F, OP_GETPROP_S, lhs->b->string); break; default: jsC_error(J, lhs, "invalid l-value in assignment"); } } static void cassignop2(JF, js_Ast *lhs, int postfix) { switch (lhs->type) { case EXP_IDENTIFIER: emitline(J, F, lhs); if (postfix) emit(J, F, OP_ROT2); emitlocal(J, F, OP_SETLOCAL, OP_SETVAR, lhs); break; case EXP_INDEX: emitline(J, F, lhs); if (postfix) emit(J, F, OP_ROT4); emit(J, F, OP_SETPROP); break; case EXP_MEMBER: emitline(J, F, lhs); if (postfix) emit(J, F, OP_ROT3); emitstring(J, F, OP_SETPROP_S, lhs->b->string); break; default: jsC_error(J, lhs, "invalid l-value in assignment"); } } static void cassignop(JF, js_Ast *exp, int opcode) { js_Ast *lhs = exp->a; js_Ast *rhs = exp->b; cassignop1(J, F, lhs); cexp(J, F, rhs); emitline(J, F, 
exp); emit(J, F, opcode); cassignop2(J, F, lhs, 0); } static void cdelete(JF, js_Ast *exp) { js_Ast *arg = exp->a; switch (arg->type) { case EXP_IDENTIFIER: if (F->strict) jsC_error(J, exp, "delete on an unqualified name is not allowed in strict mode"); emitline(J, F, exp); emitlocal(J, F, OP_DELLOCAL, OP_DELVAR, arg); break; case EXP_INDEX: cexp(J, F, arg->a); cexp(J, F, arg->b); emitline(J, F, exp); emit(J, F, OP_DELPROP); break; case EXP_MEMBER: cexp(J, F, arg->a); emitline(J, F, exp); emitstring(J, F, OP_DELPROP_S, arg->b->string); break; default: jsC_error(J, exp, "invalid l-value in delete expression"); } } static void ceval(JF, js_Ast *fun, js_Ast *args) { int n = cargs(J, F, args); F->lightweight = 0; F->arguments = 1; if (n == 0) emit(J, F, OP_UNDEF); else while (n-- > 1) emit(J, F, OP_POP); emit(J, F, OP_EVAL); } static void ccall(JF, js_Ast *fun, js_Ast *args) { int n; switch (fun->type) { case EXP_INDEX: cexp(J, F, fun->a); emit(J, F, OP_DUP); cexp(J, F, fun->b); emit(J, F, OP_GETPROP); emit(J, F, OP_ROT2); break; case EXP_MEMBER: cexp(J, F, fun->a); emit(J, F, OP_DUP); emitstring(J, F, OP_GETPROP_S, fun->b->string); emit(J, F, OP_ROT2); break; case EXP_IDENTIFIER: if (!strcmp(fun->string, "eval")) { ceval(J, F, fun, args); return; } /* fallthrough */ default: cexp(J, F, fun); emit(J, F, OP_UNDEF); break; } n = cargs(J, F, args); emit(J, F, OP_CALL); emitarg(J, F, n); } static void cexp(JF, js_Ast *exp) { int then, end; int n; switch (exp->type) { case EXP_STRING: emitline(J, F, exp); emitstring(J, F, OP_STRING, exp->string); break; case EXP_NUMBER: emitline(J, F, exp); emitnumber(J, F, exp->number); break; case EXP_UNDEF: emitline(J, F, exp); emit(J, F, OP_UNDEF); break; case EXP_NULL: emitline(J, F, exp); emit(J, F, OP_NULL); break; case EXP_TRUE: emitline(J, F, exp); emit(J, F, OP_TRUE); break; case EXP_FALSE: emitline(J, F, exp); emit(J, F, OP_FALSE); break; case EXP_THIS: emitline(J, F, exp); emit(J, F, OP_THIS); break; case EXP_REGEXP: emitline(J, F, exp); emitstring(J, F, OP_NEWREGEXP, exp->string); emitarg(J, F, exp->number); break; case EXP_OBJECT: emitline(J, F, exp); emit(J, F, OP_NEWOBJECT); cobject(J, F, exp->a); break; case EXP_ARRAY: emitline(J, F, exp); emit(J, F, OP_NEWARRAY); carray(J, F, exp->a); break; case EXP_FUN: emitline(J, F, exp); emitfunction(J, F, newfun(J, exp->line, exp->a, exp->b, exp->c, 0, F->strict)); break; case EXP_IDENTIFIER: emitline(J, F, exp); emitlocal(J, F, OP_GETLOCAL, OP_GETVAR, exp); break; case EXP_INDEX: cexp(J, F, exp->a); cexp(J, F, exp->b); emitline(J, F, exp); emit(J, F, OP_GETPROP); break; case EXP_MEMBER: cexp(J, F, exp->a); emitline(J, F, exp); emitstring(J, F, OP_GETPROP_S, exp->b->string); break; case EXP_CALL: ccall(J, F, exp->a, exp->b); break; case EXP_NEW: cexp(J, F, exp->a); n = cargs(J, F, exp->b); emitline(J, F, exp); emit(J, F, OP_NEW); emitarg(J, F, n); break; case EXP_DELETE: cdelete(J, F, exp); break; case EXP_PREINC: cassignop1(J, F, exp->a); emitline(J, F, exp); emit(J, F, OP_INC); cassignop2(J, F, exp->a, 0); break; case EXP_PREDEC: cassignop1(J, F, exp->a); emitline(J, F, exp); emit(J, F, OP_DEC); cassignop2(J, F, exp->a, 0); break; case EXP_POSTINC: cassignop1(J, F, exp->a); emitline(J, F, exp); emit(J, F, OP_POSTINC); cassignop2(J, F, exp->a, 1); emit(J, F, OP_POP); break; case EXP_POSTDEC: cassignop1(J, F, exp->a); emitline(J, F, exp); emit(J, F, OP_POSTDEC); cassignop2(J, F, exp->a, 1); emit(J, F, OP_POP); break; case EXP_VOID: cexp(J, F, exp->a); emitline(J, F, exp); emit(J, F, OP_POP); emit(J, F, 
OP_UNDEF); break; case EXP_TYPEOF: ctypeof(J, F, exp); break; case EXP_POS: cunary(J, F, exp, OP_POS); break; case EXP_NEG: cunary(J, F, exp, OP_NEG); break; case EXP_BITNOT: cunary(J, F, exp, OP_BITNOT); break; case EXP_LOGNOT: cunary(J, F, exp, OP_LOGNOT); break; case EXP_BITOR: cbinary(J, F, exp, OP_BITOR); break; case EXP_BITXOR: cbinary(J, F, exp, OP_BITXOR); break; case EXP_BITAND: cbinary(J, F, exp, OP_BITAND); break; case EXP_EQ: cbinary(J, F, exp, OP_EQ); break; case EXP_NE: cbinary(J, F, exp, OP_NE); break; case EXP_STRICTEQ: cbinary(J, F, exp, OP_STRICTEQ); break; case EXP_STRICTNE: cbinary(J, F, exp, OP_STRICTNE); break; case EXP_LT: cbinary(J, F, exp, OP_LT); break; case EXP_GT: cbinary(J, F, exp, OP_GT); break; case EXP_LE: cbinary(J, F, exp, OP_LE); break; case EXP_GE: cbinary(J, F, exp, OP_GE); break; case EXP_INSTANCEOF: cbinary(J, F, exp, OP_INSTANCEOF); break; case EXP_IN: cbinary(J, F, exp, OP_IN); break; case EXP_SHL: cbinary(J, F, exp, OP_SHL); break; case EXP_SHR: cbinary(J, F, exp, OP_SHR); break; case EXP_USHR: cbinary(J, F, exp, OP_USHR); break; case EXP_ADD: cbinary(J, F, exp, OP_ADD); break; case EXP_SUB: cbinary(J, F, exp, OP_SUB); break; case EXP_MUL: cbinary(J, F, exp, OP_MUL); break; case EXP_DIV: cbinary(J, F, exp, OP_DIV); break; case EXP_MOD: cbinary(J, F, exp, OP_MOD); break; case EXP_ASS: cassign(J, F, exp); break; case EXP_ASS_MUL: cassignop(J, F, exp, OP_MUL); break; case EXP_ASS_DIV: cassignop(J, F, exp, OP_DIV); break; case EXP_ASS_MOD: cassignop(J, F, exp, OP_MOD); break; case EXP_ASS_ADD: cassignop(J, F, exp, OP_ADD); break; case EXP_ASS_SUB: cassignop(J, F, exp, OP_SUB); break; case EXP_ASS_SHL: cassignop(J, F, exp, OP_SHL); break; case EXP_ASS_SHR: cassignop(J, F, exp, OP_SHR); break; case EXP_ASS_USHR: cassignop(J, F, exp, OP_USHR); break; case EXP_ASS_BITAND: cassignop(J, F, exp, OP_BITAND); break; case EXP_ASS_BITXOR: cassignop(J, F, exp, OP_BITXOR); break; case EXP_ASS_BITOR: cassignop(J, F, exp, OP_BITOR); break; case EXP_COMMA: cexp(J, F, exp->a); emitline(J, F, exp); emit(J, F, OP_POP); cexp(J, F, exp->b); break; case EXP_LOGOR: cexp(J, F, exp->a); emitline(J, F, exp); emit(J, F, OP_DUP); end = emitjump(J, F, OP_JTRUE); emit(J, F, OP_POP); cexp(J, F, exp->b); label(J, F, end); break; case EXP_LOGAND: cexp(J, F, exp->a); emitline(J, F, exp); emit(J, F, OP_DUP); end = emitjump(J, F, OP_JFALSE); emit(J, F, OP_POP); cexp(J, F, exp->b); label(J, F, end); break; case EXP_COND: cexp(J, F, exp->a); emitline(J, F, exp); then = emitjump(J, F, OP_JTRUE); cexp(J, F, exp->c); end = emitjump(J, F, OP_JUMP); label(J, F, then); cexp(J, F, exp->b); label(J, F, end); break; default: jsC_error(J, exp, "unknown expression: (%s)", jsP_aststring(exp->type)); } } /* Patch break and continue statements */ static void addjump(JF, enum js_AstType type, js_Ast *target, int inst) { js_JumpList *jump = js_malloc(J, sizeof *jump); jump->type = type; jump->inst = inst; jump->next = target->jumps; target->jumps = jump; } static void labeljumps(JF, js_Ast *stm, int baddr, int caddr) { js_JumpList *jump = stm->jumps; while (jump) { js_JumpList *next = jump->next; if (jump->type == STM_BREAK) labelto(J, F, jump->inst, baddr); if (jump->type == STM_CONTINUE) labelto(J, F, jump->inst, caddr); js_free(J, jump); jump = next; } stm->jumps = NULL; } static int isloop(enum js_AstType T) { return T == STM_DO || T == STM_WHILE || T == STM_FOR || T == STM_FOR_VAR || T == STM_FOR_IN || T == STM_FOR_IN_VAR; } static int isfun(enum js_AstType T) { return T == AST_FUNDEC || T == EXP_FUN 
|| T == EXP_PROP_GET || T == EXP_PROP_SET; } static int matchlabel(js_Ast *node, const char *label) { while (node && node->type == STM_LABEL) { if (!strcmp(node->a->string, label)) return 1; node = node->parent; } return 0; } static js_Ast *breaktarget(JF, js_Ast *node, const char *label) { while (node) { if (isfun(node->type)) break; if (!label) { if (isloop(node->type) || node->type == STM_SWITCH) return node; } else { if (matchlabel(node->parent, label)) return node; } node = node->parent; } return NULL; } static js_Ast *continuetarget(JF, js_Ast *node, const char *label) { while (node) { if (isfun(node->type)) break; if (isloop(node->type)) { if (!label) return node; else if (matchlabel(node->parent, label)) return node; } node = node->parent; } return NULL; } static js_Ast *returntarget(JF, js_Ast *node) { while (node) { if (isfun(node->type)) return node; node = node->parent; } return NULL; } /* Emit code to rebalance stack and scopes during an abrupt exit */ static void cexit(JF, enum js_AstType T, js_Ast *node, js_Ast *target) { js_Ast *prev; do { prev = node, node = node->parent; switch (node->type) { default: /* impossible */ break; case STM_WITH: emitline(J, F, node); emit(J, F, OP_ENDWITH); break; case STM_FOR_IN: case STM_FOR_IN_VAR: emitline(J, F, node); /* pop the iterator if leaving the loop */ if (F->script) { if (T == STM_RETURN || T == STM_BREAK || (T == STM_CONTINUE && target != node)) { /* pop the iterator, save the return or exp value */ emit(J, F, OP_ROT2); emit(J, F, OP_POP); } if (T == STM_CONTINUE) emit(J, F, OP_ROT2); /* put the iterator back on top */ } else { if (T == STM_RETURN) { /* pop the iterator, save the return value */ emit(J, F, OP_ROT2); emit(J, F, OP_POP); } if (T == STM_BREAK || (T == STM_CONTINUE && target != node)) emit(J, F, OP_POP); /* pop the iterator */ } break; case STM_TRY: emitline(J, F, node); /* came from try block */ if (prev == node->a) { emit(J, F, OP_ENDTRY); if (node->d) cstm(J, F, node->d); /* finally */ } /* came from catch block */ if (prev == node->c) { /* ... 
with finally */ if (node->d) { emit(J, F, OP_ENDCATCH); emit(J, F, OP_ENDTRY); cstm(J, F, node->d); /* finally */ } else { emit(J, F, OP_ENDCATCH); } } break; } } while (node != target); } /* Try/catch/finally */ static void ctryfinally(JF, js_Ast *trystm, js_Ast *finallystm) { int L1; L1 = emitjump(J, F, OP_TRY); { /* if we get here, we have caught an exception in the try block */ cstm(J, F, finallystm); /* inline finally block */ emit(J, F, OP_THROW); /* rethrow exception */ } label(J, F, L1); cstm(J, F, trystm); emit(J, F, OP_ENDTRY); cstm(J, F, finallystm); } static void ctrycatch(JF, js_Ast *trystm, js_Ast *catchvar, js_Ast *catchstm) { int L1, L2; L1 = emitjump(J, F, OP_TRY); { /* if we get here, we have caught an exception in the try block */ checkfutureword(J, F, catchvar); if (F->strict) { if (!strcmp(catchvar->string, "arguments")) jsC_error(J, catchvar, "redefining 'arguments' is not allowed in strict mode"); if (!strcmp(catchvar->string, "eval")) jsC_error(J, catchvar, "redefining 'eval' is not allowed in strict mode"); } emitline(J, F, catchvar); emitstring(J, F, OP_CATCH, catchvar->string); cstm(J, F, catchstm); emit(J, F, OP_ENDCATCH); L2 = emitjump(J, F, OP_JUMP); /* skip past the try block */ } label(J, F, L1); cstm(J, F, trystm); emit(J, F, OP_ENDTRY); label(J, F, L2); } static void ctrycatchfinally(JF, js_Ast *trystm, js_Ast *catchvar, js_Ast *catchstm, js_Ast *finallystm) { int L1, L2, L3; L1 = emitjump(J, F, OP_TRY); { /* if we get here, we have caught an exception in the try block */ L2 = emitjump(J, F, OP_TRY); { /* if we get here, we have caught an exception in the catch block */ cstm(J, F, finallystm); /* inline finally block */ emit(J, F, OP_THROW); /* rethrow exception */ } label(J, F, L2); if (F->strict) { checkfutureword(J, F, catchvar); if (!strcmp(catchvar->string, "arguments")) jsC_error(J, catchvar, "redefining 'arguments' is not allowed in strict mode"); if (!strcmp(catchvar->string, "eval")) jsC_error(J, catchvar, "redefining 'eval' is not allowed in strict mode"); } emitline(J, F, catchvar); emitstring(J, F, OP_CATCH, catchvar->string); cstm(J, F, catchstm); emit(J, F, OP_ENDCATCH); emit(J, F, OP_ENDTRY); L3 = emitjump(J, F, OP_JUMP); /* skip past the try block to the finally block */ } label(J, F, L1); cstm(J, F, trystm); emit(J, F, OP_ENDTRY); label(J, F, L3); cstm(J, F, finallystm); } /* Switch */ static void cswitch(JF, js_Ast *ref, js_Ast *head) { js_Ast *node, *clause, *def = NULL; int end; cexp(J, F, ref); /* emit an if-else chain of tests for the case clause expressions */ for (node = head; node; node = node->b) { clause = node->a; if (clause->type == STM_DEFAULT) { if (def) jsC_error(J, clause, "more than one default label in switch"); def = clause; } else { cexp(J, F, clause->a); emitline(J, F, clause); clause->casejump = emitjump(J, F, OP_JCASE); } } emit(J, F, OP_POP); if (def) { emitline(J, F, def); def->casejump = emitjump(J, F, OP_JUMP); end = 0; } else { end = emitjump(J, F, OP_JUMP); } /* emit the case clause bodies */ for (node = head; node; node = node->b) { clause = node->a; label(J, F, clause->casejump); if (clause->type == STM_DEFAULT) cstmlist(J, F, clause->a); else cstmlist(J, F, clause->b); } if (end) label(J, F, end); } /* Statements */ static void cvarinit(JF, js_Ast *list) { while (list) { js_Ast *var = list->a; if (var->b) { cexp(J, F, var->b); emitline(J, F, var); emitlocal(J, F, OP_SETLOCAL, OP_SETVAR, var->a); emit(J, F, OP_POP); } list = list->b; } } static void cstm(JF, js_Ast *stm) { js_Ast *target; int loop, cont, 
then, end; emitline(J, F, stm); switch (stm->type) { case AST_FUNDEC: break; case STM_BLOCK: cstmlist(J, F, stm->a); break; case STM_EMPTY: if (F->script) { emitline(J, F, stm); emit(J, F, OP_POP); emit(J, F, OP_UNDEF); } break; case STM_VAR: cvarinit(J, F, stm->a); break; case STM_IF: if (stm->c) { cexp(J, F, stm->a); emitline(J, F, stm); then = emitjump(J, F, OP_JTRUE); cstm(J, F, stm->c); emitline(J, F, stm); end = emitjump(J, F, OP_JUMP); label(J, F, then); cstm(J, F, stm->b); label(J, F, end); } else { cexp(J, F, stm->a); emitline(J, F, stm); end = emitjump(J, F, OP_JFALSE); cstm(J, F, stm->b); label(J, F, end); } break; case STM_DO: loop = here(J, F); cstm(J, F, stm->a); cont = here(J, F); cexp(J, F, stm->b); emitline(J, F, stm); emitjumpto(J, F, OP_JTRUE, loop); labeljumps(J, F, stm, here(J,F), cont); break; case STM_WHILE: loop = here(J, F); cexp(J, F, stm->a); emitline(J, F, stm); end = emitjump(J, F, OP_JFALSE); cstm(J, F, stm->b); emitline(J, F, stm); emitjumpto(J, F, OP_JUMP, loop); label(J, F, end); labeljumps(J, F, stm, here(J,F), loop); break; case STM_FOR: case STM_FOR_VAR: if (stm->type == STM_FOR_VAR) { cvarinit(J, F, stm->a); } else { if (stm->a) { cexp(J, F, stm->a); emit(J, F, OP_POP); } } loop = here(J, F); if (stm->b) { cexp(J, F, stm->b); emitline(J, F, stm); end = emitjump(J, F, OP_JFALSE); } else { end = 0; } cstm(J, F, stm->d); cont = here(J, F); if (stm->c) { cexp(J, F, stm->c); emit(J, F, OP_POP); } emitline(J, F, stm); emitjumpto(J, F, OP_JUMP, loop); if (end) label(J, F, end); labeljumps(J, F, stm, here(J,F), cont); break; case STM_FOR_IN: case STM_FOR_IN_VAR: cexp(J, F, stm->b); emitline(J, F, stm); emit(J, F, OP_ITERATOR); loop = here(J, F); { emitline(J, F, stm); emit(J, F, OP_NEXTITER); end = emitjump(J, F, OP_JFALSE); cassignforin(J, F, stm); if (F->script) { emit(J, F, OP_ROT2); cstm(J, F, stm->c); emit(J, F, OP_ROT2); } else { cstm(J, F, stm->c); } emitline(J, F, stm); emitjumpto(J, F, OP_JUMP, loop); } label(J, F, end); labeljumps(J, F, stm, here(J,F), loop); break; case STM_SWITCH: cswitch(J, F, stm->a, stm->b); labeljumps(J, F, stm, here(J,F), 0); break; case STM_LABEL: cstm(J, F, stm->b); /* skip consecutive labels */ while (stm->type == STM_LABEL) stm = stm->b; /* loops and switches have already been labelled */ if (!isloop(stm->type) && stm->type != STM_SWITCH) labeljumps(J, F, stm, here(J,F), 0); break; case STM_BREAK: if (stm->a) { checkfutureword(J, F, stm->a); target = breaktarget(J, F, stm->parent, stm->a->string); if (!target) jsC_error(J, stm, "break label '%s' not found", stm->a->string); } else { target = breaktarget(J, F, stm->parent, NULL); if (!target) jsC_error(J, stm, "unlabelled break must be inside loop or switch"); } cexit(J, F, STM_BREAK, stm, target); emitline(J, F, stm); addjump(J, F, STM_BREAK, target, emitjump(J, F, OP_JUMP)); break; case STM_CONTINUE: if (stm->a) { checkfutureword(J, F, stm->a); target = continuetarget(J, F, stm->parent, stm->a->string); if (!target) jsC_error(J, stm, "continue label '%s' not found", stm->a->string); } else { target = continuetarget(J, F, stm->parent, NULL); if (!target) jsC_error(J, stm, "continue must be inside loop"); } cexit(J, F, STM_CONTINUE, stm, target); emitline(J, F, stm); addjump(J, F, STM_CONTINUE, target, emitjump(J, F, OP_JUMP)); break; case STM_RETURN: if (stm->a) cexp(J, F, stm->a); else emit(J, F, OP_UNDEF); target = returntarget(J, F, stm->parent); if (!target) jsC_error(J, stm, "return not in function"); cexit(J, F, STM_RETURN, stm, target); emitline(J, F, stm); emit(J, 
F, OP_RETURN); break; case STM_THROW: cexp(J, F, stm->a); emitline(J, F, stm); emit(J, F, OP_THROW); break; case STM_WITH: F->lightweight = 0; if (F->strict) jsC_error(J, stm->a, "'with' statements are not allowed in strict mode"); cexp(J, F, stm->a); emitline(J, F, stm); emit(J, F, OP_WITH); cstm(J, F, stm->b); emitline(J, F, stm); emit(J, F, OP_ENDWITH); break; case STM_TRY: emitline(J, F, stm); if (stm->b && stm->c) { F->lightweight = 0; if (stm->d) ctrycatchfinally(J, F, stm->a, stm->b, stm->c, stm->d); else ctrycatch(J, F, stm->a, stm->b, stm->c); } else { ctryfinally(J, F, stm->a, stm->d); } break; case STM_DEBUGGER: emitline(J, F, stm); emit(J, F, OP_DEBUGGER); break; default: if (F->script) { emitline(J, F, stm); emit(J, F, OP_POP); cexp(J, F, stm); } else { cexp(J, F, stm); emitline(J, F, stm); emit(J, F, OP_POP); } break; } } static void cstmlist(JF, js_Ast *list) { while (list) { cstm(J, F, list->a); list = list->b; } } /* Declarations and programs */ static int listlength(js_Ast *list) { int n = 0; while (list) ++n, list = list->b; return n; } static void cparams(JF, js_Ast *list, js_Ast *fname) { F->numparams = listlength(list); while (list) { checkfutureword(J, F, list->a); addlocal(J, F, list->a, 0); list = list->b; } } static void cvardecs(JF, js_Ast *node) { if (node->type == AST_LIST) { while (node) { cvardecs(J, F, node->a); node = node->b; } return; } if (isfun(node->type)) return; /* stop at inner functions */ if (node->type == EXP_VAR) { checkfutureword(J, F, node->a); addlocal(J, F, node->a, 1); } if (node->a) cvardecs(J, F, node->a); if (node->b) cvardecs(J, F, node->b); if (node->c) cvardecs(J, F, node->c); if (node->d) cvardecs(J, F, node->d); } static void cfundecs(JF, js_Ast *list) { while (list) { js_Ast *stm = list->a; if (stm->type == AST_FUNDEC) { emitline(J, F, stm); emitfunction(J, F, newfun(J, stm->line, stm->a, stm->b, stm->c, 0, F->strict)); emitline(J, F, stm); emit(J, F, OP_SETLOCAL); emitarg(J, F, addlocal(J, F, stm->a, 0)); emit(J, F, OP_POP); } list = list->b; } } static void cfunbody(JF, js_Ast *name, js_Ast *params, js_Ast *body) { F->lightweight = 1; F->arguments = 0; if (F->script) F->lightweight = 0; /* Check if first statement is 'use strict': */ if (body && body->type == AST_LIST && body->a && body->a->type == EXP_STRING) if (!strcmp(body->a->string, "use strict")) F->strict = 1; F->lastline = F->line; cparams(J, F, params, name); if (body) { cvardecs(J, F, body); cfundecs(J, F, body); } if (name) { checkfutureword(J, F, name); if (findlocal(J, F, name->string) < 0) { emit(J, F, OP_CURRENT); emit(J, F, OP_SETLOCAL); emitarg(J, F, addlocal(J, F, name, 0)); emit(J, F, OP_POP); } } if (F->script) { emit(J, F, OP_UNDEF); cstmlist(J, F, body); emit(J, F, OP_RETURN); } else { cstmlist(J, F, body); emit(J, F, OP_UNDEF); emit(J, F, OP_RETURN); } } js_Function *jsC_compilefunction(js_State *J, js_Ast *prog) { return newfun(J, prog->line, prog->a, prog->b, prog->c, 0, J->default_strict); } js_Function *jsC_compilescript(js_State *J, js_Ast *prog, int default_strict) { return newfun(J, prog ? prog->line : 0, NULL, NULL, prog, 1, default_strict); }
null
296
CWE-787
CVE-2021-45926
Version 0.9.3, Beta 4 ============= libmdb: * Fix build failure with emscripten #299 Version 0.9.3, Beta 3 ============= libmdb / libmdbsql: * Fix build when _XOPEN_SOURCE was already defined on the platform #298 Version 0.9.3, Beta 2 ============= libmdb: * Migrate to g_memdup2 #287 #288 libmdbsql: * Allow double quoted (") database names #291 * Allow spaces in database names #292 #293 Docs: * Add JET version for access 2013/2016/2019 to docs #286 Version 0.9.3, Beta 1 ============= libmdb: * Support files created with Access 2019 #260 #277 * Fix a warning when reading in binary property values #262 * Fix signed-unsigned comparison warning #269 libmdbsql: * Support negative floating point literals #274 #279 * Comparison operators behaved incorrectly when the constant was on the left #283 #285 * Improved support for file paths in `CONNECT TO` statements #275 #280 #282 ODBC: * unixODBC now uses the `--libdir` passed at configure-time #261 * Fix a segfault in PyODBC when `SQLGetTypeInfo` is called on an unsupported data type #278 Version 0.9.2 ============= MDB Tools 0.9.2 is a security and bug-fix release. Due to a number of memory errors uncovered by OSS-Fuzz, all users who use MDB Tools to read data from untrusted sources are encouraged to upgrade to 0.9.2 as soon as possible. The release also includes some minor improvements and behavior changes, described below. libmdb: * Fix infinite loop with malformed input (oss-fuzz/28789) * Fix buffer overrun and some out of bounds memory accesses (oss-fuzz/28832 + oss-fuzz/28807) * Fix potential memory leak (oss-fuzz/28791) * Improved bounds and return value checking (oss-fuzz/29328 + oss-fuzz/29329) * Add support for numeric scale/precision on JET3 databases and floating-point column types * `mdb_col_to_string` now prints a warning and returns `""` for any unsupported data type * Improved warning with invalid row data #253 Command-line tools: * All CLI tools which accept string arguments are now locale-aware (#237) * All CLI tools now accept a `--version` argument (#232) * `mdb-export`: Fix issue where exported SQL field sizes were sometimes twice the necessary size (#112) * `mdb-export`: Improved handling of BLOBs (#250) * `mdb-export`: Implement a serial type and relations for MySQL (#257) * `mdb-queries` now has long option names `--list`, `--newline`, and `--delimiter` * `mdb-schema`: Exit with an error code if the requested table is not found ODBC: * `SQLBindCol` now respects its TargetType (fCType) argument and converts bound data to the requested type (#23 #242) * `SQLFetchCol` now returns `SQL_SUCCESS_WITH_INFO` if a bound column was truncated, and `SQL_ERROR` if a bound value is out of range of the target type. 
* Fix handling of numeric types in ODBC driver (#255) Build and documentation: * Updated man pages with new SQL features and correct `MDBOPTS` information (#203) * Add generated API documentation (#239) * Move `HACKING` to `HACKING.md` (with a relocation notice) and `TODO` to `TODO.md` * Fuzz-test all pull requests (#230) * Add `-DHAVE_GLIB` to library's `pkg-config --cflags` if needed (#254) Version 0.9.1 ============= * MDB Tools has migrated from Travis CI to GitHub Actions for automated builds * Fix a build error with msys2 (Windows) when GLib was disabled * Remove dependency on math.h #228 * mdb-export now uses scientific notation only for very small numbers #84 * mdb-schema no longer emits illegal ALTER TABLE statements for SQLite relationships #82 * mdb-schema now omits the namespace for PostgreSQL indexes and constraints #93 * Automatically detect character encoding of JET3 databases #224 * JET3: Transcode Latin-1 text to UTF-8 when iconv is not present * JET4: Transcode Unicode text to UTF-8 when iconv is not present (using `wcstombs`) #223 * Fix a buffer overrun with binary string fields OSS-Fuzz/28779 #216 * Fix a stack overflow with malformed input OSS-Fuzz/28780 #217 * Improved validation of date input OSS-Fuzz/28790 #219 * Fix a potential buffer overrun when compiled without iconv OSS-Fuzz/28773 #220 * Fix an extra newline that appeared at the `mdb-sql` prompt when Readline was not present * Fix potential stack corruption with malformed input * mdb-export has a new --escape-invisible flag for C-style escaping of tabs and newlines #222 * Print a warning if MDBOPTS=use_index is set at run-time but libmswstr was absent at compile-time #215 #226 * Improved support for databases that were improperly closed #120 * Remove warnings about mdb_find_row and row_size that did not necessarily indicate a problem Version 0.9.0 ============= MDB Tools is under new management! Update your bookmarks and tell your favorite search engine that this is our new home on the WWW: https://github.com/mdbtools/mdbtools MDB Tools 0.9.0 builds off the fork of cyberemissary on GitHub, collects many years of unapplied patches, and includes a number of internal improvements. Besides a host of security fixes, GLib is now completely optional, files can be opened completely in-memory, and many global variables have been eliminated to facilitate thread safety. The core library, libmdb, is believed to be thread-safe in the sense that several handles can be opened simultaneously and passed between threads. However, individual handles are NOT thread-safe, so don't try to do work on a single handle from multiple threads without implementing your own locking mechanism. The auxiliary libraries, libmdbsql and ODBC, still have some non-reentrant function calls. Thread safety is not promised, though the situation is significantly improved from the 0.7 days. The ABI has some slight changes (mostly having to do with struct layouts) so you'll need to bump your SO numbers and such. The main changes to the API are the introduction of a new function for in-memory parsing: MdbHandle *mdb_open_buffer(void *buffer, size_t len, MdbFileFlags flags); And the modification of the "mdb_set_date_fmt" function to include a handle parameter, so that date formats can be set in a non-global fashion: void mdb_set_date_fmt(MdbHandle *mdb, const char *); The above function is the most common reason that old projects will fail to compile on 0.9, and the fix is straightforward. 
The previously-deprecated functions mdb_init() and mdb_exit() have been removed. These functions did nothing; any calls to them should be excised with prejudice. This release introduces two new CLI tools (mdb-count and mdb-queries) and deprecates mdb-array, mdb-header, and mdb-parsecsv. Several man pages have been added or otherwise brought up to date. The GNOME-based GUI tool that formerly shipped with MDB Tools has been moved to a separate project, here: https://github.com/mdbtools/gmdb2 It seems to compile but should probably be classified as abandonware. The project uses a deprecated GUI toolkit, and as such gmdb2 is no longer included in Linux distros that used to carry it. Version 0.7.1 ============= autogen.sh is no more. Use "autoreconf -if" to bootstrap the configuration. autoconf/automake has been updated to more recent standards. - config.guess config.sub depcomp install-sh ltmain.sh missing ylwrap files are now in build-aux/ directory You'll now need autoconf >= 2.58 An experimental (buggy) version of ODBC driver that supports UCS-2 interface is now available: libmdbodbcW.so Note that libmdbodbc supports UTF-8 really well, so that this is usually not needed. Version 0.3 =========== The organization of the project files has changed a bit from that last release. The autoconf stuff has been moved to the root directory. The include directory has also been moved from src/include to include
null
Version 0.9.3 ============= libmdb: * Support files created with Access 2019 #260 #277 * Fix a warning when reading in binary property values #262 * Fix signed-unsigned comparison warning #269 * Migrate to `g_memdup2` #287 #288 * Fix build when `_XOPEN_SOURCE` was already defined on the platform #298 * Fix build failure with emscripten #299 libmdbsql: * Support negative floating point literals #274 #279 * Improved support for file paths in `CONNECT TO` statements #275 #280 #282 * Comparison operators behaved incorrectly when the constant was on the left #283 #285 * Allow double quoted (") database names #291 * Allow spaces in database names #292 #293 ODBC: * unixODBC now uses the `--libdir` passed at configure-time #261 * Fix a segfault in PyODBC when `SQLGetTypeInfo` is called on an unsupported data type #278 Docs: * Add JET version for access 2013/2016/2019 to docs #286 Version 0.9.2 ============= MDB Tools 0.9.2 is a security and bug-fix release. Due to a number of memory errors uncovered by OSS-Fuzz, all users who use MDB Tools to read data from untrusted sources are encouraged to upgrade to 0.9.2 as soon as possible. The release also includes some minor improvements and behavior changes, described below. libmdb: * Fix infinite loop with malformed input (oss-fuzz/28789) * Fix buffer overrun and some out of bounds memory accesses (oss-fuzz/28832 + oss-fuzz/28807) * Fix potential memory leak (oss-fuzz/28791) * Improved bounds and return value checking (oss-fuzz/29328 + oss-fuzz/29329) * Add support for numeric scale/precision on JET3 databases and floating-point column types * `mdb_col_to_string` now prints a warning and returns `""` for any unsupported data type * Improved warning with invalid row data #253 Command-line tools: * All CLI tools which accept string arguments are now locale-aware (#237) * All CLI tools now accept a `--version` argument (#232) * `mdb-export`: Fix issue where exported SQL field sizes were sometimes twice the necessary size (#112) * `mdb-export`: Improved handling of BLOBs (#250) * `mdb-export`: Implement a serial type and relations for MySQL (#257) * `mdb-queries` now has long option names `--list`, `--newline`, and `--delimiter` * `mdb-schema`: Exit with an error code if the requested table is not found ODBC: * `SQLBindCol` now respects its TargetType (fCType) argument and converts bound data to the requested type (#23 #242) * `SQLFetchCol` now returns `SQL_SUCCESS_WITH_INFO` if a bound column was truncated, and `SQL_ERROR` if a bound value is out of range of the target type. 
* Fix handling of numeric types in ODBC driver (#255) Build and documentation: * Updated man pages with new SQL features and correct `MDBOPTS` information (#203) * Add generated API documentation (#239) * Move `HACKING` to `HACKING.md` (with a relocation notice) and `TODO` to `TODO.md` * Fuzz-test all pull requests (#230) * Add `-DHAVE_GLIB` to library's `pkg-config --cflags` if needed (#254) Version 0.9.1 ============= * MDB Tools has migrated from Travis CI to GitHub Actions for automated builds * Fix a build error with msys2 (Windows) when GLib was disabled * Remove dependency on math.h #228 * mdb-export now uses scientific notation only for very small numbers #84 * mdb-schema no longer emits illegal ALTER TABLE statements for SQLite relationships #82 * mdb-schema now omits the namespace for PostgreSQL indexes and constraints #93 * Automatically detect character encoding of JET3 databases #224 * JET3: Transcode Latin-1 text to UTF-8 when iconv is not present * JET4: Transcode Unicode text to UTF-8 when iconv is not present (using `wcstombs`) #223 * Fix a buffer overrun with binary string fields OSS-Fuzz/28779 #216 * Fix a stack overflow with malformed input OSS-Fuzz/28780 #217 * Improved validation of date input OSS-Fuzz/28790 #219 * Fix a potential buffer overrun when compiled without iconv OSS-Fuzz/28773 #220 * Fix an extra newline that appeared at the `mdb-sql` prompt when Readline was not present * Fix potential stack corruption with malformed input * mdb-export has a new --escape-invisible flag for C-style escaping of tabs and newlines #222 * Print a warning if MDBOPTS=use_index is set at run-time but libmswstr was absent at compile-time #215 #226 * Improved support for databases that were improperly closed #120 * Remove warnings about mdb_find_row and row_size that did not necessarily indicate a problem Version 0.9.0 ============= MDB Tools is under new management! Update your bookmarks and tell your favorite search engine that this is our new home on the WWW: https://github.com/mdbtools/mdbtools MDB Tools 0.9.0 builds off the fork of cyberemissary on GitHub, collects many years of unapplied patches, and includes a number of internal improvements. Besides a host of security fixes, GLib is now completely optional, files can be opened completely in-memory, and many global variables have been eliminated to facilitate thread safety. The core library, libmdb, is believed to be thread-safe in the sense that several handles can be opened simultaneously and passed between threads. However, individual handles are NOT thread-safe, so don't try to do work on a single handle from multiple threads without implementing your own locking mechanism. The auxiliary libraries, libmdbsql and ODBC, still have some non-reentrant function calls. Thread safety is not promised, though the situation is significantly improved from the 0.7 days. The ABI has some slight changes (mostly having to do with struct layouts) so you'll need to bump your SO numbers and such. The main changes to the API are the introduction of a new function for in-memory parsing: MdbHandle *mdb_open_buffer(void *buffer, size_t len, MdbFileFlags flags); And the modification of the "mdb_set_date_fmt" function to include a handle parameter, so that date formats can be set in a non-global fashion: void mdb_set_date_fmt(MdbHandle *mdb, const char *); The above function is the most common reason that old projects will fail to compile on 0.9, and the fix is straightforward. 
The previously-deprecated functions mdb_init() and mdb_exit() have been removed. These functions did nothing; any calls to them should be excised with prejudice. This release introduces two new CLI tools (mdb-count and mdb-queries) and deprecates mdb-array, mdb-header, and mdb-parsecsv. Several man pages have been added or otherwise brought up to date. The GNOME-based GUI tool that formerly shipped with MDB Tools has been moved to a separate project, here: https://github.com/mdbtools/gmdb2 It seems to compile but should probably be classified as abandonware. The project uses a deprecated GUI toolkit, and as such gmdb2 is no longer included in Linux distros that used to carry it. Version 0.7.1 ============= autogen.sh is no more. Use "autoreconf -if" to bootstrap the configuration. autoconf/automake has been updated to more recent standards. - config.guess config.sub depcomp install-sh ltmain.sh missing ylwrap files are now in build-aux/ directory You'll now need autoconf >= 2.58 An experimental (buggy) version of ODBC driver that supports UCS-2 interface is now available: libmdbodbcW.so Note that libmdbodbc supports UTF-8 really well, so that this is usually not needed. Version 0.3 =========== The organization of the project files has changed a bit from that last release. The autoconf stuff has been moved to the root directory. The include directory has also been moved from src/include to include
null
297
CWE-787
CVE-2021-45927
Version 0.9.3, Beta 4 ============= libmdb: * Fix build failure with emscripten #299 Version 0.9.3, Beta 3 ============= libmdb / libmdbsql: * Fix build when _XOPEN_SOURCE was already defined on the platform #298 Version 0.9.3, Beta 2 ============= libmdb: * Migrate to g_memdup2 #287 #288 libmdbsql: * Allow double quoted (") database names #291 * Allow spaces in database names #292 #293 Docs: * Add JET version for access 2013/2016/2019 to docs #286 Version 0.9.3, Beta 1 ============= libmdb: * Support files created with Access 2019 #260 #277 * Fix a warning when reading in binary property values #262 * Fix signed-unsigned comparison warning #269 libmdbsql: * Support negative floating point literals #274 #279 * Comparison operators behaved incorrectly when the constant was on the left #283 #285 * Improved support for file paths in `CONNECT TO` statements #275 #280 #282 ODBC: * unixODBC now uses the `--libdir` passed at configure-time #261 * Fix a segfault in PyODBC when `SQLGetTypeInfo` is called on an unsupported data type #278 Version 0.9.2 ============= MDB Tools 0.9.2 is a security and bug-fix release. Due to a number of memory errors uncovered by OSS-Fuzz, all users who use MDB Tools to read data from untrusted sources are encouraged to upgrade to 0.9.2 as soon as possible. The release also includes some minor improvements and behavior changes, described below. libmdb: * Fix infinite loop with malformed input (oss-fuzz/28789) * Fix buffer overrun and some out of bounds memory accesses (oss-fuzz/28832 + oss-fuzz/28807) * Fix potential memory leak (oss-fuzz/28791) * Improved bounds and return value checking (oss-fuzz/29328 + oss-fuzz/29329) * Add support for numeric scale/precision on JET3 databases and floating-point column types * `mdb_col_to_string` now prints a warning and returns `""` for any unsupported data type * Improved warning with invalid row data #253 Command-line tools: * All CLI tools which accept string arguments are now locale-aware (#237) * All CLI tools now accept a `--version` argument (#232) * `mdb-export`: Fix issue where exported SQL field sizes were sometimes twice the necessary size (#112) * `mdb-export`: Improved handling of BLOBs (#250) * `mdb-export`: Implement a serial type and relations for MySQL (#257) * `mdb-queries` now has long option names `--list`, `--newline`, and `--delimiter` * `mdb-schema`: Exit with an error code if the requested table is not found ODBC: * `SQLBindCol` now respects its TargetType (fCType) argument and converts bound data to the requested type (#23 #242) * `SQLFetchCol` now returns `SQL_SUCCESS_WITH_INFO` if a bound column was truncated, and `SQL_ERROR` if a bound value is out of range of the target type. 
* Fix handling of numeric types in ODBC driver (#255) Build and documentation: * Updated man pages with new SQL features and correct `MDBOPTS` information (#203) * Add generated API documentation (#239) * Move `HACKING` to `HACKING.md` (with a relocation notice) and `TODO` to `TODO.md` * Fuzz-test all pull requests (#230) * Add `-DHAVE_GLIB` to library's `pkg-config --cflags` if needed (#254) Version 0.9.1 ============= * MDB Tools has migrated from Travis CI to GitHub Actions for automated builds * Fix a build error with msys2 (Windows) when GLib was disabled * Remove dependency on math.h #228 * mdb-export now uses scientific notation only for very small numbers #84 * mdb-schema no longer emits illegal ALTER TABLE statements for SQLite relationships #82 * mdb-schema now omits the namespace for PostgreSQL indexes and constraints #93 * Automatically detect character encoding of JET3 databases #224 * JET3: Transcode Latin-1 text to UTF-8 when iconv is not present * JET4: Transcode Unicode text to UTF-8 when iconv is not present (using `wcstombs`) #223 * Fix a buffer overrun with binary string fields OSS-Fuzz/28779 #216 * Fix a stack overflow with malformed input OSS-Fuzz/28780 #217 * Improved validation of date input OSS-Fuzz/28790 #219 * Fix a potential buffer overrun when compiled without iconv OSS-Fuzz/28773 #220 * Fix an extra newline that appeared at the `mdb-sql` prompt when Readline was not present * Fix potential stack corruption with malformed input * mdb-export has a new --escape-invisible flag for C-style escaping of tabs and newlines #222 * Print a warning if MDBOPTS=use_index is set at run-time but libmswstr was absent at compile-time #215 #226 * Improved support for databases that were improperly closed #120 * Remove warnings about mdb_find_row and row_size that did not necessarily indicate a problem Version 0.9.0 ============= MDB Tools is under new management! Update your bookmarks and tell your favorite search engine that this is our new home on the WWW: https://github.com/mdbtools/mdbtools MDB Tools 0.9.0 builds off the fork of cyberemissary on GitHub, collects many years of unapplied patches, and includes a number of internal improvements. Besides a host of security fixes, GLib is now completely optional, files can be opened completely in-memory, and many global variables have been eliminated to facilitate thread safety. The core library, libmdb, is believed to be thread-safe in the sense that several handles can be opened simultaneously and passed between threads. However, individual handles are NOT thread-safe, so don't try to do work on a single handle from multiple threads without implementing your own locking mechanism. The auxiliary libraries, libmdbsql and ODBC, still have some non-reentrant function calls. Thread safety is not promised, though the situation is significantly improved from the 0.7 days. The ABI has some slight changes (mostly having to do with struct layouts) so you'll need to bump your SO numbers and such. The main changes to the API are the introduction of a new function for in-memory parsing: MdbHandle *mdb_open_buffer(void *buffer, size_t len, MdbFileFlags flags); And the modification of the "mdb_set_date_fmt" function to include a handle parameter, so that date formats can be set in a non-global fashion: void mdb_set_date_fmt(MdbHandle *mdb, const char *); The above function is the most common reason that old projects will fail to compile on 0.9, and the fix is straightforward. 
The previously-deprecated functions mdb_init() and mdb_exit() have been removed. These functions did nothing; any calls to them should be excised with prejudice. This release introduces two new CLI tools (mdb-count and mdb-queries) and deprecates mdb-array, mdb-header, and mdb-parsecsv. Several man pages have been added or otherwise brought up to date. The GNOME-based GUI tool that formerly shipped with MDB Tools has been moved to a separate project, here: https://github.com/mdbtools/gmdb2 It seems to compile but should probably be classified as abandonware. The project uses a deprecated GUI toolkit, and as such gmdb2 is no longer included in Linux distros that used to carry it. Version 0.7.1 ============= autogen.sh is no more. Use "autoreconf -if" to bootstrap the configuration. autoconf/automake has been updated to more recent standards. - config.guess config.sub depcomp install-sh ltmain.sh missing ylwrap files are now in build-aux/ directory You'll now need autoconf >= 2.58 An experimental (buggy) version of ODBC driver that supports UCS-2 interface is now available: libmdbodbcW.so Note that libmdbodbc supports UTF-8 really well, so that this is usually not needed. Version 0.3 =========== The organization of the project files has changed a bit from that last release. The autoconf stuff has been moved to the root directory. The include directory has also been moved from src/include to include
null
Version 0.9.3 ============= libmdb: * Support files created with Access 2019 #260 #277 * Fix a warning when reading in binary property values #262 * Fix signed-unsigned comparison warning #269 * Migrate to `g_memdup2` #287 #288 * Fix build when `_XOPEN_SOURCE` was already defined on the platform #298 * Fix build failure with emscripten #299 libmdbsql: * Support negative floating point literals #274 #279 * Improved support for file paths in `CONNECT TO` statements #275 #280 #282 * Comparison operators behaved incorrectly when the constant was on the left #283 #285 * Allow double quoted (") database names #291 * Allow spaces in database names #292 #293 ODBC: * unixODBC now uses the `--libdir` passed at configure-time #261 * Fix a segfault in PyODBC when `SQLGetTypeInfo` is called on an unsupported data type #278 Docs: * Add JET version for access 2013/2016/2019 to docs #286 Version 0.9.2 ============= MDB Tools 0.9.2 is a security and bug-fix release. Due to a number of memory errors uncovered by OSS-Fuzz, all users who use MDB Tools to read data from untrusted sources are encouraged to upgrade to 0.9.2 as soon as possible. The release also includes some minor improvements and behavior changes, described below. libmdb: * Fix infinite loop with malformed input (oss-fuzz/28789) * Fix buffer overrun and some out of bounds memory accesses (oss-fuzz/28832 + oss-fuzz/28807) * Fix potential memory leak (oss-fuzz/28791) * Improved bounds and return value checking (oss-fuzz/29328 + oss-fuzz/29329) * Add support for numeric scale/precision on JET3 databases and floating-point column types * `mdb_col_to_string` now prints a warning and returns `""` for any unsupported data type * Improved warning with invalid row data #253 Command-line tools: * All CLI tools which accept string arguments are now locale-aware (#237) * All CLI tools now accept a `--version` argument (#232) * `mdb-export`: Fix issue where exported SQL field sizes were sometimes twice the necessary size (#112) * `mdb-export`: Improved handling of BLOBs (#250) * `mdb-export`: Implement a serial type and relations for MySQL (#257) * `mdb-queries` now has long option names `--list`, `--newline`, and `--delimiter` * `mdb-schema`: Exit with an error code if the requested table is not found ODBC: * `SQLBindCol` now respects its TargetType (fCType) argument and converts bound data to the requested type (#23 #242) * `SQLFetchCol` now returns `SQL_SUCCESS_WITH_INFO` if a bound column was truncated, and `SQL_ERROR` if a bound value is out of range of the target type. 
* Fix handling of numeric types in ODBC driver (#255) Build and documentation: * Updated man pages with new SQL features and correct `MDBOPTS` information (#203) * Add generated API documentation (#239) * Move `HACKING` to `HACKING.md` (with a relocation notice) and `TODO` to `TODO.md` * Fuzz-test all pull requests (#230) * Add `-DHAVE_GLIB` to library's `pkg-config --cflags` if needed (#254) Version 0.9.1 ============= * MDB Tools has migrated from Travis CI to GitHub Actions for automated builds * Fix a build error with msys2 (Windows) when GLib was disabled * Remove dependency on math.h #228 * mdb-export now uses scientific notation only for very small numbers #84 * mdb-schema no longer emits illegal ALTER TABLE statements for SQLite relationships #82 * mdb-schema now omits the namespace for PostgreSQL indexes and constraints #93 * Automatically detect character encoding of JET3 databases #224 * JET3: Transcode Latin-1 text to UTF-8 when iconv is not present * JET4: Transcode Unicode text to UTF-8 when iconv is not present (using `wcstombs`) #223 * Fix a buffer overrun with binary string fields OSS-Fuzz/28779 #216 * Fix a stack overflow with malformed input OSS-Fuzz/28780 #217 * Improved validation of date input OSS-Fuzz/28790 #219 * Fix a potential buffer overrun when compiled without iconv OSS-Fuzz/28773 #220 * Fix an extra newline that appeared at the `mdb-sql` prompt when Readline was not present * Fix potential stack corruption with malformed input * mdb-export has a new --escape-invisible flag for C-style escaping of tabs and newlines #222 * Print a warning if MDBOPTS=use_index is set at run-time but libmswstr was absent at compile-time #215 #226 * Improved support for databases that were improperly closed #120 * Remove warnings about mdb_find_row and row_size that did not necessarily indicate a problem Version 0.9.0 ============= MDB Tools is under new management! Update your bookmarks and tell your favorite search engine that this is our new home on the WWW: https://github.com/mdbtools/mdbtools MDB Tools 0.9.0 builds off the fork of cyberemissary on GitHub, collects many years of unapplied patches, and includes a number of internal improvements. Besides a host of security fixes, GLib is now completely optional, files can be opened completely in-memory, and many global variables have been eliminated to facilitate thread safety. The core library, libmdb, is believed to be thread-safe in the sense that several handles can be opened simultaneously and passed between threads. However, individual handles are NOT thread-safe, so don't try to do work on a single handle from multiple threads without implementing your own locking mechanism. The auxiliary libraries, libmdbsql and ODBC, still have some non-reentrant function calls. Thread safety is not promised, though the situation is significantly improved from the 0.7 days. The ABI has some slight changes (mostly having to do with struct layouts) so you'll need to bump your SO numbers and such. The main changes to the API are the introduction of a new function for in-memory parsing: MdbHandle *mdb_open_buffer(void *buffer, size_t len, MdbFileFlags flags); And the modification of the "mdb_set_date_fmt" function to include a handle parameter, so that date formats can be set in a non-global fashion: void mdb_set_date_fmt(MdbHandle *mdb, const char *); The above function is the most common reason that old projects will fail to compile on 0.9, and the fix is straightforward. 
The previously-deprecated functions mdb_init() and mdb_exit() have been removed. These functions did nothing; any calls to them should be excised with prejudice. This release introduces two new CLI tools (mdb-count and mdb-queries) and deprecates mdb-array, mdb-header, and mdb-parsecsv. Several man pages have been added or otherwise brought up to date. The GNOME-based GUI tool that formerly shipped with MDB Tools has been moved to a separate project, here: https://github.com/mdbtools/gmdb2 It seems to compile but should probably be classified as abandonware. The project uses a deprecated GUI toolkit, and as such gmdb2 is no longer included in Linux distros that used to carry it. Version 0.7.1 ============= autogen.sh is no more. Use "autoreconf -if" to bootstrap the configuration. autoconf/automake has been updated to more recent standards. - config.guess config.sub depcomp install-sh ltmain.sh missing ylwrap files are now in build-aux/ directory You'll now need autoconf >= 2.58 An experimental (buggy) version of ODBC driver that supports UCS-2 interface is now available: libmdbodbcW.so Note that libmdbodbc supports UTF-8 really well, so that this is usually not needed. Version 0.3 =========== The organization of the project files has changed a bit from that last release. The autoconf stuff has been moved to the root directory. The include directory has also been moved from src/include to include
null
298
CWE-787
CVE-2021-45930
/**************************************************************************** ** ** Copyright (C) 2021 The Qt Company Ltd. ** Contact: https://www.qt.io/licensing/ ** ** This file is part of the Qt SVG module of the Qt Toolkit. ** ** $QT_BEGIN_LICENSE:LGPL$ ** Commercial License Usage ** Licensees holding valid commercial Qt licenses may use this file in ** accordance with the commercial license agreement provided with the ** Software or, alternatively, in accordance with the terms contained in ** a written agreement between you and The Qt Company. For licensing terms ** and conditions see https://www.qt.io/terms-conditions. For further ** information use the contact form at https://www.qt.io/contact-us. ** ** GNU Lesser General Public License Usage ** Alternatively, this file may be used under the terms of the GNU Lesser ** General Public License version 3 as published by the Free Software ** Foundation and appearing in the file LICENSE.LGPL3 included in the ** packaging of this file. Please review the following information to ** ensure the GNU Lesser General Public License version 3 requirements ** will be met: https://www.gnu.org/licenses/lgpl-3.0.html. ** ** GNU General Public License Usage ** Alternatively, this file may be used under the terms of the GNU ** General Public License version 2.0 or (at your option) the GNU General ** Public license version 3 or any later version approved by the KDE Free ** Qt Foundation. The licenses are as published by the Free Software ** Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3 ** included in the packaging of this file. Please review the following ** information to ensure the GNU General Public License requirements will ** be met: https://www.gnu.org/licenses/gpl-2.0.html and ** https://www.gnu.org/licenses/gpl-3.0.html. 
** ** $QT_END_LICENSE$ ** ****************************************************************************/ #include "qplatformdefs.h" #include "qsvghandler_p.h" #include "qsvgtinydocument_p.h" #include "qsvgstructure_p.h" #include "qsvggraphics_p.h" #include "qsvgnode_p.h" #include "qsvgfont_p.h" #include "qpen.h" #include "qpainterpath.h" #include "qbrush.h" #include "qcolor.h" #include "qtextformat.h" #include "qlist.h" #include "qfileinfo.h" #include "qfile.h" #include "qdir.h" #include "qdebug.h" #include "qmath.h" #include "qnumeric.h" #include <qregularexpression.h> #include "qtransform.h" #include "qvarlengtharray.h" #include "private/qmath_p.h" #include "float.h" #include <cmath> QT_BEGIN_NAMESPACE Q_LOGGING_CATEGORY(lcSvgHandler, "qt.svg") static const char *qt_inherit_text = "inherit"; #define QT_INHERIT QLatin1String(qt_inherit_text) static QByteArray prefixMessage(const QByteArray &msg, const QXmlStreamReader *r) { QByteArray result; if (r) { if (const QFile *file = qobject_cast<const QFile *>(r->device())) result.append(QFile::encodeName(QDir::toNativeSeparators(file->fileName()))); else result.append(QByteArrayLiteral("<input>")); result.append(':'); result.append(QByteArray::number(r->lineNumber())); if (const qint64 column = r->columnNumber()) { result.append(':'); result.append(QByteArray::number(column)); } result.append(QByteArrayLiteral(": ")); } result.append(msg); return result; } static inline QByteArray msgProblemParsing(const QString &localName, const QXmlStreamReader *r) { return prefixMessage(QByteArrayLiteral("Problem parsing ") + localName.toLocal8Bit(), r); } static inline QByteArray msgCouldNotResolveProperty(const QString &id, const QXmlStreamReader *r) { return prefixMessage(QByteArrayLiteral("Could not resolve property: ") + id.toLocal8Bit(), r); } // ======== duplicated from qcolor_p static inline int qsvg_h2i(char hex, bool *ok = nullptr) { if (hex >= '0' && hex <= '9') return hex - '0'; if (hex >= 'a' && hex <= 'f') return hex - 'a' + 10; if (hex >= 'A' && hex <= 'F') return hex - 'A' + 10; if (ok) *ok = false; return -1; } static inline int qsvg_hex2int(const char *s, bool *ok = nullptr) { return (qsvg_h2i(s[0], ok) * 16) | qsvg_h2i(s[1], ok); } static inline int qsvg_hex2int(char s, bool *ok = nullptr) { int h = qsvg_h2i(s, ok); return (h * 16) | h; } bool qsvg_get_hex_rgb(const char *name, QRgb *rgb) { if(name[0] != '#') return false; name++; const size_t len = qstrlen(name); int r, g, b; bool ok = true; if (len == 12) { r = qsvg_hex2int(name, &ok); g = qsvg_hex2int(name + 4, &ok); b = qsvg_hex2int(name + 8, &ok); } else if (len == 9) { r = qsvg_hex2int(name, &ok); g = qsvg_hex2int(name + 3, &ok); b = qsvg_hex2int(name + 6, &ok); } else if (len == 6) { r = qsvg_hex2int(name, &ok); g = qsvg_hex2int(name + 2, &ok); b = qsvg_hex2int(name + 4, &ok); } else if (len == 3) { r = qsvg_hex2int(name[0], &ok); g = qsvg_hex2int(name[1], &ok); b = qsvg_hex2int(name[2], &ok); } else { r = g = b = -1; } if ((uint)r > 255 || (uint)g > 255 || (uint)b > 255 || !ok) { *rgb = 0; return false; } *rgb = qRgb(r, g ,b); return true; } bool qsvg_get_hex_rgb(const QChar *str, int len, QRgb *rgb) { if (len > 13) return false; char tmp[16]; for(int i = 0; i < len; ++i) tmp[i] = str[i].toLatin1(); tmp[len] = 0; return qsvg_get_hex_rgb(tmp, rgb); } // ======== end of qcolor_p duplicate static bool parsePathDataFast(QStringView data, QPainterPath &path); static inline QString someId(const QXmlStreamAttributes &attributes) { QString id = 
attributes.value(QLatin1String("id")).toString(); if (id.isEmpty()) id = attributes.value(QLatin1String("xml:id")).toString(); return id; } struct QSvgAttributes { QSvgAttributes(const QXmlStreamAttributes &xmlAttributes, QSvgHandler *handler); QString id; QStringView color; QStringView colorOpacity; QStringView fill; QStringView fillRule; QStringView fillOpacity; QStringView stroke; QStringView strokeDashArray; QStringView strokeDashOffset; QStringView strokeLineCap; QStringView strokeLineJoin; QStringView strokeMiterLimit; QStringView strokeOpacity; QStringView strokeWidth; QStringView vectorEffect; QStringView fontFamily; QStringView fontSize; QStringView fontStyle; QStringView fontWeight; QStringView fontVariant; QStringView textAnchor; QStringView transform; QStringView visibility; QStringView opacity; QStringView compOp; QStringView display; QStringView offset; QStringView stopColor; QStringView stopOpacity; QStringView imageRendering; #ifndef QT_NO_CSSPARSER QList<QSvgCssAttribute> m_cssAttributes; #endif }; QSvgAttributes::QSvgAttributes(const QXmlStreamAttributes &xmlAttributes, QSvgHandler *handler) { #ifndef QT_NO_CSSPARSER QStringView style = xmlAttributes.value(QLatin1String("style")); if (!style.isEmpty()) { handler->parseCSStoXMLAttrs(style.toString(), &m_cssAttributes); for (int j = 0; j < m_cssAttributes.count(); ++j) { const QSvgCssAttribute &attribute = m_cssAttributes.at(j); QStringView name = attribute.name; QStringView value = attribute.value; if (name.isEmpty()) continue; switch (name.at(0).unicode()) { case 'c': if (name == QLatin1String("color")) color = value; else if (name == QLatin1String("color-opacity")) colorOpacity = value; else if (name == QLatin1String("comp-op")) compOp = value; break; case 'd': if (name == QLatin1String("display")) display = value; break; case 'f': if (name == QLatin1String("fill")) fill = value; else if (name == QLatin1String("fill-rule")) fillRule = value; else if (name == QLatin1String("fill-opacity")) fillOpacity = value; else if (name == QLatin1String("font-family")) fontFamily = value; else if (name == QLatin1String("font-size")) fontSize = value; else if (name == QLatin1String("font-style")) fontStyle = value; else if (name == QLatin1String("font-weight")) fontWeight = value; else if (name == QLatin1String("font-variant")) fontVariant = value; break; case 'i': if (name == QLatin1String("image-rendering")) imageRendering = value; break; case 'o': if (name == QLatin1String("opacity")) opacity = value; else if (name == QLatin1String("offset")) offset = value; break; case 's': if (name.length() > 5 && name.mid(1, 5) == QLatin1String("troke")) { QStringView strokeRef = name.mid(6, name.length() - 6); if (strokeRef.isEmpty()) stroke = value; else if (strokeRef == QLatin1String("-dasharray")) strokeDashArray = value; else if (strokeRef == QLatin1String("-dashoffset")) strokeDashOffset = value; else if (strokeRef == QLatin1String("-linecap")) strokeLineCap = value; else if (strokeRef == QLatin1String("-linejoin")) strokeLineJoin = value; else if (strokeRef == QLatin1String("-miterlimit")) strokeMiterLimit = value; else if (strokeRef == QLatin1String("-opacity")) strokeOpacity = value; else if (strokeRef == QLatin1String("-width")) strokeWidth = value; } else if (name == QLatin1String("stop-color")) stopColor = value; else if (name == QLatin1String("stop-opacity")) stopOpacity = value; break; case 't': if (name == QLatin1String("text-anchor")) textAnchor = value; else if (name == QLatin1String("transform")) transform = value; break; case 
'v': if (name == QLatin1String("vector-effect")) vectorEffect = value; else if (name == QLatin1String("visibility")) visibility = value; break; default: break; } } } #else Q_UNUSED(handler); #endif // QT_NO_CSSPARSER for (int i = 0; i < xmlAttributes.count(); ++i) { const QXmlStreamAttribute &attribute = xmlAttributes.at(i); QStringView name = attribute.qualifiedName(); if (name.isEmpty()) continue; QStringView value = attribute.value(); switch (name.at(0).unicode()) { case 'c': if (name == QLatin1String("color")) color = value; else if (name == QLatin1String("color-opacity")) colorOpacity = value; else if (name == QLatin1String("comp-op")) compOp = value; break; case 'd': if (name == QLatin1String("display")) display = value; break; case 'f': if (name == QLatin1String("fill")) fill = value; else if (name == QLatin1String("fill-rule")) fillRule = value; else if (name == QLatin1String("fill-opacity")) fillOpacity = value; else if (name == QLatin1String("font-family")) fontFamily = value; else if (name == QLatin1String("font-size")) fontSize = value; else if (name == QLatin1String("font-style")) fontStyle = value; else if (name == QLatin1String("font-weight")) fontWeight = value; else if (name == QLatin1String("font-variant")) fontVariant = value; break; case 'i': if (name == QLatin1String("id")) id = value.toString(); else if (name == QLatin1String("image-rendering")) imageRendering = value; break; case 'o': if (name == QLatin1String("opacity")) opacity = value; if (name == QLatin1String("offset")) offset = value; break; case 's': if (name.length() > 5 && name.mid(1, 5) == QLatin1String("troke")) { QStringView strokeRef = name.mid(6, name.length() - 6); if (strokeRef.isEmpty()) stroke = value; else if (strokeRef == QLatin1String("-dasharray")) strokeDashArray = value; else if (strokeRef == QLatin1String("-dashoffset")) strokeDashOffset = value; else if (strokeRef == QLatin1String("-linecap")) strokeLineCap = value; else if (strokeRef == QLatin1String("-linejoin")) strokeLineJoin = value; else if (strokeRef == QLatin1String("-miterlimit")) strokeMiterLimit = value; else if (strokeRef == QLatin1String("-opacity")) strokeOpacity = value; else if (strokeRef == QLatin1String("-width")) strokeWidth = value; } else if (name == QLatin1String("stop-color")) stopColor = value; else if (name == QLatin1String("stop-opacity")) stopOpacity = value; break; case 't': if (name == QLatin1String("text-anchor")) textAnchor = value; else if (name == QLatin1String("transform")) transform = value; break; case 'v': if (name == QLatin1String("vector-effect")) vectorEffect = value; else if (name == QLatin1String("visibility")) visibility = value; break; case 'x': if (name == QLatin1String("xml:id") && id.isEmpty()) id = value.toString(); break; default: break; } } } #ifndef QT_NO_CSSPARSER static const char * QSvgStyleSelector_nodeString[] = { "svg", "g", "defs", "switch", "animation", "arc", "circle", "ellipse", "image", "line", "path", "polygon", "polyline", "rect", "text", "textarea", "tspan", "use", "video" }; class QSvgStyleSelector : public QCss::StyleSelector { public: QSvgStyleSelector() { nameCaseSensitivity = Qt::CaseInsensitive; } virtual ~QSvgStyleSelector() { } inline QString nodeToName(QSvgNode *node) const { return QLatin1String(QSvgStyleSelector_nodeString[node->type()]); } inline QSvgNode *svgNode(NodePtr node) const { return (QSvgNode*)node.ptr; } inline QSvgStructureNode *nodeToStructure(QSvgNode *n) const { if (n && (n->type() == QSvgNode::DOC || n->type() == QSvgNode::G || n->type() == 
QSvgNode::DEFS || n->type() == QSvgNode::SWITCH)) { return (QSvgStructureNode*)n; } return 0; } inline QSvgStructureNode *svgStructure(NodePtr node) const { QSvgNode *n = svgNode(node); QSvgStructureNode *st = nodeToStructure(n); return st; } bool nodeNameEquals(NodePtr node, const QString& nodeName) const override { QSvgNode *n = svgNode(node); if (!n) return false; QString name = nodeToName(n); return QString::compare(name, nodeName, Qt::CaseInsensitive) == 0; } QString attribute(NodePtr node, const QString &name) const override { QSvgNode *n = svgNode(node); if ((!n->nodeId().isEmpty() && (name == QLatin1String("id") || name == QLatin1String("xml:id")))) return n->nodeId(); if (!n->xmlClass().isEmpty() && name == QLatin1String("class")) return n->xmlClass(); return QString(); } bool hasAttributes(NodePtr node) const override { QSvgNode *n = svgNode(node); return (n && (!n->nodeId().isEmpty() || !n->xmlClass().isEmpty())); } QStringList nodeIds(NodePtr node) const override { QSvgNode *n = svgNode(node); QString nid; if (n) nid = n->nodeId(); QStringList lst; lst.append(nid); return lst; } QStringList nodeNames(NodePtr node) const override { QSvgNode *n = svgNode(node); if (n) return QStringList(nodeToName(n)); return QStringList(); } bool isNullNode(NodePtr node) const override { return !node.ptr; } NodePtr parentNode(NodePtr node) const override { QSvgNode *n = svgNode(node); NodePtr newNode; newNode.ptr = 0; newNode.id = 0; if (n) { QSvgNode *svgParent = n->parent(); if (svgParent) { newNode.ptr = svgParent; } } return newNode; } NodePtr previousSiblingNode(NodePtr node) const override { NodePtr newNode; newNode.ptr = 0; newNode.id = 0; QSvgNode *n = svgNode(node); if (!n) return newNode; QSvgStructureNode *svgParent = nodeToStructure(n->parent()); if (svgParent) { newNode.ptr = svgParent->previousSiblingNode(n); } return newNode; } NodePtr duplicateNode(NodePtr node) const override { NodePtr n; n.ptr = node.ptr; n.id = node.id; return n; } void freeNode(NodePtr node) const override { Q_UNUSED(node); } }; #endif // QT_NO_CSSPARSER // '0' is 0x30 and '9' is 0x39 static inline bool isDigit(ushort ch) { static quint16 magic = 0x3ff; return ((ch >> 4) == 3) && (magic >> (ch & 15)); } static qreal toDouble(const QChar *&str) { const int maxLen = 255;//technically doubles can go til 308+ but whatever char temp[maxLen+1]; int pos = 0; if (*str == QLatin1Char('-')) { temp[pos++] = '-'; ++str; } else if (*str == QLatin1Char('+')) { ++str; } while (isDigit(str->unicode()) && pos < maxLen) { temp[pos++] = str->toLatin1(); ++str; } if (*str == QLatin1Char('.') && pos < maxLen) { temp[pos++] = '.'; ++str; } while (isDigit(str->unicode()) && pos < maxLen) { temp[pos++] = str->toLatin1(); ++str; } bool exponent = false; if ((*str == QLatin1Char('e') || *str == QLatin1Char('E')) && pos < maxLen) { exponent = true; temp[pos++] = 'e'; ++str; if ((*str == QLatin1Char('-') || *str == QLatin1Char('+')) && pos < maxLen) { temp[pos++] = str->toLatin1(); ++str; } while (isDigit(str->unicode()) && pos < maxLen) { temp[pos++] = str->toLatin1(); ++str; } } temp[pos] = '\0'; qreal val; if (!exponent && pos < 10) { int ival = 0; const char *t = temp; bool neg = false; if(*t == '-') { neg = true; ++t; } while(*t && *t != '.') { ival *= 10; ival += (*t) - '0'; ++t; } if(*t == '.') { ++t; int div = 1; while(*t) { ival *= 10; ival += (*t) - '0'; div *= 10; ++t; } val = ((qreal)ival)/((qreal)div); } else { val = ival; } if (neg) val = -val; } else { val = QByteArray::fromRawData(temp, pos).toDouble(); // Do not 
tolerate values too wild to be represented normally by floats if (qFpClassify(float(val)) != FP_NORMAL) val = 0; } return val; } static qreal toDouble(QStringView str, bool *ok = NULL) { const QChar *c = str.constData(); qreal res = (c == nullptr ? qreal{} : toDouble(c)); if (ok) *ok = (c == (str.constData() + str.length())); return res; } static QList<qreal> parseNumbersList(const QChar *&str) { QList<qreal> points; if (!str) return points; points.reserve(32); while (str->isSpace()) ++str; while (isDigit(str->unicode()) || *str == QLatin1Char('-') || *str == QLatin1Char('+') || *str == QLatin1Char('.')) { points.append(toDouble(str)); while (str->isSpace()) ++str; if (*str == QLatin1Char(',')) ++str; //eat the rest of space while (str->isSpace()) ++str; } return points; } static inline void parseNumbersArray(const QChar *&str, QVarLengthArray<qreal, 8> &points, const char *pattern = nullptr) { const size_t patternLen = qstrlen(pattern); while (str->isSpace()) ++str; while (isDigit(str->unicode()) || *str == QLatin1Char('-') || *str == QLatin1Char('+') || *str == QLatin1Char('.')) { if (patternLen && pattern[points.size() % patternLen] == 'f') { // flag expected, may only be 0 or 1 if (*str != QLatin1Char('0') && *str != QLatin1Char('1')) return; points.append(*str == QLatin1Char('0') ? 0.0 : 1.0); ++str; } else { points.append(toDouble(str)); } while (str->isSpace()) ++str; if (*str == QLatin1Char(',')) ++str; //eat the rest of space while (str->isSpace()) ++str; } } static QList<qreal> parsePercentageList(const QChar *&str) { QList<qreal> points; if (!str) return points; while (str->isSpace()) ++str; while ((*str >= QLatin1Char('0') && *str <= QLatin1Char('9')) || *str == QLatin1Char('-') || *str == QLatin1Char('+') || *str == QLatin1Char('.')) { points.append(toDouble(str)); while (str->isSpace()) ++str; if (*str == QLatin1Char('%')) ++str; while (str->isSpace()) ++str; if (*str == QLatin1Char(',')) ++str; //eat the rest of space while (str->isSpace()) ++str; } return points; } static QString idFromUrl(const QString &url) { // The form is url(<IRI>), where IRI can be // just an ID on #<id> form. QString::const_iterator itr = url.constBegin(); QString::const_iterator end = url.constEnd(); QString id; while (itr != end && (*itr).isSpace()) ++itr; if (itr != end && (*itr) == QLatin1Char('(')) ++itr; else return QString(); while (itr != end && (*itr).isSpace()) ++itr; if (itr != end && (*itr) == QLatin1Char('#')) { id += *itr; ++itr; } else { return QString(); } while (itr != end && (*itr) != QLatin1Char(')')) { id += *itr; ++itr; } if (itr == end || (*itr) != QLatin1Char(')')) return QString(); return id; } /** * returns true when successfully set the color. 
false signifies * that the color should be inherited */ static bool resolveColor(QStringView colorStr, QColor &color, QSvgHandler *handler) { QStringView colorStrTr = colorStr.trimmed(); if (colorStrTr.isEmpty()) return false; switch(colorStrTr.at(0).unicode()) { case '#': { // #rrggbb is very very common, so let's tackle it here // rather than falling back to QColor QRgb rgb; bool ok = qsvg_get_hex_rgb(colorStrTr.constData(), colorStrTr.length(), &rgb); if (ok) color.setRgb(rgb); return ok; } break; case 'r': { // starts with "rgb(", ends with ")" and consists of at least 7 characters "rgb(,,)" if (colorStrTr.length() >= 7 && colorStrTr.at(colorStrTr.length() - 1) == QLatin1Char(')') && colorStrTr.mid(0, 4) == QLatin1String("rgb(")) { const QChar *s = colorStrTr.constData() + 4; QList<qreal> compo = parseNumbersList(s); //1 means that it failed after reaching non-parsable //character which is going to be "%" if (compo.size() == 1) { s = colorStrTr.constData() + 4; compo = parsePercentageList(s); for (int i = 0; i < compo.size(); ++i) compo[i] *= (qreal)2.55; } if (compo.size() == 3) { color = QColor(int(compo[0]), int(compo[1]), int(compo[2])); return true; } return false; } } break; case 'c': if (colorStrTr == QLatin1String("currentColor")) { color = handler->currentColor(); return true; } break; case 'i': if (colorStrTr == QT_INHERIT) return false; break; default: break; } color = QColor(colorStrTr.toString()); return color.isValid(); } static bool constructColor(QStringView colorStr, QStringView opacity, QColor &color, QSvgHandler *handler) { if (!resolveColor(colorStr, color, handler)) return false; if (!opacity.isEmpty()) { bool ok = true; qreal op = qMin(qreal(1.0), qMax(qreal(0.0), toDouble(opacity, &ok))); if (!ok) op = 1.0; color.setAlphaF(op); } return true; } static qreal parseLength(QStringView str, QSvgHandler::LengthType &type, QSvgHandler *handler, bool *ok = NULL) { QStringView numStr = str.trimmed(); if (numStr.endsWith(QLatin1Char('%'))) { numStr.chop(1); type = QSvgHandler::LT_PERCENT; } else if (numStr.endsWith(QLatin1String("px"))) { numStr.chop(2); type = QSvgHandler::LT_PX; } else if (numStr.endsWith(QLatin1String("pc"))) { numStr.chop(2); type = QSvgHandler::LT_PC; } else if (numStr.endsWith(QLatin1String("pt"))) { numStr.chop(2); type = QSvgHandler::LT_PT; } else if (numStr.endsWith(QLatin1String("mm"))) { numStr.chop(2); type = QSvgHandler::LT_MM; } else if (numStr.endsWith(QLatin1String("cm"))) { numStr.chop(2); type = QSvgHandler::LT_CM; } else if (numStr.endsWith(QLatin1String("in"))) { numStr.chop(2); type = QSvgHandler::LT_IN; } else { type = handler->defaultCoordinateSystem(); //type = QSvgHandler::LT_OTHER; } qreal len = toDouble(numStr, ok); //qDebug()<<"len is "<<len<<", from '"<<numStr << "'"; return len; } static inline qreal convertToNumber(QStringView str, QSvgHandler *handler, bool *ok = NULL) { QSvgHandler::LengthType type; qreal num = parseLength(str.toString(), type, handler, ok); if (type == QSvgHandler::LT_PERCENT) { num = num/100.0; } return num; } static bool createSvgGlyph(QSvgFont *font, const QXmlStreamAttributes &attributes) { QStringView uncStr = attributes.value(QLatin1String("unicode")); QStringView havStr = attributes.value(QLatin1String("horiz-adv-x")); QStringView pathStr = attributes.value(QLatin1String("d")); QChar unicode = (uncStr.isEmpty()) ? u'\0' : uncStr.at(0); qreal havx = (havStr.isEmpty()) ? 
-1 : toDouble(havStr); QPainterPath path; path.setFillRule(Qt::WindingFill); parsePathDataFast(pathStr, path); font->addGlyph(unicode, path, havx); return true; } // this should really be called convertToDefaultCoordinateSystem // and convert when type != QSvgHandler::defaultCoordinateSystem static qreal convertToPixels(qreal len, bool , QSvgHandler::LengthType type) { switch (type) { case QSvgHandler::LT_PERCENT: break; case QSvgHandler::LT_PX: break; case QSvgHandler::LT_PC: break; case QSvgHandler::LT_PT: return len * 1.25; break; case QSvgHandler::LT_MM: return len * 3.543307; break; case QSvgHandler::LT_CM: return len * 35.43307; break; case QSvgHandler::LT_IN: return len * 90; break; case QSvgHandler::LT_OTHER: break; default: break; } return len; } static void parseColor(QSvgNode *, const QSvgAttributes &attributes, QSvgHandler *handler) { QColor color; if (constructColor(attributes.color, attributes.colorOpacity, color, handler)) { handler->popColor(); handler->pushColor(color); } } static QSvgStyleProperty *styleFromUrl(QSvgNode *node, const QString &url) { return node ? node->styleProperty(idFromUrl(url)) : 0; } static void parseBrush(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *handler) { if (!attributes.fill.isEmpty() || !attributes.fillRule.isEmpty() || !attributes.fillOpacity.isEmpty()) { QSvgFillStyle *prop = new QSvgFillStyle; //fill-rule attribute handling if (!attributes.fillRule.isEmpty() && attributes.fillRule != QT_INHERIT) { if (attributes.fillRule == QLatin1String("evenodd")) prop->setFillRule(Qt::OddEvenFill); else if (attributes.fillRule == QLatin1String("nonzero")) prop->setFillRule(Qt::WindingFill); } //fill-opacity attribute handling if (!attributes.fillOpacity.isEmpty() && attributes.fillOpacity != QT_INHERIT) { prop->setFillOpacity(qMin(qreal(1.0), qMax(qreal(0.0), toDouble(attributes.fillOpacity)))); } //fill attribute handling if ((!attributes.fill.isEmpty()) && (attributes.fill != QT_INHERIT) ) { if (attributes.fill.length() > 3 && attributes.fill.mid(0, 3) == QLatin1String("url")) { QString value = attributes.fill.mid(3, attributes.fill.length() - 3).toString(); QSvgStyleProperty *style = styleFromUrl(node, value); if (style) { if (style->type() == QSvgStyleProperty::SOLID_COLOR || style->type() == QSvgStyleProperty::GRADIENT) prop->setFillStyle(reinterpret_cast<QSvgFillStyleProperty *>(style)); } else { QString id = idFromUrl(value); prop->setGradientId(id); prop->setGradientResolved(false); } } else if (attributes.fill != QLatin1String("none")) { QColor color; if (resolveColor(attributes.fill, color, handler)) prop->setBrush(QBrush(color)); } else { prop->setBrush(QBrush(Qt::NoBrush)); } } node->appendStyleProperty(prop, attributes.id); } } static QTransform parseTransformationMatrix(QStringView value) { if (value.isEmpty()) return QTransform(); QTransform matrix; const QChar *str = value.constData(); const QChar *end = str + value.length(); while (str < end) { if (str->isSpace() || *str == QLatin1Char(',')) { ++str; continue; } enum State { Matrix, Translate, Rotate, Scale, SkewX, SkewY }; State state = Matrix; if (*str == QLatin1Char('m')) { //matrix const char *ident = "atrix"; for (int i = 0; i < 5; ++i) if (*(++str) != QLatin1Char(ident[i])) goto error; ++str; state = Matrix; } else if (*str == QLatin1Char('t')) { //translate const char *ident = "ranslate"; for (int i = 0; i < 8; ++i) if (*(++str) != QLatin1Char(ident[i])) goto error; ++str; state = Translate; } else if (*str == QLatin1Char('r')) { //rotate const char *ident = 
"otate"; for (int i = 0; i < 5; ++i) if (*(++str) != QLatin1Char(ident[i])) goto error; ++str; state = Rotate; } else if (*str == QLatin1Char('s')) { //scale, skewX, skewY ++str; if (*str == QLatin1Char('c')) { const char *ident = "ale"; for (int i = 0; i < 3; ++i) if (*(++str) != QLatin1Char(ident[i])) goto error; ++str; state = Scale; } else if (*str == QLatin1Char('k')) { if (*(++str) != QLatin1Char('e')) goto error; if (*(++str) != QLatin1Char('w')) goto error; ++str; if (*str == QLatin1Char('X')) state = SkewX; else if (*str == QLatin1Char('Y')) state = SkewY; else goto error; ++str; } else { goto error; } } else { goto error; } while (str < end && str->isSpace()) ++str; if (*str != QLatin1Char('(')) goto error; ++str; QVarLengthArray<qreal, 8> points; parseNumbersArray(str, points); if (*str != QLatin1Char(')')) goto error; ++str; if(state == Matrix) { if(points.count() != 6) goto error; matrix = QTransform(points[0], points[1], points[2], points[3], points[4], points[5]) * matrix; } else if (state == Translate) { if (points.count() == 1) matrix.translate(points[0], 0); else if (points.count() == 2) matrix.translate(points[0], points[1]); else goto error; } else if (state == Rotate) { if(points.count() == 1) { matrix.rotate(points[0]); } else if (points.count() == 3) { matrix.translate(points[1], points[2]); matrix.rotate(points[0]); matrix.translate(-points[1], -points[2]); } else { goto error; } } else if (state == Scale) { if (points.count() < 1 || points.count() > 2) goto error; qreal sx = points[0]; qreal sy = sx; if(points.count() == 2) sy = points[1]; matrix.scale(sx, sy); } else if (state == SkewX) { if (points.count() != 1) goto error; matrix.shear(qTan(qDegreesToRadians(points[0])), 0); } else if (state == SkewY) { if (points.count() != 1) goto error; matrix.shear(0, qTan(qDegreesToRadians(points[0]))); } } error: return matrix; } static void parsePen(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *handler) { //qDebug()<<"Node "<<node->type()<<", attrs are "<<value<<width; if (!attributes.stroke.isEmpty() || !attributes.strokeDashArray.isEmpty() || !attributes.strokeDashOffset.isEmpty() || !attributes.strokeLineCap.isEmpty() || !attributes.strokeLineJoin.isEmpty() || !attributes.strokeMiterLimit.isEmpty() || !attributes.strokeOpacity.isEmpty() || !attributes.strokeWidth.isEmpty() || !attributes.vectorEffect.isEmpty()) { QSvgStrokeStyle *prop = new QSvgStrokeStyle; //stroke attribute handling if ((!attributes.stroke.isEmpty()) && (attributes.stroke != QT_INHERIT) ) { if (attributes.stroke.length() > 3 && attributes.stroke.mid(0, 3) == QLatin1String("url")) { QString value = attributes.stroke.mid(3, attributes.stroke.length() - 3).toString(); QSvgStyleProperty *style = styleFromUrl(node, value); if (style) { if (style->type() == QSvgStyleProperty::SOLID_COLOR || style->type() == QSvgStyleProperty::GRADIENT) prop->setStyle(reinterpret_cast<QSvgFillStyleProperty *>(style)); } else { QString id = idFromUrl(value); prop->setGradientId(id); prop->setGradientResolved(false); } } else if (attributes.stroke != QLatin1String("none")) { QColor color; if (resolveColor(attributes.stroke, color, handler)) prop->setStroke(QBrush(color)); } else { prop->setStroke(QBrush(Qt::NoBrush)); } } //stroke-width handling if (!attributes.strokeWidth.isEmpty() && attributes.strokeWidth != QT_INHERIT) { QSvgHandler::LengthType lt; prop->setWidth(parseLength(attributes.strokeWidth, lt, handler)); } //stroke-dasharray if (!attributes.strokeDashArray.isEmpty() && attributes.strokeDashArray 
!= QT_INHERIT) { if (attributes.strokeDashArray == QLatin1String("none")) { prop->setDashArrayNone(); } else { QString dashArray = attributes.strokeDashArray.toString(); const QChar *s = dashArray.constData(); QList<qreal> dashes = parseNumbersList(s); // if the dash count is odd the dashes should be duplicated if ((dashes.size() & 1) != 0) dashes << QList<qreal>(dashes); prop->setDashArray(dashes); } } //stroke-linejoin attribute handling if (!attributes.strokeLineJoin.isEmpty()) { if (attributes.strokeLineJoin == QLatin1String("miter")) prop->setLineJoin(Qt::SvgMiterJoin); else if (attributes.strokeLineJoin == QLatin1String("round")) prop->setLineJoin(Qt::RoundJoin); else if (attributes.strokeLineJoin == QLatin1String("bevel")) prop->setLineJoin(Qt::BevelJoin); } //stroke-linecap attribute handling if (!attributes.strokeLineCap.isEmpty()) { if (attributes.strokeLineCap == QLatin1String("butt")) prop->setLineCap(Qt::FlatCap); else if (attributes.strokeLineCap == QLatin1String("round")) prop->setLineCap(Qt::RoundCap); else if (attributes.strokeLineCap == QLatin1String("square")) prop->setLineCap(Qt::SquareCap); } //stroke-dashoffset attribute handling if (!attributes.strokeDashOffset.isEmpty() && attributes.strokeDashOffset != QT_INHERIT) prop->setDashOffset(toDouble(attributes.strokeDashOffset)); //vector-effect attribute handling if (!attributes.vectorEffect.isEmpty()) { if (attributes.vectorEffect == QLatin1String("non-scaling-stroke")) prop->setVectorEffect(true); else if (attributes.vectorEffect == QLatin1String("none")) prop->setVectorEffect(false); } //stroke-miterlimit if (!attributes.strokeMiterLimit.isEmpty() && attributes.strokeMiterLimit != QT_INHERIT) prop->setMiterLimit(toDouble(attributes.strokeMiterLimit)); //stroke-opacity atttribute handling if (!attributes.strokeOpacity.isEmpty() && attributes.strokeOpacity != QT_INHERIT) prop->setOpacity(qMin(qreal(1.0), qMax(qreal(0.0), toDouble(attributes.strokeOpacity)))); node->appendStyleProperty(prop, attributes.id); } } enum FontSizeSpec { XXSmall, XSmall, Small, Medium, Large, XLarge, XXLarge, FontSizeNone, FontSizeValue }; static const qreal sizeTable[] = { qreal(6.9), qreal(8.3), qreal(10.0), qreal(12.0), qreal(14.4), qreal(17.3), qreal(20.7) }; Q_STATIC_ASSERT(sizeof(sizeTable)/sizeof(sizeTable[0]) == FontSizeNone); static FontSizeSpec fontSizeSpec(QStringView spec) { switch (spec.at(0).unicode()) { case 'x': if (spec == QLatin1String("xx-small")) return XXSmall; if (spec == QLatin1String("x-small")) return XSmall; if (spec == QLatin1String("x-large")) return XLarge; if (spec == QLatin1String("xx-large")) return XXLarge; break; case 's': if (spec == QLatin1String("small")) return Small; break; case 'm': if (spec == QLatin1String("medium")) return Medium; break; case 'l': if (spec == QLatin1String("large")) return Large; break; case 'n': if (spec == QLatin1String("none")) return FontSizeNone; break; default: break; } return FontSizeValue; } static void parseFont(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *handler) { if (attributes.fontFamily.isEmpty() && attributes.fontSize.isEmpty() && attributes.fontStyle.isEmpty() && attributes.fontWeight.isEmpty() && attributes.fontVariant.isEmpty() && attributes.textAnchor.isEmpty()) return; QSvgTinyDocument *doc = node->document(); QSvgFontStyle *fontStyle = nullptr; if (!attributes.fontFamily.isEmpty()) { QSvgFont *svgFont = doc->svgFont(attributes.fontFamily.toString()); if (svgFont) fontStyle = new QSvgFontStyle(svgFont, doc); } if (!fontStyle) fontStyle = new 
QSvgFontStyle; if (!attributes.fontFamily.isEmpty() && attributes.fontFamily != QT_INHERIT) { QString family = attributes.fontFamily.toString().trimmed(); if (family.at(0) == QLatin1Char('\'') || family.at(0) == QLatin1Char('\"')) family = family.mid(1, family.length() - 2); fontStyle->setFamily(family); } if (!attributes.fontSize.isEmpty() && attributes.fontSize != QT_INHERIT) { // TODO: Support relative sizes 'larger' and 'smaller'. const FontSizeSpec spec = fontSizeSpec(attributes.fontSize); switch (spec) { case FontSizeNone: break; case FontSizeValue: { QSvgHandler::LengthType type; qreal fs = parseLength(attributes.fontSize, type, handler); fs = convertToPixels(fs, true, type); fontStyle->setSize(qMin(fs, qreal(0xffff))); } break; default: fontStyle->setSize(sizeTable[spec]); break; } } if (!attributes.fontStyle.isEmpty() && attributes.fontStyle != QT_INHERIT) { if (attributes.fontStyle == QLatin1String("normal")) { fontStyle->setStyle(QFont::StyleNormal); } else if (attributes.fontStyle == QLatin1String("italic")) { fontStyle->setStyle(QFont::StyleItalic); } else if (attributes.fontStyle == QLatin1String("oblique")) { fontStyle->setStyle(QFont::StyleOblique); } } if (!attributes.fontWeight.isEmpty() && attributes.fontWeight != QT_INHERIT) { bool ok = false; const int weightNum = attributes.fontWeight.toInt(&ok); if (ok) { fontStyle->setWeight(weightNum); } else { if (attributes.fontWeight == QLatin1String("normal")) { fontStyle->setWeight(QFont::Normal); } else if (attributes.fontWeight == QLatin1String("bold")) { fontStyle->setWeight(QFont::Bold); } else if (attributes.fontWeight == QLatin1String("bolder")) { fontStyle->setWeight(QSvgFontStyle::BOLDER); } else if (attributes.fontWeight == QLatin1String("lighter")) { fontStyle->setWeight(QSvgFontStyle::LIGHTER); } } } if (!attributes.fontVariant.isEmpty() && attributes.fontVariant != QT_INHERIT) { if (attributes.fontVariant == QLatin1String("normal")) fontStyle->setVariant(QFont::MixedCase); else if (attributes.fontVariant == QLatin1String("small-caps")) fontStyle->setVariant(QFont::SmallCaps); } if (!attributes.textAnchor.isEmpty() && attributes.textAnchor != QT_INHERIT) { if (attributes.textAnchor == QLatin1String("start")) fontStyle->setTextAnchor(Qt::AlignLeft); if (attributes.textAnchor == QLatin1String("middle")) fontStyle->setTextAnchor(Qt::AlignHCenter); else if (attributes.textAnchor == QLatin1String("end")) fontStyle->setTextAnchor(Qt::AlignRight); } node->appendStyleProperty(fontStyle, attributes.id); } static void parseTransform(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *) { if (attributes.transform.isEmpty()) return; QTransform matrix = parseTransformationMatrix(attributes.transform.trimmed()); if (!matrix.isIdentity()) { node->appendStyleProperty(new QSvgTransformStyle(QTransform(matrix)), attributes.id); } } static void parseVisibility(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *) { QSvgNode *parent = node->parent(); if (parent && (attributes.visibility.isEmpty() || attributes.visibility == QT_INHERIT)) node->setVisible(parent->isVisible()); else if (attributes.visibility == QLatin1String("hidden") || attributes.visibility == QLatin1String("collapse")) { node->setVisible(false); } else node->setVisible(true); } static void pathArcSegment(QPainterPath &path, qreal xc, qreal yc, qreal th0, qreal th1, qreal rx, qreal ry, qreal xAxisRotation) { qreal sinTh, cosTh; qreal a00, a01, a10, a11; qreal x1, y1, x2, y2, x3, y3; qreal t; qreal thHalf; sinTh = qSin(xAxisRotation * (Q_PI / 
180.0)); cosTh = qCos(xAxisRotation * (Q_PI / 180.0)); a00 = cosTh * rx; a01 = -sinTh * ry; a10 = sinTh * rx; a11 = cosTh * ry; thHalf = 0.5 * (th1 - th0); t = (8.0 / 3.0) * qSin(thHalf * 0.5) * qSin(thHalf * 0.5) / qSin(thHalf); x1 = xc + qCos(th0) - t * qSin(th0); y1 = yc + qSin(th0) + t * qCos(th0); x3 = xc + qCos(th1); y3 = yc + qSin(th1); x2 = x3 + t * qSin(th1); y2 = y3 - t * qCos(th1); path.cubicTo(a00 * x1 + a01 * y1, a10 * x1 + a11 * y1, a00 * x2 + a01 * y2, a10 * x2 + a11 * y2, a00 * x3 + a01 * y3, a10 * x3 + a11 * y3); } // the arc handling code underneath is from XSVG (BSD license) /* * Copyright 2002 USC/Information Sciences Institute * * Permission to use, copy, modify, distribute, and sell this software * and its documentation for any purpose is hereby granted without * fee, provided that the above copyright notice appear in all copies * and that both that copyright notice and this permission notice * appear in supporting documentation, and that the name of * Information Sciences Institute not be used in advertising or * publicity pertaining to distribution of the software without * specific, written prior permission. Information Sciences Institute * makes no representations about the suitability of this software for * any purpose. It is provided "as is" without express or implied * warranty. * * INFORMATION SCIENCES INSTITUTE DISCLAIMS ALL WARRANTIES WITH REGARD * TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL INFORMATION SCIENCES * INSTITUTE BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA * OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. * */ static void pathArc(QPainterPath &path, qreal rx, qreal ry, qreal x_axis_rotation, int large_arc_flag, int sweep_flag, qreal x, qreal y, qreal curx, qreal cury) { const qreal Pr1 = rx * rx; const qreal Pr2 = ry * ry; if (!Pr1 || !Pr2) return; qreal sin_th, cos_th; qreal a00, a01, a10, a11; qreal x0, y0, x1, y1, xc, yc; qreal d, sfactor, sfactor_sq; qreal th0, th1, th_arc; int i, n_segs; qreal dx, dy, dx1, dy1, Px, Py, check; rx = qAbs(rx); ry = qAbs(ry); sin_th = qSin(x_axis_rotation * (Q_PI / 180.0)); cos_th = qCos(x_axis_rotation * (Q_PI / 180.0)); dx = (curx - x) / 2.0; dy = (cury - y) / 2.0; dx1 = cos_th * dx + sin_th * dy; dy1 = -sin_th * dx + cos_th * dy; Px = dx1 * dx1; Py = dy1 * dy1; /* Spec : check if radii are large enough */ check = Px / Pr1 + Py / Pr2; if (check > 1) { rx = rx * qSqrt(check); ry = ry * qSqrt(check); } a00 = cos_th / rx; a01 = sin_th / rx; a10 = -sin_th / ry; a11 = cos_th / ry; x0 = a00 * curx + a01 * cury; y0 = a10 * curx + a11 * cury; x1 = a00 * x + a01 * y; y1 = a10 * x + a11 * y; /* (x0, y0) is current point in transformed coordinate space. (x1, y1) is new point in transformed coordinate space. The arc fits a unit-radius circle in this space. */ d = (x1 - x0) * (x1 - x0) + (y1 - y0) * (y1 - y0); if (!d) return; sfactor_sq = 1.0 / d - 0.25; if (sfactor_sq < 0) sfactor_sq = 0; sfactor = qSqrt(sfactor_sq); if (sweep_flag == large_arc_flag) sfactor = -sfactor; xc = 0.5 * (x0 + x1) - sfactor * (y1 - y0); yc = 0.5 * (y0 + y1) + sfactor * (x1 - x0); /* (xc, yc) is center of the circle. 
*/ th0 = qAtan2(y0 - yc, x0 - xc); th1 = qAtan2(y1 - yc, x1 - xc); th_arc = th1 - th0; if (th_arc < 0 && sweep_flag) th_arc += 2 * Q_PI; else if (th_arc > 0 && !sweep_flag) th_arc -= 2 * Q_PI; n_segs = qCeil(qAbs(th_arc / (Q_PI * 0.5 + 0.001))); for (i = 0; i < n_segs; i++) { pathArcSegment(path, xc, yc, th0 + i * th_arc / n_segs, th0 + (i + 1) * th_arc / n_segs, rx, ry, x_axis_rotation); } } static bool parsePathDataFast(QStringView dataStr, QPainterPath &path) { qreal x0 = 0, y0 = 0; // starting point qreal x = 0, y = 0; // current point char lastMode = 0; QPointF ctrlPt; const QChar *str = dataStr.constData(); const QChar *end = str + dataStr.size(); while (str != end) { while (str->isSpace() && (str + 1) != end) ++str; QChar pathElem = *str; ++str; QChar endc = *end; *const_cast<QChar *>(end) = u'\0'; // parseNumbersArray requires 0-termination that QStringView cannot guarantee const char *pattern = nullptr; if (pathElem == QLatin1Char('a') || pathElem == QLatin1Char('A')) pattern = "rrrffrr"; QVarLengthArray<qreal, 8> arg; parseNumbersArray(str, arg, pattern); *const_cast<QChar *>(end) = endc; if (pathElem == QLatin1Char('z') || pathElem == QLatin1Char('Z')) arg.append(0);//dummy const qreal *num = arg.constData(); int count = arg.count(); while (count > 0) { qreal offsetX = x; // correction offsets qreal offsetY = y; // for relative commands switch (pathElem.unicode()) { case 'm': { if (count < 2) { num++; count--; break; } x = x0 = num[0] + offsetX; y = y0 = num[1] + offsetY; num += 2; count -= 2; path.moveTo(x0, y0); // As per 1.2 spec 8.3.2 The "moveto" commands // If a 'moveto' is followed by multiple pairs of coordinates without explicit commands, // the subsequent pairs shall be treated as implicit 'lineto' commands. pathElem = QLatin1Char('l'); } break; case 'M': { if (count < 2) { num++; count--; break; } x = x0 = num[0]; y = y0 = num[1]; num += 2; count -= 2; path.moveTo(x0, y0); // As per 1.2 spec 8.3.2 The "moveto" commands // If a 'moveto' is followed by multiple pairs of coordinates without explicit commands, // the subsequent pairs shall be treated as implicit 'lineto' commands. 
pathElem = QLatin1Char('L'); } break; case 'z': case 'Z': { x = x0; y = y0; count--; // skip dummy num++; path.closeSubpath(); } break; case 'l': { if (count < 2) { num++; count--; break; } x = num[0] + offsetX; y = num[1] + offsetY; num += 2; count -= 2; path.lineTo(x, y); } break; case 'L': { if (count < 2) { num++; count--; break; } x = num[0]; y = num[1]; num += 2; count -= 2; path.lineTo(x, y); } break; case 'h': { x = num[0] + offsetX; num++; count--; path.lineTo(x, y); } break; case 'H': { x = num[0]; num++; count--; path.lineTo(x, y); } break; case 'v': { y = num[0] + offsetY; num++; count--; path.lineTo(x, y); } break; case 'V': { y = num[0]; num++; count--; path.lineTo(x, y); } break; case 'c': { if (count < 6) { num += count; count = 0; break; } QPointF c1(num[0] + offsetX, num[1] + offsetY); QPointF c2(num[2] + offsetX, num[3] + offsetY); QPointF e(num[4] + offsetX, num[5] + offsetY); num += 6; count -= 6; path.cubicTo(c1, c2, e); ctrlPt = c2; x = e.x(); y = e.y(); break; } case 'C': { if (count < 6) { num += count; count = 0; break; } QPointF c1(num[0], num[1]); QPointF c2(num[2], num[3]); QPointF e(num[4], num[5]); num += 6; count -= 6; path.cubicTo(c1, c2, e); ctrlPt = c2; x = e.x(); y = e.y(); break; } case 's': { if (count < 4) { num += count; count = 0; break; } QPointF c1; if (lastMode == 'c' || lastMode == 'C' || lastMode == 's' || lastMode == 'S') c1 = QPointF(2*x-ctrlPt.x(), 2*y-ctrlPt.y()); else c1 = QPointF(x, y); QPointF c2(num[0] + offsetX, num[1] + offsetY); QPointF e(num[2] + offsetX, num[3] + offsetY); num += 4; count -= 4; path.cubicTo(c1, c2, e); ctrlPt = c2; x = e.x(); y = e.y(); break; } case 'S': { if (count < 4) { num += count; count = 0; break; } QPointF c1; if (lastMode == 'c' || lastMode == 'C' || lastMode == 's' || lastMode == 'S') c1 = QPointF(2*x-ctrlPt.x(), 2*y-ctrlPt.y()); else c1 = QPointF(x, y); QPointF c2(num[0], num[1]); QPointF e(num[2], num[3]); num += 4; count -= 4; path.cubicTo(c1, c2, e); ctrlPt = c2; x = e.x(); y = e.y(); break; } case 'q': { if (count < 4) { num += count; count = 0; break; } QPointF c(num[0] + offsetX, num[1] + offsetY); QPointF e(num[2] + offsetX, num[3] + offsetY); num += 4; count -= 4; path.quadTo(c, e); ctrlPt = c; x = e.x(); y = e.y(); break; } case 'Q': { if (count < 4) { num += count; count = 0; break; } QPointF c(num[0], num[1]); QPointF e(num[2], num[3]); num += 4; count -= 4; path.quadTo(c, e); ctrlPt = c; x = e.x(); y = e.y(); break; } case 't': { if (count < 2) { num += count; count = 0; break; } QPointF e(num[0] + offsetX, num[1] + offsetY); num += 2; count -= 2; QPointF c; if (lastMode == 'q' || lastMode == 'Q' || lastMode == 't' || lastMode == 'T') c = QPointF(2*x-ctrlPt.x(), 2*y-ctrlPt.y()); else c = QPointF(x, y); path.quadTo(c, e); ctrlPt = c; x = e.x(); y = e.y(); break; } case 'T': { if (count < 2) { num += count; count = 0; break; } QPointF e(num[0], num[1]); num += 2; count -= 2; QPointF c; if (lastMode == 'q' || lastMode == 'Q' || lastMode == 't' || lastMode == 'T') c = QPointF(2*x-ctrlPt.x(), 2*y-ctrlPt.y()); else c = QPointF(x, y); path.quadTo(c, e); ctrlPt = c; x = e.x(); y = e.y(); break; } case 'a': { if (count < 7) { num += count; count = 0; break; } qreal rx = (*num++); qreal ry = (*num++); qreal xAxisRotation = (*num++); qreal largeArcFlag = (*num++); qreal sweepFlag = (*num++); qreal ex = (*num++) + offsetX; qreal ey = (*num++) + offsetY; count -= 7; qreal curx = x; qreal cury = y; pathArc(path, rx, ry, xAxisRotation, int(largeArcFlag), int(sweepFlag), ex, ey, curx, cury); x = ex; y = ey; 
} break; case 'A': { if (count < 7) { num += count; count = 0; break; } qreal rx = (*num++); qreal ry = (*num++); qreal xAxisRotation = (*num++); qreal largeArcFlag = (*num++); qreal sweepFlag = (*num++); qreal ex = (*num++); qreal ey = (*num++); count -= 7; qreal curx = x; qreal cury = y; pathArc(path, rx, ry, xAxisRotation, int(largeArcFlag), int(sweepFlag), ex, ey, curx, cury); x = ex; y = ey; } break; default: return false; } lastMode = pathElem.toLatin1(); } } return true; } static bool parseStyle(QSvgNode *node, const QXmlStreamAttributes &attributes, QSvgHandler *); static bool parseStyle(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *); #ifndef QT_NO_CSSPARSER static void parseCSStoXMLAttrs(const QList<QCss::Declaration> &declarations, QXmlStreamAttributes &attributes) { for (int i = 0; i < declarations.count(); ++i) { const QCss::Declaration &decl = declarations.at(i); if (decl.d->property.isEmpty()) continue; QCss::Value val = decl.d->values.first(); QString valueStr; const int valCount = decl.d->values.count(); if (valCount != 1) { for (int i = 0; i < valCount; ++i) { valueStr += decl.d->values[i].toString(); if (i + 1 < valCount) valueStr += QLatin1Char(','); } } else { valueStr = val.toString(); } if (val.type == QCss::Value::Uri) { valueStr.prepend(QLatin1String("url(")); valueStr.append(QLatin1Char(')')); } else if (val.type == QCss::Value::Function) { QStringList lst = val.variant.toStringList(); valueStr.append(lst.at(0)); valueStr.append(QLatin1Char('(')); for (int i = 1; i < lst.count(); ++i) { valueStr.append(lst.at(i)); if ((i +1) < lst.count()) valueStr.append(QLatin1Char(',')); } valueStr.append(QLatin1Char(')')); } else if (val.type == QCss::Value::KnownIdentifier) { switch (val.variant.toInt()) { case QCss::Value_None: valueStr = QLatin1String("none"); break; default: break; } } attributes.append(QString(), decl.d->property, valueStr); } } void QSvgHandler::parseCSStoXMLAttrs(const QString &css, QList<QSvgCssAttribute> *attributes) { // preprocess (for unicode escapes), tokenize and remove comments m_cssParser.init(css); QString key; attributes->reserve(10); while (m_cssParser.hasNext()) { m_cssParser.skipSpace(); if (!m_cssParser.hasNext()) break; m_cssParser.next(); QString name; if (m_cssParser.hasEscapeSequences) { key = m_cssParser.lexem(); name = key; } else { const QCss::Symbol &sym = m_cssParser.symbol(); name = sym.text.mid(sym.start, sym.len); } m_cssParser.skipSpace(); if (!m_cssParser.test(QCss::COLON)) break; m_cssParser.skipSpace(); if (!m_cssParser.hasNext()) break; QSvgCssAttribute attribute; attribute.name = name; const int firstSymbol = m_cssParser.index; int symbolCount = 0; do { m_cssParser.next(); ++symbolCount; } while (m_cssParser.hasNext() && !m_cssParser.test(QCss::SEMICOLON)); bool canExtractValueByRef = !m_cssParser.hasEscapeSequences; if (canExtractValueByRef) { int len = m_cssParser.symbols.at(firstSymbol).len; for (int i = firstSymbol + 1; i < firstSymbol + symbolCount; ++i) { len += m_cssParser.symbols.at(i).len; if (m_cssParser.symbols.at(i - 1).start + m_cssParser.symbols.at(i - 1).len != m_cssParser.symbols.at(i).start) { canExtractValueByRef = false; break; } } if (canExtractValueByRef) { const QCss::Symbol &sym = m_cssParser.symbols.at(firstSymbol); attribute.value = sym.text.mid(sym.start, len); } } if (!canExtractValueByRef) { QString value; for (int i = firstSymbol; i < m_cssParser.index - 1; ++i) value += m_cssParser.symbols.at(i).lexem(); attribute.value = value; } attributes->append(attribute); 
m_cssParser.skipSpace(); } } static void cssStyleLookup(QSvgNode *node, QSvgHandler *handler, QSvgStyleSelector *selector) { QCss::StyleSelector::NodePtr cssNode; cssNode.ptr = node; QList<QCss::Declaration> decls = selector->declarationsForNode(cssNode); QXmlStreamAttributes attributes; parseCSStoXMLAttrs(decls, attributes); parseStyle(node, attributes, handler); } #endif // QT_NO_CSSPARSER static inline QStringList stringToList(const QString &str) { QStringList lst = str.split(QLatin1Char(','), Qt::SkipEmptyParts); return lst; } static bool parseCoreNode(QSvgNode *node, const QXmlStreamAttributes &attributes) { QStringList features; QStringList extensions; QStringList languages; QStringList formats; QStringList fonts; QString xmlClassStr; for (int i = 0; i < attributes.count(); ++i) { const QXmlStreamAttribute &attribute = attributes.at(i); QStringView name = attribute.qualifiedName(); if (name.isEmpty()) continue; QStringView value = attribute.value(); switch (name.at(0).unicode()) { case 'c': if (name == QLatin1String("class")) xmlClassStr = value.toString(); break; case 'r': if (name == QLatin1String("requiredFeatures")) features = stringToList(value.toString()); else if (name == QLatin1String("requiredExtensions")) extensions = stringToList(value.toString()); else if (name == QLatin1String("requiredFormats")) formats = stringToList(value.toString()); else if (name == QLatin1String("requiredFonts")) fonts = stringToList(value.toString()); break; case 's': if (name == QLatin1String("systemLanguage")) languages = stringToList(value.toString()); break; default: break; } } node->setRequiredFeatures(features); node->setRequiredExtensions(extensions); node->setRequiredLanguages(languages); node->setRequiredFormats(formats); node->setRequiredFonts(fonts); node->setNodeId(someId(attributes)); node->setXmlClass(xmlClassStr); return true; } static void parseOpacity(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *) { if (attributes.opacity.isEmpty()) return; const QStringView value = attributes.opacity.trimmed(); bool ok = false; qreal op = value.toDouble(&ok); if (ok) { QSvgOpacityStyle *opacity = new QSvgOpacityStyle(qBound(qreal(0.0), op, qreal(1.0))); node->appendStyleProperty(opacity, attributes.id); } } static QPainter::CompositionMode svgToQtCompositionMode(const QString &op) { #define NOOP qDebug()<<"Operation: "<<op<<" is not implemented" if (op == QLatin1String("clear")) { return QPainter::CompositionMode_Clear; } else if (op == QLatin1String("src")) { return QPainter::CompositionMode_Source; } else if (op == QLatin1String("dst")) { return QPainter::CompositionMode_Destination; } else if (op == QLatin1String("src-over")) { return QPainter::CompositionMode_SourceOver; } else if (op == QLatin1String("dst-over")) { return QPainter::CompositionMode_DestinationOver; } else if (op == QLatin1String("src-in")) { return QPainter::CompositionMode_SourceIn; } else if (op == QLatin1String("dst-in")) { return QPainter::CompositionMode_DestinationIn; } else if (op == QLatin1String("src-out")) { return QPainter::CompositionMode_SourceOut; } else if (op == QLatin1String("dst-out")) { return QPainter::CompositionMode_DestinationOut; } else if (op == QLatin1String("src-atop")) { return QPainter::CompositionMode_SourceAtop; } else if (op == QLatin1String("dst-atop")) { return QPainter::CompositionMode_DestinationAtop; } else if (op == QLatin1String("xor")) { return QPainter::CompositionMode_Xor; } else if (op == QLatin1String("plus")) { return QPainter::CompositionMode_Plus; } else if (op 
== QLatin1String("multiply")) { return QPainter::CompositionMode_Multiply; } else if (op == QLatin1String("screen")) { return QPainter::CompositionMode_Screen; } else if (op == QLatin1String("overlay")) { return QPainter::CompositionMode_Overlay; } else if (op == QLatin1String("darken")) { return QPainter::CompositionMode_Darken; } else if (op == QLatin1String("lighten")) { return QPainter::CompositionMode_Lighten; } else if (op == QLatin1String("color-dodge")) { return QPainter::CompositionMode_ColorDodge; } else if (op == QLatin1String("color-burn")) { return QPainter::CompositionMode_ColorBurn; } else if (op == QLatin1String("hard-light")) { return QPainter::CompositionMode_HardLight; } else if (op == QLatin1String("soft-light")) { return QPainter::CompositionMode_SoftLight; } else if (op == QLatin1String("difference")) { return QPainter::CompositionMode_Difference; } else if (op == QLatin1String("exclusion")) { return QPainter::CompositionMode_Exclusion; } else { NOOP; } return QPainter::CompositionMode_SourceOver; } static void parseCompOp(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *) { if (attributes.compOp.isEmpty()) return; QString value = attributes.compOp.toString().trimmed(); if (!value.isEmpty()) { QSvgCompOpStyle *compop = new QSvgCompOpStyle(svgToQtCompositionMode(value)); node->appendStyleProperty(compop, attributes.id); } } static inline QSvgNode::DisplayMode displayStringToEnum(const QString &str) { if (str == QLatin1String("inline")) { return QSvgNode::InlineMode; } else if (str == QLatin1String("block")) { return QSvgNode::BlockMode; } else if (str == QLatin1String("list-item")) { return QSvgNode::ListItemMode; } else if (str == QLatin1String("run-in")) { return QSvgNode::RunInMode; } else if (str == QLatin1String("compact")) { return QSvgNode::CompactMode; } else if (str == QLatin1String("marker")) { return QSvgNode::MarkerMode; } else if (str == QLatin1String("table")) { return QSvgNode::TableMode; } else if (str == QLatin1String("inline-table")) { return QSvgNode::InlineTableMode; } else if (str == QLatin1String("table-row-group")) { return QSvgNode::TableRowGroupMode; } else if (str == QLatin1String("table-header-group")) { return QSvgNode::TableHeaderGroupMode; } else if (str == QLatin1String("table-footer-group")) { return QSvgNode::TableFooterGroupMode; } else if (str == QLatin1String("table-row")) { return QSvgNode::TableRowMode; } else if (str == QLatin1String("table-column-group")) { return QSvgNode::TableColumnGroupMode; } else if (str == QLatin1String("table-column")) { return QSvgNode::TableColumnMode; } else if (str == QLatin1String("table-cell")) { return QSvgNode::TableCellMode; } else if (str == QLatin1String("table-caption")) { return QSvgNode::TableCaptionMode; } else if (str == QLatin1String("none")) { return QSvgNode::NoneMode; } else if (str == QT_INHERIT) { return QSvgNode::InheritMode; } return QSvgNode::BlockMode; } static void parseOthers(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *) { if (attributes.display.isEmpty()) return; QString displayStr = attributes.display.toString().trimmed(); if (!displayStr.isEmpty()) { node->setDisplayMode(displayStringToEnum(displayStr)); } } static void parseRenderingHints(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *) { if (attributes.imageRendering.isEmpty()) return; QString ir = attributes.imageRendering.toString().trimmed(); QSvgQualityStyle *p = new QSvgQualityStyle(0); if (ir == QLatin1String("auto")) 
p->setImageRendering(QSvgQualityStyle::ImageRenderingAuto); else if (ir == QLatin1String("optimizeSpeed")) p->setImageRendering(QSvgQualityStyle::ImageRenderingOptimizeSpeed); else if (ir == QLatin1String("optimizeQuality")) p->setImageRendering(QSvgQualityStyle::ImageRenderingOptimizeQuality); node->appendStyleProperty(p, attributes.id); } static bool parseStyle(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *handler) { parseColor(node, attributes, handler); parseBrush(node, attributes, handler); parsePen(node, attributes, handler); parseFont(node, attributes, handler); parseTransform(node, attributes, handler); parseVisibility(node, attributes, handler); parseOpacity(node, attributes, handler); parseCompOp(node, attributes, handler); parseRenderingHints(node, attributes, handler); parseOthers(node, attributes, handler); #if 0 value = attributes.value("audio-level"); value = attributes.value("color-rendering"); value = attributes.value("display-align"); value = attributes.value("image-rendering"); value = attributes.value("line-increment"); value = attributes.value("pointer-events"); value = attributes.value("shape-rendering"); value = attributes.value("solid-color"); value = attributes.value("solid-opacity"); value = attributes.value("text-rendering"); value = attributes.value("vector-effect"); value = attributes.value("viewport-fill"); value = attributes.value("viewport-fill-opacity"); #endif return true; } static bool parseStyle(QSvgNode *node, const QXmlStreamAttributes &attrs, QSvgHandler *handler) { return parseStyle(node, QSvgAttributes(attrs, handler), handler); } static bool parseAnchorNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static bool parseAnimateNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static int parseClockValue(QStringView str, bool *ok) { int res = 0; int ms = 1000; str = str.trimmed(); if (str.endsWith(QLatin1String("ms"))) { str.chop(2); ms = 1; } else if (str.endsWith(QLatin1String("s"))) { str.chop(1); } double val = ms * toDouble(str, ok); if (ok) { if (val > std::numeric_limits<int>::min() && val < std::numeric_limits<int>::max()) res = static_cast<int>(val); else *ok = false; } return res; } static bool parseAnimateColorNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { QStringView fromStr = attributes.value(QLatin1String("from")); QStringView toStr = attributes.value(QLatin1String("to")); QString valuesStr = attributes.value(QLatin1String("values")).toString(); QString beginStr = attributes.value(QLatin1String("begin")).toString(); QString durStr = attributes.value(QLatin1String("dur")).toString(); QString targetStr = attributes.value(QLatin1String("attributeName")).toString(); QString repeatStr = attributes.value(QLatin1String("repeatCount")).toString(); QString fillStr = attributes.value(QLatin1String("fill")).toString(); QList<QColor> colors; if (valuesStr.isEmpty()) { QColor startColor, endColor; resolveColor(fromStr, startColor, handler); resolveColor(toStr, endColor, handler); colors.reserve(2); colors.append(startColor); colors.append(endColor); } else { QStringList str = valuesStr.split(QLatin1Char(';')); colors.reserve(str.count()); QStringList::const_iterator itr; for (itr = str.constBegin(); itr != str.constEnd(); ++itr) { QColor color; resolveColor(*itr, color, handler); colors.append(color); } } bool ok = true; int 
begin = parseClockValue(beginStr, &ok); if (!ok) return false; int end = begin + parseClockValue(durStr, &ok); if (!ok || end <= begin) return false; QSvgAnimateColor *anim = new QSvgAnimateColor(begin, end, 0); anim->setArgs((targetStr == QLatin1String("fill")), colors); anim->setFreeze(fillStr == QLatin1String("freeze")); anim->setRepeatCount( (repeatStr == QLatin1String("indefinite")) ? -1 : (repeatStr == QLatin1String("")) ? 1 : toDouble(repeatStr)); parent->appendStyleProperty(anim, someId(attributes)); parent->document()->setAnimated(true); handler->setAnimPeriod(begin, end); return true; } static bool parseAimateMotionNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static void parseNumberTriplet(QList<qreal> &values, const QChar *&s) { QList<qreal> list = parseNumbersList(s); values << list; for (int i = 3 - list.size(); i > 0; --i) values.append(0.0); } static bool parseAnimateTransformNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { QString typeStr = attributes.value(QLatin1String("type")).toString(); QString values = attributes.value(QLatin1String("values")).toString(); QString beginStr = attributes.value(QLatin1String("begin")).toString(); QString durStr = attributes.value(QLatin1String("dur")).toString(); QString repeatStr = attributes.value(QLatin1String("repeatCount")).toString(); QString fillStr = attributes.value(QLatin1String("fill")).toString(); QString fromStr = attributes.value(QLatin1String("from")).toString(); QString toStr = attributes.value(QLatin1String("to")).toString(); QString byStr = attributes.value(QLatin1String("by")).toString(); QString addtv = attributes.value(QLatin1String("additive")).toString(); QSvgAnimateTransform::Additive additive = QSvgAnimateTransform::Replace; if (addtv == QLatin1String("sum")) additive = QSvgAnimateTransform::Sum; QList<qreal> vals; if (values.isEmpty()) { const QChar *s; if (fromStr.isEmpty()) { if (!byStr.isEmpty()) { // By-animation. additive = QSvgAnimateTransform::Sum; vals.append(0.0); vals.append(0.0); vals.append(0.0); parseNumberTriplet(vals, s = byStr.constData()); } else { // To-animation not defined. return false; } } else { if (!toStr.isEmpty()) { // From-to-animation. parseNumberTriplet(vals, s = fromStr.constData()); parseNumberTriplet(vals, s = toStr.constData()); } else if (!byStr.isEmpty()) { // From-by-animation. 
parseNumberTriplet(vals, s = fromStr.constData()); parseNumberTriplet(vals, s = byStr.constData()); for (int i = vals.size() - 3; i < vals.size(); ++i) vals[i] += vals[i - 3]; } else { return false; } } } else { const QChar *s = values.constData(); while (s && *s != QLatin1Char(0)) { parseNumberTriplet(vals, s); if (*s == QLatin1Char(0)) break; ++s; } } bool ok = true; int begin = parseClockValue(beginStr, &ok); if (!ok) return false; int end = begin + parseClockValue(durStr, &ok); if (!ok || end <= begin) return false; QSvgAnimateTransform::TransformType type = QSvgAnimateTransform::Empty; if (typeStr == QLatin1String("translate")) { type = QSvgAnimateTransform::Translate; } else if (typeStr == QLatin1String("scale")) { type = QSvgAnimateTransform::Scale; } else if (typeStr == QLatin1String("rotate")) { type = QSvgAnimateTransform::Rotate; } else if (typeStr == QLatin1String("skewX")) { type = QSvgAnimateTransform::SkewX; } else if (typeStr == QLatin1String("skewY")) { type = QSvgAnimateTransform::SkewY; } else { return false; } QSvgAnimateTransform *anim = new QSvgAnimateTransform(begin, end, 0); anim->setArgs(type, additive, vals); anim->setFreeze(fillStr == QLatin1String("freeze")); anim->setRepeatCount( (repeatStr == QLatin1String("indefinite"))? -1 : (repeatStr == QLatin1String(""))? 1 : toDouble(repeatStr)); parent->appendStyleProperty(anim, someId(attributes)); parent->document()->setAnimated(true); handler->setAnimPeriod(begin, end); return true; } static QSvgNode * createAnimationNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return 0; } static bool parseAudioNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static QSvgNode *createCircleNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { const QStringView cx = attributes.value(QLatin1String("cx")); const QStringView cy = attributes.value(QLatin1String("cy")); const QStringView r = attributes.value(QLatin1String("r")); qreal ncx = toDouble(cx); qreal ncy = toDouble(cy); qreal nr = toDouble(r); if (nr < 0.0) return nullptr; QRectF rect(ncx-nr, ncy-nr, nr*2, nr*2); QSvgNode *circle = new QSvgCircle(parent, rect); return circle; } static QSvgNode *createDefsNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(attributes); QSvgDefs *defs = new QSvgDefs(parent); return defs; } static bool parseDescNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static bool parseDiscardNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static QSvgNode *createEllipseNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { const QStringView cx = attributes.value(QLatin1String("cx")); const QStringView cy = attributes.value(QLatin1String("cy")); const QStringView rx = attributes.value(QLatin1String("rx")); const QStringView ry = attributes.value(QLatin1String("ry")); qreal ncx = toDouble(cx); qreal ncy = toDouble(cy); qreal nrx = toDouble(rx); qreal nry = toDouble(ry); QRectF rect(ncx-nrx, ncy-nry, nrx*2, nry*2); QSvgNode *ellipse = new QSvgEllipse(parent, rect); return ellipse; } static QSvgStyleProperty *createFontNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { const QStringView hax = 
attributes.value(QLatin1String("horiz-adv-x")); QString myId = someId(attributes); qreal horizAdvX = toDouble(hax); while (parent && parent->type() != QSvgNode::DOC) { parent = parent->parent(); } if (parent && !myId.isEmpty()) { QSvgTinyDocument *doc = static_cast<QSvgTinyDocument*>(parent); QSvgFont *font = doc->svgFont(myId); if (!font) { font = new QSvgFont(horizAdvX); font->setFamilyName(myId); doc->addSvgFont(font); } return new QSvgFontStyle(font, doc); } return nullptr; } static bool parseFontFaceNode(QSvgStyleProperty *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { if (parent->type() != QSvgStyleProperty::FONT) { return false; } QSvgFontStyle *style = static_cast<QSvgFontStyle*>(parent); QSvgFont *font = style->svgFont(); QString name = attributes.value(QLatin1String("font-family")).toString(); const QStringView unitsPerEmStr = attributes.value(QLatin1String("units-per-em")); qreal unitsPerEm = toDouble(unitsPerEmStr); if (!unitsPerEm) unitsPerEm = 1000; if (!name.isEmpty()) font->setFamilyName(name); font->setUnitsPerEm(unitsPerEm); if (!font->familyName().isEmpty()) if (!style->doc()->svgFont(font->familyName())) style->doc()->addSvgFont(font); return true; } static bool parseFontFaceNameNode(QSvgStyleProperty *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { if (parent->type() != QSvgStyleProperty::FONT) { return false; } QSvgFontStyle *style = static_cast<QSvgFontStyle*>(parent); QSvgFont *font = style->svgFont(); QString name = attributes.value(QLatin1String("name")).toString(); if (!name.isEmpty()) font->setFamilyName(name); if (!font->familyName().isEmpty()) if (!style->doc()->svgFont(font->familyName())) style->doc()->addSvgFont(font); return true; } static bool parseFontFaceSrcNode(QSvgStyleProperty *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static bool parseFontFaceUriNode(QSvgStyleProperty *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static bool parseForeignObjectNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static QSvgNode *createGNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(attributes); QSvgG *node = new QSvgG(parent); return node; } static bool parseGlyphNode(QSvgStyleProperty *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { if (parent->type() != QSvgStyleProperty::FONT) { return false; } QSvgFontStyle *style = static_cast<QSvgFontStyle*>(parent); QSvgFont *font = style->svgFont(); createSvgGlyph(font, attributes); return true; } static bool parseHandlerNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static bool parseHkernNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static QSvgNode *createImageNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { const QStringView x = attributes.value(QLatin1String("x")); const QStringView y = attributes.value(QLatin1String("y")); const QStringView width = attributes.value(QLatin1String("width")); const QStringView height = attributes.value(QLatin1String("height")); QString filename = attributes.value(QLatin1String("xlink:href")).toString(); qreal nx = toDouble(x); qreal ny = toDouble(y); 
QSvgHandler::LengthType type; qreal nwidth = parseLength(width.toString(), type, handler); nwidth = convertToPixels(nwidth, true, type); qreal nheight = parseLength(height.toString(), type, handler); nheight = convertToPixels(nheight, false, type); filename = filename.trimmed(); if (filename.isEmpty()) { qCWarning(lcSvgHandler) << "QSvgHandler: Image filename is empty"; return 0; } if (nwidth <= 0 || nheight <= 0) { qCWarning(lcSvgHandler) << "QSvgHandler: Width or height for" << filename << "image was not greater than 0"; return 0; } QImage image; if (filename.startsWith(QLatin1String("data"))) { int idx = filename.lastIndexOf(QLatin1String("base64,")); if (idx != -1) { idx += 7; const QString dataStr = filename.mid(idx); QByteArray data = QByteArray::fromBase64(dataStr.toLatin1()); image = QImage::fromData(data); } else { qCDebug(lcSvgHandler) << "QSvgHandler::createImageNode: Unrecognized inline image format!"; } } else { const auto *file = qobject_cast<QFile *>(handler->device()); if (file) { QUrl url(filename); if (url.isRelative()) { QFileInfo info(file->fileName()); filename = info.absoluteDir().absoluteFilePath(filename); } } image = QImage(filename); } if (image.isNull()) { qCWarning(lcSvgHandler) << "Could not create image from" << filename; return 0; } if (image.format() == QImage::Format_ARGB32) image = image.convertToFormat(QImage::Format_ARGB32_Premultiplied); QSvgNode *img = new QSvgImage(parent, image, QRectF(nx, ny, nwidth, nheight)); return img; } static QSvgNode *createLineNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { const QStringView x1 = attributes.value(QLatin1String("x1")); const QStringView y1 = attributes.value(QLatin1String("y1")); const QStringView x2 = attributes.value(QLatin1String("x2")); const QStringView y2 = attributes.value(QLatin1String("y2")); qreal nx1 = toDouble(x1); qreal ny1 = toDouble(y1); qreal nx2 = toDouble(x2); qreal ny2 = toDouble(y2); QLineF lineBounds(nx1, ny1, nx2, ny2); QSvgNode *line = new QSvgLine(parent, lineBounds); return line; } static void parseBaseGradient(QSvgNode *node, const QXmlStreamAttributes &attributes, QSvgGradientStyle *gradProp, QSvgHandler *handler) { QString link = attributes.value(QLatin1String("xlink:href")).toString(); QStringView trans = attributes.value(QLatin1String("gradientTransform")); QString spread = attributes.value(QLatin1String("spreadMethod")).toString(); QString units = attributes.value(QLatin1String("gradientUnits")).toString(); QStringView colorStr = attributes.value(QLatin1String("color")); QStringView colorOpacityStr = attributes.value(QLatin1String("color-opacity")); QColor color; if (constructColor(colorStr, colorOpacityStr, color, handler)) { handler->popColor(); handler->pushColor(color); } QTransform matrix; QGradient *grad = gradProp->qgradient(); if (!link.isEmpty()) { QSvgStyleProperty *prop = node->styleProperty(link); //qDebug()<<"inherited "<<prop<<" ("<<link<<")"; if (prop && prop->type() == QSvgStyleProperty::GRADIENT) { QSvgGradientStyle *inherited = static_cast<QSvgGradientStyle*>(prop); if (!inherited->stopLink().isEmpty()) { gradProp->setStopLink(inherited->stopLink(), handler->document()); } else { grad->setStops(inherited->qgradient()->stops()); gradProp->setGradientStopsSet(inherited->gradientStopsSet()); } matrix = inherited->qtransform(); } else { gradProp->setStopLink(link, handler->document()); } } if (!trans.isEmpty()) { matrix = parseTransformationMatrix(trans); gradProp->setTransform(matrix); } else if (!matrix.isIdentity()) { 
gradProp->setTransform(matrix); } if (!spread.isEmpty()) { if (spread == QLatin1String("pad")) { grad->setSpread(QGradient::PadSpread); } else if (spread == QLatin1String("reflect")) { grad->setSpread(QGradient::ReflectSpread); } else if (spread == QLatin1String("repeat")) { grad->setSpread(QGradient::RepeatSpread); } } if (units.isEmpty() || units == QLatin1String("objectBoundingBox")) { grad->setCoordinateMode(QGradient::ObjectMode); } } static QSvgStyleProperty *createLinearGradientNode(QSvgNode *node, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { const QStringView x1 = attributes.value(QLatin1String("x1")); const QStringView y1 = attributes.value(QLatin1String("y1")); const QStringView x2 = attributes.value(QLatin1String("x2")); const QStringView y2 = attributes.value(QLatin1String("y2")); qreal nx1 = 0.0; qreal ny1 = 0.0; qreal nx2 = 1.0; qreal ny2 = 0.0; if (!x1.isEmpty()) nx1 = convertToNumber(x1, handler); if (!y1.isEmpty()) ny1 = convertToNumber(y1, handler); if (!x2.isEmpty()) nx2 = convertToNumber(x2, handler); if (!y2.isEmpty()) ny2 = convertToNumber(y2, handler); QSvgNode *itr = node; while (itr && itr->type() != QSvgNode::DOC) { itr = itr->parent(); } QLinearGradient *grad = new QLinearGradient(nx1, ny1, nx2, ny2); grad->setInterpolationMode(QGradient::ComponentInterpolation); QSvgGradientStyle *prop = new QSvgGradientStyle(grad); parseBaseGradient(node, attributes, prop, handler); return prop; } static bool parseMetadataNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static bool parseMissingGlyphNode(QSvgStyleProperty *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { if (parent->type() != QSvgStyleProperty::FONT) { return false; } QSvgFontStyle *style = static_cast<QSvgFontStyle*>(parent); QSvgFont *font = style->svgFont(); createSvgGlyph(font, attributes); return true; } static bool parseMpathNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static QSvgNode *createPathNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { QStringView data = attributes.value(QLatin1String("d")); QPainterPath qpath; qpath.setFillRule(Qt::WindingFill); //XXX do error handling parsePathDataFast(data, qpath); QSvgNode *path = new QSvgPath(parent, qpath); return path; } static QSvgNode *createPolygonNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { QString pointsStr = attributes.value(QLatin1String("points")).toString(); //same QPolygon parsing is in createPolylineNode const QChar *s = pointsStr.constData(); QList<qreal> points = parseNumbersList(s); QPolygonF poly(points.count()/2); for (int i = 0; i < poly.size(); ++i) poly[i] = QPointF(points.at(2 * i), points.at(2 * i + 1)); QSvgNode *polygon = new QSvgPolygon(parent, poly); return polygon; } static QSvgNode *createPolylineNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { QString pointsStr = attributes.value(QLatin1String("points")).toString(); //same QPolygon parsing is in createPolygonNode const QChar *s = pointsStr.constData(); QList<qreal> points = parseNumbersList(s); QPolygonF poly(points.count()/2); for (int i = 0; i < poly.size(); ++i) poly[i] = QPointF(points.at(2 * i), points.at(2 * i + 1)); QSvgNode *line = new QSvgPolyline(parent, poly); return line; } static bool parsePrefetchNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, 
QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static QSvgStyleProperty *createRadialGradientNode(QSvgNode *node, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { const QStringView cx = attributes.value(QLatin1String("cx")); const QStringView cy = attributes.value(QLatin1String("cy")); const QStringView r = attributes.value(QLatin1String("r")); const QStringView fx = attributes.value(QLatin1String("fx")); const QStringView fy = attributes.value(QLatin1String("fy")); qreal ncx = 0.5; qreal ncy = 0.5; if (!cx.isEmpty()) ncx = toDouble(cx); if (!cy.isEmpty()) ncy = toDouble(cy); qreal nr = 0.0; if (!r.isEmpty()) nr = toDouble(r); if (nr <= 0.0) return nullptr; qreal nfx = ncx; if (!fx.isEmpty()) nfx = toDouble(fx); qreal nfy = ncy; if (!fy.isEmpty()) nfy = toDouble(fy); QRadialGradient *grad = new QRadialGradient(ncx, ncy, nr, nfx, nfy, 0); grad->setInterpolationMode(QGradient::ComponentInterpolation); QSvgGradientStyle *prop = new QSvgGradientStyle(grad); parseBaseGradient(node, attributes, prop, handler); return prop; } static QSvgNode *createRectNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { const QStringView x = attributes.value(QLatin1String("x")); const QStringView y = attributes.value(QLatin1String("y")); const QStringView width = attributes.value(QLatin1String("width")); const QStringView height = attributes.value(QLatin1String("height")); const QStringView rx = attributes.value(QLatin1String("rx")); const QStringView ry = attributes.value(QLatin1String("ry")); bool ok = true; QSvgHandler::LengthType type; qreal nwidth = parseLength(width.toString(), type, handler, &ok); if (!ok) return nullptr; nwidth = convertToPixels(nwidth, true, type); qreal nheight = parseLength(height.toString(), type, handler, &ok); if (!ok) return nullptr; nheight = convertToPixels(nheight, true, type); qreal nrx = toDouble(rx); qreal nry = toDouble(ry); QRectF bounds(toDouble(x), toDouble(y), nwidth, nheight); if (bounds.isEmpty()) return nullptr; if (!rx.isEmpty() && ry.isEmpty()) nry = nrx; else if (!ry.isEmpty() && rx.isEmpty()) nrx = nry; //9.2 The 'rect' element clearly specifies it // but the case might in fact be handled because // we draw rounded rectangles differently if (nrx > bounds.width()/2) nrx = bounds.width()/2; if (nry > bounds.height()/2) nry = bounds.height()/2; //we draw rounded rect from 0...99 //svg from 0...bounds.width()/2 so we're adjusting the //coordinates nrx *= (100/(bounds.width()/2)); nry *= (100/(bounds.height()/2)); QSvgNode *rect = new QSvgRect(parent, bounds, int(nrx), int(nry)); return rect; } static bool parseScriptNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static bool parseSetNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static QSvgStyleProperty *createSolidColorNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { Q_UNUSED(parent); Q_UNUSED(attributes); QStringView solidColorStr = attributes.value(QLatin1String("solid-color")); QStringView solidOpacityStr = attributes.value(QLatin1String("solid-opacity")); if (solidOpacityStr.isEmpty()) solidOpacityStr = attributes.value(QLatin1String("opacity")); QColor color; if (!constructColor(solidColorStr, solidOpacityStr, color, handler)) return 0; QSvgSolidColorStyle *style = new QSvgSolidColorStyle(color); return style; } static bool 
parseStopNode(QSvgStyleProperty *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { if (parent->type() != QSvgStyleProperty::GRADIENT) return false; QString nodeIdStr = someId(attributes); QString xmlClassStr = attributes.value(QLatin1String("class")).toString(); //### nasty hack because stop gradients are not in the rendering tree // we force a dummy node with the same id and class into a rendering // tree to figure out whether the selector has a style for it // QSvgStyleSelector should be coded in a way that could avoid it QSvgAnimation anim; anim.setNodeId(nodeIdStr); anim.setXmlClass(xmlClassStr); QXmlStreamAttributes xmlAttr = attributes; #ifndef QT_NO_CSSPARSER QCss::StyleSelector::NodePtr cssNode; cssNode.ptr = &anim; QList<QCss::Declaration> decls = handler->selector()->declarationsForNode(cssNode); for (int i = 0; i < decls.count(); ++i) { const QCss::Declaration &decl = decls.at(i); if (decl.d->property.isEmpty()) continue; if (decl.d->values.count() != 1) continue; QCss::Value val = decl.d->values.first(); QString valueStr = val.toString(); if (val.type == QCss::Value::Uri) { valueStr.prepend(QLatin1String("url(")); valueStr.append(QLatin1Char(')')); } xmlAttr.append(QString(), decl.d->property, valueStr); } #endif QSvgAttributes attrs(xmlAttr, handler); QSvgGradientStyle *style = static_cast<QSvgGradientStyle*>(parent); QStringView colorStr = attrs.stopColor; QColor color; bool ok = true; qreal offset = convertToNumber(attrs.offset, handler, &ok); if (!ok) offset = 0.0; QString black = QString::fromLatin1("#000000"); if (colorStr.isEmpty()) { colorStr = black; } constructColor(colorStr, attrs.stopOpacity, color, handler); QGradient *grad = style->qgradient(); offset = qMin(qreal(1), qMax(qreal(0), offset)); // Clamp to range [0, 1] QGradientStops stops; if (style->gradientStopsSet()) { stops = grad->stops(); // If the stop offset equals the one previously added, add an epsilon to make it greater. if (offset <= stops.back().first) offset = stops.back().first + FLT_EPSILON; } // If offset is greater than one, it must be clamped to one. if (offset > 1.0) { if ((stops.size() == 1) || (stops.at(stops.size() - 2).first < 1.0 - FLT_EPSILON)) { stops.back().first = 1.0 - FLT_EPSILON; grad->setStops(stops); } offset = 1.0; } grad->setColorAt(offset, color); style->setGradientStopsSet(true); return true; } static bool parseStyleNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { Q_UNUSED(parent); #ifdef QT_NO_CSSPARSER Q_UNUSED(attributes); Q_UNUSED(handler); #else const QStringView type = attributes.value(QLatin1String("type")); if (type.compare(QLatin1String("text/css"), Qt::CaseInsensitive) == 0 || type.isNull()) handler->setInStyle(true); #endif return true; } static QSvgNode *createSvgNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { Q_UNUSED(parent); Q_UNUSED(attributes); QSvgTinyDocument *node = new QSvgTinyDocument(); const QStringView widthStr = attributes.value(QLatin1String("width")); const QStringView heightStr = attributes.value(QLatin1String("height")); QString viewBoxStr = attributes.value(QLatin1String("viewBox")).toString(); QSvgHandler::LengthType type = QSvgHandler::LT_PX; // FIXME: is the default correct? 
qreal width = 0; if (!widthStr.isEmpty()) { width = parseLength(widthStr.toString(), type, handler); if (type != QSvgHandler::LT_PT) width = convertToPixels(width, true, type); node->setWidth(int(width), type == QSvgHandler::LT_PERCENT); } qreal height = 0; if (!heightStr.isEmpty()) { height = parseLength(heightStr.toString(), type, handler); if (type != QSvgHandler::LT_PT) height = convertToPixels(height, false, type); node->setHeight(int(height), type == QSvgHandler::LT_PERCENT); } QStringList viewBoxValues; if (!viewBoxStr.isEmpty()) { viewBoxStr = viewBoxStr.replace(QLatin1Char(' '), QLatin1Char(',')); viewBoxStr = viewBoxStr.replace(QLatin1Char('\r'), QLatin1Char(',')); viewBoxStr = viewBoxStr.replace(QLatin1Char('\n'), QLatin1Char(',')); viewBoxStr = viewBoxStr.replace(QLatin1Char('\t'), QLatin1Char(',')); viewBoxValues = viewBoxStr.split(QLatin1Char(','), Qt::SkipEmptyParts); } if (viewBoxValues.count() == 4) { QString xStr = viewBoxValues.at(0).trimmed(); QString yStr = viewBoxValues.at(1).trimmed(); QString widthStr = viewBoxValues.at(2).trimmed(); QString heightStr = viewBoxValues.at(3).trimmed(); QSvgHandler::LengthType lt; qreal x = parseLength(xStr, lt, handler); qreal y = parseLength(yStr, lt, handler); qreal w = parseLength(widthStr, lt, handler); qreal h = parseLength(heightStr, lt, handler); node->setViewBox(QRectF(x, y, w, h)); } else if (width && height) { if (type == QSvgHandler::LT_PT) { width = convertToPixels(width, false, type); height = convertToPixels(height, false, type); } node->setViewBox(QRectF(0, 0, width, height)); } handler->setDefaultCoordinateSystem(QSvgHandler::LT_PX); return node; } static QSvgNode *createSwitchNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(attributes); QSvgSwitch *node = new QSvgSwitch(parent); return node; } static bool parseTbreakNode(QSvgNode *parent, const QXmlStreamAttributes &, QSvgHandler *) { if (parent->type() != QSvgNode::TEXTAREA) return false; static_cast<QSvgText*>(parent)->addLineBreak(); return true; } static QSvgNode *createTextNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { const QStringView x = attributes.value(QLatin1String("x")); const QStringView y = attributes.value(QLatin1String("y")); //### editable and rotate not handled QSvgHandler::LengthType type; qreal nx = parseLength(x.toString(), type, handler); nx = convertToPixels(nx, true, type); qreal ny = parseLength(y.toString(), type, handler); ny = convertToPixels(ny, true, type); QSvgNode *text = new QSvgText(parent, QPointF(nx, ny)); return text; } static QSvgNode *createTextAreaNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { QSvgText *node = static_cast<QSvgText *>(createTextNode(parent, attributes, handler)); if (node) { QSvgHandler::LengthType type; qreal width = parseLength(attributes.value(QLatin1String("width")), type, handler); qreal height = parseLength(attributes.value(QLatin1String("height")), type, handler); node->setTextArea(QSizeF(width, height)); } return node; } static QSvgNode *createTspanNode(QSvgNode *parent, const QXmlStreamAttributes &, QSvgHandler *) { return new QSvgTspan(parent); } static bool parseTitleNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static QSvgNode *createUseNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { QString linkId = 
attributes.value(QLatin1String("xlink:href")).toString().remove(0, 1); const QStringView xStr = attributes.value(QLatin1String("x")); const QStringView yStr = attributes.value(QLatin1String("y")); QSvgStructureNode *group = nullptr; if (linkId.isEmpty()) linkId = attributes.value(QLatin1String("href")).toString().remove(0, 1); switch (parent->type()) { case QSvgNode::DOC: case QSvgNode::DEFS: case QSvgNode::G: case QSvgNode::SWITCH: group = static_cast<QSvgStructureNode*>(parent); break; default: break; } if (group) { QPointF pt; if (!xStr.isNull() || !yStr.isNull()) { QSvgHandler::LengthType type; qreal nx = parseLength(xStr.toString(), type, handler); nx = convertToPixels(nx, true, type); qreal ny = parseLength(yStr.toString(), type, handler); ny = convertToPixels(ny, true, type); pt = QPointF(nx, ny); } QSvgNode *link = group->scopeNode(linkId); if (link) { if (parent->isDescendantOf(link)) qCWarning(lcSvgHandler, "link #%s is recursive!", qPrintable(linkId)); return new QSvgUse(pt, parent, link); } //delay link resolving, link might have not been created yet return new QSvgUse(pt, parent, linkId); } qCWarning(lcSvgHandler, "<use> element %s in wrong context!", qPrintable(linkId)); return 0; } static QSvgNode *createVideoNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return 0; } typedef QSvgNode *(*FactoryMethod)(QSvgNode *, const QXmlStreamAttributes &, QSvgHandler *); static FactoryMethod findGroupFactory(const QString &name) { if (name.isEmpty()) return 0; QStringView ref = QStringView{name}.mid(1, name.length() - 1); switch (name.at(0).unicode()) { case 'd': if (ref == QLatin1String("efs")) return createDefsNode; break; case 'g': if (ref.isEmpty()) return createGNode; break; case 's': if (ref == QLatin1String("vg")) return createSvgNode; if (ref == QLatin1String("witch")) return createSwitchNode; break; default: break; } return 0; } static FactoryMethod findGraphicsFactory(const QString &name) { if (name.isEmpty()) return 0; QStringView ref = QStringView{name}.mid(1, name.length() - 1); switch (name.at(0).unicode()) { case 'a': if (ref == QLatin1String("nimation")) return createAnimationNode; break; case 'c': if (ref == QLatin1String("ircle")) return createCircleNode; break; case 'e': if (ref == QLatin1String("llipse")) return createEllipseNode; break; case 'i': if (ref == QLatin1String("mage")) return createImageNode; break; case 'l': if (ref == QLatin1String("ine")) return createLineNode; break; case 'p': if (ref == QLatin1String("ath")) return createPathNode; if (ref == QLatin1String("olygon")) return createPolygonNode; if (ref == QLatin1String("olyline")) return createPolylineNode; break; case 'r': if (ref == QLatin1String("ect")) return createRectNode; break; case 't': if (ref == QLatin1String("ext")) return createTextNode; if (ref == QLatin1String("extArea")) return createTextAreaNode; if (ref == QLatin1String("span")) return createTspanNode; break; case 'u': if (ref == QLatin1String("se")) return createUseNode; break; case 'v': if (ref == QLatin1String("ideo")) return createVideoNode; break; default: break; } return 0; } typedef bool (*ParseMethod)(QSvgNode *, const QXmlStreamAttributes &, QSvgHandler *); static ParseMethod findUtilFactory(const QString &name) { if (name.isEmpty()) return 0; QStringView ref = QStringView{name}.mid(1, name.length() - 1); switch (name.at(0).unicode()) { case 'a': if (ref.isEmpty()) return parseAnchorNode; if (ref == QLatin1String("nimate")) return parseAnimateNode; if (ref 
== QLatin1String("nimateColor")) return parseAnimateColorNode; if (ref == QLatin1String("nimateMotion")) return parseAimateMotionNode; if (ref == QLatin1String("nimateTransform")) return parseAnimateTransformNode; if (ref == QLatin1String("udio")) return parseAudioNode; break; case 'd': if (ref == QLatin1String("esc")) return parseDescNode; if (ref == QLatin1String("iscard")) return parseDiscardNode; break; case 'f': if (ref == QLatin1String("oreignObject")) return parseForeignObjectNode; break; case 'h': if (ref == QLatin1String("andler")) return parseHandlerNode; if (ref == QLatin1String("kern")) return parseHkernNode; break; case 'm': if (ref == QLatin1String("etadata")) return parseMetadataNode; if (ref == QLatin1String("path")) return parseMpathNode; break; case 'p': if (ref == QLatin1String("refetch")) return parsePrefetchNode; break; case 's': if (ref == QLatin1String("cript")) return parseScriptNode; if (ref == QLatin1String("et")) return parseSetNode; if (ref == QLatin1String("tyle")) return parseStyleNode; break; case 't': if (ref == QLatin1String("break")) return parseTbreakNode; if (ref == QLatin1String("itle")) return parseTitleNode; break; default: break; } return 0; } typedef QSvgStyleProperty *(*StyleFactoryMethod)(QSvgNode *, const QXmlStreamAttributes &, QSvgHandler *); static StyleFactoryMethod findStyleFactoryMethod(const QString &name) { if (name.isEmpty()) return 0; QStringView ref = QStringView{name}.mid(1, name.length() - 1); switch (name.at(0).unicode()) { case 'f': if (ref == QLatin1String("ont")) return createFontNode; break; case 'l': if (ref == QLatin1String("inearGradient")) return createLinearGradientNode; break; case 'r': if (ref == QLatin1String("adialGradient")) return createRadialGradientNode; break; case 's': if (ref == QLatin1String("olidColor")) return createSolidColorNode; break; default: break; } return 0; } typedef bool (*StyleParseMethod)(QSvgStyleProperty *, const QXmlStreamAttributes &, QSvgHandler *); static StyleParseMethod findStyleUtilFactoryMethod(const QString &name) { if (name.isEmpty()) return 0; QStringView ref = QStringView{name}.mid(1, name.length() - 1); switch (name.at(0).unicode()) { case 'f': if (ref == QLatin1String("ont-face")) return parseFontFaceNode; if (ref == QLatin1String("ont-face-name")) return parseFontFaceNameNode; if (ref == QLatin1String("ont-face-src")) return parseFontFaceSrcNode; if (ref == QLatin1String("ont-face-uri")) return parseFontFaceUriNode; break; case 'g': if (ref == QLatin1String("lyph")) return parseGlyphNode; break; case 'm': if (ref == QLatin1String("issing-glyph")) return parseMissingGlyphNode; break; case 's': if (ref == QLatin1String("top")) return parseStopNode; break; default: break; } return 0; } QSvgHandler::QSvgHandler(QIODevice *device) : xml(new QXmlStreamReader(device)) , m_ownsReader(true) { init(); } QSvgHandler::QSvgHandler(const QByteArray &data) : xml(new QXmlStreamReader(data)) , m_ownsReader(true) { init(); } QSvgHandler::QSvgHandler(QXmlStreamReader *const reader) : xml(reader) , m_ownsReader(false) { init(); } void QSvgHandler::init() { m_doc = 0; m_style = 0; m_animEnd = 0; m_defaultCoords = LT_PX; m_defaultPen = QPen(Qt::black, 1, Qt::SolidLine, Qt::FlatCap, Qt::SvgMiterJoin); m_defaultPen.setMiterLimit(4); parse(); } // Having too many unfinished elements will cause a stack overflow // in the dtor of QSvgTinyDocument, see oss-fuzz issue 24000. 
static const int unfinishedElementsLimit = 2048; void QSvgHandler::parse() { xml->setNamespaceProcessing(false); #ifndef QT_NO_CSSPARSER m_selector = new QSvgStyleSelector; m_inStyle = false; #endif bool done = false; int remainingUnfinishedElements = unfinishedElementsLimit; while (!xml->atEnd() && !done) { switch (xml->readNext()) { case QXmlStreamReader::StartElement: // he we could/should verify the namespaces, and simply // call m_skipNodes(Unknown) if we don't know the // namespace. We do support http://www.w3.org/2000/svg // but also http://www.w3.org/2000/svg-20000303-stylable // And if the document uses an external dtd, the reported // namespaceUri is empty. The only possible strategy at // this point is to do what everyone else seems to do and // ignore the reported namespaceUri completely. if (remainingUnfinishedElements && startElement(xml->name().toString(), xml->attributes())) { --remainingUnfinishedElements; } else { delete m_doc; m_doc = 0; return; } break; case QXmlStreamReader::EndElement: endElement(xml->name()); ++remainingUnfinishedElements; // if we are using somebody else's qxmlstreamreader // we should not read until the end of the stream done = !m_ownsReader && (xml->name() == QLatin1String("svg")); break; case QXmlStreamReader::Characters: characters(xml->text()); break; case QXmlStreamReader::ProcessingInstruction: processingInstruction(xml->processingInstructionTarget().toString(), xml->processingInstructionData().toString()); break; default: break; } } resolveGradients(m_doc); resolveNodes(); } bool QSvgHandler::startElement(const QString &localName, const QXmlStreamAttributes &attributes) { QSvgNode *node = nullptr; pushColorCopy(); /* The xml:space attribute may appear on any element. We do * a lookup by the qualified name here, but this is namespace aware, since * the XML namespace can only be bound to prefix "xml." */ const QStringView xmlSpace(attributes.value(QLatin1String("xml:space"))); if (xmlSpace.isNull()) { // This element has no xml:space attribute. m_whitespaceMode.push(m_whitespaceMode.isEmpty() ? QSvgText::Default : m_whitespaceMode.top()); } else if (xmlSpace == QLatin1String("preserve")) { m_whitespaceMode.push(QSvgText::Preserve); } else if (xmlSpace == QLatin1String("default")) { m_whitespaceMode.push(QSvgText::Default); } else { const QByteArray msg = '"' + xmlSpace.toString().toLocal8Bit() + "\" is an invalid value for attribute xml:space. " "Valid values are \"preserve\" and \"default\"."; qCWarning(lcSvgHandler, "%s", prefixMessage(msg, xml).constData()); m_whitespaceMode.push(QSvgText::Default); } if (!m_doc && localName != QLatin1String("svg")) return false; if (FactoryMethod method = findGroupFactory(localName)) { //group node = method(m_doc ? 
m_nodes.top() : 0, attributes, this); Q_ASSERT(node); if (!m_doc) { Q_ASSERT(node->type() == QSvgNode::DOC); m_doc = static_cast<QSvgTinyDocument*>(node); } else { switch (m_nodes.top()->type()) { case QSvgNode::DOC: case QSvgNode::G: case QSvgNode::DEFS: case QSvgNode::SWITCH: { QSvgStructureNode *group = static_cast<QSvgStructureNode*>(m_nodes.top()); group->addChild(node, someId(attributes)); } break; default: const QByteArray msg = QByteArrayLiteral("Could not add child element to parent element because the types are incorrect."); qCWarning(lcSvgHandler, "%s", prefixMessage(msg, xml).constData()); delete node; node = 0; break; } } if (node) { parseCoreNode(node, attributes); #ifndef QT_NO_CSSPARSER cssStyleLookup(node, this, m_selector); #endif parseStyle(node, attributes, this); } } else if (FactoryMethod method = findGraphicsFactory(localName)) { //rendering element Q_ASSERT(!m_nodes.isEmpty()); node = method(m_nodes.top(), attributes, this); if (node) { switch (m_nodes.top()->type()) { case QSvgNode::DOC: case QSvgNode::G: case QSvgNode::DEFS: case QSvgNode::SWITCH: { if (node->type() == QSvgNode::TSPAN) { const QByteArray msg = QByteArrayLiteral("\'tspan\' element in wrong context."); qCWarning(lcSvgHandler, "%s", prefixMessage(msg, xml).constData()); delete node; node = 0; break; } QSvgStructureNode *group = static_cast<QSvgStructureNode*>(m_nodes.top()); group->addChild(node, someId(attributes)); } break; case QSvgNode::TEXT: case QSvgNode::TEXTAREA: if (node->type() == QSvgNode::TSPAN) { static_cast<QSvgText *>(m_nodes.top())->addTspan(static_cast<QSvgTspan *>(node)); } else { const QByteArray msg = QByteArrayLiteral("\'text\' or \'textArea\' element contains invalid element type."); qCWarning(lcSvgHandler, "%s", prefixMessage(msg, xml).constData()); delete node; node = 0; } break; default: const QByteArray msg = QByteArrayLiteral("Could not add child element to parent element because the types are incorrect."); qCWarning(lcSvgHandler, "%s", prefixMessage(msg, xml).constData()); delete node; node = 0; break; } if (node) { parseCoreNode(node, attributes); #ifndef QT_NO_CSSPARSER cssStyleLookup(node, this, m_selector); #endif parseStyle(node, attributes, this); if (node->type() == QSvgNode::TEXT || node->type() == QSvgNode::TEXTAREA) { static_cast<QSvgText *>(node)->setWhitespaceMode(m_whitespaceMode.top()); } else if (node->type() == QSvgNode::TSPAN) { static_cast<QSvgTspan *>(node)->setWhitespaceMode(m_whitespaceMode.top()); } else if (node->type() == QSvgNode::USE) { if (!static_cast<QSvgUse *>(node)->isResolved()) m_resolveNodes.append(node); } } } } else if (ParseMethod method = findUtilFactory(localName)) { Q_ASSERT(!m_nodes.isEmpty()); if (!method(m_nodes.top(), attributes, this)) qCWarning(lcSvgHandler, "%s", msgProblemParsing(localName, xml).constData()); } else if (StyleFactoryMethod method = findStyleFactoryMethod(localName)) { QSvgStyleProperty *prop = method(m_nodes.top(), attributes, this); if (prop) { m_style = prop; m_nodes.top()->appendStyleProperty(prop, someId(attributes)); } else { const QByteArray msg = QByteArrayLiteral("Could not parse node: ") + localName.toLocal8Bit(); qCWarning(lcSvgHandler, "%s", prefixMessage(msg, xml).constData()); } } else if (StyleParseMethod method = findStyleUtilFactoryMethod(localName)) { if (m_style) { if (!method(m_style, attributes, this)) qCWarning(lcSvgHandler, "%s", msgProblemParsing(localName, xml).constData()); } } else { //qCWarning(lcSvgHandler) <<"Skipping unknown element!"<<namespaceURI<<"::"<<localName; 
m_skipNodes.push(Unknown); return true; } if (node) { m_nodes.push(node); m_skipNodes.push(Graphics); } else { //qDebug()<<"Skipping "<<localName; m_skipNodes.push(Style); } return true; } bool QSvgHandler::endElement(const QStringView localName) { CurrentNode node = m_skipNodes.top(); m_skipNodes.pop(); m_whitespaceMode.pop(); popColor(); if (node == Unknown) { return true; } #ifdef QT_NO_CSSPARSER Q_UNUSED(localName); #else if (m_inStyle && localName == QLatin1String("style")) m_inStyle = false; #endif if (node == Graphics) m_nodes.pop(); else if (m_style && !m_skipNodes.isEmpty() && m_skipNodes.top() != Style) m_style = 0; return true; } void QSvgHandler::resolveGradients(QSvgNode *node, int nestedDepth) { if (!node || (node->type() != QSvgNode::DOC && node->type() != QSvgNode::G && node->type() != QSvgNode::DEFS && node->type() != QSvgNode::SWITCH)) { return; } QSvgStructureNode *structureNode = static_cast<QSvgStructureNode *>(node); const QList<QSvgNode *> ren = structureNode->renderers(); for (auto it = ren.begin(); it != ren.end(); ++it) { QSvgFillStyle *fill = static_cast<QSvgFillStyle *>((*it)->styleProperty(QSvgStyleProperty::FILL)); if (fill && !fill->isGradientResolved()) { QString id = fill->gradientId(); QSvgFillStyleProperty *style = structureNode->styleProperty(id); if (style) { fill->setFillStyle(style); } else { qCWarning(lcSvgHandler, "%s", msgCouldNotResolveProperty(id, xml).constData()); fill->setBrush(Qt::NoBrush); } } QSvgStrokeStyle *stroke = static_cast<QSvgStrokeStyle *>((*it)->styleProperty(QSvgStyleProperty::STROKE)); if (stroke && !stroke->isGradientResolved()) { QString id = stroke->gradientId(); QSvgFillStyleProperty *style = structureNode->styleProperty(id); if (style) { stroke->setStyle(style); } else { qCWarning(lcSvgHandler, "%s", msgCouldNotResolveProperty(id, xml).constData()); stroke->setStroke(Qt::NoBrush); } } if (nestedDepth < 2048) resolveGradients(*it, nestedDepth + 1); } } void QSvgHandler::resolveNodes() { for (QSvgNode *node : qAsConst(m_resolveNodes)) { if (!node || !node->parent() || node->type() != QSvgNode::USE) continue; QSvgUse *useNode = static_cast<QSvgUse *>(node); if (useNode->isResolved()) continue; QSvgNode::Type t = useNode->parent()->type(); if (!(t == QSvgNode::DOC || t == QSvgNode::DEFS || t == QSvgNode::G || t == QSvgNode::SWITCH)) continue; QSvgStructureNode *group = static_cast<QSvgStructureNode *>(useNode->parent()); QSvgNode *link = group->scopeNode(useNode->linkId()); if (!link) { qCWarning(lcSvgHandler, "link #%s is undefined!", qPrintable(useNode->linkId())); continue; } if (useNode->parent()->isDescendantOf(link)) qCWarning(lcSvgHandler, "link #%s is recursive!", qPrintable(useNode->linkId())); useNode->setLink(link); } m_resolveNodes.clear(); } bool QSvgHandler::characters(const QStringView str) { #ifndef QT_NO_CSSPARSER if (m_inStyle) { QString css = str.toString(); QCss::StyleSheet sheet; QCss::Parser(css).parse(&sheet); m_selector->styleSheets.append(sheet); return true; } #endif if (m_skipNodes.isEmpty() || m_skipNodes.top() == Unknown || m_nodes.isEmpty()) return true; if (m_nodes.top()->type() == QSvgNode::TEXT || m_nodes.top()->type() == QSvgNode::TEXTAREA) { static_cast<QSvgText*>(m_nodes.top())->addText(str.toString()); } else if (m_nodes.top()->type() == QSvgNode::TSPAN) { static_cast<QSvgTspan*>(m_nodes.top())->addText(str.toString()); } return true; } QIODevice *QSvgHandler::device() const { return xml->device(); } QSvgTinyDocument * QSvgHandler::document() const { return m_doc; } QSvgHandler::LengthType 
QSvgHandler::defaultCoordinateSystem() const { return m_defaultCoords; } void QSvgHandler::setDefaultCoordinateSystem(LengthType type) { m_defaultCoords = type; } void QSvgHandler::pushColor(const QColor &color) { m_colorStack.push(color); m_colorTagCount.push(1); } void QSvgHandler::pushColorCopy() { if (m_colorTagCount.count()) ++m_colorTagCount.top(); else pushColor(Qt::black); } void QSvgHandler::popColor() { if (m_colorTagCount.count()) { if (!--m_colorTagCount.top()) { m_colorStack.pop(); m_colorTagCount.pop(); } } } QColor QSvgHandler::currentColor() const { if (!m_colorStack.isEmpty()) return m_colorStack.top(); else return QColor(0, 0, 0); } #ifndef QT_NO_CSSPARSER void QSvgHandler::setInStyle(bool b) { m_inStyle = b; } bool QSvgHandler::inStyle() const { return m_inStyle; } QSvgStyleSelector * QSvgHandler::selector() const { return m_selector; } #endif // QT_NO_CSSPARSER bool QSvgHandler::processingInstruction(const QString &target, const QString &data) { #ifdef QT_NO_CSSPARSER Q_UNUSED(target); Q_UNUSED(data); #else if (target == QLatin1String("xml-stylesheet")) { QRegularExpression rx(QLatin1String("type=\\\"(.+)\\\""), QRegularExpression::InvertedGreedinessOption); QRegularExpressionMatchIterator iter = rx.globalMatch(data); bool isCss = false; while (iter.hasNext()) { QRegularExpressionMatch match = iter.next(); QString type = match.captured(1); if (type.toLower() == QLatin1String("text/css")) { isCss = true; } } if (isCss) { QRegularExpression rx(QLatin1String("href=\\\"(.+)\\\""), QRegularExpression::InvertedGreedinessOption); QRegularExpressionMatch match = rx.match(data); QString addr = match.captured(1); QFileInfo fi(addr); //qDebug()<<"External CSS file "<<fi.absoluteFilePath()<<fi.exists(); if (fi.exists()) { QFile file(fi.absoluteFilePath()); if (!file.open(QIODevice::ReadOnly | QIODevice::Text)) { return true; } QByteArray cssData = file.readAll(); QString css = QString::fromUtf8(cssData); QCss::StyleSheet sheet; QCss::Parser(css).parse(&sheet); m_selector->styleSheets.append(sheet); } } } #endif return true; } void QSvgHandler::setAnimPeriod(int start, int end) { Q_UNUSED(start); m_animEnd = qMax(end, m_animEnd); } int QSvgHandler::animationDuration() const { return m_animEnd; } QSvgHandler::~QSvgHandler() { #ifndef QT_NO_CSSPARSER delete m_selector; m_selector = 0; #endif if(m_ownsReader) delete xml; } QT_END_NAMESPACE
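The two guards visible in the listing above — the unfinishedElementsLimit counter in QSvgHandler::parse() and the nestedDepth < 2048 check in resolveGradients() — bound how deeply a document may nest before the handler gives up, protecting the QSvgTinyDocument destructor and the gradient resolver from stack exhaustion on maliciously deep input. Below is a minimal standalone sketch of that depth-capping pattern; Node, walk, and maxDepth are hypothetical names used only for illustration and are not part of the Qt SVG API.

#include <vector>

// Hypothetical tree node, for illustration only.
struct Node {
    std::vector<const Node *> children;   // non-owning children
};

// Stop descending once the nesting depth reaches the cap, mirroring the
// nestedDepth < 2048 check in resolveGradients() above. Returns false if
// the cap was hit, so the caller can treat the document as too deep.
static bool walk(const Node &n, int depth, int maxDepth = 2048)
{
    if (depth >= maxDepth)
        return false;                     // refuse to recurse further
    bool ok = true;
    for (const Node *child : n.children)
        ok = walk(*child, depth + 1, maxDepth) && ok;
    return ok;
}

Calling walk(root, 0) reports false for trees deeper than the cap, the same fail-closed choice parse() makes when remainingUnfinishedElements runs out.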
/**************************************************************************** ** ** Copyright (C) 2021 The Qt Company Ltd. ** Contact: https://www.qt.io/licensing/ ** ** This file is part of the Qt SVG module of the Qt Toolkit. ** ** $QT_BEGIN_LICENSE:LGPL$ ** Commercial License Usage ** Licensees holding valid commercial Qt licenses may use this file in ** accordance with the commercial license agreement provided with the ** Software or, alternatively, in accordance with the terms contained in ** a written agreement between you and The Qt Company. For licensing terms ** and conditions see https://www.qt.io/terms-conditions. For further ** information use the contact form at https://www.qt.io/contact-us. ** ** GNU Lesser General Public License Usage ** Alternatively, this file may be used under the terms of the GNU Lesser ** General Public License version 3 as published by the Free Software ** Foundation and appearing in the file LICENSE.LGPL3 included in the ** packaging of this file. Please review the following information to ** ensure the GNU Lesser General Public License version 3 requirements ** will be met: https://www.gnu.org/licenses/lgpl-3.0.html. ** ** GNU General Public License Usage ** Alternatively, this file may be used under the terms of the GNU ** General Public License version 2.0 or (at your option) the GNU General ** Public license version 3 or any later version approved by the KDE Free ** Qt Foundation. The licenses are as published by the Free Software ** Foundation and appearing in the file LICENSE.GPL2 and LICENSE.GPL3 ** included in the packaging of this file. Please review the following ** information to ensure the GNU General Public License requirements will ** be met: https://www.gnu.org/licenses/gpl-2.0.html and ** https://www.gnu.org/licenses/gpl-3.0.html. 
** ** $QT_END_LICENSE$ ** ****************************************************************************/ #include "qplatformdefs.h" #include "qsvghandler_p.h" #include "qsvgtinydocument_p.h" #include "qsvgstructure_p.h" #include "qsvggraphics_p.h" #include "qsvgnode_p.h" #include "qsvgfont_p.h" #include "qpen.h" #include "qpainterpath.h" #include "qbrush.h" #include "qcolor.h" #include "qtextformat.h" #include "qlist.h" #include "qfileinfo.h" #include "qfile.h" #include "qdir.h" #include "qdebug.h" #include "qmath.h" #include "qnumeric.h" #include <qregularexpression.h> #include "qtransform.h" #include "qvarlengtharray.h" #include "private/qmath_p.h" #include "float.h" #include <cmath> QT_BEGIN_NAMESPACE Q_LOGGING_CATEGORY(lcSvgHandler, "qt.svg") static const char *qt_inherit_text = "inherit"; #define QT_INHERIT QLatin1String(qt_inherit_text) static QByteArray prefixMessage(const QByteArray &msg, const QXmlStreamReader *r) { QByteArray result; if (r) { if (const QFile *file = qobject_cast<const QFile *>(r->device())) result.append(QFile::encodeName(QDir::toNativeSeparators(file->fileName()))); else result.append(QByteArrayLiteral("<input>")); result.append(':'); result.append(QByteArray::number(r->lineNumber())); if (const qint64 column = r->columnNumber()) { result.append(':'); result.append(QByteArray::number(column)); } result.append(QByteArrayLiteral(": ")); } result.append(msg); return result; } static inline QByteArray msgProblemParsing(const QString &localName, const QXmlStreamReader *r) { return prefixMessage(QByteArrayLiteral("Problem parsing ") + localName.toLocal8Bit(), r); } static inline QByteArray msgCouldNotResolveProperty(const QString &id, const QXmlStreamReader *r) { return prefixMessage(QByteArrayLiteral("Could not resolve property: ") + id.toLocal8Bit(), r); } // ======== duplicated from qcolor_p static inline int qsvg_h2i(char hex, bool *ok = nullptr) { if (hex >= '0' && hex <= '9') return hex - '0'; if (hex >= 'a' && hex <= 'f') return hex - 'a' + 10; if (hex >= 'A' && hex <= 'F') return hex - 'A' + 10; if (ok) *ok = false; return -1; } static inline int qsvg_hex2int(const char *s, bool *ok = nullptr) { return (qsvg_h2i(s[0], ok) * 16) | qsvg_h2i(s[1], ok); } static inline int qsvg_hex2int(char s, bool *ok = nullptr) { int h = qsvg_h2i(s, ok); return (h * 16) | h; } bool qsvg_get_hex_rgb(const char *name, QRgb *rgb) { if(name[0] != '#') return false; name++; const size_t len = qstrlen(name); int r, g, b; bool ok = true; if (len == 12) { r = qsvg_hex2int(name, &ok); g = qsvg_hex2int(name + 4, &ok); b = qsvg_hex2int(name + 8, &ok); } else if (len == 9) { r = qsvg_hex2int(name, &ok); g = qsvg_hex2int(name + 3, &ok); b = qsvg_hex2int(name + 6, &ok); } else if (len == 6) { r = qsvg_hex2int(name, &ok); g = qsvg_hex2int(name + 2, &ok); b = qsvg_hex2int(name + 4, &ok); } else if (len == 3) { r = qsvg_hex2int(name[0], &ok); g = qsvg_hex2int(name[1], &ok); b = qsvg_hex2int(name[2], &ok); } else { r = g = b = -1; } if ((uint)r > 255 || (uint)g > 255 || (uint)b > 255 || !ok) { *rgb = 0; return false; } *rgb = qRgb(r, g ,b); return true; } bool qsvg_get_hex_rgb(const QChar *str, int len, QRgb *rgb) { if (len > 13) return false; char tmp[16]; for(int i = 0; i < len; ++i) tmp[i] = str[i].toLatin1(); tmp[len] = 0; return qsvg_get_hex_rgb(tmp, rgb); } // ======== end of qcolor_p duplicate static bool parsePathDataFast(QStringView data, QPainterPath &path); static inline QString someId(const QXmlStreamAttributes &attributes) { QString id = 
attributes.value(QLatin1String("id")).toString(); if (id.isEmpty()) id = attributes.value(QLatin1String("xml:id")).toString(); return id; } struct QSvgAttributes { QSvgAttributes(const QXmlStreamAttributes &xmlAttributes, QSvgHandler *handler); QString id; QStringView color; QStringView colorOpacity; QStringView fill; QStringView fillRule; QStringView fillOpacity; QStringView stroke; QStringView strokeDashArray; QStringView strokeDashOffset; QStringView strokeLineCap; QStringView strokeLineJoin; QStringView strokeMiterLimit; QStringView strokeOpacity; QStringView strokeWidth; QStringView vectorEffect; QStringView fontFamily; QStringView fontSize; QStringView fontStyle; QStringView fontWeight; QStringView fontVariant; QStringView textAnchor; QStringView transform; QStringView visibility; QStringView opacity; QStringView compOp; QStringView display; QStringView offset; QStringView stopColor; QStringView stopOpacity; QStringView imageRendering; #ifndef QT_NO_CSSPARSER QList<QSvgCssAttribute> m_cssAttributes; #endif }; QSvgAttributes::QSvgAttributes(const QXmlStreamAttributes &xmlAttributes, QSvgHandler *handler) { #ifndef QT_NO_CSSPARSER QStringView style = xmlAttributes.value(QLatin1String("style")); if (!style.isEmpty()) { handler->parseCSStoXMLAttrs(style.toString(), &m_cssAttributes); for (int j = 0; j < m_cssAttributes.count(); ++j) { const QSvgCssAttribute &attribute = m_cssAttributes.at(j); QStringView name = attribute.name; QStringView value = attribute.value; if (name.isEmpty()) continue; switch (name.at(0).unicode()) { case 'c': if (name == QLatin1String("color")) color = value; else if (name == QLatin1String("color-opacity")) colorOpacity = value; else if (name == QLatin1String("comp-op")) compOp = value; break; case 'd': if (name == QLatin1String("display")) display = value; break; case 'f': if (name == QLatin1String("fill")) fill = value; else if (name == QLatin1String("fill-rule")) fillRule = value; else if (name == QLatin1String("fill-opacity")) fillOpacity = value; else if (name == QLatin1String("font-family")) fontFamily = value; else if (name == QLatin1String("font-size")) fontSize = value; else if (name == QLatin1String("font-style")) fontStyle = value; else if (name == QLatin1String("font-weight")) fontWeight = value; else if (name == QLatin1String("font-variant")) fontVariant = value; break; case 'i': if (name == QLatin1String("image-rendering")) imageRendering = value; break; case 'o': if (name == QLatin1String("opacity")) opacity = value; else if (name == QLatin1String("offset")) offset = value; break; case 's': if (name.length() > 5 && name.mid(1, 5) == QLatin1String("troke")) { QStringView strokeRef = name.mid(6, name.length() - 6); if (strokeRef.isEmpty()) stroke = value; else if (strokeRef == QLatin1String("-dasharray")) strokeDashArray = value; else if (strokeRef == QLatin1String("-dashoffset")) strokeDashOffset = value; else if (strokeRef == QLatin1String("-linecap")) strokeLineCap = value; else if (strokeRef == QLatin1String("-linejoin")) strokeLineJoin = value; else if (strokeRef == QLatin1String("-miterlimit")) strokeMiterLimit = value; else if (strokeRef == QLatin1String("-opacity")) strokeOpacity = value; else if (strokeRef == QLatin1String("-width")) strokeWidth = value; } else if (name == QLatin1String("stop-color")) stopColor = value; else if (name == QLatin1String("stop-opacity")) stopOpacity = value; break; case 't': if (name == QLatin1String("text-anchor")) textAnchor = value; else if (name == QLatin1String("transform")) transform = value; break; case 
'v': if (name == QLatin1String("vector-effect")) vectorEffect = value; else if (name == QLatin1String("visibility")) visibility = value; break; default: break; } } } #else Q_UNUSED(handler); #endif // QT_NO_CSSPARSER for (int i = 0; i < xmlAttributes.count(); ++i) { const QXmlStreamAttribute &attribute = xmlAttributes.at(i); QStringView name = attribute.qualifiedName(); if (name.isEmpty()) continue; QStringView value = attribute.value(); switch (name.at(0).unicode()) { case 'c': if (name == QLatin1String("color")) color = value; else if (name == QLatin1String("color-opacity")) colorOpacity = value; else if (name == QLatin1String("comp-op")) compOp = value; break; case 'd': if (name == QLatin1String("display")) display = value; break; case 'f': if (name == QLatin1String("fill")) fill = value; else if (name == QLatin1String("fill-rule")) fillRule = value; else if (name == QLatin1String("fill-opacity")) fillOpacity = value; else if (name == QLatin1String("font-family")) fontFamily = value; else if (name == QLatin1String("font-size")) fontSize = value; else if (name == QLatin1String("font-style")) fontStyle = value; else if (name == QLatin1String("font-weight")) fontWeight = value; else if (name == QLatin1String("font-variant")) fontVariant = value; break; case 'i': if (name == QLatin1String("id")) id = value.toString(); else if (name == QLatin1String("image-rendering")) imageRendering = value; break; case 'o': if (name == QLatin1String("opacity")) opacity = value; if (name == QLatin1String("offset")) offset = value; break; case 's': if (name.length() > 5 && name.mid(1, 5) == QLatin1String("troke")) { QStringView strokeRef = name.mid(6, name.length() - 6); if (strokeRef.isEmpty()) stroke = value; else if (strokeRef == QLatin1String("-dasharray")) strokeDashArray = value; else if (strokeRef == QLatin1String("-dashoffset")) strokeDashOffset = value; else if (strokeRef == QLatin1String("-linecap")) strokeLineCap = value; else if (strokeRef == QLatin1String("-linejoin")) strokeLineJoin = value; else if (strokeRef == QLatin1String("-miterlimit")) strokeMiterLimit = value; else if (strokeRef == QLatin1String("-opacity")) strokeOpacity = value; else if (strokeRef == QLatin1String("-width")) strokeWidth = value; } else if (name == QLatin1String("stop-color")) stopColor = value; else if (name == QLatin1String("stop-opacity")) stopOpacity = value; break; case 't': if (name == QLatin1String("text-anchor")) textAnchor = value; else if (name == QLatin1String("transform")) transform = value; break; case 'v': if (name == QLatin1String("vector-effect")) vectorEffect = value; else if (name == QLatin1String("visibility")) visibility = value; break; case 'x': if (name == QLatin1String("xml:id") && id.isEmpty()) id = value.toString(); break; default: break; } } } #ifndef QT_NO_CSSPARSER static const char * QSvgStyleSelector_nodeString[] = { "svg", "g", "defs", "switch", "animation", "arc", "circle", "ellipse", "image", "line", "path", "polygon", "polyline", "rect", "text", "textarea", "tspan", "use", "video" }; class QSvgStyleSelector : public QCss::StyleSelector { public: QSvgStyleSelector() { nameCaseSensitivity = Qt::CaseInsensitive; } virtual ~QSvgStyleSelector() { } inline QString nodeToName(QSvgNode *node) const { return QLatin1String(QSvgStyleSelector_nodeString[node->type()]); } inline QSvgNode *svgNode(NodePtr node) const { return (QSvgNode*)node.ptr; } inline QSvgStructureNode *nodeToStructure(QSvgNode *n) const { if (n && (n->type() == QSvgNode::DOC || n->type() == QSvgNode::G || n->type() == 
QSvgNode::DEFS || n->type() == QSvgNode::SWITCH)) { return (QSvgStructureNode*)n; } return 0; } inline QSvgStructureNode *svgStructure(NodePtr node) const { QSvgNode *n = svgNode(node); QSvgStructureNode *st = nodeToStructure(n); return st; } bool nodeNameEquals(NodePtr node, const QString& nodeName) const override { QSvgNode *n = svgNode(node); if (!n) return false; QString name = nodeToName(n); return QString::compare(name, nodeName, Qt::CaseInsensitive) == 0; } QString attribute(NodePtr node, const QString &name) const override { QSvgNode *n = svgNode(node); if ((!n->nodeId().isEmpty() && (name == QLatin1String("id") || name == QLatin1String("xml:id")))) return n->nodeId(); if (!n->xmlClass().isEmpty() && name == QLatin1String("class")) return n->xmlClass(); return QString(); } bool hasAttributes(NodePtr node) const override { QSvgNode *n = svgNode(node); return (n && (!n->nodeId().isEmpty() || !n->xmlClass().isEmpty())); } QStringList nodeIds(NodePtr node) const override { QSvgNode *n = svgNode(node); QString nid; if (n) nid = n->nodeId(); QStringList lst; lst.append(nid); return lst; } QStringList nodeNames(NodePtr node) const override { QSvgNode *n = svgNode(node); if (n) return QStringList(nodeToName(n)); return QStringList(); } bool isNullNode(NodePtr node) const override { return !node.ptr; } NodePtr parentNode(NodePtr node) const override { QSvgNode *n = svgNode(node); NodePtr newNode; newNode.ptr = 0; newNode.id = 0; if (n) { QSvgNode *svgParent = n->parent(); if (svgParent) { newNode.ptr = svgParent; } } return newNode; } NodePtr previousSiblingNode(NodePtr node) const override { NodePtr newNode; newNode.ptr = 0; newNode.id = 0; QSvgNode *n = svgNode(node); if (!n) return newNode; QSvgStructureNode *svgParent = nodeToStructure(n->parent()); if (svgParent) { newNode.ptr = svgParent->previousSiblingNode(n); } return newNode; } NodePtr duplicateNode(NodePtr node) const override { NodePtr n; n.ptr = node.ptr; n.id = node.id; return n; } void freeNode(NodePtr node) const override { Q_UNUSED(node); } }; #endif // QT_NO_CSSPARSER // '0' is 0x30 and '9' is 0x39 static inline bool isDigit(ushort ch) { static quint16 magic = 0x3ff; return ((ch >> 4) == 3) && (magic >> (ch & 15)); } static qreal toDouble(const QChar *&str) { const int maxLen = 255;//technically doubles can go til 308+ but whatever char temp[maxLen+1]; int pos = 0; if (*str == QLatin1Char('-')) { temp[pos++] = '-'; ++str; } else if (*str == QLatin1Char('+')) { ++str; } while (isDigit(str->unicode()) && pos < maxLen) { temp[pos++] = str->toLatin1(); ++str; } if (*str == QLatin1Char('.') && pos < maxLen) { temp[pos++] = '.'; ++str; } while (isDigit(str->unicode()) && pos < maxLen) { temp[pos++] = str->toLatin1(); ++str; } bool exponent = false; if ((*str == QLatin1Char('e') || *str == QLatin1Char('E')) && pos < maxLen) { exponent = true; temp[pos++] = 'e'; ++str; if ((*str == QLatin1Char('-') || *str == QLatin1Char('+')) && pos < maxLen) { temp[pos++] = str->toLatin1(); ++str; } while (isDigit(str->unicode()) && pos < maxLen) { temp[pos++] = str->toLatin1(); ++str; } } temp[pos] = '\0'; qreal val; if (!exponent && pos < 10) { int ival = 0; const char *t = temp; bool neg = false; if(*t == '-') { neg = true; ++t; } while(*t && *t != '.') { ival *= 10; ival += (*t) - '0'; ++t; } if(*t == '.') { ++t; int div = 1; while(*t) { ival *= 10; ival += (*t) - '0'; div *= 10; ++t; } val = ((qreal)ival)/((qreal)div); } else { val = ival; } if (neg) val = -val; } else { val = QByteArray::fromRawData(temp, pos).toDouble(); // Do not 
tolerate values too wild to be represented normally by floats if (qFpClassify(float(val)) != FP_NORMAL) val = 0; } return val; } static qreal toDouble(QStringView str, bool *ok = NULL) { const QChar *c = str.constData(); qreal res = (c == nullptr ? qreal{} : toDouble(c)); if (ok) *ok = (c == (str.constData() + str.length())); return res; } static QList<qreal> parseNumbersList(const QChar *&str) { QList<qreal> points; if (!str) return points; points.reserve(32); while (str->isSpace()) ++str; while (isDigit(str->unicode()) || *str == QLatin1Char('-') || *str == QLatin1Char('+') || *str == QLatin1Char('.')) { points.append(toDouble(str)); while (str->isSpace()) ++str; if (*str == QLatin1Char(',')) ++str; //eat the rest of space while (str->isSpace()) ++str; } return points; } static inline void parseNumbersArray(const QChar *&str, QVarLengthArray<qreal, 8> &points, const char *pattern = nullptr) { const size_t patternLen = qstrlen(pattern); while (str->isSpace()) ++str; while (isDigit(str->unicode()) || *str == QLatin1Char('-') || *str == QLatin1Char('+') || *str == QLatin1Char('.')) { if (patternLen && pattern[points.size() % patternLen] == 'f') { // flag expected, may only be 0 or 1 if (*str != QLatin1Char('0') && *str != QLatin1Char('1')) return; points.append(*str == QLatin1Char('0') ? 0.0 : 1.0); ++str; } else { points.append(toDouble(str)); } while (str->isSpace()) ++str; if (*str == QLatin1Char(',')) ++str; //eat the rest of space while (str->isSpace()) ++str; } } static QList<qreal> parsePercentageList(const QChar *&str) { QList<qreal> points; if (!str) return points; while (str->isSpace()) ++str; while ((*str >= QLatin1Char('0') && *str <= QLatin1Char('9')) || *str == QLatin1Char('-') || *str == QLatin1Char('+') || *str == QLatin1Char('.')) { points.append(toDouble(str)); while (str->isSpace()) ++str; if (*str == QLatin1Char('%')) ++str; while (str->isSpace()) ++str; if (*str == QLatin1Char(',')) ++str; //eat the rest of space while (str->isSpace()) ++str; } return points; } static QString idFromUrl(const QString &url) { // The form is url(<IRI>), where IRI can be // just an ID on #<id> form. QString::const_iterator itr = url.constBegin(); QString::const_iterator end = url.constEnd(); QString id; while (itr != end && (*itr).isSpace()) ++itr; if (itr != end && (*itr) == QLatin1Char('(')) ++itr; else return QString(); while (itr != end && (*itr).isSpace()) ++itr; if (itr != end && (*itr) == QLatin1Char('#')) { id += *itr; ++itr; } else { return QString(); } while (itr != end && (*itr) != QLatin1Char(')')) { id += *itr; ++itr; } if (itr == end || (*itr) != QLatin1Char(')')) return QString(); return id; } /** * returns true when successfully set the color. 
false signifies * that the color should be inherited */ static bool resolveColor(QStringView colorStr, QColor &color, QSvgHandler *handler) { QStringView colorStrTr = colorStr.trimmed(); if (colorStrTr.isEmpty()) return false; switch(colorStrTr.at(0).unicode()) { case '#': { // #rrggbb is very very common, so let's tackle it here // rather than falling back to QColor QRgb rgb; bool ok = qsvg_get_hex_rgb(colorStrTr.constData(), colorStrTr.length(), &rgb); if (ok) color.setRgb(rgb); return ok; } break; case 'r': { // starts with "rgb(", ends with ")" and consists of at least 7 characters "rgb(,,)" if (colorStrTr.length() >= 7 && colorStrTr.at(colorStrTr.length() - 1) == QLatin1Char(')') && colorStrTr.mid(0, 4) == QLatin1String("rgb(")) { const QChar *s = colorStrTr.constData() + 4; QList<qreal> compo = parseNumbersList(s); //1 means that it failed after reaching non-parsable //character which is going to be "%" if (compo.size() == 1) { s = colorStrTr.constData() + 4; compo = parsePercentageList(s); for (int i = 0; i < compo.size(); ++i) compo[i] *= (qreal)2.55; } if (compo.size() == 3) { color = QColor(int(compo[0]), int(compo[1]), int(compo[2])); return true; } return false; } } break; case 'c': if (colorStrTr == QLatin1String("currentColor")) { color = handler->currentColor(); return true; } break; case 'i': if (colorStrTr == QT_INHERIT) return false; break; default: break; } color = QColor(colorStrTr.toString()); return color.isValid(); } static bool constructColor(QStringView colorStr, QStringView opacity, QColor &color, QSvgHandler *handler) { if (!resolveColor(colorStr, color, handler)) return false; if (!opacity.isEmpty()) { bool ok = true; qreal op = qMin(qreal(1.0), qMax(qreal(0.0), toDouble(opacity, &ok))); if (!ok) op = 1.0; color.setAlphaF(op); } return true; } static qreal parseLength(QStringView str, QSvgHandler::LengthType &type, QSvgHandler *handler, bool *ok = NULL) { QStringView numStr = str.trimmed(); if (numStr.endsWith(QLatin1Char('%'))) { numStr.chop(1); type = QSvgHandler::LT_PERCENT; } else if (numStr.endsWith(QLatin1String("px"))) { numStr.chop(2); type = QSvgHandler::LT_PX; } else if (numStr.endsWith(QLatin1String("pc"))) { numStr.chop(2); type = QSvgHandler::LT_PC; } else if (numStr.endsWith(QLatin1String("pt"))) { numStr.chop(2); type = QSvgHandler::LT_PT; } else if (numStr.endsWith(QLatin1String("mm"))) { numStr.chop(2); type = QSvgHandler::LT_MM; } else if (numStr.endsWith(QLatin1String("cm"))) { numStr.chop(2); type = QSvgHandler::LT_CM; } else if (numStr.endsWith(QLatin1String("in"))) { numStr.chop(2); type = QSvgHandler::LT_IN; } else { type = handler->defaultCoordinateSystem(); //type = QSvgHandler::LT_OTHER; } qreal len = toDouble(numStr, ok); //qDebug()<<"len is "<<len<<", from '"<<numStr << "'"; return len; } static inline qreal convertToNumber(QStringView str, QSvgHandler *handler, bool *ok = NULL) { QSvgHandler::LengthType type; qreal num = parseLength(str.toString(), type, handler, ok); if (type == QSvgHandler::LT_PERCENT) { num = num/100.0; } return num; } static bool createSvgGlyph(QSvgFont *font, const QXmlStreamAttributes &attributes) { QStringView uncStr = attributes.value(QLatin1String("unicode")); QStringView havStr = attributes.value(QLatin1String("horiz-adv-x")); QStringView pathStr = attributes.value(QLatin1String("d")); QChar unicode = (uncStr.isEmpty()) ? u'\0' : uncStr.at(0); qreal havx = (havStr.isEmpty()) ? 
-1 : toDouble(havStr); QPainterPath path; path.setFillRule(Qt::WindingFill); parsePathDataFast(pathStr, path); font->addGlyph(unicode, path, havx); return true; } // this should really be called convertToDefaultCoordinateSystem // and convert when type != QSvgHandler::defaultCoordinateSystem static qreal convertToPixels(qreal len, bool , QSvgHandler::LengthType type) { switch (type) { case QSvgHandler::LT_PERCENT: break; case QSvgHandler::LT_PX: break; case QSvgHandler::LT_PC: break; case QSvgHandler::LT_PT: return len * 1.25; break; case QSvgHandler::LT_MM: return len * 3.543307; break; case QSvgHandler::LT_CM: return len * 35.43307; break; case QSvgHandler::LT_IN: return len * 90; break; case QSvgHandler::LT_OTHER: break; default: break; } return len; } static void parseColor(QSvgNode *, const QSvgAttributes &attributes, QSvgHandler *handler) { QColor color; if (constructColor(attributes.color, attributes.colorOpacity, color, handler)) { handler->popColor(); handler->pushColor(color); } } static QSvgStyleProperty *styleFromUrl(QSvgNode *node, const QString &url) { return node ? node->styleProperty(idFromUrl(url)) : 0; } static void parseBrush(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *handler) { if (!attributes.fill.isEmpty() || !attributes.fillRule.isEmpty() || !attributes.fillOpacity.isEmpty()) { QSvgFillStyle *prop = new QSvgFillStyle; //fill-rule attribute handling if (!attributes.fillRule.isEmpty() && attributes.fillRule != QT_INHERIT) { if (attributes.fillRule == QLatin1String("evenodd")) prop->setFillRule(Qt::OddEvenFill); else if (attributes.fillRule == QLatin1String("nonzero")) prop->setFillRule(Qt::WindingFill); } //fill-opacity attribute handling if (!attributes.fillOpacity.isEmpty() && attributes.fillOpacity != QT_INHERIT) { prop->setFillOpacity(qMin(qreal(1.0), qMax(qreal(0.0), toDouble(attributes.fillOpacity)))); } //fill attribute handling if ((!attributes.fill.isEmpty()) && (attributes.fill != QT_INHERIT) ) { if (attributes.fill.length() > 3 && attributes.fill.mid(0, 3) == QLatin1String("url")) { QString value = attributes.fill.mid(3, attributes.fill.length() - 3).toString(); QSvgStyleProperty *style = styleFromUrl(node, value); if (style) { if (style->type() == QSvgStyleProperty::SOLID_COLOR || style->type() == QSvgStyleProperty::GRADIENT) prop->setFillStyle(reinterpret_cast<QSvgFillStyleProperty *>(style)); } else { QString id = idFromUrl(value); prop->setGradientId(id); prop->setGradientResolved(false); } } else if (attributes.fill != QLatin1String("none")) { QColor color; if (resolveColor(attributes.fill, color, handler)) prop->setBrush(QBrush(color)); } else { prop->setBrush(QBrush(Qt::NoBrush)); } } node->appendStyleProperty(prop, attributes.id); } } static QTransform parseTransformationMatrix(QStringView value) { if (value.isEmpty()) return QTransform(); QTransform matrix; const QChar *str = value.constData(); const QChar *end = str + value.length(); while (str < end) { if (str->isSpace() || *str == QLatin1Char(',')) { ++str; continue; } enum State { Matrix, Translate, Rotate, Scale, SkewX, SkewY }; State state = Matrix; if (*str == QLatin1Char('m')) { //matrix const char *ident = "atrix"; for (int i = 0; i < 5; ++i) if (*(++str) != QLatin1Char(ident[i])) goto error; ++str; state = Matrix; } else if (*str == QLatin1Char('t')) { //translate const char *ident = "ranslate"; for (int i = 0; i < 8; ++i) if (*(++str) != QLatin1Char(ident[i])) goto error; ++str; state = Translate; } else if (*str == QLatin1Char('r')) { //rotate const char *ident = 
"otate"; for (int i = 0; i < 5; ++i) if (*(++str) != QLatin1Char(ident[i])) goto error; ++str; state = Rotate; } else if (*str == QLatin1Char('s')) { //scale, skewX, skewY ++str; if (*str == QLatin1Char('c')) { const char *ident = "ale"; for (int i = 0; i < 3; ++i) if (*(++str) != QLatin1Char(ident[i])) goto error; ++str; state = Scale; } else if (*str == QLatin1Char('k')) { if (*(++str) != QLatin1Char('e')) goto error; if (*(++str) != QLatin1Char('w')) goto error; ++str; if (*str == QLatin1Char('X')) state = SkewX; else if (*str == QLatin1Char('Y')) state = SkewY; else goto error; ++str; } else { goto error; } } else { goto error; } while (str < end && str->isSpace()) ++str; if (*str != QLatin1Char('(')) goto error; ++str; QVarLengthArray<qreal, 8> points; parseNumbersArray(str, points); if (*str != QLatin1Char(')')) goto error; ++str; if(state == Matrix) { if(points.count() != 6) goto error; matrix = QTransform(points[0], points[1], points[2], points[3], points[4], points[5]) * matrix; } else if (state == Translate) { if (points.count() == 1) matrix.translate(points[0], 0); else if (points.count() == 2) matrix.translate(points[0], points[1]); else goto error; } else if (state == Rotate) { if(points.count() == 1) { matrix.rotate(points[0]); } else if (points.count() == 3) { matrix.translate(points[1], points[2]); matrix.rotate(points[0]); matrix.translate(-points[1], -points[2]); } else { goto error; } } else if (state == Scale) { if (points.count() < 1 || points.count() > 2) goto error; qreal sx = points[0]; qreal sy = sx; if(points.count() == 2) sy = points[1]; matrix.scale(sx, sy); } else if (state == SkewX) { if (points.count() != 1) goto error; matrix.shear(qTan(qDegreesToRadians(points[0])), 0); } else if (state == SkewY) { if (points.count() != 1) goto error; matrix.shear(0, qTan(qDegreesToRadians(points[0]))); } } error: return matrix; } static void parsePen(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *handler) { //qDebug()<<"Node "<<node->type()<<", attrs are "<<value<<width; if (!attributes.stroke.isEmpty() || !attributes.strokeDashArray.isEmpty() || !attributes.strokeDashOffset.isEmpty() || !attributes.strokeLineCap.isEmpty() || !attributes.strokeLineJoin.isEmpty() || !attributes.strokeMiterLimit.isEmpty() || !attributes.strokeOpacity.isEmpty() || !attributes.strokeWidth.isEmpty() || !attributes.vectorEffect.isEmpty()) { QSvgStrokeStyle *prop = new QSvgStrokeStyle; //stroke attribute handling if ((!attributes.stroke.isEmpty()) && (attributes.stroke != QT_INHERIT) ) { if (attributes.stroke.length() > 3 && attributes.stroke.mid(0, 3) == QLatin1String("url")) { QString value = attributes.stroke.mid(3, attributes.stroke.length() - 3).toString(); QSvgStyleProperty *style = styleFromUrl(node, value); if (style) { if (style->type() == QSvgStyleProperty::SOLID_COLOR || style->type() == QSvgStyleProperty::GRADIENT) prop->setStyle(reinterpret_cast<QSvgFillStyleProperty *>(style)); } else { QString id = idFromUrl(value); prop->setGradientId(id); prop->setGradientResolved(false); } } else if (attributes.stroke != QLatin1String("none")) { QColor color; if (resolveColor(attributes.stroke, color, handler)) prop->setStroke(QBrush(color)); } else { prop->setStroke(QBrush(Qt::NoBrush)); } } //stroke-width handling if (!attributes.strokeWidth.isEmpty() && attributes.strokeWidth != QT_INHERIT) { QSvgHandler::LengthType lt; prop->setWidth(parseLength(attributes.strokeWidth, lt, handler)); } //stroke-dasharray if (!attributes.strokeDashArray.isEmpty() && attributes.strokeDashArray 
!= QT_INHERIT) { if (attributes.strokeDashArray == QLatin1String("none")) { prop->setDashArrayNone(); } else { QString dashArray = attributes.strokeDashArray.toString(); const QChar *s = dashArray.constData(); QList<qreal> dashes = parseNumbersList(s); // if the dash count is odd the dashes should be duplicated if ((dashes.size() & 1) != 0) dashes << QList<qreal>(dashes); prop->setDashArray(dashes); } } //stroke-linejoin attribute handling if (!attributes.strokeLineJoin.isEmpty()) { if (attributes.strokeLineJoin == QLatin1String("miter")) prop->setLineJoin(Qt::SvgMiterJoin); else if (attributes.strokeLineJoin == QLatin1String("round")) prop->setLineJoin(Qt::RoundJoin); else if (attributes.strokeLineJoin == QLatin1String("bevel")) prop->setLineJoin(Qt::BevelJoin); } //stroke-linecap attribute handling if (!attributes.strokeLineCap.isEmpty()) { if (attributes.strokeLineCap == QLatin1String("butt")) prop->setLineCap(Qt::FlatCap); else if (attributes.strokeLineCap == QLatin1String("round")) prop->setLineCap(Qt::RoundCap); else if (attributes.strokeLineCap == QLatin1String("square")) prop->setLineCap(Qt::SquareCap); } //stroke-dashoffset attribute handling if (!attributes.strokeDashOffset.isEmpty() && attributes.strokeDashOffset != QT_INHERIT) prop->setDashOffset(toDouble(attributes.strokeDashOffset)); //vector-effect attribute handling if (!attributes.vectorEffect.isEmpty()) { if (attributes.vectorEffect == QLatin1String("non-scaling-stroke")) prop->setVectorEffect(true); else if (attributes.vectorEffect == QLatin1String("none")) prop->setVectorEffect(false); } //stroke-miterlimit if (!attributes.strokeMiterLimit.isEmpty() && attributes.strokeMiterLimit != QT_INHERIT) prop->setMiterLimit(toDouble(attributes.strokeMiterLimit)); //stroke-opacity attribute handling if (!attributes.strokeOpacity.isEmpty() && attributes.strokeOpacity != QT_INHERIT) prop->setOpacity(qMin(qreal(1.0), qMax(qreal(0.0), toDouble(attributes.strokeOpacity)))); node->appendStyleProperty(prop, attributes.id); } } enum FontSizeSpec { XXSmall, XSmall, Small, Medium, Large, XLarge, XXLarge, FontSizeNone, FontSizeValue }; static const qreal sizeTable[] = { qreal(6.9), qreal(8.3), qreal(10.0), qreal(12.0), qreal(14.4), qreal(17.3), qreal(20.7) }; Q_STATIC_ASSERT(sizeof(sizeTable)/sizeof(sizeTable[0]) == FontSizeNone); static FontSizeSpec fontSizeSpec(QStringView spec) { switch (spec.at(0).unicode()) { case 'x': if (spec == QLatin1String("xx-small")) return XXSmall; if (spec == QLatin1String("x-small")) return XSmall; if (spec == QLatin1String("x-large")) return XLarge; if (spec == QLatin1String("xx-large")) return XXLarge; break; case 's': if (spec == QLatin1String("small")) return Small; break; case 'm': if (spec == QLatin1String("medium")) return Medium; break; case 'l': if (spec == QLatin1String("large")) return Large; break; case 'n': if (spec == QLatin1String("none")) return FontSizeNone; break; default: break; } return FontSizeValue; } static void parseFont(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *handler) { if (attributes.fontFamily.isEmpty() && attributes.fontSize.isEmpty() && attributes.fontStyle.isEmpty() && attributes.fontWeight.isEmpty() && attributes.fontVariant.isEmpty() && attributes.textAnchor.isEmpty()) return; QSvgTinyDocument *doc = node->document(); QSvgFontStyle *fontStyle = nullptr; if (!attributes.fontFamily.isEmpty()) { QSvgFont *svgFont = doc->svgFont(attributes.fontFamily.toString()); if (svgFont) fontStyle = new QSvgFontStyle(svgFont, doc); } if (!fontStyle) fontStyle = new
QSvgFontStyle; if (!attributes.fontFamily.isEmpty() && attributes.fontFamily != QT_INHERIT) { QString family = attributes.fontFamily.toString().trimmed(); if (family.at(0) == QLatin1Char('\'') || family.at(0) == QLatin1Char('\"')) family = family.mid(1, family.length() - 2); fontStyle->setFamily(family); } if (!attributes.fontSize.isEmpty() && attributes.fontSize != QT_INHERIT) { // TODO: Support relative sizes 'larger' and 'smaller'. const FontSizeSpec spec = fontSizeSpec(attributes.fontSize); switch (spec) { case FontSizeNone: break; case FontSizeValue: { QSvgHandler::LengthType type; qreal fs = parseLength(attributes.fontSize, type, handler); fs = convertToPixels(fs, true, type); fontStyle->setSize(qMin(fs, qreal(0xffff))); } break; default: fontStyle->setSize(sizeTable[spec]); break; } } if (!attributes.fontStyle.isEmpty() && attributes.fontStyle != QT_INHERIT) { if (attributes.fontStyle == QLatin1String("normal")) { fontStyle->setStyle(QFont::StyleNormal); } else if (attributes.fontStyle == QLatin1String("italic")) { fontStyle->setStyle(QFont::StyleItalic); } else if (attributes.fontStyle == QLatin1String("oblique")) { fontStyle->setStyle(QFont::StyleOblique); } } if (!attributes.fontWeight.isEmpty() && attributes.fontWeight != QT_INHERIT) { bool ok = false; const int weightNum = attributes.fontWeight.toInt(&ok); if (ok) { fontStyle->setWeight(weightNum); } else { if (attributes.fontWeight == QLatin1String("normal")) { fontStyle->setWeight(QFont::Normal); } else if (attributes.fontWeight == QLatin1String("bold")) { fontStyle->setWeight(QFont::Bold); } else if (attributes.fontWeight == QLatin1String("bolder")) { fontStyle->setWeight(QSvgFontStyle::BOLDER); } else if (attributes.fontWeight == QLatin1String("lighter")) { fontStyle->setWeight(QSvgFontStyle::LIGHTER); } } } if (!attributes.fontVariant.isEmpty() && attributes.fontVariant != QT_INHERIT) { if (attributes.fontVariant == QLatin1String("normal")) fontStyle->setVariant(QFont::MixedCase); else if (attributes.fontVariant == QLatin1String("small-caps")) fontStyle->setVariant(QFont::SmallCaps); } if (!attributes.textAnchor.isEmpty() && attributes.textAnchor != QT_INHERIT) { if (attributes.textAnchor == QLatin1String("start")) fontStyle->setTextAnchor(Qt::AlignLeft); if (attributes.textAnchor == QLatin1String("middle")) fontStyle->setTextAnchor(Qt::AlignHCenter); else if (attributes.textAnchor == QLatin1String("end")) fontStyle->setTextAnchor(Qt::AlignRight); } node->appendStyleProperty(fontStyle, attributes.id); } static void parseTransform(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *) { if (attributes.transform.isEmpty()) return; QTransform matrix = parseTransformationMatrix(attributes.transform.trimmed()); if (!matrix.isIdentity()) { node->appendStyleProperty(new QSvgTransformStyle(QTransform(matrix)), attributes.id); } } static void parseVisibility(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *) { QSvgNode *parent = node->parent(); if (parent && (attributes.visibility.isEmpty() || attributes.visibility == QT_INHERIT)) node->setVisible(parent->isVisible()); else if (attributes.visibility == QLatin1String("hidden") || attributes.visibility == QLatin1String("collapse")) { node->setVisible(false); } else node->setVisible(true); } static void pathArcSegment(QPainterPath &path, qreal xc, qreal yc, qreal th0, qreal th1, qreal rx, qreal ry, qreal xAxisRotation) { qreal sinTh, cosTh; qreal a00, a01, a10, a11; qreal x1, y1, x2, y2, x3, y3; qreal t; qreal thHalf; sinTh = qSin(xAxisRotation * (Q_PI / 
180.0)); cosTh = qCos(xAxisRotation * (Q_PI / 180.0)); a00 = cosTh * rx; a01 = -sinTh * ry; a10 = sinTh * rx; a11 = cosTh * ry; thHalf = 0.5 * (th1 - th0); t = (8.0 / 3.0) * qSin(thHalf * 0.5) * qSin(thHalf * 0.5) / qSin(thHalf); x1 = xc + qCos(th0) - t * qSin(th0); y1 = yc + qSin(th0) + t * qCos(th0); x3 = xc + qCos(th1); y3 = yc + qSin(th1); x2 = x3 + t * qSin(th1); y2 = y3 - t * qCos(th1); path.cubicTo(a00 * x1 + a01 * y1, a10 * x1 + a11 * y1, a00 * x2 + a01 * y2, a10 * x2 + a11 * y2, a00 * x3 + a01 * y3, a10 * x3 + a11 * y3); } // the arc handling code underneath is from XSVG (BSD license) /* * Copyright 2002 USC/Information Sciences Institute * * Permission to use, copy, modify, distribute, and sell this software * and its documentation for any purpose is hereby granted without * fee, provided that the above copyright notice appear in all copies * and that both that copyright notice and this permission notice * appear in supporting documentation, and that the name of * Information Sciences Institute not be used in advertising or * publicity pertaining to distribution of the software without * specific, written prior permission. Information Sciences Institute * makes no representations about the suitability of this software for * any purpose. It is provided "as is" without express or implied * warranty. * * INFORMATION SCIENCES INSTITUTE DISCLAIMS ALL WARRANTIES WITH REGARD * TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL INFORMATION SCIENCES * INSTITUTE BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA * OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. * */ static void pathArc(QPainterPath &path, qreal rx, qreal ry, qreal x_axis_rotation, int large_arc_flag, int sweep_flag, qreal x, qreal y, qreal curx, qreal cury) { const qreal Pr1 = rx * rx; const qreal Pr2 = ry * ry; if (!Pr1 || !Pr2) return; qreal sin_th, cos_th; qreal a00, a01, a10, a11; qreal x0, y0, x1, y1, xc, yc; qreal d, sfactor, sfactor_sq; qreal th0, th1, th_arc; int i, n_segs; qreal dx, dy, dx1, dy1, Px, Py, check; rx = qAbs(rx); ry = qAbs(ry); sin_th = qSin(x_axis_rotation * (Q_PI / 180.0)); cos_th = qCos(x_axis_rotation * (Q_PI / 180.0)); dx = (curx - x) / 2.0; dy = (cury - y) / 2.0; dx1 = cos_th * dx + sin_th * dy; dy1 = -sin_th * dx + cos_th * dy; Px = dx1 * dx1; Py = dy1 * dy1; /* Spec : check if radii are large enough */ check = Px / Pr1 + Py / Pr2; if (check > 1) { rx = rx * qSqrt(check); ry = ry * qSqrt(check); } a00 = cos_th / rx; a01 = sin_th / rx; a10 = -sin_th / ry; a11 = cos_th / ry; x0 = a00 * curx + a01 * cury; y0 = a10 * curx + a11 * cury; x1 = a00 * x + a01 * y; y1 = a10 * x + a11 * y; /* (x0, y0) is current point in transformed coordinate space. (x1, y1) is new point in transformed coordinate space. The arc fits a unit-radius circle in this space. */ d = (x1 - x0) * (x1 - x0) + (y1 - y0) * (y1 - y0); if (!d) return; sfactor_sq = 1.0 / d - 0.25; if (sfactor_sq < 0) sfactor_sq = 0; sfactor = qSqrt(sfactor_sq); if (sweep_flag == large_arc_flag) sfactor = -sfactor; xc = 0.5 * (x0 + x1) - sfactor * (y1 - y0); yc = 0.5 * (y0 + y1) + sfactor * (x1 - x0); /* (xc, yc) is center of the circle. 
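       From here the arc is flattened into cubic Bezier pieces: the code below
       computes the start and end angles th0 and th1, normalises the sweep
       direction, and emits roughly ceil(|th_arc| / (pi/2)) segments through
       pathArcSegment().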
*/ th0 = qAtan2(y0 - yc, x0 - xc); th1 = qAtan2(y1 - yc, x1 - xc); th_arc = th1 - th0; if (th_arc < 0 && sweep_flag) th_arc += 2 * Q_PI; else if (th_arc > 0 && !sweep_flag) th_arc -= 2 * Q_PI; n_segs = qCeil(qAbs(th_arc / (Q_PI * 0.5 + 0.001))); for (i = 0; i < n_segs; i++) { pathArcSegment(path, xc, yc, th0 + i * th_arc / n_segs, th0 + (i + 1) * th_arc / n_segs, rx, ry, x_axis_rotation); } } static bool parsePathDataFast(QStringView dataStr, QPainterPath &path) { const int maxElementCount = 0x7fff; // Assume file corruption if more path elements than this qreal x0 = 0, y0 = 0; // starting point qreal x = 0, y = 0; // current point char lastMode = 0; QPointF ctrlPt; const QChar *str = dataStr.constData(); const QChar *end = str + dataStr.size(); bool ok = true; while (ok && str != end) { while (str->isSpace() && (str + 1) != end) ++str; QChar pathElem = *str; ++str; QChar endc = *end; *const_cast<QChar *>(end) = u'\0'; // parseNumbersArray requires 0-termination that QStringView cannot guarantee const char *pattern = nullptr; if (pathElem == QLatin1Char('a') || pathElem == QLatin1Char('A')) pattern = "rrrffrr"; QVarLengthArray<qreal, 8> arg; parseNumbersArray(str, arg, pattern); *const_cast<QChar *>(end) = endc; if (pathElem == QLatin1Char('z') || pathElem == QLatin1Char('Z')) arg.append(0);//dummy const qreal *num = arg.constData(); int count = arg.count(); while (ok && count > 0) { qreal offsetX = x; // correction offsets qreal offsetY = y; // for relative commands switch (pathElem.unicode()) { case 'm': { if (count < 2) { ok = false; break; } x = x0 = num[0] + offsetX; y = y0 = num[1] + offsetY; num += 2; count -= 2; path.moveTo(x0, y0); // As per 1.2 spec 8.3.2 The "moveto" commands // If a 'moveto' is followed by multiple pairs of coordinates without explicit commands, // the subsequent pairs shall be treated as implicit 'lineto' commands. pathElem = QLatin1Char('l'); } break; case 'M': { if (count < 2) { ok = false; break; } x = x0 = num[0]; y = y0 = num[1]; num += 2; count -= 2; path.moveTo(x0, y0); // As per 1.2 spec 8.3.2 The "moveto" commands // If a 'moveto' is followed by multiple pairs of coordinates without explicit commands, // the subsequent pairs shall be treated as implicit 'lineto' commands. 
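                // For example (illustrative data, not from the original source): for the
                // path data "M 10 10 20 20" the explicit moveto consumes the first pair,
                // and the remaining pair re-enters this switch as an implicit absolute
                // lineto, which is why pathElem is rewritten to 'L' here.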
pathElem = QLatin1Char('L'); } break; case 'z': case 'Z': { x = x0; y = y0; count--; // skip dummy num++; path.closeSubpath(); } break; case 'l': { if (count < 2) { ok = false; break; } x = num[0] + offsetX; y = num[1] + offsetY; num += 2; count -= 2; path.lineTo(x, y); } break; case 'L': { if (count < 2) { ok = false; break; } x = num[0]; y = num[1]; num += 2; count -= 2; path.lineTo(x, y); } break; case 'h': { x = num[0] + offsetX; num++; count--; path.lineTo(x, y); } break; case 'H': { x = num[0]; num++; count--; path.lineTo(x, y); } break; case 'v': { y = num[0] + offsetY; num++; count--; path.lineTo(x, y); } break; case 'V': { y = num[0]; num++; count--; path.lineTo(x, y); } break; case 'c': { if (count < 6) { ok = false; break; } QPointF c1(num[0] + offsetX, num[1] + offsetY); QPointF c2(num[2] + offsetX, num[3] + offsetY); QPointF e(num[4] + offsetX, num[5] + offsetY); num += 6; count -= 6; path.cubicTo(c1, c2, e); ctrlPt = c2; x = e.x(); y = e.y(); break; } case 'C': { if (count < 6) { ok = false; break; } QPointF c1(num[0], num[1]); QPointF c2(num[2], num[3]); QPointF e(num[4], num[5]); num += 6; count -= 6; path.cubicTo(c1, c2, e); ctrlPt = c2; x = e.x(); y = e.y(); break; } case 's': { if (count < 4) { ok = false; break; } QPointF c1; if (lastMode == 'c' || lastMode == 'C' || lastMode == 's' || lastMode == 'S') c1 = QPointF(2*x-ctrlPt.x(), 2*y-ctrlPt.y()); else c1 = QPointF(x, y); QPointF c2(num[0] + offsetX, num[1] + offsetY); QPointF e(num[2] + offsetX, num[3] + offsetY); num += 4; count -= 4; path.cubicTo(c1, c2, e); ctrlPt = c2; x = e.x(); y = e.y(); break; } case 'S': { if (count < 4) { ok = false; break; } QPointF c1; if (lastMode == 'c' || lastMode == 'C' || lastMode == 's' || lastMode == 'S') c1 = QPointF(2*x-ctrlPt.x(), 2*y-ctrlPt.y()); else c1 = QPointF(x, y); QPointF c2(num[0], num[1]); QPointF e(num[2], num[3]); num += 4; count -= 4; path.cubicTo(c1, c2, e); ctrlPt = c2; x = e.x(); y = e.y(); break; } case 'q': { if (count < 4) { ok = false; break; } QPointF c(num[0] + offsetX, num[1] + offsetY); QPointF e(num[2] + offsetX, num[3] + offsetY); num += 4; count -= 4; path.quadTo(c, e); ctrlPt = c; x = e.x(); y = e.y(); break; } case 'Q': { if (count < 4) { ok = false; break; } QPointF c(num[0], num[1]); QPointF e(num[2], num[3]); num += 4; count -= 4; path.quadTo(c, e); ctrlPt = c; x = e.x(); y = e.y(); break; } case 't': { if (count < 2) { ok = false; break; } QPointF e(num[0] + offsetX, num[1] + offsetY); num += 2; count -= 2; QPointF c; if (lastMode == 'q' || lastMode == 'Q' || lastMode == 't' || lastMode == 'T') c = QPointF(2*x-ctrlPt.x(), 2*y-ctrlPt.y()); else c = QPointF(x, y); path.quadTo(c, e); ctrlPt = c; x = e.x(); y = e.y(); break; } case 'T': { if (count < 2) { ok = false; break; } QPointF e(num[0], num[1]); num += 2; count -= 2; QPointF c; if (lastMode == 'q' || lastMode == 'Q' || lastMode == 't' || lastMode == 'T') c = QPointF(2*x-ctrlPt.x(), 2*y-ctrlPt.y()); else c = QPointF(x, y); path.quadTo(c, e); ctrlPt = c; x = e.x(); y = e.y(); break; } case 'a': { if (count < 7) { ok = false; break; } qreal rx = (*num++); qreal ry = (*num++); qreal xAxisRotation = (*num++); qreal largeArcFlag = (*num++); qreal sweepFlag = (*num++); qreal ex = (*num++) + offsetX; qreal ey = (*num++) + offsetY; count -= 7; qreal curx = x; qreal cury = y; pathArc(path, rx, ry, xAxisRotation, int(largeArcFlag), int(sweepFlag), ex, ey, curx, cury); x = ex; y = ey; } break; case 'A': { if (count < 7) { ok = false; break; } qreal rx = (*num++); qreal ry = (*num++); qreal xAxisRotation = 
(*num++); qreal largeArcFlag = (*num++); qreal sweepFlag = (*num++); qreal ex = (*num++); qreal ey = (*num++); count -= 7; qreal curx = x; qreal cury = y; pathArc(path, rx, ry, xAxisRotation, int(largeArcFlag), int(sweepFlag), ex, ey, curx, cury); x = ex; y = ey; } break; default: ok = false; break; } lastMode = pathElem.toLatin1(); if (path.elementCount() > maxElementCount) ok = false; } } return ok; } static bool parseStyle(QSvgNode *node, const QXmlStreamAttributes &attributes, QSvgHandler *); static bool parseStyle(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *); #ifndef QT_NO_CSSPARSER static void parseCSStoXMLAttrs(const QList<QCss::Declaration> &declarations, QXmlStreamAttributes &attributes) { for (int i = 0; i < declarations.count(); ++i) { const QCss::Declaration &decl = declarations.at(i); if (decl.d->property.isEmpty()) continue; QCss::Value val = decl.d->values.first(); QString valueStr; const int valCount = decl.d->values.count(); if (valCount != 1) { for (int i = 0; i < valCount; ++i) { valueStr += decl.d->values[i].toString(); if (i + 1 < valCount) valueStr += QLatin1Char(','); } } else { valueStr = val.toString(); } if (val.type == QCss::Value::Uri) { valueStr.prepend(QLatin1String("url(")); valueStr.append(QLatin1Char(')')); } else if (val.type == QCss::Value::Function) { QStringList lst = val.variant.toStringList(); valueStr.append(lst.at(0)); valueStr.append(QLatin1Char('(')); for (int i = 1; i < lst.count(); ++i) { valueStr.append(lst.at(i)); if ((i +1) < lst.count()) valueStr.append(QLatin1Char(',')); } valueStr.append(QLatin1Char(')')); } else if (val.type == QCss::Value::KnownIdentifier) { switch (val.variant.toInt()) { case QCss::Value_None: valueStr = QLatin1String("none"); break; default: break; } } attributes.append(QString(), decl.d->property, valueStr); } } void QSvgHandler::parseCSStoXMLAttrs(const QString &css, QList<QSvgCssAttribute> *attributes) { // preprocess (for unicode escapes), tokenize and remove comments m_cssParser.init(css); QString key; attributes->reserve(10); while (m_cssParser.hasNext()) { m_cssParser.skipSpace(); if (!m_cssParser.hasNext()) break; m_cssParser.next(); QString name; if (m_cssParser.hasEscapeSequences) { key = m_cssParser.lexem(); name = key; } else { const QCss::Symbol &sym = m_cssParser.symbol(); name = sym.text.mid(sym.start, sym.len); } m_cssParser.skipSpace(); if (!m_cssParser.test(QCss::COLON)) break; m_cssParser.skipSpace(); if (!m_cssParser.hasNext()) break; QSvgCssAttribute attribute; attribute.name = name; const int firstSymbol = m_cssParser.index; int symbolCount = 0; do { m_cssParser.next(); ++symbolCount; } while (m_cssParser.hasNext() && !m_cssParser.test(QCss::SEMICOLON)); bool canExtractValueByRef = !m_cssParser.hasEscapeSequences; if (canExtractValueByRef) { int len = m_cssParser.symbols.at(firstSymbol).len; for (int i = firstSymbol + 1; i < firstSymbol + symbolCount; ++i) { len += m_cssParser.symbols.at(i).len; if (m_cssParser.symbols.at(i - 1).start + m_cssParser.symbols.at(i - 1).len != m_cssParser.symbols.at(i).start) { canExtractValueByRef = false; break; } } if (canExtractValueByRef) { const QCss::Symbol &sym = m_cssParser.symbols.at(firstSymbol); attribute.value = sym.text.mid(sym.start, len); } } if (!canExtractValueByRef) { QString value; for (int i = firstSymbol; i < m_cssParser.index - 1; ++i) value += m_cssParser.symbols.at(i).lexem(); attribute.value = value; } attributes->append(attribute); m_cssParser.skipSpace(); } } static void cssStyleLookup(QSvgNode *node, QSvgHandler 
*handler, QSvgStyleSelector *selector) { QCss::StyleSelector::NodePtr cssNode; cssNode.ptr = node; QList<QCss::Declaration> decls = selector->declarationsForNode(cssNode); QXmlStreamAttributes attributes; parseCSStoXMLAttrs(decls, attributes); parseStyle(node, attributes, handler); } #endif // QT_NO_CSSPARSER static inline QStringList stringToList(const QString &str) { QStringList lst = str.split(QLatin1Char(','), Qt::SkipEmptyParts); return lst; } static bool parseCoreNode(QSvgNode *node, const QXmlStreamAttributes &attributes) { QStringList features; QStringList extensions; QStringList languages; QStringList formats; QStringList fonts; QString xmlClassStr; for (int i = 0; i < attributes.count(); ++i) { const QXmlStreamAttribute &attribute = attributes.at(i); QStringView name = attribute.qualifiedName(); if (name.isEmpty()) continue; QStringView value = attribute.value(); switch (name.at(0).unicode()) { case 'c': if (name == QLatin1String("class")) xmlClassStr = value.toString(); break; case 'r': if (name == QLatin1String("requiredFeatures")) features = stringToList(value.toString()); else if (name == QLatin1String("requiredExtensions")) extensions = stringToList(value.toString()); else if (name == QLatin1String("requiredFormats")) formats = stringToList(value.toString()); else if (name == QLatin1String("requiredFonts")) fonts = stringToList(value.toString()); break; case 's': if (name == QLatin1String("systemLanguage")) languages = stringToList(value.toString()); break; default: break; } } node->setRequiredFeatures(features); node->setRequiredExtensions(extensions); node->setRequiredLanguages(languages); node->setRequiredFormats(formats); node->setRequiredFonts(fonts); node->setNodeId(someId(attributes)); node->setXmlClass(xmlClassStr); return true; } static void parseOpacity(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *) { if (attributes.opacity.isEmpty()) return; const QStringView value = attributes.opacity.trimmed(); bool ok = false; qreal op = value.toDouble(&ok); if (ok) { QSvgOpacityStyle *opacity = new QSvgOpacityStyle(qBound(qreal(0.0), op, qreal(1.0))); node->appendStyleProperty(opacity, attributes.id); } } static QPainter::CompositionMode svgToQtCompositionMode(const QString &op) { #define NOOP qDebug()<<"Operation: "<<op<<" is not implemented" if (op == QLatin1String("clear")) { return QPainter::CompositionMode_Clear; } else if (op == QLatin1String("src")) { return QPainter::CompositionMode_Source; } else if (op == QLatin1String("dst")) { return QPainter::CompositionMode_Destination; } else if (op == QLatin1String("src-over")) { return QPainter::CompositionMode_SourceOver; } else if (op == QLatin1String("dst-over")) { return QPainter::CompositionMode_DestinationOver; } else if (op == QLatin1String("src-in")) { return QPainter::CompositionMode_SourceIn; } else if (op == QLatin1String("dst-in")) { return QPainter::CompositionMode_DestinationIn; } else if (op == QLatin1String("src-out")) { return QPainter::CompositionMode_SourceOut; } else if (op == QLatin1String("dst-out")) { return QPainter::CompositionMode_DestinationOut; } else if (op == QLatin1String("src-atop")) { return QPainter::CompositionMode_SourceAtop; } else if (op == QLatin1String("dst-atop")) { return QPainter::CompositionMode_DestinationAtop; } else if (op == QLatin1String("xor")) { return QPainter::CompositionMode_Xor; } else if (op == QLatin1String("plus")) { return QPainter::CompositionMode_Plus; } else if (op == QLatin1String("multiply")) { return QPainter::CompositionMode_Multiply; } else if 
(op == QLatin1String("screen")) { return QPainter::CompositionMode_Screen; } else if (op == QLatin1String("overlay")) { return QPainter::CompositionMode_Overlay; } else if (op == QLatin1String("darken")) { return QPainter::CompositionMode_Darken; } else if (op == QLatin1String("lighten")) { return QPainter::CompositionMode_Lighten; } else if (op == QLatin1String("color-dodge")) { return QPainter::CompositionMode_ColorDodge; } else if (op == QLatin1String("color-burn")) { return QPainter::CompositionMode_ColorBurn; } else if (op == QLatin1String("hard-light")) { return QPainter::CompositionMode_HardLight; } else if (op == QLatin1String("soft-light")) { return QPainter::CompositionMode_SoftLight; } else if (op == QLatin1String("difference")) { return QPainter::CompositionMode_Difference; } else if (op == QLatin1String("exclusion")) { return QPainter::CompositionMode_Exclusion; } else { NOOP; } return QPainter::CompositionMode_SourceOver; } static void parseCompOp(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *) { if (attributes.compOp.isEmpty()) return; QString value = attributes.compOp.toString().trimmed(); if (!value.isEmpty()) { QSvgCompOpStyle *compop = new QSvgCompOpStyle(svgToQtCompositionMode(value)); node->appendStyleProperty(compop, attributes.id); } } static inline QSvgNode::DisplayMode displayStringToEnum(const QString &str) { if (str == QLatin1String("inline")) { return QSvgNode::InlineMode; } else if (str == QLatin1String("block")) { return QSvgNode::BlockMode; } else if (str == QLatin1String("list-item")) { return QSvgNode::ListItemMode; } else if (str == QLatin1String("run-in")) { return QSvgNode::RunInMode; } else if (str == QLatin1String("compact")) { return QSvgNode::CompactMode; } else if (str == QLatin1String("marker")) { return QSvgNode::MarkerMode; } else if (str == QLatin1String("table")) { return QSvgNode::TableMode; } else if (str == QLatin1String("inline-table")) { return QSvgNode::InlineTableMode; } else if (str == QLatin1String("table-row-group")) { return QSvgNode::TableRowGroupMode; } else if (str == QLatin1String("table-header-group")) { return QSvgNode::TableHeaderGroupMode; } else if (str == QLatin1String("table-footer-group")) { return QSvgNode::TableFooterGroupMode; } else if (str == QLatin1String("table-row")) { return QSvgNode::TableRowMode; } else if (str == QLatin1String("table-column-group")) { return QSvgNode::TableColumnGroupMode; } else if (str == QLatin1String("table-column")) { return QSvgNode::TableColumnMode; } else if (str == QLatin1String("table-cell")) { return QSvgNode::TableCellMode; } else if (str == QLatin1String("table-caption")) { return QSvgNode::TableCaptionMode; } else if (str == QLatin1String("none")) { return QSvgNode::NoneMode; } else if (str == QT_INHERIT) { return QSvgNode::InheritMode; } return QSvgNode::BlockMode; } static void parseOthers(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *) { if (attributes.display.isEmpty()) return; QString displayStr = attributes.display.toString().trimmed(); if (!displayStr.isEmpty()) { node->setDisplayMode(displayStringToEnum(displayStr)); } } static void parseRenderingHints(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *) { if (attributes.imageRendering.isEmpty()) return; QString ir = attributes.imageRendering.toString().trimmed(); QSvgQualityStyle *p = new QSvgQualityStyle(0); if (ir == QLatin1String("auto")) p->setImageRendering(QSvgQualityStyle::ImageRenderingAuto); else if (ir == QLatin1String("optimizeSpeed")) 
p->setImageRendering(QSvgQualityStyle::ImageRenderingOptimizeSpeed); else if (ir == QLatin1String("optimizeQuality")) p->setImageRendering(QSvgQualityStyle::ImageRenderingOptimizeQuality); node->appendStyleProperty(p, attributes.id); } static bool parseStyle(QSvgNode *node, const QSvgAttributes &attributes, QSvgHandler *handler) { parseColor(node, attributes, handler); parseBrush(node, attributes, handler); parsePen(node, attributes, handler); parseFont(node, attributes, handler); parseTransform(node, attributes, handler); parseVisibility(node, attributes, handler); parseOpacity(node, attributes, handler); parseCompOp(node, attributes, handler); parseRenderingHints(node, attributes, handler); parseOthers(node, attributes, handler); #if 0 value = attributes.value("audio-level"); value = attributes.value("color-rendering"); value = attributes.value("display-align"); value = attributes.value("image-rendering"); value = attributes.value("line-increment"); value = attributes.value("pointer-events"); value = attributes.value("shape-rendering"); value = attributes.value("solid-color"); value = attributes.value("solid-opacity"); value = attributes.value("text-rendering"); value = attributes.value("vector-effect"); value = attributes.value("viewport-fill"); value = attributes.value("viewport-fill-opacity"); #endif return true; } static bool parseStyle(QSvgNode *node, const QXmlStreamAttributes &attrs, QSvgHandler *handler) { return parseStyle(node, QSvgAttributes(attrs, handler), handler); } static bool parseAnchorNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static bool parseAnimateNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static int parseClockValue(QStringView str, bool *ok) { int res = 0; int ms = 1000; str = str.trimmed(); if (str.endsWith(QLatin1String("ms"))) { str.chop(2); ms = 1; } else if (str.endsWith(QLatin1String("s"))) { str.chop(1); } double val = ms * toDouble(str, ok); if (ok) { if (val > std::numeric_limits<int>::min() && val < std::numeric_limits<int>::max()) res = static_cast<int>(val); else *ok = false; } return res; } static bool parseAnimateColorNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { QStringView fromStr = attributes.value(QLatin1String("from")); QStringView toStr = attributes.value(QLatin1String("to")); QString valuesStr = attributes.value(QLatin1String("values")).toString(); QString beginStr = attributes.value(QLatin1String("begin")).toString(); QString durStr = attributes.value(QLatin1String("dur")).toString(); QString targetStr = attributes.value(QLatin1String("attributeName")).toString(); QString repeatStr = attributes.value(QLatin1String("repeatCount")).toString(); QString fillStr = attributes.value(QLatin1String("fill")).toString(); QList<QColor> colors; if (valuesStr.isEmpty()) { QColor startColor, endColor; resolveColor(fromStr, startColor, handler); resolveColor(toStr, endColor, handler); colors.reserve(2); colors.append(startColor); colors.append(endColor); } else { QStringList str = valuesStr.split(QLatin1Char(';')); colors.reserve(str.count()); QStringList::const_iterator itr; for (itr = str.constBegin(); itr != str.constEnd(); ++itr) { QColor color; resolveColor(*itr, color, handler); colors.append(color); } } bool ok = true; int begin = parseClockValue(beginStr, &ok); if (!ok) return false; int end = begin + parseClockValue(durStr, &ok); 
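    // Note: parseClockValue() above returns milliseconds ("2s" -> 2000, "500ms" -> 500;
    // a bare number is treated as seconds), so begin and end are expressed in ms.
    // The check below rejects unparsable values and non-positive durations.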
if (!ok || end <= begin) return false; QSvgAnimateColor *anim = new QSvgAnimateColor(begin, end, 0); anim->setArgs((targetStr == QLatin1String("fill")), colors); anim->setFreeze(fillStr == QLatin1String("freeze")); anim->setRepeatCount( (repeatStr == QLatin1String("indefinite")) ? -1 : (repeatStr == QLatin1String("")) ? 1 : toDouble(repeatStr)); parent->appendStyleProperty(anim, someId(attributes)); parent->document()->setAnimated(true); handler->setAnimPeriod(begin, end); return true; } static bool parseAimateMotionNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static void parseNumberTriplet(QList<qreal> &values, const QChar *&s) { QList<qreal> list = parseNumbersList(s); values << list; for (int i = 3 - list.size(); i > 0; --i) values.append(0.0); } static bool parseAnimateTransformNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { QString typeStr = attributes.value(QLatin1String("type")).toString(); QString values = attributes.value(QLatin1String("values")).toString(); QString beginStr = attributes.value(QLatin1String("begin")).toString(); QString durStr = attributes.value(QLatin1String("dur")).toString(); QString repeatStr = attributes.value(QLatin1String("repeatCount")).toString(); QString fillStr = attributes.value(QLatin1String("fill")).toString(); QString fromStr = attributes.value(QLatin1String("from")).toString(); QString toStr = attributes.value(QLatin1String("to")).toString(); QString byStr = attributes.value(QLatin1String("by")).toString(); QString addtv = attributes.value(QLatin1String("additive")).toString(); QSvgAnimateTransform::Additive additive = QSvgAnimateTransform::Replace; if (addtv == QLatin1String("sum")) additive = QSvgAnimateTransform::Sum; QList<qreal> vals; if (values.isEmpty()) { const QChar *s; if (fromStr.isEmpty()) { if (!byStr.isEmpty()) { // By-animation. additive = QSvgAnimateTransform::Sum; vals.append(0.0); vals.append(0.0); vals.append(0.0); parseNumberTriplet(vals, s = byStr.constData()); } else { // To-animation not defined. return false; } } else { if (!toStr.isEmpty()) { // From-to-animation. parseNumberTriplet(vals, s = fromStr.constData()); parseNumberTriplet(vals, s = toStr.constData()); } else if (!byStr.isEmpty()) { // From-by-animation. 
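                // A from-by animation stores the "from" triplet followed by "from + by":
                // the loop below adds vals[i - 3] (the from component) to each component
                // of the second triplet parsed from byStr.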
parseNumberTriplet(vals, s = fromStr.constData()); parseNumberTriplet(vals, s = byStr.constData()); for (int i = vals.size() - 3; i < vals.size(); ++i) vals[i] += vals[i - 3]; } else { return false; } } } else { const QChar *s = values.constData(); while (s && *s != QLatin1Char(0)) { parseNumberTriplet(vals, s); if (*s == QLatin1Char(0)) break; ++s; } } bool ok = true; int begin = parseClockValue(beginStr, &ok); if (!ok) return false; int end = begin + parseClockValue(durStr, &ok); if (!ok || end <= begin) return false; QSvgAnimateTransform::TransformType type = QSvgAnimateTransform::Empty; if (typeStr == QLatin1String("translate")) { type = QSvgAnimateTransform::Translate; } else if (typeStr == QLatin1String("scale")) { type = QSvgAnimateTransform::Scale; } else if (typeStr == QLatin1String("rotate")) { type = QSvgAnimateTransform::Rotate; } else if (typeStr == QLatin1String("skewX")) { type = QSvgAnimateTransform::SkewX; } else if (typeStr == QLatin1String("skewY")) { type = QSvgAnimateTransform::SkewY; } else { return false; } QSvgAnimateTransform *anim = new QSvgAnimateTransform(begin, end, 0); anim->setArgs(type, additive, vals); anim->setFreeze(fillStr == QLatin1String("freeze")); anim->setRepeatCount( (repeatStr == QLatin1String("indefinite"))? -1 : (repeatStr == QLatin1String(""))? 1 : toDouble(repeatStr)); parent->appendStyleProperty(anim, someId(attributes)); parent->document()->setAnimated(true); handler->setAnimPeriod(begin, end); return true; } static QSvgNode * createAnimationNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return 0; } static bool parseAudioNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static QSvgNode *createCircleNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { const QStringView cx = attributes.value(QLatin1String("cx")); const QStringView cy = attributes.value(QLatin1String("cy")); const QStringView r = attributes.value(QLatin1String("r")); qreal ncx = toDouble(cx); qreal ncy = toDouble(cy); qreal nr = toDouble(r); if (nr < 0.0) return nullptr; QRectF rect(ncx-nr, ncy-nr, nr*2, nr*2); QSvgNode *circle = new QSvgCircle(parent, rect); return circle; } static QSvgNode *createDefsNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(attributes); QSvgDefs *defs = new QSvgDefs(parent); return defs; } static bool parseDescNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static bool parseDiscardNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static QSvgNode *createEllipseNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { const QStringView cx = attributes.value(QLatin1String("cx")); const QStringView cy = attributes.value(QLatin1String("cy")); const QStringView rx = attributes.value(QLatin1String("rx")); const QStringView ry = attributes.value(QLatin1String("ry")); qreal ncx = toDouble(cx); qreal ncy = toDouble(cy); qreal nrx = toDouble(rx); qreal nry = toDouble(ry); QRectF rect(ncx-nrx, ncy-nry, nrx*2, nry*2); QSvgNode *ellipse = new QSvgEllipse(parent, rect); return ellipse; } static QSvgStyleProperty *createFontNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { const QStringView hax = 
attributes.value(QLatin1String("horiz-adv-x")); QString myId = someId(attributes); qreal horizAdvX = toDouble(hax); while (parent && parent->type() != QSvgNode::DOC) { parent = parent->parent(); } if (parent && !myId.isEmpty()) { QSvgTinyDocument *doc = static_cast<QSvgTinyDocument*>(parent); QSvgFont *font = doc->svgFont(myId); if (!font) { font = new QSvgFont(horizAdvX); font->setFamilyName(myId); doc->addSvgFont(font); } return new QSvgFontStyle(font, doc); } return nullptr; } static bool parseFontFaceNode(QSvgStyleProperty *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { if (parent->type() != QSvgStyleProperty::FONT) { return false; } QSvgFontStyle *style = static_cast<QSvgFontStyle*>(parent); QSvgFont *font = style->svgFont(); QString name = attributes.value(QLatin1String("font-family")).toString(); const QStringView unitsPerEmStr = attributes.value(QLatin1String("units-per-em")); qreal unitsPerEm = toDouble(unitsPerEmStr); if (!unitsPerEm) unitsPerEm = 1000; if (!name.isEmpty()) font->setFamilyName(name); font->setUnitsPerEm(unitsPerEm); if (!font->familyName().isEmpty()) if (!style->doc()->svgFont(font->familyName())) style->doc()->addSvgFont(font); return true; } static bool parseFontFaceNameNode(QSvgStyleProperty *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { if (parent->type() != QSvgStyleProperty::FONT) { return false; } QSvgFontStyle *style = static_cast<QSvgFontStyle*>(parent); QSvgFont *font = style->svgFont(); QString name = attributes.value(QLatin1String("name")).toString(); if (!name.isEmpty()) font->setFamilyName(name); if (!font->familyName().isEmpty()) if (!style->doc()->svgFont(font->familyName())) style->doc()->addSvgFont(font); return true; } static bool parseFontFaceSrcNode(QSvgStyleProperty *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static bool parseFontFaceUriNode(QSvgStyleProperty *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static bool parseForeignObjectNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static QSvgNode *createGNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(attributes); QSvgG *node = new QSvgG(parent); return node; } static bool parseGlyphNode(QSvgStyleProperty *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { if (parent->type() != QSvgStyleProperty::FONT) { return false; } QSvgFontStyle *style = static_cast<QSvgFontStyle*>(parent); QSvgFont *font = style->svgFont(); createSvgGlyph(font, attributes); return true; } static bool parseHandlerNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static bool parseHkernNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static QSvgNode *createImageNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { const QStringView x = attributes.value(QLatin1String("x")); const QStringView y = attributes.value(QLatin1String("y")); const QStringView width = attributes.value(QLatin1String("width")); const QStringView height = attributes.value(QLatin1String("height")); QString filename = attributes.value(QLatin1String("xlink:href")).toString(); qreal nx = toDouble(x); qreal ny = toDouble(y); 
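    // Unlike x and y above, which are read as plain numbers in user units, width and
    // height are parsed as lengths: parseLength() strips any unit suffix ("pt", "mm",
    // "cm", ...) and convertToPixels() scales the value before the size check below.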
QSvgHandler::LengthType type; qreal nwidth = parseLength(width.toString(), type, handler); nwidth = convertToPixels(nwidth, true, type); qreal nheight = parseLength(height.toString(), type, handler); nheight = convertToPixels(nheight, false, type); filename = filename.trimmed(); if (filename.isEmpty()) { qCWarning(lcSvgHandler) << "QSvgHandler: Image filename is empty"; return 0; } if (nwidth <= 0 || nheight <= 0) { qCWarning(lcSvgHandler) << "QSvgHandler: Width or height for" << filename << "image was not greater than 0"; return 0; } QImage image; if (filename.startsWith(QLatin1String("data"))) { int idx = filename.lastIndexOf(QLatin1String("base64,")); if (idx != -1) { idx += 7; const QString dataStr = filename.mid(idx); QByteArray data = QByteArray::fromBase64(dataStr.toLatin1()); image = QImage::fromData(data); } else { qCDebug(lcSvgHandler) << "QSvgHandler::createImageNode: Unrecognized inline image format!"; } } else { const auto *file = qobject_cast<QFile *>(handler->device()); if (file) { QUrl url(filename); if (url.isRelative()) { QFileInfo info(file->fileName()); filename = info.absoluteDir().absoluteFilePath(filename); } } image = QImage(filename); } if (image.isNull()) { qCWarning(lcSvgHandler) << "Could not create image from" << filename; return 0; } if (image.format() == QImage::Format_ARGB32) image = image.convertToFormat(QImage::Format_ARGB32_Premultiplied); QSvgNode *img = new QSvgImage(parent, image, QRectF(nx, ny, nwidth, nheight)); return img; } static QSvgNode *createLineNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { const QStringView x1 = attributes.value(QLatin1String("x1")); const QStringView y1 = attributes.value(QLatin1String("y1")); const QStringView x2 = attributes.value(QLatin1String("x2")); const QStringView y2 = attributes.value(QLatin1String("y2")); qreal nx1 = toDouble(x1); qreal ny1 = toDouble(y1); qreal nx2 = toDouble(x2); qreal ny2 = toDouble(y2); QLineF lineBounds(nx1, ny1, nx2, ny2); QSvgNode *line = new QSvgLine(parent, lineBounds); return line; } static void parseBaseGradient(QSvgNode *node, const QXmlStreamAttributes &attributes, QSvgGradientStyle *gradProp, QSvgHandler *handler) { QString link = attributes.value(QLatin1String("xlink:href")).toString(); QStringView trans = attributes.value(QLatin1String("gradientTransform")); QString spread = attributes.value(QLatin1String("spreadMethod")).toString(); QString units = attributes.value(QLatin1String("gradientUnits")).toString(); QStringView colorStr = attributes.value(QLatin1String("color")); QStringView colorOpacityStr = attributes.value(QLatin1String("color-opacity")); QColor color; if (constructColor(colorStr, colorOpacityStr, color, handler)) { handler->popColor(); handler->pushColor(color); } QTransform matrix; QGradient *grad = gradProp->qgradient(); if (!link.isEmpty()) { QSvgStyleProperty *prop = node->styleProperty(link); //qDebug()<<"inherited "<<prop<<" ("<<link<<")"; if (prop && prop->type() == QSvgStyleProperty::GRADIENT) { QSvgGradientStyle *inherited = static_cast<QSvgGradientStyle*>(prop); if (!inherited->stopLink().isEmpty()) { gradProp->setStopLink(inherited->stopLink(), handler->document()); } else { grad->setStops(inherited->qgradient()->stops()); gradProp->setGradientStopsSet(inherited->gradientStopsSet()); } matrix = inherited->qtransform(); } else { gradProp->setStopLink(link, handler->document()); } } if (!trans.isEmpty()) { matrix = parseTransformationMatrix(trans); gradProp->setTransform(matrix); } else if (!matrix.isIdentity()) { 
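        // This branch applies a gradientTransform inherited through the xlink:href
        // chain when the element itself does not specify one.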
gradProp->setTransform(matrix); } if (!spread.isEmpty()) { if (spread == QLatin1String("pad")) { grad->setSpread(QGradient::PadSpread); } else if (spread == QLatin1String("reflect")) { grad->setSpread(QGradient::ReflectSpread); } else if (spread == QLatin1String("repeat")) { grad->setSpread(QGradient::RepeatSpread); } } if (units.isEmpty() || units == QLatin1String("objectBoundingBox")) { grad->setCoordinateMode(QGradient::ObjectMode); } } static QSvgStyleProperty *createLinearGradientNode(QSvgNode *node, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { const QStringView x1 = attributes.value(QLatin1String("x1")); const QStringView y1 = attributes.value(QLatin1String("y1")); const QStringView x2 = attributes.value(QLatin1String("x2")); const QStringView y2 = attributes.value(QLatin1String("y2")); qreal nx1 = 0.0; qreal ny1 = 0.0; qreal nx2 = 1.0; qreal ny2 = 0.0; if (!x1.isEmpty()) nx1 = convertToNumber(x1, handler); if (!y1.isEmpty()) ny1 = convertToNumber(y1, handler); if (!x2.isEmpty()) nx2 = convertToNumber(x2, handler); if (!y2.isEmpty()) ny2 = convertToNumber(y2, handler); QSvgNode *itr = node; while (itr && itr->type() != QSvgNode::DOC) { itr = itr->parent(); } QLinearGradient *grad = new QLinearGradient(nx1, ny1, nx2, ny2); grad->setInterpolationMode(QGradient::ComponentInterpolation); QSvgGradientStyle *prop = new QSvgGradientStyle(grad); parseBaseGradient(node, attributes, prop, handler); return prop; } static bool parseMetadataNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static bool parseMissingGlyphNode(QSvgStyleProperty *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { if (parent->type() != QSvgStyleProperty::FONT) { return false; } QSvgFontStyle *style = static_cast<QSvgFontStyle*>(parent); QSvgFont *font = style->svgFont(); createSvgGlyph(font, attributes); return true; } static bool parseMpathNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static QSvgNode *createPathNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { QStringView data = attributes.value(QLatin1String("d")); QPainterPath qpath; qpath.setFillRule(Qt::WindingFill); if (!parsePathDataFast(data, qpath)) qCWarning(lcSvgHandler, "Invalid path data; path truncated."); QSvgNode *path = new QSvgPath(parent, qpath); return path; } static QSvgNode *createPolygonNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { QString pointsStr = attributes.value(QLatin1String("points")).toString(); //same QPolygon parsing is in createPolylineNode const QChar *s = pointsStr.constData(); QList<qreal> points = parseNumbersList(s); QPolygonF poly(points.count()/2); for (int i = 0; i < poly.size(); ++i) poly[i] = QPointF(points.at(2 * i), points.at(2 * i + 1)); QSvgNode *polygon = new QSvgPolygon(parent, poly); return polygon; } static QSvgNode *createPolylineNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { QString pointsStr = attributes.value(QLatin1String("points")).toString(); //same QPolygon parsing is in createPolygonNode const QChar *s = pointsStr.constData(); QList<qreal> points = parseNumbersList(s); QPolygonF poly(points.count()/2); for (int i = 0; i < poly.size(); ++i) poly[i] = QPointF(points.at(2 * i), points.at(2 * i + 1)); QSvgNode *line = new QSvgPolyline(parent, poly); return line; } static bool parsePrefetchNode(QSvgNode *parent, 
const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static QSvgStyleProperty *createRadialGradientNode(QSvgNode *node, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { const QStringView cx = attributes.value(QLatin1String("cx")); const QStringView cy = attributes.value(QLatin1String("cy")); const QStringView r = attributes.value(QLatin1String("r")); const QStringView fx = attributes.value(QLatin1String("fx")); const QStringView fy = attributes.value(QLatin1String("fy")); qreal ncx = 0.5; qreal ncy = 0.5; if (!cx.isEmpty()) ncx = toDouble(cx); if (!cy.isEmpty()) ncy = toDouble(cy); qreal nr = 0.0; if (!r.isEmpty()) nr = toDouble(r); if (nr <= 0.0) return nullptr; qreal nfx = ncx; if (!fx.isEmpty()) nfx = toDouble(fx); qreal nfy = ncy; if (!fy.isEmpty()) nfy = toDouble(fy); QRadialGradient *grad = new QRadialGradient(ncx, ncy, nr, nfx, nfy, 0); grad->setInterpolationMode(QGradient::ComponentInterpolation); QSvgGradientStyle *prop = new QSvgGradientStyle(grad); parseBaseGradient(node, attributes, prop, handler); return prop; } static QSvgNode *createRectNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { const QStringView x = attributes.value(QLatin1String("x")); const QStringView y = attributes.value(QLatin1String("y")); const QStringView width = attributes.value(QLatin1String("width")); const QStringView height = attributes.value(QLatin1String("height")); const QStringView rx = attributes.value(QLatin1String("rx")); const QStringView ry = attributes.value(QLatin1String("ry")); bool ok = true; QSvgHandler::LengthType type; qreal nwidth = parseLength(width.toString(), type, handler, &ok); if (!ok) return nullptr; nwidth = convertToPixels(nwidth, true, type); qreal nheight = parseLength(height.toString(), type, handler, &ok); if (!ok) return nullptr; nheight = convertToPixels(nheight, true, type); qreal nrx = toDouble(rx); qreal nry = toDouble(ry); QRectF bounds(toDouble(x), toDouble(y), nwidth, nheight); if (bounds.isEmpty()) return nullptr; if (!rx.isEmpty() && ry.isEmpty()) nry = nrx; else if (!ry.isEmpty() && rx.isEmpty()) nrx = nry; //9.2 The 'rect' element clearly specifies it // but the case might in fact be handled because // we draw rounded rectangles differently if (nrx > bounds.width()/2) nrx = bounds.width()/2; if (nry > bounds.height()/2) nry = bounds.height()/2; //we draw rounded rect from 0...99 //svg from 0...bounds.width()/2 so we're adjusting the //coordinates nrx *= (100/(bounds.width()/2)); nry *= (100/(bounds.height()/2)); QSvgNode *rect = new QSvgRect(parent, bounds, int(nrx), int(nry)); return rect; } static bool parseScriptNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static bool parseSetNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static QSvgStyleProperty *createSolidColorNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { Q_UNUSED(parent); Q_UNUSED(attributes); QStringView solidColorStr = attributes.value(QLatin1String("solid-color")); QStringView solidOpacityStr = attributes.value(QLatin1String("solid-opacity")); if (solidOpacityStr.isEmpty()) solidOpacityStr = attributes.value(QLatin1String("opacity")); QColor color; if (!constructColor(solidColorStr, solidOpacityStr, color, handler)) return 0; QSvgSolidColorStyle *style = new 
QSvgSolidColorStyle(color); return style; } static bool parseStopNode(QSvgStyleProperty *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { if (parent->type() != QSvgStyleProperty::GRADIENT) return false; QString nodeIdStr = someId(attributes); QString xmlClassStr = attributes.value(QLatin1String("class")).toString(); //### nasty hack because stop gradients are not in the rendering tree // we force a dummy node with the same id and class into a rendering // tree to figure out whether the selector has a style for it // QSvgStyleSelector should be coded in a way that could avoid it QSvgAnimation anim; anim.setNodeId(nodeIdStr); anim.setXmlClass(xmlClassStr); QXmlStreamAttributes xmlAttr = attributes; #ifndef QT_NO_CSSPARSER QCss::StyleSelector::NodePtr cssNode; cssNode.ptr = &anim; QList<QCss::Declaration> decls = handler->selector()->declarationsForNode(cssNode); for (int i = 0; i < decls.count(); ++i) { const QCss::Declaration &decl = decls.at(i); if (decl.d->property.isEmpty()) continue; if (decl.d->values.count() != 1) continue; QCss::Value val = decl.d->values.first(); QString valueStr = val.toString(); if (val.type == QCss::Value::Uri) { valueStr.prepend(QLatin1String("url(")); valueStr.append(QLatin1Char(')')); } xmlAttr.append(QString(), decl.d->property, valueStr); } #endif QSvgAttributes attrs(xmlAttr, handler); QSvgGradientStyle *style = static_cast<QSvgGradientStyle*>(parent); QStringView colorStr = attrs.stopColor; QColor color; bool ok = true; qreal offset = convertToNumber(attrs.offset, handler, &ok); if (!ok) offset = 0.0; QString black = QString::fromLatin1("#000000"); if (colorStr.isEmpty()) { colorStr = black; } constructColor(colorStr, attrs.stopOpacity, color, handler); QGradient *grad = style->qgradient(); offset = qMin(qreal(1), qMax(qreal(0), offset)); // Clamp to range [0, 1] QGradientStops stops; if (style->gradientStopsSet()) { stops = grad->stops(); // If the stop offset equals the one previously added, add an epsilon to make it greater. if (offset <= stops.back().first) offset = stops.back().first + FLT_EPSILON; } // If offset is greater than one, it must be clamped to one. if (offset > 1.0) { if ((stops.size() == 1) || (stops.at(stops.size() - 2).first < 1.0 - FLT_EPSILON)) { stops.back().first = 1.0 - FLT_EPSILON; grad->setStops(stops); } offset = 1.0; } grad->setColorAt(offset, color); style->setGradientStopsSet(true); return true; } static bool parseStyleNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { Q_UNUSED(parent); #ifdef QT_NO_CSSPARSER Q_UNUSED(attributes); Q_UNUSED(handler); #else const QStringView type = attributes.value(QLatin1String("type")); if (type.compare(QLatin1String("text/css"), Qt::CaseInsensitive) == 0 || type.isNull()) handler->setInStyle(true); #endif return true; } static QSvgNode *createSvgNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { Q_UNUSED(parent); Q_UNUSED(attributes); QSvgTinyDocument *node = new QSvgTinyDocument(); const QStringView widthStr = attributes.value(QLatin1String("width")); const QStringView heightStr = attributes.value(QLatin1String("height")); QString viewBoxStr = attributes.value(QLatin1String("viewBox")).toString(); QSvgHandler::LengthType type = QSvgHandler::LT_PX; // FIXME: is the default correct? 
qreal width = 0; if (!widthStr.isEmpty()) { width = parseLength(widthStr.toString(), type, handler); if (type != QSvgHandler::LT_PT) width = convertToPixels(width, true, type); node->setWidth(int(width), type == QSvgHandler::LT_PERCENT); } qreal height = 0; if (!heightStr.isEmpty()) { height = parseLength(heightStr.toString(), type, handler); if (type != QSvgHandler::LT_PT) height = convertToPixels(height, false, type); node->setHeight(int(height), type == QSvgHandler::LT_PERCENT); } QStringList viewBoxValues; if (!viewBoxStr.isEmpty()) { viewBoxStr = viewBoxStr.replace(QLatin1Char(' '), QLatin1Char(',')); viewBoxStr = viewBoxStr.replace(QLatin1Char('\r'), QLatin1Char(',')); viewBoxStr = viewBoxStr.replace(QLatin1Char('\n'), QLatin1Char(',')); viewBoxStr = viewBoxStr.replace(QLatin1Char('\t'), QLatin1Char(',')); viewBoxValues = viewBoxStr.split(QLatin1Char(','), Qt::SkipEmptyParts); } if (viewBoxValues.count() == 4) { QString xStr = viewBoxValues.at(0).trimmed(); QString yStr = viewBoxValues.at(1).trimmed(); QString widthStr = viewBoxValues.at(2).trimmed(); QString heightStr = viewBoxValues.at(3).trimmed(); QSvgHandler::LengthType lt; qreal x = parseLength(xStr, lt, handler); qreal y = parseLength(yStr, lt, handler); qreal w = parseLength(widthStr, lt, handler); qreal h = parseLength(heightStr, lt, handler); node->setViewBox(QRectF(x, y, w, h)); } else if (width && height) { if (type == QSvgHandler::LT_PT) { width = convertToPixels(width, false, type); height = convertToPixels(height, false, type); } node->setViewBox(QRectF(0, 0, width, height)); } handler->setDefaultCoordinateSystem(QSvgHandler::LT_PX); return node; } static QSvgNode *createSwitchNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(attributes); QSvgSwitch *node = new QSvgSwitch(parent); return node; } static bool parseTbreakNode(QSvgNode *parent, const QXmlStreamAttributes &, QSvgHandler *) { if (parent->type() != QSvgNode::TEXTAREA) return false; static_cast<QSvgText*>(parent)->addLineBreak(); return true; } static QSvgNode *createTextNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { const QStringView x = attributes.value(QLatin1String("x")); const QStringView y = attributes.value(QLatin1String("y")); //### editable and rotate not handled QSvgHandler::LengthType type; qreal nx = parseLength(x.toString(), type, handler); nx = convertToPixels(nx, true, type); qreal ny = parseLength(y.toString(), type, handler); ny = convertToPixels(ny, true, type); QSvgNode *text = new QSvgText(parent, QPointF(nx, ny)); return text; } static QSvgNode *createTextAreaNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { QSvgText *node = static_cast<QSvgText *>(createTextNode(parent, attributes, handler)); if (node) { QSvgHandler::LengthType type; qreal width = parseLength(attributes.value(QLatin1String("width")), type, handler); qreal height = parseLength(attributes.value(QLatin1String("height")), type, handler); node->setTextArea(QSizeF(width, height)); } return node; } static QSvgNode *createTspanNode(QSvgNode *parent, const QXmlStreamAttributes &, QSvgHandler *) { return new QSvgTspan(parent); } static bool parseTitleNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return true; } static QSvgNode *createUseNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *handler) { QString linkId = 
attributes.value(QLatin1String("xlink:href")).toString().remove(0, 1); const QStringView xStr = attributes.value(QLatin1String("x")); const QStringView yStr = attributes.value(QLatin1String("y")); QSvgStructureNode *group = nullptr; if (linkId.isEmpty()) linkId = attributes.value(QLatin1String("href")).toString().remove(0, 1); switch (parent->type()) { case QSvgNode::DOC: case QSvgNode::DEFS: case QSvgNode::G: case QSvgNode::SWITCH: group = static_cast<QSvgStructureNode*>(parent); break; default: break; } if (group) { QPointF pt; if (!xStr.isNull() || !yStr.isNull()) { QSvgHandler::LengthType type; qreal nx = parseLength(xStr.toString(), type, handler); nx = convertToPixels(nx, true, type); qreal ny = parseLength(yStr.toString(), type, handler); ny = convertToPixels(ny, true, type); pt = QPointF(nx, ny); } QSvgNode *link = group->scopeNode(linkId); if (link) { if (parent->isDescendantOf(link)) qCWarning(lcSvgHandler, "link #%s is recursive!", qPrintable(linkId)); return new QSvgUse(pt, parent, link); } //delay link resolving, link might have not been created yet return new QSvgUse(pt, parent, linkId); } qCWarning(lcSvgHandler, "<use> element %s in wrong context!", qPrintable(linkId)); return 0; } static QSvgNode *createVideoNode(QSvgNode *parent, const QXmlStreamAttributes &attributes, QSvgHandler *) { Q_UNUSED(parent); Q_UNUSED(attributes); return 0; } typedef QSvgNode *(*FactoryMethod)(QSvgNode *, const QXmlStreamAttributes &, QSvgHandler *); static FactoryMethod findGroupFactory(const QString &name) { if (name.isEmpty()) return 0; QStringView ref = QStringView{name}.mid(1, name.length() - 1); switch (name.at(0).unicode()) { case 'd': if (ref == QLatin1String("efs")) return createDefsNode; break; case 'g': if (ref.isEmpty()) return createGNode; break; case 's': if (ref == QLatin1String("vg")) return createSvgNode; if (ref == QLatin1String("witch")) return createSwitchNode; break; default: break; } return 0; } static FactoryMethod findGraphicsFactory(const QString &name) { if (name.isEmpty()) return 0; QStringView ref = QStringView{name}.mid(1, name.length() - 1); switch (name.at(0).unicode()) { case 'a': if (ref == QLatin1String("nimation")) return createAnimationNode; break; case 'c': if (ref == QLatin1String("ircle")) return createCircleNode; break; case 'e': if (ref == QLatin1String("llipse")) return createEllipseNode; break; case 'i': if (ref == QLatin1String("mage")) return createImageNode; break; case 'l': if (ref == QLatin1String("ine")) return createLineNode; break; case 'p': if (ref == QLatin1String("ath")) return createPathNode; if (ref == QLatin1String("olygon")) return createPolygonNode; if (ref == QLatin1String("olyline")) return createPolylineNode; break; case 'r': if (ref == QLatin1String("ect")) return createRectNode; break; case 't': if (ref == QLatin1String("ext")) return createTextNode; if (ref == QLatin1String("extArea")) return createTextAreaNode; if (ref == QLatin1String("span")) return createTspanNode; break; case 'u': if (ref == QLatin1String("se")) return createUseNode; break; case 'v': if (ref == QLatin1String("ideo")) return createVideoNode; break; default: break; } return 0; } typedef bool (*ParseMethod)(QSvgNode *, const QXmlStreamAttributes &, QSvgHandler *); static ParseMethod findUtilFactory(const QString &name) { if (name.isEmpty()) return 0; QStringView ref = QStringView{name}.mid(1, name.length() - 1); switch (name.at(0).unicode()) { case 'a': if (ref.isEmpty()) return parseAnchorNode; if (ref == QLatin1String("nimate")) return parseAnimateNode; if (ref 
== QLatin1String("nimateColor")) return parseAnimateColorNode; if (ref == QLatin1String("nimateMotion")) return parseAimateMotionNode; if (ref == QLatin1String("nimateTransform")) return parseAnimateTransformNode; if (ref == QLatin1String("udio")) return parseAudioNode; break; case 'd': if (ref == QLatin1String("esc")) return parseDescNode; if (ref == QLatin1String("iscard")) return parseDiscardNode; break; case 'f': if (ref == QLatin1String("oreignObject")) return parseForeignObjectNode; break; case 'h': if (ref == QLatin1String("andler")) return parseHandlerNode; if (ref == QLatin1String("kern")) return parseHkernNode; break; case 'm': if (ref == QLatin1String("etadata")) return parseMetadataNode; if (ref == QLatin1String("path")) return parseMpathNode; break; case 'p': if (ref == QLatin1String("refetch")) return parsePrefetchNode; break; case 's': if (ref == QLatin1String("cript")) return parseScriptNode; if (ref == QLatin1String("et")) return parseSetNode; if (ref == QLatin1String("tyle")) return parseStyleNode; break; case 't': if (ref == QLatin1String("break")) return parseTbreakNode; if (ref == QLatin1String("itle")) return parseTitleNode; break; default: break; } return 0; } typedef QSvgStyleProperty *(*StyleFactoryMethod)(QSvgNode *, const QXmlStreamAttributes &, QSvgHandler *); static StyleFactoryMethod findStyleFactoryMethod(const QString &name) { if (name.isEmpty()) return 0; QStringView ref = QStringView{name}.mid(1, name.length() - 1); switch (name.at(0).unicode()) { case 'f': if (ref == QLatin1String("ont")) return createFontNode; break; case 'l': if (ref == QLatin1String("inearGradient")) return createLinearGradientNode; break; case 'r': if (ref == QLatin1String("adialGradient")) return createRadialGradientNode; break; case 's': if (ref == QLatin1String("olidColor")) return createSolidColorNode; break; default: break; } return 0; } typedef bool (*StyleParseMethod)(QSvgStyleProperty *, const QXmlStreamAttributes &, QSvgHandler *); static StyleParseMethod findStyleUtilFactoryMethod(const QString &name) { if (name.isEmpty()) return 0; QStringView ref = QStringView{name}.mid(1, name.length() - 1); switch (name.at(0).unicode()) { case 'f': if (ref == QLatin1String("ont-face")) return parseFontFaceNode; if (ref == QLatin1String("ont-face-name")) return parseFontFaceNameNode; if (ref == QLatin1String("ont-face-src")) return parseFontFaceSrcNode; if (ref == QLatin1String("ont-face-uri")) return parseFontFaceUriNode; break; case 'g': if (ref == QLatin1String("lyph")) return parseGlyphNode; break; case 'm': if (ref == QLatin1String("issing-glyph")) return parseMissingGlyphNode; break; case 's': if (ref == QLatin1String("top")) return parseStopNode; break; default: break; } return 0; } QSvgHandler::QSvgHandler(QIODevice *device) : xml(new QXmlStreamReader(device)) , m_ownsReader(true) { init(); } QSvgHandler::QSvgHandler(const QByteArray &data) : xml(new QXmlStreamReader(data)) , m_ownsReader(true) { init(); } QSvgHandler::QSvgHandler(QXmlStreamReader *const reader) : xml(reader) , m_ownsReader(false) { init(); } void QSvgHandler::init() { m_doc = 0; m_style = 0; m_animEnd = 0; m_defaultCoords = LT_PX; m_defaultPen = QPen(Qt::black, 1, Qt::SolidLine, Qt::FlatCap, Qt::SvgMiterJoin); m_defaultPen.setMiterLimit(4); parse(); } // Having too many unfinished elements will cause a stack overflow // in the dtor of QSvgTinyDocument, see oss-fuzz issue 24000. 
static const int unfinishedElementsLimit = 2048; void QSvgHandler::parse() { xml->setNamespaceProcessing(false); #ifndef QT_NO_CSSPARSER m_selector = new QSvgStyleSelector; m_inStyle = false; #endif bool done = false; int remainingUnfinishedElements = unfinishedElementsLimit; while (!xml->atEnd() && !done) { switch (xml->readNext()) { case QXmlStreamReader::StartElement: // he we could/should verify the namespaces, and simply // call m_skipNodes(Unknown) if we don't know the // namespace. We do support http://www.w3.org/2000/svg // but also http://www.w3.org/2000/svg-20000303-stylable // And if the document uses an external dtd, the reported // namespaceUri is empty. The only possible strategy at // this point is to do what everyone else seems to do and // ignore the reported namespaceUri completely. if (remainingUnfinishedElements && startElement(xml->name().toString(), xml->attributes())) { --remainingUnfinishedElements; } else { delete m_doc; m_doc = 0; return; } break; case QXmlStreamReader::EndElement: endElement(xml->name()); ++remainingUnfinishedElements; // if we are using somebody else's qxmlstreamreader // we should not read until the end of the stream done = !m_ownsReader && (xml->name() == QLatin1String("svg")); break; case QXmlStreamReader::Characters: characters(xml->text()); break; case QXmlStreamReader::ProcessingInstruction: processingInstruction(xml->processingInstructionTarget().toString(), xml->processingInstructionData().toString()); break; default: break; } } resolveGradients(m_doc); resolveNodes(); } bool QSvgHandler::startElement(const QString &localName, const QXmlStreamAttributes &attributes) { QSvgNode *node = nullptr; pushColorCopy(); /* The xml:space attribute may appear on any element. We do * a lookup by the qualified name here, but this is namespace aware, since * the XML namespace can only be bound to prefix "xml." */ const QStringView xmlSpace(attributes.value(QLatin1String("xml:space"))); if (xmlSpace.isNull()) { // This element has no xml:space attribute. m_whitespaceMode.push(m_whitespaceMode.isEmpty() ? QSvgText::Default : m_whitespaceMode.top()); } else if (xmlSpace == QLatin1String("preserve")) { m_whitespaceMode.push(QSvgText::Preserve); } else if (xmlSpace == QLatin1String("default")) { m_whitespaceMode.push(QSvgText::Default); } else { const QByteArray msg = '"' + xmlSpace.toString().toLocal8Bit() + "\" is an invalid value for attribute xml:space. " "Valid values are \"preserve\" and \"default\"."; qCWarning(lcSvgHandler, "%s", prefixMessage(msg, xml).constData()); m_whitespaceMode.push(QSvgText::Default); } if (!m_doc && localName != QLatin1String("svg")) return false; if (FactoryMethod method = findGroupFactory(localName)) { //group node = method(m_doc ? 
m_nodes.top() : 0, attributes, this); Q_ASSERT(node); if (!m_doc) { Q_ASSERT(node->type() == QSvgNode::DOC); m_doc = static_cast<QSvgTinyDocument*>(node); } else { switch (m_nodes.top()->type()) { case QSvgNode::DOC: case QSvgNode::G: case QSvgNode::DEFS: case QSvgNode::SWITCH: { QSvgStructureNode *group = static_cast<QSvgStructureNode*>(m_nodes.top()); group->addChild(node, someId(attributes)); } break; default: const QByteArray msg = QByteArrayLiteral("Could not add child element to parent element because the types are incorrect."); qCWarning(lcSvgHandler, "%s", prefixMessage(msg, xml).constData()); delete node; node = 0; break; } } if (node) { parseCoreNode(node, attributes); #ifndef QT_NO_CSSPARSER cssStyleLookup(node, this, m_selector); #endif parseStyle(node, attributes, this); } } else if (FactoryMethod method = findGraphicsFactory(localName)) { //rendering element Q_ASSERT(!m_nodes.isEmpty()); node = method(m_nodes.top(), attributes, this); if (node) { switch (m_nodes.top()->type()) { case QSvgNode::DOC: case QSvgNode::G: case QSvgNode::DEFS: case QSvgNode::SWITCH: { if (node->type() == QSvgNode::TSPAN) { const QByteArray msg = QByteArrayLiteral("\'tspan\' element in wrong context."); qCWarning(lcSvgHandler, "%s", prefixMessage(msg, xml).constData()); delete node; node = 0; break; } QSvgStructureNode *group = static_cast<QSvgStructureNode*>(m_nodes.top()); group->addChild(node, someId(attributes)); } break; case QSvgNode::TEXT: case QSvgNode::TEXTAREA: if (node->type() == QSvgNode::TSPAN) { static_cast<QSvgText *>(m_nodes.top())->addTspan(static_cast<QSvgTspan *>(node)); } else { const QByteArray msg = QByteArrayLiteral("\'text\' or \'textArea\' element contains invalid element type."); qCWarning(lcSvgHandler, "%s", prefixMessage(msg, xml).constData()); delete node; node = 0; } break; default: const QByteArray msg = QByteArrayLiteral("Could not add child element to parent element because the types are incorrect."); qCWarning(lcSvgHandler, "%s", prefixMessage(msg, xml).constData()); delete node; node = 0; break; } if (node) { parseCoreNode(node, attributes); #ifndef QT_NO_CSSPARSER cssStyleLookup(node, this, m_selector); #endif parseStyle(node, attributes, this); if (node->type() == QSvgNode::TEXT || node->type() == QSvgNode::TEXTAREA) { static_cast<QSvgText *>(node)->setWhitespaceMode(m_whitespaceMode.top()); } else if (node->type() == QSvgNode::TSPAN) { static_cast<QSvgTspan *>(node)->setWhitespaceMode(m_whitespaceMode.top()); } else if (node->type() == QSvgNode::USE) { if (!static_cast<QSvgUse *>(node)->isResolved()) m_resolveNodes.append(node); } } } } else if (ParseMethod method = findUtilFactory(localName)) { Q_ASSERT(!m_nodes.isEmpty()); if (!method(m_nodes.top(), attributes, this)) qCWarning(lcSvgHandler, "%s", msgProblemParsing(localName, xml).constData()); } else if (StyleFactoryMethod method = findStyleFactoryMethod(localName)) { QSvgStyleProperty *prop = method(m_nodes.top(), attributes, this); if (prop) { m_style = prop; m_nodes.top()->appendStyleProperty(prop, someId(attributes)); } else { const QByteArray msg = QByteArrayLiteral("Could not parse node: ") + localName.toLocal8Bit(); qCWarning(lcSvgHandler, "%s", prefixMessage(msg, xml).constData()); } } else if (StyleParseMethod method = findStyleUtilFactoryMethod(localName)) { if (m_style) { if (!method(m_style, attributes, this)) qCWarning(lcSvgHandler, "%s", msgProblemParsing(localName, xml).constData()); } } else { //qCWarning(lcSvgHandler) <<"Skipping unknown element!"<<namespaceURI<<"::"<<localName; 
m_skipNodes.push(Unknown); return true; } if (node) { m_nodes.push(node); m_skipNodes.push(Graphics); } else { //qDebug()<<"Skipping "<<localName; m_skipNodes.push(Style); } return true; } bool QSvgHandler::endElement(const QStringView localName) { CurrentNode node = m_skipNodes.top(); m_skipNodes.pop(); m_whitespaceMode.pop(); popColor(); if (node == Unknown) { return true; } #ifdef QT_NO_CSSPARSER Q_UNUSED(localName); #else if (m_inStyle && localName == QLatin1String("style")) m_inStyle = false; #endif if (node == Graphics) m_nodes.pop(); else if (m_style && !m_skipNodes.isEmpty() && m_skipNodes.top() != Style) m_style = 0; return true; } void QSvgHandler::resolveGradients(QSvgNode *node, int nestedDepth) { if (!node || (node->type() != QSvgNode::DOC && node->type() != QSvgNode::G && node->type() != QSvgNode::DEFS && node->type() != QSvgNode::SWITCH)) { return; } QSvgStructureNode *structureNode = static_cast<QSvgStructureNode *>(node); const QList<QSvgNode *> ren = structureNode->renderers(); for (auto it = ren.begin(); it != ren.end(); ++it) { QSvgFillStyle *fill = static_cast<QSvgFillStyle *>((*it)->styleProperty(QSvgStyleProperty::FILL)); if (fill && !fill->isGradientResolved()) { QString id = fill->gradientId(); QSvgFillStyleProperty *style = structureNode->styleProperty(id); if (style) { fill->setFillStyle(style); } else { qCWarning(lcSvgHandler, "%s", msgCouldNotResolveProperty(id, xml).constData()); fill->setBrush(Qt::NoBrush); } } QSvgStrokeStyle *stroke = static_cast<QSvgStrokeStyle *>((*it)->styleProperty(QSvgStyleProperty::STROKE)); if (stroke && !stroke->isGradientResolved()) { QString id = stroke->gradientId(); QSvgFillStyleProperty *style = structureNode->styleProperty(id); if (style) { stroke->setStyle(style); } else { qCWarning(lcSvgHandler, "%s", msgCouldNotResolveProperty(id, xml).constData()); stroke->setStroke(Qt::NoBrush); } } if (nestedDepth < 2048) resolveGradients(*it, nestedDepth + 1); } } void QSvgHandler::resolveNodes() { for (QSvgNode *node : qAsConst(m_resolveNodes)) { if (!node || !node->parent() || node->type() != QSvgNode::USE) continue; QSvgUse *useNode = static_cast<QSvgUse *>(node); if (useNode->isResolved()) continue; QSvgNode::Type t = useNode->parent()->type(); if (!(t == QSvgNode::DOC || t == QSvgNode::DEFS || t == QSvgNode::G || t == QSvgNode::SWITCH)) continue; QSvgStructureNode *group = static_cast<QSvgStructureNode *>(useNode->parent()); QSvgNode *link = group->scopeNode(useNode->linkId()); if (!link) { qCWarning(lcSvgHandler, "link #%s is undefined!", qPrintable(useNode->linkId())); continue; } if (useNode->parent()->isDescendantOf(link)) qCWarning(lcSvgHandler, "link #%s is recursive!", qPrintable(useNode->linkId())); useNode->setLink(link); } m_resolveNodes.clear(); } bool QSvgHandler::characters(const QStringView str) { #ifndef QT_NO_CSSPARSER if (m_inStyle) { QString css = str.toString(); QCss::StyleSheet sheet; QCss::Parser(css).parse(&sheet); m_selector->styleSheets.append(sheet); return true; } #endif if (m_skipNodes.isEmpty() || m_skipNodes.top() == Unknown || m_nodes.isEmpty()) return true; if (m_nodes.top()->type() == QSvgNode::TEXT || m_nodes.top()->type() == QSvgNode::TEXTAREA) { static_cast<QSvgText*>(m_nodes.top())->addText(str.toString()); } else if (m_nodes.top()->type() == QSvgNode::TSPAN) { static_cast<QSvgTspan*>(m_nodes.top())->addText(str.toString()); } return true; } QIODevice *QSvgHandler::device() const { return xml->device(); } QSvgTinyDocument * QSvgHandler::document() const { return m_doc; } QSvgHandler::LengthType 
QSvgHandler::defaultCoordinateSystem() const { return m_defaultCoords; } void QSvgHandler::setDefaultCoordinateSystem(LengthType type) { m_defaultCoords = type; } void QSvgHandler::pushColor(const QColor &color) { m_colorStack.push(color); m_colorTagCount.push(1); } void QSvgHandler::pushColorCopy() { if (m_colorTagCount.count()) ++m_colorTagCount.top(); else pushColor(Qt::black); } void QSvgHandler::popColor() { if (m_colorTagCount.count()) { if (!--m_colorTagCount.top()) { m_colorStack.pop(); m_colorTagCount.pop(); } } } QColor QSvgHandler::currentColor() const { if (!m_colorStack.isEmpty()) return m_colorStack.top(); else return QColor(0, 0, 0); } #ifndef QT_NO_CSSPARSER void QSvgHandler::setInStyle(bool b) { m_inStyle = b; } bool QSvgHandler::inStyle() const { return m_inStyle; } QSvgStyleSelector * QSvgHandler::selector() const { return m_selector; } #endif // QT_NO_CSSPARSER bool QSvgHandler::processingInstruction(const QString &target, const QString &data) { #ifdef QT_NO_CSSPARSER Q_UNUSED(target); Q_UNUSED(data); #else if (target == QLatin1String("xml-stylesheet")) { QRegularExpression rx(QLatin1String("type=\\\"(.+)\\\""), QRegularExpression::InvertedGreedinessOption); QRegularExpressionMatchIterator iter = rx.globalMatch(data); bool isCss = false; while (iter.hasNext()) { QRegularExpressionMatch match = iter.next(); QString type = match.captured(1); if (type.toLower() == QLatin1String("text/css")) { isCss = true; } } if (isCss) { QRegularExpression rx(QLatin1String("href=\\\"(.+)\\\""), QRegularExpression::InvertedGreedinessOption); QRegularExpressionMatch match = rx.match(data); QString addr = match.captured(1); QFileInfo fi(addr); //qDebug()<<"External CSS file "<<fi.absoluteFilePath()<<fi.exists(); if (fi.exists()) { QFile file(fi.absoluteFilePath()); if (!file.open(QIODevice::ReadOnly | QIODevice::Text)) { return true; } QByteArray cssData = file.readAll(); QString css = QString::fromUtf8(cssData); QCss::StyleSheet sheet; QCss::Parser(css).parse(&sheet); m_selector->styleSheets.append(sheet); } } } #endif return true; } void QSvgHandler::setAnimPeriod(int start, int end) { Q_UNUSED(start); m_animEnd = qMax(end, m_animEnd); } int QSvgHandler::animationDuration() const { return m_animEnd; } QSvgHandler::~QSvgHandler() { #ifndef QT_NO_CSSPARSER delete m_selector; m_selector = 0; #endif if(m_ownsReader) delete xml; } QT_END_NAMESPACE
null
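The QSvgHandler code above guards against hostile or malformed documents in two places: the parse loop refuses to open more than unfinishedElementsLimit (2048) elements before the document is abandoned, and resolveGradients() stops recursing once a nesting depth of 2048 is reached. A minimal sketch of the same depth-limiting pattern follows; the Node type, function name, and limit are illustrative only and are not Qt API.

// Sketch of a depth-limited recursive traversal, in the spirit of
// QSvgHandler::resolveGradients(): recursion simply stops once a fixed
// nesting depth is reached, so a deeply nested document cannot drive the
// walk into a stack overflow.
#include <vector>

struct Node {
    std::vector<Node *> children;   // hypothetical tree node, for illustration
};

static void resolveRecursive(Node *node, int nestedDepth = 0)
{
    if (!node)
        return;
    // ... resolve the properties of 'node' here ...
    for (Node *child : node->children) {
        if (nestedDepth < 2048)      // same hard cap used by the handler above
            resolveRecursive(child, nestedDepth + 1);
    }
}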
299
CWE-787
CVE-2021-45931
/* * Copyright © 2012 Google, Inc. * * This is part of HarfBuzz, a text shaping library. * * Permission is hereby granted, without written agreement and without * license or royalty fees, to use, copy, modify, and distribute this * software and its documentation for any purpose, provided that the * above copyright notice and the following two paragraphs appear in * all copies of this software. * * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. * * Google Author(s): Behdad Esfahbod */ #include "hb-set.hh" /** * SECTION:hb-set * @title: hb-set * @short_description: Objects representing a set of integers * @include: hb.h * * Set objects represent a mathematical set of integer values. They are * used in non-shaping APIs to query certain sets of characters or glyphs, * or other integer values. **/ /** * hb_set_create: (Xconstructor) * * Creates a new, initially empty set. * * Return value: (transfer full): The new #hb_set_t * * Since: 0.9.2 **/ hb_set_t * hb_set_create () { hb_set_t *set; if (!(set = hb_object_create<hb_set_t> ())) return hb_set_get_empty (); set->init_shallow (); return set; } /** * hb_set_get_empty: * * Fetches the singleton empty #hb_set_t. * * Return value: (transfer full): The empty #hb_set_t * * Since: 0.9.2 **/ hb_set_t * hb_set_get_empty () { return const_cast<hb_set_t *> (&Null (hb_set_t)); } /** * hb_set_reference: (skip) * @set: A set * * Increases the reference count on a set. * * Return value: (transfer full): The set * * Since: 0.9.2 **/ hb_set_t * hb_set_reference (hb_set_t *set) { return hb_object_reference (set); } /** * hb_set_destroy: (skip) * @set: A set * * Decreases the reference count on a set. When * the reference count reaches zero, the set is * destroyed, freeing all memory. * * Since: 0.9.2 **/ void hb_set_destroy (hb_set_t *set) { if (!hb_object_destroy (set)) return; set->fini_shallow (); hb_free (set); } /** * hb_set_set_user_data: (skip) * @set: A set * @key: The user-data key to set * @data: A pointer to the user data to set * @destroy: (nullable): A callback to call when @data is not needed anymore * @replace: Whether to replace an existing data with the same key * * Attaches a user-data key/data pair to the specified set. * * Return value: %true if success, %false otherwise * * Since: 0.9.2 **/ hb_bool_t hb_set_set_user_data (hb_set_t *set, hb_user_data_key_t *key, void * data, hb_destroy_func_t destroy, hb_bool_t replace) { return hb_object_set_user_data (set, key, data, destroy, replace); } /** * hb_set_get_user_data: (skip) * @set: A set * @key: The user-data key to query * * Fetches the user data associated with the specified key, * attached to the specified set. 
* * Return value: (transfer none): A pointer to the user data * * Since: 0.9.2 **/ void * hb_set_get_user_data (hb_set_t *set, hb_user_data_key_t *key) { return hb_object_get_user_data (set, key); } /** * hb_set_allocation_successful: * @set: A set * * Tests whether memory allocation for a set was successful. * * Return value: %true if allocation succeeded, %false otherwise * * Since: 0.9.2 **/ hb_bool_t hb_set_allocation_successful (const hb_set_t *set) { return !set->in_error (); } /** * hb_set_copy: * @set: A set * * Allocate a copy of @set. * * Return value: Newly-allocated set. * * Since: 2.8.2 **/ hb_set_t * hb_set_copy (const hb_set_t *set) { hb_set_t *copy = hb_set_create (); copy->set (*set); return copy; } /** * hb_set_clear: * @set: A set * * Clears out the contents of a set. * * Since: 0.9.2 **/ void hb_set_clear (hb_set_t *set) { if (unlikely (hb_object_is_immutable (set))) return; set->clear (); } /** * hb_set_is_empty: * @set: a set. * * Tests whether a set is empty (contains no elements). * * Return value: %true if @set is empty * * Since: 0.9.7 **/ hb_bool_t hb_set_is_empty (const hb_set_t *set) { return set->is_empty (); } /** * hb_set_has: * @set: A set * @codepoint: The element to query * * Tests whether @codepoint belongs to @set. * * Return value: %true if @codepoint is in @set, %false otherwise * * Since: 0.9.2 **/ hb_bool_t hb_set_has (const hb_set_t *set, hb_codepoint_t codepoint) { return set->has (codepoint); } /** * hb_set_add: * @set: A set * @codepoint: The element to add to @set * * Adds @codepoint to @set. * * Since: 0.9.2 **/ void hb_set_add (hb_set_t *set, hb_codepoint_t codepoint) { /* Immutible-safe. */ set->add (codepoint); } /** * hb_set_add_range: * @set: A set * @first: The first element to add to @set * @last: The final element to add to @set * * Adds all of the elements from @first to @last * (inclusive) to @set. * * Since: 0.9.7 **/ void hb_set_add_range (hb_set_t *set, hb_codepoint_t first, hb_codepoint_t last) { /* Immutible-safe. */ set->add_range (first, last); } /** * hb_set_del: * @set: A set * @codepoint: Removes @codepoint from @set * * Removes @codepoint from @set. * * Since: 0.9.2 **/ void hb_set_del (hb_set_t *set, hb_codepoint_t codepoint) { /* Immutible-safe. */ set->del (codepoint); } /** * hb_set_del_range: * @set: A set * @first: The first element to remove from @set * @last: The final element to remove from @set * * Removes all of the elements from @first to @last * (inclusive) from @set. * * If @last is #HB_SET_VALUE_INVALID, then all values * greater than or equal to @first are removed. * * Since: 0.9.7 **/ void hb_set_del_range (hb_set_t *set, hb_codepoint_t first, hb_codepoint_t last) { /* Immutible-safe. */ set->del_range (first, last); } /** * hb_set_is_equal: * @set: A set * @other: Another set * * Tests whether @set and @other are equal (contain the same * elements). * * Return value: %true if the two sets are equal, %false otherwise. * * Since: 0.9.7 **/ hb_bool_t hb_set_is_equal (const hb_set_t *set, const hb_set_t *other) { return set->is_equal (*other); } /** * hb_set_is_subset: * @set: A set * @larger_set: Another set * * Tests whether @set is a subset of @larger_set. * * Return value: %true if the @set is a subset of (or equal to) @larger_set, %false otherwise. * * Since: 1.8.1 **/ hb_bool_t hb_set_is_subset (const hb_set_t *set, const hb_set_t *larger_set) { return set->is_subset (*larger_set); } /** * hb_set_set: * @set: A set * @other: Another set * * Makes the contents of @set equal to the contents of @other. 
* * Since: 0.9.2 **/ void hb_set_set (hb_set_t *set, const hb_set_t *other) { if (unlikely (hb_object_is_immutable (set))) return; set->set (*other); } /** * hb_set_union: * @set: A set * @other: Another set * * Makes @set the union of @set and @other. * * Since: 0.9.2 **/ void hb_set_union (hb_set_t *set, const hb_set_t *other) { if (unlikely (hb_object_is_immutable (set))) return; set->union_ (*other); } /** * hb_set_intersect: * @set: A set * @other: Another set * * Makes @set the intersection of @set and @other. * * Since: 0.9.2 **/ void hb_set_intersect (hb_set_t *set, const hb_set_t *other) { if (unlikely (hb_object_is_immutable (set))) return; set->intersect (*other); } /** * hb_set_subtract: * @set: A set * @other: Another set * * Subtracts the contents of @other from @set. * * Since: 0.9.2 **/ void hb_set_subtract (hb_set_t *set, const hb_set_t *other) { if (unlikely (hb_object_is_immutable (set))) return; set->subtract (*other); } /** * hb_set_symmetric_difference: * @set: A set * @other: Another set * * Makes @set the symmetric difference of @set * and @other. * * Since: 0.9.2 **/ void hb_set_symmetric_difference (hb_set_t *set, const hb_set_t *other) { if (unlikely (hb_object_is_immutable (set))) return; set->symmetric_difference (*other); } /** * hb_set_invert: * @set: A set * * Inverts the contents of @set. * * Since: 3.0.0 **/ void hb_set_invert (hb_set_t *set) { if (unlikely (hb_object_is_immutable (set))) return; set->invert (); } /** * hb_set_get_population: * @set: A set * * Returns the number of elements in the set. * * Return value: The population of @set * * Since: 0.9.7 **/ unsigned int hb_set_get_population (const hb_set_t *set) { return set->get_population (); } /** * hb_set_get_min: * @set: A set * * Finds the smallest element in the set. * * Return value: minimum of @set, or #HB_SET_VALUE_INVALID if @set is empty. * * Since: 0.9.7 **/ hb_codepoint_t hb_set_get_min (const hb_set_t *set) { return set->get_min (); } /** * hb_set_get_max: * @set: A set * * Finds the largest element in the set. * * Return value: maximum of @set, or #HB_SET_VALUE_INVALID if @set is empty. * * Since: 0.9.7 **/ hb_codepoint_t hb_set_get_max (const hb_set_t *set) { return set->get_max (); } /** * hb_set_next: * @set: A set * @codepoint: (inout): Input = Code point to query * Output = Code point retrieved * * Fetches the next element in @set that is greater than current value of @codepoint. * * Set @codepoint to #HB_SET_VALUE_INVALID to get started. * * Return value: %true if there was a next value, %false otherwise * * Since: 0.9.2 **/ hb_bool_t hb_set_next (const hb_set_t *set, hb_codepoint_t *codepoint) { return set->next (codepoint); } /** * hb_set_previous: * @set: A set * @codepoint: (inout): Input = Code point to query * Output = Code point retrieved * * Fetches the previous element in @set that is lower than current value of @codepoint. * * Set @codepoint to #HB_SET_VALUE_INVALID to get started. * * Return value: %true if there was a previous value, %false otherwise * * Since: 1.8.0 **/ hb_bool_t hb_set_previous (const hb_set_t *set, hb_codepoint_t *codepoint) { return set->previous (codepoint); } /** * hb_set_next_range: * @set: A set * @first: (out): The first code point in the range * @last: (inout): Input = The current last code point in the range * Output = The last code point in the range * * Fetches the next consecutive range of elements in @set that * are greater than current value of @last. * * Set @last to #HB_SET_VALUE_INVALID to get started. 
* * Return value: %true if there was a next range, %false otherwise * * Since: 0.9.7 **/ hb_bool_t hb_set_next_range (const hb_set_t *set, hb_codepoint_t *first, hb_codepoint_t *last) { return set->next_range (first, last); } /** * hb_set_previous_range: * @set: A set * @first: (inout): Input = The current first code point in the range * Output = The first code point in the range * @last: (out): The last code point in the range * * Fetches the previous consecutive range of elements in @set that * are greater than current value of @last. * * Set @first to #HB_SET_VALUE_INVALID to get started. * * Return value: %true if there was a previous range, %false otherwise * * Since: 1.8.0 **/ hb_bool_t hb_set_previous_range (const hb_set_t *set, hb_codepoint_t *first, hb_codepoint_t *last) { return set->previous_range (first, last); }
null
/* * Copyright © 2012 Google, Inc. * * This is part of HarfBuzz, a text shaping library. * * Permission is hereby granted, without written agreement and without * license or royalty fees, to use, copy, modify, and distribute this * software and its documentation for any purpose, provided that the * above copyright notice and the following two paragraphs appear in * all copies of this software. * * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. * * Google Author(s): Behdad Esfahbod */ #include "hb-set.hh" /** * SECTION:hb-set * @title: hb-set * @short_description: Objects representing a set of integers * @include: hb.h * * Set objects represent a mathematical set of integer values. They are * used in non-shaping APIs to query certain sets of characters or glyphs, * or other integer values. **/ /** * hb_set_create: (Xconstructor) * * Creates a new, initially empty set. * * Return value: (transfer full): The new #hb_set_t * * Since: 0.9.2 **/ hb_set_t * hb_set_create () { hb_set_t *set; if (!(set = hb_object_create<hb_set_t> ())) return hb_set_get_empty (); set->init_shallow (); return set; } /** * hb_set_get_empty: * * Fetches the singleton empty #hb_set_t. * * Return value: (transfer full): The empty #hb_set_t * * Since: 0.9.2 **/ hb_set_t * hb_set_get_empty () { return const_cast<hb_set_t *> (&Null (hb_set_t)); } /** * hb_set_reference: (skip) * @set: A set * * Increases the reference count on a set. * * Return value: (transfer full): The set * * Since: 0.9.2 **/ hb_set_t * hb_set_reference (hb_set_t *set) { return hb_object_reference (set); } /** * hb_set_destroy: (skip) * @set: A set * * Decreases the reference count on a set. When * the reference count reaches zero, the set is * destroyed, freeing all memory. * * Since: 0.9.2 **/ void hb_set_destroy (hb_set_t *set) { if (!hb_object_destroy (set)) return; set->fini_shallow (); hb_free (set); } /** * hb_set_set_user_data: (skip) * @set: A set * @key: The user-data key to set * @data: A pointer to the user data to set * @destroy: (nullable): A callback to call when @data is not needed anymore * @replace: Whether to replace an existing data with the same key * * Attaches a user-data key/data pair to the specified set. * * Return value: %true if success, %false otherwise * * Since: 0.9.2 **/ hb_bool_t hb_set_set_user_data (hb_set_t *set, hb_user_data_key_t *key, void * data, hb_destroy_func_t destroy, hb_bool_t replace) { return hb_object_set_user_data (set, key, data, destroy, replace); } /** * hb_set_get_user_data: (skip) * @set: A set * @key: The user-data key to query * * Fetches the user data associated with the specified key, * attached to the specified set. 
* * Return value: (transfer none): A pointer to the user data * * Since: 0.9.2 **/ void * hb_set_get_user_data (hb_set_t *set, hb_user_data_key_t *key) { return hb_object_get_user_data (set, key); } /** * hb_set_allocation_successful: * @set: A set * * Tests whether memory allocation for a set was successful. * * Return value: %true if allocation succeeded, %false otherwise * * Since: 0.9.2 **/ hb_bool_t hb_set_allocation_successful (const hb_set_t *set) { return !set->in_error (); } /** * hb_set_copy: * @set: A set * * Allocate a copy of @set. * * Return value: Newly-allocated set. * * Since: 2.8.2 **/ hb_set_t * hb_set_copy (const hb_set_t *set) { hb_set_t *copy = hb_set_create (); copy->set (*set); return copy; } /** * hb_set_clear: * @set: A set * * Clears out the contents of a set. * * Since: 0.9.2 **/ void hb_set_clear (hb_set_t *set) { /* Immutible-safe. */ set->clear (); } /** * hb_set_is_empty: * @set: a set. * * Tests whether a set is empty (contains no elements). * * Return value: %true if @set is empty * * Since: 0.9.7 **/ hb_bool_t hb_set_is_empty (const hb_set_t *set) { return set->is_empty (); } /** * hb_set_has: * @set: A set * @codepoint: The element to query * * Tests whether @codepoint belongs to @set. * * Return value: %true if @codepoint is in @set, %false otherwise * * Since: 0.9.2 **/ hb_bool_t hb_set_has (const hb_set_t *set, hb_codepoint_t codepoint) { return set->has (codepoint); } /** * hb_set_add: * @set: A set * @codepoint: The element to add to @set * * Adds @codepoint to @set. * * Since: 0.9.2 **/ void hb_set_add (hb_set_t *set, hb_codepoint_t codepoint) { /* Immutible-safe. */ set->add (codepoint); } /** * hb_set_add_range: * @set: A set * @first: The first element to add to @set * @last: The final element to add to @set * * Adds all of the elements from @first to @last * (inclusive) to @set. * * Since: 0.9.7 **/ void hb_set_add_range (hb_set_t *set, hb_codepoint_t first, hb_codepoint_t last) { /* Immutible-safe. */ set->add_range (first, last); } /** * hb_set_del: * @set: A set * @codepoint: Removes @codepoint from @set * * Removes @codepoint from @set. * * Since: 0.9.2 **/ void hb_set_del (hb_set_t *set, hb_codepoint_t codepoint) { /* Immutible-safe. */ set->del (codepoint); } /** * hb_set_del_range: * @set: A set * @first: The first element to remove from @set * @last: The final element to remove from @set * * Removes all of the elements from @first to @last * (inclusive) from @set. * * If @last is #HB_SET_VALUE_INVALID, then all values * greater than or equal to @first are removed. * * Since: 0.9.7 **/ void hb_set_del_range (hb_set_t *set, hb_codepoint_t first, hb_codepoint_t last) { /* Immutible-safe. */ set->del_range (first, last); } /** * hb_set_is_equal: * @set: A set * @other: Another set * * Tests whether @set and @other are equal (contain the same * elements). * * Return value: %true if the two sets are equal, %false otherwise. * * Since: 0.9.7 **/ hb_bool_t hb_set_is_equal (const hb_set_t *set, const hb_set_t *other) { return set->is_equal (*other); } /** * hb_set_is_subset: * @set: A set * @larger_set: Another set * * Tests whether @set is a subset of @larger_set. * * Return value: %true if the @set is a subset of (or equal to) @larger_set, %false otherwise. * * Since: 1.8.1 **/ hb_bool_t hb_set_is_subset (const hb_set_t *set, const hb_set_t *larger_set) { return set->is_subset (*larger_set); } /** * hb_set_set: * @set: A set * @other: Another set * * Makes the contents of @set equal to the contents of @other. 
* * Since: 0.9.2 **/ void hb_set_set (hb_set_t *set, const hb_set_t *other) { /* Immutible-safe. */ set->set (*other); } /** * hb_set_union: * @set: A set * @other: Another set * * Makes @set the union of @set and @other. * * Since: 0.9.2 **/ void hb_set_union (hb_set_t *set, const hb_set_t *other) { /* Immutible-safe. */ set->union_ (*other); } /** * hb_set_intersect: * @set: A set * @other: Another set * * Makes @set the intersection of @set and @other. * * Since: 0.9.2 **/ void hb_set_intersect (hb_set_t *set, const hb_set_t *other) { /* Immutible-safe. */ set->intersect (*other); } /** * hb_set_subtract: * @set: A set * @other: Another set * * Subtracts the contents of @other from @set. * * Since: 0.9.2 **/ void hb_set_subtract (hb_set_t *set, const hb_set_t *other) { /* Immutible-safe. */ set->subtract (*other); } /** * hb_set_symmetric_difference: * @set: A set * @other: Another set * * Makes @set the symmetric difference of @set * and @other. * * Since: 0.9.2 **/ void hb_set_symmetric_difference (hb_set_t *set, const hb_set_t *other) { /* Immutible-safe. */ set->symmetric_difference (*other); } /** * hb_set_invert: * @set: A set * * Inverts the contents of @set. * * Since: 3.0.0 **/ void hb_set_invert (hb_set_t *set) { /* Immutible-safe. */ set->invert (); } /** * hb_set_get_population: * @set: A set * * Returns the number of elements in the set. * * Return value: The population of @set * * Since: 0.9.7 **/ unsigned int hb_set_get_population (const hb_set_t *set) { return set->get_population (); } /** * hb_set_get_min: * @set: A set * * Finds the smallest element in the set. * * Return value: minimum of @set, or #HB_SET_VALUE_INVALID if @set is empty. * * Since: 0.9.7 **/ hb_codepoint_t hb_set_get_min (const hb_set_t *set) { return set->get_min (); } /** * hb_set_get_max: * @set: A set * * Finds the largest element in the set. * * Return value: maximum of @set, or #HB_SET_VALUE_INVALID if @set is empty. * * Since: 0.9.7 **/ hb_codepoint_t hb_set_get_max (const hb_set_t *set) { return set->get_max (); } /** * hb_set_next: * @set: A set * @codepoint: (inout): Input = Code point to query * Output = Code point retrieved * * Fetches the next element in @set that is greater than current value of @codepoint. * * Set @codepoint to #HB_SET_VALUE_INVALID to get started. * * Return value: %true if there was a next value, %false otherwise * * Since: 0.9.2 **/ hb_bool_t hb_set_next (const hb_set_t *set, hb_codepoint_t *codepoint) { return set->next (codepoint); } /** * hb_set_previous: * @set: A set * @codepoint: (inout): Input = Code point to query * Output = Code point retrieved * * Fetches the previous element in @set that is lower than current value of @codepoint. * * Set @codepoint to #HB_SET_VALUE_INVALID to get started. * * Return value: %true if there was a previous value, %false otherwise * * Since: 1.8.0 **/ hb_bool_t hb_set_previous (const hb_set_t *set, hb_codepoint_t *codepoint) { return set->previous (codepoint); } /** * hb_set_next_range: * @set: A set * @first: (out): The first code point in the range * @last: (inout): Input = The current last code point in the range * Output = The last code point in the range * * Fetches the next consecutive range of elements in @set that * are greater than current value of @last. * * Set @last to #HB_SET_VALUE_INVALID to get started. 
* * Return value: %true if there was a next range, %false otherwise * * Since: 0.9.7 **/ hb_bool_t hb_set_next_range (const hb_set_t *set, hb_codepoint_t *first, hb_codepoint_t *last) { return set->next_range (first, last); } /** * hb_set_previous_range: * @set: A set * @first: (inout): Input = The current first code point in the range * Output = The first code point in the range * @last: (out): The last code point in the range * * Fetches the previous consecutive range of elements in @set that * are greater than current value of @last. * * Set @first to #HB_SET_VALUE_INVALID to get started. * * Return value: %true if there was a previous range, %false otherwise * * Since: 1.8.0 **/ hb_bool_t hb_set_previous_range (const hb_set_t *set, hb_codepoint_t *first, hb_codepoint_t *last) { return set->previous_range (first, last); }
null
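Both copies of the hb-set wrapper above expose the same public API; the doc comments describe the iteration protocol (seed the cursor with HB_SET_VALUE_INVALID and call hb_set_next() until it returns false). A minimal usage sketch, assuming only the hb.h entry points documented in those comments, compiled as C++:

// Hypothetical caller of the hb_set_t API shown above: create a set,
// populate it, iterate it, and release it.
#include <hb.h>
#include <cstdio>

int main()
{
    hb_set_t *set = hb_set_create();

    hb_set_add(set, 'a');               // single element
    hb_set_add_range(set, 0x30, 0x39);  // '0'..'9', inclusive

    // Iteration starts from HB_SET_VALUE_INVALID, as the hb_set_next() docs describe.
    hb_codepoint_t cp = HB_SET_VALUE_INVALID;
    while (hb_set_next(set, &cp))
        std::printf("U+%04X\n", cp);

    std::printf("population: %u\n", hb_set_get_population(set));

    hb_set_destroy(set);
    return 0;
}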